2 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
3 * Copyright (c) 2015-2016 Alexander Motin <mav@FreeBSD.org>
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/linker_set.h>
37 #include <sys/ioctl.h>
40 #include <sys/endian.h>
52 #include <pthread_np.h>
61 #define DEF_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
62 #define MAX_PORTS 32 /* AHCI supports 32 ports */
64 #define PxSIG_ATA 0x00000101 /* ATA drive */
65 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
68 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
69 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
70 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
71 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
72 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
73 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
74 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
75 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
81 #define TEST_UNIT_READY 0x00
82 #define REQUEST_SENSE 0x03
84 #define START_STOP_UNIT 0x1B
85 #define PREVENT_ALLOW 0x1E
86 #define READ_CAPACITY 0x25
88 #define POSITION_TO_ELEMENT 0x2B
90 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
91 #define MODE_SENSE_10 0x5A
92 #define REPORT_LUNS 0xA0
97 * SCSI mode page codes
99 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
100 #define MODEPAGE_CD_CAPABILITIES 0x2A
105 #define ATA_SF_ENAB_SATA_SF 0x10
106 #define ATA_SATA_SF_AN 0x05
107 #define ATA_SF_DIS_SATA_SF 0x90
114 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
116 #define DPRINTF(format, arg...)
118 #define WPRINTF(format, arg...) printf(format, ##arg)
121 struct blockif_req io_req;
122 struct ahci_port *io_pr;
123 STAILQ_ENTRY(ahci_ioreq) io_flist;
124 TAILQ_ENTRY(ahci_ioreq) io_blist;
133 struct blockif_ctxt *bctx;
134 struct pci_ahci_softc *pr_sc;
143 uint8_t err_cfis[20];
170 struct ahci_ioreq *ioreq;
172 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
173 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
176 struct ahci_cmd_hdr {
181 uint32_t reserved[4];
184 struct ahci_prdt_entry {
187 #define DBCMASK 0x3fffff
191 struct pci_ahci_softc {
192 struct pci_devinst *asc_pi;
207 struct ahci_port port[MAX_PORTS];
209 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
211 static void ahci_handle_port(struct ahci_port *p);
/*
 * Convert a CD logical block address into minute/second(/frame) form
 * (75 frames per second, 60 seconds per minute) for TOC reporting.
 * NOTE(review): the embedded line numbers jump (213 -> 216), so this
 * extraction omits lines (braces and presumably the frame byte,
 * buf[2]); verify against the complete source.
 */
213 static inline void lba_to_msf(uint8_t *buf, int lba)
216 buf[0] = (lba / 75) / 60; /* minutes */
217 buf[1] = (lba / 75) % 60; /* seconds */
222 * generate HBA intr depending on whether or not ports within
223 * the controller have an interrupt pending.
/*
 * Raise or lower the controller-level interrupt based on per-port
 * pending status.  Visible logic: if any interrupt status is pending
 * (sc->is) and global interrupts are enabled (AHCI_GHC_IE), either
 * pulse an MSI (edge semantics) or assert the legacy pin if it is not
 * already asserted; otherwise deassert a previously-asserted pin.
 * NOTE(review): extraction omits lines (embedded numbering jumps);
 * the per-port aggregation inside the loop is not fully visible.
 */
226 ahci_generate_intr(struct pci_ahci_softc *sc)
228 struct pci_devinst *pi;
233 for (i = 0; i < sc->ports; i++) {
234 struct ahci_port *pr;
240 DPRINTF("%s %x\n", __func__, sc->is);
242 if (sc->is && (sc->ghc & AHCI_GHC_IE)) {
243 if (pci_msi_enabled(pi)) {
245 * Generate an MSI interrupt on every edge
247 pci_generate_msi(pi, 0);
248 } else if (!sc->lintr) {
250 * Only generate a pin-based interrupt if one wasn't
254 pci_lintr_assert(pi);
256 } else if (sc->lintr) {
258 * No interrupts: deassert pin-based signal if it had
261 pci_lintr_deassert(pi);
/*
 * Deliver a completed FIS to the guest: copy it into the port's
 * received-FIS area (p->rfis) at a type-specific offset and set the
 * matching port interrupt bit.  Bails out early when the guest has no
 * FIS buffer or FIS receive (FRE) is disabled.  The interrupt bit in
 * fis[1] (bit 6) gates DHR/SDB/PS interrupt generation; an error
 * status (ATA_S_ERROR in fis[2]) additionally raises TFE.
 * NOTE(review): extraction omits lines (offset/len assignments per
 * case are not visible); verify against the complete source.
 */
267 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
269 int offset, len, irq;
271 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
275 case FIS_TYPE_REGD2H:
278 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
280 case FIS_TYPE_SETDEVBITS:
283 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
285 case FIS_TYPE_PIOSETUP:
288 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
291 WPRINTF("unsupported fis type %d\n", ft);
294 if (fis[2] & ATA_S_ERROR) {
296 irq |= AHCI_P_IX_TFE;
298 memcpy(p->rfis + offset, fis, len);
301 ahci_generate_intr(p->pr_sc);
/*
 * Emit a zeroed PIO Setup FIS to the guest (used before PIO data-in
 * transfers such as IDENTIFY).  NOTE(review): extraction omits lines
 * (the fis array declaration is not visible).
 */
306 ahci_write_fis_piosetup(struct ahci_port *p)
310 memset(fis, 0, sizeof(fis));
311 fis[0] = FIS_TYPE_PIOSETUP;
312 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Emit a Set Device Bits FIS for NCQ completion of 'slot'.  On error
 * status, latch the failing slot/status/error and a copy of the
 * command FIS into p->err_cfis (consumed later by READ LOG EXT page
 * 0x10).  The SActive bitmap dword in the FIS carries the completed
 * slot, which is also cleared from p->sact.
 * NOTE(review): extraction omits lines (embedded numbering jumps);
 * verify against the complete source.
 */
316 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
321 error = (tfd >> 8) & 0xff;
323 memset(fis, 0, sizeof(fis));
324 fis[0] = FIS_TYPE_SETDEVBITS;
328 if (fis[2] & ATA_S_ERROR) {
329 p->err_cfis[0] = slot;
330 p->err_cfis[2] = tfd;
331 p->err_cfis[3] = error;
332 memcpy(&p->err_cfis[4], cfis + 4, 16);
334 *(uint32_t *)(fis + 4) = (1 << slot);
335 p->sact &= ~(1 << slot);
339 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Emit a Register Device-to-Host FIS completing non-NCQ command
 * 'slot' with taskfile value 'tfd' (status in low byte, error in
 * high byte).  On error, latch details into p->err_cfis with marker
 * 0x80 (non-queued command).  Clears the slot's bit in the command
 * issue register (p->ci).
 * NOTE(review): extraction omits lines (fis field population between
 * original lines 350 and 364 is not visible); verify against source.
 */
343 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
348 error = (tfd >> 8) & 0xff;
349 memset(fis, 0, sizeof(fis));
350 fis[0] = FIS_TYPE_REGD2H;
364 if (fis[2] & ATA_S_ERROR) {
365 p->err_cfis[0] = 0x80;
366 p->err_cfis[2] = tfd & 0xff;
367 p->err_cfis[3] = error;
368 memcpy(&p->err_cfis[4], cfis + 4, 16);
370 p->ci &= ~(1 << slot);
372 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Emit the initial (no-interrupt, no-error) D2H FIS that acknowledges
 * acceptance of an NCQ command, releasing the slot in p->ci while the
 * actual completion is reported later via a Set Device Bits FIS.
 * NOTE(review): extraction omits lines (fis array declaration not
 * visible).
 */
376 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
380 p->tfd = ATA_S_READY | ATA_S_DSC;
381 memset(fis, 0, sizeof(fis));
382 fis[0] = FIS_TYPE_REGD2H;
383 fis[1] = 0; /* No interrupt */
384 fis[2] = p->tfd; /* Status */
385 fis[3] = 0; /* No error */
386 p->ci &= ~(1 << slot);
387 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Emit the D2H signature FIS a device sends after reset.
 * NOTE(review): extraction omits lines (the signature-byte fills
 * between original lines 396 and 404 are not visible).
 */
391 ahci_write_reset_fis_d2h(struct ahci_port *p)
395 memset(fis, 0, sizeof(fis));
396 fis[0] = FIS_TYPE_REGD2H;
404 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Once command processing has been stopped (ST clear) and no requests
 * remain in flight, drop the "command list running" bit and current
 * command slot.  NOTE(review): extraction omits lines (the clearing
 * of ci/sact implied by the comment is not visible here).
 */
408 ahci_check_stopped(struct ahci_port *p)
411 * If we are no longer processing the command list and nothing
412 * is in-flight, clear the running bit, the current command
413 * slot, the command issue and active bits.
415 if (!(p->cmd & AHCI_P_CMD_ST)) {
416 if (p->pending == 0) {
418 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop a port: walk the busy list attempting to cancel each
 * outstanding blockif request, then retire the slot (clear sact for
 * NCQ commands, ci otherwise, and the pending bit) and recycle the
 * ioreq to the free list.  Caller must hold the softc mutex (asserted
 * via pthread_mutex_isowned_np).
 * NOTE(review): extraction omits lines (where 'cfis'/'slot' are
 * derived from 'aior', and handling of a failed cancel, are not
 * visible); verify against the complete source.
 */
427 ahci_port_stop(struct ahci_port *p)
429 struct ahci_ioreq *aior;
435 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
437 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
439 * Try to cancel the outstanding blockif request.
441 error = blockif_cancel(p->bctx, &aior->io_req);
447 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
448 cfis[2] == ATA_READ_FPDMA_QUEUED ||
449 cfis[2] == ATA_SEND_FPDMA_QUEUED)
453 p->sact &= ~(1 << slot);
455 p->ci &= ~(1 << slot);
458 * This command is now done.
460 p->pending &= ~(1 << slot);
463 * Delete the blockif request from the busy list
465 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
468 * Move the blockif request back to the free list
470 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
473 ahci_check_stopped(p);
/*
 * Reset per-port device state.  With no backing storage the port
 * reports "no device" (SStatus) and an all-ones signature; otherwise
 * it reports PHY online, a link speed taken from SControl (defaulting
 * to Gen3), a busy-ish initial taskfile, and — for ATAPI — the ATAPI
 * signature, followed by the post-reset D2H signature FIS.
 * NOTE(review): extraction omits lines (the bctx test and the
 * ATA-vs-ATAPI branch structure are not fully visible).
 */
477 ahci_port_reset(struct ahci_port *pr)
481 pr->xfermode = ATA_UDMA6;
482 pr->mult_sectors = 128;
485 pr->ssts = ATA_SS_DET_NO_DEVICE;
486 pr->sig = 0xFFFFFFFF;
490 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
491 if (pr->sctl & ATA_SC_SPD_MASK)
492 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
494 pr->ssts |= ATA_SS_SPD_GEN3;
495 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
498 pr->tfd |= ATA_S_READY;
500 pr->sig = PxSIG_ATAPI;
501 ahci_write_reset_fis_d2h(pr);
/*
 * Controller-level (HBA) reset: re-enable AHCI mode, drop any
 * asserted legacy interrupt, and reset every port to its power-on
 * command state (spin-up + power-on, plus "cold presence" when a
 * backing device exists).
 * NOTE(review): extraction omits lines (embedded numbering jumps).
 */
505 ahci_reset(struct pci_ahci_softc *sc)
509 sc->ghc = AHCI_GHC_AE;
513 pci_lintr_deassert(sc->asc_pi);
517 for (i = 0; i < sc->ports; i++) {
520 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
521 if (sc->port[i].bctx)
522 sc->port[i].cmd |= AHCI_P_CMD_CPS;
523 sc->port[i].sctl = 0;
524 ahci_port_reset(&sc->port[i]);
/*
 * Copy a C string into an IDENTIFY-data field with the byte-pair swap
 * (dest[i ^ 1]) that ATA IDENTIFY word strings require.
 * NOTE(review): extraction omits lines (space padding when src is
 * exhausted is presumably handled in the missing body; confirm).
 */
529 ata_string(uint8_t *dest, const char *src, int len)
533 for (i = 0; i < len; i++) {
535 dest[i ^ 1] = *src++;
/*
 * Copy a C string into a fixed-width SCSI/ATAPI field.
 * NOTE(review): only the signature and loop header survived this
 * extraction — the copy/padding body is missing; do not infer
 * behavior from this fragment.
 */
542 atapi_string(uint8_t *dest, const char *src, int len)
546 for (i = 0; i < len; i++) {
/*
 * Translate the guest PRDT into breq->br_iov, skipping the portion
 * already transferred (aior->done) and clamping to BLOCKIF_IOV_MAX
 * entries; when clamped, the I/O is rounded down to a sector boundary
 * and aior->more is set so the remainder is reissued on completion.
 * NOTE(review): extraction omits lines (loop increment, 'skip'/'todo'
 * bookkeeping, and several branch bodies are not visible); verify
 * against the complete source.
 */
555 * Build up the iovec based on the PRDT, 'done' and 'len'.
558 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
559 struct ahci_prdt_entry *prdt, uint16_t prdtl)
561 struct blockif_req *breq = &aior->io_req;
562 int i, j, skip, todo, left, extra;
565 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
567 left = aior->len - aior->done;
569 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
571 dbcsz = (prdt->dbc & DBCMASK) + 1;
572 /* Skip already done part of the PRDT */
580 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
581 prdt->dba + skip, dbcsz);
582 breq->br_iov[j].iov_len = dbcsz;
589 /* If we got limited by IOV length, round I/O down to sector size. */
590 if (j == BLOCKIF_IOV_MAX) {
591 extra = todo % blockif_sectsz(p->bctx);
595 if (breq->br_iov[j - 1].iov_len > extra) {
596 breq->br_iov[j - 1].iov_len -= extra;
599 extra -= breq->br_iov[j - 1].iov_len;
605 breq->br_resid = todo;
607 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Handle an ATA read/write (PIO, DMA, or NCQ FPDMA).  Decodes LBA and
 * sector count from the command FIS — NCQ and 48-bit commands use the
 * 6-byte LBA fields, 28-bit commands pack the LBA into bytes 4..7 —
 * scales to byte offsets, builds the iovec from the PRDT starting at
 * 'done', and queues the request with blockif.  NCQ commands are
 * acknowledged immediately with a no-interrupt D2H FIS.
 * NOTE(review): extraction omits lines (readop/ncq flag assignments,
 * zero-count "full count" cases, and low LBA bytes are not visible);
 * verify against the complete source.
 */
611 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
613 struct ahci_ioreq *aior;
614 struct blockif_req *breq;
615 struct ahci_prdt_entry *prdt;
616 struct ahci_cmd_hdr *hdr;
619 int err, first, ncq, readop;
621 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
622 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
627 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
628 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
629 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
630 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
633 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
634 cfis[2] == ATA_READ_FPDMA_QUEUED) {
635 lba = ((uint64_t)cfis[10] << 40) |
636 ((uint64_t)cfis[9] << 32) |
637 ((uint64_t)cfis[8] << 24) |
638 ((uint64_t)cfis[6] << 16) |
639 ((uint64_t)cfis[5] << 8) |
641 len = cfis[11] << 8 | cfis[3];
645 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
646 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
647 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
648 lba = ((uint64_t)cfis[10] << 40) |
649 ((uint64_t)cfis[9] << 32) |
650 ((uint64_t)cfis[8] << 24) |
651 ((uint64_t)cfis[6] << 16) |
652 ((uint64_t)cfis[5] << 8) |
654 len = cfis[13] << 8 | cfis[12];
658 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
659 (cfis[5] << 8) | cfis[4];
664 lba *= blockif_sectsz(p->bctx);
665 len *= blockif_sectsz(p->bctx);
667 /* Pull request off free list */
668 aior = STAILQ_FIRST(&p->iofhd);
669 assert(aior != NULL);
670 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
676 breq = &aior->io_req;
677 breq->br_offset = lba + done;
678 ahci_build_iov(p, aior, prdt, hdr->prdtl);
680 /* Mark this command in-flight. */
681 p->pending |= 1 << slot;
683 /* Stuff request onto busy list. */
684 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
687 ahci_write_fis_d2h_ncq(p, slot);
690 err = blockif_read(p->bctx, breq);
692 err = blockif_write(p->bctx, breq);
/*
 * Handle ATA FLUSH CACHE(48): take an ioreq from the free list, mark
 * the slot pending, move the request to the busy list, and issue an
 * asynchronous blockif flush; completion is reported via the blockif
 * callback.  NOTE(review): extraction omits lines (aior field setup
 * between original lines 708 and 714, and the err check, are not
 * visible).
 */
697 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
699 struct ahci_ioreq *aior;
700 struct blockif_req *breq;
704 * Pull request off free list
706 aior = STAILQ_FIRST(&p->iofhd);
707 assert(aior != NULL);
708 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
714 breq = &aior->io_req;
717 * Mark this command in-flight.
719 p->pending |= 1 << slot;
722 * Stuff request onto busy list
724 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
726 err = blockif_flush(p->bctx, breq);
/*
 * Copy up to 'len' bytes of guest data described by the slot's PRDT
 * into a host buffer ('to'), entry by entry.
 * NOTE(review): extraction omits lines (the trailing parameters, the
 * per-iteration advance of to/len/prdt, and the closing of the loop
 * are not visible); verify against the complete source.
 */
731 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
734 struct ahci_cmd_hdr *hdr;
735 struct ahci_prdt_entry *prdt;
739 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
742 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
743 for (i = 0; i < hdr->prdtl && len; i++) {
748 dbcsz = (prdt->dbc & DBCMASK) + 1;
749 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
750 sublen = len < dbcsz ? len : dbcsz;
751 memcpy(to, ptr, sublen);
/*
 * Handle DSM TRIM, either via DATA SET MANAGEMENT or SEND FPDMA
 * QUEUED.  Reads one TRIM-range entry (48-bit LBA + 16-bit count)
 * from the guest PRDT at offset 'done'; an exhausted/empty range list
 * completes the command immediately (SDB FIS for the NCQ flavor, D2H
 * otherwise), while a live range is converted to a byte-offset
 * blockif_delete().  'aior->more' requeues this handler from the
 * completion callback for the next range entry.
 * NOTE(review): extraction omits lines (buf/entry declarations, the
 * zero-length-entry test, and the first-call bookkeeping are not
 * visible); verify against the complete source.
 */
759 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
761 struct ahci_ioreq *aior;
762 struct blockif_req *breq;
770 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
771 len = (uint16_t)cfis[13] << 8 | cfis[12];
774 } else { /* ATA_SEND_FPDMA_QUEUED */
775 len = (uint16_t)cfis[11] << 8 | cfis[3];
779 read_prdt(p, slot, cfis, buf, sizeof(buf));
783 elba = ((uint64_t)entry[5] << 40) |
784 ((uint64_t)entry[4] << 32) |
785 ((uint64_t)entry[3] << 24) |
786 ((uint64_t)entry[2] << 16) |
787 ((uint64_t)entry[1] << 8) |
789 elen = (uint16_t)entry[7] << 8 | entry[6];
795 ahci_write_fis_d2h_ncq(p, slot);
796 ahci_write_fis_sdb(p, slot, cfis,
797 ATA_S_READY | ATA_S_DSC);
799 ahci_write_fis_d2h(p, slot, cfis,
800 ATA_S_READY | ATA_S_DSC);
802 p->pending &= ~(1 << slot);
803 ahci_check_stopped(p);
812 * Pull request off free list
814 aior = STAILQ_FIRST(&p->iofhd);
815 assert(aior != NULL);
816 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
821 aior->more = (len != done);
823 breq = &aior->io_req;
824 breq->br_offset = elba * blockif_sectsz(p->bctx);
825 breq->br_resid = elen * blockif_sectsz(p->bctx);
828 * Mark this command in-flight.
830 p->pending |= 1 << slot;
833 * Stuff request onto busy list
835 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
838 ahci_write_fis_d2h_ncq(p, slot);
840 err = blockif_delete(p->bctx, breq);
/*
 * Copy up to 'len' bytes from a host buffer ('from') into the guest
 * memory described by the slot's PRDT, then record the byte count
 * actually transferred in the command header (hdr->prdbc).
 * NOTE(review): extraction omits lines (trailing parameters and the
 * per-iteration pointer/length advance are not visible).
 */
845 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
848 struct ahci_cmd_hdr *hdr;
849 struct ahci_prdt_entry *prdt;
853 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
856 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
857 for (i = 0; i < hdr->prdtl && len; i++) {
862 dbcsz = (prdt->dbc & DBCMASK) + 1;
863 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
864 sublen = len < dbcsz ? len : dbcsz;
865 memcpy(ptr, from, sublen);
870 hdr->prdbc = size - len;
/*
 * Store a two's-complement checksum in the last byte of 'buf' so the
 * bytes sum to zero mod 256 (format used by ATA log pages and
 * IDENTIFY data).  NOTE(review): extraction omits lines (the 'sum'
 * accumulation inside the loop is not visible).
 */
874 ahci_checksum(uint8_t *buf, int size)
879 for (i = 0; i < size - 1; i++)
881 buf[size - 1] = 0x100 - sum;
/*
 * Handle READ LOG (DMA) EXT for the NCQ error log (page 0x10 only,
 * single sector).  Any other page/count, an ATAPI device, or a
 * missing PRDT is aborted.  Returns the latched err_cfis contents,
 * checksummed, via the PRDT; the PIO-setup FIS is emitted only for
 * the non-DMA opcode.  NOTE(review): extraction omits lines (the
 * 'buf' declaration and the early return after abort are not
 * visible).
 */
885 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
887 struct ahci_cmd_hdr *hdr;
890 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
891 if (p->atapi || hdr->prdtl == 0 || cfis[4] != 0x10 ||
892 cfis[5] != 0 || cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
893 ahci_write_fis_d2h(p, slot, cfis,
894 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
898 memset(buf, 0, sizeof(buf));
899 memcpy(buf, p->err_cfis, sizeof(p->err_cfis));
900 ahci_checksum(buf, sizeof(buf));
902 if (cfis[2] == ATA_READ_LOG_EXT)
903 ahci_write_fis_piosetup(p);
904 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
905 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Build and return the 512-byte ATA IDENTIFY DEVICE data for a disk
 * port.  Aborts for ATAPI devices or a missing PRDT.  The visible
 * words cover: model/serial/firmware strings, geometry, multi-sector
 * setting, 28-bit and 48-bit capacity, DMA/UDMA mode bitmaps derived
 * from p->xfermode, SATA capability words (gen speeds, current speed
 * from SStatus), feature-support words (48-bit, FLUSH, NCQ-adjacent),
 * TRIM support when the backing store allows deletion and is
 * writable, and logical/physical sector geometry.  The buffer is
 * checksummed, announced with a PIO-setup FIS, and written via the
 * PRDT.  NOTE(review): extraction omits many lines (buf declaration,
 * several word assignments, capacity clamping branches); verify any
 * specific word against the complete source.
 */
909 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
911 struct ahci_cmd_hdr *hdr;
913 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
914 if (p->atapi || hdr->prdtl == 0) {
915 ahci_write_fis_d2h(p, slot, cfis,
916 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
920 int sectsz, psectsz, psectoff, candelete, ro;
924 ro = blockif_is_ro(p->bctx);
925 candelete = blockif_candelete(p->bctx);
926 sectsz = blockif_sectsz(p->bctx);
927 sectors = blockif_size(p->bctx) / sectsz;
928 blockif_chs(p->bctx, &cyl, &heads, &sech);
929 blockif_psectsz(p->bctx, &psectsz, &psectoff);
930 memset(buf, 0, sizeof(buf));
935 ata_string((uint8_t *)(buf+10), p->ident, 20);
936 ata_string((uint8_t *)(buf+23), "001", 8);
937 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
938 buf[47] = (0x8000 | 128);
940 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
942 buf[53] = (1 << 1 | 1 << 2);
944 buf[59] = (0x100 | p->mult_sectors);
945 if (sectors <= 0x0fffffff) {
947 buf[61] = (sectors >> 16);
953 if (p->xfermode & ATA_WDMA0)
954 buf[63] |= (1 << ((p->xfermode & 7) + 8));
962 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
964 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
965 (p->ssts & ATA_SS_SPD_MASK) >> 3);
968 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
969 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
970 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
971 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
973 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
974 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
975 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
976 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
979 if (p->xfermode & ATA_UDMA0)
980 buf[88] |= (1 << ((p->xfermode & 7) + 8));
982 buf[101] = (sectors >> 16);
983 buf[102] = (sectors >> 32);
984 buf[103] = (sectors >> 48);
985 if (candelete && !ro) {
986 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
988 buf[169] = ATA_SUPPORT_DSM_TRIM;
992 if (psectsz > sectsz) {
994 buf[106] |= ffsl(psectsz / sectsz) - 1;
995 buf[209] |= (psectoff / sectsz);
999 buf[117] = sectsz / 2;
1000 buf[118] = ((sectsz / 2) >> 16);
1002 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1003 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1006 ahci_checksum((uint8_t *)buf, sizeof(buf));
1007 ahci_write_fis_piosetup(p);
1008 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1009 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Build and return IDENTIFY PACKET DEVICE data for an ATAPI (DVD-ROM)
 * port.  Word 0 encodes removable-media ATAPI CD/DVD class; the rest
 * mirrors handle_identify minus disk-specific capacity fields.
 * Aborts are issued for the inverse condition elided by this
 * extraction (presumably a non-ATAPI port — confirm against source).
 * NOTE(review): extraction omits lines (buf declaration, guard
 * condition, several word assignments).
 */
1014 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1017 ahci_write_fis_d2h(p, slot, cfis,
1018 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1022 memset(buf, 0, sizeof(buf));
1023 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
1024 ata_string((uint8_t *)(buf+10), p->ident, 20);
1025 ata_string((uint8_t *)(buf+23), "001", 8);
1026 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
1027 buf[49] = (1 << 9 | 1 << 8);
1028 buf[50] = (1 << 14 | 1);
1029 buf[53] = (1 << 2 | 1 << 1);
1032 if (p->xfermode & ATA_WDMA0)
1033 buf[63] |= (1 << ((p->xfermode & 7) + 8));
1039 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1040 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1043 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1044 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1045 buf[83] = (1 << 14);
1046 buf[84] = (1 << 14);
1047 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1048 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1049 buf[87] = (1 << 14);
1051 if (p->xfermode & ATA_UDMA0)
1052 buf[88] |= (1 << ((p->xfermode & 7) + 8));
1055 ahci_checksum((uint8_t *)buf, sizeof(buf));
1056 ahci_write_fis_piosetup(p);
1057 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1058 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * SCSI INQUIRY for the emulated DVD-ROM.  Supports the "supported VPD
 * pages" VPD page; other VPD requests raise ILLEGAL REQUEST sense.
 * The standard-inquiry path returns vendor "BHYVE", product
 * "BHYVE DVD-ROM", revision "001".
 * NOTE(review): extraction omits lines (acmd/buf declarations, VPD
 * page construction, standard-inquiry header bytes, and length
 * clamping are not visible).
 */
1063 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1072 if (acmd[1] & 1) { /* VPD */
1073 if (acmd[2] == 0) { /* Supported VPD pages */
1081 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1083 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1084 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1085 ahci_write_fis_d2h(p, slot, cfis, tfd);
1097 atapi_string(buf + 8, "BHYVE", 8);
1098 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1099 atapi_string(buf + 32, "001", 4);
1105 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1106 write_prdt(p, slot, cfis, buf, len);
1107 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ CAPACITY(10): report the last LBA (total 2048-byte
 * sectors minus one) and a fixed 2048-byte block length, big-endian.
 * NOTE(review): extraction omits lines (buf/sectors declarations).
 */
1111 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1116 sectors = blockif_size(p->bctx) / 2048;
1117 be32enc(buf, sectors - 1);
1118 be32enc(buf + 4, 2048);
1119 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1120 write_prdt(p, slot, cfis, buf, sizeof(buf));
1121 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ TOC: dispatch on the format field (acmd[9] >> 6).  The
 * visible formats build a single-track TOC whose lead-out address is
 * the medium size, honoring the MSF bit; a start track beyond 1
 * (other than the 0xaa lead-out) raises ILLEGAL REQUEST.  The final
 * visible arm raises ILLEGAL REQUEST for unsupported formats.
 * NOTE(review): extraction omits many lines (format switch arms'
 * descriptor bytes, size/len computation, loop structure); verify
 * against the complete source.
 */
1125 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1133 len = be16dec(acmd + 7);
1134 format = acmd[9] >> 6;
1140 uint8_t start_track, buf[20], *bp;
1142 msf = (acmd[1] >> 1) & 1;
1143 start_track = acmd[6];
1144 if (start_track > 1 && start_track != 0xaa) {
1146 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1148 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1149 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1150 ahci_write_fis_d2h(p, slot, cfis, tfd);
1156 if (start_track <= 1) {
1176 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1180 lba_to_msf(bp, sectors);
1183 be32enc(bp, sectors);
1187 be16enc(buf, size - 2);
1190 write_prdt(p, slot, cfis, buf, len);
1191 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1192 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1199 memset(buf, 0, sizeof(buf));
1203 if (len > sizeof(buf))
1205 write_prdt(p, slot, cfis, buf, len);
1206 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1207 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1214 uint8_t start_track, *bp, buf[50];
1216 msf = (acmd[1] >> 1) & 1;
1217 start_track = acmd[6];
1253 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1257 lba_to_msf(bp, sectors);
1260 be32enc(bp, sectors);
1283 be16enc(buf, size - 2);
1286 write_prdt(p, slot, cfis, buf, len);
1287 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1288 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1295 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1297 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1298 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1299 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI REPORT LUNS: return a zeroed LUN list (single LUN 0).
 * NOTE(review): extraction omits lines (buf declaration and the LUN
 * list length field setup are not visible).
 */
1306 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1310 memset(buf, 0, sizeof(buf));
1313 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1314 write_prdt(p, slot, cfis, buf, sizeof(buf));
1315 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI READ(10)/READ(12) for the DVD-ROM.  Decodes big-endian LBA and
 * transfer length (16-bit at acmd+7 for READ_10, 32-bit at acmd+6
 * otherwise), builds the iovec from the PRDT at offset 'done', and
 * queues an asynchronous blockif read; a zero-length read completes
 * immediately.  NOTE(review): extraction omits lines (acmd
 * derivation, LBA scaling to byte offset, aior field setup, and the
 * err check are not visible); verify against the complete source.
 */
1319 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1321 struct ahci_ioreq *aior;
1322 struct ahci_cmd_hdr *hdr;
1323 struct ahci_prdt_entry *prdt;
1324 struct blockif_req *breq;
1325 struct pci_ahci_softc *sc;
1333 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1334 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1336 lba = be32dec(acmd + 2);
1337 if (acmd[0] == READ_10)
1338 len = be16dec(acmd + 7);
1340 len = be32dec(acmd + 6);
1342 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1343 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1349 * Pull request off free list
1351 aior = STAILQ_FIRST(&p->iofhd);
1352 assert(aior != NULL);
1353 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1358 breq = &aior->io_req;
1359 breq->br_offset = lba + done;
1360 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1362 /* Mark this command in-flight. */
1363 p->pending |= 1 << slot;
1365 /* Stuff request onto busy list. */
1366 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1368 err = blockif_read(p->bctx, breq);
/*
 * SCSI REQUEST SENSE: return fixed-format sense data (0x70 with the
 * VALID bit) carrying the port's latched sense key.
 * NOTE(review): extraction omits lines (buf/len declarations, ASC
 * byte, and the additional-sense-length field are not visible).
 */
1373 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1381 if (len > sizeof(buf))
1383 memset(buf, 0, len);
1384 buf[0] = 0x70 | (1 << 7);
1385 buf[2] = p->sense_key;
1388 write_prdt(p, slot, cfis, buf, len);
1389 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1390 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * SCSI START STOP UNIT: the visible arms accept stop/start silently
 * (good status) and reject the eject request (acmd[4] power/eject
 * bits) with ILLEGAL REQUEST sense, since media ejection is not
 * emulated.  NOTE(review): extraction omits lines (case labels and
 * the ASC assignment are not visible).
 */
1394 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1396 uint8_t *acmd = cfis + 0x40;
1399 switch (acmd[4] & 3) {
1403 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1404 tfd = ATA_S_READY | ATA_S_DSC;
1407 /* TODO eject media */
1408 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1409 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1411 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1414 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI MODE SENSE(10): supports the read/write error-recovery page
 * (16-byte response) and the CD capabilities/mechanical-status page
 * (30-byte response, advertising 2 volume levels and a 512 KB
 * buffer); any other page code raises ILLEGAL REQUEST sense.
 * NOTE(review): extraction omits lines (page header/byte fills and
 * the buf declarations per arm are not visible).
 */
1418 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1426 len = be16dec(acmd + 7);
1428 code = acmd[2] & 0x3f;
1433 case MODEPAGE_RW_ERROR_RECOVERY:
1437 if (len > sizeof(buf))
1440 memset(buf, 0, sizeof(buf));
1441 be16enc(buf, 16 - 2);
1446 write_prdt(p, slot, cfis, buf, len);
1447 tfd = ATA_S_READY | ATA_S_DSC;
1450 case MODEPAGE_CD_CAPABILITIES:
1454 if (len > sizeof(buf))
1457 memset(buf, 0, sizeof(buf));
1458 be16enc(buf, 30 - 2);
1464 be16enc(&buf[18], 2);
1465 be16enc(&buf[20], 512);
1466 write_prdt(p, slot, cfis, buf, len);
1467 tfd = ATA_S_READY | ATA_S_DSC;
1476 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1478 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1483 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1485 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1488 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1489 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * SCSI GET EVENT STATUS NOTIFICATION: only the polled (immediate)
 * form is supported — an asynchronous request raises ILLEGAL REQUEST
 * sense.  The polled response is an 8-byte "no event" reply.
 * NOTE(review): extraction omits lines (acmd/buf declarations and the
 * event-class bytes are not visible).
 */
1493 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1501 /* we don't support asynchronous operation */
1502 if (!(acmd[1] & 1)) {
1503 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1505 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1510 len = be16dec(acmd + 7);
1511 if (len > sizeof(buf))
1514 memset(buf, 0, sizeof(buf));
1515 be16enc(buf, 8 - 2);
1519 write_prdt(p, slot, cfis, buf, len);
1520 tfd = ATA_S_READY | ATA_S_DSC;
1522 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1523 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command by SCSI opcode.  The visible arms
 * route to the atapi_* handlers above; TEST UNIT READY and a
 * no-op-style arm complete immediately with good status, and unknown
 * opcodes raise ILLEGAL REQUEST sense.
 * NOTE(review): extraction omits lines (several case labels — e.g.
 * INQUIRY, READ_CAPACITY, READ_10/12 — sit on elided lines before
 * their visible handler calls; the 'acmd' derivation is also not
 * visible).
 */
1527 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1537 for (i = 0; i < 16; i++)
1538 DPRINTF("%02x ", acmd[i]);
1544 case TEST_UNIT_READY:
1545 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1546 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1549 atapi_inquiry(p, slot, cfis);
1552 atapi_read_capacity(p, slot, cfis);
1556 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1557 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1560 atapi_read_toc(p, slot, cfis);
1563 atapi_report_luns(p, slot, cfis);
1567 atapi_read(p, slot, cfis, 0);
1570 atapi_request_sense(p, slot, cfis);
1572 case START_STOP_UNIT:
1573 atapi_start_stop_unit(p, slot, cfis);
1576 atapi_mode_sense(p, slot, cfis);
1578 case GET_EVENT_STATUS_NOTIFICATION:
1579 atapi_get_event_status_notification(p, slot, cfis);
1582 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1583 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1585 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1586 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for a slot's command FIS.  Marks
 * the taskfile busy, then switches on the opcode (cfis[2]):
 * IDENTIFY, SET FEATURES (SATA features, write/read cache toggles,
 * transfer-mode selection with validation), SET MULTI (power-of-two
 * up to 128), the read/write family, FLUSH, DSM TRIM (strictly
 * validated single-sector range list), SEND FPDMA QUEUED (DSM
 * subcommand only), READ LOG (DMA) EXT, assorted accepted no-ops
 * (CHECK POWER MODE, STANDBY, IDLE, READ VERIFY, SECURITY FREEZE
 * LOCK), ATAPI IDENTIFY and PACKET (rejected on non-ATAPI ports), and
 * an abort for anything unrecognized.
 * NOTE(review): extraction omits many lines (break statements, some
 * case labels such as the 28-bit read/write opcodes and SETFEATURES
 * subcase labels); verify specific arms against the complete source.
 */
1592 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1595 p->tfd |= ATA_S_BUSY;
1597 case ATA_ATA_IDENTIFY:
1598 handle_identify(p, slot, cfis);
1600 case ATA_SETFEATURES:
1603 case ATA_SF_ENAB_SATA_SF:
1605 case ATA_SATA_SF_AN:
1606 p->tfd = ATA_S_DSC | ATA_S_READY;
1609 p->tfd = ATA_S_ERROR | ATA_S_READY;
1610 p->tfd |= (ATA_ERROR_ABORT << 8);
1614 case ATA_SF_ENAB_WCACHE:
1615 case ATA_SF_DIS_WCACHE:
1616 case ATA_SF_ENAB_RCACHE:
1617 case ATA_SF_DIS_RCACHE:
1618 p->tfd = ATA_S_DSC | ATA_S_READY;
1620 case ATA_SF_SETXFER:
1622 switch (cfis[12] & 0xf8) {
1628 p->xfermode = (cfis[12] & 0x7);
1631 p->tfd = ATA_S_DSC | ATA_S_READY;
1635 p->tfd = ATA_S_ERROR | ATA_S_READY;
1636 p->tfd |= (ATA_ERROR_ABORT << 8);
1639 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1643 if (cfis[12] != 0 &&
1644 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1645 p->tfd = ATA_S_ERROR | ATA_S_READY;
1646 p->tfd |= (ATA_ERROR_ABORT << 8);
1648 p->mult_sectors = cfis[12];
1649 p->tfd = ATA_S_DSC | ATA_S_READY;
1651 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1659 case ATA_READ_MUL48:
1660 case ATA_WRITE_MUL48:
1663 case ATA_READ_DMA48:
1664 case ATA_WRITE_DMA48:
1665 case ATA_READ_FPDMA_QUEUED:
1666 case ATA_WRITE_FPDMA_QUEUED:
1667 ahci_handle_rw(p, slot, cfis, 0);
1669 case ATA_FLUSHCACHE:
1670 case ATA_FLUSHCACHE48:
1671 ahci_handle_flush(p, slot, cfis);
1673 case ATA_DATA_SET_MANAGEMENT:
1674 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1675 cfis[13] == 0 && cfis[12] == 1) {
1676 ahci_handle_dsm_trim(p, slot, cfis, 0);
1679 ahci_write_fis_d2h(p, slot, cfis,
1680 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1682 case ATA_SEND_FPDMA_QUEUED:
1683 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1684 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1685 cfis[11] == 0 && cfis[3] == 1) {
1686 ahci_handle_dsm_trim(p, slot, cfis, 0);
1689 ahci_write_fis_d2h(p, slot, cfis,
1690 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1692 case ATA_READ_LOG_EXT:
1693 case ATA_READ_LOG_DMA_EXT:
1694 ahci_handle_read_log(p, slot, cfis);
1696 case ATA_SECURITY_FREEZE_LOCK:
1699 ahci_write_fis_d2h(p, slot, cfis,
1700 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1702 case ATA_CHECK_POWER_MODE:
1703 cfis[12] = 0xff; /* always on */
1704 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1706 case ATA_STANDBY_CMD:
1707 case ATA_STANDBY_IMMEDIATE:
1709 case ATA_IDLE_IMMEDIATE:
1711 case ATA_READ_VERIFY:
1712 case ATA_READ_VERIFY48:
1713 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1715 case ATA_ATAPI_IDENTIFY:
1716 handle_atapi_identify(p, slot, cfis);
1718 case ATA_PACKET_CMD:
1720 ahci_write_fis_d2h(p, slot, cfis,
1721 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1723 handle_packet_cmd(p, slot, cfis);
1726 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1727 ahci_write_fis_d2h(p, slot, cfis,
1728 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one issued command slot: map the command table (CFIS +
 * PRDT) from guest memory, dump it when debugging, reject non-H2D
 * FISes, and either dispatch the command (C bit set in cfis[1]) or
 * treat it as a device-control FIS — the visible tail handles SRST
 * sequencing via cfis[15] bit 2 and p->reset, clearing the slot's ci
 * bit.  NOTE(review): extraction omits lines (cfl validation use,
 * debug-loop bodies, and the reset-branch bodies are not visible).
 */
1734 ahci_handle_slot(struct ahci_port *p, int slot)
1736 struct ahci_cmd_hdr *hdr;
1738 struct ahci_prdt_entry *prdt;
1740 struct pci_ahci_softc *sc;
1747 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1749 cfl = (hdr->flags & 0x1f) * 4;
1751 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1752 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1754 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1757 for (i = 0; i < cfl; i++) {
1760 DPRINTF("%02x ", cfis[i]);
1764 for (i = 0; i < hdr->prdtl; i++) {
1765 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1770 if (cfis[0] != FIS_TYPE_REGH2D) {
1771 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
1775 if (cfis[1] & 0x80) {
1776 ahci_handle_cmd(p, slot, cfis);
1778 if (cfis[15] & (1 << 2))
1780 else if (p->reset) {
1784 p->ci &= ~(1 << slot);
/*
 * Scan for newly-issued command slots (ci bits not already pending),
 * round-robin from the current command slot (p->ccs, mod 32),
 * stopping while the device is busy/DRQ or a clear is awaited.  The
 * current slot is reflected into PxCMD's CCS field before dispatch.
 * Does nothing unless command processing (ST) is enabled.
 */
1789 ahci_handle_port(struct ahci_port *p)
1792 if (!(p->cmd & AHCI_P_CMD_ST))
1796 * Search for any new commands to issue ignoring those that
1797 * are already in-flight. Stop if device is busy or in error.
1799 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
1800 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1802 if (p->waitforclear)
1804 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
1805 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1806 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1807 ahci_handle_slot(p, p->ccs);
/*
 * Completion callback for ATA disk I/O.  Runs on the blockif worker
 * thread, so it takes the softc mutex.  Recycles the ioreq to the
 * free list, records bytes transferred in the command header, and —
 * when more PRDT segments remain (aior->more) — reissues the
 * remainder via ahci_handle_rw/ahci_handle_dsm_trim.  Otherwise it
 * reports completion: SDB FIS for NCQ commands, D2H FIS for the rest,
 * with ABORT error status on failure; then clears pending, rechecks
 * the stopped state, and kicks the port for further commands.
 * NOTE(review): extraction omits lines (slot/cfis/p/sc derivation
 * from 'aior' and the ncq/dsm flag assignments are not visible).
 */
1813 * blockif callback routine - this runs in the context of the blockif
1814 * i/o thread, so the mutex needs to be acquired.
1817 ata_ioreq_cb(struct blockif_req *br, int err)
1819 struct ahci_cmd_hdr *hdr;
1820 struct ahci_ioreq *aior;
1821 struct ahci_port *p;
1822 struct pci_ahci_softc *sc;
1827 DPRINTF("%s %d\n", __func__, err);
1830 aior = br->br_param;
1835 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1837 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1838 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1839 cfis[2] == ATA_SEND_FPDMA_QUEUED)
1841 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1842 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1843 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1846 pthread_mutex_lock(&sc->mtx);
1849 * Delete the blockif request from the busy list
1851 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1854 * Move the blockif request back to the free list
1856 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1859 hdr->prdbc = aior->done;
1861 if (!err && aior->more) {
1863 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1865 ahci_handle_rw(p, slot, cfis, aior->done);
1870 tfd = ATA_S_READY | ATA_S_DSC;
1872 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
1874 ahci_write_fis_sdb(p, slot, cfis, tfd);
1876 ahci_write_fis_d2h(p, slot, cfis, tfd);
1879 * This command is now complete.
1881 p->pending &= ~(1 << slot);
1883 ahci_check_stopped(p);
1884 ahci_handle_port(p);
1886 pthread_mutex_unlock(&sc->mtx);
1887 DPRINTF("%s exit\n", __func__);
/*
 * Completion callback for ATAPI (packet) reads — the ATAPI
 * counterpart of ata_ioreq_cb.  Under the softc mutex: recycle the
 * ioreq, record the transfer count, continue a multi-segment read via
 * atapi_read(), or finish with a D2H FIS — on error, setting ILLEGAL
 * REQUEST sense in the taskfile.
 * NOTE(review): extraction omits lines (slot/cfis/p/sc derivation and
 * the ASC latch on error are not visible).
 */
1891 atapi_ioreq_cb(struct blockif_req *br, int err)
1893 struct ahci_cmd_hdr *hdr;
1894 struct ahci_ioreq *aior;
1895 struct ahci_port *p;
1896 struct pci_ahci_softc *sc;
1901 DPRINTF("%s %d\n", __func__, err);
1903 aior = br->br_param;
1908 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1910 pthread_mutex_lock(&sc->mtx);
1913 * Delete the blockif request from the busy list
1915 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1918 * Move the blockif request back to the free list
1920 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
1923 hdr->prdbc = aior->done;
1925 if (!err && aior->more) {
1926 atapi_read(p, slot, cfis, aior->done);
1931 tfd = ATA_S_READY | ATA_S_DSC;
1933 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1935 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1937 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1938 ahci_write_fis_d2h(p, slot, cfis, tfd);
1941 * This command is now complete.
1943 p->pending &= ~(1 << slot);
1945 ahci_check_stopped(p);
1946 ahci_handle_port(p);
1948 pthread_mutex_unlock(&sc->mtx);
1949 DPRINTF("%s exit\n", __func__);
/*
 * pci_ahci_ioreq_init - allocate the per-port pool of blockif i/o
 * request structures and place them all on the port's free list.
 */
1953 pci_ahci_ioreq_init(struct ahci_port *pr)
1955 struct ahci_ioreq *vr;
/* Size the request pool to match the blockif queue depth. */
1958 pr->ioqsz = blockif_queuesz(pr->bctx);
1959 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
1960 STAILQ_INIT(&pr->iofhd);
1963 * Add all i/o request entries to the free queue
1965 for (i = 0; i < pr->ioqsz; i++) {
/* ATA and ATAPI ports use different completion callbacks. */
1969 vr->io_req.br_callback = ata_ioreq_cb;
1971 vr->io_req.br_callback = atapi_ioreq_cb;
/* br_param lets the callback recover its ahci_ioreq on completion. */
1972 vr->io_req.br_param = vr;
1973 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
/* The busy list starts empty; requests move onto it while in flight. */
1976 TAILQ_INIT(&pr->iobhd);
/*
 * pci_ahci_port_write - handle a 4-byte guest write to a per-port
 * register.  Called with sc->mtx held by pci_ahci_write().
 * NOTE(review): the register dispatch (switch on 'offset') is elided in
 * this view; the cases below are annotated by the register they serve.
 */
1980 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
1982 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
1983 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
1984 struct ahci_port *p = &sc->port[port];
1986 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
1987 port, offset, value);
/* PxIE: mask to the architecturally writable interrupt-enable bits. */
2006 p->ie = value & 0xFDC000FF;
2007 ahci_generate_intr(sc);
/* PxCMD: replace all software-writable bits with the guest's value. */
2011 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2012 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2013 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2014 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
2015 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2016 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2017 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2018 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
/* ST cleared: stop command processing (handling elided from this view). */
2020 if (!(value & AHCI_P_CMD_ST)) {
/* ST set: latch Command List Running and map the guest command list. */
2025 p->cmd |= AHCI_P_CMD_CR;
2026 clb = (uint64_t)p->clbu << 32 | p->clb;
2027 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2028 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE set: enable FIS receive and map the guest's received-FIS area. */
2031 if (value & AHCI_P_CMD_FRE) {
2034 p->cmd |= AHCI_P_CMD_FR;
2035 fb = (uint64_t)p->fbu << 32 | p->fb;
2036 /* we don't support FBSCP, so rfis size is 256Bytes */
2037 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2039 p->cmd &= ~AHCI_P_CMD_FR;
/* CLO: clear BSY/DRQ so the host can issue; the bit self-clears. */
2042 if (value & AHCI_P_CMD_CLO) {
2043 p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2044 p->cmd &= ~AHCI_P_CMD_CLO;
/* ICC: interface power transitions are accepted and immediately cleared. */
2047 if (value & AHCI_P_CMD_ICC_MASK) {
2048 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
/* Re-evaluate the port in case the write enabled command processing. */
2051 ahci_handle_port(p);
2057 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL: a DET reset request is honored only while ST is clear. */
2061 if (!(p->cmd & AHCI_P_CMD_ST)) {
2062 if (value & ATA_SC_DET_RESET)
2074 ahci_handle_port(p);
/*
 * pci_ahci_host_write - handle a 4-byte guest write to a global (HBA)
 * register.  Called with sc->mtx held by pci_ahci_write().
 */
2084 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2086 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2094 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
/* GHC: a controller reset (HR) takes priority over any other bit. */
2097 if (value & AHCI_GHC_HR)
/* Otherwise latch the interrupt-enable bit and re-evaluate interrupts. */
2099 else if (value & AHCI_GHC_IE) {
2100 sc->ghc |= AHCI_GHC_IE;
2101 ahci_generate_intr(sc);
/* IS writes (elided cases above) also re-evaluate interrupt state. */
2106 ahci_generate_intr(sc);
/*
 * pci_ahci_write - BAR 5 (ABAR) write entry point.  The AHCI register
 * file only accepts aligned 32-bit accesses; route the write to the
 * global or per-port handler under the softc mutex.
 */
2114 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2115 int baridx, uint64_t offset, int size, uint64_t value)
2117 struct pci_ahci_softc *sc = pi->pi_arg;
2119 assert(baridx == 5);
2120 assert((offset % 4) == 0 && size == 4);
2122 pthread_mutex_lock(&sc->mtx);
/* Global registers live below AHCI_OFFSET; ports are AHCI_STEP apart. */
2124 if (offset < AHCI_OFFSET)
2125 pci_ahci_host_write(sc, offset, value);
2126 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2127 pci_ahci_port_write(sc, offset, value);
2129 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2131 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_host_read - return the value of a global (HBA) register.
 * The registers from CAP onward are laid out contiguously in the softc,
 * so a read is an indexed load off &sc->cap.
 */
2135 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2151 uint32_t *p = &sc->cap;
2152 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2160 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * pci_ahci_port_read - return the value of a per-port register.
 * Per-port registers from CLB onward are contiguous in struct
 * ahci_port, so a read is an indexed load off &port[n].clb.
 */
2167 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2170 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2171 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2191 uint32_t *p= &sc->port[port].clb;
2192 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2201 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2202 port, offset, value);
/*
 * pci_ahci_read - BAR 5 (ABAR) read entry point.  Sub-dword reads are
 * emulated by reading the containing aligned dword and shifting the
 * requested bytes down.
 */
2208 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2209 uint64_t regoff, int size)
2211 struct pci_ahci_softc *sc = pi->pi_arg;
2215 assert(baridx == 5);
2216 assert(size == 1 || size == 2 || size == 4);
2217 assert((regoff & (size - 1)) == 0);
2219 pthread_mutex_lock(&sc->mtx);
2221 offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
/* Global registers live below AHCI_OFFSET; ports are AHCI_STEP apart. */
2222 if (offset < AHCI_OFFSET)
2223 value = pci_ahci_host_read(sc, offset);
2224 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2225 value = pci_ahci_port_read(sc, offset);
2228 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n",
/* Shift out the bytes below the requested (possibly unaligned) offset. */
2231 value >>= 8 * (regoff & 0x3);
2233 pthread_mutex_unlock(&sc->mtx);
/*
 * pci_ahci_init - common initialization for the ahci/ahci-hd/ahci-cd
 * emulations.  Parses the comma-separated "hd:<path>"/"cd:<path>" port
 * options, opens one blockif backend per port, and programs the PCI
 * config space and AHCI capability registers.  'atapi' selects the
 * default device type when a port option carries no hd:/cd: prefix.
 * Returns 0 on success, non-zero on failure (error paths are partly
 * elided from this view).
 */
2239 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2241 char bident[sizeof("XX:XX:XX")];
2242 struct blockif_ctxt *bctxt;
2243 struct pci_ahci_softc *sc;
2252 dbg = fopen("/tmp/log", "w+");
2255 sc = calloc(1, sizeof(struct pci_ahci_softc));
2258 pthread_mutex_init(&sc->mtx, NULL);
/* One loop iteration per configured port, consuming 'opts' in place. */
2263 for (p = 0; p < MAX_PORTS && opts != NULL; p++, opts = next) {
2264 /* Identify and cut off type of present port. */
2265 if (strncmp(opts, "hd:", 3) == 0) {
2268 } else if (strncmp(opts, "cd:", 3) == 0) {
2273 /* Find and cut off the next port options. */
2274 next = strstr(opts, ",hd:");
2275 next2 = strstr(opts, ",cd:");
/* Whichever of ",hd:"/",cd:" comes first delimits this port's option. */
2276 if (next == NULL || (next2 != NULL && next2 < next))
2287 * Attempt to open the backing image. Use the PCI slot/func
2288 * and the port number for the identifier string.
2290 snprintf(bident, sizeof(bident), "%d:%d:%d", pi->pi_slot,
2292 bctxt = blockif_open(opts, bident);
2293 if (bctxt == NULL) {
2298 sc->port[p].bctx = bctxt;
2299 sc->port[p].pr_sc = sc;
2300 sc->port[p].atapi = atapi;
2303 * Create an identifier for the backing file.
2304 * Use parts of the md5 sum of the filename
2307 MD5Update(&mdctx, opts, strlen(opts));
2308 MD5Final(digest, &mdctx);
2309 sprintf(sc->port[p].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2310 digest[0], digest[1], digest[2], digest[3], digest[4],
2314 * Allocate blockif request structures and add them
2317 pci_ahci_ioreq_init(&sc->port[p]);
/* The advertised NCQ depth is capped by the shallowest port queue. */
2320 if (sc->port[p].ioqsz < slots)
2321 slots = sc->port[p].ioqsz;
2325 /* Intel ICH8 AHCI */
/* Advertise at least the ICH8's six ports regardless of configuration. */
2327 if (sc->ports < DEF_PORTS)
2328 sc->ports = DEF_PORTS;
/* HBA capabilities: 64-bit, NCQ, SNotification, Gen3, etc. */
2329 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2330 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2331 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2332 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2333 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2336 sc->cap2 = AHCI_CAP2_APST;
/* PCI identity: Intel ICH8 AHCI controller, SATA class, AHCI progif. */
2339 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2340 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2341 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2342 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2343 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
2344 pci_emul_add_msicap(pi, 1);
/* BAR 5 (ABAR) covers the global block plus one AHCI_STEP per port. */
2345 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2346 AHCI_OFFSET + sc->ports * AHCI_STEP);
2348 pci_lintr_request(pi);
/* Failure path: close any backends opened before the error. */
2352 for (p = 0; p < sc->ports; p++) {
2353 if (sc->port[p].bctx != NULL)
2354 blockif_close(sc->port[p].bctx);
/* Init entry point for "ahci"/"ahci-hd": default ports to disk (atapi=0). */
2363 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2366 return (pci_ahci_init(ctx, pi, opts, 0));
/* Init entry point for "ahci-cd": default ports to ATAPI (atapi=1). */
2370 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2373 return (pci_ahci_init(ctx, pi, opts, 1));
2377 * Use separate emulation names to distinguish drive and atapi devices
/* Generic "ahci" emulation; ports default to hard-disk semantics. */
2379 struct pci_devemu pci_de_ahci = {
2381 .pe_init = pci_ahci_hd_init,
2382 .pe_barwrite = pci_ahci_write,
2383 .pe_barread = pci_ahci_read
2385 PCI_EMUL_SET(pci_de_ahci);
/* "ahci-hd": explicit hard-disk variant of the AHCI emulation. */
2387 struct pci_devemu pci_de_ahci_hd = {
2388 .pe_emu = "ahci-hd",
2389 .pe_init = pci_ahci_hd_init,
2390 .pe_barwrite = pci_ahci_write,
2391 .pe_barread = pci_ahci_read
2393 PCI_EMUL_SET(pci_de_ahci_hd);
/* "ahci-cd": ATAPI (cdrom) variant of the AHCI emulation. */
2395 struct pci_devemu pci_de_ahci_cd = {
2396 .pe_emu = "ahci-cd",
2397 .pe_init = pci_ahci_atapi_init,
2398 .pe_barwrite = pci_ahci_write,
2399 .pe_barread = pci_ahci_read
2401 PCI_EMUL_SET(pci_de_ahci_cd);