2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2013 Zhixiang Yu <zcore@freebsd.org>
5 * Copyright (c) 2015-2016 Alexander Motin <mav@FreeBSD.org>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/linker_set.h>
39 #include <sys/ioctl.h>
42 #include <sys/endian.h>
54 #include <pthread_np.h>
/*
 * Controller-wide constants: port counts, drive signature values written
 * to PxSIG, SATA FIS type codes, ATAPI (SCSI/MMC) opcodes handled by
 * handle_packet_cmd(), SCSI mode-page codes, ATA SET FEATURES subcommands,
 * and the debug/warning printf macros.
 * NOTE(review): this capture is line-sampled; several enum/macro lines
 * (e.g. the enum sata_fis_type opening) are not visible here.
 */
63 #define DEF_PORTS 6 /* Intel ICH8 AHCI supports 6 ports */
64 #define MAX_PORTS 32 /* AHCI supports 32 ports */
66 #define PxSIG_ATA 0x00000101 /* ATA drive */
67 #define PxSIG_ATAPI 0xeb140101 /* ATAPI drive */
70 FIS_TYPE_REGH2D = 0x27, /* Register FIS - host to device */
71 FIS_TYPE_REGD2H = 0x34, /* Register FIS - device to host */
72 FIS_TYPE_DMAACT = 0x39, /* DMA activate FIS - device to host */
73 FIS_TYPE_DMASETUP = 0x41, /* DMA setup FIS - bidirectional */
74 FIS_TYPE_DATA = 0x46, /* Data FIS - bidirectional */
75 FIS_TYPE_BIST = 0x58, /* BIST activate FIS - bidirectional */
76 FIS_TYPE_PIOSETUP = 0x5F, /* PIO setup FIS - device to host */
77 FIS_TYPE_SETDEVBITS = 0xA1, /* Set dev bits FIS - device to host */
/* ATAPI (packet) command opcodes dispatched in handle_packet_cmd(). */
83 #define TEST_UNIT_READY 0x00
84 #define REQUEST_SENSE 0x03
86 #define START_STOP_UNIT 0x1B
87 #define PREVENT_ALLOW 0x1E
88 #define READ_CAPACITY 0x25
90 #define POSITION_TO_ELEMENT 0x2B
92 #define GET_EVENT_STATUS_NOTIFICATION 0x4A
93 #define MODE_SENSE_10 0x5A
94 #define REPORT_LUNS 0xA0
99 * SCSI mode page codes
101 #define MODEPAGE_RW_ERROR_RECOVERY 0x01
102 #define MODEPAGE_CD_CAPABILITIES 0x2A
/* ATA SET FEATURES subcommands recognized in ahci_handle_cmd(). */
107 #define ATA_SF_ENAB_SATA_SF 0x10
108 #define ATA_SATA_SF_AN 0x05
109 #define ATA_SF_DIS_SATA_SF 0x90
/* Debug output macros: DPRINTF active only in the debug build path. */
116 #define DPRINTF(format, arg...) do{fprintf(dbg, format, ##arg);fflush(dbg);}while(0)
118 #define DPRINTF(format, arg...)
120 #define WPRINTF(format, arg...) printf(format, ##arg)
/*
 * Per-request bookkeeping (struct ahci_ioreq, header elided in this view):
 * the blockif request, owning port, and linkage for the free (STAILQ) and
 * busy (TAILQ) lists.
 */
123 struct blockif_req io_req;
124 struct ahci_port *io_pr;
125 STAILQ_ENTRY(ahci_ioreq) io_flist;
126 TAILQ_ENTRY(ahci_ioreq) io_blist;
/* Per-port state fragment: backing block device, parent softc, and the
 * saved command FIS for the NCQ Command Error log (READ LOG EXT page 0x10). */
135 struct blockif_ctxt *bctx;
136 struct pci_ahci_softc *pr_sc;
146 uint8_t err_cfis[20];
/* I/O request pool plus the free/busy list heads for this port. */
173 struct ahci_ioreq *ioreq;
175 STAILQ_HEAD(ahci_fhead, ahci_ioreq) iofhd;
176 TAILQ_HEAD(ahci_bhead, ahci_ioreq) iobhd;
/* AHCI command-list header and PRDT entry as laid out in guest memory. */
179 struct ahci_cmd_hdr {
184 uint32_t reserved[4];
187 struct ahci_prdt_entry {
190 #define DBCMASK 0x3fffff
/* Controller softc: PCI instance plus one ahci_port per supported port. */
194 struct pci_ahci_softc {
195 struct pci_devinst *asc_pi;
210 struct ahci_port port[MAX_PORTS];
212 #define ahci_ctx(sc) ((sc)->asc_pi->pi_vmctx)
214 static void ahci_handle_port(struct ahci_port *p);
/*
 * Convert a CD logical block address to MSF (minute/second/frame) form,
 * 75 frames per second, into buf[0..2].
 * NOTE(review): the frame-byte assignment (buf[2]) is elided in this view.
 */
216 static inline void lba_to_msf(uint8_t *buf, int lba)
219 buf[0] = (lba / 75) / 60;
220 buf[1] = (lba / 75) % 60;
/*
 * Recompute the global IS register from each port's PxIS/PxIE and deliver
 * interrupts for the ports named in 'mask': deassert/assert the legacy
 * INTx line when MSI is unavailable, otherwise fire the per-port MSI
 * vectors (the last vector is shared when ports outnumber messages).
 */
225 * Generate HBA interrupts on global IS register write.
228 ahci_generate_intr(struct pci_ahci_softc *sc, uint32_t mask)
230 struct pci_devinst *pi = sc->asc_pi;
235 /* Update global IS from PxIS/PxIE. */
236 for (i = 0; i < sc->ports; i++) {
241 DPRINTF("%s(%08x) %08x\n", __func__, mask, sc->is);
243 /* If there is nothing enabled -- clear legacy interrupt and exit. */
244 if (sc->is == 0 || (sc->ghc & AHCI_GHC_IE) == 0) {
246 pci_lintr_deassert(pi);
252 /* If there is anything and no MSI -- assert legacy interrupt. */
253 nmsg = pci_msi_maxmsgnum(pi);
257 pci_lintr_assert(pi);
262 /* Assert respective MSIs for ports that were touched. */
263 for (i = 0; i < nmsg; i++) {
264 if (sc->ports <= nmsg || i < nmsg - 1)
267 mmask = 0xffffffff << i;
268 if (sc->is & mask && mmask & mask)
269 pci_generate_msi(pi, i)
/*
 * Deliver an interrupt for a single port event: with a dedicated MSI
 * vector for this port, set the IS bit and fire it immediately; otherwise
 * set IS (skipping if already pending) and raise either the shared last
 * MSI vector or the legacy INTx line. Interrupts are suppressed while
 * GHC.IE is clear.
 */
274 * Generate HBA interrupt on specific port event.
277 ahci_port_intr(struct ahci_port *p)
279 struct pci_ahci_softc *sc = p->pr_sc;
280 struct pci_devinst *pi = sc->asc_pi;
283 DPRINTF("%s(%d) %08x/%08x %08x\n", __func__,
284 p->port, p->is, p->ie, sc->is);
286 /* If there is nothing enabled -- we are done. */
287 if ((p->is & p->ie) == 0)
290 /* In case of non-shared MSI always generate interrupt. */
291 nmsg = pci_msi_maxmsgnum(pi);
292 if (sc->ports <= nmsg || p->port < nmsg - 1) {
293 sc->is |= (1 << p->port);
294 if ((sc->ghc & AHCI_GHC_IE) == 0)
296 pci_generate_msi(pi, p->port);
300 /* If IS for this port is already set -- do nothing. */
301 if (sc->is & (1 << p->port))
304 sc->is |= (1 << p->port);
306 /* If interrupts are enabled -- generate one. */
307 if ((sc->ghc & AHCI_GHC_IE) == 0)
310 pci_generate_msi(pi, nmsg - 1);
311 } else if (!sc->lintr) {
313 pci_lintr_assert(pi);
/*
 * Copy a device-to-host FIS into the port's received-FIS area at the
 * offset appropriate for its type, and select the matching PxIS bit
 * (DHR/SDB/PS) when the FIS's interrupt flag (bit 6 of byte 1) is set.
 * A FIS carrying ATA_S_ERROR additionally raises the TFE interrupt.
 * No-op unless the guest has set up the RFIS area and enabled FRE.
 */
318 ahci_write_fis(struct ahci_port *p, enum sata_fis_type ft, uint8_t *fis)
320 int offset, len, irq;
322 if (p->rfis == NULL || !(p->cmd & AHCI_P_CMD_FRE))
326 case FIS_TYPE_REGD2H:
329 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_DHR : 0;
331 case FIS_TYPE_SETDEVBITS:
334 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_SDB : 0;
336 case FIS_TYPE_PIOSETUP:
339 irq = (fis[1] & (1 << 6)) ? AHCI_P_IX_PS : 0;
342 WPRINTF("unsupported fis type %d\n", ft);
345 if (fis[2] & ATA_S_ERROR) {
347 irq |= AHCI_P_IX_TFE;
349 memcpy(p->rfis + offset, fis, len);
/* Emit a zeroed PIO Setup FIS (type byte only) to the port's RFIS area. */
359 ahci_write_fis_piosetup(struct ahci_port *p)
363 memset(fis, 0, sizeof(fis));
364 fis[0] = FIS_TYPE_PIOSETUP;
365 ahci_write_fis(p, FIS_TYPE_PIOSETUP, fis);
/*
 * Emit a Set Device Bits FIS completing NCQ slot 'slot' with taskfile
 * value 'tfd' (error byte in bits 15:8). On error, snapshot the failing
 * command FIS into err_cfis for the NCQ Command Error log. Clears the
 * slot's SActive bit and reports it in the FIS's SActive field.
 */
369 ahci_write_fis_sdb(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
374 error = (tfd >> 8) & 0xff;
376 memset(fis, 0, sizeof(fis));
377 fis[0] = FIS_TYPE_SETDEVBITS;
381 if (fis[2] & ATA_S_ERROR) {
382 p->err_cfis[0] = slot;
383 p->err_cfis[2] = tfd;
384 p->err_cfis[3] = error;
385 memcpy(&p->err_cfis[4], cfis + 4, 16);
387 *(uint32_t *)(fis + 4) = (1 << slot);
388 p->sact &= ~(1 << slot);
392 ahci_write_fis(p, FIS_TYPE_SETDEVBITS, fis);
/*
 * Emit a Register D2H FIS completing non-NCQ slot 'slot' with taskfile
 * 'tfd'. On error, record the command FIS in err_cfis (marker 0x80 =
 * non-queued command). Clears the slot's CI bit.
 */
396 ahci_write_fis_d2h(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t tfd)
401 error = (tfd >> 8) & 0xff;
402 memset(fis, 0, sizeof(fis));
403 fis[0] = FIS_TYPE_REGD2H;
417 if (fis[2] & ATA_S_ERROR) {
418 p->err_cfis[0] = 0x80;
419 p->err_cfis[2] = tfd & 0xff;
420 p->err_cfis[3] = error;
421 memcpy(&p->err_cfis[4], cfis + 4, 16);
423 p->ci &= ~(1 << slot);
425 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * Acknowledge acceptance of an NCQ command: send a good-status Register
 * D2H FIS with the interrupt bit clear and release the CI slot, allowing
 * the guest to queue further commands while the request runs.
 */
429 ahci_write_fis_d2h_ncq(struct ahci_port *p, int slot)
433 p->tfd = ATA_S_READY | ATA_S_DSC;
434 memset(fis, 0, sizeof(fis));
435 fis[0] = FIS_TYPE_REGD2H;
436 fis[1] = 0; /* No interrupt */
437 fis[2] = p->tfd; /* Status */
438 fis[3] = 0; /* No error */
439 p->ci &= ~(1 << slot);
440 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/* Send the initial Register D2H FIS that follows a device/port reset
 * (signature FIS). NOTE(review): signature field assignments elided here. */
444 ahci_write_reset_fis_d2h(struct ahci_port *p)
448 memset(fis, 0, sizeof(fis));
449 fis[0] = FIS_TYPE_REGD2H;
457 ahci_write_fis(p, FIS_TYPE_REGD2H, fis);
/*
 * If the guest has cleared PxCMD.ST and no requests remain in flight,
 * finish the stop: clear the command-list-running bit and current
 * command slot state.
 */
461 ahci_check_stopped(struct ahci_port *p)
464 * If we are no longer processing the command list and nothing
465 * is in-flight, clear the running bit, the current command
466 * slot, the command issue and active bits.
468 if (!(p->cmd & AHCI_P_CMD_ST)) {
469 if (p->pending == 0) {
471 p->cmd &= ~(AHCI_P_CMD_CR | AHCI_P_CMD_CCS_MASK);
/*
 * Stop a port: attempt to cancel every outstanding blockif request on the
 * busy list, clearing the slot's SActive (NCQ) or CI (non-NCQ) bit and its
 * pending flag, and return each request to the free list. Must be called
 * with the softc mutex held (asserted below).
 */
480 ahci_port_stop(struct ahci_port *p)
482 struct ahci_ioreq *aior;
487 assert(pthread_mutex_isowned_np(&p->pr_sc->mtx));
489 TAILQ_FOREACH(aior, &p->iobhd, io_blist) {
491 * Try to cancel the outstanding blockif request.
493 error = blockif_cancel(p->bctx, &aior->io_req);
/* NCQ commands are tracked in SActive; everything else in CI. */
499 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
500 cfis[2] == ATA_READ_FPDMA_QUEUED ||
501 cfis[2] == ATA_SEND_FPDMA_QUEUED)
502 p->sact &= ~(1 << slot); /* NCQ */
504 p->ci &= ~(1 << slot);
507 * This command is now done.
509 p->pending &= ~(1 << slot);
512 * Delete the blockif request from the busy list
514 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
517 * Move the blockif request back to the free list
519 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
522 ahci_check_stopped(p);
/*
 * Reset one port to power-on defaults: UDMA6 transfer mode, 128-sector
 * multiple count. Without a backing device, report "no device" in SStatus
 * and an all-ones signature. With a device, report PHY online at the
 * negotiated (or Gen3 default) speed, set the post-reset taskfile, and
 * send the reset signature D2H FIS (ATAPI signature for CD devices).
 */
526 ahci_port_reset(struct ahci_port *pr)
530 pr->xfermode = ATA_UDMA6;
531 pr->mult_sectors = 128;
534 pr->ssts = ATA_SS_DET_NO_DEVICE;
535 pr->sig = 0xFFFFFFFF;
539 pr->ssts = ATA_SS_DET_PHY_ONLINE | ATA_SS_IPM_ACTIVE;
540 if (pr->sctl & ATA_SC_SPD_MASK)
541 pr->ssts |= (pr->sctl & ATA_SC_SPD_MASK);
543 pr->ssts |= ATA_SS_SPD_GEN3;
544 pr->tfd = (1 << 8) | ATA_S_DSC | ATA_S_DMA;
547 pr->tfd |= ATA_S_READY;
549 pr->sig = PxSIG_ATAPI;
550 ahci_write_reset_fis_d2h(pr);
/*
 * Full HBA reset: restore GHC to AHCI-enable only, drop any asserted
 * legacy interrupt, then reinitialize every port (spin-up/power-on command
 * bits, cold-presence state when a backing device exists, SControl cleared)
 * and run the per-port reset.
 */
554 ahci_reset(struct pci_ahci_softc *sc)
558 sc->ghc = AHCI_GHC_AE;
562 pci_lintr_deassert(sc->asc_pi);
566 for (i = 0; i < sc->ports; i++) {
569 sc->port[i].cmd = (AHCI_P_CMD_SUD | AHCI_P_CMD_POD);
570 if (sc->port[i].bctx)
571 sc->port[i].cmd |= AHCI_P_CMD_CPS;
572 sc->port[i].sctl = 0;
573 ahci_port_reset(&sc->port[i]);
/* Copy 'src' into an ATA IDENTIFY string field: bytes are swapped within
 * each 16-bit word (dest[i ^ 1]) per the ATA string convention.
 * NOTE(review): space-padding of the remainder is elided in this view. */
578 ata_string(uint8_t *dest, const char *src, int len)
582 for (i = 0; i < len; i++) {
584 dest[i ^ 1] = *src++;
/* Copy 'src' into a SCSI INQUIRY-style field without byte swapping.
 * NOTE(review): loop body and padding elided in this view. */
591 atapi_string(uint8_t *dest, const char *src, int len)
595 for (i = 0; i < len; i++) {
/*
 * Translate the guest PRDT into the blockif iovec for the [done, len)
 * portion of the transfer, skipping already-completed bytes. If the
 * BLOCKIF_IOV_MAX limit is hit, the I/O is rounded down to a whole sector
 * by trimming trailing iov entries; 'aior->more' records whether another
 * round will be needed to finish the command.
 */
604 * Build up the iovec based on the PRDT, 'done' and 'len'.
607 ahci_build_iov(struct ahci_port *p, struct ahci_ioreq *aior,
608 struct ahci_prdt_entry *prdt, uint16_t prdtl)
610 struct blockif_req *breq = &aior->io_req;
611 int i, j, skip, todo, left, extra;
614 /* Copy part of PRDT between 'done' and 'len' bytes into the iov. */
616 left = aior->len - aior->done;
618 for (i = 0, j = 0; i < prdtl && j < BLOCKIF_IOV_MAX && left > 0;
620 dbcsz = (prdt->dbc & DBCMASK) + 1;
621 /* Skip already done part of the PRDT */
629 breq->br_iov[j].iov_base = paddr_guest2host(ahci_ctx(p->pr_sc),
630 prdt->dba + skip, dbcsz);
631 breq->br_iov[j].iov_len = dbcsz;
638 /* If we got limited by IOV length, round I/O down to sector size. */
639 if (j == BLOCKIF_IOV_MAX) {
640 extra = todo % blockif_sectsz(p->bctx);
644 if (breq->br_iov[j - 1].iov_len > extra) {
645 breq->br_iov[j - 1].iov_len -= extra;
648 extra -= breq->br_iov[j - 1].iov_len;
654 breq->br_resid = todo;
656 aior->more = (aior->done < aior->len && i < prdtl);
/*
 * Start (or continue, when 'done' > 0) an ATA read/write command.
 * Decodes the LBA and sector count from the command FIS for the three
 * addressing forms -- NCQ FPDMA (48-bit LBA, count in FIS bytes 3/11),
 * LBA48 (count in bytes 12/13), and 28-bit LBA -- converts them to byte
 * offsets, builds the iovec from the PRDT, and submits a blockif
 * read/write whose completion lands in ata_ioreq_cb(). NCQ commands get
 * an immediate acceptance D2H FIS so the queue stays open.
 */
660 ahci_handle_rw(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
662 struct ahci_ioreq *aior;
663 struct blockif_req *breq;
664 struct ahci_prdt_entry *prdt;
665 struct ahci_cmd_hdr *hdr;
668 int err, first, ncq, readop;
670 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
671 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* Any WRITE-family opcode means this is not a read. */
676 if (cfis[2] == ATA_WRITE || cfis[2] == ATA_WRITE48 ||
677 cfis[2] == ATA_WRITE_MUL || cfis[2] == ATA_WRITE_MUL48 ||
678 cfis[2] == ATA_WRITE_DMA || cfis[2] == ATA_WRITE_DMA48 ||
679 cfis[2] == ATA_WRITE_FPDMA_QUEUED)
682 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
683 cfis[2] == ATA_READ_FPDMA_QUEUED) {
684 lba = ((uint64_t)cfis[10] << 40) |
685 ((uint64_t)cfis[9] << 32) |
686 ((uint64_t)cfis[8] << 24) |
687 ((uint64_t)cfis[6] << 16) |
688 ((uint64_t)cfis[5] << 8) |
690 len = cfis[11] << 8 | cfis[3];
694 } else if (cfis[2] == ATA_READ48 || cfis[2] == ATA_WRITE48 ||
695 cfis[2] == ATA_READ_MUL48 || cfis[2] == ATA_WRITE_MUL48 ||
696 cfis[2] == ATA_READ_DMA48 || cfis[2] == ATA_WRITE_DMA48) {
697 lba = ((uint64_t)cfis[10] << 40) |
698 ((uint64_t)cfis[9] << 32) |
699 ((uint64_t)cfis[8] << 24) |
700 ((uint64_t)cfis[6] << 16) |
701 ((uint64_t)cfis[5] << 8) |
703 len = cfis[13] << 8 | cfis[12];
/* 28-bit LBA: low nibble of the device byte holds LBA bits 27:24. */
707 lba = ((cfis[7] & 0xf) << 24) | (cfis[6] << 16) |
708 (cfis[5] << 8) | cfis[4];
/* Convert sector units to byte units for blockif. */
713 lba *= blockif_sectsz(p->bctx);
714 len *= blockif_sectsz(p->bctx);
716 /* Pull request off free list */
717 aior = STAILQ_FIRST(&p->iofhd);
718 assert(aior != NULL);
719 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
725 breq = &aior->io_req;
726 breq->br_offset = lba + done;
727 ahci_build_iov(p, aior, prdt, hdr->prdtl);
729 /* Mark this command in-flight. */
730 p->pending |= 1 << slot;
732 /* Stuff request onto busy list. */
733 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
736 ahci_write_fis_d2h_ncq(p, slot);
739 err = blockif_read(p->bctx, breq);
741 err = blockif_write(p->bctx, breq);
/*
 * Handle FLUSH CACHE / FLUSH CACHE EXT: take a request from the free
 * list, mark the slot in-flight on the busy list, and submit an
 * asynchronous blockif flush (completion handled in ata_ioreq_cb()).
 */
746 ahci_handle_flush(struct ahci_port *p, int slot, uint8_t *cfis)
748 struct ahci_ioreq *aior;
749 struct blockif_req *breq;
753 * Pull request off free list
755 aior = STAILQ_FIRST(&p->iofhd);
756 assert(aior != NULL);
757 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
763 breq = &aior->io_req;
766 * Mark this command in-flight.
768 p->pending |= 1 << slot;
771 * Stuff request onto busy list
773 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
775 err = blockif_flush(p->bctx, breq);
/*
 * Gather up to 'len' bytes of guest data described by the command's PRDT
 * into a host buffer (guest -> host direction), walking entries until the
 * length is satisfied or the PRDT is exhausted.
 */
780 read_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
783 struct ahci_cmd_hdr *hdr;
784 struct ahci_prdt_entry *prdt;
788 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
791 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
792 for (i = 0; i < hdr->prdtl && len; i++) {
797 dbcsz = (prdt->dbc & DBCMASK) + 1;
798 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
799 sublen = MIN(len, dbcsz);
800 memcpy(to, ptr, sublen);
/*
 * Handle DATA SET MANAGEMENT (TRIM), in either its non-queued form or as
 * SEND FPDMA QUEUED/DSM. Reads the TRIM range block from the PRDT and
 * processes one (LBA, length) entry per call; 'done' indexes the current
 * entry so the completion callback can re-enter for the next one. A
 * zero-length entry ends the list: the command completes immediately with
 * an SDB FIS (NCQ) or D2H FIS (non-NCQ). Otherwise a blockif_delete is
 * submitted for the entry's byte range.
 */
808 ahci_handle_dsm_trim(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
810 struct ahci_ioreq *aior;
811 struct blockif_req *breq;
/* Range-block count field differs between the two command encodings. */
819 if (cfis[2] == ATA_DATA_SET_MANAGEMENT) {
820 len = (uint16_t)cfis[13] << 8 | cfis[12];
823 } else { /* ATA_SEND_FPDMA_QUEUED */
824 len = (uint16_t)cfis[11] << 8 | cfis[3];
828 read_prdt(p, slot, cfis, buf, sizeof(buf));
/* Each 8-byte entry: 48-bit starting LBA + 16-bit sector count. */
832 elba = ((uint64_t)entry[5] << 40) |
833 ((uint64_t)entry[4] << 32) |
834 ((uint64_t)entry[3] << 24) |
835 ((uint64_t)entry[2] << 16) |
836 ((uint64_t)entry[1] << 8) |
838 elen = (uint16_t)entry[7] << 8 | entry[6];
/* Zero-length entry: nothing (more) to trim -- complete the command. */
844 ahci_write_fis_d2h_ncq(p, slot);
845 ahci_write_fis_sdb(p, slot, cfis,
846 ATA_S_READY | ATA_S_DSC);
848 ahci_write_fis_d2h(p, slot, cfis,
849 ATA_S_READY | ATA_S_DSC);
851 p->pending &= ~(1 << slot);
852 ahci_check_stopped(p);
861 * Pull request off free list
863 aior = STAILQ_FIRST(&p->iofhd);
864 assert(aior != NULL);
865 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
870 aior->more = (len != done);
872 breq = &aior->io_req;
873 breq->br_offset = elba * blockif_sectsz(p->bctx);
874 breq->br_resid = elen * blockif_sectsz(p->bctx);
877 * Mark this command in-flight.
879 p->pending |= 1 << slot;
882 * Stuff request onto busy list
884 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
887 ahci_write_fis_d2h_ncq(p, slot);
889 err = blockif_delete(p->bctx, breq);
/*
 * Scatter 'len' bytes from a host buffer into the guest memory described
 * by the command's PRDT (host -> guest direction), then record the bytes
 * actually transferred in the command header's PRD byte count.
 */
894 write_prdt(struct ahci_port *p, int slot, uint8_t *cfis,
897 struct ahci_cmd_hdr *hdr;
898 struct ahci_prdt_entry *prdt;
902 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
905 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
906 for (i = 0; i < hdr->prdtl && len; i++) {
911 dbcsz = (prdt->dbc & DBCMASK) + 1;
912 ptr = paddr_guest2host(ahci_ctx(p->pr_sc), prdt->dba, dbcsz);
913 sublen = MIN(len, dbcsz);
914 memcpy(ptr, from, sublen);
919 hdr->prdbc = size - len;
/* Compute the ATA log-page checksum: the final byte is set so that all
 * 'size' bytes sum to zero (mod 256).
 * NOTE(review): the accumulation statement is elided in this view. */
923 ahci_checksum(uint8_t *buf, int size)
928 for (i = 0; i < size - 1; i++)
930 buf[size - 1] = 0x100 - sum;
/*
 * Handle READ LOG EXT / READ LOG DMA EXT. Only single-page reads of page
 * offset 0 are supported, and only for disks (not ATAPI); anything else
 * is aborted. Serves the log directory (0x00), the NCQ Command Error log
 * (0x10, from the saved err_cfis), and the SATA NCQ Send/Receive log
 * (0x13, advertising SFQ DSM/TRIM when the backing store supports delete
 * and is writable).
 */
934 ahci_handle_read_log(struct ahci_port *p, int slot, uint8_t *cfis)
936 struct ahci_cmd_hdr *hdr;
938 uint8_t *buf8 = (uint8_t *)buf;
939 uint16_t *buf16 = (uint16_t *)buf;
941 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
942 if (p->atapi || hdr->prdtl == 0 || cfis[5] != 0 ||
943 cfis[9] != 0 || cfis[12] != 1 || cfis[13] != 0) {
944 ahci_write_fis_d2h(p, slot, cfis,
945 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
949 memset(buf, 0, sizeof(buf));
950 if (cfis[4] == 0x00) { /* Log directory */
951 buf16[0x00] = 1; /* Version -- 1 */
952 buf16[0x10] = 1; /* NCQ Command Error Log -- 1 page */
953 buf16[0x13] = 1; /* SATA NCQ Send and Receive Log -- 1 page */
954 } else if (cfis[4] == 0x10) { /* NCQ Command Error Log */
955 memcpy(buf8, p->err_cfis, sizeof(p->err_cfis));
956 ahci_checksum(buf8, sizeof(buf));
957 } else if (cfis[4] == 0x13) { /* SATA NCQ Send and Receive Log */
958 if (blockif_candelete(p->bctx) && !blockif_is_ro(p->bctx)) {
959 buf[0x00] = 1; /* SFQ DSM supported */
960 buf[0x01] = 1; /* SFQ DSM TRIM supported */
963 ahci_write_fis_d2h(p, slot, cfis,
964 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/* PIO variant gets a PIO Setup FIS before the data transfer. */
968 if (cfis[2] == ATA_READ_LOG_EXT)
969 ahci_write_fis_piosetup(p);
970 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
971 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle ATA IDENTIFY DEVICE for disk ports. Aborts for ATAPI ports or a
 * zero-length PRDT. Otherwise synthesizes a 512-byte identify page from
 * the backing blockif device (size, geometry, sector sizes, read-only /
 * TRIM capability) advertising LBA48, NCQ/FPDMA, and the emulated
 * "BHYVE SATA DISK" model, then transfers it via PIO Setup + PRDT write
 * and completes with a good-status D2H FIS.
 */
975 handle_identify(struct ahci_port *p, int slot, uint8_t *cfis)
977 struct ahci_cmd_hdr *hdr;
979 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
980 if (p->atapi || hdr->prdtl == 0) {
981 ahci_write_fis_d2h(p, slot, cfis,
982 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
986 int sectsz, psectsz, psectoff, candelete, ro;
990 ro = blockif_is_ro(p->bctx);
991 candelete = blockif_candelete(p->bctx);
992 sectsz = blockif_sectsz(p->bctx);
993 sectors = blockif_size(p->bctx) / sectsz;
994 blockif_chs(p->bctx, &cyl, &heads, &sech);
995 blockif_psectsz(p->bctx, &psectsz, &psectoff);
996 memset(buf, 0, sizeof(buf));
/* Identify words: serial, firmware revision, model strings. */
1001 ata_string((uint8_t *)(buf+10), p->ident, 20);
1002 ata_string((uint8_t *)(buf+23), "001", 8);
1003 ata_string((uint8_t *)(buf+27), "BHYVE SATA DISK", 40);
1004 buf[47] = (0x8000 | 128);
1006 buf[49] = (1 << 8 | 1 << 9 | 1 << 11);
1007 buf[50] = (1 << 14);
1008 buf[53] = (1 << 1 | 1 << 2);
1009 if (p->mult_sectors)
1010 buf[59] = (0x100 | p->mult_sectors);
/* Words 60-61: 28-bit LBA capacity (clamped). */
1011 if (sectors <= 0x0fffffff) {
1013 buf[61] = (sectors >> 16);
1019 if (p->xfermode & ATA_WDMA0)
1020 buf[63] |= (1 << ((p->xfermode & 7) + 8));
/* Words 76-77: SATA capabilities incl. NCQ and negotiated speed. */
1028 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3 |
1030 buf[77] = (ATA_SUPPORT_RCVSND_FPDMA_QUEUED |
1031 (p->ssts & ATA_SS_SPD_MASK) >> 3);
1034 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1035 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1036 buf[83] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1037 ATA_SUPPORT_FLUSHCACHE48 | 1 << 14);
1038 buf[84] = (1 << 14);
1039 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_WRITECACHE|
1040 ATA_SUPPORT_LOOKAHEAD | ATA_SUPPORT_NOP);
1041 buf[86] = (ATA_SUPPORT_ADDRESS48 | ATA_SUPPORT_FLUSHCACHE |
1042 ATA_SUPPORT_FLUSHCACHE48 | 1 << 15);
1043 buf[87] = (1 << 14);
1045 if (p->xfermode & ATA_UDMA0)
1046 buf[88] |= (1 << ((p->xfermode & 7) + 8));
/* Words 100-103: 48-bit LBA capacity. */
1048 buf[101] = (sectors >> 16);
1049 buf[102] = (sectors >> 32);
1050 buf[103] = (sectors >> 48);
1051 if (candelete && !ro) {
1052 buf[69] |= ATA_SUPPORT_RZAT | ATA_SUPPORT_DRAT;
1054 buf[169] = ATA_SUPPORT_DSM_TRIM;
/* Physical/logical sector size relationship (word 106, 209). */
1058 if (psectsz > sectsz) {
1060 buf[106] |= ffsl(psectsz / sectsz) - 1;
1061 buf[209] |= (psectoff / sectsz);
1065 buf[117] = sectsz / 2;
1066 buf[118] = ((sectsz / 2) >> 16);
1068 buf[119] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1069 buf[120] = (ATA_SUPPORT_RWLOGDMAEXT | 1 << 14);
1072 ahci_checksum((uint8_t *)buf, sizeof(buf));
1073 ahci_write_fis_piosetup(p);
1074 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1075 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle IDENTIFY PACKET DEVICE for ATAPI (CD/DVD) ports: aborts when the
 * port is not ATAPI (abort path visible below), otherwise builds the
 * 512-byte identify page advertising a removable ATAPI DVD-ROM device
 * ("BHYVE SATA DVD ROM"), the supported DMA/UDMA modes, and SATA speeds,
 * then transfers it with PIO Setup + PRDT write and a final D2H FIS.
 */
1080 handle_atapi_identify(struct ahci_port *p, int slot, uint8_t *cfis)
1083 ahci_write_fis_d2h(p, slot, cfis,
1084 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1088 memset(buf, 0, sizeof(buf));
/* Word 0: ATAPI device, CD-ROM type, removable, 12-byte packets. */
1089 buf[0] = (2 << 14 | 5 << 8 | 1 << 7 | 2 << 5);
1090 ata_string((uint8_t *)(buf+10), p->ident, 20);
1091 ata_string((uint8_t *)(buf+23), "001", 8);
1092 ata_string((uint8_t *)(buf+27), "BHYVE SATA DVD ROM", 40);
1093 buf[49] = (1 << 9 | 1 << 8);
1094 buf[50] = (1 << 14 | 1);
1095 buf[53] = (1 << 2 | 1 << 1);
1098 if (p->xfermode & ATA_WDMA0)
1099 buf[63] |= (1 << ((p->xfermode & 7) + 8));
1105 buf[76] = (ATA_SATA_GEN1 | ATA_SATA_GEN2 | ATA_SATA_GEN3);
1106 buf[77] = ((p->ssts & ATA_SS_SPD_MASK) >> 3);
1109 buf[82] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1110 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1111 buf[83] = (1 << 14);
1112 buf[84] = (1 << 14);
1113 buf[85] = (ATA_SUPPORT_POWERMGT | ATA_SUPPORT_PACKET |
1114 ATA_SUPPORT_RESET | ATA_SUPPORT_NOP);
1115 buf[87] = (1 << 14);
1117 if (p->xfermode & ATA_UDMA0)
1118 buf[88] |= (1 << ((p->xfermode & 7) + 8));
1121 ahci_checksum((uint8_t *)buf, sizeof(buf));
1122 ahci_write_fis_piosetup(p);
1123 write_prdt(p, slot, cfis, (void *)buf, sizeof(buf));
1124 ahci_write_fis_d2h(p, slot, cfis, ATA_S_DSC | ATA_S_READY);
/*
 * Handle SCSI INQUIRY. VPD requests other than the supported-pages page
 * fail with ILLEGAL REQUEST (sense recorded on the port, CHECK CONDITION
 * taskfile returned). The standard inquiry answers with vendor "BHYVE",
 * product "BHYVE DVD-ROM", revision "001".
 */
1129 atapi_inquiry(struct ahci_port *p, int slot, uint8_t *cfis)
1138 if (acmd[1] & 1) { /* VPD */
1139 if (acmd[2] == 0) { /* Supported VPD pages */
/* Unsupported VPD page: report ILLEGAL REQUEST via error D2H FIS. */
1147 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1149 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1150 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1151 ahci_write_fis_d2h(p, slot, cfis, tfd);
1163 atapi_string(buf + 8, "BHYVE", 8);
1164 atapi_string(buf + 16, "BHYVE DVD-ROM", 16);
1165 atapi_string(buf + 32, "001", 4);
1171 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1172 write_prdt(p, slot, cfis, buf, len);
1173 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle READ CAPACITY(10): return the last LBA (sectors - 1) and the
 * fixed 2048-byte CD block size, both big-endian, via the PRDT.
 */
1177 atapi_read_capacity(struct ahci_port *p, int slot, uint8_t *cfis)
1182 sectors = blockif_size(p->bctx) / 2048;
1183 be32enc(buf, sectors - 1);
1184 be32enc(buf + 4, 2048);
1185 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1186 write_prdt(p, slot, cfis, buf, sizeof(buf));
1187 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle READ TOC, dispatching on the format field (CDB byte 9, top two
 * bits): the formatted TOC (single data track plus lead-out), the session
 * info, and the raw/full TOC. Track addresses are reported in MSF or LBA
 * form per the CDB's MSF bit; a start track beyond 1/0xaa fails with
 * ILLEGAL REQUEST. NOTE(review): several table-building statements are
 * elided in this view.
 */
1191 atapi_read_toc(struct ahci_port *p, int slot, uint8_t *cfis)
1199 len = be16dec(acmd + 7);
1200 format = acmd[9] >> 6;
1206 uint8_t start_track, buf[20], *bp;
1208 msf = (acmd[1] >> 1) & 1;
1209 start_track = acmd[6];
1210 if (start_track > 1 && start_track != 0xaa) {
1212 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1214 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1215 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1216 ahci_write_fis_d2h(p, slot, cfis, tfd);
1222 if (start_track <= 1) {
/* Lead-out entry: device size in MSF or big-endian LBA form. */
1242 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1246 lba_to_msf(bp, sectors);
1249 be32enc(bp, sectors);
1253 be16enc(buf, size - 2);
1256 write_prdt(p, slot, cfis, buf, len);
1257 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1258 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Session-info format: fixed reply, truncated to the allocation length. */
1265 memset(buf, 0, sizeof(buf));
1269 if (len > sizeof(buf))
1271 write_prdt(p, slot, cfis, buf, len);
1272 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1273 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Full/raw TOC format. */
1280 uint8_t *bp, buf[50];
1282 msf = (acmd[1] >> 1) & 1;
1318 sectors = blockif_size(p->bctx) / blockif_sectsz(p->bctx);
1322 lba_to_msf(bp, sectors);
1325 be32enc(bp, sectors);
1348 be16enc(buf, size - 2);
1351 write_prdt(p, slot, cfis, buf, len);
1352 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1353 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/* Unknown format value: ILLEGAL REQUEST. */
1360 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1362 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1363 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1364 ahci_write_fis_d2h(p, slot, cfis, tfd);
/* Handle REPORT LUNS: return a zeroed LUN list (single LUN 0 device). */
1371 atapi_report_luns(struct ahci_port *p, int slot, uint8_t *cfis)
1375 memset(buf, 0, sizeof(buf));
1378 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1379 write_prdt(p, slot, cfis, buf, sizeof(buf));
1380 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Start (or continue, 'done' > 0) an ATAPI READ(10)/READ(12): decode the
 * big-endian LBA and transfer length from the CDB (length field position
 * differs between the two opcodes), complete immediately on a zero-length
 * read, otherwise build the iovec from the PRDT and submit a blockif read
 * that completes in atapi_ioreq_cb().
 */
1384 atapi_read(struct ahci_port *p, int slot, uint8_t *cfis, uint32_t done)
1386 struct ahci_ioreq *aior;
1387 struct ahci_cmd_hdr *hdr;
1388 struct ahci_prdt_entry *prdt;
1389 struct blockif_req *breq;
1396 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1397 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1399 lba = be32dec(acmd + 2);
1400 if (acmd[0] == READ_10)
1401 len = be16dec(acmd + 7);
1403 len = be32dec(acmd + 6);
/* Zero-length transfer: complete with good status immediately. */
1405 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1406 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1412 * Pull request off free list
1414 aior = STAILQ_FIRST(&p->iofhd);
1415 assert(aior != NULL);
1416 STAILQ_REMOVE_HEAD(&p->iofhd, io_flist);
1421 breq = &aior->io_req;
1422 breq->br_offset = lba + done;
1423 ahci_build_iov(p, aior, prdt, hdr->prdtl);
1425 /* Mark this command in-flight. */
1426 p->pending |= 1 << slot;
1428 /* Stuff request onto busy list. */
1429 TAILQ_INSERT_HEAD(&p->iobhd, aior, io_blist);
1431 err = blockif_read(p->bctx, breq);
/*
 * Handle REQUEST SENSE: return fixed-format sense data (response code
 * 0x70, valid bit set) carrying the port's stored sense key, truncated
 * to the CDB allocation length.
 */
1436 atapi_request_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1444 if (len > sizeof(buf))
1446 memset(buf, 0, len);
1447 buf[0] = 0x70 | (1 << 7);
1448 buf[2] = p->sense_key;
1451 write_prdt(p, slot, cfis, buf, len);
1452 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1453 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
/*
 * Handle START STOP UNIT: start/stop requests succeed trivially; the
 * eject/load cases are not implemented and answer ILLEGAL REQUEST
 * (see TODO below).
 */
1457 atapi_start_stop_unit(struct ahci_port *p, int slot, uint8_t *cfis)
1459 uint8_t *acmd = cfis + 0x40;
1462 switch (acmd[4] & 3) {
1466 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1467 tfd = ATA_S_READY | ATA_S_DSC;
1470 /* TODO eject media */
1471 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1472 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1474 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1477 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle MODE SENSE(10). Supports the R/W error-recovery page (0x01) and
 * the CD capabilities page (0x2A, advertising 2 volume levels and a
 * 512 KB buffer); any other page code fails with ILLEGAL REQUEST and a
 * CHECK CONDITION taskfile.
 */
1481 atapi_mode_sense(struct ahci_port *p, int slot, uint8_t *cfis)
1489 len = be16dec(acmd + 7);
1491 code = acmd[2] & 0x3f;
1496 case MODEPAGE_RW_ERROR_RECOVERY:
1500 if (len > sizeof(buf))
1503 memset(buf, 0, sizeof(buf));
1504 be16enc(buf, 16 - 2);
1509 write_prdt(p, slot, cfis, buf, len);
1510 tfd = ATA_S_READY | ATA_S_DSC;
1513 case MODEPAGE_CD_CAPABILITIES:
1517 if (len > sizeof(buf))
1520 memset(buf, 0, sizeof(buf));
1521 be16enc(buf, 30 - 2);
1527 be16enc(&buf[18], 2);
1528 be16enc(&buf[20], 512);
1529 write_prdt(p, slot, cfis, buf, len);
1530 tfd = ATA_S_READY | ATA_S_DSC;
/* Unsupported page within the supported page-control group. */
1539 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1541 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
/* Default case: unknown page code. */
1546 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1548 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1551 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1552 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Handle GET EVENT STATUS NOTIFICATION. Only the polled form is
 * supported; an asynchronous request (immed bit clear) fails with
 * ILLEGAL REQUEST. The polled reply is an empty event header truncated
 * to the allocation length.
 */
1556 atapi_get_event_status_notification(struct ahci_port *p, int slot,
1564 /* we don't support asynchronous operation */
1565 if (!(acmd[1] & 1)) {
1566 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1568 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
1573 len = be16dec(acmd + 7);
1574 if (len > sizeof(buf))
1577 memset(buf, 0, sizeof(buf));
1578 be16enc(buf, 8 - 2);
1582 write_prdt(p, slot, cfis, buf, len);
1583 tfd = ATA_S_READY | ATA_S_DSC;
1585 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1586 ahci_write_fis_d2h(p, slot, cfis, tfd);
/*
 * Dispatch an ATAPI PACKET command by SCSI opcode (CDB at cfis + 0x40).
 * Simple commands (TEST UNIT READY, PREVENT ALLOW, etc.) complete inline
 * with good status; the rest are routed to their atapi_* handlers. Any
 * unrecognized opcode answers ILLEGAL REQUEST.
 */
1590 handle_packet_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1600 for (i = 0; i < 16; i++)
1601 DPRINTF("%02x ", acmd[i]);
1607 case TEST_UNIT_READY:
1608 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1609 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1612 atapi_inquiry(p, slot, cfis);
1615 atapi_read_capacity(p, slot, cfis);
/* PREVENT ALLOW (and similar no-op commands): succeed immediately. */
1619 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1620 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1623 atapi_read_toc(p, slot, cfis);
1626 atapi_report_luns(p, slot, cfis);
1630 atapi_read(p, slot, cfis, 0);
1633 atapi_request_sense(p, slot, cfis);
1635 case START_STOP_UNIT:
1636 atapi_start_stop_unit(p, slot, cfis);
1639 atapi_mode_sense(p, slot, cfis);
1641 case GET_EVENT_STATUS_NOTIFICATION:
1642 atapi_get_event_status_notification(p, slot, cfis);
/* Unknown opcode: CHECK CONDITION with ILLEGAL REQUEST sense. */
1645 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
1646 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1648 ahci_write_fis_d2h(p, slot, cfis, (p->sense_key << 12) |
1649 ATA_S_READY | ATA_S_ERROR);
/*
 * Top-level ATA command dispatcher for a Register H2D command FIS in
 * 'slot'. Sets BSY in the taskfile while handling, then switches on the
 * ATA opcode (cfis[2]): identify, SET FEATURES and SET MULTI complete
 * inline; reads/writes, flushes, TRIM and READ LOG go to their async
 * handlers; ATAPI PACKET goes to handle_packet_cmd() (aborted on non-ATAPI
 * ports); anything unrecognized is aborted.
 */
1655 ahci_handle_cmd(struct ahci_port *p, int slot, uint8_t *cfis)
1658 p->tfd |= ATA_S_BUSY;
1660 case ATA_ATA_IDENTIFY:
1661 handle_identify(p, slot, cfis);
1663 case ATA_SETFEATURES:
/* SET FEATURES: only the SATA AN feature is accepted; others abort. */
1666 case ATA_SF_ENAB_SATA_SF:
1668 case ATA_SATA_SF_AN:
1669 p->tfd = ATA_S_DSC | ATA_S_READY;
1672 p->tfd = ATA_S_ERROR | ATA_S_READY;
1673 p->tfd |= (ATA_ERROR_ABORT << 8);
1677 case ATA_SF_ENAB_WCACHE:
1678 case ATA_SF_DIS_WCACHE:
1679 case ATA_SF_ENAB_RCACHE:
1680 case ATA_SF_DIS_RCACHE:
1681 p->tfd = ATA_S_DSC | ATA_S_READY;
1683 case ATA_SF_SETXFER:
1685 switch (cfis[12] & 0xf8) {
1691 p->xfermode = (cfis[12] & 0x7);
1694 p->tfd = ATA_S_DSC | ATA_S_READY;
1698 p->tfd = ATA_S_ERROR | ATA_S_READY;
1699 p->tfd |= (ATA_ERROR_ABORT << 8);
1702 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
/* SET MULTI: count must be a power of two <= 128 (or 0). */
1706 if (cfis[12] != 0 &&
1707 (cfis[12] > 128 || (cfis[12] & (cfis[12] - 1)))) {
1708 p->tfd = ATA_S_ERROR | ATA_S_READY;
1709 p->tfd |= (ATA_ERROR_ABORT << 8);
1711 p->mult_sectors = cfis[12];
1712 p->tfd = ATA_S_DSC | ATA_S_READY;
1714 ahci_write_fis_d2h(p, slot, cfis, p->tfd);
1722 case ATA_READ_MUL48:
1723 case ATA_WRITE_MUL48:
1726 case ATA_READ_DMA48:
1727 case ATA_WRITE_DMA48:
1728 case ATA_READ_FPDMA_QUEUED:
1729 case ATA_WRITE_FPDMA_QUEUED:
1730 ahci_handle_rw(p, slot, cfis, 0);
1732 case ATA_FLUSHCACHE:
1733 case ATA_FLUSHCACHE48:
1734 ahci_handle_flush(p, slot, cfis);
1736 case ATA_DATA_SET_MANAGEMENT:
/* Only single-block TRIM requests are accepted. */
1737 if (cfis[11] == 0 && cfis[3] == ATA_DSM_TRIM &&
1738 cfis[13] == 0 && cfis[12] == 1) {
1739 ahci_handle_dsm_trim(p, slot, cfis, 0);
1742 ahci_write_fis_d2h(p, slot, cfis,
1743 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1745 case ATA_SEND_FPDMA_QUEUED:
1746 if ((cfis[13] & 0x1f) == ATA_SFPDMA_DSM &&
1747 cfis[17] == 0 && cfis[16] == ATA_DSM_TRIM &&
1748 cfis[11] == 0 && cfis[3] == 1) {
1749 ahci_handle_dsm_trim(p, slot, cfis, 0);
1752 ahci_write_fis_d2h(p, slot, cfis,
1753 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1755 case ATA_READ_LOG_EXT:
1756 case ATA_READ_LOG_DMA_EXT:
1757 ahci_handle_read_log(p, slot, cfis);
1759 case ATA_SECURITY_FREEZE_LOCK:
/* Security commands are not supported: abort. */
1762 ahci_write_fis_d2h(p, slot, cfis,
1763 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1765 case ATA_CHECK_POWER_MODE:
1766 cfis[12] = 0xff; /* always on */
1767 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1769 case ATA_STANDBY_CMD:
1770 case ATA_STANDBY_IMMEDIATE:
1772 case ATA_IDLE_IMMEDIATE:
1774 case ATA_READ_VERIFY:
1775 case ATA_READ_VERIFY48:
1776 ahci_write_fis_d2h(p, slot, cfis, ATA_S_READY | ATA_S_DSC);
1778 case ATA_ATAPI_IDENTIFY:
1779 handle_atapi_identify(p, slot, cfis);
1781 case ATA_PACKET_CMD:
/* PACKET on a non-ATAPI port is aborted; otherwise dispatch the CDB. */
1783 ahci_write_fis_d2h(p, slot, cfis,
1784 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
1786 handle_packet_cmd(p, slot, cfis);
1789 WPRINTF("Unsupported cmd:%02x\n", cfis[2]);
1790 ahci_write_fis_d2h(p, slot, cfis,
1791 (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR);
/*
 * Process one issued command slot: map the command FIS plus its PRDT from
 * the command header's CTBA, dump them under DPRINTF, and verify the FIS
 * is Register H2D before handing it to ahci_handle_cmd(). A FIS with the
 * command bit (byte 1 bit 7) clear is a control FIS, used here for
 * soft-reset sequencing (SRST bit in cfis[15]).
 */
1797 ahci_handle_slot(struct ahci_port *p, int slot)
1799 struct ahci_cmd_hdr *hdr;
1801 struct ahci_prdt_entry *prdt;
1803 struct pci_ahci_softc *sc;
1810 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
1812 cfl = (hdr->flags & 0x1f) * 4;
1814 cfis = paddr_guest2host(ahci_ctx(sc), hdr->ctba,
1815 0x80 + hdr->prdtl * sizeof(struct ahci_prdt_entry));
1817 prdt = (struct ahci_prdt_entry *)(cfis + 0x80);
1820 for (i = 0; i < cfl; i++) {
1823 DPRINTF("%02x ", cfis[i]);
1827 for (i = 0; i < hdr->prdtl; i++) {
1828 DPRINTF("%d@%08"PRIx64"\n", prdt->dbc & 0x3fffff, prdt->dba);
1833 if (cfis[0] != FIS_TYPE_REGH2D) {
1834 WPRINTF("Not a H2D FIS:%02x\n", cfis[0]);
1838 if (cfis[1] & 0x80) {
1839 ahci_handle_cmd(p, slot, cfis);
1841 if (cfis[15] & (1 << 2))
1843 else if (p->reset) {
1847 p->ci &= ~(1 << slot);
/*
 * Scan a started port (PxCMD.ST set) for newly issued commands: walk the
 * current-command-slot counter round-robin over CI bits not already
 * in-flight, updating the CCS field in PxCMD and handling each slot.
 * Stops early when the device reports busy/DRQ or a clear is pending.
 */
1852 ahci_handle_port(struct ahci_port *p)
1855 if (!(p->cmd & AHCI_P_CMD_ST))
1859 * Search for any new commands to issue ignoring those that
1860 * are already in-flight. Stop if device is busy or in error.
1862 for (; (p->ci & ~p->pending) != 0; p->ccs = ((p->ccs + 1) & 31)) {
1863 if ((p->tfd & (ATA_S_BUSY | ATA_S_DRQ)) != 0)
1865 if (p->waitforclear)
1867 if ((p->ci & ~p->pending & (1 << p->ccs)) != 0) {
1868 p->cmd &= ~AHCI_P_CMD_CCS_MASK;
1869 p->cmd |= p->ccs << AHCI_P_CMD_CCS_SHIFT;
1870 ahci_handle_slot(p, p->ccs);
1876 * blockif callback routine - this runs in the context of the blockif
1877 * i/o thread, so the mutex needs to be acquired.
/*
 * Completion callback for ATA block I/O.  Runs on the blockif worker
 * thread, so the softc mutex is taken before touching port state.
 * 'err' is the blockif error status (0 on success).
 */
1880 ata_ioreq_cb(struct blockif_req *br, int err)
1882 struct ahci_cmd_hdr *hdr;
1883 struct ahci_ioreq *aior;
1884 struct ahci_port *p;
1885 struct pci_ahci_softc *sc;
1890 DPRINTF("%s %d\n", __func__, err);
/* Recover our per-request bookkeeping from the blockif request. */
1893 aior = br->br_param;
1898 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + slot * AHCI_CL_SIZE);
/* NCQ commands complete via a Set Device Bits FIS rather than D2H. */
1900 if (cfis[2] == ATA_WRITE_FPDMA_QUEUED ||
1901 cfis[2] == ATA_READ_FPDMA_QUEUED ||
1902 cfis[2] == ATA_SEND_FPDMA_QUEUED)
/* DSM/TRIM (direct or via SEND FPDMA with the DSM subcommand). */
1904 if (cfis[2] == ATA_DATA_SET_MANAGEMENT ||
1905 (cfis[2] == ATA_SEND_FPDMA_QUEUED &&
1906 (cfis[13] & 0x1f) == ATA_SFPDMA_DSM))
1909 pthread_mutex_lock(&sc->mtx);
1912 * Delete the blockif request from the busy list
1914 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1917 * Move the blockif request back to the free list
1919 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far back to the guest's command header. */
1922 hdr->prdbc = aior->done;
/* Multi-part request: kick off the next segment and return. */
1924 if (!err && aior->more) {
1926 ahci_handle_dsm_trim(p, slot, cfis, aior->done);
1928 ahci_handle_rw(p, slot, cfis, aior->done);
/* Final completion: build the task-file status for the guest. */
1933 tfd = ATA_S_READY | ATA_S_DSC;
1935 tfd = (ATA_E_ABORT << 8) | ATA_S_READY | ATA_S_ERROR;
/* SDB FIS for NCQ completions, D2H register FIS otherwise. */
1937 ahci_write_fis_sdb(p, slot, cfis, tfd);
1939 ahci_write_fis_d2h(p, slot, cfis, tfd);
1942 * This command is now complete.
1944 p->pending &= ~(1 << slot);
/* The port may have been stopped mid-flight; then try to issue more. */
1946 ahci_check_stopped(p);
1947 ahci_handle_port(p);
1949 pthread_mutex_unlock(&sc->mtx);
1950 DPRINTF("%s exit\n", __func__);
/*
 * Completion callback for ATAPI (packet) block I/O.  Same structure as
 * ata_ioreq_cb(): runs on the blockif thread, takes the softc mutex,
 * recycles the ioreq and reports status via a D2H register FIS.
 */
1954 atapi_ioreq_cb(struct blockif_req *br, int err)
1956 struct ahci_cmd_hdr *hdr;
1957 struct ahci_ioreq *aior;
1958 struct ahci_port *p;
1959 struct pci_ahci_softc *sc;
1964 DPRINTF("%s %d\n", __func__, err);
1966 aior = br->br_param;
1971 hdr = (struct ahci_cmd_hdr *)(p->cmd_lst + aior->slot * AHCI_CL_SIZE);
1973 pthread_mutex_lock(&sc->mtx);
1976 * Delete the blockif request from the busy list
1978 TAILQ_REMOVE(&p->iobhd, aior, io_blist);
1981 * Move the blockif request back to the free list
1983 STAILQ_INSERT_TAIL(&p->iofhd, aior, io_flist);
/* Report bytes transferred so far back to the guest's command header. */
1986 hdr->prdbc = aior->done;
/* Multi-part read: continue with the next chunk and return. */
1988 if (!err && aior->more) {
1989 atapi_read(p, slot, cfis, aior->done);
1994 tfd = ATA_S_READY | ATA_S_DSC;
/* On error, record a sense key for a subsequent REQUEST SENSE. */
1996 p->sense_key = ATA_SENSE_ILLEGAL_REQUEST;
1998 tfd = (p->sense_key << 12) | ATA_S_READY | ATA_S_ERROR;
/* Interrupt reason byte: command completed, data from device. */
2000 cfis[4] = (cfis[4] & ~7) | ATA_I_CMD | ATA_I_IN;
2001 ahci_write_fis_d2h(p, slot, cfis, tfd);
2004 * This command is now complete.
2006 p->pending &= ~(1 << slot);
2008 ahci_check_stopped(p);
2009 ahci_handle_port(p);
2011 pthread_mutex_unlock(&sc->mtx);
2012 DPRINTF("%s exit\n", __func__);
/*
 * Allocate the port's pool of I/O request structures, sized to the
 * blockif queue depth, wire up the completion callback appropriate for
 * the device type, and put every entry on the free list.
 * NOTE(review): calloc() return is not checked here — confirm callers
 * tolerate (or the full file handles) allocation failure.
 */
2016 pci_ahci_ioreq_init(struct ahci_port *pr)
2018 struct ahci_ioreq *vr;
2021 pr->ioqsz = blockif_queuesz(pr->bctx);
2022 pr->ioreq = calloc(pr->ioqsz, sizeof(struct ahci_ioreq));
2023 STAILQ_INIT(&pr->iofhd);
2026 * Add all i/o request entries to the free queue
2028 for (i = 0; i < pr->ioqsz; i++) {
/* ATA vs ATAPI devices complete through different callbacks. */
2032 vr->io_req.br_callback = ata_ioreq_cb;
2034 vr->io_req.br_callback = atapi_ioreq_cb;
/* br_param lets the callback recover this ioreq from the blockif req. */
2035 vr->io_req.br_param = vr;
2036 STAILQ_INSERT_TAIL(&pr->iofhd, vr, io_flist);
/* Busy list starts empty; entries migrate here while in flight. */
2039 TAILQ_INIT(&pr->iobhd);
/*
 * Handle a guest write to a per-port AHCI register.  'offset' arrives
 * relative to BAR 5 and is reduced to a port-relative register offset.
 * Called with the softc mutex held (from pci_ahci_write()).
 */
2043 pci_ahci_port_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2045 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2046 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2047 struct ahci_port *p = &sc->port[port];
2049 DPRINTF("pci_ahci_port %d: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
2050 port, offset, value);
/* PxIE: mask of interrupt-enable bits the guest may set. */
2070 p->ie = value & 0xFDC000FF;
/* PxCMD: replace the guest-writable bits, preserve the status bits. */
2075 p->cmd &= ~(AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2076 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2077 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2078 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK);
2079 p->cmd |= (AHCI_P_CMD_ST | AHCI_P_CMD_SUD | AHCI_P_CMD_POD |
2080 AHCI_P_CMD_CLO | AHCI_P_CMD_FRE | AHCI_P_CMD_APSTE |
2081 AHCI_P_CMD_ATAPI | AHCI_P_CMD_DLAE | AHCI_P_CMD_ALPE |
2082 AHCI_P_CMD_ASP | AHCI_P_CMD_ICC_MASK) & value;
2084 if (!(value & AHCI_P_CMD_ST)) {
/* Starting the port: set CR and map the command list from guest RAM. */
2089 p->cmd |= AHCI_P_CMD_CR;
2090 clb = (uint64_t)p->clbu << 32 | p->clb;
2091 p->cmd_lst = paddr_guest2host(ahci_ctx(sc), clb,
2092 AHCI_CL_SIZE * AHCI_MAX_SLOTS);
/* FRE: enable FIS receive and map the receive area. */
2095 if (value & AHCI_P_CMD_FRE) {
2098 p->cmd |= AHCI_P_CMD_FR;
2099 fb = (uint64_t)p->fbu << 32 | p->fb;
2100 /* we don't support FBSCP, so rfis size is 256Bytes */
2101 p->rfis = paddr_guest2host(ahci_ctx(sc), fb, 256);
2103 p->cmd &= ~AHCI_P_CMD_FR;
/* CLO: command-list override clears BSY/DRQ; self-clearing bit. */
2106 if (value & AHCI_P_CMD_CLO) {
2107 p->tfd &= ~(ATA_S_BUSY | ATA_S_DRQ);
2108 p->cmd &= ~AHCI_P_CMD_CLO;
/* ICC transitions complete immediately in this emulation. */
2111 if (value & AHCI_P_CMD_ICC_MASK) {
2112 p->cmd &= ~AHCI_P_CMD_ICC_MASK;
/* A PxCMD write may have made new commands issuable. */
2115 ahci_handle_port(p);
/* PxTFD/PxSIG (and similar) are read-only from the guest's side. */
2121 WPRINTF("pci_ahci_port: read only registers 0x%"PRIx64"\n", offset);
/* PxSCTL: DET reset only honored while the port is stopped. */
2125 if (!(p->cmd & AHCI_P_CMD_ST)) {
2126 if (value & ATA_SC_DET_RESET)
2138 ahci_handle_port(p);
/*
 * Handle a guest write to a global (HBA-level) AHCI register.
 * Called with the softc mutex held.
 */
2148 pci_ahci_host_write(struct pci_ahci_softc *sc, uint64_t offset, uint64_t value)
2150 DPRINTF("pci_ahci_host: write offset 0x%"PRIx64" value 0x%"PRIx64"\n",
/* CAP/PI/VS etc. are read-only; log and ignore. */
2158 DPRINTF("pci_ahci_host: read only registers 0x%"PRIx64"\n", offset);
/* GHC: HR requests a full controller reset. */
2161 if (value & AHCI_GHC_HR) {
2165 if (value & AHCI_GHC_IE)
2166 sc->ghc |= AHCI_GHC_IE;
2168 sc->ghc &= ~AHCI_GHC_IE;
/* Re-evaluate interrupt state for all ports after IE changes. */
2169 ahci_generate_intr(sc, 0xffffffff);
/* IS write: bits written are acknowledged; regenerate for those ports. */
2173 ahci_generate_intr(sc, value);
/*
 * PCI BAR write entry point.  AHCI registers live in BAR 5 and are
 * only accessed as aligned 32-bit words; route to the host- or
 * port-level handler under the softc mutex.
 */
2181 pci_ahci_write(struct vmctx *ctx, int vcpu, struct pci_devinst *pi,
2182 int baridx, uint64_t offset, int size, uint64_t value)
2184 struct pci_ahci_softc *sc = pi->pi_arg;
2186 assert(baridx == 5);
2187 assert((offset % 4) == 0 && size == 4);
2189 pthread_mutex_lock(&sc->mtx);
/* Global registers precede the per-port register banks. */
2191 if (offset < AHCI_OFFSET)
2192 pci_ahci_host_write(sc, offset, value);
2193 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2194 pci_ahci_port_write(sc, offset, value);
2196 WPRINTF("pci_ahci: unknown i/o write offset 0x%"PRIx64"\n", offset);
2198 pthread_mutex_unlock(&sc->mtx);
/*
 * Read a global AHCI register.  The global registers are laid out as a
 * contiguous run of uint32_t fields in the softc starting at 'cap', so
 * the register is fetched by offset arithmetic from &sc->cap.
 */
2202 pci_ahci_host_read(struct pci_ahci_softc *sc, uint64_t offset)
2218 uint32_t *p = &sc->cap;
2219 p += (offset - AHCI_CAP) / sizeof(uint32_t);
2227 DPRINTF("pci_ahci_host: read offset 0x%"PRIx64" value 0x%x\n",
/*
 * Read a per-port AHCI register.  As with the host registers, the port
 * registers are a contiguous run of uint32_t fields starting at 'clb'
 * in the port structure, indexed by register offset.
 */
2234 pci_ahci_port_read(struct pci_ahci_softc *sc, uint64_t offset)
2237 int port = (offset - AHCI_OFFSET) / AHCI_STEP;
2238 offset = (offset - AHCI_OFFSET) % AHCI_STEP;
2258 uint32_t *p= &sc->port[port].clb;
2259 p += (offset - AHCI_P_CLB) / sizeof(uint32_t);
2268 DPRINTF("pci_ahci_port %d: read offset 0x%"PRIx64" value 0x%x\n",
2269 port, offset, value);
/*
 * PCI BAR read entry point.  Sub-word (1/2-byte) reads are supported by
 * reading the enclosing aligned 32-bit register and shifting the
 * requested bytes into place.
 */
2275 pci_ahci_read(struct vmctx *ctx, int vcpu, struct pci_devinst *pi, int baridx,
2276 uint64_t regoff, int size)
2278 struct pci_ahci_softc *sc = pi->pi_arg;
2282 assert(baridx == 5);
2283 assert(size == 1 || size == 2 || size == 4);
2284 assert((regoff & (size - 1)) == 0);
2286 pthread_mutex_lock(&sc->mtx);
2288 offset = regoff & ~0x3; /* round down to a multiple of 4 bytes */
2289 if (offset < AHCI_OFFSET)
2290 value = pci_ahci_host_read(sc, offset);
2291 else if (offset < AHCI_OFFSET + sc->ports * AHCI_STEP)
2292 value = pci_ahci_port_read(sc, offset);
2295 WPRINTF("pci_ahci: unknown i/o read offset 0x%"PRIx64"\n",
/* Shift the addressed byte(s) down for narrow reads. */
2298 value >>= 8 * (regoff & 0x3);
2300 pthread_mutex_unlock(&sc->mtx);
/*
 * Common device initialization for both "ahci-hd" and "ahci-cd".
 * Parses the comma-separated, type-prefixed ("hd:"/"cd:") port option
 * string, opens a blockif backend per port, builds the HBA capability
 * registers, and programs the PCI config space / BAR.
 * 'atapi' selects the default device type for untyped port options.
 */
2306 pci_ahci_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts, int atapi)
2308 char bident[sizeof("XX:XX:XX")];
2309 struct blockif_ctxt *bctxt;
2310 struct pci_ahci_softc *sc;
/* Debug log sink used by DPRINTF. */
2319 dbg = fopen("/tmp/log", "w+");
2322 sc = calloc(1, sizeof(struct pci_ahci_softc));
2325 pthread_mutex_init(&sc->mtx, NULL);
2330 for (p = 0; p < MAX_PORTS && opts != NULL; p++, opts = next) {
2331 /* Identify and cut off type of present port. */
2332 if (strncmp(opts, "hd:", 3) == 0) {
2335 } else if (strncmp(opts, "cd:", 3) == 0) {
2340 /* Find and cut off the next port options. */
2341 next = strstr(opts, ",hd:");
2342 next2 = strstr(opts, ",cd:");
2343 if (next == NULL || (next2 != NULL && next2 < next))
2354 * Attempt to open the backing image. Use the PCI slot/func
2355 * and the port number for the identifier string.
2357 snprintf(bident, sizeof(bident), "%d:%d:%d", pi->pi_slot,
2359 bctxt = blockif_open(opts, bident);
2360 if (bctxt == NULL) {
2365 sc->port[p].bctx = bctxt;
2366 sc->port[p].pr_sc = sc;
2367 sc->port[p].port = p;
2368 sc->port[p].atapi = atapi;
2371 * Create an identifier for the backing file.
2372 * Use parts of the md5 sum of the filename
2375 MD5Update(&mdctx, opts, strlen(opts));
2376 MD5Final(digest, &mdctx);
/*
 * NOTE(review): sprintf into a fixed-size ident field; the formatted
 * string is 20 chars + NUL — verify the field size and prefer
 * snprintf to guarantee no overflow.
 */
2377 sprintf(sc->port[p].ident, "BHYVE-%02X%02X-%02X%02X-%02X%02X",
2378 digest[0], digest[1], digest[2], digest[3], digest[4],
2382 * Allocate blockif request structures and add them
2385 pci_ahci_ioreq_init(&sc->port[p]);
/* Advertise the smallest queue depth across all ports as NCS. */
2388 if (sc->port[p].ioqsz < slots)
2389 slots = sc->port[p].ioqsz;
2393 /* Intel ICH8 AHCI */
2395 if (sc->ports < DEF_PORTS)
2396 sc->ports = DEF_PORTS;
/* HBA capabilities: 64-bit DMA, NCQ, SNTF, Gen3 speed, etc. */
2397 sc->cap = AHCI_CAP_64BIT | AHCI_CAP_SNCQ | AHCI_CAP_SSNTF |
2398 AHCI_CAP_SMPS | AHCI_CAP_SSS | AHCI_CAP_SALP |
2399 AHCI_CAP_SAL | AHCI_CAP_SCLO | (0x3 << AHCI_CAP_ISS_SHIFT)|
2400 AHCI_CAP_PMD | AHCI_CAP_SSC | AHCI_CAP_PSC |
2401 (slots << AHCI_CAP_NCS_SHIFT) | AHCI_CAP_SXS | (sc->ports - 1);
2404 sc->cap2 = AHCI_CAP2_APST;
/* Present as an Intel ICH8 AHCI controller. */
2407 pci_set_cfgdata16(pi, PCIR_DEVICE, 0x2821);
2408 pci_set_cfgdata16(pi, PCIR_VENDOR, 0x8086);
2409 pci_set_cfgdata8(pi, PCIR_CLASS, PCIC_STORAGE);
2410 pci_set_cfgdata8(pi, PCIR_SUBCLASS, PCIS_STORAGE_SATA);
2411 pci_set_cfgdata8(pi, PCIR_PROGIF, PCIP_STORAGE_SATA_AHCI_1_0);
/* MSI vector count: next power of two >= min(ports, 16). */
2412 p = MIN(sc->ports, 16);
2413 p = flsl(p) - ((p & (p - 1)) ? 0 : 1);
2414 pci_emul_add_msicap(pi, 1 << p);
2415 pci_emul_alloc_bar(pi, 5, PCIBAR_MEM32,
2416 AHCI_OFFSET + sc->ports * AHCI_STEP);
2418 pci_lintr_request(pi);
/* Error path: close any backends that were successfully opened. */
2422 for (p = 0; p < sc->ports; p++) {
2423 if (sc->port[p].bctx != NULL)
2424 blockif_close(sc->port[p].bctx);
/* "ahci"/"ahci-hd" entry point: initialize with ATA (disk) defaults. */
2433 pci_ahci_hd_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2436 return (pci_ahci_init(ctx, pi, opts, 0));
/* "ahci-cd" entry point: initialize with ATAPI (CD-ROM) defaults. */
2440 pci_ahci_atapi_init(struct vmctx *ctx, struct pci_devinst *pi, char *opts)
2443 return (pci_ahci_init(ctx, pi, opts, 1));
2447 * Use separate emulation names to distinguish drive and atapi devices
/* Legacy "ahci" emulation name (disk defaults). */
2449 struct pci_devemu pci_de_ahci = {
2451 .pe_init = pci_ahci_hd_init,
2452 .pe_barwrite = pci_ahci_write,
2453 .pe_barread = pci_ahci_read
2455 PCI_EMUL_SET(pci_de_ahci);
/* "ahci-hd" emulation: hard-disk ports by default. */
2457 struct pci_devemu pci_de_ahci_hd = {
2458 .pe_emu = "ahci-hd",
2459 .pe_init = pci_ahci_hd_init,
2460 .pe_barwrite = pci_ahci_write,
2461 .pe_barread = pci_ahci_read
2463 PCI_EMUL_SET(pci_de_ahci_hd);
/* "ahci-cd" emulation: ATAPI CD-ROM ports by default. */
2465 struct pci_devemu pci_de_ahci_cd = {
2466 .pe_emu = "ahci-cd",
2467 .pe_init = pci_ahci_atapi_init,
2468 .pe_barwrite = pci_ahci_write,
2469 .pe_barread = pci_ahci_read
2471 PCI_EMUL_SET(pci_de_ahci_cd);