2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 1999,2000 Michael Smith
5 * Copyright (c) 2000 BSDi
6 * Copyright (c) 2005 Scott Long
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 * Copyright (c) 2002 Eric Moore
32 * Copyright (c) 2002, 2004 LSI Logic Corporation
33 * All rights reserved.
35 * Redistribution and use in source and binary forms, with or without
36 * modification, are permitted provided that the following conditions
38 * 1. Redistributions of source code must retain the above copyright
39 * notice, this list of conditions and the following disclaimer.
40 * 2. Redistributions in binary form must reproduce the above copyright
41 * notice, this list of conditions and the following disclaimer in the
42 * documentation and/or other materials provided with the distribution.
43 * 3. The party using or redistributing the source code and binary forms
44 * agrees to the disclaimer below and the terms and conditions set forth
47 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
48 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
51 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
52 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
53 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
54 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
55 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
56 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
60 #include <sys/cdefs.h>
61 __FBSDID("$FreeBSD$");
64 * Driver for the AMI MegaRaid family of controllers.
67 #include <sys/param.h>
68 #include <sys/systm.h>
69 #include <sys/malloc.h>
70 #include <sys/kernel.h>
72 #include <sys/sysctl.h>
79 #include <machine/bus.h>
80 #include <machine/cpu.h>
81 #include <machine/resource.h>
84 #include <dev/pci/pcireg.h>
85 #include <dev/pci/pcivar.h>
87 #include <dev/amr/amrio.h>
88 #include <dev/amr/amrreg.h>
89 #include <dev/amr/amrvar.h>
90 #define AMR_DEFINE_TABLES
91 #include <dev/amr/amr_tables.h>
/* Root of the hw.amr sysctl tree; per-device nodes are added in amr_init_sysctl(). */
93 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD | CTLFLAG_MPSAFE, 0,
94 "AMR driver parameters");
96 static d_open_t amr_open;
97 static d_close_t amr_close;
98 static d_ioctl_t amr_ioctl;
/*
 * Character-device switch for the /dev/amr%d control node; runs under
 * Giant (D_NEEDGIANT).
 * NOTE(review): excerpt appears truncated -- the .d_open/.d_name entries
 * and closing brace are not visible; confirm against the full source.
 */
100 static struct cdevsw amr_cdevsw = {
101 .d_version = D_VERSION,
102 .d_flags = D_NEEDGIANT,
104 .d_close = amr_close,
105 .d_ioctl = amr_ioctl,
/* Adapter count reported to Linux-emulation ioctl callers (see amr_linux_ioctl_int). */
109 int linux_no_adapter = 0;
/*
 * Forward declarations for file-local helpers, grouped by role.
 * NOTE(review): several comment openers and preprocessor lines (e.g. the
 * #endif matching AMR_BOARD_INIT) are missing from this excerpt.
 */
111 * Initialisation, bus interface.
113 static void amr_startup(void *arg);
118 static int amr_query_controller(struct amr_softc *sc);
119 static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
120 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
121 static void amr_completeio(struct amr_command *ac);
122 static int amr_support_ext_cdb(struct amr_softc *sc);
125 * Command buffer allocation.
127 static void amr_alloccmd_cluster(struct amr_softc *sc);
128 static void amr_freecmd_cluster(struct amr_command_cluster *acc);
131 * Command processing.
133 static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
134 static int amr_wait_command(struct amr_command *ac) __unused;
135 static int amr_mapcmd(struct amr_command *ac);
136 static void amr_unmapcmd(struct amr_command *ac);
137 static int amr_start(struct amr_command *ac);
138 static void amr_complete(void *context, ac_qhead_t *head);
139 static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
140 static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
141 static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
142 static void amr_abort_load(struct amr_command *ac);
145 * Interface-specific shims
147 static int amr_quartz_submit_command(struct amr_command *ac);
148 static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
149 static int amr_quartz_poll_command(struct amr_command *ac);
150 static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
152 static int amr_std_submit_command(struct amr_command *ac);
153 static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
154 static int amr_std_poll_command(struct amr_command *ac);
155 static void amr_std_attach_mailbox(struct amr_softc *sc);
157 #ifdef AMR_BOARD_INIT
158 static int amr_quartz_init(struct amr_softc *sc);
159 static int amr_std_init(struct amr_softc *sc);
165 static void amr_describe_controller(struct amr_softc *sc);
168 static void amr_printcommand(struct amr_command *ac);
172 static void amr_init_sysctl(struct amr_softc *sc);
173 static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
174 int32_t flag, struct thread *td);
/* Kernel malloc type tag for all allocations made by this driver. */
176 static MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
178 /********************************************************************************
179 ********************************************************************************
181 ********************************************************************************
182 ********************************************************************************/
184 /********************************************************************************
185 ********************************************************************************
187 ********************************************************************************
188 ********************************************************************************/
/*
 * Attach entry point: set up queues, bind the hardware-specific command
 * shims (Quartz vs. standard), allocate command buffers, attach the CAM
 * passthrough child and control device, and defer drive discovery to
 * amr_startup() via an intrhook.
 * NOTE(review): excerpt is truncated -- return type, locals, error paths
 * and the closing brace are not visible here.
 */
190 /********************************************************************************
191 * Initialise the controller and softc.
194 amr_attach(struct amr_softc *sc)
201 * Initialise per-controller queues.
203 amr_init_qhead(&sc->amr_freecmds);
204 amr_init_qhead(&sc->amr_ready);
205 TAILQ_INIT(&sc->amr_cmd_clusters);
206 bioq_init(&sc->amr_bioq);
208 debug(2, "queue init done");
211 * Configure for this controller type.
/* Quartz boards use memory-mapped mailboxes; others use the "standard" I/O path. */
213 if (AMR_IS_QUARTZ(sc)) {
214 sc->amr_submit_command = amr_quartz_submit_command;
215 sc->amr_get_work = amr_quartz_get_work;
216 sc->amr_poll_command = amr_quartz_poll_command;
217 sc->amr_poll_command1 = amr_quartz_poll_command1;
219 sc->amr_submit_command = amr_std_submit_command;
220 sc->amr_get_work = amr_std_get_work;
221 sc->amr_poll_command = amr_std_poll_command;
222 amr_std_attach_mailbox(sc);
225 #ifdef AMR_BOARD_INIT
226 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc)))
231 * Allocate initial commands.
233 amr_alloccmd_cluster(sc);
236 * Quiz controller for features and limits.
238 if (amr_query_controller(sc))
241 debug(2, "controller query complete");
244 * preallocate the remaining commands.
/* amr_maxio was just set by amr_query_controller(); fill the command pool to it. */
246 while (sc->amr_nextslot < sc->amr_maxio)
247 amr_alloccmd_cluster(sc);
255 * Attach our 'real' SCSI channels to CAM.
257 child = device_add_child(sc->amr_dev, "amrp", -1);
258 sc->amr_pass = child;
260 device_set_softc(child, sc);
261 device_set_desc(child, "SCSI Passthrough Bus");
262 bus_generic_attach(sc->amr_dev);
266 * Create the control device.
268 sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
269 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
270 sc->amr_dev_t->si_drv1 = sc;
/* Compatibility alias expected by LSI's megamgr tooling on unit 0. */
272 if (device_get_unit(sc->amr_dev) == 0)
273 make_dev_alias(sc->amr_dev_t, "megadev0");
276 * Schedule ourselves to bring the controller up once interrupts are
279 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
280 sc->amr_ich.ich_func = amr_startup;
281 sc->amr_ich.ich_arg = sc;
282 if (config_intrhook_establish(&sc->amr_ich) != 0) {
283 device_printf(sc->amr_dev, "can't establish configuration hook\n");
288 * Print a little information about the controller.
290 amr_describe_controller(sc);
292 debug(2, "attach complete");
/*
 * Intrhook callback (registered in amr_attach): re-query the controller
 * now that interrupts work, create a child device per logical drive,
 * attach them, and then remove ourselves from the intrhook chain.
 * NOTE(review): excerpt truncated -- locals and several closing braces
 * are not visible.
 */
296 /********************************************************************************
297 * Locate disk resources and attach children to them.
300 amr_startup(void *arg)
302 struct amr_softc *sc = (struct amr_softc *)arg;
303 struct amr_logdrive *dr;
308 /* get up-to-date drive information */
309 if (amr_query_controller(sc)) {
310 device_printf(sc->amr_dev, "can't scan controller for drives\n");
314 /* iterate over available drives */
/* al_size == 0xffffffff marks the first unused slot (set in amr_query_controller). */
315 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
316 /* are we already attached to this drive? */
317 if (dr->al_disk == 0) {
318 /* generate geometry information */
319 if (dr->al_size > 0x200000) { /* extended translation? */
326 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
328 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
329 if (dr->al_disk == 0)
330 device_printf(sc->amr_dev, "device_add_child failed\n");
331 device_set_ivars(dr->al_disk, dr);
335 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
336 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
338 /* mark controller back up */
339 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
341 /* interrupts will be enabled before we do anything more */
342 sc->amr_state |= AMR_STATE_INTEN;
344 /* pull ourselves off the intrhook chain */
345 if (sc->amr_ich.ich_func)
346 config_intrhook_disestablish(&sc->amr_ich);
347 sc->amr_ich.ich_func = NULL;
/*
 * Publish per-controller tunables/counters under this device's sysctl tree:
 * allow_volume_configure (RW gate for create/delete-volume ioctls),
 * nextslot, busyslots, and maxio (RO pool statistics).
 * NOTE(review): the description-string argument lines of each call are
 * missing from this excerpt.
 */
353 amr_init_sysctl(struct amr_softc *sc)
356 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
357 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
358 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
360 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
361 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
362 OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
364 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
365 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
366 OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
368 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
369 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
370 OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
/*
 * Tear down a controller instance: detach the CAM passthrough child,
 * release all command clusters, destroy the control device and mutexes.
 * Caller is responsible for quiescing I/O first.
 */
374 /*******************************************************************************
375 * Free resources associated with a controller instance
378 amr_free(struct amr_softc *sc)
380 struct amr_command_cluster *acc;
382 /* detach from CAM */
383 if (sc->amr_pass != NULL)
384 device_delete_child(sc->amr_dev, sc->amr_pass);
386 /* throw away any command buffers */
387 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
388 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
389 amr_freecmd_cluster(acc);
392 /* destroy control device */
393 if( sc->amr_dev_t != (struct cdev *)NULL)
394 destroy_dev(sc->amr_dev_t);
/* Mutexes may not have been initialised if attach failed early. */
396 if (mtx_initialized(&sc->amr_hw_lock))
397 mtx_destroy(&sc->amr_hw_lock);
399 if (mtx_initialized(&sc->amr_list_lock))
400 mtx_destroy(&sc->amr_list_lock);
/*
 * Queue a bio from a child disk device under the list lock.
 * NOTE(review): the call that kicks command processing (presumably
 * amr_startio) is missing from this excerpt -- confirm in the full file.
 */
403 /*******************************************************************************
404 * Receive a bio structure from a child device and queue it on a particular
405 * disk resource, then poke the disk resource to start as much work as it can.
408 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
412 mtx_lock(&sc->amr_list_lock);
413 amr_enqueue_bio(sc, bio);
415 mtx_unlock(&sc->amr_list_lock);
/*
 * d_open handler for the control device: look up the softc by unit and
 * mark the device open.  NOTE(review): privilege/validity checks and the
 * return statement are not visible in this excerpt.
 */
419 /********************************************************************************
420 * Accept an open operation on the control device.
423 amr_open(struct cdev *dev, int flags, int fmt, struct thread *td)
425 int unit = dev2unit(dev);
426 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
430 sc->amr_state |= AMR_STATE_OPEN;
/*
 * Post-processing after a logical-drive delete command: clear the queue
 * freeze and delete flags, request a logical-drive remap, and tear down
 * the disk device.  Heavily truncated in this excerpt; the deleted-drive
 * handling between the debug() calls is not visible.
 */
436 amr_del_ld(struct amr_softc *sc, int drv_no, int status)
441 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
442 sc->amr_state &= ~AMR_STATE_LD_DELETE;
443 sc->amr_state |= AMR_STATE_REMAP_LD;
444 debug(1, "State Set");
447 debug(1, "disk begin destroyed %d",drv_no);
/* Drop the disk cdevsw once the last registered disk goes away. */
448 if (--amr_disks_registered == 0)
449 cdevsw_remove(&amrddisk_cdevsw);
450 debug(1, "disk begin destroyed success");
/*
 * Prepare for a logical-drive delete: freeze the command queue, set the
 * delete state, then sleep up to 1 minute waiting for in-flight commands
 * to drain.  Returns non-zero (not visible here) if slots remain busy.
 * NOTE(review): the comment says "5 minutes" but the tsleep timeout is
 * hz * 60 * 1 (one minute) -- flag to the author.
 */
456 amr_prepare_ld_delete(struct amr_softc *sc)
460 if (sc->ld_del_supported == 0)
463 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
464 sc->amr_state |= AMR_STATE_LD_DELETE;
466 /* 5 minutes for the all the commands to be flushed.*/
467 tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO,"delete_logical_drv",hz * 60 * 1);
468 if ( sc->amr_busyslots )
/*
 * d_close handler for the control device: clear the open flag.
 * Mirrors amr_open(); return statement not visible in this excerpt.
 */
475 /********************************************************************************
476 * Accept the last close on the control device.
479 amr_close(struct cdev *dev, int flags, int fmt, struct thread *td)
481 int unit = dev2unit(dev);
482 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
486 sc->amr_state &= ~AMR_STATE_OPEN;
/*
 * Rebuild the logical-drive view after a create/delete: wait for the
 * controller to go idle, flush its cache, delete all existing disk
 * children, then (in code not visible here) re-run discovery.
 */
490 /********************************************************************************
491 * Handle controller-specific control operations.
494 amr_rescan_drives(struct cdev *dev)
496 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
499 sc->amr_state |= AMR_STATE_REMAP_LD;
/* Busy-wait for outstanding commands; the sleep between iterations is not visible. */
500 while (sc->amr_busyslots) {
501 device_printf(sc->amr_dev, "idle controller\n");
505 /* mark ourselves as in-shutdown */
506 sc->amr_state |= AMR_STATE_SHUTDOWN;
508 /* flush controller */
509 device_printf(sc->amr_dev, "flushing cache...");
510 printf("%s\n", amr_flush(sc) ? "failed" : "done");
512 /* delete all our child devices */
513 for(i = 0 ; i < AMR_MAXLD; i++) {
514 if(sc->amr_drive[i].al_disk != 0) {
515 if((error = device_delete_child(sc->amr_dev,
516 sc->amr_drive[i].al_disk)) != 0)
519 sc->amr_drive[i].al_disk = 0;
528 * Bug-for-bug compatibility with Linux!
529 * Some apps will send commands with inlen and outlen set to 0,
530 * even though they expect data to be transferred to them from the
531 * card. Linux accidentally allows this by allocating a 4KB
532 * buffer for the transfer anyways, but it then throws it away
533 * without copying it back to the app.
535 * The amr(4) firmware relies on this feature. In fact, it assumes
536 * the buffer is always a power of 2 up to a max of 64k. There is
537 * also at least one case where it assumes a buffer less than 16k is
538 * greater than 16k. However, forcing all buffers to a size of 32k
539 * causes stalls in the firmware. Force each command smaller than
540 * 64k up to the next power of two except that commands between 8k
541 * and 16k are rounded up to 32k instead of 16k.
/*
 * Round an ioctl transfer length up per the rules above.
 * NOTE(review): the smaller-size tiers (4k/8k/32k-for-8k..16k) and the
 * return statements are missing from this excerpt.
 */
544 amr_ioctl_buffer_length(unsigned long len)
551 if (len <= 32 * 1024)
553 if (len <= 64 * 1024)
/*
 * Linux megaraid ioctl emulation: decode the struct amr_linux_ioctl,
 * service driver-info subopcodes directly, gate volume create/delete on
 * the allow_volume_configure sysctl, and execute passthrough (AMR_CMD_PASS)
 * or plain mailbox commands with bounce buffers copied in/out of userspace.
 * NOTE(review): this excerpt is heavily truncated (missing case labels,
 * error paths, frees, and the final return) -- do not rely on the visible
 * control flow alone.
 */
559 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
562 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
563 struct amr_command *ac;
564 struct amr_mailbox *mb;
565 struct amr_linux_ioctl ali;
568 int len, ac_flags = 0;
569 int logical_drives_changed = 0;
/* Version reported to Linux callers: 2.10.x encoding. */
570 u_int32_t linux_version = 0x02100000;
572 struct amr_passthrough *ap; /* 60 bytes */
579 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
581 switch (ali.ui.fcs.opcode) {
583 switch(ali.ui.fcs.subopcode) {
585 copyout(&linux_version, (void *)(uintptr_t)ali.data,
586 sizeof(linux_version));
591 copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
592 sizeof(linux_no_adapter));
593 td->td_retval[0] = linux_no_adapter;
598 printf("Unknown subopcode\n");
606 if (ali.ui.fcs.opcode == 0x80)
607 len = max(ali.outlen, ali.inlen);
609 len = ali.ui.fcs.length;
611 mb = (void *)&ali.mbox[0];
/* Volume reconfiguration is privileged; refuse unless the sysctl allows it. */
613 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
614 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
615 if (sc->amr_allow_vol_config == 0) {
619 logical_drives_changed = 1;
622 if (ali.mbox[0] == AMR_CMD_PASS) {
623 mtx_lock(&sc->amr_list_lock);
624 while ((ac = amr_alloccmd(sc)) == NULL)
625 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
626 mtx_unlock(&sc->amr_list_lock);
627 ap = &ac->ac_ccb->ccb_pthru;
629 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
630 sizeof(struct amr_passthrough));
634 if (ap->ap_data_transfer_length)
635 dp = malloc(ap->ap_data_transfer_length, M_AMR,
639 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
640 dp, ap->ap_data_transfer_length);
645 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
646 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
647 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
648 ac->ac_flags = ac_flags;
651 ac->ac_length = ap->ap_data_transfer_length;
652 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
654 mtx_lock(&sc->amr_list_lock);
655 error = amr_wait_command(ac);
656 mtx_unlock(&sc->amr_list_lock);
/* Copy SCSI status, data, and sense back into the caller's passthrough struct. */
660 status = ac->ac_status;
661 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
666 error = copyout(dp, temp, ap->ap_data_transfer_length);
670 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
676 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
677 printf("No AMR_CMD_PASS_64\n");
680 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
681 printf("No AMR_CMD_EXTPASS\n");
/* Plain mailbox command: bounce buffer sized per firmware quirks (see above). */
685 len = amr_ioctl_buffer_length(imax(ali.inlen, ali.outlen));
687 dp = malloc(len, M_AMR, M_WAITOK | M_ZERO);
690 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
695 mtx_lock(&sc->amr_list_lock);
696 while ((ac = amr_alloccmd(sc)) == NULL)
697 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
699 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
700 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
701 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
705 ac->ac_flags = ac_flags;
707 error = amr_wait_command(ac);
708 mtx_unlock(&sc->amr_list_lock);
712 status = ac->ac_status;
713 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
715 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, ali.outlen);
/* A successful create/delete invalidates the drive map; rescan. */
721 if (logical_drives_changed)
722 amr_rescan_drives(dev);
728 debug(1, "unknown linux ioctl 0x%lx", cmd);
729 printf("unknown linux ioctl 0x%lx\n", cmd);
735 * At this point, we know that there is a lock held and that these
736 * objects have been allocated.
738 mtx_lock(&sc->amr_list_lock);
741 mtx_unlock(&sc->amr_list_lock);
/*
 * Native control-device ioctl handler: supports AMR_IO_VERSION,
 * AMR_IO_COMMAND (and a 32-bit compat variant), plus dispatch of the
 * Linux-emulation ioctl to amr_linux_ioctl_int().  User commands are
 * either SCSI passthrough (AMR_CMD_PASS) or direct mailbox commands,
 * run synchronously with a bounce buffer.
 * NOTE(review): excerpt is truncated -- locals, error returns, frees,
 * and the closing brace are not visible.
 */
748 amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
750 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
753 struct amr_user_ioctl *au;
754 #ifdef AMR_IO_COMMAND32
755 struct amr_user_ioctl32 *au32;
759 struct amr_command *ac;
760 struct amr_mailbox_ioctl *mbi;
761 void *dp, *au_buffer;
762 unsigned long au_length, real_length;
763 unsigned char *au_cmd;
766 struct amr_passthrough *ap; /* 60 bytes */
767 int logical_drives_changed = 0;
771 arg._p = (void *)addr;
780 debug(1, "AMR_IO_VERSION");
781 *arg.result = AMR_IO_VERSION_NUMBER;
784 #ifdef AMR_IO_COMMAND32
786 * Accept ioctl-s from 32-bit binaries on non-32-bit
787 * platforms, such as AMD. LSI's MEGAMGR utility is
788 * the only example known today... -mi
790 case AMR_IO_COMMAND32:
791 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
792 au_cmd = arg.au32->au_cmd;
793 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
794 au_length = arg.au32->au_length;
795 au_statusp = &arg.au32->au_status;
800 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
801 au_cmd = arg.au->au_cmd;
802 au_buffer = (void *)arg.au->au_buffer;
803 au_length = arg.au->au_length;
804 au_statusp = &arg.au->au_status;
/* Hard-coded Linux megaraid ioctl number; route to the emulation path. */
808 case 0xc06e6d00: /* Linux emulation */
811 struct amr_linux_ioctl ali;
814 devclass = devclass_find("amr");
815 if (devclass == NULL)
818 error = copyin(addr, &ali, sizeof(ali));
821 if (ali.ui.fcs.opcode == 0x82)
/* Adapter number is xor-encoded with 'm' << 8 in the Linux ABI. */
824 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
826 sc = devclass_get_softc(devclass, adapter);
830 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, td));
833 debug(1, "unknown ioctl 0x%lx", cmd);
/* Volume reconfiguration is gated behind the allow_volume_configure sysctl. */
837 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
838 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
839 if (sc->amr_allow_vol_config == 0) {
843 logical_drives_changed = 1;
845 if ((error = amr_prepare_ld_delete(sc)) != 0)
850 /* handle inbound data buffer */
851 real_length = amr_ioctl_buffer_length(au_length);
852 dp = malloc(real_length, M_AMR, M_WAITOK|M_ZERO);
853 if (au_length != 0 && au_cmd[0] != 0x06) {
854 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
858 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
861 /* Allocate this now before the mutex gets held */
863 mtx_lock(&sc->amr_list_lock);
864 while ((ac = amr_alloccmd(sc)) == NULL)
865 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
867 /* handle SCSI passthrough command */
868 if (au_cmd[0] == AMR_CMD_PASS) {
871 ap = &ac->ac_ccb->ccb_pthru;
872 bzero(ap, sizeof(struct amr_passthrough));
876 ap->ap_cdb_length = len;
877 bcopy(au_cmd + 3, ap->ap_cdb, len);
879 /* build passthrough */
880 ap->ap_timeout = au_cmd[len + 3] & 0x07;
881 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
882 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
883 ap->ap_logical_drive_no = au_cmd[len + 4];
884 ap->ap_channel = au_cmd[len + 5];
885 ap->ap_scsi_id = au_cmd[len + 6];
886 ap->ap_request_sense_length = 14;
887 ap->ap_data_transfer_length = au_length;
888 /* XXX what about the request-sense area? does the caller want it? */
891 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
892 ac->ac_flags = AMR_CMD_CCB;
895 /* direct command to controller */
896 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
898 /* copy pertinent mailbox items */
899 mbi->mb_command = au_cmd[0];
900 mbi->mb_channel = au_cmd[1];
901 mbi->mb_param = au_cmd[2];
902 mbi->mb_pad[0] = au_cmd[3];
903 mbi->mb_drive = au_cmd[4];
907 /* build the command */
909 ac->ac_length = real_length;
910 ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
912 /* run the command */
913 error = amr_wait_command(ac);
914 mtx_unlock(&sc->amr_list_lock);
918 /* copy out data and set status */
919 if (au_length != 0) {
920 error = copyout(dp, au_buffer, au_length);
922 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
923 debug(2, "%p status 0x%x", dp, ac->ac_status);
924 *au_statusp = ac->ac_status;
928 * At this point, we know that there is a lock held and that these
929 * objects have been allocated.
931 mtx_lock(&sc->amr_list_lock);
934 mtx_unlock(&sc->amr_list_lock);
939 if (logical_drives_changed)
940 amr_rescan_drives(dev);
946 /********************************************************************************
947 ********************************************************************************
949 ********************************************************************************
950 ********************************************************************************/
/*
 * Probe controller capabilities: try the 40LD path (ENQUIRY3 + product
 * info + delete-logical-drive support), falling back to the legacy 8LD
 * ENQUIRY commands.  Populates amr_drive[], amr_maxdrives, amr_maxchan
 * and amr_maxio, then caps amr_maxio at AMR_LIMITCMD.
 * NOTE(review): excerpt truncated -- locals, closing braces and several
 * error returns are not visible.
 */
952 /********************************************************************************
953 * Interrogate the controller for the operational parameters we require.
956 amr_query_controller(struct amr_softc *sc)
958 struct amr_enquiry3 *aex;
959 struct amr_prodinfo *ap;
960 struct amr_enquiry *ae;
965 * Greater than 10 byte cdb support
967 sc->support_ext_cdb = amr_support_ext_cdb(sc);
969 if(sc->support_ext_cdb) {
970 debug(2,"supports extended CDBs.");
974 * Try to issue an ENQUIRY3 command
976 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
977 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
979 * Fetch current state of logical drives.
981 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
982 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
983 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
984 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
985 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
986 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
991 * Get product info for channel count.
993 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
994 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
997 sc->amr_maxdrives = 40;
998 sc->amr_maxchan = ap->ap_nschan;
999 sc->amr_maxio = ap->ap_maxio;
1000 sc->amr_type |= AMR_TYPE_40LD;
1003 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1007 sc->amr_ld_del_supported = 1;
1008 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1011 /* failed, try the 8LD ENQUIRY commands */
1012 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1013 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1014 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1017 ae->ae_signature = 0;
1021 * Fetch current state of logical drives.
1023 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1024 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
1025 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
1026 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1027 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1028 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1031 sc->amr_maxdrives = 8;
1032 sc->amr_maxchan = ae->ae_adapter.aa_channels;
1033 sc->amr_maxio = ae->ae_adapter.aa_maxio;
1038 * Mark remaining drives as unused.
/* 0xffffffff in al_size is the "unused" sentinel tested by amr_startup(). */
1040 for (; ldrv < AMR_MAXLD; ldrv++)
1041 sc->amr_drive[ldrv].al_size = 0xffffffff;
1044 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1045 * the controller's reported value, and lockups have been seen when we do.
1047 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
/*
 * Issue a polled enquiry-class command and return a malloc'd response
 * buffer of 'bufsize' bytes (caller frees with free(, M_AMR)), or NULL
 * on failure; controller status is returned via *status.
 * NOTE(review): the mailbox-population lines (cmd/cmdsub/cmdqual into
 * mbox[]) and the command release are missing from this excerpt.
 */
1052 /********************************************************************************
1053 * Run a generic enquiry-style command.
1056 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1058 struct amr_command *ac;
1068 /* get ourselves a command buffer */
1069 mtx_lock(&sc->amr_list_lock);
1070 ac = amr_alloccmd(sc);
1071 mtx_unlock(&sc->amr_list_lock);
1074 /* allocate the response structure */
1075 if ((result = malloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1077 /* set command flags */
1079 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1081 /* point the command at our data */
1082 ac->ac_data = result;
1083 ac->ac_length = bufsize;
1085 /* build the command proper */
1086 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1092 /* can't assume that interrupts are going to work here, so play it safe */
1093 if (sc->amr_poll_command(ac))
1095 error = ac->ac_status;
1096 *status = ac->ac_status;
1099 mtx_lock(&sc->amr_list_lock);
1102 mtx_unlock(&sc->amr_list_lock);
/* On failure, the response buffer is owned by us and must be released. */
1103 if ((error != 0) && (result != NULL)) {
1104 free(result, M_AMR);
/*
 * Flush the controller's write cache with a polled AMR_CMD_FLUSH and
 * return the command status.  Polled because it is used on the shutdown
 * path when interrupts may be unavailable.
 */
1110 /********************************************************************************
1111 * Flush the controller's internal cache, return status.
1114 amr_flush(struct amr_softc *sc)
1116 struct amr_command *ac;
1119 /* get ourselves a command buffer */
1121 mtx_lock(&sc->amr_list_lock);
1122 ac = amr_alloccmd(sc);
1123 mtx_unlock(&sc->amr_list_lock);
1126 /* set command flags */
1127 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1129 /* build the command proper */
1130 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1132 /* we have to poll, as the system may be going down or otherwise damaged */
1133 if (sc->amr_poll_command(ac))
1135 error = ac->ac_status;
1138 mtx_lock(&sc->amr_list_lock);
1141 mtx_unlock(&sc->amr_list_lock);
/*
 * Probe for extended (>10 byte) CDB support by issuing a polled probe
 * command and checking for AMR_STATUS_SUCCESS.  Returns 1 if supported,
 * 0 otherwise (return statements not visible in this excerpt).
 */
1145 /********************************************************************************
1146 * Detect extented cdb >> greater than 10 byte cdb support
1147 * returns '1' means this support exist
1148 * returns '0' means this support doesn't exist
1151 amr_support_ext_cdb(struct amr_softc *sc)
1153 struct amr_command *ac;
1157 /* get ourselves a command buffer */
1159 mtx_lock(&sc->amr_list_lock);
1160 ac = amr_alloccmd(sc);
1161 mtx_unlock(&sc->amr_list_lock);
1164 /* set command flags */
1165 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1167 /* build the command proper */
1168 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1172 /* we have to poll, as the system may be going down or otherwise damaged */
1173 if (sc->amr_poll_command(ac))
1175 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1180 mtx_lock(&sc->amr_list_lock);
1183 mtx_unlock(&sc->amr_list_lock);
/*
 * Work-dispatch loop: pull commands from the ready queue, then from the
 * bio queue, then from CAM, and hand them to the controller until it is
 * busy or there is nothing left.  A command the controller refuses is
 * requeued for retry on the next interrupt.
 */
1187 /********************************************************************************
1188 * Try to find I/O work for the controller from one or more of the work queues.
1190 * We make the assumption that if the controller is not ready to take a command
1191 * at some given time, it will generate an interrupt at some later time when
1195 amr_startio(struct amr_softc *sc)
1197 struct amr_command *ac;
1199 /* spin until something prevents us from doing any work */
1201 /* Don't bother to queue commands no bounce buffers are available. */
1202 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1205 /* try to get a ready command */
1206 ac = amr_dequeue_ready(sc);
1208 /* if that failed, build a command from a bio */
1210 (void)amr_bio_command(sc, &ac);
1212 /* if that failed, build a command from a ccb */
1213 if ((ac == NULL) && (sc->amr_cam_command != NULL))
1214 sc->amr_cam_command(sc, &ac);
1216 /* if we don't have anything to do, give up */
1220 /* try to give the command to the controller; if this fails save it for later and give up */
1221 if (amr_start(ac)) {
1222 debug(2, "controller busy, command deferred");
1223 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
/*
 * Completion handler for disk I/O commands: report errors (rate-limited
 * via ppsratecheck), hand the bio back to amrd, and release the command
 * under the list lock.
 */
1229 /********************************************************************************
1230 * Handle completion of an I/O command.
1233 amr_completeio(struct amr_command *ac)
1235 struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1;
/* static: persists across calls so ppsratecheck can rate-limit the error printf */
1236 static struct timeval lastfail;
1239 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1240 ac->ac_bio->bio_error = EIO;
1241 ac->ac_bio->bio_flags |= BIO_ERROR;
1243 if (ppsratecheck(&lastfail, &curfail, 1))
1244 device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1245 /* amr_printcommand(ac);*/
1247 amrd_intr(ac->ac_bio);
1248 mtx_lock(&ac->ac_sc->amr_list_lock);
1250 mtx_unlock(&ac->ac_sc->amr_list_lock);
1253 /********************************************************************************
1254 ********************************************************************************
1256 ********************************************************************************
1257 ********************************************************************************/
/*
 * Convert the bio at the head of the queue into an amr_command: pick the
 * mailbox opcode from the bio command (read/write/flush, with 64-bit SG
 * variants where supported), fill in LBA/count/drive, and return it via
 * *acp.  S/G entries are filled in later when the command is mapped.
 * NOTE(review): excerpt truncated -- case labels, returns and closing
 * braces are not visible.
 */
1259 /********************************************************************************
1260 * Convert a bio off the top of the bio queue into a command.
1263 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1265 struct amr_command *ac;
1266 struct amrd_softc *amrd;
1277 if ((ac = amr_alloccmd(sc)) == NULL)
1280 /* get a bio to work on */
1281 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1286 /* connect the bio to the command */
1287 ac->ac_complete = amr_completeio;
1289 ac->ac_data = bio->bio_data;
1290 ac->ac_length = bio->bio_bcount;
1292 switch (bio->bio_cmd) {
1294 ac->ac_flags |= AMR_CMD_DATAIN;
1295 if (AMR_IS_SG64(sc)) {
1296 cmd = AMR_CMD_LREAD64;
1297 ac->ac_flags |= AMR_CMD_SG64;
1299 cmd = AMR_CMD_LREAD;
1302 ac->ac_flags |= AMR_CMD_DATAOUT;
1303 if (AMR_IS_SG64(sc)) {
1304 cmd = AMR_CMD_LWRITE64;
1305 ac->ac_flags |= AMR_CMD_SG64;
1307 cmd = AMR_CMD_LWRITE;
1310 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1311 cmd = AMR_CMD_FLUSH;
1314 biofinish(bio, NULL, EOPNOTSUPP);
/* Recover the logical-drive index from the amrd softc's drive pointer. */
1318 amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
1319 driveno = amrd->amrd_drive - sc->amr_drive;
1320 blkcount = howmany(bio->bio_bcount, AMR_BLKSIZE);
1322 ac->ac_mailbox.mb_command = cmd;
1323 if (bio->bio_cmd == BIO_READ || bio->bio_cmd == BIO_WRITE) {
1324 ac->ac_mailbox.mb_blkcount = blkcount;
1325 ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1326 if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) {
1327 device_printf(sc->amr_dev,
1328 "I/O beyond end of unit (%lld,%d > %lu)\n",
1329 (long long)bio->bio_pblkno, blkcount,
1330 (u_long)sc->amr_drive[driveno].al_size);
1333 ac->ac_mailbox.mb_drive = driveno;
/* High bit requests firmware logical-drive remap after a config change. */
1334 if (sc->amr_state & AMR_STATE_REMAP_LD)
1335 ac->ac_mailbox.mb_drive |= 0x80;
1337 /* we fill in the s/g related data when the command is mapped */
1343 /********************************************************************************
1344 * Take a command, submit it to the controller and sleep until it completes
1345 * or fails. Interrupts must be enabled, returns nonzero on error.
 *
 * Marks the command AMR_CMD_SLEEP so the completion path knows to wake us,
 * then msleep()s on the command under the list lock until BUSY clears.
1348 amr_wait_command(struct amr_command *ac)
1351 struct amr_softc *sc = ac->ac_sc;
1355 ac->ac_complete = NULL;
1356 ac->ac_flags |= AMR_CMD_SLEEP;
1357 if ((error = amr_start(ac)) != 0) {
1361 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1362 error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0);
1368 /********************************************************************************
1369 * Take a command, submit it to the controller and busy-wait for it to return.
1370 * Returns nonzero on error. Can be safely called with interrupts enabled.
1373 amr_std_poll_command(struct amr_command *ac)
1375 struct amr_softc *sc = ac->ac_sc;
1380 ac->ac_complete = NULL;
1381 if ((error = amr_start(ac)) != 0)
1387 * Poll for completion, although the interrupt handler may beat us to it.
1388 * Note that the timeout here is somewhat arbitrary.
 /* roughly 1000 poll iterations before declaring a timeout */
1392 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1393 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1396 /* XXX the slot is now marked permanently busy */
1398 device_printf(sc->amr_dev, "polled command timeout\n");
/*
 * bus_dmamap_load() callback for the polled path: builds the s/g list,
 * patches the mailbox s/g count/address fields (CONFIG NVRAM read/write
 * keeps its count in mb_param instead), then fires the controller-specific
 * poll routine.
 */
1404 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1406 struct amr_command *ac = arg;
1407 struct amr_softc *sc = ac->ac_sc;
1411 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1412 ac->ac_status = AMR_STATUS_ABORTED;
1416 amr_setup_sg(arg, segs, nsegs, err);
1418 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1419 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1420 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1421 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1422 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1423 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1425 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1426 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1427 if (AC_IS_SG64(ac)) {
1429 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1432 sc->amr_poll_command1(sc, ac);
1435 /********************************************************************************
1436 * Take a command, submit it to the controller and busy-wait for it to return.
1437 * Returns nonzero on error. Can be safely called with interrupts enabled.
 *
 * Selects the 32- or 64-bit DMA tag/map for the command, loads the data
 * buffer (completion continues in amr_setup_polled_dmamap), or polls
 * directly when there is no data.
1440 amr_quartz_poll_command(struct amr_command *ac)
1442 struct amr_softc *sc = ac->ac_sc;
1449 if (AC_IS_SG64(ac)) {
1450 ac->ac_tag = sc->amr_buffer64_dmat;
1451 ac->ac_datamap = ac->ac_dma64map;
1453 ac->ac_tag = sc->amr_buffer_dmat;
1454 ac->ac_datamap = ac->ac_dmamap;
1457 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1458 if (ac->ac_data != 0) {
1459 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1460 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1464 error = amr_quartz_poll_command1(sc, ac);
/*
 * Core polled submit for Quartz-interface controllers: waits for all busy
 * slots to drain (when interrupts are disabled), copies the mailbox to the
 * shared area, rings the inbound doorbell, then spins on the mailbox
 * poll/ack protocol fields until the firmware posts a status.  Finally
 * syncs and unloads the data map.  Called with no spinning bound — see the
 * busy-wait loops below.
 */
1471 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1475 mtx_lock(&sc->amr_hw_lock);
1476 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1478 while (sc->amr_busyslots) {
1479 msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
1485 if(sc->amr_busyslots) {
1486 device_printf(sc->amr_dev, "adapter is busy\n");
1487 mtx_unlock(&sc->amr_hw_lock);
1488 if (ac->ac_data != NULL) {
1489 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1496 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1498 /* clear the poll/ack fields in the mailbox */
1499 sc->amr_mailbox->mb_ident = 0xFE;
1500 sc->amr_mailbox->mb_nstatus = 0xFF;
1501 sc->amr_mailbox->mb_status = 0xFF;
1502 sc->amr_mailbox->mb_poll = 0;
1503 sc->amr_mailbox->mb_ack = 0;
1504 sc->amr_mailbox->mb_busy = 1;
1506 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
 /* busy-wait for firmware to post status (no timeout on these spins) */
1508 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1510 while(sc->amr_mailbox->mb_status == 0xFF)
1512 ac->ac_status=sc->amr_mailbox->mb_status;
1513 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1514 while(sc->amr_mailbox->mb_poll != 0x77)
1516 sc->amr_mailbox->mb_poll = 0;
1517 sc->amr_mailbox->mb_ack = 0x77;
1519 /* acknowledge that we have the commands */
1520 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK)​;
1521 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1523 mtx_unlock(&sc->amr_hw_lock);
1525 /* unmap the command's data buffer */
1526 if (ac->ac_flags & AMR_CMD_DATAIN) {
1527 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1529 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1530 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1532 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
/*
 * Return a command's controller slot: clear the busy-command table entry
 * and decrement the busy-slot count.  Panics if the slot was not busy.
 */
1538 amr_freeslot(struct amr_command *ac)
1540 struct amr_softc *sc = ac->ac_sc;
1546 if (sc->amr_busycmd[slot] == NULL)
1547 panic("amr: slot %d not busy?\n", slot);
1549 sc->amr_busycmd[slot] = NULL;
1550 atomic_subtract_int(&sc->amr_busyslots, 1);
1555 /********************************************************************************
1556 * Map/unmap (ac)'s data in the controller's addressable space as required.
1558 * These functions may be safely called multiple times on a given command.
 *
 * amr_setup_sg: populate the per-slot s/g table from the DMA segments.
 * SG64 commands always use the table (mb_physaddr sentinel 0xffffffff);
 * 32-bit commands with a single segment bypass the table and place the
 * segment address directly in the mailbox.  Ends with a PREREAD/PREWRITE
 * sync and marks the command mapped.
1561 amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1563 struct amr_command *ac = (struct amr_command *)arg;
1564 struct amr_sgentry *sg;
1565 struct amr_sg64entry *sg64;
1570 /* get base address of s/g table */
1571 sg = ac->ac_sg.sg32;
1572 sg64 = ac->ac_sg.sg64;
1574 if (AC_IS_SG64(ac)) {
1575 ac->ac_nsegments = nsegments;
1576 ac->ac_mb_physaddr = 0xffffffff;
1577 for (i = 0; i < nsegments; i++, sg64++) {
1578 sg64->sg_addr = segs[i].ds_addr;
1579 sg64->sg_count = segs[i].ds_len;
1582 /* decide whether we need to populate the s/g table */
1583 if (nsegments < 2) {
1584 ac->ac_nsegments = 0;
1585 ac->ac_mb_physaddr = segs[0].ds_addr;
1587 ac->ac_nsegments = nsegments;
1588 ac->ac_mb_physaddr = ac->ac_sgbusaddr;
1589 for (i = 0; i < nsegments; i++, sg++) {
1590 sg->sg_addr = segs[i].ds_addr;
1591 sg->sg_count = segs[i].ds_len;
1597 if (ac->ac_flags & AMR_CMD_DATAIN)
1598 flags |= BUS_DMASYNC_PREREAD;
1599 if (ac->ac_flags & AMR_CMD_DATAOUT)
1600 flags |= BUS_DMASYNC_PREWRITE;
1601 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1602 ac->ac_flags |= AMR_CMD_MAPPED;
/*
 * bus_dmamap_load() callback for the interrupt-driven path: same mailbox
 * fixup as amr_setup_polled_dmamap, but submits via amr_submit_command and
 * requeues (freezing the queue) on EBUSY rather than polling.
 */
1606 amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1608 struct amr_command *ac = arg;
1609 struct amr_softc *sc = ac->ac_sc;
1613 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1618 amr_setup_sg(arg, segs, nsegs, err);
1620 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1621 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1622 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1623 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1624 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1625 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1627 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1628 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1629 if (AC_IS_SG64(ac)) {
1631 ac->ac_sg64_lo = ac->ac_sgbusaddr;
 /* controller mailbox full: park the command for a later retry */
1634 if (sc->amr_submit_command(ac) == EBUSY) {
1636 amr_requeue_ready(ac);
/*
 * bus_dmamap_load() callback for passthrough (CCB) commands: the mailbox
 * points at the ccb itself; the s/g count and data address are stored in
 * the (extended) passthrough structure instead of the mailbox.
 */
1641 amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1643 struct amr_command *ac = arg;
1644 struct amr_softc *sc = ac->ac_sc;
1645 struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1646 struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1649 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1654 /* Set up the mailbox portion of the command to point at the ccb */
1655 ac->ac_mailbox.mb_nsgelem = 0;
1656 ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1658 amr_setup_sg(arg, segs, nsegs, err);
1660 switch (ac->ac_mailbox.mb_command) {
1661 case AMR_CMD_EXTPASS:
1662 aep->ap_no_sg_elements = ac->ac_nsegments;
1663 aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1666 ap->ap_no_sg_elements = ac->ac_nsegments;
1667 ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1670 panic("Unknown ccb command");
1673 if (sc->amr_submit_command(ac) == EBUSY) {
1675 amr_requeue_ready(ac);
/*
 * Map a command's data for DMA (if any, and not already mapped) and submit
 * it.  Picks the 32/64-bit tag and the ccb vs. data setup callback; an
 * EINPROGRESS load freezes the queue until the deferred callback fires.
 */
1680 amr_mapcmd(struct amr_command *ac)
1682 bus_dmamap_callback_t *cb;
1683 struct amr_softc *sc = ac->ac_sc;
1687 if (AC_IS_SG64(ac)) {
1688 ac->ac_tag = sc->amr_buffer64_dmat;
1689 ac->ac_datamap = ac->ac_dma64map;
1691 ac->ac_tag = sc->amr_buffer_dmat;
1692 ac->ac_datamap = ac->ac_dmamap;
1695 if (ac->ac_flags & AMR_CMD_CCB)
1698 cb = amr_setup_data;
1700 /* if the command involves data at all, and hasn't been mapped */
1701 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1702 /* map the data buffers into bus space and build the s/g list */
1703 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1704 ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1705 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
 /* no data to map: submit directly, requeueing on EBUSY */
1708 if (sc->amr_submit_command(ac) == EBUSY) {
1710 amr_requeue_ready(ac);
/*
 * Undo amr_mapcmd: POSTREAD/POSTWRITE sync, unload the DMA map, and clear
 * the MAPPED flag.  Safe to call on an unmapped command.
 */
1718 amr_unmapcmd(struct amr_command *ac)
1724 /* if the command involved data at all and was mapped */
1725 if (ac->ac_flags & AMR_CMD_MAPPED) {
1726 if (ac->ac_data != NULL) {
1728 if (ac->ac_flags & AMR_CMD_DATAIN)
1729 flag |= BUS_DMASYNC_POSTREAD;
1730 if (ac->ac_flags & AMR_CMD_DATAOUT)
1731 flag |= BUS_DMASYNC_POSTWRITE;
1733 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1734 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1737 ac->ac_flags &= ~AMR_CMD_MAPPED;
/*
 * Abort a command whose DMA load failed: mark it ABORTED and run it
 * through the normal completion path on a private queue head (the list
 * lock is dropped around amr_complete(), which must not hold it).
 */
1742 amr_abort_load(struct amr_command *ac)
1745 struct amr_softc *sc = ac->ac_sc;
1747 mtx_assert(&sc->amr_list_lock, MA_OWNED);
1749 ac->ac_status = AMR_STATUS_ABORTED;
1750 amr_init_qhead(&head);
1751 amr_enqueue_completed(ac, &head);
1753 mtx_unlock(&sc->amr_list_lock);
1754 amr_complete(sc, &head);
1755 mtx_lock(&sc->amr_list_lock);
1758 /********************************************************************************
1759 * Take a command and give it to the controller, returns 0 if successful, or
1760 * EBUSY if the command should be retried later.
 *
 * Claims a slot in amr_busycmd[] (released in amr_done/amr_freeslot), then
 * maps and submits via amr_mapcmd(); ENOMEM from mapping frees the slot so
 * the command can be retried when memory pressure eases.
1763 amr_start(struct amr_command *ac)
1765 struct amr_softc *sc;
1771 /* mark command as busy so that polling consumer can tell */
1773 ac->ac_flags |= AMR_CMD_BUSY;
1775 /* get a command slot (freed in amr_done) */
1777 if (sc->amr_busycmd[slot] != NULL)
1778 panic("amr: slot %d busy?\n", slot);
1779 sc->amr_busycmd[slot] = ac;
1780 atomic_add_int(&sc->amr_busyslots, 1);
1782 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1783 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1785 * Memory resources are short, so free the slot and let this be tried
1794 /********************************************************************************
1795 * Extract one or more completed commands from the controller (sc)
1797 * Returns nonzero if any commands on the work queue were marked as completed.
 *
 * Loops on amr_get_work(), translating each completed-command ident
 * (ident = slot + 1, hence the "- 1" below) back to its amr_command,
 * recording the status, and batching them for amr_complete().
1801 amr_done(struct amr_softc *sc)
1804 struct amr_command *ac;
1805 struct amr_mailbox mbox;
1810 /* See if there's anything for us to do */
1812 amr_init_qhead(&head);
1814 /* loop collecting completed commands */
1816 /* poll for a completed command's identifier and status */
1817 if (sc->amr_get_work(sc, &mbox)) {
1820 /* iterate over completed commands in this result */
1821 for (i = 0; i < mbox.mb_nstatus; i++) {
1822 /* get pointer to busy command */
1823 idx = mbox.mb_completed[i] - 1;
1824 ac = sc->amr_busycmd[idx];
1826 /* really a busy command? */
1828 /* pull the command from the busy index */
1831 /* save status for later use */
1832 ac->ac_status = mbox.mb_status;
1833 amr_enqueue_completed(ac, &head);
1834 debug(3, "completed command with status %x", mbox.mb_status);
1836 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1840 break; /* no work */
1843 /* handle completion and timeouts */
1844 amr_complete(sc, &head);
1849 /********************************************************************************
1850 * Do completion processing on done commands on (sc)
 *
 * Dequeues each completed command, unmaps its data, then either invokes
 * its completion handler or (for synchronous callers) clears BUSY and
 * wakes any sleeper under the list lock.  When the last busy slot drains,
 * the queue-frozen state is cleared so amr_startio can resume.
1854 amr_complete(void *context, ac_qhead_t *head)
1856 struct amr_softc *sc = (struct amr_softc *)context;
1857 struct amr_command *ac;
1861 /* pull completed commands off the queue */
1863 ac = amr_dequeue_completed(sc, head);
1867 /* unmap the command's data buffer */
1871 * Is there a completion handler?
1873 if (ac->ac_complete != NULL) {
1874 /* unbusy the command */
1875 ac->ac_flags &= ~AMR_CMD_BUSY;
1876 ac->ac_complete(ac);
1879 * Is someone sleeping on this one?
1882 mtx_lock(&sc->amr_list_lock);
1883 ac->ac_flags &= ~AMR_CMD_BUSY;
1884 if (ac->ac_flags & AMR_CMD_SLEEP) {
1885 /* unbusy the command */
1888 mtx_unlock(&sc->amr_list_lock);
1891 if(!sc->amr_busyslots) {
1896 mtx_lock(&sc->amr_list_lock);
1897 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
1899 mtx_unlock(&sc->amr_list_lock);
1902 /********************************************************************************
1903 ********************************************************************************
1904 Command Buffer Management
1905 ********************************************************************************
1906 ********************************************************************************/
1908 /********************************************************************************
1909 * Get a new command buffer.
1911 * This may return NULL in low-memory cases.
1913 * If possible, we recycle a command buffer that's been used before.
 *
 * A failed dequeue freezes the queue (AMR_STATE_QUEUE_FRZN) so submission
 * backs off; recycled buffers get their mailbox and key fields cleared.
1915 struct amr_command *
1916 amr_alloccmd(struct amr_softc *sc)
1918 struct amr_command *ac;
1922 ac = amr_dequeue_free(sc);
1924 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1928 /* clear out significant fields */
1930 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1934 ac->ac_complete = NULL;
1937 ac->ac_datamap = NULL;
1941 /********************************************************************************
1942 * Release a command buffer for recycling.
 * (Counterpart of amr_alloccmd: returns the buffer to the free queue.)
1945 amr_releasecmd(struct amr_command *ac)
1949 amr_enqueue_free(ac);
1952 /********************************************************************************
1953 * Allocate a new command cluster and initialise it.
 *
 * Allocates AMR_CMD_CLUSTERCOUNT commands, assigning each a slot number,
 * a slice of the shared s/g table (32- or 64-bit entries at the same
 * fixed per-slot stride), a ccb slice, and DMA maps.  Stops at
 * sc->amr_maxio slots.
1956 amr_alloccmd_cluster(struct amr_softc *sc)
1958 struct amr_command_cluster *acc;
1959 struct amr_command *ac;
1963 * If we haven't found the real limit yet, let us have a couple of
1964 * commands in order to be able to probe.
1966 if (sc->amr_maxio == 0)
1969 if (sc->amr_nextslot > sc->amr_maxio)
1971 acc = malloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
1973 nextslot = sc->amr_nextslot;
1974 mtx_lock(&sc->amr_list_lock);
1975 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
1976 mtx_unlock(&sc->amr_list_lock);
1977 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
1978 ac = &acc->acc_command[i];
1980 ac->ac_slot = nextslot;
1983 * The SG table for each slot is a fixed size and is assumed to
1984 * to hold 64-bit s/g objects when the driver is configured to do
1985 * 64-bit DMA. 32-bit DMA commands still use the same table, but
1986 * cast down to 32-bit objects.
1988 if (AMR_IS_SG64(sc)) {
1989 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1990 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
1991 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
1993 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
1994 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
1995 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
1998 ac->ac_ccb = sc->amr_ccb + ac->ac_slot;
1999 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
2000 (ac->ac_slot * sizeof(union amr_ccb));
2002 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
2004 if (AMR_IS_SG64(sc) &&
2005 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
2008 if (++nextslot > sc->amr_maxio)
2011 sc->amr_nextslot = nextslot;
2015 /********************************************************************************
2016 * Free a command cluster
 * (destroys each initialised command's DMA maps; ac_sc == NULL marks a
 * command that was never set up and is skipped)
2019 amr_freecmd_cluster(struct amr_command_cluster *acc)
2021 struct amr_softc *sc = acc->acc_command[0].ac_sc;
2024 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2025 if (acc->acc_command[i].ac_sc == NULL)
2027 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2028 if (AMR_IS_SG64(sc))
2029 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2034 /********************************************************************************
2035 ********************************************************************************
2036 Interface-specific Shims
2037 ********************************************************************************
2038 ********************************************************************************/
2040 /********************************************************************************
2041 * Tell the controller that the mailbox contains a valid command
 *
 * Quartz variant: waits briefly for the mailbox busy flag to clear, gives
 * up with a retry-counted, rate-limited complaint if the controller stays
 * busy, otherwise copies the command into the shared mailbox and rings
 * the inbound doorbell.
2044 amr_quartz_submit_command(struct amr_command *ac)
2046 struct amr_softc *sc = ac->ac_sc;
2047 static struct timeval lastfail;
2051 mtx_lock(&sc->amr_hw_lock);
2052 while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2054 /* This is a no-op read that flushes pending mailbox updates */
2057 if (sc->amr_mailbox->mb_busy) {
2058 mtx_unlock(&sc->amr_hw_lock);
2059 if (ac->ac_retries++ > 1000) {
2060 if (ppsratecheck(&lastfail, &curfail, 1))
2061 device_printf(sc->amr_dev, "Too many retries on command %p. "
2062 "Controller is likely dead\n", ac);
2069 * Save the slot number so that we can locate this command when complete.
2070 * Note that ident = 0 seems to be special, so we don't use it.
2072 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2073 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2074 sc->amr_mailbox->mb_busy = 1;
2075 sc->amr_mailbox->mb_poll = 0;
2076 sc->amr_mailbox->mb_ack = 0;
2077 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2078 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2080 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2081 mtx_unlock(&sc->amr_hw_lock);
/*
 * Standard-interface variant of command submission: checks the mailbox
 * busy flag via AMR_SGET_MBSTAT, copies the command into the shared
 * mailbox, and posts it with AMR_SPOST_COMMAND.
 */
2086 amr_std_submit_command(struct amr_command *ac)
2088 struct amr_softc *sc = ac->ac_sc;
2089 static struct timeval lastfail;
2092 mtx_lock(&sc->amr_hw_lock);
2093 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2094 mtx_unlock(&sc->amr_hw_lock);
2095 if (ac->ac_retries++ > 1000) {
2096 if (ppsratecheck(&lastfail, &curfail, 1))
2097 device_printf(sc->amr_dev, "Too many retries on command %p. "
2098 "Controller is likely dead\n", ac);
2105 * Save the slot number so that we can locate this command when complete.
2106 * Note that ident = 0 seems to be special, so we don't use it.
2108 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2109 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2110 sc->amr_mailbox->mb_busy = 1;
2111 sc->amr_mailbox->mb_poll = 0;
2112 sc->amr_mailbox->mb_ack = 0;
2114 AMR_SPOST_COMMAND(sc);
2115 mtx_unlock(&sc->amr_hw_lock);
2119 /********************************************************************************
2120 * Claim any work that the controller has completed; acknowledge completion,
2121 * save details of the completion in (mbsave)
 *
 * Quartz variant: on an outbound-doorbell interrupt, reads the completion
 * count and ident list from the shared mailbox (spinning until the
 * firmware has written each 0xff placeholder), copies them into mbsave,
 * and acknowledges via the inbound doorbell.
2124 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2129 u_int8_t completed[46];
2135 /* work waiting for us? */
2136 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2137 /* acknowledge interrupt */
2138 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2140 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2142 sc->amr_mailbox->mb_nstatus = 0xff;
2144 /* wait until fw wrote out all completions */
2145 for (i = 0; i < nstatus; i++) {
2146 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2148 sc->amr_mailbox->mb_completed[i] = 0xff;
2151 /* Save information for later processing */
2152 mbsave->mb_nstatus = nstatus;
2153 mbsave->mb_status = sc->amr_mailbox->mb_status;
2154 sc->amr_mailbox->mb_status = 0xff;
2156 for (i = 0; i < nstatus; i++)
2157 mbsave->mb_completed[i] = completed[i];
2159 /* acknowledge that we have the commands */
2160 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2163 #ifndef AMR_QUARTZ_GOFASTER
2165 * This waits for the controller to notice that we've taken the
2166 * command from it. It's very inefficient, and we shouldn't do it,
2167 * but if we remove this code, we stop completing commands under
2170 * Peter J says we shouldn't do this. The documentation says we
2171 * should. Who is right?
2173 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2174 ; /* XXX aiee! what if it dies? */
2178 worked = 1; /* got some work */
/*
 * Standard-interface variant: if the interrupt status is valid, ack it,
 * snapshot the whole mailbox (which carries the completion list) into
 * mbsave, and acknowledge the mailbox hand-off.
 */
2185 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2194 /* check for valid interrupt status */
2195 istat = AMR_SGET_ISTAT(sc);
2196 if ((istat & AMR_SINTR_VALID) != 0) {
2197 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2199 /* save mailbox, which contains a list of completed commands */
2200 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2202 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2209 /********************************************************************************
2210 * Notify the controller of the mailbox location.
 * (Writes the 32-bit physical address byte-by-byte, enables the mailbox,
 * then clears and enables interrupts.)
2213 amr_std_attach_mailbox(struct amr_softc *sc)
2216 /* program the mailbox physical address */
2217 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2218 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2219 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2220 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2221 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2223 /* clear any outstanding interrupt and enable interrupts proper */
2224 AMR_SACK_INTERRUPT(sc);
2225 AMR_SENABLE_INTR(sc);
2228 #ifdef AMR_BOARD_INIT
2229 /********************************************************************************
2230 * Initialise the controller
 * (Quartz variant: polls the init-status register until AMR_QINIT_DONE,
 * logging each state change; NOMEM/SCAN states handled in the switch.)
2233 amr_quartz_init(struct amr_softc *sc)
2235 int status, ostatus;
2237 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2242 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2243 if (status != ostatus) {
2244 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2248 case AMR_QINIT_NOMEM:
2251 case AMR_QINIT_SCAN:
2252 /* XXX we could print channel/target here */
/*
 * Standard-interface board init: same polling structure as
 * amr_quartz_init but against the standard init-status register.
 */
2260 amr_std_init(struct amr_softc *sc)
2262 int status, ostatus;
2264 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2269 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2270 if (status != ostatus) {
2271 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2275 case AMR_SINIT_NOMEM:
2278 case AMR_SINIT_INPROG:
2279 /* XXX we could print channel/target here? */
2287 /********************************************************************************
2288 ********************************************************************************
2290 ********************************************************************************
2291 ********************************************************************************/
2293 /********************************************************************************
2294 * Identify the controller and print some information about it.
 *
 * Preference order: 40LD product-info enquiry (exact label), then 8LD
 * extended enquiry (signature lookup), then plain enquiry plus PCI device
 * ID heuristics.  HP NetRaid firmware/BIOS versions are detected by their
 * distinctive byte layout and printed in HP format.
2297 amr_describe_controller(struct amr_softc *sc)
2299 struct amr_prodinfo *ap;
2300 struct amr_enquiry *ae;
2305 * Try to get 40LD product info, which tells us what the card is labelled as.
2307 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2308 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2309 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2317 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2319 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2320 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2322 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2324 * Try to work it out based on the PCI signatures.
2326 switch (pci_get_device(sc->amr_dev)) {
2328 prod = "Series 428";
2331 prod = "Series 434";
2334 prod = "unknown controller";
2338 device_printf(sc->amr_dev, "<unsupported controller>\n");
2343 * HP NetRaid controllers have a special encoding of the firmware and
2344 * BIOS versions. The AMI version seems to have it as strings whereas
2345 * the HP version does it with a leading uppercase character and two
2349 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2350 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2351 ae->ae_adapter.aa_firmware[1] < ' ' &&
2352 ae->ae_adapter.aa_firmware[0] < ' ' &&
2353 ae->ae_adapter.aa_bios[2] >= 'A' &&
2354 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2355 ae->ae_adapter.aa_bios[1] < ' ' &&
2356 ae->ae_adapter.aa_bios[0] < ' ') {
2357 /* this looks like we have an HP NetRaid version of the MegaRaid */
2359 if(ae->ae_signature == AMR_SIG_438) {
2360 /* the AMI 438 is a NetRaid 3si in HP-land */
2361 prod = "HP NetRaid 3si";
2364 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2365 prod, ae->ae_adapter.aa_firmware[2],
2366 ae->ae_adapter.aa_firmware[1],
2367 ae->ae_adapter.aa_firmware[0],
2368 ae->ae_adapter.aa_bios[2],
2369 ae->ae_adapter.aa_bios[1],
2370 ae->ae_adapter.aa_bios[0],
2371 ae->ae_adapter.aa_memorysize);
2373 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2374 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2375 ae->ae_adapter.aa_memorysize);
/*
 * Write (blks) blocks at (lba) to logical drive (unit) via a polled
 * AMR_CMD_LWRITE — used when interrupts cannot be relied on (e.g. crash
 * dump path; AMR_STATE_INTEN is toggled around the operation).
 */
2381 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2383 struct amr_command *ac;
2388 sc->amr_state |= AMR_STATE_INTEN;
2390 /* get ourselves a command buffer */
2391 if ((ac = amr_alloccmd(sc)) == NULL)
2393 /* set command flags */
2394 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2396 /* point the command at our data */
2398 ac->ac_length = blks * AMR_BLKSIZE;
2400 /* build the command proper */
2401 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2402 ac->ac_mailbox.mb_blkcount = blks;
2403 ac->ac_mailbox.mb_lba = lba;
2404 ac->ac_mailbox.mb_drive = unit;
2406 /* can't assume that interrupts are going to work here, so play it safe */
2407 if (sc->amr_poll_command(ac))
2409 error = ac->ac_status;
2415 sc->amr_state &= ~AMR_STATE_INTEN;
2420 /********************************************************************************
2421 * Print the command (ac) in human-readable format
2425 amr_printcommand(struct amr_command *ac)
2427 struct amr_softc *sc = ac->ac_sc;
2428 struct amr_sgentry *sg;
2431 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2432 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2433 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2434 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2435 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2436 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2437 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2438 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2440 /* get base address of s/g table */
2441 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2442 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2443 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);