2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD$");
62 * Driver for the AMI MegaRaid family of controllers.
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/kernel.h>
70 #include <sys/sysctl.h>
77 #include <machine/bus.h>
78 #include <machine/cpu.h>
79 #include <machine/resource.h>
82 #include <dev/pci/pcireg.h>
83 #include <dev/pci/pcivar.h>
85 #include <dev/amr/amrio.h>
86 #include <dev/amr/amrreg.h>
87 #include <dev/amr/amrvar.h>
88 #define AMR_DEFINE_TABLES
89 #include <dev/amr/amr_tables.h>
92 * CAM attachment for the controller's pass-through SCSI channels; enabled by
94 #ifndef AMR_ENABLE_CAM
95 #define AMR_ENABLE_CAM 1
98 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
100 static d_open_t amr_open;
101 static d_close_t amr_close;
102 static d_ioctl_t amr_ioctl;
104 static struct cdevsw amr_cdevsw = {
105 .d_version = D_VERSION,
106 .d_flags = D_NEEDGIANT,
108 .d_close = amr_close,
109 .d_ioctl = amr_ioctl,
113 int linux_no_adapter = 0;
115 * Initialisation, bus interface.
117 static void amr_startup(void *arg);
122 static int amr_query_controller(struct amr_softc *sc);
123 static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
124 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
125 static void amr_completeio(struct amr_command *ac);
126 static int amr_support_ext_cdb(struct amr_softc *sc);
129 * Command buffer allocation.
131 static void amr_alloccmd_cluster(struct amr_softc *sc);
132 static void amr_freecmd_cluster(struct amr_command_cluster *acc);
135 * Command processing.
137 static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
138 static int amr_wait_command(struct amr_command *ac) __unused;
139 static int amr_mapcmd(struct amr_command *ac);
140 static void amr_unmapcmd(struct amr_command *ac);
141 static int amr_start(struct amr_command *ac);
142 static void amr_complete(void *context, ac_qhead_t *head);
143 static void amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
144 static void amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
145 static void amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
146 static void amr_abort_load(struct amr_command *ac);
151 static void amr_periodic(void *data);
154 * Interface-specific shims
156 static int amr_quartz_submit_command(struct amr_command *ac);
157 static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
158 static int amr_quartz_poll_command(struct amr_command *ac);
159 static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
161 static int amr_std_submit_command(struct amr_command *ac);
162 static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
163 static int amr_std_poll_command(struct amr_command *ac);
164 static void amr_std_attach_mailbox(struct amr_softc *sc);
166 #ifdef AMR_BOARD_INIT
167 static int amr_quartz_init(struct amr_softc *sc);
168 static int amr_std_init(struct amr_softc *sc);
174 static void amr_describe_controller(struct amr_softc *sc);
177 static void amr_printcommand(struct amr_command *ac);
181 static void amr_init_sysctl(struct amr_softc *sc);
182 static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
183 int32_t flag, d_thread_t *td);
185 MALLOC_DEFINE(M_AMR, "amr", "AMR memory");
187 /********************************************************************************
188 ********************************************************************************
190 ********************************************************************************
191 ********************************************************************************/
193 /********************************************************************************
194 ********************************************************************************
196 ********************************************************************************
197 ********************************************************************************/
199 /********************************************************************************
200 * Initialise the controller and softc.
203 amr_attach(struct amr_softc *sc)
209 * Initialise per-controller queues.
211 amr_init_qhead(&sc->amr_freecmds);
212 amr_init_qhead(&sc->amr_ready);
213 TAILQ_INIT(&sc->amr_cmd_clusters);
214 bioq_init(&sc->amr_bioq);
216 debug(2, "queue init done");
219 * Configure for this controller type.
221 if (AMR_IS_QUARTZ(sc)) {
222 sc->amr_submit_command = amr_quartz_submit_command;
223 sc->amr_get_work = amr_quartz_get_work;
224 sc->amr_poll_command = amr_quartz_poll_command;
225 sc->amr_poll_command1 = amr_quartz_poll_command1;
227 sc->amr_submit_command = amr_std_submit_command;
228 sc->amr_get_work = amr_std_get_work;
229 sc->amr_poll_command = amr_std_poll_command;
230 amr_std_attach_mailbox(sc);;
233 #ifdef AMR_BOARD_INIT
234 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc))))
239 * Allocate initial commands.
241 amr_alloccmd_cluster(sc);
244 * Quiz controller for features and limits.
246 if (amr_query_controller(sc))
249 debug(2, "controller query complete");
252 * preallocate the remaining commands.
254 while (sc->amr_nextslot < sc->amr_maxio)
255 amr_alloccmd_cluster(sc);
262 #if AMR_ENABLE_CAM != 0
264 * Attach our 'real' SCSI channels to CAM.
266 if (amr_cam_attach(sc))
268 debug(2, "CAM attach done");
272 * Create the control device.
274 sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
275 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
276 sc->amr_dev_t->si_drv1 = sc;
278 if (device_get_unit(sc->amr_dev) == 0)
279 make_dev_alias(sc->amr_dev_t, "megadev0");
282 * Schedule ourselves to bring the controller up once interrupts are
285 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
286 sc->amr_ich.ich_func = amr_startup;
287 sc->amr_ich.ich_arg = sc;
288 if (config_intrhook_establish(&sc->amr_ich) != 0) {
289 device_printf(sc->amr_dev, "can't establish configuration hook\n");
294 * Print a little information about the controller.
296 amr_describe_controller(sc);
298 debug(2, "attach complete");
302 /********************************************************************************
303 * Locate disk resources and attach children to them.
306 amr_startup(void *arg)
308 struct amr_softc *sc = (struct amr_softc *)arg;
309 struct amr_logdrive *dr;
314 /* pull ourselves off the intrhook chain */
315 if (sc->amr_ich.ich_func)
316 config_intrhook_disestablish(&sc->amr_ich);
317 sc->amr_ich.ich_func = NULL;
319 /* get up-to-date drive information */
320 if (amr_query_controller(sc)) {
321 device_printf(sc->amr_dev, "can't scan controller for drives\n");
325 /* iterate over available drives */
326 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
327 /* are we already attached to this drive? */
328 if (dr->al_disk == 0) {
329 /* generate geometry information */
330 if (dr->al_size > 0x200000) { /* extended translation? */
337 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
339 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
340 if (dr->al_disk == 0)
341 device_printf(sc->amr_dev, "device_add_child failed\n");
342 device_set_ivars(dr->al_disk, dr);
346 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
347 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
349 /* mark controller back up */
350 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
352 /* interrupts will be enabled before we do anything more */
353 sc->amr_state |= AMR_STATE_INTEN;
356 * Start the timeout routine.
358 /* sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
364 amr_init_sysctl(struct amr_softc *sc)
367 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
368 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
369 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
371 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
372 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
373 OID_AUTO, "nextslot", CTLFLAG_RD, &sc->amr_nextslot, 0,
375 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
376 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
377 OID_AUTO, "busyslots", CTLFLAG_RD, &sc->amr_busyslots, 0,
379 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
380 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
381 OID_AUTO, "maxio", CTLFLAG_RD, &sc->amr_maxio, 0,
386 /*******************************************************************************
387 * Free resources associated with a controller instance
390 amr_free(struct amr_softc *sc)
392 struct amr_command_cluster *acc;
394 #if AMR_ENABLE_CAM != 0
395 /* detach from CAM */
399 /* cancel status timeout */
400 untimeout(amr_periodic, sc, sc->amr_timeout);
402 /* throw away any command buffers */
403 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
404 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
405 amr_freecmd_cluster(acc);
408 /* destroy control device */
409 if( sc->amr_dev_t != (struct cdev *)NULL)
410 destroy_dev(sc->amr_dev_t);
412 if (mtx_initialized(&sc->amr_hw_lock))
413 mtx_destroy(&sc->amr_hw_lock);
415 if (mtx_initialized(&sc->amr_list_lock))
416 mtx_destroy(&sc->amr_list_lock);
419 /*******************************************************************************
420 * Receive a bio structure from a child device and queue it on a particular
421 * disk resource, then poke the disk resource to start as much work as it can.
424 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
428 mtx_lock(&sc->amr_list_lock);
429 amr_enqueue_bio(sc, bio);
431 mtx_unlock(&sc->amr_list_lock);
435 /********************************************************************************
436 * Accept an open operation on the control device.
439 amr_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
441 int unit = dev2unit(dev);
442 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
446 sc->amr_state |= AMR_STATE_OPEN;
452 amr_del_ld(struct amr_softc *sc, int drv_no, int status)
457 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
458 sc->amr_state &= ~AMR_STATE_LD_DELETE;
459 sc->amr_state |= AMR_STATE_REMAP_LD;
460 debug(1, "State Set");
463 debug(1, "disk begin destroyed %d",drv_no);
464 if (--amr_disks_registered == 0)
465 cdevsw_remove(&amrddisk_cdevsw);
466 debug(1, "disk begin destroyed success");
472 amr_prepare_ld_delete(struct amr_softc *sc)
476 if (sc->ld_del_supported == 0)
479 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
480 sc->amr_state |= AMR_STATE_LD_DELETE;
482 /* Wait for all the commands to be flushed; the tsleep below times out after one minute. */
483 tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO,"delete_logical_drv",hz * 60 * 1);
484 if ( sc->amr_busyslots )
491 /********************************************************************************
492 * Accept the last close on the control device.
495 amr_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
497 int unit = dev2unit(dev);
498 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
502 sc->amr_state &= ~AMR_STATE_OPEN;
506 /********************************************************************************
507 * Handle controller-specific control operations.
510 amr_rescan_drives(struct cdev *dev)
512 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
515 sc->amr_state |= AMR_STATE_REMAP_LD;
516 while (sc->amr_busyslots) {
517 device_printf(sc->amr_dev, "idle controller\n");
521 /* mark ourselves as in-shutdown */
522 sc->amr_state |= AMR_STATE_SHUTDOWN;
524 /* flush controller */
525 device_printf(sc->amr_dev, "flushing cache...");
526 printf("%s\n", amr_flush(sc) ? "failed" : "done");
528 /* delete all our child devices */
529 for(i = 0 ; i < AMR_MAXLD; i++) {
530 if(sc->amr_drive[i].al_disk != 0) {
531 if((error = device_delete_child(sc->amr_dev,
532 sc->amr_drive[i].al_disk)) != 0)
535 sc->amr_drive[i].al_disk = 0;
544 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
547 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
548 struct amr_command *ac;
549 struct amr_mailbox *mb;
550 struct amr_linux_ioctl ali;
553 int adapter, len, ac_flags = 0;
554 int logical_drives_changed = 0;
555 u_int32_t linux_version = 0x02100000;
557 struct amr_passthrough *ap; /* 60 bytes */
564 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
566 switch (ali.ui.fcs.opcode) {
568 switch(ali.ui.fcs.subopcode) {
570 copyout(&linux_version, (void *)(uintptr_t)ali.data,
571 sizeof(linux_version));
576 copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
577 sizeof(linux_no_adapter));
578 td->td_retval[0] = linux_no_adapter;
583 printf("Unknown subopcode\n");
591 if (ali.ui.fcs.opcode == 0x80)
592 len = max(ali.outlen, ali.inlen);
594 len = ali.ui.fcs.length;
596 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
598 mb = (void *)&ali.mbox[0];
600 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
601 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
602 if (sc->amr_allow_vol_config == 0) {
606 logical_drives_changed = 1;
609 if (ali.mbox[0] == AMR_CMD_PASS) {
610 mtx_lock(&sc->amr_list_lock);
611 while ((ac = amr_alloccmd(sc)) == NULL)
612 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
613 mtx_unlock(&sc->amr_list_lock);
614 ap = &ac->ac_ccb->ccb_pthru;
616 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
617 sizeof(struct amr_passthrough));
621 if (ap->ap_data_transfer_length)
622 dp = malloc(ap->ap_data_transfer_length, M_AMR,
626 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
627 dp, ap->ap_data_transfer_length);
632 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB;
633 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
634 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
635 ac->ac_flags = ac_flags;
638 ac->ac_length = ap->ap_data_transfer_length;
639 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
641 mtx_lock(&sc->amr_list_lock);
642 error = amr_wait_command(ac);
643 mtx_unlock(&sc->amr_list_lock);
647 status = ac->ac_status;
648 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
653 error = copyout(dp, temp, ap->ap_data_transfer_length);
657 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
663 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
664 printf("No AMR_CMD_PASS_64\n");
667 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
668 printf("No AMR_CMD_EXTPASS\n");
673 * Bug-for-bug compatibility with Linux!
674 * Some apps will send commands with inlen and outlen set to 0,
675 * even though they expect data to be transferred to them from the
676 * card. Linux accidentally allows this by allocating a 4KB
677 * buffer for the transfer anyway, but it then throws it away
678 * without copying it back to the app.
683 dp = malloc(len, M_AMR, M_WAITOK | M_ZERO);
686 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
691 mtx_lock(&sc->amr_list_lock);
692 while ((ac = amr_alloccmd(sc)) == NULL)
693 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
695 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
696 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
697 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
701 ac->ac_flags = ac_flags;
703 error = amr_wait_command(ac);
704 mtx_unlock(&sc->amr_list_lock);
708 status = ac->ac_status;
709 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
711 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, len);
717 if (logical_drives_changed)
718 amr_rescan_drives(dev);
724 debug(1, "unknown linux ioctl 0x%lx", cmd);
725 printf("unknown linux ioctl 0x%lx\n", cmd);
731 * At this point, we know that there is a lock held and that these
732 * objects have been allocated.
734 mtx_lock(&sc->amr_list_lock);
737 mtx_unlock(&sc->amr_list_lock);
744 amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
746 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
749 struct amr_user_ioctl *au;
750 #ifdef AMR_IO_COMMAND32
751 struct amr_user_ioctl32 *au32;
755 struct amr_command *ac;
756 struct amr_mailbox_ioctl *mbi;
757 void *dp, *au_buffer;
758 unsigned long au_length;
759 unsigned char *au_cmd;
760 int *au_statusp, au_direction;
762 struct amr_passthrough *ap; /* 60 bytes */
763 int logical_drives_changed = 0;
767 arg._p = (void *)addr;
777 debug(1, "AMR_IO_VERSION");
778 *arg.result = AMR_IO_VERSION_NUMBER;
781 #ifdef AMR_IO_COMMAND32
783 * Accept ioctl-s from 32-bit binaries on non-32-bit
784 * platforms, such as AMD. LSI's MEGAMGR utility is
785 * the only example known today... -mi
787 case AMR_IO_COMMAND32:
788 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
789 au_cmd = arg.au32->au_cmd;
790 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
791 au_length = arg.au32->au_length;
792 au_direction = arg.au32->au_direction;
793 au_statusp = &arg.au32->au_status;
798 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
799 au_cmd = arg.au->au_cmd;
800 au_buffer = (void *)arg.au->au_buffer;
801 au_length = arg.au->au_length;
802 au_direction = arg.au->au_direction;
803 au_statusp = &arg.au->au_status;
807 case 0xc06e6d00: /* Linux emulation */
810 struct amr_linux_ioctl ali;
813 devclass = devclass_find("amr");
814 if (devclass == NULL)
817 error = copyin(addr, &ali, sizeof(ali));
820 if (ali.ui.fcs.opcode == 0x82)
823 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
825 sc = devclass_get_softc(devclass, adapter);
829 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd, addr, 0, td));
832 debug(1, "unknown ioctl 0x%lx", cmd);
836 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
837 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
838 if (sc->amr_allow_vol_config == 0) {
842 logical_drives_changed = 1;
844 if ((error = amr_prepare_ld_delete(sc)) != 0)
849 /* handle inbound data buffer */
850 if (au_length != 0 && au_cmd[0] != 0x06) {
851 if ((dp = malloc(au_length, M_AMR, M_WAITOK|M_ZERO)) == NULL) {
855 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
859 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
862 /* Allocate this now before the mutex gets held */
864 mtx_lock(&sc->amr_list_lock);
865 while ((ac = amr_alloccmd(sc)) == NULL)
866 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
868 /* handle SCSI passthrough command */
869 if (au_cmd[0] == AMR_CMD_PASS) {
872 ap = &ac->ac_ccb->ccb_pthru;
873 bzero(ap, sizeof(struct amr_passthrough));
877 ap->ap_cdb_length = len;
878 bcopy(au_cmd + 3, ap->ap_cdb, len);
880 /* build passthrough */
881 ap->ap_timeout = au_cmd[len + 3] & 0x07;
882 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
883 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
884 ap->ap_logical_drive_no = au_cmd[len + 4];
885 ap->ap_channel = au_cmd[len + 5];
886 ap->ap_scsi_id = au_cmd[len + 6];
887 ap->ap_request_sense_length = 14;
888 ap->ap_data_transfer_length = au_length;
889 /* XXX what about the request-sense area? does the caller want it? */
892 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
893 ac->ac_flags = AMR_CMD_CCB;
896 /* direct command to controller */
897 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
899 /* copy pertinent mailbox items */
900 mbi->mb_command = au_cmd[0];
901 mbi->mb_channel = au_cmd[1];
902 mbi->mb_param = au_cmd[2];
903 mbi->mb_pad[0] = au_cmd[3];
904 mbi->mb_drive = au_cmd[4];
908 /* build the command */
910 ac->ac_length = au_length;
911 ac->ac_flags |= AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
913 /* run the command */
914 error = amr_wait_command(ac);
915 mtx_unlock(&sc->amr_list_lock);
919 /* copy out data and set status */
920 if (au_length != 0) {
921 error = copyout(dp, au_buffer, au_length);
923 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
925 debug(2, "%p status 0x%x", dp, ac->ac_status);
926 *au_statusp = ac->ac_status;
930 * At this point, we know that there is a lock held and that these
931 * objects have been allocated.
933 mtx_lock(&sc->amr_list_lock);
936 mtx_unlock(&sc->amr_list_lock);
941 if (logical_drives_changed)
942 amr_rescan_drives(dev);
948 /********************************************************************************
949 ********************************************************************************
951 ********************************************************************************
952 ********************************************************************************/
954 /********************************************************************************
955 * Perform a periodic check of the controller status
958 amr_periodic(void *data)
960 struct amr_softc *sc = (struct amr_softc *)data;
964 /* XXX perform periodic status checks here */
966 /* compensate for missed interrupts */
970 sc->amr_timeout = timeout(amr_periodic, sc, hz);
973 /********************************************************************************
974 ********************************************************************************
976 ********************************************************************************
977 ********************************************************************************/
979 /********************************************************************************
980 * Interrogate the controller for the operational parameters we require.
983 amr_query_controller(struct amr_softc *sc)
985 struct amr_enquiry3 *aex;
986 struct amr_prodinfo *ap;
987 struct amr_enquiry *ae;
992 * Greater than 10 byte cdb support
994 sc->support_ext_cdb = amr_support_ext_cdb(sc);
996 if(sc->support_ext_cdb) {
997 debug(2,"supports extended CDBs.");
1001 * Try to issue an ENQUIRY3 command
1003 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
1004 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
1007 * Fetch current state of logical drives.
1009 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
1010 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
1011 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
1012 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
1013 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1014 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1019 * Get product info for channel count.
1021 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
1022 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
1025 sc->amr_maxdrives = 40;
1026 sc->amr_maxchan = ap->ap_nschan;
1027 sc->amr_maxio = ap->ap_maxio;
1028 sc->amr_type |= AMR_TYPE_40LD;
1031 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1035 sc->amr_ld_del_supported = 1;
1036 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1040 /* failed, try the 8LD ENQUIRY commands */
1041 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1042 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1043 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1046 ae->ae_signature = 0;
1050 * Fetch current state of logical drives.
1052 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1053 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
1054 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
1055 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1056 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1057 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1060 sc->amr_maxdrives = 8;
1061 sc->amr_maxchan = ae->ae_adapter.aa_channels;
1062 sc->amr_maxio = ae->ae_adapter.aa_maxio;
1067 * Mark remaining drives as unused.
1069 for (; ldrv < AMR_MAXLD; ldrv++)
1070 sc->amr_drive[ldrv].al_size = 0xffffffff;
1073 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1074 * the controller's reported value, and lockups have been seen when we do.
1076 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1081 /********************************************************************************
1082 * Run a generic enquiry-style command.
1085 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1087 struct amr_command *ac;
1097 /* get ourselves a command buffer */
1098 mtx_lock(&sc->amr_list_lock);
1099 ac = amr_alloccmd(sc);
1100 mtx_unlock(&sc->amr_list_lock);
1103 /* allocate the response structure */
1104 if ((result = malloc(bufsize, M_AMR, M_ZERO|M_NOWAIT)) == NULL)
1106 /* set command flags */
1108 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1110 /* point the command at our data */
1111 ac->ac_data = result;
1112 ac->ac_length = bufsize;
1114 /* build the command proper */
1115 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1121 /* can't assume that interrupts are going to work here, so play it safe */
1122 if (sc->amr_poll_command(ac))
1124 error = ac->ac_status;
1125 *status = ac->ac_status;
1128 mtx_lock(&sc->amr_list_lock);
1131 mtx_unlock(&sc->amr_list_lock);
1132 if ((error != 0) && (result != NULL)) {
1133 free(result, M_AMR);
1139 /********************************************************************************
1140 * Flush the controller's internal cache, return status.
1143 amr_flush(struct amr_softc *sc)
1145 struct amr_command *ac;
1148 /* get ourselves a command buffer */
1150 mtx_lock(&sc->amr_list_lock);
1151 ac = amr_alloccmd(sc);
1152 mtx_unlock(&sc->amr_list_lock);
1155 /* set command flags */
1156 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1158 /* build the command proper */
1159 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1161 /* we have to poll, as the system may be going down or otherwise damaged */
1162 if (sc->amr_poll_command(ac))
1164 error = ac->ac_status;
1167 mtx_lock(&sc->amr_list_lock);
1170 mtx_unlock(&sc->amr_list_lock);
1174 /********************************************************************************
1175 * Detect extended CDB support (CDBs greater than 10 bytes).
1176 * Returns 1 if the controller supports extended CDBs,
1177 * 0 if it does not.
1180 amr_support_ext_cdb(struct amr_softc *sc)
1182 struct amr_command *ac;
1186 /* get ourselves a command buffer */
1188 mtx_lock(&sc->amr_list_lock);
1189 ac = amr_alloccmd(sc);
1190 mtx_unlock(&sc->amr_list_lock);
1193 /* set command flags */
1194 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1196 /* build the command proper */
1197 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1202 /* we have to poll, as the system may be going down or otherwise damaged */
1203 if (sc->amr_poll_command(ac))
1205 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1210 mtx_lock(&sc->amr_list_lock);
1213 mtx_unlock(&sc->amr_list_lock);
1217 /********************************************************************************
1218 * Try to find I/O work for the controller from one or more of the work queues.
1220 * We make the assumption that if the controller is not ready to take a command
1221 * at some given time, it will generate an interrupt at some later time when
1225 amr_startio(struct amr_softc *sc)
1227 struct amr_command *ac;
1229 /* spin until something prevents us from doing any work */
1232 /* Don't bother to queue commands if no bounce buffers are available. */
1233 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1236 /* try to get a ready command */
1237 ac = amr_dequeue_ready(sc);
1239 /* if that failed, build a command from a bio */
1241 (void)amr_bio_command(sc, &ac);
1243 #if AMR_ENABLE_CAM != 0
1244 /* if that failed, build a command from a ccb */
1246 (void)amr_cam_command(sc, &ac);
1249 /* if we don't have anything to do, give up */
1253 /* try to give the command to the controller; if this fails save it for later and give up */
1254 if (amr_start(ac)) {
1255 debug(2, "controller busy, command deferred");
1256 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
1262 /********************************************************************************
1263 * Handle completion of an I/O command.
1266 amr_completeio(struct amr_command *ac)
1268 struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1;
1269 static struct timeval lastfail;
1272 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1273 ac->ac_bio->bio_error = EIO;
1274 ac->ac_bio->bio_flags |= BIO_ERROR;
1276 if (ppsratecheck(&lastfail, &curfail, 1))
1277 device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1278 /* amr_printcommand(ac);*/
1280 amrd_intr(ac->ac_bio);
1281 mtx_lock(&ac->ac_sc->amr_list_lock);
1283 mtx_unlock(&ac->ac_sc->amr_list_lock);
1286 /********************************************************************************
1287 ********************************************************************************
1289 ********************************************************************************
1290 ********************************************************************************/
1292 /********************************************************************************
1293 * Convert a bio off the top of the bio queue into a command.
1296 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1298 struct amr_command *ac;
1299 struct amrd_softc *amrd;
1310 if ((ac = amr_alloccmd(sc)) == NULL)
1313 /* get a bio to work on */
1314 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1319 /* connect the bio to the command */
1320 ac->ac_complete = amr_completeio;
1322 ac->ac_data = bio->bio_data;
1323 ac->ac_length = bio->bio_bcount;
1325 switch (bio->bio_cmd) {
1327 ac->ac_flags |= AMR_CMD_DATAIN;
1328 if (AMR_IS_SG64(sc)) {
1329 cmd = AMR_CMD_LREAD64; /* 64-bit s/g variant of logical read */
1330 ac->ac_flags |= AMR_CMD_SG64;
1332 cmd = AMR_CMD_LREAD;
1335 ac->ac_flags |= AMR_CMD_DATAOUT;
1336 if (AMR_IS_SG64(sc)) {
1337 cmd = AMR_CMD_LWRITE64; /* 64-bit s/g variant of logical write */
1338 ac->ac_flags |= AMR_CMD_SG64;
1340 cmd = AMR_CMD_LWRITE;
1343 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1344 cmd = AMR_CMD_FLUSH;
1347 amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
1348 driveno = amrd->amrd_drive - sc->amr_drive; /* logical drive index into sc->amr_drive[] */
1349 blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE; /* round byte count up to whole blocks */
1351 ac->ac_mailbox.mb_command = cmd;
1352 if (bio->bio_cmd & (BIO_READ|BIO_WRITE)) {
1353 ac->ac_mailbox.mb_blkcount = blkcount;
1354 ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1355 if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) {
1356 device_printf(sc->amr_dev,
1357 "I/O beyond end of unit (%lld,%d > %lu)\n",
1358 (long long)bio->bio_pblkno, blkcount,
1359 (u_long)sc->amr_drive[driveno].al_size);
1362 ac->ac_mailbox.mb_drive = driveno;
1363 if (sc->amr_state & AMR_STATE_REMAP_LD)
1364 ac->ac_mailbox.mb_drive |= 0x80; /* flag remapped logical-drive numbering for the firmware */
1366 /* we fill in the s/g related data when the command is mapped */
1373 /********************************************************************************
1374 * Take a command, submit it to the controller and sleep until it completes
1375 * or fails. Interrupts must be enabled, returns nonzero on error.
1378 amr_wait_command(struct amr_command *ac)
1381 struct amr_softc *sc = ac->ac_sc;
1385 ac->ac_complete = NULL; /* no callback; completion path wakes us instead (AMR_CMD_SLEEP) */
1386 ac->ac_flags |= AMR_CMD_SLEEP;
1387 if ((error = amr_start(ac)) != 0) {
1391 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1392 error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0);
1398 /********************************************************************************
1399 * Take a command, submit it to the controller and busy-wait for it to return.
1400 * Returns nonzero on error. Can be safely called with interrupts enabled.
1403 amr_std_poll_command(struct amr_command *ac)
1405 struct amr_softc *sc = ac->ac_sc;
1410 ac->ac_complete = NULL; /* polled: no completion callback */
1411 if ((error = amr_start(ac)) != 0)
1417 * Poll for completion, although the interrupt handler may beat us to it.
1418 * Note that the timeout here is somewhat arbitrary.
1422 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1423 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1426 /* XXX the slot is now marked permanently busy */
1428 device_printf(sc->amr_dev, "polled command timeout\n");
/*
 * bus_dmamap_load() callback for polled commands: build the s/g list via
 * amr_setup_sg(), patch the segment count/address into the mailbox, then
 * hand the command to the controller-specific polled submit routine.
 */
1434 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1436 struct amr_command *ac = arg;
1437 struct amr_softc *sc = ac->ac_sc;
1441 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1442 ac->ac_status = AMR_STATUS_ABORTED;
1446 amr_setup_sg(arg, segs, nsegs, err);
1448 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1449 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1450 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1451 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1452 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1453 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1455 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1456 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1457 if (AC_IS_SG64(ac)) {
1459 ac->ac_sg64_lo = ac->ac_sgbusaddr; /* low word of the 64-bit s/g table address */
1462 sc->amr_poll_command1(sc, ac);
1465 /********************************************************************************
1466 * Take a command, submit it to the controller and busy-wait for it to return.
1467 * Returns nonzero on error. Can be safely called with interrupts enabled.
1470 amr_quartz_poll_command(struct amr_command *ac)
1472 struct amr_softc *sc = ac->ac_sc;
1479 if (AC_IS_SG64(ac)) {
1480 ac->ac_tag = sc->amr_buffer64_dmat; /* select the 64-bit DMA tag/map pair */
1481 ac->ac_datamap = ac->ac_dma64map;
1483 ac->ac_tag = sc->amr_buffer_dmat;
1484 ac->ac_datamap = ac->ac_dmamap;
1487 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1488 if (ac->ac_data != 0) {
1489 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1490 ac->ac_length, amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1494 error = amr_quartz_poll_command1(sc, ac);
/*
 * Core polled-submit path for Quartz-class controllers: copy the command's
 * mailbox into the shared mailbox area, ring the inbound doorbell, and spin
 * on the mailbox fields until the firmware posts status.  Runs under the
 * hardware lock; waits out any in-flight commands first.
 */
1501 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1505 mtx_lock(&sc->amr_hw_lock);
1506 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1508 while (sc->amr_busyslots) {
1509 msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
1515 if(sc->amr_busyslots) {
1516 device_printf(sc->amr_dev, "adapter is busy\n");
1517 mtx_unlock(&sc->amr_hw_lock);
1518 if (ac->ac_data != NULL) {
1519 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1526 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1528 /* clear the poll/ack fields in the mailbox */
1529 sc->amr_mailbox->mb_ident = 0xFE;
1530 sc->amr_mailbox->mb_nstatus = 0xFF;
1531 sc->amr_mailbox->mb_status = 0xFF;
1532 sc->amr_mailbox->mb_poll = 0;
1533 sc->amr_mailbox->mb_ack = 0;
1534 sc->amr_mailbox->mb_busy = 1;
1536 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
1538 while(sc->amr_mailbox->mb_nstatus == 0xFF) /* spin until firmware posts a status count */
1540 while(sc->amr_mailbox->mb_status == 0xFF) /* spin until firmware posts command status */
1542 ac->ac_status=sc->amr_mailbox->mb_status;
1543 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
1544 while(sc->amr_mailbox->mb_poll != 0x77) /* 0x77 appears to be the firmware poll handshake value */
1546 sc->amr_mailbox->mb_poll = 0;
1547 sc->amr_mailbox->mb_ack = 0x77;
1549 /* acknowledge that we have the commands */
1550 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1551 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK) /* wait for the controller to consume the ack */
1553 mtx_unlock(&sc->amr_hw_lock);
1555 /* unmap the command's data buffer */
1556 if (ac->ac_flags & AMR_CMD_DATAIN) {
1557 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTREAD);
1559 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1560 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, BUS_DMASYNC_POSTWRITE);
1562 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
/*
 * Release the controller slot held by (ac): clear the busy-command table
 * entry and decrement the busy-slot count.  Panics if the slot was not
 * actually marked busy, which would indicate queue corruption.
 */
1568 amr_freeslot(struct amr_command *ac)
1570 struct amr_softc *sc = ac->ac_sc;
1576 if (sc->amr_busycmd[slot] == NULL)
1577 panic("amr: slot %d not busy?\n", slot);
1579 sc->amr_busycmd[slot] = NULL;
1580 atomic_subtract_int(&sc->amr_busyslots, 1);
1585 /********************************************************************************
1586 * Map/unmap (ac)'s data in the controller's addressable space as required.
1588 * These functions may be safely called multiple times on a given command.
1591 amr_setup_sg(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1593 struct amr_command *ac = (struct amr_command *)arg;
1594 struct amr_sgentry *sg;
1595 struct amr_sg64entry *sg64;
1600 /* get base address of s/g table */
1601 sg = ac->ac_sg.sg32;
1602 sg64 = ac->ac_sg.sg64;
1604 if (AC_IS_SG64(ac)) {
1605 ac->ac_nsegments = nsegments;
1606 ac->ac_mb_physaddr = 0xffffffff; /* NOTE(review): 0xffffffff appears to signal 64-bit s/g mode to the firmware — confirm */
1607 for (i = 0; i < nsegments; i++, sg64++) {
1608 sg64->sg_addr = segs[i].ds_addr;
1609 sg64->sg_count = segs[i].ds_len;
1612 /* decide whether we need to populate the s/g table */
1613 if (nsegments < 2) {
1614 ac->ac_nsegments = 0;
1615 ac->ac_mb_physaddr = segs[0].ds_addr; /* single segment: mailbox points directly at the data */
1617 ac->ac_nsegments = nsegments;
1618 ac->ac_mb_physaddr = ac->ac_sgbusaddr; /* multiple segments: mailbox points at the s/g table */
1619 for (i = 0; i < nsegments; i++, sg++) {
1620 sg->sg_addr = segs[i].ds_addr;
1621 sg->sg_count = segs[i].ds_len;
1627 if (ac->ac_flags & AMR_CMD_DATAIN)
1628 flags |= BUS_DMASYNC_PREREAD;
1629 if (ac->ac_flags & AMR_CMD_DATAOUT)
1630 flags |= BUS_DMASYNC_PREWRITE;
1631 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flags);
1632 ac->ac_flags |= AMR_CMD_MAPPED;
/*
 * bus_dmamap_load() callback for interrupt-driven commands: the analogue of
 * amr_setup_polled_dmamap(), but submits via sc->amr_submit_command and
 * requeues the command if the controller is busy.
 */
1636 amr_setup_data(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1638 struct amr_command *ac = arg;
1639 struct amr_softc *sc = ac->ac_sc;
1643 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1648 amr_setup_sg(arg, segs, nsegs, err);
1650 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1651 mb_channel = ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel;
1652 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG &&
1653 ((mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG) ||
1654 (mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)))
1655 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param = ac->ac_nsegments;
1657 ac->ac_mailbox.mb_nsgelem = ac->ac_nsegments;
1658 ac->ac_mailbox.mb_physaddr = ac->ac_mb_physaddr;
1659 if (AC_IS_SG64(ac)) {
1661 ac->ac_sg64_lo = ac->ac_sgbusaddr; /* low word of the 64-bit s/g table address */
1664 if (sc->amr_submit_command(ac) == EBUSY) {
1666 amr_requeue_ready(ac); /* controller busy: defer for a later amr_startio() pass */
/*
 * bus_dmamap_load() callback for CAM passthrough commands: build the s/g
 * list, then store the segment count and data address into the (extended)
 * passthrough structure rather than the mailbox, and submit.
 */
1671 amr_setup_ccb(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1673 struct amr_command *ac = arg;
1674 struct amr_softc *sc = ac->ac_sc;
1675 struct amr_passthrough *ap = &ac->ac_ccb->ccb_pthru;
1676 struct amr_ext_passthrough *aep = &ac->ac_ccb->ccb_epthru;
1679 device_printf(sc->amr_dev, "error %d in %s", err, __FUNCTION__);
1684 /* Set up the mailbox portion of the command to point at the ccb */
1685 ac->ac_mailbox.mb_nsgelem = 0;
1686 ac->ac_mailbox.mb_physaddr = ac->ac_ccb_busaddr;
1688 amr_setup_sg(arg, segs, nsegs, err);
1690 switch (ac->ac_mailbox.mb_command) {
1691 case AMR_CMD_EXTPASS:
1692 aep->ap_no_sg_elements = ac->ac_nsegments;
1693 aep->ap_data_transfer_address = ac->ac_mb_physaddr;
1696 ap->ap_no_sg_elements = ac->ac_nsegments;
1697 ap->ap_data_transfer_address = ac->ac_mb_physaddr;
1700 panic("Unknown ccb command");
1703 if (sc->amr_submit_command(ac) == EBUSY) {
1705 amr_requeue_ready(ac); /* controller busy: defer for a later amr_startio() pass */
/*
 * Map (ac)'s data buffer for DMA (if any, and if not already mapped) and
 * submit the command.  If the DMA load is deferred (EINPROGRESS) the queue
 * is frozen until the callback fires; on EBUSY the command is requeued.
 */
1710 amr_mapcmd(struct amr_command *ac)
1712 bus_dmamap_callback_t *cb;
1713 struct amr_softc *sc = ac->ac_sc;
1717 if (AC_IS_SG64(ac)) {
1718 ac->ac_tag = sc->amr_buffer64_dmat; /* select the 64-bit DMA tag/map pair */
1719 ac->ac_datamap = ac->ac_dma64map;
1721 ac->ac_tag = sc->amr_buffer_dmat;
1722 ac->ac_datamap = ac->ac_dmamap;
1725 if (ac->ac_flags & AMR_CMD_CCB)
1728 cb = amr_setup_data;
1730 /* if the command involves data at all, and hasn't been mapped */
1731 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1732 /* map the data buffers into bus space and build the s/g list */
1733 if (bus_dmamap_load(ac->ac_tag, ac->ac_datamap, ac->ac_data,
1734 ac->ac_length, cb, ac, 0) == EINPROGRESS) {
1735 sc->amr_state |= AMR_STATE_QUEUE_FRZN; /* wait for the deferred load callback */
1738 if (sc->amr_submit_command(ac) == EBUSY) {
1740 amr_requeue_ready(ac);
/*
 * Undo amr_mapcmd(): sync the DMA map for the direction(s) the command
 * used, unload it, and clear AMR_CMD_MAPPED.  Safe to call on a command
 * that was never mapped.
 */
1748 amr_unmapcmd(struct amr_command *ac)
1754 /* if the command involved data at all and was mapped */
1755 if (ac->ac_flags & AMR_CMD_MAPPED) {
1757 if (ac->ac_data != NULL) {
1760 if (ac->ac_flags & AMR_CMD_DATAIN)
1761 flag |= BUS_DMASYNC_POSTREAD;
1762 if (ac->ac_flags & AMR_CMD_DATAOUT)
1763 flag |= BUS_DMASYNC_POSTWRITE;
1765 bus_dmamap_sync(ac->ac_tag, ac->ac_datamap, flag);
1766 bus_dmamap_unload(ac->ac_tag, ac->ac_datamap);
1769 ac->ac_flags &= ~AMR_CMD_MAPPED;
/*
 * Abort a command whose DMA load failed: mark it ABORTED and run it through
 * the normal completion path on a private queue head.  The list lock is
 * dropped around amr_complete() because completion callbacks retake it.
 */
1774 amr_abort_load(struct amr_command *ac)
1777 struct amr_softc *sc = ac->ac_sc;
1779 mtx_assert(&sc->amr_list_lock, MA_OWNED);
1781 ac->ac_status = AMR_STATUS_ABORTED;
1782 amr_init_qhead(&head);
1783 amr_enqueue_completed(ac, &head);
1785 mtx_unlock(&sc->amr_list_lock);
1786 amr_complete(sc, &head);
1787 mtx_lock(&sc->amr_list_lock);
1790 /********************************************************************************
1791 * Take a command and give it to the controller, returns 0 if successful, or
1792 * EBUSY if the command should be retried later.
1795 amr_start(struct amr_command *ac)
1797 struct amr_softc *sc;
1803 /* mark command as busy so that polling consumer can tell */
1805 ac->ac_flags |= AMR_CMD_BUSY;
1807 /* get a command slot (freed in amr_done) */
1809 if (sc->amr_busycmd[slot] != NULL)
1810 panic("amr: slot %d busy?\n", slot);
1811 sc->amr_busycmd[slot] = ac;
1812 atomic_add_int(&sc->amr_busyslots, 1);
1814 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1815 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1817 * Memory resources are short, so free the slot and let this be tried
1826 /********************************************************************************
1827 * Extract one or more completed commands from the controller (sc)
1829 * Returns nonzero if any commands on the work queue were marked as completed.
1833 amr_done(struct amr_softc *sc)
1836 struct amr_command *ac;
1837 struct amr_mailbox mbox;
1842 /* See if there's anything for us to do */
1844 amr_init_qhead(&head);
1846 /* loop collecting completed commands */
1848 /* poll for a completed command's identifier and status */
1849 if (sc->amr_get_work(sc, &mbox)) {
1852 /* iterate over completed commands in this result */
1853 for (i = 0; i < mbox.mb_nstatus; i++) {
1854 /* get pointer to busy command */
1855 idx = mbox.mb_completed[i] - 1; /* ident is slot + 1; see amr_*_submit_command */
1856 ac = sc->amr_busycmd[idx];
1858 /* really a busy command? */
1861 /* pull the command from the busy index */
1864 /* save status for later use */
1865 ac->ac_status = mbox.mb_status;
1866 amr_enqueue_completed(ac, &head);
1867 debug(3, "completed command with status %x", mbox.mb_status);
1869 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1873 break; /* no work */
1876 /* handle completion and timeouts */
1877 amr_complete(sc, &head);
1882 /********************************************************************************
1883 * Do completion processing on done commands on (sc)
1887 amr_complete(void *context, ac_qhead_t *head)
1889 struct amr_softc *sc = (struct amr_softc *)context;
1890 struct amr_command *ac;
1894 /* pull completed commands off the queue */
1896 ac = amr_dequeue_completed(sc, head);
1900 /* unmap the command's data buffer */
1904 * Is there a completion handler?
1906 if (ac->ac_complete != NULL) {
1907 /* unbusy the command */
1908 ac->ac_flags &= ~AMR_CMD_BUSY;
1909 ac->ac_complete(ac);
1912 * Is someone sleeping on this one?
1915 mtx_lock(&sc->amr_list_lock);
1916 ac->ac_flags &= ~AMR_CMD_BUSY;
1917 if (ac->ac_flags & AMR_CMD_SLEEP) {
1918 /* unbusy the command; wake the sleeper in amr_wait_command() */
1921 mtx_unlock(&sc->amr_list_lock);
1924 if(!sc->amr_busyslots) {
1929 mtx_lock(&sc->amr_list_lock);
1930 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN; /* resources freed up; unfreeze the queue */
1932 mtx_unlock(&sc->amr_list_lock);
1935 /********************************************************************************
1936 ********************************************************************************
1937 Command Buffer Management
1938 ********************************************************************************
1939 ********************************************************************************/
1941 /********************************************************************************
1942 * Get a new command buffer.
1944 * This may return NULL in low-memory cases.
1946 * If possible, we recycle a command buffer that's been used before.
1948 struct amr_command *
1949 amr_alloccmd(struct amr_softc *sc)
1951 struct amr_command *ac;
1955 ac = amr_dequeue_free(sc);
1957 sc->amr_state |= AMR_STATE_QUEUE_FRZN; /* none free: freeze queue until a command is released */
1961 /* clear out significant fields */
1963 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
1967 ac->ac_complete = NULL;
1970 ac->ac_datamap = NULL;
1974 /********************************************************************************
1975 * Release a command buffer for recycling.
1978 amr_releasecmd(struct amr_command *ac)
1982 amr_enqueue_free(ac);
1985 /********************************************************************************
1986 * Allocate a new command cluster and initialise it.
1989 amr_alloccmd_cluster(struct amr_softc *sc)
1991 struct amr_command_cluster *acc;
1992 struct amr_command *ac;
1996 * If we haven't found the real limit yet, let us have a couple of
1997 * commands in order to be able to probe.
1999 if (sc->amr_maxio == 0)
2002 if (sc->amr_nextslot > sc->amr_maxio)
2004 acc = malloc(AMR_CMD_CLUSTERSIZE, M_AMR, M_NOWAIT | M_ZERO);
2006 nextslot = sc->amr_nextslot;
2007 mtx_lock(&sc->amr_list_lock);
2008 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2009 mtx_unlock(&sc->amr_list_lock);
2010 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2011 ac = &acc->acc_command[i];
2013 ac->ac_slot = nextslot;
2016 * The SG table for each slot is a fixed size and is assumed to
2017 * to hold 64-bit s/g objects when the driver is configured to do
2018 * 64-bit DMA. 32-bit DMA commands still use the same table, but
2019 * cast down to 32-bit objects.
2021 if (AMR_IS_SG64(sc)) {
2022 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2023 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2024 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2026 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2027 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2028 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2031 ac->ac_ccb = sc->amr_ccb + ac->ac_slot; /* per-slot CAM ccb area, indexed identically */
2032 ac->ac_ccb_busaddr = sc->amr_ccb_busaddr +
2033 (ac->ac_slot * sizeof(union amr_ccb));
2035 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap))
2037 if (AMR_IS_SG64(sc) &&
2038 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map)))
2041 if (++nextslot > sc->amr_maxio)
2044 sc->amr_nextslot = nextslot;
2048 /********************************************************************************
2049 * Free a command cluster
2052 amr_freecmd_cluster(struct amr_command_cluster *acc)
2054 struct amr_softc *sc = acc->acc_command[0].ac_sc;
2057 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2058 if (acc->acc_command[i].ac_sc == NULL) /* never initialised; nothing to destroy */
2060 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2061 if (AMR_IS_SG64(sc))
2062 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2067 /********************************************************************************
2068 ********************************************************************************
2069 Interface-specific Shims
2070 ********************************************************************************
2071 ********************************************************************************/
2073 /********************************************************************************
2074 * Tell the controller that the mailbox contains a valid command
2077 amr_quartz_submit_command(struct amr_command *ac)
2079 struct amr_softc *sc = ac->ac_sc;
2080 static struct timeval lastfail;
2084 mtx_lock(&sc->amr_hw_lock);
2085 while (sc->amr_mailbox->mb_busy && (i++ < 10)) {
2087 /* This is a no-op read that flushes pending mailbox updates */
2090 if (sc->amr_mailbox->mb_busy) {
2091 mtx_unlock(&sc->amr_hw_lock);
2092 if (ac->ac_retries++ > 1000) {
2093 if (ppsratecheck(&lastfail, &curfail, 1)) /* rate-limit the complaint to 1/sec */
2094 device_printf(sc->amr_dev, "Too many retries on command %p. "
2095 "Controller is likely dead\n", ac);
2102 * Save the slot number so that we can locate this command when complete.
2103 * Note that ident = 0 seems to be special, so we don't use it.
2105 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2106 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2107 sc->amr_mailbox->mb_busy = 1;
2108 sc->amr_mailbox->mb_poll = 0;
2109 sc->amr_mailbox->mb_ack = 0;
2110 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi; /* 64-bit s/g address goes in the mailbox extension */
2111 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2113 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2114 mtx_unlock(&sc->amr_hw_lock);
/*
 * Standard-interface analogue of amr_quartz_submit_command(): checks the
 * mailbox busy flag via the status register, copies the command mailbox in,
 * and posts it with AMR_SPOST_COMMAND.
 */
2119 amr_std_submit_command(struct amr_command *ac)
2121 struct amr_softc *sc = ac->ac_sc;
2122 static struct timeval lastfail;
2125 mtx_lock(&sc->amr_hw_lock);
2126 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2127 mtx_unlock(&sc->amr_hw_lock);
2128 if (ac->ac_retries++ > 1000) {
2129 if (ppsratecheck(&lastfail, &curfail, 1)) /* rate-limit the complaint to 1/sec */
2130 device_printf(sc->amr_dev, "Too many retries on command %p. "
2131 "Controller is likely dead\n", ac);
2138 * Save the slot number so that we can locate this command when complete.
2139 * Note that ident = 0 seems to be special, so we don't use it.
2141 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2142 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2143 sc->amr_mailbox->mb_busy = 1;
2144 sc->amr_mailbox->mb_poll = 0;
2145 sc->amr_mailbox->mb_ack = 0;
2147 AMR_SPOST_COMMAND(sc);
2148 mtx_unlock(&sc->amr_hw_lock);
2152 /********************************************************************************
2153 * Claim any work that the controller has completed; acknowledge completion,
2154 * save details of the completion in (mbsave)
2157 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2162 u_int8_t completed[46];
2168 /* work waiting for us? */
2169 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2171 /* acknowledge interrupt */
2172 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2174 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff) /* 0xff means firmware hasn't written it yet */
2176 sc->amr_mailbox->mb_nstatus = 0xff; /* reset sentinel for the next completion */
2178 /* wait until fw wrote out all completions */
2179 for (i = 0; i < nstatus; i++) {
2180 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2182 sc->amr_mailbox->mb_completed[i] = 0xff;
2185 /* Save information for later processing */
2186 mbsave->mb_nstatus = nstatus;
2187 mbsave->mb_status = sc->amr_mailbox->mb_status;
2188 sc->amr_mailbox->mb_status = 0xff;
2190 for (i = 0; i < nstatus; i++)
2191 mbsave->mb_completed[i] = completed[i];
2193 /* acknowledge that we have the commands */
2194 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2197 #ifndef AMR_QUARTZ_GOFASTER
2199 * This waits for the controller to notice that we've taken the
2200 * command from it. It's very inefficient, and we shouldn't do it,
2201 * but if we remove this code, we stop completing commands under
2204 * Peter J says we shouldn't do this. The documentation says we
2205 * should. Who is right?
2207 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2208 ; /* XXX aiee! what if it dies? */
2212 worked = 1; /* got some work */
/*
 * Standard-interface analogue of amr_quartz_get_work(): if the interrupt
 * status register shows a valid interrupt, ack it, snapshot the mailbox
 * (which lists the completed command idents) into (mbsave), and ack the
 * mailbox handoff.
 */
2219 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2228 /* check for valid interrupt status */
2229 istat = AMR_SGET_ISTAT(sc);
2230 if ((istat & AMR_SINTR_VALID) != 0) {
2231 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2233 /* save mailbox, which contains a list of completed commands */
2234 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2236 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2243 /********************************************************************************
2244 * Notify the controller of the mailbox location.
2247 amr_std_attach_mailbox(struct amr_softc *sc)
2250 /* program the mailbox physical address, one byte at a time (little-endian) */
2251 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2252 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2253 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2254 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2255 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2257 /* clear any outstanding interrupt and enable interrupts proper */
2258 AMR_SACK_INTERRUPT(sc);
2259 AMR_SENABLE_INTR(sc);
#ifdef AMR_BOARD_INIT
2263 /********************************************************************************
2264 * Initialise the controller
2267 amr_quartz_init(struct amr_softc *sc)
2269 int status, ostatus;
2271 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2276 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
2277 if (status != ostatus) { /* only report each state transition once */
2278 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2282 case AMR_QINIT_NOMEM:
2285 case AMR_QINIT_SCAN:
2286 /* XXX we could print channel/target here */
/*
 * Standard-interface analogue of amr_quartz_init(): poll the init status
 * register until AMR_SINIT_DONE, reporting each state change.
 */
2294 amr_std_init(struct amr_softc *sc)
2296 int status, ostatus;
2298 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2303 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2304 if (status != ostatus) { /* only report each state transition once */
2305 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2309 case AMR_SINIT_NOMEM:
2312 case AMR_SINIT_INPROG:
2313 /* XXX we could print channel/target here? */
2321 /********************************************************************************
2322 ********************************************************************************
2324 ********************************************************************************
2325 ********************************************************************************/
2327 /********************************************************************************
2328 * Identify the controller and print some information about it.
2331 amr_describe_controller(struct amr_softc *sc)
2333 struct amr_prodinfo *ap;
2334 struct amr_enquiry *ae;
2339 * Try to get 40LD product info, which tells us what the card is labelled as.
2341 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2342 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2343 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2351 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2353 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2354 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2356 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2359 * Try to work it out based on the PCI signatures.
2361 switch (pci_get_device(sc->amr_dev)) {
2363 prod = "Series 428";
2366 prod = "Series 434";
2369 prod = "unknown controller";
2373 device_printf(sc->amr_dev, "<unsupported controller>\n");
2378 * HP NetRaid controllers have a special encoding of the firmware and
2379 * BIOS versions. The AMI version seems to have it as strings whereas
2380 * the HP version does it with a leading uppercase character and two
2384 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2385 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2386 ae->ae_adapter.aa_firmware[1] < ' ' &&
2387 ae->ae_adapter.aa_firmware[0] < ' ' &&
2388 ae->ae_adapter.aa_bios[2] >= 'A' &&
2389 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2390 ae->ae_adapter.aa_bios[1] < ' ' &&
2391 ae->ae_adapter.aa_bios[0] < ' ') {
2393 /* this looks like we have an HP NetRaid version of the MegaRaid */
2395 if(ae->ae_signature == AMR_SIG_438) {
2396 /* the AMI 438 is a NetRaid 3si in HP-land */
2397 prod = "HP NetRaid 3si";
2400 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2401 prod, ae->ae_adapter.aa_firmware[2],
2402 ae->ae_adapter.aa_firmware[1],
2403 ae->ae_adapter.aa_firmware[0],
2404 ae->ae_adapter.aa_bios[2],
2405 ae->ae_adapter.aa_bios[1],
2406 ae->ae_adapter.aa_bios[0],
2407 ae->ae_adapter.aa_memorysize);
2409 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2410 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2411 ae->ae_adapter.aa_memorysize);
/*
 * Write (blks) blocks from (data) to LBA (lba) on logical drive (unit)
 * using the polled command path (interrupts may not be usable, e.g. during
 * a crash dump).  Returns the command's completion status.
 */
2417 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2419 struct amr_command *ac;
2424 sc->amr_state |= AMR_STATE_INTEN;
2426 /* get ourselves a command buffer */
2427 if ((ac = amr_alloccmd(sc)) == NULL)
2429 /* set command flags */
2430 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2432 /* point the command at our data */
2434 ac->ac_length = blks * AMR_BLKSIZE;
2436 /* build the command proper */
2437 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2438 ac->ac_mailbox.mb_blkcount = blks;
2439 ac->ac_mailbox.mb_lba = lba;
2440 ac->ac_mailbox.mb_drive = unit;
2442 /* can't assume that interrupts are going to work here, so play it safe */
2443 if (sc->amr_poll_command(ac))
2445 error = ac->ac_status;
2451 sc->amr_state &= ~AMR_STATE_INTEN;
2458 /********************************************************************************
2459 * Print the command (ac) in human-readable format
2463 amr_printcommand(struct amr_command *ac)
2465 struct amr_softc *sc = ac->ac_sc;
2466 struct amr_sgentry *sg;
2469 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2470 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2471 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2472 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2473 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2474 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2475 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2476 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2478 /* get base address of s/g table; NOTE(review): dumps the 32-bit table only, even for SG64 commands */
2479 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2480 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2481 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);