2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD$");
62 * Driver for the AMI MegaRaid family of controllers.
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/kernel.h>
70 #include <sys/sysctl.h>
77 #include <machine/bus.h>
78 #include <machine/cpu.h>
79 #include <machine/resource.h>
82 #include <dev/pci/pcireg.h>
83 #include <dev/pci/pcivar.h>
85 #include <dev/amr/amrio.h>
86 #include <dev/amr/amrreg.h>
87 #include <dev/amr/amrvar.h>
88 #define AMR_DEFINE_TABLES
89 #include <dev/amr/amr_tables.h>
92 * The CAM interface appears to be completely broken. Disable it.
94 #ifndef AMR_ENABLE_CAM
95 #define AMR_ENABLE_CAM 0
/*
 * Sysctl root for driver tunables, control-device entry points, and forward
 * declarations for the driver's internal interfaces.
 */
98 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
100 static d_open_t amr_open;
101 static d_close_t amr_close;
102 static d_ioctl_t amr_ioctl;
/*
 * Character-device switch for the /dev/amr%d control node; D_NEEDGIANT marks
 * the entry points as Giant-locked.
 * NOTE(review): this listing elides lines — the .d_open/.d_name entries and
 * the closing "};" are not visible here; confirm against the full source.
 */
104 static struct cdevsw amr_cdevsw = {
105 .d_version = D_VERSION,
106 .d_flags = D_NEEDGIANT,
108 .d_close = amr_close,
109 .d_ioctl = amr_ioctl,
/*
 * Adapter count used by the Linux-emulation ioctl path (see
 * amr_linux_ioctl_int); deliberately non-static so it is visible to other
 * compilation units.
 */
113 int linux_no_adapter = 0;
115 * Initialisation, bus interface.
117 static void amr_startup(void *arg);
/* Controller interrogation and command completion. */
122 static int amr_query_controller(struct amr_softc *sc);
123 static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
124 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
125 static void amr_completeio(struct amr_command *ac);
126 static int amr_support_ext_cdb(struct amr_softc *sc);
129 * Command buffer allocation.
131 static void amr_alloccmd_cluster(struct amr_softc *sc);
132 static void amr_freecmd_cluster(struct amr_command_cluster *acc);
135 * Command processing.
137 static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
138 static int amr_wait_command(struct amr_command *ac) __unused;
139 static int amr_mapcmd(struct amr_command *ac);
140 static void amr_unmapcmd(struct amr_command *ac);
141 static int amr_start(struct amr_command *ac);
142 static void amr_complete(void *context, int pending);
/* busdma callbacks: 32-bit, 64-bit, and data-buffer S/G list construction. */
143 static void amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
144 static void amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
145 static void amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
150 static void amr_periodic(void *data);
153 * Interface-specific shims
/* "Quartz" (PCI memory-mapped) vs "standard" (I/O-port) controller variants. */
155 static int amr_quartz_submit_command(struct amr_command *ac);
156 static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
157 static int amr_quartz_poll_command(struct amr_command *ac);
158 static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
160 static int amr_std_submit_command(struct amr_command *ac);
161 static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
162 static int amr_std_poll_command(struct amr_command *ac);
163 static void amr_std_attach_mailbox(struct amr_softc *sc);
165 #ifdef AMR_BOARD_INIT
166 static int amr_quartz_init(struct amr_softc *sc);
167 static int amr_std_init(struct amr_softc *sc);
/* Debugging / diagnostic helpers. */
173 static void amr_describe_controller(struct amr_softc *sc);
176 static void amr_printcommand(struct amr_command *ac);
180 static void amr_init_sysctl(struct amr_softc *sc);
181 static int amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr,
182 int32_t flag, d_thread_t *td);
184 /********************************************************************************
185 ********************************************************************************
187 ********************************************************************************
188 ********************************************************************************/
190 /********************************************************************************
191 ********************************************************************************
193 ********************************************************************************
194 ********************************************************************************/
196 /********************************************************************************
197 * Initialise the controller and softc.
/*
 * One-time controller/softc initialisation: set up queues, bind the
 * interface-specific shims, query the controller, create the control
 * device, and register a config_intrhook so amr_startup() runs once
 * interrupts are enabled.
 */
200 amr_attach(struct amr_softc *sc)
206 * Initialise per-controller queues.
208 TAILQ_INIT(&sc->amr_completed);
209 TAILQ_INIT(&sc->amr_freecmds);
210 TAILQ_INIT(&sc->amr_cmd_clusters);
211 TAILQ_INIT(&sc->amr_ready);
212 bioq_init(&sc->amr_bioq);
214 debug(2, "queue init done");
217 * Configure for this controller type.
219 if (AMR_IS_QUARTZ(sc)) {
220 sc->amr_submit_command = amr_quartz_submit_command;
221 sc->amr_get_work = amr_quartz_get_work;
222 sc->amr_poll_command = amr_quartz_poll_command;
223 sc->amr_poll_command1 = amr_quartz_poll_command1;
225 sc->amr_submit_command = amr_std_submit_command;
226 sc->amr_get_work = amr_std_get_work;
227 sc->amr_poll_command = amr_std_poll_command;
/* NOTE(review): stray double semicolon below — harmless but should be cleaned up. */
228 amr_std_attach_mailbox(sc);;
231 #ifdef AMR_BOARD_INIT
/* NOTE(review): parentheses look unbalanced in this listing (3 open, 4 close) — verify against full source. */
232 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc))))
237 * Quiz controller for features and limits.
239 if (amr_query_controller(sc))
242 debug(2, "controller query complete");
249 #if AMR_ENABLE_CAM != 0
251 * Attach our 'real' SCSI channels to CAM.
253 if (amr_cam_attach(sc))
255 debug(2, "CAM attach done");
259 * Create the control device.
261 sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
262 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
263 sc->amr_dev_t->si_drv1 = sc;
/* Linux megamgr expects a /dev/megadev0 alias on the first adapter. */
265 if (device_get_unit(sc->amr_dev) == 0)
266 make_dev_alias(sc->amr_dev_t, "megadev0");
269 * Schedule ourselves to bring the controller up once interrupts are
272 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
273 sc->amr_ich.ich_func = amr_startup;
274 sc->amr_ich.ich_arg = sc;
275 if (config_intrhook_establish(&sc->amr_ich) != 0) {
276 device_printf(sc->amr_dev, "can't establish configuration hook\n");
281 * Print a little information about the controller.
283 amr_describe_controller(sc);
285 debug(2, "attach complete");
289 /********************************************************************************
290 * Locate disk resources and attach children to them.
/*
 * Deferred bring-up, invoked via the config_intrhook registered in
 * amr_attach() once interrupts are live: rescan the controller for logical
 * drives, attach a child device per drive, then mark the controller up.
 */
293 amr_startup(void *arg)
295 struct amr_softc *sc = (struct amr_softc *)arg;
296 struct amr_logdrive *dr;
301 /* pull ourselves off the intrhook chain */
302 if (sc->amr_ich.ich_func)
303 config_intrhook_disestablish(&sc->amr_ich);
304 sc->amr_ich.ich_func = NULL;
306 /* get up-to-date drive information */
307 if (amr_query_controller(sc)) {
308 device_printf(sc->amr_dev, "can't scan controller for drives\n");
/* 0xffffffff is the "unused drive" sentinel set by amr_query_controller(). */
312 /* iterate over available drives */
313 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
314 /* are we already attached to this drive? */
315 if (dr->al_disk == 0) {
316 /* generate geometry information */
317 if (dr->al_size > 0x200000) { /* extended translation? */
324 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
326 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
327 if (dr->al_disk == 0)
328 device_printf(sc->amr_dev, "device_add_child failed\n");
329 device_set_ivars(dr->al_disk, dr);
333 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
334 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
336 /* mark controller back up */
337 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
339 /* interrupts will be enabled before we do anything more */
340 sc->amr_state |= AMR_STATE_INTEN;
343 * Start the timeout routine.
/* Periodic status polling is currently disabled (see amr_periodic). */
345 /* sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
/*
 * Register the per-device sysctl that gates volume create/delete through
 * the ioctl paths (checked as sc->amr_allow_vol_config).
 */
351 amr_init_sysctl(struct amr_softc *sc)
354 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
355 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
356 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
361 /*******************************************************************************
362 * Free resources associated with a controller instance
/*
 * Teardown counterpart of amr_attach(): detach CAM, stop the periodic
 * timeout, release command clusters, destroy the control device, and tear
 * down the mutexes.  Assumes no commands remain outstanding.
 */
365 amr_free(struct amr_softc *sc)
367 struct amr_command_cluster *acc;
369 #if AMR_ENABLE_CAM != 0
370 /* detach from CAM */
374 /* cancel status timeout */
375 untimeout(amr_periodic, sc, sc->amr_timeout);
377 /* throw away any command buffers */
378 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
379 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
380 amr_freecmd_cluster(acc);
383 /* destroy control device */
384 if( sc->amr_dev_t != (struct cdev *)NULL)
385 destroy_dev(sc->amr_dev_t);
/* Mutexes may not have been initialised if attach failed early. */
387 if (mtx_initialized(&sc->amr_hw_lock))
388 mtx_destroy(&sc->amr_hw_lock);
390 if (mtx_initialized(&sc->amr_list_lock))
391 mtx_destroy(&sc->amr_list_lock);
394 /*******************************************************************************
395 * Receive a bio structure from a child device and queue it on a particular
396 * disk resource, then poke the disk resource to start as much work as it can.
/*
 * Entry point for I/O from the amrd disk children: queue the bio under the
 * list lock; the elided remainder presumably kicks amr_startio().
 */
399 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
403 mtx_lock(&sc->amr_list_lock);
404 amr_enqueue_bio(sc, bio);
406 mtx_unlock(&sc->amr_list_lock);
410 /********************************************************************************
411 * Accept an open operation on the control device.
/*
 * Open handler for the control device: look up the softc by unit and mark
 * the device open.  NOTE(review): devclass_get_softc() result is not
 * visibly NULL-checked here — the check may be in elided lines; confirm.
 */
414 amr_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
416 int unit = minor(dev);
417 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
421 sc->amr_state |= AMR_STATE_OPEN;
/*
 * Post-processing after a logical-drive delete command: clear the
 * queue-frozen/delete-in-progress state, request an LD remap, and drop the
 * per-disk cdevsw when the last registered disk goes away.
 */
427 amr_del_ld(struct amr_softc *sc, int drv_no, int status)
432 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
433 sc->amr_state &= ~AMR_STATE_LD_DELETE;
/* Subsequent I/O must use remapped drive numbers (see amr_bio_command's 0x80 bit). */
434 sc->amr_state |= AMR_STATE_REMAP_LD;
435 debug(1, "State Set");
438 debug(1, "disk begin destroyed %d",drv_no);
439 if (--amr_disks_registered == 0)
440 cdevsw_remove(&amrddisk_cdevsw);
441 debug(1, "disk begin destroyed success");
/*
 * Quiesce the controller before a logical-drive delete: freeze the command
 * queue and wait for in-flight commands to drain.
 */
447 amr_prepare_ld_delete(struct amr_softc *sc)
451 if (sc->ld_del_supported == 0)
454 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
455 sc->amr_state |= AMR_STATE_LD_DELETE;
457 /* 5 minutes for the all the commands to be flushed.*/
/* NOTE(review): the comment above says 5 minutes but hz * 60 * 1 is one
 * minute — one of the two should be corrected. */
458 tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO,"delete_logical_drv",hz * 60 * 1);
/* Still-busy slots after the wait mean the quiesce failed. */
459 if ( sc->amr_busyslots )
466 /********************************************************************************
467 * Accept the last close on the control device.
/*
 * Last-close handler for the control device: mirror of amr_open(), clears
 * the open flag on the unit's softc.
 */
470 amr_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
472 int unit = minor(dev);
473 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
477 sc->amr_state &= ~AMR_STATE_OPEN;
481 /********************************************************************************
482 * Handle controller-specific control operations.
/*
 * Re-enumerate logical drives after a create/delete: wait for the
 * controller to go idle, flush its cache, delete all existing amrd
 * children, then (in elided lines) re-run the startup scan.
 */
485 amr_rescan_drives(struct cdev *dev)
487 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
490 sc->amr_state |= AMR_STATE_REMAP_LD;
/* Busy-wait for outstanding commands; prints each pass (elided sleep likely follows). */
491 while (sc->amr_busyslots) {
492 device_printf(sc->amr_dev, "idle controller\n");
496 /* mark ourselves as in-shutdown */
497 sc->amr_state |= AMR_STATE_SHUTDOWN;
499 /* flush controller */
500 device_printf(sc->amr_dev, "flushing cache...");
501 printf("%s\n", amr_flush(sc) ? "failed" : "done");
503 /* delete all our child devices */
504 for(i = 0 ; i < AMR_MAXLD; i++) {
505 if(sc->amr_drive[i].al_disk != 0) {
506 if((error = device_delete_child(sc->amr_dev,
507 sc->amr_drive[i].al_disk)) != 0)
510 sc->amr_drive[i].al_disk = 0;
/*
 * Backend for the Linux megaraid ioctl emulation (reached from amr_ioctl's
 * 0xc06e6d00 case).  Handles the info subopcodes (driver version, adapter
 * count), SCSI passthrough (AMR_CMD_PASS), and generic mailbox commands;
 * rejects the 64-bit and extended passthrough variants.
 */
519 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
522 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
523 struct amr_command *ac;
524 struct amr_mailbox *mb;
525 struct amr_linux_ioctl ali;
528 int adapter, len, ac_flags = 0;
529 int logical_drives_changed = 0;
530 u_int32_t linux_version = 0x02100000;
532 struct amr_passthrough *ap; /* 60 bytes */
539 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
541 switch (ali.ui.fcs.opcode) {
/* Info opcodes: report emulated driver version and adapter count to userland. */
543 switch(ali.ui.fcs.subopcode) {
545 copyout(&linux_version, (void *)(uintptr_t)ali.data,
546 sizeof(linux_version));
551 copyout(&linux_no_adapter, (void *)(uintptr_t)ali.data,
552 sizeof(linux_no_adapter));
553 td->td_retval[0] = linux_no_adapter;
558 printf("Unknown subopcode\n");
/* Opcode 0x80 carries explicit in/out lengths; otherwise use the fcs length. */
566 if (ali.ui.fcs.opcode == 0x80)
567 len = max(ali.outlen, ali.inlen);
569 len = ali.ui.fcs.length;
/* Decode the Linux-style adapter number ('m' << 8 XOR adapno encoding). */
571 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
573 ap = malloc(sizeof(struct amr_passthrough),
574 M_DEVBUF, M_WAITOK | M_ZERO);
576 mb = (void *)&ali.mbox[0];
/* Volume create/delete is gated by the allow_volume_configure sysctl. */
578 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
579 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
580 if (sc->amr_allow_vol_config == 0) {
584 logical_drives_changed = 1;
/* SCSI passthrough: pull in the userland passthrough struct and data buffer. */
587 if (ali.mbox[0] == AMR_CMD_PASS) {
588 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
589 sizeof(struct amr_passthrough));
593 if (ap->ap_data_transfer_length)
594 dp = malloc(ap->ap_data_transfer_length, M_DEVBUF,
598 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
599 dp, ap->ap_data_transfer_length);
604 mtx_lock(&sc->amr_list_lock);
/* Block (waking on sc) until a command buffer frees up. */
605 while ((ac = amr_alloccmd(sc)) == NULL)
606 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
608 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
609 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
610 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
611 ac->ac_flags = ac_flags;
614 ac->ac_length = sizeof(struct amr_passthrough);
615 ac->ac_ccb_data = dp;
616 ac->ac_ccb_length = ap->ap_data_transfer_length;
/* Remember the userland buffer address for the copyout below. */
617 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
619 error = amr_wait_command(ac);
620 mtx_unlock(&sc->amr_list_lock);
/* Reflect SCSI status, data, and sense back into the caller's structures. */
624 status = ac->ac_status;
625 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
630 error = copyout(dp, temp, ap->ap_data_transfer_length);
634 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
640 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
641 printf("No AMR_CMD_PASS_64\n");
644 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
645 printf("No AMR_CMD_EXTPASS\n");
/* Generic mailbox command: stage a bounce buffer of 'len' bytes. */
650 dp = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
653 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
658 mtx_lock(&sc->amr_list_lock);
659 while ((ac = amr_alloccmd(sc)) == NULL)
660 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
662 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
663 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
/* Use the caller-supplied mailbox verbatim as the command mailbox. */
664 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
668 ac->ac_flags = ac_flags;
670 error = amr_wait_command(ac);
671 mtx_unlock(&sc->amr_list_lock);
675 status = ac->ac_status;
676 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
678 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, len);
/* A successful create/delete invalidates the drive table — rescan. */
684 if (logical_drives_changed)
685 amr_rescan_drives(dev);
691 debug(1, "unknown linux ioctl 0x%lx", cmd);
692 printf("unknown linux ioctl 0x%lx\n", cmd);
698 * At this point, we know that there is a lock held and that these
699 * objects have been allocated.
701 mtx_lock(&sc->amr_list_lock);
704 mtx_unlock(&sc->amr_list_lock);
/*
 * Native control-device ioctl handler.  Dispatches on the ioctl command:
 * version query, AMR_IO_COMMAND (and its 32-bit-compat variant), or the
 * Linux-emulation command (0xc06e6d00) which is forwarded to
 * amr_linux_ioctl_int() on the addressed adapter.
 */
713 amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
715 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
718 struct amr_user_ioctl *au;
719 #ifdef AMR_IO_COMMAND32
720 struct amr_user_ioctl32 *au32;
724 struct amr_command *ac;
725 struct amr_mailbox_ioctl *mbi;
726 void *dp, *au_buffer;
727 unsigned long au_length;
728 unsigned char *au_cmd;
729 int *au_statusp, au_direction;
730 int error, ac_flags = 0;
731 struct amr_passthrough *ap; /* 60 bytes */
732 int logical_drives_changed = 0;
736 arg._p = (void *)addr;
746 debug(1, "AMR_IO_VERSION");
747 *arg.result = AMR_IO_VERSION_NUMBER;
750 #ifdef AMR_IO_COMMAND32
752 * Accept ioctl-s from 32-bit binaries on non-32-bit
753 * platforms, such as AMD. LSI's MEGAMGR utility is
754 * the only example known today... -mi
756 case AMR_IO_COMMAND32:
757 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
/* Normalize the 32-bit layout into the common au_* locals used below. */
758 au_cmd = arg.au32->au_cmd;
759 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
760 au_length = arg.au32->au_length;
761 au_direction = arg.au32->au_direction;
762 au_statusp = &arg.au32->au_status;
767 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
768 au_cmd = arg.au->au_cmd;
769 au_buffer = (void *)arg.au->au_buffer;
770 au_length = arg.au->au_length;
771 au_direction = arg.au->au_direction;
772 au_statusp = &arg.au->au_status;
776 case 0xc06e6d00: /* Linux emulation */
779 struct amr_linux_ioctl ali;
782 devclass = devclass_find("amr");
783 if (devclass == NULL)
786 error = copyin(addr, &ali, sizeof(ali));
789 if (ali.ui.fcs.opcode == 0x82)
/* Same 'm' << 8 XOR adapter-number decoding as amr_linux_ioctl_int(). */
792 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
794 sc = devclass_get_softc(devclass, adapter);
/* Re-dispatch onto the addressed adapter's control device. */
798 return (amr_linux_ioctl_int(sc->amr_dev_t, cmd,
802 debug(1, "unknown ioctl 0x%lx", cmd);
/* Volume create/delete is gated by the allow_volume_configure sysctl. */
806 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
807 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
808 if (sc->amr_allow_vol_config == 0) {
812 logical_drives_changed = 1;
/* Drain outstanding I/O before a delete is allowed to proceed. */
814 if ((error = amr_prepare_ld_delete(sc)) != 0)
819 /* handle inbound data buffer */
820 if (au_length != 0 && au_cmd[0] != 0x06) {
821 if ((dp = malloc(au_length, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
825 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
829 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
832 /* Allocate this now before the mutex gets held */
833 if (au_cmd[0] == AMR_CMD_PASS)
834 ap = malloc(sizeof(struct amr_passthrough), M_DEVBUF, M_WAITOK|M_ZERO);
836 mtx_lock(&sc->amr_list_lock);
/* Block (waking on sc) until a command buffer frees up. */
837 while ((ac = amr_alloccmd(sc)) == NULL)
838 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
840 /* handle SCSI passthrough command */
841 if (au_cmd[0] == AMR_CMD_PASS) {
846 ap->ap_cdb_length = len;
847 bcopy(au_cmd + 3, ap->ap_cdb, len);
849 /* build passthrough */
/* Control byte at au_cmd[len+3]: bits 0-2 timeout, bit 3 auto-request-sense, bit 7 logical-drive addressing. */
850 ap->ap_timeout = au_cmd[len + 3] & 0x07;
851 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
852 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
853 ap->ap_logical_drive_no = au_cmd[len + 4];
854 ap->ap_channel = au_cmd[len + 5];
855 ap->ap_scsi_id = au_cmd[len + 6];
856 ap->ap_request_sense_length = 14;
857 ap->ap_data_transfer_length = au_length;
858 /* XXX what about the request-sense area? does the caller want it? */
862 ac->ac_length = sizeof(struct amr_passthrough);
863 ac->ac_ccb_data = dp;
864 ac->ac_ccb_length = au_length;
866 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
867 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
870 /* direct command to controller */
871 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
873 /* copy pertinent mailbox items */
874 mbi->mb_command = au_cmd[0];
875 mbi->mb_channel = au_cmd[1];
876 mbi->mb_param = au_cmd[2];
877 mbi->mb_pad[0] = au_cmd[3];
878 mbi->mb_drive = au_cmd[4];
880 /* build the command */
882 ac->ac_length = au_length;
883 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
886 ac->ac_flags = ac_flags;
888 /* run the command */
889 error = amr_wait_command(ac);
890 mtx_unlock(&sc->amr_list_lock);
894 /* copy out data and set status */
895 if (au_length != 0) {
896 error = copyout(dp, au_buffer, au_length);
898 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
900 debug(2, "%16d", (int)dp);
901 *au_statusp = ac->ac_status;
905 * At this point, we know that there is a lock held and that these
906 * objects have been allocated.
908 mtx_lock(&sc->amr_list_lock);
911 mtx_unlock(&sc->amr_list_lock);
/* A successful create/delete invalidates the drive table — rescan. */
918 if (logical_drives_changed)
919 amr_rescan_drives(dev);
925 /********************************************************************************
926 ********************************************************************************
928 ********************************************************************************
929 ********************************************************************************/
931 /********************************************************************************
932 * Perform a periodic check of the controller status
/*
 * Periodic status-check callout.  Currently a stub (the body is XXX'd);
 * amr_startup() also leaves the initial timeout() call commented out, so
 * this routine is effectively disabled.
 */
935 amr_periodic(void *data)
937 struct amr_softc *sc = (struct amr_softc *)data;
941 /* XXX perform periodic status checks here */
943 /* compensate for missed interrupts */
/* Re-arm ourselves one second (hz ticks) out. */
947 sc->amr_timeout = timeout(amr_periodic, sc, hz);
950 /********************************************************************************
951 ********************************************************************************
953 ********************************************************************************
954 ********************************************************************************/
956 /********************************************************************************
957 * Interrogate the controller for the operational parameters we require.
/*
 * Interrogate the controller for operating limits and the logical-drive
 * table.  Tries the 40LD ENQUIRY3/product-info path first; if that fails,
 * falls back to the older 8LD ENQUIRY commands.  Unused drive slots are
 * marked with the 0xffffffff size sentinel consumed by amr_startup().
 */
960 amr_query_controller(struct amr_softc *sc)
962 struct amr_enquiry3 *aex;
963 struct amr_prodinfo *ap;
964 struct amr_enquiry *ae;
969 * If we haven't found the real limit yet, let us have a couple of commands in
970 * order to be able to probe.
972 if (sc->amr_maxio == 0)
976 * Greater than 10 byte cdb support
978 sc->support_ext_cdb = amr_support_ext_cdb(sc);
980 if(sc->support_ext_cdb) {
981 debug(2,"supports extended CDBs.");
985 * Try to issue an ENQUIRY3 command
987 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
988 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
991 * Fetch current state of logical drives.
993 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
994 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
995 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
996 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
997 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
998 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
/* amr_enquiry() transfers buffer ownership to us on success — free it. */
1000 free(aex, M_DEVBUF);
1003 * Get product info for channel count.
1005 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
1006 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
/* 40LD firmware: take limits from the product-info structure. */
1009 sc->amr_maxdrives = 40;
1010 sc->amr_maxchan = ap->ap_nschan;
1011 sc->amr_maxio = ap->ap_maxio;
1012 sc->amr_type |= AMR_TYPE_40LD;
/* Probe for logical-drive-delete support (zero-length enquiry, status only). */
1015 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
1019 sc->amr_ld_del_supported = 1;
1020 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
1024 /* failed, try the 8LD ENQUIRY commands */
1025 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
1026 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
1027 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1030 ae->ae_signature = 0;
1034 * Fetch current state of logical drives.
1036 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1037 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
1038 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
1039 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1040 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1041 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1044 sc->amr_maxdrives = 8;
1045 sc->amr_maxchan = ae->ae_adapter.aa_channels;
1046 sc->amr_maxio = ae->ae_adapter.aa_maxio;
1051 * Mark remaining drives as unused.
1053 for (; ldrv < AMR_MAXLD; ldrv++)
1054 sc->amr_drive[ldrv].al_size = 0xffffffff;
1057 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1058 * the controller's reported value, and lockups have been seen when we do.
1060 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1065 /********************************************************************************
1066 * Run a generic enquiry-style command.
/*
 * Run a generic enquiry-style command via polling (usable before interrupts
 * work).  Returns a malloc'd buffer of 'bufsize' bytes which the caller
 * owns on success; the controller status is reported through *status.
 */
1069 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1071 struct amr_command *ac;
1081 /* get ourselves a command buffer */
1082 mtx_lock(&sc->amr_list_lock);
1083 ac = amr_alloccmd(sc);
1084 mtx_unlock(&sc->amr_list_lock);
1087 /* allocate the response structure */
/* M_NOWAIT: this can run in contexts where sleeping is not permitted. */
1088 if ((result = malloc(bufsize, M_DEVBUF, M_ZERO|M_NOWAIT)) == NULL)
1090 /* set command flags */
1092 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1094 /* point the command at our data */
1095 ac->ac_data = result;
1096 ac->ac_length = bufsize;
1098 /* build the command proper */
1099 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1105 /* can't assume that interrupts are going to work here, so play it safe */
1106 if (sc->amr_poll_command(ac))
1108 error = ac->ac_status;
1109 *status = ac->ac_status;
1112 mtx_lock(&sc->amr_list_lock);
1115 mtx_unlock(&sc->amr_list_lock);
/* On failure the result buffer is not handed to the caller — release it. */
1116 if ((error != 0) && (result != NULL)) {
1117 free(result, M_DEVBUF);
1123 /********************************************************************************
1124 * Flush the controller's internal cache, return status.
/*
 * Flush the controller's write cache, by polling (safe during shutdown).
 * Returns the controller status of the AMR_CMD_FLUSH command.
 */
1127 amr_flush(struct amr_softc *sc)
1129 struct amr_command *ac;
1132 /* get ourselves a command buffer */
1134 mtx_lock(&sc->amr_list_lock);
1135 ac = amr_alloccmd(sc);
1136 mtx_unlock(&sc->amr_list_lock);
1139 /* set command flags */
1140 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1142 /* build the command proper */
1143 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1145 /* we have to poll, as the system may be going down or otherwise damaged */
1146 if (sc->amr_poll_command(ac))
1148 error = ac->ac_status;
1151 mtx_lock(&sc->amr_list_lock);
1154 mtx_unlock(&sc->amr_list_lock);
1158 /********************************************************************************
1159 * Detect extented cdb >> greater than 10 byte cdb support
1160 * returns '1' means this support exist
1161 * returns '0' means this support doesn't exist
/*
 * Probe whether the controller accepts extended (>10 byte) CDBs by issuing
 * the feature-query mailbox command via polling.  Returns 1 if supported,
 * 0 otherwise (see the banner comment above this function).
 */
1164 amr_support_ext_cdb(struct amr_softc *sc)
1166 struct amr_command *ac;
1170 /* get ourselves a command buffer */
1172 mtx_lock(&sc->amr_list_lock);
1173 ac = amr_alloccmd(sc);
1174 mtx_unlock(&sc->amr_list_lock);
1177 /* set command flags */
1178 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1180 /* build the command proper */
1181 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1186 /* we have to poll, as the system may be going down or otherwise damaged */
1187 if (sc->amr_poll_command(ac))
/* SUCCESS status from the probe command indicates extended-CDB support. */
1189 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1194 mtx_lock(&sc->amr_list_lock);
1197 mtx_unlock(&sc->amr_list_lock);
1201 /********************************************************************************
1202 * Try to find I/O work for the controller from one or more of the work queues.
1204 * We make the assumption that if the controller is not ready to take a command
1205 * at some given time, it will generate an interrupt at some later time when
/*
 * Feed the controller: loop pulling work from the ready queue, then the bio
 * queue, then (optionally) CAM, submitting each command until the
 * controller refuses one or no work remains.  Deferred commands are pushed
 * back on the ready queue for the next interrupt-driven retry.
 */
1209 amr_startio(struct amr_softc *sc)
1211 struct amr_command *ac;
1213 /* spin until something prevents us from doing any work */
1216 /* Don't bother to queue commands no bounce buffers are available. */
1217 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1220 /* try to get a ready command */
1221 ac = amr_dequeue_ready(sc);
1223 /* if that failed, build a command from a bio */
1225 (void)amr_bio_command(sc, &ac);
1227 #if AMR_ENABLE_CAM != 0
1228 /* if that failed, build a command from a ccb */
1230 (void)amr_cam_command(sc, &ac);
1233 /* if we don't have anything to do, give up */
1237 /* try to give the command to the controller; if this fails save it for later and give up */
1238 if (amr_start(ac)) {
1239 debug(2, "controller busy, command deferred");
1240 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
1246 /********************************************************************************
1247 * Handle completion of an I/O command.
/*
 * Completion callback for bio-backed commands (installed by
 * amr_bio_command): flag the bio with EIO on controller failure
 * (rate-limiting the console message), then hand it back to the amrd disk
 * layer via amrd_intr().
 */
1250 amr_completeio(struct amr_command *ac)
1252 struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1;
1253 static struct timeval lastfail;
1256 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1257 ac->ac_bio->bio_error = EIO;
1258 ac->ac_bio->bio_flags |= BIO_ERROR;
/* At most one error printf per second, to avoid console floods. */
1260 if (ppsratecheck(&lastfail, &curfail, 1))
1261 device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1262 /* amr_printcommand(ac);*/
1264 amrd_intr(ac->ac_bio);
1265 mtx_lock(&ac->ac_sc->amr_list_lock);
1267 mtx_unlock(&ac->ac_sc->amr_list_lock);
1270 /********************************************************************************
1271 ********************************************************************************
1273 ********************************************************************************
1274 ********************************************************************************/
1276 /********************************************************************************
1277 * Convert a bio off the top of the bio queue into a command.
/*
 * Convert the bio at the head of the queue into an amr_command: pick the
 * (possibly 64-bit S/G) read/write/flush opcode, compute the block range,
 * and fill in the mailbox.  The resulting command is returned via *acp.
 */
1280 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1282 struct amr_command *ac;
1283 struct amrd_softc *amrd;
1294 if ((ac = amr_alloccmd(sc)) == NULL)
1297 /* get a bio to work on */
1298 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1303 /* connect the bio to the command */
1304 ac->ac_complete = amr_completeio;
1306 ac->ac_data = bio->bio_data;
1307 ac->ac_length = bio->bio_bcount;
1309 switch (bio->bio_cmd) {
1311 ac->ac_flags |= AMR_CMD_DATAIN;
/* 64-bit capable controllers use the LREAD64/LWRITE64 opcodes with SG64 lists. */
1312 if (AMR_IS_SG64(sc)) {
1313 cmd = AMR_CMD_LREAD64;
1314 ac->ac_flags |= AMR_CMD_SG64;
1316 cmd = AMR_CMD_LREAD;
1319 ac->ac_flags |= AMR_CMD_DATAOUT;
1320 if (AMR_IS_SG64(sc)) {
1321 cmd = AMR_CMD_LWRITE64;
1322 ac->ac_flags |= AMR_CMD_SG64;
1324 cmd = AMR_CMD_LWRITE;
1327 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1328 cmd = AMR_CMD_FLUSH;
1331 amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
/* Logical drive number = offset of this drive within the softc's table. */
1332 driveno = amrd->amrd_drive - sc->amr_drive;
/* Round the byte count up to whole controller blocks. */
1333 blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1335 ac->ac_mailbox.mb_command = cmd;
1336 if (bio->bio_cmd & (BIO_READ|BIO_WRITE)) {
1337 ac->ac_mailbox.mb_blkcount = blkcount;
1338 ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1339 if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size) {
1340 device_printf(sc->amr_dev,
1341 "I/O beyond end of unit (%lld,%d > %lu)\n",
1342 (long long)bio->bio_pblkno, blkcount,
1343 (u_long)sc->amr_drive[driveno].al_size);
1346 ac->ac_mailbox.mb_drive = driveno;
/* After a create/delete, firmware expects the remap bit in the drive field. */
1347 if (sc->amr_state & AMR_STATE_REMAP_LD)
1348 ac->ac_mailbox.mb_drive |= 0x80;
1350 /* we fill in the s/g related data when the command is mapped */
1356 /********************************************************************************
1357 * Take a command, submit it to the controller and sleep until it completes
1358 * or fails. Interrupts must be enabled, returns nonzero on error.
/*
 * Marks the command for sleeping completion (AMR_CMD_SLEEP), submits it,
 * then msleep()s on the command until AMR_CMD_BUSY clears.  Caller is
 * presumably holding amr_list_lock, which msleep drops — TODO confirm.
 */
1361 amr_wait_command(struct amr_command *ac)
1364 struct amr_softc *sc = ac->ac_sc;
1368 ac->ac_complete = NULL;
1369 ac->ac_flags |= AMR_CMD_SLEEP;
1370 if ((error = amr_start(ac)) != 0) {
/* wait until the completion path wakes us or msleep times out */
1374 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1375 error = msleep(ac,&sc->amr_list_lock, PRIBIO, "amrwcmd", 0);
1381 /********************************************************************************
1382 * Take a command, submit it to the controller and busy-wait for it to return.
1383 * Returns nonzero on error. Can be safely called with interrupts enabled.
/*
 * Polled-mode command execution for "standard"-interface controllers:
 * submit, then spin (bounded to ~1000 iterations) waiting for the
 * interrupt/done path to clear AMR_CMD_BUSY.
 */
1386 amr_std_poll_command(struct amr_command *ac)
1388 struct amr_softc *sc = ac->ac_sc;
1393 ac->ac_complete = NULL;
1394 if ((error = amr_start(ac)) != 0)
1400 * Poll for completion, although the interrupt handler may beat us to it.
1401 * Note that the timeout here is somewhat arbitrary.
1405 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1406 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1409 /* XXX the slot is now marked permanently busy */
1411 device_printf(sc->amr_dev, "polled command timeout\n");
/*
 * bus_dmamap_load() callback for polled commands: build the s/g list
 * (32- or 64-bit flavor), pre-sync the data buffer for the transfer
 * direction, then run the controller-specific polled submit routine.
 */
1417 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1419 struct amr_command *ac = arg;
1420 struct amr_softc *sc = ac->ac_sc;
/* accumulate PRE sync flags from the command's declared direction(s) */
1424 if (ac->ac_flags & AMR_CMD_DATAIN)
1425 flags |= BUS_DMASYNC_PREREAD;
1426 if (ac->ac_flags & AMR_CMD_DATAOUT)
1427 flags |= BUS_DMASYNC_PREWRITE;
1429 if (AC_IS_SG64(ac)) {
1430 amr_setup_dma64map(arg, segs, nsegs, err);
1431 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1433 amr_setup_dmamap(arg, segs, nsegs, err);
1434 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
/* hand off to the per-interface polled submission path */
1436 sc->amr_poll_command1(sc, ac);
1439 /********************************************************************************
1440 * Take a command, submit it to the controller and busy-wait for it to return.
1441 * Returns nonzero on error. Can be safely called with interrupts enabled.
/*
 * Quartz-interface polled execution: select the 32/64-bit DMA tag+map for
 * this command, load the data buffer (callback amr_setup_polled_dmamap does
 * the actual submit), or submit directly when there is no data.
 */
1444 amr_quartz_poll_command(struct amr_command *ac)
1447 bus_dmamap_t datamap;
1448 struct amr_softc *sc = ac->ac_sc;
1455 if (AC_IS_SG64(ac)) {
1456 tag = sc->amr_buffer64_dmat;
1457 datamap = ac->ac_dma64map;
1459 tag = sc->amr_buffer_dmat;
1460 datamap = ac->ac_dmamap;
1463 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1464 if (ac->ac_data != 0) {
/* BUS_DMA_NOWAIT: polled path cannot sleep waiting for bounce resources */
1465 if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1466 amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1470 error = amr_quartz_poll_command1(sc, ac);
/*
 * Low-level Quartz polled submit: with the hardware lock held, wait for the
 * adapter to go idle, write the mailbox by hand, ring the inbound doorbell,
 * and spin on the mailbox status/poll/ack handshake fields until the
 * firmware completes the command.  Returns nonzero on command failure.
 * NOTE(review): sampled excerpt — the DELAY()/spin bodies between the
 * visible while-loops are elided.
 */
1477 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1481 mtx_lock(&sc->amr_hw_lock);
/* if interrupts are disabled, drain all in-flight slots before polling */
1482 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1484 while (sc->amr_busyslots) {
1485 msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
/* adapter still busy: bail out and unload any mapped data buffer */
1491 if(sc->amr_busyslots) {
1492 device_printf(sc->amr_dev, "adapter is busy\n");
1493 mtx_unlock(&sc->amr_hw_lock);
1494 if (ac->ac_data != NULL) {
1496 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1498 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
/* copy the command into the shared mailbox (cast strips volatile) */
1505 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1507 /* clear the poll/ack fields in the mailbox */
1508 sc->amr_mailbox->mb_ident = 0xFE;
1509 sc->amr_mailbox->mb_nstatus = 0xFF;
1510 sc->amr_mailbox->mb_status = 0xFF;
1511 sc->amr_mailbox->mb_poll = 0;
1512 sc->amr_mailbox->mb_ack = 0;
1513 sc->amr_mailbox->mb_busy = 1;
/* ring the doorbell: physical mailbox address + submit bit */
1515 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
/* firmware writes these sentinel fields back when the command is done */
1517 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1519 while(sc->amr_mailbox->mb_status == 0xFF)
1521 ac->ac_status=sc->amr_mailbox->mb_status;
1522 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
/* 0x77 is the firmware's poll/ack handshake magic value */
1523 while(sc->amr_mailbox->mb_poll != 0x77)
1525 sc->amr_mailbox->mb_poll = 0;
1526 sc->amr_mailbox->mb_ack = 0x77;
1528 /* acknowledge that we have the commands */
1529 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1530 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1532 mtx_unlock(&sc->amr_hw_lock);
1534 /* unmap the command's data buffer */
/* NOTE(review): POST syncs below use the 32-bit tag/map even though the
 * unload afterwards distinguishes SG64 — verify against full source */
1535 if (ac->ac_flags & AMR_CMD_DATAIN) {
1536 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1537 BUS_DMASYNC_POSTREAD);
1539 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1540 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1541 BUS_DMASYNC_POSTWRITE);
1544 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1546 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
/*
 * Release the command's controller slot: clear the busy-command table
 * entry and decrement the busy-slot count.  Panics if the slot was not
 * actually marked busy (internal consistency check).
 */
1552 amr_freeslot(struct amr_command *ac)
1554 struct amr_softc *sc = ac->ac_sc;
1560 if (sc->amr_busycmd[slot] == NULL)
1561 panic("amr: slot %d not busy?\n", slot);
1563 sc->amr_busycmd[slot] = NULL;
1564 atomic_subtract_int(&sc->amr_busyslots, 1);
1569 /********************************************************************************
1570 * Map/unmap (ac)'s data in the controller's addressable space as required.
1572 * These functions may be safely called multiple times on a given command.
/*
 * bus_dma callback: fill the mailbox and (if needed) the 32-bit s/g table
 * from the DMA segment list.  Single-segment transfers bypass the s/g table
 * and put the address directly in the mailbox.
 */
1575 amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1577 struct amr_command *ac = (struct amr_command *)arg;
1578 struct amr_sgentry *sg;
1584 /* get base address of s/g table */
1585 sg = ac->ac_sg.sg32;
1587 /* save data physical address */
1589 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1590 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1591 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1592 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1593 sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1595 sgc = &ac->ac_mailbox.mb_nsgelem;
1598 /* decide whether we need to populate the s/g table */
1599 if (nsegments < 2) {
/* single segment: no s/g table, physical address goes in the mailbox */
1601 ac->ac_mailbox.mb_nsgelem = 0;
1602 ac->ac_mailbox.mb_physaddr = segs[0].ds_addr;
1604 ac->ac_mailbox.mb_nsgelem = nsegments;
1606 /* XXX Setting these to 0 might not be needed. */
/* multi-segment: point the mailbox at the per-slot s/g table */
1609 ac->ac_mailbox.mb_physaddr = ac->ac_sgbusaddr;
1610 for (i = 0; i < nsegments; i++, sg++) {
1611 sg->sg_addr = segs[i].ds_addr;
1612 sg->sg_count = segs[i].ds_len;
/*
 * 64-bit counterpart of amr_setup_dmamap(): always populates the 64-bit
 * s/g table and records its bus address in ac_sg64_lo; the mailbox
 * physaddr is set to the 0xffffffff sentinel that tells the firmware to
 * use the 64-bit mailbox extension.
 */
1619 amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1621 struct amr_command *ac = (struct amr_command *)arg;
1622 struct amr_sg64entry *sg;
1628 /* get base address of s/g table */
1629 sg = ac->ac_sg.sg64;
1631 /* save data physical address */
1633 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1634 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1635 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1636 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1637 sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1639 sgc = &ac->ac_mailbox.mb_nsgelem;
1642 ac->ac_mailbox.mb_nsgelem = nsegments;
1645 ac->ac_sg64_lo = ac->ac_sgbusaddr;
/* sentinel physaddr: directs firmware to the sg64 mailbox fields */
1646 ac->ac_mailbox.mb_physaddr = 0xffffffff;
1647 for (i = 0; i < nsegments; i++, sg++) {
1648 sg->sg_addr = segs[i].ds_addr;
1649 sg->sg_count = segs[i].ds_len;
/*
 * bus_dma callback for CAM passthrough (CCB) data buffers, 32-bit flavor.
 * Writes the segment list into the per-slot s/g table and records the
 * count/address in either the extended (AMR_CMD_EXTPASS) or standard
 * passthrough structure, pre-syncs for the CCB direction, then submits.
 * NOTE(review): ac->ac_data points at the passthrough structure here, not
 * user data — both ap and aep alias it and only one is valid per command.
 */
1654 amr_setup_ccbmap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1656 struct amr_command *ac = (struct amr_command *)arg;
1657 struct amr_softc *sc = ac->ac_sc;
1658 struct amr_sgentry *sg;
1659 struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data;
1660 struct amr_ext_passthrough *aep = (struct amr_ext_passthrough *)ac->ac_data;
1663 /* get base address of s/g table */
1664 sg = ac->ac_sg.sg32;
1666 /* decide whether we need to populate the s/g table */
1667 if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1668 if (nsegments < 2) {
/* single segment: transfer address goes straight into the passthrough */
1669 aep->ap_no_sg_elements = 0;
1670 aep->ap_data_transfer_address = segs[0].ds_addr;
1672 /* save s/g table information in passthrough */
1673 aep->ap_no_sg_elements = nsegments;
1674 aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1676 * populate s/g table (overwrites previous call which mapped the
1679 for (i = 0; i < nsegments; i++, sg++) {
1680 sg->sg_addr = segs[i].ds_addr;
1681 sg->sg_count = segs[i].ds_len;
1682 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1685 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1686 aep->ap_no_sg_elements, aep->ap_data_transfer_address);
/* non-EXTPASS: same logic against the standard passthrough layout */
1688 if (nsegments < 2) {
1689 ap->ap_no_sg_elements = 0;
1690 ap->ap_data_transfer_address = segs[0].ds_addr;
1692 /* save s/g table information in passthrough */
1693 ap->ap_no_sg_elements = nsegments;
1694 ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1696 * populate s/g table (overwrites previous call which mapped the
1699 for (i = 0; i < nsegments; i++, sg++) {
1700 sg->sg_addr = segs[i].ds_addr;
1701 sg->sg_count = segs[i].ds_len;
1702 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1705 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1706 ap->ap_no_sg_elements, ap->ap_data_transfer_address);
/* pre-sync the CCB data map for the declared direction(s) */
1708 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1709 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1710 BUS_DMASYNC_PREREAD);
1711 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1712 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1713 BUS_DMASYNC_PREWRITE);
1714 if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1715 panic("no direction for ccb?\n");
/* also pre-sync the passthrough structure itself */
1717 if (ac->ac_flags & AMR_CMD_DATAIN)
1718 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREREAD);
1719 if (ac->ac_flags & AMR_CMD_DATAOUT)
1720 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREWRITE);
1722 ac->ac_flags |= AMR_CMD_MAPPED;
/* controller busy: defer the command and freeze the ready queue */
1724 if (sc->amr_submit_command(ac) == EBUSY) {
1726 amr_requeue_ready(ac);
/*
 * 64-bit counterpart of amr_setup_ccbmap(): same passthrough bookkeeping
 * but against the sg64 table and the 64-bit DMA tag/maps.  Note it always
 * populates the s/g table (no single-segment shortcut, unlike the 32-bit
 * path) — behavior difference preserved as-is.
 */
1731 amr_setup_ccb64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1733 struct amr_command *ac = (struct amr_command *)arg;
1734 struct amr_softc *sc = ac->ac_sc;
1735 struct amr_sg64entry *sg;
1736 struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data;
1737 struct amr_ext_passthrough *aep = (struct amr_ext_passthrough *)ac->ac_data;
1740 /* get base address of s/g table */
1741 sg = ac->ac_sg.sg64;
1743 /* decide whether we need to populate the s/g table */
1744 if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1745 /* save s/g table information in passthrough */
1746 aep->ap_no_sg_elements = nsegments;
1747 aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1749 * populate s/g table (overwrites previous call which mapped the
1752 for (i = 0; i < nsegments; i++, sg++) {
1753 sg->sg_addr = segs[i].ds_addr;
1754 sg->sg_count = segs[i].ds_len;
1755 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1757 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1758 aep->ap_no_sg_elements, aep->ap_data_transfer_address);
1760 /* save s/g table information in passthrough */
1761 ap->ap_no_sg_elements = nsegments;
1762 ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1764 * populate s/g table (overwrites previous call which mapped the
1767 for (i = 0; i < nsegments; i++, sg++) {
1768 sg->sg_addr = segs[i].ds_addr;
1769 sg->sg_count = segs[i].ds_len;
1770 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1772 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1773 ap->ap_no_sg_elements, ap->ap_data_transfer_address);
/* pre-sync the CCB data map for the declared direction(s) */
1775 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1776 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1777 BUS_DMASYNC_PREREAD);
1778 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1779 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1780 BUS_DMASYNC_PREWRITE);
1781 if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1782 panic("no direction for ccb?\n");
/* also pre-sync the passthrough structure itself */
1784 if (ac->ac_flags & AMR_CMD_DATAIN)
1785 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1786 BUS_DMASYNC_PREREAD);
1787 if (ac->ac_flags & AMR_CMD_DATAOUT)
1788 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1789 BUS_DMASYNC_PREWRITE);
1791 ac->ac_flags |= AMR_CMD_MAPPED;
/* controller busy: defer the command and freeze the ready queue */
1793 if (sc->amr_submit_command(ac) == EBUSY) {
1795 amr_requeue_ready(ac);
/*
 * Two-stage map callback (32-bit): first map the passthrough structure
 * via amr_setup_dmamap(), then chain-load the CCB data buffer; submission
 * happens in amr_setup_ccbmap().  EINPROGRESS means the load was deferred,
 * so freeze the queue until bus_dma calls back.
 */
1800 amr_setup_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nsegments,
1803 struct amr_command *ac = (struct amr_command *)arg;
1804 struct amr_softc *sc = ac->ac_sc;
1806 amr_setup_dmamap(arg, segs, nsegments, error);
1808 if (bus_dmamap_load(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1809 ac->ac_ccb_data, ac->ac_ccb_length, amr_setup_ccbmap, ac,
1810 0) == EINPROGRESS) {
1811 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
/*
 * Two-stage map callback (64-bit): mirror of amr_setup_dmamap_cb() using
 * the sg64 table and 64-bit tag/maps; submission happens in
 * amr_setup_ccb64map().
 */
1816 amr_setup_dma64map_cb(void *arg, bus_dma_segment_t *segs, int nsegments,
1819 struct amr_command *ac = (struct amr_command *)arg;
1820 struct amr_softc *sc = ac->ac_sc;
1822 amr_setup_dma64map(arg, segs, nsegments, error);
1824 if (bus_dmamap_load(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1825 ac->ac_ccb_data, ac->ac_ccb_length, amr_setup_ccb64map, ac,
1826 0) == EINPROGRESS) {
1827 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
/*
 * Map a command's data for DMA (if any) and submit it.  Chooses the
 * 32/64-bit tag, map, and callback; plain data commands (no CCB) use
 * amr_setup_data_dmamap, CCB commands use the two-stage *_cb callbacks.
 * Commands with no data are submitted directly; EBUSY requeues and
 * freezes the ready queue.
 */
1832 amr_mapcmd(struct amr_command *ac)
1835 bus_dmamap_t datamap;
1836 bus_dmamap_callback_t *cb;
1837 struct amr_softc *sc = ac->ac_sc;
1841 if (AC_IS_SG64(ac)) {
1842 tag = sc->amr_buffer64_dmat;
1843 datamap = ac->ac_dma64map;
1844 cb = amr_setup_dma64map_cb;
1846 tag = sc->amr_buffer_dmat;
1847 datamap = ac->ac_dmamap;
1848 cb = amr_setup_dmamap_cb;
1851 /* if the command involves data at all, and hasn't been mapped */
1852 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
/* no CCB buffer: the simpler single-stage callback suffices */
1853 if (ac->ac_ccb_data == NULL)
1854 cb = amr_setup_data_dmamap;
1855 /* map the data buffers into bus space and build the s/g list */
1856 if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1857 cb, ac, 0) == EINPROGRESS) {
1858 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
1861 if (sc->amr_submit_command(ac) == EBUSY) {
1863 amr_requeue_ready(ac);
/*
 * Undo amr_mapcmd(): post-sync and unload the data and CCB DMA maps for a
 * mapped command (32- or 64-bit flavor as appropriate), then clear
 * AMR_CMD_MAPPED.
 */
1871 amr_unmapcmd(struct amr_command *ac)
1873 struct amr_softc *sc = ac->ac_sc;
1878 /* if the command involved data at all and was mapped */
1879 if (ac->ac_flags & AMR_CMD_MAPPED) {
1881 if (ac->ac_data != NULL) {
/* build POST sync flags from the command's direction(s) */
1884 if (ac->ac_flags & AMR_CMD_DATAIN)
1885 flag |= BUS_DMASYNC_POSTREAD;
1886 if (ac->ac_flags & AMR_CMD_DATAOUT)
1887 flag |= BUS_DMASYNC_POSTWRITE;
1889 if (AC_IS_SG64(ac)) {
1890 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map, flag);
1891 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1893 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, flag);
1894 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
/* same treatment for the CCB data buffer, if one was mapped */
1898 if (ac->ac_ccb_data != NULL) {
1901 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1902 flag |= BUS_DMASYNC_POSTREAD;
1903 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1904 flag |= BUS_DMASYNC_POSTWRITE;
1906 if (AC_IS_SG64(ac)) {
1907 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_ccb_dma64map,flag);
1908 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map);
1910 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, flag);
1911 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_ccb_dmamap);
1914 ac->ac_flags &= ~AMR_CMD_MAPPED;
/*
 * Single-stage map callback for plain (non-CCB) data commands: build the
 * s/g list, pre-sync the buffer, mark the command mapped, and submit.
 * EBUSY requeues the command and freezes the ready queue.
 */
1919 amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1921 struct amr_command *ac = arg;
1922 struct amr_softc *sc = ac->ac_sc;
1926 if (ac->ac_flags & AMR_CMD_DATAIN)
1927 flags |= BUS_DMASYNC_PREREAD;
1928 if (ac->ac_flags & AMR_CMD_DATAOUT)
1929 flags |= BUS_DMASYNC_PREWRITE;
1931 if (AC_IS_SG64(ac)) {
1932 amr_setup_dma64map(arg, segs, nsegs, err);
1933 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1935 amr_setup_dmamap(arg, segs, nsegs, err);
1936 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
1938 ac->ac_flags |= AMR_CMD_MAPPED;
1940 if (sc->amr_submit_command(ac) == EBUSY) {
1942 amr_requeue_ready(ac);
1946 /********************************************************************************
1947 * Take a command and give it to the controller, returns 0 if successful, or
1948 * EBUSY if the command should be retried later.
/*
 * Marks the command busy, claims a controller slot (recorded in
 * amr_busycmd[], released by amr_freeslot via amr_done), then maps and
 * submits via amr_mapcmd().  ENOMEM from mapping frees the slot so the
 * command can be retried later.
 */
1951 amr_start(struct amr_command *ac)
1953 struct amr_softc *sc;
1959 /* mark command as busy so that polling consumer can tell */
1961 ac->ac_flags |= AMR_CMD_BUSY;
1963 /* get a command slot (freed in amr_done) */
1965 if (sc->amr_busycmd[slot] != NULL)
1966 panic("amr: slot %d busy?\n", slot);
1967 sc->amr_busycmd[slot] = ac;
1968 atomic_add_int(&sc->amr_busyslots, 1);
1970 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1971 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1973 * Memroy resources are short, so free the slot and let this be tried
1982 /********************************************************************************
1983 * Extract one or more completed commands from the controller (sc)
1985 * Returns nonzero if any commands on the work queue were marked as completed.
/*
 * Drains the controller's completion mailbox: for each completed ident,
 * looks up the busy command, records its status, and moves it to the
 * completed queue, then runs amr_complete() to finish them.
 */
1989 amr_done(struct amr_softc *sc)
1991 struct amr_command *ac;
1992 struct amr_mailbox mbox;
1997 /* See if there's anything for us to do */
2000 /* loop collecting completed commands */
2002 /* poll for a completed command's identifier and status */
2003 if (sc->amr_get_work(sc, &mbox)) {
2006 /* iterate over completed commands in this result */
2007 for (i = 0; i < mbox.mb_nstatus; i++) {
2008 /* get pointer to busy command */
/* idents are slot+1 (0 is reserved), so subtract 1 to index the table */
2009 idx = mbox.mb_completed[i] - 1;
2010 ac = sc->amr_busycmd[idx];
2012 /* really a busy command? */
2015 /* pull the command from the busy index */
2018 /* save status for later use */
2019 ac->ac_status = mbox.mb_status;
2020 amr_enqueue_completed(ac);
2021 debug(3, "completed command with status %x", mbox.mb_status);
2023 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
2027 break; /* no work */
2030 /* handle completion and timeouts */
2031 amr_complete(sc, 0);
2036 /********************************************************************************
2037 * Do completion processing on done commands on (sc)
/*
 * Completion worker (also usable as a taskqueue handler, hence the
 * (context, pending) signature): dequeues completed commands, unmaps
 * their data, and either invokes the per-command completion callback or
 * wakes a sleeper waiting in amr_wait_command().  Finally unfreezes the
 * ready queue when the adapter has drained.
 */
2041 amr_complete(void *context, int pending)
2043 struct amr_softc *sc = (struct amr_softc *)context;
2044 struct amr_command *ac;
2048 /* pull completed commands off the queue */
2050 ac = amr_dequeue_completed(sc);
2054 /* unmap the command's data buffer */
2058 * Is there a completion handler?
2060 if (ac->ac_complete != NULL) {
2061 /* unbusy the command */
2062 ac->ac_flags &= ~AMR_CMD_BUSY;
2063 ac->ac_complete(ac);
2066 * Is someone sleeping on this one?
/* clear BUSY under the list lock so the sleeper's re-check is safe */
2069 mtx_lock(&sc->amr_list_lock);
2070 ac->ac_flags &= ~AMR_CMD_BUSY;
2071 if (ac->ac_flags & AMR_CMD_SLEEP) {
2072 /* unbusy the command */
2075 mtx_unlock(&sc->amr_list_lock);
2078 if(!sc->amr_busyslots) {
2083 mtx_lock(&sc->amr_list_lock);
2084 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
2086 mtx_unlock(&sc->amr_list_lock);
2089 /********************************************************************************
2090 ********************************************************************************
2091 Command Buffer Management
2092 ********************************************************************************
2093 ********************************************************************************/
2095 /********************************************************************************
2096 * Get a new command buffer.
2098 * This may return NULL in low-memory cases.
2100 * If possible, we recycle a command buffer that's been used before.
/*
 * Pops a command from the free list, allocating a fresh cluster if the
 * list is empty; on total failure the queue is frozen and NULL returned.
 * Significant fields of a recycled command are reset before return.
 */
2102 struct amr_command *
2103 amr_alloccmd(struct amr_softc *sc)
2105 struct amr_command *ac;
2109 ac = amr_dequeue_free(sc);
/* free list exhausted: grow the pool and retry once */
2111 amr_alloccmd_cluster(sc);
2112 ac = amr_dequeue_free(sc);
2115 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
2119 /* clear out significant fields */
2121 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
2125 ac->ac_ccb_data = NULL;
2126 ac->ac_complete = NULL;
2130 /********************************************************************************
2131 * Release a command buffer for recycling.
/* Returns the command to the free list for reuse by amr_alloccmd(). */
2134 amr_releasecmd(struct amr_command *ac)
2138 amr_enqueue_free(ac);
2141 /********************************************************************************
2142 * Allocate a new command cluster and initialise it.
/*
 * Grows the command pool: allocates one cluster of command structures,
 * assigns each a controller slot, points it at its per-slot region of the
 * shared s/g table (sg32 or sg64 layout), and creates its DMA maps.
 * Respects the controller's amr_maxio slot limit.
 */
2145 amr_alloccmd_cluster(struct amr_softc *sc)
2147 struct amr_command_cluster *acc;
2148 struct amr_command *ac;
2151 if (sc->amr_nextslot > sc->amr_maxio)
2153 acc = malloc(AMR_CMD_CLUSTERSIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
2155 nextslot = sc->amr_nextslot;
2156 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2157 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2158 ac = &acc->acc_command[i];
2160 ac->ac_slot = nextslot;
2163 * The SG table for each slot is a fixed size and is assumed to
2164 * to hold 64-bit s/g objects when the driver is configured to do
2165 * 64-bit DMA. 32-bit DMA commands still use the same table, but
2166 * cast down to 32-bit objects.
2168 if (AMR_IS_SG64(sc)) {
2169 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2170 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2171 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2173 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2174 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2175 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
/* create data + CCB maps; 64-bit maps only when the controller needs them */
2178 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap) ||
2179 bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_ccb_dmamap) ||
2181 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map) ||
2182 bus_dmamap_create(sc->amr_buffer64_dmat, 0, &ac->ac_ccb_dma64map))))
2185 if (++nextslot > sc->amr_maxio)
2188 sc->amr_nextslot = nextslot;
2192 /********************************************************************************
2193 * Free a command cluster
/*
 * Destroys the DMA maps of every command in the cluster and frees the
 * cluster memory.  NOTE(review): sampled excerpt — the braces/conditions
 * around the 64-bit map destruction are partially elided; in this view the
 * ccb_dma64map destroy appears outside the AMR_IS_SG64 guard — verify
 * against the full source.
 */
2196 amr_freecmd_cluster(struct amr_command_cluster *acc)
2198 struct amr_softc *sc = acc->acc_command[0].ac_sc;
2201 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2202 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2203 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_ccb_dmamap);
2204 if (AMR_IS_SG64(sc))
2205 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2206 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_ccb_dma64map);
2208 free(acc, M_DEVBUF);
2211 /********************************************************************************
2212 ********************************************************************************
2213 Interface-specific Shims
2214 ********************************************************************************
2215 ********************************************************************************/
2217 /********************************************************************************
2218 * Tell the controller that the mailbox contains a valid command
/*
 * Quartz-interface submit: briefly spin for the mailbox busy flag (up to
 * 10 iterations), copy the command in, set the sg64 extension fields, and
 * ring the inbound doorbell.  Returns busy to the caller (exact errno
 * elided from this view) if the mailbox never frees up.
 */
2221 amr_quartz_submit_command(struct amr_command *ac)
2223 struct amr_softc *sc = ac->ac_sc;
2226 mtx_lock(&sc->amr_hw_lock);
2227 while (sc->amr_mailbox->mb_busy && (i++ < 10))
2229 if (sc->amr_mailbox->mb_busy) {
2230 mtx_unlock(&sc->amr_hw_lock);
2235 * Save the slot number so that we can locate this command when complete.
2236 * Note that ident = 0 seems to be special, so we don't use it.
2238 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be coppied into mbox */
/* only the first 14 bytes of the mailbox are command payload */
2239 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2240 sc->amr_mailbox->mb_busy = 1;
2241 sc->amr_mailbox->mb_poll = 0;
2242 sc->amr_mailbox->mb_ack = 0;
/* 64-bit s/g address goes in the mailbox extension */
2243 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2244 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2246 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2247 mtx_unlock(&sc->amr_hw_lock);
/*
 * Standard-interface submit: bail if the adapter's mailbox-busy status
 * bit is set, otherwise copy the command into the mailbox and post it
 * via the standard-interface doorbell macro.
 */
2252 amr_std_submit_command(struct amr_command *ac)
2254 struct amr_softc *sc = ac->ac_sc;
2256 mtx_lock(&sc->amr_hw_lock);
2257 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2258 mtx_unlock(&sc->amr_hw_lock);
2263 * Save the slot number so that we can locate this command when complete.
2264 * Note that ident = 0 seems to be special, so we don't use it.
2266 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be coppied into mbox */
/* only the first 14 bytes of the mailbox are command payload */
2267 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2268 sc->amr_mailbox->mb_busy = 1;
2269 sc->amr_mailbox->mb_poll = 0;
2270 sc->amr_mailbox->mb_ack = 0;
2272 AMR_SPOST_COMMAND(sc);
2273 mtx_unlock(&sc->amr_hw_lock);
2277 /********************************************************************************
2278 * Claim any work that the controller has completed; acknowledge completion,
2279 * save details of the completion in (mbsave)
/*
 * Quartz completion harvest: when the outbound doorbell says work is
 * ready, spin-read the completion count and ident list out of the shared
 * mailbox (0xff acts as the "not yet written" sentinel), stash them in
 * *mbsave, and acknowledge via the inbound doorbell.
 */
2282 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2287 u_int8_t completed[46];
2293 /* work waiting for us? */
2294 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2296 /* acknowledge interrupt */
2297 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2299 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2301 sc->amr_mailbox->mb_nstatus = 0xff;
2303 /* wait until fw wrote out all completions */
2304 for (i = 0; i < nstatus; i++) {
2305 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2307 sc->amr_mailbox->mb_completed[i] = 0xff;
2310 /* Save information for later processing */
2311 mbsave->mb_nstatus = nstatus;
2312 mbsave->mb_status = sc->amr_mailbox->mb_status;
2313 sc->amr_mailbox->mb_status = 0xff;
2315 for (i = 0; i < nstatus; i++)
2316 mbsave->mb_completed[i] = completed[i];
2318 /* acknowledge that we have the commands */
2319 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2322 #ifndef AMR_QUARTZ_GOFASTER
2324 * This waits for the controller to notice that we've taken the
2325 * command from it. It's very inefficient, and we shouldn't do it,
2326 * but if we remove this code, we stop completing commands under
2329 * Peter J says we shouldn't do this. The documentation says we
2330 * should. Who is right?
2332 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2333 ; /* XXX aiee! what if it dies? */
2337 worked = 1; /* got some work */
/*
 * Standard-interface completion harvest: on a valid interrupt status,
 * ack it, snapshot the whole mailbox (which holds the completed-command
 * list) into *mbsave, and acknowledge the mailbox to the adapter.
 */
2344 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2353 /* check for valid interrupt status */
2354 istat = AMR_SGET_ISTAT(sc);
2355 if ((istat & AMR_SINTR_VALID) != 0) {
2356 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2358 /* save mailbox, which contains a list of completed commands */
2359 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2361 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2368 /********************************************************************************
2369 * Notify the controller of the mailbox location.
/*
 * Programs the 32-bit physical mailbox address into the adapter one byte
 * at a time (little-endian order), enables mailbox addressing, then clears
 * and enables interrupts.
 */
2372 amr_std_attach_mailbox(struct amr_softc *sc)
2375 /* program the mailbox physical address */
2376 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2377 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2378 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2379 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2380 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2382 /* clear any outstanding interrupt and enable interrupts proper */
2383 AMR_SACK_INTERRUPT(sc);
2384 AMR_SENABLE_INTR(sc);
2387 #ifdef AMR_BOARD_INIT
2388 /********************************************************************************
2389 * Initialise the controller
/*
 * (Compiled only with AMR_BOARD_INIT.)  Polls the Quartz init-status
 * register until AMR_QINIT_DONE, printing each status transition; specific
 * statuses (NOMEM, SCAN) get special handling in the elided switch arms.
 */
2392 amr_quartz_init(struct amr_softc *sc)
2394 int status, ostatus;
2396 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2401 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
/* only log when the status actually changes, to avoid console spam */
2402 if (status != ostatus) {
2403 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2407 case AMR_QINIT_NOMEM:
2410 case AMR_QINIT_SCAN:
2411 /* XXX we could print channel/target here */
/*
 * (Compiled only with AMR_BOARD_INIT.)  Standard-interface analogue of
 * amr_quartz_init(): poll init status until AMR_SINIT_DONE, logging each
 * status change.
 */
2419 amr_std_init(struct amr_softc *sc)
2421 int status, ostatus;
2423 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2428 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
2429 if (status != ostatus) {
2430 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2434 case AMR_SINIT_NOMEM:
2437 case AMR_SINIT_INPROG:
2438 /* XXX we could print channel/target here? */
2446 /********************************************************************************
2447 ********************************************************************************
2449 ********************************************************************************
2450 ********************************************************************************/
2452 /********************************************************************************
2453 * Identify the controller and print some information about it.
/*
 * Identification strategy, in order: 40LD product-info enquiry (best),
 * then 8LD extended enquiry (signature lookup), then plain enquiry with
 * PCI-device-ID guesswork.  HP NetRaid firmware/BIOS version fields are
 * binary rather than ASCII and are printed with a special format.
 */
2456 amr_describe_controller(struct amr_softc *sc)
2458 struct amr_prodinfo *ap;
2459 struct amr_enquiry *ae;
2464 * Try to get 40LD product info, which tells us what the card is labelled as.
2466 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2467 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2468 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2476 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2478 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2479 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2481 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2484 * Try to work it out based on the PCI signatures.
2486 switch (pci_get_device(sc->amr_dev)) {
2488 prod = "Series 428";
2491 prod = "Series 434";
2494 prod = "unknown controller";
2498 device_printf(sc->amr_dev, "<unsupported controller>\n");
2503 * HP NetRaid controllers have a special encoding of the firmware and
2504 * BIOS versions. The AMI version seems to have it as strings whereas
2505 * the HP version does it with a leading uppercase character and two
2509 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2510 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2511 ae->ae_adapter.aa_firmware[1] < ' ' &&
2512 ae->ae_adapter.aa_firmware[0] < ' ' &&
2513 ae->ae_adapter.aa_bios[2] >= 'A' &&
2514 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2515 ae->ae_adapter.aa_bios[1] < ' ' &&
2516 ae->ae_adapter.aa_bios[0] < ' ') {
2518 /* this looks like we have an HP NetRaid version of the MegaRaid */
2520 if(ae->ae_signature == AMR_SIG_438) {
2521 /* the AMI 438 is a NetRaid 3si in HP-land */
2522 prod = "HP NetRaid 3si";
/* HP encoding: letter + two binary digits, e.g. "A.01.02" */
2525 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2526 prod, ae->ae_adapter.aa_firmware[2],
2527 ae->ae_adapter.aa_firmware[1],
2528 ae->ae_adapter.aa_firmware[0],
2529 ae->ae_adapter.aa_bios[2],
2530 ae->ae_adapter.aa_bios[1],
2531 ae->ae_adapter.aa_bios[0],
2532 ae->ae_adapter.aa_memorysize);
/* plain AMI encoding: firmware/BIOS versions are 4-char ASCII strings */
2534 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2535 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2536 ae->ae_adapter.aa_memorysize);
/*
 * Crash-dump write path: synchronously writes (blks) blocks at (lba) to
 * logical drive (unit) using the polled command interface, since
 * interrupts cannot be relied on during a dump.  AMR_STATE_INTEN is
 * toggled around the operation so the polled path drains correctly.
 */
2542 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2544 struct amr_command *ac;
2549 sc->amr_state |= AMR_STATE_INTEN;
2551 /* get ourselves a command buffer */
2552 if ((ac = amr_alloccmd(sc)) == NULL)
2554 /* set command flags */
2555 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2557 /* point the command at our data */
2559 ac->ac_length = blks * AMR_BLKSIZE;
2561 /* build the command proper */
2562 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2563 ac->ac_mailbox.mb_blkcount = blks;
2564 ac->ac_mailbox.mb_lba = lba;
2565 ac->ac_mailbox.mb_drive = unit;
2567 /* can't assume that interrupts are going to work here, so play it safe */
2568 if (sc->amr_poll_command(ac))
2570 error = ac->ac_status;
2576 sc->amr_state &= ~AMR_STATE_INTEN;
2583 /********************************************************************************
2584 * Print the command (ac) in human-readable format
2588 amr_printcommand(struct amr_command *ac)
2590 struct amr_softc *sc = ac->ac_sc;
2591 struct amr_sgentry *sg;
2594 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2595 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2596 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2597 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2598 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2599 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2600 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2601 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2603 /* get base address of s/g table */
2604 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2605 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2606 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);