2 * Copyright (c) 1999,2000 Michael Smith
3 * Copyright (c) 2000 BSDi
4 * Copyright (c) 2005 Scott Long
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 * Copyright (c) 2002 Eric Moore
30 * Copyright (c) 2002, 2004 LSI Logic Corporation
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce the above copyright
39 * notice, this list of conditions and the following disclaimer in the
40 * documentation and/or other materials provided with the distribution.
41 * 3. The party using or redistributing the source code and binary forms
42 * agrees to the disclaimer below and the terms and conditions set forth
45 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
46 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
47 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
48 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
49 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
50 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
51 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
52 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
53 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
54 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
58 #include <sys/cdefs.h>
59 __FBSDID("$FreeBSD$");
62 * Driver for the AMI MegaRaid family of controllers.
65 #include <sys/param.h>
66 #include <sys/systm.h>
67 #include <sys/malloc.h>
68 #include <sys/kernel.h>
70 #include <sys/sysctl.h>
77 #include <machine/bus.h>
78 #include <machine/cpu.h>
79 #include <machine/resource.h>
82 #include <dev/pci/pcireg.h>
83 #include <dev/pci/pcivar.h>
85 #include <dev/amr/amrio.h>
86 #include <dev/amr/amrreg.h>
87 #include <dev/amr/amrvar.h>
88 #define AMR_DEFINE_TABLES
89 #include <dev/amr/amr_tables.h>
92 * The CAM interface appears to be completely broken. Disable it.
94 #ifndef AMR_ENABLE_CAM
95 #define AMR_ENABLE_CAM 0
/*
 * Control-device plumbing: sysctl root for driver tunables plus the
 * character-device switch backing the /dev/amr%d management node.
 * NOTE(review): the cdevsw initializer is incomplete in this view
 * (no .d_open / .d_name entries visible); body continues outside chunk.
 */
98 SYSCTL_NODE(_hw, OID_AUTO, amr, CTLFLAG_RD, 0, "AMR driver parameters");
100 static d_open_t amr_open;
101 static d_close_t amr_close;
102 static d_ioctl_t amr_ioctl;
104 static struct cdevsw amr_cdevsw = {
105 .d_version = D_VERSION,
/* D_NEEDGIANT: ioctl entry points still rely on the Giant lock */
106 .d_flags = D_NEEDGIANT,
108 .d_close = amr_close,
109 .d_ioctl = amr_ioctl,
/*
 * Forward declarations for the driver's internal entry points, grouped by
 * role: attach/startup, command allocation, command processing, periodic
 * status work, and the per-interface (Quartz vs. standard) hardware shims.
 */
114 * Initialisation, bus interface.
116 static void amr_startup(void *arg);
121 static int amr_query_controller(struct amr_softc *sc);
122 static void *amr_enquiry(struct amr_softc *sc, size_t bufsize,
123 u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status);
124 static void amr_completeio(struct amr_command *ac);
125 static int amr_support_ext_cdb(struct amr_softc *sc);
128 * Command buffer allocation.
130 static void amr_alloccmd_cluster(struct amr_softc *sc);
131 static void amr_freecmd_cluster(struct amr_command_cluster *acc);
134 * Command processing.
136 static int amr_bio_command(struct amr_softc *sc, struct amr_command **acp);
/* __unused: kept for ioctl paths that may be compiled out */
137 static int amr_wait_command(struct amr_command *ac) __unused;
138 static int amr_mapcmd(struct amr_command *ac);
139 static void amr_unmapcmd(struct amr_command *ac);
140 static int amr_start(struct amr_command *ac);
141 static void amr_complete(void *context, int pending);
142 static void amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
143 static void amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
144 static void amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error);
149 static void amr_periodic(void *data);
152 * Interface-specific shims
154 static int amr_quartz_submit_command(struct amr_command *ac);
155 static int amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
156 static int amr_quartz_poll_command(struct amr_command *ac);
157 static int amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac);
159 static int amr_std_submit_command(struct amr_command *ac);
160 static int amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave);
161 static int amr_std_poll_command(struct amr_command *ac);
162 static void amr_std_attach_mailbox(struct amr_softc *sc);
164 #ifdef AMR_BOARD_INIT
165 static int amr_quartz_init(struct amr_softc *sc);
166 static int amr_std_init(struct amr_softc *sc);
172 static void amr_describe_controller(struct amr_softc *sc);
175 static void amr_printcommand(struct amr_command *ac);
179 static void amr_init_sysctl(struct amr_softc *sc);
181 /********************************************************************************
182 ********************************************************************************
184 ********************************************************************************
185 ********************************************************************************/
187 /********************************************************************************
188 ********************************************************************************
190 ********************************************************************************
191 ********************************************************************************/
193 /********************************************************************************
194 * Initialise the controller and softc.
/*
 * Sets up queues, binds the interface-specific shims (Quartz memory-mapped
 * vs. standard I/O-port boards), queries controller limits, optionally
 * attaches CAM, creates the control device, and defers drive discovery to
 * amr_startup() via a config intrhook (interrupts not yet enabled here).
 */
197 amr_attach(struct amr_softc *sc)
203 * Initialise per-controller queues.
205 TAILQ_INIT(&sc->amr_completed);
206 TAILQ_INIT(&sc->amr_freecmds);
207 TAILQ_INIT(&sc->amr_cmd_clusters);
208 TAILQ_INIT(&sc->amr_ready);
209 bioq_init(&sc->amr_bioq);
211 debug(2, "queue init done");
214 * Configure for this controller type.
216 if (AMR_IS_QUARTZ(sc)) {
217 sc->amr_submit_command = amr_quartz_submit_command;
218 sc->amr_get_work = amr_quartz_get_work;
219 sc->amr_poll_command = amr_quartz_poll_command;
220 sc->amr_poll_command1 = amr_quartz_poll_command1;
222 sc->amr_submit_command = amr_std_submit_command;
223 sc->amr_get_work = amr_std_get_work;
224 sc->amr_poll_command = amr_std_poll_command;
/* NOTE(review): stray double semicolon below -- harmless but should be ';' */
225 amr_std_attach_mailbox(sc);;
228 #ifdef AMR_BOARD_INIT
/* NOTE(review): parens are unbalanced on the next line (one extra ')');
 * will not compile if AMR_BOARD_INIT is defined -- verify against upstream. */
229 if ((AMR_IS_QUARTZ(sc) ? amr_quartz_init(sc) : amr_std_init(sc))))
234 * Quiz controller for features and limits.
236 if (amr_query_controller(sc))
239 debug(2, "controller query complete");
246 #if AMR_ENABLE_CAM != 0
248 * Attach our 'real' SCSI channels to CAM.
250 if (amr_cam_attach(sc))
252 debug(2, "CAM attach done");
256 * Create the control device.
258 sc->amr_dev_t = make_dev(&amr_cdevsw, device_get_unit(sc->amr_dev), UID_ROOT, GID_OPERATOR,
259 S_IRUSR | S_IWUSR, "amr%d", device_get_unit(sc->amr_dev));
260 sc->amr_dev_t->si_drv1 = sc;
263 * Schedule ourselves to bring the controller up once interrupts are
266 bzero(&sc->amr_ich, sizeof(struct intr_config_hook));
267 sc->amr_ich.ich_func = amr_startup;
268 sc->amr_ich.ich_arg = sc;
269 if (config_intrhook_establish(&sc->amr_ich) != 0) {
270 device_printf(sc->amr_dev, "can't establish configuration hook\n");
275 * Print a little information about the controller.
277 amr_describe_controller(sc);
279 debug(2, "attach complete");
283 /********************************************************************************
284 * Locate disk resources and attach children to them.
/*
 * Config-intrhook callback: runs once interrupts are live. Re-queries the
 * controller, adds an amrd child for each logical drive not yet attached
 * (al_size == 0xffffffff marks the end of the drive table), then clears
 * the shutdown flag and marks interrupts enabled.
 */
287 amr_startup(void *arg)
289 struct amr_softc *sc = (struct amr_softc *)arg;
290 struct amr_logdrive *dr;
295 /* pull ourselves off the intrhook chain */
296 if (sc->amr_ich.ich_func)
297 config_intrhook_disestablish(&sc->amr_ich);
298 sc->amr_ich.ich_func = NULL;
300 /* get up-to-date drive information */
301 if (amr_query_controller(sc)) {
302 device_printf(sc->amr_dev, "can't scan controller for drives\n");
306 /* iterate over available drives */
307 for (i = 0, dr = &sc->amr_drive[0]; (i < AMR_MAXLD) && (dr->al_size != 0xffffffff); i++, dr++) {
308 /* are we already attached to this drive? */
309 if (dr->al_disk == 0) {
310 /* generate geometry information */
/* >1GB (0x200000 sectors): use extended C/H/S translation */
311 if (dr->al_size > 0x200000) { /* extended translation? */
318 dr->al_cylinders = dr->al_size / (dr->al_heads * dr->al_sectors);
320 dr->al_disk = device_add_child(sc->amr_dev, NULL, -1);
321 if (dr->al_disk == 0)
322 device_printf(sc->amr_dev, "device_add_child failed\n");
323 device_set_ivars(dr->al_disk, dr);
327 if ((error = bus_generic_attach(sc->amr_dev)) != 0)
328 device_printf(sc->amr_dev, "bus_generic_attach returned %d\n", error);
330 /* mark controller back up */
331 sc->amr_state &= ~AMR_STATE_SHUTDOWN;
333 /* interrupts will be enabled before we do anything more */
334 sc->amr_state |= AMR_STATE_INTEN;
337 * Start the timeout routine.
/* periodic status polling is disabled; see amr_periodic() */
339 /* sc->amr_timeout = timeout(amr_periodic, sc, hz);*/
/*
 * Register per-device sysctl knobs; allow_volume_configure gates the
 * create/delete-logical-drive ioctl paths (checked via amr_allow_vol_config).
 */
345 amr_init_sysctl(struct amr_softc *sc)
348 SYSCTL_ADD_INT(device_get_sysctl_ctx(sc->amr_dev),
349 SYSCTL_CHILDREN(device_get_sysctl_tree(sc->amr_dev)),
350 OID_AUTO, "allow_volume_configure", CTLFLAG_RW, &sc->amr_allow_vol_config, 0,
355 /*******************************************************************************
356 * Free resources associated with a controller instance
359 amr_free(struct amr_softc *sc)
361 struct amr_command_cluster *acc;
363 #if AMR_ENABLE_CAM != 0
364 /* detach from CAM */
368 /* cancel status timeout */
/* NOTE(review): the matching timeout() call is commented out in
 * amr_startup(); untimeout() on a never-armed handle is a no-op. */
369 untimeout(amr_periodic, sc, sc->amr_timeout);
371 /* throw away any command buffers */
372 while ((acc = TAILQ_FIRST(&sc->amr_cmd_clusters)) != NULL) {
373 TAILQ_REMOVE(&sc->amr_cmd_clusters, acc, acc_link);
374 amr_freecmd_cluster(acc);
377 /* destroy control device */
378 if( sc->amr_dev_t != (struct cdev *)NULL)
379 destroy_dev(sc->amr_dev_t);
/* only tear down mutexes that were actually initialised (partial attach) */
381 if (mtx_initialized(&sc->amr_hw_lock))
382 mtx_destroy(&sc->amr_hw_lock);
384 if (mtx_initialized(&sc->amr_list_lock))
385 mtx_destroy(&sc->amr_list_lock);
388 /*******************************************************************************
389 * Receive a bio structure from a child device and queue it on a particular
390 * disk resource, then poke the disk resource to start as much work as it can.
393 amr_submit_bio(struct amr_softc *sc, struct bio *bio)
/* list lock protects the bio queue shared with amr_bio_command() */
397 mtx_lock(&sc->amr_list_lock);
398 amr_enqueue_bio(sc, bio);
400 mtx_unlock(&sc->amr_list_lock);
404 /********************************************************************************
405 * Accept an open operation on the control device.
408 amr_open(struct cdev *dev, int flags, int fmt, d_thread_t *td)
/* unit number recovers the softc via the devclass table */
410 int unit = minor(dev);
411 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
415 sc->amr_state |= AMR_STATE_OPEN;
/*
 * Post-processing after a delete-logical-drive command: unfreeze the queue,
 * clear the delete-in-progress flag and request a logical-drive remap.
 * NOTE(review): body is heavily elided in this view; the cdevsw_remove()
 * path presumably runs only when the last amrd disk goes away -- confirm.
 */
421 amr_del_ld(struct amr_softc *sc, int drv_no, int status)
426 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
427 sc->amr_state &= ~AMR_STATE_LD_DELETE;
428 sc->amr_state |= AMR_STATE_REMAP_LD;
429 debug(1, "State Set");
432 debug(1, "disk begin destroyed %d",drv_no);
433 if (--amr_disks_registered == 0)
434 cdevsw_remove(&amrddisk_cdevsw);
435 debug(1, "disk begin destroyed success");
/*
 * Freeze the command queue ahead of a delete-logical-drive operation and
 * wait for in-flight commands to drain; bails out early if the controller
 * does not support logical-drive deletion.
 */
441 amr_prepare_ld_delete(struct amr_softc *sc)
445 if (sc->ld_del_supported == 0)
448 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
449 sc->amr_state |= AMR_STATE_LD_DELETE;
/* NOTE(review): comment says 5 minutes but hz * 60 * 1 is one minute --
 * one of the two is stale; confirm intended drain timeout. */
451 /* 5 minutes for the all the commands to be flushed.*/
452 tsleep((void *)&sc->ld_del_supported, PCATCH | PRIBIO,"delete_logical_drv",hz * 60 * 1);
453 if ( sc->amr_busyslots )
460 /********************************************************************************
461 * Accept the last close on the control device.
464 amr_close(struct cdev *dev, int flags, int fmt, d_thread_t *td)
466 int unit = minor(dev);
467 struct amr_softc *sc = devclass_get_softc(devclass_find("amr"), unit);
/* mirror of amr_open(): drop the open flag on last close */
471 sc->amr_state &= ~AMR_STATE_OPEN;
475 /********************************************************************************
476 * Handle controller-specific control operations.
/*
 * Tear down and re-discover logical drives after a configuration change
 * (create/delete volume): drain busy slots, flush the controller cache,
 * delete all amrd children, then (outside this view) re-run discovery.
 */
479 amr_rescan_drives(struct cdev *dev)
481 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
484 sc->amr_state |= AMR_STATE_REMAP_LD;
485 while (sc->amr_busyslots) {
486 device_printf(sc->amr_dev, "idle controller\n");
490 /* mark ourselves as in-shutdown */
491 sc->amr_state |= AMR_STATE_SHUTDOWN;
493 /* flush controller */
494 device_printf(sc->amr_dev, "flushing cache...");
495 printf("%s\n", amr_flush(sc) ? "failed" : "done");
497 /* delete all our child devices */
498 for(i = 0 ; i < AMR_MAXLD; i++) {
499 if(sc->amr_drive[i].al_disk != 0) {
500 if((error = device_delete_child(sc->amr_dev,
501 sc->amr_drive[i].al_disk)) != 0)
504 sc->amr_drive[i].al_disk = 0;
/*
 * Linux megaraid-ioctl emulation. Decodes the struct amr_linux_ioctl
 * copied in from userland, services the "info" opcodes directly
 * (driver version, adapter count), and forwards mailbox commands --
 * including AMR_CMD_PASS SCSI passthroughs -- to the controller.
 * Pointers embedded in the Linux mailbox (mb_physaddr) are USER
 * addresses here and are only ever dereferenced via copyin/copyout.
 */
513 amr_linux_ioctl_int(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag,
516 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
517 struct amr_command *ac;
518 struct amr_mailbox *mb;
519 struct amr_linux_ioctl ali;
522 int adapter, len, ac_flags = 0;
523 int logical_drives_changed = 0;
/* advertised Linux driver version (2.10.0) for compatibility probes */
524 u_int32_t linux_version = 0x02100000;
526 struct amr_passthrough *ap; /* 60 bytes */
533 if ((error = copyin(addr, &ali, sizeof(ali))) != 0)
535 switch (ali.ui.fcs.opcode) {
537 switch(ali.ui.fcs.subopcode) {
539 copyout(&linux_version, (void *)(uintptr_t)ali.data,
540 sizeof(linux_version));
545 copyout(&sc->amr_linux_no_adapters, (void *)(uintptr_t)ali.data,
546 sizeof(sc->amr_linux_no_adapters));
547 td->td_retval[0] = sc->amr_linux_no_adapters;
552 printf("Unknown subopcode\n");
560 if (ali.ui.fcs.opcode == 0x80)
561 len = max(ali.outlen, ali.inlen);
563 len = ali.ui.fcs.length;
/* Linux encodes adapno as ('m'<<8)+index; << binds tighter than ^,
 * so this XOR strips the 'm' tag to recover the adapter index */
565 adapter = (ali.ui.fcs.adapno) ^ 'm' << 8;
567 ap = malloc(sizeof(struct amr_passthrough),
568 M_DEVBUF, M_WAITOK | M_ZERO);
570 mb = (void *)&ali.mbox[0];
/* volume create/delete is gated by the allow_volume_configure sysctl */
572 if ((ali.mbox[0] == FC_DEL_LOGDRV && ali.mbox[2] == OP_DEL_LOGDRV) || /* delete */
573 (ali.mbox[0] == AMR_CMD_CONFIG && ali.mbox[2] == 0x0d)) { /* create */
574 if (sc->amr_allow_vol_config == 0) {
578 logical_drives_changed = 1;
581 if (ali.mbox[0] == AMR_CMD_PASS) {
/* fetch the passthrough descriptor from the user address in the mailbox */
582 error = copyin((void *)(uintptr_t)mb->mb_physaddr, ap,
583 sizeof(struct amr_passthrough));
587 if (ap->ap_data_transfer_length)
588 dp = malloc(ap->ap_data_transfer_length, M_DEVBUF,
592 error = copyin((void *)(uintptr_t)ap->ap_data_transfer_address,
593 dp, ap->ap_data_transfer_length);
/* command buffers are a shared pool; sleep until one frees up */
598 mtx_lock(&sc->amr_list_lock);
599 while ((ac = amr_alloccmd(sc)) == NULL)
600 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
601 mtx_unlock(&sc->amr_list_lock);
603 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
604 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
605 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
606 ac->ac_flags = ac_flags;
609 ac->ac_length = sizeof(struct amr_passthrough);
610 ac->ac_ccb_data = dp;
611 ac->ac_ccb_length = ap->ap_data_transfer_length;
/* remember the user data address for the copyout after completion */
612 temp = (void *)(uintptr_t)ap->ap_data_transfer_address;
614 error = amr_wait_command(ac);
/* reflect SCSI status and sense data back into the user's passthrough */
618 status = ac->ac_status;
619 error = copyout(&status, &((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_scsi_status, sizeof(status));
624 error = copyout(dp, temp, ap->ap_data_transfer_length);
628 error = copyout(ap->ap_request_sense_area, ((struct amr_passthrough *)(uintptr_t)mb->mb_physaddr)->ap_request_sense_area, ap->ap_request_sense_length);
634 } else if (ali.mbox[0] == AMR_CMD_PASS_64) {
635 printf("No AMR_CMD_PASS_64\n");
638 } else if (ali.mbox[0] == AMR_CMD_EXTPASS) {
639 printf("No AMR_CMD_EXTPASS\n");
/* plain mailbox command: stage a bounce buffer of 'len' bytes */
644 dp = malloc(len, M_DEVBUF, M_WAITOK | M_ZERO);
647 error = copyin((void *)(uintptr_t)mb->mb_physaddr, dp, len);
652 mtx_lock(&sc->amr_list_lock);
653 while ((ac = amr_alloccmd(sc)) == NULL)
654 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
655 mtx_unlock(&sc->amr_list_lock);
657 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
658 bzero(&ac->ac_mailbox, sizeof(ac->ac_mailbox));
659 bcopy(&ali.mbox[0], &ac->ac_mailbox, sizeof(ali.mbox));
663 ac->ac_flags = ac_flags;
665 error = amr_wait_command(ac);
669 status = ac->ac_status;
670 error = copyout(&status, &((struct amr_mailbox *)&((struct amr_linux_ioctl *)addr)->mbox[0])->mb_status, sizeof(status));
672 error = copyout(dp, (void *)(uintptr_t)mb->mb_physaddr, len);
/* a successful create/delete invalidates the drive table */
678 if (logical_drives_changed)
679 amr_rescan_drives(dev);
685 debug(1, "unknown linux ioctl 0x%lx", cmd);
686 printf("unknown linux ioctl 0x%lx\n", cmd);
692 * At this point, we know that there is a lock held and that these
693 * objects have been allocated.
695 mtx_lock(&sc->amr_list_lock);
698 mtx_unlock(&sc->amr_list_lock);
/*
 * Native control-device ioctl handler. Supports the AMR_IO_* interface
 * (including the 32-bit compat variant when AMR_IO_COMMAND32 is defined),
 * and punts the Linux megaraid ioctl (0xc06e6d00) to
 * amr_linux_ioctl_int(). User commands are either SCSI passthroughs
 * (AMR_CMD_PASS, built into a struct amr_passthrough) or direct mailbox
 * commands, both run synchronously via amr_wait_command().
 */
707 amr_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, d_thread_t *td)
709 struct amr_softc *sc = (struct amr_softc *)dev->si_drv1;
712 struct amr_user_ioctl *au;
713 #ifdef AMR_IO_COMMAND32
714 struct amr_user_ioctl32 *au32;
718 struct amr_command *ac;
719 struct amr_mailbox_ioctl *mbi;
720 void *dp, *au_buffer;
721 unsigned long au_length;
722 unsigned char *au_cmd;
723 int *au_statusp, au_direction;
724 int error, ac_flags = 0;
725 struct amr_passthrough *ap; /* 60 bytes */
726 int logical_drives_changed = 0;
730 arg._p = (void *)addr;
740 debug(1, "AMR_IO_VERSION");
741 *arg.result = AMR_IO_VERSION_NUMBER;
744 #ifdef AMR_IO_COMMAND32
746 * Accept ioctl-s from 32-bit binaries on non-32-bit
747 * platforms, such as AMD. LSI's MEGAMGR utility is
748 * the only example known today... -mi
750 case AMR_IO_COMMAND32:
751 debug(1, "AMR_IO_COMMAND32 0x%x", arg.au32->au_cmd[0]);
/* normalise the 32-bit layout into the common au_* locals */
752 au_cmd = arg.au32->au_cmd;
753 au_buffer = (void *)(u_int64_t)arg.au32->au_buffer;
754 au_length = arg.au32->au_length;
755 au_direction = arg.au32->au_direction;
756 au_statusp = &arg.au32->au_status;
761 debug(1, "AMR_IO_COMMAND 0x%x", arg.au->au_cmd[0]);
762 au_cmd = arg.au->au_cmd;
763 au_buffer = (void *)arg.au->au_buffer;
764 au_length = arg.au->au_length;
765 au_direction = arg.au->au_direction;
766 au_statusp = &arg.au->au_status;
770 case 0xc06e6d00: /* Linux emulation */
771 return amr_linux_ioctl_int(dev, cmd, addr, flag, td);
775 debug(1, "unknown ioctl 0x%lx", cmd);
/* volume create/delete gated by the allow_volume_configure sysctl */
779 if ((au_cmd[0] == FC_DEL_LOGDRV && au_cmd[1] == OP_DEL_LOGDRV) || /* delete */
780 (au_cmd[0] == AMR_CMD_CONFIG && au_cmd[1] == 0x0d)) { /* create */
781 if (sc->amr_allow_vol_config == 0) {
785 logical_drives_changed = 1;
787 if ((error = amr_prepare_ld_delete(sc)) != 0)
792 /* handle inbound data buffer */
/* 0x06 appears to be exempt from data staging -- confirm which
 * command that opcode is before relying on this */
793 if (au_length != 0 && au_cmd[0] != 0x06) {
794 if ((dp = malloc(au_length, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL) {
798 if ((error = copyin(au_buffer, dp, au_length)) != 0) {
802 debug(2, "copyin %ld bytes from %p -> %p", au_length, au_buffer, dp);
805 /* Allocate this now before the mutex gets held */
806 if (au_cmd[0] == AMR_CMD_PASS)
807 ap = malloc(sizeof(struct amr_passthrough), M_DEVBUF, M_WAITOK|M_ZERO);
809 mtx_lock(&sc->amr_list_lock);
810 while ((ac = amr_alloccmd(sc)) == NULL)
811 msleep(sc, &sc->amr_list_lock, PPAUSE, "amrioc", hz);
812 mtx_unlock(&sc->amr_list_lock);
814 /* handle SCSI passthrough command */
815 if (au_cmd[0] == AMR_CMD_PASS) {
/* CDB bytes follow au_cmd[3]; control byte sits at au_cmd[len+3] */
820 ap->ap_cdb_length = len;
821 bcopy(au_cmd + 3, ap->ap_cdb, len);
823 /* build passthrough */
824 ap->ap_timeout = au_cmd[len + 3] & 0x07;
825 ap->ap_ars = (au_cmd[len + 3] & 0x08) ? 1 : 0;
826 ap->ap_islogical = (au_cmd[len + 3] & 0x80) ? 1 : 0;
827 ap->ap_logical_drive_no = au_cmd[len + 4];
828 ap->ap_channel = au_cmd[len + 5];
829 ap->ap_scsi_id = au_cmd[len + 6];
830 ap->ap_request_sense_length = 14;
831 ap->ap_data_transfer_length = au_length;
832 /* XXX what about the request-sense area? does the caller want it? */
836 ac->ac_length = sizeof(struct amr_passthrough);
837 ac->ac_ccb_data = dp;
838 ac->ac_ccb_length = au_length;
840 ac->ac_mailbox.mb_command = AMR_CMD_PASS;
841 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT|AMR_CMD_CCB_DATAIN|AMR_CMD_CCB_DATAOUT;
844 /* direct command to controller */
845 mbi = (struct amr_mailbox_ioctl *)&ac->ac_mailbox;
847 /* copy pertinent mailbox items */
848 mbi->mb_command = au_cmd[0];
849 mbi->mb_channel = au_cmd[1];
850 mbi->mb_param = au_cmd[2];
851 mbi->mb_pad[0] = au_cmd[3];
852 mbi->mb_drive = au_cmd[4];
854 /* build the command */
856 ac->ac_length = au_length;
857 ac_flags = AMR_CMD_DATAIN|AMR_CMD_DATAOUT;
860 ac->ac_flags = ac_flags;
862 /* run the command */
863 if ((error = amr_wait_command(ac)) != 0)
866 /* copy out data and set status */
867 if (au_length != 0) {
868 error = copyout(dp, au_buffer, au_length);
870 debug(2, "copyout %ld bytes from %p -> %p", au_length, dp, au_buffer);
872 debug(2, "%16d", (int)dp);
873 *au_statusp = ac->ac_status;
877 * At this point, we know that there is a lock held and that these
878 * objects have been allocated.
880 mtx_lock(&sc->amr_list_lock);
883 mtx_unlock(&sc->amr_list_lock);
890 if (logical_drives_changed)
891 amr_rescan_drives(dev);
897 /********************************************************************************
898 ********************************************************************************
900 ********************************************************************************
901 ********************************************************************************/
903 /********************************************************************************
904 * Perform a periodic check of the controller status
/* Currently a stub: re-arms itself but the initial timeout() in
 * amr_startup() is commented out, so this does not run. */
907 amr_periodic(void *data)
909 struct amr_softc *sc = (struct amr_softc *)data;
913 /* XXX perform periodic status checks here */
915 /* compensate for missed interrupts */
919 sc->amr_timeout = timeout(amr_periodic, sc, hz);
922 /********************************************************************************
923 ********************************************************************************
925 ********************************************************************************
926 ********************************************************************************/
928 /********************************************************************************
929 * Interrogate the controller for the operational parameters we require.
/*
 * Probe path: try the 40LD ENQUIRY3 + PRODUCT_INFO interface first; on
 * failure fall back to the legacy 8LD EXT_ENQUIRY2/ENQUIRY commands.
 * Populates amr_drive[], amr_maxdrives, amr_maxchan and amr_maxio, and
 * detects extended-CDB and delete-logical-drive support.
 */
932 amr_query_controller(struct amr_softc *sc)
934 struct amr_enquiry3 *aex;
935 struct amr_prodinfo *ap;
936 struct amr_enquiry *ae;
941 * If we haven't found the real limit yet, let us have a couple of commands in
942 * order to be able to probe.
944 if (sc->amr_maxio == 0)
948 * Greater than 10 byte cdb support
950 sc->support_ext_cdb = amr_support_ext_cdb(sc);
952 if(sc->support_ext_cdb) {
953 debug(2,"supports extended CDBs.");
957 * Try to issue an ENQUIRY3 command
959 if ((aex = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_ENQ3,
960 AMR_CONFIG_ENQ3_SOLICITED_FULL, &status)) != NULL) {
963 * Fetch current state of logical drives.
965 for (ldrv = 0; ldrv < aex->ae_numldrives; ldrv++) {
966 sc->amr_drive[ldrv].al_size = aex->ae_drivesize[ldrv];
967 sc->amr_drive[ldrv].al_state = aex->ae_drivestate[ldrv];
968 sc->amr_drive[ldrv].al_properties = aex->ae_driveprop[ldrv];
969 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
970 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
975 * Get product info for channel count.
977 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) == NULL) {
978 device_printf(sc->amr_dev, "can't obtain product data from controller\n");
981 sc->amr_maxdrives = 40;
982 sc->amr_maxchan = ap->ap_nschan;
983 sc->amr_maxio = ap->ap_maxio;
984 sc->amr_type |= AMR_TYPE_40LD;
/* probe for delete-logical-drive support (zero-length enquiry) */
987 ap = amr_enquiry(sc, 0, FC_DEL_LOGDRV, OP_SUP_DEL_LOGDRV, 0, &status);
991 sc->amr_ld_del_supported = 1;
992 device_printf(sc->amr_dev, "delete logical drives supported by controller\n");
996 /* failed, try the 8LD ENQUIRY commands */
997 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) == NULL) {
998 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) == NULL) {
999 device_printf(sc->amr_dev, "can't obtain configuration data from controller\n");
1002 ae->ae_signature = 0;
1006 * Fetch current state of logical drives.
1008 for (ldrv = 0; ldrv < ae->ae_ldrv.al_numdrives; ldrv++) {
1009 sc->amr_drive[ldrv].al_size = ae->ae_ldrv.al_size[ldrv];
1010 sc->amr_drive[ldrv].al_state = ae->ae_ldrv.al_state[ldrv];
1011 sc->amr_drive[ldrv].al_properties = ae->ae_ldrv.al_properties[ldrv];
1012 debug(2, " drive %d: %d state %x properties %x\n", ldrv, sc->amr_drive[ldrv].al_size,
1013 sc->amr_drive[ldrv].al_state, sc->amr_drive[ldrv].al_properties);
1016 sc->amr_maxdrives = 8;
1017 sc->amr_maxchan = ae->ae_adapter.aa_channels;
1018 sc->amr_maxio = ae->ae_adapter.aa_maxio;
1023 * Mark remaining drives as unused.
/* 0xffffffff is the end-of-table sentinel checked by amr_startup() */
1025 for (; ldrv < AMR_MAXLD; ldrv++)
1026 sc->amr_drive[ldrv].al_size = 0xffffffff;
1029 * Cap the maximum number of outstanding I/Os. AMI's Linux driver doesn't trust
1030 * the controller's reported value, and lockups have been seen when we do.
1032 sc->amr_maxio = imin(sc->amr_maxio, AMR_LIMITCMD);
1037 /********************************************************************************
1038 * Run a generic enquiry-style command.
/*
 * Allocates a bufsize response buffer, issues cmd/cmdsub/cmdqual as a
 * polled high-priority DATAIN command, and returns the buffer on success
 * (caller frees) or NULL on failure; controller status goes to *status.
 */
1041 amr_enquiry(struct amr_softc *sc, size_t bufsize, u_int8_t cmd, u_int8_t cmdsub, u_int8_t cmdqual, int *status)
1043 struct amr_command *ac;
1053 /* get ourselves a command buffer */
1054 mtx_lock(&sc->amr_list_lock);
1055 ac = amr_alloccmd(sc);
1056 mtx_unlock(&sc->amr_list_lock);
1059 /* allocate the response structure */
/* M_NOWAIT: this can run from contexts where sleeping is not safe */
1060 if ((result = malloc(bufsize, M_DEVBUF, M_ZERO|M_NOWAIT)) == NULL)
1062 /* set command flags */
1064 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAIN;
1066 /* point the command at our data */
1067 ac->ac_data = result;
1068 ac->ac_length = bufsize;
1070 /* build the command proper */
1071 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1077 /* can't assume that interrupts are going to work here, so play it safe */
1078 if (sc->amr_poll_command(ac))
1080 error = ac->ac_status;
1081 *status = ac->ac_status;
1084 mtx_lock(&sc->amr_list_lock);
1087 mtx_unlock(&sc->amr_list_lock);
/* on failure the result buffer is useless -- free it and return NULL */
1088 if ((error != 0) && (result != NULL)) {
1089 free(result, M_DEVBUF);
1095 /********************************************************************************
1096 * Flush the controller's internal cache, return status.
1099 amr_flush(struct amr_softc *sc)
1101 struct amr_command *ac;
1104 /* get ourselves a command buffer */
1106 mtx_lock(&sc->amr_list_lock);
1107 ac = amr_alloccmd(sc);
1108 mtx_unlock(&sc->amr_list_lock);
1111 /* set command flags */
1112 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1114 /* build the command proper */
1115 ac->ac_mailbox.mb_command = AMR_CMD_FLUSH;
1117 /* we have to poll, as the system may be going down or otherwise damaged */
1118 if (sc->amr_poll_command(ac))
1120 error = ac->ac_status;
1123 mtx_lock(&sc->amr_list_lock);
1126 mtx_unlock(&sc->amr_list_lock);
1130 /********************************************************************************
1131 * Detect extented cdb >> greater than 10 byte cdb support
1132 * returns '1' means this support exist
1133 * returns '0' means this support doesn't exist
1136 amr_support_ext_cdb(struct amr_softc *sc)
1138 struct amr_command *ac;
1142 /* get ourselves a command buffer */
1144 mtx_lock(&sc->amr_list_lock);
1145 ac = amr_alloccmd(sc);
1146 mtx_unlock(&sc->amr_list_lock);
1149 /* set command flags */
1150 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
1152 /* build the command proper */
/* raw mailbox bytes are poked via this alias; opcode lines elided here */
1153 mbox = (u_int8_t *)&ac->ac_mailbox; /* XXX want a real structure for this? */
1158 /* we have to poll, as the system may be going down or otherwise damaged */
1159 if (sc->amr_poll_command(ac))
/* controller acks the probe => >10-byte CDBs are supported */
1161 if( ac->ac_status == AMR_STATUS_SUCCESS ) {
1166 mtx_lock(&sc->amr_list_lock);
1169 mtx_unlock(&sc->amr_list_lock);
1173 /********************************************************************************
1174 * Try to find I/O work for the controller from one or more of the work queues.
1176 * We make the assumption that if the controller is not ready to take a command
1177 * at some given time, it will generate an interrupt at some later time when
/*
 * Work-dispatch loop: drains the ready queue, then builds commands from
 * queued bios (and CAM ccbs when enabled) and hands them to the hardware,
 * stopping when the queue is frozen, nothing is pending, or amr_start()
 * reports the controller busy.
 */
1181 amr_startio(struct amr_softc *sc)
1183 struct amr_command *ac;
1185 /* spin until something prevents us from doing any work */
1188 /* Don't bother to queue commands no bounce buffers are available. */
1189 if (sc->amr_state & AMR_STATE_QUEUE_FRZN)
1192 /* try to get a ready command */
1193 ac = amr_dequeue_ready(sc);
1195 /* if that failed, build a command from a bio */
1197 (void)amr_bio_command(sc, &ac);
1199 #if AMR_ENABLE_CAM != 0
1200 /* if that failed, build a command from a ccb */
1202 (void)amr_cam_command(sc, &ac);
1205 /* if we don't have anything to do, give up */
1209 /* try to give the command to the controller; if this fails save it for later and give up */
1210 if (amr_start(ac)) {
1211 debug(2, "controller busy, command deferred");
1212 amr_requeue_ready(ac); /* XXX schedule retry very soon? */
1218 /********************************************************************************
1219 * Handle completion of an I/O command.
/*
 * Completion callback for bio-backed commands: marks the bio EIO on
 * controller failure (error reports rate-limited to 1/s via
 * ppsratecheck), then passes it to amrd_intr() for biodone processing.
 */
1222 amr_completeio(struct amr_command *ac)
1224 struct amrd_softc *sc = ac->ac_bio->bio_disk->d_drv1;
1225 static struct timeval lastfail;
1228 if (ac->ac_status != AMR_STATUS_SUCCESS) { /* could be more verbose here? */
1229 ac->ac_bio->bio_error = EIO;
1230 ac->ac_bio->bio_flags |= BIO_ERROR;
1232 if (ppsratecheck(&lastfail, &curfail, 1))
1233 device_printf(sc->amrd_dev, "I/O error - 0x%x\n", ac->ac_status);
1234 /* amr_printcommand(ac);*/
1236 amrd_intr(ac->ac_bio);
1237 mtx_lock(&ac->ac_sc->amr_list_lock);
1239 mtx_unlock(&ac->ac_sc->amr_list_lock);
1242 /********************************************************************************
1243 ********************************************************************************
1245 ********************************************************************************
1246 ********************************************************************************/
1248 /********************************************************************************
1249 * Convert a bio off the top of the bio queue into a command.
/*
 * Pops one bio, allocates a command, and fills the mailbox with the
 * logical read/write opcode (64-bit s/g variants on SG64-capable boards),
 * block count, LBA and drive number. S/G entries are filled later at map
 * time. Returns the built command via *acp.
 */
1252 amr_bio_command(struct amr_softc *sc, struct amr_command **acp)
1254 struct amr_command *ac;
1255 struct amrd_softc *amrd;
1266 if ((ac = amr_alloccmd(sc)) == NULL)
1269 /* get a bio to work on */
1270 if ((bio = amr_dequeue_bio(sc)) == NULL) {
1275 /* connect the bio to the command */
1276 ac->ac_complete = amr_completeio;
1278 ac->ac_data = bio->bio_data;
1279 ac->ac_length = bio->bio_bcount;
1280 if (bio->bio_cmd == BIO_READ) {
1281 ac->ac_flags |= AMR_CMD_DATAIN;
1282 if (AMR_IS_SG64(sc)) {
1283 cmd = AMR_CMD_LREAD64;
1284 ac->ac_flags |= AMR_CMD_SG64;
1286 cmd = AMR_CMD_LREAD;
1288 ac->ac_flags |= AMR_CMD_DATAOUT;
1289 if (AMR_IS_SG64(sc)) {
1290 cmd = AMR_CMD_LWRITE64;
1291 ac->ac_flags |= AMR_CMD_SG64;
1293 cmd = AMR_CMD_LWRITE;
/* drive number = index of this amrd's entry in the softc drive table */
1295 amrd = (struct amrd_softc *)bio->bio_disk->d_drv1;
1296 driveno = amrd->amrd_drive - sc->amr_drive;
/* round byte count up to whole controller blocks */
1297 blkcount = (bio->bio_bcount + AMR_BLKSIZE - 1) / AMR_BLKSIZE;
1299 ac->ac_mailbox.mb_command = cmd;
1300 ac->ac_mailbox.mb_blkcount = blkcount;
1301 ac->ac_mailbox.mb_lba = bio->bio_pblkno;
1302 ac->ac_mailbox.mb_drive = driveno;
/* bit 7 asks the firmware to use the remapped drive numbering */
1303 if (sc->amr_state & AMR_STATE_REMAP_LD)
1304 ac->ac_mailbox.mb_drive |= 0x80;
1306 /* we fill in the s/g related data when the command is mapped */
/* diagnostic only: the request is still issued past end-of-unit */
1308 if ((bio->bio_pblkno + blkcount) > sc->amr_drive[driveno].al_size)
1309 device_printf(sc->amr_dev, "I/O beyond end of unit (%lld,%d > %lu)\n",
1310 (long long)bio->bio_pblkno, blkcount,
1311 (u_long)sc->amr_drive[driveno].al_size);
1317 /********************************************************************************
1318 * Take a command, submit it to the controller and sleep until it completes
1319 * or fails. Interrupts must be enabled, returns nonzero on error.
/*
 * Submit (ac) and tsleep() until it completes; the interrupt path wakes
 * us via the AMR_CMD_SLEEP flag.  Returns nonzero on submission error
 * or timeout (EWOULDBLOCK terminates the wait loop).
 */
1322 amr_wait_command(struct amr_command *ac)
1328 ac->ac_complete = NULL;
1329 ac->ac_flags |= AMR_CMD_SLEEP;
1330 if ((error = amr_start(ac)) != 0) {
/* BUSY is cleared by the completion path; EWOULDBLOCK means we timed out */
1334 while ((ac->ac_flags & AMR_CMD_BUSY) && (error != EWOULDBLOCK)) {
1335 error = tsleep(ac, PRIBIO, "amrwcmd", 0);
1340 /********************************************************************************
1341 * Take a command, submit it to the controller and busy-wait for it to return.
1342 * Returns nonzero on error. Can be safely called with interrupts enabled.
/*
 * Busy-wait poll for command completion on "standard" interface
 * controllers.  Submits (ac) and spins (bounded by ~1000 iterations of
 * the elided delay loop) until AMR_CMD_BUSY clears.
 */
1345 amr_std_poll_command(struct amr_command *ac)
1347 struct amr_softc *sc = ac->ac_sc;
1352 ac->ac_complete = NULL;
1353 if ((error = amr_start(ac)) != 0)
1359 * Poll for completion, although the interrupt handler may beat us to it.
1360 * Note that the timeout here is somewhat arbitrary.
1364 } while ((ac->ac_flags & AMR_CMD_BUSY) && (count++ < 1000));
1365 if (!(ac->ac_flags & AMR_CMD_BUSY)) {
1368 /* XXX the slot is now marked permanently busy */
1370 device_printf(sc->amr_dev, "polled command timeout\n");
/*
 * bus_dmamap_load() callback for polled commands: build the 32- or 64-bit
 * s/g list, pre-sync the data buffer for the transfer direction, then
 * dispatch through the controller-specific polled submit hook.
 */
1376 amr_setup_polled_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1378 struct amr_command *ac = arg;
1379 struct amr_softc *sc = ac->ac_sc;
1383 if (ac->ac_flags & AMR_CMD_DATAIN)
1384 flags |= BUS_DMASYNC_PREREAD;
1385 if (ac->ac_flags & AMR_CMD_DATAOUT)
1386 flags |= BUS_DMASYNC_PREWRITE;
/* pick the s/g builder and DMA tag matching the command's addressing mode */
1388 if (AC_IS_SG64(ac)) {
1389 amr_setup_dma64map(arg, segs, nsegs, err);
1390 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1392 amr_setup_dmamap(arg, segs, nsegs, err);
1393 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
1395 sc->amr_poll_command1(sc, ac);
1398 /********************************************************************************
1399 * Take a command, submit it to the controller and busy-wait for it to return.
1400 * Returns nonzero on error. Can be safely called with interrupts enabled.
/*
 * Polled command entry for Quartz-interface controllers: map the command's
 * data buffer (if any) with the appropriate 32/64-bit tag/map, then run
 * the actual mailbox handshake in amr_quartz_poll_command1().
 */
1403 amr_quartz_poll_command(struct amr_command *ac)
1406 bus_dmamap_t datamap;
1407 struct amr_softc *sc = ac->ac_sc;
1414 if (AC_IS_SG64(ac)) {
1415 tag = sc->amr_buffer64_dmat;
1416 datamap = ac->ac_dma64map;
1418 tag = sc->amr_buffer_dmat;
1419 datamap = ac->ac_dmamap;
1422 /* now we have a slot, we can map the command (unmapped in amr_complete) */
1423 if (ac->ac_data != 0) {
/* NOWAIT: polled path cannot sleep waiting for bounce resources */
1424 if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1425 amr_setup_polled_dmamap, ac, BUS_DMA_NOWAIT) != 0) {
1429 error = amr_quartz_poll_command1(sc, ac);
/*
 * Perform the raw polled mailbox handshake on a Quartz controller:
 * wait for the adapter to go idle, copy the mailbox in, ring the
 * inbound doorbell, spin on the status/poll bytes, then acknowledge.
 * Runs under amr_hw_lock for the duration of the handshake.
 */
1436 amr_quartz_poll_command1(struct amr_softc *sc, struct amr_command *ac)
1440 mtx_lock(&sc->amr_hw_lock);
/* with interrupts disabled we must drain all in-flight slots first */
1441 if ((sc->amr_state & AMR_STATE_INTEN) == 0) {
1443 while (sc->amr_busyslots) {
1444 msleep(sc, &sc->amr_hw_lock, PRIBIO | PCATCH, "amrpoll", hz);
1450 if(sc->amr_busyslots) {
1451 device_printf(sc->amr_dev, "adapter is busy\n");
1452 mtx_unlock(&sc->amr_hw_lock);
/* undo the data mapping done by the caller before bailing out */
1453 if (ac->ac_data != NULL) {
1455 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1457 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1464 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, AMR_MBOX_CMDSIZE);
1466 /* clear the poll/ack fields in the mailbox */
1467 sc->amr_mailbox->mb_ident = 0xFE;
1468 sc->amr_mailbox->mb_nstatus = 0xFF;
1469 sc->amr_mailbox->mb_status = 0xFF;
1470 sc->amr_mailbox->mb_poll = 0;
1471 sc->amr_mailbox->mb_ack = 0;
1472 sc->amr_mailbox->mb_busy = 1;
/* ring the inbound doorbell to hand the mailbox to the firmware */
1474 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
/* firmware overwrites the 0xFF sentinels when the command completes */
1476 while(sc->amr_mailbox->mb_nstatus == 0xFF)
1478 while(sc->amr_mailbox->mb_status == 0xFF)
1480 ac->ac_status=sc->amr_mailbox->mb_status;
1481 error = (ac->ac_status !=AMR_STATUS_SUCCESS) ? 1:0;
/* 0x77 is the firmware's poll/ack handshake magic value */
1482 while(sc->amr_mailbox->mb_poll != 0x77)
1484 sc->amr_mailbox->mb_poll = 0;
1485 sc->amr_mailbox->mb_ack = 0x77;
1487 /* acknowledge that we have the commands */
1488 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_ACK);
1489 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
1491 mtx_unlock(&sc->amr_hw_lock);
1493 /* unmap the command's data buffer */
1494 if (ac->ac_flags & AMR_CMD_DATAIN) {
1495 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1496 BUS_DMASYNC_POSTREAD);
1498 if (ac->ac_flags & AMR_CMD_DATAOUT) {
1499 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,
1500 BUS_DMASYNC_POSTWRITE);
1503 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1505 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
/*
 * Release the controller command slot held by (ac): clear the busy-command
 * table entry and decrement the busy-slot count.  Panics if the slot is
 * not actually marked busy (driver state corruption).
 */
1511 amr_freeslot(struct amr_command *ac)
1513 struct amr_softc *sc = ac->ac_sc;
1519 if (sc->amr_busycmd[slot] == NULL)
1520 panic("amr: slot %d not busy?\n", slot);
1522 sc->amr_busycmd[slot] = NULL;
1523 atomic_subtract_int(&sc->amr_busyslots, 1);
1528 /********************************************************************************
1529 * Map/unmap (ac)'s data in the controller's addressable space as required.
1531 * These functions may be safely called multiple times on a given command.
/*
 * bus_dma callback: populate the 32-bit s/g table for (ac).  For a single
 * segment the physical address goes directly in the mailbox with no s/g
 * table; otherwise the mailbox points at the per-slot s/g table.  NVRAM
 * CONFIG read/write commands keep their segment count in mb_param instead
 * of mb_nsgelem.
 */
1534 amr_setup_dmamap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1536 struct amr_command *ac = (struct amr_command *)arg;
1537 struct amr_sgentry *sg;
1543 /* get base address of s/g table */
1544 sg = ac->ac_sg.sg32;
1546 /* save data physical address */
1548 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1549 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1550 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1551 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1552 sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1554 sgc = &ac->ac_mailbox.mb_nsgelem;
1557 /* decide whether we need to populate the s/g table */
1558 if (nsegments < 2) {
/* single segment: pass the buffer address inline, no s/g table needed */
1560 ac->ac_mailbox.mb_nsgelem = 0;
1561 ac->ac_mailbox.mb_physaddr = segs[0].ds_addr;
1563 ac->ac_mailbox.mb_nsgelem = nsegments;
1565 /* XXX Setting these to 0 might not be needed. */
1568 ac->ac_mailbox.mb_physaddr = ac->ac_sgbusaddr;
1569 for (i = 0; i < nsegments; i++, sg++) {
1570 sg->sg_addr = segs[i].ds_addr;
1571 sg->sg_count = segs[i].ds_len;
/*
 * bus_dma callback: populate the 64-bit s/g table for (ac).  Unlike the
 * 32-bit path, an s/g table is always built; mb_physaddr is set to the
 * 0xffffffff sentinel and the real table address travels via the 64-bit
 * mailbox extension (ac_sg64_lo).  NVRAM CONFIG commands keep their
 * segment count in mb_param.
 */
1578 amr_setup_dma64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1580 struct amr_command *ac = (struct amr_command *)arg;
1581 struct amr_sg64entry *sg;
1587 /* get base address of s/g table */
1588 sg = ac->ac_sg.sg64;
1590 /* save data physical address */
1592 /* for AMR_CMD_CONFIG Read/Write the s/g count goes elsewhere */
1593 if (ac->ac_mailbox.mb_command == AMR_CMD_CONFIG && (
1594 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_READ_NVRAM_CONFIG ||
1595 ((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_channel == AMR_CONFIG_WRITE_NVRAM_CONFIG)) {
1596 sgc = &(((struct amr_mailbox_ioctl *)&ac->ac_mailbox)->mb_param);
1598 sgc = &ac->ac_mailbox.mb_nsgelem;
1601 ac->ac_mailbox.mb_nsgelem = nsegments;
/* sentinel tells the firmware to use the 64-bit mailbox s/g address */
1604 ac->ac_sg64_lo = ac->ac_sgbusaddr;
1605 ac->ac_mailbox.mb_physaddr = 0xffffffff;
1606 for (i = 0; i < nsegments; i++, sg++) {
1607 sg->sg_addr = segs[i].ds_addr;
1608 sg->sg_count = segs[i].ds_len;
/*
 * bus_dma callback for passthrough (CCB) commands, 32-bit addressing:
 * write the s/g list into the per-slot table and record count/address in
 * the passthrough structure (extended or plain, depending on the mailbox
 * command).  After mapping, pre-sync the CCB data buffer and submit the
 * command immediately; if the adapter is busy, requeue it for retry.
 */
1613 amr_setup_ccbmap(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1615 struct amr_command *ac = (struct amr_command *)arg;
1616 struct amr_softc *sc = ac->ac_sc;
1617 struct amr_sgentry *sg;
/* ac_data holds the passthrough structure itself for CCB commands */
1618 struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data;
1619 struct amr_ext_passthrough *aep = (struct amr_ext_passthrough *)ac->ac_data;
1622 /* get base address of s/g table */
1623 sg = ac->ac_sg.sg32;
1625 /* decide whether we need to populate the s/g table */
1626 if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1627 if (nsegments < 2) {
/* single segment: inline address, no s/g table */
1628 aep->ap_no_sg_elements = 0;
1629 aep->ap_data_transfer_address = segs[0].ds_addr;
1631 /* save s/g table information in passthrough */
1632 aep->ap_no_sg_elements = nsegments;
1633 aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1635 * populate s/g table (overwrites previous call which mapped the
1638 for (i = 0; i < nsegments; i++, sg++) {
1639 sg->sg_addr = segs[i].ds_addr;
1640 sg->sg_count = segs[i].ds_len;
1641 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1644 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1645 aep->ap_no_sg_elements, aep->ap_data_transfer_address);
/* same logic for the plain (non-extended) passthrough layout */
1647 if (nsegments < 2) {
1648 ap->ap_no_sg_elements = 0;
1649 ap->ap_data_transfer_address = segs[0].ds_addr;
1651 /* save s/g table information in passthrough */
1652 ap->ap_no_sg_elements = nsegments;
1653 ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1655 * populate s/g table (overwrites previous call which mapped the
1658 for (i = 0; i < nsegments; i++, sg++) {
1659 sg->sg_addr = segs[i].ds_addr;
1660 sg->sg_count = segs[i].ds_len;
1661 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1664 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1665 ap->ap_no_sg_elements, ap->ap_data_transfer_address);
1667 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1668 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1669 BUS_DMASYNC_PREREAD);
1670 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1671 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap,
1672 BUS_DMASYNC_PREWRITE);
/* a CCB with no direction flag is a driver bug */
1673 if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1674 panic("no direction for ccb?\n");
1676 if (ac->ac_flags & AMR_CMD_DATAIN)
1677 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREREAD);
1678 if (ac->ac_flags & AMR_CMD_DATAOUT)
1679 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap,BUS_DMASYNC_PREWRITE);
1681 ac->ac_flags |= AMR_CMD_MAPPED;
1683 if (sc->amr_submit_command(ac) == EBUSY) {
1685 amr_requeue_ready(ac);
/*
 * 64-bit counterpart of amr_setup_ccbmap(): always builds an s/g table
 * (no single-segment inline case), uses the 64-bit DMA tag/maps, then
 * pre-syncs and submits the command, requeueing on EBUSY.
 */
1690 amr_setup_ccb64map(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1692 struct amr_command *ac = (struct amr_command *)arg;
1693 struct amr_softc *sc = ac->ac_sc;
1694 struct amr_sg64entry *sg;
/* ac_data holds the passthrough structure itself for CCB commands */
1695 struct amr_passthrough *ap = (struct amr_passthrough *)ac->ac_data;
1696 struct amr_ext_passthrough *aep = (struct amr_ext_passthrough *)ac->ac_data;
1699 /* get base address of s/g table */
1700 sg = ac->ac_sg.sg64;
1702 /* decide whether we need to populate the s/g table */
1703 if( ac->ac_mailbox.mb_command == AMR_CMD_EXTPASS ) {
1704 /* save s/g table information in passthrough */
1705 aep->ap_no_sg_elements = nsegments;
1706 aep->ap_data_transfer_address = ac->ac_sgbusaddr;
1708 * populate s/g table (overwrites previous call which mapped the
1711 for (i = 0; i < nsegments; i++, sg++) {
1712 sg->sg_addr = segs[i].ds_addr;
1713 sg->sg_count = segs[i].ds_len;
1714 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1716 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1717 aep->ap_no_sg_elements, aep->ap_data_transfer_address);
/* same logic for the plain (non-extended) passthrough layout */
1719 /* save s/g table information in passthrough */
1720 ap->ap_no_sg_elements = nsegments;
1721 ap->ap_data_transfer_address = ac->ac_sgbusaddr;
1723 * populate s/g table (overwrites previous call which mapped the
1726 for (i = 0; i < nsegments; i++, sg++) {
1727 sg->sg_addr = segs[i].ds_addr;
1728 sg->sg_count = segs[i].ds_len;
1729 debug(3, " %d: 0x%x/%d", i, sg->sg_addr, sg->sg_count);
1731 debug(3, "slot %d %d segments at 0x%x\n", ac->ac_slot,
1732 ap->ap_no_sg_elements, ap->ap_data_transfer_address);
1734 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1735 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1736 BUS_DMASYNC_PREREAD);
1737 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1738 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map,
1739 BUS_DMASYNC_PREWRITE);
/* a CCB with no direction flag is a driver bug */
1740 if ((ac->ac_flags & (AMR_CMD_CCB_DATAIN | AMR_CMD_CCB_DATAOUT)) == 0)
1741 panic("no direction for ccb?\n");
1743 if (ac->ac_flags & AMR_CMD_DATAIN)
1744 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1745 BUS_DMASYNC_PREREAD);
1746 if (ac->ac_flags & AMR_CMD_DATAOUT)
1747 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map,
1748 BUS_DMASYNC_PREWRITE);
1750 ac->ac_flags |= AMR_CMD_MAPPED;
1752 if (sc->amr_submit_command(ac) == EBUSY) {
1754 amr_requeue_ready(ac);
/*
 * Map (ac)'s data and/or CCB buffers into bus space, choosing the 32- or
 * 64-bit tag/map/callback set, then submit.  A deferred (EINPROGRESS)
 * load freezes the ready queue; the callback will submit the command
 * when the mapping completes.  Safe to call on an already-mapped command
 * (AMR_CMD_MAPPED short-circuits straight to submission).
 */
1759 amr_mapcmd(struct amr_command *ac)
1762 bus_dmamap_t datamap, ccbmap;
1763 bus_dmamap_callback_t *cb;
1764 bus_dmamap_callback_t *ccb_cb;
1765 struct amr_softc *sc = ac->ac_sc;
/* select the tag/map/callback triple for this command's addressing mode */
1769 if (AC_IS_SG64(ac)) {
1770 tag = sc->amr_buffer64_dmat;
1771 datamap = ac->ac_dma64map;
1772 ccbmap = ac->ac_ccb_dma64map;
1773 cb = amr_setup_dma64map;
1774 ccb_cb = amr_setup_ccb64map;
1776 tag = sc->amr_buffer_dmat;
1777 datamap = ac->ac_dmamap;
1778 ccbmap = ac->ac_ccb_dmamap;
1779 cb = amr_setup_dmamap;
1780 ccb_cb = amr_setup_ccbmap;
1783 /* if the command involves data at all, and hasn't been mapped */
1784 if ((ac->ac_flags & AMR_CMD_MAPPED) == 0 && (ac->ac_data != NULL)) {
1785 if (ac->ac_ccb_data == NULL) {
1786 /* map the data buffers into bus space and build the s/g list */
1787 if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1788 amr_setup_data_dmamap, ac, 0) == EINPROGRESS) {
/* mapping deferred: freeze the queue until the callback runs */
1789 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
/* CCB command: load the data buffer synchronously, then the CCB buffer */
1792 if (bus_dmamap_load(tag, datamap, ac->ac_data, ac->ac_length,
1793 cb, ac, BUS_DMA_NOWAIT) != 0) {
1796 if (bus_dmamap_load(tag, ccbmap, ac->ac_ccb_data,
1797 ac->ac_ccb_length, ccb_cb, ac, 0) == EINPROGRESS) {
1798 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
/* no data to map (or already mapped): submit directly */
1802 if (sc->amr_submit_command(ac) == EBUSY) {
1804 amr_requeue_ready(ac);
/*
 * Undo amr_mapcmd(): post-sync and unload the data and CCB DMA maps
 * (32- or 64-bit variants as appropriate) and clear AMR_CMD_MAPPED.
 * No-op if the command was never mapped.
 */
1812 amr_unmapcmd(struct amr_command *ac)
1814 struct amr_softc *sc = ac->ac_sc;
1819 /* if the command involved data at all and was mapped */
1820 if (ac->ac_flags & AMR_CMD_MAPPED) {
1822 if (ac->ac_data != NULL) {
/* sync direction mirrors the PRE* flags used at map time */
1825 if (ac->ac_flags & AMR_CMD_DATAIN)
1826 flag |= BUS_DMASYNC_POSTREAD;
1827 if (ac->ac_flags & AMR_CMD_DATAOUT)
1828 flag |= BUS_DMASYNC_POSTWRITE;
1830 if (AC_IS_SG64(ac)) {
1831 bus_dmamap_sync(sc->amr_buffer64_dmat, ac->ac_dma64map, flag);
1832 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_dma64map);
1834 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_dmamap, flag);
1835 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_dmamap);
1839 if (ac->ac_ccb_data != NULL) {
1842 if (ac->ac_flags & AMR_CMD_CCB_DATAIN)
1843 flag |= BUS_DMASYNC_POSTREAD;
1844 if (ac->ac_flags & AMR_CMD_CCB_DATAOUT)
1845 flag |= BUS_DMASYNC_POSTWRITE;
1847 if (AC_IS_SG64(ac)) {
1848 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_ccb_dma64map,flag);
1849 bus_dmamap_unload(sc->amr_buffer64_dmat, ac->ac_ccb_dma64map);
1851 bus_dmamap_sync(sc->amr_buffer_dmat, ac->ac_ccb_dmamap, flag);
1852 bus_dmamap_unload(sc->amr_buffer_dmat, ac->ac_ccb_dmamap);
1855 ac->ac_flags &= ~AMR_CMD_MAPPED;
/*
 * bus_dma callback for plain data commands: build the 32- or 64-bit s/g
 * list, pre-sync the buffer, mark the command mapped, and submit it —
 * requeueing on EBUSY.  (Same shape as amr_setup_polled_dmamap, but
 * submits via the normal queued path instead of the polled hook.)
 */
1860 amr_setup_data_dmamap(void *arg, bus_dma_segment_t *segs, int nsegs, int err)
1862 struct amr_command *ac = arg;
1863 struct amr_softc *sc = ac->ac_sc;
1867 if (ac->ac_flags & AMR_CMD_DATAIN)
1868 flags |= BUS_DMASYNC_PREREAD;
1869 if (ac->ac_flags & AMR_CMD_DATAOUT)
1870 flags |= BUS_DMASYNC_PREWRITE;
1872 if (AC_IS_SG64(ac)) {
1873 amr_setup_dma64map(arg, segs, nsegs, err);
1874 bus_dmamap_sync(sc->amr_buffer64_dmat,ac->ac_dma64map, flags);
1876 amr_setup_dmamap(arg, segs, nsegs, err);
1877 bus_dmamap_sync(sc->amr_buffer_dmat,ac->ac_dmamap, flags);
1879 ac->ac_flags |= AMR_CMD_MAPPED;
1881 if (sc->amr_submit_command(ac) == EBUSY) {
1883 amr_requeue_ready(ac);
1887 /********************************************************************************
1888 * Take a command and give it to the controller, returns 0 if successful, or
1889 * EBUSY if the command should be retried later.
/*
 * Give (ac) to the controller: mark it busy, claim a command slot
 * (released in amr_done / amr_freeslot), and map its data.  Returns 0 on
 * success or EBUSY if the caller should retry later.
 */
1892 amr_start(struct amr_command *ac)
1894 struct amr_softc *sc;
1900 /* mark command as busy so that polling consumer can tell */
1902 ac->ac_flags |= AMR_CMD_BUSY;
1904 /* get a command slot (freed in amr_done) */
1906 if (sc->amr_busycmd[slot] != NULL)
1907 panic("amr: slot %d busy?\n", slot);
1908 sc->amr_busycmd[slot] = ac;
1909 atomic_add_int(&sc->amr_busyslots, 1);
1911 /* Now we have a slot, we can map the command (unmapped in amr_complete). */
1912 if ((error = amr_mapcmd(ac)) == ENOMEM) {
1914 * Memory resources are short, so free the slot and let this be tried
1923 /********************************************************************************
1924 * Extract one or more completed commands from the controller (sc)
1926 * Returns nonzero if any commands on the work queue were marked as completed.
/*
 * Harvest completed commands from the controller.  For each completion
 * identifier in the returned mailbox (ident is slot+1, hence the -1),
 * look up the busy command, record its status, and move it to the
 * completed queue; amr_complete() then does the actual post-processing.
 * Returns nonzero if any command was completed.
 */
1930 amr_done(struct amr_softc *sc)
1932 struct amr_command *ac;
1933 struct amr_mailbox mbox;
1938 /* See if there's anything for us to do */
1941 /* loop collecting completed commands */
1943 /* poll for a completed command's identifier and status */
1944 if (sc->amr_get_work(sc, &mbox)) {
1947 /* iterate over completed commands in this result */
1948 for (i = 0; i < mbox.mb_nstatus; i++) {
1949 /* get pointer to busy command */
/* ident 0 is reserved, so idents are slot numbers biased by one */
1950 idx = mbox.mb_completed[i] - 1;
1951 ac = sc->amr_busycmd[idx];
1953 /* really a busy command? */
1956 /* pull the command from the busy index */
1959 /* save status for later use */
1960 ac->ac_status = mbox.mb_status;
1961 amr_enqueue_completed(ac);
1962 debug(3, "completed command with status %x", mbox.mb_status);
1964 device_printf(sc->amr_dev, "bad slot %d completed\n", idx);
1968 break; /* no work */
1971 /* handle completion and timeouts */
1972 amr_complete(sc, 0);
1977 /********************************************************************************
1978 * Do completion processing on done commands on (sc)
/*
 * Completion post-processing (also usable as a taskqueue handler; the
 * `pending` argument follows that signature).  Drain the completed queue:
 * unmap each command, clear its BUSY flag, then either invoke its
 * completion callback or wake a sleeper waiting in amr_wait_command().
 * Finally unfreeze the ready queue and restart command submission.
 */
1982 amr_complete(void *context, int pending)
1984 struct amr_softc *sc = (struct amr_softc *)context;
1985 struct amr_command *ac;
1989 /* pull completed commands off the queue */
1991 ac = amr_dequeue_completed(sc);
1995 /* unmap the command's data buffer */
1998 /* unbusy the command */
1999 ac->ac_flags &= ~AMR_CMD_BUSY;
2002 * Is there a completion handler?
2004 if (ac->ac_complete != NULL) {
2005 ac->ac_complete(ac);
2008 * Is someone sleeping on this one?
2010 } else if (ac->ac_flags & AMR_CMD_SLEEP) {
/* wake anyone (e.g. polled path) waiting for the adapter to go idle */
2014 if(!sc->amr_busyslots) {
2019 mtx_lock(&sc->amr_list_lock);
2020 sc->amr_state &= ~AMR_STATE_QUEUE_FRZN;
2022 mtx_unlock(&sc->amr_list_lock);
2025 /********************************************************************************
2026 ********************************************************************************
2027 Command Buffer Management
2028 ********************************************************************************
2029 ********************************************************************************/
2031 /********************************************************************************
2032 * Get a new command buffer.
2034 * This may return NULL in low-memory cases.
2036 * If possible, we recycle a command buffer that's been used before.
2038 struct amr_command *
/*
 * Get a command buffer, recycling from the free list when possible and
 * allocating a new cluster otherwise.  Returns NULL (and freezes the
 * queue) when memory is short.  Significant fields are cleared so the
 * caller gets a pristine command.
 */
2039 amr_alloccmd(struct amr_softc *sc)
2041 struct amr_command *ac;
2045 ac = amr_dequeue_free(sc);
/* free list empty: grow the pool and retry once */
2047 amr_alloccmd_cluster(sc);
2048 ac = amr_dequeue_free(sc);
2051 sc->amr_state |= AMR_STATE_QUEUE_FRZN;
2055 /* clear out significant fields */
2057 bzero(&ac->ac_mailbox, sizeof(struct amr_mailbox));
2061 ac->ac_ccb_data = NULL;
2062 ac->ac_complete = NULL;
2066 /********************************************************************************
2067 * Release a command buffer for recycling.
/* Return a command buffer to the free list for recycling. */
2070 amr_releasecmd(struct amr_command *ac)
2074 amr_enqueue_free(ac);
2077 /********************************************************************************
2078 * Allocate a new command cluster and initialise it.
/*
 * Allocate and initialise a new cluster of command buffers: assign each
 * command a controller slot, point it at its fixed-size region of the
 * shared s/g table (64-bit entries when SG64 is configured), and create
 * its DMA maps.  Stops when amr_maxio slots have been handed out.
 */
2081 amr_alloccmd_cluster(struct amr_softc *sc)
2083 struct amr_command_cluster *acc;
2084 struct amr_command *ac;
2087 if (sc->amr_nextslot > sc->amr_maxio)
2089 acc = malloc(AMR_CMD_CLUSTERSIZE, M_DEVBUF, M_NOWAIT | M_ZERO);
2091 nextslot = sc->amr_nextslot;
2092 TAILQ_INSERT_TAIL(&sc->amr_cmd_clusters, acc, acc_link);
2093 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2094 ac = &acc->acc_command[i];
2096 ac->ac_slot = nextslot;
2099 * The SG table for each slot is a fixed size and is assumed to
2100 * to hold 64-bit s/g objects when the driver is configured to do
2101 * 64-bit DMA. 32-bit DMA commands still use the same table, but
2102 * cast down to 32-bit objects.
2104 if (AMR_IS_SG64(sc)) {
2105 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2106 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sg64entry));
2107 ac->ac_sg.sg64 = sc->amr_sg64table + (ac->ac_slot * AMR_NSEG);
2109 ac->ac_sgbusaddr = sc->amr_sgbusaddr +
2110 (ac->ac_slot * AMR_NSEG * sizeof(struct amr_sgentry));
2111 ac->ac_sg.sg32 = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
/* any map-creation failure abandons this command (error path elided) */
2114 if (bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_dmamap) ||
2115 bus_dmamap_create(sc->amr_buffer_dmat, 0, &ac->ac_ccb_dmamap) ||
2117 (bus_dmamap_create(sc->amr_buffer64_dmat, 0,&ac->ac_dma64map) ||
2118 bus_dmamap_create(sc->amr_buffer64_dmat, 0, &ac->ac_ccb_dma64map))))
2121 if (++nextslot > sc->amr_maxio)
2124 sc->amr_nextslot = nextslot;
2128 /********************************************************************************
2129 * Free a command cluster
/*
 * Tear down a command cluster: destroy each command's 32-bit DMA maps
 * (and the 64-bit maps on SG64 controllers), then free the cluster
 * memory.
 */
2132 amr_freecmd_cluster(struct amr_command_cluster *acc)
2134 struct amr_softc *sc = acc->acc_command[0].ac_sc;
2137 for (i = 0; i < AMR_CMD_CLUSTERCOUNT; i++) {
2138 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_dmamap);
2139 bus_dmamap_destroy(sc->amr_buffer_dmat, acc->acc_command[i].ac_ccb_dmamap);
2140 if (AMR_IS_SG64(sc))
2141 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_dma64map);
2142 bus_dmamap_destroy(sc->amr_buffer64_dmat, acc->acc_command[i].ac_ccb_dma64map);
2144 free(acc, M_DEVBUF);
2147 /********************************************************************************
2148 ********************************************************************************
2149 Interface-specific Shims
2150 ********************************************************************************
2151 ********************************************************************************/
2153 /********************************************************************************
2154 * Tell the controller that the mailbox contains a valid command
/*
 * Quartz-interface submit: wait briefly for the mailbox to go idle,
 * copy the command in (first 14 bytes), set the busy flag, load the
 * 64-bit s/g address registers, and ring the inbound doorbell.
 * Returns busy to the caller if the mailbox never frees up.
 */
2157 amr_quartz_submit_command(struct amr_command *ac)
2159 struct amr_softc *sc = ac->ac_sc;
2162 mtx_lock(&sc->amr_hw_lock);
/* bounded spin: give the firmware a short window to release the mailbox */
2163 while (sc->amr_mailbox->mb_busy && (i++ < 10))
2165 if (sc->amr_mailbox->mb_busy) {
2166 mtx_unlock(&sc->amr_hw_lock);
2171 * Save the slot number so that we can locate this command when complete.
2172 * Note that ident = 0 seems to be special, so we don't use it.
2174 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2175 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2176 sc->amr_mailbox->mb_busy = 1;
2177 sc->amr_mailbox->mb_poll = 0;
2178 sc->amr_mailbox->mb_ack = 0;
2179 sc->amr_mailbox64->sg64_hi = ac->ac_sg64_hi;
2180 sc->amr_mailbox64->sg64_lo = ac->ac_sg64_lo;
2182 AMR_QPUT_IDB(sc, sc->amr_mailboxphys | AMR_QIDB_SUBMIT);
2183 mtx_unlock(&sc->amr_hw_lock);
/*
 * Standard-interface submit: bail out immediately if the mailbox busy
 * flag is set, otherwise copy the command in (first 14 bytes), mark the
 * mailbox busy, and post the command via the interface macro.
 */
2188 amr_std_submit_command(struct amr_command *ac)
2190 struct amr_softc *sc = ac->ac_sc;
2192 mtx_lock(&sc->amr_hw_lock);
2193 if (AMR_SGET_MBSTAT(sc) & AMR_SMBOX_BUSYFLAG) {
2194 mtx_unlock(&sc->amr_hw_lock);
2199 * Save the slot number so that we can locate this command when complete.
2200 * Note that ident = 0 seems to be special, so we don't use it.
2202 ac->ac_mailbox.mb_ident = ac->ac_slot + 1; /* will be copied into mbox */
2203 bcopy(&ac->ac_mailbox, (void *)(uintptr_t)(volatile void *)sc->amr_mailbox, 14);
2204 sc->amr_mailbox->mb_busy = 1;
2205 sc->amr_mailbox->mb_poll = 0;
2206 sc->amr_mailbox->mb_ack = 0;
2208 AMR_SPOST_COMMAND(sc);
2209 mtx_unlock(&sc->amr_hw_lock);
2213 /********************************************************************************
2214 * Claim any work that the controller has completed; acknowledge completion,
2215 * save details of the completion in (mbsave)
/*
 * Collect completion information from a Quartz controller.  If the
 * outbound doorbell signals ready: acknowledge it, spin until the
 * firmware has written the completion count and each completed ident
 * (0xff is the "not yet written" sentinel), copy everything into
 * (mbsave), then acknowledge consumption via the inbound doorbell.
 * Returns nonzero when work was collected.
 */
2218 amr_quartz_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2223 u_int8_t completed[46];
2229 /* work waiting for us? */
2230 if ((outd = AMR_QGET_ODB(sc)) == AMR_QODB_READY) {
2232 /* acknowledge interrupt */
2233 AMR_QPUT_ODB(sc, AMR_QODB_READY);
2235 while ((nstatus = sc->amr_mailbox->mb_nstatus) == 0xff)
2237 sc->amr_mailbox->mb_nstatus = 0xff;
2239 /* wait until fw wrote out all completions */
2240 for (i = 0; i < nstatus; i++) {
2241 while ((completed[i] = sc->amr_mailbox->mb_completed[i]) == 0xff)
2243 sc->amr_mailbox->mb_completed[i] = 0xff;
2246 /* Save information for later processing */
2247 mbsave->mb_nstatus = nstatus;
2248 mbsave->mb_status = sc->amr_mailbox->mb_status;
2249 sc->amr_mailbox->mb_status = 0xff;
2251 for (i = 0; i < nstatus; i++)
2252 mbsave->mb_completed[i] = completed[i];
2254 /* acknowledge that we have the commands */
2255 AMR_QPUT_IDB(sc, AMR_QIDB_ACK);
2258 #ifndef AMR_QUARTZ_GOFASTER
2260 * This waits for the controller to notice that we've taken the
2261 * command from it. It's very inefficient, and we shouldn't do it,
2262 * but if we remove this code, we stop completing commands under
2265 * Peter J says we shouldn't do this. The documentation says we
2266 * should. Who is right?
2268 while(AMR_QGET_IDB(sc) & AMR_QIDB_ACK)
2269 ; /* XXX aiee! what if it dies? */
2273 worked = 1; /* got some work */
/*
 * Collect completion information from a standard-interface controller:
 * if the interrupt status is valid, acknowledge it, snapshot the whole
 * mailbox (which lists completed commands) into (mbsave), and ack the
 * mailbox to the adapter.
 */
2280 amr_std_get_work(struct amr_softc *sc, struct amr_mailbox *mbsave)
2289 /* check for valid interrupt status */
2290 istat = AMR_SGET_ISTAT(sc);
2291 if ((istat & AMR_SINTR_VALID) != 0) {
2292 AMR_SPUT_ISTAT(sc, istat); /* ack interrupt status */
2294 /* save mailbox, which contains a list of completed commands */
2295 bcopy((void *)(uintptr_t)(volatile void *)sc->amr_mailbox, mbsave, sizeof(*mbsave));
2297 AMR_SACK_INTERRUPT(sc); /* acknowledge we have the mailbox */
2304 /********************************************************************************
2305 * Notify the controller of the mailbox location.
/*
 * Program the mailbox physical address into the standard-interface
 * controller, byte by byte (little-endian), enable mailbox addressing,
 * then clear any stale interrupt and enable interrupt delivery.
 */
2308 amr_std_attach_mailbox(struct amr_softc *sc)
2311 /* program the mailbox physical address */
2312 AMR_SBYTE_SET(sc, AMR_SMBOX_0, sc->amr_mailboxphys & 0xff);
2313 AMR_SBYTE_SET(sc, AMR_SMBOX_1, (sc->amr_mailboxphys >> 8) & 0xff);
2314 AMR_SBYTE_SET(sc, AMR_SMBOX_2, (sc->amr_mailboxphys >> 16) & 0xff);
2315 AMR_SBYTE_SET(sc, AMR_SMBOX_3, (sc->amr_mailboxphys >> 24) & 0xff);
2316 AMR_SBYTE_SET(sc, AMR_SMBOX_ENABLE, AMR_SMBOX_ADDR);
2318 /* clear any outstanding interrupt and enable interrupts proper */
2319 AMR_SACK_INTERRUPT(sc);
2320 AMR_SENABLE_INTR(sc);
2323 #ifdef AMR_BOARD_INIT
2324 /********************************************************************************
2325 * Initialise the controller
/*
 * (AMR_BOARD_INIT only) Spin until the Quartz controller reports
 * initialisation complete, logging each distinct status-code transition
 * via amr_describe_code().
 */
2328 amr_quartz_init(struct amr_softc *sc)
2330 int status, ostatus;
2332 device_printf(sc->amr_dev, "initial init status %x\n", AMR_QGET_INITSTATUS(sc));
2337 while ((status = AMR_QGET_INITSTATUS(sc)) != AMR_QINIT_DONE) {
/* only log when the status byte changes to avoid spamming the console */
2338 if (status != ostatus) {
2339 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_qinit, status));
2343 case AMR_QINIT_NOMEM:
2346 case AMR_QINIT_SCAN:
2347 /* XXX we could print channel/target here */
/*
 * (AMR_BOARD_INIT only) Standard-interface counterpart of
 * amr_quartz_init(): poll the init status register until done, logging
 * each distinct transition.
 */
2355 amr_std_init(struct amr_softc *sc)
2357 int status, ostatus;
2359 device_printf(sc->amr_dev, "initial init status %x\n", AMR_SGET_INITSTATUS(sc));
2364 while ((status = AMR_SGET_INITSTATUS(sc)) != AMR_SINIT_DONE) {
/* only log when the status byte changes to avoid spamming the console */
2365 if (status != ostatus) {
2366 device_printf(sc->amr_dev, "(%x) %s\n", status, amr_describe_code(amr_table_sinit, status));
2370 case AMR_SINIT_NOMEM:
2373 case AMR_SINIT_INPROG:
2374 /* XXX we could print channel/target here? */
2382 /********************************************************************************
2383 ********************************************************************************
2385 ********************************************************************************
2386 ********************************************************************************/
2388 /********************************************************************************
2389 * Identify the controller and print some information about it.
/*
 * Identify the controller and print a one-line description.  Tries, in
 * order: 40LD product info, 8LD extended enquiry (signature lookup),
 * plain enquiry with PCI-ID guessing.  Special-cases HP NetRaid
 * firmware/BIOS version encoding (leading uppercase letter + two binary
 * bytes rather than printable strings).
 */
2392 amr_describe_controller(struct amr_softc *sc)
2394 struct amr_prodinfo *ap;
2395 struct amr_enquiry *ae;
2400 * Try to get 40LD product info, which tells us what the card is labelled as.
2402 if ((ap = amr_enquiry(sc, 2048, AMR_CMD_CONFIG, AMR_CONFIG_PRODUCT_INFO, 0, &status)) != NULL) {
2403 device_printf(sc->amr_dev, "<LSILogic %.80s> Firmware %.16s, BIOS %.16s, %dMB RAM\n",
2404 ap->ap_product, ap->ap_firmware, ap->ap_bios,
2412 * Try 8LD extended ENQUIRY to get controller signature, and use lookup table.
2414 if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_EXT_ENQUIRY2, 0, 0, &status)) != NULL) {
2415 prod = amr_describe_code(amr_table_adaptertype, ae->ae_signature);
2417 } else if ((ae = (struct amr_enquiry *)amr_enquiry(sc, 2048, AMR_CMD_ENQUIRY, 0, 0, &status)) != NULL) {
2420 * Try to work it out based on the PCI signatures.
2422 switch (pci_get_device(sc->amr_dev)) {
2424 prod = "Series 428";
2427 prod = "Series 434";
2430 prod = "unknown controller";
2434 device_printf(sc->amr_dev, "<unsupported controller>\n");
2439 * HP NetRaid controllers have a special encoding of the firmware and
2440 * BIOS versions. The AMI version seems to have it as strings whereas
2441 * the HP version does it with a leading uppercase character and two
/* heuristic: letter in [2], non-printable bytes in [0..1] => HP encoding */
2445 if(ae->ae_adapter.aa_firmware[2] >= 'A' &&
2446 ae->ae_adapter.aa_firmware[2] <= 'Z' &&
2447 ae->ae_adapter.aa_firmware[1] < ' ' &&
2448 ae->ae_adapter.aa_firmware[0] < ' ' &&
2449 ae->ae_adapter.aa_bios[2] >= 'A' &&
2450 ae->ae_adapter.aa_bios[2] <= 'Z' &&
2451 ae->ae_adapter.aa_bios[1] < ' ' &&
2452 ae->ae_adapter.aa_bios[0] < ' ') {
2454 /* this looks like we have an HP NetRaid version of the MegaRaid */
2456 if(ae->ae_signature == AMR_SIG_438) {
2457 /* the AMI 438 is a NetRaid 3si in HP-land */
2458 prod = "HP NetRaid 3si";
2461 device_printf(sc->amr_dev, "<%s> Firmware %c.%02d.%02d, BIOS %c.%02d.%02d, %dMB RAM\n",
2462 prod, ae->ae_adapter.aa_firmware[2],
2463 ae->ae_adapter.aa_firmware[1],
2464 ae->ae_adapter.aa_firmware[0],
2465 ae->ae_adapter.aa_bios[2],
2466 ae->ae_adapter.aa_bios[1],
2467 ae->ae_adapter.aa_bios[0],
2468 ae->ae_adapter.aa_memorysize);
2470 device_printf(sc->amr_dev, "<%s> Firmware %.4s, BIOS %.4s, %dMB RAM\n",
2471 prod, ae->ae_adapter.aa_firmware, ae->ae_adapter.aa_bios,
2472 ae->ae_adapter.aa_memorysize);
/*
 * Write (blks) 512-byte blocks from (data) at (lba) on logical drive
 * (unit), using the polled path — intended for crash-dump context where
 * interrupts may not be available.  INTEN is toggled around the call so
 * the polled path knows interrupts are nominally enabled.  Returns the
 * command status as the error code.
 */
2478 amr_dump_blocks(struct amr_softc *sc, int unit, u_int32_t lba, void *data, int blks)
2480 struct amr_command *ac;
2485 sc->amr_state |= AMR_STATE_INTEN;
2487 /* get ourselves a command buffer */
2488 if ((ac = amr_alloccmd(sc)) == NULL)
2490 /* set command flags */
2491 ac->ac_flags |= AMR_CMD_PRIORITY | AMR_CMD_DATAOUT;
2493 /* point the command at our data */
2495 ac->ac_length = blks * AMR_BLKSIZE;
2497 /* build the command proper */
2498 ac->ac_mailbox.mb_command = AMR_CMD_LWRITE;
2499 ac->ac_mailbox.mb_blkcount = blks;
2500 ac->ac_mailbox.mb_lba = lba;
2501 ac->ac_mailbox.mb_drive = unit;
2503 /* can't assume that interrupts are going to work here, so play it safe */
2504 if (sc->amr_poll_command(ac))
2506 error = ac->ac_status;
2512 sc->amr_state &= ~AMR_STATE_INTEN;
2519 /********************************************************************************
2520 * Print the command (ac) in human-readable format
2524 amr_printcommand(struct amr_command *ac)
2526 struct amr_softc *sc = ac->ac_sc;
2527 struct amr_sgentry *sg;
2530 device_printf(sc->amr_dev, "cmd %x ident %d drive %d\n",
2531 ac->ac_mailbox.mb_command, ac->ac_mailbox.mb_ident, ac->ac_mailbox.mb_drive);
2532 device_printf(sc->amr_dev, "blkcount %d lba %d\n",
2533 ac->ac_mailbox.mb_blkcount, ac->ac_mailbox.mb_lba);
2534 device_printf(sc->amr_dev, "virtaddr %p length %lu\n", ac->ac_data, (unsigned long)ac->ac_length);
2535 device_printf(sc->amr_dev, "sg physaddr %08x nsg %d\n",
2536 ac->ac_mailbox.mb_physaddr, ac->ac_mailbox.mb_nsgelem);
2537 device_printf(sc->amr_dev, "ccb %p bio %p\n", ac->ac_ccb_data, ac->ac_bio);
2539 /* get base address of s/g table */
2540 sg = sc->amr_sgtable + (ac->ac_slot * AMR_NSEG);
2541 for (i = 0; i < ac->ac_mailbox.mb_nsgelem; i++, sg++)
2542 device_printf(sc->amr_dev, " %x/%d\n", sg->sg_addr, sg->sg_count);