2 * Copyright (c) 1999 Michael Smith
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Driver for the Mylex DAC960 family of RAID controllers.
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/malloc.h>
36 #include <sys/kernel.h>
42 #include <machine/resource.h>
43 #include <machine/bus.h>
44 #include <machine/clock.h>
47 #include <geom/geom_disk.h>
49 #include <dev/mlx/mlx_compat.h>
50 #include <dev/mlx/mlxio.h>
51 #include <dev/mlx/mlxvar.h>
52 #include <dev/mlx/mlxreg.h>
/*
 * Character-device switch for the controller's control node ("mlx%d",
 * created in mlx_attach).  D_NEEDGIANT: the cdev entry points still run
 * under Giant rather than a driver lock.
 */
54 static struct cdevsw mlx_cdevsw = {
55 .d_version = D_VERSION,
56 .d_flags = D_NEEDGIANT,
/*
 * Non-static so code outside this file can look up softcs by unit
 * (presumably the bus front-end attachments and mlxd) — see the
 * devclass_get_softc() calls in mlx_open/mlx_close/mlx_ioctl below.
 */
63 devclass_t mlx_devclass;
/*
 * Forward declarations for the driver's internal interfaces.  The v3/v4/v5
 * accessor sets correspond to the three hardware interface generations
 * selected on sc->mlx_iftype in mlx_attach().
 */
66 * Per-interface accessor methods
68 static int mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
69 static int mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status);
70 static void mlx_v3_intaction(struct mlx_softc *sc, int action);
71 static int mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2);
73 static int mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
74 static int mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status);
75 static void mlx_v4_intaction(struct mlx_softc *sc, int action);
76 static int mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2);
78 static int mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc);
79 static int mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status);
80 static void mlx_v5_intaction(struct mlx_softc *sc, int action);
81 static int mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2);
/* Periodic status polling and event-log drain (scheduled via timeout(9)). */
86 static void mlx_periodic(void *data);
87 static void mlx_periodic_enquiry(struct mlx_command *mc);
88 static void mlx_periodic_eventlog_poll(struct mlx_softc *sc);
89 static void mlx_periodic_eventlog_respond(struct mlx_command *mc);
90 static void mlx_periodic_rebuild(struct mlx_command *mc);
95 static void mlx_pause_action(struct mlx_softc *sc);
96 static void mlx_pause_done(struct mlx_command *mc);
/* Command issue helpers: enquiries, cache flush, check/rebuild kickoff. */
101 static void *mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize,
102 void (*complete)(struct mlx_command *mc));
103 static int mlx_flush(struct mlx_softc *sc);
104 static int mlx_check(struct mlx_softc *sc, int drive);
105 static int mlx_rebuild(struct mlx_softc *sc, int channel, int target);
106 static int mlx_wait_command(struct mlx_command *mc);
107 static int mlx_poll_command(struct mlx_command *mc);
108 void mlx_startio_cb(void *arg,
109 bus_dma_segment_t *segs,
110 int nsegments, int error);
111 static void mlx_startio(struct mlx_softc *sc);
112 static void mlx_completeio(struct mlx_command *mc);
113 static int mlx_user_command(struct mlx_softc *sc,
114 struct mlx_usercommand *mu);
115 void mlx_user_cb(void *arg, bus_dma_segment_t *segs,
116 int nsegments, int error);
119 * Command buffer allocation.
121 static struct mlx_command *mlx_alloccmd(struct mlx_softc *sc);
122 static void mlx_releasecmd(struct mlx_command *mc);
123 static void mlx_freecmd(struct mlx_command *mc);
126 * Command management.
128 static int mlx_getslot(struct mlx_command *mc);
129 static void mlx_setup_dmamap(struct mlx_command *mc,
130 bus_dma_segment_t *segs,
131 int nsegments, int error);
132 static void mlx_unmapcmd(struct mlx_command *mc);
133 static int mlx_start(struct mlx_command *mc);
134 static int mlx_done(struct mlx_softc *sc);
135 static void mlx_complete(struct mlx_softc *sc);
/* Diagnostics and utility. */
140 static char *mlx_diagnose_command(struct mlx_command *mc);
141 static void mlx_describe_controller(struct mlx_softc *sc);
142 static int mlx_fw_message(struct mlx_softc *sc, int status, int param1, int param2);
147 static struct mlx_sysdrive *mlx_findunit(struct mlx_softc *sc, int unit);
149 /********************************************************************************
150 ********************************************************************************
152 ********************************************************************************
153 ********************************************************************************/
155 /********************************************************************************
156 * Free all of the resources associated with (sc)
158 * Should not be called if the controller is active.
161 mlx_free(struct mlx_softc *sc)
163 struct mlx_command *mc;
167 /* cancel status timeout */
168 untimeout(mlx_periodic, sc, sc->mlx_timeout);
/*
 * Drain the free-command list; each popped command is presumably
 * released with mlx_freecmd() — loop body not fully visible here.
 */
170 /* throw away any command buffers */
171 while ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL) {
172 TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link);
176 /* destroy data-transfer DMA tag */
177 if (sc->mlx_buffer_dmat)
178 bus_dma_tag_destroy(sc->mlx_buffer_dmat);
180 /* free and destroy DMA memory and tag for s/g lists */
182 bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap);
184 bus_dma_tag_destroy(sc->mlx_sg_dmat);
186 /* disconnect the interrupt handler */
188 bus_teardown_intr(sc->mlx_dev, sc->mlx_irq, sc->mlx_intr);
189 if (sc->mlx_irq != NULL)
190 bus_release_resource(sc->mlx_dev, SYS_RES_IRQ, 0, sc->mlx_irq);
192 /* destroy the parent DMA tag */
193 if (sc->mlx_parent_dmat)
194 bus_dma_tag_destroy(sc->mlx_parent_dmat);
196 /* release the register window mapping */
197 if (sc->mlx_mem != NULL)
198 bus_release_resource(sc->mlx_dev, sc->mlx_mem_type, sc->mlx_mem_rid, sc->mlx_mem);
200 /* free controller enquiry data */
201 if (sc->mlx_enq2 != NULL)
202 free(sc->mlx_enq2, M_DEVBUF);
/* Remove the "mlx%d" control node created by mlx_attach(). */
204 /* destroy control device */
205 if (sc->mlx_dev_t != (struct cdev *)NULL)
206 destroy_dev(sc->mlx_dev_t);
209 /********************************************************************************
210 * Map the scatter/gather table into bus space
/*
 * bus_dmamap_load() callback for the scatter/gather table mapping.
 * Records the bus address of the first (and, given the tag is created
 * with nsegments = 1 in mlx_sglist_map(), only) segment.
 */
213 mlx_dma_map_sg(void *arg, bus_dma_segment_t *segs, int nseg, int error)
215 struct mlx_softc *sc = (struct mlx_softc *)arg;
219 /* save base of s/g table's address in bus space */
220 sc->mlx_sgbusaddr = segs->ds_addr;
224 mlx_sglist_map(struct mlx_softc *sc)
/*
 * Called twice during attach: once before ENQUIRY2 (mlx_enq2 == NULL, so
 * a conservative command count is used) and again afterwards with the
 * controller's real me_max_commands.
 */
231 /* destroy any existing mappings */
233 bus_dmamem_free(sc->mlx_sg_dmat, sc->mlx_sgtable, sc->mlx_sg_dmamap);
235 bus_dma_tag_destroy(sc->mlx_sg_dmat);
238 * Create a single tag describing a region large enough to hold all of
239 * the s/g lists we will need. If we're called early on, we don't know how
240 * many commands we're going to be asked to support, so only allocate enough
243 if (sc->mlx_enq2 == NULL) {
246 ncmd = sc->mlx_enq2->me_max_commands;
248 segsize = sizeof(struct mlx_sgentry) * MLX_NSEG * ncmd;
249 error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */
250 1, 0, /* alignment,boundary */
251 BUS_SPACE_MAXADDR, /* lowaddr */
252 BUS_SPACE_MAXADDR, /* highaddr */
253 NULL, NULL, /* filter, filterarg */
254 segsize, 1, /* maxsize, nsegments */
255 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
257 NULL, NULL, /* lockfunc, lockarg */
260 device_printf(sc->mlx_dev, "can't allocate scatter/gather DMA tag\n");
265 * Allocate enough s/g maps for all commands and permanently map them into
266 * controller-visible space.
268 * XXX this assumes we can get enough space for all the s/g maps in one
269 * contiguous slab. We may need to switch to a more complex arrangement
270 * where we allocate in smaller chunks and keep a lookup table from slot
273 error = bus_dmamem_alloc(sc->mlx_sg_dmat, (void **)&sc->mlx_sgtable,
274 BUS_DMA_NOWAIT, &sc->mlx_sg_dmamap);
276 device_printf(sc->mlx_dev, "can't allocate s/g table\n");
/* Permanent load; mlx_dma_map_sg() stashes the bus address in mlx_sgbusaddr. */
279 (void)bus_dmamap_load(sc->mlx_sg_dmat, sc->mlx_sg_dmamap, sc->mlx_sgtable,
280 segsize, mlx_dma_map_sg, sc, 0);
284 /********************************************************************************
285 * Initialise the controller and softc
288 mlx_attach(struct mlx_softc *sc)
290 struct mlx_enquiry_old *meo;
291 int rid, error, fwminor, hscode, hserror, hsparam1, hsparam2, hsmsg;
296 * Initialise per-controller queues.
298 TAILQ_INIT(&sc->mlx_work);
299 TAILQ_INIT(&sc->mlx_freecmds);
300 MLX_BIO_QINIT(sc->mlx_bioq);
303 * Select accessor methods based on controller interface type.
305 switch(sc->mlx_iftype) {
308 sc->mlx_tryqueue = mlx_v3_tryqueue;
309 sc->mlx_findcomplete = mlx_v3_findcomplete;
310 sc->mlx_intaction = mlx_v3_intaction;
311 sc->mlx_fw_handshake = mlx_v3_fw_handshake;
314 sc->mlx_tryqueue = mlx_v4_tryqueue;
315 sc->mlx_findcomplete = mlx_v4_findcomplete;
316 sc->mlx_intaction = mlx_v4_intaction;
317 sc->mlx_fw_handshake = mlx_v4_fw_handshake;
320 sc->mlx_tryqueue = mlx_v5_tryqueue;
321 sc->mlx_findcomplete = mlx_v5_findcomplete;
322 sc->mlx_intaction = mlx_v5_intaction;
323 sc->mlx_fw_handshake = mlx_v5_fw_handshake;
326 return(ENXIO); /* should never happen */
329 /* disable interrupts before we start talking to the controller */
330 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE);
333 * Wait for the controller to come ready, handshake with the firmware if required.
334 * This is typically only necessary on platforms where the controller BIOS does not
/* Non-zero handshake codes are decoded/acted on by mlx_fw_message(). */
339 while ((hscode = sc->mlx_fw_handshake(sc, &hserror, &hsparam1, &hsparam2)) != 0) {
340 /* report first time around... */
342 device_printf(sc->mlx_dev, "controller initialisation in progress...\n");
345 /* did we get a real message? */
347 hscode = mlx_fw_message(sc, hserror, hsparam1, hsparam2);
348 /* fatal initialisation error? */
355 device_printf(sc->mlx_dev, "initialisation complete.\n");
358 * Allocate and connect our interrupt.
361 sc->mlx_irq = bus_alloc_resource_any(sc->mlx_dev, SYS_RES_IRQ, &rid,
362 RF_SHAREABLE | RF_ACTIVE);
363 if (sc->mlx_irq == NULL) {
364 device_printf(sc->mlx_dev, "can't allocate interrupt\n");
367 error = bus_setup_intr(sc->mlx_dev, sc->mlx_irq, INTR_TYPE_BIO | INTR_ENTROPY, NULL, mlx_intr, sc, &sc->mlx_intr);
369 device_printf(sc->mlx_dev, "can't set up interrupt\n");
374 * Create DMA tag for mapping buffers into controller-addressable space.
376 error = bus_dma_tag_create(sc->mlx_parent_dmat, /* parent */
377 1, 0, /* align, boundary */
378 BUS_SPACE_MAXADDR, /* lowaddr */
379 BUS_SPACE_MAXADDR, /* highaddr */
380 NULL, NULL, /* filter, filterarg */
381 MAXBSIZE, MLX_NSEG, /* maxsize, nsegments */
382 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
384 busdma_lock_mutex, /* lockfunc */
385 &Giant, /* lockarg */
386 &sc->mlx_buffer_dmat);
388 device_printf(sc->mlx_dev, "can't allocate buffer DMA tag\n");
393 * Create some initial scatter/gather mappings so we can run the probe
396 error = mlx_sglist_map(sc);
398 device_printf(sc->mlx_dev, "can't make initial s/g list mapping\n");
403 * We don't (yet) know where the event log is up to.
405 sc->mlx_currevent = -1;
408 * Obtain controller feature information
410 if ((sc->mlx_enq2 = mlx_enquire(sc, MLX_CMD_ENQUIRY2, sizeof(struct mlx_enquiry2), NULL)) == NULL) {
411 device_printf(sc->mlx_dev, "ENQUIRY2 failed\n");
416 * Do quirk/feature related things.
418 fwminor = (sc->mlx_enq2->me_firmware_id >> 8) & 0xff;
419 switch(sc->mlx_iftype) {
421 /* These controllers don't report the firmware version in the ENQUIRY2 response */
422 if ((meo = mlx_enquire(sc, MLX_CMD_ENQUIRY_OLD, sizeof(struct mlx_enquiry_old), NULL)) == NULL) {
423 device_printf(sc->mlx_dev, "ENQUIRY_OLD failed\n");
/* Synthesise a me_firmware_id in the same layout newer firmware reports. */
426 sc->mlx_enq2->me_firmware_id = ('0' << 24) | (0 << 16) | (meo->me_fwminor << 8) | meo->me_fwmajor;
428 /* XXX require 2.42 or better (PCI) or 2.14 or better (EISA) */
429 if (meo->me_fwminor < 42) {
430 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
431 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 2.42 or later\n");
436 /* XXX certify 3.52? */
438 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
439 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 3.51 or later\n");
443 /* XXX certify firmware versions? */
445 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
446 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 4.06 or later\n");
451 device_printf(sc->mlx_dev, " *** WARNING *** This firmware revision is not recommended\n");
452 device_printf(sc->mlx_dev, " *** WARNING *** Use revision 5.07 or later\n");
456 return(ENXIO); /* should never happen */
460 * Create the final scatter/gather mappings now that we have characterised the controller.
462 error = mlx_sglist_map(sc);
464 device_printf(sc->mlx_dev, "can't make final s/g list mapping\n");
469 * No user-requested background operation is in progress.
471 sc->mlx_background = 0;
472 sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE;
475 * Create the control device.
477 sc->mlx_dev_t = make_dev(&mlx_cdevsw, device_get_unit(sc->mlx_dev), UID_ROOT, GID_OPERATOR,
478 S_IRUSR | S_IWUSR, "mlx%d", device_get_unit(sc->mlx_dev));
481 * Start the timeout routine.
483 sc->mlx_timeout = timeout(mlx_periodic, sc, hz);
485 /* print a little information about the controller */
486 mlx_describe_controller(sc);
491 /********************************************************************************
492 * Locate disk resources and attach children to them.
495 mlx_startup(struct mlx_softc *sc)
497 struct mlx_enq_sys_drive *mes;
498 struct mlx_sysdrive *dr;
504 * Scan all the system drives and attach children for those that
505 * don't currently have them.
507 mes = mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(*mes) * MLX_MAXDRIVES, NULL);
509 device_printf(sc->mlx_dev, "error fetching drive status\n");
/* Controller terminates the drive table with sd_size == 0xffffffff. */
513 /* iterate over drives returned */
514 for (i = 0, dr = &sc->mlx_sysdrive[0];
515 (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff);
517 /* are we already attached to this drive? */
518 if (dr->ms_disk == 0) {
519 /* pick up drive information */
520 dr->ms_size = mes[i].sd_size;
521 dr->ms_raidlevel = mes[i].sd_raidlevel & 0xf;
522 dr->ms_state = mes[i].sd_state;
524 /* generate geometry information */
525 if (sc->mlx_geom == MLX_GEOM_128_32) {
528 dr->ms_cylinders = dr->ms_size / (128 * 32);
529 } else { /* MLX_GEOM_255/63 */
532 dr->ms_cylinders = dr->ms_size / (255 * 63);
534 dr->ms_disk = device_add_child(sc->mlx_dev, /*"mlxd"*/NULL, -1);
535 if (dr->ms_disk == 0)
536 device_printf(sc->mlx_dev, "device_add_child failed\n");
/* Hand the child a pointer back to its mlx_sysdrive slot. */
537 device_set_ivars(dr->ms_disk, dr);
541 if ((error = bus_generic_attach(sc->mlx_dev)) != 0)
542 device_printf(sc->mlx_dev, "bus_generic_attach returned %d", error);
544 /* mark controller back up */
545 sc->mlx_state &= ~MLX_STATE_SHUTDOWN;
547 /* enable interrupts */
548 sc->mlx_intaction(sc, MLX_INTACTION_ENABLE);
551 /********************************************************************************
552 * Disconnect from the controller completely, in preparation for unload.
555 mlx_detach(device_t dev)
557 struct mlx_softc *sc = device_get_softc(dev);
558 struct mlxd_softc *mlxd;
/* Refuse to detach while the control node or any system drive is open. */
565 if (sc->mlx_state & MLX_STATE_OPEN)
568 for (i = 0; i < MLX_MAXDRIVES; i++) {
569 if (sc->mlx_sysdrive[i].ms_disk != 0) {
570 mlxd = device_get_softc(sc->mlx_sysdrive[i].ms_disk);
571 if (mlxd->mlxd_flags & MLXD_OPEN) { /* drive is mounted, abort detach */
572 device_printf(sc->mlx_sysdrive[i].ms_disk, "still open, can't detach\n");
/* Quiesce the controller and delete child devices before unload. */
577 if ((error = mlx_shutdown(dev)))
588 /********************************************************************************
589 * Bring the controller down to a dormant state and detach all child devices.
591 * This function is called before detach, system shutdown, or before performing
592 * an operation which may add or delete system disks. (Call mlx_startup to
593 * resume normal operation.)
595 * Note that we can assume that the bioq on the controller is empty, as we won't
596 * allow shutdown if any device is open.
599 mlx_shutdown(device_t dev)
601 struct mlx_softc *sc = device_get_softc(dev);
/* Stop accepting work and mask interrupts before flushing the cache. */
609 sc->mlx_state |= MLX_STATE_SHUTDOWN;
610 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE);
612 /* flush controller */
613 device_printf(sc->mlx_dev, "flushing cache...");
620 /* delete all our child devices */
621 for (i = 0; i < MLX_MAXDRIVES; i++) {
622 if (sc->mlx_sysdrive[i].ms_disk != 0) {
623 if ((error = device_delete_child(sc->mlx_dev, sc->mlx_sysdrive[i].ms_disk)) != 0)
/* Clear the slot so mlx_startup() can re-attach this drive later. */
625 sc->mlx_sysdrive[i].ms_disk = 0;
634 /********************************************************************************
635 * Bring the controller to a quiescent state, ready for system suspend.
638 mlx_suspend(device_t dev)
640 struct mlx_softc *sc = device_get_softc(dev);
/* Flush the write-back cache, then mask interrupts for system suspend. */
646 sc->mlx_state |= MLX_STATE_SUSPEND;
648 /* flush controller */
649 device_printf(sc->mlx_dev, "flushing cache...");
650 printf("%s\n", mlx_flush(sc) ? "failed" : "done");
652 sc->mlx_intaction(sc, MLX_INTACTION_DISABLE);
658 /********************************************************************************
659 * Bring the controller back to a state ready for operation.
662 mlx_resume(device_t dev)
664 struct mlx_softc *sc = device_get_softc(dev);
/* Undo mlx_suspend(): clear the suspend flag and unmask interrupts. */
668 sc->mlx_state &= ~MLX_STATE_SUSPEND;
669 sc->mlx_intaction(sc, MLX_INTACTION_ENABLE);
674 /*******************************************************************************
675 * Take an interrupt, or be poked by other code to look for interrupt-worthy
/* Interrupt handler body (header line not visible in this view). */
681 struct mlx_softc *sc = (struct mlx_softc *)arg;
685 /* collect finished commands, queue anything waiting */
689 /*******************************************************************************
690 * Receive a buf structure from a child device and queue it on a particular
691 * disk resource, then poke the disk resource to start as much work as it can.
694 mlx_submit_buf(struct mlx_softc *sc, mlx_bio *bp)
/* Queue the bio; presumably followed by a kick of mlx_startio() — not visible. */
701 MLX_BIO_QINSERT(sc->mlx_bioq, bp);
708 /********************************************************************************
709 * Accept an open operation on the control device.
712 mlx_open(struct cdev *dev, int flags, int fmt, struct thread *td)
714 int unit = dev2unit(dev);
715 struct mlx_softc *sc = devclass_get_softc(mlx_devclass, unit);
/* MLX_STATE_OPEN blocks detach (see mlx_detach) while the node is open. */
717 sc->mlx_state |= MLX_STATE_OPEN;
721 /********************************************************************************
722 * Accept the last close on the control device.
725 mlx_close(struct cdev *dev, int flags, int fmt, struct thread *td)
727 int unit = dev2unit(dev);
728 struct mlx_softc *sc = devclass_get_softc(mlx_devclass, unit);
/* Clearing MLX_STATE_OPEN re-enables detach of the controller. */
730 sc->mlx_state &= ~MLX_STATE_OPEN;
734 /********************************************************************************
735 * Handle controller-specific control operations.
738 mlx_ioctl(struct cdev *dev, u_long cmd, caddr_t addr, int32_t flag, struct thread *td)
740 int unit = dev2unit(dev);
741 struct mlx_softc *sc = devclass_get_softc(mlx_devclass, unit);
/* (addr) is interpreted per-ioctl; the aliases below cover the variants. */
742 struct mlx_rebuild_request *rb = (struct mlx_rebuild_request *)addr;
743 struct mlx_rebuild_status *rs = (struct mlx_rebuild_status *)addr;
744 int *arg = (int *)addr;
745 struct mlx_pause *mp;
746 struct mlx_sysdrive *dr;
747 struct mlxd_softc *mlxd;
752 * Enumerate connected system drives; returns the first system drive's
753 * unit number if *arg is -1, or the next unit after *arg if it's
754 * a valid unit on this controller.
757 /* search system drives */
758 for (i = 0; i < MLX_MAXDRIVES; i++) {
759 /* is this one attached? */
760 if (sc->mlx_sysdrive[i].ms_disk != 0) {
761 /* looking for the next one we come across? */
763 *arg = device_get_unit(sc->mlx_sysdrive[i].ms_disk);
766 /* we want the one after this one */
767 if (*arg == device_get_unit(sc->mlx_sysdrive[i].ms_disk))
774 * Scan the controller to see whether new drives have appeared.
776 case MLX_RESCAN_DRIVES:
781 * Disconnect from the specified drive; it may be about to go
784 case MLX_DETACH_DRIVE: /* detach one drive */
786 if (((dr = mlx_findunit(sc, *arg)) == NULL) ||
787 ((mlxd = device_get_softc(dr->ms_disk)) == NULL))
790 device_printf(dr->ms_disk, "detaching...");
/* Refuse to detach a drive that is currently open/mounted. */
792 if (mlxd->mlxd_flags & MLXD_OPEN) {
797 /* flush controller */
804 if ((error = device_delete_child(sc->mlx_dev, dr->ms_disk)) != 0)
817 * Pause one or more SCSI channels for a period of time, to assist
818 * in the process of hot-swapping devices.
820 * Note that at least the 3.51 firmware on the DAC960PL doesn't seem
823 case MLX_PAUSE_CHANNEL: /* schedule a channel pause */
824 /* Does this command work on this firmware? */
825 if (!(sc->mlx_feature & MLX_FEAT_PAUSEWORKS))
828 mp = (struct mlx_pause *)addr;
829 if ((mp->mp_which == MLX_PAUSE_CANCEL) && (sc->mlx_pause.mp_when != 0)) {
830 /* cancel a pending pause operation */
831 sc->mlx_pause.mp_which = 0;
833 /* fix for legal channels */
834 mp->mp_which &= ((1 << sc->mlx_enq2->me_actual_channels) -1);
/* Bound the delay to an hour and the duration to 0xf * 30 seconds. */
835 /* check time values */
836 if ((mp->mp_when < 0) || (mp->mp_when > 3600))
838 if ((mp->mp_howlong < 1) || (mp->mp_howlong > (0xf * 30)))
841 /* check for a pause currently running */
842 if ((sc->mlx_pause.mp_which != 0) && (sc->mlx_pause.mp_when == 0))
845 /* looks ok, go with it */
846 sc->mlx_pause.mp_which = mp->mp_which;
847 sc->mlx_pause.mp_when = time_second + mp->mp_when;
848 sc->mlx_pause.mp_howlong = sc->mlx_pause.mp_when + mp->mp_howlong;
853 * Accept a command passthrough-style.
856 return(mlx_user_command(sc, (struct mlx_usercommand *)addr));
859 * Start a rebuild on a given SCSI disk
861 case MLX_REBUILDASYNC:
/* Only one background operation (rebuild or check) may run at a time. */
862 if (sc->mlx_background != 0) {
863 rb->rr_status = 0x0106;
866 rb->rr_status = mlx_rebuild(sc, rb->rr_channel, rb->rr_target);
867 switch (rb->rr_status) {
872 error = ENOMEM; /* couldn't set up the command */
891 sc->mlx_background = MLX_BACKGROUND_REBUILD;
895 * Get the status of the current rebuild or consistency check.
897 case MLX_REBUILDSTAT:
898 *rs = sc->mlx_rebuildstat;
902 * Return the per-controller system drive number matching the
903 * disk device number in (arg), if it happens to belong to us.
905 case MLX_GET_SYSDRIVE:
907 mlxd = (struct mlxd_softc *)devclass_get_softc(mlxd_devclass, *arg);
/* Pointer range check: does this mlxd's drive live in our sysdrive array? */
908 if ((mlxd != NULL) && (mlxd->mlxd_drive >= sc->mlx_sysdrive) &&
909 (mlxd->mlxd_drive < (sc->mlx_sysdrive + MLX_MAXDRIVES))) {
911 *arg = mlxd->mlxd_drive - sc->mlx_sysdrive;
920 /********************************************************************************
921 * Handle operations requested by a System Drive connected to this controller.
924 mlx_submit_ioctl(struct mlx_softc *sc, struct mlx_sysdrive *drive, u_long cmd,
925 caddr_t addr, int32_t flag, struct thread *td)
927 int *arg = (int *)addr;
932 * Return the current status of this drive.
935 *arg = drive->ms_state;
939 * Start a background consistency check on this drive.
941 case MLXD_CHECKASYNC: /* start a background consistency check */
/* As with MLX_REBUILDASYNC, only one background operation at a time. */
942 if (sc->mlx_background != 0) {
946 result = mlx_check(sc, drive - &sc->mlx_sysdrive[0]);
952 error = ENOMEM; /* couldn't set up the command */
968 sc->mlx_background = MLX_BACKGROUND_CHECK;
977 /********************************************************************************
978 ********************************************************************************
980 ********************************************************************************
981 ********************************************************************************/
983 /********************************************************************************
984 * Fire off commands to periodically check the status of connected drives.
987 mlx_periodic(void *data)
989 struct mlx_softc *sc = (struct mlx_softc *)data;
/* A scheduled channel pause becomes due when time_second passes mp_when. */
996 if ((sc->mlx_pause.mp_which != 0) &&
997 (sc->mlx_pause.mp_when > 0) &&
998 (time_second >= sc->mlx_pause.mp_when)){
1000 mlx_pause_action(sc); /* pause is running */
1001 sc->mlx_pause.mp_when = 0;
1005 * Bus pause still running?
1007 } else if ((sc->mlx_pause.mp_which != 0) &&
1008 (sc->mlx_pause.mp_when == 0)) {
1010 /* time to stop bus pause? */
1011 if (time_second >= sc->mlx_pause.mp_howlong) {
1012 mlx_pause_action(sc);
1013 sc->mlx_pause.mp_which = 0; /* pause is complete */
/* Audible reminder while the bus is paused. */
1016 sysbeep((time_second % 5) * 100 + 500, hz/8);
1020 * Run normal periodic activities?
1022 } else if (time_second > (sc->mlx_lastpoll + 10)) {
1023 sc->mlx_lastpoll = time_second;
1026 * Check controller status.
1028 * XXX Note that this may not actually launch a command in situations of high load.
1030 mlx_enquire(sc, (sc->mlx_iftype == MLX_IFTYPE_2) ? MLX_CMD_ENQUIRY_OLD : MLX_CMD_ENQUIRY,
1031 imax(sizeof(struct mlx_enquiry), sizeof(struct mlx_enquiry_old)), mlx_periodic_enquiry);
1034 * Check system drive status.
1036 * XXX This might be better left to event-driven detection, eg. I/O to an offline
1037 * drive will detect it's offline, rebuilds etc. should detect the drive is back
1040 mlx_enquire(sc, MLX_CMD_ENQSYSDRIVE, sizeof(struct mlx_enq_sys_drive) * MLX_MAXDRIVES,
1041 mlx_periodic_enquiry);
1045 /* get drive rebuild/check status */
1046 /* XXX should check sc->mlx_background if this is only valid while in progress */
1047 mlx_enquire(sc, MLX_CMD_REBUILDSTAT, sizeof(struct mlx_rebuild_stat), mlx_periodic_rebuild);
1049 /* deal with possibly-missed interrupts and timed-out commands */
1052 /* reschedule another poll next second or so */
1053 sc->mlx_timeout = timeout(mlx_periodic, sc, hz);
1056 /********************************************************************************
1057 * Handle the result of an ENQUIRY command instigated by periodic status polling.
1060 mlx_periodic_enquiry(struct mlx_command *mc)
1062 struct mlx_softc *sc = mc->mc_sc;
1066 /* Command completed OK? */
1067 if (mc->mc_status != 0) {
1068 device_printf(sc->mlx_dev, "periodic enquiry failed - %s\n", mlx_diagnose_command(mc));
/* Dispatch on the opcode originally placed in the mailbox. */
1072 /* respond to command */
1073 switch(mc->mc_mailbox[0]) {
1075 * This is currently a bit fruitless, as we don't know how to extract the eventlog
1078 case MLX_CMD_ENQUIRY_OLD:
1080 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data;
1081 struct mlx_enquiry_old *meo = (struct mlx_enquiry_old *)mc->mc_data;
/*
 * In-place aliasing conversion: both views share mc_data, so fields are
 * copied high-to-low where layouts overlap.
 */
1084 /* convert data in-place to new format */
1085 for (i = (sizeof(me->me_dead) / sizeof(me->me_dead[0])) - 1; i >= 0; i--) {
1086 me->me_dead[i].dd_chan = meo->me_dead[i].dd_chan;
1087 me->me_dead[i].dd_targ = meo->me_dead[i].dd_targ;
1089 me->me_misc_flags = 0;
1090 me->me_rebuild_count = meo->me_rebuild_count;
1091 me->me_dead_count = meo->me_dead_count;
1092 me->me_critical_sd_count = meo->me_critical_sd_count;
1093 me->me_event_log_seq_num = 0;
1094 me->me_offline_sd_count = meo->me_offline_sd_count;
1095 me->me_max_commands = meo->me_max_commands;
1096 me->me_rebuild_flag = meo->me_rebuild_flag;
1097 me->me_fwmajor = meo->me_fwmajor;
1098 me->me_fwminor = meo->me_fwminor;
1099 me->me_status_flags = meo->me_status_flags;
1100 me->me_flash_age = meo->me_flash_age;
1101 for (i = (sizeof(me->me_drvsize) / sizeof(me->me_drvsize[0])) - 1; i >= 0; i--) {
1102 if (i > ((sizeof(meo->me_drvsize) / sizeof(meo->me_drvsize[0])) - 1)) {
1103 me->me_drvsize[i] = 0; /* drive beyond supported range */
1105 me->me_drvsize[i] = meo->me_drvsize[i];
1108 me->me_num_sys_drvs = meo->me_num_sys_drvs;
1113 * Generic controller status update. We could do more with this than just
1114 * checking the event log.
1116 case MLX_CMD_ENQUIRY:
1118 struct mlx_enquiry *me = (struct mlx_enquiry *)mc->mc_data;
1120 if (sc->mlx_currevent == -1) {
1121 /* initialise our view of the event log */
1122 sc->mlx_currevent = sc->mlx_lastevent = me->me_event_log_seq_num;
1123 } else if ((me->me_event_log_seq_num != sc->mlx_lastevent) && !(sc->mlx_flags & MLX_EVENTLOG_BUSY)) {
1124 /* record where current events are up to */
1125 sc->mlx_currevent = me->me_event_log_seq_num;
1126 debug(1, "event log pointer was %d, now %d\n", sc->mlx_lastevent, sc->mlx_currevent);
/* MLX_EVENTLOG_BUSY prevents overlapping drains; cleared in the respond path. */
1128 /* mark the event log as busy */
1129 atomic_set_int(&sc->mlx_flags, MLX_EVENTLOG_BUSY);
1131 /* drain new eventlog entries */
1132 mlx_periodic_eventlog_poll(sc);
1136 case MLX_CMD_ENQSYSDRIVE:
1138 struct mlx_enq_sys_drive *mes = (struct mlx_enq_sys_drive *)mc->mc_data;
1139 struct mlx_sysdrive *dr;
1142 for (i = 0, dr = &sc->mlx_sysdrive[0];
1143 (i < MLX_MAXDRIVES) && (mes[i].sd_size != 0xffffffff);
1146 /* has state been changed by controller? */
1147 if (dr->ms_state != mes[i].sd_state) {
1148 switch(mes[i].sd_state) {
1149 case MLX_SYSD_OFFLINE:
1150 device_printf(dr->ms_disk, "drive offline\n");
1152 case MLX_SYSD_ONLINE:
1153 device_printf(dr->ms_disk, "drive online\n");
1155 case MLX_SYSD_CRITICAL:
1156 device_printf(dr->ms_disk, "drive critical\n");
1159 /* save new state */
1160 dr->ms_state = mes[i].sd_state;
1166 device_printf(sc->mlx_dev, "%s: unknown command 0x%x", __func__, mc->mc_mailbox[0]);
/* Enquiry buffers are allocated by mlx_enquire(); this path owns the free. */
1171 free(mc->mc_data, M_DEVBUF);
/*
 * bus_dmamap_load() callback for the event-log poll: finishes building the
 * LOGOP command once the data buffer's bus address is known, then starts it.
 */
1176 mlx_eventlog_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1178 struct mlx_command *mc;
1180 mc = (struct mlx_command *)arg;
1181 mlx_setup_dmamap(mc, segs, nsegments, error);
1183 /* build the command to get one entry */
1184 mlx_make_type3(mc, MLX_CMD_LOGOP, MLX_LOGOP_GET, 1,
1185 mc->mc_sc->mlx_lastevent, 0, 0, mc->mc_dataphys, 0);
1186 mc->mc_complete = mlx_periodic_eventlog_respond;
1187 mc->mc_private = mc;
1189 /* start the command */
1190 if (mlx_start(mc) != 0) {
/* On failure, release the data buffer here; completion path won't run. */
1192 free(mc->mc_data, M_DEVBUF);
1198 /********************************************************************************
1199 * Instigate a poll for one event log message on (sc).
1200 * We only poll for one message at a time, to keep our command usage down.
1203 mlx_periodic_eventlog_poll(struct mlx_softc *sc)
1205 struct mlx_command *mc;
1206 void *result = NULL;
1211 /* get ourselves a command buffer */
1213 if ((mc = mlx_alloccmd(sc)) == NULL)
/* 1024 bytes is an over-allocation; the entry struct size was deemed unreliable. */
1216 /* allocate the response structure */
1217 if ((result = malloc(/*sizeof(struct mlx_eventlog_entry)*/1024, M_DEVBUF,
1221 /* get a command slot */
1222 if (mlx_getslot(mc))
1225 /* map the command so the controller can see it */
1226 mc->mc_data = result;
1227 mc->mc_length = /*sizeof(struct mlx_eventlog_entry)*/1024;
1228 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
1229 mc->mc_length, mlx_eventlog_cb, mc, BUS_DMA_NOWAIT);
/* Error-path cleanup: free the buffer only if it wasn't handed off above. */
1235 if ((result != NULL) && (mc->mc_data != NULL))
1236 free(result, M_DEVBUF);
1240 /********************************************************************************
1241 * Handle the result of polling for a log message, generate diagnostic output.
1242 * If this wasn't the last message waiting for us, we'll go collect another.
/*
 * Reasons a drive was killed, indexed by the ASQ byte of the Mylex
 * vendor-specific sense data (sense key 9, ASC 0x80) — see
 * mlx_periodic_eventlog_respond() below.
 */
1244 static char *mlx_sense_messages[] = {
1245 "because write recovery failed",
1246 "because of SCSI bus reset failure",
1247 "because of double check condition",
1248 "because it was removed",
1249 "because of gross error on SCSI chip",
1250 "because of bad tag returned from drive",
1251 "because of timeout on SCSI command",
1252 "because of reset SCSI command issued from system",
1253 "because busy or parity error count exceeded limit",
1254 "because of 'kill drive' command from system",
1255 "because of selection timeout",
1256 "due to SCSI phase sequence error",
1257 "due to unknown status"
1261 mlx_periodic_eventlog_respond(struct mlx_command *mc)
1263 struct mlx_softc *sc = mc->mc_sc;
1264 struct mlx_eventlog_entry *el = (struct mlx_eventlog_entry *)mc->mc_data;
/* Advance unconditionally; on error we resynchronise to currevent below. */
1269 sc->mlx_lastevent++; /* next message... */
1270 if (mc->mc_status == 0) {
1272 /* handle event log message */
1273 switch(el->el_type) {
1275 * This is the only sort of message we understand at the moment.
1276 * The tests here are probably incomplete.
1278 case MLX_LOGMSG_SENSE: /* sense data */
1279 /* Mylex vendor-specific message indicating a drive was killed? */
1280 if ((el->el_sensekey == 9) &&
1281 (el->el_asc == 0x80)) {
1282 if (el->el_asq < (sizeof(mlx_sense_messages) / sizeof(mlx_sense_messages[0]))) {
1283 reason = mlx_sense_messages[el->el_asq];
1285 reason = "for unknown reason";
1287 device_printf(sc->mlx_dev, "physical drive %d:%d killed %s\n",
1288 el->el_channel, el->el_target, reason);
1290 /* SCSI drive was reset? */
1291 if ((el->el_sensekey == 6) && (el->el_asc == 0x29)) {
1292 device_printf(sc->mlx_dev, "physical drive %d:%d reset\n",
1293 el->el_channel, el->el_target);
/* Anything other than "no sense" or a benign not-ready is logged as an error. */
1295 /* SCSI drive error? */
1296 if (!((el->el_sensekey == 0) ||
1297 ((el->el_sensekey == 2) &&
1298 (el->el_asc == 0x04) &&
1299 ((el->el_asq == 0x01) ||
1300 (el->el_asq == 0x02))))) {
1301 device_printf(sc->mlx_dev, "physical drive %d:%d error log: sense = %d asc = %x asq = %x\n",
1302 el->el_channel, el->el_target, el->el_sensekey, el->el_asc, el->el_asq);
1303 device_printf(sc->mlx_dev, " info %4D csi %4D\n", el->el_information, ":", el->el_csi, ":");
1308 device_printf(sc->mlx_dev, "unknown log message type 0x%x\n", el->el_type);
1312 device_printf(sc->mlx_dev, "error reading message log - %s\n", mlx_diagnose_command(mc));
1313 /* give up on all the outstanding messages, as we may have come unsynched */
1314 sc->mlx_lastevent = sc->mlx_currevent;
1317 /* dispose of command and data */
1318 free(mc->mc_data, M_DEVBUF);
1321 /* is there another message to obtain? */
1322 if (sc->mlx_lastevent != sc->mlx_currevent) {
1323 mlx_periodic_eventlog_poll(sc);
/* Drain complete: allow mlx_periodic_enquiry() to start a new drain. */
1325 /* clear log-busy status */
1326 atomic_clear_int(&sc->mlx_flags, MLX_EVENTLOG_BUSY);
1330 /********************************************************************************
1331 * Handle check/rebuild operations in progress.
/* Completion handler for the periodic rebuild-status enquiry.  Status 0
 * means an operation is running (stats are copied); firmware status 0x0105
 * means nothing is running, so any background operation is finalised. */
1334 mlx_periodic_rebuild(struct mlx_command *mc)
1336 struct mlx_softc *sc = mc->mc_sc;
1337 struct mlx_rebuild_status *mr = (struct mlx_rebuild_status *)mc->mc_data;
1339 switch(mc->mc_status) {
1340 case 0: /* operation running, update stats */
1341 sc->mlx_rebuildstat = *mr;
1343 /* spontaneous rebuild/check? */
1344 if (sc->mlx_background == 0) {
1345 sc->mlx_background = MLX_BACKGROUND_SPONTANEOUS;
1346 device_printf(sc->mlx_dev, "background check/rebuild operation started\n");
1350 case 0x0105: /* nothing running, finalise stats and report */
1351 switch(sc->mlx_background) {
1352 case MLX_BACKGROUND_CHECK:
1353 device_printf(sc->mlx_dev, "consistency check completed\n"); /* XXX print drive? */
1355 case MLX_BACKGROUND_REBUILD:
1356 device_printf(sc->mlx_dev, "drive rebuild completed\n"); /* XXX print channel/target? */
1358 case MLX_BACKGROUND_SPONTANEOUS:
1360 /* if we have previously been non-idle, report the transition */
1361 if (sc->mlx_rebuildstat.rs_code != MLX_REBUILDSTAT_IDLE) {
1362 device_printf(sc->mlx_dev, "background check/rebuild operation completed\n");
1365 sc->mlx_background = 0;
1366 sc->mlx_rebuildstat.rs_code = MLX_REBUILDSTAT_IDLE;
1369 free(mc->mc_data, M_DEVBUF);
1373 /********************************************************************************
1374 ********************************************************************************
1376 ********************************************************************************
1377 ********************************************************************************/
1379 /********************************************************************************
1380 * It's time to perform a channel pause action for (sc), either start or stop
/* Issues a start- or stop-channel command for every channel selected in
 * sc->mlx_pause.mp_which.  The failsafe restart interval is encoded in the
 * high nibble of the channel parameter (4 bits, units of 30 seconds). */
1384 mlx_pause_action(struct mlx_softc *sc)
1386 struct mlx_command *mc;
1387 int failsafe, i, command;
1389 /* What are we doing here? */
1390 if (sc->mlx_pause.mp_when == 0) {
1391 command = MLX_CMD_STARTCHANNEL;
1395 command = MLX_CMD_STOPCHANNEL;
1398 * Channels will always start again after the failsafe period,
1399 * which is specified in multiples of 30 seconds.
1400 * This constrains us to a maximum pause of 450 seconds.
1402 failsafe = ((sc->mlx_pause.mp_howlong - time_second) + 5) / 30;
/* clamp to the 4-bit field; shorten the requested pause to match */
1403 if (failsafe > 0xf) {
1405 sc->mlx_pause.mp_howlong = time_second + (0xf * 30) - 5;
1409 /* build commands for every channel requested */
1410 for (i = 0; i < sc->mlx_enq2->me_actual_channels; i++) {
1411 if ((1 << i) & sc->mlx_pause.mp_which) {
1413 /* get ourselves a command buffer */
1414 if ((mc = mlx_alloccmd(sc)) == NULL)
1416 /* get a command slot */
1417 mc->mc_flags |= MLX_CMD_PRIORITY;
1418 if (mlx_getslot(mc))
1421 /* build the command */
1422 mlx_make_type2(mc, command, (failsafe << 4) | i, 0, 0, 0, 0, 0, 0, 0);
1423 mc->mc_complete = mlx_pause_done;
1424 mc->mc_private = sc; /* XXX not needed */
1427 /* command submitted OK */
1431 device_printf(sc->mlx_dev, "%s failed for channel %d\n",
1432 command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", i);
/* Completion handler for pause/resume commands; recovers the opcode and
 * channel number from the submitted mailbox bytes and reports the outcome. */
1440 mlx_pause_done(struct mlx_command *mc)
1442 struct mlx_softc *sc = mc->mc_sc;
1443 int command = mc->mc_mailbox[0];
/* channel is the low nibble of mailbox[2]; high nibble held the failsafe */
1444 int channel = mc->mc_mailbox[2] & 0xf;
1446 if (mc->mc_status != 0) {
1447 device_printf(sc->mlx_dev, "%s command failed - %s\n",
1448 command == MLX_CMD_STOPCHANNEL ? "pause" : "resume", mlx_diagnose_command(mc));
1449 } else if (command == MLX_CMD_STOPCHANNEL) {
1450 device_printf(sc->mlx_dev, "channel %d pausing for %ld seconds\n",
1451 channel, (long)(sc->mlx_pause.mp_howlong - time_second));
1453 device_printf(sc->mlx_dev, "channel %d resuming\n", channel);
1458 /********************************************************************************
1459 ********************************************************************************
1461 ********************************************************************************
1462 ********************************************************************************/
/* bus_dmamap_load() callback for mlx_enquire(): finishes the type-2 enquiry
 * mailbox now that the data buffer's bus address is known, then submits the
 * command asynchronously (if a completion handler was set) or runs it
 * synchronously -- sleeping if interrupts are enabled, else busy-polling. */
1465 mlx_enquire_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1467 struct mlx_softc *sc;
1468 struct mlx_command *mc;
1470 mc = (struct mlx_command *)arg;
1474 mlx_setup_dmamap(mc, segs, nsegments, error);
1476 /* build an enquiry command */
1478 mlx_make_type2(mc, mc->mc_command, 0, 0, 0, 0, 0, 0, mc->mc_dataphys, 0);
1480 /* do we want a completion callback? */
1481 if (mc->mc_complete != NULL) {
1482 if ((error = mlx_start(mc)) != 0)
1485 /* run the command in either polled or wait mode */
1486 if ((sc->mlx_state & MLX_STATE_INTEN) ? mlx_wait_command(mc) :
1487 mlx_poll_command(mc))
1490 /* command completed OK? */
1491 if (mc->mc_status != 0) {
1492 device_printf(sc->mlx_dev, "ENQUIRY failed - %s\n",
1493 mlx_diagnose_command(mc));
1499 /********************************************************************************
1500 * Perform an Enquiry command using a type-3 command buffer and a return a single
1501 * linear result buffer. If the completion function is specified, it will
1502 * be called with the completed command (and the result response will not be
1503 * valid until that point). Otherwise, the command will either be busy-waited
1504 * for (interrupts not enabled), or slept for.
/* NOTE(review): on the async path the result buffer's ownership passes to the
 * completion handler; on the sync path the caller presumably receives it --
 * the return statements are elided in this view, so confirm against the
 * full source. */
1507 mlx_enquire(struct mlx_softc *sc, int command, size_t bufsize, void (* complete)(struct mlx_command *mc))
1509 struct mlx_command *mc;
1515 /* get ourselves a command buffer */
1518 if ((mc = mlx_alloccmd(sc)) == NULL)
1520 /* allocate the response structure */
1521 if ((result = malloc(bufsize, M_DEVBUF, M_NOWAIT)) == NULL)
1523 /* get a command slot */
1524 mc->mc_flags |= MLX_CMD_PRIORITY | MLX_CMD_DATAOUT;
1525 if (mlx_getslot(mc))
1528 /* map the command so the controller can see it */
1529 mc->mc_data = result;
1530 mc->mc_length = bufsize;
1531 mc->mc_command = command;
1533 if (complete != NULL) {
1534 mc->mc_complete = complete;
1535 mc->mc_private = mc;
/* mlx_enquire_cb builds and submits the mailbox once the mapping is ready */
1538 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
1539 mc->mc_length, mlx_enquire_cb, mc, BUS_DMA_NOWAIT);
1542 /* we got a command, but nobody else will free it */
1543 if ((mc != NULL) && (mc->mc_complete == NULL))
1545 /* we got an error, and we allocated a result */
1546 if ((error != 0) && (result != NULL)) {
1547 free(result, M_DEVBUF);
1554 /********************************************************************************
1555 * Perform a Flush command on the nominated controller.
1557 * May be called with interrupts enabled or disabled; will not return until
1558 * the flush operation completes or fails.
/* Returns 0 on success, nonzero on failure (return paths elided here). */
1561 mlx_flush(struct mlx_softc *sc)
1563 struct mlx_command *mc;
1568 /* get ourselves a command buffer */
1570 if ((mc = mlx_alloccmd(sc)) == NULL)
1572 /* get a command slot */
1573 if (mlx_getslot(mc))
1576 /* build a flush command */
1577 mlx_make_type2(mc, MLX_CMD_FLUSH, 0, 0, 0, 0, 0, 0, 0, 0);
1579 /* can't assume that interrupts are going to work here, so play it safe */
1580 if (mlx_poll_command(mc))
1583 /* command completed OK? */
1584 if (mc->mc_status != 0) {
1585 device_printf(sc->mlx_dev, "FLUSH failed - %s\n", mlx_diagnose_command(mc));
1589 error = 0; /* success */
1596 /********************************************************************************
1597 * Start a background consistency check on (drive).
1599 * May be called with interrupts enabled or disabled; will return as soon as the
1600 * operation has started or been refused.
/* Returns the controller status of the CHECK ASYNC command (0 = started). */
1603 mlx_check(struct mlx_softc *sc, int drive)
1605 struct mlx_command *mc;
1610 /* get ourselves a command buffer */
1612 if ((mc = mlx_alloccmd(sc)) == NULL)
1614 /* get a command slot */
1615 if (mlx_getslot(mc))
1618 /* build a checkasync command, set the "fix it" flag */
/* bit 0x80 on the drive number requests automatic repair of errors found */
1619 mlx_make_type2(mc, MLX_CMD_CHECKASYNC, 0, 0, 0, 0, 0, drive | 0x80, 0, 0);
1621 /* start the command and wait for it to be returned */
1622 if (mlx_wait_command(mc))
1625 /* command completed OK? */
1626 if (mc->mc_status != 0) {
1627 device_printf(sc->mlx_dev, "CHECK ASYNC failed - %s\n", mlx_diagnose_command(mc));
1629 device_printf(sc->mlx_sysdrive[drive].ms_disk, "consistency check started");
1631 error = mc->mc_status;
1639 /********************************************************************************
1640 * Start a background rebuild of the physical drive at (channel),(target).
1642 * May be called with interrupts enabled or disabled; will return as soon as the
1643 * operation has started or been refused.
/* Returns the controller status of the REBUILD ASYNC command (0 = started). */
1646 mlx_rebuild(struct mlx_softc *sc, int channel, int target)
1648 struct mlx_command *mc;
1653 /* get ourselves a command buffer */
1655 if ((mc = mlx_alloccmd(sc)) == NULL)
1657 /* get a command slot */
1658 if (mlx_getslot(mc))
1661 /* build a checkasync command, set the "fix it" flag */
1662 mlx_make_type2(mc, MLX_CMD_REBUILDASYNC, channel, target, 0, 0, 0, 0, 0, 0);
1664 /* start the command and wait for it to be returned */
1665 if (mlx_wait_command(mc))
1668 /* command completed OK? */
1669 if (mc->mc_status != 0) {
1670 device_printf(sc->mlx_dev, "REBUILD ASYNC failed - %s\n", mlx_diagnose_command(mc));
1672 device_printf(sc->mlx_dev, "drive rebuild started for %d:%d\n", channel, target);
1674 error = mc->mc_status;
1682 /********************************************************************************
1683 * Run the command (mc) and return when it completes.
1685 * Interrupts need to be enabled; returns nonzero on error.
1688 mlx_wait_command(struct mlx_command *mc)
1690 struct mlx_softc *sc = mc->mc_sc;
/* no completion callback: mlx_complete() will wakeup_one(mc->mc_private) */
1695 mc->mc_complete = NULL;
1696 mc->mc_private = mc; /* wake us when you're done */
1697 if ((error = mlx_start(mc)) != 0)
1701 /* XXX better timeout? */
/* sleep in 1-second slices, up to ~30 iterations (count increment line is
 * elided in this view) */
1702 while ((mc->mc_status == MLX_STATUS_BUSY) && (count < 30)) {
1703 tsleep(mc->mc_private, PRIBIO | PCATCH, "mlxwcmd", hz);
1706 if (mc->mc_status != 0) {
1707 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc));
1714 /********************************************************************************
1715 * Start the command (mc) and busy-wait for it to complete.
1717 * Should only be used when interrupts can't be relied upon. Returns 0 on
1718 * success, nonzero on error.
1719 * Successfully completed commands are dequeued.
1722 mlx_poll_command(struct mlx_command *mc)
1724 struct mlx_softc *sc = mc->mc_sc;
1725 int error, count, s;
/* neither callback nor sleeper: mlx_complete() leaves the command queued
 * for us to reap below */
1729 mc->mc_complete = NULL;
1730 mc->mc_private = NULL; /* we will poll for it */
1731 if ((error = mlx_start(mc)) != 0)
1736 /* poll for completion */
1737 mlx_done(mc->mc_sc);
/* bounded spin -- 15000000 iterations is an arbitrary safety cap */
1739 } while ((mc->mc_status == MLX_STATUS_BUSY) && (count++ < 15000000));
1740 if (mc->mc_status != MLX_STATUS_BUSY) {
1742 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
1746 device_printf(sc->mlx_dev, "command failed - %s\n", mlx_diagnose_command(mc));
/* bus_dmamap_load() callback for mlx_startio(): builds the scatter/gather
 * read or write command for the bio attached to (mc) and hands it to the
 * controller.  Transfers are assumed to be 512-byte rounded. */
1751 mlx_startio_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1753 struct mlx_command *mc;
1754 struct mlxd_softc *mlxd;
1755 struct mlx_softc *sc;
1761 mc = (struct mlx_command *)arg;
1762 mlx_setup_dmamap(mc, segs, nsegments, error);
1765 bp = mc->mc_private;
1767 if (MLX_BIO_IS_READ(bp)) {
1768 mc->mc_flags |= MLX_CMD_DATAIN;
1769 cmd = MLX_CMD_READSG;
1771 mc->mc_flags |= MLX_CMD_DATAOUT;
1772 cmd = MLX_CMD_WRITESG;
1775 /* build a suitable I/O command (assumes 512-byte rounded transfers) */
1776 mlxd = (struct mlxd_softc *)MLX_BIO_SOFTC(bp);
/* drive number is the index of this unit within the sysdrive array */
1777 driveno = mlxd->mlxd_drive - sc->mlx_sysdrive;
1778 blkcount = (MLX_BIO_LENGTH(bp) + MLX_BLKSIZE - 1) / MLX_BLKSIZE;
/* diagnostic only -- the request is still issued (review: confirm intended) */
1780 if ((MLX_BIO_LBA(bp) + blkcount) > sc->mlx_sysdrive[driveno].ms_size)
1781 device_printf(sc->mlx_dev,
1782 "I/O beyond end of unit (%lld,%d > %lu)\n",
1783 (long long)MLX_BIO_LBA(bp), blkcount,
1784 (u_long)sc->mlx_sysdrive[driveno].ms_size);
1787 * Build the I/O command. Note that the SG list type bits are set to zero,
1788 * denoting the format of SG list that we are using.
/* type-2 interfaces use the old-style type-1 command format */
1790 if (sc->mlx_iftype == MLX_IFTYPE_2) {
1791 mlx_make_type1(mc, (cmd == MLX_CMD_WRITESG) ? MLX_CMD_WRITESG_OLD :
1793 blkcount & 0xff, /* xfer length low byte */
1794 MLX_BIO_LBA(bp), /* physical block number */
1795 driveno, /* target drive number */
1796 mc->mc_sgphys, /* location of SG list */
1797 mc->mc_nsgent & 0x3f); /* size of SG list */
/* newer interfaces: 11-bit block count split across two mailbox bytes,
 * with the drive number packed into the upper bits */
1799 mlx_make_type5(mc, cmd,
1800 blkcount & 0xff, /* xfer length low byte */
1801 (driveno << 3) | ((blkcount >> 8) & 0x07),
1802 /* target+length high 3 bits */
1803 MLX_BIO_LBA(bp), /* physical block number */
1804 mc->mc_sgphys, /* location of SG list */
1805 mc->mc_nsgent & 0x3f); /* size of SG list */
1808 /* try to give command to controller */
1809 if (mlx_start(mc) != 0) {
1810 /* fail the command */
1811 mc->mc_status = MLX_STATUS_WEDGED;
1816 /********************************************************************************
1817 * Pull as much work off the softc's work queue as possible and give it to the
1818 * controller. Leave a couple of slots free for emergencies.
1820 * Must be called at splbio or in an equivalent fashion that prevents
1821 * reentry or activity on the bioq.
1824 mlx_startio(struct mlx_softc *sc)
1826 struct mlx_command *mc;
1831 /* avoid reentrancy */
/* test-and-set lock: bail if another thread is already starting I/O */
1832 if (mlx_lock_tas(sc, MLX_LOCK_STARTING))
1835 /* spin until something prevents us from doing any work */
1839 /* see if there's work to be done */
1840 if ((bp = MLX_BIO_QFIRST(sc->mlx_bioq)) == NULL)
1843 if ((mc = mlx_alloccmd(sc)) == NULL)
1845 /* get a slot for the command */
1846 if (mlx_getslot(mc) != 0) {
1850 /* get the buf containing our work */
1851 MLX_BIO_QREMOVE(sc->mlx_bioq, bp);
1855 /* connect the buf to the command */
1856 mc->mc_complete = mlx_completeio;
1857 mc->mc_private = bp;
1858 mc->mc_data = MLX_BIO_DATA(bp);
1859 mc->mc_length = MLX_BIO_LENGTH(bp);
1861 /* map the command so the controller can work with it */
/* mlx_startio_cb builds and submits the command; may be deferred */
1862 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
1863 mc->mc_length, mlx_startio_cb, mc, 0);
1864 if (error == EINPROGRESS) {
1871 mlx_lock_clr(sc, MLX_LOCK_STARTING);
1874 /********************************************************************************
1875 * Handle completion of an I/O command.
1878 mlx_completeio(struct mlx_softc *mc)
1880 struct mlx_softc *sc = mc->mc_sc;
1881 mlx_bio *bp = (mlx_bio *)mc->mc_private;
1882 struct mlxd_softc *mlxd = (struct mlxd_softc *)MLX_BIO_SOFTC(bp);
1884 if (mc->mc_status != MLX_STATUS_OK) { /* could be more verbose here? */
1885 MLX_BIO_SET_ERROR(bp, EIO);
1887 switch(mc->mc_status) {
1888 case MLX_STATUS_RDWROFFLINE: /* system drive has gone offline */
1889 device_printf(mlxd->mlxd_dev, "drive offline\n");
1890 /* should signal this with a return code */
/* mark the system drive offline so further I/O can be refused */
1891 mlxd->mlxd_drive->ms_state = MLX_SYSD_OFFLINE;
1894 default: /* other I/O error */
1895 device_printf(sc->mlx_dev, "I/O error - %s\n", mlx_diagnose_command(mc));
1897 device_printf(sc->mlx_dev, " b_bcount %ld blkcount %ld b_pblkno %d\n",
1898 MLX_BIO_LENGTH(bp), MLX_BIO_LENGTH(bp) / MLX_BLKSIZE, MLX_BIO_LBA(bp));
/* dump the raw 13-byte mailbox for post-mortem analysis */
1899 device_printf(sc->mlx_dev, " %13D\n", mc->mc_mailbox, " ");
/* bus_dmamap_load() callback for mlx_user_command(): patches the physical
 * buffer address into the user-supplied mailbox (and into the DCDB for
 * passthrough SCSI commands), then runs the command synchronously. */
1909 mlx_user_cb(void *arg, bus_dma_segment_t *segs, int nsegments, int error)
1911 struct mlx_usercommand *mu;
1912 struct mlx_command *mc;
1913 struct mlx_dcdb *dcdb;
1915 mc = (struct mlx_command *)arg;
1919 mlx_setup_dmamap(mc, segs, nsegments, error);
1921 mu = (struct mlx_usercommand *)mc->mc_private;
1925 * If this is a passthrough SCSI command, the DCDB is packed at the
1926 * beginning of the data area. Fix up the DCDB to point to the correct
1927 * physical address and override any bufptr supplied by the caller since
1928 * we know what it's meant to be.
1930 if (mc->mc_mailbox[0] == MLX_CMD_DIRECT_CDB) {
1931 dcdb = (struct mlx_dcdb *)mc->mc_data;
/* the DCDB's data area immediately follows the DCDB itself */
1932 dcdb->dcdb_physaddr = mc->mc_dataphys + sizeof(*dcdb);
1937 * If there's a data buffer, fix up the command's buffer pointer.
/* store the 32-bit bus address little-endian at offset mu_bufptr */
1939 if (mu->mu_datasize > 0) {
1940 mc->mc_mailbox[mu->mu_bufptr ] = mc->mc_dataphys & 0xff;
1941 mc->mc_mailbox[mu->mu_bufptr + 1] = (mc->mc_dataphys >> 8) & 0xff;
1942 mc->mc_mailbox[mu->mu_bufptr + 2] = (mc->mc_dataphys >> 16) & 0xff;
1943 mc->mc_mailbox[mu->mu_bufptr + 3] = (mc->mc_dataphys >> 24) & 0xff;
1945 debug(0, "command fixup");
1947 /* submit the command and wait */
1948 if (mlx_wait_command(mc) != 0)
1953 /********************************************************************************
1954 * Take a command from user-space and try to run it.
1956 * XXX Note that this can't perform very much in the way of error checking, and
1957 * as such, applications _must_ be considered trustworthy.
1958 * XXX Commands using S/G for data are not supported.
1961 mlx_user_command(struct mlx_softc *sc, struct mlx_usercommand *mu)
1963 struct mlx_command *mc;
1973 /* get ourselves a command and copy in from user space */
1974 if ((mc = mlx_alloccmd(sc)) == NULL)
1976 bcopy(mu->mu_command, mc->mc_mailbox, sizeof(mc->mc_mailbox));
1977 debug(0, "got command buffer");
1980 * if we need a buffer for data transfer, allocate one and copy in its
1983 if (mu->mu_datasize > 0) {
/* cap at MAXPHYS, the largest single physical transfer we support */
1984 if (mu->mu_datasize > MAXPHYS) {
1988 if (((kbuf = malloc(mu->mu_datasize, M_DEVBUF, M_WAITOK)) == NULL) ||
1989 (error = copyin(mu->mu_buf, kbuf, mu->mu_datasize)))
1991 debug(0, "got kernel buffer");
1994 /* get a command slot */
1995 if (mlx_getslot(mc))
1997 debug(0, "got a slot");
1999 if (mu->mu_datasize > 0) {
2001 /* range check the pointer to physical buffer address */
/* mu_bufptr must leave room for a 4-byte address within the mailbox */
2002 if ((mu->mu_bufptr < 0) || (mu->mu_bufptr > (sizeof(mu->mu_command) -
2003 sizeof(u_int32_t)))) {
2009 /* map the command so the controller can see it */
2011 mc->mc_length = mu->mu_datasize;
2012 mc->mc_private = mu;
/* mlx_user_cb patches the bus address in and runs the command */
2013 error = bus_dmamap_load(sc->mlx_buffer_dmat, mc->mc_dmamap, mc->mc_data,
2014 mc->mc_length, mlx_user_cb, mc, BUS_DMA_NOWAIT);
2016 /* copy out status and data */
2017 mu->mu_status = mc->mc_status;
2018 if ((mu->mu_datasize > 0) &&
2019 ((error = copyout(kbuf, mu->mu_buf, mu->mu_datasize))))
2027 free(kbuf, M_DEVBUF);
2031 /********************************************************************************
2032 ********************************************************************************
2033 Command I/O to Controller
2034 ********************************************************************************
2035 ********************************************************************************/
2037 /********************************************************************************
2038 * Find a free command slot for (mc).
2040 * Don't hand out a slot to a normal-priority command unless there are at least
2041 * 4 slots free for priority commands.
2044 mlx_getslot(struct mlx_command *mc)
2046 struct mlx_softc *sc = mc->mc_sc;
2052 * Enforce slot-usage limit, if we have the required information.
2054 if (sc->mlx_enq2 != NULL) {
2055 limit = sc->mlx_enq2->me_max_commands;
/* normal commands may only use (limit - 4) slots; priority commands
 * may use them all */
2059 if (sc->mlx_busycmds >= ((mc->mc_flags & MLX_CMD_PRIORITY) ? limit : limit - 4))
2063 * Allocate an outstanding command slot
2065 * XXX linear search is slow
2068 for (slot = 0; slot < limit; slot++) {
2069 debug(2, "try slot %d", slot);
2070 if (sc->mlx_busycmd[slot] == NULL)
2074 sc->mlx_busycmd[slot] = mc;
2083 debug(2, "got slot %d", slot);
2088 /********************************************************************************
2089 * Map/unmap (mc)'s data in the controller's addressable space.
/* Populates the per-slot scatter/gather table from the DMA segment list and
 * records the S/G physical address, entry count and first-segment address in
 * the command, then syncs the data buffers for the upcoming transfer. */
2092 mlx_setup_dmamap(struct mlx_command *mc, bus_dma_segment_t *segs, int nsegments,
2095 struct mlx_softc *sc = mc->mc_sc;
2096 struct mlx_sgentry *sg;
2101 /* XXX should be unnecessary */
2102 if (sc->mlx_enq2 && (nsegments > sc->mlx_enq2->me_max_sg))
2103 panic("MLX: too many s/g segments (%d, max %d)", nsegments,
2104 sc->mlx_enq2->me_max_sg);
2106 /* get base address of s/g table */
/* each slot owns a fixed MLX_NSEG-entry region of the shared table */
2107 sg = sc->mlx_sgtable + (mc->mc_slot * MLX_NSEG);
2109 /* save s/g table information in command */
2110 mc->mc_nsgent = nsegments;
2111 mc->mc_sgphys = sc->mlx_sgbusaddr +
2112 (mc->mc_slot * MLX_NSEG * sizeof(struct mlx_sgentry));
2113 mc->mc_dataphys = segs[0].ds_addr;
2115 /* populate s/g table */
2116 for (i = 0; i < nsegments; i++, sg++) {
2117 sg->sg_addr = segs[i].ds_addr;
2118 sg->sg_count = segs[i].ds_len;
2121 /* Make sure the buffers are visible on the bus. */
2122 if (mc->mc_flags & MLX_CMD_DATAIN)
2123 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap,
2124 BUS_DMASYNC_PREREAD);
2125 if (mc->mc_flags & MLX_CMD_DATAOUT)
2126 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap,
2127 BUS_DMASYNC_PREWRITE);
/* Undo the DMA mapping set up by mlx_setup_dmamap(): perform the
 * post-transfer syncs and unload the map.  No-op for dataless commands. */
2131 mlx_unmapcmd(struct mlx_command *mc)
2133 struct mlx_softc *sc = mc->mc_sc;
2137 /* if the command involved data at all */
2138 if (mc->mc_data != NULL) {
2140 if (mc->mc_flags & MLX_CMD_DATAIN)
2141 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTREAD);
2142 if (mc->mc_flags & MLX_CMD_DATAOUT)
2143 bus_dmamap_sync(sc->mlx_buffer_dmat, mc->mc_dmamap, BUS_DMASYNC_POSTWRITE);
2145 bus_dmamap_unload(sc->mlx_buffer_dmat, mc->mc_dmamap);
2149 /********************************************************************************
2150 * Try to deliver (mc) to the controller.
2152 * Can be called at any interrupt level, with or without interrupts enabled.
2155 mlx_start(struct mlx_command *mc)
2157 struct mlx_softc *sc = mc->mc_sc;
2162 /* save the slot number as ident so we can handle this command when complete */
/* mailbox byte 1 is the command identifier echoed back at completion */
2163 mc->mc_mailbox[0x1] = mc->mc_slot;
2165 /* mark the command as currently being processed */
2166 mc->mc_status = MLX_STATUS_BUSY;
2168 /* set a default 60-second timeout XXX tunable? XXX not currently used */
2169 mc->mc_timeout = time_second + 60;
2171 /* spin waiting for the mailbox */
/* bounded spin: up to 100000 attempts through the per-interface tryqueue */
2172 for (i = 100000, done = 0; (i > 0) && !done; i--) {
2174 if (sc->mlx_tryqueue(sc, mc)) {
2176 /* move command to work queue */
2177 TAILQ_INSERT_TAIL(&sc->mlx_work, mc, mc_link);
2179 splx(s); /* drop spl to allow completion interrupts */
2182 /* command is enqueued */
2187 * We couldn't get the controller to take the command. Revoke the slot
2188 * that the command was given and return it with a bad status.
2190 sc->mlx_busycmd[mc->mc_slot] = NULL;
2191 device_printf(sc->mlx_dev, "controller wedged (not taking commands)\n");
2192 mc->mc_status = MLX_STATUS_WEDGED;
2197 /********************************************************************************
2198 * Poll the controller (sc) for completed commands.
2199 * Update command status and free slots for reuse. If any slots were freed,
2200 * new commands may be posted.
2202 * Returns nonzero if one or more commands were completed.
2205 mlx_done(struct mlx_softc *sc)
2207 struct mlx_command *mc;
2216 /* loop collecting completed commands */
2219 /* poll for a completed command's identifier and status */
/* per-interface findcomplete recovers (slot, status) and acks the event */
2220 if (sc->mlx_findcomplete(sc, &slot, &status)) {
2222 mc = sc->mlx_busycmd[slot]; /* find command */
2223 if (mc != NULL) { /* paranoia */
2224 if (mc->mc_status == MLX_STATUS_BUSY) {
2225 mc->mc_status = status; /* save status */
2227 /* free slot for reuse */
2228 sc->mlx_busycmd[slot] = NULL;
/* completion for a command we already completed -- firmware/driver bug */
2231 device_printf(sc->mlx_dev, "duplicate done event for slot %d\n", slot);
2234 device_printf(sc->mlx_dev, "done event for nonbusy slot %d\n", slot);
2242 /* if we've completed any commands, try posting some more */
2246 /* handle completion and timeouts */
2252 /********************************************************************************
2253 * Perform post-completion processing for commands on (sc).
2256 mlx_complete(struct mlx_softc *sc)
2258 struct mlx_command *mc, *nc;
2263 /* avoid reentrancy XXX might want to signal and request a restart */
2264 if (mlx_lock_tas(sc, MLX_LOCK_COMPLETING))
2270 /* scan the list of busy/done commands */
2271 mc = TAILQ_FIRST(&sc->mlx_work);
2272 while (mc != NULL) {
/* remember the next entry -- the current one may be removed below */
2273 nc = TAILQ_NEXT(mc, mc_link);
2275 /* Command has been completed in some fashion */
2276 if (mc->mc_status != MLX_STATUS_BUSY) {
2278 /* unmap the command's data buffer */
2281 * Does the command have a completion handler?
2283 if (mc->mc_complete != NULL) {
2284 /* remove from list and give to handler */
2285 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
2286 mc->mc_complete(mc);
2289 * Is there a sleeper waiting on this command?
2291 } else if (mc->mc_private != NULL) { /* sleeping caller wants to know about it */
2293 /* remove from list and wake up sleeper */
2294 TAILQ_REMOVE(&sc->mlx_work, mc, mc_link);
2295 wakeup_one(mc->mc_private);
2298 * Leave the command for a caller that's polling for it.
2307 mlx_lock_clr(sc, MLX_LOCK_COMPLETING);
2310 /********************************************************************************
2311 ********************************************************************************
2312 Command Buffer Management
2313 ********************************************************************************
2314 ********************************************************************************/
2316 /********************************************************************************
2317 * Get a new command buffer.
2319 * This may return NULL in low-memory cases.
2321 * Note that using malloc() is expensive (the command buffer is << 1 page) but
2322 * necessary if we are to be a loadable module before the zone allocator is fixed.
2324 * If possible, we recycle a command buffer that's been used before.
2326 * XXX Note that command buffers are not cleaned out - it is the caller's
2327 * responsibility to ensure that all required fields are filled in before
2330 static struct mlx_command *
2331 mlx_alloccmd(struct mlx_softc *sc)
2333 struct mlx_command *mc;
/* recycle a buffer from the freelist if one is available */
2340 if ((mc = TAILQ_FIRST(&sc->mlx_freecmds)) != NULL)
2341 TAILQ_REMOVE(&sc->mlx_freecmds, mc, mc_link);
2344 /* allocate a new command buffer? */
2346 mc = (struct mlx_command *)malloc(sizeof(*mc), M_DEVBUF, M_NOWAIT | M_ZERO);
/* new buffers need a DMA map for their (future) data transfers */
2349 error = bus_dmamap_create(sc->mlx_buffer_dmat, 0, &mc->mc_dmamap);
2359 /********************************************************************************
2360 * Release a command buffer for recycling.
2362 * XXX It might be a good idea to limit the number of commands we save for reuse
2363 * if it's shown that this list bloats out massively.
2366 mlx_releasecmd(struct mlx_command *mc)
/* push onto the head of the freelist so recently-used (cache-warm) buffers
 * are reused first */
2373 TAILQ_INSERT_HEAD(&mc->mc_sc->mlx_freecmds, mc, mc_link);
2377 /********************************************************************************
2378 * Permanently discard a command buffer.
/* Destroys the command's DMA map (the buffer itself is freed on a line not
 * visible in this extract). */
2381 mlx_freecmd(struct mlx_command *mc)
2383 struct mlx_softc *sc = mc->mc_sc;
2386 bus_dmamap_destroy(sc->mlx_buffer_dmat, mc->mc_dmamap);
2391 /********************************************************************************
2392 ********************************************************************************
2393 Type 3 interface accessor methods
2394 ********************************************************************************
2395 ********************************************************************************/
2397 /********************************************************************************
2398 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2399 * (the controller is not ready to take a command).
2401 * Must be called at splbio or in a fashion that prevents reentry.
2404 mlx_v3_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
2410 /* ready for our command? */
/* IDB_FULL set means the inbound mailbox is still owned by the controller */
2411 if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_FULL)) {
2412 /* copy mailbox data to window */
/* the V3 mailbox is 13 bytes wide */
2413 for (i = 0; i < 13; i++)
2414 MLX_V3_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);
2417 MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_FULL);
2423 /********************************************************************************
2424 * See if a command has been completed, if so acknowledge its completion
2425 * and recover the slot number and status code.
2427 * Must be called at splbio or in a fashion that prevents reentry.
2430 mlx_v3_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status)
2435 /* status available? */
2436 if (MLX_V3_GET_ODBR(sc) & MLX_V3_ODB_SAVAIL) {
2437 *slot = MLX_V3_GET_STATUS_IDENT(sc); /* get command identifier */
2438 *status = MLX_V3_GET_STATUS(sc); /* get status */
2440 /* acknowledge completion */
/* clear the outbound status-available bit, then tell the controller we
 * have consumed the status */
2441 MLX_V3_PUT_ODBR(sc, MLX_V3_ODB_SAVAIL);
2442 MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK);
2448 /********************************************************************************
2449 * Enable/disable interrupts as requested. (No acknowledge required)
2451 * Must be called at splbio or in a fashion that prevents reentry.
2454 mlx_v3_intaction(struct mlx_softc *sc, int action)
2459 case MLX_INTACTION_DISABLE:
/* V3: interrupt enable register is a simple 0/1 gate */
2460 MLX_V3_PUT_IER(sc, 0);
2461 sc->mlx_state &= ~MLX_STATE_INTEN;
2463 case MLX_INTACTION_ENABLE:
2464 MLX_V3_PUT_IER(sc, 1);
/* MLX_STATE_INTEN steers mlx_enquire_cb between sleep and poll modes */
2465 sc->mlx_state |= MLX_STATE_INTEN;
2470 /********************************************************************************
2471 * Poll for firmware error codes during controller initialisation.
2472 * Returns 0 if initialisation is complete, 1 if still in progress but no
2473 * error has been fetched, 2 if an error has been retrieved.
2476 mlx_v3_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2)
/* NOTE(review): `initted` is function-static and therefore shared across all
 * controller instances; with more than one V3 adapter only the first gets
 * the first-time acknowledge below.  Consider moving this into the softc. */
2479 static int initted = 0;
2483 /* first time around, clear any hardware completion status */
2485 MLX_V3_PUT_IDBR(sc, MLX_V3_IDB_SACK);
2490 /* init in progress? */
2491 if (!(MLX_V3_GET_IDBR(sc) & MLX_V3_IDB_INIT_BUSY))
2494 /* test error value */
2495 fwerror = MLX_V3_GET_FWERROR(sc);
2496 if (!(fwerror & MLX_V3_FWERROR_PEND))
2499 /* mask status pending bit, fetch status */
2500 *error = fwerror & ~MLX_V3_FWERROR_PEND;
2501 *param1 = MLX_V3_GET_FWERROR_PARAM1(sc);
2502 *param2 = MLX_V3_GET_FWERROR_PARAM2(sc);
/* acknowledge the error so the firmware can post the next one */
2505 MLX_V3_PUT_FWERROR(sc, 0);
2510 /********************************************************************************
2511 ********************************************************************************
2512 Type 4 interface accessor methods
2513 ********************************************************************************
2514 ********************************************************************************/
2516 /********************************************************************************
2517 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2518 * (the controller is not ready to take a command).
2520 * Must be called at splbio or in a fashion that prevents reentry.
2523 mlx_v4_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
2529 /* ready for our command? */
2530 if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_FULL)) {
2531 /* copy mailbox data to window */
/* the V4 mailbox is 13 bytes wide, same layout as V3 */
2532 for (i = 0; i < 13; i++)
2533 MLX_V4_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);
2535 /* memory-mapped controller, so issue a write barrier to ensure the mailbox is filled */
2536 bus_space_barrier(sc->mlx_btag, sc->mlx_bhandle, MLX_V4_MAILBOX, MLX_V4_MAILBOX_LENGTH,
2537 BUS_SPACE_BARRIER_WRITE);
/* ring the doorbell only after the barrier guarantees mailbox visibility */
2540 MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_HWMBOX_CMD);
2546 /********************************************************************************
2547 * See if a command has been completed, if so acknowledge its completion
2548 * and recover the slot number and status code.
2550 * Must be called at splbio or in a fashion that prevents reentry.
2553 mlx_v4_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status)
2558 /* status available? */
2559 if (MLX_V4_GET_ODBR(sc) & MLX_V4_ODB_HWSAVAIL) {
/* read ident/status BEFORE acknowledging — the ack lets firmware reuse the slot */
2560 *slot = MLX_V4_GET_STATUS_IDENT(sc); /* get command identifier */
2561 *status = MLX_V4_GET_STATUS(sc); /* get status */
2563 /* acknowledge completion */
/* ack both doorbells: outbound (status consumed) and inbound (status ack) */
2564 MLX_V4_PUT_ODBR(sc, MLX_V4_ODB_HWMBOX_ACK);
2565 MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK);
2571 /********************************************************************************
2572 * Enable/disable interrupts as requested.
2574 * Must be called at splbio or in a fashion that prevents reentry.
/*
 * V4 interface: the IER uses a mask-plus-disable-bit encoding; setting
 * MLX_V4_IER_DISINT masks interrupts, clearing it enables them.
 */
2577 mlx_v4_intaction(struct mlx_softc *sc, int action)
2582 case MLX_INTACTION_DISABLE:
/* DISINT set -> interrupts masked */
2583 MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK | MLX_V4_IER_DISINT);
2584 sc->mlx_state &= ~MLX_STATE_INTEN;
2586 case MLX_INTACTION_ENABLE:
/* DISINT clear -> interrupts delivered */
2587 MLX_V4_PUT_IER(sc, MLX_V4_IER_MASK & ~MLX_V4_IER_DISINT);
2588 sc->mlx_state |= MLX_STATE_INTEN;
2593 /********************************************************************************
2594 * Poll for firmware error codes during controller initialisation.
2595 * Returns 0 if initialisation is complete, 1 if still in progress but no
2596 * error has been fetched, 2 if an error has been retrieved.
2599 mlx_v4_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2)
/*
 * NOTE(review): as with the V3 variant, `initted` is function-static and
 * therefore shared across all V4 controller instances — confirm behaviour
 * with more than one adapter.
 */
2602 static int initted = 0;
2606 /* first time around, clear any hardware completion status */
2608 MLX_V4_PUT_IDBR(sc, MLX_V4_IDB_SACK);
2613 /* init in progress? */
2614 if (!(MLX_V4_GET_IDBR(sc) & MLX_V4_IDB_INIT_BUSY))
2617 /* test error value */
2618 fwerror = MLX_V4_GET_FWERROR(sc);
2619 if (!(fwerror & MLX_V4_FWERROR_PEND))
2622 /* mask status pending bit, fetch status */
2623 *error = fwerror & ~MLX_V4_FWERROR_PEND;
2624 *param1 = MLX_V4_GET_FWERROR_PARAM1(sc);
2625 *param2 = MLX_V4_GET_FWERROR_PARAM2(sc);
/* writing 0 back acknowledges the error so the firmware can post the next one */
2628 MLX_V4_PUT_FWERROR(sc, 0);
2633 /********************************************************************************
2634 ********************************************************************************
2635 Type 5 interface accessor methods
2636 ********************************************************************************
2637 ********************************************************************************/
2639 /********************************************************************************
2640 * Try to give (mc) to the controller. Returns 1 if successful, 0 on failure
2641 * (the controller is not ready to take a command).
2643 * Must be called at splbio or in a fashion that prevents reentry.
2646 mlx_v5_tryqueue(struct mlx_softc *sc, struct mlx_command *mc)
2652 /* ready for our command? */
/* note inverted polarity vs. V4: V5 sets EMPTY when the mailbox is free */
2653 if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_EMPTY) {
2654 /* copy mailbox data to window */
/* the hardware mailbox is 13 bytes wide */
2655 for (i = 0; i < 13; i++)
2656 MLX_V5_PUT_MAILBOX(sc, i, mc->mc_mailbox[i]);
/* ring the doorbell to hand the mailbox to the controller */
2659 MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_HWMBOX_CMD);
2665 /********************************************************************************
2666 * See if a command has been completed, if so acknowledge its completion
2667 * and recover the slot number and status code.
2669 * Must be called at splbio or in a fashion that prevents reentry.
2672 mlx_v5_findcomplete(struct mlx_softc *sc, u_int8_t *slot, u_int16_t *status)
2677 /* status available? */
2678 if (MLX_V5_GET_ODBR(sc) & MLX_V5_ODB_HWSAVAIL) {
/* read ident/status BEFORE acknowledging — the ack lets firmware reuse the slot */
2679 *slot = MLX_V5_GET_STATUS_IDENT(sc); /* get command identifier */
2680 *status = MLX_V5_GET_STATUS(sc); /* get status */
2682 /* acknowledge completion */
/* ack both doorbells: outbound (status consumed) and inbound (status ack) */
2683 MLX_V5_PUT_ODBR(sc, MLX_V5_ODB_HWMBOX_ACK);
2684 MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK);
2690 /********************************************************************************
2691 * Enable/disable interrupts as requested.
2693 * Must be called at splbio or in a fashion that prevents reentry.
/*
 * V5 interface: full-byte IER with an active-low-style disable bit —
 * keeping MLX_V5_IER_DISINT set masks interrupts, clearing it within the
 * 0xff mask enables them.
 */
2696 mlx_v5_intaction(struct mlx_softc *sc, int action)
2701 case MLX_INTACTION_DISABLE:
/* leave only the DISINT bit set -> interrupts masked */
2702 MLX_V5_PUT_IER(sc, 0xff & MLX_V5_IER_DISINT);
2703 sc->mlx_state &= ~MLX_STATE_INTEN;
2705 case MLX_INTACTION_ENABLE:
/* all bits except DISINT -> interrupts delivered */
2706 MLX_V5_PUT_IER(sc, 0xff & ~MLX_V5_IER_DISINT);
2707 sc->mlx_state |= MLX_STATE_INTEN;
2712 /********************************************************************************
2713 * Poll for firmware error codes during controller initialisation.
2714 * Returns 0 if initialisation is complete, 1 if still in progress but no
2715 * error has been fetched, 2 if an error has been retrieved.
2718 mlx_v5_fw_handshake(struct mlx_softc *sc, int *error, int *param1, int *param2)
/*
 * NOTE(review): `initted` is function-static and shared across all V5
 * controller instances — confirm behaviour with more than one adapter.
 */
2721 static int initted = 0;
2725 /* first time around, clear any hardware completion status */
2727 MLX_V5_PUT_IDBR(sc, MLX_V5_IDB_SACK);
2732 /* init in progress? */
/* V5 inverts the V3/V4 sense: INIT_DONE set (rather than BUSY clear) means finished */
2733 if (MLX_V5_GET_IDBR(sc) & MLX_V5_IDB_INIT_DONE)
2736 /* test for error value */
2737 fwerror = MLX_V5_GET_FWERROR(sc);
2738 if (!(fwerror & MLX_V5_FWERROR_PEND))
2741 /* mask status pending bit, fetch status */
2742 *error = fwerror & ~MLX_V5_FWERROR_PEND;
2743 *param1 = MLX_V5_GET_FWERROR_PARAM1(sc);
2744 *param2 = MLX_V5_GET_FWERROR_PARAM2(sc);
/* V5 acknowledges by writing 0xff (not 0 as on V3/V4) back to the error register */
2747 MLX_V5_PUT_FWERROR(sc, 0xff);
2752 /********************************************************************************
2753 ********************************************************************************
2755 ********************************************************************************
2756 ********************************************************************************/
2758 /********************************************************************************
2759 * Return a status message describing (mc)
/*
 * Human-readable completion messages; entries are referenced by index from
 * the `msg` field of the mlx_messages table below (see mlx_diagnose_command).
 * Do not reorder without updating that table.
 */
2761 static char *mlx_status_messages[] = {
2762 "normal completion", /* 00 */
2763 "irrecoverable data error", /* 01 */
2764 "drive does not exist, or is offline", /* 02 */
2765 "attempt to write beyond end of drive", /* 03 */
2766 "bad data encountered", /* 04 */
2767 "invalid log entry request", /* 05 */
2768 "attempt to rebuild online drive", /* 06 */
2769 "new disk failed during rebuild", /* 07 */
2770 "invalid channel/target", /* 08 */
2771 "rebuild/check already in progress", /* 09 */
2772 "one or more disks are dead", /* 10 */
2773 "invalid or non-redundant drive", /* 11 */
2774 "channel is busy", /* 12 */
2775 "channel is not stopped", /* 13 */
2776 "rebuild successfully terminated", /* 14 */
2777 "unsupported command", /* 15 */
2778 "check condition received", /* 16 */
2779 "device is busy", /* 17 */
2780 "selection or command timeout", /* 18 */
2781 "command terminated abnormally", /* 19 */
/*
 * (command, firmware status) -> message-index map consumed by
 * mlx_diagnose_command().  `msg` indexes mlx_status_messages above.
 * NOTE(review): the lookup loop terminates on a command value of -1;
 * the sentinel entry is not visible in this view — confirm the table
 * is terminated.
 */
2790 } mlx_messages[] = {
2791 {MLX_CMD_READSG, 0x0001, 1},
2792 {MLX_CMD_READSG, 0x0002, 1},
2793 {MLX_CMD_READSG, 0x0105, 3},
2794 {MLX_CMD_READSG, 0x010c, 4},
2795 {MLX_CMD_WRITESG, 0x0001, 1},
2796 {MLX_CMD_WRITESG, 0x0002, 1},
2797 {MLX_CMD_WRITESG, 0x0105, 3},
2798 {MLX_CMD_READSG_OLD, 0x0001, 1},
2799 {MLX_CMD_READSG_OLD, 0x0002, 1},
2800 {MLX_CMD_READSG_OLD, 0x0105, 3},
2801 {MLX_CMD_WRITESG_OLD, 0x0001, 1},
2802 {MLX_CMD_WRITESG_OLD, 0x0002, 1},
2803 {MLX_CMD_WRITESG_OLD, 0x0105, 3},
2804 {MLX_CMD_LOGOP, 0x0105, 5},
2805 {MLX_CMD_REBUILDASYNC, 0x0002, 6},
2806 {MLX_CMD_REBUILDASYNC, 0x0004, 7},
2807 {MLX_CMD_REBUILDASYNC, 0x0105, 8},
2808 {MLX_CMD_REBUILDASYNC, 0x0106, 9},
2809 {MLX_CMD_REBUILDASYNC, 0x0107, 14},
2810 {MLX_CMD_CHECKASYNC, 0x0002, 10},
2811 {MLX_CMD_CHECKASYNC, 0x0105, 11},
2812 {MLX_CMD_CHECKASYNC, 0x0106, 9},
2813 {MLX_CMD_STOPCHANNEL, 0x0106, 12},
2814 {MLX_CMD_STOPCHANNEL, 0x0105, 8},
2815 {MLX_CMD_STARTCHANNEL, 0x0005, 13},
2816 {MLX_CMD_STARTCHANNEL, 0x0105, 8},
2817 {MLX_CMD_DIRECT_CDB, 0x0002, 16},
2818 {MLX_CMD_DIRECT_CDB, 0x0008, 17},
2819 {MLX_CMD_DIRECT_CDB, 0x000e, 18},
2820 {MLX_CMD_DIRECT_CDB, 0x000f, 19},
2821 {MLX_CMD_DIRECT_CDB, 0x0105, 8},
/*
 * Map (mc)'s command opcode and completion status to a human-readable
 * string via the mlx_messages table.  A table entry with command == 0
 * acts as a wildcard matching any opcode.
 */
2828 mlx_diagnose_command(struct mlx_command *mc)
/*
 * NOTE(review): `unkmsg` is function-static, so the fallback string is
 * overwritten by the next unknown response and is not reentrant-safe —
 * acceptable only if callers print it immediately; confirm.
 */
2830 static char unkmsg[80];
2833 /* look up message in table */
2834 for (i = 0; mlx_messages[i].command != -1; i++)
2835 if (((mc->mc_mailbox[0] == mlx_messages[i].command) || (mlx_messages[i].command == 0)) &&
2836 (mc->mc_status == mlx_messages[i].status))
2837 return(mlx_status_messages[mlx_messages[i].msg]);
/* no table match: fall back to a generic formatted message */
2839 sprintf(unkmsg, "unknown response 0x%x for command 0x%x", (int)mc->mc_status, (int)mc->mc_mailbox[0]);
2843 /*******************************************************************************
2844 * Print a string describing the controller (sc)
/* hardware-id -> model-name table; terminated by a NULL name entry */
2850 } mlx_controller_names[] = {
/*
 * Print a one-line summary of the controller (model, channels, firmware
 * revision, RAM), followed by a detailed dump of the ENQUIRY2 data.
 * Relies on sc->mlx_enq2 having been populated before this is called.
 */
2865 mlx_describe_controller(struct mlx_softc *sc)
/* NOTE(review): static buffer — fallback model string is not reentrant-safe */
2867 static char buf[80];
/* match the low byte of the hardware id against the name table */
2871 for (i = 0, model = NULL; mlx_controller_names[i].name != NULL; i++) {
2872 if ((sc->mlx_enq2->me_hardware_id & 0xff) == mlx_controller_names[i].hwid) {
2873 model = mlx_controller_names[i].name;
2877 if (model == NULL) {
/* unknown hardware id: synthesise a " model 0xNN" string instead */
2878 sprintf(buf, " model 0x%x", sc->mlx_enq2->me_hardware_id & 0xff);
/* firmware id is packed one field per byte: major/minor/letter/build */
2881 device_printf(sc->mlx_dev, "DAC%s, %d channel%s, firmware %d.%02d-%c-%02d, %dMB RAM\n",
2883 sc->mlx_enq2->me_actual_channels,
2884 sc->mlx_enq2->me_actual_channels > 1 ? "s" : "",
2885 sc->mlx_enq2->me_firmware_id & 0xff,
2886 (sc->mlx_enq2->me_firmware_id >> 8) & 0xff,
2887 (sc->mlx_enq2->me_firmware_id >> 24) & 0xff,
2888 (sc->mlx_enq2->me_firmware_id >> 16) & 0xff,
2889 sc->mlx_enq2->me_mem_size / (1024 * 1024));
/*
 * Detailed ENQUIRY2 dump below.  NOTE(review): this is presumably gated
 * on a verbosity check (e.g. bootverbose) on a line not visible in this
 * view — confirm before assuming it always prints.
 */
2892 device_printf(sc->mlx_dev, " Hardware ID 0x%08x\n", sc->mlx_enq2->me_hardware_id);
2893 device_printf(sc->mlx_dev, " Firmware ID 0x%08x\n", sc->mlx_enq2->me_firmware_id);
2894 device_printf(sc->mlx_dev, " Configured/Actual channels %d/%d\n", sc->mlx_enq2->me_configured_channels,
2895 sc->mlx_enq2->me_actual_channels);
2896 device_printf(sc->mlx_dev, " Max Targets %d\n", sc->mlx_enq2->me_max_targets);
2897 device_printf(sc->mlx_dev, " Max Tags %d\n", sc->mlx_enq2->me_max_tags);
2898 device_printf(sc->mlx_dev, " Max System Drives %d\n", sc->mlx_enq2->me_max_sys_drives);
2899 device_printf(sc->mlx_dev, " Max Arms %d\n", sc->mlx_enq2->me_max_arms);
2900 device_printf(sc->mlx_dev, " Max Spans %d\n", sc->mlx_enq2->me_max_spans);
2901 device_printf(sc->mlx_dev, " DRAM/cache/flash/NVRAM size %d/%d/%d/%d\n", sc->mlx_enq2->me_mem_size,
2902 sc->mlx_enq2->me_cache_size, sc->mlx_enq2->me_flash_size, sc->mlx_enq2->me_nvram_size);
2903 device_printf(sc->mlx_dev, " DRAM type %d\n", sc->mlx_enq2->me_mem_type);
2904 device_printf(sc->mlx_dev, " Clock Speed %dns\n", sc->mlx_enq2->me_clock_speed);
2905 device_printf(sc->mlx_dev, " Hardware Speed %dns\n", sc->mlx_enq2->me_hardware_speed);
2906 device_printf(sc->mlx_dev, " Max Commands %d\n", sc->mlx_enq2->me_max_commands);
2907 device_printf(sc->mlx_dev, " Max SG Entries %d\n", sc->mlx_enq2->me_max_sg);
2908 device_printf(sc->mlx_dev, " Max DP %d\n", sc->mlx_enq2->me_max_dp);
2909 device_printf(sc->mlx_dev, " Max IOD %d\n", sc->mlx_enq2->me_max_iod);
2910 device_printf(sc->mlx_dev, " Max Comb %d\n", sc->mlx_enq2->me_max_comb);
2911 device_printf(sc->mlx_dev, " Latency %ds\n", sc->mlx_enq2->me_latency);
2912 device_printf(sc->mlx_dev, " SCSI Timeout %ds\n", sc->mlx_enq2->me_scsi_timeout);
2913 device_printf(sc->mlx_dev, " Min Free Lines %d\n", sc->mlx_enq2->me_min_freelines);
2914 device_printf(sc->mlx_dev, " Rate Constant %d\n", sc->mlx_enq2->me_rate_const);
2915 device_printf(sc->mlx_dev, " MAXBLK %d\n", sc->mlx_enq2->me_maxblk);
2916 device_printf(sc->mlx_dev, " Blocking Factor %d sectors\n", sc->mlx_enq2->me_blocking_factor);
2917 device_printf(sc->mlx_dev, " Cache Line Size %d blocks\n", sc->mlx_enq2->me_cacheline);
/* decode the SCSI capability byte: bit 4 = differential, bits 2-3 = speed, bits 0-1 = width */
2918 device_printf(sc->mlx_dev, " SCSI Capability %s%dMHz, %d bit\n",
2919 sc->mlx_enq2->me_scsi_cap & (1<<4) ? "differential " : "",
2920 (1 << ((sc->mlx_enq2->me_scsi_cap >> 2) & 3)) * 10,
2921 8 << (sc->mlx_enq2->me_scsi_cap & 0x3));
2922 device_printf(sc->mlx_dev, " Firmware Build Number %d\n", sc->mlx_enq2->me_firmware_build);
2923 device_printf(sc->mlx_dev, " Fault Management Type %d\n", sc->mlx_enq2->me_fault_mgmt_type);
/* %b is the FreeBSD bitfield formatter; the string encodes the bit names */
2924 device_printf(sc->mlx_dev, " Features %b\n", sc->mlx_enq2->me_firmware_features,
2925 "\20\4Background Init\3Read Ahead\2MORE\1Cluster\n");
2930 /*******************************************************************************
2931 * Emit a string describing the firmware handshake status code, and return a flag
2932 * indicating whether the code represents a fatal error.
2934 * Error code interpretations are from the Linux driver, and don't directly match
2935 * the messages printed by Mylex's BIOS. This may change if documentation on the
2936 * codes is forthcoming.
/*
 * NOTE(review): the bodies below are presumably the arms of a switch on
 * `error` — the case labels are not visible in this view, so the mapping
 * of codes to messages cannot be confirmed from here.
 */
2939 mlx_fw_message(struct mlx_softc *sc, int error, int param1, int param2)
2943 device_printf(sc->mlx_dev, "physical drive %d:%d not responding\n", param2, param1);
2946 /* we could be neater about this and give some indication when we receive more of them */
/* report drive spin-up only once per controller, latched via MLX_SPINUP_REPORTED */
2947 if (!(sc->mlx_flags & MLX_SPINUP_REPORTED)) {
2948 device_printf(sc->mlx_dev, "spinning up drives...\n");
2949 sc->mlx_flags |= MLX_SPINUP_REPORTED;
2953 device_printf(sc->mlx_dev, "configuration checksum error\n");
2956 device_printf(sc->mlx_dev, "mirror race recovery failed\n");
2959 device_printf(sc->mlx_dev, "mirror race recovery in progress\n");
2962 device_printf(sc->mlx_dev, "physical drive %d:%d COD mismatch\n", param2, param1);
2965 device_printf(sc->mlx_dev, "logical drive installation aborted\n");
2968 device_printf(sc->mlx_dev, "mirror race on a critical system drive\n");
2971 device_printf(sc->mlx_dev, "new controller configuration found\n");
2974 device_printf(sc->mlx_dev, "FATAL MEMORY PARITY ERROR\n");
/* default arm: dump the raw code triple for codes we don't recognise */
2977 device_printf(sc->mlx_dev, "unknown firmware initialisation error %02x:%02x:%02x\n", error, param1, param2);
2983 /********************************************************************************
2984 ********************************************************************************
2986 ********************************************************************************
2987 ********************************************************************************/
2989 /********************************************************************************
2990 * Find the disk whose unit number is (unit) on this controller
2992 static struct mlx_sysdrive *
2993 mlx_findunit(struct mlx_softc *sc, int unit)
2997 /* search system drives */
2998 for (i = 0; i < MLX_MAXDRIVES; i++) {
2999 /* is this one attached? */
3000 if (sc->mlx_sysdrive[i].ms_disk != 0) {
3001 /* is this the one? */
3002 if (unit == device_get_unit(sc->mlx_sysdrive[i].ms_disk))
3003 return(&sc->mlx_sysdrive[i]);