1 /*-
2  * Copyright (c) 2010 Alexander Motin <mav@FreeBSD.org>
3  * All rights reserved.
4  *
5  * Redistribution and use in source and binary forms, with or without
6  * modification, are permitted provided that the following conditions
7  * are met:
8  * 1. Redistributions of source code must retain the above copyright
9  *    notice, this list of conditions and the following disclaimer,
10  *    without modification, immediately at the beginning of the file.
11  * 2. Redistributions in binary form must reproduce the above copyright
12  *    notice, this list of conditions and the following disclaimer in the
13  *    documentation and/or other materials provided with the distribution.
14  *
15  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16  * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17  * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18  * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19  * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20  * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21  * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22  * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23  * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24  * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
25  */
26
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
29
30 #include <sys/param.h>
31 #include <sys/module.h>
32 #include <sys/systm.h>
33 #include <sys/kernel.h>
34 #include <sys/ata.h>
35 #include <sys/bus.h>
36 #include <sys/endian.h>
37 #include <sys/malloc.h>
38 #include <sys/lock.h>
39 #include <sys/mutex.h>
40 #include <vm/uma.h>
41 #include <machine/stdarg.h>
42 #include <machine/resource.h>
43 #include <machine/bus.h>
44 #include <sys/rman.h>
45 #include "mvs.h"
46
47 #include <cam/cam.h>
48 #include <cam/cam_ccb.h>
49 #include <cam/cam_sim.h>
50 #include <cam/cam_xpt_sim.h>
51 #include <cam/cam_debug.h>
52
53 /* local prototypes */
54 static int mvs_ch_init(device_t dev);
55 static int mvs_ch_deinit(device_t dev);
56 static int mvs_ch_suspend(device_t dev);
57 static int mvs_ch_resume(device_t dev);
58 static void mvs_dmainit(device_t dev);
59 static void mvs_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error);
60 static void mvs_dmafini(device_t dev);
61 static void mvs_slotsalloc(device_t dev);
62 static void mvs_slotsfree(device_t dev);
63 static void mvs_setup_edma_queues(device_t dev);
64 static void mvs_set_edma_mode(device_t dev, enum mvs_edma_mode mode);
65 static void mvs_ch_pm(void *arg);
66 static void mvs_ch_intr_locked(void *data);
67 static void mvs_ch_intr(void *data);
68 static void mvs_reset(device_t dev);
69 static void mvs_softreset(device_t dev, union ccb *ccb);
70
71 static int mvs_sata_connect(struct mvs_channel *ch);
72 static int mvs_sata_phy_reset(device_t dev);
73 static int mvs_wait(device_t dev, u_int s, u_int c, int t);
74 static void mvs_tfd_read(device_t dev, union ccb *ccb);
75 static void mvs_tfd_write(device_t dev, union ccb *ccb);
76 static void mvs_legacy_intr(device_t dev);
77 static void mvs_crbq_intr(device_t dev);
78 static void mvs_begin_transaction(device_t dev, union ccb *ccb);
79 static void mvs_legacy_execute_transaction(struct mvs_slot *slot);
80 static void mvs_timeout(struct mvs_slot *slot);
81 static void mvs_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error);
82 static void mvs_requeue_frozen(device_t dev);
83 static void mvs_execute_transaction(struct mvs_slot *slot);
84 static void mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et);
85
86 static void mvs_issue_read_log(device_t dev);
87 static void mvs_process_read_log(device_t dev, union ccb *ccb);
88
89 static void mvsaction(struct cam_sim *sim, union ccb *ccb);
90 static void mvspoll(struct cam_sim *sim);
91
92 MALLOC_DEFINE(M_MVS, "MVS driver", "MVS driver data buffers");
93
94 static int
95 mvs_ch_probe(device_t dev)
96 {
97
98         device_set_desc_copy(dev, "Marvell SATA channel");
99         return (0);
100 }
101
102 static int
103 mvs_ch_attach(device_t dev)
104 {
105         struct mvs_controller *ctlr = device_get_softc(device_get_parent(dev));
106         struct mvs_channel *ch = device_get_softc(dev);
107         struct cam_devq *devq;
108         int rid, error, i, sata_rev = 0;
109
110         ch->dev = dev;
111         ch->unit = (intptr_t)device_get_ivars(dev);
112         ch->quirks = ctlr->quirks;
113         mtx_init(&ch->mtx, "MVS channel lock", NULL, MTX_DEF);
114         resource_int_value(device_get_name(dev),
115             device_get_unit(dev), "pm_level", &ch->pm_level);
116         if (ch->pm_level > 3)
117                 callout_init_mtx(&ch->pm_timer, &ch->mtx, 0);
118         resource_int_value(device_get_name(dev),
119             device_get_unit(dev), "sata_rev", &sata_rev);
120         for (i = 0; i < 16; i++) {
121                 ch->user[i].revision = sata_rev;
122                 ch->user[i].mode = 0;
123                 ch->user[i].bytecount = (ch->quirks & MVS_Q_GENIIE) ? 8192 : 2048;
124                 ch->user[i].tags = MVS_MAX_SLOTS;
125                 ch->curr[i] = ch->user[i];
126                 if (ch->pm_level) {
127                         ch->user[i].caps = CTS_SATA_CAPS_H_PMREQ |
128                             CTS_SATA_CAPS_H_APST |
129                             CTS_SATA_CAPS_D_PMREQ | CTS_SATA_CAPS_D_APST;
130                 }
131         }
132         rid = ch->unit;
133         if (!(ch->r_mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
134             &rid, RF_ACTIVE)))
135                 return (ENXIO);
136         mvs_dmainit(dev);
137         mvs_slotsalloc(dev);
138         mvs_ch_init(dev);
139         mtx_lock(&ch->mtx);
140         rid = ATA_IRQ_RID;
141         if (!(ch->r_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ,
142             &rid, RF_SHAREABLE | RF_ACTIVE))) {
143                 device_printf(dev, "Unable to map interrupt\n");
144                 error = ENXIO;
145                 goto err0;
146         }
147         if ((bus_setup_intr(dev, ch->r_irq, ATA_INTR_FLAGS, NULL,
148             mvs_ch_intr_locked, dev, &ch->ih))) {
149                 device_printf(dev, "Unable to setup interrupt\n");
150                 error = ENXIO;
151                 goto err1;
152         }
153         /* Create the device queue for our SIM. */
154         devq = cam_simq_alloc(MVS_MAX_SLOTS - 1);
155         if (devq == NULL) {
156                 device_printf(dev, "Unable to allocate simq\n");
157                 error = ENOMEM;
158                 goto err1;
159         }
160         /* Construct SIM entry */
161         ch->sim = cam_sim_alloc(mvsaction, mvspoll, "mvsch", ch,
162             device_get_unit(dev), &ch->mtx,
163             2, (ch->quirks & MVS_Q_GENI) ? 0 : MVS_MAX_SLOTS - 1,
164             devq);
165         if (ch->sim == NULL) {
166                 cam_simq_free(devq);
167                 device_printf(dev, "unable to allocate sim\n");
168                 error = ENOMEM;
169                 goto err1;
170         }
171         if (xpt_bus_register(ch->sim, dev, 0) != CAM_SUCCESS) {
172                 device_printf(dev, "unable to register xpt bus\n");
173                 error = ENXIO;
174                 goto err2;
175         }
176         if (xpt_create_path(&ch->path, /*periph*/NULL, cam_sim_path(ch->sim),
177             CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
178                 device_printf(dev, "unable to create path\n");
179                 error = ENXIO;
180                 goto err3;
181         }
182         if (ch->pm_level > 3) {
183                 callout_reset(&ch->pm_timer,
184                     (ch->pm_level == 4) ? hz / 1000 : hz / 8,
185                     mvs_ch_pm, dev);
186         }
187         mtx_unlock(&ch->mtx);
188         return (0);
189
190 err3:
191         xpt_bus_deregister(cam_sim_path(ch->sim));
192 err2:
193         cam_sim_free(ch->sim, /*free_devq*/TRUE);
194 err1:
195         bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
196 err0:
197         bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
198         mtx_unlock(&ch->mtx);
199         mtx_destroy(&ch->mtx);
200         return (error);
201 }
202
203 static int
204 mvs_ch_detach(device_t dev)
205 {
206         struct mvs_channel *ch = device_get_softc(dev);
207
208         mtx_lock(&ch->mtx);
209         xpt_async(AC_LOST_DEVICE, ch->path, NULL);
210         xpt_free_path(ch->path);
211         xpt_bus_deregister(cam_sim_path(ch->sim));
212         cam_sim_free(ch->sim, /*free_devq*/TRUE);
213         mtx_unlock(&ch->mtx);
214
215         if (ch->pm_level > 3)
216                 callout_drain(&ch->pm_timer);
217         bus_teardown_intr(dev, ch->r_irq, ch->ih);
218         bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
219
220         mvs_ch_deinit(dev);
221         mvs_slotsfree(dev);
222         mvs_dmafini(dev);
223
224         bus_release_resource(dev, SYS_RES_MEMORY, ch->unit, ch->r_mem);
225         mtx_destroy(&ch->mtx);
226         return (0);
227 }
228
229 static int
230 mvs_ch_init(device_t dev)
231 {
232         struct mvs_channel *ch = device_get_softc(dev);
233         uint32_t reg;
234
235         /* Disable port interrupts */
236         ATA_OUTL(ch->r_mem, EDMA_IEM, 0);
237         /* Stop EDMA */
238         ch->curr_mode = MVS_EDMA_UNKNOWN;
239         mvs_set_edma_mode(dev, MVS_EDMA_OFF);
240         /* Clear and configure FIS interrupts. */
241         ATA_OUTL(ch->r_mem, SATA_FISIC, 0);
242         reg = ATA_INL(ch->r_mem, SATA_FISC);
243         reg |= SATA_FISC_FISWAIT4HOSTRDYEN_B1;
244         ATA_OUTL(ch->r_mem, SATA_FISC, reg);
245         reg = ATA_INL(ch->r_mem, SATA_FISIM);
246         reg |= SATA_FISC_FISWAIT4HOSTRDYEN_B1;
247         ATA_OUTL(ch->r_mem, SATA_FISIM, reg);
248         /* Clear SATA error register. */
249         ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff);
250         /* Clear any outstanding error interrupts. */
251         ATA_OUTL(ch->r_mem, EDMA_IEC, 0);
252         /* Unmask all error interrupts */
253         ATA_OUTL(ch->r_mem, EDMA_IEM, ~EDMA_IE_TRANSIENT);
254         return (0);
255 }
256
257 static int
258 mvs_ch_deinit(device_t dev)
259 {
260         struct mvs_channel *ch = device_get_softc(dev);
261
262         /* Stop EDMA */
263         mvs_set_edma_mode(dev, MVS_EDMA_OFF);
264         /* Disable port interrupts. */
265         ATA_OUTL(ch->r_mem, EDMA_IEM, 0);
266         return (0);
267 }
268
269 static int
270 mvs_ch_suspend(device_t dev)
271 {
272         struct mvs_channel *ch = device_get_softc(dev);
273
274         mtx_lock(&ch->mtx);
275         xpt_freeze_simq(ch->sim, 1);
276         while (ch->oslots)
277                 msleep(ch, &ch->mtx, PRIBIO, "mvssusp", hz/100);
278         mvs_ch_deinit(dev);
279         mtx_unlock(&ch->mtx);
280         return (0);
281 }
282
283 static int
284 mvs_ch_resume(device_t dev)
285 {
286         struct mvs_channel *ch = device_get_softc(dev);
287
288         mtx_lock(&ch->mtx);
289         mvs_ch_init(dev);
290         mvs_reset(dev);
291         xpt_release_simq(ch->sim, TRUE);
292         mtx_unlock(&ch->mtx);
293         return (0);
294 }
295
296 struct mvs_dc_cb_args {
297         bus_addr_t maddr;
298         int error;
299 };
300
301 static void
302 mvs_dmainit(device_t dev)
303 {
304         struct mvs_channel *ch = device_get_softc(dev);
305         struct mvs_dc_cb_args dcba;
306
307         /* EDMA command request area. */
308         if (bus_dma_tag_create(bus_get_dma_tag(dev), 1024, 0,
309             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
310             NULL, NULL, MVS_WORKRQ_SIZE, 1, MVS_WORKRQ_SIZE,
311             0, NULL, NULL, &ch->dma.workrq_tag))
312                 goto error;
313         if (bus_dmamem_alloc(ch->dma.workrq_tag, (void **)&ch->dma.workrq, 0,
314             &ch->dma.workrq_map))
315                 goto error;
316         if (bus_dmamap_load(ch->dma.workrq_tag, ch->dma.workrq_map, ch->dma.workrq,
317             MVS_WORKRQ_SIZE, mvs_dmasetupc_cb, &dcba, 0) || dcba.error) {
318                 bus_dmamem_free(ch->dma.workrq_tag, ch->dma.workrq, ch->dma.workrq_map);
319                 goto error;
320         }
321         ch->dma.workrq_bus = dcba.maddr;
322         /* EDMA command response area. */
323         if (bus_dma_tag_create(bus_get_dma_tag(dev), 256, 0,
324             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
325             NULL, NULL, MVS_WORKRP_SIZE, 1, MVS_WORKRP_SIZE,
326             0, NULL, NULL, &ch->dma.workrp_tag))
327                 goto error;
328         if (bus_dmamem_alloc(ch->dma.workrp_tag, (void **)&ch->dma.workrp, 0,
329             &ch->dma.workrp_map))
330                 goto error;
331         if (bus_dmamap_load(ch->dma.workrp_tag, ch->dma.workrp_map, ch->dma.workrp,
332             MVS_WORKRP_SIZE, mvs_dmasetupc_cb, &dcba, 0) || dcba.error) {
333                 bus_dmamem_free(ch->dma.workrp_tag, ch->dma.workrp, ch->dma.workrp_map);
334                 goto error;
335         }
336         ch->dma.workrp_bus = dcba.maddr;
337         /* Data area. */
338         if (bus_dma_tag_create(bus_get_dma_tag(dev), 2, MVS_EPRD_MAX,
339             BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR,
340             NULL, NULL,
341             MVS_SG_ENTRIES * PAGE_SIZE * MVS_MAX_SLOTS,
342             MVS_SG_ENTRIES, MVS_EPRD_MAX,
343             0, busdma_lock_mutex, &ch->mtx, &ch->dma.data_tag)) {
344                 goto error;
345         }
346         return;
347
348 error:
349         device_printf(dev, "WARNING - DMA initialization failed\n");
350         mvs_dmafini(dev);
351 }
352
353 static void
354 mvs_dmasetupc_cb(void *xsc, bus_dma_segment_t *segs, int nsegs, int error)
355 {
356         struct mvs_dc_cb_args *dcba = (struct mvs_dc_cb_args *)xsc;
357
358         if (!(dcba->error = error))
359                 dcba->maddr = segs[0].ds_addr;
360 }
361
362 static void
363 mvs_dmafini(device_t dev)
364 {
365         struct mvs_channel *ch = device_get_softc(dev);
366
367         if (ch->dma.data_tag) {
368                 bus_dma_tag_destroy(ch->dma.data_tag);
369                 ch->dma.data_tag = NULL;
370         }
371         if (ch->dma.workrp_bus) {
372                 bus_dmamap_unload(ch->dma.workrp_tag, ch->dma.workrp_map);
373                 bus_dmamem_free(ch->dma.workrp_tag, ch->dma.workrp, ch->dma.workrp_map);
374                 ch->dma.workrp_bus = 0;
375                 ch->dma.workrp_map = NULL;
376                 ch->dma.workrp = NULL;
377         }
378         if (ch->dma.workrp_tag) {
379                 bus_dma_tag_destroy(ch->dma.workrp_tag);
380                 ch->dma.workrp_tag = NULL;
381         }
382         if (ch->dma.workrq_bus) {
383                 bus_dmamap_unload(ch->dma.workrq_tag, ch->dma.workrq_map);
384                 bus_dmamem_free(ch->dma.workrq_tag, ch->dma.workrq, ch->dma.workrq_map);
385                 ch->dma.workrq_bus = 0;
386                 ch->dma.workrq_map = NULL;
387                 ch->dma.workrq = NULL;
388         }
389         if (ch->dma.workrq_tag) {
390                 bus_dma_tag_destroy(ch->dma.workrq_tag);
391                 ch->dma.workrq_tag = NULL;
392         }
393 }
394
395 static void
396 mvs_slotsalloc(device_t dev)
397 {
398         struct mvs_channel *ch = device_get_softc(dev);
399         int i;
400
401         /* Alloc and setup command/dma slots */
402         bzero(ch->slot, sizeof(ch->slot));
403         for (i = 0; i < MVS_MAX_SLOTS; i++) {
404                 struct mvs_slot *slot = &ch->slot[i];
405
406                 slot->dev = dev;
407                 slot->slot = i;
408                 slot->state = MVS_SLOT_EMPTY;
409                 slot->ccb = NULL;
410                 callout_init_mtx(&slot->timeout, &ch->mtx, 0);
411
412                 if (bus_dmamap_create(ch->dma.data_tag, 0, &slot->dma.data_map))
413                         device_printf(ch->dev, "FAILURE - create data_map\n");
414         }
415 }
416
417 static void
418 mvs_slotsfree(device_t dev)
419 {
420         struct mvs_channel *ch = device_get_softc(dev);
421         int i;
422
423         /* Free all dma slots */
424         for (i = 0; i < MVS_MAX_SLOTS; i++) {
425                 struct mvs_slot *slot = &ch->slot[i];
426
427                 callout_drain(&slot->timeout);
428                 if (slot->dma.data_map) {
429                         bus_dmamap_destroy(ch->dma.data_tag, slot->dma.data_map);
430                         slot->dma.data_map = NULL;
431                 }
432         }
433 }
434
435 static void
436 mvs_setup_edma_queues(device_t dev)
437 {
438         struct mvs_channel *ch = device_get_softc(dev);
439         uint64_t work;
440
441         /* Requests queue. */
442         work = ch->dma.workrq_bus;
443         ATA_OUTL(ch->r_mem, EDMA_REQQBAH, work >> 32);
444         ATA_OUTL(ch->r_mem, EDMA_REQQIP, work & 0xffffffff);
445         ATA_OUTL(ch->r_mem, EDMA_REQQOP, work & 0xffffffff);
446         bus_dmamap_sync(ch->dma.workrq_tag, ch->dma.workrq_map, BUS_DMASYNC_PREWRITE);
447         /* Responses queue. */
448         bzero(ch->dma.workrp, 256);
449         work = ch->dma.workrp_bus;
450         ATA_OUTL(ch->r_mem, EDMA_RESQBAH, work >> 32);
451         ATA_OUTL(ch->r_mem, EDMA_RESQIP, work & 0xffffffff);
452         ATA_OUTL(ch->r_mem, EDMA_RESQOP, work & 0xffffffff);
453         bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map, BUS_DMASYNC_PREREAD);
454         ch->out_idx = 0;
455         ch->in_idx = 0;
456 }
457
458 static void
459 mvs_set_edma_mode(device_t dev, enum mvs_edma_mode mode)
460 {
461         struct mvs_channel *ch = device_get_softc(dev);
462         int timeout;
463         uint32_t ecfg, fcfg, hc, ltm, unkn;
464
465         if (mode == ch->curr_mode)
466                 return;
467         /* If we are running, we should stop first. */
468         if (ch->curr_mode != MVS_EDMA_OFF) {
469                 ATA_OUTL(ch->r_mem, EDMA_CMD, EDMA_CMD_EDSEDMA);
470                 timeout = 0;
471                 while (ATA_INL(ch->r_mem, EDMA_CMD) & EDMA_CMD_EENEDMA) {
472                         DELAY(1000);
473                         if (timeout++ > 1000) {
474                                 device_printf(dev, "stopping EDMA engine failed\n");
475                                 break;
476                         }
477                 }
478         }
479         ch->curr_mode = mode;
480         ch->fbs_enabled = 0;
481         ch->fake_busy = 0;
482         /* Report mode to controller. Needed for correct CCC operation. */
483         MVS_EDMA(device_get_parent(dev), dev, mode);
484         /* Configure new mode. */
485         ecfg = EDMA_CFG_RESERVED | EDMA_CFG_RESERVED2 | EDMA_CFG_EHOSTQUEUECACHEEN;
486         if (ch->pm_present) {
487                 ecfg |= EDMA_CFG_EMASKRXPM;
488                 if (ch->quirks & MVS_Q_GENIIE) {
489                         ecfg |= EDMA_CFG_EEDMAFBS;
490                         ch->fbs_enabled = 1;
491                 }
492         }
493         if (ch->quirks & MVS_Q_GENI)
494                 ecfg |= EDMA_CFG_ERDBSZ;
495         else if (ch->quirks & MVS_Q_GENII)
496                 ecfg |= EDMA_CFG_ERDBSZEXT | EDMA_CFG_EWRBUFFERLEN;
497         if (ch->quirks & MVS_Q_CT)
498                 ecfg |= EDMA_CFG_ECUTTHROUGHEN;
499         if (mode != MVS_EDMA_OFF)
500                 ecfg |= EDMA_CFG_EEARLYCOMPLETIONEN;
501         if (mode == MVS_EDMA_QUEUED)
502                 ecfg |= EDMA_CFG_EQUE;
503         else if (mode == MVS_EDMA_NCQ)
504                 ecfg |= EDMA_CFG_ESATANATVCMDQUE;
505         ATA_OUTL(ch->r_mem, EDMA_CFG, ecfg);
506         mvs_setup_edma_queues(dev);
507         if (ch->quirks & MVS_Q_GENIIE) {
508                 /* Configure FBS-related registers */
509                 fcfg = ATA_INL(ch->r_mem, SATA_FISC);
510                 ltm = ATA_INL(ch->r_mem, SATA_LTM);
511                 hc = ATA_INL(ch->r_mem, EDMA_HC);
512                 if (ch->fbs_enabled) {
513                         fcfg |= SATA_FISC_FISDMAACTIVATESYNCRESP;
514                         if (mode == MVS_EDMA_NCQ) {
515                                 fcfg &= ~SATA_FISC_FISWAIT4HOSTRDYEN_B0;
516                                 hc &= ~EDMA_IE_EDEVERR;
517                         } else {
518                                 fcfg |= SATA_FISC_FISWAIT4HOSTRDYEN_B0;
519                                 hc |= EDMA_IE_EDEVERR;
520                         }
521                         ltm |= (1 << 8);
522                 } else {
523                         fcfg &= ~SATA_FISC_FISDMAACTIVATESYNCRESP;
524                         fcfg &= ~SATA_FISC_FISWAIT4HOSTRDYEN_B0;
525                         hc |= EDMA_IE_EDEVERR;
526                         ltm &= ~(1 << 8);
527                 }
528                 ATA_OUTL(ch->r_mem, SATA_FISC, fcfg);
529                 ATA_OUTL(ch->r_mem, SATA_LTM, ltm);
530                 ATA_OUTL(ch->r_mem, EDMA_HC, hc);
531                 /* This is some magic, required to handle several DRQs
532                  * with basic DMA. */
533                 unkn = ATA_INL(ch->r_mem, EDMA_UNKN_RESD);
534                 if (mode == MVS_EDMA_OFF)
535                         unkn |= 1;
536                 else
537                         unkn &= ~1;
538                 ATA_OUTL(ch->r_mem, EDMA_UNKN_RESD, unkn);
539         }
540         /* Run EDMA. */
541         if (mode != MVS_EDMA_OFF)
542                 ATA_OUTL(ch->r_mem, EDMA_CMD, EDMA_CMD_EENEDMA);
543 }
544
545 devclass_t mvs_devclass;
546 devclass_t mvsch_devclass;
547 static device_method_t mvsch_methods[] = {
548         DEVMETHOD(device_probe,     mvs_ch_probe),
549         DEVMETHOD(device_attach,    mvs_ch_attach),
550         DEVMETHOD(device_detach,    mvs_ch_detach),
551         DEVMETHOD(device_suspend,   mvs_ch_suspend),
552         DEVMETHOD(device_resume,    mvs_ch_resume),
553         { 0, 0 }
554 };
555 static driver_t mvsch_driver = {
556         "mvsch",
557         mvsch_methods,
558         sizeof(struct mvs_channel)
559 };
560 DRIVER_MODULE(mvsch, mvs, mvsch_driver, mvsch_devclass, 0, 0);
561 DRIVER_MODULE(mvsch, sata, mvsch_driver, mvsch_devclass, 0, 0);
562
563 static void
564 mvs_phy_check_events(device_t dev, u_int32_t serr)
565 {
566         struct mvs_channel *ch = device_get_softc(dev);
567
568         if (ch->pm_level == 0) {
569                 u_int32_t status = ATA_INL(ch->r_mem, SATA_SS);
570                 union ccb *ccb;
571
572                 if (bootverbose) {
573                         if (((status & SATA_SS_DET_MASK) == SATA_SS_DET_PHY_ONLINE) &&
574                             ((status & SATA_SS_SPD_MASK) != SATA_SS_SPD_NO_SPEED) &&
575                             ((status & SATA_SS_IPM_MASK) == SATA_SS_IPM_ACTIVE)) {
576                                 device_printf(dev, "CONNECT requested\n");
577                         } else
578                                 device_printf(dev, "DISCONNECT requested\n");
579                 }
580                 mvs_reset(dev);
581                 if ((ccb = xpt_alloc_ccb_nowait()) == NULL)
582                         return;
583                 if (xpt_create_path(&ccb->ccb_h.path, NULL,
584                     cam_sim_path(ch->sim),
585                     CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
586                         xpt_free_ccb(ccb);
587                         return;
588                 }
589                 xpt_rescan(ccb);
590         }
591 }
592
593 static void
594 mvs_notify_events(device_t dev)
595 {
596         struct mvs_channel *ch = device_get_softc(dev);
597         struct cam_path *dpath;
598         uint32_t fis;
599         int d;
600
601         /* Try to read PMP field from SDB FIS. Present only for Gen-IIe. */
602         fis = ATA_INL(ch->r_mem, SATA_FISDW0);
603         if ((fis & 0x80ff) == 0x80a1)
604                 d = (fis & 0x0f00) >> 8;
605         else
606                 d = ch->pm_present ? 15 : 0;
607         if (bootverbose)
608                 device_printf(dev, "SNTF %d\n", d);
609         if (xpt_create_path(&dpath, NULL,
610             xpt_path_path_id(ch->path), d, 0) == CAM_REQ_CMP) {
611                 xpt_async(AC_SCSI_AEN, dpath, NULL);
612                 xpt_free_path(dpath);
613         }
614 }
615
616 static void
617 mvs_ch_intr_locked(void *data)
618 {
619         struct mvs_intr_arg *arg = (struct mvs_intr_arg *)data;
620         device_t dev = (device_t)arg->arg;
621         struct mvs_channel *ch = device_get_softc(dev);
622
623         mtx_lock(&ch->mtx);
624         mvs_ch_intr(data);
625         mtx_unlock(&ch->mtx);
626 }
627
628 static void
629 mvs_ch_pm(void *arg)
630 {
631         device_t dev = (device_t)arg;
632         struct mvs_channel *ch = device_get_softc(dev);
633         uint32_t work;
634
635         if (ch->numrslots != 0)
636                 return;
637         /* If we are idle - request power state transition. */
638         work = ATA_INL(ch->r_mem, SATA_SC);
639         work &= ~SATA_SC_SPM_MASK;
640         if (ch->pm_level == 4)
641                 work |= SATA_SC_SPM_PARTIAL;
642         else
643                 work |= SATA_SC_SPM_SLUMBER;
644         ATA_OUTL(ch->r_mem, SATA_SC, work);
645 }
646
647 static void
648 mvs_ch_pm_wake(device_t dev)
649 {
650         struct mvs_channel *ch = device_get_softc(dev);
651         uint32_t work;
652         int timeout = 0;
653
654         work = ATA_INL(ch->r_mem, SATA_SS);
655         if (work & SATA_SS_IPM_ACTIVE)
656                 return;
657         /* If we are not in active state - request power state transition. */
658         work = ATA_INL(ch->r_mem, SATA_SC);
659         work &= ~SATA_SC_SPM_MASK;
660         work |= SATA_SC_SPM_ACTIVE;
661         ATA_OUTL(ch->r_mem, SATA_SC, work);
662         /* Wait for transition to happen. */
663         while ((ATA_INL(ch->r_mem, SATA_SS) & SATA_SS_IPM_ACTIVE) == 0 &&
664             timeout++ < 100) {
665                 DELAY(100);
666         }
667 }
668
669 static void
670 mvs_ch_intr(void *data)
671 {
672         struct mvs_intr_arg *arg = (struct mvs_intr_arg *)data;
673         device_t dev = (device_t)arg->arg;
674         struct mvs_channel *ch = device_get_softc(dev);
675         uint32_t iec, serr = 0, fisic = 0;
676         enum mvs_err_type et;
677         int i, ccs, port = -1, selfdis = 0;
678         int edma = (ch->numtslots != 0 || ch->numdslots != 0);
679
680 //device_printf(dev, "irq cause %02x EDMA %d IEC %08x\n",
681 //    arg->cause, edma, ATA_INL(ch->r_mem, EDMA_IEC));
682         /* New item in response queue. */
683         if ((arg->cause & 2) && edma)
684                 mvs_crbq_intr(dev);
685         /* Some error or special event. */
686         if (arg->cause & 1) {
687                 iec = ATA_INL(ch->r_mem, EDMA_IEC);
688 //device_printf(dev, "irq cause %02x EDMA %d IEC %08x\n",
689 //    arg->cause, edma, iec);
690                 if (iec & EDMA_IE_SERRINT) {
691                         serr = ATA_INL(ch->r_mem, SATA_SE);
692                         ATA_OUTL(ch->r_mem, SATA_SE, serr);
693 //device_printf(dev, "SERR %08x\n", serr);
694                 }
695                 /* EDMA self-disabled due to error. */
696                 if (iec & EDMA_IE_ESELFDIS)
697                         selfdis = 1;
698                 /* Transport interrupt. */
699                 if (iec & EDMA_IE_ETRANSINT) {
700                         /* For Gen-I this bit means self-disable. */
701                         if (ch->quirks & MVS_Q_GENI)
702                                 selfdis = 1;
703                         /* For Gen-II this bit means SDB-N. */
704                         else if (ch->quirks & MVS_Q_GENII)
705                                 fisic = SATA_FISC_FISWAIT4HOSTRDYEN_B1;
706                         else    /* For Gen-IIe - read FIS interrupt cause. */
707                                 fisic = ATA_INL(ch->r_mem, SATA_FISIC);
708 //device_printf(dev, "FISIC %08x\n", fisic);
709                 }
710                 if (selfdis)
711                         ch->curr_mode = MVS_EDMA_UNKNOWN;
712                 ATA_OUTL(ch->r_mem, EDMA_IEC, ~iec);
713                 /* Interface errors or Device error. */
714                 if (iec & (0xfc1e9000 | EDMA_IE_EDEVERR)) {
715                         port = -1;
716                         if (ch->numpslots != 0) {
717                                 ccs = 0;
718                         } else {
719                                 if (ch->quirks & MVS_Q_GENIIE)
720                                         ccs = EDMA_S_EIOID(ATA_INL(ch->r_mem, EDMA_S));
721                                 else
722                                         ccs = EDMA_S_EDEVQUETAG(ATA_INL(ch->r_mem, EDMA_S));
723                                 /* Check if error is one-PMP-port-specific. */
724                                 if (ch->fbs_enabled) {
725                                         /* Which ports were active. */
726                                         for (i = 0; i < 16; i++) {
727                                                 if (ch->numrslotspd[i] == 0)
728                                                         continue;
729                                                 if (port == -1)
730                                                         port = i;
731                                                 else if (port != i) {
732                                                         port = -2;
733                                                         break;
734                                                 }
735                                         }
736                                         /* If several ports were active and EDMA still enabled - 
737                                          * other ports are probably unaffected and may continue.
738                                          */
739                                         if (port == -2 && !selfdis) {
740                                                 uint16_t p = ATA_INL(ch->r_mem, SATA_SATAITC) >> 16;
741                                                 port = ffs(p) - 1;
742                                                 if (port != (fls(p) - 1))
743                                                         port = -2;
744                                         }
745                                 }
746                         }
747 //device_printf(dev, "err slot %d port %d\n", ccs, port);
748                         mvs_requeue_frozen(dev);
749                         for (i = 0; i < MVS_MAX_SLOTS; i++) {
750                                 /* XXX: requests in loading state. */
751                                 if (((ch->rslots >> i) & 1) == 0)
752                                         continue;
753                                 if (port >= 0 &&
754                                     ch->slot[i].ccb->ccb_h.target_id != port)
755                                         continue;
756                                 if (iec & EDMA_IE_EDEVERR) { /* Device error. */
757                                     if (port != -2) {
758                                         if (ch->numtslots == 0) {
759                                                 /* Untagged operation. */
760                                                 if (i == ccs)
761                                                         et = MVS_ERR_TFE;
762                                                 else
763                                                         et = MVS_ERR_INNOCENT;
764                                         } else {
765                                                 /* Tagged operation. */
766                                                 et = MVS_ERR_NCQ;
767                                         }
768                                     } else {
769                                         et = MVS_ERR_TFE;
770                                         ch->fatalerr = 1;
771                                     }
772                                 } else if (iec & 0xfc1e9000) {
773                                         if (ch->numtslots == 0 && i != ccs && port != -2)
774                                                 et = MVS_ERR_INNOCENT;
775                                         else
776                                                 et = MVS_ERR_SATA;
777                                 } else
778                                         et = MVS_ERR_INVALID;
779                                 mvs_end_transaction(&ch->slot[i], et);
780                         }
781                 }
782                 /* Process SDB-N. */
783                 if (fisic & SATA_FISC_FISWAIT4HOSTRDYEN_B1)
784                         mvs_notify_events(dev);
785                 if (fisic)
786                         ATA_OUTL(ch->r_mem, SATA_FISIC, ~fisic);
787                 /* Process hot-plug. */
788                 if ((iec & (EDMA_IE_EDEVDIS | EDMA_IE_EDEVCON)) ||
789                     (serr & SATA_SE_PHY_CHANGED))
790                         mvs_phy_check_events(dev, serr);
791         }
792         /* Legacy mode device interrupt. */
793         if ((arg->cause & 2) && !edma)
794                 mvs_legacy_intr(dev);
795 }
796
797 static uint8_t
798 mvs_getstatus(device_t dev, int clear)
799 {
800         struct mvs_channel *ch = device_get_softc(dev);
801         uint8_t status = ATA_INB(ch->r_mem, clear ? ATA_STATUS : ATA_ALTSTAT);
802
803         if (ch->fake_busy) {
804                 if (status & (ATA_S_BUSY | ATA_S_DRQ | ATA_S_ERROR))
805                         ch->fake_busy = 0;
806                 else
807                         status |= ATA_S_BUSY;
808         }
809         return (status);
810 }
811
812 static void
813 mvs_legacy_intr(device_t dev)
814 {
815         struct mvs_channel *ch = device_get_softc(dev);
816         struct mvs_slot *slot = &ch->slot[0]; /* PIO is always in slot 0. */
817         union ccb *ccb = slot->ccb;
818         enum mvs_err_type et = MVS_ERR_NONE;
819         int port;
820         u_int length;
821         uint8_t status, ireason;
822
823         /* Clear interrupt and get status. */
824         status = mvs_getstatus(dev, 1);
825 //      device_printf(dev, "Legacy intr status %02x\n",
826 //          status);
827         if (slot->state < MVS_SLOT_RUNNING)
828             return;
829         port = ccb->ccb_h.target_id & 0x0f;
830         /* Wait a bit for late !BUSY status update. */
831         if (status & ATA_S_BUSY) {
832                 DELAY(100);
833                 if ((status = mvs_getstatus(dev, 1)) & ATA_S_BUSY) {
834                         DELAY(1000);
835                         if ((status = mvs_getstatus(dev, 1)) & ATA_S_BUSY)
836                                 return;
837                 }
838         }
839         /* If we got an error, we are done. */
840         if (status & ATA_S_ERROR) {
841                 et = MVS_ERR_TFE;
842                 goto end_finished;
843         }
844         if (ccb->ccb_h.func_code == XPT_ATA_IO) { /* ATA PIO */
845                 ccb->ataio.res.status = status;
846                 /* Are we moving data? */
847                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
848                     /* If data read command - get them. */
849                     if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
850                         if (mvs_wait(dev, ATA_S_DRQ, ATA_S_BUSY, 1000) < 0) {
851                             device_printf(dev, "timeout waiting for read DRQ\n");
852                             et = MVS_ERR_TIMEOUT;
853                             goto end_finished;
854                         }
855                         ATA_INSW_STRM(ch->r_mem, ATA_DATA,
856                            (uint16_t *)(ccb->ataio.data_ptr + ch->donecount),
857                            ch->transfersize / 2);
858                     }
859                     /* Update how far we've gotten. */
860                     ch->donecount += ch->transfersize;
861                     /* Do we need more? */
862                     if (ccb->ataio.dxfer_len > ch->donecount) {
863                         /* Set this transfer size according to HW capabilities */
864                         ch->transfersize = min(ccb->ataio.dxfer_len - ch->donecount,
865                             ch->curr[ccb->ccb_h.target_id].bytecount);
866                         /* If data write command - put them */
867                         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
868                                 if (mvs_wait(dev, ATA_S_DRQ, ATA_S_BUSY, 1000) < 0) {
869                                     device_printf(dev, "timeout waiting for write DRQ\n");
870                                     et = MVS_ERR_TIMEOUT;
871                                     goto end_finished;
872                                 }
873                                 ATA_OUTSW_STRM(ch->r_mem, ATA_DATA,
874                                    (uint16_t *)(ccb->ataio.data_ptr + ch->donecount),
875                                    ch->transfersize / 2);
876                                 return;
877                         }
878                         /* If data read command, return & wait for interrupt */
879                         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN)
880                                 return;
881                     }
882                 }
883         } else if (ch->basic_dma) {     /* ATAPI DMA */
884                 if (status & ATA_S_DWF)
885                         et = MVS_ERR_TFE;
886                 else if (ATA_INL(ch->r_mem, DMA_S) & DMA_S_ERR)
887                         et = MVS_ERR_TFE;
888                 /* Stop basic DMA. */
889                 ATA_OUTL(ch->r_mem, DMA_C, 0);
890                 goto end_finished;
891         } else {                        /* ATAPI PIO */
892                 length = ATA_INB(ch->r_mem,ATA_CYL_LSB) | (ATA_INB(ch->r_mem,ATA_CYL_MSB) << 8);
893                 ireason = ATA_INB(ch->r_mem,ATA_IREASON);
894 //device_printf(dev, "status %02x, ireason %02x, length %d\n", status, ireason, length);
895                 switch ((ireason & (ATA_I_CMD | ATA_I_IN)) |
896                         (status & ATA_S_DRQ)) {
897
898                 case ATAPI_P_CMDOUT:
899 device_printf(dev, "ATAPI CMDOUT\n");
900                     /* Return wait for interrupt */
901                     return;
902
903                 case ATAPI_P_WRITE:
904 //device_printf(dev, "ATAPI WRITE\n");
905                     if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
906                         device_printf(dev, "trying to write on read buffer\n");
907                         et = MVS_ERR_TFE;
908                         goto end_finished;
909                         break;
910                     }
911                     ATA_OUTSW_STRM(ch->r_mem, ATA_DATA,
912                         (uint16_t *)(ccb->csio.data_ptr + ch->donecount),
913                         length / 2);
914                     ch->donecount += length;
915                     /* Set next transfer size according to HW capabilities */
916                     ch->transfersize = min(ccb->csio.dxfer_len - ch->donecount,
917                             ch->curr[ccb->ccb_h.target_id].bytecount);
918                     /* Return wait for interrupt */
919                     return;
920
921                 case ATAPI_P_READ:
922 //device_printf(dev, "ATAPI READ\n");
923                     if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
924                         device_printf(dev, "trying to read on write buffer\n");
925                         et = MVS_ERR_TFE;
926                         goto end_finished;
927                     }
928                     ATA_INSW_STRM(ch->r_mem, ATA_DATA,
929                         (uint16_t *)(ccb->csio.data_ptr + ch->donecount),
930                         length / 2);
931                     ch->donecount += length;
932                     /* Set next transfer size according to HW capabilities */
933                     ch->transfersize = min(ccb->csio.dxfer_len - ch->donecount,
934                             ch->curr[ccb->ccb_h.target_id].bytecount);
935                     /* Return wait for interrupt */
936                     return;
937
938                 case ATAPI_P_DONEDRQ:
939 device_printf(dev, "ATAPI DONEDRQ\n");
940                     device_printf(dev,
941                           "WARNING - DONEDRQ non conformant device\n");
942                     if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
943                         ATA_INSW_STRM(ch->r_mem, ATA_DATA,
944                             (uint16_t *)(ccb->csio.data_ptr + ch->donecount),
945                             length / 2);
946                         ch->donecount += length;
947                     }
948                     else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
949                         ATA_OUTSW_STRM(ch->r_mem, ATA_DATA,
950                             (uint16_t *)(ccb->csio.data_ptr + ch->donecount),
951                             length / 2);
952                         ch->donecount += length;
953                     }
954                     else
955                         et = MVS_ERR_TFE;
956                     /* FALLTHROUGH */
957
958                 case ATAPI_P_ABORT:
959                 case ATAPI_P_DONE:
960 //device_printf(dev, "ATAPI ABORT/DONE\n");
961                     if (status & (ATA_S_ERROR | ATA_S_DWF))
962                         et = MVS_ERR_TFE;
963                     goto end_finished;
964
965                 default:
966                     device_printf(dev, "unknown transfer phase (status %02x, ireason %02x)\n",
967                         status, ireason);
968                     et = MVS_ERR_TFE;
969                 }
970         }
971
972 end_finished:
973         mvs_end_transaction(slot, et);
974 }
975
976 static void
977 mvs_crbq_intr(device_t dev)
978 {
979         struct mvs_channel *ch = device_get_softc(dev);
980         struct mvs_crpb *crpb;
981         union ccb *ccb;
982         int in_idx, cin_idx, slot;
983         uint16_t flags;
984
985         in_idx = (ATA_INL(ch->r_mem, EDMA_RESQIP) & EDMA_RESQP_ERPQP_MASK) >>
986             EDMA_RESQP_ERPQP_SHIFT;
987         bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map,
988             BUS_DMASYNC_POSTREAD);
989         cin_idx = ch->in_idx;
990         ch->in_idx = in_idx;
991         while (in_idx != cin_idx) {
992                 crpb = (struct mvs_crpb *)
993                     (ch->dma.workrp + MVS_CRPB_OFFSET + (MVS_CRPB_SIZE * cin_idx));
994                 slot = le16toh(crpb->id) & MVS_CRPB_TAG_MASK;
995                 flags = le16toh(crpb->rspflg);
996 //device_printf(dev, "CRPB %d %d %04x\n", cin_idx, slot, flags);
997                 /*
998                  * Handle only successful completions here.
999                  * Errors will be handled by main intr handler.
1000                  */
1001                 if (ch->numtslots != 0 || (flags & EDMA_IE_EDEVERR) == 0) {
1002 if ((flags >> 8) & ATA_S_ERROR)
1003 device_printf(dev, "ERROR STATUS CRPB %d %d %04x\n", cin_idx, slot, flags);
1004                         if (ch->slot[slot].state >= MVS_SLOT_RUNNING) {
1005                                 ccb = ch->slot[slot].ccb;
1006                                 ccb->ataio.res.status = (flags & MVS_CRPB_ATASTS_MASK) >>
1007                                     MVS_CRPB_ATASTS_SHIFT;
1008                                 mvs_end_transaction(&ch->slot[slot], MVS_ERR_NONE);
1009                         } else 
1010 device_printf(dev, "EMPTY CRPB %d (->%d) %d %04x\n", cin_idx, in_idx, slot, flags);
1011                 } else
1012 device_printf(dev, "ERROR FLAGS CRPB %d %d %04x\n", cin_idx, slot, flags);
1013
1014                 cin_idx = (cin_idx + 1) & (MVS_MAX_SLOTS - 1);
1015         }
1016         bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map,
1017             BUS_DMASYNC_PREREAD);
1018         if (cin_idx == ch->in_idx) {
1019                 ATA_OUTL(ch->r_mem, EDMA_RESQOP,
1020                     ch->dma.workrp_bus | (cin_idx << EDMA_RESQP_ERPQP_SHIFT));
1021         }
1022 }
1023
1024 /* Must be called with channel locked. */
1025 static int
1026 mvs_check_collision(device_t dev, union ccb *ccb)
1027 {
1028         struct mvs_channel *ch = device_get_softc(dev);
1029
1030         if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1031                 /* NCQ DMA */
1032                 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
1033                         /* Can't mix NCQ and non-NCQ DMA commands. */
1034                         if (ch->numdslots != 0)
1035                                 return (1);
1036                         /* Can't mix NCQ and PIO commands. */
1037                         if (ch->numpslots != 0)
1038                                 return (1);
1039                         /* If we have no FBS */
1040                         if (!ch->fbs_enabled) {
1041                                 /* Tagged command while tagged to other target is active. */
1042                                 if (ch->numtslots != 0 &&
1043                                     ch->taggedtarget != ccb->ccb_h.target_id)
1044                                         return (1);
1045                         }
1046                 /* Non-NCQ DMA */
1047                 } else if (ccb->ataio.cmd.flags & CAM_ATAIO_DMA) {
1048                         /* Can't mix non-NCQ DMA and NCQ commands. */
1049                         if (ch->numtslots != 0)
1050                                 return (1);
1051                         /* Can't mix non-NCQ DMA and PIO commands. */
1052                         if (ch->numpslots != 0)
1053                                 return (1);
1054                 /* PIO */
1055                 } else {
1056                         /* Can't mix PIO with anything. */
1057                         if (ch->numrslots != 0)
1058                                 return (1);
1059                 }
1060                 if (ccb->ataio.cmd.flags & (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)) {
1061                         /* Atomic command while anything active. */
1062                         if (ch->numrslots != 0)
1063                                 return (1);
1064                 }
1065         } else { /* ATAPI */
1066                 /* ATAPI goes without EDMA, so can't mix it with anything. */
1067                 if (ch->numrslots != 0)
1068                         return (1);
1069         }
1070         /* We have some atomic command running. */
1071         if (ch->aslots != 0)
1072                 return (1);
1073         return (0);
1074 }
1075
1076 static void
1077 mvs_tfd_read(device_t dev, union ccb *ccb)
1078 {
1079         struct mvs_channel *ch = device_get_softc(dev);
1080         struct ata_res *res = &ccb->ataio.res;
1081
1082         res->status = ATA_INB(ch->r_mem, ATA_ALTSTAT);
1083         res->error =  ATA_INB(ch->r_mem, ATA_ERROR);
1084         res->device = ATA_INB(ch->r_mem, ATA_DRIVE);
1085         ATA_OUTB(ch->r_mem, ATA_CONTROL, ATA_A_HOB);
1086         res->sector_count_exp = ATA_INB(ch->r_mem, ATA_COUNT);
1087         res->lba_low_exp = ATA_INB(ch->r_mem, ATA_SECTOR);
1088         res->lba_mid_exp = ATA_INB(ch->r_mem, ATA_CYL_LSB);
1089         res->lba_high_exp = ATA_INB(ch->r_mem, ATA_CYL_MSB);
1090         ATA_OUTB(ch->r_mem, ATA_CONTROL, 0);
1091         res->sector_count = ATA_INB(ch->r_mem, ATA_COUNT);
1092         res->lba_low = ATA_INB(ch->r_mem, ATA_SECTOR);
1093         res->lba_mid = ATA_INB(ch->r_mem, ATA_CYL_LSB);
1094         res->lba_high = ATA_INB(ch->r_mem, ATA_CYL_MSB);
1095 }
1096
1097 static void
1098 mvs_tfd_write(device_t dev, union ccb *ccb)
1099 {
1100         struct mvs_channel *ch = device_get_softc(dev);
1101         struct ata_cmd *cmd = &ccb->ataio.cmd;
1102
1103         ATA_OUTB(ch->r_mem, ATA_DRIVE, cmd->device);
1104         ATA_OUTB(ch->r_mem, ATA_CONTROL, cmd->control);
1105         ATA_OUTB(ch->r_mem, ATA_FEATURE, cmd->features_exp);
1106         ATA_OUTB(ch->r_mem, ATA_FEATURE, cmd->features);
1107         ATA_OUTB(ch->r_mem, ATA_COUNT, cmd->sector_count_exp);
1108         ATA_OUTB(ch->r_mem, ATA_COUNT, cmd->sector_count);
1109         ATA_OUTB(ch->r_mem, ATA_SECTOR, cmd->lba_low_exp);
1110         ATA_OUTB(ch->r_mem, ATA_SECTOR, cmd->lba_low);
1111         ATA_OUTB(ch->r_mem, ATA_CYL_LSB, cmd->lba_mid_exp);
1112         ATA_OUTB(ch->r_mem, ATA_CYL_LSB, cmd->lba_mid);
1113         ATA_OUTB(ch->r_mem, ATA_CYL_MSB, cmd->lba_high_exp);
1114         ATA_OUTB(ch->r_mem, ATA_CYL_MSB, cmd->lba_high);
1115         ATA_OUTB(ch->r_mem, ATA_COMMAND, cmd->command);
1116 }
1117
1118
1119 /* Must be called with channel locked. */
1120 static void
1121 mvs_begin_transaction(device_t dev, union ccb *ccb)
1122 {
1123         struct mvs_channel *ch = device_get_softc(dev);
1124         struct mvs_slot *slot;
1125         int slotn, tag;
1126
1127         if (ch->pm_level > 0)
1128                 mvs_ch_pm_wake(dev);
1129         /* Softreset is a special case. */
1130         if (ccb->ccb_h.func_code == XPT_ATA_IO &&
1131             (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL)) {
1132                 mvs_softreset(dev, ccb);
1133                 return;
1134         }
1135         /* Choose empty slot. */
1136         slotn = ffs(~ch->oslots) - 1;
1137         if ((ccb->ccb_h.func_code == XPT_ATA_IO) &&
1138             (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA)) {
1139                 if (ch->quirks & MVS_Q_GENIIE)
1140                         tag = ffs(~ch->otagspd[ccb->ccb_h.target_id]) - 1;
1141                 else
1142                         tag = slotn;
1143         } else
1144                 tag = 0;
1145         /* Occupy chosen slot. */
1146         slot = &ch->slot[slotn];
1147         slot->ccb = ccb;
1148         slot->tag = tag;
1149         /* Stop PM timer. */
1150         if (ch->numrslots == 0 && ch->pm_level > 3)
1151                 callout_stop(&ch->pm_timer);
1152         /* Update channel stats. */
1153         ch->oslots |= (1 << slot->slot);
1154         ch->numrslots++;
1155         ch->numrslotspd[ccb->ccb_h.target_id]++;
1156         if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1157                 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
1158                         ch->otagspd[ccb->ccb_h.target_id] |= (1 << slot->tag);
1159                         ch->numtslots++;
1160                         ch->numtslotspd[ccb->ccb_h.target_id]++;
1161                         ch->taggedtarget = ccb->ccb_h.target_id;
1162                         mvs_set_edma_mode(dev, MVS_EDMA_NCQ);
1163                 } else if (ccb->ataio.cmd.flags & CAM_ATAIO_DMA) {
1164                         ch->numdslots++;
1165                         mvs_set_edma_mode(dev, MVS_EDMA_ON);
1166                 } else {
1167                         ch->numpslots++;
1168                         mvs_set_edma_mode(dev, MVS_EDMA_OFF);
1169                 }
1170                 if (ccb->ataio.cmd.flags &
1171                     (CAM_ATAIO_CONTROL | CAM_ATAIO_NEEDRESULT)) {
1172                         ch->aslots |= (1 << slot->slot);
1173                 }
1174         } else {
1175                 uint8_t *cdb = (ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1176                     ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes;
1177                 ch->numpslots++;
1178                 /* Use ATAPI DMA only for commands without under-/overruns. */
1179                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE &&
1180                     ch->curr[ccb->ccb_h.target_id].mode >= ATA_DMA &&
1181                     (ch->quirks & MVS_Q_SOC) == 0 &&
1182                     (cdb[0] == 0x08 ||
1183                      cdb[0] == 0x0a ||
1184                      cdb[0] == 0x28 ||
1185                      cdb[0] == 0x2a ||
1186                      cdb[0] == 0x88 ||
1187                      cdb[0] == 0x8a ||
1188                      cdb[0] == 0xa8 ||
1189                      cdb[0] == 0xaa ||
1190                      cdb[0] == 0xbe)) {
1191                         ch->basic_dma = 1;
1192                 }
1193                 mvs_set_edma_mode(dev, MVS_EDMA_OFF);
1194         }
1195         if (ch->numpslots == 0 || ch->basic_dma) {
1196                 void *buf;
1197                 bus_size_t size;
1198
1199                 slot->state = MVS_SLOT_LOADING;
1200                 if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1201                         buf = ccb->ataio.data_ptr;
1202                         size = ccb->ataio.dxfer_len;
1203                 } else {
1204                         buf = ccb->csio.data_ptr;
1205                         size = ccb->csio.dxfer_len;
1206                 }
1207                 bus_dmamap_load(ch->dma.data_tag, slot->dma.data_map,
1208                     buf, size, mvs_dmasetprd, slot, 0);
1209         } else
1210                 mvs_legacy_execute_transaction(slot);
1211 }
1212
1213 /* Locked by busdma engine. */
1214 static void
1215 mvs_dmasetprd(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
1216 {    
1217         struct mvs_slot *slot = arg;
1218         struct mvs_channel *ch = device_get_softc(slot->dev);
1219         struct mvs_eprd *eprd;
1220         int i;
1221
1222         if (error) {
1223                 device_printf(slot->dev, "DMA load error\n");
1224                 mvs_end_transaction(slot, MVS_ERR_INVALID);
1225                 return;
1226         }
1227         KASSERT(nsegs <= MVS_SG_ENTRIES, ("too many DMA segment entries\n"));
1228         /* If there is only one segment - no need to use S/G table on Gen-IIe. */
1229         if (nsegs == 1 && ch->basic_dma == 0 && (ch->quirks & MVS_Q_GENIIE)) {
1230                 slot->dma.addr = segs[0].ds_addr;
1231                 slot->dma.len = segs[0].ds_len;
1232         } else {
1233                 slot->dma.addr = 0;
1234                 /* Get a piece of the workspace for this EPRD */
1235                 eprd = (struct mvs_eprd *)
1236                     (ch->dma.workrq + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot));
1237                 /* Fill S/G table */
1238                 for (i = 0; i < nsegs; i++) {
1239                         eprd[i].prdbal = htole32(segs[i].ds_addr);
1240                         eprd[i].bytecount = htole32(segs[i].ds_len & MVS_EPRD_MASK);
1241                         eprd[i].prdbah = htole32((segs[i].ds_addr >> 16) >> 16);
1242                 }
1243                 eprd[i - 1].bytecount |= htole32(MVS_EPRD_EOF);
1244         }
1245         bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
1246             ((slot->ccb->ccb_h.flags & CAM_DIR_IN) ?
1247             BUS_DMASYNC_PREREAD : BUS_DMASYNC_PREWRITE));
1248         if (ch->basic_dma)
1249                 mvs_legacy_execute_transaction(slot);
1250         else
1251                 mvs_execute_transaction(slot);
1252 }
1253
1254 static void
1255 mvs_legacy_execute_transaction(struct mvs_slot *slot)
1256 {
1257         device_t dev = slot->dev;
1258         struct mvs_channel *ch = device_get_softc(dev);
1259         bus_addr_t eprd;
1260         union ccb *ccb = slot->ccb;
1261         int port = ccb->ccb_h.target_id & 0x0f;
1262         int timeout;
1263
1264         slot->state = MVS_SLOT_RUNNING;
1265         ch->rslots |= (1 << slot->slot);
1266         ATA_OUTB(ch->r_mem, SATA_SATAICTL, port << SATA_SATAICTL_PMPTX_SHIFT);
1267         if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1268 //              device_printf(dev, "%d Legacy command %02x size %d\n",
1269 //                  port, ccb->ataio.cmd.command, ccb->ataio.dxfer_len);
1270                 mvs_tfd_write(dev, ccb);
1271                 /* Device reset doesn't interrupt. */
1272                 if (ccb->ataio.cmd.command == ATA_DEVICE_RESET) {
1273                         int timeout = 1000000;
1274                         do {
1275                             DELAY(10);
1276                             ccb->ataio.res.status = ATA_INB(ch->r_mem, ATA_STATUS);
1277                         } while (ccb->ataio.res.status & ATA_S_BUSY && timeout--);
1278                         mvs_legacy_intr(dev);
1279                         return;
1280                 }
1281                 ch->donecount = 0;
1282                 ch->transfersize = min(ccb->ataio.dxfer_len,
1283                     ch->curr[port].bytecount);
1284                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)
1285                         ch->fake_busy = 1;
1286                 /* If this is a data write command, output the data. */
1287                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1288                         if (mvs_wait(dev, ATA_S_DRQ, ATA_S_BUSY, 1000) < 0) {
1289                                 device_printf(dev, "timeout waiting for write DRQ\n");
1290                                 mvs_end_transaction(slot, MVS_ERR_TIMEOUT);
1291                                 return;
1292                         }
1293                         ATA_OUTSW_STRM(ch->r_mem, ATA_DATA,
1294                            (uint16_t *)(ccb->ataio.data_ptr + ch->donecount),
1295                            ch->transfersize / 2);
1296                 }
1297         } else {
1298 //              device_printf(dev, "%d ATAPI command %02x size %d dma %d\n",
1299 //                  port, ccb->csio.cdb_io.cdb_bytes[0], ccb->csio.dxfer_len,
1300 //                  ch->basic_dma);
1301                 ch->donecount = 0;
1302                 ch->transfersize = min(ccb->csio.dxfer_len,
1303                     ch->curr[port].bytecount);
1304                 /* Write ATA PACKET command. */
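                     /*
                      * With basic DMA the DMA flag goes into the Features
                      * register; for PIO the per-DRQ byte-count limit is
                      * passed in the Cylinder Low/High (byte count) registers
                      * instead, as the PACKET protocol requires.
                      */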
1305                 if (ch->basic_dma) {
1306                         ATA_OUTB(ch->r_mem, ATA_FEATURE, ATA_F_DMA);
1307                         ATA_OUTB(ch->r_mem, ATA_CYL_LSB, 0);
1308                         ATA_OUTB(ch->r_mem, ATA_CYL_MSB, 0);
1309                 } else {
1310                         ATA_OUTB(ch->r_mem, ATA_FEATURE, 0);
1311                         ATA_OUTB(ch->r_mem, ATA_CYL_LSB, ch->transfersize);
1312                         ATA_OUTB(ch->r_mem, ATA_CYL_MSB, ch->transfersize >> 8);
1313                 }
1314                 ATA_OUTB(ch->r_mem, ATA_COMMAND, ATA_PACKET_CMD);
1315                 ch->fake_busy = 1;
1316                 /* Wait until the device is ready to accept the ATAPI command block. */
1317                 if (mvs_wait(dev, 0, ATA_S_BUSY, 1000) < 0) {
1318                         device_printf(dev, "timeout waiting for ATAPI !BUSY\n");
1319                         mvs_end_transaction(slot, MVS_ERR_TIMEOUT);
1320                         return;
1321                 }
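                     /*
                      * Poll the interrupt reason and status registers until
                      * they indicate the command-out phase (CoD set, I/O
                      * clear, DRQ set, BSY clear), i.e. the device is ready
                      * to accept the ATAPI CDB.
                      */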
1322                 timeout = 5000;
1323                 while (timeout--) {
1324                     int reason = ATA_INB(ch->r_mem, ATA_IREASON);
1325                     int status = ATA_INB(ch->r_mem, ATA_STATUS);
1326
1327                     if (((reason & (ATA_I_CMD | ATA_I_IN)) |
1328                          (status & (ATA_S_DRQ | ATA_S_BUSY))) == ATAPI_P_CMDOUT)
1329                         break;
1330                     DELAY(20);
1331                 }
1332                 if (timeout <= 0) {
1333                         device_printf(dev, "timeout waiting for ATAPI command ready\n");
1334                         mvs_end_transaction(slot, MVS_ERR_TIMEOUT);
1335                         return;
1336                 }
1337                 /* Write ATAPI command. */
1338                 ATA_OUTSW_STRM(ch->r_mem, ATA_DATA,
1339                    (uint16_t *)((ccb->ccb_h.flags & CAM_CDB_POINTER) ?
1340                     ccb->csio.cdb_io.cdb_ptr : ccb->csio.cdb_io.cdb_bytes),
1341                    ch->curr[port].atapi / 2);
1342                 DELAY(10);
1343                 if (ch->basic_dma) {
1344                         /* Start basic DMA. */
1345                         eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET +
1346                             (MVS_EPRD_SIZE * slot->slot);
1347                         ATA_OUTL(ch->r_mem, DMA_DTLBA, eprd);
1348                         ATA_OUTL(ch->r_mem, DMA_DTHBA, (eprd >> 16) >> 16);
1349                         ATA_OUTL(ch->r_mem, DMA_C, DMA_C_START |
1350                             (((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) ?
1351                             DMA_C_READ : 0));
1352                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE)
1353                         ch->fake_busy = 1;
1354         }
1355         /* Start command execution timeout */
1356         callout_reset(&slot->timeout, (int)ccb->ccb_h.timeout * hz / 1000,
1357             (timeout_t*)mvs_timeout, slot);
1358 }
1359
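     /*
      * Queue a command through the EDMA engine: build a CRQB (Gen-IIe or
      * classic format) in the request-queue slot selected by out_idx and
      * hand it to the hardware by updating the request-queue in-pointer.
      */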
1360 /* Must be called with channel locked. */
1361 static void
1362 mvs_execute_transaction(struct mvs_slot *slot)
1363 {
1364         device_t dev = slot->dev;
1365         struct mvs_channel *ch = device_get_softc(dev);
1366         bus_addr_t eprd;
1367         struct mvs_crqb *crqb;
1368         struct mvs_crqb_gen2e *crqb2e;
1369         union ccb *ccb = slot->ccb;
1370         int port = ccb->ccb_h.target_id & 0x0f;
1371         int i;
1372
1373 //      device_printf(dev, "%d EDMA command %02x size %d slot %d tag %d\n",
1374 //          port, ccb->ataio.cmd.command, ccb->ataio.dxfer_len, slot->slot, slot->tag);
1375         /* Get address of the prepared EPRD */
1376         eprd = ch->dma.workrq_bus + MVS_EPRD_OFFSET + (MVS_EPRD_SIZE * slot->slot);
1377         /* Prepare the CRQB. Gen-IIe uses a different CRQB format. */
1378         if (ch->quirks & MVS_Q_GENIIE) {
1379                 crqb2e = (struct mvs_crqb_gen2e *)
1380                     (ch->dma.workrq + MVS_CRQB_OFFSET + (MVS_CRQB_SIZE * ch->out_idx));
1381                 crqb2e->ctrlflg = htole32(
1382                     ((ccb->ccb_h.flags & CAM_DIR_IN) ? MVS_CRQB2E_READ : 0) |
1383                     (slot->tag << MVS_CRQB2E_DTAG_SHIFT) |
1384                     (port << MVS_CRQB2E_PMP_SHIFT) |
1385                     (slot->slot << MVS_CRQB2E_HTAG_SHIFT));
1386                 /* If there is only one segment - no need to use S/G table. */
1387                 if (slot->dma.addr != 0) {
1388                         eprd = slot->dma.addr;
1389                         crqb2e->ctrlflg |= htole32(MVS_CRQB2E_CPRD);
1390                         crqb2e->drbc = slot->dma.len;
1391                 }
1392                 crqb2e->cprdbl = htole32(eprd);
1393                 crqb2e->cprdbh = htole32((eprd >> 16) >> 16);
1394                 crqb2e->cmd[0] = 0;
1395                 crqb2e->cmd[1] = 0;
1396                 crqb2e->cmd[2] = ccb->ataio.cmd.command;
1397                 crqb2e->cmd[3] = ccb->ataio.cmd.features;
1398                 crqb2e->cmd[4] = ccb->ataio.cmd.lba_low;
1399                 crqb2e->cmd[5] = ccb->ataio.cmd.lba_mid;
1400                 crqb2e->cmd[6] = ccb->ataio.cmd.lba_high;
1401                 crqb2e->cmd[7] = ccb->ataio.cmd.device;
1402                 crqb2e->cmd[8] = ccb->ataio.cmd.lba_low_exp;
1403                 crqb2e->cmd[9] = ccb->ataio.cmd.lba_mid_exp;
1404                 crqb2e->cmd[10] = ccb->ataio.cmd.lba_high_exp;
1405                 crqb2e->cmd[11] = ccb->ataio.cmd.features_exp;
1406                 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
1407                         crqb2e->cmd[12] = slot->tag << 3;
1408                         crqb2e->cmd[13] = 0;
1409                 } else {
1410                         crqb2e->cmd[12] = ccb->ataio.cmd.sector_count;
1411                         crqb2e->cmd[13] = ccb->ataio.cmd.sector_count_exp;
1412                 }
1413                 crqb2e->cmd[14] = 0;
1414                 crqb2e->cmd[15] = 0;
1415         } else {
1416                 crqb = (struct mvs_crqb *)
1417                     (ch->dma.workrq + MVS_CRQB_OFFSET + (MVS_CRQB_SIZE * ch->out_idx));
1418                 crqb->cprdbl = htole32(eprd);
1419                 crqb->cprdbh = htole32((eprd >> 16) >> 16);
1420                 crqb->ctrlflg = htole16(
1421                     ((ccb->ccb_h.flags & CAM_DIR_IN) ? MVS_CRQB_READ : 0) |
1422                     (slot->slot << MVS_CRQB_TAG_SHIFT) |
1423                     (port << MVS_CRQB_PMP_SHIFT));
1424                 i = 0;
1425                 /*
1426                  * The controller can handle only 11 of the 12 ATA registers,
1427                  * so we have to choose which one to skip.
1428                  */
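                     /*
                      * Each pair below is a (value, control) byte: the
                      * control byte selects the target shadow register
                      * (0x11 Features ... 0x17 Command) and its top bit
                      * marks the last pair, hence 0x97 for the final
                      * Command write.
                      */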
1429                 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
1430                         crqb->cmd[i++] = ccb->ataio.cmd.features_exp;
1431                         crqb->cmd[i++] = 0x11;
1432                 }
1433                 crqb->cmd[i++] = ccb->ataio.cmd.features;
1434                 crqb->cmd[i++] = 0x11;
1435                 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
1436                         crqb->cmd[i++] = slot->tag << 3;
1437                         crqb->cmd[i++] = 0x12;
1438                 } else {
1439                         crqb->cmd[i++] = ccb->ataio.cmd.sector_count_exp;
1440                         crqb->cmd[i++] = 0x12;
1441                         crqb->cmd[i++] = ccb->ataio.cmd.sector_count;
1442                         crqb->cmd[i++] = 0x12;
1443                 }
1444                 crqb->cmd[i++] = ccb->ataio.cmd.lba_low_exp;
1445                 crqb->cmd[i++] = 0x13;
1446                 crqb->cmd[i++] = ccb->ataio.cmd.lba_low;
1447                 crqb->cmd[i++] = 0x13;
1448                 crqb->cmd[i++] = ccb->ataio.cmd.lba_mid_exp;
1449                 crqb->cmd[i++] = 0x14;
1450                 crqb->cmd[i++] = ccb->ataio.cmd.lba_mid;
1451                 crqb->cmd[i++] = 0x14;
1452                 crqb->cmd[i++] = ccb->ataio.cmd.lba_high_exp;
1453                 crqb->cmd[i++] = 0x15;
1454                 crqb->cmd[i++] = ccb->ataio.cmd.lba_high;
1455                 crqb->cmd[i++] = 0x15;
1456                 crqb->cmd[i++] = ccb->ataio.cmd.device;
1457                 crqb->cmd[i++] = 0x16;
1458                 crqb->cmd[i++] = ccb->ataio.cmd.command;
1459                 crqb->cmd[i++] = 0x97;
1460         }
1461         bus_dmamap_sync(ch->dma.workrq_tag, ch->dma.workrq_map,
1462             BUS_DMASYNC_PREWRITE);
1463         bus_dmamap_sync(ch->dma.workrp_tag, ch->dma.workrp_map,
1464             BUS_DMASYNC_PREREAD);
1465         slot->state = MVS_SLOT_RUNNING;
1466         ch->rslots |= (1 << slot->slot);
1467         /* Issue command to the controller. */
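             /*
              * Advancing out_idx and writing the new request-queue in-pointer
              * tells the EDMA engine that one more CRQB is ready to be
              * fetched.
              */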
1468         ch->out_idx = (ch->out_idx + 1) & (MVS_MAX_SLOTS - 1);
1469         ATA_OUTL(ch->r_mem, EDMA_REQQIP,
1470             ch->dma.workrq_bus + MVS_CRQB_OFFSET + (MVS_CRQB_SIZE * ch->out_idx));
1471         /* Start command execution timeout */
1472         callout_reset(&slot->timeout, (int)ccb->ccb_h.timeout * hz / 1000,
1473             (timeout_t*)mvs_timeout, slot);
1474         return;
1475 }
1476
1477 /* Must be called with channel locked. */
1478 static void
1479 mvs_process_timeout(device_t dev)
1480 {
1481         struct mvs_channel *ch = device_get_softc(dev);
1482         int i;
1483
1484         mtx_assert(&ch->mtx, MA_OWNED);
1485         /* Time out the remaining running commands. */
1486         for (i = 0; i < MVS_MAX_SLOTS; i++) {
1487                 /* Do we have a running request on slot? */
1488                 if (ch->slot[i].state < MVS_SLOT_RUNNING)
1489                         continue;
1490                 mvs_end_transaction(&ch->slot[i], MVS_ERR_TIMEOUT);
1491         }
1492 }
1493
1494 /* Must be called with channel locked. */
1495 static void
1496 mvs_rearm_timeout(device_t dev)
1497 {
1498         struct mvs_channel *ch = device_get_softc(dev);
1499         int i;
1500
1501         mtx_assert(&ch->mtx, MA_OWNED);
1502         for (i = 0; i < MVS_MAX_SLOTS; i++) {
1503                 struct mvs_slot *slot = &ch->slot[i];
1504
1505                 /* Do we have a running request on slot? */
1506                 if (slot->state < MVS_SLOT_RUNNING)
1507                         continue;
1508                 if ((ch->toslots & (1 << i)) == 0)
1509                         continue;
1510                 callout_reset(&slot->timeout,
1511                     (int)slot->ccb->ccb_h.timeout * hz / 2000,
1512                     (timeout_t*)mvs_timeout, slot);
1513         }
1514 }
1515
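     /*
      * Timeout handling is staged: the first slot to expire dumps the
      * controller state, requeues any frozen command and freezes the SIM
      * queue; only once every running slot has completed or timed out as
      * well does mvs_process_timeout() fail the requests, after which
      * mvs_end_transaction() resets the port.
      */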
1516 /* Locked by callout mechanism. */
1517 static void
1518 mvs_timeout(struct mvs_slot *slot)
1519 {
1520         device_t dev = slot->dev;
1521         struct mvs_channel *ch = device_get_softc(dev);
1522
1523         /* Check for stale timeout. */
1524         if (slot->state < MVS_SLOT_RUNNING)
1525                 return;
1526         device_printf(dev, "Timeout on slot %d\n", slot->slot);
1527         device_printf(dev, "iec %08x sstat %08x serr %08x edma_s %08x "
1528             "dma_c %08x dma_s %08x rs %08x status %02x\n",
1529             ATA_INL(ch->r_mem, EDMA_IEC),
1530             ATA_INL(ch->r_mem, SATA_SS), ATA_INL(ch->r_mem, SATA_SE),
1531             ATA_INL(ch->r_mem, EDMA_S), ATA_INL(ch->r_mem, DMA_C),
1532             ATA_INL(ch->r_mem, DMA_S), ch->rslots,
1533             ATA_INB(ch->r_mem, ATA_ALTSTAT));
1534         /* Handle frozen command. */
1535         mvs_requeue_frozen(dev);
1536         /* Wait for the other running commands to time out as well and hope for the best. */
1537         if (ch->toslots == 0)
1538                 xpt_freeze_simq(ch->sim, 1);
1539         ch->toslots |= (1 << slot->slot);
1540         if ((ch->rslots & ~ch->toslots) == 0)
1541                 mvs_process_timeout(dev);
1542         else
1543                 device_printf(dev, " ... waiting for slots %08x\n",
1544                     ch->rslots & ~ch->toslots);
1545 }
1546
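     /*
      * Complete a command: sync and unload its DMA resources, fetch the
      * result registers when required, translate the mvs_err_type into a
      * CAM status, release the slot bookkeeping and then decide what to do
      * next: READ LOG EXT after an NCQ error, restarting a frozen command,
      * resetting or reinitializing the port, or arming the PM timer.
      */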
1547 /* Must be called with channel locked. */
1548 static void
1549 mvs_end_transaction(struct mvs_slot *slot, enum mvs_err_type et)
1550 {
1551         device_t dev = slot->dev;
1552         struct mvs_channel *ch = device_get_softc(dev);
1553         union ccb *ccb = slot->ccb;
1554
1555 //device_printf(dev, "cmd done status %d\n", et);
1556         bus_dmamap_sync(ch->dma.workrq_tag, ch->dma.workrq_map,
1557             BUS_DMASYNC_POSTWRITE);
1558         /* Read the result registers into the result structure.
1559          * They may be incorrect if several commands finished at the
1560          * same time, so read them only when we are sure or have to.
1561          */
1562         if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1563                 struct ata_res *res = &ccb->ataio.res;
1564
1565                 if ((et == MVS_ERR_TFE) ||
1566                     (ccb->ataio.cmd.flags & CAM_ATAIO_NEEDRESULT)) {
1567                         mvs_tfd_read(dev, ccb);
1568                 } else
1569                         bzero(res, sizeof(*res));
1570         }
1571         if (ch->numpslots == 0 || ch->basic_dma) {
1572                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
1573                         bus_dmamap_sync(ch->dma.data_tag, slot->dma.data_map,
1574                             (ccb->ccb_h.flags & CAM_DIR_IN) ?
1575                             BUS_DMASYNC_POSTREAD : BUS_DMASYNC_POSTWRITE);
1576                         bus_dmamap_unload(ch->dma.data_tag, slot->dma.data_map);
1577                 }
1578         }
1579         if (et != MVS_ERR_NONE)
1580                 ch->eslots |= (1 << slot->slot);
1581         /* In case of error, freeze device for proper recovery. */
1582         if ((et != MVS_ERR_NONE) && (!ch->readlog) &&
1583             !(ccb->ccb_h.status & CAM_DEV_QFRZN)) {
1584                 xpt_freeze_devq(ccb->ccb_h.path, 1);
1585                 ccb->ccb_h.status |= CAM_DEV_QFRZN;
1586         }
1587         /* Set proper result status. */
1588         ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1589         switch (et) {
1590         case MVS_ERR_NONE:
1591                 ccb->ccb_h.status |= CAM_REQ_CMP;
1592                 if (ccb->ccb_h.func_code == XPT_SCSI_IO)
1593                         ccb->csio.scsi_status = SCSI_STATUS_OK;
1594                 break;
1595         case MVS_ERR_INVALID:
1596                 ch->fatalerr = 1;
1597                 ccb->ccb_h.status |= CAM_REQ_INVALID;
1598                 break;
1599         case MVS_ERR_INNOCENT:
1600                 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
1601                 break;
1602         case MVS_ERR_TFE:
1603         case MVS_ERR_NCQ:
1604                 if (ccb->ccb_h.func_code == XPT_SCSI_IO) {
1605                         ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
1606                         ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
1607                 } else {
1608                         ccb->ccb_h.status |= CAM_ATA_STATUS_ERROR;
1609                 }
1610                 break;
1611         case MVS_ERR_SATA:
1612                 ch->fatalerr = 1;
1613                 if (!ch->readlog) {
1614                         xpt_freeze_simq(ch->sim, 1);
1615                         ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1616                         ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1617                 }
1618                 ccb->ccb_h.status |= CAM_UNCOR_PARITY;
1619                 break;
1620         case MVS_ERR_TIMEOUT:
1621                 if (!ch->readlog) {
1622                         xpt_freeze_simq(ch->sim, 1);
1623                         ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1624                         ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1625                 }
1626                 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1627                 break;
1628         default:
1629                 ch->fatalerr = 1;
1630                 ccb->ccb_h.status |= CAM_REQ_CMP_ERR;
1631         }
1632         /* Free slot. */
1633         ch->oslots &= ~(1 << slot->slot);
1634         ch->rslots &= ~(1 << slot->slot);
1635         ch->aslots &= ~(1 << slot->slot);
1636         if (et != MVS_ERR_TIMEOUT) {
1637                 if (ch->toslots == (1 << slot->slot))
1638                         xpt_release_simq(ch->sim, TRUE);
1639                 ch->toslots &= ~(1 << slot->slot);
1640         }
1641         slot->state = MVS_SLOT_EMPTY;
1642         slot->ccb = NULL;
1643         /* Update channel stats. */
1644         ch->numrslots--;
1645         ch->numrslotspd[ccb->ccb_h.target_id]--;
1646         if (ccb->ccb_h.func_code == XPT_ATA_IO) {
1647                 if (ccb->ataio.cmd.flags & CAM_ATAIO_FPDMA) {
1648                         ch->otagspd[ccb->ccb_h.target_id] &= ~(1 << slot->tag);
1649                         ch->numtslots--;
1650                         ch->numtslotspd[ccb->ccb_h.target_id]--;
1651                 } else if (ccb->ataio.cmd.flags & CAM_ATAIO_DMA) {
1652                         ch->numdslots--;
1653                 } else {
1654                         ch->numpslots--;
1655                 }
1656         } else {
1657                 ch->numpslots--;
1658                 ch->basic_dma = 0;
1659         }
1660         /* If it was our READ LOG command - process it. */
1661         if (ch->readlog) {
1662                 mvs_process_read_log(dev, ccb);
1663         /* If it was an NCQ command error, put the result on hold. */
1664         } else if (et == MVS_ERR_NCQ) {
1665                 ch->hold[slot->slot] = ccb;
1666                 ch->holdtag[slot->slot] = slot->tag;
1667                 ch->numhslots++;
1668         } else
1669                 xpt_done(ccb);
1670         /* Unfreeze frozen command. */
1671         if (ch->frozen && !mvs_check_collision(dev, ch->frozen)) {
1672                 union ccb *fccb = ch->frozen;
1673                 ch->frozen = NULL;
1674                 mvs_begin_transaction(dev, fccb);
1675                 xpt_release_simq(ch->sim, TRUE);
1676         }
1677         /* If we have no other active commands, ... */
1678         if (ch->rslots == 0) {
1679                 /* if there was a fatal error, reset the port. */
1680                 if (ch->toslots != 0 || ch->fatalerr) {
1681                         mvs_reset(dev);
1682                 } else {
1683                         /* if we have slots in error, we can reinitialize the port. */
1684                         if (ch->eslots != 0) {
1685                                 mvs_set_edma_mode(dev, MVS_EDMA_OFF);
1686                                 ch->eslots = 0;
1687                         }
1688                         /* if there are commands on hold, we can do READ LOG. */
1689                         if (!ch->readlog && ch->numhslots)
1690                                 mvs_issue_read_log(dev);
1691                 }
1692         /* If all remaining commands have timed out, give them a chance. */
1693         } else if ((ch->rslots & ~ch->toslots) == 0 &&
1694             et != MVS_ERR_TIMEOUT)
1695                 mvs_rearm_timeout(dev);
1696         /* Start PM timer. */
1697         if (ch->numrslots == 0 && ch->pm_level > 3 &&
1698             (ch->curr[ch->pm_present ? 15 : 0].caps & CTS_SATA_CAPS_D_PMREQ)) {
1699                 callout_schedule(&ch->pm_timer,
1700                     (ch->pm_level == 4) ? hz / 1000 : hz / 8);
1701         }
1702 }
1703
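     /*
      * After an NCQ error the drive reports which queued command failed,
      * and with which registers, in READ LOG EXT log page 10h; the held
      * CCBs are then completed from that data in mvs_process_read_log().
      */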
1704 static void
1705 mvs_issue_read_log(device_t dev)
1706 {
1707         struct mvs_channel *ch = device_get_softc(dev);
1708         union ccb *ccb;
1709         struct ccb_ataio *ataio;
1710         int i;
1711
1712         ch->readlog = 1;
1713         /* Find a held command. */
1714         for (i = 0; i < MVS_MAX_SLOTS; i++) {
1715                 if (ch->hold[i])
1716                         break;
1717         }
1718         ccb = xpt_alloc_ccb_nowait();
1719         if (ccb == NULL) {
1720                 device_printf(dev, "Unable to allocate READ LOG command\n");
1721                 return; /* XXX */
1722         }
1723         ccb->ccb_h = ch->hold[i]->ccb_h;        /* Reuse old header. */
1724         ccb->ccb_h.func_code = XPT_ATA_IO;
1725         ccb->ccb_h.flags = CAM_DIR_IN;
1726         ccb->ccb_h.timeout = 1000;      /* 1s should be enough. */
1727         ataio = &ccb->ataio;
1728         ataio->data_ptr = malloc(512, M_MVS, M_NOWAIT);
1729         if (ataio->data_ptr == NULL) {
1730                 device_printf(dev, "Unable to allocate memory for READ LOG command\n");
1731                 return; /* XXX */
1732         }
1733         ataio->dxfer_len = 512;
1734         bzero(&ataio->cmd, sizeof(ataio->cmd));
1735         ataio->cmd.flags = CAM_ATAIO_48BIT;
1736         ataio->cmd.command = 0x2F;      /* READ LOG EXT */
1737         ataio->cmd.sector_count = 1;
1738         ataio->cmd.sector_count_exp = 0;
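             /* Log address 10h selects the SATA NCQ command error log. */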
1739         ataio->cmd.lba_low = 0x10;
1740         ataio->cmd.lba_mid = 0;
1741         ataio->cmd.lba_mid_exp = 0;
1742         /* Freeze SIM while doing READ LOG EXT. */
1743         xpt_freeze_simq(ch->sim, 1);
1744         mvs_begin_transaction(dev, ccb);
1745 }
1746
1747 static void
1748 mvs_process_read_log(device_t dev, union ccb *ccb)
1749 {
1750         struct mvs_channel *ch = device_get_softc(dev);
1751         uint8_t *data;
1752         struct ata_res *res;
1753         int i;
1754
1755         ch->readlog = 0;
1756
1757         data = ccb->ataio.data_ptr;
1758         if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_CMP &&
1759             (data[0] & 0x80) == 0) {
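                     /*
                      * Byte 0 of the log holds the NQ flag (bit 7) and the
                      * tag of the failed command (bits 0-4); bytes 2-13
                      * mirror the shadow registers copied below.
                      */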
1760                 for (i = 0; i < MVS_MAX_SLOTS; i++) {
1761                         if (!ch->hold[i])
1762                                 continue;
1763                         if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id)
1764                                 continue;
1765                         if ((data[0] & 0x1F) == ch->holdtag[i]) {
1766                                 res = &ch->hold[i]->ataio.res;
1767                                 res->status = data[2];
1768                                 res->error = data[3];
1769                                 res->lba_low = data[4];
1770                                 res->lba_mid = data[5];
1771                                 res->lba_high = data[6];
1772                                 res->device = data[7];
1773                                 res->lba_low_exp = data[8];
1774                                 res->lba_mid_exp = data[9];
1775                                 res->lba_high_exp = data[10];
1776                                 res->sector_count = data[12];
1777                                 res->sector_count_exp = data[13];
1778                         } else {
1779                                 ch->hold[i]->ccb_h.status &= ~CAM_STATUS_MASK;
1780                                 ch->hold[i]->ccb_h.status |= CAM_REQUEUE_REQ;
1781                         }
1782                         xpt_done(ch->hold[i]);
1783                         ch->hold[i] = NULL;
1784                         ch->numhslots--;
1785                 }
1786         } else {
1787                 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_CMP)
1788                         device_printf(dev, "Error while executing READ LOG EXT\n");
1789                 else if ((data[0] & 0x80) == 0) {
1790                         device_printf(dev, "Non-queued command error in READ LOG EXT\n");
1791                 }
1792                 for (i = 0; i < MVS_MAX_SLOTS; i++) {
1793                         if (!ch->hold[i])
1794                                 continue;
1795                         if (ch->hold[i]->ccb_h.target_id != ccb->ccb_h.target_id)
1796                                 continue;
1797                         xpt_done(ch->hold[i]);
1798                         ch->hold[i] = NULL;
1799                         ch->numhslots--;
1800                 }
1801         }
1802         free(ccb->ataio.data_ptr, M_MVS);
1803         xpt_free_ccb(ccb);
1804         xpt_release_simq(ch->sim, TRUE);
1805 }
1806
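     /*
      * Poll the device status until every bit in 's' is set and every bit
      * in 'c' is clear, waiting at most 't' milliseconds.  Returns the
      * approximate time waited in milliseconds, or -1 on timeout.
      */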
1807 static int
1808 mvs_wait(device_t dev, u_int s, u_int c, int t)
1809 {
1810         int timeout = 0;
1811         uint8_t st;
1812
1813         while (((st = mvs_getstatus(dev, 0)) & (s | c)) != s) {
1814                 DELAY(1000);
1815                 if (timeout++ > t) {
1816                         device_printf(dev, "Wait status %02x\n", st);
1817                         return (-1);
1818                 }
1819         } 
1820         return (timeout);
1821 }
1822
1823 static void
1824 mvs_requeue_frozen(device_t dev)
1825 {
1826         struct mvs_channel *ch = device_get_softc(dev);
1827         union ccb *fccb = ch->frozen;
1828
1829         if (fccb) {
1830                 ch->frozen = NULL;
1831                 fccb->ccb_h.status = CAM_REQUEUE_REQ | CAM_RELEASE_SIMQ;
1832                 if (!(fccb->ccb_h.status & CAM_DEV_QFRZN)) {
1833                         xpt_freeze_devq(fccb->ccb_h.path, 1);
1834                         fccb->ccb_h.status |= CAM_DEV_QFRZN;
1835                 }
1836                 xpt_done(fccb);
1837         }
1838 }
1839
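     /*
      * Hard-reset the channel: requeue running commands, complete any held
      * NCQ results, pulse EDMA_CMD_EATARST, redo the SATA PHY reset and, if
      * a device is still connected, wait for BSY/DRQ to clear before
      * reopening the SIM queue.
      */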
1840 static void
1841 mvs_reset(device_t dev)
1842 {
1843         struct mvs_channel *ch = device_get_softc(dev);
1844         int i;
1845
1846         xpt_freeze_simq(ch->sim, 1);
1847         if (bootverbose)
1848                 device_printf(dev, "MVS reset...\n");
1849         /* Requeue the frozen command. */
1850         mvs_requeue_frozen(dev);
1851         /* Kill the engine and requeue all running commands. */
1852         mvs_set_edma_mode(dev, MVS_EDMA_OFF);
1853         ATA_OUTL(ch->r_mem, DMA_C, 0);
1854         for (i = 0; i < MVS_MAX_SLOTS; i++) {
1855                 /* Do we have a running request on slot? */
1856                 if (ch->slot[i].state < MVS_SLOT_RUNNING)
1857                         continue;
1858                 /* XXX: Commands in loading state. */
1859                 mvs_end_transaction(&ch->slot[i], MVS_ERR_INNOCENT);
1860         }
1861         for (i = 0; i < MVS_MAX_SLOTS; i++) {
1862                 if (!ch->hold[i])
1863                         continue;
1864                 xpt_done(ch->hold[i]);
1865                 ch->hold[i] = NULL;
1866                 ch->numhslots--;
1867         }
1868         if (ch->toslots != 0)
1869                 xpt_release_simq(ch->sim, TRUE);
1870         ch->eslots = 0;
1871         ch->toslots = 0;
1872         ch->fatalerr = 0;
1873         /* Tell the XPT about the event */
1874         xpt_async(AC_BUS_RESET, ch->path, NULL);
1875         ATA_OUTL(ch->r_mem, EDMA_IEM, 0);
1876         ATA_OUTL(ch->r_mem, EDMA_CMD, EDMA_CMD_EATARST);
1877         DELAY(25);
1878         ATA_OUTL(ch->r_mem, EDMA_CMD, 0);
1879         /* Reset and reconnect the PHY. */
1880         if (!mvs_sata_phy_reset(dev)) {
1881                 if (bootverbose)
1882                         device_printf(dev,
1883                             "MVS reset done: phy reset found no device\n");
1884                 ch->devices = 0;
1885                 ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff);
1886                 ATA_OUTL(ch->r_mem, EDMA_IEC, 0);
1887                 ATA_OUTL(ch->r_mem, EDMA_IEM, ~EDMA_IE_TRANSIENT);
1888                 xpt_release_simq(ch->sim, TRUE);
1889                 return;
1890         }
1891         /* Wait for the busy status to clear. */
1892         if ((i = mvs_wait(dev, 0, ATA_S_BUSY | ATA_S_DRQ, 15000)) < 0)
1893                 device_printf(dev, "device is not ready\n");
1894         else if (bootverbose)
1895                 device_printf(dev, "ready wait time=%dms\n", i);
1896         ch->devices = 1;
1897         ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff);
1898         ATA_OUTL(ch->r_mem, EDMA_IEC, 0);
1899         ATA_OUTL(ch->r_mem, EDMA_IEM, ~EDMA_IE_TRANSIENT);
1900         if (bootverbose)
1901                 device_printf(dev, "MVS reset done: device found\n");
1902         xpt_release_simq(ch->sim, TRUE);
1903 }
1904
1905 static void
1906 mvs_softreset(device_t dev, union ccb *ccb)
1907 {
1908         struct mvs_channel *ch = device_get_softc(dev);
1909         int port = ccb->ccb_h.target_id & 0x0f;
1910         int i;
1911
1912         mvs_set_edma_mode(dev, MVS_EDMA_OFF);
1913         ATA_OUTB(ch->r_mem, SATA_SATAICTL, port << SATA_SATAICTL_PMPTX_SHIFT);
1914         ATA_OUTB(ch->r_mem, ATA_CONTROL, ATA_A_RESET);
1915         DELAY(10000);
1916         ATA_OUTB(ch->r_mem, ATA_CONTROL, 0);
1917         ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1918         /* Wait for the busy status to clear. */
1919         if ((i = mvs_wait(dev, 0, ATA_S_BUSY | ATA_S_DRQ, ccb->ccb_h.timeout)) < 0) {
1920                 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1921         } else {
1922                 ccb->ccb_h.status |= CAM_REQ_CMP;
1923         }
1924         mvs_tfd_read(dev, ccb);
1925         xpt_done(ccb);
1926 }
1927
1928 static int
1929 mvs_sata_connect(struct mvs_channel *ch)
1930 {
1931         u_int32_t status;
1932         int timeout;
1933
1934         /* Wait up to 100ms for the PHY to report a good connection. */
1935         for (timeout = 0; timeout < 100 ; timeout++) {
1936                 status = ATA_INL(ch->r_mem, SATA_SS);
1937                 if (((status & SATA_SS_DET_MASK) == SATA_SS_DET_PHY_ONLINE) &&
1938                     ((status & SATA_SS_SPD_MASK) != SATA_SS_SPD_NO_SPEED) &&
1939                     ((status & SATA_SS_IPM_MASK) == SATA_SS_IPM_ACTIVE))
1940                         break;
1941                 if ((status & SATA_SS_DET_MASK) == SATA_SS_DET_PHY_OFFLINE) {
1942                         if (bootverbose) {
1943                                 device_printf(ch->dev, "SATA offline status=%08x\n",
1944                                     status);
1945                         }
1946                         return (0);
1947                 }
1948                 DELAY(1000);
1949         }
1950         if (timeout >= 100) {
1951                 if (bootverbose) {
1952                         device_printf(ch->dev, "SATA connect timeout status=%08x\n",
1953                             status);
1954                 }
1955                 return (0);
1956         }
1957         if (bootverbose) {
1958                 device_printf(ch->dev, "SATA connect time=%dms status=%08x\n",
1959                     timeout, status);
1960         }
1961         /* Clear SATA error register */
1962         ATA_OUTL(ch->r_mem, SATA_SE, 0xffffffff);
1963         return (1);
1964 }
1965
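     /*
      * Issue a COMRESET through SControl: write DET=1 together with the
      * configured speed limit and power-management restrictions, then return
      * DET to idle and let mvs_sata_connect() check whether a device comes
      * back online.
      */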
1966 static int
1967 mvs_sata_phy_reset(device_t dev)
1968 {
1969         struct mvs_channel *ch = device_get_softc(dev);
1970         int sata_rev;
1971         uint32_t val;
1972
1973         sata_rev = ch->user[ch->pm_present ? 15 : 0].revision;
1974         if (sata_rev == 1)
1975                 val = SATA_SC_SPD_SPEED_GEN1;
1976         else if (sata_rev == 2)
1977                 val = SATA_SC_SPD_SPEED_GEN2;
1978         else if (sata_rev == 3)
1979                 val = SATA_SC_SPD_SPEED_GEN3;
1980         else
1981                 val = 0;
1982         ATA_OUTL(ch->r_mem, SATA_SC,
1983             SATA_SC_DET_RESET | val |
1984             SATA_SC_IPM_DIS_PARTIAL | SATA_SC_IPM_DIS_SLUMBER);
1985         DELAY(5000);
1986         ATA_OUTL(ch->r_mem, SATA_SC,
1987             SATA_SC_DET_IDLE | val | ((ch->pm_level > 0) ? 0 :
1988             (SATA_SC_IPM_DIS_PARTIAL | SATA_SC_IPM_DIS_SLUMBER)));
1989         DELAY(5000);
1990         if (!mvs_sata_connect(ch)) {
1991                 if (ch->pm_level > 0)
1992                         ATA_OUTL(ch->r_mem, SATA_SC, SATA_SC_DET_DISABLE);
1993                 return (0);
1994         }
1995         return (1);
1996 }
1997
1998 static int
1999 mvs_check_ids(device_t dev, union ccb *ccb)
2000 {
2001         struct mvs_channel *ch = device_get_softc(dev);
2002
2003         if (ccb->ccb_h.target_id > ((ch->quirks & MVS_Q_GENI) ? 0 : 15)) {
2004                 ccb->ccb_h.status = CAM_TID_INVALID;
2005                 xpt_done(ccb);
2006                 return (-1);
2007         }
2008         if (ccb->ccb_h.target_lun != 0) {
2009                 ccb->ccb_h.status = CAM_LUN_INVALID;
2010                 xpt_done(ccb);
2011                 return (-1);
2012         }
2013         return (0);
2014 }
2015
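     /*
      * CAM SIM action routine: I/O CCBs are started via
      * mvs_begin_transaction() (or frozen on collision), while control
      * requests such as transfer-setting changes, resets and path inquiry
      * are answered directly.
      */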
2016 static void
2017 mvsaction(struct cam_sim *sim, union ccb *ccb)
2018 {
2019         device_t dev;
2020         struct mvs_channel *ch;
2021
2022         CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("mvsaction func_code=%x\n",
2023             ccb->ccb_h.func_code));
2024
2025         ch = (struct mvs_channel *)cam_sim_softc(sim);
2026         dev = ch->dev;
2027         switch (ccb->ccb_h.func_code) {
2028         /* Common cases first */
2029         case XPT_ATA_IO:        /* Execute the requested I/O operation */
2030         case XPT_SCSI_IO:
2031                 if (mvs_check_ids(dev, ccb))
2032                         return;
2033                 if (ch->devices == 0 ||
2034                     (ch->pm_present == 0 &&
2035                      ccb->ccb_h.target_id > 0 && ccb->ccb_h.target_id < 15)) {
2036                         ccb->ccb_h.status = CAM_SEL_TIMEOUT;
2037                         break;
2038                 }
2039                 /* Check for command collision. */
2040                 if (mvs_check_collision(dev, ccb)) {
2041                         /* Freeze command. */
2042                         ch->frozen = ccb;
2043                         /* We have only one frozen slot, so freeze simq also. */
2044                         xpt_freeze_simq(ch->sim, 1);
2045                         return;
2046                 }
2047                 mvs_begin_transaction(dev, ccb);
2048                 return;
2049         case XPT_EN_LUN:                /* Enable LUN as a target */
2050         case XPT_TARGET_IO:             /* Execute target I/O request */
2051         case XPT_ACCEPT_TARGET_IO:      /* Accept Host Target Mode CDB */
2052         case XPT_CONT_TARGET_IO:        /* Continue Host Target I/O Connection*/
2053         case XPT_ABORT:                 /* Abort the specified CCB */
2054                 /* XXX Implement */
2055                 ccb->ccb_h.status = CAM_REQ_INVALID;
2056                 break;
2057         case XPT_SET_TRAN_SETTINGS:
2058         {
2059                 struct  ccb_trans_settings *cts = &ccb->cts;
2060                 struct  mvs_device *d; 
2061
2062                 if (mvs_check_ids(dev, ccb))
2063                         return;
2064                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2065                         d = &ch->curr[ccb->ccb_h.target_id];
2066                 else
2067                         d = &ch->user[ccb->ccb_h.target_id];
2068                 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
2069                         d->revision = cts->xport_specific.sata.revision;
2070                 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE)
2071                         d->mode = cts->xport_specific.sata.mode;
2072                 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT) {
2073                         d->bytecount = min((ch->quirks & MVS_Q_GENIIE) ? 8192 : 2048,
2074                             cts->xport_specific.sata.bytecount);
2075                 }
2076                 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_TAGS)
2077                         d->tags = min(MVS_MAX_SLOTS, cts->xport_specific.sata.tags);
2078                 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_PM)
2079                         ch->pm_present = cts->xport_specific.sata.pm_present;
2080                 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
2081                         d->atapi = cts->xport_specific.sata.atapi;
2082                 if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
2083                         d->caps = cts->xport_specific.sata.caps;
2084                 ccb->ccb_h.status = CAM_REQ_CMP;
2085                 break;
2086         }
2087         case XPT_GET_TRAN_SETTINGS:
2088         /* Get default/user set transfer settings for the target */
2089         {
2090                 struct  ccb_trans_settings *cts = &ccb->cts;
2091                 struct  mvs_device *d;
2092                 uint32_t status;
2093
2094                 if (mvs_check_ids(dev, ccb))
2095                         return;
2096                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
2097                         d = &ch->curr[ccb->ccb_h.target_id];
2098                 else
2099                         d = &ch->user[ccb->ccb_h.target_id];
2100                 cts->protocol = PROTO_ATA;
2101                 cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
2102                 cts->transport = XPORT_SATA;
2103                 cts->transport_version = XPORT_VERSION_UNSPECIFIED;
2104                 cts->proto_specific.valid = 0;
2105                 cts->xport_specific.sata.valid = 0;
2106                 if (cts->type == CTS_TYPE_CURRENT_SETTINGS &&
2107                     (ccb->ccb_h.target_id == 15 ||
2108                     (ccb->ccb_h.target_id == 0 && !ch->pm_present))) {
2109                         status = ATA_INL(ch->r_mem, SATA_SS) & SATA_SS_SPD_MASK;
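                             /*
                              * The SPD field of SStatus (bits 7:4) holds the
                              * negotiated interface generation, hence the
                              * shift by four below.
                              */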
2110                         if (status & 0x0f0) {
2111                                 cts->xport_specific.sata.revision =
2112                                     (status & 0x0f0) >> 4;
2113                                 cts->xport_specific.sata.valid |=
2114                                     CTS_SATA_VALID_REVISION;
2115                         }
2116                         cts->xport_specific.sata.caps = d->caps & CTS_SATA_CAPS_D;
2117 //                      if (ch->pm_level)
2118 //                              cts->xport_specific.sata.caps |= CTS_SATA_CAPS_H_PMREQ;
2119                         cts->xport_specific.sata.caps &=
2120                             ch->user[ccb->ccb_h.target_id].caps;
2121                         cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2122                 } else {
2123                         cts->xport_specific.sata.revision = d->revision;
2124                         cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
2125                         cts->xport_specific.sata.caps = d->caps;
2126                         cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
2127                 }
2128                 cts->xport_specific.sata.mode = d->mode;
2129                 cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
2130                 cts->xport_specific.sata.bytecount = d->bytecount;
2131                 cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
2132                 cts->xport_specific.sata.pm_present = ch->pm_present;
2133                 cts->xport_specific.sata.valid |= CTS_SATA_VALID_PM;
2134                 cts->xport_specific.sata.tags = d->tags;
2135                 cts->xport_specific.sata.valid |= CTS_SATA_VALID_TAGS;
2136                 cts->xport_specific.sata.atapi = d->atapi;
2137                 cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
2138                 ccb->ccb_h.status = CAM_REQ_CMP;
2139                 break;
2140         }
2141         case XPT_RESET_BUS:             /* Reset the specified SCSI bus */
2142         case XPT_RESET_DEV:     /* Bus Device Reset the specified SCSI device */
2143                 mvs_reset(dev);
2144                 ccb->ccb_h.status = CAM_REQ_CMP;
2145                 break;
2146         case XPT_TERM_IO:               /* Terminate the I/O process */
2147                 /* XXX Implement */
2148                 ccb->ccb_h.status = CAM_REQ_INVALID;
2149                 break;
2150         case XPT_PATH_INQ:              /* Path routing inquiry */
2151         {
2152                 struct ccb_pathinq *cpi = &ccb->cpi;
2153
2154                 cpi->version_num = 1; /* XXX??? */
2155                 cpi->hba_inquiry = PI_SDTR_ABLE;
2156                 if (!(ch->quirks & MVS_Q_GENI)) {
2157                         cpi->hba_inquiry |= PI_SATAPM;
2158                         /* Gen-II is extremely slow with NCQ on PMP. */
2159                         if ((ch->quirks & MVS_Q_GENIIE) || ch->pm_present == 0)
2160                                 cpi->hba_inquiry |= PI_TAG_ABLE;
2161                 }
2162                 cpi->target_sprt = 0;
2163                 cpi->hba_misc = PIM_SEQSCAN;
2164                 cpi->hba_eng_cnt = 0;
2165                 if (!(ch->quirks & MVS_Q_GENI))
2166                         cpi->max_target = 15;
2167                 else
2168                         cpi->max_target = 0;
2169                 cpi->max_lun = 0;
2170                 cpi->initiator_id = 0;
2171                 cpi->bus_id = cam_sim_bus(sim);
2172                 cpi->base_transfer_speed = 150000;
2173                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2174                 strncpy(cpi->hba_vid, "Marvell", HBA_IDLEN);
2175                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2176                 cpi->unit_number = cam_sim_unit(sim);
2177                 cpi->transport = XPORT_SATA;
2178                 cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
2179                 cpi->protocol = PROTO_ATA;
2180                 cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
2181                 cpi->maxio = MAXPHYS;
2182                 cpi->ccb_h.status = CAM_REQ_CMP;
2183                 break;
2184         }
2185         default:
2186                 ccb->ccb_h.status = CAM_REQ_INVALID;
2187                 break;
2188         }
2189         xpt_done(ccb);
2190 }
2191
2192 static void
2193 mvspoll(struct cam_sim *sim)
2194 {
2195         struct mvs_channel *ch = (struct mvs_channel *)cam_sim_softc(sim);
2196         struct mvs_intr_arg arg;
2197
2198         arg.arg = ch->dev;
2199         arg.cause = 2; /* XXX */
2200         mvs_ch_intr(&arg);
2201 }
2202