/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#if (__FreeBSD_version >= 500000)
#include <sys/time.h>
#include <sys/systm.h>
#else
#include <machine/clock.h>
#endif

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#if (__FreeBSD_version >= 500000)
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>
#endif

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if (__FreeBSD_version >= 500000)
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#else
#include <pci/pcivar.h>
#include <pci/pcireg.h>
#endif

#if (__FreeBSD_version <= 500043)
#include <sys/devicestat.h>
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#if (__FreeBSD_version < 500043)
#include <sys/bus_private.h>
#endif

#include <dev/hptiop/hptiop.h>

static char driver_name[] = "hptiop";
static char driver_version[] = "v1.3 (010208)";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                        u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static int  hptiop_reset_adapter(struct hpt_iop_hba *hba);

static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
        .d_name = driver_name,
#if __FreeBSD_version >= 503000
        .d_version = D_VERSION,
#endif
#if (__FreeBSD_version >= 503000 && __FreeBSD_version < 600034)
        .d_flags = D_NEEDGIANT,
#endif
#if __FreeBSD_version < 600034
#if __FreeBSD_version >= 501000
        .d_maj = MAJOR_AUTO,
#else
        .d_maj = HPT_DEV_MAJOR,
#endif
#endif
};

#if __FreeBSD_version < 503000
#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
#else
#define hba_from_dev(dev) \
        ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
#endif

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

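/*
 * Character-device entry points.  Only one open at a time is allowed;
 * the HPT_IOCTL_FLAG_OPEN bit in hba->flag serves as the busy marker.
 */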
static int hptiop_open(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba == NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);
        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
                                        int flags, ioctl_thread_t proc)
{
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

#if (__FreeBSD_version >= 500000)
        mtx_lock(&Giant);
#endif

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }

#if (__FreeBSD_version >= 500000)
        mtx_unlock(&Giant);
#endif

        return ret;
}

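/*
 * Pop one 64-bit completion descriptor from the MV outbound queue, or
 * return 0 if the queue is empty.  The descriptor is read as two
 * 32-bit words and the tail index is advanced modulo MVIOP_QUEUE_LEN.
 */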
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                        (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

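/*
 * Post a message to the IOP firmware.  The trailing register read in
 * each variant appears to be there to flush the posted PCI write.
 */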
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        u_int32_t req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

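/*
 * Completion path for ITL (Intel IOP) adapters.  "index" is the value
 * pulled from the outbound queue: either a host-allocated SRB slot
 * (IOPMU_QUEUE_MASK_HOST_BITS set) or the offset of a request that
 * lives in IOP memory and must be accessed through bar0.
 */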
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req = NULL;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /* iop req */
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch (temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                                index + offsetof(struct hpt_iop_request_scsi_command,
                                dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (dxfer < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    dxfer;
                        else
                                ccb->csio.sense_resid = 0;
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header *p;

                        p = (struct hpt_iop_request_header *)
                                ((char *)hba->u.itl.mu + req);
                        temp = bus_space_read_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;
                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

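/*
 * ITL interrupt handler: acknowledges and dispatches outbound messages
 * and drains the outbound completion queue.  Returns nonzero if any
 * work was found, so callers can tell whether the interrupt was ours.
 */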
static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

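/*
 * Completion path for MV (Marvell) adapters.  The low 32 bits of the
 * queue entry encode the request type (SCSI, ioctl, get/set config)
 * and, for SCSI requests, the SRB slot number.
 */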
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
                                                        u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
                                        u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;
        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

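/*
 * Send a message to the firmware and poll for completion for up to
 * "millisec" milliseconds, driving the interrupt handler by hand.
 */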
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                        u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done ? 0 : -1;
}

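/*
 * GET_CONFIG / SET_CONFIG exchanges.  On ITL hardware the request is
 * built in a firmware-supplied slot in IOP memory; on MV hardware it
 * is staged in the host-resident ctlcfg DMA buffer instead.
 */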
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                (u_int32_t *)config,
                sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

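/*
 * Ioctl pass-through for ITL adapters.  The request header is written
 * into the firmware-supplied slot, then the caller sleeps until the
 * completion handler clears the context field and calls wakeup() on
 * the slot address.  The input buffer is rounded up to a 4-byte
 * boundary so the output buffer starts aligned.
 */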
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

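/*
 * Byte-at-a-time copies between user space and IOP memory behind bar0;
 * bus_space has no copyin/copyout primitives, so this bounces every
 * byte through a local variable.  Slow, but only used for ioctls.
 */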
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
                                void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                if (copyin((u_int8_t *)user + i, &byte, 1))
                        return -1;
                bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }

        return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
                                void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
                if (copyout(&byte, (u_int8_t *)user + i, 1))
                        return -1;
        }

        return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int32_t req32;
        u_int32_t result;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return EFAULT;

        if (pParams->nInBufferSize)
                if (hptiop_bus_space_copyin(hba, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command, buf),
                        (void *)pParams->lpInBuffer, pParams->nInBufferSize))
                        goto invalid;

        if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
                goto invalid;

        result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.result));

        if (result == IOP_RESULT_SUCCESS) {
                if (pParams->nOutBufferSize)
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, buf) +
                                        ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned) {
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
                                (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
                                goto invalid;
                }

                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return 0;
        } else {
invalid:
                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return EFAULT;
        }
}

static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t req_phy;
        int size = 0;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req->inbuf_size = pParams->nInBufferSize;
        req->outbuf_size = pParams->nOutBufferSize;
        req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                                        + pParams->nInBufferSize;
        req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
        req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req->header.result = IOP_RESULT_PENDING;
        req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        size = req->header.size >> 8;
        size = size > 3 ? 3 : size;
        req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
        hptiop_mv_inbound_write(req_phy, hba);

        BUS_SPACE_RD4_MV0(outbound_intmask);

        while (hba->config_done == 0) {
                if (hptiop_sleep(hba, req, PPAUSE,
                        "hptctl", HPT_OSM_TIMEOUT) == 0)
                        continue;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
        }
        return 0;
}

static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        struct hpt_iop_request_ioctl_command *req;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
        hba->config_done = 0;
        hptiop_lock_adapter(hba);
        if (pParams->nInBufferSize)
                if (copyin((void *)pParams->lpInBuffer,
                                req->buf, pParams->nInBufferSize))
                        goto invalid;
        if (hptiop_post_ioctl_command_mv(hba, req, pParams))
                goto invalid;

        if (hba->config_done == 1) {
                if (pParams->nOutBufferSize)
                        if (copyout(req->buf +
                                ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer,
                                pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned)
                        if (copyout(&req->bytes_returned,
                                (void *)pParams->lpBytesReturned,
                                sizeof(u_int32_t)))
                                goto invalid;
                hptiop_unlock_adapter(hba);
                return 0;
        } else {
invalid:
                hptiop_unlock_adapter(hba);
                return EFAULT;
        }
}

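/*
 * Ask CAM to rescan the whole bus behind our SIM, typically after a
 * HPT_SCAN_BUS ioctl from the management tools.
 */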
static int hptiop_rescan_bus(struct hpt_iop_hba *hba)
{
        union ccb *ccb;

        if ((ccb = xpt_alloc_ccb()) == NULL)
                return (ENOMEM);
        if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
                CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return (EIO);
        }
        xpt_rescan(ccb);
        return (0);
}

static bus_dmamap_callback_t hptiop_map_srb;
static bus_dmamap_callback_t hptiop_post_scsi_command;
static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;

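/*
 * Map the PCI BARs.  ITL adapters expose a single message unit behind
 * BAR0; MV adapters split the register file (BAR0) from the message
 * unit (BAR2).
 */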
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev,
                        "failed to get iop base address.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.itl.mu = (struct hpt_iopmu_itl *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.itl.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc mem res failed\n");
                return -1;
        }

        return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev, "failed to get iop bar0.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.mv.regs = (struct hpt_iopmv_regs *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.mv.regs) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
                return -1;
        }

        hba->bar2_rid = 0x18;
        hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

        if (hba->bar2_res == NULL) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "failed to get iop bar2.\n");
                return -1;
        }

        hba->bar2t = rman_get_bustag(hba->bar2_res);
        hba->bar2h = rman_get_bushandle(hba->bar2_res);
        hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

        if (!hba->u.mv.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar2_rid, hba->bar2_res);
                device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
        if (hba->bar2_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar2_rid, hba->bar2_res);
}

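/*
 * MV adapters need a small host-resident DMA buffer for ioctl and
 * get/set-config requests.  Allocate it coherent (where supported)
 * below 4GB and record its bus address via the hptiop_mv_map_ctlcfg
 * callback.
 */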
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
        if (bus_dma_tag_create(hba->parent_dmat,
                                1,
                                0,
                                BUS_SPACE_MAXADDR_32BIT,
                                BUS_SPACE_MAXADDR,
                                NULL, NULL,
                                0x800 - 0x8,
                                1,
                                BUS_SPACE_MAXSIZE_32BIT,
                                BUS_DMA_ALLOCNOW,
#if __FreeBSD_version > 502000
                                NULL,
                                NULL,
#endif
                                &hba->ctlcfg_dmat)) {
                device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
                return -1;
        }

        if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
#if __FreeBSD_version > 501000
                BUS_DMA_WAITOK | BUS_DMA_COHERENT,
#else
                BUS_DMA_WAITOK,
#endif
                &hba->ctlcfg_dmamap) != 0) {
                        device_printf(hba->pcidev,
                                        "bus_dmamem_alloc failed!\n");
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                        return -1;
        }

        if (bus_dmamap_load(hba->ctlcfg_dmat,
                        hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
                        MVIOP_IOCTLCFG_SIZE,
                        hptiop_mv_map_ctlcfg, hba, 0)) {
                device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
                if (hba->ctlcfg_dmat) {
                        bus_dmamem_free(hba->ctlcfg_dmat,
                                hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                }
                return -1;
        }

        return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
        if (hba->ctlcfg_dmat) {
                bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
                bus_dmamem_free(hba->ctlcfg_dmat,
                                        hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
        }

        return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,     hptiop_probe),
        DEVMETHOD(device_attach,    hptiop_attach),
        DEVMETHOD(device_detach,    hptiop_detach),
        DEVMETHOD(device_shutdown,  hptiop_shutdown),
        { 0, 0 }
};

static struct hptiop_adapter_ops hptiop_itl_ops = {
        .iop_wait_ready    = hptiop_wait_ready_itl,
        .internal_memalloc = 0,
        .internal_memfree  = 0,
        .alloc_pci_res     = hptiop_alloc_pci_res_itl,
        .release_pci_res   = hptiop_release_pci_res_itl,
        .enable_intr       = hptiop_enable_intr_itl,
        .disable_intr      = hptiop_disable_intr_itl,
        .get_config        = hptiop_get_config_itl,
        .set_config        = hptiop_set_config_itl,
        .iop_intr          = hptiop_intr_itl,
        .post_msg          = hptiop_post_msg_itl,
        .post_req          = hptiop_post_req_itl,
        .do_ioctl          = hptiop_do_ioctl_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .iop_wait_ready    = hptiop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree  = hptiop_internal_memfree_mv,
        .alloc_pci_res     = hptiop_alloc_pci_res_mv,
        .release_pci_res   = hptiop_release_pci_res_mv,
        .enable_intr       = hptiop_enable_intr_mv,
        .disable_intr      = hptiop_disable_intr_mv,
        .get_config        = hptiop_get_config_mv,
        .set_config        = hptiop_set_config_mv,
        .iop_intr          = hptiop_intr_mv,
        .post_msg          = hptiop_post_msg_mv,
        .post_req          = hptiop_post_req_mv,
        .do_ioctl          = hptiop_do_ioctl_mv,
};

static driver_t hptiop_pci_driver = {
        driver_name,
        driver_methods,
        sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);

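/*
 * Match on HighPoint's PCI vendor ID (0x1103) and pick the adapter ops
 * table by device ID.  Note the deliberate fall-through: the 0x432x
 * SAS IDs set "sas" and then share the ITL ops with the SATA parts.
 */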
static int hptiop_probe(device_t dev)
{
        struct hpt_iop_hba *hba;
        u_int32_t id;
        static char buf[256];
        int sas = 0;
        struct hptiop_adapter_ops *ops;

        if (pci_get_vendor(dev) != 0x1103)
                return (ENXIO);

        id = pci_get_device(dev);

        switch (id) {
                case 0x4322:
                case 0x4321:
                case 0x4320:
                        sas = 1;
                        /* FALLTHROUGH */
                case 0x3220:
                case 0x3320:
                case 0x3410:
                case 0x3520:
                case 0x3510:
                case 0x3511:
                case 0x3521:
                case 0x3522:
                case 0x3540:
                        ops = &hptiop_itl_ops;
                        break;
                case 0x3120:
                case 0x3122:
                case 0x3020:
                        ops = &hptiop_mv_ops;
                        break;
                default:
                        return (ENXIO);
        }

        device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
                pci_get_bus(dev), pci_get_slot(dev),
                pci_get_function(dev), pci_get_irq(dev));

        sprintf(buf, "RocketRAID %x %s Controller",
                                id, sas ? "SAS" : "SATA");
        device_set_desc_copy(dev, buf);

        hba = (struct hpt_iop_hba *)device_get_softc(dev);
        bzero(hba, sizeof(struct hpt_iop_hba));
        hba->ops = ops;

        KdPrint(("hba->ops=%p\n", hba->ops));
        return 0;
}

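/*
 * Attach: map the BARs, wait for the firmware to come ready, pull the
 * adapter configuration, and build the DMA tag hierarchy (parent ->
 * io_dmat for S/G I/O, srb_dmat for the SRB pool).
 */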
1326 static int hptiop_attach(device_t dev)
1327 {
1328         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1329         struct hpt_iop_request_get_config  iop_config;
1330         struct hpt_iop_request_set_config  set_config;
1331         int rid = 0;
1332         struct cam_devq *devq;
1333         struct ccb_setasync ccb;
1334         u_int32_t unit = device_get_unit(dev);
1335
1336         device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1337                         unit, driver_version);
1338
1339         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1340                 pci_get_bus(dev), pci_get_slot(dev),
1341                 pci_get_function(dev), hba->ops));
1342
1343 #if __FreeBSD_version >=440000
1344         pci_enable_busmaster(dev);
1345 #endif
1346         hba->pcidev = dev;
1347         hba->pciunit = unit;
1348
1349         if (hba->ops->alloc_pci_res(hba))
1350                 return ENXIO;
1351
1352         if (hba->ops->iop_wait_ready(hba, 2000)) {
1353                 device_printf(dev, "adapter is not ready\n");
1354                 goto release_pci_res;
1355         }
1356
1357 #if (__FreeBSD_version >= 500000)
1358         mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1359 #endif
1360
1361         if (bus_dma_tag_create(NULL,/* parent */
1362                         1,  /* alignment */
1363                         0, /* boundary */
1364                         BUS_SPACE_MAXADDR,  /* lowaddr */
1365                         BUS_SPACE_MAXADDR,  /* highaddr */
1366                         NULL, NULL,         /* filter, filterarg */
1367                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1368                         BUS_SPACE_UNRESTRICTED, /* nsegments */
1369                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1370                         0,      /* flags */
1371 #if __FreeBSD_version > 502000
1372                         NULL,   /* lockfunc */
1373                         NULL,       /* lockfuncarg */
1374 #endif
1375                         &hba->parent_dmat   /* tag */))
1376         {
1377                 device_printf(dev, "alloc parent_dmat failed\n");
1378                 goto release_pci_res;
1379         }
1380
1381         if (hba->ops->internal_memalloc) {
1382                 if (hba->ops->internal_memalloc(hba)) {
1383                         device_printf(dev, "alloc srb_dmat failed\n");
1384                         goto destroy_parent_tag;
1385                 }
1386         }
1387
1388         if (hba->ops->get_config(hba, &iop_config)) {
1389                 device_printf(dev, "get iop config failed.\n");
1390                 goto get_config_failed;
1391         }
1392
1393         hba->firmware_version = iop_config.firmware_version;
1394         hba->interface_version = iop_config.interface_version;
1395         hba->max_requests = iop_config.max_requests;
1396         hba->max_devices = iop_config.max_devices;
1397         hba->max_request_size = iop_config.request_size;
1398         hba->max_sg_count = iop_config.max_sg_count;
1399
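             /*
              * I/O DMA tag.  maxsize is presumably chosen so that a
              * worst-case transfer of max_sg_count page-sized pieces
              * (less one segment lost to buffer misalignment) can
              * always be mapped; each segment is capped at 0x20000.
              */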
1400         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1401                         4,  /* alignment */
1402                         BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1403                         BUS_SPACE_MAXADDR,  /* lowaddr */
1404                         BUS_SPACE_MAXADDR,  /* highaddr */
1405                         NULL, NULL,         /* filter, filterarg */
1406                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1407                         hba->max_sg_count,  /* nsegments */
1408                         0x20000,    /* maxsegsize */
1409                         BUS_DMA_ALLOCNOW,       /* flags */
1410 #if __FreeBSD_version > 502000
1411                         busdma_lock_mutex,  /* lockfunc */
1412                         &hba->lock,     /* lockfuncarg */
1413 #endif
1414                         &hba->io_dmat   /* tag */))
1415         {
1416                 device_printf(dev, "alloc io_dmat failed\n");
1417                 goto get_config_failed;
1418         }
1419
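             /*
              * SRB pool tag: one contiguous, 32-bit addressable region
              * holding HPT_SRB_MAX_QUEUE_SIZE SRBs, plus 0x20 bytes of
              * slack so hptiop_map_srb() can align the pool to a
              * 32-byte boundary.
              */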
1420         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1421                         1,  /* alignment */
1422                         0, /* boundary */
1423                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1424                         BUS_SPACE_MAXADDR,  /* highaddr */
1425                         NULL, NULL,         /* filter, filterarg */
1426                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1427                         1,  /* nsegments */
1428                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1429                         0,      /* flags */
1430 #if __FreeBSD_version > 502000
1431                         NULL,   /* lockfunc */
1432                         NULL,       /* lockfuncarg */
1433 #endif
1434                         &hba->srb_dmat  /* tag */))
1435         {
1436                 device_printf(dev, "alloc srb_dmat failed\n");
1437                 goto destroy_io_dmat;
1438         }
1439
1440         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1441 #if __FreeBSD_version > 501000
1442                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1443 #else
1444                         BUS_DMA_WAITOK,
1445 #endif
1446                         &hba->srb_dmamap) != 0)
1447         {
1448                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1449                 goto destroy_srb_dmat;
1450         }
1451
1452         if (bus_dmamap_load(hba->srb_dmat,
1453                         hba->srb_dmamap, hba->uncached_ptr,
1454                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1455                         hptiop_map_srb, hba, 0))
1456         {
1457                 device_printf(dev, "bus_dmamap_load failed!\n");
1458                 goto srb_dmamem_free;
1459         }
1460
1461         if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
1462                 device_printf(dev, "cam_simq_alloc failed\n");
1463                 goto srb_dmamap_unload;
1464         }
1465
1466 #if __FreeBSD_version < 700000
1467         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1468                         hba, unit, hba->max_requests - 1, 1, devq);
1469 #else
1470         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1471                         hba, unit, &Giant, hba->max_requests - 1, 1, devq);
1472 #endif
1473         if (!hba->sim) {
1474                 device_printf(dev, "cam_sim_alloc failed\n");
1475                 cam_simq_free(devq);
1476                 goto srb_dmamap_unload;
1477         }
1478 #if __FreeBSD_version < 700000
1479         if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
1480 #else
1481         if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
1482 #endif
1483         {
1484                 device_printf(dev, "xpt_bus_register failed\n");
1485                 goto free_cam_sim;
1486         }
1487
1488         if (xpt_create_path(&hba->path, /*periph */ NULL,
1489                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
1490                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1491                 device_printf(dev, "xpt_create_path failed\n");
1492                 goto deregister_xpt_bus;
1493         }
1494
1495         bzero(&set_config, sizeof(set_config));
1496         set_config.iop_id = unit;
1497         set_config.vbus_id = cam_sim_path(hba->sim);
1498         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
1499
1500         if (hba->ops->set_config(hba, &set_config)) {
1501                 device_printf(dev, "set iop config failed.\n");
1502                 goto free_hba_path;
1503         }
1504
1505         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
1506         ccb.ccb_h.func_code = XPT_SASYNC_CB;
1507         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
1508         ccb.callback = hptiop_async;
1509         ccb.callback_arg = hba->sim;
1510         xpt_action((union ccb *)&ccb);
1511
1512         rid = 0;
1513         if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
1514                         &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1515                 device_printf(dev, "allocate irq failed!\n");
1516                 goto free_hba_path;
1517         }
1518
1519 #if __FreeBSD_version < 700000
1520         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1521                                 hptiop_pci_intr, hba, &hba->irq_handle))
1522 #else
1523         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1524                                 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
1525 #endif
1526         {
1527                 device_printf(dev, "allocate intr function failed!\n");
1528                 goto free_irq_resource;
1529         }
1530
1531         if (hptiop_send_sync_msg(hba,
1532                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
1533                 device_printf(dev, "failed to start background task\n");
1534                 goto teardown_irq_resource;
1535         }
1536
1537         hba->ops->enable_intr(hba);
1538
1539         hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
1540                                 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
1541                                 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
1542
1543 #if __FreeBSD_version < 503000
1544         hba->ioctl_dev->si_drv1 = hba;
1545 #endif
1546
1547         return 0;
1548
1549
1550 teardown_irq_resource:
1551         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
1552
1553 free_irq_resource:
1554         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
1555
1556 free_hba_path:
1557         xpt_free_path(hba->path);
1558
1559 deregister_xpt_bus:
1560         xpt_bus_deregister(cam_sim_path(hba->sim));
1561
1562 free_cam_sim:
1563         cam_sim_free(hba->sim, /*free devq*/ TRUE);
1564
1565 srb_dmamap_unload:
1566         if (hba->uncached_ptr)
1567                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
1568
1569 srb_dmamem_free:
1570         if (hba->uncached_ptr)
1571                 bus_dmamem_free(hba->srb_dmat,
1572                         hba->uncached_ptr, hba->srb_dmamap);
1573
1574 destroy_srb_dmat:
1575         if (hba->srb_dmat)
1576                 bus_dma_tag_destroy(hba->srb_dmat);
1577
1578 destroy_io_dmat:
1579         if (hba->io_dmat)
1580                 bus_dma_tag_destroy(hba->io_dmat);
1581
1582 get_config_failed:
1583         if (hba->ops->internal_memfree)
1584                 hba->ops->internal_memfree(hba);
1585
1586 destroy_parent_tag:
1587         if (hba->parent_dmat)
1588                 bus_dma_tag_destroy(hba->parent_dmat);
1589
1590 release_pci_res:
1591         if (hba->ops->release_pci_res)
1592                 hba->ops->release_pci_res(hba);
1593
1594         return ENXIO;
1595 }
1596
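/*
 * Detach: refuse while any exported device is still in use, then shut
 * the IOP down, stop its background task, and release all resources.
 */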
1597 static int hptiop_detach(device_t dev)
1598 {
1599         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1600         int i;
1601         int error = EBUSY;
1602
1603         hptiop_lock_adapter(hba);
1604         for (i = 0; i < hba->max_devices; i++)
1605                 if (hptiop_os_query_remove_device(hba, i)) {
1606                         device_printf(dev, "%d file system is busy, id=%d\n",
1607                                                 hba->pciunit, i);
1608                         goto out;
1609                 }
1610
1611         if ((error = hptiop_shutdown(dev)) != 0)
1612                 goto out;
1613         if (hptiop_send_sync_msg(hba,
1614                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) {
1615                 error = EBUSY;  /* a failed stop must not read as success */
                     goto out;
             }
1616
1617         hptiop_release_resource(hba);
1618         error = 0;
1619 out:
1620         hptiop_unlock_adapter(hba);
1621         return error;
1622 }
1623
1624 static int hptiop_shutdown(device_t dev)
1625 {
1626         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1627
1628         int error = 0;
1629
1630         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
1631                 device_printf(dev, "%d device is busy\n", hba->pciunit);
1632                 return EBUSY;
1633         }
1634
1635         hba->ops->disable_intr(hba);
1636
1637         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
1638                 error = EBUSY;
1639
1640         return error;
1641 }
1642
1643 static void hptiop_pci_intr(void *arg)
1644 {
1645         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
1646         hptiop_lock_adapter(hba);
1647         hba->ops->iop_intr(hba);
1648         hptiop_unlock_adapter(hba);
1649 }
1650
1651 static void hptiop_poll(struct cam_sim *sim)
1652 {
1653         hptiop_pci_intr(cam_sim_softc(sim));
1654 }
1655
1656 static void hptiop_async(void * callback_arg, u_int32_t code,
1657                                         struct cam_path * path, void * arg)
1658 {
1659 }
1660
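/*
 * Interrupt mask helpers.  Note the opposite polarity of the two
 * interfaces: a set bit in the ITL outbound_intmask masks (disables)
 * that interrupt, while a set bit in the MV outbound_intmask enables
 * it.  The trailing reads in the disable paths apparently flush the
 * posted register write.
 */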
1661 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
1662 {
1663         BUS_SPACE_WRT4_ITL(outbound_intmask,
1664                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
1665 }
1666
1667 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
1668 {
1669         u_int32_t int_mask;
1670
1671         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1672
1673         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
1674                         | MVIOP_MU_OUTBOUND_INT_MSG;
1675         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1676 }
1677
1678 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
1679 {
1680         u_int32_t int_mask;
1681
1682         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
1683
1684         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
1685         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
1686         BUS_SPACE_RD4_ITL(outbound_intstatus);
1687 }
1688
1689 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
1690 {
1691         u_int32_t int_mask;
1692         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1693
1694         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
1695                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
1696         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1697         BUS_SPACE_RD4_MV0(outbound_intmask);
1698 }
1699
1700 static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
1701 {
1702         return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1703 }
1704
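/*
 * Free SRBs are kept on a simple singly-linked LIFO list; callers are
 * expected to hold the adapter lock around these two helpers.
 */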
1705 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
1706 {
1707         struct hpt_iop_srb * srb;
1708
1709         if (hba->srb_list) {
1710                 srb = hba->srb_list;
1711                 hba->srb_list = srb->next;
1712                 return srb;
1713         }
1714
1715         return NULL;
1716 }
1717
1718 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
1719 {
1720         srb->next = hba->srb_list;
1721         hba->srb_list = srb;
1722 }
1723
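/*
 * CAM entry point: dispatch on the CCB function code.  XPT_SCSI_IO maps
 * the data buffer (if any) and posts the request to the IOP; the other
 * function codes are completed immediately at the bottom of the switch.
 */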
1724 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
1725 {
1726         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
1727         struct hpt_iop_srb * srb;
1728
1729         switch (ccb->ccb_h.func_code) {
1730
1731         case XPT_SCSI_IO:
1732                 hptiop_lock_adapter(hba);
1733                 if (ccb->ccb_h.target_lun != 0 ||
1734                         ccb->ccb_h.target_id >= hba->max_devices ||
1735                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
1736                 {
1737                         ccb->ccb_h.status = CAM_TID_INVALID;
1738                         xpt_done(ccb);
1739                         goto scsi_done;
1740                 }
1741
1742                 if ((srb = hptiop_get_srb(hba)) == NULL) {
1743                         device_printf(hba->pcidev, "srb allocation failed\n");
1744                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1745                         xpt_done(ccb);
1746                         goto scsi_done;
1747                 }
1748
1749                 srb->ccb = ccb;
1750
1751                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1752                         hptiop_post_scsi_command(srb, NULL, 0, 0);
1753                 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1754                         if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1755                                 int error;
1756
1757                                 error = bus_dmamap_load(hba->io_dmat,
1758                                                 srb->dma_map,
1759                                                 ccb->csio.data_ptr,
1760                                                 ccb->csio.dxfer_len,
1761                                                 hptiop_post_scsi_command,
1762                                                 srb, 0);
1763
1764                                 if (error && error != EINPROGRESS) {
1765                                         device_printf(hba->pcidev,
1766                                                 "%d bus_dmamap_load error %d",
1767                                                 hba->pciunit, error);
1768                                         xpt_freeze_simq(hba->sim, 1);
1769                                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1770 invalid:
1771                                         hptiop_free_srb(hba, srb);
1772                                         xpt_done(ccb);
1773                                         goto scsi_done;
1774                                 }
1775                         }
1776                         else {
1777                                 device_printf(hba->pcidev,
1778                                         "CAM_DATA_PHYS not supported");
1779                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1780                                 goto invalid;
1781                         }
1782                 }
1783                 else {
1784                         struct bus_dma_segment *segs;
1785
1786                         if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
1787                                 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1788                                 device_printf(hba->pcidev,
                                        "only physical S/G lists are supported\n");
1789                                 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1790                                 goto invalid;
1791                         }
1792
1793                         segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1794                         hptiop_post_scsi_command(srb, segs,
1795                                                 ccb->csio.sglist_cnt, 0);
1796                 }
1797
1798 scsi_done:
1799                 hptiop_unlock_adapter(hba);
1800                 return;
1801
1802         case XPT_RESET_BUS:
1803                 device_printf(hba->pcidev, "reset adapter\n");
1804                 hptiop_lock_adapter(hba);
1805                 hba->msg_done = 0;
1806                 hptiop_reset_adapter(hba);
1807                 hptiop_unlock_adapter(hba);
1808                 break;
1809
1810         case XPT_GET_TRAN_SETTINGS:
1811         case XPT_SET_TRAN_SETTINGS:
1812                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1813                 break;
1814
1815         case XPT_CALC_GEOMETRY:
1816                 ccb->ccg.heads = 255;
1817                 ccb->ccg.secs_per_track = 63;
1818                 ccb->ccg.cylinders = ccb->ccg.volume_size /
1819                                 (ccb->ccg.heads * ccb->ccg.secs_per_track);
1820                 ccb->ccb_h.status = CAM_REQ_CMP;
1821                 break;
1822
1823         case XPT_PATH_INQ:
1824         {
1825                 struct ccb_pathinq *cpi = &ccb->cpi;
1826
1827                 cpi->version_num = 1;
1828                 cpi->hba_inquiry = PI_SDTR_ABLE;
1829                 cpi->target_sprt = 0;
1830                 cpi->hba_misc = PIM_NOBUSRESET;
1831                 cpi->hba_eng_cnt = 0;
1832                 cpi->max_target = hba->max_devices;
1833                 cpi->max_lun = 0;
1834                 cpi->unit_number = cam_sim_unit(sim);
1835                 cpi->bus_id = cam_sim_bus(sim);
1836                 cpi->initiator_id = hba->max_devices;
1837                 cpi->base_transfer_speed = 3300;
1838
1839                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1840                 strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
1841                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1842                 cpi->transport = XPORT_SPI;
1843                 cpi->transport_version = 2;
1844                 cpi->protocol = PROTO_SCSI;
1845                 cpi->protocol_version = SCSI_REV_2;
1846                 cpi->ccb_h.status = CAM_REQ_CMP;
1847                 break;
1848         }
1849
1850         default:
1851                 ccb->ccb_h.status = CAM_REQ_INVALID;
1852                 break;
1853         }
1854
1855         xpt_done(ccb);
1856         return;
1857 }
1858
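/*
 * Post a SCSI command to an ITL controller.  SRBs flagged
 * HPT_SRB_FLAG_HIGH_MEM_ACESS are built on the stack and copied into an
 * IOP request slot read from the inbound queue; all other SRBs are
 * posted to the inbound queue directly by physical address.
 */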
1859 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
1860                                 struct hpt_iop_srb *srb,
1861                                 bus_dma_segment_t *segs, int nsegs)
1862 {
1863         int idx;
1864         union ccb *ccb = srb->ccb;
1865         u_int8_t *cdb;
1866
1867         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1868                 cdb = ccb->csio.cdb_io.cdb_ptr;
1869         else
1870                 cdb = ccb->csio.cdb_io.cdb_bytes;
1871
1872         KdPrint(("ccb=%p %x-%x-%x\n",
1873                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
1874
1875         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
1876                 u_int32_t iop_req32;
1877                 struct hpt_iop_request_scsi_command req;
1878
1879                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1880
1881                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
1882                         device_printf(hba->pcidev, "invalid req offset\n");
1883                         ccb->ccb_h.status = CAM_BUSY;
1884                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
1885                         hptiop_free_srb(hba, srb);
1886                         xpt_done(ccb);
1887                         return;
1888                 }
1889
1890                 if (ccb->csio.dxfer_len && nsegs > 0) {
1891                         struct hpt_iopsg *psg = req.sg_list;
1892                         for (idx = 0; idx < nsegs; idx++, psg++) {
1893                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1894                                 psg->size = segs[idx].ds_len;
1895                                 psg->eot = 0;
1896                         }
1897                         psg[-1].eot = 1;
1898                 }
1899
1900                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
1901
1902                 req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1903                                 + nsegs*sizeof(struct hpt_iopsg);
1904                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1905                 req.header.flags = 0;
1906                 req.header.result = IOP_RESULT_PENDING;
1907                 req.header.context = (u_int64_t)(unsigned long)srb;
1908                 req.dataxfer_length = ccb->csio.dxfer_len;
1909                 req.channel =  0;
1910                 req.target =  ccb->ccb_h.target_id;
1911                 req.lun =  ccb->ccb_h.target_lun;
1912
1913                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
1914                         (u_int8_t *)&req, req.header.size);
1915
1916                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1917                         bus_dmamap_sync(hba->io_dmat,
1918                                 srb->dma_map, BUS_DMASYNC_PREREAD);
1919                 }
1920                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1921                         bus_dmamap_sync(hba->io_dmat,
1922                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
1923
1924                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
1925         } else {
1926                 struct hpt_iop_request_scsi_command *req;
1927
1928                 req = (struct hpt_iop_request_scsi_command *)srb;
1929                 if (ccb->csio.dxfer_len && nsegs > 0) {
1930                         struct hpt_iopsg *psg = req->sg_list;
1931                         for (idx = 0; idx < nsegs; idx++, psg++) {
1932                                 psg->pci_address = 
1933                                         (u_int64_t)segs[idx].ds_addr;
1934                                 psg->size = segs[idx].ds_len;
1935                                 psg->eot = 0;
1936                         }
1937                         psg[-1].eot = 1;
1938                 }
1939
1940                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
1941
1942                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1943                 req->header.result = IOP_RESULT_PENDING;
1944                 req->dataxfer_length = ccb->csio.dxfer_len;
1945                 req->channel =  0;
1946                 req->target =  ccb->ccb_h.target_id;
1947                 req->lun =  ccb->ccb_h.target_lun;
1948                 req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1949                         + nsegs*sizeof(struct hpt_iopsg);
1950                 req->header.context = (u_int64_t)srb->index |
1951                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
1952                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1953
1954                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1955                         bus_dmamap_sync(hba->io_dmat,
1956                                 srb->dma_map, BUS_DMASYNC_PREREAD);
1957                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1958                         bus_dmamap_sync(hba->io_dmat,
1959                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
1960                 }
1961
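                     /*
                      * Newer firmware encodes the request size in the low
                      * bits of the posted address: SIZE_BIT alone means
                      * < 256 bytes, ADDR_HOST_BIT alone < 512 bytes, and
                      * both bits together a larger request.
                      */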
1962                 if (hba->firmware_version > 0x01020000
1963                         || hba->interface_version > 0x01020000) {
1964                         u_int32_t size_bits;
1965
1966                         if (req->header.size < 256)
1967                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
1968                         else if (req->header.size < 512)
1969                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
1970                         else
1971                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
1972                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
1973
1974                         BUS_SPACE_WRT4_ITL(inbound_queue,
1975                                 (u_int32_t)srb->phy_addr | size_bits);
1976                 } else
1977                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
1978                                 |IOPMU_QUEUE_ADDR_HOST_BIT);
1979         }
1980 }
1981
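/*
 * Post a SCSI command to an MV controller.  The request always lives in
 * host memory; the low bits of the inbound queue entry carry the request
 * size in 256-byte units, saturated at 3.
 */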
1982 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
1983                                 struct hpt_iop_srb *srb,
1984                                 bus_dma_segment_t *segs, int nsegs)
1985 {
1986         int idx, size;
1987         union ccb *ccb = srb->ccb;
1988         u_int8_t *cdb;
1989         struct hpt_iop_request_scsi_command *req;
1990         u_int64_t req_phy;
1991
1992         req = (struct hpt_iop_request_scsi_command *)srb;
1993         req_phy = srb->phy_addr;
1994
1995         if (ccb->csio.dxfer_len && nsegs > 0) {
1996                 struct hpt_iopsg *psg = req->sg_list;
1997                 for (idx = 0; idx < nsegs; idx++, psg++) {
1998                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1999                         psg->size = segs[idx].ds_len;
2000                         psg->eot = 0;
2001                 }
2002                 psg[-1].eot = 1;
2003         }
2004         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2005                 cdb = ccb->csio.cdb_io.cdb_ptr;
2006         else
2007                 cdb = ccb->csio.cdb_io.cdb_bytes;
2008
2009         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2010         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2011         req->header.result = IOP_RESULT_PENDING;
2012         req->dataxfer_length = ccb->csio.dxfer_len;
2013         req->channel = 0;
2014         req->target =  ccb->ccb_h.target_id;
2015         req->lun =  ccb->ccb_h.target_lun;
2016         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2017                                 - sizeof(struct hpt_iopsg)
2018                                 + nsegs * sizeof(struct hpt_iopsg);
2019         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2020                 bus_dmamap_sync(hba->io_dmat,
2021                         srb->dma_map, BUS_DMASYNC_PREREAD);
2022         }
2023         else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2024                 bus_dmamap_sync(hba->io_dmat,
2025                         srb->dma_map, BUS_DMASYNC_PREWRITE);
2026         req->header.context = (u_int64_t)srb->index
2027                                         << MVIOP_REQUEST_NUMBER_START_BIT
2028                                         | MVIOP_CMD_TYPE_SCSI;
2029         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2030         size = req->header.size >> 8;
2031         hptiop_mv_inbound_write(req_phy
2032                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2033                         | (size > 3 ? 3 : size), hba);
2034 }
2035
2036 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2037                                         int nsegs, int error)
2038 {
2039         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2040         union ccb *ccb = srb->ccb;
2041         struct hpt_iop_hba *hba = srb->hba;
2042
2043         if (error || nsegs > hba->max_sg_count) {
2044                 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2045                         ccb->ccb_h.func_code,
2046                         ccb->ccb_h.target_id,
2047                         ccb->ccb_h.target_lun, nsegs));
2048                 ccb->ccb_h.status = CAM_BUSY;
2049                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2050                 hptiop_free_srb(hba, srb);
2051                 xpt_done(ccb);
2052                 return;
2053         }
2054
2055         hba->ops->post_req(hba, srb, segs, nsegs);
2056 }
2057
2058 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2059                                 int nsegs, int error)
2060 {
2061         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2062         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F) 
2063                                 & ~(u_int64_t)0x1F;
2064         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2065                                 & ~0x1F);
2066 }
2067
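/*
 * bus_dmamap_load() callback for the SRB pool: carve the uncached region
 * into 32-byte aligned SRBs, create a per-SRB DMA map, record each SRB's
 * physical address (pre-shifted by 5 bits on ITL, where ctlcfg_ptr is
 * unset), and put every SRB on the free list.
 */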
2068 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2069                                 int nsegs, int error)
2070 {
2071         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2072         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2073         struct hpt_iop_srb *srb, *tmp_srb;
2074         int i;
2075
2076         if (error || nsegs == 0) {
2077                 device_printf(hba->pcidev, "hptiop_map_srb error\n");
2078                 return;
2079         }
2080
2081         /* map srb */
2082         srb = (struct hpt_iop_srb *)
2083                 (((unsigned long)hba->uncached_ptr + 0x1F)
2084                 & ~(unsigned long)0x1F);
2085
2086         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2087                 tmp_srb = (struct hpt_iop_srb *)
2088                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
2089                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2090                         if (bus_dmamap_create(hba->io_dmat,
2091                                                 0, &tmp_srb->dma_map)) {
2092                                 device_printf(hba->pcidev, "dmamap create failed\n");
2093                                 return;
2094                         }
2095
2096                         bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2097                         tmp_srb->hba = hba;
2098                         tmp_srb->index = i;
2099                         if (hba->ctlcfg_ptr == NULL) { /* ITL iop */
2100                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2101                                                         (phy_addr >> 5);
2102                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2103                                         tmp_srb->srb_flag =
2104                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2105                         } else {
2106                                 tmp_srb->phy_addr = phy_addr;
2107                         }
2108
2109                         hptiop_free_srb(hba, tmp_srb);
2110                         hba->srb[i] = tmp_srb;
2111                         phy_addr += HPT_SRB_MAX_SIZE;
2112                 }
2113                 else {
2114                         device_printf(hba->pcidev, "invalid SRB alignment\n");
2115                         return;
2116                 }
2117         }
2118 }
2119
2120 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2121 {
2122         hba->msg_done = 1;
2123 }
2124
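/*
 * Before a target may be removed, make sure no "da" peripheral still
 * holds references on it; a nonzero refcount indicates an open device,
 * e.g. a mounted file system.
 */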
2125 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2126                                                 int target_id)
2127 {
2128         struct cam_periph       *periph = NULL;
2129         struct cam_path         *path;
2130         int                     status, retval = 0;
2131
2132         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2133
2134         if (status == CAM_REQ_CMP) {
2135                 if ((periph = cam_periph_find(path, "da")) != NULL) {
2136                         if (periph->refcount >= 1) {
2137                                 device_printf(hba->pcidev, "%d, "
2138                                         "target_id=0x%x, "
2139                                         "refcount=%d\n",
2140                                     hba->pciunit, target_id, periph->refcount);
2141                                 retval = -1;
2142                         }
2143                 }
2144                 xpt_free_path(path);
2145         }
2146         return retval;
2147 }
2148
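/*
 * Undo everything hptiop_attach() set up, in roughly reverse order: the
 * CAM path and SIM, the control-config DMA memory, the per-SRB maps,
 * the SRB pool, the DMA tags, the interrupt, the PCI resources, and the
 * ioctl node.
 */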
2149 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2150 {
2151         int i;
2152         if (hba->path) {
2153                 struct ccb_setasync ccb;
2154
2155                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2156                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2157                 ccb.event_enable = 0;
2158                 ccb.callback = hptiop_async;
2159                 ccb.callback_arg = hba->sim;
2160                 xpt_action((union ccb *)&ccb);
2161                 xpt_free_path(hba->path);
2162         }
2163
2164         if (hba->sim) {
2165                 xpt_bus_deregister(cam_sim_path(hba->sim));
2166                 cam_sim_free(hba->sim, TRUE);
2167         }
2168
2169         if (hba->ctlcfg_dmat) {
2170                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2171                 bus_dmamem_free(hba->ctlcfg_dmat,
2172                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2173                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2174         }
2175
2176         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2177                 struct hpt_iop_srb *srb = hba->srb[i];
                     /* srb is NULL if hptiop_map_srb() bailed out early */
2178                 if (srb != NULL && srb->dma_map)
2179                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2180         }
2181
2182         if (hba->srb_dmat) {
2183                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2184                 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2185                 bus_dma_tag_destroy(hba->srb_dmat);
2186         }
2187
2188         if (hba->io_dmat)
2189                 bus_dma_tag_destroy(hba->io_dmat);
2190
2191         if (hba->parent_dmat)
2192                 bus_dma_tag_destroy(hba->parent_dmat);
2193
2194         if (hba->irq_handle)
2195                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2196
2197         if (hba->irq_res)
2198                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2199                                         0, hba->irq_res);
2200
2201         if (hba->bar0_res)
2202                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2203                                         hba->bar0_rid, hba->bar0_res);
2204         if (hba->bar2_res)
2205                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2206                                         hba->bar2_rid, hba->bar2_res);
2207         if (hba->ioctl_dev)
2208                 destroy_dev(hba->ioctl_dev);
2209 }