/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2008 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#if (__FreeBSD_version >= 500000)
#include <sys/time.h>
#include <sys/systm.h>
#else
#include <machine/clock.h>
#endif

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#if (__FreeBSD_version >= 500000)
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>
#endif

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if (__FreeBSD_version >= 500000)
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#else
#include <pci/pcivar.h>
#include <pci/pcireg.h>
#endif

#if (__FreeBSD_version <= 500043)
#include <sys/devicestat.h>
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_xpt_periph.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#if (__FreeBSD_version < 500043)
#include <sys/bus_private.h>
#endif

#include <dev/hptiop/hptiop.h>

static char driver_name[] = "hptiop";
static char driver_version[] = "v1.3 (010208)";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                        u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static int  hptiop_reset_adapter(struct hpt_iop_hba *hba);

static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
        .d_name = driver_name,
#if __FreeBSD_version>=503000
        .d_version = D_VERSION,
#endif
#if (__FreeBSD_version>=503000 && __FreeBSD_version<600034)
        .d_flags = D_NEEDGIANT,
#endif
#if __FreeBSD_version<600034
#if __FreeBSD_version>=501000
        .d_maj = MAJOR_AUTO,
#else
        .d_maj = HPT_DEV_MAJOR,
#endif
#endif
};

#if __FreeBSD_version < 503000
#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
#else
#define hba_from_dev(dev) \
        ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, minor(dev)))
#endif

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

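/*
 * Character-device entry points.  /dev/hptiop* is the management
 * interface used by HighPoint's userland tools: HPT_DO_IOCONTROL
 * forwards an ioctl buffer to the adapter firmware through the
 * chip-specific do_ioctl op, and HPT_SCAN_BUS requests a CAM rescan.
 * Only one opener is allowed at a time (HPT_IOCTL_FLAG_OPEN).
 */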
static int hptiop_open(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba==NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);
        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
                                        int flags, ioctl_thread_t proc)
{
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

#if (__FreeBSD_version >= 500000)
        mtx_lock(&Giant);
#endif

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }

#if (__FreeBSD_version >= 500000)
        mtx_unlock(&Giant);
#endif

        return ret;
}

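/*
 * Pop one 64-bit completion tag from the MV outbound queue in BAR2.
 * The queue is a ring indexed by outbound_head (written by firmware)
 * and outbound_tail (advanced by the host, wrapping at
 * MVIOP_QUEUE_LEN).  Returns 0 when the ring is empty, so a valid
 * tag is assumed to be non-zero.
 */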
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                        (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{

        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
{
        u_int32_t req=0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req!=IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

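/*
 * Completion handler for the ITL interface.  "index" is the value
 * popped from the outbound queue: host-allocated requests carry
 * IOPMU_QUEUE_MASK_HOST_BITS and map back to an srb slot, while
 * IOP-allocated requests are offsets into the BAR0 window and must
 * be read through bus_space accessors.  IOP result codes are
 * translated to CAM status here; on check condition the sense data
 * is copied into the CCB and CAM_AUTOSNS_VALID is set.
 */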
static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
                                                        u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req=0;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /*iop req*/
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch(temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                                index + offsetof(struct hpt_iop_request_scsi_command,
                                dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}

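/*
 * Drain the ITL outbound queue.  For IOP-allocated requests flagged
 * IOP_REQUEST_FLAG_SYNC_REQUEST, the 64-bit context field doubles as
 * a done flag shared with hptiop_send_sync_request_itl(): if it is
 * still zero the drain loop sets it to 1 and leaves the request for
 * the poller; otherwise the request is completed here.
 */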
static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header *p;

                        p = (struct hpt_iop_request_header *)
                                ((char *)hba->u.itl.mu + req);
                        temp = bus_space_read_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;
                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

static int hptiop_intr_itl(struct hpt_iop_hba * hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
                                                        u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba * hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

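/*
 * Post a request to the ITL inbound queue and busy-wait up to
 * "millisec" ms for it to finish, calling the interrupt handler by
 * hand (interrupts may not be enabled yet).  Completion is detected
 * when the request's context field becomes non-zero; see
 * hptiop_drain_outbound_queue_itl() above.
 */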
static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
                                        u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;
        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                        u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i=0; i<millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done? 0 : -1;
}

static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
                                struct hpt_iop_request_get_config * config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
                                struct hpt_iop_request_get_config * config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                (u_int32_t *)config,
                sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

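/*
 * The ioctl buffer of an IOP-allocated request lives in the BAR0
 * MMIO window, not in ordinary kernel memory, so copyin()/copyout()
 * cannot be pointed at it directly.  These helpers bounce the data
 * one byte at a time through bus_space_{write,read}_1().
 */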
static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
{
        unsigned char byte;
        int i;

        for (i=0; i<size; i++) {
                if (copyin((u_int8_t *)user + i, &byte, 1))
                        return -1;
                bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }

        return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus, void *user, int size)
{
        unsigned char byte;
        int i;

        for (i=0; i<size; i++) {
                byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
                if (copyout(&byte, (u_int8_t *)user + i, 1))
                        return -1;
        }

        return 0;
}

static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param * pParams)
{
        u_int32_t req32;
        u_int32_t result;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return EFAULT;

        if (pParams->nInBufferSize)
                if (hptiop_bus_space_copyin(hba, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command, buf),
                        (void *)pParams->lpInBuffer, pParams->nInBufferSize))
                        goto invalid;

        if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
                goto invalid;

        result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.result));

        if (result == IOP_RESULT_SUCCESS) {
                if (pParams->nOutBufferSize)
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, buf) +
                                        ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned) {
                        if (hptiop_bus_space_copyout(hba, req32 +
                                offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
                                (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
                                goto invalid;
                }

                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return 0;
        } else {
invalid:
                BUS_SPACE_WRT4_ITL(outbound_queue, req32);

                return EFAULT;
        }
}

static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t req_phy;
        int size = 0;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req->inbuf_size = pParams->nInBufferSize;
        req->outbuf_size = pParams->nOutBufferSize;
        req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                                        + pParams->nInBufferSize;
        req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
        req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req->header.result = IOP_RESULT_PENDING;
        req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        size = req->header.size >> 8;
        size = size > 3 ? 3 : size;
        req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
        hptiop_mv_inbound_write(req_phy, hba);

        BUS_SPACE_RD4_MV0(outbound_intmask);

        while (hba->config_done == 0) {
                if (hptiop_sleep(hba, req, PPAUSE,
                        "hptctl", HPT_OSM_TIMEOUT)==0)
                        continue;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
        }
        return 0;
}

static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        struct hpt_iop_request_ioctl_command *req;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
        hba->config_done = 0;
        hptiop_lock_adapter(hba);
        if (pParams->nInBufferSize)
                if (copyin((void *)pParams->lpInBuffer,
                                req->buf, pParams->nInBufferSize))
                        goto invalid;
        if (hptiop_post_ioctl_command_mv(hba, req, pParams))
                goto invalid;

        if (hba->config_done == 1) {
                if (pParams->nOutBufferSize)
                        if (copyout(req->buf +
                                ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer,
                                pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned)
                        if (copyout(&req->bytes_returned,
                                (void*)pParams->lpBytesReturned,
                                sizeof(u_int32_t)))
                                goto invalid;
                hptiop_unlock_adapter(hba);
                return 0;
        } else {
invalid:
                hptiop_unlock_adapter(hba);
                return EFAULT;
        }
}

static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
{
        struct cam_path     *path;
        union ccb           *ccb;
        if (xpt_create_path(&path, xpt_periph, cam_sim_path(hba->sim),
                CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)
                return(EIO);
        if ((ccb = malloc(sizeof(union ccb), M_TEMP, M_WAITOK)) == NULL)
                return(ENOMEM);
        bzero(ccb, sizeof(union ccb));
        xpt_setup_ccb(&ccb->ccb_h, path, 5);
        ccb->ccb_h.func_code = XPT_SCAN_BUS;
        ccb->ccb_h.cbfcnp = hptiop_bus_scan_cb;
        ccb->crcn.flags = CAM_FLAG_NONE;
        xpt_action(ccb);
        return(0);
}

static void hptiop_bus_scan_cb(struct cam_periph *periph, union ccb *ccb)
{
        xpt_free_path(ccb->ccb_h.path);
        free(ccb, M_TEMP);
}

static  bus_dmamap_callback_t   hptiop_map_srb;
static  bus_dmamap_callback_t   hptiop_post_scsi_command;
static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;

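/*
 * PCI resource setup.  The ITL interface needs only BAR0, which maps
 * the whole message unit; the MV interface maps its doorbell and
 * interrupt registers through BAR0 and the inbound/outbound queues
 * through BAR2.
 */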
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev,
                        "failed to get iop base address.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.itl.mu = (struct hpt_iopmu_itl *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.itl.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc mem res failed\n");
                return -1;
        }

        return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev, "failed to get iop bar0.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.mv.regs = (struct hpt_iopmv_regs *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.mv.regs) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
                return -1;
        }

        hba->bar2_rid = 0x18;
        hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

        if (hba->bar2_res == NULL) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "failed to get iop bar2.\n");
                return -1;
        }

        hba->bar2t = rman_get_bustag(hba->bar2_res);
        hba->bar2h = rman_get_bushandle(hba->bar2_res);
        hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

        if (!hba->u.mv.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar2_rid, hba->bar2_res);
                device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
        if (hba->bar2_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar2_rid, hba->bar2_res);
}

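/*
 * The MV interface builds ioctl and get/set-config requests in a host
 * buffer rather than in an adapter-side request window.  This
 * allocates one DMA-coherent buffer below 4GB (ctlcfg_ptr); its bus
 * address, presumably recorded in ctlcfgcmd_phy by the
 * hptiop_mv_map_ctlcfg callback, is what gets posted to the firmware
 * through the inbound queue.
 */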
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
        if (bus_dma_tag_create(hba->parent_dmat,
                                1,
                                0,
                                BUS_SPACE_MAXADDR_32BIT,
                                BUS_SPACE_MAXADDR,
                                NULL, NULL,
                                0x800 - 0x8,
                                1,
                                BUS_SPACE_MAXSIZE_32BIT,
                                BUS_DMA_ALLOCNOW,
#if __FreeBSD_version > 502000
                                NULL,
                                NULL,
#endif
                                        &hba->ctlcfg_dmat)) {
                device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
                return -1;
        }

        if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
#if __FreeBSD_version>501000
                BUS_DMA_WAITOK | BUS_DMA_COHERENT,
#else
                BUS_DMA_WAITOK,
#endif
                &hba->ctlcfg_dmamap) != 0) {
                        device_printf(hba->pcidev,
                                        "bus_dmamem_alloc failed!\n");
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                        return -1;
        }

        if (bus_dmamap_load(hba->ctlcfg_dmat,
                        hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
                        MVIOP_IOCTLCFG_SIZE,
                        hptiop_mv_map_ctlcfg, hba, 0)) {
                device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
                if (hba->ctlcfg_dmat) {
                        bus_dmamem_free(hba->ctlcfg_dmat,
                                hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                }
                return -1;
        }

        return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
        if (hba->ctlcfg_dmat) {
                bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
                bus_dmamem_free(hba->ctlcfg_dmat,
                                        hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
        }

        return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,     hptiop_probe),
        DEVMETHOD(device_attach,    hptiop_attach),
        DEVMETHOD(device_detach,    hptiop_detach),
        DEVMETHOD(device_shutdown,  hptiop_shutdown),
        { 0, 0 }
};

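/*
 * Per-interface operation tables.  The driver supports two register
 * and queue layouts; probe() below picks the table that matches the
 * PCI device ID, and the rest of the driver dispatches through
 * hba->ops so the common code stays interface-agnostic.  The ITL
 * flavor has no internal_memalloc/memfree because it builds requests
 * directly in the BAR0 window.
 */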
static struct hptiop_adapter_ops hptiop_itl_ops = {
        .iop_wait_ready    = hptiop_wait_ready_itl,
        .internal_memalloc = 0,
        .internal_memfree  = 0,
        .alloc_pci_res     = hptiop_alloc_pci_res_itl,
        .release_pci_res   = hptiop_release_pci_res_itl,
        .enable_intr       = hptiop_enable_intr_itl,
        .disable_intr      = hptiop_disable_intr_itl,
        .get_config        = hptiop_get_config_itl,
        .set_config        = hptiop_set_config_itl,
        .iop_intr          = hptiop_intr_itl,
        .post_msg          = hptiop_post_msg_itl,
        .post_req          = hptiop_post_req_itl,
        .do_ioctl          = hptiop_do_ioctl_itl,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .iop_wait_ready    = hptiop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree  = hptiop_internal_memfree_mv,
        .alloc_pci_res     = hptiop_alloc_pci_res_mv,
        .release_pci_res   = hptiop_release_pci_res_mv,
        .enable_intr       = hptiop_enable_intr_mv,
        .disable_intr      = hptiop_disable_intr_mv,
        .get_config        = hptiop_get_config_mv,
        .set_config        = hptiop_set_config_mv,
        .iop_intr          = hptiop_intr_mv,
        .post_msg          = hptiop_post_msg_mv,
        .post_req          = hptiop_post_req_mv,
        .do_ioctl          = hptiop_do_ioctl_mv,
};

static driver_t hptiop_pci_driver = {
        driver_name,
        driver_methods,
        sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);

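/*
 * Match HighPoint (vendor 0x1103) adapters by device ID and select
 * the corresponding ops table.  Device 0x4320 is a SAS part and
 * deliberately falls through into the ITL list.
 */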
1269 static int hptiop_probe(device_t dev)
1270 {
1271         struct hpt_iop_hba *hba;
1272         u_int32_t id;
1273         static char buf[256];
1274         int sas = 0;
1275         struct hptiop_adapter_ops *ops;
1276
1277         if (pci_get_vendor(dev) != 0x1103)
1278                 return (ENXIO);
1279
1280         id = pci_get_device(dev);
1281
1282         switch (id) {
1283                 case 0x4320:
1284                         sas = 1;
1285                 case 0x3220:
1286                 case 0x3320:
1287                 case 0x3410:
1288                 case 0x3520:
1289                 case 0x3510:
1290                 case 0x3511:
1291                 case 0x3521:
1292                 case 0x3522:
1293                 case 0x3540:
1294                         ops = &hptiop_itl_ops;
1295                         break;
1296                 case 0x3120:
1297                 case 0x3122:
1298                 case 0x3020:
1299                         ops = &hptiop_mv_ops;
1300                         break;
1301                 default:
1302                         return (ENXIO);
1303         }
1304
1305         device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1306                 pci_get_bus(dev), pci_get_slot(dev),
1307                 pci_get_function(dev), pci_get_irq(dev));
1308
1309         sprintf(buf, "RocketRAID %x %s Controller\n",
1310                                 id, sas ? "SAS" : "SATA");
1311         device_set_desc_copy(dev, buf);
1312
1313         hba = (struct hpt_iop_hba *)device_get_softc(dev);
1314         bzero(hba, sizeof(struct hpt_iop_hba));
1315         hba->ops = ops;
1316
1317         KdPrint(("hba->ops=%p\n", hba->ops));
1318         return 0;
1319 }
1320
1321 static int hptiop_attach(device_t dev)
1322 {
1323         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1324         struct hpt_iop_request_get_config  iop_config;
1325         struct hpt_iop_request_set_config  set_config;
1326         int rid = 0;
1327         struct cam_devq *devq;
1328         struct ccb_setasync ccb;
1329         u_int32_t unit = device_get_unit(dev);
1330
1331         device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1332                         unit, driver_version);
1333
1334         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1335                 pci_get_bus(dev), pci_get_slot(dev),
1336                 pci_get_function(dev), hba->ops));
1337
1338 #if __FreeBSD_version >=440000
1339         pci_enable_busmaster(dev);
1340 #endif
1341         hba->pcidev = dev;
1342         hba->pciunit = unit;
1343
1344         if (hba->ops->alloc_pci_res(hba))
1345                 return ENXIO;
1346
1347         if (hba->ops->iop_wait_ready(hba, 2000)) {
1348                 device_printf(dev, "adapter is not ready\n");
1349                 goto release_pci_res;
1350         }
1351
1352 #if (__FreeBSD_version >= 500000)
1353         mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1354 #endif
1355
1356         if (bus_dma_tag_create(NULL,/* parent */
1357                         1,  /* alignment */
1358                         0, /* boundary */
1359                         BUS_SPACE_MAXADDR,  /* lowaddr */
1360                         BUS_SPACE_MAXADDR,  /* highaddr */
1361                         NULL, NULL,         /* filter, filterarg */
1362                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1363                         BUS_SPACE_UNRESTRICTED, /* nsegments */
1364                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1365                         0,      /* flags */
1366 #if __FreeBSD_version>502000
1367                         NULL,   /* lockfunc */
1368                         NULL,       /* lockfuncarg */
1369 #endif
1370                         &hba->parent_dmat   /* tag */))
1371         {
1372                 device_printf(dev, "alloc parent_dmat failed\n");
1373                 goto release_pci_res;
1374         }
1375
1376         if (hba->ops->internal_memalloc) {
1377                 if (hba->ops->internal_memalloc(hba)) {
1378                         device_printf(dev, "alloc internal memory failed\n");
1379                         goto destroy_parent_tag;
1380                 }
1381         }
1382
1383         if (hba->ops->get_config(hba, &iop_config)) {
1384                 device_printf(dev, "get iop config failed.\n");
1385                 goto get_config_failed;
1386         }
1387
1388         hba->firmware_version = iop_config.firmware_version;
1389         hba->interface_version = iop_config.interface_version;
1390         hba->max_requests = iop_config.max_requests;
1391         hba->max_devices = iop_config.max_devices;
1392         hba->max_request_size = iop_config.request_size;
1393         hba->max_sg_count = iop_config.max_sg_count;
1394
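        /*
         * io_dmat: per-command data transfer tag, sized from the
         * firmware-reported scatter/gather limits.  The boundary of
         * BUS_SPACE_MAXADDR_32BIT+1 keeps a single segment from crossing
         * a 4GB address boundary.
         */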
1395         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1396                         4,  /* alignment */
1397                         BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1398                         BUS_SPACE_MAXADDR,  /* lowaddr */
1399                         BUS_SPACE_MAXADDR,  /* highaddr */
1400                         NULL, NULL,         /* filter, filterarg */
1401                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1402                         hba->max_sg_count,  /* nsegments */
1403                         0x20000,    /* maxsegsize */
1404                         BUS_DMA_ALLOCNOW,       /* flags */
1405 #if __FreeBSD_version>502000
1406                         busdma_lock_mutex,  /* lockfunc */
1407                         &hba->lock,     /* lockfuncarg */
1408 #endif
1409                         &hba->io_dmat   /* tag */))
1410         {
1411                 device_printf(dev, "alloc io_dmat failed\n");
1412                 goto get_config_failed;
1413         }
1414
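        /*
         * srb_dmat: one physically contiguous pool for all SRBs, with
         * 0x20 spare bytes so hptiop_map_srb() can align the pool on a
         * 32-byte boundary.
         */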
1415         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1416                         1,  /* alignment */
1417                         0, /* boundary */
1418                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1419                         BUS_SPACE_MAXADDR,  /* highaddr */
1420                         NULL, NULL,         /* filter, filterarg */
1421                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1422                         1,  /* nsegments */
1423                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1424                         0,      /* flags */
1425 #if __FreeBSD_version>502000
1426                         NULL,   /* lockfunc */
1427                         NULL,       /* lockfuncarg */
1428 #endif
1429                         &hba->srb_dmat  /* tag */))
1430         {
1431                 device_printf(dev, "alloc srb_dmat failed\n");
1432                 goto destroy_io_dmat;
1433         }
1434
1435         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1436 #if __FreeBSD_version>501000
1437                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1438 #else
1439                         BUS_DMA_WAITOK,
1440 #endif
1441                         &hba->srb_dmamap) != 0)
1442         {
1443                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1444                 goto destroy_srb_dmat;
1445         }
1446
1447         if (bus_dmamap_load(hba->srb_dmat,
1448                         hba->srb_dmamap, hba->uncached_ptr,
1449                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
1450                         hptiop_map_srb, hba, 0))
1451         {
1452                 device_printf(dev, "bus_dmamap_load failed!\n");
1453                 goto srb_dmamem_free;
1454         }
1455
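        /*
         * Size the CAM queue to one less than the firmware's request
         * limit, presumably leaving one IOP request free for internal
         * (ioctl/message) traffic.
         */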
1456         if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
1457                 device_printf(dev, "cam_simq_alloc failed\n");
1458                 goto srb_dmamap_unload;
1459         }
1460
1461 #if __FreeBSD_version <700000
1462         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1463                         hba, unit, hba->max_requests - 1, 1, devq);
1464 #else
1465         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
1466                         hba, unit, &Giant, hba->max_requests - 1, 1, devq);
1467 #endif
1468         if (!hba->sim) {
1469                 device_printf(dev, "cam_sim_alloc failed\n");
1470                 cam_simq_free(devq);
1471                 goto srb_dmamap_unload;
1472         }
1473 #if __FreeBSD_version <700000
1474         if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
1475 #else
1476         if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
1477 #endif
1478         {
1479                 device_printf(dev, "xpt_bus_register failed\n");
1480                 goto free_cam_sim;
1481         }
1482
1483         if (xpt_create_path(&hba->path, /*periph */ NULL,
1484                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
1485                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1486                 device_printf(dev, "xpt_create_path failed\n");
1487                 goto deregister_xpt_bus;
1488         }
1489
1490         bzero(&set_config, sizeof(set_config));
1491         set_config.iop_id = unit;
1492         set_config.vbus_id = cam_sim_path(hba->sim);
1493         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
1494
1495         if (hba->ops->set_config(hba, &set_config)) {
1496                 device_printf(dev, "set iop config failed.\n");
1497                 goto free_hba_path;
1498         }
1499
1500         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
1501         ccb.ccb_h.func_code = XPT_SASYNC_CB;
1502         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
1503         ccb.callback = hptiop_async;
1504         ccb.callback_arg = hba->sim;
1505         xpt_action((union ccb *)&ccb);
1506
1507         rid = 0;
1508         if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
1509                         &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
1510                 device_printf(dev, "allocate irq failed!\n");
1511                 goto free_hba_path;
1512         }
1513
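        /* FreeBSD 7 inserted an interrupt filter argument into bus_setup_intr(). */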
1514 #if __FreeBSD_version <700000
1515         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1516                                 hptiop_pci_intr, hba, &hba->irq_handle))
1517 #else
1518         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
1519                                 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
1520 #endif
1521         {
1522                 device_printf(dev, "allocate intr function failed!\n");
1523                 goto free_irq_resource;
1524         }
1525
1526         if (hptiop_send_sync_msg(hba,
1527                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
1528                 device_printf(dev, "failed to start background task\n");
1529                 goto teardown_irq_resource;
1530         }
1531
1532         hba->ops->enable_intr(hba);
1533
1534         hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
1535                                 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
1536                                 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
1537
1538 #if __FreeBSD_version < 503000
1539         hba->ioctl_dev->si_drv1 = hba;
1540 #endif
1541
1542         hptiop_rescan_bus(hba);
1543
1544         return 0;
1545
1546
1547 teardown_irq_resource:
1548         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
1549
1550 free_irq_resource:
1551         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
1552
1553 free_hba_path:
1554         xpt_free_path(hba->path);
1555
1556 deregister_xpt_bus:
1557         xpt_bus_deregister(cam_sim_path(hba->sim));
1558
1559 free_cam_sim:
1560         cam_sim_free(hba->sim, /*free devq*/ TRUE);
1561
1562 srb_dmamap_unload:
1563         if (hba->uncached_ptr)
1564                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
1565
1566 srb_dmamem_free:
1567         if (hba->uncached_ptr)
1568                 bus_dmamem_free(hba->srb_dmat,
1569                         hba->uncached_ptr, hba->srb_dmamap);
1570
1571 destroy_srb_dmat:
1572         if (hba->srb_dmat)
1573                 bus_dma_tag_destroy(hba->srb_dmat);
1574
1575 destroy_io_dmat:
1576         if (hba->io_dmat)
1577                 bus_dma_tag_destroy(hba->io_dmat);
1578
1579 get_config_failed:
1580         if (hba->ops->internal_memfree)
1581                 hba->ops->internal_memfree(hba);
1582
1583 destroy_parent_tag:
1584         if (hba->parent_dmat)
1585                 bus_dma_tag_destroy(hba->parent_dmat);
1586
1587 release_pci_res:
1588         if (hba->ops->release_pci_res)
1589                 hba->ops->release_pci_res(hba);
1590
1591         return ENXIO;
1592 }
1593
1594 static int hptiop_detach(device_t dev)
1595 {
1596         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1597         int i;
1598         int error = EBUSY;
1599
1600         hptiop_lock_adapter(hba);
1601         for (i = 0; i < hba->max_devices; i++)
1602                 if (hptiop_os_query_remove_device(hba, i)) {
1603                         device_printf(dev, "%d file system is busy, id=%d\n",
1604                                                 hba->pciunit, i);
1605                         goto out;
1606                 }
1607
1608         if ((error = hptiop_shutdown(dev)) != 0)
1609                 goto out;
1610         if (hptiop_send_sync_msg(hba,
1611                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
1612                 goto out;
1613
1614         hptiop_release_resource(hba);
1615         error = 0;
1616 out:
1617         hptiop_unlock_adapter(hba);
1618         return error;
1619 }
1620
1621 static int hptiop_shutdown(device_t dev)
1622 {
1623         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
1624
1625         int error = 0;
1626
1627         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
1628                 device_printf(dev, "%d device is busy\n", hba->pciunit);
1629                 return EBUSY;
1630         }
1631
1632         hba->ops->disable_intr(hba);
1633
1634         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
1635                 error = EBUSY;
1636
1637         return error;
1638 }
1639
1640 static void hptiop_pci_intr(void *arg)
1641 {
1642         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
1643         hptiop_lock_adapter(hba);
1644         hba->ops->iop_intr(hba);
1645         hptiop_unlock_adapter(hba);
1646 }
1647
1648 static void hptiop_poll(struct cam_sim *sim)
1649 {
1650         hptiop_pci_intr(cam_sim_softc(sim));
1651 }
1652
1653 static void hptiop_async(void * callback_arg, u_int32_t code,
1654                                         struct cam_path * path, void * arg)
1655 {
1656 }
1657
1658 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
1659 {
1660         BUS_SPACE_WRT4_ITL(outbound_intmask,
1661                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
1662 }
1663
1664 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
1665 {
1666         u_int32_t int_mask;
1667
1668         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1669
1670         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
1671                         | MVIOP_MU_OUTBOUND_INT_MSG;
1672         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1673 }
1674
1675 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
1676 {
1677         u_int32_t int_mask;
1678
1679         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
1680
1681         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
1682         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
1683         BUS_SPACE_RD4_ITL(outbound_intstatus);
1684 }
1685
1686 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
1687 {
1688         u_int32_t int_mask;
1689         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
1690
1691         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
1692                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
1693         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
1694         BUS_SPACE_RD4_MV0(outbound_intmask);
1695 }
1696
1697 static int hptiop_reset_adapter(struct hpt_iop_hba * hba)
1698 {
1699         return hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1700 }
1701
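/*
 * SRB allocation: a simple singly-linked free list.  Callers are expected
 * to hold the adapter lock, so no further locking is done here.
 */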
1702 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
1703 {
1704         struct hpt_iop_srb * srb;
1705
1706         if (hba->srb_list) {
1707                 srb = hba->srb_list;
1708                 hba->srb_list = srb->next;
1709                 return srb;
1710         }
1711
1712         return NULL;
1713 }
1714
1715 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
1716 {
1717         srb->next = hba->srb_list;
1718         hba->srb_list = srb;
1719 }
1720
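/*
 * CAM entry point.  XPT_SCSI_IO maps the request through io_dmat and hands
 * it to the interface-specific post_req hook; the remaining function codes
 * are completed synchronously below.
 */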
1721 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
1722 {
1723         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
1724         struct hpt_iop_srb * srb;
1725
1726         switch (ccb->ccb_h.func_code) {
1727
1728         case XPT_SCSI_IO:
1729                 hptiop_lock_adapter(hba);
1730                 if (ccb->ccb_h.target_lun != 0 ||
1731                         ccb->ccb_h.target_id >= hba->max_devices ||
1732                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
1733                 {
1734                         ccb->ccb_h.status = CAM_TID_INVALID;
1735                         xpt_done(ccb);
1736                         goto scsi_done;
1737                 }
1738
1739                 if ((srb = hptiop_get_srb(hba)) == NULL) {
1740                         device_printf(hba->pcidev, "srb allocation failed\n");
1741                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1742                         xpt_done(ccb);
1743                         goto scsi_done;
1744                 }
1745
1746                 srb->ccb = ccb;
1747
1748                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
1749                         hptiop_post_scsi_command(srb, NULL, 0, 0);
1750                 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
1751                         if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
1752                                 int error;
1753
1754                                 error = bus_dmamap_load(hba->io_dmat,
1755                                                 srb->dma_map,
1756                                                 ccb->csio.data_ptr,
1757                                                 ccb->csio.dxfer_len,
1758                                                 hptiop_post_scsi_command,
1759                                                 srb, 0);
1760
1761                                 if (error && error != EINPROGRESS) {
1762                                         device_printf(hba->pcidev,
1763                                                 "%d bus_dmamap_load error %d\n",
1764                                                 hba->pciunit, error);
1765                                         xpt_freeze_simq(hba->sim, 1);
1766                                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1767 invalid:
1768                                         hptiop_free_srb(hba, srb);
1769                                         xpt_done(ccb);
1770                                         goto scsi_done;
1771                                 }
1772                         }
1773                         else {
1774                                 device_printf(hba->pcidev,
1775                                         "CAM_DATA_PHYS not supported\n");
1776                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
1777                                 goto invalid;
1778                         }
1779                 }
1780                 else {
1781                         struct bus_dma_segment *segs;
1782
1783                         if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
1784                                 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
1785                                 device_printf(hba->pcidev, "SCSI cmd failed\n");
1786                                 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
1787                                 goto invalid;
1788                         }
1789
1790                         segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
1791                         hptiop_post_scsi_command(srb, segs,
1792                                                 ccb->csio.sglist_cnt, 0);
1793                 }
1794
1795 scsi_done:
1796                 hptiop_unlock_adapter(hba);
1797                 return;
1798
1799         case XPT_RESET_BUS:
1800                 device_printf(hba->pcidev, "reset adapter\n");
1801                 hptiop_lock_adapter(hba);
1802                 hba->msg_done = 0;
1803                 hptiop_reset_adapter(hba);
1804                 hptiop_unlock_adapter(hba);
1805                 break;
1806
1807         case XPT_GET_TRAN_SETTINGS:
1808         case XPT_SET_TRAN_SETTINGS:
1809                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
1810                 break;
1811
1812         case XPT_CALC_GEOMETRY:
1813                 ccb->ccg.heads = 255;
1814                 ccb->ccg.secs_per_track = 63;
1815                 ccb->ccg.cylinders = ccb->ccg.volume_size /
1816                                 (ccb->ccg.heads * ccb->ccg.secs_per_track);
1817                 ccb->ccb_h.status = CAM_REQ_CMP;
1818                 break;
1819
1820         case XPT_PATH_INQ:
1821         {
1822                 struct ccb_pathinq *cpi = &ccb->cpi;
1823
1824                 cpi->version_num = 1;
1825                 cpi->hba_inquiry = PI_SDTR_ABLE;
1826                 cpi->target_sprt = 0;
1827                 cpi->hba_misc = PIM_NOBUSRESET;
1828                 cpi->hba_eng_cnt = 0;
1829                 cpi->max_target = hba->max_devices;
1830                 cpi->max_lun = 0;
1831                 cpi->unit_number = cam_sim_unit(sim);
1832                 cpi->bus_id = cam_sim_bus(sim);
1833                 cpi->initiator_id = hba->max_devices;
1834                 cpi->base_transfer_speed = 3300;
1835
1836                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
1837                 strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
1838                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
1839                 cpi->ccb_h.status = CAM_REQ_CMP;
1840                 break;
1841         }
1842
1843         default:
1844                 ccb->ccb_h.status = CAM_REQ_INVALID;
1845                 break;
1846         }
1847
1848         xpt_done(ccb);
1849         return;
1850 }
1851
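/*
 * Post a SCSI command to an ITL-interface IOP.  SRBs flagged with
 * HPT_SRB_FLAG_HIGH_MEM_ACESS (bus address too high for the IOP to
 * reference directly, see hptiop_map_srb()) are built on the stack and
 * copied into a request slot read from the inbound queue; all other SRBs
 * double as the request itself and only their bus address is posted.
 */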
1852 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
1853                                 struct hpt_iop_srb *srb,
1854                                 bus_dma_segment_t *segs, int nsegs)
1855 {
1856         int idx;
1857         union ccb *ccb = srb->ccb;
1858         u_int8_t *cdb;
1859
1860         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1861                 cdb = ccb->csio.cdb_io.cdb_ptr;
1862         else
1863                 cdb = ccb->csio.cdb_io.cdb_bytes;
1864
1865         KdPrint(("ccb=%p %x-%x-%x\n",
1866                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
1867
1868         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
1869                 u_int32_t iop_req32;
1870                 struct hpt_iop_request_scsi_command req;
1871
1872                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1873
1874                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
1875                         device_printf(hba->pcidev, "invalid req offset\n");
1876                         ccb->ccb_h.status = CAM_BUSY;
1877                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
1878                         hptiop_free_srb(hba, srb);
1879                         xpt_done(ccb);
1880                         return;
1881                 }
1882
1883                 if (ccb->csio.dxfer_len && nsegs > 0) {
1884                         struct hpt_iopsg *psg = req.sg_list;
1885                         for (idx = 0; idx < nsegs; idx++, psg++) {
1886                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1887                                 psg->size = segs[idx].ds_len;
1888                                 psg->eot = 0;
1889                         }
1890                         psg[-1].eot = 1;
1891                 }
1892
1893                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
1894
1895                 req.header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1896                                 + nsegs*sizeof(struct hpt_iopsg);
1897                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1898                 req.header.flags = 0;
1899                 req.header.result = IOP_RESULT_PENDING;
1900                 req.header.context = (u_int64_t)(unsigned long)srb;
1901                 req.dataxfer_length = ccb->csio.dxfer_len;
1902                 req.channel =  0;
1903                 req.target =  ccb->ccb_h.target_id;
1904                 req.lun =  ccb->ccb_h.target_lun;
1905
1906                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
1907                         (u_int8_t *)&req, req.header.size);
1908
1909                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1910                         bus_dmamap_sync(hba->io_dmat,
1911                                 srb->dma_map, BUS_DMASYNC_PREREAD);
1912                 }
1913                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
1914                         bus_dmamap_sync(hba->io_dmat,
1915                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
1916
1917                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
1918         } else {
1919                 struct hpt_iop_request_scsi_command *req;
1920
1921                 req = (struct hpt_iop_request_scsi_command *)srb;
1922                 if (ccb->csio.dxfer_len && nsegs > 0) {
1923                         struct hpt_iopsg *psg = req->sg_list;
1924                         for (idx = 0; idx < nsegs; idx++, psg++) {
1925                                 psg->pci_address =
1926                                         (u_int64_t)segs[idx].ds_addr;
1927                                 psg->size = segs[idx].ds_len;
1928                                 psg->eot = 0;
1929                         }
1930                         psg[-1].eot = 1;
1931                 }
1932
1933                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
1934
1935                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
1936                 req->header.result = IOP_RESULT_PENDING;
1937                 req->dataxfer_length = ccb->csio.dxfer_len;
1938                 req->channel =  0;
1939                 req->target =  ccb->ccb_h.target_id;
1940                 req->lun =  ccb->ccb_h.target_lun;
1941                 req->header.size = offsetof(struct hpt_iop_request_scsi_command, sg_list)
1942                         + nsegs*sizeof(struct hpt_iopsg);
1943                 req->header.context = (u_int64_t)srb->index |
1944                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
1945                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1946
1947                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
1948                         bus_dmamap_sync(hba->io_dmat,
1949                                 srb->dma_map, BUS_DMASYNC_PREREAD);
1950                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
1951                         bus_dmamap_sync(hba->io_dmat,
1952                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
1953                 }
1954
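                /*
                 * Firmware/interface versions above 1.2 expect a request
                 * size hint encoded into the low bits of the inbound
                 * queue entry.
                 */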
1955                 if (hba->firmware_version > 0x01020000
1956                         || hba->interface_version > 0x01020000) {
1957                         u_int32_t size_bits;
1958
1959                         if (req->header.size < 256)
1960                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
1961                         else if (req->header.size < 512)
1962                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
1963                         else
1964                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
1965                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
1966
1967                         BUS_SPACE_WRT4_ITL(inbound_queue,
1968                                 (u_int32_t)srb->phy_addr | size_bits);
1969                 } else
1970                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
1971                                 | IOPMU_QUEUE_ADDR_HOST_BIT);
1972         }
1973 }
1974
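/*
 * Post a SCSI command to an MV-interface IOP.  The request is built in
 * place in the SRB; its bus address is handed to the firmware with the
 * request size (in 256-byte units, capped at 3) encoded in the low bits.
 */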
1975 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
1976                                 struct hpt_iop_srb *srb,
1977                                 bus_dma_segment_t *segs, int nsegs)
1978 {
1979         int idx, size;
1980         union ccb *ccb = srb->ccb;
1981         u_int8_t *cdb;
1982         struct hpt_iop_request_scsi_command *req;
1983         u_int64_t req_phy;
1984
1985         req = (struct hpt_iop_request_scsi_command *)srb;
1986         req_phy = srb->phy_addr;
1987
1988         if (ccb->csio.dxfer_len && nsegs > 0) {
1989                 struct hpt_iopsg *psg = req->sg_list;
1990                 for (idx = 0; idx < nsegs; idx++, psg++) {
1991                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
1992                         psg->size = segs[idx].ds_len;
1993                         psg->eot = 0;
1994                 }
1995                 psg[-1].eot = 1;
1996         }
1997         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
1998                 cdb = ccb->csio.cdb_io.cdb_ptr;
1999         else
2000                 cdb = ccb->csio.cdb_io.cdb_bytes;
2001
2002         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2003         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2004         req->header.result = IOP_RESULT_PENDING;
2005         req->dataxfer_length = ccb->csio.dxfer_len;
2006         req->channel = 0;
2007         req->target =  ccb->ccb_h.target_id;
2008         req->lun =  ccb->ccb_h.target_lun;
2009         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2010                                 - sizeof(struct hpt_iopsg)
2011                                 + nsegs * sizeof(struct hpt_iopsg);
2012         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2013                 bus_dmamap_sync(hba->io_dmat,
2014                         srb->dma_map, BUS_DMASYNC_PREREAD);
2015         }
2016         else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2017                 bus_dmamap_sync(hba->io_dmat,
2018                         srb->dma_map, BUS_DMASYNC_PREWRITE);
2019         req->header.context = (u_int64_t)srb->index
2020                                         << MVIOP_REQUEST_NUMBER_START_BIT
2021                                         | MVIOP_CMD_TYPE_SCSI;
2022         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2023         size = req->header.size >> 8;
2024         hptiop_mv_inbound_write(req_phy
2025                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2026                         | (size > 3 ? 3 : size), hba);
2027 }
2028
2029 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2030                                         int nsegs, int error)
2031 {
2032         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2033         union ccb *ccb = srb->ccb;
2034         struct hpt_iop_hba *hba = srb->hba;
2035
2036         if (error || nsegs > hba->max_sg_count) {
2037                 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2038                         ccb->ccb_h.func_code,
2039                         ccb->ccb_h.target_id,
2040                         ccb->ccb_h.target_lun, nsegs));
2041                 ccb->ccb_h.status = CAM_BUSY;
2042                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2043                 hptiop_free_srb(hba, srb);
2044                 xpt_done(ccb);
2045                 return;
2046         }
2047
2048         hba->ops->post_req(hba, srb, segs, nsegs);
2049 }
2050
2051 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2052                                 int nsegs, int error)
2053 {
2054         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2055         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2056                                 & ~(u_int64_t)0x1F;
2057         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2058                                 & ~0x1F);
2059 }
2060
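/*
 * bus_dmamap_load() callback for the SRB pool: align the pool on a 32-byte
 * boundary, carve it into HPT_SRB_MAX_QUEUE_SIZE fixed-size SRBs, and give
 * each SRB a DMA map and its bus address before putting it on the free list.
 */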
2061 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2062                                 int nsegs, int error)
2063 {
2064         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2065         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2066         struct hpt_iop_srb *srb, *tmp_srb;
2067         int i;
2068
2069         if (error || nsegs == 0) {
2070                 device_printf(hba->pcidev, "hptiop_map_srb error\n");
2071                 return;
2072         }
2073
2074         /* map srb */
2075         srb = (struct hpt_iop_srb *)
2076                 (((unsigned long)hba->uncached_ptr + 0x1F)
2077                 & ~(unsigned long)0x1F);
2078
2079         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2080                 tmp_srb = (struct hpt_iop_srb *)
2081                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
2082                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2083                         if (bus_dmamap_create(hba->io_dmat,
2084                                                 0, &tmp_srb->dma_map)) {
2085                                 device_printf(hba->pcidev, "dmamap create failed\n");
2086                                 return;
2087                         }
2088
2089                         bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2090                         tmp_srb->hba = hba;
2091                         tmp_srb->index = i;
2092                         if (hba->ctlcfg_ptr == NULL) { /* ITL IOP */
2093                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2094                                                         (phy_addr >> 5);
2095                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2096                                         tmp_srb->srb_flag =
2097                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2098                         } else {
2099                                 tmp_srb->phy_addr = phy_addr;
2100                         }
2101
2102                         hptiop_free_srb(hba, tmp_srb);
2103                         hba->srb[i] = tmp_srb;
2104                         phy_addr += HPT_SRB_MAX_SIZE;
2105                 }
2106                 else {
2107                         device_printf(hba->pcidev, "invalid alignment\n");
2108                         return;
2109                 }
2110         }
2111 }
2112
2113 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2114 {
2115         hba->msg_done = 1;
2116 }
2117
2118 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2119                                                 int target_id)
2120 {
2121         struct cam_periph       *periph = NULL;
2122         struct cam_path         *path;
2123         int                     status, retval = 0;
2124
2125         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2126
2127         if (status == CAM_REQ_CMP) {
2128                 if ((periph = cam_periph_find(path, "da")) != NULL) {
2129                         if (periph->refcount >= 1) {
2130                                 device_printf(hba->pcidev, "%d, "
2131                                         "target_id=0x%x, "
2132                                         "refcount=%d\n",
2133                                     hba->pciunit, target_id, periph->refcount);
2134                                 retval = -1;
2135                         }
2136                 }
2137                 xpt_free_path(path);
2138         }
2139         return retval;
2140 }
2141
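/*
 * Release everything hptiop_attach() set up, in roughly reverse order.
 * Every step is guarded, so resources that were never allocated are
 * skipped.
 */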
2142 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2143 {
2144         int i;
2145         if (hba->path) {
2146                 struct ccb_setasync ccb;
2147
2148                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2149                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2150                 ccb.event_enable = 0;
2151                 ccb.callback = hptiop_async;
2152                 ccb.callback_arg = hba->sim;
2153                 xpt_action((union ccb *)&ccb);
2154                 xpt_free_path(hba->path);
2155         }
2156
2157         if (hba->sim) {
2158                 xpt_bus_deregister(cam_sim_path(hba->sim));
2159                 cam_sim_free(hba->sim, TRUE);
2160         }
2161
2162         if (hba->ctlcfg_dmat) {
2163                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2164                 bus_dmamem_free(hba->ctlcfg_dmat,
2165                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2166                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2167         }
2168
2169         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2170                 struct hpt_iop_srb *srb = hba->srb[i];
2171                 if (srb != NULL && srb->dma_map)
2172                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2173         }
2174
2175         if (hba->srb_dmat) {
2176                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2177                 bus_dmamem_free(hba->srb_dmat, hba->uncached_ptr, hba->srb_dmamap);
2178                 bus_dma_tag_destroy(hba->srb_dmat);
2179         }
2180
2181         if (hba->io_dmat)
2182                 bus_dma_tag_destroy(hba->io_dmat);
2183
2184         if (hba->parent_dmat)
2185                 bus_dma_tag_destroy(hba->parent_dmat);
2186
2187         if (hba->irq_handle)
2188                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2189
2190         if (hba->irq_res)
2191                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2192                                         0, hba->irq_res);
2193
2194         if (hba->bar0_res)
2195                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2196                                         hba->bar0_rid, hba->bar0_res);
2197         if (hba->bar2_res)
2198                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2199                                         hba->bar2_rid, hba->bar2_res);
2200         if (hba->ioctl_dev)
2201                 destroy_dev(hba->ioctl_dev);
2202 }