/*
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#if (__FreeBSD_version >= 500000)
#include <sys/time.h>
#include <sys/systm.h>
#else
#include <machine/clock.h>
#endif

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#if (__FreeBSD_version >= 500000)
#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>
#endif

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#if (__FreeBSD_version >= 500000)
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#else
#include <pci/pcivar.h>
#include <pci/pcireg.h>
#endif

#if (__FreeBSD_version <= 500043)
#include <sys/devicestat.h>
#endif

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#if (__FreeBSD_version < 500043)
#include <sys/bus_private.h>
#endif

#include <dev/hptiop/hptiop.h>

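/*
 * The driver speaks three different register interfaces, reflected in
 * the _itl, _mv and _mvfrey suffixes used throughout this file: ITL
 * parts exchange requests through inbound/outbound register queues, MV
 * parts through 64-bit circular queues in BAR2, and MVFrey parts
 * through in/out descriptor lists in host memory.  The variant in use
 * is reached indirectly through the hba->ops table.
 */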
static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.8";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
				u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
							u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
			u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_ioctl_command *req,
				struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_srb *srb,
				bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
					struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
	.d_open = hptiop_open,
	.d_close = hptiop_close,
	.d_ioctl = hptiop_ioctl,
	.d_name = driver_name,
#if __FreeBSD_version >= 503000
	.d_version = D_VERSION,
#endif
#if (__FreeBSD_version >= 503000 && __FreeBSD_version < 600034)
	.d_flags = D_NEEDGIANT,
#endif
#if __FreeBSD_version < 600034
#if __FreeBSD_version >= 501000
	.d_maj = MAJOR_AUTO,
#else
	.d_maj = HPT_DEV_MAJOR,
#endif
#endif
};

#if __FreeBSD_version < 503000
#define hba_from_dev(dev) ((struct hpt_iop_hba *)(dev)->si_drv1)
#else
#define hba_from_dev(dev) \
	((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
#endif

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
		hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
		hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

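/*
 * Register access helpers.  Each macro names a register by its field in
 * the corresponding memory-map structure, so for example the ITL
 * outbound queue is read as BUS_SPACE_RD4_ITL(outbound_queue) rather
 * than via a hand-computed BAR offset.  The macros expect a local
 * 'hba' to be in scope.
 */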
static int hptiop_open(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	if (hba == NULL)
		return ENXIO;
	if (hba->flag & HPT_IOCTL_FLAG_OPEN)
		return EBUSY;
	hba->flag |= HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
					int devtype, ioctl_thread_t proc)
{
	struct hpt_iop_hba *hba = hba_from_dev(dev);

	hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
	return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
					int flags, ioctl_thread_t proc)
{
	int ret = EFAULT;
	struct hpt_iop_hba *hba = hba_from_dev(dev);

#if (__FreeBSD_version >= 500000)
	mtx_lock(&Giant);
#endif

	switch (cmd) {
	case HPT_DO_IOCONTROL:
		ret = hba->ops->do_ioctl(hba,
				(struct hpt_iop_ioctl_param *)data);
		break;
	case HPT_SCAN_BUS:
		ret = hptiop_rescan_bus(hba);
		break;
	}

#if (__FreeBSD_version >= 500000)
	mtx_unlock(&Giant);
#endif

	return ret;
}

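/*
 * MV queue primitives.  Requests travel as 64-bit descriptors through
 * two circular queues in BAR2 space: reads compare the software tail
 * against the hardware-maintained head and advance the tail, writes
 * advance the head and ring the inbound doorbell so the IOP notices
 * the new entry.
 */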
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
	u_int64_t p;
	u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
	u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

	if (outbound_tail != outbound_head) {
		bus_space_read_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv,
				outbound_q[outbound_tail]),
			(u_int32_t *)&p, 2);

		outbound_tail++;

		if (outbound_tail == MVIOP_QUEUE_LEN)
			outbound_tail = 0;

		BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
		return p;
	} else
		return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
	u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
	u_int32_t head = inbound_head + 1;

	if (head == MVIOP_QUEUE_LEN)
		head = 0;

	bus_space_write_region_4(hba->bar2t, hba->bar2h,
			offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
			(u_int32_t *)&p, 2);
	BUS_SPACE_WRT4_MV2(inbound_head, head);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
	BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MV2(inbound_msg, msg);
	BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

	BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
	BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
	BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

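/*
 * Firmware readiness checks.  On ITL the firmware signals readiness by
 * posting a request on the inbound queue, which is polled for here and
 * handed straight back through the outbound queue; MV and MVFrey are
 * instead probed with a synchronous NOP message.
 */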
static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
	u_int32_t req = 0;
	int i;

	for (i = 0; i < millisec; i++) {
		req = BUS_SPACE_RD4_ITL(inbound_queue);
		if (req != IOPMU_QUEUE_EMPTY)
			break;
		DELAY(1000);
	}

	if (req != IOPMU_QUEUE_EMPTY) {
		BUS_SPACE_WRT4_ITL(outbound_queue, req);
		BUS_SPACE_RD4_ITL(outbound_intstatus);
		return 0;
	}

	return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba *hba,
							u_int32_t millisec)
{
	if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
		return -1;

	return 0;
}

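/*
 * ITL completion handling.  An index with the host bits set refers to a
 * host-allocated srb; firmware/interface versions above 0x01020000 can
 * additionally encode a success result in the index itself, saving a
 * read-back of the request header.  Any other index addresses a request
 * that lives in IOP memory and must be accessed through BAR0.
 */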
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
							u_int32_t index)
{
	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req = NULL;
	union ccb *ccb;
	u_int8_t *cdb;
	u_int32_t result, temp, dxfer;
	u_int64_t temp64;

	if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
		if (hba->firmware_version > 0x01020000 ||
			hba->interface_version > 0x01020000) {
			srb = hba->srb[index & ~(u_int32_t)
				(IOPMU_QUEUE_ADDR_HOST_BIT
				| IOPMU_QUEUE_REQUEST_RESULT_BIT)];
			req = (struct hpt_iop_request_scsi_command *)srb;
			if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
				result = IOP_RESULT_SUCCESS;
			else
				result = req->header.result;
		} else {
			srb = hba->srb[index &
				~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
			req = (struct hpt_iop_request_scsi_command *)srb;
			result = req->header.result;
		}
		dxfer = req->dataxfer_length;
		goto srb_complete;
	}

	/* iop req */
	temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, type));
	result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
		offsetof(struct hpt_iop_request_header, result));
	switch (temp) {
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
	{
		temp64 = 0;
		bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		wakeup((void *)((unsigned long)hba->u.itl.mu + index));
		break;
	}

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		srb = (struct hpt_iop_srb *)(unsigned long)temp64;
		dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
				index + offsetof(struct hpt_iop_request_scsi_command,
				dataxfer_length));
srb_complete:
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		switch (result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}

			ccb->ccb_h.status = CAM_REQ_CMP;
			break;

		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (dxfer < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    dxfer;
			else
				ccb->csio.sense_resid = 0;
			if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
				bus_space_read_region_1(hba->bar0t, hba->bar0h,
					index + offsetof(struct hpt_iop_request_scsi_command,
					sg_list), (u_int8_t *)&ccb->csio.sense_data,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			} else {
				memcpy(&ccb->csio.sense_data, &req->sg_list,
					MIN(dxfer, sizeof(ccb->csio.sense_data)));
			}
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
			BUS_SPACE_WRT4_ITL(outbound_queue, index);

		ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	}
}

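/*
 * Drain the ITL outbound queue.  For synchronous requests the 64-bit
 * context field doubles as a completion flag: while it is still zero
 * the request is completed by writing 1 into the context, which the
 * poller in hptiop_send_sync_request_itl picks up; a non-zero context
 * identifies a sleeping waiter and is routed through the callback.
 */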
static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
	u_int32_t req, temp;

	while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
		if (req & IOPMU_QUEUE_MASK_HOST_BITS)
			hptiop_request_callback_itl(hba, req);
		else {
			struct hpt_iop_request_header *p;

			p = (struct hpt_iop_request_header *)
				((char *)hba->u.itl.mu + req);
			temp = bus_space_read_4(hba->bar0t,
					hba->bar0h, req +
					offsetof(struct hpt_iop_request_header,
						flags));
			if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
				u_int64_t temp64;
				bus_space_read_region_4(hba->bar0t,
					hba->bar0h, req +
					offsetof(struct hpt_iop_request_header,
						context),
					(u_int32_t *)&temp64, 2);
				if (temp64) {
					hptiop_request_callback_itl(hba, req);
				} else {
					temp64 = 1;
					bus_space_write_region_4(hba->bar0t,
						hba->bar0h, req +
						offsetof(struct hpt_iop_request_header,
							context),
						(u_int32_t *)&temp64, 2);
				}
			} else
				hptiop_request_callback_itl(hba, req);
		}
	}
}

static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_ITL(outbound_intstatus);

	if (status & IOPMU_OUTBOUND_INT_MSG0) {
		u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_itl(hba);
		ret = 1;
	}

	return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
							u_int64_t _tag)
{
	u_int32_t context = (u_int32_t)_tag;

	if (context & MVIOP_CMD_TYPE_SCSI) {
		struct hpt_iop_srb *srb;
		struct hpt_iop_request_scsi_command *req;
		union ccb *ccb;
		u_int8_t *cdb;

		srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
		req = (struct hpt_iop_request_scsi_command *)srb;
		ccb = (union ccb *)srb->ccb;
		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
					srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			    sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
				MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
	} else if (context & MVIOP_CMD_TYPE_IOCTL) {
		struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
		if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup(req);
	} else if (context &
			(MVIOP_CMD_TYPE_SET_CONFIG |
				MVIOP_CMD_TYPE_GET_CONFIG))
		hba->config_done = 1;
	else {
		device_printf(hba->pcidev, "wrong callback type\n");
	}
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
				u_int32_t _tag)
{
	u_int32_t req_type = _tag & 0xf;

	struct hpt_iop_srb *srb;
	struct hpt_iop_request_scsi_command *req;
	union ccb *ccb;
	u_int8_t *cdb;

	switch (req_type) {
	case IOP_REQUEST_TYPE_GET_CONFIG:
	case IOP_REQUEST_TYPE_SET_CONFIG:
		hba->config_done = 1;
		break;

	case IOP_REQUEST_TYPE_SCSI_COMMAND:
		srb = hba->srb[(_tag >> 4) & 0xff];
		req = (struct hpt_iop_request_scsi_command *)srb;

		ccb = (union ccb *)srb->ccb;

		untimeout(hptiop_reset_adapter, hba, ccb->ccb_h.timeout_ch);

		if (ccb->ccb_h.flags & CAM_CDB_POINTER)
			cdb = ccb->csio.cdb_io.cdb_ptr;
		else
			cdb = ccb->csio.cdb_io.cdb_bytes;

		if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
			ccb->ccb_h.status = CAM_REQ_CMP;
			goto scsi_done;
		}

		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			req->header.result = IOP_RESULT_SUCCESS;

		switch (req->header.result) {
		case IOP_RESULT_SUCCESS:
			switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
			case CAM_DIR_IN:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTREAD);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			case CAM_DIR_OUT:
				bus_dmamap_sync(hba->io_dmat,
						srb->dma_map, BUS_DMASYNC_POSTWRITE);
				bus_dmamap_unload(hba->io_dmat, srb->dma_map);
				break;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		case IOP_RESULT_BAD_TARGET:
			ccb->ccb_h.status = CAM_DEV_NOT_THERE;
			break;
		case IOP_RESULT_BUSY:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_INVALID_REQUEST:
			ccb->ccb_h.status = CAM_REQ_INVALID;
			break;
		case IOP_RESULT_FAIL:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		case IOP_RESULT_RESET:
			ccb->ccb_h.status = CAM_BUSY;
			break;
		case IOP_RESULT_CHECK_CONDITION:
			memset(&ccb->csio.sense_data, 0,
			       sizeof(ccb->csio.sense_data));
			if (req->dataxfer_length < ccb->csio.sense_len)
				ccb->csio.sense_resid = ccb->csio.sense_len -
				    req->dataxfer_length;
			else
				ccb->csio.sense_resid = 0;
			memcpy(&ccb->csio.sense_data, &req->sg_list,
			       MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
			ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
			break;
		default:
			ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
			break;
		}
scsi_done:
		ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		break;
	case IOP_REQUEST_TYPE_IOCTL_COMMAND:
		if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
			hba->config_done = 1;
		else
			hba->config_done = -1;
		wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
		break;
	default:
		device_printf(hba->pcidev, "wrong callback type\n");
		break;
	}
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
	u_int64_t req;

	while ((req = hptiop_mv_outbound_read(hba))) {
		if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
			if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
				hptiop_request_callback_mv(hba, req);
			}
		}
	}
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
	u_int32_t status;
	int ret = 0;

	status = BUS_SPACE_RD4_MV0(outbound_doorbell);

	if (status)
		BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

	if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
		u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
		KdPrint(("hptiop: received outbound msg %x\n", msg));
		hptiop_os_message_callback(hba, msg);
		ret = 1;
	}

	if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
		hptiop_drain_outbound_queue_mv(hba);
		ret = 1;
	}

	return ret;
}

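/*
 * MVFrey interrupt handling.  Completions are consumed from an outbound
 * list in host memory: the hardware advances a consumer pointer
 * (outlist_cptr) and the read pointer is walked up to it, re-checking
 * afterwards in case further entries were posted while the batch was
 * being processed.
 */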
static int hptiop_intr_mvfrey(struct hpt_iop_hba *hba)
{
	u_int32_t status, _tag, cptr;
	int ret = 0;

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
	}

	status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
		if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
			u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
			hptiop_os_message_callback(hba, msg);
		}
		ret = 1;
	}

	status = BUS_SPACE_RD4_MVFREY2(isr_cause);
	if (status) {
		BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
		do {
			cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
			while (hba->u.mvfrey.outlist_rptr != cptr) {
				hba->u.mvfrey.outlist_rptr++;
				if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
					hba->u.mvfrey.outlist_rptr = 0;
				}

				_tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
				hptiop_request_callback_mvfrey(hba, _tag);
				ret = 2;
			}
		} while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
	}

	if (hba->initialized) {
		BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
	}

	return ret;
}

static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
					u_int32_t req32, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t temp64;

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_itl(hba);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_header, context),
			(u_int32_t *)&temp64, 2);
		if (temp64)
			return 0;
		DELAY(1000);
	}

	return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i;
	u_int64_t phy_addr;

	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy |
			(u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
	((struct hpt_iop_request_get_config *)req)->header.flags |=
		IOP_REQUEST_FLAG_SYNC_REQUEST |
		IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
	hptiop_mv_inbound_write(phy_addr, hba);
	BUS_SPACE_RD4_MV0(outbound_intmask);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mv(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

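/*
 * Post a synchronous request on the MVFrey inbound list.  The write
 * pointer wraps at list_count and flips CL_POINTER_TOGGLE on each lap,
 * presumably the usual ring trick that lets the hardware tell a full
 * list from an empty one; completion is then detected by polling
 * config_done from the interrupt path.
 */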
static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
					void *req, u_int32_t millisec)
{
	u_int32_t i, index;
	u_int64_t phy_addr;
	struct hpt_iop_request_header *reqhdr =
			(struct hpt_iop_request_header *)req;

	hba->config_done = 0;

	phy_addr = hba->ctlcfgcmd_phy;
	reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
			| IOP_REQUEST_FLAG_OUTPUT_CONTEXT
			| IOP_REQUEST_FLAG_ADDR_BITS
			| ((phy_addr >> 16) & 0xffff0000);
	reqhdr->context = ((phy_addr & 0xffffffff) << 32)
			| IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

	hba->u.mvfrey.inlist_wptr++;
	index = hba->u.mvfrey.inlist_wptr & 0x3fff;

	if (index == hba->u.mvfrey.list_count) {
		index = 0;
		hba->u.mvfrey.inlist_wptr &= ~0x3fff;
		hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
	}

	hba->u.mvfrey.inlist[index].addr = phy_addr;
	hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

	BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
	BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

	for (i = 0; i < millisec; i++) {
		hptiop_intr_mvfrey(hba);
		if (hba->config_done)
			return 0;
		DELAY(1000);
	}
	return -1;
}

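/*
 * Synchronous messaging, common to all three interfaces: post the
 * message through the interface-specific op, then poll the interrupt
 * handler at 1ms intervals until msg_done is set or the timeout given
 * in milliseconds expires.
 */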
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
					u_int32_t msg, u_int32_t millisec)
{
	u_int32_t i;

	hba->msg_done = 0;
	hba->ops->post_msg(hba, msg);

	for (i = 0; i < millisec; i++) {
		hba->ops->iop_intr(hba);
		if (hba->msg_done)
			break;
		DELAY(1000);
	}

	return hba->msg_done ? 0 : -1;
}

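/*
 * GET_CONFIG/SET_CONFIG exchanges.  On ITL the request is staged in a
 * slot borrowed from the firmware's inbound queue and copied over BAR0;
 * on MV it goes through the preallocated ctlcfg buffer; on MVFrey the
 * firmware exports its configuration in host memory, so GET_CONFIG only
 * has to sanity-check and copy it.
 */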
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	u_int32_t req32;

	config->header.size = sizeof(struct hpt_iop_request_get_config);
	config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);
	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	bus_space_write_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_header) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	bus_space_read_region_4(hba->bar0t, hba->bar0h,
			req32, (u_int32_t *)config,
			sizeof(struct hpt_iop_request_get_config) >> 2);

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_get_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: get config send cmd failed"));
		return -1;
	}

	*config = *req;
	return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_get_config *config)
{
	struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

	if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
	    info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
		KdPrint(("hptiop: header size %x/%x type %x/%x",
			 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
			 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
		return -1;
	}

	config->interface_version = info->interface_version;
	config->firmware_version = info->firmware_version;
	config->max_requests = info->max_requests;
	config->request_size = info->request_size;
	config->max_sg_count = info->max_sg_count;
	config->data_transfer_length = info->data_transfer_length;
	config->alignment_mask = info->alignment_mask;
	config->max_devices = info->max_devices;
	config->sdram_size = info->sdram_size;

	KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
		 config->max_requests, config->request_size,
		 config->data_transfer_length, config->max_devices,
		 config->sdram_size));

	return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	u_int32_t req32;

	req32 = BUS_SPACE_RD4_ITL(inbound_queue);

	if (req32 == IOPMU_QUEUE_EMPTY)
		return -1;

	config->header.size = sizeof(struct hpt_iop_request_set_config);
	config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	config->header.result = IOP_RESULT_PENDING;
	config->header.context = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
		(u_int32_t *)config,
		sizeof(struct hpt_iop_request_set_config) >> 2);

	if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	BUS_SPACE_WRT4_ITL(outbound_queue, req32);

	return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.flags = 0;
	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;
	req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

	if (hptiop_send_sync_request_mv(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
				struct hpt_iop_request_set_config *config)
{
	struct hpt_iop_request_set_config *req;

	if (!(req = hba->ctlcfg_ptr))
		return -1;

	memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
		(u_int8_t *)config + sizeof(struct hpt_iop_request_header),
		sizeof(struct hpt_iop_request_set_config) -
			sizeof(struct hpt_iop_request_header));

	req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
	req->header.size = sizeof(struct hpt_iop_request_set_config);
	req->header.result = IOP_RESULT_PENDING;

	if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
		KdPrint(("hptiop: set config send cmd failed"));
		return -1;
	}

	return 0;
}

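/*
 * IOCTL pass-through to the firmware.  Inside the request the caller's
 * input buffer, padded to a 4-byte boundary, is followed by the output
 * buffer, so the combined size must fit below max_request_size.  The
 * caller then sleeps on the request; if the sleep times out, the
 * adapter is reset with IOPMU_INBOUND_MSG0_RESET and the wait resumes
 * until the firmware clears the context.
 */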
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
				u_int32_t req32,
				struct hpt_iop_ioctl_param *pParams)
{
	u_int64_t temp64;
	struct hpt_iop_request_ioctl_command req;

	if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
			(hba->max_request_size -
			offsetof(struct hpt_iop_request_ioctl_command, buf))) {
		device_printf(hba->pcidev, "request size beyond max value");
		return -1;
	}

	req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
		+ pParams->nInBufferSize;
	req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
	req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
	req.header.result = IOP_RESULT_PENDING;
	req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
	req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
	req.inbuf_size = pParams->nInBufferSize;
	req.outbuf_size = pParams->nOutBufferSize;
	req.bytes_returned = 0;

	bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
		offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

	hptiop_lock_adapter(hba);

	BUS_SPACE_WRT4_ITL(inbound_queue, req32);
	BUS_SPACE_RD4_ITL(outbound_intstatus);

	bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
		offsetof(struct hpt_iop_request_ioctl_command, header.context),
		(u_int32_t *)&temp64, 2);
	while (temp64) {
		if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
				PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
			break;
		hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
		bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
			offsetof(struct hpt_iop_request_ioctl_command,
				header.context),
			(u_int32_t *)&temp64, 2);
	}

	hptiop_unlock_adapter(hba);
	return 0;
}

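/*
 * Byte-wise copies between user space and IOP memory behind BAR0.
 * bus_space offers no user-memory copy routine, so each byte is staged
 * through a bounce variable, with copyin()/copyout() performing the
 * user-space access checks.
 */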
1174 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1175                                                                         void *user, int size)
1176 {
1177         unsigned char byte;
1178         int i;
1179
1180         for (i=0; i<size; i++) {
1181                 if (copyin((u_int8_t *)user + i, &byte, 1))
1182                         return -1;
1183                 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1184         }
1185
1186         return 0;
1187 }
1188
1189 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1190                                                                         void *user, int size)
1191 {
1192         unsigned char byte;
1193         int i;
1194
1195         for (i=0; i<size; i++) {
1196                 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1197                 if (copyout(&byte, (u_int8_t *)user + i, 1))
1198                         return -1;
1199         }
1200
1201         return 0;
1202 }
1203
1204 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1205                                 struct hpt_iop_ioctl_param * pParams)
1206 {
1207         u_int32_t req32;
1208         u_int32_t result;
1209
1210         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1211                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1212                 return EFAULT;
1213         
1214         req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1215         if (req32 == IOPMU_QUEUE_EMPTY)
1216                 return EFAULT;
1217
1218         if (pParams->nInBufferSize)
1219                 if (hptiop_bus_space_copyin(hba, req32 +
1220                         offsetof(struct hpt_iop_request_ioctl_command, buf),
1221                         (void *)pParams->lpInBuffer, pParams->nInBufferSize))
1222                         goto invalid;
1223
1224         if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1225                 goto invalid;
1226
1227         result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1228                         offsetof(struct hpt_iop_request_ioctl_command,
1229                                 header.result));
1230
1231         if (result == IOP_RESULT_SUCCESS) {
1232                 if (pParams->nOutBufferSize)
1233                         if (hptiop_bus_space_copyout(hba, req32 +
1234                                 offsetof(struct hpt_iop_request_ioctl_command, buf) + 
1235                                         ((pParams->nInBufferSize + 3) & ~3),
1236                                 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1237                                 goto invalid;
1238
1239                 if (pParams->lpBytesReturned) {
1240                         if (hptiop_bus_space_copyout(hba, req32 + 
1241                                 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1242                                 (void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
1243                                 goto invalid;
1244                 }
1245
1246                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1247
1248                 return 0;
1249         } else{
1250 invalid:
1251                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1252
1253                 return EFAULT;
1254         }
1255 }
1256
1257 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1258                                 struct hpt_iop_request_ioctl_command *req,
1259                                 struct hpt_iop_ioctl_param *pParams)
1260 {
1261         u_int64_t req_phy;
1262         int size = 0;
1263
1264         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1265                         (hba->max_request_size -
1266                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1267                 device_printf(hba->pcidev, "request size beyond max value");
1268                 return -1;
1269         }
1270
1271         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1272         req->inbuf_size = pParams->nInBufferSize;
1273         req->outbuf_size = pParams->nOutBufferSize;
1274         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1275                                         + pParams->nInBufferSize;
1276         req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1277         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1278         req->header.result = IOP_RESULT_PENDING;
1279         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1280         size = req->header.size >> 8;
1281         size = size > 3 ? 3 : size;
1282         req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1283         hptiop_mv_inbound_write(req_phy, hba);
1284
1285         BUS_SPACE_RD4_MV0(outbound_intmask);
1286
1287         while (hba->config_done == 0) {
1288                 if (hptiop_sleep(hba, req, PPAUSE,
1289                         "hptctl", HPT_OSM_TIMEOUT)==0)
1290                         continue;
1291                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1292         }
1293         return 0;
1294 }
1295
1296 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1297                                 struct hpt_iop_ioctl_param *pParams)
1298 {
1299         struct hpt_iop_request_ioctl_command *req;
1300
1301         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1302                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1303                 return EFAULT;
1304
1305         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1306         hba->config_done = 0;
1307         hptiop_lock_adapter(hba);
1308         if (pParams->nInBufferSize)
1309                 if (copyin((void *)pParams->lpInBuffer,
1310                                 req->buf, pParams->nInBufferSize))
1311                         goto invalid;
1312         if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1313                 goto invalid;
1314
1315         if (hba->config_done == 1) {
1316                 if (pParams->nOutBufferSize)
1317                         if (copyout(req->buf +
1318                                 ((pParams->nInBufferSize + 3) & ~3),
1319                                 (void *)pParams->lpOutBuffer,
1320                                 pParams->nOutBufferSize))
1321                                 goto invalid;
1322
1323                 if (pParams->lpBytesReturned)
1324                         if (copyout(&req->bytes_returned,
1325                                 (void*)pParams->lpBytesReturned,
1326                                 sizeof(u_int32_t)))
1327                                 goto invalid;
1328                 hptiop_unlock_adapter(hba);
1329                 return 0;
1330         } else {
1331 invalid:
1332                 hptiop_unlock_adapter(hba);
1333                 return EFAULT;
1334         }
1335 }
1336
1337 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1338                                 struct hpt_iop_request_ioctl_command *req,
1339                                 struct hpt_iop_ioctl_param *pParams)
1340 {
1341         u_int64_t phy_addr;
1342         u_int32_t index;
1343
1344         phy_addr = hba->ctlcfgcmd_phy;
1345
1346         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1347                         (hba->max_request_size -
1348                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1349                 device_printf(hba->pcidev, "request size beyond max value\n");
1350                 return -1;
1351         }
1352
1353         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1354         req->inbuf_size = pParams->nInBufferSize;
1355         req->outbuf_size = pParams->nOutBufferSize;
1356         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1357                                         + pParams->nInBufferSize;
1358
1359         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1360         req->header.result = IOP_RESULT_PENDING;
1361
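        /*
         * mvfrey carries the request's physical address in the header:
         * address bits 32..47 are folded into header.flags, and the low
         * 32 bits are packed into header.context together with the
         * host-queue bit and the request type.
         */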
1362         req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1363                                                 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1364                                                 | IOP_REQUEST_FLAG_ADDR_BITS
1365                                                 | ((phy_addr >> 16) & 0xffff0000);
1366         req->header.context = ((phy_addr & 0xffffffff) << 32)
1367                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1368
1369         hba->u.mvfrey.inlist_wptr++;
1370         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1371
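        /* wrap the ring index and flip the toggle bit when the inbound list rolls over */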
1372         if (index == hba->u.mvfrey.list_count) {
1373                 index = 0;
1374                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1375                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1376         }
1377
1378         hba->u.mvfrey.inlist[index].addr = phy_addr;
1379         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1380
1381         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1382         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1383
1384         while (hba->config_done == 0) {
1385                 if (hptiop_sleep(hba, req, PPAUSE,
1386                         "hptctl", HPT_OSM_TIMEOUT)==0)
1387                         continue;
1388                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1389         }
1390         return 0;
1391 }
1392
1393 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1394                                 struct hpt_iop_ioctl_param *pParams)
1395 {
1396         struct hpt_iop_request_ioctl_command *req;
1397
1398         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1399                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1400                 return EFAULT;
1401
1402         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1403         hba->config_done = 0;
1404         hptiop_lock_adapter(hba);
1405         if (pParams->nInBufferSize)
1406                 if (copyin((void *)pParams->lpInBuffer,
1407                                 req->buf, pParams->nInBufferSize))
1408                         goto invalid;
1409         if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1410                 goto invalid;
1411
1412         if (hba->config_done == 1) {
1413                 if (pParams->nOutBufferSize)
1414                         if (copyout(req->buf +
1415                                 ((pParams->nInBufferSize + 3) & ~3),
1416                                 (void *)pParams->lpOutBuffer,
1417                                 pParams->nOutBufferSize))
1418                                 goto invalid;
1419
1420                 if (pParams->lpBytesReturned)
1421                         if (copyout(&req->bytes_returned,
1422                                 (void*)pParams->lpBytesReturned,
1423                                 sizeof(u_int32_t)))
1424                                 goto invalid;
1425                 hptiop_unlock_adapter(hba);
1426                 return 0;
1427         } else {
1428 invalid:
1429                 hptiop_unlock_adapter(hba);
1430                 return EFAULT;
1431         }
1432 }
1433
1434 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1435 {
1436         union ccb           *ccb;
1437
1438         if ((ccb = xpt_alloc_ccb()) == NULL)
1439                 return(ENOMEM);
1440         if (xpt_create_path(&ccb->ccb_h.path, xpt_periph, cam_sim_path(hba->sim),
1441                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1442                 xpt_free_ccb(ccb);
1443                 return(EIO);
1444         }
1445         xpt_rescan(ccb);
1446         return(0);
1447 }
1448
1449 static  bus_dmamap_callback_t   hptiop_map_srb;
1450 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
1451 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
1452 static  bus_dmamap_callback_t   hptiop_mvfrey_map_ctlcfg;
1453
1454 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1455 {
1456         hba->bar0_rid = 0x10;
1457         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1458                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1459
1460         if (hba->bar0_res == NULL) {
1461                 device_printf(hba->pcidev,
1462                         "failed to get iop base adrress.\n");
1463                 return -1;
1464         }
1465         hba->bar0t = rman_get_bustag(hba->bar0_res);
1466         hba->bar0h = rman_get_bushandle(hba->bar0_res);
1467         hba->u.itl.mu = (struct hpt_iopmu_itl *)
1468                                 rman_get_virtual(hba->bar0_res);
1469
1470         if (!hba->u.itl.mu) {
1471                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1472                                         hba->bar0_rid, hba->bar0_res);
1473                 device_printf(hba->pcidev, "alloc mem res failed\n");
1474                 return -1;
1475         }
1476
1477         return 0;
1478 }
1479
1480 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1481 {
1482         hba->bar0_rid = 0x10;
1483         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1484                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1485
1486         if (hba->bar0_res == NULL) {
1487                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1488                 return -1;
1489         }
1490         hba->bar0t = rman_get_bustag(hba->bar0_res);
1491         hba->bar0h = rman_get_bushandle(hba->bar0_res);
1492         hba->u.mv.regs = (struct hpt_iopmv_regs *)
1493                                 rman_get_virtual(hba->bar0_res);
1494
1495         if (!hba->u.mv.regs) {
1496                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1497                                         hba->bar0_rid, hba->bar0_res);
1498                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1499                 return -1;
1500         }
1501
1502         hba->bar2_rid = 0x18;
1503         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1504                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1505
1506         if (hba->bar2_res == NULL) {
1507                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1508                                         hba->bar0_rid, hba->bar0_res);
1509                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1510                 return -1;
1511         }
1512
1513         hba->bar2t = rman_get_bustag(hba->bar2_res);
1514         hba->bar2h = rman_get_bushandle(hba->bar2_res);
1515         hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1516
1517         if (!hba->u.mv.mu) {
1518                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1519                                         hba->bar0_rid, hba->bar0_res);
1520                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1521                                         hba->bar2_rid, hba->bar2_res);
1522                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1523                 return -1;
1524         }
1525
1526         return 0;
1527 }
1528
1529 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1530 {
1531         hba->bar0_rid = 0x10;
1532         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1533                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1534
1535         if (hba->bar0_res == NULL) {
1536                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1537                 return -1;
1538         }
1539         hba->bar0t = rman_get_bustag(hba->bar0_res);
1540         hba->bar0h = rman_get_bushandle(hba->bar0_res);
1541         hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1542                                 rman_get_virtual(hba->bar0_res);
1543
1544         if (!hba->u.mvfrey.config) {
1545                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1546                                         hba->bar0_rid, hba->bar0_res);
1547                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1548                 return -1;
1549         }
1550
1551         hba->bar2_rid = 0x18;
1552         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1553                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1554
1555         if (hba->bar2_res == NULL) {
1556                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1557                                         hba->bar0_rid, hba->bar0_res);
1558                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1559                 return -1;
1560         }
1561
1562         hba->bar2t = rman_get_bustag(hba->bar2_res);
1563         hba->bar2h = rman_get_bushandle(hba->bar2_res);
1564         hba->u.mvfrey.mu =
1565                                         (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1566
1567         if (!hba->u.mvfrey.mu) {
1568                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1569                                         hba->bar0_rid, hba->bar0_res);
1570                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1571                                         hba->bar2_rid, hba->bar2_res);
1572                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1573                 return -1;
1574         }
1575
1576         return 0;
1577 }
1578
1579 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1580 {
1581         if (hba->bar0_res)
1582                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1583                         hba->bar0_rid, hba->bar0_res);
1584 }
1585
1586 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1587 {
1588         if (hba->bar0_res)
1589                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1590                         hba->bar0_rid, hba->bar0_res);
1591         if (hba->bar2_res)
1592                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1593                         hba->bar2_rid, hba->bar2_res);
1594 }
1595
1596 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1597 {
1598         if (hba->bar0_res)
1599                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1600                         hba->bar0_rid, hba->bar0_res);
1601         if (hba->bar2_res)
1602                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1603                         hba->bar2_rid, hba->bar2_res);
1604 }
1605
1606 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1607 {
1608         if (bus_dma_tag_create(hba->parent_dmat,
1609                                 1,
1610                                 0,
1611                                 BUS_SPACE_MAXADDR_32BIT,
1612                                 BUS_SPACE_MAXADDR,
1613                                 NULL, NULL,
1614                                 0x800 - 0x8,
1615                                 1,
1616                                 BUS_SPACE_MAXSIZE_32BIT,
1617                                 BUS_DMA_ALLOCNOW,
1618 #if __FreeBSD_version > 502000
1619                                 NULL,
1620                                 NULL,
1621 #endif
1622                                 &hba->ctlcfg_dmat)) {
1623                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1624                 return -1;
1625         }
1626
1627         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1628 #if __FreeBSD_version>501000
1629                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1630 #else
1631                 BUS_DMA_WAITOK,
1632 #endif
1633                 &hba->ctlcfg_dmamap) != 0) {
1634                         device_printf(hba->pcidev,
1635                                         "bus_dmamem_alloc failed!\n");
1636                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
1637                         return -1;
1638         }
1639
1640         if (bus_dmamap_load(hba->ctlcfg_dmat,
1641                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1642                         MVIOP_IOCTLCFG_SIZE,
1643                         hptiop_mv_map_ctlcfg, hba, 0)) {
1644                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1645                 /* the tag was created above, so free and destroy it unconditionally */
1646                 bus_dmamem_free(hba->ctlcfg_dmat,
1647                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1648                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1649                 return -1;
1650         }
1651
1652         return 0;
1653 }
1654
1655 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1656 {
1657         u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1658
1659         list_count >>= 16;
1660
1661         if (list_count == 0) {
1662                 return -1;
1663         }
1664
1665         hba->u.mvfrey.list_count = list_count;
1666         hba->u.mvfrey.internal_mem_size = 0x800
1667                                                         + list_count * sizeof(struct mvfrey_inlist_entry)
1668                                                         + list_count * sizeof(struct mvfrey_outlist_entry)
1669                                                         + sizeof(int);
1670         if (bus_dma_tag_create(hba->parent_dmat,
1671                                 1,
1672                                 0,
1673                                 BUS_SPACE_MAXADDR_32BIT,
1674                                 BUS_SPACE_MAXADDR,
1675                                 NULL, NULL,
1676                                 hba->u.mvfrey.internal_mem_size,
1677                                 1,
1678                                 BUS_SPACE_MAXSIZE_32BIT,
1679                                 BUS_DMA_ALLOCNOW,
1680 #if __FreeBSD_version > 502000
1681                                 NULL,
1682                                 NULL,
1683 #endif
1684                                 &hba->ctlcfg_dmat)) {
1685                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1686                 return -1;
1687         }
1688
1689         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1690 #if __FreeBSD_version>501000
1691                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1692 #else
1693                 BUS_DMA_WAITOK,
1694 #endif
1695                 &hba->ctlcfg_dmamap) != 0) {
1696                         device_printf(hba->pcidev,
1697                                         "bus_dmamem_alloc failed!\n");
1698                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
1699                         return -1;
1700         }
1701
1702         if (bus_dmamap_load(hba->ctlcfg_dmat,
1703                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1704                         hba->u.mvfrey.internal_mem_size,
1705                         hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1706                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1707                 /* the tag was created above, so free and destroy it unconditionally */
1708                 bus_dmamem_free(hba->ctlcfg_dmat,
1709                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1710                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1711                 return -1;
1712         }
1713
1714         return 0;
1715 }
1716
1717 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1718         return 0;
1719 }
1720
1721 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1722 {
1723         if (hba->ctlcfg_dmat) {
1724                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1725                 bus_dmamem_free(hba->ctlcfg_dmat,
1726                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1727                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1728         }
1729
1730         return 0;
1731 }
1732
1733 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1734 {
1735         if (hba->ctlcfg_dmat) {
1736                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1737                 bus_dmamem_free(hba->ctlcfg_dmat,
1738                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1739                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1740         }
1741
1742         return 0;
1743 }
1744
1745 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1746 {
1747         u_int32_t i = 100;
1748
1749         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1750                 return -1;
1751
1752         /* wait 100ms for MCU ready */
1753         while (i--) {
1754                 DELAY(1000);
1755         }
1756
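        /*
         * Program the in/out list base registers.  The split
         * "(x >> 16) >> 16" looks like a portable way to reach the high
         * 32 bits without risking a shift-by-32 on a 32-bit address type.
         */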
1757         BUS_SPACE_WRT4_MVFREY2(inbound_base,
1758                                                         hba->u.mvfrey.inlist_phy & 0xffffffff);
1759         BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1760                                                         (hba->u.mvfrey.inlist_phy >> 16) >> 16);
1761
1762         BUS_SPACE_WRT4_MVFREY2(outbound_base,
1763                                                         hba->u.mvfrey.outlist_phy & 0xffffffff);
1764         BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1765                                                         (hba->u.mvfrey.outlist_phy >> 16) >> 16);
1766
1767         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1768                                                         hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1769         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1770                                                         (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1771
1772         hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1773                                                                 | CL_POINTER_TOGGLE;
1774         *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1775                                                                 | CL_POINTER_TOGGLE;
1776         hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1777
1778         return 0;
1779 }
1780
1781 /*
1782  * CAM driver interface
1783  */
1784 static device_method_t driver_methods[] = {
1785         /* Device interface */
1786         DEVMETHOD(device_probe,     hptiop_probe),
1787         DEVMETHOD(device_attach,    hptiop_attach),
1788         DEVMETHOD(device_detach,    hptiop_detach),
1789         DEVMETHOD(device_shutdown,  hptiop_shutdown),
1790         { 0, 0 }
1791 };
1792
1793 static struct hptiop_adapter_ops hptiop_itl_ops = {
1794         .family            = INTEL_BASED_IOP,
1795         .iop_wait_ready    = hptiop_wait_ready_itl,
1796         .internal_memalloc = 0,
1797         .internal_memfree  = hptiop_internal_memfree_itl,
1798         .alloc_pci_res     = hptiop_alloc_pci_res_itl,
1799         .release_pci_res   = hptiop_release_pci_res_itl,
1800         .enable_intr       = hptiop_enable_intr_itl,
1801         .disable_intr      = hptiop_disable_intr_itl,
1802         .get_config        = hptiop_get_config_itl,
1803         .set_config        = hptiop_set_config_itl,
1804         .iop_intr          = hptiop_intr_itl,
1805         .post_msg          = hptiop_post_msg_itl,
1806         .post_req          = hptiop_post_req_itl,
1807         .do_ioctl          = hptiop_do_ioctl_itl,
1808         .reset_comm        = 0,
1809 };
1810
1811 static struct hptiop_adapter_ops hptiop_mv_ops = {
1812         .family            = MV_BASED_IOP,
1813         .iop_wait_ready    = hptiop_wait_ready_mv,
1814         .internal_memalloc = hptiop_internal_memalloc_mv,
1815         .internal_memfree  = hptiop_internal_memfree_mv,
1816         .alloc_pci_res     = hptiop_alloc_pci_res_mv,
1817         .release_pci_res   = hptiop_release_pci_res_mv,
1818         .enable_intr       = hptiop_enable_intr_mv,
1819         .disable_intr      = hptiop_disable_intr_mv,
1820         .get_config        = hptiop_get_config_mv,
1821         .set_config        = hptiop_set_config_mv,
1822         .iop_intr          = hptiop_intr_mv,
1823         .post_msg          = hptiop_post_msg_mv,
1824         .post_req          = hptiop_post_req_mv,
1825         .do_ioctl          = hptiop_do_ioctl_mv,
1826         .reset_comm        = 0,
1827 };
1828
1829 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1830         .family            = MVFREY_BASED_IOP,
1831         .iop_wait_ready    = hptiop_wait_ready_mvfrey,
1832         .internal_memalloc = hptiop_internal_memalloc_mvfrey,
1833         .internal_memfree  = hptiop_internal_memfree_mvfrey,
1834         .alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1835         .release_pci_res   = hptiop_release_pci_res_mvfrey,
1836         .enable_intr       = hptiop_enable_intr_mvfrey,
1837         .disable_intr      = hptiop_disable_intr_mvfrey,
1838         .get_config        = hptiop_get_config_mvfrey,
1839         .set_config        = hptiop_set_config_mvfrey,
1840         .iop_intr          = hptiop_intr_mvfrey,
1841         .post_msg          = hptiop_post_msg_mvfrey,
1842         .post_req          = hptiop_post_req_mvfrey,
1843         .do_ioctl          = hptiop_do_ioctl_mvfrey,
1844         .reset_comm        = hptiop_reset_comm_mvfrey,
1845 };
1846
1847 static driver_t hptiop_pci_driver = {
1848         driver_name,
1849         driver_methods,
1850         sizeof(struct hpt_iop_hba)
1851 };
1852
1853 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1854 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1855
1856 static int hptiop_probe(device_t dev)
1857 {
1858         struct hpt_iop_hba *hba;
1859         u_int32_t id;
1860         static char buf[256];
1861         int sas = 0;
1862         struct hptiop_adapter_ops *ops;
1863
1864         if (pci_get_vendor(dev) != 0x1103)
1865                 return (ENXIO);
1866
1867         id = pci_get_device(dev);
1868
1869         switch (id) {
1870                 case 0x4520:
1871                 case 0x4522:
1872                         sas = 1;
1873                         ops = &hptiop_mvfrey_ops;
1874                         break;
1875                 case 0x4210:
1876                 case 0x4211:
1877                 case 0x4310:
1878                 case 0x4311:
1879                 case 0x4320:
1880                 case 0x4321:
1881                 case 0x4322:
1882                         sas = 1;
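                        /* FALLTHROUGH: the SAS models share the ITL ops below */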
1883                 case 0x3220:
1884                 case 0x3320:
1885                 case 0x3410:
1886                 case 0x3520:
1887                 case 0x3510:
1888                 case 0x3511:
1889                 case 0x3521:
1890                 case 0x3522:
1891                 case 0x3530:
1892                 case 0x3540:
1893                 case 0x3560:
1894                         ops = &hptiop_itl_ops;
1895                         break;
1896                 case 0x3020:
1897                 case 0x3120:
1898                 case 0x3122:
1899                         ops = &hptiop_mv_ops;
1900                         break;
1901                 default:
1902                         return (ENXIO);
1903         }
1904
1905         device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1906                 pci_get_bus(dev), pci_get_slot(dev),
1907                 pci_get_function(dev), pci_get_irq(dev));
1908
1909         sprintf(buf, "RocketRAID %x %s Controller",
1910                                 id, sas ? "SAS" : "SATA");
1911         device_set_desc_copy(dev, buf);
1912
1913         hba = (struct hpt_iop_hba *)device_get_softc(dev);
1914         bzero(hba, sizeof(struct hpt_iop_hba));
1915         hba->ops = ops;
1916
1917         KdPrint(("hba->ops=%p\n", hba->ops));
1918         return 0;
1919 }
1920
1921 static int hptiop_attach(device_t dev)
1922 {
1923         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1924         struct hpt_iop_request_get_config  iop_config;
1925         struct hpt_iop_request_set_config  set_config;
1926         int rid = 0;
1927         struct cam_devq *devq;
1928         struct ccb_setasync ccb;
1929         u_int32_t unit = device_get_unit(dev);
1930
1931         device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1932                         unit, driver_version);
1933
1934         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1935                 pci_get_bus(dev), pci_get_slot(dev),
1936                 pci_get_function(dev), hba->ops));
1937
1938 #if __FreeBSD_version >=440000
1939         pci_enable_busmaster(dev);
1940 #endif
1941         hba->pcidev = dev;
1942         hba->pciunit = unit;
1943
1944         if (hba->ops->alloc_pci_res(hba))
1945                 return ENXIO;
1946
1947         if (hba->ops->iop_wait_ready(hba, 2000)) {
1948                 device_printf(dev, "adapter is not ready\n");
1949                 goto release_pci_res;
1950         }
1951
1952 #if (__FreeBSD_version >= 500000)
1953         mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1954 #endif
1955
1956         if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1957                         1,  /* alignment */
1958                         0, /* boundary */
1959                         BUS_SPACE_MAXADDR,  /* lowaddr */
1960                         BUS_SPACE_MAXADDR,  /* highaddr */
1961                         NULL, NULL,         /* filter, filterarg */
1962                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1963                         BUS_SPACE_UNRESTRICTED, /* nsegments */
1964                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1965                         0,      /* flags */
1966 #if __FreeBSD_version>502000
1967                         NULL,   /* lockfunc */
1968                         NULL,       /* lockfuncarg */
1969 #endif
1970                         &hba->parent_dmat   /* tag */))
1971         {
1972                 device_printf(dev, "alloc parent_dmat failed\n");
1973                 goto release_pci_res;
1974         }
1975
1976         if (hba->ops->family == MV_BASED_IOP) {
1977                 if (hba->ops->internal_memalloc(hba)) {
1978                         device_printf(dev, "alloc srb_dmat failed\n");
1979                         goto destroy_parent_tag;
1980                 }
1981         }
1982
1983         if (hba->ops->get_config(hba, &iop_config)) {
1984                 device_printf(dev, "get iop config failed.\n");
1985                 goto get_config_failed;
1986         }
1987
1988         hba->firmware_version = iop_config.firmware_version;
1989         hba->interface_version = iop_config.interface_version;
1990         hba->max_requests = iop_config.max_requests;
1991         hba->max_devices = iop_config.max_devices;
1992         hba->max_request_size = iop_config.request_size;
1993         hba->max_sg_count = iop_config.max_sg_count;
1994
1995         if (hba->ops->family == MVFREY_BASED_IOP) {
1996                 if (hba->ops->internal_memalloc(hba)) {
1997                         device_printf(dev, "alloc srb_dmat failed\n");
1998                         goto destroy_parent_tag;
1999                 }
2000                 if (hba->ops->reset_comm(hba)) {
2001                         device_printf(dev, "reset comm failed\n");
2002                         goto get_config_failed;
2003                 }
2004         }
2005
2006         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
2007                         4,  /* alignment */
2008                         BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
2009                         BUS_SPACE_MAXADDR,  /* lowaddr */
2010                         BUS_SPACE_MAXADDR,  /* highaddr */
2011                         NULL, NULL,         /* filter, filterarg */
2012                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
2013                         hba->max_sg_count,  /* nsegments */
2014                         0x20000,    /* maxsegsize */
2015                         BUS_DMA_ALLOCNOW,       /* flags */
2016 #if __FreeBSD_version>502000
2017                         busdma_lock_mutex,  /* lockfunc */
2018                         &hba->lock,     /* lockfuncarg */
2019 #endif
2020                         &hba->io_dmat   /* tag */))
2021         {
2022                 device_printf(dev, "alloc io_dmat failed\n");
2023                 goto get_config_failed;
2024         }
2025
2026         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
2027                         1,  /* alignment */
2028                         0, /* boundary */
2029                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
2030                         BUS_SPACE_MAXADDR,  /* highaddr */
2031                         NULL, NULL,         /* filter, filterarg */
2032                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
2033                         1,  /* nsegments */
2034                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
2035                         0,      /* flags */
2036 #if __FreeBSD_version>502000
2037                         NULL,   /* lockfunc */
2038                         NULL,       /* lockfuncarg */
2039 #endif
2040                         &hba->srb_dmat  /* tag */))
2041         {
2042                 device_printf(dev, "alloc srb_dmat failed\n");
2043                 goto destroy_io_dmat;
2044         }
2045
2046         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
2047 #if __FreeBSD_version>501000
2048                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
2049 #else
2050                         BUS_DMA_WAITOK,
2051 #endif
2052                         &hba->srb_dmamap) != 0)
2053         {
2054                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
2055                 goto destroy_srb_dmat;
2056         }
2057
2058         if (bus_dmamap_load(hba->srb_dmat,
2059                         hba->srb_dmamap, hba->uncached_ptr,
2060                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
2061                         hptiop_map_srb, hba, 0))
2062         {
2063                 device_printf(dev, "bus_dmamap_load failed!\n");
2064                 goto srb_dmamem_free;
2065         }
2066
2067         if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
2068                 device_printf(dev, "cam_simq_alloc failed\n");
2069                 goto srb_dmamap_unload;
2070         }
2071
2072 #if __FreeBSD_version <700000
2073         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2074                         hba, unit, hba->max_requests - 1, 1, devq);
2075 #else
2076         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2077                         hba, unit, &Giant, hba->max_requests - 1, 1, devq);
2078 #endif
2079         if (!hba->sim) {
2080                 device_printf(dev, "cam_sim_alloc failed\n");
2081                 cam_simq_free(devq);
2082                 goto srb_dmamap_unload;
2083         }
2084 #if __FreeBSD_version <700000
2085         if (xpt_bus_register(hba->sim, 0) != CAM_SUCCESS)
2086 #else
2087         if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2088 #endif
2089         {
2090                 device_printf(dev, "xpt_bus_register failed\n");
2091                 goto free_cam_sim;
2092         }
2093
2094         if (xpt_create_path(&hba->path, /*periph */ NULL,
2095                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2096                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2097                 device_printf(dev, "xpt_create_path failed\n");
2098                 goto deregister_xpt_bus;
2099         }
2100
2101         bzero(&set_config, sizeof(set_config));
2102         set_config.iop_id = unit;
2103         set_config.vbus_id = cam_sim_path(hba->sim);
2104         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2105
2106         if (hba->ops->set_config(hba, &set_config)) {
2107                 device_printf(dev, "set iop config failed.\n");
2108                 goto free_hba_path;
2109         }
2110
2111         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2112         ccb.ccb_h.func_code = XPT_SASYNC_CB;
2113         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2114         ccb.callback = hptiop_async;
2115         ccb.callback_arg = hba->sim;
2116         xpt_action((union ccb *)&ccb);
2117
2118         rid = 0;
2119         if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
2120                         &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2121                 device_printf(dev, "failed to allocate irq!\n");
2122                 goto free_hba_path;
2123         }
2124
2125 #if __FreeBSD_version <700000
2126         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
2127                                 hptiop_pci_intr, hba, &hba->irq_handle))
2128 #else
2129         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
2130                                 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2131 #endif
2132         {
2133                 device_printf(dev, "failed to set up the interrupt handler!\n");
2134                 goto free_irq_resource;
2135         }
2136
2137         if (hptiop_send_sync_msg(hba,
2138                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2139                 device_printf(dev, "failed to start the background task\n");
2140                 goto teardown_irq_resource;
2141         }
2142
2143         hba->ops->enable_intr(hba);
2144         hba->initialized = 1;
2145
2146         hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2147                                 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2148                                 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
2149
2150 #if __FreeBSD_version < 503000
2151         hba->ioctl_dev->si_drv1 = hba;
2152 #endif
2153
2154         return 0;
2155
2156
2157 teardown_irq_resource:
2158         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2159
2160 free_irq_resource:
2161         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2162
2163 free_hba_path:
2164         xpt_free_path(hba->path);
2165
2166 deregister_xpt_bus:
2167         xpt_bus_deregister(cam_sim_path(hba->sim));
2168
2169 free_cam_sim:
2170         cam_sim_free(hba->sim, /*free devq*/ TRUE);
2171
2172 srb_dmamap_unload:
2173         if (hba->uncached_ptr)
2174                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2175
2176 srb_dmamem_free:
2177         if (hba->uncached_ptr)
2178                 bus_dmamem_free(hba->srb_dmat,
2179                         hba->uncached_ptr, hba->srb_dmamap);
2180
2181 destroy_srb_dmat:
2182         if (hba->srb_dmat)
2183                 bus_dma_tag_destroy(hba->srb_dmat);
2184
2185 destroy_io_dmat:
2186         if (hba->io_dmat)
2187                 bus_dma_tag_destroy(hba->io_dmat);
2188
2189 get_config_failed:
2190         hba->ops->internal_memfree(hba);
2191
2192 destroy_parent_tag:
2193         if (hba->parent_dmat)
2194                 bus_dma_tag_destroy(hba->parent_dmat);
2195
2196 release_pci_res:
2197         if (hba->ops->release_pci_res)
2198                 hba->ops->release_pci_res(hba);
2199
2200         return ENXIO;
2201 }
2202
2203 static int hptiop_detach(device_t dev)
2204 {
2205         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2206         int i;
2207         int error = EBUSY;
2208
2209         hptiop_lock_adapter(hba);
2210         for (i = 0; i < hba->max_devices; i++)
2211                 if (hptiop_os_query_remove_device(hba, i)) {
2212                         device_printf(dev, "%d: file system is busy, id=%d\n",
2213                                                 hba->pciunit, i);
2214                         goto out;
2215                 }
2216
2217         if ((error = hptiop_shutdown(dev)) != 0)
2218                 goto out;
2219         if (hptiop_send_sync_msg(hba,
2220                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2221                 goto out;
2222
2223         hptiop_release_resource(hba);
2224         error = 0;
2225 out:
2226         hptiop_unlock_adapter(hba);
2227         return error;
2228 }
2229
2230 static int hptiop_shutdown(device_t dev)
2231 {
2232         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2233
2234         int error = 0;
2235
2236         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2237                 device_printf(dev, "%d: device is busy\n", hba->pciunit);
2238                 return EBUSY;
2239         }
2240
2241         hba->ops->disable_intr(hba);
2242
2243         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2244                 error = EBUSY;
2245
2246         return error;
2247 }
2248
2249 static void hptiop_pci_intr(void *arg)
2250 {
2251         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2252         hptiop_lock_adapter(hba);
2253         hba->ops->iop_intr(hba);
2254         hptiop_unlock_adapter(hba);
2255 }
2256
2257 static void hptiop_poll(struct cam_sim *sim)
2258 {
2259         hptiop_pci_intr(cam_sim_softc(sim));
2260 }
2261
2262 static void hptiop_async(void * callback_arg, u_int32_t code,
2263                                         struct cam_path * path, void * arg)
2264 {
2265 }
2266
2267 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2268 {
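        /* bits set in outbound_intmask mask the source; writing the complement unmasks post-queue and MSG0 interrupts */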
2269         BUS_SPACE_WRT4_ITL(outbound_intmask,
2270                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2271 }
2272
2273 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2274 {
2275         u_int32_t int_mask;
2276
2277         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2278
2279         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2280                         | MVIOP_MU_OUTBOUND_INT_MSG;
2281         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2282 }
2283
2284 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2285 {
2286         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2287         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2288
2289         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2290         BUS_SPACE_RD4_MVFREY2(isr_enable);
2291
2292         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2293         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2294 }
2295
2296 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2297 {
2298         u_int32_t int_mask;
2299
2300         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2301
2302         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2303         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2304         BUS_SPACE_RD4_ITL(outbound_intstatus);
2305 }
2306
2307 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2308 {
2309         u_int32_t int_mask;
2310         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2311
2312         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2313                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2314         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2315         BUS_SPACE_RD4_MV0(outbound_intmask);
2316 }
2317
2318 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2319 {
2320         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2321         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2322
2323         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2324         BUS_SPACE_RD4_MVFREY2(isr_enable);
2325
2326         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2327         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2328 }
2329
2330 static void hptiop_reset_adapter(void *argv)
2331 {
2332         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2333         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2334                 return;
2335         hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2336 }
2337
2338 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2339 {
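        /* pop an SRB off the singly linked free list (NULL when exhausted) */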
2340         struct hpt_iop_srb * srb;
2341
2342         if (hba->srb_list) {
2343                 srb = hba->srb_list;
2344                 hba->srb_list = srb->next;
2345                 return srb;
2346         }
2347
2348         return NULL;
2349 }
2350
2351 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2352 {
2353         srb->next = hba->srb_list;
2354         hba->srb_list = srb;
2355 }
2356
2357 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2358 {
2359         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2360         struct hpt_iop_srb * srb;
2361
2362         switch (ccb->ccb_h.func_code) {
2363
2364         case XPT_SCSI_IO:
2365                 hptiop_lock_adapter(hba);
2366                 if (ccb->ccb_h.target_lun != 0 ||
2367                         ccb->ccb_h.target_id >= hba->max_devices ||
2368                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
2369                 {
2370                         ccb->ccb_h.status = CAM_TID_INVALID;
2371                         xpt_done(ccb);
2372                         goto scsi_done;
2373                 }
2374
2375                 if ((srb = hptiop_get_srb(hba)) == NULL) {
2376                         device_printf(hba->pcidev, "srb allocation failed\n");
2377                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2378                         xpt_done(ccb);
2379                         goto scsi_done;
2380                 }
2381
2382                 srb->ccb = ccb;
2383
2384                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_NONE)
2385                         hptiop_post_scsi_command(srb, NULL, 0, 0);
2386                 else if ((ccb->ccb_h.flags & CAM_SCATTER_VALID) == 0) {
2387                         if ((ccb->ccb_h.flags & CAM_DATA_PHYS) == 0) {
2388                                 int error;
2389
2390                                 error = bus_dmamap_load(hba->io_dmat,
2391                                                 srb->dma_map,
2392                                                 ccb->csio.data_ptr,
2393                                                 ccb->csio.dxfer_len,
2394                                                 hptiop_post_scsi_command,
2395                                                 srb, 0);
2396
2397                                 if (error && error != EINPROGRESS) {
2398                                         device_printf(hba->pcidev,
2399                                                 "%d bus_dmamap_load error %d",
2400                                                 hba->pciunit, error);
2401                                         xpt_freeze_simq(hba->sim, 1);
2402                                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2403 invalid:
2404                                         hptiop_free_srb(hba, srb);
2405                                         xpt_done(ccb);
2406                                         goto scsi_done;
2407                                 }
2408                         }
2409                         else {
2410                                 device_printf(hba->pcidev,
2411                                         "CAM_DATA_PHYS not supported");
2412                                 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2413                                 goto invalid;
2414                         }
2415                 }
2416                 else {
2417                         struct bus_dma_segment *segs;
2418
2419                         if ((ccb->ccb_h.flags & CAM_SG_LIST_PHYS) == 0 ||
2420                                 (ccb->ccb_h.flags & CAM_DATA_PHYS) != 0) {
2421                                 device_printf(hba->pcidev, "SCSI cmd failed");
2422                                 ccb->ccb_h.status = CAM_PROVIDE_FAIL;
2423                                 goto invalid;
2424                         }
2425
2426                         segs = (struct bus_dma_segment *)ccb->csio.data_ptr;
2427                         hptiop_post_scsi_command(srb, segs,
2428                                                 ccb->csio.sglist_cnt, 0);
2429                 }
2430
2431 scsi_done:
2432                 hptiop_unlock_adapter(hba);
2433                 return;
2434
2435         case XPT_RESET_BUS:
2436                 device_printf(hba->pcidev, "reset adapter");
2437                 hptiop_lock_adapter(hba);
2438                 hba->msg_done = 0;
2439                 hptiop_reset_adapter(hba);
2440                 hptiop_unlock_adapter(hba);
2441                 break;
2442
2443         case XPT_GET_TRAN_SETTINGS:
2444         case XPT_SET_TRAN_SETTINGS:
2445                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2446                 break;
2447
2448         case XPT_CALC_GEOMETRY:
2449 #if __FreeBSD_version >= 500000
2450                 cam_calc_geometry(&ccb->ccg, 1);
2451 #else
2452                 ccb->ccg.heads = 255;
2453                 ccb->ccg.secs_per_track = 63;
2454                 ccb->ccg.cylinders = ccb->ccg.volume_size /
2455                                 (ccb->ccg.heads * ccb->ccg.secs_per_track);
2456                 ccb->ccb_h.status = CAM_REQ_CMP;
2457 #endif
2458                 break;
2459
2460         case XPT_PATH_INQ:
2461         {
2462                 struct ccb_pathinq *cpi = &ccb->cpi;
2463
2464                 cpi->version_num = 1;
2465                 cpi->hba_inquiry = PI_SDTR_ABLE;
2466                 cpi->target_sprt = 0;
2467                 cpi->hba_misc = PIM_NOBUSRESET;
2468                 cpi->hba_eng_cnt = 0;
2469                 cpi->max_target = hba->max_devices;
2470                 cpi->max_lun = 0;
2471                 cpi->unit_number = cam_sim_unit(sim);
2472                 cpi->bus_id = cam_sim_bus(sim);
2473                 cpi->initiator_id = hba->max_devices;
2474                 cpi->base_transfer_speed = 3300;
2475
2476                 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2477                 strncpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2478                 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2479                 cpi->transport = XPORT_SPI;
2480                 cpi->transport_version = 2;
2481                 cpi->protocol = PROTO_SCSI;
2482                 cpi->protocol_version = SCSI_REV_2;
2483                 cpi->ccb_h.status = CAM_REQ_CMP;
2484                 break;
2485         }
2486
2487         default:
2488                 ccb->ccb_h.status = CAM_REQ_INVALID;
2489                 break;
2490         }
2491
2492         xpt_done(ccb);
2493         return;
2494 }
2495
2496 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2497                                 struct hpt_iop_srb *srb,
2498                                 bus_dma_segment_t *segs, int nsegs)
2499 {
2500         int idx;
2501         union ccb *ccb = srb->ccb;
2502         u_int8_t *cdb;
2503
2504         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2505                 cdb = ccb->csio.cdb_io.cdb_ptr;
2506         else
2507                 cdb = ccb->csio.cdb_io.cdb_bytes;
2508
2509         KdPrint(("ccb=%p %x-%x-%x\n",
2510                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2511
2512         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2513                 u_int32_t iop_req32;
2514                 struct hpt_iop_request_scsi_command req;
2515
2516                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2517
2518                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2519                         device_printf(hba->pcidev, "invalid req offset\n");
2520                         ccb->ccb_h.status = CAM_BUSY;
2521                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2522                         hptiop_free_srb(hba, srb);
2523                         xpt_done(ccb);
2524                         return;
2525                 }
2526
2527                 if (ccb->csio.dxfer_len && nsegs > 0) {
2528                         struct hpt_iopsg *psg = req.sg_list;
2529                         for (idx = 0; idx < nsegs; idx++, psg++) {
2530                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2531                                 psg->size = segs[idx].ds_len;
2532                                 psg->eot = 0;
2533                         }
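                        /* mark the final S/G entry as end-of-table */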
2534                         psg[-1].eot = 1;
2535                 }
2536
2537                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2538
2539                 req.header.size =
2540                                 offsetof(struct hpt_iop_request_scsi_command, sg_list)
2541                                 + nsegs*sizeof(struct hpt_iopsg);
2542                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2543                 req.header.flags = 0;
2544                 req.header.result = IOP_RESULT_PENDING;
2545                 req.header.context = (u_int64_t)(unsigned long)srb;
2546                 req.dataxfer_length = ccb->csio.dxfer_len;
2547                 req.channel = 0;
2548                 req.target = ccb->ccb_h.target_id;
2549                 req.lun = ccb->ccb_h.target_lun;
2550
2551                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2552                         (u_int8_t *)&req, req.header.size);
2553
2554                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2555                         bus_dmamap_sync(hba->io_dmat,
2556                                 srb->dma_map, BUS_DMASYNC_PREREAD);
2557                 }
2558                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2559                         bus_dmamap_sync(hba->io_dmat,
2560                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
2561
2562                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
2563         } else {
2564                 struct hpt_iop_request_scsi_command *req;
2565
2566                 req = (struct hpt_iop_request_scsi_command *)srb;
2567                 if (ccb->csio.dxfer_len && nsegs > 0) {
2568                         struct hpt_iopsg *psg = req->sg_list;
2569                         for (idx = 0; idx < nsegs; idx++, psg++) {
2570                                 psg->pci_address =
2571                                         (u_int64_t)segs[idx].ds_addr;
2572                                 psg->size = segs[idx].ds_len;
2573                                 psg->eot = 0;
2574                         }
2575                         psg[-1].eot = 1;
2576                 }
2577
2578                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2579
2580                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2581                 req->header.result = IOP_RESULT_PENDING;
2582                 req->dataxfer_length = ccb->csio.dxfer_len;
2583                 req->channel = 0;
2584                 req->target = ccb->ccb_h.target_id;
2585                 req->lun = ccb->ccb_h.target_lun;
2586                 req->header.size =
2587                         offsetof(struct hpt_iop_request_scsi_command, sg_list)
2588                         + nsegs*sizeof(struct hpt_iopsg);
2589                 req->header.context = (u_int64_t)srb->index |
2590                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
2591                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2592
2593                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2594                         bus_dmamap_sync(hba->io_dmat,
2595                                 srb->dma_map, BUS_DMASYNC_PREREAD);
2596                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2597                         bus_dmamap_sync(hba->io_dmat,
2598                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
2599                 }
2600
2601                 if (hba->firmware_version > 0x01020000
2602                         || hba->interface_version > 0x01020000) {
2603                         u_int32_t size_bits;
2604
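                        /* firmware/interface 1.2+ reads a request-size hint from the low bits of the posted address */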
2605                         if (req->header.size < 256)
2606                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2607                         else if (req->header.size < 512)
2608                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2609                         else
2610                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2611                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
2612
2613                         BUS_SPACE_WRT4_ITL(inbound_queue,
2614                                 (u_int32_t)srb->phy_addr | size_bits);
2615                 } else
2616                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2617                                 |IOPMU_QUEUE_ADDR_HOST_BIT);
2618         }
2619 }
2620
2621 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2622                                 struct hpt_iop_srb *srb,
2623                                 bus_dma_segment_t *segs, int nsegs)
2624 {
2625         int idx, size;
2626         union ccb *ccb = srb->ccb;
2627         u_int8_t *cdb;
2628         struct hpt_iop_request_scsi_command *req;
2629         u_int64_t req_phy;
2630
2631         req = (struct hpt_iop_request_scsi_command *)srb;
2632         req_phy = srb->phy_addr;
2633
2634         if (ccb->csio.dxfer_len && nsegs > 0) {
2635                 struct hpt_iopsg *psg = req->sg_list;
2636                 for (idx = 0; idx < nsegs; idx++, psg++) {
2637                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2638                         psg->size = segs[idx].ds_len;
2639                         psg->eot = 0;
2640                 }
2641                 psg[-1].eot = 1;
2642         }
2643         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2644                 cdb = ccb->csio.cdb_io.cdb_ptr;
2645         else
2646                 cdb = ccb->csio.cdb_io.cdb_bytes;
2647
2648         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2649         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2650         req->header.result = IOP_RESULT_PENDING;
2651         req->dataxfer_length = ccb->csio.dxfer_len;
2652         req->channel = 0;
2653         req->target = ccb->ccb_h.target_id;
2654         req->lun = ccb->ccb_h.target_lun;
2655         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2656                                 - sizeof(struct hpt_iopsg)
2657                                 + nsegs * sizeof(struct hpt_iopsg);
2658         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2659                 bus_dmamap_sync(hba->io_dmat,
2660                         srb->dma_map, BUS_DMASYNC_PREREAD);
2661         } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2662                 bus_dmamap_sync(hba->io_dmat,
2663                         srb->dma_map, BUS_DMASYNC_PREWRITE);
2664         }
2665         req->header.context = (u_int64_t)srb->index
2666                                         << MVIOP_REQUEST_NUMBER_START_BIT
2667                                         | MVIOP_CMD_TYPE_SCSI;
2668         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
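             /*
              * The low two bits of the inbound queue entry carry the request
              * size in 256-byte units, capped at 3; larger requests
              * presumably make the firmware read the size from the header
              * itself.
              */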
2669         size = req->header.size >> 8;
2670         hptiop_mv_inbound_write(req_phy
2671                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2672                         | (size > 3 ? 3 : size), hba);
2673 }
2674
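/*
 * Post a SCSI command to an MVFrey-style IOP.  Unlike the older interfaces,
 * requests are queued through an in-memory inbound list: each entry records
 * the request's physical address and length, and the hardware is kicked by
 * writing the updated write pointer to inbound_write_ptr.
 */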
2675 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2676                                 struct hpt_iop_srb *srb,
2677                                 bus_dma_segment_t *segs, int nsegs)
2678 {
2679         int idx, index;
2680         union ccb *ccb = srb->ccb;
2681         u_int8_t *cdb;
2682         struct hpt_iop_request_scsi_command *req;
2683         u_int64_t req_phy;
2684
2685         req = (struct hpt_iop_request_scsi_command *)srb;
2686         req_phy = srb->phy_addr;
2687
2688         if (ccb->csio.dxfer_len && nsegs > 0) {
2689                 struct hpt_iopsg *psg = req->sg_list;
2690                 for (idx = 0; idx < nsegs; idx++, psg++) {
2691                         psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2692                         psg->size = segs[idx].ds_len;
2693                         psg->eot = 0;
2694                 }
2695                 psg[-1].eot = 1;
2696         }
2697         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2698                 cdb = ccb->csio.cdb_io.cdb_ptr;
2699         else
2700                 cdb = ccb->csio.cdb_io.cdb_bytes;
2701
2702         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2703         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2704         req->header.result = IOP_RESULT_PENDING;
2705         req->dataxfer_length = ccb->csio.dxfer_len;
2706         req->channel = 0;
2707         req->target = ccb->ccb_h.target_id;
2708         req->lun = ccb->ccb_h.target_lun;
2709         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2710                                 - sizeof(struct hpt_iopsg)
2711                                 + nsegs * sizeof(struct hpt_iopsg);
2712         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2713                 bus_dmamap_sync(hba->io_dmat,
2714                         srb->dma_map, BUS_DMASYNC_PREREAD);
2715         } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2716                 bus_dmamap_sync(hba->io_dmat,
2717                         srb->dma_map, BUS_DMASYNC_PREWRITE);
2718         }
2719
2720         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2721                                                 | IOP_REQUEST_FLAG_ADDR_BITS
2722                                                 | ((req_phy >> 16) & 0xffff0000);
2723         req->header.context = ((req_phy & 0xffffffff) << 32)
2724                                                 | srb->index << 4
2725                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2726
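             /*
              * Advance the inbound list write pointer.  The low 14 bits
              * index into the list; when they pass list_count they are
              * cleared and CL_POINTER_TOGGLE is flipped so the IOP can tell
              * a full queue from an empty one.
              */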
2727         hba->u.mvfrey.inlist_wptr++;
2728         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2729
2730         if (index == hba->u.mvfrey.list_count) {
2731                 index = 0;
2732                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2733                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2734         }
2735
2736         hba->u.mvfrey.inlist[index].addr = req_phy;
2737         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2738
2739         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2740         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2741
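             /*
              * Arm a 20 second watchdog using the pre-callout(9) timeout(9)
              * API; hptiop_reset_adapter() fires if the request is not
              * completed (and the timeout presumably cancelled) in time.
              */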
2742         if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2743                 ccb->ccb_h.timeout_ch = timeout(hptiop_reset_adapter, hba, 20*hz);
2744         }
2745 }
2746
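/*
 * bus_dmamap_load() callback for SCSI commands: on mapping errors, or when
 * the segment count exceeds what the adapter supports, fail the CCB with
 * CAM_BUSY; otherwise hand the SRB to the interface-specific post routine.
 */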
2747 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2748                                         int nsegs, int error)
2749 {
2750         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2751         union ccb *ccb = srb->ccb;
2752         struct hpt_iop_hba *hba = srb->hba;
2753
2754         if (error || nsegs > hba->max_sg_count) {
2755                 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2756                         ccb->ccb_h.func_code,
2757                         ccb->ccb_h.target_id,
2758                         ccb->ccb_h.target_lun, nsegs));
2759                 ccb->ccb_h.status = CAM_BUSY;
2760                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2761                 hptiop_free_srb(hba, srb);
2762                 xpt_done(ccb);
2763                 return;
2764         }
2765
2766         hba->ops->post_req(hba, srb, segs, nsegs);
2767 }
2768
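/*
 * bus_dmamap_load() callback for the MV control/config buffer: round both
 * the physical and virtual addresses up to a 32-byte boundary.
 */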
2769 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2770                                 int nsegs, int error)
2771 {
2772         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2773         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2774                                 & ~(u_int64_t)0x1F;
2775         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2776                                 & ~0x1F);
2777 }
2778
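/*
 * bus_dmamap_load() callback for the MVFrey control/config buffer.  The
 * 32-byte-aligned region is carved into the control command area (first
 * 0x800 bytes), the inbound and outbound request lists, and the
 * outbound-list copy pointer, recording each piece's virtual and physical
 * address.
 */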
2779 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2780                                 int nsegs, int error)
2781 {
2782         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2783         u_int8_t *p;
2784         u_int64_t phy;
2785         u_int32_t list_count = hba->u.mvfrey.list_count;
2786
2787         phy = ((u_int64_t)segs->ds_addr + 0x1F)
2788                                 & ~(u_int64_t)0x1F;
2789         p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2790                                 & ~0x1F);
2791
2792         hba->ctlcfgcmd_phy = phy;
2793         hba->ctlcfg_ptr = p;
2794
2795         p += 0x800;
2796         phy += 0x800;
2797
2798         hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2799         hba->u.mvfrey.inlist_phy = phy;
2800
2801         p += list_count * sizeof(struct mvfrey_inlist_entry);
2802         phy += list_count * sizeof(struct mvfrey_inlist_entry);
2803
2804         hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2805         hba->u.mvfrey.outlist_phy = phy;
2806
2807         p += list_count * sizeof(struct mvfrey_outlist_entry);
2808         phy += list_count * sizeof(struct mvfrey_outlist_entry);
2809
2810         hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2811         hba->u.mvfrey.outlist_cptr_phy = phy;
2812 }
2813
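/*
 * bus_dmamap_load() callback for the SRB pool: carve the 32-byte-aligned
 * region into HPT_SRB_MAX_QUEUE_SIZE fixed-size SRBs, create a DMA map for
 * each, record its physical address (pre-shifted for ITL IOPs), and push
 * it onto the free list.
 */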
2814 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2815                                 int nsegs, int error)
2816 {
2817         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2818         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2819         struct hpt_iop_srb *srb, *tmp_srb;
2820         int i;
2821
2822         if (error || nsegs == 0) {
2823                 device_printf(hba->pcidev, "hptiop_map_srb error");
2824                 return;
2825         }
2826
2827         /* map srb */
2828         srb = (struct hpt_iop_srb *)
2829                 (((unsigned long)hba->uncached_ptr + 0x1F)
2830                 & ~(unsigned long)0x1F);
2831
2832         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2833                 tmp_srb = (struct hpt_iop_srb *)
2834                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
2835                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2836                         if (bus_dmamap_create(hba->io_dmat,
2837                                                 0, &tmp_srb->dma_map)) {
2838                                 device_printf(hba->pcidev, "dmamap create failed");
2839                                 return;
2840                         }
2841
2842                         bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2843                         tmp_srb->hba = hba;
2844                         tmp_srb->index = i;
2845                         if (hba->ctlcfg_ptr == NULL) { /* ITL IOP */
2846                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2847                                                         (phy_addr >> 5);
2848                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2849                                         tmp_srb->srb_flag =
2850                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2851                         } else {
2852                                 tmp_srb->phy_addr = phy_addr;
2853                         }
2854
2855                         hptiop_free_srb(hba, tmp_srb);
2856                         hba->srb[i] = tmp_srb;
2857                         phy_addr += HPT_SRB_MAX_SIZE;
2858                 }
2859                 else {
2860                         device_printf(hba->pcidev, "invalid alignment");
2861                         return;
2862                 }
2863         }
2864 }
2865
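/*
 * Called when the IOP acknowledges a message; sets the flag that
 * hptiop_send_sync_msg() polls to detect completion.
 */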
2866 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
2867 {
2868         hba->msg_done = 1;
2869 }
2870
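/*
 * Check whether a target can be removed: returns -1 if a "da" peripheral
 * still holds a reference on the device's path, 0 otherwise.
 */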
2871 static  int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2872                                                 int target_id)
2873 {
2874         struct cam_periph       *periph = NULL;
2875         struct cam_path         *path;
2876         int                     status, retval = 0;
2877
2878         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2879
2880         if (status == CAM_REQ_CMP) {
2881                 if ((periph = cam_periph_find(path, "da")) != NULL) {
2882                         if (periph->refcount >= 1) {
2883                                 device_printf(hba->pcidev, "%d ,"
2884                                         "target_id=0x%x,"
2885                                         "refcount=%d",
2886                                     hba->pciunit, target_id, periph->refcount);
2887                                 retval = -1;
2888                         }
2889                 }
2890                 xpt_free_path(path);
2891         }
2892         return retval;
2893 }
2894
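/*
 * Tear down everything attach set up, in reverse order: CAM async callback
 * and path, SIM, control/config DMA memory, per-SRB DMA maps, the SRB pool,
 * DMA tags, the interrupt, BAR resources, and the ioctl device node.
 */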
2895 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2896 {
2897         int i;
2898         if (hba->path) {
2899                 struct ccb_setasync ccb;
2900
2901                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2902                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2903                 ccb.event_enable = 0;
2904                 ccb.callback = hptiop_async;
2905                 ccb.callback_arg = hba->sim;
2906                 xpt_action((union ccb *)&ccb);
2907                 xpt_free_path(hba->path);
2908         }
2909
2910         if (hba->sim) {
2911                 xpt_bus_deregister(cam_sim_path(hba->sim));
2912                 cam_sim_free(hba->sim, TRUE);
2913         }
2914
2915         if (hba->ctlcfg_dmat) {
2916                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2917                 bus_dmamem_free(hba->ctlcfg_dmat,
2918                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2919                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2920         }
2921
2922         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2923                 struct hpt_iop_srb *srb = hba->srb[i];
2924                 if (srb != NULL && srb->dma_map)
2925                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2926         }
2927
2928         if (hba->srb_dmat) {
2929                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2930                 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2931                 bus_dma_tag_destroy(hba->srb_dmat);
2932         }
2933
2934         if (hba->io_dmat)
2935                 bus_dma_tag_destroy(hba->io_dmat);
2936
2937         if (hba->parent_dmat)
2938                 bus_dma_tag_destroy(hba->parent_dmat);
2939
2940         if (hba->irq_handle)
2941                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2942
2943         if (hba->irq_res)
2944                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2945                                         0, hba->irq_res);
2946
2947         if (hba->bar0_res)
2948                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2949                                         hba->bar0_rid, hba->bar0_res);
2950         if (hba->bar2_res)
2951                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2952                                         hba->bar2_rid, hba->bar2_res);
2953         if (hba->ioctl_dev)
2954                 destroy_dev(hba->ioctl_dev);
2955 }