/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.9";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                        u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
        .d_name = driver_name,
        .d_version = D_VERSION,
};

#define hba_from_dev(dev) \
        ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

static int hptiop_open(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba == NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);
        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
                                        int flags, ioctl_thread_t proc)
{
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }
        return ret;
}
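
/*
 * Illustrative sketch (not part of the driver): how a userland management
 * tool might reach the HPT_SCAN_BUS case of hptiop_ioctl() above.  The
 * device node name "/dev/hptiop0" and the visibility of HPT_SCAN_BUS to
 * userland code are assumptions made only for this example.
 */
#if 0
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

#include <dev/hptiop/hptiop.h>  /* HPT_SCAN_BUS (assumed usable here) */

int
main(void)
{
        int fd;

        fd = open("/dev/hptiop0", O_RDWR);      /* assumed node name */
        if (fd < 0) {
                perror("open");
                return (1);
        }
        /* Dispatches to hptiop_rescan_bus() in the switch above. */
        if (ioctl(fd, HPT_SCAN_BUS) < 0)
                perror("ioctl(HPT_SCAN_BUS)");
        close(fd);
        return (0);
}
#endif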

static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                        (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}
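
/*
 * A minimal host-side model (not driver code) of the MV queue arithmetic
 * used by hptiop_mv_outbound_read()/hptiop_mv_inbound_write() above: each
 * slot holds one 64-bit entry transferred as two 32-bit words through the
 * BAR, and head/tail indices wrap at MVIOP_QUEUE_LEN.  The queue length
 * below is a placeholder; the real constant lives in hptiop.h.
 */
#if 0
#include <stdint.h>
#include <string.h>

#define MVIOP_QUEUE_LEN 0x40u           /* placeholder value */

/* Advance a queue index by one slot, wrapping as the driver does. */
static uint32_t
mv_queue_next(uint32_t idx)
{
        if (++idx == MVIOP_QUEUE_LEN)
                idx = 0;
        return (idx);
}

/* Split a 64-bit entry into the two 32-bit words written to the BAR. */
static void
mv_entry_to_words(uint64_t p, uint32_t w[2])
{
        memcpy(w, &p, sizeof(p));       /* same bytes as (u_int32_t *)&p */
}
#endif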

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
        BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        u_int32_t req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba *hba,
                                                        u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req = NULL;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /* iop req */
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch (temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                                index + offsetof(struct hpt_iop_request_scsi_command,
                                dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (dxfer < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    dxfer;
                        else
                                ccb->csio.sense_resid = 0;
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}
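
/*
 * Standalone sketch of the completion-tag decoding performed at the top of
 * hptiop_request_callback_itl(): host-side tags carry the srb slot in the
 * low bits plus flag bits that must be masked off.  The bit values below
 * are placeholders; the real definitions live in hptiop.h.
 */
#if 0
#include <stdint.h>

#define IOPMU_QUEUE_ADDR_HOST_BIT       0x80000000u     /* placeholder */
#define IOPMU_QUEUE_REQUEST_RESULT_BIT  0x40000000u     /* placeholder */

/*
 * Firmware/interface versions above 0x01020000 also fold a result bit
 * into the tag, so both flag bits are cleared to recover the srb slot.
 */
static uint32_t
itl_tag_to_slot(uint32_t index, int new_interface)
{
        uint32_t mask = IOPMU_QUEUE_ADDR_HOST_BIT;

        if (new_interface)
                mask |= IOPMU_QUEUE_REQUEST_RESULT_BIT;
        return (index & ~mask);
}
#endif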

static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header *p;

                        p = (struct hpt_iop_request_header *)
                                ((char *)hba->u.itl.mu + req);
                        temp = bus_space_read_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;
                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}
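
/*
 * Note: the ITL interrupt path above distinguishes exactly two causes:
 * IOPMU_OUTBOUND_INT_MSG0 (an asynchronous firmware message, acknowledged
 * by writing the bit back to outbound_intstatus and forwarded to
 * hptiop_os_message_callback()) and IOPMU_OUTBOUND_INT_POSTQUEUE (completed
 * requests waiting in the outbound queue, drained by
 * hptiop_drain_outbound_queue_itl()).  The nonzero return value lets
 * callers, including the synchronous poll loops below that invoke
 * ops->iop_intr() by hand, know whether any work was found.
 */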

static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
                                                        u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
                                u_int32_t _tag)
{
        u_int32_t req_type = _tag & 0xf;

        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req;
        union ccb *ccb;
        u_int8_t *cdb;

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->config_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                srb = hba->srb[(_tag >> 4) & 0xff];
                req = (struct hpt_iop_request_scsi_command *)srb;

                ccb = (union ccb *)srb->ccb;

                callout_stop(&srb->timeout);

                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                               sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                               MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
                break;
        default:
                device_printf(hba->pcidev, "wrong callback type\n");
                break;
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

static int hptiop_intr_mvfrey(struct hpt_iop_hba *hba)
{
        u_int32_t status, _tag, cptr;
        int ret = 0;

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
        }

        status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
                if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
                        u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
                        hptiop_os_message_callback(hba, msg);
                }
                ret = 1;
        }

        status = BUS_SPACE_RD4_MVFREY2(isr_cause);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
                do {
                        cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
                        while (hba->u.mvfrey.outlist_rptr != cptr) {
                                hba->u.mvfrey.outlist_rptr++;
                                if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
                                        hba->u.mvfrey.outlist_rptr = 0;
                                }

                                _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
                                hptiop_request_callback_mvfrey(hba, _tag);
                                ret = 2;
                        }
                } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
        }

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
        }

        return ret;
}
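
/*
 * Standalone model (not driver code) of the outbound-list consumption loop
 * in hptiop_intr_mvfrey() above: hardware publishes a consumer pointer in
 * memory (*outlist_cptr), and the driver walks its read pointer forward,
 * wrapping at list_count, until the two match; the outer loop then
 * re-checks the consumer pointer to close the race with entries posted in
 * the meantime.  The 0xff index mask follows the driver; the types are
 * illustrative stand-ins.
 */
#if 0
#include <stdint.h>

struct out_entry { uint32_t val; };

struct out_list {
        volatile uint32_t *cptr;        /* hardware-updated consumer ptr */
        uint32_t rptr;                  /* driver read pointer */
        uint32_t count;                 /* number of slots */
        struct out_entry *slots;
};

static void
drain_outlist(struct out_list *ol, void (*cb)(uint32_t tag))
{
        uint32_t cptr;

        do {
                cptr = *ol->cptr & 0xff;
                while (ol->rptr != cptr) {
                        /* Advance first, then consume, as the driver does. */
                        if (++ol->rptr == ol->count)
                                ol->rptr = 0;
                        cb(ol->slots[ol->rptr].val);
                }
        } while (cptr != (*ol->cptr & 0xff));
}
#endif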

static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
                                        u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;
        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i, index;
        u_int64_t phy_addr;
        struct hpt_iop_request_header *reqhdr =
                (struct hpt_iop_request_header *)req;

        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy;
        reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
                        | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
                        | IOP_REQUEST_FLAG_ADDR_BITS
                        | ((phy_addr >> 16) & 0xffff0000);
        reqhdr->context = ((phy_addr & 0xffffffff) << 32)
                        | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

        hba->u.mvfrey.inlist_wptr++;
        index = hba->u.mvfrey.inlist_wptr & 0x3fff;

        if (index == hba->u.mvfrey.list_count) {
                index = 0;
                hba->u.mvfrey.inlist_wptr &= ~0x3fff;
                hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
        }

        hba->u.mvfrey.inlist[index].addr = phy_addr;
        hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

        BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
        BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mvfrey(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}
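
/*
 * Standalone model of the inbound-list write-pointer update shared by
 * hptiop_send_sync_request_mvfrey() above and the mvfrey ioctl posting
 * path below: the low 14 bits (0x3fff) index the list, and a toggle bit
 * flips on every wrap so the firmware can tell a full ring from an empty
 * one.  CL_POINTER_TOGGLE's value is a placeholder here; the real
 * definition lives in hptiop.h.
 */
#if 0
#include <stdint.h>

#define CL_POINTER_TOGGLE       0x00004000u     /* placeholder value */

/* Advance wptr and return the slot index to fill, wrapping like the driver. */
static uint32_t
mvfrey_advance_wptr(uint32_t *wptr, uint32_t list_count)
{
        uint32_t index;

        (*wptr)++;
        index = *wptr & 0x3fff;
        if (index == list_count) {
                index = 0;
                *wptr &= ~0x3fffu;
                *wptr ^= CL_POINTER_TOGGLE;
        }
        return (index);
}
#endif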

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                        u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done ? 0 : -1;
}
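
/*
 * Note: hptiop_send_sync_msg() polls instead of sleeping: each iteration
 * services pending interrupt state by hand through ops->iop_intr() and
 * then waits one millisecond, so `millisec` bounds the total wait.
 * Callers in this file use it, for example, with IOPMU_INBOUND_MSG0_RESET
 * and a 60000 ms budget.
 */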

static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

        if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
            info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
                KdPrint(("hptiop: header size %x/%x type %x/%x",
                         info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
                         info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
                return -1;
        }

        config->interface_version = info->interface_version;
        config->firmware_version = info->firmware_version;
        config->max_requests = info->max_requests;
        config->request_size = info->request_size;
        config->max_sg_count = info->max_sg_count;
        config->data_transfer_length = info->data_transfer_length;
        config->alignment_mask = info->alignment_mask;
        config->max_devices = info->max_devices;
        config->sdram_size = info->sdram_size;

        KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
                 config->max_requests, config->request_size,
                 config->data_transfer_length, config->max_devices,
                 config->sdram_size));

        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                (u_int32_t *)config,
                sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;

        if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
                                        void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                if (copyin((u_int8_t *)user + i, &byte, 1))
                        return -1;
                bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
        }

        return 0;
}

static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
                                        void *user, int size)
{
        unsigned char byte;
        int i;

        for (i = 0; i < size; i++) {
                byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
                if (copyout(&byte, (u_int8_t *)user + i, 1))
                        return -1;
        }

        return 0;
}
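
/*
 * Note: the two helpers above funnel user memory into and out of the
 * controller's BAR one byte at a time: copyin()/copyout() operate on
 * kernel virtual memory and cannot target device memory directly, so each
 * byte is staged through a kernel variable and moved with
 * bus_space_write_1()/bus_space_read_1().
 */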
1163
1164 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1165                                 struct hpt_iop_ioctl_param * pParams)
1166 {
1167         u_int32_t req32;
1168         u_int32_t result;
1169
1170         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1171                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1172                 return EFAULT;
1173         
1174         req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1175         if (req32 == IOPMU_QUEUE_EMPTY)
1176                 return EFAULT;
1177
1178         if (pParams->nInBufferSize)
1179                 if (hptiop_bus_space_copyin(hba, req32 +
1180                         offsetof(struct hpt_iop_request_ioctl_command, buf),
1181                         (void *)pParams->lpInBuffer, pParams->nInBufferSize))
1182                         goto invalid;
1183
1184         if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1185                 goto invalid;
1186
1187         result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1188                         offsetof(struct hpt_iop_request_ioctl_command,
1189                                 header.result));
1190
1191         if (result == IOP_RESULT_SUCCESS) {
1192                 if (pParams->nOutBufferSize)
1193                         if (hptiop_bus_space_copyout(hba, req32 +
1194                                 offsetof(struct hpt_iop_request_ioctl_command, buf) + 
1195                                         ((pParams->nInBufferSize + 3) & ~3),
1196                                 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1197                                 goto invalid;
1198
1199                 if (pParams->lpBytesReturned) {
1200                         if (hptiop_bus_space_copyout(hba, req32 + 
1201                                 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1202                                 (void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
1203                                 goto invalid;
1204                 }
1205
1206                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1207
1208                 return 0;
1209         } else{
1210 invalid:
1211                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1212
1213                 return EFAULT;
1214         }
1215 }
1216
1217 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1218                                 struct hpt_iop_request_ioctl_command *req,
1219                                 struct hpt_iop_ioctl_param *pParams)
1220 {
1221         u_int64_t req_phy;
1222         int size = 0;
1223
1224         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1225                         (hba->max_request_size -
1226                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1227                 device_printf(hba->pcidev, "request size beyond max value");
1228                 return -1;
1229         }
1230
1231         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1232         req->inbuf_size = pParams->nInBufferSize;
1233         req->outbuf_size = pParams->nOutBufferSize;
1234         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1235                                         + pParams->nInBufferSize;
1236         req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1237         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1238         req->header.result = IOP_RESULT_PENDING;
1239         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1240         size = req->header.size >> 8;
1241         size = imin(3, size);
1242         req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1243         hptiop_mv_inbound_write(req_phy, hba);
1244
1245         BUS_SPACE_RD4_MV0(outbound_intmask);
1246
1247         while (hba->config_done == 0) {
1248                 if (hptiop_sleep(hba, req, PPAUSE,
1249                         "hptctl", HPT_OSM_TIMEOUT)==0)
1250                         continue;
1251                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1252         }
1253         return 0;
1254 }
1255
1256 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1257                                 struct hpt_iop_ioctl_param *pParams)
1258 {
1259         struct hpt_iop_request_ioctl_command *req;
1260
1261         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1262                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1263                 return EFAULT;
1264
1265         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1266         hba->config_done = 0;
1267         hptiop_lock_adapter(hba);
1268         if (pParams->nInBufferSize)
1269                 if (copyin((void *)pParams->lpInBuffer,
1270                                 req->buf, pParams->nInBufferSize))
1271                         goto invalid;
1272         if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1273                 goto invalid;
1274
1275         if (hba->config_done == 1) {
1276                 if (pParams->nOutBufferSize)
1277                         if (copyout(req->buf +
1278                                 ((pParams->nInBufferSize + 3) & ~3),
1279                                 (void *)pParams->lpOutBuffer,
1280                                 pParams->nOutBufferSize))
1281                                 goto invalid;
1282
1283                 if (pParams->lpBytesReturned)
1284                         if (copyout(&req->bytes_returned,
1285                                 (void*)pParams->lpBytesReturned,
1286                                 sizeof(u_int32_t)))
1287                                 goto invalid;
1288                 hptiop_unlock_adapter(hba);
1289                 return 0;
1290         } else{
1291 invalid:
1292                 hptiop_unlock_adapter(hba);
1293                 return EFAULT;
1294         }
1295 }
1296
static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t phy_addr;
        u_int32_t index;

        phy_addr = hba->ctlcfgcmd_phy;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value\n");
                return -1;
        }

        req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req->inbuf_size = pParams->nInBufferSize;
        req->outbuf_size = pParams->nOutBufferSize;
        req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                                        + pParams->nInBufferSize;

        req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req->header.result = IOP_RESULT_PENDING;

        req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
                        | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
                        | IOP_REQUEST_FLAG_ADDR_BITS
                        | ((phy_addr >> 16) & 0xffff0000);
        req->header.context = ((phy_addr & 0xffffffff) << 32)
                        | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

        hba->u.mvfrey.inlist_wptr++;
        index = hba->u.mvfrey.inlist_wptr & 0x3fff;

        if (index == hba->u.mvfrey.list_count) {
                index = 0;
                hba->u.mvfrey.inlist_wptr &= ~0x3fff;
                hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
        }

        hba->u.mvfrey.inlist[index].addr = phy_addr;
        hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

        BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
        BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

        while (hba->config_done == 0) {
                if (hptiop_sleep(hba, req, PPAUSE,
                        "hptctl", HPT_OSM_TIMEOUT) == 0)
                        continue;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
        }
        return 0;
}

static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams)
{
        struct hpt_iop_request_ioctl_command *req;

        if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
                (pParams->Magic != HPT_IOCTL_MAGIC32))
                return EFAULT;

        req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
        hba->config_done = 0;
        hptiop_lock_adapter(hba);
        if (pParams->nInBufferSize)
                if (copyin((void *)pParams->lpInBuffer,
                                req->buf, pParams->nInBufferSize))
                        goto invalid;
        if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
                goto invalid;

        if (hba->config_done == 1) {
                if (pParams->nOutBufferSize)
                        if (copyout(req->buf +
                                ((pParams->nInBufferSize + 3) & ~3),
                                (void *)pParams->lpOutBuffer,
                                pParams->nOutBufferSize))
                                goto invalid;

                if (pParams->lpBytesReturned)
                        if (copyout(&req->bytes_returned,
                                (void *)pParams->lpBytesReturned,
                                sizeof(u_int32_t)))
                                goto invalid;
                hptiop_unlock_adapter(hba);
                return 0;
        } else {
invalid:
                hptiop_unlock_adapter(hba);
                return EFAULT;
        }
}

static int hptiop_rescan_bus(struct hpt_iop_hba *hba)
{
        union ccb           *ccb;

        if ((ccb = xpt_alloc_ccb()) == NULL)
                return (ENOMEM);
        if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
                CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                xpt_free_ccb(ccb);
                return (EIO);
        }
        xpt_rescan(ccb);
        return (0);
}

static bus_dmamap_callback_t hptiop_map_srb;
static bus_dmamap_callback_t hptiop_post_scsi_command;
static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;
static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg;

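/*
 * PCI resource helpers.  rid 0x10 selects BAR0 and rid 0x18 selects BAR2
 * in PCI config space; ITL parts only map BAR0, while the MV and MVFrey
 * parts map BAR0 for registers/config and BAR2 for the message unit.
 */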
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev,
                        "failed to get iop base address.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.itl.mu = (struct hpt_iopmu_itl *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.itl.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc mem res failed\n");
                return -1;
        }

        return 0;
}

static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev, "failed to get iop bar0.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.mv.regs = (struct hpt_iopmv_regs *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.mv.regs) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
                return -1;
        }

        hba->bar2_rid = 0x18;
        hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

        if (hba->bar2_res == NULL) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "failed to get iop bar2.\n");
                return -1;
        }

        hba->bar2t = rman_get_bustag(hba->bar2_res);
        hba->bar2h = rman_get_bushandle(hba->bar2_res);
        hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);

        if (!hba->u.mv.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar2_rid, hba->bar2_res);
                device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
                return -1;
        }

        return 0;
}

static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
        hba->bar0_rid = 0x10;
        hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);

        if (hba->bar0_res == NULL) {
                device_printf(hba->pcidev, "failed to get iop bar0.\n");
                return -1;
        }
        hba->bar0t = rman_get_bustag(hba->bar0_res);
        hba->bar0h = rman_get_bushandle(hba->bar0_res);
        hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
                                rman_get_virtual(hba->bar0_res);

        if (!hba->u.mvfrey.config) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
                return -1;
        }

        hba->bar2_rid = 0x18;
        hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
                        SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);

        if (hba->bar2_res == NULL) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                device_printf(hba->pcidev, "failed to get iop bar2.\n");
                return -1;
        }

        hba->bar2t = rman_get_bustag(hba->bar2_res);
        hba->bar2h = rman_get_bushandle(hba->bar2_res);
        hba->u.mvfrey.mu =
                (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);

        if (!hba->u.mvfrey.mu) {
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar0_rid, hba->bar0_res);
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                                        hba->bar2_rid, hba->bar2_res);
                device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
                return -1;
        }

        return 0;
}

static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
}

static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
        if (hba->bar2_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar2_rid, hba->bar2_res);
}

static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
{
        if (hba->bar0_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar0_rid, hba->bar0_res);
        if (hba->bar2_res)
                bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
                        hba->bar2_rid, hba->bar2_res);
}

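/*
 * Internal DMA memory is set up with the usual busdma triad: create a
 * tag describing the allocation (single segment, 32-bit addressable),
 * allocate coherent memory against it, then load the map so the
 * map_ctlcfg callback can record the bus address in hba->ctlcfgcmd_phy.
 */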
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
{
        if (bus_dma_tag_create(hba->parent_dmat,
                                1,
                                0,
                                BUS_SPACE_MAXADDR_32BIT,
                                BUS_SPACE_MAXADDR,
                                NULL, NULL,
                                0x800 - 0x8,
                                1,
                                BUS_SPACE_MAXSIZE_32BIT,
                                BUS_DMA_ALLOCNOW,
                                NULL,
                                NULL,
                                &hba->ctlcfg_dmat)) {
                device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
                return -1;
        }

        if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
                BUS_DMA_WAITOK | BUS_DMA_COHERENT,
                &hba->ctlcfg_dmamap) != 0) {
                device_printf(hba->pcidev,
                                "bus_dmamem_alloc failed!\n");
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
                return -1;
        }

        if (bus_dmamap_load(hba->ctlcfg_dmat,
                        hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
                        MVIOP_IOCTLCFG_SIZE,
                        hptiop_mv_map_ctlcfg, hba, 0)) {
                device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
                if (hba->ctlcfg_dmat) {
                        bus_dmamem_free(hba->ctlcfg_dmat,
                                hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                }
                return -1;
        }

        return 0;
}

static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
{
        u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);

        list_count >>= 16;

        if (list_count == 0) {
                return -1;
        }

        hba->u.mvfrey.list_count = list_count;
        hba->u.mvfrey.internal_mem_size = 0x800
                        + list_count * sizeof(struct mvfrey_inlist_entry)
                        + list_count * sizeof(struct mvfrey_outlist_entry)
                        + sizeof(int);
        if (bus_dma_tag_create(hba->parent_dmat,
                                1,
                                0,
                                BUS_SPACE_MAXADDR_32BIT,
                                BUS_SPACE_MAXADDR,
                                NULL, NULL,
                                hba->u.mvfrey.internal_mem_size,
                                1,
                                BUS_SPACE_MAXSIZE_32BIT,
                                BUS_DMA_ALLOCNOW,
                                NULL,
                                NULL,
                                &hba->ctlcfg_dmat)) {
                device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
                return -1;
        }

        if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
                BUS_DMA_WAITOK | BUS_DMA_COHERENT,
                &hba->ctlcfg_dmamap) != 0) {
                device_printf(hba->pcidev,
                                "bus_dmamem_alloc failed!\n");
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
                return -1;
        }

        if (bus_dmamap_load(hba->ctlcfg_dmat,
                        hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
                        hba->u.mvfrey.internal_mem_size,
                        hptiop_mvfrey_map_ctlcfg, hba, 0)) {
                device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
                if (hba->ctlcfg_dmat) {
                        bus_dmamem_free(hba->ctlcfg_dmat,
                                hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                        bus_dma_tag_destroy(hba->ctlcfg_dmat);
                }
                return -1;
        }

        return 0;
}

static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba)
{
        return 0;
}

static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
{
        if (hba->ctlcfg_dmat) {
                bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
                bus_dmamem_free(hba->ctlcfg_dmat,
                                        hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
        }

        return 0;
}

static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
{
        if (hba->ctlcfg_dmat) {
                bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
                bus_dmamem_free(hba->ctlcfg_dmat,
                                        hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
                bus_dma_tag_destroy(hba->ctlcfg_dmat);
        }

        return 0;
}

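/*
 * After a RESET_COMM message the MVFrey inbound/outbound lists must be
 * re-registered with the controller.  Both producer/consumer pointers are
 * rewound to the last slot with the toggle bit set, so the first real
 * request evidently wraps to index 0 on the opposite toggle phase.
 */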
static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
{
        u_int32_t i = 100;

        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
                return -1;

        /* wait 100ms for MCU ready */
        while (i--) {
                DELAY(1000);
        }

        BUS_SPACE_WRT4_MVFREY2(inbound_base,
                        hba->u.mvfrey.inlist_phy & 0xffffffff);
        BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
                        (hba->u.mvfrey.inlist_phy >> 16) >> 16);

        BUS_SPACE_WRT4_MVFREY2(outbound_base,
                        hba->u.mvfrey.outlist_phy & 0xffffffff);
        BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
                        (hba->u.mvfrey.outlist_phy >> 16) >> 16);

        BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
                        hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
        BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
                        (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);

        hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
                                        | CL_POINTER_TOGGLE;
        *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
                                        | CL_POINTER_TOGGLE;
        hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;

        return 0;
}

/*
 * CAM driver interface
 */
static device_method_t driver_methods[] = {
        /* Device interface */
        DEVMETHOD(device_probe,     hptiop_probe),
        DEVMETHOD(device_attach,    hptiop_attach),
        DEVMETHOD(device_detach,    hptiop_detach),
        DEVMETHOD(device_shutdown,  hptiop_shutdown),
        { 0, 0 }
};

static struct hptiop_adapter_ops hptiop_itl_ops = {
        .family            = INTEL_BASED_IOP,
        .iop_wait_ready    = hptiop_wait_ready_itl,
        .internal_memalloc = NULL,
        .internal_memfree  = hptiop_internal_memfree_itl,
        .alloc_pci_res     = hptiop_alloc_pci_res_itl,
        .release_pci_res   = hptiop_release_pci_res_itl,
        .enable_intr       = hptiop_enable_intr_itl,
        .disable_intr      = hptiop_disable_intr_itl,
        .get_config        = hptiop_get_config_itl,
        .set_config        = hptiop_set_config_itl,
        .iop_intr          = hptiop_intr_itl,
        .post_msg          = hptiop_post_msg_itl,
        .post_req          = hptiop_post_req_itl,
        .do_ioctl          = hptiop_do_ioctl_itl,
        .reset_comm        = NULL,
};

static struct hptiop_adapter_ops hptiop_mv_ops = {
        .family            = MV_BASED_IOP,
        .iop_wait_ready    = hptiop_wait_ready_mv,
        .internal_memalloc = hptiop_internal_memalloc_mv,
        .internal_memfree  = hptiop_internal_memfree_mv,
        .alloc_pci_res     = hptiop_alloc_pci_res_mv,
        .release_pci_res   = hptiop_release_pci_res_mv,
        .enable_intr       = hptiop_enable_intr_mv,
        .disable_intr      = hptiop_disable_intr_mv,
        .get_config        = hptiop_get_config_mv,
        .set_config        = hptiop_set_config_mv,
        .iop_intr          = hptiop_intr_mv,
        .post_msg          = hptiop_post_msg_mv,
        .post_req          = hptiop_post_req_mv,
        .do_ioctl          = hptiop_do_ioctl_mv,
        .reset_comm        = NULL,
};

static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
        .family            = MVFREY_BASED_IOP,
        .iop_wait_ready    = hptiop_wait_ready_mvfrey,
        .internal_memalloc = hptiop_internal_memalloc_mvfrey,
        .internal_memfree  = hptiop_internal_memfree_mvfrey,
        .alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
        .release_pci_res   = hptiop_release_pci_res_mvfrey,
        .enable_intr       = hptiop_enable_intr_mvfrey,
        .disable_intr      = hptiop_disable_intr_mvfrey,
        .get_config        = hptiop_get_config_mvfrey,
        .set_config        = hptiop_set_config_mvfrey,
        .iop_intr          = hptiop_intr_mvfrey,
        .post_msg          = hptiop_post_msg_mvfrey,
        .post_req          = hptiop_post_req_mvfrey,
        .do_ioctl          = hptiop_do_ioctl_mvfrey,
        .reset_comm        = hptiop_reset_comm_mvfrey,
};

static driver_t hptiop_pci_driver = {
        driver_name,
        driver_methods,
        sizeof(struct hpt_iop_hba)
};

DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
MODULE_DEPEND(hptiop, cam, 1, 1, 1);

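/*
 * Probe matches on HighPoint's vendor ID (0x1103) and selects the ops
 * table by device ID.  The 45xx/42xx/43xx SAS IDs intentionally fall
 * through into the SATA cases of the same family after setting sas = 1.
 */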
static int hptiop_probe(device_t dev)
{
        struct hpt_iop_hba *hba;
        u_int32_t id;
        static char buf[256];
        int sas = 0;
        struct hptiop_adapter_ops *ops;

        if (pci_get_vendor(dev) != 0x1103)
                return (ENXIO);

        id = pci_get_device(dev);

        switch (id) {
                case 0x4520:
                case 0x4521:
                case 0x4522:
                        sas = 1;
                        /* FALLTHROUGH */
                case 0x3620:
                case 0x3622:
                case 0x3640:
                        ops = &hptiop_mvfrey_ops;
                        break;
                case 0x4210:
                case 0x4211:
                case 0x4310:
                case 0x4311:
                case 0x4320:
                case 0x4321:
                case 0x4322:
                        sas = 1;
                        /* FALLTHROUGH */
                case 0x3220:
                case 0x3320:
                case 0x3410:
                case 0x3520:
                case 0x3510:
                case 0x3511:
                case 0x3521:
                case 0x3522:
                case 0x3530:
                case 0x3540:
                case 0x3560:
                        ops = &hptiop_itl_ops;
                        break;
                case 0x3020:
                case 0x3120:
                case 0x3122:
                        ops = &hptiop_mv_ops;
                        break;
                default:
                        return (ENXIO);
        }

        device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
                pci_get_bus(dev), pci_get_slot(dev),
                pci_get_function(dev), pci_get_irq(dev));

        snprintf(buf, sizeof(buf), "RocketRAID %x %s Controller",
                                id, sas ? "SAS" : "SATA");
        device_set_desc_copy(dev, buf);

        hba = (struct hpt_iop_hba *)device_get_softc(dev);
        bzero(hba, sizeof(struct hpt_iop_hba));
        hba->ops = ops;

        KdPrint(("hba->ops=%p\n", hba->ops));
        return 0;
}

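/*
 * Attach brings the adapter up in a fixed order: PCI resources, IOP
 * ready handshake, DMA tags, firmware config, CAM SIM/bus/path, IRQ,
 * then the background task.  The error labels at the bottom unwind in
 * the reverse order, so each failure point jumps to the label that
 * tears down everything set up so far.
 */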
static int hptiop_attach(device_t dev)
{
        struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
        struct hpt_iop_request_get_config  iop_config;
        struct hpt_iop_request_set_config  set_config;
        int rid = 0;
        struct cam_devq *devq;
        struct ccb_setasync ccb;
        u_int32_t unit = device_get_unit(dev);

        device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
                        unit, driver_version);

        KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
                pci_get_bus(dev), pci_get_slot(dev),
                pci_get_function(dev), hba->ops));

        pci_enable_busmaster(dev);
        hba->pcidev = dev;
        hba->pciunit = unit;

        if (hba->ops->alloc_pci_res(hba))
                return ENXIO;

        if (hba->ops->iop_wait_ready(hba, 2000)) {
                device_printf(dev, "adapter is not ready\n");
                goto release_pci_res;
        }

        mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);

        if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
                        1,  /* alignment */
                        0, /* boundary */
                        BUS_SPACE_MAXADDR,  /* lowaddr */
                        BUS_SPACE_MAXADDR,  /* highaddr */
                        NULL, NULL,         /* filter, filterarg */
                        BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
                        BUS_SPACE_UNRESTRICTED, /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
                        0,      /* flags */
                        NULL,   /* lockfunc */
                        NULL,       /* lockfuncarg */
                        &hba->parent_dmat   /* tag */)) {
                device_printf(dev, "alloc parent_dmat failed\n");
                goto release_pci_res;
        }

        if (hba->ops->family == MV_BASED_IOP) {
                if (hba->ops->internal_memalloc(hba)) {
                        device_printf(dev, "internal_memalloc failed\n");
                        goto destroy_parent_tag;
                }
        }

        if (hba->ops->get_config(hba, &iop_config)) {
                device_printf(dev, "get iop config failed.\n");
                goto get_config_failed;
        }

        hba->firmware_version = iop_config.firmware_version;
        hba->interface_version = iop_config.interface_version;
        hba->max_requests = iop_config.max_requests;
        hba->max_devices = iop_config.max_devices;
        hba->max_request_size = iop_config.request_size;
        hba->max_sg_count = iop_config.max_sg_count;

        if (hba->ops->family == MVFREY_BASED_IOP) {
                if (hba->ops->internal_memalloc(hba)) {
                        device_printf(dev, "internal_memalloc failed\n");
                        goto destroy_parent_tag;
                }
                if (hba->ops->reset_comm(hba)) {
                        device_printf(dev, "reset comm failed\n");
                        goto get_config_failed;
                }
        }

        if (bus_dma_tag_create(hba->parent_dmat,/* parent */
                        4,  /* alignment */
                        BUS_SPACE_MAXADDR_32BIT + 1, /* boundary */
                        BUS_SPACE_MAXADDR,  /* lowaddr */
                        BUS_SPACE_MAXADDR,  /* highaddr */
                        NULL, NULL,         /* filter, filterarg */
                        PAGE_SIZE * (hba->max_sg_count - 1),  /* maxsize */
                        hba->max_sg_count,  /* nsegments */
                        0x20000,    /* maxsegsize */
                        BUS_DMA_ALLOCNOW,       /* flags */
                        busdma_lock_mutex,  /* lockfunc */
                        &hba->lock,     /* lockfuncarg */
                        &hba->io_dmat   /* tag */)) {
                device_printf(dev, "alloc io_dmat failed\n");
                goto get_config_failed;
        }

        if (bus_dma_tag_create(hba->parent_dmat,/* parent */
                        1,  /* alignment */
                        0, /* boundary */
                        BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
                        BUS_SPACE_MAXADDR,  /* highaddr */
                        NULL, NULL,         /* filter, filterarg */
                        HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
                        1,  /* nsegments */
                        BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
                        0,      /* flags */
                        NULL,   /* lockfunc */
                        NULL,       /* lockfuncarg */
                        &hba->srb_dmat  /* tag */)) {
                device_printf(dev, "alloc srb_dmat failed\n");
                goto destroy_io_dmat;
        }

        if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
                        BUS_DMA_WAITOK | BUS_DMA_COHERENT,
                        &hba->srb_dmamap) != 0) {
                device_printf(dev, "srb bus_dmamem_alloc failed!\n");
                goto destroy_srb_dmat;
        }

        if (bus_dmamap_load(hba->srb_dmat,
                        hba->srb_dmamap, hba->uncached_ptr,
                        (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
                        hptiop_map_srb, hba, 0)) {
                device_printf(dev, "bus_dmamap_load failed!\n");
                goto srb_dmamem_free;
        }

        if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
                device_printf(dev, "cam_simq_alloc failed\n");
                goto srb_dmamap_unload;
        }

        hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
                        hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
        if (!hba->sim) {
                device_printf(dev, "cam_sim_alloc failed\n");
                cam_simq_free(devq);
                goto srb_dmamap_unload;
        }
        hptiop_lock_adapter(hba);
        if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS) {
                device_printf(dev, "xpt_bus_register failed\n");
                goto free_cam_sim;
        }

        if (xpt_create_path(&hba->path, /*periph */ NULL,
                        cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
                        CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
                device_printf(dev, "xpt_create_path failed\n");
                goto deregister_xpt_bus;
        }
        hptiop_unlock_adapter(hba);

        bzero(&set_config, sizeof(set_config));
        set_config.iop_id = unit;
        set_config.vbus_id = cam_sim_path(hba->sim);
        set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;

        if (hba->ops->set_config(hba, &set_config)) {
                device_printf(dev, "set iop config failed.\n");
                goto free_hba_path;
        }

        xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
        ccb.ccb_h.func_code = XPT_SASYNC_CB;
        ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
        ccb.callback = hptiop_async;
        ccb.callback_arg = hba->sim;
        xpt_action((union ccb *)&ccb);

        rid = 0;
        if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
                        &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
                device_printf(dev, "allocate irq failed!\n");
                goto free_hba_path;
        }

        if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
                                NULL, hptiop_pci_intr, hba, &hba->irq_handle)) {
                device_printf(dev, "allocate intr function failed!\n");
                goto free_irq_resource;
        }

        if (hptiop_send_sync_msg(hba,
                        IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
                device_printf(dev, "failed to start background task\n");
                goto teardown_irq_resource;
        }

        hba->ops->enable_intr(hba);
        hba->initialized = 1;

        hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
                                UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
                                S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);

        return 0;

teardown_irq_resource:
        bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);

free_irq_resource:
        bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);

        hptiop_lock_adapter(hba);
free_hba_path:
        xpt_free_path(hba->path);

deregister_xpt_bus:
        xpt_bus_deregister(cam_sim_path(hba->sim));

free_cam_sim:
        cam_sim_free(hba->sim, /*free devq*/ TRUE);
        hptiop_unlock_adapter(hba);

srb_dmamap_unload:
        if (hba->uncached_ptr)
                bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);

srb_dmamem_free:
        if (hba->uncached_ptr)
                bus_dmamem_free(hba->srb_dmat,
                        hba->uncached_ptr, hba->srb_dmamap);

destroy_srb_dmat:
        if (hba->srb_dmat)
                bus_dma_tag_destroy(hba->srb_dmat);

destroy_io_dmat:
        if (hba->io_dmat)
                bus_dma_tag_destroy(hba->io_dmat);

get_config_failed:
        hba->ops->internal_memfree(hba);

destroy_parent_tag:
        if (hba->parent_dmat)
                bus_dma_tag_destroy(hba->parent_dmat);

release_pci_res:
        if (hba->ops->release_pci_res)
                hba->ops->release_pci_res(hba);

        return ENXIO;
}

static int hptiop_detach(device_t dev)
{
        struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
        int i;
        int error = EBUSY;

        hptiop_lock_adapter(hba);
        for (i = 0; i < hba->max_devices; i++)
                if (hptiop_os_query_remove_device(hba, i)) {
                        device_printf(dev, "%d: file system is busy, id=%d\n",
                                                hba->pciunit, i);
                        goto out;
                }

        if ((error = hptiop_shutdown(dev)) != 0)
                goto out;
        if (hptiop_send_sync_msg(hba,
                IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000)) {
                error = EBUSY;
                goto out;
        }
        hptiop_unlock_adapter(hba);

        hptiop_release_resource(hba);
        return (0);
out:
        hptiop_unlock_adapter(hba);
        return error;
}

static int hptiop_shutdown(device_t dev)
{
        struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);

        int error = 0;

        if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
                device_printf(dev, "%d: device is busy\n", hba->pciunit);
                return EBUSY;
        }

        hba->ops->disable_intr(hba);

        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
                error = EBUSY;

        return error;
}

static void hptiop_pci_intr(void *arg)
{
        struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;

        hptiop_lock_adapter(hba);
        hba->ops->iop_intr(hba);
        hptiop_unlock_adapter(hba);
}

static void hptiop_poll(struct cam_sim *sim)
{
        struct hpt_iop_hba *hba;

        hba = cam_sim_softc(sim);
        hba->ops->iop_intr(hba);
}

static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg)
{
}

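/*
 * In the interrupt mask helpers below, each register write is followed
 * by a read of the same (or a related) register; the read appears to
 * serve as the usual posted-PCI-write flush so the mask change takes
 * effect before the caller proceeds.
 */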
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
{
        BUS_SPACE_WRT4_ITL(outbound_intmask,
                ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
}

static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t int_mask;

        int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);

        int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
                        | MVIOP_MU_OUTBOUND_INT_MSG;
        BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
}

static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
{
        BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
        BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);

        BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
        BUS_SPACE_RD4_MVFREY2(isr_enable);

        BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
        BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
}

static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
{
        u_int32_t int_mask;

        int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);

        int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
        BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t int_mask;

        int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);

        int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
                        | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
        BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
{
        BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
        BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);

        BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
        BUS_SPACE_RD4_MVFREY2(isr_enable);

        BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
        BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
}

static void hptiop_reset_adapter(void *argv)
{
        struct hpt_iop_hba *hba = (struct hpt_iop_hba *)argv;

        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
                return;
        hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
}

static void *hptiop_get_srb(struct hpt_iop_hba *hba)
{
        struct hpt_iop_srb *srb;

        if (hba->srb_list) {
                srb = hba->srb_list;
                hba->srb_list = srb->next;
                return srb;
        }

        return NULL;
}

static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
{
        srb->next = hba->srb_list;
        hba->srb_list = srb;
}

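/*
 * hptiop_action is the CAM entry point.  XPT_SCSI_IO is the only
 * asynchronous case: it borrows an SRB, maps the CCB for DMA, and lets
 * hptiop_post_scsi_command finish the post, so it returns without
 * calling xpt_done().  Every other function code completes inline.
 */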
static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
{
        struct hpt_iop_hba *hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
        struct hpt_iop_srb *srb;
        int error;

        switch (ccb->ccb_h.func_code) {

        case XPT_SCSI_IO:
                if (ccb->ccb_h.target_lun != 0 ||
                        ccb->ccb_h.target_id >= hba->max_devices ||
                        (ccb->ccb_h.flags & CAM_CDB_PHYS)) {
                        ccb->ccb_h.status = CAM_TID_INVALID;
                        xpt_done(ccb);
                        return;
                }

                if ((srb = hptiop_get_srb(hba)) == NULL) {
                        device_printf(hba->pcidev, "srb allocation failed\n");
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        xpt_done(ccb);
                        return;
                }

                srb->ccb = ccb;
                error = bus_dmamap_load_ccb(hba->io_dmat,
                                            srb->dma_map,
                                            ccb,
                                            hptiop_post_scsi_command,
                                            srb,
                                            0);

                if (error && error != EINPROGRESS) {
                        device_printf(hba->pcidev,
                                "%d: bus_dmamap_load error %d\n",
                                hba->pciunit, error);
                        xpt_freeze_simq(hba->sim, 1);
                        ccb->ccb_h.status = CAM_REQ_CMP_ERR;
                        hptiop_free_srb(hba, srb);
                        xpt_done(ccb);
                        return;
                }

                return;

        case XPT_RESET_BUS:
                device_printf(hba->pcidev, "reset adapter\n");
                hba->msg_done = 0;
                hptiop_reset_adapter(hba);
                break;

        case XPT_GET_TRAN_SETTINGS:
        case XPT_SET_TRAN_SETTINGS:
                ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
                break;

        case XPT_CALC_GEOMETRY:
                cam_calc_geometry(&ccb->ccg, 1);
                break;

        case XPT_PATH_INQ:
        {
                struct ccb_pathinq *cpi = &ccb->cpi;

                cpi->version_num = 1;
                cpi->hba_inquiry = PI_SDTR_ABLE;
                cpi->target_sprt = 0;
                cpi->hba_misc = PIM_NOBUSRESET;
                cpi->hba_eng_cnt = 0;
                cpi->max_target = hba->max_devices;
                cpi->max_lun = 0;
                cpi->unit_number = cam_sim_unit(sim);
                cpi->bus_id = cam_sim_bus(sim);
                cpi->initiator_id = hba->max_devices;
                cpi->base_transfer_speed = 3300;

                strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
                strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
                strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
                cpi->transport = XPORT_SPI;
                cpi->transport_version = 2;
                cpi->protocol = PROTO_SCSI;
                cpi->protocol_version = SCSI_REV_2;
                cpi->ccb_h.status = CAM_REQ_CMP;
                break;
        }

        default:
                ccb->ccb_h.status = CAM_REQ_INVALID;
                break;
        }

        xpt_done(ccb);
}

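/*
 * The ITL post path has two variants.  With HPT_SRB_FLAG_HIGH_MEM_ACESS
 * set, the request is built on the stack and copied into an IOP-resident
 * slot popped from the inbound queue; otherwise the SRB itself is the
 * request and only its bus address (plus size hint bits on newer
 * firmware) is written to the inbound queue register.
 */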
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs)
{
        int idx;
        union ccb *ccb = srb->ccb;
        u_int8_t *cdb;

        if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                cdb = ccb->csio.cdb_io.cdb_ptr;
        else
                cdb = ccb->csio.cdb_io.cdb_bytes;

        KdPrint(("ccb=%p %x-%x-%x\n",
                ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));

        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
                u_int32_t iop_req32;
                struct hpt_iop_request_scsi_command req;

                iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);

                if (iop_req32 == IOPMU_QUEUE_EMPTY) {
                        device_printf(hba->pcidev, "invalid req offset\n");
                        ccb->ccb_h.status = CAM_BUSY;
                        bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                        hptiop_free_srb(hba, srb);
                        xpt_done(ccb);
                        return;
                }

                if (ccb->csio.dxfer_len && nsegs > 0) {
                        struct hpt_iopsg *psg = req.sg_list;

                        for (idx = 0; idx < nsegs; idx++, psg++) {
                                psg->pci_address = (u_int64_t)segs[idx].ds_addr;
                                psg->size = segs[idx].ds_len;
                                psg->eot = 0;
                        }
                        psg[-1].eot = 1;
                }

                bcopy(cdb, req.cdb, ccb->csio.cdb_len);

                req.header.size =
                                offsetof(struct hpt_iop_request_scsi_command, sg_list)
                                + nsegs * sizeof(struct hpt_iopsg);
                req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
                req.header.flags = 0;
                req.header.result = IOP_RESULT_PENDING;
                req.header.context = (u_int64_t)(unsigned long)srb;
                req.dataxfer_length = ccb->csio.dxfer_len;
                req.channel = 0;
                req.target = ccb->ccb_h.target_id;
                req.lun = ccb->ccb_h.target_lun;

                bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
                        (u_int8_t *)&req, req.header.size);

                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        bus_dmamap_sync(hba->io_dmat,
                                srb->dma_map, BUS_DMASYNC_PREREAD);
                } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
                        bus_dmamap_sync(hba->io_dmat,
                                srb->dma_map, BUS_DMASYNC_PREWRITE);

                BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
        } else {
                struct hpt_iop_request_scsi_command *req;

                req = (struct hpt_iop_request_scsi_command *)srb;
                if (ccb->csio.dxfer_len && nsegs > 0) {
                        struct hpt_iopsg *psg = req->sg_list;

                        for (idx = 0; idx < nsegs; idx++, psg++) {
                                psg->pci_address =
                                        (u_int64_t)segs[idx].ds_addr;
                                psg->size = segs[idx].ds_len;
                                psg->eot = 0;
                        }
                        psg[-1].eot = 1;
                }

                bcopy(cdb, req->cdb, ccb->csio.cdb_len);

                req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
                req->header.result = IOP_RESULT_PENDING;
                req->dataxfer_length = ccb->csio.dxfer_len;
                req->channel = 0;
                req->target = ccb->ccb_h.target_id;
                req->lun = ccb->ccb_h.target_lun;
                req->header.size =
                        offsetof(struct hpt_iop_request_scsi_command, sg_list)
                        + nsegs * sizeof(struct hpt_iopsg);
                req->header.context = (u_int64_t)srb->index |
                                                IOPMU_QUEUE_ADDR_HOST_BIT;
                req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;

                if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                        bus_dmamap_sync(hba->io_dmat,
                                srb->dma_map, BUS_DMASYNC_PREREAD);
                } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
                        bus_dmamap_sync(hba->io_dmat,
                                srb->dma_map, BUS_DMASYNC_PREWRITE);
                }

                if (hba->firmware_version > 0x01020000
                        || hba->interface_version > 0x01020000) {
                        u_int32_t size_bits;

                        if (req->header.size < 256)
                                size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
                        else if (req->header.size < 512)
                                size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
                        else
                                size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
                                                | IOPMU_QUEUE_ADDR_HOST_BIT;

                        BUS_SPACE_WRT4_ITL(inbound_queue,
                                (u_int32_t)srb->phy_addr | size_bits);
                } else
                        BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
                                | IOPMU_QUEUE_ADDR_HOST_BIT);
        }
}

static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs)
{
        int idx, size;
        union ccb *ccb = srb->ccb;
        u_int8_t *cdb;
        struct hpt_iop_request_scsi_command *req;
        u_int64_t req_phy;

        req = (struct hpt_iop_request_scsi_command *)srb;
        req_phy = srb->phy_addr;

        if (ccb->csio.dxfer_len && nsegs > 0) {
                struct hpt_iopsg *psg = req->sg_list;

                for (idx = 0; idx < nsegs; idx++, psg++) {
                        psg->pci_address = (u_int64_t)segs[idx].ds_addr;
                        psg->size = segs[idx].ds_len;
                        psg->eot = 0;
                }
                psg[-1].eot = 1;
        }
        if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                cdb = ccb->csio.cdb_io.cdb_ptr;
        else
                cdb = ccb->csio.cdb_io.cdb_bytes;

        bcopy(cdb, req->cdb, ccb->csio.cdb_len);
        req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
        req->header.result = IOP_RESULT_PENDING;
        req->dataxfer_length = ccb->csio.dxfer_len;
        req->channel = 0;
        req->target = ccb->ccb_h.target_id;
        req->lun = ccb->ccb_h.target_lun;
        req->header.size = sizeof(struct hpt_iop_request_scsi_command)
                                - sizeof(struct hpt_iopsg)
                                + nsegs * sizeof(struct hpt_iopsg);
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(hba->io_dmat,
                        srb->dma_map, BUS_DMASYNC_PREREAD);
        } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
                bus_dmamap_sync(hba->io_dmat,
                        srb->dma_map, BUS_DMASYNC_PREWRITE);
        req->header.context = (u_int64_t)srb->index
                                        << MVIOP_REQUEST_NUMBER_START_BIT
                                        | MVIOP_CMD_TYPE_SCSI;
        req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        size = req->header.size >> 8;
        hptiop_mv_inbound_write(req_phy
                        | MVIOP_MU_QUEUE_ADDR_HOST_BIT
                        | imin(3, size), hba);
}

static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs)
{
        int idx, index;
        union ccb *ccb = srb->ccb;
        u_int8_t *cdb;
        struct hpt_iop_request_scsi_command *req;
        u_int64_t req_phy;

        req = (struct hpt_iop_request_scsi_command *)srb;
        req_phy = srb->phy_addr;

        if (ccb->csio.dxfer_len && nsegs > 0) {
                struct hpt_iopsg *psg = req->sg_list;

                for (idx = 0; idx < nsegs; idx++, psg++) {
                        psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
                        psg->size = segs[idx].ds_len;
                        psg->eot = 0;
                }
                psg[-1].eot = 1;
        }
        if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                cdb = ccb->csio.cdb_io.cdb_ptr;
        else
                cdb = ccb->csio.cdb_io.cdb_bytes;

        bcopy(cdb, req->cdb, ccb->csio.cdb_len);
        req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
        req->header.result = IOP_RESULT_PENDING;
        req->dataxfer_length = ccb->csio.dxfer_len;
        req->channel = 0;
        req->target = ccb->ccb_h.target_id;
        req->lun = ccb->ccb_h.target_lun;
        req->header.size = sizeof(struct hpt_iop_request_scsi_command)
                                - sizeof(struct hpt_iopsg)
                                + nsegs * sizeof(struct hpt_iopsg);
        if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
                bus_dmamap_sync(hba->io_dmat,
                        srb->dma_map, BUS_DMASYNC_PREREAD);
        } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
                bus_dmamap_sync(hba->io_dmat,
                        srb->dma_map, BUS_DMASYNC_PREWRITE);

        req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
                        | IOP_REQUEST_FLAG_ADDR_BITS
                        | ((req_phy >> 16) & 0xffff0000);
        req->header.context = ((req_phy & 0xffffffff) << 32)
                        | srb->index << 4
                        | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;

        hba->u.mvfrey.inlist_wptr++;
        index = hba->u.mvfrey.inlist_wptr & 0x3fff;

        if (index == hba->u.mvfrey.list_count) {
                index = 0;
                hba->u.mvfrey.inlist_wptr &= ~0x3fff;
                hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
        }

        hba->u.mvfrey.inlist[index].addr = req_phy;
        hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;

        BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
        BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

        if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
                callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
        }
}

static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
					int nsegs, int error)
{
	struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
	union ccb *ccb = srb->ccb;
	struct hpt_iop_hba *hba = srb->hba;

	if (error || nsegs > hba->max_sg_count) {
		KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
			ccb->ccb_h.func_code,
			ccb->ccb_h.target_id,
			(uintmax_t)ccb->ccb_h.target_lun, nsegs));
		ccb->ccb_h.status = CAM_BUSY;
		bus_dmamap_unload(hba->io_dmat, srb->dma_map);
		hptiop_free_srb(hba, srb);
		xpt_done(ccb);
		return;
	}

	hba->ops->post_req(hba, srb, segs, nsegs);
}

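/*
 * bus_dmamap_load() callback for the MV control/config buffer: round
 * both the bus address and the KVA up to the next 32-byte boundary, as
 * the firmware requires 32-byte alignment.  The buffer is presumably
 * allocated with enough slack (at least 0x1F bytes) for this rounding
 * to stay inside it.
 */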
static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
				int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;

	hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
				& ~(u_int64_t)0x1F;
	hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
				& ~0x1F);
}

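/*
 * bus_dmamap_load() callback for the MVFrey control buffer.  After the
 * 32-byte alignment, the single DMA allocation is carved up as follows
 * (offsets relative to the aligned base):
 *
 *	0x000	control/config request area (0x800 bytes)
 *	0x800	inbound list, list_count * sizeof(struct mvfrey_inlist_entry)
 *	 ...	outbound list, list_count * sizeof(struct mvfrey_outlist_entry)
 *	 ...	outbound list copy pointer (one u_int32_t)
 *
 * The kernel virtual address and the bus address advance in lockstep, so
 * every region gets a matching *_phy companion.
 */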
static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
				int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
	u_int8_t *p;
	u_int64_t phy;
	u_int32_t list_count = hba->u.mvfrey.list_count;

	phy = ((u_int64_t)segs->ds_addr + 0x1F)
				& ~(u_int64_t)0x1F;
	p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
				& ~0x1F);

	hba->ctlcfgcmd_phy = phy;
	hba->ctlcfg_ptr = p;

	p += 0x800;
	phy += 0x800;

	hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
	hba->u.mvfrey.inlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_inlist_entry);
	phy += list_count * sizeof(struct mvfrey_inlist_entry);

	hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
	hba->u.mvfrey.outlist_phy = phy;

	p += list_count * sizeof(struct mvfrey_outlist_entry);
	phy += list_count * sizeof(struct mvfrey_outlist_entry);

	hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
	hba->u.mvfrey.outlist_cptr_phy = phy;
}

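/*
 * bus_dmamap_load() callback for the SRB pool.  The uncached allocation
 * is carved into HPT_SRB_MAX_QUEUE_SIZE fixed-size SRBs, each on a
 * 32-byte boundary.  Every SRB gets its own DMA map for later I/O, a
 * timeout callout tied to the adapter mutex, and its bus address stashed
 * in phy_addr.  ITL adapters (recognizable by the missing ctlcfg buffer)
 * store the address shifted right by 5, apparently so it fits the 32-bit
 * inbound-queue format, and SRBs above the 32G mark are flagged
 * HPT_SRB_FLAG_HIGH_MEM_ACESS (sic); the other families store the
 * address verbatim.
 */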
static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
				int nsegs, int error)
{
	struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
	bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
	struct hpt_iop_srb *srb, *tmp_srb;
	int i;

	if (error || nsegs == 0) {
		device_printf(hba->pcidev, "hptiop_map_srb error\n");
		return;
	}

	/* carve SRBs out of the 32-byte-aligned uncached region */
	srb = (struct hpt_iop_srb *)
		(((unsigned long)hba->uncached_ptr + 0x1F)
		& ~(unsigned long)0x1F);

	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		tmp_srb = (struct hpt_iop_srb *)
					((char *)srb + i * HPT_SRB_MAX_SIZE);
		if (((unsigned long)tmp_srb & 0x1F) == 0) {
			if (bus_dmamap_create(hba->io_dmat,
						0, &tmp_srb->dma_map)) {
				device_printf(hba->pcidev,
				    "dmamap create failed\n");
				return;
			}

			bzero(tmp_srb, sizeof(struct hpt_iop_srb));
			tmp_srb->hba = hba;
			tmp_srb->index = i;
			if (hba->ctlcfg_ptr == NULL) {	/* ITL IOP */
				tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
							(phy_addr >> 5);
				if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
					tmp_srb->srb_flag =
						HPT_SRB_FLAG_HIGH_MEM_ACESS;
			} else {
				tmp_srb->phy_addr = phy_addr;
			}

			callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
			hptiop_free_srb(hba, tmp_srb);
			hba->srb[i] = tmp_srb;
			phy_addr += HPT_SRB_MAX_SIZE;
		} else {
			device_printf(hba->pcidev, "invalid alignment\n");
			return;
		}
	}
}

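/*
 * Message-completion callback.  hptiop_send_sync_msg() depends on this
 * flag: it clears msg_done, posts a message to the firmware, then spins
 * on the chip interrupt handler until the reply is routed here.
 * Roughly (a sketch, not the verbatim implementation):
 *
 *	hba->msg_done = 0;
 *	hba->ops->post_msg(hba, msg);
 *	while (!hba->msg_done && !timed_out) {
 *		hba->ops->iop_intr(hba);
 *		DELAY(1000);
 *	}
 */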
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg)
{
	hba->msg_done = 1;
}

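/*
 * Before allowing a device removal, ask CAM whether a "da" peripheral is
 * still attached to the target and in use; a nonzero refcount means the
 * disk is open, so return -1 and let the caller refuse the removal.
 */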
static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba,
						int target_id)
{
	struct cam_periph	*periph = NULL;
	struct cam_path		*path;
	int			status, retval = 0;

	status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);

	if (status == CAM_REQ_CMP) {
		if ((periph = cam_periph_find(path, "da")) != NULL) {
			if (periph->refcount >= 1) {
				device_printf(hba->pcidev, "unit %d, "
				    "target_id=0x%x is busy (refcount=%d)\n",
				    hba->pciunit, target_id, periph->refcount);
				retval = -1;
			}
		}
		xpt_free_path(path);
	}
	return retval;
}

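/*
 * Tear down everything attach set up, in roughly the reverse order of
 * creation: the ioctl device node, the CAM async callback and path, the
 * interrupt hook, the SIM, and finally the DMA tags, maps and PCI
 * resources.  Each step is guarded by a pointer check, so the function
 * is safe to call from a partially failed attach.
 */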
static void hptiop_release_resource(struct hpt_iop_hba *hba)
{
	int i;

	if (hba->ioctl_dev)
		destroy_dev(hba->ioctl_dev);

	if (hba->path) {
		struct ccb_setasync ccb;

		xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
		ccb.ccb_h.func_code = XPT_SASYNC_CB;
		ccb.event_enable = 0;
		ccb.callback = hptiop_async;
		ccb.callback_arg = hba->sim;
		xpt_action((union ccb *)&ccb);
		xpt_free_path(hba->path);
	}

	if (hba->irq_handle)
		bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);

	if (hba->sim) {
		hptiop_lock_adapter(hba);
		xpt_bus_deregister(cam_sim_path(hba->sim));
		cam_sim_free(hba->sim, TRUE);
		hptiop_unlock_adapter(hba);
	}

	if (hba->ctlcfg_dmat) {
		bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
		bus_dmamem_free(hba->ctlcfg_dmat,
					hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
		bus_dma_tag_destroy(hba->ctlcfg_dmat);
	}

	for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
		struct hpt_iop_srb *srb = hba->srb[i];

		if (srb == NULL)	/* slot was never mapped */
			continue;
		if (srb->dma_map)
			bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
		callout_drain(&srb->timeout);
	}

	if (hba->srb_dmat) {
		bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
		bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
		bus_dma_tag_destroy(hba->srb_dmat);
	}

	if (hba->io_dmat)
		bus_dma_tag_destroy(hba->io_dmat);

	if (hba->parent_dmat)
		bus_dma_tag_destroy(hba->parent_dmat);

	if (hba->irq_res)
		bus_release_resource(hba->pcidev, SYS_RES_IRQ,
					0, hba->irq_res);

	if (hba->bar0_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar0_rid, hba->bar0_res);
	if (hba->bar2_res)
		bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
					hba->bar2_rid, hba->bar2_res);
	mtx_destroy(&hba->lock);
}