/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/types.h>
#include <sys/cons.h>
#include <sys/time.h>
#include <sys/systm.h>

#include <sys/stat.h>
#include <sys/malloc.h>
#include <sys/conf.h>
#include <sys/libkern.h>
#include <sys/kernel.h>

#include <sys/kthread.h>
#include <sys/mutex.h>
#include <sys/module.h>

#include <sys/eventhandler.h>
#include <sys/bus.h>
#include <sys/taskqueue.h>
#include <sys/ioccom.h>

#include <machine/resource.h>
#include <machine/bus.h>
#include <machine/stdarg.h>
#include <sys/rman.h>

#include <vm/vm.h>
#include <vm/pmap.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>

#include <cam/cam.h>
#include <cam/cam_ccb.h>
#include <cam/cam_sim.h>
#include <cam/cam_xpt_sim.h>
#include <cam/cam_debug.h>
#include <cam/cam_periph.h>
#include <cam/scsi/scsi_all.h>
#include <cam/scsi/scsi_message.h>

#include <dev/hptiop/hptiop.h>

static const char driver_name[] = "hptiop";
static const char driver_version[] = "v1.9";

static devclass_t hptiop_devclass;

static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                u_int32_t msg, u_int32_t millisec);
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
                                                        u_int32_t req);
static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
static int  hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_rescan_bus(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config);
static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config);
static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
static int  hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                        u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static int  hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_ioctl_command *req,
                                struct hpt_iop_ioctl_param *pParams);
static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_srb *srb,
                                bus_dma_segment_t *segs, int nsegs);
static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
static int  hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
static int  hptiop_probe(device_t dev);
static int  hptiop_attach(device_t dev);
static int  hptiop_detach(device_t dev);
static int  hptiop_shutdown(device_t dev);
static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
static void hptiop_poll(struct cam_sim *sim);
static void hptiop_async(void *callback_arg, u_int32_t code,
                                        struct cam_path *path, void *arg);
static void hptiop_pci_intr(void *arg);
static void hptiop_release_resource(struct hpt_iop_hba *hba);
static void hptiop_reset_adapter(void *argv);
static d_open_t hptiop_open;
static d_close_t hptiop_close;
static d_ioctl_t hptiop_ioctl;

static struct cdevsw hptiop_cdevsw = {
        .d_open = hptiop_open,
        .d_close = hptiop_close,
        .d_ioctl = hptiop_ioctl,
        .d_name = driver_name,
        .d_version = D_VERSION,
};

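/*
 * hba_from_dev() maps a control-device unit number back to its softc.
 * The BUS_SPACE_* macros below wrap 4-byte bus_space accesses to the
 * memory-mapped register files of the three supported controller
 * families (ITL via BAR0, MV via BAR0/BAR2, MVFrey via BAR2), naming
 * registers symbolically through offsetof() on the register-layout
 * structs.  They expect a variable named 'hba' to be in scope.
 */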
#define hba_from_dev(dev) \
        ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))

#define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
#define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))

#define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
#define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
                hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
#define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
#define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))

#define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
#define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
                hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))

static int hptiop_open(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        if (hba == NULL)
                return ENXIO;
        if (hba->flag & HPT_IOCTL_FLAG_OPEN)
                return EBUSY;
        hba->flag |= HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_close(ioctl_dev_t dev, int flags,
                                        int devtype, ioctl_thread_t proc)
{
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
        return 0;
}

static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
                                        int flags, ioctl_thread_t proc)
{
        int ret = EFAULT;
        struct hpt_iop_hba *hba = hba_from_dev(dev);

        mtx_lock(&Giant);

        switch (cmd) {
        case HPT_DO_IOCONTROL:
                ret = hba->ops->do_ioctl(hba,
                                (struct hpt_iop_ioctl_param *)data);
                break;
        case HPT_SCAN_BUS:
                ret = hptiop_rescan_bus(hba);
                break;
        }

        mtx_unlock(&Giant);

        return ret;
}

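/*
 * Pop one 64-bit entry from the MV outbound queue.  Returns 0 when the
 * queue is empty (tail has caught up with head); otherwise reads the
 * entry at the tail, advances the tail with wrap-around, and writes the
 * new tail back so the firmware can reuse the slot.
 */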
static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
{
        u_int64_t p;
        u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
        u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);

        if (outbound_tail != outbound_head) {
                bus_space_read_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv,
                                outbound_q[outbound_tail]),
                        (u_int32_t *)&p, 2);

                outbound_tail++;

                if (outbound_tail == MVIOP_QUEUE_LEN)
                        outbound_tail = 0;

                BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
                return p;
        } else
                return 0;
}

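/*
 * Push one 64-bit entry onto the MV inbound queue and ring the inbound
 * doorbell so the IOP notices it.  There is no full-queue check here;
 * callers evidently rely on never having MVIOP_QUEUE_LEN requests
 * outstanding at once.
 */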
static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
{
        u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
        u_int32_t head = inbound_head + 1;

        if (head == MVIOP_QUEUE_LEN)
                head = 0;

        bus_space_write_region_4(hba->bar2t, hba->bar2h,
                        offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
                        (u_int32_t *)&p, 2);
        BUS_SPACE_WRT4_MV2(inbound_head, head);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
}

static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
        BUS_SPACE_RD4_ITL(outbound_intstatus);
}

static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
{

        BUS_SPACE_WRT4_MV2(inbound_msg, msg);
        BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);

        BUS_SPACE_RD4_MV0(outbound_intmask);
}

static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
{
        BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
        BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
}

static int hptiop_wait_ready_itl(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        u_int32_t req = 0;
        int i;

        for (i = 0; i < millisec; i++) {
                req = BUS_SPACE_RD4_ITL(inbound_queue);
                if (req != IOPMU_QUEUE_EMPTY)
                        break;
                DELAY(1000);
        }

        if (req != IOPMU_QUEUE_EMPTY) {
                BUS_SPACE_WRT4_ITL(outbound_queue, req);
                BUS_SPACE_RD4_ITL(outbound_intstatus);
                return 0;
        }

        return -1;
}

static int hptiop_wait_ready_mv(struct hpt_iop_hba *hba, u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba *hba,
                                                        u_int32_t millisec)
{
        if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
                return -1;

        return 0;
}

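/*
 * Completion handler for ITL controllers.  'index' is a value pulled
 * from the outbound queue: host requests (SRBs) encode a slot number
 * plus status bits, while IOP-resident requests are given as an offset
 * into the BAR0 window and must be read through bus_space accessors.
 * In both cases the CCB status is derived from the IOP result code,
 * sense data is copied out on CHECK CONDITION, and the CCB is finished
 * with xpt_done().
 */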
static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
                                                        u_int32_t index)
{
        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req = NULL;
        union ccb *ccb;
        u_int8_t *cdb;
        u_int32_t result, temp, dxfer;
        u_int64_t temp64;

        if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /* host req */
                if (hba->firmware_version > 0x01020000 ||
                        hba->interface_version > 0x01020000) {
                        srb = hba->srb[index & ~(u_int32_t)
                                (IOPMU_QUEUE_ADDR_HOST_BIT
                                | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
                                result = IOP_RESULT_SUCCESS;
                        else
                                result = req->header.result;
                } else {
                        srb = hba->srb[index &
                                ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
                        req = (struct hpt_iop_request_scsi_command *)srb;
                        result = req->header.result;
                }
                dxfer = req->dataxfer_length;
                goto srb_complete;
        }

        /* iop req */
        temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, type));
        result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
                offsetof(struct hpt_iop_request_header, result));
        switch (temp) {
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
        {
                temp64 = 0;
                bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                wakeup((void *)((unsigned long)hba->u.itl.mu + index));
                break;
        }

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                srb = (struct hpt_iop_srb *)(unsigned long)temp64;
                dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
                                index + offsetof(struct hpt_iop_request_scsi_command,
                                dataxfer_length));
srb_complete:
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                switch (result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }

                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;

                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (dxfer < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    dxfer;
                        else
                                ccb->csio.sense_resid = 0;
                        if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) { /* iop */
                                bus_space_read_region_1(hba->bar0t, hba->bar0h,
                                        index + offsetof(struct hpt_iop_request_scsi_command,
                                        sg_list), (u_int8_t *)&ccb->csio.sense_data,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        } else {
                                memcpy(&ccb->csio.sense_data, &req->sg_list,
                                        MIN(dxfer, sizeof(ccb->csio.sense_data)));
                        }
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
                        BUS_SPACE_WRT4_ITL(outbound_queue, index);

                ccb->csio.resid = ccb->csio.dxfer_len - dxfer;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        }
}

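/*
 * Drain the ITL outbound queue and dispatch each entry.  Host requests
 * go straight to hptiop_request_callback_itl().  For IOP-resident
 * requests flagged SYNC_REQUEST, the 64-bit context field serves as a
 * completion handshake: a still-zero context is set to 1 so that a
 * poller such as hptiop_send_sync_request_itl() sees the request
 * finish, while a nonzero context is dispatched to the callback, which
 * wakes any sleeper.
 */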
static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
{
        u_int32_t req, temp;

        while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) != IOPMU_QUEUE_EMPTY) {
                if (req & IOPMU_QUEUE_MASK_HOST_BITS)
                        hptiop_request_callback_itl(hba, req);
                else {
                        struct hpt_iop_request_header *p;

                        p = (struct hpt_iop_request_header *)
                                ((char *)hba->u.itl.mu + req);
                        temp = bus_space_read_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                flags));
                        if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
                                u_int64_t temp64;

                                bus_space_read_region_4(hba->bar0t,
                                        hba->bar0h, req +
                                        offsetof(struct hpt_iop_request_header,
                                                context),
                                        (u_int32_t *)&temp64, 2);
                                if (temp64) {
                                        hptiop_request_callback_itl(hba, req);
                                } else {
                                        temp64 = 1;
                                        bus_space_write_region_4(hba->bar0t,
                                                hba->bar0h, req +
                                                offsetof(struct hpt_iop_request_header,
                                                        context),
                                                (u_int32_t *)&temp64, 2);
                                }
                        } else
                                hptiop_request_callback_itl(hba, req);
                }
        }
}

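/*
 * ITL interrupt handler: acknowledge and deliver firmware messages
 * (MSG0) and drain the outbound post queue.  Returns nonzero if the
 * interrupt was ours.
 */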
static int hptiop_intr_itl(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_ITL(outbound_intstatus);

        if (status & IOPMU_OUTBOUND_INT_MSG0) {
                u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);

                KdPrint(("hptiop: received outbound msg %x\n", msg));
                BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_itl(hba);
                ret = 1;
        }

        return ret;
}

static void hptiop_request_callback_mv(struct hpt_iop_hba *hba,
                                                        u_int64_t _tag)
{
        u_int32_t context = (u_int32_t)_tag;

        if (context & MVIOP_CMD_TYPE_SCSI) {
                struct hpt_iop_srb *srb;
                struct hpt_iop_request_scsi_command *req;
                union ccb *ccb;
                u_int8_t *cdb;

                srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
                req = (struct hpt_iop_request_scsi_command *)srb;
                ccb = (union ccb *)srb->ccb;
                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }
                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                        srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                            sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                                MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
        } else if (context & MVIOP_CMD_TYPE_IOCTL) {
                struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;

                if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup(req);
        } else if (context &
                        (MVIOP_CMD_TYPE_SET_CONFIG |
                                MVIOP_CMD_TYPE_GET_CONFIG))
                hba->config_done = 1;
        else {
                device_printf(hba->pcidev, "wrong callback type\n");
        }
}

static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
                                u_int32_t _tag)
{
        u_int32_t req_type = _tag & 0xf;

        struct hpt_iop_srb *srb;
        struct hpt_iop_request_scsi_command *req;
        union ccb *ccb;
        u_int8_t *cdb;

        switch (req_type) {
        case IOP_REQUEST_TYPE_GET_CONFIG:
        case IOP_REQUEST_TYPE_SET_CONFIG:
                hba->config_done = 1;
                break;

        case IOP_REQUEST_TYPE_SCSI_COMMAND:
                srb = hba->srb[(_tag >> 4) & 0xff];
                req = (struct hpt_iop_request_scsi_command *)srb;

                ccb = (union ccb *)srb->ccb;

                callout_stop(&srb->timeout);

                if (ccb->ccb_h.flags & CAM_CDB_POINTER)
                        cdb = ccb->csio.cdb_io.cdb_ptr;
                else
                        cdb = ccb->csio.cdb_io.cdb_bytes;

                if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        goto scsi_done;
                }

                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        req->header.result = IOP_RESULT_SUCCESS;

                switch (req->header.result) {
                case IOP_RESULT_SUCCESS:
                        switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
                        case CAM_DIR_IN:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTREAD);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        case CAM_DIR_OUT:
                                bus_dmamap_sync(hba->io_dmat,
                                                srb->dma_map, BUS_DMASYNC_POSTWRITE);
                                bus_dmamap_unload(hba->io_dmat, srb->dma_map);
                                break;
                        }
                        ccb->ccb_h.status = CAM_REQ_CMP;
                        break;
                case IOP_RESULT_BAD_TARGET:
                        ccb->ccb_h.status = CAM_DEV_NOT_THERE;
                        break;
                case IOP_RESULT_BUSY:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_INVALID_REQUEST:
                        ccb->ccb_h.status = CAM_REQ_INVALID;
                        break;
                case IOP_RESULT_FAIL:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                case IOP_RESULT_RESET:
                        ccb->ccb_h.status = CAM_BUSY;
                        break;
                case IOP_RESULT_CHECK_CONDITION:
                        memset(&ccb->csio.sense_data, 0,
                               sizeof(ccb->csio.sense_data));
                        if (req->dataxfer_length < ccb->csio.sense_len)
                                ccb->csio.sense_resid = ccb->csio.sense_len -
                                    req->dataxfer_length;
                        else
                                ccb->csio.sense_resid = 0;
                        memcpy(&ccb->csio.sense_data, &req->sg_list,
                               MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
                        ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
                        break;
                default:
                        ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
                        break;
                }
scsi_done:
                ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;

                hptiop_free_srb(hba, srb);
                xpt_done(ccb);
                break;
        case IOP_REQUEST_TYPE_IOCTL_COMMAND:
                if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
                        hba->config_done = 1;
                else
                        hba->config_done = -1;
                wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
                break;
        default:
                device_printf(hba->pcidev, "wrong callback type\n");
                break;
        }
}

static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba *hba)
{
        u_int64_t req;

        while ((req = hptiop_mv_outbound_read(hba))) {
                if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
                        if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
                                hptiop_request_callback_mv(hba, req);
                        }
                }
        }
}

static int hptiop_intr_mv(struct hpt_iop_hba *hba)
{
        u_int32_t status;
        int ret = 0;

        status = BUS_SPACE_RD4_MV0(outbound_doorbell);

        if (status)
                BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);

        if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
                u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);

                KdPrint(("hptiop: received outbound msg %x\n", msg));
                hptiop_os_message_callback(hba, msg);
                ret = 1;
        }

        if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
                hptiop_drain_outbound_queue_mv(hba);
                ret = 1;
        }

        return ret;
}

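/*
 * MVFrey interrupt handler.  Interrupts are masked while the handler
 * runs on an initialized adapter.  Doorbell bits deliver firmware
 * messages; isr_cause signals completions, which are consumed from the
 * outbound list by chasing the firmware's producer index
 * (*outlist_cptr) until it stops moving.
 */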
static int hptiop_intr_mvfrey(struct hpt_iop_hba *hba)
{
        u_int32_t status, _tag, cptr;
        int ret = 0;

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
        }

        status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
                if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
                        u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);

                        hptiop_os_message_callback(hba, msg);
                }
                ret = 1;
        }

        status = BUS_SPACE_RD4_MVFREY2(isr_cause);
        if (status) {
                BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
                do {
                        cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
                        while (hba->u.mvfrey.outlist_rptr != cptr) {
                                hba->u.mvfrey.outlist_rptr++;
                                if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
                                        hba->u.mvfrey.outlist_rptr = 0;
                                }

                                _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
                                hptiop_request_callback_mvfrey(hba, _tag);
                                ret = 2;
                        }
                } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
        }

        if (hba->initialized) {
                BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
        }

        return ret;
}

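/*
 * Post the IOP-resident request at BAR0 offset 'req32' and poll for up
 * to 'millisec' milliseconds, using the request's context field as the
 * done flag (see hptiop_drain_outbound_queue_itl()).  Returns 0 on
 * completion, -1 on timeout.
 */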
static int hptiop_send_sync_request_itl(struct hpt_iop_hba *hba,
                                        u_int32_t req32, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t temp64;

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_itl(hba);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_header, context),
                        (u_int32_t *)&temp64, 2);
                if (temp64)
                        return 0;
                DELAY(1000);
        }

        return -1;
}

static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i;
        u_int64_t phy_addr;

        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy |
                        (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
        ((struct hpt_iop_request_get_config *)req)->header.flags |=
                IOP_REQUEST_FLAG_SYNC_REQUEST |
                IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
        hptiop_mv_inbound_write(phy_addr, hba);
        BUS_SPACE_RD4_MV0(outbound_intmask);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mv(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

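/*
 * MVFrey flavor of the synchronous request path: stamp the request
 * header with the SYNC/OUTPUT_CONTEXT flags and the request's physical
 * address, append it to the inbound list (with wrap-around and pointer
 * toggle), kick inbound_write_ptr, and poll the interrupt handler until
 * config_done is set or the timeout expires.
 */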
static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
                                        void *req, u_int32_t millisec)
{
        u_int32_t i, index;
        u_int64_t phy_addr;
        struct hpt_iop_request_header *reqhdr =
            (struct hpt_iop_request_header *)req;

        hba->config_done = 0;

        phy_addr = hba->ctlcfgcmd_phy;
        reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
                                        | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
                                        | IOP_REQUEST_FLAG_ADDR_BITS
                                        | ((phy_addr >> 16) & 0xffff0000);
        reqhdr->context = ((phy_addr & 0xffffffff) << 32)
                                        | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;

        hba->u.mvfrey.inlist_wptr++;
        index = hba->u.mvfrey.inlist_wptr & 0x3fff;

        if (index == hba->u.mvfrey.list_count) {
                index = 0;
                hba->u.mvfrey.inlist_wptr &= ~0x3fff;
                hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
        }

        hba->u.mvfrey.inlist[index].addr = phy_addr;
        hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;

        BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
        BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);

        for (i = 0; i < millisec; i++) {
                hptiop_intr_mvfrey(hba);
                if (hba->config_done)
                        return 0;
                DELAY(1000);
        }
        return -1;
}

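/*
 * Send a message-unit message (NOP, reset, shutdown, ...) and poll the
 * per-family interrupt handler for up to 'millisec' milliseconds until
 * the message-done callback fires.  Returns 0 on success, -1 on
 * timeout.
 */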
static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
                                        u_int32_t msg, u_int32_t millisec)
{
        u_int32_t i;

        hba->msg_done = 0;
        hba->ops->post_msg(hba, msg);

        for (i = 0; i < millisec; i++) {
                hba->ops->iop_intr(hba);
                if (hba->msg_done)
                        break;
                DELAY(1000);
        }

        return hba->msg_done ? 0 : -1;
}

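/*
 * Fetch the controller configuration (queue depth, request size, SG
 * limits, device count, ...) at attach time.  The ITL flavor stages the
 * request in an IOP-resident slot taken from the inbound queue, the MV
 * flavor reuses the preallocated ctlcfg buffer, and the MVFrey flavor
 * reads a config block the firmware has already deposited in host
 * memory.
 */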
static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        u_int32_t req32;

        config->header.size = sizeof(struct hpt_iop_request_get_config);
        config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);
        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        bus_space_write_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_header) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        bus_space_read_region_4(hba->bar0t, hba->bar0h,
                        req32, (u_int32_t *)config,
                        sizeof(struct hpt_iop_request_get_config) >> 2);

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_get_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: get config send cmd failed"));
                return -1;
        }

        *config = *req;
        return 0;
}

static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_get_config *config)
{
        struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;

        if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
            info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
                KdPrint(("hptiop: header size %x/%x type %x/%x",
                         info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
                         info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
                return -1;
        }

        config->interface_version = info->interface_version;
        config->firmware_version = info->firmware_version;
        config->max_requests = info->max_requests;
        config->request_size = info->request_size;
        config->max_sg_count = info->max_sg_count;
        config->data_transfer_length = info->data_transfer_length;
        config->alignment_mask = info->alignment_mask;
        config->max_devices = info->max_devices;
        config->sdram_size = info->sdram_size;

        KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
                 config->max_requests, config->request_size,
                 config->data_transfer_length, config->max_devices,
                 config->sdram_size));

        return 0;
}

static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        u_int32_t req32;

        req32 = BUS_SPACE_RD4_ITL(inbound_queue);

        if (req32 == IOPMU_QUEUE_EMPTY)
                return -1;

        config->header.size = sizeof(struct hpt_iop_request_set_config);
        config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        config->header.result = IOP_RESULT_PENDING;
        config->header.context = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
                (u_int32_t *)config,
                sizeof(struct hpt_iop_request_set_config) >> 2);

        if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        BUS_SPACE_WRT4_ITL(outbound_queue, req32);

        return 0;
}

static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.flags = 0;
        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;
        req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;

        if (hptiop_send_sync_request_mv(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
                                struct hpt_iop_request_set_config *config)
{
        struct hpt_iop_request_set_config *req;

        if (!(req = hba->ctlcfg_ptr))
                return -1;

        memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
                (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
                sizeof(struct hpt_iop_request_set_config) -
                        sizeof(struct hpt_iop_request_header));

        req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
        req->header.size = sizeof(struct hpt_iop_request_set_config);
        req->header.result = IOP_RESULT_PENDING;

        if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
                KdPrint(("hptiop: set config send cmd failed"));
                return -1;
        }

        return 0;
}

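/*
 * Build an ioctl pass-through request in the ITL IOP-resident slot
 * 'req32' and post it, then sleep on the request's address until the
 * completion callback clears the context handshake.  If the sleep times
 * out, the adapter is reset and the handshake re-checked rather than
 * returning early.
 */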
static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
                                u_int32_t req32,
                                struct hpt_iop_ioctl_param *pParams)
{
        u_int64_t temp64;
        struct hpt_iop_request_ioctl_command req;

        if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
                        (hba->max_request_size -
                        offsetof(struct hpt_iop_request_ioctl_command, buf))) {
                device_printf(hba->pcidev, "request size beyond max value");
                return -1;
        }

        req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
                + pParams->nInBufferSize;
        req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
        req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
        req.header.result = IOP_RESULT_PENDING;
        req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
        req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
        req.inbuf_size = pParams->nInBufferSize;
        req.outbuf_size = pParams->nOutBufferSize;
        req.bytes_returned = 0;

        bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
                offsetof(struct hpt_iop_request_ioctl_command, buf) >> 2);

        hptiop_lock_adapter(hba);

        BUS_SPACE_WRT4_ITL(inbound_queue, req32);
        BUS_SPACE_RD4_ITL(outbound_intstatus);

        bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                offsetof(struct hpt_iop_request_ioctl_command, header.context),
                (u_int32_t *)&temp64, 2);
        while (temp64) {
                if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
                                PPAUSE, "hptctl", HPT_OSM_TIMEOUT) == 0)
                        break;
                hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
                bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
                        offsetof(struct hpt_iop_request_ioctl_command,
                                header.context),
                        (u_int32_t *)&temp64, 2);
        }

        hptiop_unlock_adapter(hba);
        return 0;
}

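/*
 * copyin()/copyout() equivalents for ITL ioctl buffers that live in the
 * controller's BAR0 window: user memory cannot be copied to bus space
 * directly, so the transfer goes byte by byte through bus_space
 * accessors.  Slow, but only used on the ioctl path.
 */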
1139 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1140                                                                         void *user, int size)
1141 {
1142         unsigned char byte;
1143         int i;
1144
1145         for (i=0; i<size; i++) {
1146                 if (copyin((u_int8_t *)user + i, &byte, 1))
1147                         return -1;
1148                 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1149         }
1150
1151         return 0;
1152 }
1153
1154 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1155                                                                         void *user, int size)
1156 {
1157         unsigned char byte;
1158         int i;
1159
1160         for (i=0; i<size; i++) {
1161                 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1162                 if (copyout(&byte, (u_int8_t *)user + i, 1))
1163                         return -1;
1164         }
1165
1166         return 0;
1167 }
1168
1169 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1170                                 struct hpt_iop_ioctl_param * pParams)
1171 {
1172         u_int32_t req32;
1173         u_int32_t result;
1174
1175         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1176                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1177                 return EFAULT;
1178         
1179         req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1180         if (req32 == IOPMU_QUEUE_EMPTY)
1181                 return EFAULT;
1182
1183         if (pParams->nInBufferSize)
1184                 if (hptiop_bus_space_copyin(hba, req32 +
1185                         offsetof(struct hpt_iop_request_ioctl_command, buf),
1186                         (void *)pParams->lpInBuffer, pParams->nInBufferSize))
1187                         goto invalid;
1188
1189         if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1190                 goto invalid;
1191
1192         result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1193                         offsetof(struct hpt_iop_request_ioctl_command,
1194                                 header.result));
1195
1196         if (result == IOP_RESULT_SUCCESS) {
1197                 if (pParams->nOutBufferSize)
1198                         if (hptiop_bus_space_copyout(hba, req32 +
1199                                 offsetof(struct hpt_iop_request_ioctl_command, buf) + 
1200                                         ((pParams->nInBufferSize + 3) & ~3),
1201                                 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1202                                 goto invalid;
1203
1204                 if (pParams->lpBytesReturned) {
1205                         if (hptiop_bus_space_copyout(hba, req32 + 
1206                                 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1207                                 (void *)pParams->lpBytesReturned, sizeof(unsigned  long)))
1208                                 goto invalid;
1209                 }
1210
1211                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1212
1213                 return 0;
1214         } else{
1215 invalid:
1216                 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1217
1218                 return EFAULT;
1219         }
1220 }
1221
1222 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1223                                 struct hpt_iop_request_ioctl_command *req,
1224                                 struct hpt_iop_ioctl_param *pParams)
1225 {
1226         u_int64_t req_phy;
1227         int size = 0;
1228
1229         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1230                         (hba->max_request_size -
1231                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1232                 device_printf(hba->pcidev, "request size beyond max value");
1233                 return -1;
1234         }
1235
1236         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1237         req->inbuf_size = pParams->nInBufferSize;
1238         req->outbuf_size = pParams->nOutBufferSize;
1239         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1240                                         + pParams->nInBufferSize;
1241         req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1242         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1243         req->header.result = IOP_RESULT_PENDING;
1244         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1245         size = req->header.size >> 8;
1246         size = imin(3, size);
1247         req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1248         hptiop_mv_inbound_write(req_phy, hba);
1249
1250         BUS_SPACE_RD4_MV0(outbound_intmask);
1251
1252         while (hba->config_done == 0) {
1253                 if (hptiop_sleep(hba, req, PPAUSE,
1254                         "hptctl", HPT_OSM_TIMEOUT)==0)
1255                         continue;
1256                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1257         }
1258         return 0;
1259 }
1260
1261 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1262                                 struct hpt_iop_ioctl_param *pParams)
1263 {
1264         struct hpt_iop_request_ioctl_command *req;
1265
1266         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1267                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1268                 return EFAULT;
1269
1270         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1271         hba->config_done = 0;
1272         hptiop_lock_adapter(hba);
1273         if (pParams->nInBufferSize)
1274                 if (copyin((void *)pParams->lpInBuffer,
1275                                 req->buf, pParams->nInBufferSize))
1276                         goto invalid;
1277         if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1278                 goto invalid;
1279
1280         if (hba->config_done == 1) {
1281                 if (pParams->nOutBufferSize)
1282                         if (copyout(req->buf +
1283                                 ((pParams->nInBufferSize + 3) & ~3),
1284                                 (void *)pParams->lpOutBuffer,
1285                                 pParams->nOutBufferSize))
1286                                 goto invalid;
1287
1288                 if (pParams->lpBytesReturned)
1289                         if (copyout(&req->bytes_returned,
1290                                 (void*)pParams->lpBytesReturned,
1291                                 sizeof(u_int32_t)))
1292                                 goto invalid;
1293                 hptiop_unlock_adapter(hba);
1294                 return 0;
1295         } else{
1296 invalid:
1297                 hptiop_unlock_adapter(hba);
1298                 return EFAULT;
1299         }
1300 }
1301
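/*
 * Post an ioctl request on the MVFrey inbound list: fill in the request
 * header, advance the toggling inbound write pointer, publish the
 * request's bus address and length, and sleep until the interrupt
 * handler flags config_done, resetting the IOP if the wait times out.
 */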
1302 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1303                                 struct hpt_iop_request_ioctl_command *req,
1304                                 struct hpt_iop_ioctl_param *pParams)
1305 {
1306         u_int64_t phy_addr;
1307         u_int32_t index;
1308
1309         phy_addr = hba->ctlcfgcmd_phy;
1310
1311         if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1312                         (hba->max_request_size -
1313                         offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1314                 device_printf(hba->pcidev, "request size beyond max value\n");
1315                 return -1;
1316         }
1317
1318         req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1319         req->inbuf_size = pParams->nInBufferSize;
1320         req->outbuf_size = pParams->nOutBufferSize;
1321         req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1322                                         + pParams->nInBufferSize;
1323
1324         req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1325         req->header.result = IOP_RESULT_PENDING;
1326
1327         req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1328                                                 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1329                                                 | IOP_REQUEST_FLAG_ADDR_BITS
1330                                                 | ((phy_addr >> 16) & 0xffff0000);
1331         req->header.context = ((phy_addr & 0xffffffff) << 32)
1332                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1333
1334         hba->u.mvfrey.inlist_wptr++;
1335         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1336
1337         if (index == hba->u.mvfrey.list_count) {
1338                 index = 0;
1339                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1340                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1341         }
1342
1343         hba->u.mvfrey.inlist[index].addr = phy_addr;
1344         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1345
1346         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1347         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1348
1349         while (hba->config_done == 0) {
1350                 if (hptiop_sleep(hba, req, PPAUSE,
1351                         "hptctl", HPT_OSM_TIMEOUT) == 0)
1352                         continue;
1353                 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1354         }
1355         return 0;
1356 }
1357
1358 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1359                                 struct hpt_iop_ioctl_param *pParams)
1360 {
1361         struct hpt_iop_request_ioctl_command *req;
1362
1363         if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1364                 (pParams->Magic != HPT_IOCTL_MAGIC32))
1365                 return EFAULT;
1366
1367         req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1368         hba->config_done = 0;
1369         hptiop_lock_adapter(hba);
1370         if (pParams->nInBufferSize)
1371                 if (copyin((void *)pParams->lpInBuffer,
1372                                 req->buf, pParams->nInBufferSize))
1373                         goto invalid;
1374         if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1375                 goto invalid;
1376
1377         if (hba->config_done == 1) {
1378                 if (pParams->nOutBufferSize)
1379                         if (copyout(req->buf +
1380                                 ((pParams->nInBufferSize + 3) & ~3),
1381                                 (void *)pParams->lpOutBuffer,
1382                                 pParams->nOutBufferSize))
1383                                 goto invalid;
1384
1385                 if (pParams->lpBytesReturned)
1386                         if (copyout(&req->bytes_returned,
1387                                 (void*)pParams->lpBytesReturned,
1388                                 sizeof(u_int32_t)))
1389                                 goto invalid;
1390                 hptiop_unlock_adapter(hba);
1391                 return 0;
1392         } else {
1393 invalid:
1394                 hptiop_unlock_adapter(hba);
1395                 return EFAULT;
1396         }
1397 }
1398
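/* Ask CAM to rescan every target and LUN behind this SIM. */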
1399 static int  hptiop_rescan_bus(struct hpt_iop_hba * hba)
1400 {
1401         union ccb           *ccb;
1402
1403         if ((ccb = xpt_alloc_ccb()) == NULL)
1404                 return(ENOMEM);
1405         if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1406                 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1407                 xpt_free_ccb(ccb);
1408                 return(EIO);
1409         }
1410         xpt_rescan(ccb);
1411         return(0);
1412 }
1413
1414 static  bus_dmamap_callback_t   hptiop_map_srb;
1415 static  bus_dmamap_callback_t   hptiop_post_scsi_command;
1416 static  bus_dmamap_callback_t   hptiop_mv_map_ctlcfg;
1417 static  bus_dmamap_callback_t   hptiop_mvfrey_map_ctlcfg;
1418
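/*
 * Intel-family resource setup: map BAR0 (rid 0x10); the message unit
 * registers live directly in this window.
 */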
1419 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1420 {
1421         hba->bar0_rid = 0x10;
1422         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1423                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1424
1425         if (hba->bar0_res == NULL) {
1426                 device_printf(hba->pcidev,
1427                         "failed to get iop base address.\n");
1428                 return -1;
1429         }
1430         hba->bar0t = rman_get_bustag(hba->bar0_res);
1431         hba->bar0h = rman_get_bushandle(hba->bar0_res);
1432         hba->u.itl.mu = (struct hpt_iopmu_itl *)
1433                                 rman_get_virtual(hba->bar0_res);
1434
1435         if (!hba->u.itl.mu) {
1436                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1437                                         hba->bar0_rid, hba->bar0_res);
1438                 device_printf(hba->pcidev, "alloc mem res failed\n");
1439                 return -1;
1440         }
1441
1442         return 0;
1443 }
1444
1445 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1446 {
1447         hba->bar0_rid = 0x10;
1448         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1449                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1450
1451         if (hba->bar0_res == NULL) {
1452                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1453                 return -1;
1454         }
1455         hba->bar0t = rman_get_bustag(hba->bar0_res);
1456         hba->bar0h = rman_get_bushandle(hba->bar0_res);
1457         hba->u.mv.regs = (struct hpt_iopmv_regs *)
1458                                 rman_get_virtual(hba->bar0_res);
1459
1460         if (!hba->u.mv.regs) {
1461                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1462                                         hba->bar0_rid, hba->bar0_res);
1463                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1464                 return -1;
1465         }
1466
1467         hba->bar2_rid = 0x18;
1468         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1469                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1470
1471         if (hba->bar2_res == NULL) {
1472                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1473                                         hba->bar0_rid, hba->bar0_res);
1474                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1475                 return -1;
1476         }
1477
1478         hba->bar2t = rman_get_bustag(hba->bar2_res);
1479         hba->bar2h = rman_get_bushandle(hba->bar2_res);
1480         hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1481
1482         if (!hba->u.mv.mu) {
1483                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1484                                         hba->bar0_rid, hba->bar0_res);
1485                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1486                                         hba->bar2_rid, hba->bar2_res);
1487                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1488                 return -1;
1489         }
1490
1491         return 0;
1492 }
1493
1494 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1495 {
1496         hba->bar0_rid = 0x10;
1497         hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1498                         SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1499
1500         if (hba->bar0_res == NULL) {
1501                 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1502                 return -1;
1503         }
1504         hba->bar0t = rman_get_bustag(hba->bar0_res);
1505         hba->bar0h = rman_get_bushandle(hba->bar0_res);
1506         hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1507                                 rman_get_virtual(hba->bar0_res);
1508
1509         if (!hba->u.mvfrey.config) {
1510                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1511                                         hba->bar0_rid, hba->bar0_res);
1512                 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1513                 return -1;
1514         }
1515
1516         hba->bar2_rid = 0x18;
1517         hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1518                         SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1519
1520         if (hba->bar2_res == NULL) {
1521                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1522                                         hba->bar0_rid, hba->bar0_res);
1523                 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1524                 return -1;
1525         }
1526
1527         hba->bar2t = rman_get_bustag(hba->bar2_res);
1528         hba->bar2h = rman_get_bushandle(hba->bar2_res);
1529         hba->u.mvfrey.mu = (struct hpt_iopmu_mvfrey *)
1530                                 rman_get_virtual(hba->bar2_res);
1531
1532         if (!hba->u.mvfrey.mu) {
1533                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534                                         hba->bar0_rid, hba->bar0_res);
1535                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1536                                         hba->bar2_rid, hba->bar2_res);
1537                 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1538                 return -1;
1539         }
1540
1541         return 0;
1542 }
1543
1544 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1545 {
1546         if (hba->bar0_res)
1547                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1548                         hba->bar0_rid, hba->bar0_res);
1549 }
1550
1551 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1552 {
1553         if (hba->bar0_res)
1554                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1555                         hba->bar0_rid, hba->bar0_res);
1556         if (hba->bar2_res)
1557                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1558                         hba->bar2_rid, hba->bar2_res);
1559 }
1560
1561 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1562 {
1563         if (hba->bar0_res)
1564                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1565                         hba->bar0_rid, hba->bar0_res);
1566         if (hba->bar2_res)
1567                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1568                         hba->bar2_rid, hba->bar2_res);
1569 }
1570
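/*
 * Allocate the DMA-safe control/config request buffer for MV adapters:
 * a single segment below 4GB, loaded through hptiop_mv_map_ctlcfg(),
 * which records the 32-byte-aligned bus address in ctlcfgcmd_phy.
 */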
1571 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1572 {
1573         if (bus_dma_tag_create(hba->parent_dmat,
1574                                 1,
1575                                 0,
1576                                 BUS_SPACE_MAXADDR_32BIT,
1577                                 BUS_SPACE_MAXADDR,
1578                                 NULL, NULL,
1579                                 0x800 - 0x8,
1580                                 1,
1581                                 BUS_SPACE_MAXSIZE_32BIT,
1582                                 BUS_DMA_ALLOCNOW,
1583                                 NULL,
1584                                 NULL,
1585                                 &hba->ctlcfg_dmat)) {
1586                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1587                 return -1;
1588         }
1589
1590         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1591                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1592                 &hba->ctlcfg_dmamap) != 0) {
1593                         device_printf(hba->pcidev,
1594                                         "bus_dmamem_alloc failed!\n");
1595                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
1596                         return -1;
1597         }
1598
1599         if (bus_dmamap_load(hba->ctlcfg_dmat,
1600                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1601                         MVIOP_IOCTLCFG_SIZE,
1602                         hptiop_mv_map_ctlcfg, hba, 0)) {
1603                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1604                 if (hba->ctlcfg_dmat) {
1605                         bus_dmamem_free(hba->ctlcfg_dmat,
1606                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1607                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
1608                 }
1609                 return -1;
1610         }
1611
1612         return 0;
1613 }
1614
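/*
 * MVFrey keeps its request lists in host memory.  Read the list size
 * from inbound_conf_ctl, then allocate one DMA buffer covering the
 * 0x800-byte config area, both in/out lists, and the shadow copy of
 * the outbound pointer.
 */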
1615 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1616 {
1617         u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1618
1619         list_count >>= 16;
1620
1621         if (list_count == 0) {
1622                 return -1;
1623         }
1624
1625         hba->u.mvfrey.list_count = list_count;
1626         hba->u.mvfrey.internal_mem_size = 0x800
1627                                                         + list_count * sizeof(struct mvfrey_inlist_entry)
1628                                                         + list_count * sizeof(struct mvfrey_outlist_entry)
1629                                                         + sizeof(int);
1630         if (bus_dma_tag_create(hba->parent_dmat,
1631                                 1,
1632                                 0,
1633                                 BUS_SPACE_MAXADDR_32BIT,
1634                                 BUS_SPACE_MAXADDR,
1635                                 NULL, NULL,
1636                                 hba->u.mvfrey.internal_mem_size,
1637                                 1,
1638                                 BUS_SPACE_MAXSIZE_32BIT,
1639                                 BUS_DMA_ALLOCNOW,
1640                                 NULL,
1641                                 NULL,
1642                                 &hba->ctlcfg_dmat)) {
1643                 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1644                 return -1;
1645         }
1646
1647         if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1648                 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1649                 &hba->ctlcfg_dmamap) != 0) {
1650                         device_printf(hba->pcidev,
1651                                         "bus_dmamem_alloc failed!\n");
1652                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
1653                         return -1;
1654         }
1655
1656         if (bus_dmamap_load(hba->ctlcfg_dmat,
1657                         hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1658                         hba->u.mvfrey.internal_mem_size,
1659                         hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1660                 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1661                 if (hba->ctlcfg_dmat) {
1662                         bus_dmamem_free(hba->ctlcfg_dmat,
1663                                 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1664                         bus_dma_tag_destroy(hba->ctlcfg_dmat);
1665                 }
1666                 return -1;
1667         }
1668
1669         return 0;
1670 }
1671
1672 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
1673         return 0;
1674 }
1675
1676 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1677 {
1678         if (hba->ctlcfg_dmat) {
1679                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1680                 bus_dmamem_free(hba->ctlcfg_dmat,
1681                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1682                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1683         }
1684
1685         return 0;
1686 }
1687
1688 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1689 {
1690         if (hba->ctlcfg_dmat) {
1691                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1692                 bus_dmamem_free(hba->ctlcfg_dmat,
1693                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1694                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1695         }
1696
1697         return 0;
1698 }
1699
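/*
 * Re-arm MVFrey communication after a RESET_COMM message: program the
 * bus addresses of the inbound/outbound lists and the outbound shadow
 * pointer, then reset both ring pointers to their initial toggled
 * positions.
 */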
1700 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1701 {
1702         u_int32_t i = 100;
1703
1704         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1705                 return -1;
1706
1707         /* wait 100ms for MCU ready */
1708         while (i--) {
1709                 DELAY(1000);
1710         }
1711
1712         BUS_SPACE_WRT4_MVFREY2(inbound_base,
1713                                                         hba->u.mvfrey.inlist_phy & 0xffffffff);
1714         BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1715                                                         (hba->u.mvfrey.inlist_phy >> 16) >> 16);
1716
1717         BUS_SPACE_WRT4_MVFREY2(outbound_base,
1718                                                         hba->u.mvfrey.outlist_phy & 0xffffffff);
1719         BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1720                                                         (hba->u.mvfrey.outlist_phy >> 16) >> 16);
1721
1722         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1723                                                         hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1724         BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1725                                                         (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1726
1727         hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1728                                                                 | CL_POINTER_TOGGLE;
1729         *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1730                                                                 | CL_POINTER_TOGGLE;
1731         hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1732
1733         return 0;
1734 }
1735
1736 /*
1737  * CAM driver interface
1738  */
1739 static device_method_t driver_methods[] = {
1740         /* Device interface */
1741         DEVMETHOD(device_probe,     hptiop_probe),
1742         DEVMETHOD(device_attach,    hptiop_attach),
1743         DEVMETHOD(device_detach,    hptiop_detach),
1744         DEVMETHOD(device_shutdown,  hptiop_shutdown),
1745         { 0, 0 }
1746 };
1747
1748 static struct hptiop_adapter_ops hptiop_itl_ops = {
1749         .family            = INTEL_BASED_IOP,
1750         .iop_wait_ready    = hptiop_wait_ready_itl,
1751         .internal_memalloc = NULL,
1752         .internal_memfree  = hptiop_internal_memfree_itl,
1753         .alloc_pci_res     = hptiop_alloc_pci_res_itl,
1754         .release_pci_res   = hptiop_release_pci_res_itl,
1755         .enable_intr       = hptiop_enable_intr_itl,
1756         .disable_intr      = hptiop_disable_intr_itl,
1757         .get_config        = hptiop_get_config_itl,
1758         .set_config        = hptiop_set_config_itl,
1759         .iop_intr          = hptiop_intr_itl,
1760         .post_msg          = hptiop_post_msg_itl,
1761         .post_req          = hptiop_post_req_itl,
1762         .do_ioctl          = hptiop_do_ioctl_itl,
1763         .reset_comm        = NULL,
1764 };
1765
1766 static struct hptiop_adapter_ops hptiop_mv_ops = {
1767         .family            = MV_BASED_IOP,
1768         .iop_wait_ready    = hptiop_wait_ready_mv,
1769         .internal_memalloc = hptiop_internal_memalloc_mv,
1770         .internal_memfree  = hptiop_internal_memfree_mv,
1771         .alloc_pci_res     = hptiop_alloc_pci_res_mv,
1772         .release_pci_res   = hptiop_release_pci_res_mv,
1773         .enable_intr       = hptiop_enable_intr_mv,
1774         .disable_intr      = hptiop_disable_intr_mv,
1775         .get_config        = hptiop_get_config_mv,
1776         .set_config        = hptiop_set_config_mv,
1777         .iop_intr          = hptiop_intr_mv,
1778         .post_msg          = hptiop_post_msg_mv,
1779         .post_req          = hptiop_post_req_mv,
1780         .do_ioctl          = hptiop_do_ioctl_mv,
1781         .reset_comm        = NULL,
1782 };
1783
1784 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1785         .family            = MVFREY_BASED_IOP,
1786         .iop_wait_ready    = hptiop_wait_ready_mvfrey,
1787         .internal_memalloc = hptiop_internal_memalloc_mvfrey,
1788         .internal_memfree  = hptiop_internal_memfree_mvfrey,
1789         .alloc_pci_res     = hptiop_alloc_pci_res_mvfrey,
1790         .release_pci_res   = hptiop_release_pci_res_mvfrey,
1791         .enable_intr       = hptiop_enable_intr_mvfrey,
1792         .disable_intr      = hptiop_disable_intr_mvfrey,
1793         .get_config        = hptiop_get_config_mvfrey,
1794         .set_config        = hptiop_set_config_mvfrey,
1795         .iop_intr          = hptiop_intr_mvfrey,
1796         .post_msg          = hptiop_post_msg_mvfrey,
1797         .post_req          = hptiop_post_req_mvfrey,
1798         .do_ioctl          = hptiop_do_ioctl_mvfrey,
1799         .reset_comm        = hptiop_reset_comm_mvfrey,
1800 };
1801
1802 static driver_t hptiop_pci_driver = {
1803         driver_name,
1804         driver_methods,
1805         sizeof(struct hpt_iop_hba)
1806 };
1807
1808 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1809 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1810
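/*
 * Match HighPoint (vendor 0x1103) adapters and select the ops table
 * for the chip family.  The SAS device IDs deliberately fall through
 * to share the handling of their SATA siblings.
 */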
1811 static int hptiop_probe(device_t dev)
1812 {
1813         struct hpt_iop_hba *hba;
1814         u_int32_t id;
1815         static char buf[256];
1816         int sas = 0;
1817         struct hptiop_adapter_ops *ops;
1818
1819         if (pci_get_vendor(dev) != 0x1103)
1820                 return (ENXIO);
1821
1822         id = pci_get_device(dev);
1823
1824         switch (id) {
1825                 case 0x4520:
1826                 case 0x4521:
1827                 case 0x4522:
1828                         sas = 1;
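                        /* FALLTHROUGH */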
1829                 case 0x3620:
1830                 case 0x3622:
1831                 case 0x3640:
1832                         ops = &hptiop_mvfrey_ops;
1833                         break;
1834                 case 0x4210:
1835                 case 0x4211:
1836                 case 0x4310:
1837                 case 0x4311:
1838                 case 0x4320:
1839                 case 0x4321:
1840                 case 0x4322:
1841                         sas = 1;
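                        /* FALLTHROUGH */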
1842                 case 0x3220:
1843                 case 0x3320:
1844                 case 0x3410:
1845                 case 0x3520:
1846                 case 0x3510:
1847                 case 0x3511:
1848                 case 0x3521:
1849                 case 0x3522:
1850                 case 0x3530:
1851                 case 0x3540:
1852                 case 0x3560:
1853                         ops = &hptiop_itl_ops;
1854                         break;
1855                 case 0x3020:
1856                 case 0x3120:
1857                 case 0x3122:
1858                         ops = &hptiop_mv_ops;
1859                         break;
1860                 default:
1861                         return (ENXIO);
1862         }
1863
1864         device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1865                 pci_get_bus(dev), pci_get_slot(dev),
1866                 pci_get_function(dev), pci_get_irq(dev));
1867
1868         snprintf(buf, sizeof(buf), "RocketRAID %x %s Controller",
1869                                 id, sas ? "SAS" : "SATA");
1870         device_set_desc_copy(dev, buf);
1871
1872         hba = (struct hpt_iop_hba *)device_get_softc(dev);
1873         bzero(hba, sizeof(struct hpt_iop_hba));
1874         hba->ops = ops;
1875
1876         KdPrint(("hba->ops=%p\n", hba->ops));
1877         return 0;
1878 }
1879
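/*
 * Attach sequence: map PCI resources, wait for the IOP, create the
 * parent/io/srb DMA tags, read and apply the IOP configuration,
 * register a CAM bus, hook up the interrupt, start the firmware's
 * background task, and expose the /dev/hptiopN ioctl node.  Errors
 * unwind through the labels below in reverse order.
 */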
1880 static int hptiop_attach(device_t dev)
1881 {
1882         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1883         struct hpt_iop_request_get_config  iop_config;
1884         struct hpt_iop_request_set_config  set_config;
1885         int rid = 0;
1886         struct cam_devq *devq;
1887         struct ccb_setasync ccb;
1888         u_int32_t unit = device_get_unit(dev);
1889
1890         device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1891                         unit, driver_version);
1892
1893         KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1894                 pci_get_bus(dev), pci_get_slot(dev),
1895                 pci_get_function(dev), hba->ops));
1896
1897         pci_enable_busmaster(dev);
1898         hba->pcidev = dev;
1899         hba->pciunit = unit;
1900
1901         if (hba->ops->alloc_pci_res(hba))
1902                 return ENXIO;
1903
1904         if (hba->ops->iop_wait_ready(hba, 2000)) {
1905                 device_printf(dev, "adapter is not ready\n");
1906                 goto release_pci_res;
1907         }
1908
1909         mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1910
1911         if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1912                         1,  /* alignment */
1913                         0, /* boundary */
1914                         BUS_SPACE_MAXADDR,  /* lowaddr */
1915                         BUS_SPACE_MAXADDR,  /* highaddr */
1916                         NULL, NULL,         /* filter, filterarg */
1917                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsize */
1918                         BUS_SPACE_UNRESTRICTED, /* nsegments */
1919                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1920                         0,      /* flags */
1921                         NULL,   /* lockfunc */
1922                         NULL,       /* lockfuncarg */
1923                         &hba->parent_dmat   /* tag */))
1924         {
1925                 device_printf(dev, "alloc parent_dmat failed\n");
1926                 goto release_pci_res;
1927         }
1928
1929         if (hba->ops->family == MV_BASED_IOP) {
1930                 if (hba->ops->internal_memalloc(hba)) {
1931                         device_printf(dev, "alloc srb_dmat failed\n");
1932                         goto destroy_parent_tag;
1933                 }
1934         }
1935
1936         if (hba->ops->get_config(hba, &iop_config)) {
1937                 device_printf(dev, "get iop config failed.\n");
1938                 goto get_config_failed;
1939         }
1940
1941         hba->firmware_version = iop_config.firmware_version;
1942         hba->interface_version = iop_config.interface_version;
1943         hba->max_requests = iop_config.max_requests;
1944         hba->max_devices = iop_config.max_devices;
1945         hba->max_request_size = iop_config.request_size;
1946         hba->max_sg_count = iop_config.max_sg_count;
1947
1948         if (hba->ops->family == MVFREY_BASED_IOP) {
1949                 if (hba->ops->internal_memalloc(hba)) {
1950                         device_printf(dev, "alloc srb_dmat failed\n");
1951                         goto destroy_parent_tag;
1952                 }
1953                 if (hba->ops->reset_comm(hba)) {
1954                         device_printf(dev, "reset comm failed\n");
1955                         goto get_config_failed;
1956                 }
1957         }
1958
1959         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1960                         4,  /* alignment */
1961                         BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1962                         BUS_SPACE_MAXADDR,  /* lowaddr */
1963                         BUS_SPACE_MAXADDR,  /* highaddr */
1964                         NULL, NULL,         /* filter, filterarg */
1965                         PAGE_SIZE * (hba->max_sg_count-1),  /* maxsize */
1966                         hba->max_sg_count,  /* nsegments */
1967                         0x20000,    /* maxsegsize */
1968                         BUS_DMA_ALLOCNOW,       /* flags */
1969                         busdma_lock_mutex,  /* lockfunc */
1970                         &hba->lock,     /* lockfuncarg */
1971                         &hba->io_dmat   /* tag */))
1972         {
1973                 device_printf(dev, "alloc io_dmat failed\n");
1974                 goto get_config_failed;
1975         }
1976
1977         if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1978                         1,  /* alignment */
1979                         0, /* boundary */
1980                         BUS_SPACE_MAXADDR_32BIT,    /* lowaddr */
1981                         BUS_SPACE_MAXADDR,  /* highaddr */
1982                         NULL, NULL,         /* filter, filterarg */
1983                         HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1984                         1,  /* nsegments */
1985                         BUS_SPACE_MAXSIZE_32BIT,    /* maxsegsize */
1986                         0,      /* flags */
1987                         NULL,   /* lockfunc */
1988                         NULL,       /* lockfuncarg */
1989                         &hba->srb_dmat  /* tag */))
1990         {
1991                 device_printf(dev, "alloc srb_dmat failed\n");
1992                 goto destroy_io_dmat;
1993         }
1994
1995         if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1996                         BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1997                         &hba->srb_dmamap) != 0)
1998         {
1999                 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
2000                 goto destroy_srb_dmat;
2001         }
2002
2003         if (bus_dmamap_load(hba->srb_dmat,
2004                         hba->srb_dmamap, hba->uncached_ptr,
2005                         (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
2006                         hptiop_map_srb, hba, 0))
2007         {
2008                 device_printf(dev, "bus_dmamap_load failed!\n");
2009                 goto srb_dmamem_free;
2010         }
2011
2012         if ((devq = cam_simq_alloc(hba->max_requests - 1)) == NULL) {
2013                 device_printf(dev, "cam_simq_alloc failed\n");
2014                 goto srb_dmamap_unload;
2015         }
2016
2017         hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2018                         hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2019         if (!hba->sim) {
2020                 device_printf(dev, "cam_sim_alloc failed\n");
2021                 cam_simq_free(devq);
2022                 goto srb_dmamap_unload;
2023         }
2024         hptiop_lock_adapter(hba);
2025         if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2026         {
2027                 device_printf(dev, "xpt_bus_register failed\n");
2028                 goto free_cam_sim;
2029         }
2030
2031         if (xpt_create_path(&hba->path, /*periph */ NULL,
2032                         cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2033                         CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2034                 device_printf(dev, "xpt_create_path failed\n");
2035                 goto deregister_xpt_bus;
2036         }
2037         hptiop_unlock_adapter(hba);
2038
2039         bzero(&set_config, sizeof(set_config));
2040         set_config.iop_id = unit;
2041         set_config.vbus_id = cam_sim_path(hba->sim);
2042         set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2043
2044         if (hba->ops->set_config(hba, &set_config)) {
2045                 device_printf(dev, "set iop config failed.\n");
2046                 goto free_hba_path;
2047         }
2048
2049         xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2050         ccb.ccb_h.func_code = XPT_SASYNC_CB;
2051         ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2052         ccb.callback = hptiop_async;
2053         ccb.callback_arg = hba->sim;
2054         xpt_action((union ccb *)&ccb);
2055
2056         rid = 0;
2057         if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2058                         &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2059                 device_printf(dev, "allocate irq failed!\n");
2060                 goto free_hba_path;
2061         }
2062
2063         if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2064                                 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2065         {
2066                 device_printf(dev, "allocate intr function failed!\n");
2067                 goto free_irq_resource;
2068         }
2069
2070         if (hptiop_send_sync_msg(hba,
2071                         IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2072                 device_printf(dev, "failed to start background task\n");
2073                 goto teardown_irq_resource;
2074         }
2075
2076         hba->ops->enable_intr(hba);
2077         hba->initialized = 1;
2078
2079         hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2080                                 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2081                                 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
2082
2083
2084         return 0;
2085
2086
2087 teardown_irq_resource:
2088         bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2089
2090 free_irq_resource:
2091         bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2092
2093         hptiop_lock_adapter(hba);
2094 free_hba_path:
2095         xpt_free_path(hba->path);
2096
2097 deregister_xpt_bus:
2098         xpt_bus_deregister(cam_sim_path(hba->sim));
2099
2100 free_cam_sim:
2101         cam_sim_free(hba->sim, /*free devq*/ TRUE);
2102         hptiop_unlock_adapter(hba);
2103
2104 srb_dmamap_unload:
2105         if (hba->uncached_ptr)
2106                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2107
2108 srb_dmamem_free:
2109         if (hba->uncached_ptr)
2110                 bus_dmamem_free(hba->srb_dmat,
2111                         hba->uncached_ptr, hba->srb_dmamap);
2112
2113 destroy_srb_dmat:
2114         if (hba->srb_dmat)
2115                 bus_dma_tag_destroy(hba->srb_dmat);
2116
2117 destroy_io_dmat:
2118         if (hba->io_dmat)
2119                 bus_dma_tag_destroy(hba->io_dmat);
2120
2121 get_config_failed:
2122         hba->ops->internal_memfree(hba);
2123
2124 destroy_parent_tag:
2125         if (hba->parent_dmat)
2126                 bus_dma_tag_destroy(hba->parent_dmat);
2127
2128 release_pci_res:
2129         if (hba->ops->release_pci_res)
2130                 hba->ops->release_pci_res(hba);
2131
2132         return ENXIO;
2133 }
2134
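/*
 * Detach: refuse while any exported device is still in use, then shut
 * the IOP down, stop its background task, and release all resources.
 */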
2135 static int hptiop_detach(device_t dev)
2136 {
2137         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2138         int i;
2139         int error = EBUSY;
2140
2141         hptiop_lock_adapter(hba);
2142         for (i = 0; i < hba->max_devices; i++)
2143                 if (hptiop_os_query_remove_device(hba, i)) {
2144                         device_printf(dev, "%d: file system is busy, id=%d\n",
2145                                                 hba->pciunit, i);
2146                         goto out;
2147                 }
2148
2149         if ((error = hptiop_shutdown(dev)) != 0)
2150                 goto out;
2151         if (hptiop_send_sync_msg(hba,
2152                 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2153                 goto out;
2154         hptiop_unlock_adapter(hba);
2155
2156         hptiop_release_resource(hba);
2157         return (0);
2158 out:
2159         hptiop_unlock_adapter(hba);
2160         return error;
2161 }
2162
2163 static int hptiop_shutdown(device_t dev)
2164 {
2165         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2166
2167         int error = 0;
2168
2169         if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2170                 device_printf(dev, "%d: device is busy\n", hba->pciunit);
2171                 return EBUSY;
2172         }
2173
2174         hba->ops->disable_intr(hba);
2175
2176         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2177                 error = EBUSY;
2178
2179         return error;
2180 }
2181
2182 static void hptiop_pci_intr(void *arg)
2183 {
2184         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2185         hptiop_lock_adapter(hba);
2186         hba->ops->iop_intr(hba);
2187         hptiop_unlock_adapter(hba);
2188 }
2189
2190 static void hptiop_poll(struct cam_sim *sim)
2191 {
2192         struct hpt_iop_hba *hba;
2193
2194         hba = cam_sim_softc(sim);
2195         hba->ops->iop_intr(hba);
2196 }
2197
2198 static void hptiop_async(void * callback_arg, u_int32_t code,
2199                                         struct cam_path * path, void * arg)
2200 {
2201 }
2202
2203 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2204 {
2205         BUS_SPACE_WRT4_ITL(outbound_intmask,
2206                 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2207 }
2208
2209 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2210 {
2211         u_int32_t int_mask;
2212
2213         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2214
2215         int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2216                         | MVIOP_MU_OUTBOUND_INT_MSG;
2217         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2218 }
2219
2220 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2221 {
2222         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2223         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2224
2225         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2226         BUS_SPACE_RD4_MVFREY2(isr_enable);
2227
2228         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2229         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2230 }
2231
2232 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2233 {
2234         u_int32_t int_mask;
2235
2236         int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2237
2238         int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2239         BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2240         BUS_SPACE_RD4_ITL(outbound_intstatus);
2241 }
2242
2243 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2244 {
2245         u_int32_t int_mask;
2246         int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2247
2248         int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2249                         | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2250         BUS_SPACE_WRT4_MV0(outbound_intmask, int_mask);
2251         BUS_SPACE_RD4_MV0(outbound_intmask);
2252 }
2253
2254 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2255 {
2256         BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2257         BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2258
2259         BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2260         BUS_SPACE_RD4_MVFREY2(isr_enable);
2261
2262         BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2263         BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2264 }
2265
2266 static void hptiop_reset_adapter(void *argv)
2267 {
2268         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2269         if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2270                 return;
2271         hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2272 }
2273
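/* Pop an SRB from the singly-linked free list; NULL when exhausted. */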
2274 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2275 {
2276         struct hpt_iop_srb * srb;
2277
2278         if (hba->srb_list) {
2279                 srb = hba->srb_list;
2280                 hba->srb_list = srb->next;
2281                 return srb;
2282         }
2283
2284         return NULL;
2285 }
2286
2287 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2288 {
2289         srb->next = hba->srb_list;
2290         hba->srb_list = srb;
2291 }
2292
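/*
 * CAM action entry point.  SCSI I/O is DMA-mapped and handed to the
 * family-specific post_req hook from the load callback; the remaining
 * function codes are answered inline.
 */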
2293 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2294 {
2295         struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2296         struct hpt_iop_srb * srb;
2297         int error;
2298
2299         switch (ccb->ccb_h.func_code) {
2300
2301         case XPT_SCSI_IO:
2302                 if (ccb->ccb_h.target_lun != 0 ||
2303                         ccb->ccb_h.target_id >= hba->max_devices ||
2304                         (ccb->ccb_h.flags & CAM_CDB_PHYS))
2305                 {
2306                         ccb->ccb_h.status = CAM_TID_INVALID;
2307                         xpt_done(ccb);
2308                         return;
2309                 }
2310
2311                 if ((srb = hptiop_get_srb(hba)) == NULL) {
2312                         device_printf(hba->pcidev, "srb allocation failed\n");
2313                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2314                         xpt_done(ccb);
2315                         return;
2316                 }
2317
2318                 srb->ccb = ccb;
2319                 error = bus_dmamap_load_ccb(hba->io_dmat,
2320                                             srb->dma_map,
2321                                             ccb,
2322                                             hptiop_post_scsi_command,
2323                                             srb,
2324                                             0);
2325
2326                 if (error && error != EINPROGRESS) {
2327                         device_printf(hba->pcidev,
2328                                 "%d: bus_dmamap_load error %d\n",
2329                                 hba->pciunit, error);
2330                         xpt_freeze_simq(hba->sim, 1);
2331                         ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2332                         hptiop_free_srb(hba, srb);
2333                         xpt_done(ccb);
2334                         return;
2335                 }
2336
2337                 return;
2338
2339         case XPT_RESET_BUS:
2340                 device_printf(hba->pcidev, "reset adapter\n");
2341                 hba->msg_done = 0;
2342                 hptiop_reset_adapter(hba);
2343                 break;
2344
2345         case XPT_GET_TRAN_SETTINGS:
2346         case XPT_SET_TRAN_SETTINGS:
2347                 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2348                 break;
2349
2350         case XPT_CALC_GEOMETRY:
2351                 cam_calc_geometry(&ccb->ccg, 1);
2352                 break;
2353
2354         case XPT_PATH_INQ:
2355         {
2356                 struct ccb_pathinq *cpi = &ccb->cpi;
2357
2358                 cpi->version_num = 1;
2359                 cpi->hba_inquiry = PI_SDTR_ABLE;
2360                 cpi->target_sprt = 0;
2361                 cpi->hba_misc = PIM_NOBUSRESET;
2362                 cpi->hba_eng_cnt = 0;
2363                 cpi->max_target = hba->max_devices;
2364                 cpi->max_lun = 0;
2365                 cpi->unit_number = cam_sim_unit(sim);
2366                 cpi->bus_id = cam_sim_bus(sim);
2367                 cpi->initiator_id = hba->max_devices;
2368                 cpi->base_transfer_speed = 3300;
2369
2370                 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2371                 strlcpy(cpi->hba_vid, "HPT   ", HBA_IDLEN);
2372                 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2373                 cpi->transport = XPORT_SPI;
2374                 cpi->transport_version = 2;
2375                 cpi->protocol = PROTO_SCSI;
2376                 cpi->protocol_version = SCSI_REV_2;
2377                 cpi->ccb_h.status = CAM_REQ_CMP;
2378                 break;
2379         }
2380
2381         default:
2382                 ccb->ccb_h.status = CAM_REQ_INVALID;
2383                 break;
2384         }
2385
2386         xpt_done(ccb);
2387         return;
2388 }
2389
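/*
 * Intel-family request posting.  SRBs the IOP cannot reach directly
 * (HPT_SRB_FLAG_HIGH_MEM_ACESS) are staged on the stack and copied
 * into a request slot borrowed from the inbound queue; otherwise the
 * request is built in the SRB itself and its bus address is posted.
 */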
2390 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2391                                 struct hpt_iop_srb *srb,
2392                                 bus_dma_segment_t *segs, int nsegs)
2393 {
2394         int idx;
2395         union ccb *ccb = srb->ccb;
2396         u_int8_t *cdb;
2397
2398         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2399                 cdb = ccb->csio.cdb_io.cdb_ptr;
2400         else
2401                 cdb = ccb->csio.cdb_io.cdb_bytes;
2402
2403         KdPrint(("ccb=%p %x-%x-%x\n",
2404                 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2405
2406         if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2407                 u_int32_t iop_req32;
2408                 struct hpt_iop_request_scsi_command req;
2409
2410                 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2411
2412                 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2413                         device_printf(hba->pcidev, "invalid req offset\n");
2414                         ccb->ccb_h.status = CAM_BUSY;
2415                         bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2416                         hptiop_free_srb(hba, srb);
2417                         xpt_done(ccb);
2418                         return;
2419                 }
2420
2421                 if (ccb->csio.dxfer_len && nsegs > 0) {
2422                         struct hpt_iopsg *psg = req.sg_list;
2423                         for (idx = 0; idx < nsegs; idx++, psg++) {
2424                                 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2425                                 psg->size = segs[idx].ds_len;
2426                                 psg->eot = 0;
2427                         }
2428                         psg[-1].eot = 1;
2429                 }
2430
2431                 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2432
2433                 req.header.size =
2434                                 offsetof(struct hpt_iop_request_scsi_command, sg_list)
2435                                 + nsegs*sizeof(struct hpt_iopsg);
2436                 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2437                 req.header.flags = 0;
2438                 req.header.result = IOP_RESULT_PENDING;
2439                 req.header.context = (u_int64_t)(unsigned long)srb;
2440                 req.dataxfer_length = ccb->csio.dxfer_len;
2441                 req.channel = 0;
2442                 req.target = ccb->ccb_h.target_id;
2443                 req.lun = ccb->ccb_h.target_lun;
2444
2445                 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2446                         (u_int8_t *)&req, req.header.size);
2447
2448                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2449                         bus_dmamap_sync(hba->io_dmat,
2450                                 srb->dma_map, BUS_DMASYNC_PREREAD);
2451                 }
2452                 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2453                         bus_dmamap_sync(hba->io_dmat,
2454                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
2455
2456                 BUS_SPACE_WRT4_ITL(inbound_queue, iop_req32);
2457         } else {
2458                 struct hpt_iop_request_scsi_command *req;
2459
2460                 req = (struct hpt_iop_request_scsi_command *)srb;
2461                 if (ccb->csio.dxfer_len && nsegs > 0) {
2462                         struct hpt_iopsg *psg = req->sg_list;
2463                         for (idx = 0; idx < nsegs; idx++, psg++) {
2464                                 psg->pci_address =
2465                                         (u_int64_t)segs[idx].ds_addr;
2466                                 psg->size = segs[idx].ds_len;
2467                                 psg->eot = 0;
2468                         }
2469                         psg[-1].eot = 1;
2470                 }
2471
2472                 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2473
2474                 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2475                 req->header.result = IOP_RESULT_PENDING;
2476                 req->dataxfer_length = ccb->csio.dxfer_len;
2477                 req->channel = 0;
2478                 req->target = ccb->ccb_h.target_id;
2479                 req->lun = ccb->ccb_h.target_lun;
2480                 req->header.size =
2481                         offsetof(struct hpt_iop_request_scsi_command, sg_list)
2482                         + nsegs*sizeof(struct hpt_iopsg);
2483                 req->header.context = (u_int64_t)srb->index |
2484                                                 IOPMU_QUEUE_ADDR_HOST_BIT;
2485                 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2486
2487                 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2488                         bus_dmamap_sync(hba->io_dmat,
2489                                 srb->dma_map, BUS_DMASYNC_PREREAD);
2490                 } else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2491                         bus_dmamap_sync(hba->io_dmat,
2492                                 srb->dma_map, BUS_DMASYNC_PREWRITE);
2493                 }
2494
2495                 if (hba->firmware_version > 0x01020000
2496                         || hba->interface_version > 0x01020000) {
2497                         u_int32_t size_bits;
2498
2499                         if (req->header.size < 256)
2500                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2501                         else if (req->header.size < 512)
2502                                 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2503                         else
2504                                 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2505                                                 | IOPMU_QUEUE_ADDR_HOST_BIT;
2506
2507                         BUS_SPACE_WRT4_ITL(inbound_queue,
2508                                 (u_int32_t)srb->phy_addr | size_bits);
2509                 } else
2510                         BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2511                                 | IOPMU_QUEUE_ADDR_HOST_BIT);
2512         }
2513 }
2514
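/*
 * MV request posting: build the command in the SRB, then hand its bus
 * address, tagged with the host-address bit and an encoded size hint,
 * to hptiop_mv_inbound_write().
 */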
2515 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2516                                 struct hpt_iop_srb *srb,
2517                                 bus_dma_segment_t *segs, int nsegs)
2518 {
2519         int idx, size;
2520         union ccb *ccb = srb->ccb;
2521         u_int8_t *cdb;
2522         struct hpt_iop_request_scsi_command *req;
2523         u_int64_t req_phy;
2524
2525         req = (struct hpt_iop_request_scsi_command *)srb;
2526         req_phy = srb->phy_addr;
2527
2528         if (ccb->csio.dxfer_len && nsegs > 0) {
2529                 struct hpt_iopsg *psg = req->sg_list;
2530                 for (idx = 0; idx < nsegs; idx++, psg++) {
2531                         psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2532                         psg->size = segs[idx].ds_len;
2533                         psg->eot = 0;
2534                 }
2535                 psg[-1].eot = 1;
2536         }
2537         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2538                 cdb = ccb->csio.cdb_io.cdb_ptr;
2539         else
2540                 cdb = ccb->csio.cdb_io.cdb_bytes;
2541
2542         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2543         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2544         req->header.result = IOP_RESULT_PENDING;
2545         req->dataxfer_length = ccb->csio.dxfer_len;
2546         req->channel = 0;
2547         req->target = ccb->ccb_h.target_id;
2548         req->lun = ccb->ccb_h.target_lun;
2549         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2550                                 - sizeof(struct hpt_iopsg)
2551                                 + nsegs * sizeof(struct hpt_iopsg);
2552         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2553                 bus_dmamap_sync(hba->io_dmat,
2554                         srb->dma_map, BUS_DMASYNC_PREREAD);
2555         }
2556         else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2557                 bus_dmamap_sync(hba->io_dmat,
2558                         srb->dma_map, BUS_DMASYNC_PREWRITE);
2559         req->header.context = (u_int64_t)srb->index
2560                                         << MVIOP_REQUEST_NUMBER_START_BIT
2561                                         | MVIOP_CMD_TYPE_SCSI;
2562         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2563         size = req->header.size >> 8;
2564         hptiop_mv_inbound_write(req_phy
2565                         | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2566                         | imin(3, size), hba);
2567 }
2568
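/*
 * MVFrey request posting: encode the request's bus address into
 * header.flags/context, queue it on the inbound list, and arm a
 * 20-second timeout that resets the adapter if the command never
 * completes.
 */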
2569 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2570                                 struct hpt_iop_srb *srb,
2571                                 bus_dma_segment_t *segs, int nsegs)
2572 {
2573         int idx, index;
2574         union ccb *ccb = srb->ccb;
2575         u_int8_t *cdb;
2576         struct hpt_iop_request_scsi_command *req;
2577         u_int64_t req_phy;
2578
2579         req = (struct hpt_iop_request_scsi_command *)srb;
2580         req_phy = srb->phy_addr;
2581
2582         if (ccb->csio.dxfer_len && nsegs > 0) {
2583                 struct hpt_iopsg *psg = req->sg_list;
2584                 for (idx = 0; idx < nsegs; idx++, psg++) {
2585                         psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2586                         psg->size = segs[idx].ds_len;
2587                         psg->eot = 0;
2588                 }
2589                 psg[-1].eot = 1;
2590         }
2591         if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2592                 cdb = ccb->csio.cdb_io.cdb_ptr;
2593         else
2594                 cdb = ccb->csio.cdb_io.cdb_bytes;
2595
2596         bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2597         req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2598         req->header.result = IOP_RESULT_PENDING;
2599         req->dataxfer_length = ccb->csio.dxfer_len;
2600         req->channel = 0;
2601         req->target = ccb->ccb_h.target_id;
2602         req->lun = ccb->ccb_h.target_lun;
2603         req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2604                                 - sizeof(struct hpt_iopsg)
2605                                 + nsegs * sizeof(struct hpt_iopsg);
2606         if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2607                 bus_dmamap_sync(hba->io_dmat,
2608                         srb->dma_map, BUS_DMASYNC_PREREAD);
2609         }
2610         else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2611                 bus_dmamap_sync(hba->io_dmat,
2612                         srb->dma_map, BUS_DMASYNC_PREWRITE);
2613
2614         req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2615                                                 | IOP_REQUEST_FLAG_ADDR_BITS
2616                                                 | ((req_phy >> 16) & 0xffff0000);
2617         req->header.context = ((req_phy & 0xffffffff) << 32)
2618                                                 | srb->index << 4
2619                                                 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
2620
2621         hba->u.mvfrey.inlist_wptr++;
2622         index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2623
2624         if (index == hba->u.mvfrey.list_count) {
2625                 index = 0;
2626                 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2627                 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
2628         }
2629
2630         hba->u.mvfrey.inlist[index].addr = req_phy;
2631         hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
2632
2633         BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2634         BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
2635
2636         if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2637                 callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
2638         }
2639 }
2640
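     /*
      * bus_dmamap_load() callback: if the mapping failed or produced more
      * segments than the adapter accepts, bounce the CCB back with
      * CAM_BUSY; otherwise hand off to the chip-specific post_req method.
      */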
2641 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2642                                         int nsegs, int error)
2643 {
2644         struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2645         union ccb *ccb = srb->ccb;
2646         struct hpt_iop_hba *hba = srb->hba;
2647
2648         if (error || nsegs > hba->max_sg_count) {
2649                 KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
2650                         ccb->ccb_h.func_code,
2651                         ccb->ccb_h.target_id,
2652                         (uintmax_t)ccb->ccb_h.target_lun, nsegs));
2653                 ccb->ccb_h.status = CAM_BUSY;
2654                 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2655                 hptiop_free_srb(hba, srb);
2656                 xpt_done(ccb);
2657                 return;
2658         }
2659
2660         hba->ops->post_req(hba, srb, segs, nsegs);
2661 }
2662
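     /*
      * The (addr + 0x1F) & ~0x1F idiom below rounds up to the next
      * 32-byte boundary; for example 0x1001 becomes 0x1020 while an
      * already aligned 0x1020 stays put.  The bus address and the KVA
      * are rounded identically so both still name the same bytes.
      */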
2663 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2664                                 int nsegs, int error)
2665 {
2666         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2667         hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2668                                 & ~(u_int64_t)0x1F;
2669         hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2670                                 & ~0x1F);
2671 }
2672
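     /*
      * Carve the aligned control/config DMA area into its sub-regions:
      * a 0x800-byte command buffer first, then the inbound request list,
      * the outbound completion list, and finally one u_int32_t slot for
      * the outbound copy pointer (presumably updated by the controller).
      */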
2673 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2674                                 int nsegs, int error)
2675 {
2676         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2677         u_int8_t *p;
2678         u_int64_t phy;
2679         u_int32_t list_count = hba->u.mvfrey.list_count;
2680
2681         phy = ((u_int64_t)segs->ds_addr + 0x1F)
2682                                 & ~(u_int64_t)0x1F;
2683         p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2684                                 & ~0x1F);
2685
2686         hba->ctlcfgcmd_phy = phy;
2687         hba->ctlcfg_ptr = p;
2688
2689         p += 0x800;
2690         phy += 0x800;
2691
2692         hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2693         hba->u.mvfrey.inlist_phy = phy;
2694
2695         p += list_count * sizeof(struct mvfrey_inlist_entry);
2696         phy += list_count * sizeof(struct mvfrey_inlist_entry);
2697
2698         hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2699         hba->u.mvfrey.outlist_phy = phy;
2700
2701         p += list_count * sizeof(struct mvfrey_outlist_entry);
2702         phy += list_count * sizeof(struct mvfrey_outlist_entry);
2703
2704         hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2705         hba->u.mvfrey.outlist_cptr_phy = phy;
2706 }
2707
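     /*
      * Slice the uncached buffer into HPT_SRB_MAX_QUEUE_SIZE fixed-size
      * SRBs, each with its own DMA map.  ITL-style adapters (no ctlcfg
      * area) store the SRB bus address pre-shifted right by 5; the pool
      * is 32-byte aligned, so no address bits are lost, and a flag marks
      * SRBs that live above the 32 GB window.
      */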
2708 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2709                                 int nsegs, int error)
2710 {
2711         struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2712         bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2713         struct hpt_iop_srb *srb, *tmp_srb;
2714         int i;
2715
2716         if (error || nsegs == 0) {
2717                 device_printf(hba->pcidev, "hptiop_map_srb error\n");
2718                 return;
2719         }
2720
2721         /* map srb */
2722         srb = (struct hpt_iop_srb *)
2723                 (((unsigned long)hba->uncached_ptr + 0x1F)
2724                 & ~(unsigned long)0x1F);
2725
2726         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2727                 tmp_srb = (struct hpt_iop_srb *)
2728                                         ((char *)srb + i * HPT_SRB_MAX_SIZE);
2729                 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2730                         if (bus_dmamap_create(hba->io_dmat,
2731                                                 0, &tmp_srb->dma_map)) {
2732                                 device_printf(hba->pcidev, "dmamap create failed\n");
2733                                 return;
2734                         }
2735
2736                         bzero(tmp_srb, sizeof(struct hpt_iop_srb));
2737                         tmp_srb->hba = hba;
2738                         tmp_srb->index = i;
2739                         if (hba->ctlcfg_ptr == NULL) { /* ITL IOP */
2740                                 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2741                                                         (phy_addr >> 5);
2742                                 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2743                                         tmp_srb->srb_flag =
2744                                                 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2745                         } else {
2746                                 tmp_srb->phy_addr = phy_addr;
2747                         }
2748
2749                         callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2750                         hptiop_free_srb(hba, tmp_srb);
2751                         hba->srb[i] = tmp_srb;
2752                         phy_addr += HPT_SRB_MAX_SIZE;
2753                 } else {
2754                         device_printf(hba->pcidev, "invalid alignment\n");
2756                         return;
2757                 }
2758         }
2759 }
2760
2761 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg)
2762 {
2763         hba->msg_done = 1;
2764 }
2765
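     /*
      * Veto a device removal while the "da" peripheral still holds
      * references on the target's path, i.e. roughly while the disk is
      * open or otherwise in use.
      */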
2766 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba,
2767                                                 int target_id)
2768 {
2769         struct cam_periph       *periph = NULL;
2770         struct cam_path         *path;
2771         int                     status, retval = 0;
2772
2773         status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2774
2775         if (status == CAM_REQ_CMP) {
2776                 if ((periph = cam_periph_find(path, "da")) != NULL) {
2777                         if (periph->refcount >= 1) {
2778                                 device_printf(hba->pcidev,
2779                                         "%d, target_id=0x%x, refcount=%d\n",
2780                                     hba->pciunit, target_id, periph->refcount);
2782                                 retval = -1;
2783                         }
2784                 }
2785                 xpt_free_path(path);
2786         }
2787         return retval;
2788 }
2789
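     /*
      * Undo everything attach set up, in roughly reverse order: ioctl
      * node, CAM async callback and path, interrupt handler, SIM and bus
      * registration, then the DMA memory, maps, tags, BAR resources and
      * finally the adapter lock.
      */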
2790 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2791 {
2792         int i;
2793
2794         if (hba->ioctl_dev)
2795                 destroy_dev(hba->ioctl_dev);
2796
2797         if (hba->path) {
2798                 struct ccb_setasync ccb;
2799
2800                 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2801                 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2802                 ccb.event_enable = 0;
2803                 ccb.callback = hptiop_async;
2804                 ccb.callback_arg = hba->sim;
2805                 xpt_action((union ccb *)&ccb);
2806                 xpt_free_path(hba->path);
2807         }
2808
2809         if (hba->irq_handle)
2810                 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2811
2812         if (hba->sim) {
2813                 hptiop_lock_adapter(hba);
2814                 xpt_bus_deregister(cam_sim_path(hba->sim));
2815                 cam_sim_free(hba->sim, TRUE);
2816                 hptiop_unlock_adapter(hba);
2817         }
2818
2819         if (hba->ctlcfg_dmat) {
2820                 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2821                 bus_dmamem_free(hba->ctlcfg_dmat,
2822                                         hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2823                 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2824         }
2825
2826         for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2827                 struct hpt_iop_srb *srb = hba->srb[i];
                     if (srb == NULL)   /* hptiop_map_srb() may have bailed early */
                             continue;
2828                 if (srb->dma_map)
2829                         bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2830                 callout_drain(&srb->timeout);
2831         }
2832
2833         if (hba->srb_dmat) {
2834                 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2835                 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2836                 bus_dma_tag_destroy(hba->srb_dmat);
2837         }
2838
2839         if (hba->io_dmat)
2840                 bus_dma_tag_destroy(hba->io_dmat);
2841
2842         if (hba->parent_dmat)
2843                 bus_dma_tag_destroy(hba->parent_dmat);
2844
2845         if (hba->irq_res)
2846                 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2847                                         0, hba->irq_res);
2848
2849         if (hba->bar0_res)
2850                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2851                                         hba->bar0_rid, hba->bar0_res);
2852         if (hba->bar2_res)
2853                 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2854                                         hba->bar2_rid, hba->bar2_res);
2855         mtx_destroy(&hba->lock);
2856 }