2 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
3 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 #include <sys/cdefs.h>
28 __FBSDID("$FreeBSD$");
30 #include <sys/param.h>
31 #include <sys/types.h>
34 #include <sys/systm.h>
37 #include <sys/malloc.h>
39 #include <sys/libkern.h>
40 #include <sys/kernel.h>
42 #include <sys/kthread.h>
43 #include <sys/mutex.h>
44 #include <sys/module.h>
46 #include <sys/eventhandler.h>
48 #include <sys/taskqueue.h>
49 #include <sys/ioccom.h>
51 #include <machine/resource.h>
52 #include <machine/bus.h>
53 #include <machine/stdarg.h>
59 #include <dev/pci/pcireg.h>
60 #include <dev/pci/pcivar.h>
64 #include <cam/cam_ccb.h>
65 #include <cam/cam_sim.h>
66 #include <cam/cam_xpt_sim.h>
67 #include <cam/cam_debug.h>
68 #include <cam/cam_periph.h>
69 #include <cam/scsi/scsi_all.h>
70 #include <cam/scsi/scsi_message.h>
73 #include <dev/hptiop/hptiop.h>
75 static const char driver_name[] = "hptiop";
76 static const char driver_version[] = "v1.9";
78 static devclass_t hptiop_devclass;
80 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
81 u_int32_t msg, u_int32_t millisec);
82 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
84 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
85 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
87 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
88 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
89 struct hpt_iop_ioctl_param *pParams);
90 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
91 struct hpt_iop_ioctl_param *pParams);
92 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
93 struct hpt_iop_ioctl_param *pParams);
94 static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
95 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
96 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
97 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
98 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
99 struct hpt_iop_request_get_config *config);
100 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
101 struct hpt_iop_request_get_config *config);
102 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
103 struct hpt_iop_request_get_config *config);
104 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
105 struct hpt_iop_request_set_config *config);
106 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
107 struct hpt_iop_request_set_config *config);
108 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
109 struct hpt_iop_request_set_config *config);
110 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
111 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
112 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
113 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
114 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
115 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
116 u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
117 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
118 struct hpt_iop_request_ioctl_command *req,
119 struct hpt_iop_ioctl_param *pParams);
120 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
121 struct hpt_iop_request_ioctl_command *req,
122 struct hpt_iop_ioctl_param *pParams);
123 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
124 struct hpt_iop_srb *srb,
125 bus_dma_segment_t *segs, int nsegs);
126 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
127 struct hpt_iop_srb *srb,
128 bus_dma_segment_t *segs, int nsegs);
129 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
130 struct hpt_iop_srb *srb,
131 bus_dma_segment_t *segs, int nsegs);
132 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
133 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
134 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
135 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
136 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
137 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
138 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
139 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
140 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
141 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
142 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
143 static int hptiop_probe(device_t dev);
144 static int hptiop_attach(device_t dev);
145 static int hptiop_detach(device_t dev);
146 static int hptiop_shutdown(device_t dev);
147 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
148 static void hptiop_poll(struct cam_sim *sim);
149 static void hptiop_async(void *callback_arg, u_int32_t code,
150 struct cam_path *path, void *arg);
151 static void hptiop_pci_intr(void *arg);
152 static void hptiop_release_resource(struct hpt_iop_hba *hba);
153 static void hptiop_reset_adapter(void *argv);
154 static d_open_t hptiop_open;
155 static d_close_t hptiop_close;
156 static d_ioctl_t hptiop_ioctl;
158 static struct cdevsw hptiop_cdevsw = {
159 .d_open = hptiop_open,
160 .d_close = hptiop_close,
161 .d_ioctl = hptiop_ioctl,
162 .d_name = driver_name,
163 .d_version = D_VERSION,
/* Map a cdev unit number back to its per-controller softc. */
166 #define hba_from_dev(dev) \
167 ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
/*
 * Register accessors: 32-bit bus-space read/write at a named field offset
 * within the memory-mapped register block of each controller family.
 * ITL and MV0 use BAR0; MV2 and MVFREY2 use BAR2.  All expand against a
 * local variable 'hba' in the calling scope.
 */
169 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
170 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
171 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
172 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
174 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
175 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
176 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
177 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
178 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
179 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
180 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
181 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
183 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
184 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
185 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
186 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
/*
 * d_open handler for the management device: allow a single opener at a
 * time, tracked by HPT_IOCTL_FLAG_OPEN in hba->flag.
 * NOTE(review): the return statements of this fragment are on lines
 * missing from this extract; the flag test presumably rejects a second
 * concurrent open -- confirm against the original file.
 */
188 static int hptiop_open(ioctl_dev_t dev, int flags,
189 int devtype, ioctl_thread_t proc)
191 struct hpt_iop_hba *hba = hba_from_dev(dev);
195 if (hba->flag & HPT_IOCTL_FLAG_OPEN)
197 hba->flag |= HPT_IOCTL_FLAG_OPEN;
201 static int hptiop_close(ioctl_dev_t dev, int flags,
202 int devtype, ioctl_thread_t proc)
204 struct hpt_iop_hba *hba = hba_from_dev(dev);
205 hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
/*
 * d_ioctl handler: dispatch management requests.  HPT_DO_IOCONTROL is
 * forwarded to the family-specific do_ioctl op; a bus-rescan request
 * calls hptiop_rescan_bus().
 * NOTE(review): the switch framing, the second case label and the
 * default/return paths are on lines missing from this extract.
 */
209 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
210 int flags, ioctl_thread_t proc)
213 struct hpt_iop_hba *hba = hba_from_dev(dev);
218 case HPT_DO_IOCONTROL:
219 ret = hba->ops->do_ioctl(hba,
220 (struct hpt_iop_ioctl_param *)data);
223 ret = hptiop_rescan_bus(hba);
/*
 * Pop one 64-bit entry from the MV outbound queue.  When head != tail an
 * entry is read with a two-word bus_space region read, the tail index is
 * advanced (wrapping at MVIOP_QUEUE_LEN) and written back to hardware.
 * NOTE(review): the destination of the region read, the tail increment
 * and the empty-queue return value are on lines missing from this
 * extract.
 */
232 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
235 u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
236 u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
238 if (outbound_tail != outbound_head) {
239 bus_space_read_region_4(hba->bar2t, hba->bar2h,
240 offsetof(struct hpt_iopmu_mv,
241 outbound_q[outbound_tail]),
/* wrap the tail index back to 0 once it reaches the queue length */
246 if (outbound_tail == MVIOP_QUEUE_LEN)
249 BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail);
/*
 * Push the 64-bit request pointer 'p' onto the MV inbound queue at the
 * current head slot, publish the advanced head index and ring the
 * inbound doorbell so the IOP processes the post queue.
 * NOTE(review): the head-wrap assignment after the MVIOP_QUEUE_LEN test
 * is on a line missing from this extract.
 */
255 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
257 u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
258 u_int32_t head = inbound_head + 1;
260 if (head == MVIOP_QUEUE_LEN)
263 bus_space_write_region_4(hba->bar2t, hba->bar2h,
264 offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
266 BUS_SPACE_WRT4_MV2(inbound_head, head);
267 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
270 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
272 BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
273 BUS_SPACE_RD4_ITL(outbound_intstatus);
276 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
279 BUS_SPACE_WRT4_MV2(inbound_msg, msg);
280 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
282 BUS_SPACE_RD4_MV0(outbound_intmask);
285 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
287 BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
288 BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
/*
 * Poll the ITL inbound queue for up to 'millisec' iterations waiting for
 * the firmware to post a request (i.e. to become ready).  When one
 * appears it is immediately handed back via the outbound queue.
 * NOTE(review): the variable declarations, per-iteration delay and the
 * success/failure return statements are on lines missing from this
 * extract.
 */
291 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
296 for (i = 0; i < millisec; i++) {
297 req = BUS_SPACE_RD4_ITL(inbound_queue);
298 if (req != IOPMU_QUEUE_EMPTY)
303 if (req!=IOPMU_QUEUE_EMPTY) {
304 BUS_SPACE_WRT4_ITL(outbound_queue, req);
/* read back to flush the posted write */
305 BUS_SPACE_RD4_ITL(outbound_intstatus);
312 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
314 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
320 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
323 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
/*
 * Completion handler for ITL-family requests.  'index' either tags a
 * host-allocated SRB (IOPMU_QUEUE_MASK_HOST_BITS set) or is an offset
 * into the IOP's shared memory window.  The routine recovers the
 * SRB/CCB, translates the IOP result code into a CAM status (copying
 * sense data back for check-condition results), returns IOP-owned
 * frames to the outbound queue and frees the SRB.
 * NOTE(review): several lines of the original -- declarations, 'else'
 * and 'break' statements, switch framing and the xpt_done call -- are
 * missing from this extract.
 */
329 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
332 struct hpt_iop_srb *srb;
333 struct hpt_iop_request_scsi_command *req=0;
336 u_int32_t result, temp, dxfer;
339 if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
/* newer firmware encodes the result in the queue tag itself */
340 if (hba->firmware_version > 0x01020000 ||
341 hba->interface_version > 0x01020000) {
342 srb = hba->srb[index & ~(u_int32_t)
343 (IOPMU_QUEUE_ADDR_HOST_BIT
344 | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
345 req = (struct hpt_iop_request_scsi_command *)srb;
346 if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
347 result = IOP_RESULT_SUCCESS;
349 result = req->header.result;
351 srb = hba->srb[index &
352 ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
353 req = (struct hpt_iop_request_scsi_command *)srb;
354 result = req->header.result;
356 dxfer = req->dataxfer_length;
/* IOP-memory request: read type/result directly through BAR0 */
361 temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
362 offsetof(struct hpt_iop_request_header, type));
363 result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
364 offsetof(struct hpt_iop_request_header, result));
366 case IOP_REQUEST_TYPE_IOCTL_COMMAND:
/* echo the context back and wake the sleeping ioctl issuer */
369 bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
370 offsetof(struct hpt_iop_request_header, context),
371 (u_int32_t *)&temp64, 2);
372 wakeup((void *)((unsigned long)hba->u.itl.mu + index));
376 case IOP_REQUEST_TYPE_SCSI_COMMAND:
/* the 64-bit context carries the SRB pointer */
377 bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
378 offsetof(struct hpt_iop_request_header, context),
379 (u_int32_t *)&temp64, 2);
380 srb = (struct hpt_iop_srb *)(unsigned long)temp64;
381 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
382 index + offsetof(struct hpt_iop_request_scsi_command,
385 ccb = (union ccb *)srb->ccb;
386 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
387 cdb = ccb->csio.cdb_io.cdb_ptr;
389 cdb = ccb->csio.cdb_io.cdb_bytes;
391 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
392 ccb->ccb_h.status = CAM_REQ_CMP;
/* translate the IOP result code into a CAM status */
397 case IOP_RESULT_SUCCESS:
398 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
400 bus_dmamap_sync(hba->io_dmat,
401 srb->dma_map, BUS_DMASYNC_POSTREAD);
402 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
405 bus_dmamap_sync(hba->io_dmat,
406 srb->dma_map, BUS_DMASYNC_POSTWRITE);
407 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
411 ccb->ccb_h.status = CAM_REQ_CMP;
414 case IOP_RESULT_BAD_TARGET:
415 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
417 case IOP_RESULT_BUSY:
418 ccb->ccb_h.status = CAM_BUSY;
420 case IOP_RESULT_INVALID_REQUEST:
421 ccb->ccb_h.status = CAM_REQ_INVALID;
423 case IOP_RESULT_FAIL:
424 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
426 case IOP_RESULT_RESET:
427 ccb->ccb_h.status = CAM_BUSY;
429 case IOP_RESULT_CHECK_CONDITION:
430 memset(&ccb->csio.sense_data, 0,
431 sizeof(ccb->csio.sense_data));
432 if (dxfer < ccb->csio.sense_len)
433 ccb->csio.sense_resid = ccb->csio.sense_len -
436 ccb->csio.sense_resid = 0;
/* sense bytes live in the request's sg_list area */
437 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
438 bus_space_read_region_1(hba->bar0t, hba->bar0h,
439 index + offsetof(struct hpt_iop_request_scsi_command,
440 sg_list), (u_int8_t *)&ccb->csio.sense_data,
441 MIN(dxfer, sizeof(ccb->csio.sense_data)));
443 memcpy(&ccb->csio.sense_data, &req->sg_list,
444 MIN(dxfer, sizeof(ccb->csio.sense_data)));
446 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
447 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
448 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
451 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
/* IOP-owned request frames must be returned to the outbound queue */
455 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
456 BUS_SPACE_WRT4_ITL(outbound_queue, index);
458 ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
460 hptiop_free_srb(hba, srb);
/*
 * Drain the ITL outbound queue.  Host-tagged entries complete directly
 * through hptiop_request_callback_itl(); IOP-memory entries are checked
 * for the SYNC_REQUEST flag and have their context echoed back before
 * the callback runs.
 * NOTE(review): bus-handle arguments, some field names, declarations
 * and else/brace framing are on lines missing from this extract.
 */
466 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
470 while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
471 if (req & IOPMU_QUEUE_MASK_HOST_BITS)
472 hptiop_request_callback_itl(hba, req);
474 struct hpt_iop_request_header *p;
476 p = (struct hpt_iop_request_header *)
477 ((char *)hba->u.itl.mu + req);
478 temp = bus_space_read_4(hba->bar0t,
480 offsetof(struct hpt_iop_request_header,
482 if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
484 bus_space_read_region_4(hba->bar0t,
486 offsetof(struct hpt_iop_request_header,
488 (u_int32_t *)&temp64, 2);
490 hptiop_request_callback_itl(hba, req);
493 bus_space_write_region_4(hba->bar0t,
495 offsetof(struct hpt_iop_request_header,
497 (u_int32_t *)&temp64, 2);
500 hptiop_request_callback_itl(hba, req);
/*
 * ITL interrupt handler: acknowledge and dispatch outbound message
 * interrupts, then drain the outbound post queue.
 * NOTE(review): the 'ret' bookkeeping and the return statement are on
 * lines missing from this extract.
 */
505 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
510 status = BUS_SPACE_RD4_ITL(outbound_intstatus);
512 if (status & IOPMU_OUTBOUND_INT_MSG0) {
513 u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
514 KdPrint(("hptiop: received outbound msg %x\n", msg));
/* acknowledge the message interrupt before handling it */
515 BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
516 hptiop_os_message_callback(hba, msg);
520 if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
521 hptiop_drain_outbound_queue_itl(hba);
/*
 * Completion handler for MV-family requests.  The low bits of the
 * 64-bit tag distinguish SCSI, ioctl and get/set-config completions.
 * SCSI completions recover the SRB from the request number embedded in
 * the tag, translate the IOP result into CAM status (with sense
 * copy-back on check condition) and free the SRB; ioctl and config
 * completions only update hba->config_done.
 * NOTE(review): else/break statements, xpt_done calls and some case
 * labels are on lines missing from this extract.
 */
528 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
531 u_int32_t context = (u_int32_t)_tag;
533 if (context & MVIOP_CMD_TYPE_SCSI) {
534 struct hpt_iop_srb *srb;
535 struct hpt_iop_request_scsi_command *req;
539 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
540 req = (struct hpt_iop_request_scsi_command *)srb;
541 ccb = (union ccb *)srb->ccb;
542 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
543 cdb = ccb->csio.cdb_io.cdb_ptr;
545 cdb = ccb->csio.cdb_io.cdb_bytes;
547 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
548 ccb->ccb_h.status = CAM_REQ_CMP;
551 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
552 req->header.result = IOP_RESULT_SUCCESS;
/* translate the IOP result code into a CAM status */
554 switch (req->header.result) {
555 case IOP_RESULT_SUCCESS:
556 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
558 bus_dmamap_sync(hba->io_dmat,
559 srb->dma_map, BUS_DMASYNC_POSTREAD);
560 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
563 bus_dmamap_sync(hba->io_dmat,
564 srb->dma_map, BUS_DMASYNC_POSTWRITE);
565 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
568 ccb->ccb_h.status = CAM_REQ_CMP;
570 case IOP_RESULT_BAD_TARGET:
571 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
573 case IOP_RESULT_BUSY:
574 ccb->ccb_h.status = CAM_BUSY;
576 case IOP_RESULT_INVALID_REQUEST:
577 ccb->ccb_h.status = CAM_REQ_INVALID;
579 case IOP_RESULT_FAIL:
580 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
582 case IOP_RESULT_RESET:
583 ccb->ccb_h.status = CAM_BUSY;
585 case IOP_RESULT_CHECK_CONDITION:
586 memset(&ccb->csio.sense_data, 0,
587 sizeof(ccb->csio.sense_data));
588 if (req->dataxfer_length < ccb->csio.sense_len)
589 ccb->csio.sense_resid = ccb->csio.sense_len -
590 req->dataxfer_length;
592 ccb->csio.sense_resid = 0;
/* sense bytes live in the request's sg_list area */
593 memcpy(&ccb->csio.sense_data, &req->sg_list,
594 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
595 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
596 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
597 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
600 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
604 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
606 hptiop_free_srb(hba, srb);
608 } else if (context & MVIOP_CMD_TYPE_IOCTL) {
609 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
610 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
611 hba->config_done = 1;
613 hba->config_done = -1;
616 (MVIOP_CMD_TYPE_SET_CONFIG |
617 MVIOP_CMD_TYPE_GET_CONFIG))
618 hba->config_done = 1;
620 device_printf(hba->pcidev, "wrong callback type\n");
/*
 * Completion handler for MVFREY-family requests.  The low nibble of the
 * tag is the request type: get/set-config completions just set
 * config_done; SCSI completions recover the SRB from tag bits 4..11,
 * cancel the pending adapter-reset timeout, translate the IOP result
 * into CAM status (with sense copy-back on check condition) and free
 * the SRB; ioctl completions flag config_done and wake the waiter.
 * NOTE(review): else/break statements, xpt_done calls and the switch
 * framing are on lines missing from this extract.
 */
624 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
627 u_int32_t req_type = _tag & 0xf;
629 struct hpt_iop_srb *srb;
630 struct hpt_iop_request_scsi_command *req;
635 case IOP_REQUEST_TYPE_GET_CONFIG:
636 case IOP_REQUEST_TYPE_SET_CONFIG:
637 hba->config_done = 1;
640 case IOP_REQUEST_TYPE_SCSI_COMMAND:
641 srb = hba->srb[(_tag >> 4) & 0xff];
642 req = (struct hpt_iop_request_scsi_command *)srb;
644 ccb = (union ccb *)srb->ccb;
/* the command completed; cancel its pending reset timeout */
646 untimeout(hptiop_reset_adapter, hba, srb->timeout_ch);
648 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
649 cdb = ccb->csio.cdb_io.cdb_ptr;
651 cdb = ccb->csio.cdb_io.cdb_bytes;
653 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
654 ccb->ccb_h.status = CAM_REQ_CMP;
658 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
659 req->header.result = IOP_RESULT_SUCCESS;
/* translate the IOP result code into a CAM status */
661 switch (req->header.result) {
662 case IOP_RESULT_SUCCESS:
663 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
665 bus_dmamap_sync(hba->io_dmat,
666 srb->dma_map, BUS_DMASYNC_POSTREAD);
667 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
670 bus_dmamap_sync(hba->io_dmat,
671 srb->dma_map, BUS_DMASYNC_POSTWRITE);
672 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
675 ccb->ccb_h.status = CAM_REQ_CMP;
677 case IOP_RESULT_BAD_TARGET:
678 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
680 case IOP_RESULT_BUSY:
681 ccb->ccb_h.status = CAM_BUSY;
683 case IOP_RESULT_INVALID_REQUEST:
684 ccb->ccb_h.status = CAM_REQ_INVALID;
686 case IOP_RESULT_FAIL:
687 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
689 case IOP_RESULT_RESET:
690 ccb->ccb_h.status = CAM_BUSY;
692 case IOP_RESULT_CHECK_CONDITION:
693 memset(&ccb->csio.sense_data, 0,
694 sizeof(ccb->csio.sense_data));
695 if (req->dataxfer_length < ccb->csio.sense_len)
696 ccb->csio.sense_resid = ccb->csio.sense_len -
697 req->dataxfer_length;
699 ccb->csio.sense_resid = 0;
/* sense bytes live in the request's sg_list area */
700 memcpy(&ccb->csio.sense_data, &req->sg_list,
701 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
702 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
703 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
704 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
707 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
711 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
713 hptiop_free_srb(hba, srb);
716 case IOP_REQUEST_TYPE_IOCTL_COMMAND:
717 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
718 hba->config_done = 1;
720 hba->config_done = -1;
721 wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
724 device_printf(hba->pcidev, "wrong callback type\n");
/*
 * Drain the MV outbound queue; each host-tagged entry that carries a
 * return-context completes through hptiop_request_callback_mv().
 * NOTE(review): the closing braces (and any else path) of this loop are
 * on lines missing from this extract.
 */
729 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
733 while ((req = hptiop_mv_outbound_read(hba))) {
734 if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
735 if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
736 hptiop_request_callback_mv(hba, req);
/*
 * MV interrupt handler: read the outbound doorbell, acknowledge the
 * asserted bits by writing back their complement, dispatch message
 * interrupts, then drain the outbound post queue.
 * NOTE(review): the return-value bookkeeping is on lines missing from
 * this extract.
 */
742 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
747 status = BUS_SPACE_RD4_MV0(outbound_doorbell);
750 BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
752 if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
753 u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
754 KdPrint(("hptiop: received outbound msg %x\n", msg));
755 hptiop_os_message_callback(hba, msg);
759 if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
760 hptiop_drain_outbound_queue_mv(hba);
/*
 * MVFREY interrupt handler: mask F0 interrupts while running (only once
 * the adapter is initialized), acknowledge doorbell and ISR-cause bits,
 * dispatch message interrupts, then walk the outbound completion list
 * from outlist_rptr up to the producer index published by the hardware
 * at *hba->u.mvfrey.outlist_cptr, dispatching each tag.  Interrupts are
 * re-enabled (0x1010) on the way out.
 * NOTE(review): the return-value bookkeeping and the do-loop opening of
 * the re-read at line 801 are on lines missing from this extract.
 */
767 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
769 u_int32_t status, _tag, cptr;
772 if (hba->initialized) {
773 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
776 status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
778 BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
779 if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
780 u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
781 hptiop_os_message_callback(hba, msg);
786 status = BUS_SPACE_RD4_MVFREY2(isr_cause);
788 BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
/* consume completions up to the controller's current producer index */
790 cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
791 while (hba->u.mvfrey.outlist_rptr != cptr) {
792 hba->u.mvfrey.outlist_rptr++;
793 if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
794 hba->u.mvfrey.outlist_rptr = 0;
797 _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
798 hptiop_request_callback_mvfrey(hba, _tag);
801 } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
804 if (hba->initialized) {
805 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
/*
 * Post a request frame (IOP offset 'req32') to the ITL inbound queue
 * and poll for completion, servicing interrupts each iteration and
 * re-reading the frame's context words, for up to 'millisec'
 * iterations.
 * NOTE(review): the context test, per-iteration delay and return
 * statements are on lines missing from this extract.
 */
811 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
812 u_int32_t req32, u_int32_t millisec)
817 BUS_SPACE_WRT4_ITL(inbound_queue, req32);
/* read back to flush the posted write */
818 BUS_SPACE_RD4_ITL(outbound_intstatus);
820 for (i = 0; i < millisec; i++) {
821 hptiop_intr_itl(hba);
822 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
823 offsetof(struct hpt_iop_request_header, context),
824 (u_int32_t *)&temp64, 2);
/*
 * Post a control request to an MV IOP and poll hba->config_done for up
 * to 'millisec' iterations.  The request is tagged SYNC_REQUEST |
 * OUTPUT_CONTEXT and delivered through the inbound queue / doorbell
 * path by physical address.
 * NOTE(review): the per-iteration delay and the return statements are
 * on lines missing from this extract.
 */
833 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
834 void *req, u_int32_t millisec)
838 hba->config_done = 0;
840 phy_addr = hba->ctlcfgcmd_phy |
841 (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
842 ((struct hpt_iop_request_get_config *)req)->header.flags |=
843 IOP_REQUEST_FLAG_SYNC_REQUEST |
844 IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
845 hptiop_mv_inbound_write(phy_addr, hba);
/* read back to flush the posted writes */
846 BUS_SPACE_RD4_MV0(outbound_intmask);
848 for (i = 0; i < millisec; i++) {
850 if (hba->config_done)
/*
 * Post a control request through the MVFREY inbound list: the header's
 * flags/context fields encode the request's physical address, the next
 * inbound-list slot is filled, the hardware write pointer advanced, and
 * then config_done is polled (with interrupt servicing) for up to
 * 'millisec' iterations.
 * NOTE(review): variable declarations, the per-iteration delay and the
 * return statements are on lines missing from this extract.
 */
857 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
858 void *req, u_int32_t millisec)
862 struct hpt_iop_request_header *reqhdr =
863 (struct hpt_iop_request_header *)req;
865 hba->config_done = 0;
867 phy_addr = hba->ctlcfgcmd_phy;
868 reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
869 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
870 | IOP_REQUEST_FLAG_ADDR_BITS
871 | ((phy_addr >> 16) & 0xffff0000);
872 reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
873 | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
875 hba->u.mvfrey.inlist_wptr++;
876 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
878 if (index == hba->u.mvfrey.list_count) {
/* wrap: clear the index bits and flip the list toggle bit */
880 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
881 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
884 hba->u.mvfrey.inlist[index].addr = phy_addr;
885 hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
887 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
/* read back to flush the posted write */
888 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
890 for (i = 0; i < millisec; i++) {
891 hptiop_intr_mvfrey(hba);
892 if (hba->config_done)
/*
 * Send a message to the IOP through the family-specific post_msg op and
 * poll, servicing interrupts, until hba->msg_done is set or 'millisec'
 * iterations elapse.  Returns 0 on success, -1 on timeout.
 * NOTE(review): the msg_done reset, the in-loop done test and the delay
 * are on lines missing from this extract.
 */
899 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
900 u_int32_t msg, u_int32_t millisec)
905 hba->ops->post_msg(hba, msg);
907 for (i=0; i<millisec; i++) {
908 hba->ops->iop_intr(hba);
914 return hba->msg_done? 0 : -1;
/*
 * Fetch the firmware GET_CONFIG block on ITL hardware: allocate a frame
 * from the inbound queue, copy the request header in, run it
 * synchronously (20 s budget), copy the full reply back out and return
 * the frame via the outbound queue.
 * NOTE(review): the failure-path return statements are on lines missing
 * from this extract.
 */
917 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
918 struct hpt_iop_request_get_config * config)
922 config->header.size = sizeof(struct hpt_iop_request_get_config);
923 config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
924 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
925 config->header.result = IOP_RESULT_PENDING;
926 config->header.context = 0;
928 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
929 if (req32 == IOPMU_QUEUE_EMPTY)
/* only the header goes in; the firmware fills the rest of the frame */
932 bus_space_write_region_4(hba->bar0t, hba->bar0h,
933 req32, (u_int32_t *)config,
934 sizeof(struct hpt_iop_request_header) >> 2);
936 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
937 KdPrint(("hptiop: get config send cmd failed"));
941 bus_space_read_region_4(hba->bar0t, hba->bar0h,
942 req32, (u_int32_t *)config,
943 sizeof(struct hpt_iop_request_get_config) >> 2);
945 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
/*
 * Fetch the firmware GET_CONFIG block on MV hardware using the shared
 * control/config buffer (hba->ctlcfg_ptr) and the synchronous MV
 * request path (20 s budget).
 * NOTE(review): the copy of the reply into 'config' and the return
 * statements are on lines missing from this extract.
 */
950 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
951 struct hpt_iop_request_get_config * config)
953 struct hpt_iop_request_get_config *req;
955 if (!(req = hba->ctlcfg_ptr))
958 req->header.flags = 0;
959 req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
960 req->header.size = sizeof(struct hpt_iop_request_get_config);
961 req->header.result = IOP_RESULT_PENDING;
962 req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
964 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
965 KdPrint(("hptiop: get config send cmd failed"));
/*
 * MVFREY variant: the firmware publishes its config block directly at
 * hba->u.mvfrey.config; validate its header, then copy the fields into
 * the caller's 'config'.
 * NOTE(review): the return statements are on lines missing from this
 * extract.
 */
973 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
974 struct hpt_iop_request_get_config * config)
976 struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
978 if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
979 info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
980 KdPrint(("hptiop: header size %x/%x type %x/%x",
981 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
982 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
986 config->interface_version = info->interface_version;
987 config->firmware_version = info->firmware_version;
988 config->max_requests = info->max_requests;
989 config->request_size = info->request_size;
990 config->max_sg_count = info->max_sg_count;
991 config->data_transfer_length = info->data_transfer_length;
992 config->alignment_mask = info->alignment_mask;
993 config->max_devices = info->max_devices;
994 config->sdram_size = info->sdram_size;
996 KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
997 config->max_requests, config->request_size,
998 config->data_transfer_length, config->max_devices,
999 config->sdram_size));
/*
 * Push a SET_CONFIG block to ITL firmware: allocate an inbound frame,
 * copy the whole request in, run it synchronously (20 s budget) and
 * return the frame via the outbound queue.
 * NOTE(review): the failure-path return statements are on lines missing
 * from this extract.
 */
1004 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
1005 struct hpt_iop_request_set_config *config)
1009 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1011 if (req32 == IOPMU_QUEUE_EMPTY)
1014 config->header.size = sizeof(struct hpt_iop_request_set_config);
1015 config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1016 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1017 config->header.result = IOP_RESULT_PENDING;
1018 config->header.context = 0;
1020 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1021 (u_int32_t *)config,
1022 sizeof(struct hpt_iop_request_set_config) >> 2);
1024 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1025 KdPrint(("hptiop: set config send cmd failed"));
1029 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
/*
 * Push a SET_CONFIG block to MV firmware via the shared control buffer:
 * copy the payload (everything after the header) into ctlcfg_ptr, fill
 * in a fresh header and run the request synchronously (20 s budget).
 * NOTE(review): the return statements are on lines missing from this
 * extract.
 */
1034 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1035 struct hpt_iop_request_set_config *config)
1037 struct hpt_iop_request_set_config *req;
1039 if (!(req = hba->ctlcfg_ptr))
1042 memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1043 (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1044 sizeof(struct hpt_iop_request_set_config) -
1045 sizeof(struct hpt_iop_request_header));
1047 req->header.flags = 0;
1048 req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1049 req->header.size = sizeof(struct hpt_iop_request_set_config);
1050 req->header.result = IOP_RESULT_PENDING;
1051 req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
1053 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1054 KdPrint(("hptiop: set config send cmd failed"));
/*
 * Push a SET_CONFIG block to MVFREY firmware via the shared control
 * buffer and the MVFREY synchronous request path (20 s budget); the
 * payload after the header is copied in and a fresh header filled.
 * NOTE(review): the return statements are on lines missing from this
 * extract.
 */
1061 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1062 struct hpt_iop_request_set_config *config)
1064 struct hpt_iop_request_set_config *req;
1066 if (!(req = hba->ctlcfg_ptr))
1069 memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1070 (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1071 sizeof(struct hpt_iop_request_set_config) -
1072 sizeof(struct hpt_iop_request_header));
1074 req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1075 req->header.size = sizeof(struct hpt_iop_request_set_config);
1076 req->header.result = IOP_RESULT_PENDING;
1078 if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1079 KdPrint(("hptiop: set config send cmd failed"));
/*
 * Build an ITL ioctl pass-through request, copy it into the IOP frame
 * at offset req32, post it, then sleep waiting for the completion
 * callback's wakeup; a sleep timeout triggers an adapter reset with a
 * 60 s budget.  Requests whose 4-byte-aligned input size plus output
 * size exceed the per-request buffer space are rejected.
 * NOTE(review): variable declarations, the size-check's return and the
 * final return are on lines missing from this extract.
 */
1086 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1088 struct hpt_iop_ioctl_param *pParams)
1091 struct hpt_iop_request_ioctl_command req;
1093 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1094 (hba->max_request_size -
1095 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1096 device_printf(hba->pcidev, "request size beyond max value");
1100 req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1101 + pParams->nInBufferSize;
1102 req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1103 req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1104 req.header.result = IOP_RESULT_PENDING;
/* the context doubles as the wakeup channel (mu base + frame offset) */
1105 req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1106 req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1107 req.inbuf_size = pParams->nInBufferSize;
1108 req.outbuf_size = pParams->nOutBufferSize;
1109 req.bytes_returned = 0;
1111 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1112 offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
1114 hptiop_lock_adapter(hba);
1116 BUS_SPACE_WRT4_ITL(inbound_queue, req32);
/* read back to flush the posted write */
1117 BUS_SPACE_RD4_ITL(outbound_intstatus);
1119 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1120 offsetof(struct hpt_iop_request_ioctl_command, header.context),
1121 (u_int32_t *)&temp64, 2);
1123 if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1124 PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1126 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1127 bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1128 offsetof(struct hpt_iop_request_ioctl_command,
1130 (u_int32_t *)&temp64, 2);
1133 hptiop_unlock_adapter(hba);
1137 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1138 void *user, int size)
1143 for (i=0; i<size; i++) {
1144 if (copyin((u_int8_t *)user + i, &byte, 1))
1146 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
1152 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1153 void *user, int size)
1158 for (i=0; i<size; i++) {
1159 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1160 if (copyout(&byte, (u_int8_t *)user + i, 1))
/*
 * ITL ioctl front end: validate the request magic, allocate an inbound
 * frame, copy the user's input buffer into the frame's buf area, run
 * the request via hptiop_post_ioctl_command_itl(), and on success copy
 * the output buffer and bytes-returned count back to userland before
 * releasing the frame.  Both the success and failure paths return the
 * frame through the outbound queue.
 * NOTE(review): declarations, error labels and return statements are on
 * lines missing from this extract.
 */
1167 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1168 struct hpt_iop_ioctl_param * pParams)
1173 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1174 (pParams->Magic != HPT_IOCTL_MAGIC32))
1177 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1178 if (req32 == IOPMU_QUEUE_EMPTY)
1181 if (pParams->nInBufferSize)
1182 if (hptiop_bus_space_copyin(hba, req32 +
1183 offsetof(struct hpt_iop_request_ioctl_command, buf),
1184 (void *)pParams->lpInBuffer, pParams->nInBufferSize))
1187 if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1190 result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1191 offsetof(struct hpt_iop_request_ioctl_command,
1194 if (result == IOP_RESULT_SUCCESS) {
1195 if (pParams->nOutBufferSize)
/* output area begins after the 4-byte-aligned input area */
1196 if (hptiop_bus_space_copyout(hba, req32 +
1197 offsetof(struct hpt_iop_request_ioctl_command, buf) +
1198 ((pParams->nInBufferSize + 3) & ~3),
1199 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1202 if (pParams->lpBytesReturned) {
1203 if (hptiop_bus_space_copyout(hba, req32 +
1204 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1205 (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
1209 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1214 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1220 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1221 struct hpt_iop_request_ioctl_command *req,
1222 struct hpt_iop_ioctl_param *pParams)
1227 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1228 (hba->max_request_size -
1229 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1230 device_printf(hba->pcidev, "request size beyond max value");
1234 req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1235 req->inbuf_size = pParams->nInBufferSize;
1236 req->outbuf_size = pParams->nOutBufferSize;
1237 req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1238 + pParams->nInBufferSize;
1239 req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1240 req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1241 req->header.result = IOP_RESULT_PENDING;
1242 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
1243 size = req->header.size >> 8;
1244 size = size > 3 ? 3 : size;
1245 req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1246 hptiop_mv_inbound_write(req_phy, hba);
1248 BUS_SPACE_RD4_MV0(outbound_intmask);
1250 while (hba->config_done == 0) {
1251 if (hptiop_sleep(hba, req, PPAUSE,
1252 "hptctl", HPT_OSM_TIMEOUT)==0)
1254 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1259 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1260 struct hpt_iop_ioctl_param *pParams)
1262 struct hpt_iop_request_ioctl_command *req;
1264 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1265 (pParams->Magic != HPT_IOCTL_MAGIC32))
1268 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1269 hba->config_done = 0;
1270 hptiop_lock_adapter(hba);
1271 if (pParams->nInBufferSize)
1272 if (copyin((void *)pParams->lpInBuffer,
1273 req->buf, pParams->nInBufferSize))
1275 if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1278 if (hba->config_done == 1) {
1279 if (pParams->nOutBufferSize)
1280 if (copyout(req->buf +
1281 ((pParams->nInBufferSize + 3) & ~3),
1282 (void *)pParams->lpOutBuffer,
1283 pParams->nOutBufferSize))
1286 if (pParams->lpBytesReturned)
1287 if (copyout(&req->bytes_returned,
1288 (void*)pParams->lpBytesReturned,
1291 hptiop_unlock_adapter(hba);
1295 hptiop_unlock_adapter(hba);
1300 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1301 struct hpt_iop_request_ioctl_command *req,
1302 struct hpt_iop_ioctl_param *pParams)
1307 phy_addr = hba->ctlcfgcmd_phy;
1309 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1310 (hba->max_request_size -
1311 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1312 device_printf(hba->pcidev, "request size beyond max value");
1316 req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1317 req->inbuf_size = pParams->nInBufferSize;
1318 req->outbuf_size = pParams->nOutBufferSize;
1319 req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1320 + pParams->nInBufferSize;
1322 req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1323 req->header.result = IOP_RESULT_PENDING;
1325 req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1326 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1327 | IOP_REQUEST_FLAG_ADDR_BITS
1328 | ((phy_addr >> 16) & 0xffff0000);
1329 req->header.context = ((phy_addr & 0xffffffff) << 32 )
1330 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
1332 hba->u.mvfrey.inlist_wptr++;
1333 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1335 if (index == hba->u.mvfrey.list_count) {
1337 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1338 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1341 hba->u.mvfrey.inlist[index].addr = phy_addr;
1342 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
1344 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1345 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1347 while (hba->config_done == 0) {
1348 if (hptiop_sleep(hba, req, PPAUSE,
1349 "hptctl", HPT_OSM_TIMEOUT)==0)
1351 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1356 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1357 struct hpt_iop_ioctl_param *pParams)
1359 struct hpt_iop_request_ioctl_command *req;
1361 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1362 (pParams->Magic != HPT_IOCTL_MAGIC32))
1365 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
1366 hba->config_done = 0;
1367 hptiop_lock_adapter(hba);
1368 if (pParams->nInBufferSize)
1369 if (copyin((void *)pParams->lpInBuffer,
1370 req->buf, pParams->nInBufferSize))
1372 if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1375 if (hba->config_done == 1) {
1376 if (pParams->nOutBufferSize)
1377 if (copyout(req->buf +
1378 ((pParams->nInBufferSize + 3) & ~3),
1379 (void *)pParams->lpOutBuffer,
1380 pParams->nOutBufferSize))
1383 if (pParams->lpBytesReturned)
1384 if (copyout(&req->bytes_returned,
1385 (void*)pParams->lpBytesReturned,
1388 hptiop_unlock_adapter(hba);
1392 hptiop_unlock_adapter(hba);
1397 static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
1401 if ((ccb = xpt_alloc_ccb()) == NULL)
1403 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1404 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
1412 static bus_dmamap_callback_t hptiop_map_srb;
1413 static bus_dmamap_callback_t hptiop_post_scsi_command;
1414 static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;
1415 static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg;
1417 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1419 hba->bar0_rid = 0x10;
1420 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1421 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1423 if (hba->bar0_res == NULL) {
1424 device_printf(hba->pcidev,
1425 "failed to get iop base adrress.\n");
1428 hba->bar0t = rman_get_bustag(hba->bar0_res);
1429 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1430 hba->u.itl.mu = (struct hpt_iopmu_itl *)
1431 rman_get_virtual(hba->bar0_res);
1433 if (!hba->u.itl.mu) {
1434 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1435 hba->bar0_rid, hba->bar0_res);
1436 device_printf(hba->pcidev, "alloc mem res failed\n");
1443 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1445 hba->bar0_rid = 0x10;
1446 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1447 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1449 if (hba->bar0_res == NULL) {
1450 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1453 hba->bar0t = rman_get_bustag(hba->bar0_res);
1454 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1455 hba->u.mv.regs = (struct hpt_iopmv_regs *)
1456 rman_get_virtual(hba->bar0_res);
1458 if (!hba->u.mv.regs) {
1459 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1460 hba->bar0_rid, hba->bar0_res);
1461 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1465 hba->bar2_rid = 0x18;
1466 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1467 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1469 if (hba->bar2_res == NULL) {
1470 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1471 hba->bar0_rid, hba->bar0_res);
1472 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1476 hba->bar2t = rman_get_bustag(hba->bar2_res);
1477 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1478 hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1480 if (!hba->u.mv.mu) {
1481 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1482 hba->bar0_rid, hba->bar0_res);
1483 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1484 hba->bar2_rid, hba->bar2_res);
1485 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1492 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1494 hba->bar0_rid = 0x10;
1495 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1496 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1498 if (hba->bar0_res == NULL) {
1499 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1502 hba->bar0t = rman_get_bustag(hba->bar0_res);
1503 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1504 hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1505 rman_get_virtual(hba->bar0_res);
1507 if (!hba->u.mvfrey.config) {
1508 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1509 hba->bar0_rid, hba->bar0_res);
1510 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1514 hba->bar2_rid = 0x18;
1515 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1516 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1518 if (hba->bar2_res == NULL) {
1519 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1520 hba->bar0_rid, hba->bar0_res);
1521 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1525 hba->bar2t = rman_get_bustag(hba->bar2_res);
1526 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1528 (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1530 if (!hba->u.mvfrey.mu) {
1531 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1532 hba->bar0_rid, hba->bar0_res);
1533 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1534 hba->bar2_rid, hba->bar2_res);
1535 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
1542 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1545 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1546 hba->bar0_rid, hba->bar0_res);
1549 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1552 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1553 hba->bar0_rid, hba->bar0_res);
1555 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1556 hba->bar2_rid, hba->bar2_res);
1559 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1562 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1563 hba->bar0_rid, hba->bar0_res);
1565 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1566 hba->bar2_rid, hba->bar2_res);
1569 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1571 if (bus_dma_tag_create(hba->parent_dmat,
1574 BUS_SPACE_MAXADDR_32BIT,
1579 BUS_SPACE_MAXSIZE_32BIT,
1583 &hba->ctlcfg_dmat)) {
1584 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1588 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1589 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1590 &hba->ctlcfg_dmamap) != 0) {
1591 device_printf(hba->pcidev,
1592 "bus_dmamem_alloc failed!\n");
1593 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1597 if (bus_dmamap_load(hba->ctlcfg_dmat,
1598 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1599 MVIOP_IOCTLCFG_SIZE,
1600 hptiop_mv_map_ctlcfg, hba, 0)) {
1601 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1602 if (hba->ctlcfg_dmat) {
1603 bus_dmamem_free(hba->ctlcfg_dmat,
1604 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1605 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1613 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1615 u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1619 if (list_count == 0) {
1623 hba->u.mvfrey.list_count = list_count;
1624 hba->u.mvfrey.internal_mem_size = 0x800
1625 + list_count * sizeof(struct mvfrey_inlist_entry)
1626 + list_count * sizeof(struct mvfrey_outlist_entry)
1628 if (bus_dma_tag_create(hba->parent_dmat,
1631 BUS_SPACE_MAXADDR_32BIT,
1634 hba->u.mvfrey.internal_mem_size,
1636 BUS_SPACE_MAXSIZE_32BIT,
1640 &hba->ctlcfg_dmat)) {
1641 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1645 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1646 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1647 &hba->ctlcfg_dmamap) != 0) {
1648 device_printf(hba->pcidev,
1649 "bus_dmamem_alloc failed!\n");
1650 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1654 if (bus_dmamap_load(hba->ctlcfg_dmat,
1655 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1656 hba->u.mvfrey.internal_mem_size,
1657 hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1658 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1659 if (hba->ctlcfg_dmat) {
1660 bus_dmamem_free(hba->ctlcfg_dmat,
1661 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1662 bus_dma_tag_destroy(hba->ctlcfg_dmat);
/* ITL adapters allocate no internal DMA memory; nothing to free. */
static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba)
{
	return 0;
}
1674 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1676 if (hba->ctlcfg_dmat) {
1677 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1678 bus_dmamem_free(hba->ctlcfg_dmat,
1679 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1680 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1686 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1688 if (hba->ctlcfg_dmat) {
1689 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1690 bus_dmamem_free(hba->ctlcfg_dmat,
1691 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1692 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1698 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1702 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1705 /* wait 100ms for MCU ready */
1710 BUS_SPACE_WRT4_MVFREY2(inbound_base,
1711 hba->u.mvfrey.inlist_phy & 0xffffffff);
1712 BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1713 (hba->u.mvfrey.inlist_phy >> 16) >> 16);
1715 BUS_SPACE_WRT4_MVFREY2(outbound_base,
1716 hba->u.mvfrey.outlist_phy & 0xffffffff);
1717 BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1718 (hba->u.mvfrey.outlist_phy >> 16) >> 16);
1720 BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1721 hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1722 BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1723 (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
1725 hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1726 | CL_POINTER_TOGGLE;
1727 *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1728 | CL_POINTER_TOGGLE;
1729 hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1735 * CAM driver interface
1737 static device_method_t driver_methods[] = {
1738 /* Device interface */
1739 DEVMETHOD(device_probe, hptiop_probe),
1740 DEVMETHOD(device_attach, hptiop_attach),
1741 DEVMETHOD(device_detach, hptiop_detach),
1742 DEVMETHOD(device_shutdown, hptiop_shutdown),
1746 static struct hptiop_adapter_ops hptiop_itl_ops = {
1747 .family = INTEL_BASED_IOP,
1748 .iop_wait_ready = hptiop_wait_ready_itl,
1749 .internal_memalloc = 0,
1750 .internal_memfree = hptiop_internal_memfree_itl,
1751 .alloc_pci_res = hptiop_alloc_pci_res_itl,
1752 .release_pci_res = hptiop_release_pci_res_itl,
1753 .enable_intr = hptiop_enable_intr_itl,
1754 .disable_intr = hptiop_disable_intr_itl,
1755 .get_config = hptiop_get_config_itl,
1756 .set_config = hptiop_set_config_itl,
1757 .iop_intr = hptiop_intr_itl,
1758 .post_msg = hptiop_post_msg_itl,
1759 .post_req = hptiop_post_req_itl,
1760 .do_ioctl = hptiop_do_ioctl_itl,
1764 static struct hptiop_adapter_ops hptiop_mv_ops = {
1765 .family = MV_BASED_IOP,
1766 .iop_wait_ready = hptiop_wait_ready_mv,
1767 .internal_memalloc = hptiop_internal_memalloc_mv,
1768 .internal_memfree = hptiop_internal_memfree_mv,
1769 .alloc_pci_res = hptiop_alloc_pci_res_mv,
1770 .release_pci_res = hptiop_release_pci_res_mv,
1771 .enable_intr = hptiop_enable_intr_mv,
1772 .disable_intr = hptiop_disable_intr_mv,
1773 .get_config = hptiop_get_config_mv,
1774 .set_config = hptiop_set_config_mv,
1775 .iop_intr = hptiop_intr_mv,
1776 .post_msg = hptiop_post_msg_mv,
1777 .post_req = hptiop_post_req_mv,
1778 .do_ioctl = hptiop_do_ioctl_mv,
1782 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1783 .family = MVFREY_BASED_IOP,
1784 .iop_wait_ready = hptiop_wait_ready_mvfrey,
1785 .internal_memalloc = hptiop_internal_memalloc_mvfrey,
1786 .internal_memfree = hptiop_internal_memfree_mvfrey,
1787 .alloc_pci_res = hptiop_alloc_pci_res_mvfrey,
1788 .release_pci_res = hptiop_release_pci_res_mvfrey,
1789 .enable_intr = hptiop_enable_intr_mvfrey,
1790 .disable_intr = hptiop_disable_intr_mvfrey,
1791 .get_config = hptiop_get_config_mvfrey,
1792 .set_config = hptiop_set_config_mvfrey,
1793 .iop_intr = hptiop_intr_mvfrey,
1794 .post_msg = hptiop_post_msg_mvfrey,
1795 .post_req = hptiop_post_req_mvfrey,
1796 .do_ioctl = hptiop_do_ioctl_mvfrey,
1797 .reset_comm = hptiop_reset_comm_mvfrey,
1800 static driver_t hptiop_pci_driver = {
1803 sizeof(struct hpt_iop_hba)
1806 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1807 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
1809 static int hptiop_probe(device_t dev)
1811 struct hpt_iop_hba *hba;
1813 static char buf[256];
1815 struct hptiop_adapter_ops *ops;
1817 if (pci_get_vendor(dev) != 0x1103)
1820 id = pci_get_device(dev);
1830 ops = &hptiop_mvfrey_ops;
1851 ops = &hptiop_itl_ops;
1856 ops = &hptiop_mv_ops;
1862 device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1863 pci_get_bus(dev), pci_get_slot(dev),
1864 pci_get_function(dev), pci_get_irq(dev));
1866 sprintf(buf, "RocketRAID %x %s Controller\n",
1867 id, sas ? "SAS" : "SATA");
1868 device_set_desc_copy(dev, buf);
1870 hba = (struct hpt_iop_hba *)device_get_softc(dev);
1871 bzero(hba, sizeof(struct hpt_iop_hba));
1874 KdPrint(("hba->ops=%p\n", hba->ops));
1878 static int hptiop_attach(device_t dev)
1880 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1881 struct hpt_iop_request_get_config iop_config;
1882 struct hpt_iop_request_set_config set_config;
1884 struct cam_devq *devq;
1885 struct ccb_setasync ccb;
1886 u_int32_t unit = device_get_unit(dev);
1888 device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1889 unit, driver_version);
1891 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1892 pci_get_bus(dev), pci_get_slot(dev),
1893 pci_get_function(dev), hba->ops));
1895 pci_enable_busmaster(dev);
1897 hba->pciunit = unit;
1899 if (hba->ops->alloc_pci_res(hba))
1902 if (hba->ops->iop_wait_ready(hba, 2000)) {
1903 device_printf(dev, "adapter is not ready\n");
1904 goto release_pci_res;
1907 mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1909 if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1912 BUS_SPACE_MAXADDR, /* lowaddr */
1913 BUS_SPACE_MAXADDR, /* highaddr */
1914 NULL, NULL, /* filter, filterarg */
1915 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1916 BUS_SPACE_UNRESTRICTED, /* nsegments */
1917 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1919 NULL, /* lockfunc */
1920 NULL, /* lockfuncarg */
1921 &hba->parent_dmat /* tag */))
1923 device_printf(dev, "alloc parent_dmat failed\n");
1924 goto release_pci_res;
1927 if (hba->ops->family == MV_BASED_IOP) {
1928 if (hba->ops->internal_memalloc(hba)) {
1929 device_printf(dev, "alloc srb_dmat failed\n");
1930 goto destroy_parent_tag;
1934 if (hba->ops->get_config(hba, &iop_config)) {
1935 device_printf(dev, "get iop config failed.\n");
1936 goto get_config_failed;
1939 hba->firmware_version = iop_config.firmware_version;
1940 hba->interface_version = iop_config.interface_version;
1941 hba->max_requests = iop_config.max_requests;
1942 hba->max_devices = iop_config.max_devices;
1943 hba->max_request_size = iop_config.request_size;
1944 hba->max_sg_count = iop_config.max_sg_count;
1946 if (hba->ops->family == MVFREY_BASED_IOP) {
1947 if (hba->ops->internal_memalloc(hba)) {
1948 device_printf(dev, "alloc srb_dmat failed\n");
1949 goto destroy_parent_tag;
1951 if (hba->ops->reset_comm(hba)) {
1952 device_printf(dev, "reset comm failed\n");
1953 goto get_config_failed;
1957 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1959 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1960 BUS_SPACE_MAXADDR, /* lowaddr */
1961 BUS_SPACE_MAXADDR, /* highaddr */
1962 NULL, NULL, /* filter, filterarg */
1963 PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */
1964 hba->max_sg_count, /* nsegments */
1965 0x20000, /* maxsegsize */
1966 BUS_DMA_ALLOCNOW, /* flags */
1967 busdma_lock_mutex, /* lockfunc */
1968 &hba->lock, /* lockfuncarg */
1969 &hba->io_dmat /* tag */))
1971 device_printf(dev, "alloc io_dmat failed\n");
1972 goto get_config_failed;
1975 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1978 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1979 BUS_SPACE_MAXADDR, /* highaddr */
1980 NULL, NULL, /* filter, filterarg */
1981 HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1983 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1985 NULL, /* lockfunc */
1986 NULL, /* lockfuncarg */
1987 &hba->srb_dmat /* tag */))
1989 device_printf(dev, "alloc srb_dmat failed\n");
1990 goto destroy_io_dmat;
1993 if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1994 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1995 &hba->srb_dmamap) != 0)
1997 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1998 goto destroy_srb_dmat;
2001 if (bus_dmamap_load(hba->srb_dmat,
2002 hba->srb_dmamap, hba->uncached_ptr,
2003 (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
2004 hptiop_map_srb, hba, 0))
2006 device_printf(dev, "bus_dmamap_load failed!\n");
2007 goto srb_dmamem_free;
2010 if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
2011 device_printf(dev, "cam_simq_alloc failed\n");
2012 goto srb_dmamap_unload;
2015 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2016 hba, unit, &Giant, hba->max_requests - 1, 1, devq);
2018 device_printf(dev, "cam_sim_alloc failed\n");
2019 cam_simq_free(devq);
2020 goto srb_dmamap_unload;
2022 if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2024 device_printf(dev, "xpt_bus_register failed\n");
2028 if (xpt_create_path(&hba->path, /*periph */ NULL,
2029 cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2030 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2031 device_printf(dev, "xpt_create_path failed\n");
2032 goto deregister_xpt_bus;
2035 bzero(&set_config, sizeof(set_config));
2036 set_config.iop_id = unit;
2037 set_config.vbus_id = cam_sim_path(hba->sim);
2038 set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2040 if (hba->ops->set_config(hba, &set_config)) {
2041 device_printf(dev, "set iop config failed.\n");
2045 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2046 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2047 ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2048 ccb.callback = hptiop_async;
2049 ccb.callback_arg = hba->sim;
2050 xpt_action((union ccb *)&ccb);
2053 if ((hba->irq_res = bus_alloc_resource(hba->pcidev, SYS_RES_IRQ,
2054 &rid, 0, ~0ul, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2055 device_printf(dev, "allocate irq failed!\n");
2059 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM,
2060 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2062 device_printf(dev, "allocate intr function failed!\n");
2063 goto free_irq_resource;
2066 if (hptiop_send_sync_msg(hba,
2067 IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2068 device_printf(dev, "fail to start background task\n");
2069 goto teartown_irq_resource;
2072 hba->ops->enable_intr(hba);
2073 hba->initialized = 1;
2075 hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2076 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2077 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
2083 teartown_irq_resource:
2084 bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2087 bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2090 xpt_free_path(hba->path);
2093 xpt_bus_deregister(cam_sim_path(hba->sim));
2096 cam_sim_free(hba->sim, /*free devq*/ TRUE);
2099 if (hba->uncached_ptr)
2100 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2103 if (hba->uncached_ptr)
2104 bus_dmamem_free(hba->srb_dmat,
2105 hba->uncached_ptr, hba->srb_dmamap);
2109 bus_dma_tag_destroy(hba->srb_dmat);
2113 bus_dma_tag_destroy(hba->io_dmat);
2116 hba->ops->internal_memfree(hba);
2119 if (hba->parent_dmat)
2120 bus_dma_tag_destroy(hba->parent_dmat);
2123 if (hba->ops->release_pci_res)
2124 hba->ops->release_pci_res(hba);
2129 static int hptiop_detach(device_t dev)
2131 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2135 hptiop_lock_adapter(hba);
2136 for (i = 0; i < hba->max_devices; i++)
2137 if (hptiop_os_query_remove_device(hba, i)) {
2138 device_printf(dev, "%d file system is busy. id=%d",
2143 if ((error = hptiop_shutdown(dev)) != 0)
2145 if (hptiop_send_sync_msg(hba,
2146 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2149 hptiop_release_resource(hba);
2152 hptiop_unlock_adapter(hba);
2156 static int hptiop_shutdown(device_t dev)
2158 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2162 if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2163 device_printf(dev, "%d device is busy", hba->pciunit);
2167 hba->ops->disable_intr(hba);
2169 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
2175 static void hptiop_pci_intr(void *arg)
2177 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2178 hptiop_lock_adapter(hba);
2179 hba->ops->iop_intr(hba);
2180 hptiop_unlock_adapter(hba);
/* CAM poll entry point: service the hardware as if an interrupt fired. */
static void hptiop_poll(struct cam_sim *sim)
{
	hptiop_pci_intr(cam_sim_softc(sim));
}
2188 static void hptiop_async(void * callback_arg, u_int32_t code,
2189 struct cam_path * path, void * arg)
2193 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2195 BUS_SPACE_WRT4_ITL(outbound_intmask,
2196 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0));
2199 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2203 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2205 int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2206 | MVIOP_MU_OUTBOUND_INT_MSG;
2207 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2210 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2212 BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2213 BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2215 BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2216 BUS_SPACE_RD4_MVFREY2(isr_enable);
2218 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2219 BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2222 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2226 int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2228 int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2229 BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2230 BUS_SPACE_RD4_ITL(outbound_intstatus);
2233 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2236 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2238 int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2239 | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2240 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2241 BUS_SPACE_RD4_MV0(outbound_intmask);
2244 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2246 BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2247 BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2249 BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2250 BUS_SPACE_RD4_MVFREY2(isr_enable);
2252 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2253 BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
2256 static void hptiop_reset_adapter(void *argv)
2258 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2259 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2261 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
2264 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2266 struct hpt_iop_srb * srb;
2268 if (hba->srb_list) {
2269 srb = hba->srb_list;
2270 hba->srb_list = srb->next;
2277 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2279 srb->next = hba->srb_list;
2280 hba->srb_list = srb;
2283 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2285 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2286 struct hpt_iop_srb * srb;
2289 switch (ccb->ccb_h.func_code) {
2292 hptiop_lock_adapter(hba);
2293 if (ccb->ccb_h.target_lun != 0 ||
2294 ccb->ccb_h.target_id >= hba->max_devices ||
2295 (ccb->ccb_h.flags & CAM_CDB_PHYS))
2297 ccb->ccb_h.status = CAM_TID_INVALID;
2302 if ((srb = hptiop_get_srb(hba)) == NULL) {
2303 device_printf(hba->pcidev, "srb allocated failed");
2304 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2310 error = bus_dmamap_load_ccb(hba->io_dmat,
2313 hptiop_post_scsi_command,
2317 if (error && error != EINPROGRESS) {
2318 device_printf(hba->pcidev,
2319 "%d bus_dmamap_load error %d",
2320 hba->pciunit, error);
2321 xpt_freeze_simq(hba->sim, 1);
2322 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2323 hptiop_free_srb(hba, srb);
2329 hptiop_unlock_adapter(hba);
2333 device_printf(hba->pcidev, "reset adapter");
2334 hptiop_lock_adapter(hba);
2336 hptiop_reset_adapter(hba);
2337 hptiop_unlock_adapter(hba);
2340 case XPT_GET_TRAN_SETTINGS:
2341 case XPT_SET_TRAN_SETTINGS:
2342 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2345 case XPT_CALC_GEOMETRY:
2346 cam_calc_geometry(&ccb->ccg, 1);
2351 struct ccb_pathinq *cpi = &ccb->cpi;
2353 cpi->version_num = 1;
2354 cpi->hba_inquiry = PI_SDTR_ABLE;
2355 cpi->target_sprt = 0;
2356 cpi->hba_misc = PIM_NOBUSRESET;
2357 cpi->hba_eng_cnt = 0;
2358 cpi->max_target = hba->max_devices;
2360 cpi->unit_number = cam_sim_unit(sim);
2361 cpi->bus_id = cam_sim_bus(sim);
2362 cpi->initiator_id = hba->max_devices;
2363 cpi->base_transfer_speed = 3300;
2365 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2366 strncpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
2367 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2368 cpi->transport = XPORT_SPI;
2369 cpi->transport_version = 2;
2370 cpi->protocol = PROTO_SCSI;
2371 cpi->protocol_version = SCSI_REV_2;
2372 cpi->ccb_h.status = CAM_REQ_CMP;
2377 ccb->ccb_h.status = CAM_REQ_INVALID;
2385 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2386 struct hpt_iop_srb *srb,
2387 bus_dma_segment_t *segs, int nsegs)
2390 union ccb *ccb = srb->ccb;
2393 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2394 cdb = ccb->csio.cdb_io.cdb_ptr;
2396 cdb = ccb->csio.cdb_io.cdb_bytes;
2398 KdPrint(("ccb=%p %x-%x-%x\n",
2399 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2401 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2402 u_int32_t iop_req32;
2403 struct hpt_iop_request_scsi_command req;
2405 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2407 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
2408 device_printf(hba->pcidev, "invaild req offset\n");
2409 ccb->ccb_h.status = CAM_BUSY;
2410 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2411 hptiop_free_srb(hba, srb);
2416 if (ccb->csio.dxfer_len && nsegs > 0) {
2417 struct hpt_iopsg *psg = req.sg_list;
2418 for (idx = 0; idx < nsegs; idx++, psg++) {
2419 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2420 psg->size = segs[idx].ds_len;
2426 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2429 offsetof(struct hpt_iop_request_scsi_command, sg_list)
2430 + nsegs*sizeof(struct hpt_iopsg);
2431 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2432 req.header.flags = 0;
2433 req.header.result = IOP_RESULT_PENDING;
2434 req.header.context = (u_int64_t)(unsigned long)srb;
2435 req.dataxfer_length = ccb->csio.dxfer_len;
2437 req.target = ccb->ccb_h.target_id;
2438 req.lun = ccb->ccb_h.target_lun;
2440 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2441 (u_int8_t *)&req, req.header.size);
2443 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2444 bus_dmamap_sync(hba->io_dmat,
2445 srb->dma_map, BUS_DMASYNC_PREREAD);
2447 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2448 bus_dmamap_sync(hba->io_dmat,
2449 srb->dma_map, BUS_DMASYNC_PREWRITE);
2451 BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
2453 struct hpt_iop_request_scsi_command *req;
2455 req = (struct hpt_iop_request_scsi_command *)srb;
2456 if (ccb->csio.dxfer_len && nsegs > 0) {
2457 struct hpt_iopsg *psg = req->sg_list;
2458 for (idx = 0; idx < nsegs; idx++, psg++) {
2460 (u_int64_t)segs[idx].ds_addr;
2461 psg->size = segs[idx].ds_len;
2467 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2469 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2470 req->header.result = IOP_RESULT_PENDING;
2471 req->dataxfer_length = ccb->csio.dxfer_len;
2473 req->target = ccb->ccb_h.target_id;
2474 req->lun = ccb->ccb_h.target_lun;
2476 offsetof(struct hpt_iop_request_scsi_command, sg_list)
2477 + nsegs*sizeof(struct hpt_iopsg);
2478 req->header.context = (u_int64_t)srb->index |
2479 IOPMU_QUEUE_ADDR_HOST_BIT;
2480 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2482 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2483 bus_dmamap_sync(hba->io_dmat,
2484 srb->dma_map, BUS_DMASYNC_PREREAD);
2485 }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2486 bus_dmamap_sync(hba->io_dmat,
2487 srb->dma_map, BUS_DMASYNC_PREWRITE);
2490 if (hba->firmware_version > 0x01020000
2491 || hba->interface_version > 0x01020000) {
2492 u_int32_t size_bits;
2494 if (req->header.size < 256)
2495 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2496 else if (req->header.size < 512)
2497 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2499 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2500 | IOPMU_QUEUE_ADDR_HOST_BIT;
2502 BUS_SPACE_WRT4_ITL(inbound_queue,
2503 (u_int32_t)srb->phy_addr | size_bits);
2505 BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2506 |IOPMU_QUEUE_ADDR_HOST_BIT);
/*
 * hptiop_post_req_mv - post a SCSI command to a Marvell-style (MV) IOP.
 *
 * The request frame (hpt_iop_request_scsi_command) lives at the start of
 * the SRB's uncached DMA buffer, so the SRB pointer is cast directly to
 * the request.  The routine copies in the CDB, records the busdma-built
 * scatter/gather list, syncs the data map for the transfer direction,
 * and posts the request's physical address to the controller through
 * hptiop_mv_inbound_write().
 *
 * NOTE(review): this view of the file has lines elided between the
 * embedded line numbers (closing braces, some declarations such as
 * req_phy/size/idx/cdb, and `else` keywords are missing); the comments
 * below annotate only the statements that are visible.
 */
2510 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2511 struct hpt_iop_srb *srb,
2512 bus_dma_segment_t *segs, int nsegs)
2515 union ccb *ccb = srb->ccb;
2517 struct hpt_iop_request_scsi_command *req;
/* Request frame occupies the head of the SRB's uncached buffer. */
2520 req = (struct hpt_iop_request_scsi_command *)srb;
2521 req_phy = srb->phy_addr;
/* Copy each busdma segment into the request's S/G list. */
2523 if (ccb->csio.dxfer_len && nsegs > 0) {
2524 struct hpt_iopsg *psg = req->sg_list;
2525 for (idx = 0; idx < nsegs; idx++, psg++) {
2526 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2527 psg->size = segs[idx].ds_len;
/* The CDB may be supplied by pointer or inline in the CCB. */
2532 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2533 cdb = ccb->csio.cdb_io.cdb_ptr;
2535 cdb = ccb->csio.cdb_io.cdb_bytes;
2537 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2538 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2539 req->header.result = IOP_RESULT_PENDING;
2540 req->dataxfer_length = ccb->csio.dxfer_len;
2542 req->target = ccb->ccb_h.target_id;
2543 req->lun = ccb->ccb_h.target_lun;
/*
 * header.size = fixed portion (struct minus its one embedded S/G entry)
 * plus the actual number of S/G entries used.
 */
2544 req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2545 - sizeof(struct hpt_iopsg)
2546 + nsegs * sizeof(struct hpt_iopsg);
/* Sync the data buffer for the DMA direction of this I/O. */
2547 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2548 bus_dmamap_sync(hba->io_dmat,
2549 srb->dma_map, BUS_DMASYNC_PREREAD);
2551 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2552 bus_dmamap_sync(hba->io_dmat,
2553 srb->dma_map, BUS_DMASYNC_PREWRITE);
/* Context carries the SRB index so the completion path can recover it. */
2554 req->header.context = (u_int64_t)srb->index
2555 << MVIOP_REQUEST_NUMBER_START_BIT
2556 | MVIOP_CMD_TYPE_SCSI;
2557 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
/*
 * Low bits of the posted address encode a size hint (request size in
 * 256-byte units, capped at 3) alongside the host-address bit.
 */
2558 size = req->header.size >> 8;
2559 hptiop_mv_inbound_write(req_phy
2560 | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2561 | (size > 3 ? 3 : size), hba);
/*
 * hptiop_post_req_mvfrey - post a SCSI command to an MVFrey IOP.
 *
 * Like the MV path, the request frame sits at the head of the SRB's
 * uncached buffer, but submission goes through a host-resident inbound
 * list ring: the request's physical address and length are written to
 * the next inlist slot and the hardware write pointer is advanced.
 *
 * NOTE(review): lines are elided between the embedded line numbers in
 * this view (closing braces, declarations of req_phy/idx/cdb/index,
 * `else` keywords); comments annotate only the visible statements.
 */
2564 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2565 struct hpt_iop_srb *srb,
2566 bus_dma_segment_t *segs, int nsegs)
2569 union ccb *ccb = srb->ccb;
2571 struct hpt_iop_request_scsi_command *req;
2574 req = (struct hpt_iop_request_scsi_command *)srb;
2575 req_phy = srb->phy_addr;
/*
 * Copy busdma segments into the S/G list.  The low bit OR'ed into the
 * address is presumably a hardware valid/flag bit for this IOP family
 * (the plain MV path does not set it) — TODO confirm against the
 * hardware interface header.
 */
2577 if (ccb->csio.dxfer_len && nsegs > 0) {
2578 struct hpt_iopsg *psg = req->sg_list;
2579 for (idx = 0; idx < nsegs; idx++, psg++) {
2580 psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2581 psg->size = segs[idx].ds_len;
/* The CDB may be supplied by pointer or inline in the CCB. */
2586 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2587 cdb = ccb->csio.cdb_io.cdb_ptr;
2589 cdb = ccb->csio.cdb_io.cdb_bytes;
2591 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2592 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2593 req->header.result = IOP_RESULT_PENDING;
2594 req->dataxfer_length = ccb->csio.dxfer_len;
2596 req->target = ccb->ccb_h.target_id;
2597 req->lun = ccb->ccb_h.target_lun;
/* Fixed header size plus the actual number of S/G entries used. */
2598 req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2599 - sizeof(struct hpt_iopsg)
2600 + nsegs * sizeof(struct hpt_iopsg);
/* Sync the data buffer for the DMA direction of this I/O. */
2601 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2602 bus_dmamap_sync(hba->io_dmat,
2603 srb->dma_map, BUS_DMASYNC_PREREAD);
2605 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2606 bus_dmamap_sync(hba->io_dmat,
2607 srb->dma_map, BUS_DMASYNC_PREWRITE);
/* High half of the request's physical address rides in the flags word. */
2609 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2610 | IOP_REQUEST_FLAG_ADDR_BITS
2611 | ((req_phy >> 16) & 0xffff0000);
2612 req->header.context = ((req_phy & 0xffffffff) << 32 )
2614 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
/*
 * Advance the inbound-list write pointer; the low 14 bits index the
 * ring and CL_POINTER_TOGGLE flips on each wrap so the hardware can
 * distinguish full from empty.
 */
2616 hba->u.mvfrey.inlist_wptr++;
2617 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2619 if (index == hba->u.mvfrey.list_count) {
2621 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2622 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
/* Fill the inlist slot: request address and length in 32-bit words. */
2625 hba->u.mvfrey.inlist[index].addr = req_phy;
2626 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
/* Doorbell write, then a read-back to flush the posted write. */
2628 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2629 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
/*
 * Arm a 20-second watchdog that resets the adapter if the command
 * never completes (legacy timeout(9) API).
 */
2631 if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2632 srb->timeout_ch = timeout(hptiop_reset_adapter, hba, 20*hz);
/*
 * hptiop_post_scsi_command - bus_dmamap_load() callback for a CCB's data
 * buffer.  On mapping error, or if busdma produced more segments than
 * the controller supports, the CCB is failed with CAM_BUSY and the SRB
 * is recycled; otherwise the command is handed to the chip-specific
 * post routine (itl/mv/mvfrey) through the ops vector.
 */
2636 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2637 int nsegs, int error)
2639 struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2640 union ccb *ccb = srb->ccb;
2641 struct hpt_iop_hba *hba = srb->hba;
2643 if (error || nsegs > hba->max_sg_count) {
2644 KdPrint(("hptiop: func_code=%x tid=%x lun=%x nsegs=%d\n",
2645 ccb->ccb_h.func_code,
2646 ccb->ccb_h.target_id,
2647 ccb->ccb_h.target_lun, nsegs));
2648 ccb->ccb_h.status = CAM_BUSY;
2649 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2650 hptiop_free_srb(hba, srb);
/*
 * NOTE(review): lines elided here in this view — presumably the
 * xpt_done()/return for the error path and the closing brace.
 */
2655 hba->ops->post_req(hba, srb, segs, nsegs);
/*
 * hptiop_mv_map_ctlcfg - bus_dmamap_load() callback for the MV
 * control/config DMA buffer.  Rounds both the physical and the virtual
 * address of the buffer up to a 32-byte boundary and stores them in the
 * hba (the allocation is padded elsewhere to allow for this).
 * NOTE(review): the "& ~0x1F" continuation lines of both expressions
 * are elided in this view of the file.
 */
2658 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2659 int nsegs, int error)
2661 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2662 hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2664 hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
/*
 * hptiop_mvfrey_map_ctlcfg - bus_dmamap_load() callback for the MVFrey
 * control/config DMA buffer.  After 32-byte aligning the base address,
 * the single buffer is carved into consecutive regions, tracked in
 * parallel as virtual pointer `p` and physical address `phy`:
 *   1. the control/config command area (ctlcfg_ptr / ctlcfgcmd_phy),
 *   2. the inbound request list ring (list_count entries),
 *   3. the outbound completion list ring (list_count entries),
 *   4. the outbound-list copy pointer word(s).
 * NOTE(review): lines are elided in this view (declarations of phy/p,
 * "& ~0x1F" continuations, and the increment past the ctlcfg area), so
 * the exact padding between regions is not fully visible here.
 */
2668 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2669 int nsegs, int error)
2671 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2674 u_int32_t list_count = hba->u.mvfrey.list_count;
/* Align physical and virtual base to a 32-byte boundary. */
2676 phy = ((u_int64_t)segs->ds_addr + 0x1F)
2678 p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2681 hba->ctlcfgcmd_phy = phy;
2682 hba->ctlcfg_ptr = p;
/* Inbound request list ring. */
2687 hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2688 hba->u.mvfrey.inlist_phy = phy;
2690 p += list_count * sizeof(struct mvfrey_inlist_entry);
2691 phy += list_count * sizeof(struct mvfrey_inlist_entry);
/* Outbound completion list ring. */
2693 hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2694 hba->u.mvfrey.outlist_phy = phy;
2696 p += list_count * sizeof(struct mvfrey_outlist_entry);
2697 phy += list_count * sizeof(struct mvfrey_outlist_entry);
/* Hardware-updated copy of the outbound list pointer. */
2699 hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2700 hba->u.mvfrey.outlist_cptr_phy = phy;
/*
 * hptiop_map_srb - bus_dmamap_load() callback for the SRB pool.
 *
 * Aligns the uncached SRB area to 32 bytes (both virtually and
 * physically), then walks the pool creating a per-SRB data DMA map,
 * recording each SRB's physical address, initializing its timeout
 * callout handle, and pushing it onto the free list via
 * hptiop_free_srb().
 *
 * NOTE(review): lines are elided between the embedded line numbers in
 * this view (error-path returns, `else` keywords, closing braces, the
 * loop variable declaration); comments annotate the visible statements.
 */
2703 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2704 int nsegs, int error)
2706 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
/* Physical base, rounded up to a 32-byte boundary. */
2707 bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2708 struct hpt_iop_srb *srb, *tmp_srb;
2711 if (error || nsegs == 0) {
2712 device_printf(hba->pcidev, "hptiop_map_srb error");
/* Virtual base, rounded up to match the physical alignment. */
2717 srb = (struct hpt_iop_srb *)
2718 (((unsigned long)hba->uncached_ptr + 0x1F)
2719 & ~(unsigned long)0x1F);
2721 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2722 tmp_srb = (struct hpt_iop_srb *)
2723 ((char *)srb + i * HPT_SRB_MAX_SIZE);
/* Each SRB slot must itself remain 32-byte aligned. */
2724 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2725 if (bus_dmamap_create(hba->io_dmat,
2726 0, &tmp_srb->dma_map)) {
2727 device_printf(hba->pcidev, "dmamap create failed");
2731 bzero(tmp_srb, sizeof(struct hpt_iop_srb));
/*
 * ITL IOPs (no ctlcfg buffer) use 32-bit request addresses; a
 * flag marks SRBs whose physical address needs the high-memory
 * access path.  Other IOPs keep the full 64-bit address.
 */
2734 if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2735 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2737 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2739 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2741 tmp_srb->phy_addr = phy_addr;
/* Arm-able watchdog handle, then park the SRB on the free list. */
2744 callout_handle_init(&tmp_srb->timeout_ch);
2745 hptiop_free_srb(hba, tmp_srb);
2746 hba->srb[i] = tmp_srb;
2747 phy_addr += HPT_SRB_MAX_SIZE;
2750 device_printf(hba->pcidev, "invalid alignment");
/*
 * hptiop_os_message_callback - handle an asynchronous message from the
 * IOP firmware.  Body not visible in this view of the file — it appears
 * to be a stub; confirm against the full source before relying on that.
 */
2756 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
/*
 * hptiop_os_query_remove_device - ask whether a target can be removed.
 *
 * Builds a CAM path to the target and looks for an attached "da"
 * (direct-access disk) peripheral.  If one exists with an active
 * reference, a diagnostic is printed — presumably retval is set nonzero
 * on an elided line to veto the removal; TODO confirm against the full
 * source.  Returns 0 when removal is acceptable.
 *
 * NOTE(review): lines are elided in this view (the target_id parameter
 * declaration, the retval assignment, closing braces, and the final
 * return).
 */
2761 static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2764 struct cam_periph *periph = NULL;
2765 struct cam_path *path;
2766 int status, retval = 0;
2768 status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2770 if (status == CAM_REQ_CMP) {
/* A "da" periph with a live refcount means the disk is still in use. */
2771 if ((periph = cam_periph_find(path, "da")) != NULL) {
2772 if (periph->refcount >= 1) {
2773 device_printf(hba->pcidev, "%d ,"
2776 hba->pciunit, target_id, periph->refcount);
2780 xpt_free_path(path);
2785 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2789 struct ccb_setasync ccb;
2791 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2792 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2793 ccb.event_enable = 0;
2794 ccb.callback = hptiop_async;
2795 ccb.callback_arg = hba->sim;
2796 xpt_action((union ccb *)&ccb);
2797 xpt_free_path(hba->path);
2801 xpt_bus_deregister(cam_sim_path(hba->sim));
2802 cam_sim_free(hba->sim, TRUE);
2805 if (hba->ctlcfg_dmat) {
2806 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2807 bus_dmamem_free(hba->ctlcfg_dmat,
2808 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2809 bus_dma_tag_destroy(hba->ctlcfg_dmat);
2812 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2813 struct hpt_iop_srb *srb = hba->srb[i];
2815 bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2818 if (hba->srb_dmat) {
2819 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2820 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2821 bus_dma_tag_destroy(hba->srb_dmat);
2825 bus_dma_tag_destroy(hba->io_dmat);
2827 if (hba->parent_dmat)
2828 bus_dma_tag_destroy(hba->parent_dmat);
2830 if (hba->irq_handle)
2831 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
2834 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2838 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2839 hba->bar0_rid, hba->bar0_res);
2841 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2842 hba->bar2_rid, hba->bar2_res);
2844 destroy_dev(hba->ioctl_dev);