2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * HighPoint RR3xxx/4xxx RAID Driver for FreeBSD
5 * Copyright (C) 2007-2012 HighPoint Technologies, Inc. All Rights Reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
32 #include <sys/param.h>
33 #include <sys/types.h>
36 #include <sys/systm.h>
39 #include <sys/malloc.h>
41 #include <sys/libkern.h>
42 #include <sys/kernel.h>
44 #include <sys/kthread.h>
45 #include <sys/mutex.h>
46 #include <sys/module.h>
48 #include <sys/eventhandler.h>
50 #include <sys/taskqueue.h>
51 #include <sys/ioccom.h>
53 #include <machine/resource.h>
54 #include <machine/bus.h>
55 #include <machine/stdarg.h>
61 #include <dev/pci/pcireg.h>
62 #include <dev/pci/pcivar.h>
66 #include <cam/cam_ccb.h>
67 #include <cam/cam_sim.h>
68 #include <cam/cam_xpt_sim.h>
69 #include <cam/cam_debug.h>
70 #include <cam/cam_periph.h>
71 #include <cam/scsi/scsi_all.h>
72 #include <cam/scsi/scsi_message.h>
75 #include <dev/hptiop/hptiop.h>
/* Driver identification strings (reported via cdevsw .d_name) and the
 * devclass that hba_from_dev() uses to map a cdev unit to its softc. */
77 static const char driver_name[] = "hptiop";
78 static const char driver_version[] = "v1.9";
80 static devclass_t hptiop_devclass;
/*
 * Forward declarations.  The driver supports three controller families;
 * most operations come in parallel _itl, _mv and _mvfrey variants
 * (ioctl dispatch, PCI resource setup, config get/set, internal memory
 * alloc/free, request/message posting, interrupt enable/disable).
 * NOTE(review): this listing is lossy -- some prototypes appear to be
 * missing their argument lines (gaps in the embedded original line
 * numbers, e.g. after lines 84 and 87); verify against the full file.
 */
82 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
83 u_int32_t msg, u_int32_t millisec);
84 static void hptiop_request_callback_itl(struct hpt_iop_hba *hba,
86 static void hptiop_request_callback_mv(struct hpt_iop_hba *hba, u_int64_t req);
87 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba *hba,
89 static void hptiop_os_message_callback(struct hpt_iop_hba *hba, u_int32_t msg);
90 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
91 struct hpt_iop_ioctl_param *pParams);
92 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
93 struct hpt_iop_ioctl_param *pParams);
94 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
95 struct hpt_iop_ioctl_param *pParams);
96 static int hptiop_rescan_bus(struct hpt_iop_hba *hba);
97 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba);
98 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba);
99 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba);
100 static int hptiop_get_config_itl(struct hpt_iop_hba *hba,
101 struct hpt_iop_request_get_config *config);
102 static int hptiop_get_config_mv(struct hpt_iop_hba *hba,
103 struct hpt_iop_request_get_config *config);
104 static int hptiop_get_config_mvfrey(struct hpt_iop_hba *hba,
105 struct hpt_iop_request_get_config *config);
106 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
107 struct hpt_iop_request_set_config *config);
108 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
109 struct hpt_iop_request_set_config *config);
110 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
111 struct hpt_iop_request_set_config *config);
112 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba);
113 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba);
114 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba);
115 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba);
116 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba);
117 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
118 u_int32_t req32, struct hpt_iop_ioctl_param *pParams);
119 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
120 struct hpt_iop_request_ioctl_command *req,
121 struct hpt_iop_ioctl_param *pParams);
122 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
123 struct hpt_iop_request_ioctl_command *req,
124 struct hpt_iop_ioctl_param *pParams);
125 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
126 struct hpt_iop_srb *srb,
127 bus_dma_segment_t *segs, int nsegs);
128 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
129 struct hpt_iop_srb *srb,
130 bus_dma_segment_t *segs, int nsegs);
131 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
132 struct hpt_iop_srb *srb,
133 bus_dma_segment_t *segs, int nsegs);
134 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg);
135 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg);
136 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg);
137 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba);
138 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba);
139 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba);
140 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba);
141 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba);
142 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba);
143 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb);
144 static int hptiop_os_query_remove_device(struct hpt_iop_hba *hba, int tid);
145 static int hptiop_probe(device_t dev);
146 static int hptiop_attach(device_t dev);
147 static int hptiop_detach(device_t dev);
148 static int hptiop_shutdown(device_t dev);
149 static void hptiop_action(struct cam_sim *sim, union ccb *ccb);
150 static void hptiop_poll(struct cam_sim *sim);
151 static void hptiop_async(void *callback_arg, u_int32_t code,
152 struct cam_path *path, void *arg);
153 static void hptiop_pci_intr(void *arg);
154 static void hptiop_release_resource(struct hpt_iop_hba *hba);
155 static void hptiop_reset_adapter(void *argv);
156 static d_open_t hptiop_open;
157 static d_close_t hptiop_close;
158 static d_ioctl_t hptiop_ioctl;
/* Character-device switch for the control node: open/close gate single
 * access (HPT_IOCTL_FLAG_OPEN), ioctl is the management entry point. */
160 static struct cdevsw hptiop_cdevsw = {
161 .d_open = hptiop_open,
162 .d_close = hptiop_close,
163 .d_ioctl = hptiop_ioctl,
164 .d_name = driver_name,
165 .d_version = D_VERSION,
/* NOTE(review): the closing "};" of this initializer is not visible in
 * this listing (gap in the embedded line numbering). */
/* Map a control cdev's unit number back to the owning hba softc. */
168 #define hba_from_dev(dev) \
169 ((struct hpt_iop_hba *)devclass_get_softc(hptiop_devclass, dev2unit(dev)))
/* 32-bit register accessors.  ITL and MV0 register files live behind
 * BAR0; the MV and MVFrey message units live behind BAR2.  Register
 * offsets come from the struct hpt_iopmu_* / hpt_iopmv_regs layouts.
 * All of these implicitly use a local "hba" variable in the caller. */
171 #define BUS_SPACE_WRT4_ITL(offset, value) bus_space_write_4(hba->bar0t,\
172 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset), (value))
173 #define BUS_SPACE_RD4_ITL(offset) bus_space_read_4(hba->bar0t,\
174 hba->bar0h, offsetof(struct hpt_iopmu_itl, offset))
176 #define BUS_SPACE_WRT4_MV0(offset, value) bus_space_write_4(hba->bar0t,\
177 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset), value)
178 #define BUS_SPACE_RD4_MV0(offset) bus_space_read_4(hba->bar0t,\
179 hba->bar0h, offsetof(struct hpt_iopmv_regs, offset))
180 #define BUS_SPACE_WRT4_MV2(offset, value) bus_space_write_4(hba->bar2t,\
181 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset), value)
182 #define BUS_SPACE_RD4_MV2(offset) bus_space_read_4(hba->bar2t,\
183 hba->bar2h, offsetof(struct hpt_iopmu_mv, offset))
185 #define BUS_SPACE_WRT4_MVFREY2(offset, value) bus_space_write_4(hba->bar2t,\
186 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset), value)
187 #define BUS_SPACE_RD4_MVFREY2(offset) bus_space_read_4(hba->bar2t,\
188 hba->bar2h, offsetof(struct hpt_iopmu_mvfrey, offset))
/*
 * d_open handler: allow only one opener at a time by testing and setting
 * HPT_IOCTL_FLAG_OPEN on the softc.
 * NOTE(review): braces and return statements (original lines 192-202)
 * are not visible in this listing; presumably the flag-set path returns
 * 0 and the already-open path returns EBUSY -- confirm in the full file.
 */
190 static int hptiop_open(ioctl_dev_t dev, int flags,
191 int devtype, ioctl_thread_t proc)
193 struct hpt_iop_hba *hba = hba_from_dev(dev);
197 if (hba->flag & HPT_IOCTL_FLAG_OPEN)
199 hba->flag |= HPT_IOCTL_FLAG_OPEN;
/*
 * d_close handler: clear the single-open gate set by hptiop_open().
 * NOTE(review): braces/return not visible here (listing gap).
 */
203 static int hptiop_close(ioctl_dev_t dev, int flags,
204 int devtype, ioctl_thread_t proc)
206 struct hpt_iop_hba *hba = hba_from_dev(dev);
207 hba->flag &= ~(u_int32_t)HPT_IOCTL_FLAG_OPEN;
/*
 * d_ioctl handler: dispatch management requests.  HPT_DO_IOCONTROL is
 * forwarded to the per-family do_ioctl op; the other visible case
 * rescans the CAM bus.
 * NOTE(review): the switch statement, its other case label(s), default
 * and return are not visible in this listing (gaps around original
 * lines 213-228).
 */
211 static int hptiop_ioctl(ioctl_dev_t dev, u_long cmd, caddr_t data,
212 int flags, ioctl_thread_t proc)
215 struct hpt_iop_hba *hba = hba_from_dev(dev);
218 case HPT_DO_IOCONTROL:
219 ret = hba->ops->do_ioctl(hba,
220 (struct hpt_iop_ioctl_param *)data);
223 ret = hptiop_rescan_bus(hba);
/*
 * Pop one 64-bit entry from the MV outbound queue.  If tail != head an
 * entry is available: read it from outbound_q[tail], advance the tail
 * (wrapping at MVIOP_QUEUE_LEN) and write the new tail back.
 * NOTE(review): several lines are missing from this listing (the read
 * destination/count arguments, the tail increment, the empty-queue
 * return) -- gaps at original lines 239-251.
 */
229 static u_int64_t hptiop_mv_outbound_read(struct hpt_iop_hba *hba)
232 u_int32_t outbound_tail = BUS_SPACE_RD4_MV2(outbound_tail);
233 u_int32_t outbound_head = BUS_SPACE_RD4_MV2(outbound_head);
235 if (outbound_tail != outbound_head) {
236 bus_space_read_region_4(hba->bar2t, hba->bar2h,
237 offsetof(struct hpt_iopmu_mv,
238 outbound_q[outbound_tail]),
243 if (outbound_tail == MVIOP_QUEUE_LEN)
246 BUS_SPACE_WRT4_MV2(outbound_tail, outbound_tail)
/*
 * Push one 64-bit request onto the MV inbound queue: write p into
 * inbound_q[head], advance head (wrap at MVIOP_QUEUE_LEN), publish the
 * new head, then ring the inbound doorbell to notify the IOP.
 * NOTE(review): the wrap assignment and the write source/count args are
 * not visible here (listing gaps at original lines 258-262).
 */
252 static void hptiop_mv_inbound_write(u_int64_t p, struct hpt_iop_hba *hba)
254 u_int32_t inbound_head = BUS_SPACE_RD4_MV2(inbound_head);
255 u_int32_t head = inbound_head + 1;
257 if (head == MVIOP_QUEUE_LEN)
260 bus_space_write_region_4(hba->bar2t, hba->bar2h,
261 offsetof(struct hpt_iopmu_mv, inbound_q[inbound_head]),
263 BUS_SPACE_WRT4_MV2(inbound_head, head);
264 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_POSTQUEUE);
/* Post a message to an ITL controller via inbound_msgaddr0; the
 * follow-up register read is for its side effect only (presumably to
 * flush the posted PCI write -- same pattern used throughout). */
267 static void hptiop_post_msg_itl(struct hpt_iop_hba *hba, u_int32_t msg)
269 BUS_SPACE_WRT4_ITL(inbound_msgaddr0, msg);
270 BUS_SPACE_RD4_ITL(outbound_intstatus);
/* Post a message to an MV controller: write the message, ring the
 * doorbell with the MSG interrupt bit, then read back intmask (side
 * effect only; presumably a posted-write flush). */
273 static void hptiop_post_msg_mv(struct hpt_iop_hba *hba, u_int32_t msg)
276 BUS_SPACE_WRT4_MV2(inbound_msg, msg);
277 BUS_SPACE_WRT4_MV0(inbound_doorbell, MVIOP_MU_INBOUND_INT_MSG);
279 BUS_SPACE_RD4_MV0(outbound_intmask);
/* Post a message to an MVFrey controller via f0_to_cpu_msg_a; the
 * read-back is side-effect only (presumably a posted-write flush). */
282 static void hptiop_post_msg_mvfrey(struct hpt_iop_hba *hba, u_int32_t msg)
284 BUS_SPACE_WRT4_MVFREY2(f0_to_cpu_msg_a, msg);
285 BUS_SPACE_RD4_MVFREY2(f0_to_cpu_msg_a);
/*
 * Wait up to ~millisec iterations for the ITL inbound queue to yield a
 * request slot; on success, return the slot to the outbound queue and
 * report ready.
 * NOTE(review): loop body delay, failure return and braces are not
 * visible in this listing (gaps at original lines 296-308).
 */
288 static int hptiop_wait_ready_itl(struct hpt_iop_hba * hba, u_int32_t millisec)
293 for (i = 0; i < millisec; i++) {
294 req = BUS_SPACE_RD4_ITL(inbound_queue);
295 if (req != IOPMU_QUEUE_EMPTY)
300 if (req!=IOPMU_QUEUE_EMPTY) {
301 BUS_SPACE_WRT4_ITL(outbound_queue, req);
302 BUS_SPACE_RD4_ITL(outbound_intstatus);
/* MV readiness check: a NOP sync message must complete within millisec.
 * NOTE(review): braces/returns not visible here (listing gap). */
309 static int hptiop_wait_ready_mv(struct hpt_iop_hba * hba, u_int32_t millisec)
311 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
/* MVFrey readiness check: same NOP-message probe as the MV variant.
 * NOTE(review): parameter line, braces and returns not visible here. */
317 static int hptiop_wait_ready_mvfrey(struct hpt_iop_hba * hba,
320 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_NOP, millisec))
/*
 * Completion handler for one ITL request, identified by "index" popped
 * from the outbound queue.
 *
 * Two request classes are handled:
 *  - host-allocated requests (IOPMU_QUEUE_MASK_HOST_BITS set): index
 *    encodes an srb slot; on newer firmware/interface (> 0x01020000) a
 *    result bit in the index can short-circuit to IOP_RESULT_SUCCESS.
 *  - IOP-memory requests: the header is read through bus_space at
 *    offset "index" inside the BAR0 window.
 * The result code is translated into a CAM ccb status, DMA maps are
 * synced/unloaded on success, sense data is copied on CHECK CONDITION,
 * and the srb is freed.
 * NOTE(review): this listing has gaps (missing braces, else branches,
 * breaks, the switch heads at original lines ~362 and ~393, and the
 * final xpt_done/return lines) -- read alongside the full file.
 */
326 static void hptiop_request_callback_itl(struct hpt_iop_hba * hba,
329 struct hpt_iop_srb *srb;
330 struct hpt_iop_request_scsi_command *req=NULL;
333 u_int32_t result, temp, dxfer;
336 if (index & IOPMU_QUEUE_MASK_HOST_BITS) { /*host req*/
337 if (hba->firmware_version > 0x01020000 ||
338 hba->interface_version > 0x01020000) {
339 srb = hba->srb[index & ~(u_int32_t)
340 (IOPMU_QUEUE_ADDR_HOST_BIT
341 | IOPMU_QUEUE_REQUEST_RESULT_BIT)];
342 req = (struct hpt_iop_request_scsi_command *)srb;
343 if (index & IOPMU_QUEUE_REQUEST_RESULT_BIT)
344 result = IOP_RESULT_SUCCESS;
346 result = req->header.result;
348 srb = hba->srb[index &
349 ~(u_int32_t)IOPMU_QUEUE_ADDR_HOST_BIT];
350 req = (struct hpt_iop_request_scsi_command *)srb;
351 result = req->header.result;
353 dxfer = req->dataxfer_length;
/* Request lives in IOP memory: read type/result through the BAR0 window. */
358 temp = bus_space_read_4(hba->bar0t, hba->bar0h, index +
359 offsetof(struct hpt_iop_request_header, type));
360 result = bus_space_read_4(hba->bar0t, hba->bar0h, index +
361 offsetof(struct hpt_iop_request_header, result));
363 case IOP_REQUEST_TYPE_IOCTL_COMMAND:
/* Store the result context back into the request header and wake the
 * sleeper parked on the request's mapped address (see the ioctl path). */
366 bus_space_write_region_4(hba->bar0t, hba->bar0h, index +
367 offsetof(struct hpt_iop_request_header, context),
368 (u_int32_t *)&temp64, 2);
369 wakeup((void *)((unsigned long)hba->u.itl.mu + index));
373 case IOP_REQUEST_TYPE_SCSI_COMMAND:
/* Recover the srb pointer previously stashed in header.context. */
374 bus_space_read_region_4(hba->bar0t, hba->bar0h, index +
375 offsetof(struct hpt_iop_request_header, context),
376 (u_int32_t *)&temp64, 2);
377 srb = (struct hpt_iop_srb *)(unsigned long)temp64;
378 dxfer = bus_space_read_4(hba->bar0t, hba->bar0h,
379 index + offsetof(struct hpt_iop_request_scsi_command,
382 ccb = (union ccb *)srb->ccb;
383 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
384 cdb = ccb->csio.cdb_io.cdb_ptr;
386 cdb = ccb->csio.cdb_io.cdb_bytes;
/* SYNCHRONIZE CACHE is completed unconditionally ("???" is original). */
388 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
389 ccb->ccb_h.status = CAM_REQ_CMP;
394 case IOP_RESULT_SUCCESS:
395 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
397 bus_dmamap_sync(hba->io_dmat,
398 srb->dma_map, BUS_DMASYNC_POSTREAD);
399 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
402 bus_dmamap_sync(hba->io_dmat,
403 srb->dma_map, BUS_DMASYNC_POSTWRITE);
404 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
408 ccb->ccb_h.status = CAM_REQ_CMP;
411 case IOP_RESULT_BAD_TARGET:
412 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
414 case IOP_RESULT_BUSY:
415 ccb->ccb_h.status = CAM_BUSY;
417 case IOP_RESULT_INVALID_REQUEST:
418 ccb->ccb_h.status = CAM_REQ_INVALID;
420 case IOP_RESULT_FAIL:
421 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
423 case IOP_RESULT_RESET:
424 ccb->ccb_h.status = CAM_BUSY;
426 case IOP_RESULT_CHECK_CONDITION:
427 memset(&ccb->csio.sense_data, 0,
428 sizeof(ccb->csio.sense_data));
429 if (dxfer < ccb->csio.sense_len)
430 ccb->csio.sense_resid = ccb->csio.sense_len -
433 ccb->csio.sense_resid = 0;
/* Sense bytes live either in IOP memory (read via bus_space) or in the
 * host copy of the request's sg_list area. */
434 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {/*iop*/
435 bus_space_read_region_1(hba->bar0t, hba->bar0h,
436 index + offsetof(struct hpt_iop_request_scsi_command,
437 sg_list), (u_int8_t *)&ccb->csio.sense_data,
438 MIN(dxfer, sizeof(ccb->csio.sense_data)));
440 memcpy(&ccb->csio.sense_data, &req->sg_list,
441 MIN(dxfer, sizeof(ccb->csio.sense_data)));
443 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
444 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
445 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
448 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
/* IOP-memory requests must be handed back via the outbound queue. */
452 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS)
453 BUS_SPACE_WRT4_ITL(outbound_queue, index);
455 ccb->csio.resid = ccb->csio.dxfer_len - dxfer;
457 hptiop_free_srb(hba, srb);
/*
 * Drain the ITL outbound queue until it reads empty.  Host-owned
 * entries go straight to the completion callback; IOP-memory entries
 * have their header flags inspected first so that sync requests can
 * have their context copied back before/instead of the callback.
 * NOTE(review): several argument lines and braces are missing from this
 * listing (gaps at original lines 476-501) -- the exact sync-request
 * handling cannot be fully reconstructed from what is visible.
 */
463 static void hptiop_drain_outbound_queue_itl(struct hpt_iop_hba *hba)
467 while ((req = BUS_SPACE_RD4_ITL(outbound_queue)) !=IOPMU_QUEUE_EMPTY) {
468 if (req & IOPMU_QUEUE_MASK_HOST_BITS)
469 hptiop_request_callback_itl(hba, req);
471 struct hpt_iop_request_header *p;
473 p = (struct hpt_iop_request_header *)
474 ((char *)hba->u.itl.mu + req);
475 temp = bus_space_read_4(hba->bar0t,
477 offsetof(struct hpt_iop_request_header,
479 if (temp & IOP_REQUEST_FLAG_SYNC_REQUEST) {
481 bus_space_read_region_4(hba->bar0t,
483 offsetof(struct hpt_iop_request_header,
485 (u_int32_t *)&temp64, 2);
487 hptiop_request_callback_itl(hba, req);
490 bus_space_write_region_4(hba->bar0t,
492 offsetof(struct hpt_iop_request_header,
494 (u_int32_t *)&temp64, 2);
497 hptiop_request_callback_itl(hba, req);
/*
 * ITL interrupt service: read outbound_intstatus, handle MSG0 (ack the
 * interrupt, dispatch to the OS message callback) and POSTQUEUE (drain
 * completions).  Returns whether any work was done (per the int return
 * type; the actual return statements are not visible in this listing).
 */
502 static int hptiop_intr_itl(struct hpt_iop_hba * hba)
507 status = BUS_SPACE_RD4_ITL(outbound_intstatus);
509 if (status & IOPMU_OUTBOUND_INT_MSG0) {
510 u_int32_t msg = BUS_SPACE_RD4_ITL(outbound_msgaddr0);
511 KdPrint(("hptiop: received outbound msg %x\n", msg));
512 BUS_SPACE_WRT4_ITL(outbound_intstatus, IOPMU_OUTBOUND_INT_MSG0);
513 hptiop_os_message_callback(hba, msg);
517 if (status & IOPMU_OUTBOUND_INT_POSTQUEUE) {
518 hptiop_drain_outbound_queue_itl(hba);
/*
 * Completion handler for one MV request.  The 64-bit tag's low word
 * carries a context: SCSI completions index an srb (tag bits above
 * MVIOP_REQUEST_NUMBER_START_BIT), ioctl completions flip config_done
 * on the shared ctlcfg request, and get/set-config completions set
 * config_done directly.  SCSI results are mapped to CAM statuses the
 * same way as in the ITL callback.
 * NOTE(review): braces, breaks, else branches and the final
 * xpt_done/wakeup lines are not visible in this listing (numbering
 * gaps) -- read alongside the full file.
 */
525 static void hptiop_request_callback_mv(struct hpt_iop_hba * hba,
528 u_int32_t context = (u_int32_t)_tag;
530 if (context & MVIOP_CMD_TYPE_SCSI) {
531 struct hpt_iop_srb *srb;
532 struct hpt_iop_request_scsi_command *req;
536 srb = hba->srb[context >> MVIOP_REQUEST_NUMBER_START_BIT];
537 req = (struct hpt_iop_request_scsi_command *)srb;
538 ccb = (union ccb *)srb->ccb;
539 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
540 cdb = ccb->csio.cdb_io.cdb_ptr;
542 cdb = ccb->csio.cdb_io.cdb_bytes;
/* SYNCHRONIZE CACHE is completed unconditionally ("???" is original). */
544 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
545 ccb->ccb_h.status = CAM_REQ_CMP;
548 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
549 req->header.result = IOP_RESULT_SUCCESS;
551 switch (req->header.result) {
552 case IOP_RESULT_SUCCESS:
553 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
555 bus_dmamap_sync(hba->io_dmat,
556 srb->dma_map, BUS_DMASYNC_POSTREAD);
557 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
560 bus_dmamap_sync(hba->io_dmat,
561 srb->dma_map, BUS_DMASYNC_POSTWRITE);
562 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
565 ccb->ccb_h.status = CAM_REQ_CMP;
567 case IOP_RESULT_BAD_TARGET:
568 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
570 case IOP_RESULT_BUSY:
571 ccb->ccb_h.status = CAM_BUSY;
573 case IOP_RESULT_INVALID_REQUEST:
574 ccb->ccb_h.status = CAM_REQ_INVALID;
576 case IOP_RESULT_FAIL:
577 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
579 case IOP_RESULT_RESET:
580 ccb->ccb_h.status = CAM_BUSY;
582 case IOP_RESULT_CHECK_CONDITION:
583 memset(&ccb->csio.sense_data, 0,
584 sizeof(ccb->csio.sense_data));
585 if (req->dataxfer_length < ccb->csio.sense_len)
586 ccb->csio.sense_resid = ccb->csio.sense_len -
587 req->dataxfer_length;
589 ccb->csio.sense_resid = 0;
590 memcpy(&ccb->csio.sense_data, &req->sg_list,
591 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
592 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
593 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
594 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
597 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
601 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
603 hptiop_free_srb(hba, srb);
605 } else if (context & MVIOP_CMD_TYPE_IOCTL) {
606 struct hpt_iop_request_ioctl_command *req = hba->ctlcfg_ptr;
/* config_done: 1 = success, -1 = failure; pollers/sleepers test it. */
607 if (context & MVIOP_MU_QUEUE_REQUEST_RESULT_BIT)
608 hba->config_done = 1;
610 hba->config_done = -1;
613 (MVIOP_CMD_TYPE_SET_CONFIG |
614 MVIOP_CMD_TYPE_GET_CONFIG))
615 hba->config_done = 1;
617 device_printf(hba->pcidev, "wrong callback type\n");
/*
 * Completion handler for one MVFrey request.  The tag's low nibble is
 * the request type; for SCSI commands bits 4..11 index the srb slot and
 * MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT forces IOP_RESULT_SUCCESS.
 * Result-to-CAM-status mapping matches the ITL/MV callbacks; config and
 * ioctl completions set config_done (ioctl failure sets -1) and wake
 * the waiter on ctlcfg_ptr.
 * NOTE(review): the switch head, braces, breaks and the final
 * xpt_done/return lines are not visible in this listing (numbering
 * gaps).
 */
621 static void hptiop_request_callback_mvfrey(struct hpt_iop_hba * hba,
624 u_int32_t req_type = _tag & 0xf;
626 struct hpt_iop_srb *srb;
627 struct hpt_iop_request_scsi_command *req;
632 case IOP_REQUEST_TYPE_GET_CONFIG:
633 case IOP_REQUEST_TYPE_SET_CONFIG:
634 hba->config_done = 1;
637 case IOP_REQUEST_TYPE_SCSI_COMMAND:
638 srb = hba->srb[(_tag >> 4) & 0xff];
639 req = (struct hpt_iop_request_scsi_command *)srb;
641 ccb = (union ccb *)srb->ccb;
/* Cancel the per-srb timeout now that the request has completed. */
643 callout_stop(&srb->timeout);
645 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
646 cdb = ccb->csio.cdb_io.cdb_ptr;
648 cdb = ccb->csio.cdb_io.cdb_bytes;
/* SYNCHRONIZE CACHE is completed unconditionally ("???" is original). */
650 if (cdb[0] == SYNCHRONIZE_CACHE) { /* ??? */
651 ccb->ccb_h.status = CAM_REQ_CMP;
655 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
656 req->header.result = IOP_RESULT_SUCCESS;
658 switch (req->header.result) {
659 case IOP_RESULT_SUCCESS:
660 switch (ccb->ccb_h.flags & CAM_DIR_MASK) {
662 bus_dmamap_sync(hba->io_dmat,
663 srb->dma_map, BUS_DMASYNC_POSTREAD);
664 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
667 bus_dmamap_sync(hba->io_dmat,
668 srb->dma_map, BUS_DMASYNC_POSTWRITE);
669 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
672 ccb->ccb_h.status = CAM_REQ_CMP;
674 case IOP_RESULT_BAD_TARGET:
675 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
677 case IOP_RESULT_BUSY:
678 ccb->ccb_h.status = CAM_BUSY;
680 case IOP_RESULT_INVALID_REQUEST:
681 ccb->ccb_h.status = CAM_REQ_INVALID;
683 case IOP_RESULT_FAIL:
684 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
686 case IOP_RESULT_RESET:
687 ccb->ccb_h.status = CAM_BUSY;
689 case IOP_RESULT_CHECK_CONDITION:
690 memset(&ccb->csio.sense_data, 0,
691 sizeof(ccb->csio.sense_data));
692 if (req->dataxfer_length < ccb->csio.sense_len)
693 ccb->csio.sense_resid = ccb->csio.sense_len -
694 req->dataxfer_length;
696 ccb->csio.sense_resid = 0;
697 memcpy(&ccb->csio.sense_data, &req->sg_list,
698 MIN(req->dataxfer_length, sizeof(ccb->csio.sense_data)));
699 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
700 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
701 ccb->csio.scsi_status = SCSI_STATUS_CHECK_COND;
704 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
708 ccb->csio.resid = ccb->csio.dxfer_len - req->dataxfer_length;
710 hptiop_free_srb(hba, srb);
713 case IOP_REQUEST_TYPE_IOCTL_COMMAND:
714 if (_tag & MVFREYIOPMU_QUEUE_REQUEST_RESULT_BIT)
715 hba->config_done = 1;
717 hba->config_done = -1;
718 wakeup((struct hpt_iop_request_ioctl_command *)hba->ctlcfg_ptr);
721 device_printf(hba->pcidev, "wrong callback type\n");
/*
 * Drain the MV outbound queue: pop entries until empty and dispatch
 * host-owned entries that request a context return to the completion
 * callback.
 * NOTE(review): the non-callback branches and closing braces are not
 * visible in this listing (gaps at original lines 734-738).
 */
726 static void hptiop_drain_outbound_queue_mv(struct hpt_iop_hba * hba)
730 while ((req = hptiop_mv_outbound_read(hba))) {
731 if (req & MVIOP_MU_QUEUE_ADDR_HOST_BIT) {
732 if (req & MVIOP_MU_QUEUE_REQUEST_RETURN_CONTEXT) {
733 hptiop_request_callback_mv(hba, req);
/*
 * MV interrupt service: read and acknowledge the outbound doorbell
 * (write-back of ~status), then handle MSG (OS message callback) and
 * POSTQUEUE (drain completions) causes.  Returns whether work was done
 * (return statements not visible in this listing).
 */
739 static int hptiop_intr_mv(struct hpt_iop_hba * hba)
744 status = BUS_SPACE_RD4_MV0(outbound_doorbell);
747 BUS_SPACE_WRT4_MV0(outbound_doorbell, ~status);
749 if (status & MVIOP_MU_OUTBOUND_INT_MSG) {
750 u_int32_t msg = BUS_SPACE_RD4_MV2(outbound_msg);
751 KdPrint(("hptiop: received outbound msg %x\n", msg));
752 hptiop_os_message_callback(hba, msg);
756 if (status & MVIOP_MU_OUTBOUND_INT_POSTQUEUE) {
757 hptiop_drain_outbound_queue_mv(hba);
/*
 * MVFrey interrupt service.  Interrupts are masked for the duration
 * (pcie_f0_int_enable = 0) when the adapter is initialized and restored
 * to 0x1010 at the end.  Doorbell causes deliver firmware messages;
 * the isr_cause path walks the outbound completion list: the consumer
 * pointer *outlist_cptr (low byte) is chased with outlist_rptr, each
 * completed tag dispatched to the mvfrey callback, re-checking cptr
 * until it is stable.  Returns whether work was done (returns not
 * visible in this listing; braces/else lines are also missing).
 */
764 static int hptiop_intr_mvfrey(struct hpt_iop_hba * hba)
766 u_int32_t status, _tag, cptr;
769 if (hba->initialized) {
770 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
773 status = BUS_SPACE_RD4_MVFREY2(f0_doorbell);
775 BUS_SPACE_WRT4_MVFREY2(f0_doorbell, status);
776 if (status & CPU_TO_F0_DRBL_MSG_A_BIT) {
777 u_int32_t msg = BUS_SPACE_RD4_MVFREY2(cpu_to_f0_msg_a);
778 hptiop_os_message_callback(hba, msg);
783 status = BUS_SPACE_RD4_MVFREY2(isr_cause);
785 BUS_SPACE_WRT4_MVFREY2(isr_cause, status);
787 cptr = *hba->u.mvfrey.outlist_cptr & 0xff;
788 while (hba->u.mvfrey.outlist_rptr != cptr) {
789 hba->u.mvfrey.outlist_rptr++;
790 if (hba->u.mvfrey.outlist_rptr == hba->u.mvfrey.list_count) {
791 hba->u.mvfrey.outlist_rptr = 0;
794 _tag = hba->u.mvfrey.outlist[hba->u.mvfrey.outlist_rptr].val;
795 hptiop_request_callback_mvfrey(hba, _tag);
798 } while (cptr != (*hba->u.mvfrey.outlist_cptr & 0xff));
801 if (hba->initialized) {
802 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
/*
 * Post the request at IOP offset req32 to the ITL inbound queue, then
 * poll up to millisec iterations, servicing the interrupt handler and
 * re-reading header.context until completion is observed.
 * NOTE(review): the polling condition, delay and return statements
 * (original lines 822-829) are not visible in this listing.
 */
808 static int hptiop_send_sync_request_itl(struct hpt_iop_hba * hba,
809 u_int32_t req32, u_int32_t millisec)
814 BUS_SPACE_WRT4_ITL(inbound_queue, req32);
815 BUS_SPACE_RD4_ITL(outbound_intstatus);
817 for (i = 0; i < millisec; i++) {
818 hptiop_intr_itl(hba);
819 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
820 offsetof(struct hpt_iop_request_header, context),
821 (u_int32_t *)&temp64, 2);
/*
 * Send a synchronous request to an MV controller: tag the request as
 * SYNC + OUTPUT_CONTEXT, push its physical address (with the host
 * address bit) through the inbound queue, then poll config_done for up
 * to millisec iterations (the per-iteration interrupt poll/delay and
 * returns are not visible in this listing).
 */
830 static int hptiop_send_sync_request_mv(struct hpt_iop_hba *hba,
831 void *req, u_int32_t millisec)
835 hba->config_done = 0;
837 phy_addr = hba->ctlcfgcmd_phy |
838 (u_int64_t)MVIOP_MU_QUEUE_ADDR_HOST_BIT;
839 ((struct hpt_iop_request_get_config *)req)->header.flags |=
840 IOP_REQUEST_FLAG_SYNC_REQUEST |
841 IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
842 hptiop_mv_inbound_write(phy_addr, hba);
843 BUS_SPACE_RD4_MV0(outbound_intmask);
845 for (i = 0; i < millisec; i++) {
847 if (hba->config_done)
/*
 * Send a synchronous request to an MVFrey controller: encode address
 * bits into the header flags/context, append the request's physical
 * address and length to the inbound list, bump inbound_write_ptr
 * (wrapping at list_count with the CL_POINTER_TOGGLE trick), then poll
 * the interrupt handler until config_done is set or millisec expires.
 * NOTE(review): the context expression shifts the LOW 32 bits of
 * phy_addr up by 32 ("(phy_addr & 0xffffffff) << 32"); this matches the
 * visible text but looks unusual -- confirm against the full file
 * before changing.  Delay/return lines are also not visible here.
 */
854 static int hptiop_send_sync_request_mvfrey(struct hpt_iop_hba *hba,
855 void *req, u_int32_t millisec)
859 struct hpt_iop_request_header *reqhdr =
860 (struct hpt_iop_request_header *)req;
862 hba->config_done = 0;
864 phy_addr = hba->ctlcfgcmd_phy;
865 reqhdr->flags = IOP_REQUEST_FLAG_SYNC_REQUEST
866 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
867 | IOP_REQUEST_FLAG_ADDR_BITS
868 | ((phy_addr >> 16) & 0xffff0000);
869 reqhdr->context = ((phy_addr & 0xffffffff) << 32 )
870 | IOPMU_QUEUE_ADDR_HOST_BIT | reqhdr->type;
872 hba->u.mvfrey.inlist_wptr++;
873 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
875 if (index == hba->u.mvfrey.list_count) {
877 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
878 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
881 hba->u.mvfrey.inlist[index].addr = phy_addr;
882 hba->u.mvfrey.inlist[index].intrfc_len = (reqhdr->size + 3) / 4;
884 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
885 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
887 for (i = 0; i < millisec; i++) {
888 hptiop_intr_mvfrey(hba);
889 if (hba->config_done)
/*
 * Post a message through the family-specific post_msg op and poll the
 * family-specific interrupt handler until msg_done is set or millisec
 * iterations elapse.  Returns 0 on completion, -1 on timeout.
 * NOTE(review): msg_done reset and the per-iteration delay/early-exit
 * lines are not visible in this listing.
 */
896 static int hptiop_send_sync_msg(struct hpt_iop_hba *hba,
897 u_int32_t msg, u_int32_t millisec)
902 hba->ops->post_msg(hba, msg);
904 for (i=0; i<millisec; i++) {
905 hba->ops->iop_intr(hba);
911 return hba->msg_done? 0 : -1;
/*
 * GET_CONFIG for ITL: fill in a request header, obtain a free request
 * slot from the inbound queue (fail if empty), write the header into
 * IOP memory, run it synchronously (20s budget), read the full config
 * back, and return the slot via the outbound queue.
 * NOTE(review): error-return lines and final return are not visible in
 * this listing.
 */
914 static int hptiop_get_config_itl(struct hpt_iop_hba * hba,
915 struct hpt_iop_request_get_config * config)
919 config->header.size = sizeof(struct hpt_iop_request_get_config);
920 config->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
921 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
922 config->header.result = IOP_RESULT_PENDING;
923 config->header.context = 0;
925 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
926 if (req32 == IOPMU_QUEUE_EMPTY)
929 bus_space_write_region_4(hba->bar0t, hba->bar0h,
930 req32, (u_int32_t *)config,
931 sizeof(struct hpt_iop_request_header) >> 2);
933 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
934 KdPrint(("hptiop: get config send cmd failed"));
938 bus_space_read_region_4(hba->bar0t, hba->bar0h,
939 req32, (u_int32_t *)config,
940 sizeof(struct hpt_iop_request_get_config) >> 2);
942 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
/*
 * GET_CONFIG for MV: reuse the preallocated ctlcfg request buffer, tag
 * it with MVIOP_CMD_TYPE_GET_CONFIG in the context, and run it through
 * hptiop_send_sync_request_mv (20s budget).  The copy of the result
 * into *config and the return statements are not visible in this
 * listing.
 */
947 static int hptiop_get_config_mv(struct hpt_iop_hba * hba,
948 struct hpt_iop_request_get_config * config)
950 struct hpt_iop_request_get_config *req;
952 if (!(req = hba->ctlcfg_ptr))
955 req->header.flags = 0;
956 req->header.type = IOP_REQUEST_TYPE_GET_CONFIG;
957 req->header.size = sizeof(struct hpt_iop_request_get_config);
958 req->header.result = IOP_RESULT_PENDING;
959 req->header.context = MVIOP_CMD_TYPE_GET_CONFIG;
961 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
962 KdPrint(("hptiop: get config send cmd failed"));
/*
 * GET_CONFIG for MVFrey: the firmware exposes the config block directly
 * in shared memory (hba->u.mvfrey.config); validate its header
 * size/type, then copy the fields the driver consumes into *config.
 * Return statements are not visible in this listing.
 */
970 static int hptiop_get_config_mvfrey(struct hpt_iop_hba * hba,
971 struct hpt_iop_request_get_config * config)
973 struct hpt_iop_request_get_config *info = hba->u.mvfrey.config;
975 if (info->header.size != sizeof(struct hpt_iop_request_get_config) ||
976 info->header.type != IOP_REQUEST_TYPE_GET_CONFIG) {
977 KdPrint(("hptiop: header size %x/%x type %x/%x",
978 info->header.size, (int)sizeof(struct hpt_iop_request_get_config),
979 info->header.type, IOP_REQUEST_TYPE_GET_CONFIG));
983 config->interface_version = info->interface_version;
984 config->firmware_version = info->firmware_version;
985 config->max_requests = info->max_requests;
986 config->request_size = info->request_size;
987 config->max_sg_count = info->max_sg_count;
988 config->data_transfer_length = info->data_transfer_length;
989 config->alignment_mask = info->alignment_mask;
990 config->max_devices = info->max_devices;
991 config->sdram_size = info->sdram_size;
993 KdPrint(("hptiop: maxreq %x reqsz %x datalen %x maxdev %x sdram %x",
994 config->max_requests, config->request_size,
995 config->data_transfer_length, config->max_devices,
996 config->sdram_size));
/*
 * SET_CONFIG for ITL: grab a request slot from the inbound queue (fail
 * if empty), fill the header, write the whole set_config request into
 * IOP memory, run it synchronously (20s budget), and return the slot
 * via the outbound queue.  Error/final returns are not visible in this
 * listing.
 */
1001 static int hptiop_set_config_itl(struct hpt_iop_hba *hba,
1002 struct hpt_iop_request_set_config *config)
1006 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1008 if (req32 == IOPMU_QUEUE_EMPTY)
1011 config->header.size = sizeof(struct hpt_iop_request_set_config);
1012 config->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1013 config->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1014 config->header.result = IOP_RESULT_PENDING;
1015 config->header.context = 0;
1017 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32,
1018 (u_int32_t *)config,
1019 sizeof(struct hpt_iop_request_set_config) >> 2);
1021 if (hptiop_send_sync_request_itl(hba, req32, 20000)) {
1022 KdPrint(("hptiop: set config send cmd failed"));
1026 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
/*
 * SET_CONFIG for MV: copy the caller's payload (everything after the
 * header) into the preallocated ctlcfg request, fill in the header with
 * MVIOP_CMD_TYPE_SET_CONFIG context, and run it synchronously (20s
 * budget).  Return statements are not visible in this listing.
 */
1031 static int hptiop_set_config_mv(struct hpt_iop_hba *hba,
1032 struct hpt_iop_request_set_config *config)
1034 struct hpt_iop_request_set_config *req;
1036 if (!(req = hba->ctlcfg_ptr))
1039 memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1040 (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1041 sizeof(struct hpt_iop_request_set_config) -
1042 sizeof(struct hpt_iop_request_header));
1044 req->header.flags = 0;
1045 req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1046 req->header.size = sizeof(struct hpt_iop_request_set_config);
1047 req->header.result = IOP_RESULT_PENDING;
1048 req->header.context = MVIOP_CMD_TYPE_SET_CONFIG;
1050 if (hptiop_send_sync_request_mv(hba, req, 20000)) {
1051 KdPrint(("hptiop: set config send cmd failed"));
/*
 * SET_CONFIG for MVFrey: same payload copy as the MV variant, but the
 * request is submitted through hptiop_send_sync_request_mvfrey, which
 * fills in the header flags/context itself (hence no flags/context
 * assignment here).  Return statements are not visible in this listing.
 */
1058 static int hptiop_set_config_mvfrey(struct hpt_iop_hba *hba,
1059 struct hpt_iop_request_set_config *config)
1061 struct hpt_iop_request_set_config *req;
1063 if (!(req = hba->ctlcfg_ptr))
1066 memcpy((u_int8_t *)req + sizeof(struct hpt_iop_request_header),
1067 (u_int8_t *)config + sizeof(struct hpt_iop_request_header),
1068 sizeof(struct hpt_iop_request_set_config) -
1069 sizeof(struct hpt_iop_request_header));
1071 req->header.type = IOP_REQUEST_TYPE_SET_CONFIG;
1072 req->header.size = sizeof(struct hpt_iop_request_set_config);
1073 req->header.result = IOP_RESULT_PENDING;
1075 if (hptiop_send_sync_request_mvfrey(hba, req, 20000)) {
1076 KdPrint(("hptiop: set config send cmd failed"));
/*
 * Build and post an ioctl pass-through request into the ITL request
 * slot req32.  Rejects requests whose rounded-up in-buffer plus
 * out-buffer exceeds what fits after the fixed ioctl header.  The
 * header.context is set to the request's mapped address, which the
 * completion callback uses as the wakeup channel; this function sleeps
 * on that address (hptiop_sleep) with the adapter lock held, and on
 * timeout resets the adapter with a 60s sync RESET message.
 * NOTE(review): the completion test between the posted request and the
 * sleep (original lines 1115-1122) and the return statements are not
 * visible in this listing.
 */
1083 static int hptiop_post_ioctl_command_itl(struct hpt_iop_hba *hba,
1085 struct hpt_iop_ioctl_param *pParams)
1088 struct hpt_iop_request_ioctl_command req;
1090 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1091 (hba->max_request_size -
1092 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1093 device_printf(hba->pcidev, "request size beyond max value");
1097 req.header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1098 + pParams->nInBufferSize;
1099 req.header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1100 req.header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST;
1101 req.header.result = IOP_RESULT_PENDING;
1102 req.header.context = req32 + (u_int64_t)(unsigned long)hba->u.itl.mu;
1103 req.ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1104 req.inbuf_size = pParams->nInBufferSize;
1105 req.outbuf_size = pParams->nOutBufferSize;
1106 req.bytes_returned = 0;
1108 bus_space_write_region_4(hba->bar0t, hba->bar0h, req32, (u_int32_t *)&req,
1109 offsetof(struct hpt_iop_request_ioctl_command, buf)>>2);
1111 hptiop_lock_adapter(hba);
1113 BUS_SPACE_WRT4_ITL(inbound_queue, req32);
1114 BUS_SPACE_RD4_ITL(outbound_intstatus);
1116 bus_space_read_region_4(hba->bar0t, hba->bar0h, req32 +
1117 offsetof(struct hpt_iop_request_ioctl_command, header.context),
1118 (u_int32_t *)&temp64, 2);
1120 if (hptiop_sleep(hba, (void *)((unsigned long)hba->u.itl.mu + req32),
1121 PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
1123 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
1124 bus_space_read_region_4(hba->bar0t, hba->bar0h,req32 +
1125 offsetof(struct hpt_iop_request_ioctl_command,
1127 (u_int32_t *)&temp64, 2);
1130 hptiop_unlock_adapter(hba);
/*
 * Copy "size" bytes from a user-space buffer into IOP memory at BAR0
 * offset "bus", one byte at a time via copyin + bus_space_write_1.
 * Returns nonzero on copyin fault (exact return lines not visible in
 * this listing).
 */
1134 static int hptiop_bus_space_copyin(struct hpt_iop_hba *hba, u_int32_t bus,
1135 void *user, int size)
1140 for (i=0; i<size; i++) {
1141 if (copyin((u_int8_t *)user + i, &byte, 1))
1143 bus_space_write_1(hba->bar0t, hba->bar0h, bus + i, byte);
/*
 * Copy "size" bytes from IOP memory at BAR0 offset "bus" out to a
 * user-space buffer, one byte at a time via bus_space_read_1 + copyout.
 * Returns nonzero on copyout fault (exact return lines not visible in
 * this listing).
 */
1149 static int hptiop_bus_space_copyout(struct hpt_iop_hba *hba, u_int32_t bus,
1150 void *user, int size)
1155 for (i=0; i<size; i++) {
1156 byte = bus_space_read_1(hba->bar0t, hba->bar0h, bus + i);
1157 if (copyout(&byte, (u_int8_t *)user + i, 1))
/*
 * User ioctl entry for ITL controllers.  Validates the request magic,
 * allocates a request slot from the inbound queue, copies the user's
 * input buffer into the slot, posts the request synchronously, and on
 * IOP_RESULT_SUCCESS copies the output buffer (placed after the
 * 4-byte-aligned input area) and bytes_returned back to user space.
 * The slot is returned via the outbound queue on both the success and
 * failure paths.  Error-label/return lines are not visible in this
 * listing.
 */
1164 static int hptiop_do_ioctl_itl(struct hpt_iop_hba *hba,
1165 struct hpt_iop_ioctl_param * pParams)
1170 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1171 (pParams->Magic != HPT_IOCTL_MAGIC32))
1174 req32 = BUS_SPACE_RD4_ITL(inbound_queue);
1175 if (req32 == IOPMU_QUEUE_EMPTY)
1178 if (pParams->nInBufferSize)
1179 if (hptiop_bus_space_copyin(hba, req32 +
1180 offsetof(struct hpt_iop_request_ioctl_command, buf),
1181 (void *)pParams->lpInBuffer, pParams->nInBufferSize))
1184 if (hptiop_post_ioctl_command_itl(hba, req32, pParams))
1187 result = bus_space_read_4(hba->bar0t, hba->bar0h, req32 +
1188 offsetof(struct hpt_iop_request_ioctl_command,
1191 if (result == IOP_RESULT_SUCCESS) {
1192 if (pParams->nOutBufferSize)
1193 if (hptiop_bus_space_copyout(hba, req32 +
1194 offsetof(struct hpt_iop_request_ioctl_command, buf) +
1195 ((pParams->nInBufferSize + 3) & ~3),
1196 (void *)pParams->lpOutBuffer, pParams->nOutBufferSize))
1199 if (pParams->lpBytesReturned) {
1200 if (hptiop_bus_space_copyout(hba, req32 +
1201 offsetof(struct hpt_iop_request_ioctl_command, bytes_returned),
1202 (void *)pParams->lpBytesReturned, sizeof(unsigned long)))
1206 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
1211 BUS_SPACE_WRT4_ITL(outbound_queue, req32);
/*
 * hptiop_post_ioctl_command_mv: fill in and post an ioctl request to a
 * Marvell-based (MV) IOP, then sleep until the interrupt handler marks
 * hba->config_done. Caller holds the adapter lock (posted via the MV
 * inbound doorbell; completion signalled through hptiop_sleep wakeup).
 */
1217 static int hptiop_post_ioctl_command_mv(struct hpt_iop_hba *hba,
1218 struct hpt_iop_request_ioctl_command *req,
1219 struct hpt_iop_ioctl_param *pParams)
/*
 * Reject requests whose dword-aligned input plus output would overflow
 * the space after the fixed ioctl header in a request frame.
 */
1224 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1225 (hba->max_request_size -
1226 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1227 device_printf(hba->pcidev, "request size beyond max value");
/* Build the request header/body in the host-resident ctlcfg buffer. */
1231 req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1232 req->inbuf_size = pParams->nInBufferSize;
1233 req->outbuf_size = pParams->nOutBufferSize;
1234 req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1235 + pParams->nInBufferSize;
1236 req->header.context = (u_int64_t)MVIOP_CMD_TYPE_IOCTL;
1237 req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1238 req->header.result = IOP_RESULT_PENDING;
1239 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
/*
 * MV inbound-queue entries encode a size hint in the low bits of the
 * physical address; it is clamped to 3 (>>8 buckets the header size).
 */
1240 size = req->header.size >> 8;
1241 size = imin(3, size);
1242 req_phy = hba->ctlcfgcmd_phy | MVIOP_MU_QUEUE_ADDR_HOST_BIT | size;
1243 hptiop_mv_inbound_write(req_phy, hba);
/* Read-back to flush the posted write before sleeping. */
1245 BUS_SPACE_RD4_MV0(outbound_intmask);
1247 while (hba->config_done == 0) {
1248 if (hptiop_sleep(hba, req, PPAUSE,
1249 "hptctl", HPT_OSM_TIMEOUT)==0)
/* Timeout: hard-reset the IOP rather than leave it wedged. */
1251 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
/*
 * hptiop_do_ioctl_mv: management ioctl path for MV-based IOPs. Unlike the
 * ITL path, the request lives in a host DMA buffer (hba->ctlcfg_ptr), so
 * plain copyin()/copyout() are used instead of bus_space byte copies.
 * NOTE(review): error labels/returns between visible lines are elided in
 * this extract.
 */
1256 static int hptiop_do_ioctl_mv(struct hpt_iop_hba *hba,
1257 struct hpt_iop_ioctl_param *pParams)
1259 struct hpt_iop_request_ioctl_command *req;
1261 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1262 (pParams->Magic != HPT_IOCTL_MAGIC32))
1265 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
/* Cleared here; set to 1 by the interrupt path on completion. */
1266 hba->config_done = 0;
1267 hptiop_lock_adapter(hba);
1268 if (pParams->nInBufferSize)
1269 if (copyin((void *)pParams->lpInBuffer,
1270 req->buf, pParams->nInBufferSize))
1272 if (hptiop_post_ioctl_command_mv(hba, req, pParams))
1275 if (hba->config_done == 1) {
/* Output data sits after the dword-aligned input area in req->buf. */
1276 if (pParams->nOutBufferSize)
1277 if (copyout(req->buf +
1278 ((pParams->nInBufferSize + 3) & ~3),
1279 (void *)pParams->lpOutBuffer,
1280 pParams->nOutBufferSize))
1283 if (pParams->lpBytesReturned)
1284 if (copyout(&req->bytes_returned,
1285 (void*)pParams->lpBytesReturned,
1288 hptiop_unlock_adapter(hba);
1292 hptiop_unlock_adapter(hba);
/*
 * hptiop_post_ioctl_command_mvfrey: build and post an ioctl request on a
 * MVFrey-based IOP. The request is placed on the inbound circular list
 * (inlist) by physical address, the write pointer is advanced (with
 * wrap + toggle-bit handling), and the function sleeps until the ISR
 * sets hba->config_done.
 */
1297 static int hptiop_post_ioctl_command_mvfrey(struct hpt_iop_hba *hba,
1298 struct hpt_iop_request_ioctl_command *req,
1299 struct hpt_iop_ioctl_param *pParams)
1304 phy_addr = hba->ctlcfgcmd_phy;
/* Same size-limit check as the MV path: aligned-in + out must fit. */
1306 if ((((pParams->nInBufferSize + 3) & ~3) + pParams->nOutBufferSize) >
1307 (hba->max_request_size -
1308 offsetof(struct hpt_iop_request_ioctl_command, buf))) {
1309 device_printf(hba->pcidev, "request size beyond max value");
1313 req->ioctl_code = HPT_CTL_CODE_BSD_TO_IOP(pParams->dwIoControlCode);
1314 req->inbuf_size = pParams->nInBufferSize;
1315 req->outbuf_size = pParams->nOutBufferSize;
1316 req->header.size = offsetof(struct hpt_iop_request_ioctl_command, buf)
1317 + pParams->nInBufferSize;
1319 req->header.type = IOP_REQUEST_TYPE_IOCTL_COMMAND;
1320 req->header.result = IOP_RESULT_PENDING;
/*
 * MVFrey encodes the upper physical-address bits in header.flags and
 * the lower 32 bits (shifted high) plus type in header.context.
 */
1322 req->header.flags = IOP_REQUEST_FLAG_SYNC_REQUEST
1323 | IOP_REQUEST_FLAG_OUTPUT_CONTEXT
1324 | IOP_REQUEST_FLAG_ADDR_BITS
1325 | ((phy_addr >> 16) & 0xffff0000);
1326 req->header.context = ((phy_addr & 0xffffffff) << 32 )
1327 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
/* Advance the inbound write pointer; low 14 bits index the list. */
1329 hba->u.mvfrey.inlist_wptr++;
1330 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
1332 if (index == hba->u.mvfrey.list_count) {
/* Wrap: clear the index bits and flip the toggle bit. */
1334 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
1335 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
1338 hba->u.mvfrey.inlist[index].addr = phy_addr;
1339 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
/* Publish the new write pointer; read back to flush the posted write. */
1341 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
1342 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
1344 while (hba->config_done == 0) {
1345 if (hptiop_sleep(hba, req, PPAUSE,
1346 "hptctl", HPT_OSM_TIMEOUT)==0)
/* Timeout: reset the IOP. */
1348 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000);
/*
 * hptiop_do_ioctl_mvfrey: management ioctl path for MVFrey IOPs.
 * Structurally identical to hptiop_do_ioctl_mv() — host-resident request
 * buffer, copyin/post/wait/copyout — but posts through the MVFrey
 * circular-list mechanism.
 */
1353 static int hptiop_do_ioctl_mvfrey(struct hpt_iop_hba *hba,
1354 struct hpt_iop_ioctl_param *pParams)
1356 struct hpt_iop_request_ioctl_command *req;
1358 if ((pParams->Magic != HPT_IOCTL_MAGIC) &&
1359 (pParams->Magic != HPT_IOCTL_MAGIC32))
1362 req = (struct hpt_iop_request_ioctl_command *)(hba->ctlcfg_ptr);
/* Cleared here; set by the interrupt path when the IOP completes. */
1363 hba->config_done = 0;
1364 hptiop_lock_adapter(hba);
1365 if (pParams->nInBufferSize)
1366 if (copyin((void *)pParams->lpInBuffer,
1367 req->buf, pParams->nInBufferSize))
1369 if (hptiop_post_ioctl_command_mvfrey(hba, req, pParams))
1372 if (hba->config_done == 1) {
/* Output area follows the dword-aligned input area. */
1373 if (pParams->nOutBufferSize)
1374 if (copyout(req->buf +
1375 ((pParams->nInBufferSize + 3) & ~3),
1376 (void *)pParams->lpOutBuffer,
1377 pParams->nOutBufferSize))
1380 if (pParams->lpBytesReturned)
1381 if (copyout(&req->bytes_returned,
1382 (void*)pParams->lpBytesReturned,
1385 hptiop_unlock_adapter(hba);
1389 hptiop_unlock_adapter(hba);
/*
 * hptiop_rescan_bus: trigger a CAM rescan of this SIM's bus. Allocates a
 * CCB and a wildcard path (all targets/LUNs); the actual xpt_rescan call
 * falls in lines elided from this extract.
 */
1394 static int hptiop_rescan_bus(struct hpt_iop_hba * hba)
1398 if ((ccb = xpt_alloc_ccb()) == NULL)
1400 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(hba->sim),
1401 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
/* Forward declarations of bus_dmamap_load() callbacks defined later. */
1409 static bus_dmamap_callback_t hptiop_map_srb;
1410 static bus_dmamap_callback_t hptiop_post_scsi_command;
1411 static bus_dmamap_callback_t hptiop_mv_map_ctlcfg;
1412 static bus_dmamap_callback_t hptiop_mvfrey_map_ctlcfg;
/*
 * hptiop_alloc_pci_res_itl: map the ITL controller's registers. A single
 * memory BAR (PCI config offset 0x10, BAR0) holds the whole message unit.
 */
1414 static int hptiop_alloc_pci_res_itl(struct hpt_iop_hba *hba)
1416 hba->bar0_rid = 0x10;
1417 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1418 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1420 if (hba->bar0_res == NULL) {
1421 device_printf(hba->pcidev,
1422 "failed to get iop base adrress.\n");
/* Cache tag/handle for bus_space accessors and a KVA view of the MU. */
1425 hba->bar0t = rman_get_bustag(hba->bar0_res);
1426 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1427 hba->u.itl.mu = (struct hpt_iopmu_itl *)
1428 rman_get_virtual(hba->bar0_res);
1430 if (!hba->u.itl.mu) {
/* Undo the BAR0 allocation on failure. */
1431 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1432 hba->bar0_rid, hba->bar0_res);
1433 device_printf(hba->pcidev, "alloc mem res failed\n");
/*
 * hptiop_alloc_pci_res_mv: map the MV controller's registers. Two memory
 * BARs are used: BAR0 (0x10) for the general register block and BAR2
 * (0x18) for the message unit. Each failure path releases everything
 * acquired so far.
 */
1440 static int hptiop_alloc_pci_res_mv(struct hpt_iop_hba *hba)
1442 hba->bar0_rid = 0x10;
1443 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1444 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1446 if (hba->bar0_res == NULL) {
1447 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1450 hba->bar0t = rman_get_bustag(hba->bar0_res);
1451 hba->bar0h = rman_get_bushandle(hba->bar0_res);
1452 hba->u.mv.regs = (struct hpt_iopmv_regs *)
1453 rman_get_virtual(hba->bar0_res);
1455 if (!hba->u.mv.regs) {
1456 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1457 hba->bar0_rid, hba->bar0_res);
1458 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
/* Second BAR: the MV message unit lives behind BAR2. */
1462 hba->bar2_rid = 0x18;
1463 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1464 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1466 if (hba->bar2_res == NULL) {
1467 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1468 hba->bar0_rid, hba->bar0_res);
1469 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1473 hba->bar2t = rman_get_bustag(hba->bar2_res);
1474 hba->bar2h = rman_get_bushandle(hba->bar2_res);
1475 hba->u.mv.mu = (struct hpt_iopmu_mv *)rman_get_virtual(hba->bar2_res);
1477 if (!hba->u.mv.mu) {
/* Release both BARs if the KVA mapping is unavailable. */
1478 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1479 hba->bar0_rid, hba->bar0_res);
1480 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1481 hba->bar2_rid, hba->bar2_res);
1482 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
/*
 * hptiop_alloc_pci_res_mvfrey: map the MVFrey controller's registers.
 * Mirrors the MV version: BAR0 (0x10) exposes the firmware config
 * structure, BAR2 (0x18) the message unit. Failure paths unwind all
 * prior allocations.
 */
1489 static int hptiop_alloc_pci_res_mvfrey(struct hpt_iop_hba *hba)
1491 hba->bar0_rid = 0x10;
1492 hba->bar0_res = bus_alloc_resource_any(hba->pcidev,
1493 SYS_RES_MEMORY, &hba->bar0_rid, RF_ACTIVE);
1495 if (hba->bar0_res == NULL) {
1496 device_printf(hba->pcidev, "failed to get iop bar0.\n");
1499 hba->bar0t = rman_get_bustag(hba->bar0_res);
1500 hba->bar0h = rman_get_bushandle(hba->bar0_res);
/* On MVFrey, BAR0 maps the get_config structure directly. */
1501 hba->u.mvfrey.config = (struct hpt_iop_request_get_config *)
1502 rman_get_virtual(hba->bar0_res);
1504 if (!hba->u.mvfrey.config) {
1505 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1506 hba->bar0_rid, hba->bar0_res);
1507 device_printf(hba->pcidev, "alloc bar0 mem res failed\n");
1511 hba->bar2_rid = 0x18;
1512 hba->bar2_res = bus_alloc_resource_any(hba->pcidev,
1513 SYS_RES_MEMORY, &hba->bar2_rid, RF_ACTIVE);
1515 if (hba->bar2_res == NULL) {
1516 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1517 hba->bar0_rid, hba->bar0_res);
1518 device_printf(hba->pcidev, "failed to get iop bar2.\n");
1522 hba->bar2t = rman_get_bustag(hba->bar2_res);
1523 hba->bar2h = rman_get_bushandle(hba->bar2_res);
/* Assignment target (hba->u.mvfrey.mu =) is on a line elided here. */
1525 (struct hpt_iopmu_mvfrey *)rman_get_virtual(hba->bar2_res);
1527 if (!hba->u.mvfrey.mu) {
1528 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1529 hba->bar0_rid, hba->bar0_res);
1530 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1531 hba->bar2_rid, hba->bar2_res);
1532 device_printf(hba->pcidev, "alloc mem bar2 res failed\n");
/* hptiop_release_pci_res_itl: release the single ITL BAR0 mapping. */
1539 static void hptiop_release_pci_res_itl(struct hpt_iop_hba *hba)
1542 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1543 hba->bar0_rid, hba->bar0_res);
/* hptiop_release_pci_res_mv: release both MV BARs (BAR0 and BAR2). */
1546 static void hptiop_release_pci_res_mv(struct hpt_iop_hba *hba)
1549 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1550 hba->bar0_rid, hba->bar0_res);
1552 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1553 hba->bar2_rid, hba->bar2_res);
/* hptiop_release_pci_res_mvfrey: release both MVFrey BARs. */
1556 static void hptiop_release_pci_res_mvfrey(struct hpt_iop_hba *hba)
1559 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1560 hba->bar0_rid, hba->bar0_res);
1562 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
1563 hba->bar2_rid, hba->bar2_res);
/*
 * hptiop_internal_memalloc_mv: allocate the DMA-able control/config
 * buffer used for MV ioctl and config requests. Creates a 32-bit DMA tag,
 * allocates coherent memory into hba->ctlcfg_ptr, and loads the map so
 * hptiop_mv_map_ctlcfg() records the physical address. Each failure step
 * tears down what was created before it.
 * NOTE(review): several tag-create arguments fall on lines elided from
 * this extract.
 */
1566 static int hptiop_internal_memalloc_mv(struct hpt_iop_hba *hba)
1568 if (bus_dma_tag_create(hba->parent_dmat,
1571 BUS_SPACE_MAXADDR_32BIT,
1576 BUS_SPACE_MAXSIZE_32BIT,
1580 &hba->ctlcfg_dmat)) {
1581 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1585 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1586 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1587 &hba->ctlcfg_dmamap) != 0) {
1588 device_printf(hba->pcidev,
1589 "bus_dmamem_alloc failed!\n");
1590 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1594 if (bus_dmamap_load(hba->ctlcfg_dmat,
1595 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1596 MVIOP_IOCTLCFG_SIZE,
1597 hptiop_mv_map_ctlcfg, hba, 0)) {
1598 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1599 if (hba->ctlcfg_dmat) {
1600 bus_dmamem_free(hba->ctlcfg_dmat,
1601 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1602 bus_dma_tag_destroy(hba->ctlcfg_dmat);
/*
 * hptiop_internal_memalloc_mvfrey: allocate the internal DMA memory for
 * a MVFrey IOP. The size covers a fixed 0x800-byte area plus the inbound
 * and outbound circular lists, sized by the list_count the hardware
 * reports in inbound_conf_ctl. Same create/alloc/load/teardown pattern
 * as the MV variant, loaded via hptiop_mvfrey_map_ctlcfg().
 */
1610 static int hptiop_internal_memalloc_mvfrey(struct hpt_iop_hba *hba)
1612 u_int32_t list_count = BUS_SPACE_RD4_MVFREY2(inbound_conf_ctl);
1616 if (list_count == 0) {
1620 hba->u.mvfrey.list_count = list_count;
1621 hba->u.mvfrey.internal_mem_size = 0x800
1622 + list_count * sizeof(struct mvfrey_inlist_entry)
1623 + list_count * sizeof(struct mvfrey_outlist_entry)
1625 if (bus_dma_tag_create(hba->parent_dmat,
1628 BUS_SPACE_MAXADDR_32BIT,
1631 hba->u.mvfrey.internal_mem_size,
1633 BUS_SPACE_MAXSIZE_32BIT,
1637 &hba->ctlcfg_dmat)) {
1638 device_printf(hba->pcidev, "alloc ctlcfg_dmat failed\n");
1642 if (bus_dmamem_alloc(hba->ctlcfg_dmat, (void **)&hba->ctlcfg_ptr,
1643 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1644 &hba->ctlcfg_dmamap) != 0) {
1645 device_printf(hba->pcidev,
1646 "bus_dmamem_alloc failed!\n");
1647 bus_dma_tag_destroy(hba->ctlcfg_dmat);
1651 if (bus_dmamap_load(hba->ctlcfg_dmat,
1652 hba->ctlcfg_dmamap, hba->ctlcfg_ptr,
1653 hba->u.mvfrey.internal_mem_size,
1654 hptiop_mvfrey_map_ctlcfg, hba, 0)) {
1655 device_printf(hba->pcidev, "bus_dmamap_load failed!\n");
1656 if (hba->ctlcfg_dmat) {
1657 bus_dmamem_free(hba->ctlcfg_dmat,
1658 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1659 bus_dma_tag_destroy(hba->ctlcfg_dmat);
/* hptiop_internal_memfree_itl: ITL has no internal DMA memory; no-op. */
1667 static int hptiop_internal_memfree_itl(struct hpt_iop_hba *hba) {
/*
 * hptiop_internal_memfree_mv: unload, free, and destroy the MV ctlcfg
 * DMA resources if they were allocated.
 */
1671 static int hptiop_internal_memfree_mv(struct hpt_iop_hba *hba)
1673 if (hba->ctlcfg_dmat) {
1674 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1675 bus_dmamem_free(hba->ctlcfg_dmat,
1676 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1677 bus_dma_tag_destroy(hba->ctlcfg_dmat);
/* hptiop_internal_memfree_mvfrey: same teardown for the MVFrey buffer. */
1683 static int hptiop_internal_memfree_mvfrey(struct hpt_iop_hba *hba)
1685 if (hba->ctlcfg_dmat) {
1686 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
1687 bus_dmamem_free(hba->ctlcfg_dmat,
1688 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
1689 bus_dma_tag_destroy(hba->ctlcfg_dmat);
/*
 * hptiop_reset_comm_mvfrey: reset the MVFrey communication channel and
 * re-program the hardware with the physical addresses of the inbound
 * list, outbound list, and outbound shadow (copy) pointer, then
 * re-initialize the host-side ring pointers to "last entry + toggle".
 */
1695 static int hptiop_reset_comm_mvfrey(struct hpt_iop_hba *hba)
1699 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET_COMM, 3000))
1702 /* wait 100ms for MCU ready */
/* Program the 64-bit ring base addresses, low then high word. */
1707 BUS_SPACE_WRT4_MVFREY2(inbound_base,
1708 hba->u.mvfrey.inlist_phy & 0xffffffff);
1709 BUS_SPACE_WRT4_MVFREY2(inbound_base_high,
1710 (hba->u.mvfrey.inlist_phy >> 16) >> 16);
1712 BUS_SPACE_WRT4_MVFREY2(outbound_base,
1713 hba->u.mvfrey.outlist_phy & 0xffffffff);
1714 BUS_SPACE_WRT4_MVFREY2(outbound_base_high,
1715 (hba->u.mvfrey.outlist_phy >> 16) >> 16);
1717 BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base,
1718 hba->u.mvfrey.outlist_cptr_phy & 0xffffffff);
1719 BUS_SPACE_WRT4_MVFREY2(outbound_shadow_base_high,
1720 (hba->u.mvfrey.outlist_cptr_phy >> 16) >> 16);
/*
 * Start pointers at the last list entry with the toggle bit set, so the
 * first post wraps to entry 0 with a consistent toggle state.
 */
1722 hba->u.mvfrey.inlist_wptr = (hba->u.mvfrey.list_count - 1)
1723 | CL_POINTER_TOGGLE;
1724 *hba->u.mvfrey.outlist_cptr = (hba->u.mvfrey.list_count - 1)
1725 | CL_POINTER_TOGGLE;
1726 hba->u.mvfrey.outlist_rptr = hba->u.mvfrey.list_count - 1;
1732 * CAM driver interface
/* newbus device methods: probe/attach/detach/shutdown entry points. */
1734 static device_method_t driver_methods[] = {
1735 /* Device interface */
1736 DEVMETHOD(device_probe, hptiop_probe),
1737 DEVMETHOD(device_attach, hptiop_attach),
1738 DEVMETHOD(device_detach, hptiop_detach),
1739 DEVMETHOD(device_shutdown, hptiop_shutdown),
/*
 * Per-family operation tables. hptiop_probe() selects one of these based
 * on PCI device ID; the rest of the driver dispatches through hba->ops.
 * ITL (Intel-based) parts need no internal DMA memory, hence the NULL
 * internal_memalloc.
 */
1743 static struct hptiop_adapter_ops hptiop_itl_ops = {
1744 .family = INTEL_BASED_IOP,
1745 .iop_wait_ready = hptiop_wait_ready_itl,
1746 .internal_memalloc = 0,
1747 .internal_memfree = hptiop_internal_memfree_itl,
1748 .alloc_pci_res = hptiop_alloc_pci_res_itl,
1749 .release_pci_res = hptiop_release_pci_res_itl,
1750 .enable_intr = hptiop_enable_intr_itl,
1751 .disable_intr = hptiop_disable_intr_itl,
1752 .get_config = hptiop_get_config_itl,
1753 .set_config = hptiop_set_config_itl,
1754 .iop_intr = hptiop_intr_itl,
1755 .post_msg = hptiop_post_msg_itl,
1756 .post_req = hptiop_post_req_itl,
1757 .do_ioctl = hptiop_do_ioctl_itl,
/* Marvell-based IOP operations. */
1761 static struct hptiop_adapter_ops hptiop_mv_ops = {
1762 .family = MV_BASED_IOP,
1763 .iop_wait_ready = hptiop_wait_ready_mv,
1764 .internal_memalloc = hptiop_internal_memalloc_mv,
1765 .internal_memfree = hptiop_internal_memfree_mv,
1766 .alloc_pci_res = hptiop_alloc_pci_res_mv,
1767 .release_pci_res = hptiop_release_pci_res_mv,
1768 .enable_intr = hptiop_enable_intr_mv,
1769 .disable_intr = hptiop_disable_intr_mv,
1770 .get_config = hptiop_get_config_mv,
1771 .set_config = hptiop_set_config_mv,
1772 .iop_intr = hptiop_intr_mv,
1773 .post_msg = hptiop_post_msg_mv,
1774 .post_req = hptiop_post_req_mv,
1775 .do_ioctl = hptiop_do_ioctl_mv,
/* MVFrey operations; the only family providing reset_comm. */
1779 static struct hptiop_adapter_ops hptiop_mvfrey_ops = {
1780 .family = MVFREY_BASED_IOP,
1781 .iop_wait_ready = hptiop_wait_ready_mvfrey,
1782 .internal_memalloc = hptiop_internal_memalloc_mvfrey,
1783 .internal_memfree = hptiop_internal_memfree_mvfrey,
1784 .alloc_pci_res = hptiop_alloc_pci_res_mvfrey,
1785 .release_pci_res = hptiop_release_pci_res_mvfrey,
1786 .enable_intr = hptiop_enable_intr_mvfrey,
1787 .disable_intr = hptiop_disable_intr_mvfrey,
1788 .get_config = hptiop_get_config_mvfrey,
1789 .set_config = hptiop_set_config_mvfrey,
1790 .iop_intr = hptiop_intr_mvfrey,
1791 .post_msg = hptiop_post_msg_mvfrey,
1792 .post_req = hptiop_post_req_mvfrey,
1793 .do_ioctl = hptiop_do_ioctl_mvfrey,
1794 .reset_comm = hptiop_reset_comm_mvfrey,
/* Driver descriptor and module registration on the pci bus. */
1797 static driver_t hptiop_pci_driver = {
1800 sizeof(struct hpt_iop_hba)
1803 DRIVER_MODULE(hptiop, pci, hptiop_pci_driver, hptiop_devclass, 0, 0);
1804 MODULE_DEPEND(hptiop, cam, 1, 1, 1);
/*
 * hptiop_probe: newbus probe. Matches HighPoint (vendor 0x1103) devices,
 * selects the per-family ops table from the device ID (the switch cases
 * are elided in this extract), sets the device description, and stashes
 * the chosen ops in the zeroed softc.
 */
1806 static int hptiop_probe(device_t dev)
1808 struct hpt_iop_hba *hba;
1810 static char buf[256];
1812 struct hptiop_adapter_ops *ops;
1814 if (pci_get_vendor(dev) != 0x1103)
1817 id = pci_get_device(dev);
1827 ops = &hptiop_mvfrey_ops;
1848 ops = &hptiop_itl_ops;
1853 ops = &hptiop_mv_ops;
1859 device_printf(dev, "adapter at PCI %d:%d:%d, IRQ %d\n",
1860 pci_get_bus(dev), pci_get_slot(dev),
1861 pci_get_function(dev), pci_get_irq(dev));
1863 sprintf(buf, "RocketRAID %x %s Controller\n",
1864 id, sas ? "SAS" : "SATA");
1865 device_set_desc_copy(dev, buf);
/* Zero the softc now so attach starts from a clean state. */
1867 hba = (struct hpt_iop_hba *)device_get_softc(dev);
1868 bzero(hba, sizeof(struct hpt_iop_hba));
1871 KdPrint(("hba->ops=%p\n", hba->ops));
/*
 * hptiop_attach: newbus attach. Brings the controller fully online:
 *   1. enable bus mastering, map PCI BARs, wait for IOP ready;
 *   2. create the parent/IO/SRB DMA tags and load the SRB pool;
 *   3. read the firmware config, push host config back;
 *   4. register a CAM SIM/path, hook the interrupt, start the firmware
 *      background task, and create the /dev management node.
 * Error handling unwinds through a chain of goto labels in reverse
 * acquisition order (labels themselves are elided in this extract).
 */
1875 static int hptiop_attach(device_t dev)
1877 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)device_get_softc(dev);
1878 struct hpt_iop_request_get_config iop_config;
1879 struct hpt_iop_request_set_config set_config;
1881 struct cam_devq *devq;
1882 struct ccb_setasync ccb;
1883 u_int32_t unit = device_get_unit(dev);
1885 device_printf(dev, "%d RocketRAID 3xxx/4xxx controller driver %s\n",
1886 unit, driver_version);
1888 KdPrint(("hptiop: attach(%d, %d/%d/%d) ops=%p\n", unit,
1889 pci_get_bus(dev), pci_get_slot(dev),
1890 pci_get_function(dev), hba->ops));
1892 pci_enable_busmaster(dev);
1894 hba->pciunit = unit;
1896 if (hba->ops->alloc_pci_res(hba))
1899 if (hba->ops->iop_wait_ready(hba, 2000)) {
1900 device_printf(dev, "adapter is not ready\n");
1901 goto release_pci_res;
/* Adapter lock; also used as the busdma lockfuncarg for io_dmat below. */
1904 mtx_init(&hba->lock, "hptioplock", NULL, MTX_DEF);
1906 if (bus_dma_tag_create(bus_get_dma_tag(dev),/* PCI parent */
1909 BUS_SPACE_MAXADDR, /* lowaddr */
1910 BUS_SPACE_MAXADDR, /* highaddr */
1911 NULL, NULL, /* filter, filterarg */
1912 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
1913 BUS_SPACE_UNRESTRICTED, /* nsegments */
1914 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1916 NULL, /* lockfunc */
1917 NULL, /* lockfuncarg */
1918 &hba->parent_dmat /* tag */))
1920 device_printf(dev, "alloc parent_dmat failed\n");
1921 goto release_pci_res;
/* MV needs its internal ctlcfg buffer before get_config can run. */
1924 if (hba->ops->family == MV_BASED_IOP) {
1925 if (hba->ops->internal_memalloc(hba)) {
1926 device_printf(dev, "alloc srb_dmat failed\n");
1927 goto destroy_parent_tag;
1931 if (hba->ops->get_config(hba, &iop_config)) {
1932 device_printf(dev, "get iop config failed.\n");
1933 goto get_config_failed;
/* Cache firmware-reported limits used to size DMA tags and the SIM. */
1936 hba->firmware_version = iop_config.firmware_version;
1937 hba->interface_version = iop_config.interface_version;
1938 hba->max_requests = iop_config.max_requests;
1939 hba->max_devices = iop_config.max_devices;
1940 hba->max_request_size = iop_config.request_size;
1941 hba->max_sg_count = iop_config.max_sg_count;
/* MVFrey sizes its rings from the config, so memalloc comes after. */
1943 if (hba->ops->family == MVFREY_BASED_IOP) {
1944 if (hba->ops->internal_memalloc(hba)) {
1945 device_printf(dev, "alloc srb_dmat failed\n");
1946 goto destroy_parent_tag;
1948 if (hba->ops->reset_comm(hba)) {
1949 device_printf(dev, "reset comm failed\n");
1950 goto get_config_failed;
/* I/O DMA tag: per-request data buffers, bounded by max_sg_count. */
1954 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1956 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1957 BUS_SPACE_MAXADDR, /* lowaddr */
1958 BUS_SPACE_MAXADDR, /* highaddr */
1959 NULL, NULL, /* filter, filterarg */
1960 PAGE_SIZE * (hba->max_sg_count-1), /* maxsize */
1961 hba->max_sg_count, /* nsegments */
1962 0x20000, /* maxsegsize */
1963 BUS_DMA_ALLOCNOW, /* flags */
1964 busdma_lock_mutex, /* lockfunc */
1965 &hba->lock, /* lockfuncarg */
1966 &hba->io_dmat /* tag */))
1968 device_printf(dev, "alloc io_dmat failed\n");
1969 goto get_config_failed;
/* SRB DMA tag: one contiguous pool for all SRBs (+0x20 for alignment). */
1972 if (bus_dma_tag_create(hba->parent_dmat,/* parent */
1975 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1976 BUS_SPACE_MAXADDR, /* highaddr */
1977 NULL, NULL, /* filter, filterarg */
1978 HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE + 0x20,
1980 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
1982 NULL, /* lockfunc */
1983 NULL, /* lockfuncarg */
1984 &hba->srb_dmat /* tag */))
1986 device_printf(dev, "alloc srb_dmat failed\n");
1987 goto destroy_io_dmat;
1990 if (bus_dmamem_alloc(hba->srb_dmat, (void **)&hba->uncached_ptr,
1991 BUS_DMA_WAITOK | BUS_DMA_COHERENT,
1992 &hba->srb_dmamap) != 0)
1994 device_printf(dev, "srb bus_dmamem_alloc failed!\n");
1995 goto destroy_srb_dmat;
/* hptiop_map_srb carves the pool into the SRB free list. */
1998 if (bus_dmamap_load(hba->srb_dmat,
1999 hba->srb_dmamap, hba->uncached_ptr,
2000 (HPT_SRB_MAX_SIZE * HPT_SRB_MAX_QUEUE_SIZE) + 0x20,
2001 hptiop_map_srb, hba, 0))
2003 device_printf(dev, "bus_dmamap_load failed!\n");
2004 goto srb_dmamem_free;
/* CAM: queue depth is max_requests - 1 (one slot reserved). */
2007 if ((devq = cam_simq_alloc(hba->max_requests - 1 )) == NULL) {
2008 device_printf(dev, "cam_simq_alloc failed\n");
2009 goto srb_dmamap_unload;
2012 hba->sim = cam_sim_alloc(hptiop_action, hptiop_poll, driver_name,
2013 hba, unit, &hba->lock, hba->max_requests - 1, 1, devq);
2015 device_printf(dev, "cam_sim_alloc failed\n");
2016 cam_simq_free(devq);
2017 goto srb_dmamap_unload;
2019 hptiop_lock_adapter(hba);
2020 if (xpt_bus_register(hba->sim, dev, 0) != CAM_SUCCESS)
2022 device_printf(dev, "xpt_bus_register failed\n");
2026 if (xpt_create_path(&hba->path, /*periph */ NULL,
2027 cam_sim_path(hba->sim), CAM_TARGET_WILDCARD,
2028 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2029 device_printf(dev, "xpt_create_path failed\n");
2030 goto deregister_xpt_bus;
2032 hptiop_unlock_adapter(hba);
/* Tell the firmware our identity and request-size limit. */
2034 bzero(&set_config, sizeof(set_config));
2035 set_config.iop_id = unit;
2036 set_config.vbus_id = cam_sim_path(hba->sim);
2037 set_config.max_host_request_size = HPT_SRB_MAX_REQ_SIZE;
2039 if (hba->ops->set_config(hba, &set_config)) {
2040 device_printf(dev, "set iop config failed.\n");
/* Register for device arrival/departure async notifications. */
2044 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2045 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2046 ccb.event_enable = (AC_FOUND_DEVICE | AC_LOST_DEVICE);
2047 ccb.callback = hptiop_async;
2048 ccb.callback_arg = hba->sim;
2049 xpt_action((union ccb *)&ccb);
2052 if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev, SYS_RES_IRQ,
2053 &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
2054 device_printf(dev, "allocate irq failed!\n");
2058 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
2059 NULL, hptiop_pci_intr, hba, &hba->irq_handle))
2061 device_printf(dev, "allocate intr function failed!\n");
2062 goto free_irq_resource;
2065 if (hptiop_send_sync_msg(hba,
2066 IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000)) {
2067 device_printf(dev, "fail to start background task\n");
2068 goto teartown_irq_resource;
2071 hba->ops->enable_intr(hba);
2072 hba->initialized = 1;
/* Character device for the management ioctl interface. */
2074 hba->ioctl_dev = make_dev(&hptiop_cdevsw, unit,
2075 UID_ROOT, GID_WHEEL /*GID_OPERATOR*/,
2076 S_IRUSR | S_IWUSR, "%s%d", driver_name, unit);
/* ---- error unwind, reverse order of acquisition ---- */
2082 teartown_irq_resource:
2083 bus_teardown_intr(dev, hba->irq_res, hba->irq_handle);
2086 bus_release_resource(dev, SYS_RES_IRQ, 0, hba->irq_res);
2088 hptiop_lock_adapter(hba);
2090 xpt_free_path(hba->path);
2093 xpt_bus_deregister(cam_sim_path(hba->sim));
2096 cam_sim_free(hba->sim, /*free devq*/ TRUE);
2097 hptiop_unlock_adapter(hba);
2100 if (hba->uncached_ptr)
2101 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2104 if (hba->uncached_ptr)
2105 bus_dmamem_free(hba->srb_dmat,
2106 hba->uncached_ptr, hba->srb_dmamap);
2110 bus_dma_tag_destroy(hba->srb_dmat);
2114 bus_dma_tag_destroy(hba->io_dmat);
2117 hba->ops->internal_memfree(hba);
2120 if (hba->parent_dmat)
2121 bus_dma_tag_destroy(hba->parent_dmat);
2124 if (hba->ops->release_pci_res)
2125 hba->ops->release_pci_res(hba);
/*
 * hptiop_detach: newbus detach. Refuses to detach while any exported
 * device is still in use, then shuts the controller down, stops the
 * firmware background task, and releases all driver resources.
 */
2130 static int hptiop_detach(device_t dev)
2132 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2136 hptiop_lock_adapter(hba);
/* Abort if any target still has a mounted/busy consumer. */
2137 for (i = 0; i < hba->max_devices; i++)
2138 if (hptiop_os_query_remove_device(hba, i)) {
2139 device_printf(dev, "%d file system is busy. id=%d",
2144 if ((error = hptiop_shutdown(dev)) != 0)
2146 if (hptiop_send_sync_msg(hba,
2147 IOPMU_INBOUND_MSG0_STOP_BACKGROUND_TASK, 60000))
2149 hptiop_unlock_adapter(hba);
2151 hptiop_release_resource(hba);
2154 hptiop_unlock_adapter(hba);
/*
 * hptiop_shutdown: quiesce the controller for system shutdown. Fails if
 * the management device is still open; otherwise masks interrupts and
 * sends the firmware a synchronous SHUTDOWN message (60 s timeout).
 */
2158 static int hptiop_shutdown(device_t dev)
2160 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)device_get_softc(dev);
2164 if (hba->flag & HPT_IOCTL_FLAG_OPEN) {
2165 device_printf(dev, "%d device is busy", hba->pciunit);
2169 hba->ops->disable_intr(hba);
2171 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_SHUTDOWN, 60000))
/* hptiop_pci_intr: interrupt handler — dispatch to the family's
 * iop_intr under the adapter lock. */
2177 static void hptiop_pci_intr(void *arg)
2179 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
2180 hptiop_lock_adapter(hba);
2181 hba->ops->iop_intr(hba);
2182 hptiop_unlock_adapter(hba);
/* hptiop_poll: CAM poll entry — same dispatch, caller holds the SIM
 * lock (which is hba->lock, per cam_sim_alloc in attach). */
2185 static void hptiop_poll(struct cam_sim *sim)
2187 struct hpt_iop_hba *hba;
2189 hba = cam_sim_softc(sim);
2190 hba->ops->iop_intr(hba);
/* hptiop_async: CAM async callback — no action needed for this driver. */
2193 static void hptiop_async(void * callback_arg, u_int32_t code,
2194 struct cam_path * path, void * arg)
/* ITL: unmask postqueue + MSG0 interrupts (mask register is inverted). */
2198 static void hptiop_enable_intr_itl(struct hpt_iop_hba *hba)
2200 BUS_SPACE_WRT4_ITL(outbound_intmask,
2201 ~(IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0))
/* MV: set the postqueue + MSG enable bits in the outbound mask. */
2204 static void hptiop_enable_intr_mv(struct hpt_iop_hba *hba)
2208 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2210 int_mask |= MVIOP_MU_OUTBOUND_INT_POSTQUEUE
2211 | MVIOP_MU_OUTBOUND_INT_MSG;
2212 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
/*
 * MVFrey: enable doorbell, ISR, and PCIe function interrupt sources.
 * Each write is followed by a read-back to flush the posted write.
 */
2215 static void hptiop_enable_intr_mvfrey(struct hpt_iop_hba *hba)
2217 BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, CPU_TO_F0_DRBL_MSG_A_BIT);
2218 BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2220 BUS_SPACE_WRT4_MVFREY2(isr_enable, 0x1);
2221 BUS_SPACE_RD4_MVFREY2(isr_enable);
2223 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0x1010);
2224 BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
/* ITL: mask both sources; read intstatus to flush the write. */
2227 static void hptiop_disable_intr_itl(struct hpt_iop_hba *hba)
2231 int_mask = BUS_SPACE_RD4_ITL(outbound_intmask);
2233 int_mask |= IOPMU_OUTBOUND_INT_POSTQUEUE | IOPMU_OUTBOUND_INT_MSG0;
2234 BUS_SPACE_WRT4_ITL(outbound_intmask, int_mask);
2235 BUS_SPACE_RD4_ITL(outbound_intstatus);
/* MV: clear the enable bits and flush with a read-back. */
2238 static void hptiop_disable_intr_mv(struct hpt_iop_hba *hba)
2241 int_mask = BUS_SPACE_RD4_MV0(outbound_intmask);
2243 int_mask &= ~(MVIOP_MU_OUTBOUND_INT_MSG
2244 | MVIOP_MU_OUTBOUND_INT_POSTQUEUE);
2245 BUS_SPACE_WRT4_MV0(outbound_intmask,int_mask);
2246 BUS_SPACE_RD4_MV0(outbound_intmask);
/* MVFrey: zero all three interrupt-enable registers, flushing each. */
2249 static void hptiop_disable_intr_mvfrey(struct hpt_iop_hba *hba)
2251 BUS_SPACE_WRT4_MVFREY2(f0_doorbell_enable, 0);
2252 BUS_SPACE_RD4_MVFREY2(f0_doorbell_enable);
2254 BUS_SPACE_WRT4_MVFREY2(isr_enable, 0);
2255 BUS_SPACE_RD4_MVFREY2(isr_enable);
2257 BUS_SPACE_WRT4_MVFREY2(pcie_f0_int_enable, 0);
2258 BUS_SPACE_RD4_MVFREY2(pcie_f0_int_enable);
/*
 * hptiop_reset_adapter: synchronously reset the IOP and restart the
 * firmware background task afterwards.
 */
2261 static void hptiop_reset_adapter(void *argv)
2263 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)argv;
2264 if (hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_RESET, 60000))
2266 hptiop_send_sync_msg(hba, IOPMU_INBOUND_MSG0_START_BACKGROUND_TASK, 5000);
/* hptiop_get_srb: pop an SRB from the singly-linked free list, or the
 * elided else-branch handles an empty list. Caller holds hba->lock. */
2269 static void *hptiop_get_srb(struct hpt_iop_hba * hba)
2271 struct hpt_iop_srb * srb;
2273 if (hba->srb_list) {
2274 srb = hba->srb_list;
2275 hba->srb_list = srb->next;
/* hptiop_free_srb: push an SRB back onto the free list. */
2282 static void hptiop_free_srb(struct hpt_iop_hba *hba, struct hpt_iop_srb *srb)
2284 srb->next = hba->srb_list;
2285 hba->srb_list = srb;
/*
 * hptiop_action: CAM SIM action entry point. Handles SCSI I/O by
 * allocating an SRB and DMA-mapping the CCB (completion continues in
 * hptiop_post_scsi_command), plus reset/path-inquiry/geometry requests.
 * Several case labels and status settings fall on elided lines.
 */
2288 static void hptiop_action(struct cam_sim *sim, union ccb *ccb)
2290 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)cam_sim_softc(sim);
2291 struct hpt_iop_srb * srb;
2294 switch (ccb->ccb_h.func_code) {
/* XPT_SCSI_IO: validate target/LUN; physical CDB pointers unsupported. */
2297 if (ccb->ccb_h.target_lun != 0 ||
2298 ccb->ccb_h.target_id >= hba->max_devices ||
2299 (ccb->ccb_h.flags & CAM_CDB_PHYS))
2301 ccb->ccb_h.status = CAM_TID_INVALID;
2306 if ((srb = hptiop_get_srb(hba)) == NULL) {
2307 device_printf(hba->pcidev, "srb allocated failed");
2308 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* Map the CCB's data; hptiop_post_scsi_command posts the request. */
2314 error = bus_dmamap_load_ccb(hba->io_dmat,
2317 hptiop_post_scsi_command,
2321 if (error && error != EINPROGRESS) {
2322 device_printf(hba->pcidev,
2323 "%d bus_dmamap_load error %d",
2324 hba->pciunit, error);
2325 xpt_freeze_simq(hba->sim, 1);
2326 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
2327 hptiop_free_srb(hba, srb);
2335 device_printf(hba->pcidev, "reset adapter");
2337 hptiop_reset_adapter(hba);
2340 case XPT_GET_TRAN_SETTINGS:
2341 case XPT_SET_TRAN_SETTINGS:
2342 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2345 case XPT_CALC_GEOMETRY:
2346 cam_calc_geometry(&ccb->ccg, 1);
/* XPT_PATH_INQ: report SIM capabilities to CAM. */
2351 struct ccb_pathinq *cpi = &ccb->cpi;
2353 cpi->version_num = 1;
2354 cpi->hba_inquiry = PI_SDTR_ABLE;
2355 cpi->target_sprt = 0;
2356 cpi->hba_misc = PIM_NOBUSRESET;
2357 cpi->hba_eng_cnt = 0;
2358 cpi->max_target = hba->max_devices;
2360 cpi->unit_number = cam_sim_unit(sim);
2361 cpi->bus_id = cam_sim_bus(sim);
2362 cpi->initiator_id = hba->max_devices;
2363 cpi->base_transfer_speed = 3300;
2365 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2366 strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
2367 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2368 cpi->transport = XPORT_SPI;
2369 cpi->transport_version = 2;
2370 cpi->protocol = PROTO_SCSI;
2371 cpi->protocol_version = SCSI_REV_2;
2372 cpi->ccb_h.status = CAM_REQ_CMP;
2377 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * hptiop_post_req_itl: busdma callback stage that builds and posts a
 * SCSI command to an ITL IOP. Two paths:
 *  - HIGH_MEM_ACESS SRBs: build the request on the stack and write it
 *    into an IOP-resident frame popped from the inbound queue;
 *  - normal SRBs: build the request in place in host SRB memory and
 *    post its physical address (with size-hint bits on newer firmware).
 */
2385 static void hptiop_post_req_itl(struct hpt_iop_hba *hba,
2386 struct hpt_iop_srb *srb,
2387 bus_dma_segment_t *segs, int nsegs)
2390 union ccb *ccb = srb->ccb;
/* CDB may be passed by pointer or inline in the CCB. */
2393 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2394 cdb = ccb->csio.cdb_io.cdb_ptr;
2396 cdb = ccb->csio.cdb_io.cdb_bytes;
2398 KdPrint(("ccb=%p %x-%x-%x\n",
2399 ccb, *(u_int32_t *)cdb, *((u_int32_t *)cdb+1), *((u_int32_t *)cdb+2)));
2401 if (srb->srb_flag & HPT_SRB_FLAG_HIGH_MEM_ACESS) {
2402 u_int32_t iop_req32;
2403 struct hpt_iop_request_scsi_command req;
2405 iop_req32 = BUS_SPACE_RD4_ITL(inbound_queue);
2407 if (iop_req32 == IOPMU_QUEUE_EMPTY) {
/* No IOP frame available: fail the CCB and recycle the SRB. */
2408 device_printf(hba->pcidev, "invalid req offset\n");
2409 ccb->ccb_h.status = CAM_BUSY;
2410 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2411 hptiop_free_srb(hba, srb);
/* Build the scatter/gather list from the busdma segments. */
2416 if (ccb->csio.dxfer_len && nsegs > 0) {
2417 struct hpt_iopsg *psg = req.sg_list;
2418 for (idx = 0; idx < nsegs; idx++, psg++) {
2419 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2420 psg->size = segs[idx].ds_len;
2426 bcopy(cdb, req.cdb, ccb->csio.cdb_len);
2429 offsetof(struct hpt_iop_request_scsi_command, sg_list)
2430 + nsegs*sizeof(struct hpt_iopsg);
2431 req.header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2432 req.header.flags = 0;
2433 req.header.result = IOP_RESULT_PENDING;
/* Context carries the SRB pointer back on completion. */
2434 req.header.context = (u_int64_t)(unsigned long)srb;
2435 req.dataxfer_length = ccb->csio.dxfer_len;
2437 req.target = ccb->ccb_h.target_id;
2438 req.lun = ccb->ccb_h.target_lun;
/* Copy the finished request byte-wise into the IOP frame. */
2440 bus_space_write_region_1(hba->bar0t, hba->bar0h, iop_req32,
2441 (u_int8_t *)&req, req.header.size);
2443 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2444 bus_dmamap_sync(hba->io_dmat,
2445 srb->dma_map, BUS_DMASYNC_PREREAD);
2447 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2448 bus_dmamap_sync(hba->io_dmat,
2449 srb->dma_map, BUS_DMASYNC_PREWRITE);
2451 BUS_SPACE_WRT4_ITL(inbound_queue,iop_req32);
/* Normal path: the request lives at the start of the SRB itself. */
2453 struct hpt_iop_request_scsi_command *req;
2455 req = (struct hpt_iop_request_scsi_command *)srb;
2456 if (ccb->csio.dxfer_len && nsegs > 0) {
2457 struct hpt_iopsg *psg = req->sg_list;
2458 for (idx = 0; idx < nsegs; idx++, psg++) {
2460 (u_int64_t)segs[idx].ds_addr;
2461 psg->size = segs[idx].ds_len;
2467 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2469 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2470 req->header.result = IOP_RESULT_PENDING;
2471 req->dataxfer_length = ccb->csio.dxfer_len;
2473 req->target = ccb->ccb_h.target_id;
2474 req->lun = ccb->ccb_h.target_lun;
2476 offsetof(struct hpt_iop_request_scsi_command, sg_list)
2477 + nsegs*sizeof(struct hpt_iopsg);
/* Host-resident request: context is the SRB index + host bit. */
2478 req->header.context = (u_int64_t)srb->index |
2479 IOPMU_QUEUE_ADDR_HOST_BIT;
2480 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
2482 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2483 bus_dmamap_sync(hba->io_dmat,
2484 srb->dma_map, BUS_DMASYNC_PREREAD);
2485 }else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT) {
2486 bus_dmamap_sync(hba->io_dmat,
2487 srb->dma_map, BUS_DMASYNC_PREWRITE);
/*
 * Newer firmware accepts request-size hints encoded in the low
 * bits of the posted physical address; older firmware gets the
 * plain host-bit form.
 */
2490 if (hba->firmware_version > 0x01020000
2491 || hba->interface_version > 0x01020000) {
2492 u_int32_t size_bits;
2494 if (req->header.size < 256)
2495 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT;
2496 else if (req->header.size < 512)
2497 size_bits = IOPMU_QUEUE_ADDR_HOST_BIT;
2499 size_bits = IOPMU_QUEUE_REQUEST_SIZE_BIT
2500 | IOPMU_QUEUE_ADDR_HOST_BIT;
2502 BUS_SPACE_WRT4_ITL(inbound_queue,
2503 (u_int32_t)srb->phy_addr | size_bits);
2505 BUS_SPACE_WRT4_ITL(inbound_queue, (u_int32_t)srb->phy_addr
2506 |IOPMU_QUEUE_ADDR_HOST_BIT);
/*
 * hptiop_post_req_mv - build and post a SCSI command to a Marvell (MV)
 * class IOP.  The request image is built directly inside the SRB's own
 * DMA-visible memory (req aliases srb) and handed to the firmware by
 * physical address (srb->phy_addr).
 *
 * NOTE(review): the source extraction here is lossy -- original line
 * numbers are fused into the text and several lines (local declarations,
 * closing braces, the S/G end-of-table marker, the `else`) are missing.
 * Comments only; no code changes.  Verify against the upstream file.
 */
2510 static void hptiop_post_req_mv(struct hpt_iop_hba *hba,
2511 struct hpt_iop_srb *srb,
2512 bus_dma_segment_t *segs, int nsegs)
2515 union ccb *ccb = srb->ccb;
2517 struct hpt_iop_request_scsi_command *req;
/* The SRB memory doubles as the firmware request structure. */
2520 req = (struct hpt_iop_request_scsi_command *)srb;
2521 req_phy = srb->phy_addr;
/* Copy the busdma S/G segments into the request's embedded sg_list. */
2523 if (ccb->csio.dxfer_len && nsegs > 0) {
2524 struct hpt_iopsg *psg = req->sg_list;
2525 for (idx = 0; idx < nsegs; idx++, psg++) {
2526 psg->pci_address = (u_int64_t)segs[idx].ds_addr;
2527 psg->size = segs[idx].ds_len;
/* CDB may be passed by pointer or inline in the CCB, per CAM flags. */
2532 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2533 cdb = ccb->csio.cdb_io.cdb_ptr;
2535 cdb = ccb->csio.cdb_io.cdb_bytes;
2537 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2538 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2539 req->header.result = IOP_RESULT_PENDING;
2540 req->dataxfer_length = ccb->csio.dxfer_len;
2542 req->target = ccb->ccb_h.target_id;
2543 req->lun = ccb->ccb_h.target_lun;
/*
 * Size = fixed header plus exactly nsegs S/G entries; the one hpt_iopsg
 * embedded in the struct is subtracted out first.
 */
2544 req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2545 - sizeof(struct hpt_iopsg)
2546 + nsegs * sizeof(struct hpt_iopsg);
/* Sync data buffers for the device before the firmware can touch them. */
2547 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2548 bus_dmamap_sync(hba->io_dmat,
2549 srb->dma_map, BUS_DMASYNC_PREREAD);
2551 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2552 bus_dmamap_sync(hba->io_dmat,
2553 srb->dma_map, BUS_DMASYNC_PREWRITE);
/*
 * Pack the SRB index into the context so the completion handler can
 * recover the SRB; tag the request as a SCSI command for the MV IOP.
 */
2554 req->header.context = (u_int64_t)srb->index
2555 << MVIOP_REQUEST_NUMBER_START_BIT
2556 | MVIOP_CMD_TYPE_SCSI;
2557 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT;
/*
 * Low bits of the posted address carry min(3, size/256) -- presumably a
 * request-size hint for the firmware's fetch; TODO confirm encoding.
 */
2558 size = req->header.size >> 8;
2559 hptiop_mv_inbound_write(req_phy
2560 | MVIOP_MU_QUEUE_ADDR_HOST_BIT
2561 | imin(3, size), hba);
/*
 * hptiop_post_req_mvfrey - build a SCSI command request in the SRB's
 * DMA-visible memory and post it to an MVFREY-class IOP through the
 * host-memory inbound ring (inlist) rather than a register queue.
 *
 * NOTE(review): extraction is lossy (fused line numbers, missing braces,
 * missing `else`/declarations, missing S/G terminator lines); comments
 * only, no code changes.  Verify against the upstream file.
 */
2564 static void hptiop_post_req_mvfrey(struct hpt_iop_hba *hba,
2565 struct hpt_iop_srb *srb,
2566 bus_dma_segment_t *segs, int nsegs)
2569 union ccb *ccb = srb->ccb;
2571 struct hpt_iop_request_scsi_command *req;
2574 req = (struct hpt_iop_request_scsi_command *)srb;
2575 req_phy = srb->phy_addr;
/*
 * Copy S/G segments; bit 0 is OR'ed into each address -- looks like a
 * per-entry valid/flag bit in the mvfrey S/G format, TODO confirm.
 */
2577 if (ccb->csio.dxfer_len && nsegs > 0) {
2578 struct hpt_iopsg *psg = req->sg_list;
2579 for (idx = 0; idx < nsegs; idx++, psg++) {
2580 psg->pci_address = (u_int64_t)segs[idx].ds_addr | 1;
2581 psg->size = segs[idx].ds_len;
/* CDB by pointer or inline, per CAM_CDB_POINTER. */
2586 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
2587 cdb = ccb->csio.cdb_io.cdb_ptr;
2589 cdb = ccb->csio.cdb_io.cdb_bytes;
2591 bcopy(cdb, req->cdb, ccb->csio.cdb_len);
2592 req->header.type = IOP_REQUEST_TYPE_SCSI_COMMAND;
2593 req->header.result = IOP_RESULT_PENDING;
2594 req->dataxfer_length = ccb->csio.dxfer_len;
2596 req->target = ccb->ccb_h.target_id;
2597 req->lun = ccb->ccb_h.target_lun;
/* Header plus exactly nsegs S/G entries (embedded entry subtracted). */
2598 req->header.size = sizeof(struct hpt_iop_request_scsi_command)
2599 - sizeof(struct hpt_iopsg)
2600 + nsegs * sizeof(struct hpt_iopsg);
2601 if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_IN) {
2602 bus_dmamap_sync(hba->io_dmat,
2603 srb->dma_map, BUS_DMASYNC_PREREAD);
2605 else if ((ccb->ccb_h.flags & CAM_DIR_MASK) == CAM_DIR_OUT)
2606 bus_dmamap_sync(hba->io_dmat,
2607 srb->dma_map, BUS_DMASYNC_PREWRITE);
/*
 * flags carries bits 47:32 of the request's physical address in its
 * upper half; context packs the low 32 address bits in its upper half
 * along with the host-address bit and the request type.
 */
2609 req->header.flags = IOP_REQUEST_FLAG_OUTPUT_CONTEXT
2610 | IOP_REQUEST_FLAG_ADDR_BITS
2611 | ((req_phy >> 16) & 0xffff0000);
2612 req->header.context = ((req_phy & 0xffffffff) << 32 )
2614 | IOPMU_QUEUE_ADDR_HOST_BIT | req->header.type;
/*
 * Advance the inbound ring write pointer.  Low 14 bits index the list;
 * on wrap they are cleared and CL_POINTER_TOGGLE is flipped --
 * presumably so producer and consumer can distinguish laps.
 */
2616 hba->u.mvfrey.inlist_wptr++;
2617 index = hba->u.mvfrey.inlist_wptr & 0x3fff;
2619 if (index == hba->u.mvfrey.list_count) {
2621 hba->u.mvfrey.inlist_wptr &= ~0x3fff;
2622 hba->u.mvfrey.inlist_wptr ^= CL_POINTER_TOGGLE;
/* Publish the request: address plus length in 32-bit words (rounded up). */
2625 hba->u.mvfrey.inlist[index].addr = req_phy;
2626 hba->u.mvfrey.inlist[index].intrfc_len = (req->header.size + 3) / 4;
/*
 * Ring the doorbell; the read-back presumably flushes the posted PCI
 * write so the firmware sees the new pointer -- TODO confirm.
 */
2628 BUS_SPACE_WRT4_MVFREY2(inbound_write_ptr, hba->u.mvfrey.inlist_wptr);
2629 BUS_SPACE_RD4_MVFREY2(inbound_write_ptr);
/* Arm a 20-second command timeout that escalates to an adapter reset. */
2631 if (req->header.type == IOP_REQUEST_TYPE_SCSI_COMMAND) {
2632 callout_reset(&srb->timeout, 20 * hz, hptiop_reset_adapter, hba);
/*
 * hptiop_post_scsi_command - bus_dmamap_load() callback for a SCSI CCB.
 * On mapping error (or more segments than the adapter supports) the CCB
 * is failed with CAM_BUSY and the SRB recycled; otherwise the request is
 * dispatched to the controller-specific post routine (ITL/MV/MVFREY).
 *
 * NOTE(review): extraction is lossy -- the error path's xpt_done()/return
 * and closing brace appear to have been dropped here.  Comments only.
 */
2636 static void hptiop_post_scsi_command(void *arg, bus_dma_segment_t *segs,
2637 int nsegs, int error)
2639 struct hpt_iop_srb *srb = (struct hpt_iop_srb *)arg;
2640 union ccb *ccb = srb->ccb;
2641 struct hpt_iop_hba *hba = srb->hba;
2643 if (error || nsegs > hba->max_sg_count) {
2644 KdPrint(("hptiop: func_code=%x tid=%x lun=%jx nsegs=%d\n",
2645 ccb->ccb_h.func_code,
2646 ccb->ccb_h.target_id,
2647 (uintmax_t)ccb->ccb_h.target_lun, nsegs));
/* CAM_BUSY asks the upper layer to retry later; release DMA + SRB. */
2648 ccb->ccb_h.status = CAM_BUSY;
2649 bus_dmamap_unload(hba->io_dmat, srb->dma_map);
2650 hptiop_free_srb(hba, srb);
/* Hand off to the per-controller-family post routine. */
2655 hba->ops->post_req(hba, srb, segs, nsegs);
/*
 * hptiop_mv_map_ctlcfg - bus_dmamap_load() callback for the MV control/
 * config command buffer: rounds both the physical and virtual addresses
 * up to a 32-byte boundary (the +0x1F here; the matching ~0x1F mask
 * continuation lines appear to have been dropped by the extraction).
 */
2658 static void hptiop_mv_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2659 int nsegs, int error)
2661 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2662 hba->ctlcfgcmd_phy = ((u_int64_t)segs->ds_addr + 0x1F)
2664 hba->ctlcfg_ptr = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
/*
 * hptiop_mvfrey_map_ctlcfg - bus_dmamap_load() callback for the MVFREY
 * control/config area.  After 32-byte aligning the region it is carved,
 * with virtual pointer `p` and physical address `phy` advanced in
 * lockstep, into: the ctlcfg command buffer, the inbound request ring
 * (inlist), the outbound completion ring (outlist), and the firmware-
 * updated copy of the outlist pointer (outlist_cptr).
 *
 * NOTE(review): extraction lossy (mask continuations and the advance
 * past the ctlcfg command region are missing).  Comments only.
 */
2668 static void hptiop_mvfrey_map_ctlcfg(void *arg, bus_dma_segment_t *segs,
2669 int nsegs, int error)
2671 struct hpt_iop_hba *hba = (struct hpt_iop_hba *)arg;
2674 u_int32_t list_count = hba->u.mvfrey.list_count;
/* Round phys and virt up to the next 32-byte boundary. */
2676 phy = ((u_int64_t)segs->ds_addr + 0x1F)
2678 p = (u_int8_t *)(((unsigned long)hba->ctlcfg_ptr + 0x1F)
2681 hba->ctlcfgcmd_phy = phy;
2682 hba->ctlcfg_ptr = p;
/* Inbound request ring: list_count entries. */
2687 hba->u.mvfrey.inlist = (struct mvfrey_inlist_entry *)p;
2688 hba->u.mvfrey.inlist_phy = phy;
2690 p += list_count * sizeof(struct mvfrey_inlist_entry);
2691 phy += list_count * sizeof(struct mvfrey_inlist_entry);
/* Outbound completion ring, same entry count. */
2693 hba->u.mvfrey.outlist = (struct mvfrey_outlist_entry *)p;
2694 hba->u.mvfrey.outlist_phy = phy;
2696 p += list_count * sizeof(struct mvfrey_outlist_entry);
2697 phy += list_count * sizeof(struct mvfrey_outlist_entry);
/* Location where the firmware DMAs its current outlist pointer. */
2699 hba->u.mvfrey.outlist_cptr = (u_int32_t *)p;
2700 hba->u.mvfrey.outlist_cptr_phy = phy;
/*
 * hptiop_map_srb - bus_dmamap_load() callback for the uncached SRB pool.
 * Aligns the pool to 32 bytes, then for each of the
 * HPT_SRB_MAX_QUEUE_SIZE slots: verifies alignment, creates a per-SRB
 * DMA map, records the SRB's bus address, initializes its timeout
 * callout against hba->lock, and pushes it onto the free list.
 *
 * NOTE(review): extraction lossy (error `return`s, `else`, closing
 * braces and some continuations missing).  Comments only.
 */
2703 static void hptiop_map_srb(void *arg, bus_dma_segment_t *segs,
2704 int nsegs, int error)
2706 struct hpt_iop_hba * hba = (struct hpt_iop_hba *)arg;
/* 32-byte align the bus address of the pool, mirroring the virt side. */
2707 bus_addr_t phy_addr = (segs->ds_addr + 0x1F) & ~(bus_addr_t)0x1F;
2708 struct hpt_iop_srb *srb, *tmp_srb;
2711 if (error || nsegs == 0) {
2712 device_printf(hba->pcidev, "hptiop_map_srb error");
/* Matching 32-byte alignment of the virtual base. */
2717 srb = (struct hpt_iop_srb *)
2718 (((unsigned long)hba->uncached_ptr + 0x1F)
2719 & ~(unsigned long)0x1F);
2721 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2722 tmp_srb = (struct hpt_iop_srb *)
2723 ((char *)srb + i * HPT_SRB_MAX_SIZE);
2724 if (((unsigned long)tmp_srb & 0x1F) == 0) {
2725 if (bus_dmamap_create(hba->io_dmat,
2726 0, &tmp_srb->dma_map)) {
2727 device_printf(hba->pcidev, "dmamap create failed");
2731 bzero(tmp_srb, sizeof(struct hpt_iop_srb));
/*
 * ITL controllers (no ctlcfg buffer) use a 32-bit request address;
 * flag SRBs whose bus address exceeds the 32G-support mask so the
 * post path can handle high memory -- TODO confirm flag semantics.
 */
2734 if (hba->ctlcfg_ptr == 0) {/*itl iop*/
2735 tmp_srb->phy_addr = (u_int64_t)(u_int32_t)
2737 if (phy_addr & IOPMU_MAX_MEM_SUPPORT_MASK_32G)
2739 HPT_SRB_FLAG_HIGH_MEM_ACESS;
2741 tmp_srb->phy_addr = phy_addr;
/* Timeout callout shares the adapter mutex; then park SRB on free list. */
2744 callout_init_mtx(&tmp_srb->timeout, &hba->lock, 0);
2745 hptiop_free_srb(hba, tmp_srb);
2746 hba->srb[i] = tmp_srb;
2747 phy_addr += HPT_SRB_MAX_SIZE;
2750 device_printf(hba->pcidev, "invalid alignment");
/*
 * hptiop_os_message_callback - handler for firmware message-port
 * notifications.  Body not visible in this extraction (presumably an
 * intentional no-op on FreeBSD) -- TODO confirm against upstream.
 */
2756 static void hptiop_os_message_callback(struct hpt_iop_hba * hba, u_int32_t msg)
/*
 * hptiop_os_query_remove_device - ask CAM whether a target can be
 * removed safely.  Builds a path to the target and checks whether a
 * "da" (direct access) peripheral still holds references on it.
 * Returns nonzero when the device is busy -- NOTE(review): the line
 * setting retval on the busy branch appears dropped by the extraction;
 * confirm against upstream.
 */
2761 static int hptiop_os_query_remove_device(struct hpt_iop_hba * hba,
2764 struct cam_periph *periph = NULL;
2765 struct cam_path *path;
2766 int status, retval = 0;
2768 status = xpt_create_path(&path, NULL, hba->sim->path_id, target_id, 0);
2770 if (status == CAM_REQ_CMP) {
2771 if ((periph = cam_periph_find(path, "da")) != NULL) {
/* Any outstanding reference on the da periph means "in use". */
2772 if (periph->refcount >= 1) {
2773 device_printf(hba->pcidev, "%d ,"
2776 hba->pciunit, target_id, periph->refcount);
2780 xpt_free_path(path);
/*
 * hptiop_release_resource - tear down everything hptiop_attach() set up,
 * in roughly reverse order: ioctl dev node, CAM async callback + path,
 * interrupt handler, SIM/bus registration, control/config DMA memory,
 * per-SRB DMA maps and callouts, the SRB pool, DMA tags, IRQ and BAR
 * resources, and finally the adapter mutex.
 *
 * NOTE(review): extraction lossy -- the guarding `if (hba->...)` lines
 * for several of these releases (path, sim, io_dmat, irq/bar resources)
 * appear to have been dropped.  Comments only; no code changes.
 */
2785 static void hptiop_release_resource(struct hpt_iop_hba *hba)
2790 destroy_dev(hba->ioctl_dev);
/* Deregister the async event callback installed at attach time. */
2793 struct ccb_setasync ccb;
2795 xpt_setup_ccb(&ccb.ccb_h, hba->path, /*priority*/5);
2796 ccb.ccb_h.func_code = XPT_SASYNC_CB;
2797 ccb.event_enable = 0;
2798 ccb.callback = hptiop_async;
2799 ccb.callback_arg = hba->sim;
2800 xpt_action((union ccb *)&ccb);
2801 xpt_free_path(hba->path);
2804 if (hba->irq_handle)
2805 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
/* CAM requires the SIM lock held across deregister/free. */
2808 hptiop_lock_adapter(hba);
2809 xpt_bus_deregister(cam_sim_path(hba->sim));
2810 cam_sim_free(hba->sim, TRUE);
2811 hptiop_unlock_adapter(hba);
/* Control/config command DMA memory (MV/MVFREY only). */
2814 if (hba->ctlcfg_dmat) {
2815 bus_dmamap_unload(hba->ctlcfg_dmat, hba->ctlcfg_dmamap);
2816 bus_dmamem_free(hba->ctlcfg_dmat,
2817 hba->ctlcfg_ptr, hba->ctlcfg_dmamap);
2818 bus_dma_tag_destroy(hba->ctlcfg_dmat);
/* Per-SRB maps and timeout callouts. */
2821 for (i = 0; i < HPT_SRB_MAX_QUEUE_SIZE; i++) {
2822 struct hpt_iop_srb *srb = hba->srb[i];
2824 bus_dmamap_destroy(hba->io_dmat, srb->dma_map);
2825 callout_drain(&srb->timeout);
/* The uncached SRB pool itself. */
2828 if (hba->srb_dmat) {
2829 bus_dmamap_unload(hba->srb_dmat, hba->srb_dmamap);
2830 bus_dmamap_destroy(hba->srb_dmat, hba->srb_dmamap);
2831 bus_dma_tag_destroy(hba->srb_dmat);
2835 bus_dma_tag_destroy(hba->io_dmat);
2837 if (hba->parent_dmat)
2838 bus_dma_tag_destroy(hba->parent_dmat);
/* Release bus-level resources last, then the adapter mutex. */
2841 bus_release_resource(hba->pcidev, SYS_RES_IRQ,
2845 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2846 hba->bar0_rid, hba->bar0_res);
2848 bus_release_resource(hba->pcidev, SYS_RES_MEMORY,
2849 hba->bar2_rid, hba->bar2_res);
2850 mtx_destroy(&hba->lock);