2 * SPDX-License-Identifier: BSD-2-Clause
4 * Copyright (c) 2011 HighPoint Technologies, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/hpt27xx/hpt27xx_config.h>
31 #include <dev/hpt27xx/os_bsd.h>
32 #include <dev/hpt27xx/hptintf.h>
/*
 * hpt_match: walk the global him_list and return the HIM (hardware
 * interface module) whose supported PCI vendor/device ID matches this
 * device.  When 'scan' is nonzero each HIM is also asked to update its
 * controller count.
 * NOTE(review): interior lines are elided in this excerpt; the local
 * declarations and return paths are not visible here.
 */
34 static HIM *hpt_match(device_t dev, int scan)
40 for (him = him_list; him; him = him->next) {
/* iterate the HIM's supported-ID table until it reports no more entries */
41 for (i=0; him->get_supported_device_id(i, &pci_id); i++) {
42 if (scan && him->get_controller_count)
43 him->get_controller_count(&pci_id,0,0);
/* match on PCI vendor + device ID only; subsystem is checked later */
44 if ((pci_get_vendor(dev) == pci_id.vid) &&
45 (pci_get_device(dev) == pci_id.did)){
/*
 * hpt_probe: newbus probe method.  Succeeds when hpt_match() finds a
 * supporting HIM; sets the device description from the HIM's name.
 * NOTE(review): the NULL-check on 'him' between match and use is elided
 * from this excerpt — presumably present in the full source.
 */
53 static int hpt_probe(device_t dev)
57 him = hpt_match(dev, 0);
59 KdPrint(("hpt_probe: adapter at PCI %d:%d:%d, IRQ %d",
60 pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev), pci_get_irq(dev)
62 device_set_desc(dev, him->name);
63 return (BUS_PROBE_DEFAULT);
/*
 * hpt_attach: newbus attach method.
 *  - re-matches the HIM (scan=1 so controller counts are refreshed),
 *  - enables PCI bus mastering,
 *  - allocates and creates the HIM adapter instance,
 *  - registers the adapter with the LDM layer, creating a new virtual
 *    bus (VBUS) the first time registration fails,
 *  - links this HBA onto its vbus_ext's hba_list.
 * NOTE(review): error-return lines and several closing braces are elided
 * in this excerpt.
 */
69 static int hpt_attach(device_t dev)
71 PHBA hba = (PHBA)device_get_softc(dev);
78 KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));
80 him = hpt_match(dev, 1);
81 hba->ext_type = EXT_TYPE_HBA;
82 hba->ldm_adapter.him = him;
83 pci_enable_busmaster(dev);
/* Collect the full PCI identity the HIM needs to size/create the adapter. */
85 pci_id.vid = pci_get_vendor(dev);
86 pci_id.did = pci_get_device(dev);
87 pci_id.rev = pci_get_revid(dev);
88 pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);
90 size = him->get_adapter_size(&pci_id);
/* M_WAITOK malloc cannot return NULL on FreeBSD; the check is belt-and-braces. */
91 hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
92 if (!hba->ldm_adapter.him_handle)
96 hba->pciaddr.tree = 0;
97 hba->pciaddr.bus = pci_get_bus(dev);
98 hba->pciaddr.device = pci_get_slot(dev);
99 hba->pciaddr.function = pci_get_function(dev);
101 if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
102 free(hba->ldm_adapter.him_handle, M_DEVBUF);
106 os_printk("adapter at PCI %d:%d:%d, IRQ %d",
107 hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));
/*
 * First registration attempt fails when no VBUS exists yet for this
 * adapter; in that case allocate a VBUS_EXT + VBUS, create the vbus,
 * and register again.
 */
109 if (!ldm_register_adapter(&hba->ldm_adapter)) {
110 size = ldm_get_vbus_size();
111 vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
113 free(hba->ldm_adapter.him_handle, M_DEVBUF);
116 memset(vbus_ext, 0, sizeof(VBUS_EXT));
117 vbus_ext->ext_type = EXT_TYPE_VBUS;
118 ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
119 ldm_register_adapter(&hba->ldm_adapter);
/* Find the vbus_ext this adapter landed on and prepend the HBA to its list. */
122 ldm_for_each_vbus(vbus, vbus_ext) {
123 if (hba->ldm_adapter.vbus==vbus) {
124 hba->vbus_ext = vbus_ext;
125 hba->next = vbus_ext->hba_list;
126 vbus_ext->hba_list = hba;
134 * Maybe we'd better to use the bus_dmamem_alloc to alloc DMA memory,
135 * but there are some problems currently (alignment, etc).
/*
 * __get_free_pages: allocate 2^order physically contiguous, page-aligned
 * pages for DMA pools, avoiding the ISA-DMA (<16MB) range so legacy
 * devices are not starved.
 */
137 static __inline void *__get_free_pages(int order)
139 /* don't use low memory - other devices may get starved */
140 return contigmalloc(PAGE_SIZE<<order,
141 M_DEVBUF, M_WAITOK, BUS_SPACE_MAXADDR_24BIT, BUS_SPACE_MAXADDR, PAGE_SIZE, 0);
/* free_pages: release a 2^order-page allocation made by __get_free_pages. */
144 static __inline void free_pages(void *p, int order)
146 contigfree(p, PAGE_SIZE<<order, M_DEVBUF);
/*
 * hpt_alloc_mem: pre-allocate all memory a vbus will need at run time.
 *  1. Ask every HBA's HIM for its memory requirements, then let the LDM
 *     layer total them into the vbus freelists.
 *  2. Fill the ordinary freelists from malloc().
 *  3. Fill the DMA freelists from contiguous pages, recording each
 *     block's bus address (vtophys) just after the free-link pointer.
 *  4. Seed the DMA page pool with os_max_cache_pages single pages.
 * Returns 0 on success, ENXIO on allocation failure.
 * NOTE(review): several lines (declarations, freelist_put calls, closing
 * braces) are elided in this excerpt.
 */
149 static int hpt_alloc_mem(PVBUS_EXT vbus_ext)
156 for (hba = vbus_ext->hba_list; hba; hba = hba->next)
157 hba->ldm_adapter.him->get_meminfo(hba->ldm_adapter.him_handle);
159 ldm_get_mem_info((PVBUS)vbus_ext->vbus, 0);
161 for (f=vbus_ext->freelist_head; f; f=f->next) {
162 KdPrint(("%s: %d*%d=%d bytes",
163 f->tag, f->count, f->size, f->count*f->size));
164 for (i=0; i<f->count; i++) {
165 p = (void **)malloc(f->size, M_DEVBUF, M_WAITOK);
166 if (!p) return (ENXIO);
172 for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
/* DMA freelist entry sizes must be a multiple of their alignment. */
175 HPT_ASSERT((f->size & (f->alignment-1))==0);
/* find smallest power-of-two page count (order) that fits one element */
177 for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1)
180 KdPrint(("%s: %d*%d=%d bytes, order %d",
181 f->tag, f->count, f->size, f->count*f->size, order));
182 HPT_ASSERT(f->alignment<=PAGE_SIZE);
/* carve each contiguous allocation into size/f->size blocks */
184 for (i=0; i<f->count;) {
185 p = (void **)__get_free_pages(order);
187 for (j = size/f->size; j && i<f->count; i++,j--) {
/* stash the block's physical (bus) address right after the link word */
189 *(BUS_ADDRESS *)(p+1) = (BUS_ADDRESS)vtophys(p);
191 p = (void **)((unsigned long)p + f->size);
196 HPT_ASSERT(PAGE_SIZE==DMAPOOL_PAGE_SIZE);
198 for (i=0; i<os_max_cache_pages; i++) {
199 p = (void **)__get_free_pages(0);
/* dmapool pages must be page-aligned */
201 HPT_ASSERT(((HPT_UPTR)p & (DMAPOOL_PAGE_SIZE-1))==0);
202 dmapool_put_page((PVBUS)vbus_ext->vbus, p, (BUS_ADDRESS)vtophys(p));
/*
 * hpt_free_mem: tear down everything hpt_alloc_mem built.  Warns (via
 * KdPrint) about freelist leaks, drains ordinary freelists with free(),
 * returns cache pages, then frees DMA freelist backing pages — a page is
 * only released once a page-aligned block is popped, since other blocks
 * from the same contiguous page may still be on the list.
 * NOTE(review): declarations and some free/put lines are elided here.
 */
208 static void hpt_free_mem(PVBUS_EXT vbus_ext)
215 for (f=vbus_ext->freelist_head; f; f=f->next) {
217 if (f->count!=f->reserved_count) {
218 KdPrint(("memory leak for freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
221 while ((p=freelist_get(f)))
225 for (i=0; i<os_max_cache_pages; i++) {
226 p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus);
231 for (f=vbus_ext->freelist_dma_head; f; f=f->next) {
234 if (f->count!=f->reserved_count) {
235 KdPrint(("memory leak for dma freelist %s (%d/%d)", f->tag, f->count, f->reserved_count));
/* recompute the allocation order used in hpt_alloc_mem */
238 for (order=0, size=PAGE_SIZE; size<f->size; order++, size<<=1) ;
240 while ((p=freelist_get_dma(f, &bus))) {
242 free_pages(p, order);
244 /* can't free immediately since other blocks in this page may still be in the list */
245 if (((HPT_UPTR)p & (PAGE_SIZE-1))==0)
246 dmapool_put_page((PVBUS)vbus_ext->vbus, p, bus);
/* release any pages parked back into the dmapool above */
251 while ((p = dmapool_get_page((PVBUS)vbus_ext->vbus, &bus)))
/*
 * hpt_init_vbus: initialize every HBA's hardware via its HIM, then
 * initialize the LDM virtual bus itself.  Returns nonzero on failure
 * (error paths elided in this excerpt).
 */
255 static int hpt_init_vbus(PVBUS_EXT vbus_ext)
259 for (hba = vbus_ext->hba_list; hba; hba = hba->next)
260 if (!hba->ldm_adapter.him->initialize(hba->ldm_adapter.him_handle)) {
261 KdPrint(("fail to initialize %p", hba));
265 ldm_initialize_vbus((PVBUS)vbus_ext->vbus, &vbus_ext->hba_list->ldm_adapter);
/*
 * hpt_flush_done: completion callback for flush commands issued by
 * hpt_flush_vdev.  If the vdev is an array mid-transform, re-queue the
 * flush against the transform target as well before signalling the
 * waiter through pCmd->priv.
 */
269 static void hpt_flush_done(PCOMMAND pCmd)
271 PVDEV vd = pCmd->target;
273 if (mIsArray(vd->type) && vd->u.array.transform && vd!=vd->u.array.transform->target) {
274 vd = vd->u.array.transform->target;
/* resubmit: mark pending and push down to the transform target */
277 pCmd->Result = RETURN_PENDING;
278 vdev_queue_cmd(pCmd);
/* priv points at the 'done' flag the sleeping caller polls */
282 *(int *)pCmd->priv = 1;
287 * flush a vdev (without retry).
/*
 * hpt_flush_vdev: synchronously issue a hard-flush command to a vdev,
 * holding the vbus lock and sleeping until hpt_flush_done fires.  A
 * sleep timeout triggers a vbus reset.  Returns 0 on success, nonzero
 * otherwise (allocation-failure and command-setup lines are elided).
 */
289 static int hpt_flush_vdev(PVBUS_EXT vbus_ext, PVDEV vd)
292 int result = 0, done;
295 KdPrint(("flusing dev %p", vd));
297 hpt_lock_vbus(vbus_ext);
/* a transforming array needs enough commands for both source and target */
299 if (mIsArray(vd->type) && vd->u.array.transform)
300 count = max(vd->u.array.transform->source->cmds_per_request,
301 vd->u.array.transform->target->cmds_per_request);
303 count = vd->cmds_per_request;
305 pCmd = ldm_alloc_cmds(vd->vbus, count);
308 hpt_unlock_vbus(vbus_ext);
312 pCmd->type = CMD_TYPE_FLUSH;
313 pCmd->flags.hard_flush = 1;
315 pCmd->done = hpt_flush_done;
/* wait for completion; each timeout resets the vbus and waits again */
322 while (hpt_sleep(vbus_ext, pCmd, PPAUSE, "hptfls", HPT_OSM_TIMEOUT)) {
323 ldm_reset_vbus(vd->vbus);
327 KdPrint(("flush result %d", pCmd->Result));
329 if (pCmd->Result!=RETURN_SUCCESS)
334 hpt_unlock_vbus(vbus_ext);
339 static void hpt_stop_tasks(PVBUS_EXT vbus_ext);
/*
 * hpt_shutdown_vbus: shutdown_final event handler.  Stops background
 * tasks, flushes every target (one retry on failure), releases the LDM
 * vbus, tears down interrupts, frees all pool memory and HBA handles,
 * and finally destroys the timer/lock and the vbus_ext itself.
 * NOTE(review): interior lines are elided; 'howto' appears unused in
 * the visible lines.
 */
340 static void hpt_shutdown_vbus(PVBUS_EXT vbus_ext, int howto)
342 PVBUS vbus = (PVBUS)vbus_ext->vbus;
346 KdPrint(("hpt_shutdown_vbus"));
348 /* stop all ctl tasks and disable the worker taskqueue */
349 hpt_stop_tasks(vbus_ext);
350 vbus_ext->worker.ta_context = 0;
353 for (i=0; i<osm_max_targets; i++) {
354 PVDEV vd = ldm_find_target(vbus, i);
/* flush each vdev; retry once if the first flush fails */
357 if (hpt_flush_vdev(vbus_ext, vd))
358 hpt_flush_vdev(vbus_ext, vd);
362 hpt_lock_vbus(vbus_ext);
364 hpt_unlock_vbus(vbus_ext);
366 ldm_release_vbus(vbus);
368 for (hba=vbus_ext->hba_list; hba; hba=hba->next)
369 bus_teardown_intr(hba->pcidev, hba->irq_res, hba->irq_handle);
371 hpt_free_mem(vbus_ext);
373 while ((hba=vbus_ext->hba_list)) {
374 vbus_ext->hba_list = hba->next;
375 free(hba->ldm_adapter.him_handle, M_DEVBUF);
/* drain the callout before destroying the mutex it is synchronized on */
377 callout_drain(&vbus_ext->timer);
378 mtx_destroy(&vbus_ext->lock);
379 free(vbus_ext, M_DEVBUF);
380 KdPrint(("hpt_shutdown_vbus done"));
/*
 * __hpt_do_tasks: run queued OSM tasks for this vbus.  Caller must hold
 * the vbus lock.  (Queue-walk details are elided in this excerpt.)
 */
383 static void __hpt_do_tasks(PVBUS_EXT vbus_ext)
387 tasks = vbus_ext->tasks;
394 t->func(vbus_ext->vbus, t->data);
/*
 * hpt_do_tasks: taskqueue entry point — takes the vbus lock and runs
 * queued tasks.  'pending' is the taskqueue pending count (unused here).
 */
398 static void hpt_do_tasks(PVBUS_EXT vbus_ext, int pending)
401 hpt_lock_vbus(vbus_ext);
402 __hpt_do_tasks(vbus_ext);
403 hpt_unlock_vbus(vbus_ext);
407 static void hpt_action(struct cam_sim *sim, union ccb *ccb);
408 static void hpt_poll(struct cam_sim *sim);
409 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg);
410 static void hpt_pci_intr(void *arg);
/*
 * cmdext_get: pop a pre-allocated per-command extension off the vbus's
 * free list.  Caller holds the vbus lock.  (The NULL-list branch is
 * elided in this excerpt.)
 */
412 static __inline POS_CMDEXT cmdext_get(PVBUS_EXT vbus_ext)
414 POS_CMDEXT p = vbus_ext->cmdext_list;
416 vbus_ext->cmdext_list = p->next;
/* cmdext_put: return a command extension to its vbus's free list. */
420 static __inline void cmdext_put(POS_CMDEXT p)
422 p->next = p->vbus_ext->cmdext_list;
423 p->vbus_ext->cmdext_list = p;
/*
 * hpt_timeout: per-command callout handler — a command exceeded
 * HPT_OSM_TIMEOUT, so reset the whole virtual bus to recover.
 */
426 static void hpt_timeout(void *arg)
428 PCOMMAND pCmd = (PCOMMAND)arg;
429 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
431 KdPrint(("pCmd %p timeout", pCmd));
433 ldm_reset_vbus((PVBUS)ext->vbus_ext->vbus);
/*
 * os_cmddone: LDM command completion — map the driver's result code to
 * a CAM status, stop the timeout callout, post-sync and unload the DMA
 * map, then (in elided lines) recycle the extension and complete the CCB.
 */
436 static void os_cmddone(PCOMMAND pCmd)
438 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
439 union ccb *ccb = ext->ccb;
441 KdPrint(("<8>os_cmddone(%p, %d)", pCmd, pCmd->Result));
442 callout_stop(&ext->timeout);
443 switch(pCmd->Result) {
445 ccb->ccb_h.status = CAM_REQ_CMP;
447 case RETURN_BAD_DEVICE:
448 ccb->ccb_h.status = CAM_DEV_NOT_THERE;
450 case RETURN_DEVICE_BUSY:
451 ccb->ccb_h.status = CAM_BUSY;
453 case RETURN_INVALID_REQUEST:
454 ccb->ccb_h.status = CAM_REQ_INVALID;
456 case RETURN_SELECTION_TIMEOUT:
457 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
/* NOTE(review): case labels for the two lines below are elided here */
460 ccb->ccb_h.status = CAM_BUSY;
463 ccb->ccb_h.status = CAM_SCSI_STATUS_ERROR;
/* post-DMA sync direction matches the transfer direction set at submit */
467 if (pCmd->flags.data_in) {
468 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTREAD);
470 else if (pCmd->flags.data_out) {
471 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map, BUS_DMASYNC_POSTWRITE);
474 bus_dmamap_unload(ext->vbus_ext->io_dmat, ext->dma_map);
/*
 * os_buildsgl: build a scatter/gather list for a command.  The logical
 * case is a single entry covering the CCB data buffer; physical SG is
 * never requested because the driver supplies it via busdma (see the
 * original comment below).
 */
481 static int os_buildsgl(PCOMMAND pCmd, PSG pSg, int logical)
483 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
484 union ccb *ccb = ext->ccb;
487 os_set_sgptr(pSg, (HPT_U8 *)ccb->csio.data_ptr);
488 pSg->size = ccb->csio.dxfer_len;
492 /* since we have provided physical sg, nobody will ask us to build physical sg */
/*
 * hpt_io_dmamap_callback: bus_dmamap_load completion — copy the busdma
 * segment list into the command's physical SG array, pre-sync the map
 * for the transfer direction, arm the per-command timeout, and (in
 * elided lines) queue the command to the vdev.
 */
497 static void hpt_io_dmamap_callback(void *arg, bus_dma_segment_t *segs, int nsegs, int error)
499 PCOMMAND pCmd = (PCOMMAND)arg;
500 POS_CMDEXT ext = (POS_CMDEXT)pCmd->priv;
504 HPT_ASSERT(pCmd->flags.physical_sg);
507 panic("busdma error");
509 HPT_ASSERT(nsegs<=os_max_sg_descriptors);
/* translate busdma segments into the HIM's SG format */
512 for (idx = 0; idx < nsegs; idx++, psg++) {
513 psg->addr.bus = segs[idx].ds_addr;
514 psg->size = segs[idx].ds_len;
519 if (pCmd->flags.data_in) {
520 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
521 BUS_DMASYNC_PREREAD);
523 else if (pCmd->flags.data_out) {
524 bus_dmamap_sync(ext->vbus_ext->io_dmat, ext->dma_map,
525 BUS_DMASYNC_PREWRITE);
/* start the watchdog; os_cmddone stops it on completion */
528 callout_reset(&ext->timeout, HPT_OSM_TIMEOUT, hpt_timeout, pCmd);
/*
 * hpt_scsi_io: translate a CAM SCSI CCB into driver commands.
 * Emulated in software: TEST UNIT READY, START/STOP, SYNCHRONIZE CACHE,
 * INQUIRY, READ CAPACITY(10/16).  READ/WRITE/VERIFY commands are turned
 * into LDM IDE-style commands, with LBA/sector-count scaled when the
 * underlying device uses 4K sectors.  Everything else is rejected as
 * CAM_REQ_INVALID.
 * NOTE(review): many case labels, 'break's and closing braces are elided
 * in this excerpt; comments below describe only the visible lines.
 */
532 static void hpt_scsi_io(PVBUS_EXT vbus_ext, union ccb *ccb)
534 PVBUS vbus = (PVBUS)vbus_ext->vbus;
/* the CDB may be inline in the CCB or referenced via pointer */
541 if (ccb->ccb_h.flags & CAM_CDB_POINTER)
542 cdb = ccb->csio.cdb_io.cdb_ptr;
544 cdb = ccb->csio.cdb_io.cdb_bytes;
546 KdPrint(("<8>hpt_scsi_io: ccb %x id %d lun %d cdb %x-%x-%x",
548 ccb->ccb_h.target_id, ccb->ccb_h.target_lun,
549 *(HPT_U32 *)&cdb[0], *(HPT_U32 *)&cdb[4], *(HPT_U32 *)&cdb[8]
552 /* ccb->ccb_h.path_id is not our bus id - don't check it */
553 if (ccb->ccb_h.target_lun != 0 ||
554 ccb->ccb_h.target_id >= osm_max_targets ||
555 (ccb->ccb_h.flags & CAM_CDB_PHYS))
557 ccb->ccb_h.status = CAM_TID_INVALID;
562 vd = ldm_find_target(vbus, ccb->ccb_h.target_id);
565 ccb->ccb_h.status = CAM_SEL_TIMEOUT;
/* trivially-succeeding commands */
571 case TEST_UNIT_READY:
572 case START_STOP_UNIT:
573 case SYNCHRONIZE_CACHE:
574 ccb->ccb_h.status = CAM_REQ_CMP;
/* INQUIRY: synthesize inquiry data naming the target as DISK b_t */
579 PINQUIRYDATA inquiryData;
580 memset(ccb->csio.data_ptr, 0, ccb->csio.dxfer_len);
581 inquiryData = (PINQUIRYDATA)ccb->csio.data_ptr;
583 inquiryData->AdditionalLength = 31;
584 inquiryData->CommandQueue = 1;
585 memcpy(&inquiryData->VendorId, "HPT ", 8);
586 memcpy(&inquiryData->ProductId, "DISK 0_0 ", 16);
/* patch the target id digits into the product string */
588 if (vd->target_id / 10) {
589 inquiryData->ProductId[7] = (vd->target_id % 100) / 10 + '0';
590 inquiryData->ProductId[8] = (vd->target_id % 100) % 10 + '0';
593 inquiryData->ProductId[7] = (vd->target_id % 100) % 10 + '0';
595 memcpy(&inquiryData->ProductRevisionLevel, "4.00", 4);
597 ccb->ccb_h.status = CAM_REQ_CMP;
/* READ CAPACITY(10): big-endian last-LBA + block length */
603 HPT_U8 *rbuf = ccb->csio.data_ptr;
605 HPT_U8 sector_size_shift = 0;
607 HPT_U32 sector_size = 0;
609 if (mIsArray(vd->type))
610 sector_size_shift = vd->u.array.sector_size_shift;
612 if(vd->type == VD_RAW){
613 sector_size = vd->u.raw.logical_sector_size;
616 switch (sector_size) {
618 KdPrint(("set 4k setctor size in READ_CAPACITY"));
/* shift of 3: 8 * 512-byte units per 4K logical sector */
619 sector_size_shift = 3;
625 new_cap = vd->capacity >> sector_size_shift;
/* clamp to 0xffffffff so initiators fall back to READ CAPACITY(16) */
627 if (new_cap > 0xfffffffful)
632 rbuf[0] = (HPT_U8)(cap>>24);
633 rbuf[1] = (HPT_U8)(cap>>16);
634 rbuf[2] = (HPT_U8)(cap>>8);
635 rbuf[3] = (HPT_U8)cap;
/* block length: 512 << shift, expressed via its middle byte */
638 rbuf[6] = 2 << sector_size_shift;
641 ccb->ccb_h.status = CAM_REQ_CMP;
/* (elided command, likely REPORT LUNS or similar short response) */
646 HPT_U8 *rbuf = ccb->csio.data_ptr;
649 ccb->ccb_h.status = CAM_REQ_CMP;
652 case SERVICE_ACTION_IN:
/* READ CAPACITY(16): 64-bit big-endian last LBA */
654 HPT_U8 *rbuf = ccb->csio.data_ptr;
656 HPT_U8 sector_size_shift = 0;
657 HPT_U32 sector_size = 0;
659 if(mIsArray(vd->type))
660 sector_size_shift = vd->u.array.sector_size_shift;
662 if(vd->type == VD_RAW){
663 sector_size = vd->u.raw.logical_sector_size;
666 switch (sector_size) {
668 KdPrint(("set 4k setctor size in SERVICE_ACTION_IN"));
669 sector_size_shift = 3;
675 cap = (vd->capacity >> sector_size_shift) - 1;
677 rbuf[0] = (HPT_U8)(cap>>56);
678 rbuf[1] = (HPT_U8)(cap>>48);
679 rbuf[2] = (HPT_U8)(cap>>40);
680 rbuf[3] = (HPT_U8)(cap>>32);
681 rbuf[4] = (HPT_U8)(cap>>24);
682 rbuf[5] = (HPT_U8)(cap>>16);
683 rbuf[6] = (HPT_U8)(cap>>8);
684 rbuf[7] = (HPT_U8)cap;
687 rbuf[10] = 2 << sector_size_shift;
690 ccb->ccb_h.status = CAM_REQ_CMP;
/* data-transfer commands: allocate an LDM command and decode the CDB */
702 case 0x8f: /* VERIFY_16 */
704 HPT_U8 sector_size_shift = 0;
705 HPT_U32 sector_size = 0;
706 pCmd = ldm_alloc_cmds(vbus, vd->cmds_per_request);
708 KdPrint(("Failed to allocate command!"));
709 ccb->ccb_h.status = CAM_BUSY;
/* 6-byte CDB: 21-bit LBA, 8-bit count */
717 pCmd->uCmd.Ide.Lba = ((HPT_U32)cdb[1] << 16) | ((HPT_U32)cdb[2] << 8) | (HPT_U32)cdb[3];
718 pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[4];
/* 16-byte CDB: 64-bit LBA, 16-bit count */
722 case 0x8f: /* VERIFY_16 */
725 ((HPT_U64)cdb[2]<<56) |
726 ((HPT_U64)cdb[3]<<48) |
727 ((HPT_U64)cdb[4]<<40) |
728 ((HPT_U64)cdb[5]<<32) |
729 ((HPT_U64)cdb[6]<<24) |
730 ((HPT_U64)cdb[7]<<16) |
731 ((HPT_U64)cdb[8]<<8) |
733 pCmd->uCmd.Ide.Lba = block;
734 pCmd->uCmd.Ide.nSectors = (HPT_U16)cdb[13] | ((HPT_U16)cdb[12]<<8);
/* 10-byte CDB: 32-bit LBA, 16-bit count */
739 pCmd->uCmd.Ide.Lba = (HPT_U32)cdb[5] | ((HPT_U32)cdb[4] << 8) | ((HPT_U32)cdb[3] << 16) | ((HPT_U32)cdb[2] << 24);
740 pCmd->uCmd.Ide.nSectors = (HPT_U16) cdb[8] | ((HPT_U16)cdb[7]<<8);
744 if(mIsArray(vd->type)) {
745 sector_size_shift = vd->u.array.sector_size_shift;
748 if(vd->type == VD_RAW){
749 sector_size = vd->u.raw.logical_sector_size;
752 switch (sector_size) {
754 KdPrint(("<8>resize sector size from 4k to 512"));
755 sector_size_shift = 3;
/* scale 512-byte-unit LBA/count to the device's larger sectors */
761 pCmd->uCmd.Ide.Lba <<= sector_size_shift;
762 pCmd->uCmd.Ide.nSectors <<= sector_size_shift;
769 pCmd->flags.data_in = 1;
774 pCmd->flags.data_out = 1;
777 pCmd->priv = ext = cmdext_get(vbus_ext);
781 pCmd->done = os_cmddone;
782 pCmd->buildsgl = os_buildsgl;
784 pCmd->psg = ext->psg;
/* we hand the HIM a ready-made physical SG built by busdma */
785 pCmd->flags.physical_sg = 1;
786 error = bus_dmamap_load_ccb(vbus_ext->io_dmat,
788 hpt_io_dmamap_callback, pCmd,
791 KdPrint(("<8>bus_dmamap_load return %d", error));
792 if (error && error!=EINPROGRESS) {
793 os_printk("bus_dmamap_load error %d", error);
796 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
/* unsupported opcode */
803 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * hpt_action: CAM SIM action entry point — dispatch on CCB function
 * code.  SCSI I/O goes to hpt_scsi_io; bus reset resets the vbus; path
 * inquiry reports SIM capabilities; geometry uses the 255/63 convention.
 * Called with the vbus lock held (asserted below).
 */
811 static void hpt_action(struct cam_sim *sim, union ccb *ccb)
813 PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
815 KdPrint(("<8>hpt_action(fn=%d, id=%d)", ccb->ccb_h.func_code, ccb->ccb_h.target_id));
817 hpt_assert_vbus_locked(vbus_ext);
818 switch (ccb->ccb_h.func_code) {
821 hpt_scsi_io(vbus_ext, ccb);
825 ldm_reset_vbus((PVBUS)vbus_ext->vbus);
828 case XPT_GET_TRAN_SETTINGS:
829 case XPT_SET_TRAN_SETTINGS:
830 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
/* standard 255 heads / 63 sectors-per-track fake geometry */
833 case XPT_CALC_GEOMETRY:
834 ccb->ccg.heads = 255;
835 ccb->ccg.secs_per_track = 63;
836 ccb->ccg.cylinders = ccb->ccg.volume_size / (ccb->ccg.heads * ccb->ccg.secs_per_track);
837 ccb->ccb_h.status = CAM_REQ_CMP;
/* XPT_PATH_INQ: describe the SIM to CAM */
842 struct ccb_pathinq *cpi = &ccb->cpi;
844 cpi->version_num = 1;
845 cpi->hba_inquiry = PI_SDTR_ABLE;
846 cpi->target_sprt = 0;
847 cpi->hba_misc = PIM_NOBUSRESET;
848 cpi->hba_eng_cnt = 0;
849 cpi->max_target = osm_max_targets;
851 cpi->unit_number = cam_sim_unit(sim);
852 cpi->bus_id = cam_sim_bus(sim);
/* initiator id placed past the last valid target id */
853 cpi->initiator_id = osm_max_targets;
854 cpi->base_transfer_speed = 3300;
856 strlcpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
857 strlcpy(cpi->hba_vid, "HPT ", HBA_IDLEN);
858 strlcpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
859 cpi->transport = XPORT_SPI;
860 cpi->transport_version = 2;
861 cpi->protocol = PROTO_SCSI;
862 cpi->protocol_version = SCSI_REV_2;
863 cpi->ccb_h.status = CAM_REQ_CMP;
868 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * hpt_pci_intr: PCI interrupt handler — take the vbus lock and let the
 * LDM layer service the hardware.
 */
876 static void hpt_pci_intr(void *arg)
878 PVBUS_EXT vbus_ext = (PVBUS_EXT)arg;
879 hpt_lock_vbus(vbus_ext);
880 ldm_intr((PVBUS)vbus_ext->vbus);
881 hpt_unlock_vbus(vbus_ext);
/*
 * hpt_poll: CAM polling entry point (used when interrupts are not
 * available, e.g. crash dumps).  The vbus lock is already held by CAM.
 */
884 static void hpt_poll(struct cam_sim *sim)
886 PVBUS_EXT vbus_ext = (PVBUS_EXT)cam_sim_softc(sim);
888 hpt_assert_vbus_locked(vbus_ext);
889 ldm_intr((PVBUS)vbus_ext->vbus);
/* hpt_async: CAM async event callback — registered but intentionally a no-op. */
892 static void hpt_async(void * callback_arg, u_int32_t code, struct cam_path * path, void * arg)
894 KdPrint(("<8>hpt_async"));
/*
 * hpt_shutdown: newbus shutdown method.  Real shutdown work happens in
 * the shutdown_final handler (hpt_shutdown_vbus); body is elided here.
 */
897 static int hpt_shutdown(device_t dev)
899 KdPrint(("hpt_shutdown(dev=%p)", dev));
/* hpt_detach: newbus detach method — deliberately refuses (driver cannot unload). */
903 static int hpt_detach(device_t dev)
905 /* we don't allow the driver to be unloaded. */
/*
 * hpt_ioctl_done: LDM ioctl completion callback — presumably clears
 * arg->ioctl_cmnd and wakes the sleeper in __hpt_do_ioctl (body elided).
 */
909 static void hpt_ioctl_done(struct _IOCTL_ARG *arg)
/*
 * __hpt_do_ioctl: synchronously run one LDM ioctl on a given vbus.
 * ioctl_cmnd is used as the "in progress" marker (primed to 1 here,
 * cleared by hpt_ioctl_done); the caller sleeps until it clears, and a
 * sleep timeout resets the vbus and pumps pending tasks.
 */
915 static void __hpt_do_ioctl(PVBUS_EXT vbus_ext, IOCTL_ARG *ioctl_args)
917 ioctl_args->result = -1;
918 ioctl_args->done = hpt_ioctl_done;
919 ioctl_args->ioctl_cmnd = (void *)1;
921 hpt_lock_vbus(vbus_ext);
922 ldm_ioctl((PVBUS)vbus_ext->vbus, ioctl_args);
924 while (ioctl_args->ioctl_cmnd) {
/* hpt_sleep returns 0 on normal wakeup; nonzero means timeout */
925 if (hpt_sleep(vbus_ext, ioctl_args, PPAUSE, "hptctl", HPT_OSM_TIMEOUT)==0)
927 ldm_reset_vbus((PVBUS)vbus_ext->vbus);
928 __hpt_do_tasks(vbus_ext);
931 /* KdPrint(("ioctl %x result %d", ioctl_args->dwIoControlCode, ioctl_args->result)); */
933 hpt_unlock_vbus(vbus_ext);
/*
 * hpt_do_ioctl: try the ioctl on every vbus until one accepts it
 * (i.e. stops returning HPT_IOCTL_RESULT_WRONG_VBUS).
 */
936 static void hpt_do_ioctl(IOCTL_ARG *ioctl_args)
941 ldm_for_each_vbus(vbus, vbus_ext) {
942 __hpt_do_ioctl(vbus_ext, ioctl_args);
943 if (ioctl_args->result!=HPT_IOCTL_RESULT_WRONG_VBUS)
/*
 * HPT_DO_IOCTL: statement-expression convenience wrapper that builds an
 * IOCTL_ARG on the stack, runs hpt_do_ioctl, and evaluates to the result
 * (the trailing lines of the macro are elided in this excerpt).
 */
948 #define HPT_DO_IOCTL(code, inbuf, insize, outbuf, outsize) ({\
950 arg.dwIoControlCode = code;\
951 arg.lpInBuffer = inbuf;\
952 arg.lpOutBuffer = outbuf;\
953 arg.nInBufferSize = insize;\
954 arg.nOutBufferSize = outsize;\
955 arg.lpBytesReturned = 0;\
/* A device ID is valid when nonzero and not the all-ones sentinel. */
960 #define DEVICEID_VALID(id) ((id) && ((HPT_U32)(id)!=0xffffffff))
/*
 * hpt_get_logical_devices: fetch up to nMaxCount-1 logical device IDs.
 * The ioctl returns the count in pIds[0] followed by the IDs, so the
 * array is shifted down by one before returning the count.
 */
962 static int hpt_get_logical_devices(DEVICEID * pIds, int nMaxCount)
965 HPT_U32 count = nMaxCount-1;
967 if (HPT_DO_IOCTL(HPT_IOCTL_GET_LOGICAL_DEVICES,
968 &count, sizeof(HPT_U32), pIds, sizeof(DEVICEID)*nMaxCount))
971 nMaxCount = (int)pIds[0];
972 for (i=0; i<nMaxCount; i++) pIds[i] = pIds[i+1];
/* hpt_get_device_info_v3: fetch V3 logical-device info for one device ID. */
976 static int hpt_get_device_info_v3(DEVICEID id, PLOGICAL_DEVICE_INFO_V3 pInfo)
978 return HPT_DO_IOCTL(HPT_IOCTL_GET_DEVICE_INFO_V3,
979 &id, sizeof(DEVICEID), pInfo, sizeof(LOGICAL_DEVICE_INFO_V3));
982 /* not belong to this file logically, but we want to use ioctl interface */
/*
 * __hpt_stop_tasks: abort any background array activity (rebuild,
 * verify, init, transform) on the array identified by 'id' via
 * SET_ARRAY_STATE, then recurse into every valid array member.
 */
983 static int __hpt_stop_tasks(PVBUS_EXT vbus_ext, DEVICEID id)
985 LOGICAL_DEVICE_INFO_V3 devinfo;
987 DEVICEID param[2] = { id, 0 };
989 if (hpt_get_device_info_v3(id, &devinfo))
992 if (devinfo.Type!=LDT_ARRAY)
/* pick the abort verb matching whatever operation is in progress */
995 if (devinfo.u.array.Flags & ARRAY_FLAG_REBUILDING)
996 param[1] = AS_REBUILD_ABORT;
997 else if (devinfo.u.array.Flags & ARRAY_FLAG_VERIFYING)
998 param[1] = AS_VERIFY_ABORT;
999 else if (devinfo.u.array.Flags & ARRAY_FLAG_INITIALIZING)
1000 param[1] = AS_INITIALIZE_ABORT;
1001 else if (devinfo.u.array.Flags & ARRAY_FLAG_TRANSFORMING)
1002 param[1] = AS_TRANSFORM_ABORT;
1006 KdPrint(("SET_ARRAY_STATE(%x, %d)", param[0], param[1]));
1007 result = HPT_DO_IOCTL(HPT_IOCTL_SET_ARRAY_STATE,
1008 param, sizeof(param), 0, 0);
/* recurse into sub-arrays/members */
1010 for (i=0; i<devinfo.u.array.nDisk; i++)
1011 if (DEVICEID_VALID(devinfo.u.array.Members[i]))
1012 __hpt_stop_tasks(vbus_ext, devinfo.u.array.Members[i]);
/*
 * hpt_stop_tasks: enumerate all logical devices and abort background
 * tasks on each (used before shutdown).
 */
1017 static void hpt_stop_tasks(PVBUS_EXT vbus_ext)
1022 count = hpt_get_logical_devices((DEVICEID *)&ids, sizeof(ids)/sizeof(ids[0]));
1024 for (i=0; i<count; i++)
1025 __hpt_stop_tasks(vbus_ext, ids[i]);
/* Character-device entry points for the management /dev node. */
1028 static d_open_t hpt_open;
1029 static d_close_t hpt_close;
1030 static d_ioctl_t hpt_ioctl;
1031 static int hpt_rescan_bus(void);
/* cdevsw for the control device (d_open initializer elided in this excerpt). */
1033 static struct cdevsw hpt_cdevsw = {
1035 .d_close = hpt_close,
1036 .d_ioctl = hpt_ioctl,
1037 .d_name = driver_name,
1038 .d_version = D_VERSION,
/* config hook so hpt_final_init runs after interrupts are enabled */
1041 static struct intr_config_hook hpt_ich;
1044 * hpt_final_init will be called after all hpt_attach.
/*
 * hpt_final_init: deferred initialization run from the intr config hook,
 * after every hpt_attach has completed.  Per vbus: allocate pools,
 * init mutex/timer, bring up hardware, create the busdma I/O tag and
 * per-command extensions, register a CAM SIM/bus/path, hook interrupts,
 * register the shutdown handler, start the worker task, and finally
 * create the /dev control node.
 * NOTE(review): error-cleanup lines and some braces are elided in this
 * excerpt.
 */
1046 static void hpt_final_init(void *dummy)
1048 int i,unit_number=0;
1053 /* Clear the config hook */
1054 config_intrhook_disestablish(&hpt_ich);
1056 /* allocate memory */
1058 ldm_for_each_vbus(vbus, vbus_ext) {
1059 if (hpt_alloc_mem(vbus_ext)) {
1060 os_printk("out of memory");
1068 os_printk("no controller detected.");
1072 /* initializing hardware */
1073 ldm_for_each_vbus(vbus, vbus_ext) {
1074 /* make timer available here */
1075 mtx_init(&vbus_ext->lock, "hptsleeplock", NULL, MTX_DEF);
1076 callout_init_mtx(&vbus_ext->timer, &vbus_ext->lock, 0);
1077 if (hpt_init_vbus(vbus_ext)) {
1078 os_printk("fail to initialize hardware");
1083 /* register CAM interface */
1084 ldm_for_each_vbus(vbus, vbus_ext) {
1085 struct cam_devq *devq;
1086 struct ccb_setasync ccb;
/* busdma tag sized for os_max_sg_descriptors segments per I/O */
1088 if (bus_dma_tag_create(NULL,/* parent */
1090 BUS_SPACE_MAXADDR_32BIT+1, /* boundary */
1091 BUS_SPACE_MAXADDR, /* lowaddr */
1092 BUS_SPACE_MAXADDR, /* highaddr */
1093 NULL, NULL, /* filter, filterarg */
1094 PAGE_SIZE * (os_max_sg_descriptors-1), /* maxsize */
1095 os_max_sg_descriptors, /* nsegments */
1096 0x10000, /* maxsegsize */
1097 BUS_DMA_WAITOK, /* flags */
1098 busdma_lock_mutex, /* lockfunc */
1099 &vbus_ext->lock, /* lockfuncarg */
1100 &vbus_ext->io_dmat /* tag */))
/* pre-allocate one command extension + DMA map per queue slot */
1105 for (i=0; i<os_max_queue_comm; i++) {
1106 POS_CMDEXT ext = (POS_CMDEXT)malloc(sizeof(OS_CMDEXT), M_DEVBUF, M_WAITOK);
1108 os_printk("Can't alloc cmdext(%d)", i);
1111 ext->vbus_ext = vbus_ext;
1112 ext->next = vbus_ext->cmdext_list;
1113 vbus_ext->cmdext_list = ext;
1115 if (bus_dmamap_create(vbus_ext->io_dmat, 0, &ext->dma_map)) {
1116 os_printk("Can't create dma map(%d)", i);
1119 callout_init_mtx(&ext->timeout, &vbus_ext->lock, 0);
1122 if ((devq = cam_simq_alloc(os_max_queue_comm)) == NULL) {
1123 os_printk("cam_simq_alloc failed");
1126 vbus_ext->sim = cam_sim_alloc(hpt_action, hpt_poll, driver_name,
1127 vbus_ext, unit_number, &vbus_ext->lock, os_max_queue_comm, /*tagged*/8, devq);
1129 if (!vbus_ext->sim) {
1130 os_printk("cam_sim_alloc failed");
1131 cam_simq_free(devq);
1135 hpt_lock_vbus(vbus_ext);
1136 if (xpt_bus_register(vbus_ext->sim, NULL, 0) != CAM_SUCCESS) {
1137 hpt_unlock_vbus(vbus_ext);
1138 os_printk("xpt_bus_register failed");
1139 cam_sim_free(vbus_ext->sim, /*free devq*/ TRUE);
1140 vbus_ext->sim = NULL;
1144 if (xpt_create_path(&vbus_ext->path, /*periph */ NULL,
1145 cam_sim_path(vbus_ext->sim), CAM_TARGET_WILDCARD,
1146 CAM_LUN_WILDCARD) != CAM_REQ_CMP)
1148 hpt_unlock_vbus(vbus_ext);
1149 os_printk("xpt_create_path failed");
1150 xpt_bus_deregister(cam_sim_path(vbus_ext->sim));
1151 cam_sim_free(vbus_ext->sim, /*free_devq*/TRUE);
1152 vbus_ext->sim = NULL;
/* register for async lost-device notifications */
1156 memset(&ccb, 0, sizeof(ccb));
1157 xpt_setup_ccb(&ccb.ccb_h, vbus_ext->path, /*priority*/5);
1158 ccb.ccb_h.func_code = XPT_SASYNC_CB;
1159 ccb.event_enable = AC_LOST_DEVICE;
1160 ccb.callback = hpt_async;
1161 ccb.callback_arg = vbus_ext;
1162 xpt_action((union ccb *)&ccb);
1163 hpt_unlock_vbus(vbus_ext);
/* wire up the shared PCI interrupt for each HBA on this vbus */
1165 for (hba = vbus_ext->hba_list; hba; hba = hba->next) {
1167 if ((hba->irq_res = bus_alloc_resource_any(hba->pcidev,
1168 SYS_RES_IRQ, &rid, RF_SHAREABLE | RF_ACTIVE)) == NULL)
1170 os_printk("can't allocate interrupt");
1173 if (bus_setup_intr(hba->pcidev, hba->irq_res, INTR_TYPE_CAM | INTR_MPSAFE,
1174 NULL, hpt_pci_intr, vbus_ext, &hba->irq_handle))
1176 os_printk("can't set up interrupt");
1179 hba->ldm_adapter.him->intr_control(hba->ldm_adapter.him_handle, HPT_TRUE);
1183 vbus_ext->shutdown_eh = EVENTHANDLER_REGISTER(shutdown_final,
1184 hpt_shutdown_vbus, vbus_ext, SHUTDOWN_PRI_DEFAULT);
1185 if (!vbus_ext->shutdown_eh)
1186 os_printk("Shutdown event registration failed");
/* kick the worker task for any work queued during bring-up */
1189 ldm_for_each_vbus(vbus, vbus_ext) {
1190 TASK_INIT(&vbus_ext->worker, 0, (task_fn_t *)hpt_do_tasks, vbus_ext);
1191 if (vbus_ext->tasks)
1192 TASK_ENQUEUE(&vbus_ext->worker);
1195 make_dev(&hpt_cdevsw, DRIVER_MINOR, UID_ROOT, GID_OPERATOR,
1196 S_IRUSR | S_IWUSR, "%s", driver_name);
1199 #if defined(KLD_MODULE)
/*
 * KLD-only hack: mirror enough of subr_bus.c's private driverlink /
 * devclass layout so a freshly loaded module can swap itself to the
 * front of the pci devclass's driver list, taking precedence over a
 * same-named driver compiled into the kernel.
 * NOTE(review): fragile by design — depends on kernel-internal structs.
 */
1201 typedef struct driverlink *driverlink_t;
1203 kobj_class_t driver;
1204 TAILQ_ENTRY(driverlink) link; /* list of drivers in devclass */
1207 typedef TAILQ_HEAD(driver_list, driverlink) driver_list_t;
1210 TAILQ_ENTRY(devclass) link;
1211 devclass_t parent; /* parent in devclass hierarchy */
1212 driver_list_t drivers; /* bus devclasses store drivers for bus */
1214 device_t *devices; /* array of devices indexed by unit */
1215 int maxunit; /* size of devices array */
1218 static void override_kernel_driver(void)
1220 driverlink_t dl, dlfirst;
1221 driver_t *tmpdriver;
1222 devclass_t dc = devclass_find("pci");
1225 dlfirst = TAILQ_FIRST(&dc->drivers);
1226 for (dl = dlfirst; dl; dl = TAILQ_NEXT(dl, link)) {
1227 if(strcmp(dl->driver->name, driver_name) == 0) {
/* swap our driver with whichever is first so we win attachment */
1228 tmpdriver=dl->driver;
1229 dl->driver=dlfirst->driver;
1230 dlfirst->driver=tmpdriver;
/* non-KLD build: no-op */
1238 #define override_kernel_driver()
/*
 * hpt_init: SYSINIT entry — print the banner, apply the KLD driver
 * override, and establish the config hook that defers the rest of
 * initialization to hpt_final_init.
 */
1241 static void hpt_init(void *dummy)
1244 os_printk("%s %s", driver_name_long, driver_ver);
1246 override_kernel_driver();
1249 hpt_ich.ich_func = hpt_final_init;
1250 hpt_ich.ich_arg = NULL;
1251 if (config_intrhook_establish(&hpt_ich) != 0) {
1252 printf("%s: cannot establish configuration hook\n",
1257 SYSINIT(hptinit, SI_SUB_CONFIGURE, SI_ORDER_FIRST, hpt_init, NULL);
1260 * CAM driver interface
/* newbus device method table and module registration glue. */
1262 static device_method_t driver_methods[] = {
1263 /* Device interface */
1264 DEVMETHOD(device_probe, hpt_probe),
1265 DEVMETHOD(device_attach, hpt_attach),
1266 DEVMETHOD(device_detach, hpt_detach),
1267 DEVMETHOD(device_shutdown, hpt_shutdown),
1271 static driver_t hpt_pci_driver = {
1278 #error "no TARGETNAME found"
1281 /* use this to make TARGETNAME be expanded */
1282 #define __DRIVER_MODULE(p1, p2, p3, p4, p5) DRIVER_MODULE(p1, p2, p3, p4, p5)
1283 #define __MODULE_VERSION(p1, p2) MODULE_VERSION(p1, p2)
1284 #define __MODULE_DEPEND(p1, p2, p3, p4, p5) MODULE_DEPEND(p1, p2, p3, p4, p5)
1285 __DRIVER_MODULE(TARGETNAME, pci, hpt_pci_driver, 0, 0);
1286 __MODULE_VERSION(TARGETNAME, 1);
1287 __MODULE_DEPEND(TARGETNAME, cam, 1, 1, 1);
/* hpt_open: control-device open — body elided; presumably returns 0. */
1289 static int hpt_open(struct cdev *dev, int flags, int devtype, struct thread *td)
/* hpt_close: control-device close — body elided; presumably returns 0. */
1294 static int hpt_close(struct cdev *dev, int flags, int devtype, struct thread *td)
/*
 * hpt_ioctl: management ioctl entry point.  For HPT_DO_IOCONTROL,
 * validates the magic, copies the user in-buffer into kernel memory,
 * runs the internal ioctl via hpt_do_ioctl, then copies the out-buffer
 * and byte count back to user space.  Also dispatches a bus-rescan
 * command.  Error/return lines between the visible statements are
 * elided in this excerpt.
 */
1299 static int hpt_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag, struct thread *td)
1301 PHPT_IOCTL_PARAM piop=(PHPT_IOCTL_PARAM)data;
1302 IOCTL_ARG ioctl_args;
1303 HPT_U32 bytesReturned = 0;
1306 case HPT_DO_IOCONTROL:
/* accept both native and 32-bit-compat magic values */
1308 if (piop->Magic == HPT_IOCTL_MAGIC || piop->Magic == HPT_IOCTL_MAGIC32) {
1309 KdPrint(("<8>ioctl=%x in=%p len=%d out=%p len=%d\n",
1310 piop->dwIoControlCode,
1312 piop->nInBufferSize,
1314 piop->nOutBufferSize));
1316 memset(&ioctl_args, 0, sizeof(ioctl_args));
1318 ioctl_args.dwIoControlCode = piop->dwIoControlCode;
1319 ioctl_args.nInBufferSize = piop->nInBufferSize;
1320 ioctl_args.nOutBufferSize = piop->nOutBufferSize;
1321 ioctl_args.lpBytesReturned = &bytesReturned;
/* stage the user input buffer in kernel memory */
1323 if (ioctl_args.nInBufferSize) {
1324 ioctl_args.lpInBuffer = malloc(ioctl_args.nInBufferSize, M_DEVBUF, M_WAITOK);
1325 if (!ioctl_args.lpInBuffer)
1327 if (copyin((void*)piop->lpInBuffer,
1328 ioctl_args.lpInBuffer, piop->nInBufferSize))
1332 if (ioctl_args.nOutBufferSize) {
1333 ioctl_args.lpOutBuffer = malloc(ioctl_args.nOutBufferSize, M_DEVBUF, M_WAITOK | M_ZERO);
1334 if (!ioctl_args.lpOutBuffer)
1338 hpt_do_ioctl(&ioctl_args);
/* on success, copy results and the returned-bytes count back out */
1340 if (ioctl_args.result==HPT_IOCTL_RESULT_OK) {
1341 if (piop->nOutBufferSize) {
1342 if (copyout(ioctl_args.lpOutBuffer,
1343 (void*)piop->lpOutBuffer, piop->nOutBufferSize))
1346 if (piop->lpBytesReturned) {
1347 if (copyout(&bytesReturned,
1348 (void*)piop->lpBytesReturned, sizeof(HPT_U32)))
/* NOTE(review): free(NULL, ...) is a no-op; the guards are redundant but harmless */
1351 if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1352 if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
/* error-path cleanup mirrors the success path */
1356 if (ioctl_args.lpInBuffer) free(ioctl_args.lpInBuffer, M_DEVBUF);
1357 if (ioctl_args.lpOutBuffer) free(ioctl_args.lpOutBuffer, M_DEVBUF);
1365 return hpt_rescan_bus();
1368 KdPrint(("invalid command!"));
1374 static int hpt_rescan_bus(void)
1380 ldm_for_each_vbus(vbus, vbus_ext) {
1381 if ((ccb = xpt_alloc_ccb()) == NULL)
1385 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(vbus_ext->sim),
1386 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP)