3 * Generic defines for LSI '909 FC adapters.
6 * Copyright (c) 2000, 2001 by Greg Ansley
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice immediately at the beginning of the file, without modification,
13 * this list of conditions, and the following disclaimer.
14 * 2. The name of the author may not be used to endorse or promote products
15 * derived from this software without specific prior written permission.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE FOR
21 * ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
30 * Copyright (c) 2002, 2006 by Matthew Jacob
31 * All rights reserved.
33 * Redistribution and use in source and binary forms, with or without
34 * modification, are permitted provided that the following conditions are
36 * 1. Redistributions of source code must retain the above copyright
37 * notice, this list of conditions and the following disclaimer.
38 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
39 * substantially similar to the "NO WARRANTY" disclaimer below
40 * ("Disclaimer") and any redistribution must be conditioned upon including
41 * a substantially similar Disclaimer requirement for further binary
43 * 3. Neither the names of the above listed copyright holders nor the names
44 * of any contributors may be used to endorse or promote products derived
45 * from this software without specific prior written permission.
47 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
48 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
49 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
50 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
51 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
52 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
53 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
54 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
55 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
56 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
57 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
59 * Support from Chris Ellsworth in order to make SAS adapters work
60 * is gratefully acknowledged.
63 * Support from LSI-Logic has also gone a great deal toward making this a
64 * workable subsystem and is gratefully acknowledged.
67 * Copyright (c) 2004, Avid Technology, Inc. and its contributors.
68 * Copyright (c) 2004, 2005 Justin T. Gibbs
69 * Copyright (c) 2005, WHEEL Sp. z o.o.
70 * All rights reserved.
72 * Redistribution and use in source and binary forms, with or without
73 * modification, are permitted provided that the following conditions are
75 * 1. Redistributions of source code must retain the above copyright
76 * notice, this list of conditions and the following disclaimer.
77 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
78 * substantially similar to the "NO WARRANTY" disclaimer below
79 * ("Disclaimer") and any redistribution must be conditioned upon including
80 * a substantially similar Disclaimer requirement for further binary
82 * 3. Neither the names of the above listed copyright holders nor the names
83 * of any contributors may be used to endorse or promote products derived
84 * from this software without specific prior written permission.
86 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
87 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
88 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
89 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
90 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
91 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
92 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
93 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
94 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
95 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF THE COPYRIGHT
96 * OWNER OR CONTRIBUTOR IS ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
102 /********************************* OS Includes ********************************/
103 #include <sys/types.h>
104 #include <sys/param.h>
105 #include <sys/systm.h>
106 #include <sys/endian.h>
107 #include <sys/eventhandler.h>
108 #if __FreeBSD_version < 500000
109 #include <sys/kernel.h>
110 #include <sys/queue.h>
111 #include <sys/malloc.h>
112 #include <sys/devicestat.h>
114 #include <sys/lock.h>
115 #include <sys/kernel.h>
116 #include <sys/queue.h>
117 #include <sys/malloc.h>
118 #include <sys/mutex.h>
119 #include <sys/condvar.h>
121 #include <sys/proc.h>
123 #include <sys/module.h>
125 #include <machine/cpu.h>
126 #include <machine/resource.h>
128 #if __FreeBSD_version < 500000
129 #include <machine/bus.h>
130 #include <machine/clock.h>
133 #include <sys/rman.h>
135 #if __FreeBSD_version < 500000
136 #include <pci/pcireg.h>
137 #include <pci/pcivar.h>
139 #include <dev/pci/pcireg.h>
140 #include <dev/pci/pcivar.h>
143 #include <machine/bus.h>
146 /**************************** Register Definitions ****************************/
147 #include <dev/mpt/mpt_reg.h>
149 /******************************* MPI Definitions ******************************/
150 #include <dev/mpt/mpilib/mpi_type.h>
151 #include <dev/mpt/mpilib/mpi.h>
152 #include <dev/mpt/mpilib/mpi_cnfg.h>
153 #include <dev/mpt/mpilib/mpi_ioc.h>
154 #include <dev/mpt/mpilib/mpi_raid.h>
156 /* XXX For mpt_debug.c */
157 #include <dev/mpt/mpilib/mpi_init.h>
/*
 * Collapse an MPI S64/U64 {Low,High} pair into a native 64-bit scalar.
 * The argument is fully parenthesized so expressions such as *p or a[i]
 * work as macro arguments.
 */
#define MPT_S64_2_SCALAR(y) ((((int64_t)(y).High) << 32) | ((y).Low))
#define MPT_U64_2_SCALAR(y) ((((uint64_t)(y).High) << 32) | ((y).Low))
162 /****************************** Misc Definitions ******************************/
163 /* #define MPT_TEST_MULTIPATH 1 */
/* Generic failure status (out of band of MPI IOC status values). */
#define MPT_FAIL (0x10000)

/* Element count of a true array (do not pass a pointer). */
#define NUM_ELEMENTS(array) (sizeof(array) / sizeof((array)[0]))
/* Port operating roles; BOTH is the bitwise OR of INITIATOR and TARGET. */
#define MPT_ROLE_NONE 0
#define MPT_ROLE_INITIATOR 1
#define MPT_ROLE_TARGET 2
#define MPT_ROLE_BOTH 3
#define MPT_ROLE_DEFAULT MPT_ROLE_INITIATOR
175 /**************************** Forward Declarations ****************************/
177 struct mpt_personality;
178 typedef struct req_entry request_t;
180 /************************* Personality Module Support *************************/
181 typedef int mpt_load_handler_t(struct mpt_personality *);
182 typedef int mpt_probe_handler_t(struct mpt_softc *);
183 typedef int mpt_attach_handler_t(struct mpt_softc *);
184 typedef int mpt_enable_handler_t(struct mpt_softc *);
185 typedef void mpt_ready_handler_t(struct mpt_softc *);
186 typedef int mpt_event_handler_t(struct mpt_softc *, request_t *,
187 MSG_EVENT_NOTIFY_REPLY *);
188 typedef void mpt_reset_handler_t(struct mpt_softc *, int /*type*/);
189 /* XXX Add return value and use for veto? */
190 typedef void mpt_shutdown_handler_t(struct mpt_softc *);
191 typedef void mpt_detach_handler_t(struct mpt_softc *);
192 typedef int mpt_unload_handler_t(struct mpt_personality *);
194 struct mpt_personality
197 uint32_t id; /* Assigned identifier. */
198 u_int use_count; /* Instances using personality*/
199 mpt_load_handler_t *load; /* configure personailty */
200 #define MPT_PERS_FIRST_HANDLER(pers) (&(pers)->load)
201 mpt_probe_handler_t *probe; /* configure personailty */
202 mpt_attach_handler_t *attach; /* initialize device instance */
203 mpt_enable_handler_t *enable; /* enable device */
204 mpt_ready_handler_t *ready; /* final open for business */
205 mpt_event_handler_t *event; /* Handle MPI event. */
206 mpt_reset_handler_t *reset; /* Re-init after reset. */
207 mpt_shutdown_handler_t *shutdown; /* Shutdown instance. */
208 mpt_detach_handler_t *detach; /* release device instance */
209 mpt_unload_handler_t *unload; /* Shutdown personality */
210 #define MPT_PERS_LAST_HANDLER(pers) (&(pers)->unload)
213 int mpt_modevent(module_t, int, void *);
215 /* Maximum supported number of personalities. */
216 #define MPT_MAX_PERSONALITIES (15)
218 #define MPT_PERSONALITY_DEPEND(name, dep, vmin, vpref, vmax) \
219 MODULE_DEPEND(name, dep, vmin, vpref, vmax)
221 #define DECLARE_MPT_PERSONALITY(name, order) \
222 static moduledata_t name##_mod = { \
223 #name, mpt_modevent, &name##_personality \
225 DECLARE_MODULE(name, name##_mod, SI_SUB_DRIVERS, order); \
226 MODULE_VERSION(name, 1); \
227 MPT_PERSONALITY_DEPEND(name, mpt_core, 1, 1, 1)
229 /******************************* Bus DMA Support ******************************/
230 /* XXX Need to update bus_dmamap_sync to take a range argument. */
231 #define bus_dmamap_sync_range(dma_tag, dmamap, offset, len, op) \
232 bus_dmamap_sync(dma_tag, dmamap, op)
234 #if __FreeBSD_version < 600000
235 #define bus_get_dma_tag(x) NULL
237 #if __FreeBSD_version >= 501102
238 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \
239 lowaddr, highaddr, filter, filterarg, \
240 maxsize, nsegments, maxsegsz, flags, \
242 bus_dma_tag_create(parent_tag, alignment, boundary, \
243 lowaddr, highaddr, filter, filterarg, \
244 maxsize, nsegments, maxsegsz, flags, \
245 busdma_lock_mutex, &(mpt)->mpt_lock, \
248 #define mpt_dma_tag_create(mpt, parent_tag, alignment, boundary, \
249 lowaddr, highaddr, filter, filterarg, \
250 maxsize, nsegments, maxsegsz, flags, \
252 bus_dma_tag_create(parent_tag, alignment, boundary, \
253 lowaddr, highaddr, filter, filterarg, \
254 maxsize, nsegments, maxsegsz, flags, \
258 struct mpt_map_info {
259 struct mpt_softc *mpt;
264 void mpt_map_rquest(void *, bus_dma_segment_t *, int, int);
265 /* **************************** NewBUS interrupt Crock ************************/
266 #if __FreeBSD_version < 700031
267 #define mpt_setup_intr(d, i, f, U, if, ifa, hp) \
268 bus_setup_intr(d, i, f, if, ifa, hp)
270 #define mpt_setup_intr bus_setup_intr
273 /**************************** Kernel Thread Support ***************************/
274 #if __FreeBSD_version > 500005
275 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
276 kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg)
278 #define mpt_kthread_create(func, farg, proc_ptr, flags, stackpgs, fmtstr, arg) \
279 kthread_create(func, farg, proc_ptr, fmtstr, arg)
282 /****************************** Timer Facilities ******************************/
283 #if __FreeBSD_version > 500000
284 #define mpt_callout_init(c) callout_init(c, /*mpsafe*/1);
286 #define mpt_callout_init(c) callout_init(c);
289 /********************************** Endianess *********************************/
/*
 * In-place endianness conversion of MPI message fields (the le*toh/htole*
 * calls show MPI data is little-endian).  NB: each macro names the field
 * twice, so "ptr" and "tag" must be side-effect free.
 */
#define MPT_2_HOST64(ptr, tag) (ptr)->tag = le64toh((ptr)->tag)
#define MPT_2_HOST32(ptr, tag) (ptr)->tag = le32toh((ptr)->tag)
#define MPT_2_HOST16(ptr, tag) (ptr)->tag = le16toh((ptr)->tag)

#define HOST_2_MPT64(ptr, tag) (ptr)->tag = htole64((ptr)->tag)
#define HOST_2_MPT32(ptr, tag) (ptr)->tag = htole32((ptr)->tag)
#define HOST_2_MPT16(ptr, tag) (ptr)->tag = htole16((ptr)->tag)
298 #if _BYTE_ORDER == _BIG_ENDIAN
299 void mpt2host_sge_simple_union(SGE_SIMPLE_UNION *);
300 void mpt2host_iocfacts_reply(MSG_IOC_FACTS_REPLY *);
301 void mpt2host_portfacts_reply(MSG_PORT_FACTS_REPLY *);
302 void mpt2host_config_page_ioc2(CONFIG_PAGE_IOC_2 *);
303 void mpt2host_config_page_raid_vol_0(CONFIG_PAGE_RAID_VOL_0 *);
304 void mpt2host_mpi_raid_vol_indicator(MPI_RAID_VOL_INDICATOR *);
306 #define mpt2host_sge_simple_union(x) do { ; } while (0)
307 #define mpt2host_iocfacts_reply(x) do { ; } while (0)
308 #define mpt2host_portfacts_reply(x) do { ; } while (0)
309 #define mpt2host_config_page_ioc2(x) do { ; } while (0)
310 #define mpt2host_config_page_raid_vol_0(x) do { ; } while (0)
311 #define mpt2host_mpi_raid_vol_indicator(x) do { ; } while (0)
314 /**************************** MPI Transaction State ***************************/
316 REQ_STATE_NIL = 0x00,
317 REQ_STATE_FREE = 0x01,
318 REQ_STATE_ALLOCATED = 0x02,
319 REQ_STATE_QUEUED = 0x04,
320 REQ_STATE_DONE = 0x08,
321 REQ_STATE_TIMEDOUT = 0x10,
322 REQ_STATE_NEED_WAKEUP = 0x20,
323 REQ_STATE_LOCKED = 0x80, /* can't be freed */
324 REQ_STATE_MASK = 0xFF
328 TAILQ_ENTRY(req_entry) links; /* Pointer to next in list */
329 mpt_req_state_t state; /* Request State Information */
330 uint16_t index; /* Index of this entry */
331 uint16_t IOCStatus; /* Completion status */
332 uint16_t ResponseCode; /* TMF Reponse Code */
333 uint16_t serno; /* serial number */
334 union ccb *ccb; /* CAM request */
335 void *req_vbuf; /* Virtual Address of Entry */
336 void *sense_vbuf; /* Virtual Address of sense data */
337 bus_addr_t req_pbuf; /* Physical Address of Entry */
338 bus_addr_t sense_pbuf; /* Physical Address of sense data */
339 bus_dmamap_t dmap; /* DMA map for data buffers */
340 struct req_entry *chain; /* for SGE overallocations */
341 struct callout callout; /* Timeout for the request */
344 typedef struct mpt_config_params {
355 /**************************** MPI Target State Info ***************************/
358 uint32_t reply_desc; /* current reply descriptor */
359 uint32_t resid; /* current data residual */
360 uint32_t bytes_xfered; /* current relative offset */
361 union ccb *ccb; /* pointer to currently active ccb */
362 request_t *req; /* pointer to currently active assist request */
372 TGT_STATE_SETTING_UP_FOR_DATA,
373 TGT_STATE_MOVING_DATA,
374 TGT_STATE_MOVING_DATA_AND_STATUS,
375 TGT_STATE_SENDING_STATUS
/*
 * When we get an incoming command it has its own tag which is called the
 * IoIndex. This is the value we gave that particular command buffer when
 * we originally assigned it. It's just a number, really. The FC card uses
 * it as an RX_ID. We can use it to index into mpt->tgt_cmd_ptrs, which
 * contains pointers to the request_t structures related to that IoIndex.
 *
 * What *we* do is construct a tag out of the index for the target command
 * which owns the incoming ATIO plus a rolling sequence number.
 */
/*
 * Tag layout: bits 31:18 IoIndex, 17:12 rolling sequence, 11:0 request index.
 * Arguments are parenthesized so any expression may be passed; note that
 * "mpt" is evaluated with a side effect (sequence counter increment).
 */
#define MPT_MAKE_TAGID(mpt, req, ioindex) \
 (((ioindex) << 18) | ((((mpt)->sequence++) & 0x3f) << 12) | ((req)->index & 0xfff))
393 #define MPT_TAG_2_REQ(a, b) mpt_tag_2_req(a, (uint32_t) b)
395 #define MPT_TAG_2_REQ(mpt, tag) mpt->tgt_cmd_ptrs[tag >> 18]
/*
 * Per-command target-mode state is stored at the tail of the request
 * frame (the last sizeof(mpt_tgt_state_t) bytes of the MPT_RQSL area).
 */
#define MPT_TGT_STATE(mpt, req) ((mpt_tgt_state_t *) \
 (&((uint8_t *)(req)->req_vbuf)[MPT_RQSL(mpt) - sizeof (mpt_tgt_state_t)]))
401 STAILQ_HEAD(mpt_hdr_stailq, ccb_hdr);
402 #define MPT_MAX_LUNS 256
404 struct mpt_hdr_stailq atios;
405 struct mpt_hdr_stailq inots;
408 #define MPT_MAX_ELS 64
410 /**************************** Handler Registration ****************************/
/*
 * Global table of registered reply handlers.  The
 * handler is indicated by byte 3 of the request
 * index submitted to the IOC.  This allows the
 * driver core to perform generic processing without
 * any knowledge of per-personality behavior.
 *
 * MPT_NUM_REPLY_HANDLERS must be a power of 2
 * to allow the easy generation of a mask.
 *
 * The handler offsets used by the core are hard coded
 * allowing faster code generation when assigning a handler
 * to a request.  All "personalities" must use the
 * handler registration mechanism.
 *
 * The IOC handlers that are rarely executed are placed
 * at the tail of the table to make it more likely that
 * all commonly executed handlers fit in a single cache
 * line.
 */
431 #define MPT_NUM_REPLY_HANDLERS (32)
432 #define MPT_REPLY_HANDLER_EVENTS MPT_CBI_TO_HID(0)
433 #define MPT_REPLY_HANDLER_CONFIG MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-1)
434 #define MPT_REPLY_HANDLER_HANDSHAKE MPT_CBI_TO_HID(MPT_NUM_REPLY_HANDLERS-2)
435 typedef int mpt_reply_handler_t(struct mpt_softc *mpt, request_t *request,
436 uint32_t reply_desc, MSG_DEFAULT_REPLY *reply_frame);
438 mpt_reply_handler_t *reply_handler;
448 struct mpt_handler_record
450 LIST_ENTRY(mpt_handler_record) links;
451 mpt_handler_t handler;
454 LIST_HEAD(mpt_handler_list, mpt_handler_record);
/*
 * The handler_id is currently unused but would contain the
 * handler ID used in the MsgContext field to allow direction
 * of replies to the handler.  Registrations that don't require
 * a handler id can pass in NULL for the handler_id.
 *
 * Deregistrations for handlers without a handler id should
 * pass in MPT_HANDLER_ID_NONE.
 */
465 #define MPT_HANDLER_ID_NONE (0xFFFFFFFF)
466 int mpt_register_handler(struct mpt_softc *, mpt_handler_type,
467 mpt_handler_t, uint32_t *);
468 int mpt_deregister_handler(struct mpt_softc *, mpt_handler_type,
469 mpt_handler_t, uint32_t);
471 /******************* Per-Controller Instance Data Structures ******************/
472 TAILQ_HEAD(req_queue, req_entry);
474 /* Structure for saving proper values for modifyable PCI config registers */
477 uint16_t LatencyTimer_LineSize;
479 uint32_t Mem0_BAR[2];
480 uint32_t Mem1_BAR[2];
488 MPT_RVF_ACTIVE = 0x1,
489 MPT_RVF_ANNOUNCED = 0x2,
490 MPT_RVF_UP2DATE = 0x4,
491 MPT_RVF_REFERENCED = 0x8,
492 MPT_RVF_WCE_CHANGED = 0x10
493 } mpt_raid_volume_flags;
495 struct mpt_raid_volume {
496 CONFIG_PAGE_RAID_VOL_0 *config_page;
497 MPI_RAID_VOL_INDICATOR sync_progress;
498 mpt_raid_volume_flags flags;
499 u_int quiesced_disks;
504 MPT_RDF_ACTIVE = 0x01,
505 MPT_RDF_ANNOUNCED = 0x02,
506 MPT_RDF_UP2DATE = 0x04,
507 MPT_RDF_REFERENCED = 0x08,
508 MPT_RDF_QUIESCING = 0x10,
509 MPT_RDF_QUIESCED = 0x20
510 } mpt_raid_disk_flags;
512 struct mpt_raid_disk {
513 CONFIG_PAGE_RAID_PHYS_DISK_0 config_page;
514 struct mpt_raid_volume *volume;
516 u_int pass_thru_active;
517 mpt_raid_disk_flags flags;
520 struct mpt_evtf_record {
521 MSG_EVENT_NOTIFY_REPLY reply;
523 LIST_ENTRY(mpt_evtf_record) links;
526 LIST_HEAD(mpt_evtf_list, mpt_evtf_record);
528 struct mptsas_devinfo {
530 uint16_t parent_dev_handle;
531 uint16_t enclosure_handle;
534 uint8_t physical_port;
537 uint64_t sas_address;
538 uint32_t device_info;
541 struct mptsas_phyinfo {
545 uint8_t negotiated_link_rate;
546 uint8_t hw_link_rate;
547 uint8_t programmed_link_rate;
548 uint8_t sas_port_add_phy;
549 struct mptsas_devinfo identify;
550 struct mptsas_devinfo attached;
553 struct mptsas_portinfo {
555 struct mptsas_phyinfo *phy_info;
560 #if __FreeBSD_version < 500000
561 uint32_t mpt_islocked;
567 uint32_t mpt_pers_mask;
589 u_int role; /* role: none, ini, target, both */
592 #ifdef MPT_TEST_MULTIPATH
599 MSG_IOC_FACTS_REPLY ioc_facts;
604 MSG_PORT_FACTS_REPLY * port_facts;
605 #define mpt_ini_id port_facts[0].PortSCSIID
606 #define mpt_max_tgtcmds port_facts[0].MaxPostedCmdBuffers
609 * Device Configuration Information
613 CONFIG_PAGE_SCSI_PORT_0 _port_page0;
614 CONFIG_PAGE_SCSI_PORT_1 _port_page1;
615 CONFIG_PAGE_SCSI_PORT_2 _port_page2;
616 CONFIG_PAGE_SCSI_DEVICE_0 _dev_page0[16];
617 CONFIG_PAGE_SCSI_DEVICE_1 _dev_page1[16];
618 uint16_t _tag_enable;
619 uint16_t _disc_enable;
621 #define mpt_port_page0 cfg.spi._port_page0
622 #define mpt_port_page1 cfg.spi._port_page1
623 #define mpt_port_page2 cfg.spi._port_page2
624 #define mpt_dev_page0 cfg.spi._dev_page0
625 #define mpt_dev_page1 cfg.spi._dev_page1
626 #define mpt_tag_enable cfg.spi._tag_enable
627 #define mpt_disc_enable cfg.spi._disc_enable
629 CONFIG_PAGE_FC_PORT_0 _port_page0;
630 uint32_t _port_speed;
631 #define mpt_fcport_page0 cfg.fc._port_page0
632 #define mpt_fcport_speed cfg.fc._port_speed
635 #if __FreeBSD_version >= 500000
637 * Device config information stored up for sysctl to access
641 unsigned int initiator_id;
650 /* Controller Info for RAID information */
651 CONFIG_PAGE_IOC_2 * ioc_page2;
652 CONFIG_PAGE_IOC_3 * ioc_page3;
655 struct mpt_raid_volume* raid_volumes;
656 struct mpt_raid_disk* raid_disks;
657 u_int raid_max_volumes;
658 u_int raid_max_disks;
659 u_int raid_page0_len;
662 u_int raid_resync_rate;
663 u_int raid_mwce_setting;
664 u_int raid_queue_depth;
665 u_int raid_nonopt_volumes;
666 struct proc *raid_thread;
667 struct callout raid_timer;
673 struct resource * pci_irq; /* Interrupt map for chip */
674 void * ih; /* Interupt handle */
675 struct mpt_pci_cfg pci_cfg; /* saved PCI conf registers */
680 struct resource * pci_reg; /* Register map for chip */
681 int pci_mem_rid; /* Resource ID */
682 bus_space_tag_t pci_st; /* Bus tag for registers */
683 bus_space_handle_t pci_sh; /* Bus handle for registers */
684 /* PIO versions of above. */
686 struct resource * pci_pio_reg;
687 bus_space_tag_t pci_pio_st;
688 bus_space_handle_t pci_pio_sh;
690 bus_dma_tag_t parent_dmat; /* DMA tag for parent PCI bus */
691 bus_dma_tag_t reply_dmat; /* DMA tag for reply memory */
692 bus_dmamap_t reply_dmap; /* DMA map for reply memory */
693 uint8_t *reply; /* KVA of reply memory */
694 bus_addr_t reply_phys; /* BusAddr of reply memory */
696 bus_dma_tag_t buffer_dmat; /* DMA tag for buffers */
697 bus_dma_tag_t request_dmat; /* DMA tag for request memroy */
698 bus_dmamap_t request_dmap; /* DMA map for request memroy */
699 uint8_t *request; /* KVA of Request memory */
700 bus_addr_t request_phys; /* BusAddr of request memory */
702 uint32_t max_seg_cnt; /* calculated after IOC facts */
705 * Hardware management
710 * CAM && Software Management
712 request_t *request_pool;
713 struct req_queue request_free_list;
714 struct req_queue request_pending_list;
715 struct req_queue request_timeout_list;
719 struct cam_path *path;
721 struct cam_sim *phydisk_sim;
722 struct cam_path *phydisk_path;
724 struct proc *recovery_thread;
728 * Deferred frame acks due to resource shortage.
730 struct mpt_evtf_list ack_frames;
733 * Target Mode Support
735 uint32_t scsi_tgt_handler_id;
736 request_t ** tgt_cmd_ptrs;
737 request_t ** els_cmd_ptrs; /* FC only */
740 * *snork*- this is chosen to be here *just in case* somebody
741 * forgets to point to it exactly and we index off of trt with
744 tgt_resource_t trt_wildcard; /* wildcard luns */
745 tgt_resource_t trt[MPT_MAX_LUNS];
746 uint16_t tgt_cmds_allocated;
747 uint16_t els_cmds_allocated; /* FC only */
749 uint16_t timeouts; /* timeout count */
750 uint16_t success; /* successes afer timeout */
751 uint16_t sequence; /* Sequence Number */
755 /* Paired port in some dual adapters configurations */
756 struct mpt_softc * mpt2;
758 /* FW Image management */
759 uint32_t fw_image_size;
761 bus_dma_tag_t fw_dmat; /* DMA tag for firmware image */
762 bus_dmamap_t fw_dmap; /* DMA map for firmware image */
763 bus_addr_t fw_phys; /* BusAddr of firmware image */
766 struct mptsas_portinfo *sas_portinfo;
768 /* Shutdown Event Handler. */
771 TAILQ_ENTRY(mpt_softc) links;
774 static __inline void mpt_assign_serno(struct mpt_softc *, request_t *);
777 mpt_assign_serno(struct mpt_softc *mpt, request_t *req)
779 if ((req->serno = mpt->sequence++) == 0) {
780 req->serno = mpt->sequence++;
784 /***************************** Locking Primitives *****************************/
785 #if __FreeBSD_version < 500000
786 #define MPT_IFLAGS INTR_TYPE_CAM
787 #define MPT_LOCK(mpt) mpt_lockspl(mpt)
788 #define MPT_UNLOCK(mpt) mpt_unlockspl(mpt)
789 #define MPT_OWNED(mpt) mpt->mpt_islocked
790 #define MPT_LOCK_ASSERT(mpt)
791 #define MPTLOCK_2_CAMLOCK MPT_UNLOCK
792 #define CAMLOCK_2_MPTLOCK MPT_LOCK
793 #define MPT_LOCK_SETUP(mpt)
794 #define MPT_LOCK_DESTROY(mpt)
796 static __inline void mpt_lockspl(struct mpt_softc *mpt);
797 static __inline void mpt_unlockspl(struct mpt_softc *mpt);
800 mpt_lockspl(struct mpt_softc *mpt)
805 if (mpt->mpt_islocked++ == 0) {
806 mpt->mpt_splsaved = s;
809 panic("Recursed lock with mask: 0x%x\n", s);
814 mpt_unlockspl(struct mpt_softc *mpt)
816 if (mpt->mpt_islocked) {
817 if (--mpt->mpt_islocked == 0) {
818 splx(mpt->mpt_splsaved);
821 panic("Negative lock count\n");
825 mpt_sleep(struct mpt_softc *mpt, void *ident, int priority,
826 const char *wmesg, int timo)
832 KASSERT(mpt->mpt_islocked <= 1, ("Invalid lock count on tsleep"));
833 saved_cnt = mpt->mpt_islocked;
834 saved_spl = mpt->mpt_splsaved;
835 mpt->mpt_islocked = 0;
836 error = tsleep(ident, priority, wmesg, timo);
837 KASSERT(mpt->mpt_islocked == 0, ("Invalid lock count on wakeup"));
838 mpt->mpt_islocked = saved_cnt;
839 mpt->mpt_splsaved = saved_spl;
/*
 * Per-request timeout helpers.  Expansions are single expressions with
 * no trailing semicolon, so they are safe inside if/else bodies.
 */
#define mpt_req_timeout(req, ticks, func, arg) \
	callout_reset(&(req)->callout, (ticks), (func), (arg))
#define mpt_req_untimeout(req, func, arg) \
	callout_stop(&(req)->callout)
#define mpt_req_timeout_init(req) \
	callout_init(&(req)->callout)
852 #define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY | INTR_MPSAFE
853 #define MPT_LOCK_SETUP(mpt) \
854 mtx_init(&mpt->mpt_lock, "mpt", NULL, MTX_DEF); \
855 mpt->mpt_locksetup = 1
856 #define MPT_LOCK_DESTROY(mpt) \
857 if (mpt->mpt_locksetup) { \
858 mtx_destroy(&mpt->mpt_lock); \
859 mpt->mpt_locksetup = 0; \
862 #define MPT_LOCK(mpt) mtx_lock(&(mpt)->mpt_lock)
863 #define MPT_UNLOCK(mpt) mtx_unlock(&(mpt)->mpt_lock)
864 #define MPT_OWNED(mpt) mtx_owned(&(mpt)->mpt_lock)
865 #define MPT_LOCK_ASSERT(mpt) mtx_assert(&(mpt)->mpt_lock, MA_OWNED)
866 #define MPTLOCK_2_CAMLOCK(mpt)
867 #define CAMLOCK_2_MPTLOCK(mpt)
868 #define mpt_sleep(mpt, ident, priority, wmesg, timo) \
869 msleep(ident, &(mpt)->mpt_lock, priority, wmesg, timo)
/*
 * Per-request timeout helpers (MPSAFE callout variant).  Expansions are
 * single expressions with no trailing semicolon, so they are safe inside
 * if/else bodies.
 */
#define mpt_req_timeout(req, ticks, func, arg) \
	callout_reset(&(req)->callout, (ticks), (func), (arg))
#define mpt_req_untimeout(req, func, arg) \
	callout_stop(&(req)->callout)
#define mpt_req_timeout_init(req) \
	callout_init(&(req)->callout, 1)
879 #define MPT_IFLAGS INTR_TYPE_CAM | INTR_ENTROPY
880 #define MPT_LOCK_SETUP(mpt) do { } while (0)
881 #define MPT_LOCK_DESTROY(mpt) do { } while (0)
882 #define MPT_LOCK_ASSERT(mpt) mtx_assert(&Giant, MA_OWNED)
883 #define MPT_LOCK(mpt) mtx_lock(&Giant)
884 #define MPT_UNLOCK(mpt) mtx_unlock(&Giant)
885 #define MPTLOCK_2_CAMLOCK(mpt)
886 #define CAMLOCK_2_MPTLOCK(mpt)
889 mpt_sleep(struct mpt_softc *, void *, int, const char *, int);
891 #define mpt_ccb_timeout(ccb, ticks, func, arg) \
893 (ccb)->ccb_h.timeout_ch = timeout((func), (arg), (ticks)); \
895 #define mpt_ccb_untimeout(ccb, func, arg) \
896 untimeout((func), (arg), (ccb)->ccb_h.timeout_ch)
897 #define mpt_ccb_timeout_init(ccb) \
898 callout_handle_init(&(ccb)->ccb_h.timeout_ch)
901 mpt_sleep(struct mpt_softc *mpt, void *i, int p, const char *w, int t)
904 r = tsleep(i, p, w, t);
910 /******************************* Register Access ******************************/
911 static __inline void mpt_write(struct mpt_softc *, size_t, uint32_t);
912 static __inline uint32_t mpt_read(struct mpt_softc *, int);
913 static __inline void mpt_pio_write(struct mpt_softc *, size_t, uint32_t);
914 static __inline uint32_t mpt_pio_read(struct mpt_softc *, int);
917 mpt_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
919 bus_space_write_4(mpt->pci_st, mpt->pci_sh, offset, val);
922 static __inline uint32_t
923 mpt_read(struct mpt_softc *mpt, int offset)
925 return (bus_space_read_4(mpt->pci_st, mpt->pci_sh, offset));
929 * Some operations (e.g. diagnostic register writes while the ARM proccessor
930 * is disabled), must be performed using "PCI pio" operations. On non-PCI
931 * busses, these operations likely map to normal register accesses.
934 mpt_pio_write(struct mpt_softc *mpt, size_t offset, uint32_t val)
936 bus_space_write_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset, val);
939 static __inline uint32_t
940 mpt_pio_read(struct mpt_softc *mpt, int offset)
942 return (bus_space_read_4(mpt->pci_pio_st, mpt->pci_pio_sh, offset));
944 /*********************** Reply Frame/Request Management ***********************/
/* Max MPT Reply we are willing to accept (must be power of 2) */
#define MPT_REPLY_SIZE 256

/*
 * Must be less than 16384 in order for target mode to work
 */
#define MPT_MAX_REQUESTS(mpt) 512
#define MPT_REQUEST_AREA 512
#define MPT_SENSE_SIZE 32 /* included in MPT_REQUEST_AREA */
#define MPT_REQ_MEM_SIZE(mpt) (MPT_MAX_REQUESTS(mpt) * MPT_REQUEST_AREA)
/*
 * MsgContext layout: the handler (callback) index lives in the bits at
 * and above MPT_CONTEXT_CB_SHIFT; the low 16 bits carry the request index.
 * All arguments are parenthesized so compound expressions work.
 */
#define MPT_CONTEXT_CB_SHIFT (16)
#define MPT_CBI(handle) ((handle) >> MPT_CONTEXT_CB_SHIFT)
#define MPT_CBI_TO_HID(cbi) ((cbi) << MPT_CONTEXT_CB_SHIFT)
#define MPT_CONTEXT_TO_CBI(x) \
 (((x) >> MPT_CONTEXT_CB_SHIFT) & (MPT_NUM_REPLY_HANDLERS - 1))
#define MPT_CONTEXT_REQI_MASK 0xFFFF
#define MPT_CONTEXT_TO_REQI(x) ((x) & MPT_CONTEXT_REQI_MASK)
965 * Convert a 32bit physical address returned from IOC to an
966 * offset into our reply frame memory or the kvm address needed
967 * to access the data. The returned address is only the low
968 * 32 bits, so mask our base physical address accordingly.
970 #define MPT_REPLY_BADDR(x) \
972 #define MPT_REPLY_OTOV(m, i) \
973 ((void *)(&m->reply[i]))
975 #define MPT_DUMP_REPLY_FRAME(mpt, reply_frame) \
977 if (mpt->verbose > MPT_PRT_DEBUG) \
978 mpt_dump_reply_frame(mpt, reply_frame); \
981 static __inline uint32_t mpt_pop_reply_queue(struct mpt_softc *mpt);
982 static __inline void mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr);
985 * Give the reply buffer back to the IOC after we have
986 * finished processing it.
989 mpt_free_reply(struct mpt_softc *mpt, uint32_t ptr)
991 mpt_write(mpt, MPT_OFFSET_REPLY_Q, ptr);
994 /* Get a reply from the IOC */
995 static __inline uint32_t
996 mpt_pop_reply_queue(struct mpt_softc *mpt)
998 return mpt_read(mpt, MPT_OFFSET_REPLY_Q);
1002 mpt_complete_request_chain(struct mpt_softc *, struct req_queue *, u_int);
1004 /************************** Scatter Gather Managment **************************/
/* MPT_RQSL- size of request frame, in bytes (RequestFrameSize is in words) */
#define MPT_RQSL(mpt) ((mpt)->ioc_facts.RequestFrameSize << 2)

/* MPT_NSGL- how many SG entries can fit in a request frame size */
#define MPT_NSGL(mpt) (MPT_RQSL(mpt) / sizeof (SGE_IO_UNION))

/* MPT_NRFM- how many request frames can fit in each request alloc we make */
#define MPT_NRFM(mpt) (MPT_REQUEST_AREA / MPT_RQSL(mpt))

/*
 * MPT_NSGL_FIRST- # of SG elements that can fit after
 * an I/O request but still within the request frame.
 * Do this safely based upon SGE_IO_UNION.
 *
 * Note that the first element is *within* the SCSI request.
 */
#define MPT_NSGL_FIRST(mpt) \
 ((MPT_RQSL(mpt) - sizeof (MSG_SCSI_IO_REQUEST) + sizeof (SGE_IO_UNION)) / \
 sizeof (SGE_IO_UNION))
1025 /***************************** IOC Initialization *****************************/
1026 int mpt_reset(struct mpt_softc *, int /*reinit*/);
1028 /****************************** Debugging ************************************/
1029 typedef struct mpt_decode_entry {
1033 } mpt_decode_entry_t;
1035 int mpt_decode_value(mpt_decode_entry_t *table, u_int num_entries,
1036 const char *name, u_int value, u_int *cur_column,
1039 void mpt_dump_data(struct mpt_softc *, const char *, void *, int);
1040 void mpt_dump_request(struct mpt_softc *, request_t *);
/* Verbosity level enumerator (surrounding enumerators elided in excerpt). */
1048 MPT_PRT_NEGOTIATION,
/*
 * Leveled printf wrappers: the variadic-macro form is used on FreeBSD 5+,
 * the function form (below) otherwise.  The do/while wrappers and #else
 * lines are elided from this excerpt.
 */
1057 #if __FreeBSD_version > 500000
1058 #define mpt_lprt(mpt, level, ...) \
1060 if (level <= (mpt)->verbose) \
1061 mpt_prt(mpt, __VA_ARGS__); \
/*
 * NOTE(review): mpt_lprt gates on (mpt)->verbose but mpt_lprtc gates on
 * (mpt)->debug_level — confirm this asymmetry is intentional.
 */
1064 #define mpt_lprtc(mpt, level, ...) \
1066 if (level <= (mpt)->debug_level) \
1067 mpt_prtc(mpt, __VA_ARGS__); \
1070 void mpt_lprt(struct mpt_softc *, int, const char *, ...)
1072 void mpt_lprtc(struct mpt_softc *, int, const char *, ...)
/* mpt_prt prints with a device prefix; mpt_prtc continues a line (presumably). */
1075 void mpt_prt(struct mpt_softc *, const char *, ...)
1077 void mpt_prtc(struct mpt_softc *, const char *, ...)
1080 /**************************** Target Mode Related ***************************/
1081 static __inline int mpt_cdblen(uint8_t, int);
/*
 * Derive a CDB length from the opcode's group code (top 3 bits of byte 0).
 * The switch over `group` (original lines 1086-1098) is elided from this
 * excerpt; `maxlen` presumably bounds the result — confirm at the definition.
 */
1083 mpt_cdblen(uint8_t cdb0, int maxlen)
1085 int group = cdb0 >> 5;
1099 static __inline request_t * mpt_tag_2_req(struct mpt_softc *, uint32_t);
1100 static __inline request_t *
1101 mpt_tag_2_req(struct mpt_softc *mpt, uint32_t tag)
1103 uint16_t rtg = (tag >> 18);
1104 KASSERT(rtg < mpt->tgt_cmds_allocated, ("bad tag %d\n", tag));
1105 KASSERT(mpt->tgt_cmd_ptrs, ("no cmd backpointer array"));
1106 KASSERT(mpt->tgt_cmd_ptrs[rtg], ("no cmd backpointer"));
1107 return (mpt->tgt_cmd_ptrs[rtg]);
/* Debug helpers: linear scans of the driver's request lists (O(n)). */
1112 mpt_req_on_free_list(struct mpt_softc *, request_t *);
1114 mpt_req_on_pending_list(struct mpt_softc *, request_t *);
1116 static __inline void
1117 mpt_req_spcl(struct mpt_softc *, request_t *, const char *, int);
1118 static __inline void
1119 mpt_req_not_spcl(struct mpt_softc *, request_t *, const char *, int);
1123 * Is request on freelist?
/* Walks request_free_list looking for `req`; loop body elided in excerpt. */
1126 mpt_req_on_free_list(struct mpt_softc *mpt, request_t *req)
1130 TAILQ_FOREACH(lrq, &mpt->request_free_list, links) {
1139 * Is request on pending list?
/* Walks request_pending_list looking for `req`; loop body elided in excerpt. */
1142 mpt_req_on_pending_list(struct mpt_softc *mpt, request_t *req)
1146 TAILQ_FOREACH(lrq, &mpt->request_pending_list, links) {
1155 * Make sure that req *is* part of one of the special lists
/*
 * Scans the ELS and target-command backpointer arrays for `req`; the
 * early-return bodies of both loops are elided from this excerpt.  Panics
 * (with caller's file/line via s/line) if the request is in neither array.
 */
1157 static __inline void
1158 mpt_req_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1161 for (i = 0; i < mpt->els_cmds_allocated; i++) {
1162 if (req == mpt->els_cmd_ptrs[i]) {
1166 for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1167 if (req == mpt->tgt_cmd_ptrs[i]) {
1171 panic("%s(%d): req %p:%u function %x not in els or tgt ptrs\n",
1172 s, line, req, req->serno,
1173 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function);
1177 * Make sure that req is *not* part of one of the special lists.
/*
 * Inverse of mpt_req_spcl(): KASSERTs that `req` appears in neither the
 * ELS nor the target-command backpointer arrays (debug kernels only).
 * `s`/`line` identify the caller in the assertion message.
 */
1179 static __inline void
1180 mpt_req_not_spcl(struct mpt_softc *mpt, request_t *req, const char *s, int line)
1183 for (i = 0; i < mpt->els_cmds_allocated; i++) {
1184 KASSERT(req != mpt->els_cmd_ptrs[i],
1185 ("%s(%d): req %p:%u func %x in els ptrs at ioindex %d\n",
1186 s, line, req, req->serno,
1187 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1189 for (i = 0; i < mpt->tgt_cmds_allocated; i++) {
1190 KASSERT(req != mpt->tgt_cmd_ptrs[i],
1191 ("%s(%d): req %p:%u func %x in tgt ptrs at ioindex %d\n",
1192 s, line, req, req->serno,
1193 ((PTR_MSG_REQUEST_HEADER)req->req_vbuf)->Function, i));
1199 * Task Management Types, purely for internal consumption
/*
 * Internal task-management enum (other enumerators elided in excerpt).
 * The 1234/5678 values are presumably chosen to avoid colliding with MPI
 * protocol TM type codes — TODO confirm.
 */
1202 MPT_ABORT_TASK_SET=1234,
1207 MPT_NIL_TMT_VALUE=5678
1210 /**************************** Unclassified Routines ***************************/
/* Queue a prepared request frame to the IOC. */
1211 void mpt_send_cmd(struct mpt_softc *mpt, request_t *req);
/* Doorbell handshake receive; pairs with mpt_send_handshake_cmd() below. */
1212 int mpt_recv_handshake_reply(struct mpt_softc *mpt,
1213 size_t reply_len, void *reply);
/* Wait for `req` to reach `state` (under `mask`), optionally sleeping. */
1214 int mpt_wait_req(struct mpt_softc *mpt, request_t *req,
1215 mpt_req_state_t state, mpt_req_state_t mask,
1216 int sleep_ok, int time_ms);
1217 void mpt_enable_ints(struct mpt_softc *mpt);
1218 void mpt_disable_ints(struct mpt_softc *mpt);
/* Driver lifecycle entry points. */
1219 int mpt_attach(struct mpt_softc *mpt);
1220 int mpt_shutdown(struct mpt_softc *mpt);
1221 int mpt_detach(struct mpt_softc *mpt);
1222 int mpt_send_handshake_cmd(struct mpt_softc *mpt,
1223 size_t len, void *cmd);
/* Request-frame pool management. */
1224 request_t * mpt_get_request(struct mpt_softc *mpt, int sleep_ok);
1225 void mpt_free_request(struct mpt_softc *mpt, request_t *req);
/* Interrupt handler (registered with bus_setup_intr; arg is the softc). */
1226 void mpt_intr(void *arg);
1227 void mpt_check_doorbell(struct mpt_softc *mpt);
1228 void mpt_dump_reply_frame(struct mpt_softc *mpt,
1229 MSG_DEFAULT_REPLY *reply_frame);
1231 void mpt_set_config_regs(struct mpt_softc *);
/* Build and issue a CONFIG request; intermediate parameter lines elided. */
1232 int mpt_issue_cfg_req(struct mpt_softc */*mpt*/, request_t */*req*/,
1234 bus_addr_t /*addr*/, bus_size_t/*len*/,
1235 int /*sleep_ok*/, int /*timeout_ms*/);
/* Extended config page accessors (header then page data). */
1236 int mpt_read_extcfg_header(struct mpt_softc *mpt, int PageVersion,
1237 int PageNumber, uint32_t PageAddress,
1239 CONFIG_EXTENDED_PAGE_HEADER *rslt,
1240 int sleep_ok, int timeout_ms);
1241 int mpt_read_extcfg_page(struct mpt_softc *mpt, int Action,
1242 uint32_t PageAddress,
1243 CONFIG_EXTENDED_PAGE_HEADER *hdr,
1244 void *buf, size_t len, int sleep_ok,
/* Standard config page accessors (header, read, write). */
1246 int mpt_read_cfg_header(struct mpt_softc *, int /*PageType*/,
1248 uint32_t /*PageAddress*/,
1249 CONFIG_PAGE_HEADER *,
1250 int /*sleep_ok*/, int /*timeout_ms*/);
1251 int mpt_read_cfg_page(struct mpt_softc *t, int /*Action*/,
1252 uint32_t /*PageAddress*/,
1253 CONFIG_PAGE_HEADER *, size_t /*len*/,
1254 int /*sleep_ok*/, int /*timeout_ms*/);
1255 int mpt_write_cfg_page(struct mpt_softc *, int /*Action*/,
1256 uint32_t /*PageAddress*/,
1257 CONFIG_PAGE_HEADER *, size_t /*len*/,
1258 int /*sleep_ok*/, int /*timeout_ms*/);
/*
 * Convenience wrappers that read/write the CURRENT copy of a config page
 * by delegating to mpt_read_cfg_page()/mpt_write_cfg_page() with the
 * MPI PAGE_READ_CURRENT / PAGE_WRITE_CURRENT actions.  The `static
 * __inline int` lines and braces are elided from this excerpt.
 */
1260 mpt_read_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1261 CONFIG_PAGE_HEADER *hdr, size_t len,
1262 int sleep_ok, int timeout_ms)
1264 return (mpt_read_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_READ_CURRENT,
1265 PageAddress, hdr, len, sleep_ok, timeout_ms));
1269 mpt_write_cur_cfg_page(struct mpt_softc *mpt, uint32_t PageAddress,
1270 CONFIG_PAGE_HEADER *hdr, size_t len, int sleep_ok,
1273 return (mpt_write_cfg_page(mpt, MPI_CONFIG_ACTION_PAGE_WRITE_CURRENT,
1274 PageAddress, hdr, len, sleep_ok,
1277 /* mpt_debug.c functions */
/* Human-readable dumps of replies, doorbell state, and request frames. */
1278 void mpt_print_reply(void *vmsg);
1279 void mpt_print_db(uint32_t mb);
1280 void mpt_print_config_reply(void *vmsg);
/* Returns a static string describing the IOC diagnostic register bits. */
1281 char *mpt_ioc_diag(uint32_t diag);
1282 void mpt_req_state(mpt_req_state_t state);
1283 void mpt_print_config_request(void *vmsg);
1284 void mpt_print_request(void *vmsg);
1285 void mpt_print_scsi_io_request(MSG_SCSI_IO_REQUEST *msg);
1286 void mpt_dump_sgl(SGE_IO_UNION *se, int offset);
1287 #endif /* _MPT_H_ */