2 * Copyright (c) 1996-2000 Distributed Processing Technology Corporation
3 * Copyright (c) 2000-2001 Adaptec Corporation
6 * TERMS AND CONDITIONS OF USE
8 * Redistribution and use in source form, with or without modification, are
9 * permitted provided that redistributions of source code must retain the
10 * above copyright notice, this list of conditions and the following disclaimer.
12 * This software is provided `as is' by Adaptec and any express or implied
13 * warranties, including, but not limited to, the implied warranties of
14 * merchantability and fitness for a particular purpose, are disclaimed. In no
15 * event shall Adaptec be liable for any direct, indirect, incidental, special,
16 * exemplary or consequential damages (including, but not limited to,
17 * procurement of substitute goods or services; loss of use, data, or profits;
18 * or business interruptions) however caused and on any theory of liability,
19 * whether in contract, strict liability, or tort (including negligence or
20 * otherwise) arising in any way out of the use of this driver software, even
21 * if advised of the possibility of such damage.
23 * SCSI I2O host adapter driver
25 * V1.10 2004/05/05 scottl@freebsd.org
26 * - Massive cleanup of the driver to remove dead code and
27 * non-conformant style.
28 * - Removed most i386-specific code to make it more portable.
29 * - Converted to the bus_space API.
30 * V1.08 2001/08/21 Mark_Salyzyn@adaptec.com
31 * - The 2000S and 2005S do not initialize on some machines,
32 * increased timeout to 255ms from 50ms for the StatusGet
34 * V1.07 2001/05/22 Mark_Salyzyn@adaptec.com
35 * - I knew this one was too good to be true. The error return
36 * on ioctl commands needs to be compared to CAM_REQ_CMP, not
37 * to the bit masked status.
38 * V1.06 2001/05/08 Mark_Salyzyn@adaptec.com
39 * - The 2005S that was supported is affectionately called the
40 * Conjoined BAR Firmware. In order to support RAID-5 in a
41 * 16MB low-cost configuration, Firmware was forced to go
42 * to a Split BAR Firmware. This requires a separate IOP and
43 * Messaging base address.
44 * V1.05 2001/04/25 Mark_Salyzyn@adaptec.com
45 * - Handle support for 2005S Zero Channel RAID solution.
46 * - System locked up if the Adapter locked up. Do not try
47 * to send other commands if the resetIOP command fails. The
48 * fail outstanding command discovery loop was flawed as the
49 * removal of the command from the list prevented discovering
51 * - Comment changes to clarify driver.
52 * - SysInfo searched for an EATA SmartROM, not an I2O SmartROM.
53 * - We do not use the AC_FOUND_DEV event because of I2O.
55 * V1.04 2000/09/22 Mark_Salyzyn@adaptec.com, msmith@freebsd.org,
56 * lampa@fee.vutbr.cz and Scott_Long@adaptec.com.
57 * - Removed support for PM1554, PM2554 and PM2654 in Mode-0
58 * mode as this is confused with competitor adapters in run
60 * - critical locking needed in ASR_ccbAdd and ASR_ccbRemove
61 * to prevent operating system panic.
62 * - moved default major number to 154 from 97.
63 * V1.03 2000/07/12 Mark_Salyzyn@adaptec.com
64 * - The controller is not actually an ASR (Adaptec SCSI RAID)
65 * series that is visible, it's more of an internal code name.
66 * remove any visible references within reason for now.
67 * - bus_ptr->LUN was not correctly zeroed when initially
68 * allocated causing a possible panic of the operating system
70 * V1.02 2000/06/26 Mark_Salyzyn@adaptec.com
71 * - Code always fails for ASR_getTid affecting performance.
72 * - initiated a set of changes that resulted from a formal
73 * code inspection by Mark_Salyzyn@adaptec.com,
74 * George_Dake@adaptec.com, Jeff_Zeak@adaptec.com,
75 * Martin_Wilson@adaptec.com and Vincent_Trandoan@adaptec.com.
76 * Their findings were focussed on the LCT & TID handler, and
77 * all resulting changes were to improve code readability,
78 * consistency or have a positive effect on performance.
79 * V1.01 2000/06/14 Mark_Salyzyn@adaptec.com
80 * - Passthrough returned an incorrect error.
81 * - Passthrough did not migrate the intrinsic scsi layer wakeup
82 * on command completion.
83 * - generate control device nodes using make_dev and delete_dev.
84 * - Performance affected by TID caching reallocing.
85 * - Made suggested changes by Justin_Gibbs@adaptec.com
86 * - use splcam instead of splbio.
87 * - use cam_imask instead of bio_imask.
88 * - use u_int8_t instead of u_char.
89 * - use u_int16_t instead of u_short.
90 * - use u_int32_t instead of u_long where appropriate.
91 * - use 64 bit context handler instead of 32 bit.
92 * - create_ccb should only allocate the worst case
93 * requirements for the driver since CAM may evolve
94 * making union ccb much larger than needed here.
95 * renamed create_ccb to asr_alloc_ccb.
96 * - go nutz justifying all debug prints as macros
97 * defined at the top and remove unsightly ifdefs.
98 * - INLINE STATIC viewed as confusing. Historically
99 * utilized to affect code performance and debug
100 * issues in OS, Compiler or OEM specific situations.
101 * V1.00 2000/05/31 Mark_Salyzyn@adaptec.com
102 * - Ported from FreeBSD 2.2.X DPT I2O driver.
103 * changed struct scsi_xfer to union ccb/struct ccb_hdr
104 * changed variable name xs to ccb
105 * changed struct scsi_link to struct cam_path
106 * changed struct scsibus_data to struct cam_sim
107 * stopped using fordriver for holding on to the TID
108 * use proprietary packet creation instead of scsi_inquire
109 * CAM layer sends synchronize commands.
112 #include <sys/cdefs.h>
113 #include <sys/param.h> /* TRUE=1 and FALSE=0 defined here */
114 #include <sys/kernel.h>
115 #include <sys/module.h>
116 #include <sys/systm.h>
117 #include <sys/malloc.h>
118 #include <sys/conf.h>
119 #include <sys/ioccom.h>
120 #include <sys/priv.h>
121 #include <sys/proc.h>
123 #include <machine/resource.h>
124 #include <machine/bus.h>
125 #include <sys/rman.h>
126 #include <sys/stat.h>
127 #include <sys/bus_dma.h>
130 #include <cam/cam_ccb.h>
131 #include <cam/cam_sim.h>
132 #include <cam/cam_xpt_sim.h>
134 #include <cam/scsi/scsi_all.h>
135 #include <cam/scsi/scsi_message.h>
140 #if defined(__i386__)
142 #include <i386/include/cputypes.h>
144 #if defined(ASR_COMPAT)
145 #define ASR_IOCTL_COMPAT
146 #endif /* ASR_COMPAT */
148 #include <machine/vmparam.h>
150 #include <dev/pci/pcivar.h>
151 #include <dev/pci/pcireg.h>
153 #define osdSwap4(x) ((u_long)ntohl((u_long)(x)))
154 #define KVTOPHYS(x) vtophys(x)
155 #include <dev/asr/dptalign.h>
156 #include <dev/asr/i2oexec.h>
157 #include <dev/asr/i2obscsi.h>
158 #include <dev/asr/i2odpt.h>
159 #include <dev/asr/i2oadptr.h>
161 #include <dev/asr/sys_info.h>
163 __FBSDID("$FreeBSD$");
165 #define ASR_VERSION 1
166 #define ASR_REVISION '1'
167 #define ASR_SUBREVISION '0'
170 #define ASR_YEAR (2004 - 1980)
173 * Debug macros to reduce the unsightly ifdefs
175 #if (defined(DEBUG_ASR) || defined(DEBUG_ASR_USR_CMD) || defined(DEBUG_ASR_CMD))
177 debug_asr_message(PI2O_MESSAGE_FRAME message)
179 u_int32_t * pointer = (u_int32_t *)message;
180 u_int32_t length = I2O_MESSAGE_FRAME_getMessageSize(message);
181 u_int32_t counter = 0;
184 printf("%08lx%c", (u_long)*(pointer++),
185 (((++counter & 7) == 0) || (length == 0)) ? '\n' : ' ');
188 #endif /* DEBUG_ASR || DEBUG_ASR_USR_CMD || DEBUG_ASR_CMD */
/* Breaks on non-STDC-based compilers :-( */
192 #define debug_asr_printf(fmt,args...) printf(fmt, ##args)
193 #define debug_asr_dump_message(message) debug_asr_message(message)
194 #define debug_asr_print_path(ccb) xpt_print_path(ccb->ccb_h.path);
195 #else /* DEBUG_ASR */
196 #define debug_asr_printf(fmt,args...)
197 #define debug_asr_dump_message(message)
198 #define debug_asr_print_path(ccb)
199 #endif /* DEBUG_ASR */
202 * If DEBUG_ASR_CMD is defined:
203 * 0 - Display incoming SCSI commands
204 * 1 - add in a quick character before queueing.
205 * 2 - add in outgoing message frames.
207 #if (defined(DEBUG_ASR_CMD))
208 #define debug_asr_cmd_printf(fmt,args...) printf(fmt,##args)
210 debug_asr_dump_ccb(union ccb *ccb)
212 u_int8_t *cp = (unsigned char *)&(ccb->csio.cdb_io);
213 int len = ccb->csio.cdb_len;
216 debug_asr_cmd_printf (" %02x", *(cp++));
220 #if (DEBUG_ASR_CMD > 0)
221 #define debug_asr_cmd1_printf debug_asr_cmd_printf
223 #define debug_asr_cmd1_printf(fmt,args...)
225 #if (DEBUG_ASR_CMD > 1)
226 #define debug_asr_cmd2_printf debug_asr_cmd_printf
227 #define debug_asr_cmd2_dump_message(message) debug_asr_message(message)
229 #define debug_asr_cmd2_printf(fmt,args...)
230 #define debug_asr_cmd2_dump_message(message)
232 #else /* DEBUG_ASR_CMD */
233 #define debug_asr_cmd_printf(fmt,args...)
234 #define debug_asr_dump_ccb(ccb)
235 #define debug_asr_cmd1_printf(fmt,args...)
236 #define debug_asr_cmd2_printf(fmt,args...)
237 #define debug_asr_cmd2_dump_message(message)
238 #endif /* DEBUG_ASR_CMD */
240 #if (defined(DEBUG_ASR_USR_CMD))
241 #define debug_usr_cmd_printf(fmt,args...) printf(fmt,##args)
242 #define debug_usr_cmd_dump_message(message) debug_usr_message(message)
243 #else /* DEBUG_ASR_USR_CMD */
244 #define debug_usr_cmd_printf(fmt,args...)
245 #define debug_usr_cmd_dump_message(message)
246 #endif /* DEBUG_ASR_USR_CMD */
248 #ifdef ASR_IOCTL_COMPAT
249 #define dsDescription_size 46 /* Snug as a bug in a rug */
250 #endif /* ASR_IOCTL_COMPAT */
252 #include "dev/asr/dptsig.h"
254 static dpt_sig_S ASR_sig = {
255 { 'd', 'P', 't', 'S', 'i', 'G'}, SIG_VERSION, PROC_INTEL,
256 PROC_386 | PROC_486 | PROC_PENTIUM | PROC_SEXIUM, FT_HBADRVR, 0,
257 OEM_DPT, OS_FREE_BSD, CAP_ABOVE16MB, DEV_ALL, ADF_ALL_SC5,
258 0, 0, ASR_VERSION, ASR_REVISION, ASR_SUBREVISION,
259 ASR_MONTH, ASR_DAY, ASR_YEAR,
260 /* 01234567890123456789012345678901234567890123456789 < 50 chars */
261 "Adaptec FreeBSD 4.0.0 Unix SCSI I2O HBA Driver"
262 /* ^^^^^ asr_attach alters these to match OS */
265 /* Configuration Definitions */
267 #define SG_SIZE 58 /* Scatter Gather list Size */
268 #define MAX_TARGET_ID 126 /* Maximum Target ID supported */
269 #define MAX_LUN 255 /* Maximum LUN Supported */
270 #define MAX_CHANNEL 7 /* Maximum Channel # Supported by driver */
271 #define MAX_INBOUND 2000 /* Max CCBs, Also Max Queue Size */
272 #define MAX_OUTBOUND 256 /* Maximum outbound frames/adapter */
273 #define MAX_INBOUND_SIZE 512 /* Maximum inbound frame size */
274 #define MAX_MAP 4194304L /* Maximum mapping size of IOP */
275 /* Also serves as the minimum map for */
276 /* the 2005S zero channel RAID product */
278 /* I2O register set */
279 #define I2O_REG_STATUS 0x30
280 #define I2O_REG_MASK 0x34
281 #define I2O_REG_TOFIFO 0x40
282 #define I2O_REG_FROMFIFO 0x44
284 #define Mask_InterruptsDisabled 0x08
287 * A MIX of performance and space considerations for TID lookups
289 typedef u_int16_t tid_t;
292 u_int32_t size; /* up to MAX_LUN */
297 u_int32_t size; /* up to MAX_TARGET */
302 * To ensure that we only allocate and use the worst case ccb here, lets
303 * make our own local ccb union. If asr_alloc_ccb is utilized for another
304 * ccb type, ensure that you add the additional structures into our local
305 * ccb union. To ensure strict type checking, we will utilize the local
306 * ccb definition wherever possible.
309 struct ccb_hdr ccb_h; /* For convenience */
310 struct ccb_scsiio csio;
311 struct ccb_setasync csa;
314 struct Asr_status_mem {
315 I2O_EXEC_STATUS_GET_REPLY status;
319 /**************************************************************************
320 ** ASR Host Adapter structure - One Structure For Each Host Adapter That **
321 ** Is Configured Into The System. The Structure Supplies Configuration **
322 ** Information, Status Info, Queue Info And An Active CCB List Pointer. **
323 ***************************************************************************/
325 typedef struct Asr_softc {
328 u_long ha_Base; /* base port for each board */
329 bus_size_t ha_blinkLED;
330 bus_space_handle_t ha_i2o_bhandle;
331 bus_space_tag_t ha_i2o_btag;
332 bus_space_handle_t ha_frame_bhandle;
333 bus_space_tag_t ha_frame_btag;
334 I2O_IOP_ENTRY ha_SystemTable;
335 LIST_HEAD(,ccb_hdr) ha_ccb; /* ccbs in use */
337 bus_dma_tag_t ha_parent_dmat;
338 bus_dma_tag_t ha_statusmem_dmat;
339 bus_dmamap_t ha_statusmem_dmamap;
340 struct Asr_status_mem * ha_statusmem;
341 u_int32_t ha_rstatus_phys;
342 u_int32_t ha_status_phys;
343 struct cam_path * ha_path[MAX_CHANNEL+1];
344 struct cam_sim * ha_sim[MAX_CHANNEL+1];
345 struct resource * ha_mem_res;
346 struct resource * ha_mes_res;
347 struct resource * ha_irq_res;
349 PI2O_LCT ha_LCT; /* Complete list of devices */
350 #define le_type IdentityTag[0]
353 #define I2O_SCSI 0x00
354 #define I2O_PORT 0x80
355 #define I2O_UNKNOWN 0x7F
356 #define le_bus IdentityTag[1]
357 #define le_target IdentityTag[2]
358 #define le_lun IdentityTag[3]
359 target2lun_t * ha_targets[MAX_CHANNEL+1];
360 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME ha_Msgs;
363 u_int8_t ha_in_reset;
364 #define HA_OPERATIONAL 0
365 #define HA_IN_RESET 1
366 #define HA_OFF_LINE 2
367 #define HA_OFF_LINE_RECOVERY 3
368 /* Configuration information */
369 /* The target id maximums we take */
370 u_int8_t ha_MaxBus; /* Maximum bus */
371 u_int8_t ha_MaxId; /* Maximum target ID */
372 u_int8_t ha_MaxLun; /* Maximum target LUN */
373 u_int8_t ha_SgSize; /* Max SG elements */
374 u_int8_t ha_pciBusNum;
375 u_int8_t ha_pciDeviceNum;
376 u_int8_t ha_adapter_target[MAX_CHANNEL+1];
377 u_int16_t ha_QueueSize; /* Max outstanding commands */
378 u_int16_t ha_Msgs_Count;
380 /* Links into other parents and HBAs */
381 STAILQ_ENTRY(Asr_softc) ha_next; /* HBA list */
382 struct cdev *ha_devt;
385 static STAILQ_HEAD(, Asr_softc) Asr_softc_list =
386 STAILQ_HEAD_INITIALIZER(Asr_softc_list);
389 set_ccb_timeout_ch(union asr_ccb *ccb, struct callout_handle ch)
391 ccb->ccb_h.sim_priv.entries[0].ptr = ch.callout;
394 static __inline struct callout_handle
395 get_ccb_timeout_ch(union asr_ccb *ccb)
397 struct callout_handle ch;
399 ch.callout = ccb->ccb_h.sim_priv.entries[0].ptr;
404 * Prototypes of the routines we have in this object.
407 /* I2O HDM interface */
408 static int asr_probe(device_t dev);
409 static int asr_attach(device_t dev);
411 static int asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag,
413 static int asr_open(struct cdev *dev, int32_t flags, int32_t ifmt,
415 static int asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td);
416 static int asr_intr(Asr_softc_t *sc);
417 static void asr_timeout(void *arg);
418 static int ASR_init(Asr_softc_t *sc);
419 static int ASR_acquireLct(Asr_softc_t *sc);
420 static int ASR_acquireHrt(Asr_softc_t *sc);
421 static void asr_action(struct cam_sim *sim, union ccb *ccb);
422 static void asr_poll(struct cam_sim *sim);
423 static int ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message);
426 * Here is the auto-probe structure used to nest our tests appropriately
427 * during the startup phase of the operating system.
429 static device_method_t asr_methods[] = {
430 DEVMETHOD(device_probe, asr_probe),
431 DEVMETHOD(device_attach, asr_attach),
435 static driver_t asr_driver = {
441 static devclass_t asr_devclass;
442 DRIVER_MODULE(asr, pci, asr_driver, asr_devclass, 0, 0);
443 MODULE_DEPEND(asr, pci, 1, 1, 1);
444 MODULE_DEPEND(asr, cam, 1, 1, 1);
447 * devsw for asr hba driver
449 * only ioctl is used. the sd driver provides all other access.
451 static struct cdevsw asr_cdevsw = {
452 .d_version = D_VERSION,
453 .d_flags = D_NEEDGIANT,
455 .d_close = asr_close,
456 .d_ioctl = asr_ioctl,
460 /* I2O support routines */
462 static __inline u_int32_t
463 asr_get_FromFIFO(Asr_softc_t *sc)
465 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
469 static __inline u_int32_t
470 asr_get_ToFIFO(Asr_softc_t *sc)
472 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
476 static __inline u_int32_t
477 asr_get_intr(Asr_softc_t *sc)
479 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
483 static __inline u_int32_t
484 asr_get_status(Asr_softc_t *sc)
486 return (bus_space_read_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle,
491 asr_set_FromFIFO(Asr_softc_t *sc, u_int32_t val)
493 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_FROMFIFO,
498 asr_set_ToFIFO(Asr_softc_t *sc, u_int32_t val)
500 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_TOFIFO,
505 asr_set_intr(Asr_softc_t *sc, u_int32_t val)
507 bus_space_write_4(sc->ha_i2o_btag, sc->ha_i2o_bhandle, I2O_REG_MASK,
512 asr_set_frame(Asr_softc_t *sc, void *frame, u_int32_t offset, int len)
514 bus_space_write_region_4(sc->ha_frame_btag, sc->ha_frame_bhandle,
515 offset, (u_int32_t *)frame, len);
519 * Fill message with default.
521 static PI2O_MESSAGE_FRAME
522 ASR_fillMessage(void *Message, u_int16_t size)
524 PI2O_MESSAGE_FRAME Message_Ptr;
526 Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
527 bzero(Message_Ptr, size);
528 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11);
529 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
530 (size + sizeof(U32) - 1) >> 2);
531 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
532 KASSERT(Message_Ptr != NULL, ("Message_Ptr == NULL"));
533 return (Message_Ptr);
534 } /* ASR_fillMessage */
536 #define EMPTY_QUEUE (0xffffffff)
539 ASR_getMessage(Asr_softc_t *sc)
543 MessageOffset = asr_get_ToFIFO(sc);
544 if (MessageOffset == EMPTY_QUEUE)
545 MessageOffset = asr_get_ToFIFO(sc);
547 return (MessageOffset);
548 } /* ASR_getMessage */
550 /* Issue a polled command */
552 ASR_initiateCp(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
554 U32 Mask = 0xffffffff;
559 * ASR_initiateCp is only used for synchronous commands and will
560 * be made more resiliant to adapter delays since commands like
561 * resetIOP can cause the adapter to be deaf for a little time.
563 while (((MessageOffset = ASR_getMessage(sc)) == EMPTY_QUEUE)
567 if (MessageOffset != EMPTY_QUEUE) {
568 asr_set_frame(sc, Message, MessageOffset,
569 I2O_MESSAGE_FRAME_getMessageSize(Message));
571 * Disable the Interrupts
573 Mask = asr_get_intr(sc);
574 asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
575 asr_set_ToFIFO(sc, MessageOffset);
578 } /* ASR_initiateCp */
584 ASR_resetIOP(Asr_softc_t *sc)
586 I2O_EXEC_IOP_RESET_MESSAGE Message;
587 PI2O_EXEC_IOP_RESET_MESSAGE Message_Ptr;
592 * Build up our copy of the Message.
594 Message_Ptr = (PI2O_EXEC_IOP_RESET_MESSAGE)ASR_fillMessage(&Message,
595 sizeof(I2O_EXEC_IOP_RESET_MESSAGE));
596 I2O_EXEC_IOP_RESET_MESSAGE_setFunction(Message_Ptr, I2O_EXEC_IOP_RESET);
598 * Reset the Reply Status
600 Reply_Ptr = &sc->ha_statusmem->rstatus;
602 I2O_EXEC_IOP_RESET_MESSAGE_setStatusWordLowAddress(Message_Ptr,
603 sc->ha_rstatus_phys);
605 * Send the Message out
607 if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
610 * Wait for a response (Poll), timeouts are dangerous if
611 * the card is truly responsive. We assume response in 2s.
613 u_int8_t Delay = 200;
615 while ((*Reply_Ptr == 0) && (--Delay != 0)) {
619 * Re-enable the interrupts.
621 asr_set_intr(sc, Old);
622 KASSERT(*Reply_Ptr != 0, ("*Reply_Ptr == 0"));
625 KASSERT(Old != 0xffffffff, ("Old == -1"));
 * Get the current state of the adapter
632 static PI2O_EXEC_STATUS_GET_REPLY
633 ASR_getStatus(Asr_softc_t *sc)
635 I2O_EXEC_STATUS_GET_MESSAGE Message;
636 PI2O_EXEC_STATUS_GET_MESSAGE Message_Ptr;
637 PI2O_EXEC_STATUS_GET_REPLY buffer;
641 * Build up our copy of the Message.
643 Message_Ptr = (PI2O_EXEC_STATUS_GET_MESSAGE)ASR_fillMessage(&Message,
644 sizeof(I2O_EXEC_STATUS_GET_MESSAGE));
645 I2O_EXEC_STATUS_GET_MESSAGE_setFunction(Message_Ptr,
646 I2O_EXEC_STATUS_GET);
647 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferAddressLow(Message_Ptr,
649 /* This one is a Byte Count */
650 I2O_EXEC_STATUS_GET_MESSAGE_setReplyBufferLength(Message_Ptr,
651 sizeof(I2O_EXEC_STATUS_GET_REPLY));
653 * Reset the Reply Status
655 buffer = &sc->ha_statusmem->status;
656 bzero(buffer, sizeof(I2O_EXEC_STATUS_GET_REPLY));
658 * Send the Message out
660 if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
663 * Wait for a response (Poll), timeouts are dangerous if
664 * the card is truly responsive. We assume response in 50ms.
666 u_int8_t Delay = 255;
668 while (*((U8 * volatile)&(buffer->SyncByte)) == 0) {
676 * Re-enable the interrupts.
678 asr_set_intr(sc, Old);
682 } /* ASR_getStatus */
685 * Check if the device is a SCSI I2O HBA, and add it to the list.
689 * Probe for ASR controller. If we find it, we will use it.
693 asr_probe(device_t dev)
697 id = (pci_get_device(dev) << 16) | pci_get_vendor(dev);
698 if ((id == 0xA5011044) || (id == 0xA5111044)) {
699 device_set_desc(dev, "Adaptec Caching SCSI RAID");
700 return (BUS_PROBE_DEFAULT);
705 static __inline union asr_ccb *
706 asr_alloc_ccb(Asr_softc_t *sc)
708 union asr_ccb *new_ccb;
710 if ((new_ccb = (union asr_ccb *)malloc(sizeof(*new_ccb),
711 M_DEVBUF, M_WAITOK | M_ZERO)) != NULL) {
712 new_ccb->ccb_h.pinfo.priority = 1;
713 new_ccb->ccb_h.pinfo.index = CAM_UNQUEUED_INDEX;
714 new_ccb->ccb_h.spriv_ptr0 = sc;
717 } /* asr_alloc_ccb */
720 asr_free_ccb(union asr_ccb *free_ccb)
722 free(free_ccb, M_DEVBUF);
726 * Print inquiry data `carefully'
729 ASR_prstring(u_int8_t *s, int len)
731 while ((--len >= 0) && (*s) && (*s != ' ') && (*s != '-')) {
732 printf ("%c", *(s++));
737 * Send a message synchronously and without Interrupt to a ccb.
740 ASR_queue_s(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
744 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
747 * We do not need any (optional byteswapping) method access to
748 * the Initiator context field.
750 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
752 /* Prevent interrupt service */
754 Mask = asr_get_intr(sc);
755 asr_set_intr(sc, Mask | Mask_InterruptsDisabled);
757 if (ASR_queue(sc, Message) == EMPTY_QUEUE) {
758 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
759 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
763 * Wait for this board to report a finished instruction.
765 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
769 /* Re-enable Interrupts */
770 asr_set_intr(sc, Mask);
773 return (ccb->ccb_h.status);
777 * Send a message synchronously to an Asr_softc_t.
780 ASR_queue_c(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
785 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
786 return (CAM_REQUEUE_REQ);
789 status = ASR_queue_s (ccb, Message);
797 * Add the specified ccb to the active queue
800 ASR_ccbAdd(Asr_softc_t *sc, union asr_ccb *ccb)
805 LIST_INSERT_HEAD(&(sc->ha_ccb), &(ccb->ccb_h), sim_links.le);
806 if (ccb->ccb_h.timeout != CAM_TIME_INFINITY) {
807 if (ccb->ccb_h.timeout == CAM_TIME_DEFAULT) {
809 * RAID systems can take considerable time to
810 * complete some commands given the large cache
811 * flashes switching from write back to write thru.
813 ccb->ccb_h.timeout = 6 * 60 * 1000;
815 set_ccb_timeout_ch(ccb, timeout(asr_timeout, (caddr_t)ccb,
816 (ccb->ccb_h.timeout * hz) / 1000));
822 * Remove the specified ccb from the active queue.
825 ASR_ccbRemove(Asr_softc_t *sc, union asr_ccb *ccb)
830 untimeout(asr_timeout, (caddr_t)ccb, get_ccb_timeout_ch(ccb));
831 LIST_REMOVE(&(ccb->ccb_h), sim_links.le);
833 } /* ASR_ccbRemove */
836 * Fail all the active commands, so they get re-issued by the operating
840 ASR_failActiveCommands(Asr_softc_t *sc)
847 * We do not need to inform the CAM layer that we had a bus
848 * reset since we manage it on our own, this also prevents the
849 * SCSI_DELAY settling that would be required on other systems.
850 * The `SCSI_DELAY' has already been handled by the card via the
851 * acquisition of the LCT table while we are at CAM priority level.
852 * for (int bus = 0; bus <= sc->ha_MaxBus; ++bus) {
853 * xpt_async (AC_BUS_RESET, sc->ha_path[bus], NULL);
856 while ((ccb = LIST_FIRST(&(sc->ha_ccb))) != NULL) {
857 ASR_ccbRemove (sc, (union asr_ccb *)ccb);
859 ccb->status &= ~CAM_STATUS_MASK;
860 ccb->status |= CAM_REQUEUE_REQ;
861 /* Nothing Transfered */
862 ((struct ccb_scsiio *)ccb)->resid
863 = ((struct ccb_scsiio *)ccb)->dxfer_len;
866 xpt_done ((union ccb *)ccb);
872 } /* ASR_failActiveCommands */
875 * The following command causes the HBA to reset the specific bus
878 ASR_resetBus(Asr_softc_t *sc, int bus)
880 I2O_HBA_BUS_RESET_MESSAGE Message;
881 I2O_HBA_BUS_RESET_MESSAGE *Message_Ptr;
882 PI2O_LCT_ENTRY Device;
884 Message_Ptr = (I2O_HBA_BUS_RESET_MESSAGE *)ASR_fillMessage(&Message,
885 sizeof(I2O_HBA_BUS_RESET_MESSAGE));
886 I2O_MESSAGE_FRAME_setFunction(&Message_Ptr->StdMessageFrame,
888 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
889 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
891 if (((Device->le_type & I2O_PORT) != 0)
892 && (Device->le_bus == bus)) {
893 I2O_MESSAGE_FRAME_setTargetAddress(
894 &Message_Ptr->StdMessageFrame,
895 I2O_LCT_ENTRY_getLocalTID(Device));
896 /* Asynchronous command, with no expectations */
897 (void)ASR_queue(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
904 ASR_getBlinkLedCode(Asr_softc_t *sc)
911 blink = bus_space_read_1(sc->ha_frame_btag,
912 sc->ha_frame_bhandle, sc->ha_blinkLED + 1);
916 blink = bus_space_read_1(sc->ha_frame_btag,
917 sc->ha_frame_bhandle, sc->ha_blinkLED);
919 } /* ASR_getBlinkCode */
922 * Determine the address of an TID lookup. Must be done at high priority
923 * since the address can be changed by other threads of execution.
 * Returns NULL pointer if not indexable (but will attempt to generate
 * an index if `new_entry' flag is set to TRUE).
 * All addressable entries are to be guaranteed zero if never initialized.
931 ASR_getTidAddress(Asr_softc_t *sc, int bus, int target, int lun, int new_entry)
933 target2lun_t *bus_ptr;
934 lun2tid_t *target_ptr;
938 * Validity checking of incoming parameters. More of a bound
939 * expansion limit than an issue with the code dealing with the
942 * sc must be valid before it gets here, so that check could be
943 * dropped if speed a critical issue.
946 || (bus > MAX_CHANNEL)
947 || (target > sc->ha_MaxId)
948 || (lun > sc->ha_MaxLun)) {
949 debug_asr_printf("(%lx,%d,%d,%d) target out of range\n",
950 (u_long)sc, bus, target, lun);
954 * See if there is an associated bus list.
956 * for performance, allocate in size of BUS_CHUNK chunks.
957 * BUS_CHUNK must be a power of two. This is to reduce
958 * fragmentation effects on the allocations.
961 new_size = ((target + BUS_CHUNK - 1) & ~(BUS_CHUNK - 1));
962 if ((bus_ptr = sc->ha_targets[bus]) == NULL) {
964 * Allocate a new structure?
965 * Since one element in structure, the +1
966 * needed for size has been abstracted.
968 if ((new_entry == FALSE)
969 || ((sc->ha_targets[bus] = bus_ptr = (target2lun_t *)malloc (
970 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
971 M_TEMP, M_WAITOK | M_ZERO))
973 debug_asr_printf("failed to allocate bus list\n");
976 bus_ptr->size = new_size + 1;
977 } else if (bus_ptr->size <= new_size) {
978 target2lun_t * new_bus_ptr;
981 * Reallocate a new structure?
982 * Since one element in structure, the +1
983 * needed for size has been abstracted.
985 if ((new_entry == FALSE)
986 || ((new_bus_ptr = (target2lun_t *)malloc (
987 sizeof(*bus_ptr) + (sizeof(bus_ptr->LUN) * new_size),
988 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
989 debug_asr_printf("failed to reallocate bus list\n");
993 * Copy the whole thing, safer, simpler coding
994 * and not really performance critical at this point.
996 bcopy(bus_ptr, new_bus_ptr, sizeof(*bus_ptr)
997 + (sizeof(bus_ptr->LUN) * (bus_ptr->size - 1)));
998 sc->ha_targets[bus] = new_bus_ptr;
999 free(bus_ptr, M_TEMP);
1000 bus_ptr = new_bus_ptr;
1001 bus_ptr->size = new_size + 1;
1004 * We now have the bus list, lets get to the target list.
1005 * Since most systems have only *one* lun, we do not allocate
1006 * in chunks as above, here we allow one, then in chunk sizes.
1007 * TARGET_CHUNK must be a power of two. This is to reduce
1008 * fragmentation effects on the allocations.
1010 #define TARGET_CHUNK 8
1011 if ((new_size = lun) != 0) {
1012 new_size = ((lun + TARGET_CHUNK - 1) & ~(TARGET_CHUNK - 1));
1014 if ((target_ptr = bus_ptr->LUN[target]) == NULL) {
1016 * Allocate a new structure?
1017 * Since one element in structure, the +1
1018 * needed for size has been abstracted.
1020 if ((new_entry == FALSE)
1021 || ((bus_ptr->LUN[target] = target_ptr = (lun2tid_t *)malloc (
1022 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1023 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1024 debug_asr_printf("failed to allocate target list\n");
1027 target_ptr->size = new_size + 1;
1028 } else if (target_ptr->size <= new_size) {
1029 lun2tid_t * new_target_ptr;
1032 * Reallocate a new structure?
1033 * Since one element in structure, the +1
1034 * needed for size has been abstracted.
1036 if ((new_entry == FALSE)
1037 || ((new_target_ptr = (lun2tid_t *)malloc (
1038 sizeof(*target_ptr) + (sizeof(target_ptr->TID) * new_size),
1039 M_TEMP, M_WAITOK | M_ZERO)) == NULL)) {
1040 debug_asr_printf("failed to reallocate target list\n");
1044 * Copy the whole thing, safer, simpler coding
1045 * and not really performance critical at this point.
1047 bcopy(target_ptr, new_target_ptr, sizeof(*target_ptr)
1048 + (sizeof(target_ptr->TID) * (target_ptr->size - 1)));
1049 bus_ptr->LUN[target] = new_target_ptr;
1050 free(target_ptr, M_TEMP);
1051 target_ptr = new_target_ptr;
1052 target_ptr->size = new_size + 1;
1055 * Now, acquire the TID address from the LUN indexed list.
1057 return (&(target_ptr->TID[lun]));
1058 } /* ASR_getTidAddress */
1061 * Get a pre-existing TID relationship.
1063 * If the TID was never set, return (tid_t)-1.
1065 * should use mutex rather than spl.
1067 static __inline tid_t
1068 ASR_getTid(Asr_softc_t *sc, int bus, int target, int lun)
1075 if (((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, FALSE)) == NULL)
1076 /* (tid_t)0 or (tid_t)-1 indicate no TID */
1077 || (*tid_ptr == (tid_t)0)) {
1087 * Set a TID relationship.
1089 * If the TID was not set, return (tid_t)-1.
1091 * should use mutex rather than spl.
1093 static __inline tid_t
1094 ASR_setTid(Asr_softc_t *sc, int bus, int target, int lun, tid_t TID)
1099 if (TID != (tid_t)-1) {
1104 if ((tid_ptr = ASR_getTidAddress(sc, bus, target, lun, TRUE))
1115 /*-------------------------------------------------------------------------*/
1116 /* Function ASR_rescan */
1117 /*-------------------------------------------------------------------------*/
1118 /* The Parameters Passed To This Function Are : */
1119 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1121 /* This Function Will rescan the adapter and resynchronize any data */
1123 /* Return : 0 For OK, Error Code Otherwise */
1124 /*-------------------------------------------------------------------------*/
/*
 * Rescan the adapter after a reset/configuration change: re-acquire the
 * LCT (and HRT), then walk every (bus, target, lun) comparing the cached
 * TID against the freshly read LCT.  Differences are reported to CAM via
 * xpt_async() -- AC_LOST_DEVICE when a device vanished, or
 * AC_INQ_CHANGED | AC_GETDEV_CHANGED when its TID moved -- and the new
 * TID is re-cached with ASR_setTid().
 * NOTE(review): source lines are elided in this view; only the visible
 * statements are documented here.
 */
1127 ASR_rescan(Asr_softc_t *sc)
1133 * Re-acquire the LCT table and synchronize us to the adapter.
1135 if ((error = ASR_acquireLct(sc)) == 0) {
1136 error = ASR_acquireHrt(sc);
1143 bus = sc->ha_MaxBus;
1144 /* Reset all existing cached TID lookups */
1146 int target, event = 0;
1149 * Scan for all targets on this bus to see if they
1150 * got affected by the rescan.
1152 for (target = 0; target <= sc->ha_MaxId; ++target) {
1155 /* Stay away from the controller ID */
1156 if (target == sc->ha_adapter_target[bus]) {
1159 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
1160 PI2O_LCT_ENTRY Device;
1161 tid_t TID = (tid_t)-1;
1165 * See if the cached TID changed. Search for
1166 * the device in our new LCT.
1168 for (Device = sc->ha_LCT->LCTEntry;
1169 Device < (PI2O_LCT_ENTRY)(((U32 *)sc->ha_LCT)
1170 + I2O_LCT_getTableSize(sc->ha_LCT));
1172 if ((Device->le_type != I2O_UNKNOWN)
1173 && (Device->le_bus == bus)
1174 && (Device->le_target == target)
1175 && (Device->le_lun == lun)
1176 && (I2O_LCT_ENTRY_getUserTID(Device)
1178 TID = I2O_LCT_ENTRY_getLocalTID(
1184 * Indicate to the OS that the label needs
1185 * to be recalculated, or that the specific
1186 * open device is no longer valid (Merde)
1187 * because the cached TID changed.
1189 LastTID = ASR_getTid (sc, bus, target, lun);
1190 if (LastTID != TID) {
1191 struct cam_path * path;
1193 if (xpt_create_path(&path,
1195 cam_sim_path(sc->ha_sim[bus]),
1196 target, lun) != CAM_REQ_CMP) {
1197 if (TID == (tid_t)-1) {
1198 event |= AC_LOST_DEVICE;
1200 event |= AC_INQ_CHANGED
1201 | AC_GETDEV_CHANGED;
1204 if (TID == (tid_t)-1) {
1208 } else if (LastTID == (tid_t)-1) {
1209 struct ccb_getdev ccb;
1213 path, /*priority*/5);
1229 * We have the option of clearing the
1230 * cached TID for it to be rescanned, or to
1231 * set it now even if the device never got
1232 * accessed. We chose the latter since we
1233 * currently do not use the condition that
1234 * the TID ever got cached.
1236 ASR_setTid (sc, bus, target, lun, TID);
1240 * The xpt layer can not handle multiple events at the
1243 if (event & AC_LOST_DEVICE) {
1244 xpt_async(AC_LOST_DEVICE, sc->ha_path[bus], NULL);
1246 if (event & AC_INQ_CHANGED) {
1247 xpt_async(AC_INQ_CHANGED, sc->ha_path[bus], NULL);
1249 if (event & AC_GETDEV_CHANGED) {
1250 xpt_async(AC_GETDEV_CHANGED, sc->ha_path[bus], NULL);
1252 } while (--bus >= 0);
1256 /*-------------------------------------------------------------------------*/
1257 /* Function ASR_reset */
1258 /*-------------------------------------------------------------------------*/
1259 /* The Parameters Passed To This Function Are : */
1260 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
1262 /* This Function Will reset the adapter and resynchronize any data */
1265 /*-------------------------------------------------------------------------*/
/*
 * Reset the IOP and bring the adapter back to an operational state.
 * Rejects re-entrant resets via ha_in_reset, retries ASR_resetIOP()
 * indefinitely on failure, then re-initializes (ASR_init) and rescans
 * (ASR_rescan) before failing any still-active commands.
 * Fix: user-visible printf typo "Brining" -> "Bringing".
 * NOTE(review): source lines are elided in this view; only the visible
 * statements are documented here.
 */
1268 ASR_reset(Asr_softc_t *sc)
1273 if ((sc->ha_in_reset == HA_IN_RESET)
1274 || (sc->ha_in_reset == HA_OFF_LINE_RECOVERY)) {
1279 * Promotes HA_OPERATIONAL to HA_IN_RESET,
1280 * or HA_OFF_LINE to HA_OFF_LINE_RECOVERY.
1282 ++(sc->ha_in_reset);
1283 if (ASR_resetIOP(sc) == 0) {
1284 debug_asr_printf ("ASR_resetIOP failed\n");
1286 * We really need to take this card off-line, easier said
1287 * than make sense. Better to keep retrying for now since if a
1288 * UART cable is connected the blinkLEDs the adapter is now in
1289 * a hard state requiring action from the monitor commands to
1290 * the HBA to continue. For debugging waiting forever is a
1291 * good thing. In a production system, however, one may wish
1292 * to instead take the card off-line ...
1295 while (ASR_resetIOP(sc) == 0);
1297 retVal = ASR_init (sc);
1300 debug_asr_printf ("ASR_init failed\n");
1301 sc->ha_in_reset = HA_OFF_LINE;
1304 if (ASR_rescan (sc) != 0) {
1305 debug_asr_printf ("ASR_rescan failed\n");
1307 ASR_failActiveCommands (sc);
1308 if (sc->ha_in_reset == HA_OFF_LINE_RECOVERY) {
1309 printf ("asr%d: Bringing adapter back on-line\n",
1311 ? cam_sim_unit(xpt_path_sim(sc->ha_path[0]))
1314 sc->ha_in_reset = HA_OPERATIONAL;
1319 * Device timeout handler.
/*
 * CCB timeout handler.  If the adapter reports a Blink-LED code it has
 * locked up, so a full ASR_reset() is attempted (re-armed via timeout()
 * on ENXIO).  Otherwise, because ABORT does not work on this hardware,
 * the first timeout triggers a SCSI bus reset (marking the CCB
 * CAM_CMD_TIMEOUT); a second timeout on the same CCB escalates to a
 * full adapter reset.
 * NOTE(review): source lines are elided in this view.
 */
1322 asr_timeout(void *arg)
1324 union asr_ccb *ccb = (union asr_ccb *)arg;
1325 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1328 debug_asr_print_path(ccb);
1329 debug_asr_printf("timed out");
1332 * Check if the adapter has locked up?
1334 if ((s = ASR_getBlinkLedCode(sc)) != 0) {
1336 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
1337 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)), s);
1338 if (ASR_reset (sc) == ENXIO) {
1339 /* Try again later */
1340 set_ccb_timeout_ch(ccb, timeout(asr_timeout,
1342 (ccb->ccb_h.timeout * hz) / 1000));
1347 * Abort does not function on the ASR card!!! Walking away from
1348 * the SCSI command is also *very* dangerous. A SCSI BUS reset is
1349 * our best bet, followed by a complete adapter reset if that fails.
1352 /* Check if we already timed out once to raise the issue */
1353 if ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_CMD_TIMEOUT) {
1354 debug_asr_printf (" AGAIN\nreinitializing adapter\n");
1355 if (ASR_reset (sc) == ENXIO) {
1356 set_ccb_timeout_ch(ccb, timeout(asr_timeout,
1358 (ccb->ccb_h.timeout * hz) / 1000));
1363 debug_asr_printf ("\nresetting bus\n");
1364 /* If the BUS reset does not take, then an adapter reset is next! */
1365 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
1366 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
1367 set_ccb_timeout_ch(ccb, timeout(asr_timeout, (caddr_t)ccb,
1368 (ccb->ccb_h.timeout * hz) / 1000));
1369 ASR_resetBus (sc, cam_sim_bus(xpt_path_sim(ccb->ccb_h.path)));
1370 xpt_async (AC_BUS_RESET, ccb->ccb_h.path, NULL);
1375 * send a message asynchronously
/*
 * Post a message frame to the adapter asynchronously: grab an inbound
 * frame offset (ASR_getMessage), copy the message in (asr_set_frame),
 * track the owning CCB (ASR_ccbAdd), then kick the ToFIFO.  If no frame
 * is available and the adapter shows a Blink-LED code, attempt a reset
 * as a best effort.  Returns the message offset (EMPTY_QUEUE on
 * failure, per the visible control flow).
 * NOTE(review): source lines are elided in this view.
 */
1378 ASR_queue(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Message)
1383 debug_asr_printf("Host Command Dump:\n");
1384 debug_asr_dump_message(Message);
1386 ccb = (union asr_ccb *)(long)
1387 I2O_MESSAGE_FRAME_getInitiatorContext64(Message);
1389 if ((MessageOffset = ASR_getMessage(sc)) != EMPTY_QUEUE) {
1390 asr_set_frame(sc, Message, MessageOffset,
1391 I2O_MESSAGE_FRAME_getMessageSize(Message));
1393 ASR_ccbAdd (sc, ccb);
1395 /* Post the command */
1396 asr_set_ToFIFO(sc, MessageOffset);
1398 if (ASR_getBlinkLedCode(sc)) {
1400 * Unlikely we can do anything if we can't grab a
1401 * message frame :-(, but lets give it a try.
1403 (void)ASR_reset(sc);
1406 return (MessageOffset);
1410 /* Simple Scatter Gather elements */
/*
 * SG(SGL, Index, Flags, Buffer, Size): fill in simple scatter/gather
 * element `Index` of list `SGL` -- set its count, OR the caller's Flags
 * with I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT, and store the physical
 * address of Buffer (KVTOPHYS, or 0 when Buffer is NULL).  Classic
 * multiple-evaluation macro: arguments must be side-effect free.
 * NOTE(review): the setCount size-argument line is elided in this view.
 */
1411 #define SG(SGL,Index,Flags,Buffer,Size) \
1412 I2O_FLAGS_COUNT_setCount( \
1413 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1415 I2O_FLAGS_COUNT_setFlags( \
1416 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index].FlagsCount), \
1417 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | (Flags)); \
1418 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress( \
1419 &(((PI2O_SG_ELEMENT)(SGL))->u.Simple[Index]), \
1420 (Buffer == NULL) ? 0 : KVTOPHYS(Buffer))
1423 * Retrieve Parameter Group.
/*
 * Retrieve an I2O parameter group from device `TID` into the caller's
 * Buffer via a UTIL_PARAMS_GET message with a two-element SG list
 * (operations template out, results in).  Returns a pointer to the
 * result Info area on success, or (per the visible flow) NULL when the
 * queue fails or no results came back.
 * NOTE(review): source lines are elided in this view.
 */
1426 ASR_getParams(Asr_softc_t *sc, tid_t TID, int Group, void *Buffer,
1427 unsigned BufferSize)
1429 struct paramGetMessage {
1430 I2O_UTIL_PARAMS_GET_MESSAGE M;
1432 F[sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT)];
1434 I2O_PARAM_OPERATIONS_LIST_HEADER Header;
1435 I2O_PARAM_OPERATION_ALL_TEMPLATE Template[1];
1438 struct Operations *Operations_Ptr;
1439 I2O_UTIL_PARAMS_GET_MESSAGE *Message_Ptr;
1440 struct ParamBuffer {
1441 I2O_PARAM_RESULTS_LIST_HEADER Header;
1442 I2O_PARAM_READ_OPERATION_RESULT Read;
1446 Message_Ptr = (I2O_UTIL_PARAMS_GET_MESSAGE *)ASR_fillMessage(&Message,
1447 sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1448 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1449 Operations_Ptr = (struct Operations *)((char *)Message_Ptr
1450 + sizeof(I2O_UTIL_PARAMS_GET_MESSAGE)
1451 + sizeof(I2O_SGE_SIMPLE_ELEMENT)*2 - sizeof(I2O_SG_ELEMENT));
1452 bzero(Operations_Ptr, sizeof(struct Operations));
1453 I2O_PARAM_OPERATIONS_LIST_HEADER_setOperationCount(
1454 &(Operations_Ptr->Header), 1);
1455 I2O_PARAM_OPERATION_ALL_TEMPLATE_setOperation(
1456 &(Operations_Ptr->Template[0]), I2O_PARAMS_OPERATION_FIELD_GET);
1457 I2O_PARAM_OPERATION_ALL_TEMPLATE_setFieldCount(
1458 &(Operations_Ptr->Template[0]), 0xFFFF);
1459 I2O_PARAM_OPERATION_ALL_TEMPLATE_setGroupNumber(
1460 &(Operations_Ptr->Template[0]), Group);
1461 Buffer_Ptr = (struct ParamBuffer *)Buffer;
1462 bzero(Buffer_Ptr, BufferSize);
1464 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1466 + (((sizeof(I2O_UTIL_PARAMS_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1467 / sizeof(U32)) << 4));
1468 I2O_MESSAGE_FRAME_setTargetAddress (&(Message_Ptr->StdMessageFrame),
1470 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
1471 I2O_UTIL_PARAMS_GET);
1473 * Set up the buffers as scatter gather elements.
1475 SG(&(Message_Ptr->SGL), 0,
1476 I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER,
1477 Operations_Ptr, sizeof(struct Operations));
1478 SG(&(Message_Ptr->SGL), 1,
1479 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1480 Buffer_Ptr, BufferSize);
1482 if ((ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) == CAM_REQ_CMP)
1483 && (Buffer_Ptr->Header.ResultCount)) {
1484 return ((void *)(Buffer_Ptr->Info));
1487 } /* ASR_getParams */
1490 * Acquire the LCT information.
/*
 * Acquire the Logical Configuration Table (LCT) from the IOP.  First
 * issues an EXEC_LCT_NOTIFY with a single-entry SG to learn the table
 * size, then allocates a buffer (bounded: > header, <= 128KB), converts
 * it into a physically-contiguous-span SG list (growing/reallocating the
 * message frame as elements are added), re-issues the notify, and
 * finally classifies each LCT entry (le_type/le_bus/le_target/le_lun)
 * using I2O class IDs and, for HBA/DPT classes, ASR_getParams() scalar
 * reads.  Returns 0 on success.
 * NOTE(review): source lines are elided in this view; only the visible
 * statements are documented here.
 */
1493 ASR_acquireLct(Asr_softc_t *sc)
1495 PI2O_EXEC_LCT_NOTIFY_MESSAGE Message_Ptr;
1496 PI2O_SGE_SIMPLE_ELEMENT sg;
1497 int MessageSizeInBytes;
1501 PI2O_LCT_ENTRY Entry;
1504 * sc value assumed valid
1506 MessageSizeInBytes = sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1507 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT);
1508 if ((Message_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)malloc(
1509 MessageSizeInBytes, M_TEMP, M_WAITOK)) == NULL) {
1512 (void)ASR_fillMessage((void *)Message_Ptr, MessageSizeInBytes);
1513 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
1514 (I2O_VERSION_11 + (((sizeof(I2O_EXEC_LCT_NOTIFY_MESSAGE) -
1515 sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4)));
1516 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1517 I2O_EXEC_LCT_NOTIFY);
1518 I2O_EXEC_LCT_NOTIFY_MESSAGE_setClassIdentifier(Message_Ptr,
1519 I2O_CLASS_MATCH_ANYCLASS);
1521 * Call the LCT table to determine the number of device entries
1522 * to reserve space for.
1524 SG(&(Message_Ptr->SGL), 0,
1525 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER, &Table,
1528 * since this code is reused in several systems, code efficiency
1529 * is greater by using a shift operation rather than a divide by
1530 * sizeof(u_int32_t).
1532 I2O_LCT_setTableSize(&Table,
1533 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1534 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1536 * Determine the size of the LCT table.
1539 free(sc->ha_LCT, M_TEMP);
1542 * malloc only generates contiguous memory when less than a
1543 * page is expected. We must break the request up into an SG list ...
1545 if (((len = (I2O_LCT_getTableSize(&Table) << 2)) <=
1546 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)))
1547 || (len > (128 * 1024))) { /* Arbitrary */
1548 free(Message_Ptr, M_TEMP);
1551 if ((sc->ha_LCT = (PI2O_LCT)malloc (len, M_TEMP, M_WAITOK)) == NULL) {
1552 free(Message_Ptr, M_TEMP);
1556 * since this code is reused in several systems, code efficiency
1557 * is greater by using a shift operation rather than a divide by
1558 * sizeof(u_int32_t).
1560 I2O_LCT_setTableSize(sc->ha_LCT,
1561 (sizeof(I2O_LCT) - sizeof(I2O_LCT_ENTRY)) >> 2);
1563 * Convert the access to the LCT table into a SG list.
1565 sg = Message_Ptr->SGL.u.Simple;
1566 v = (caddr_t)(sc->ha_LCT);
1568 int next, base, span;
1571 next = base = KVTOPHYS(v);
1572 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1574 /* How far can we go contiguously */
1575 while ((len > 0) && (base == next)) {
1578 next = trunc_page(base) + PAGE_SIZE;
1589 /* Construct the Flags */
1590 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1592 int rw = I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT;
1594 rw = (I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT
1595 | I2O_SGL_FLAGS_LAST_ELEMENT
1596 | I2O_SGL_FLAGS_END_OF_BUFFER);
1598 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount), rw);
1606 * Incrementing requires resizing of the packet.
1609 MessageSizeInBytes += sizeof(*sg);
1610 I2O_MESSAGE_FRAME_setMessageSize(
1611 &(Message_Ptr->StdMessageFrame),
1612 I2O_MESSAGE_FRAME_getMessageSize(
1613 &(Message_Ptr->StdMessageFrame))
1614 + (sizeof(*sg) / sizeof(U32)));
1616 PI2O_EXEC_LCT_NOTIFY_MESSAGE NewMessage_Ptr;
1618 if ((NewMessage_Ptr = (PI2O_EXEC_LCT_NOTIFY_MESSAGE)
1619 malloc(MessageSizeInBytes, M_TEMP, M_WAITOK))
1621 free(sc->ha_LCT, M_TEMP);
1623 free(Message_Ptr, M_TEMP);
1626 span = ((caddr_t)sg) - (caddr_t)Message_Ptr;
1627 bcopy(Message_Ptr, NewMessage_Ptr, span);
1628 free(Message_Ptr, M_TEMP);
1629 sg = (PI2O_SGE_SIMPLE_ELEMENT)
1630 (((caddr_t)NewMessage_Ptr) + span);
1631 Message_Ptr = NewMessage_Ptr;
1636 retval = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
1637 free(Message_Ptr, M_TEMP);
1638 if (retval != CAM_REQ_CMP) {
1642 /* If the LCT table grew, lets truncate accesses */
1643 if (I2O_LCT_getTableSize(&Table) < I2O_LCT_getTableSize(sc->ha_LCT)) {
1644 I2O_LCT_setTableSize(sc->ha_LCT, I2O_LCT_getTableSize(&Table));
1646 for (Entry = sc->ha_LCT->LCTEntry; Entry < (PI2O_LCT_ENTRY)
1647 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
1649 Entry->le_type = I2O_UNKNOWN;
1650 switch (I2O_CLASS_ID_getClass(&(Entry->ClassID))) {
1652 case I2O_CLASS_RANDOM_BLOCK_STORAGE:
1653 Entry->le_type = I2O_BSA;
1656 case I2O_CLASS_SCSI_PERIPHERAL:
1657 Entry->le_type = I2O_SCSI;
1660 case I2O_CLASS_FIBRE_CHANNEL_PERIPHERAL:
1661 Entry->le_type = I2O_FCA;
1664 case I2O_CLASS_BUS_ADAPTER_PORT:
1665 Entry->le_type = I2O_PORT | I2O_SCSI;
1667 case I2O_CLASS_FIBRE_CHANNEL_PORT:
1668 if (I2O_CLASS_ID_getClass(&(Entry->ClassID)) ==
1669 I2O_CLASS_FIBRE_CHANNEL_PORT) {
1670 Entry->le_type = I2O_PORT | I2O_FCA;
1672 { struct ControllerInfo {
1673 I2O_PARAM_RESULTS_LIST_HEADER Header;
1674 I2O_PARAM_READ_OPERATION_RESULT Read;
1675 I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1677 PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR Info;
1679 Entry->le_bus = 0xff;
1680 Entry->le_target = 0xff;
1681 Entry->le_lun = 0xff;
1683 if ((Info = (PI2O_HBA_SCSI_CONTROLLER_INFO_SCALAR)
1685 I2O_LCT_ENTRY_getLocalTID(Entry),
1686 I2O_HBA_SCSI_CONTROLLER_INFO_GROUP_NO,
1687 &Buffer, sizeof(struct ControllerInfo))) == NULL) {
1691 = I2O_HBA_SCSI_CONTROLLER_INFO_SCALAR_getInitiatorID(
1698 { struct DeviceInfo {
1699 I2O_PARAM_RESULTS_LIST_HEADER Header;
1700 I2O_PARAM_READ_OPERATION_RESULT Read;
1701 I2O_DPT_DEVICE_INFO_SCALAR Info;
1703 PI2O_DPT_DEVICE_INFO_SCALAR Info;
1705 Entry->le_bus = 0xff;
1706 Entry->le_target = 0xff;
1707 Entry->le_lun = 0xff;
1709 if ((Info = (PI2O_DPT_DEVICE_INFO_SCALAR)
1711 I2O_LCT_ENTRY_getLocalTID(Entry),
1712 I2O_DPT_DEVICE_INFO_GROUP_NO,
1713 &Buffer, sizeof(struct DeviceInfo))) == NULL) {
1717 |= I2O_DPT_DEVICE_INFO_SCALAR_getDeviceType(Info);
1719 = I2O_DPT_DEVICE_INFO_SCALAR_getBus(Info);
1720 if ((Entry->le_bus > sc->ha_MaxBus)
1721 && (Entry->le_bus <= MAX_CHANNEL)) {
1722 sc->ha_MaxBus = Entry->le_bus;
1725 = I2O_DPT_DEVICE_INFO_SCALAR_getIdentifier(Info);
1727 = I2O_DPT_DEVICE_INFO_SCALAR_getLunInfo(Info);
1731 * A zero return value indicates success.
1734 } /* ASR_acquireLct */
1737 * Initialize a message frame.
1738 * We assume that the CDB has already been set up, so all we do here is
1739 * generate the Scatter Gather list.
/*
 * Build a PRIVATE_SCSI_SCB_EXECUTE message for the CCB: resolve (or
 * look up in the LCT and cache) the target's TID, fill in the standard
 * frame fields, copy the CDB, set SCB direction flags from
 * ccb_h.flags/dxfer_len, construct the data SG list from physically
 * contiguous spans of the data buffer, and append a final SG element
 * for the sense buffer.  The CCB pointer rides in InitiatorContext64.
 * NOTE(review): source lines are elided in this view; only the visible
 * statements are documented here.
 */
1741 static PI2O_MESSAGE_FRAME
1742 ASR_init_message(union asr_ccb *ccb, PI2O_MESSAGE_FRAME Message)
1744 PI2O_MESSAGE_FRAME Message_Ptr;
1745 PI2O_SGE_SIMPLE_ELEMENT sg;
1746 Asr_softc_t *sc = (Asr_softc_t *)(ccb->ccb_h.spriv_ptr0);
1747 vm_size_t size, len;
1750 int next, span, base, rw;
1751 int target = ccb->ccb_h.target_id;
1752 int lun = ccb->ccb_h.target_lun;
1753 int bus =cam_sim_bus(xpt_path_sim(ccb->ccb_h.path));
1756 /* We only need to zero out the PRIVATE_SCSI_SCB_EXECUTE_MESSAGE */
1757 Message_Ptr = (I2O_MESSAGE_FRAME *)Message;
1758 bzero(Message_Ptr, (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
1759 sizeof(I2O_SG_ELEMENT)));
1761 if ((TID = ASR_getTid (sc, bus, target, lun)) == (tid_t)-1) {
1762 PI2O_LCT_ENTRY Device;
1765 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
1766 (((U32 *)sc->ha_LCT) + I2O_LCT_getTableSize(sc->ha_LCT));
1768 if ((Device->le_type != I2O_UNKNOWN)
1769 && (Device->le_bus == bus)
1770 && (Device->le_target == target)
1771 && (Device->le_lun == lun)
1772 && (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF)) {
1773 TID = I2O_LCT_ENTRY_getLocalTID(Device);
1774 ASR_setTid(sc, Device->le_bus,
1775 Device->le_target, Device->le_lun,
1781 if (TID == (tid_t)0) {
1784 I2O_MESSAGE_FRAME_setTargetAddress(Message_Ptr, TID);
1785 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(
1786 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, TID);
1787 I2O_MESSAGE_FRAME_setVersionOffset(Message_Ptr, I2O_VERSION_11 |
1788 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) - sizeof(I2O_SG_ELEMENT))
1789 / sizeof(U32)) << 4));
1790 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1791 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1792 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32));
1793 I2O_MESSAGE_FRAME_setInitiatorAddress (Message_Ptr, 1);
1794 I2O_MESSAGE_FRAME_setFunction(Message_Ptr, I2O_PRIVATE_MESSAGE);
1795 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
1796 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
1797 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1798 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1799 I2O_SCB_FLAG_ENABLE_DISCONNECT
1800 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1801 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
1803 * We do not need any (optional byteswapping) method access to
1804 * the Initiator & Transaction context field.
1806 I2O_MESSAGE_FRAME_setInitiatorContext64(Message, (long)ccb);
1808 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
1809 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, DPT_ORGANIZATION_ID);
1813 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(
1814 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, ccb->csio.cdb_len);
1815 bcopy(&(ccb->csio.cdb_io),
1816 ((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->CDB,
1820 * Given a buffer describing a transfer, set up a scatter/gather map
1821 * in a ccb to map that SCSI transfer.
1824 rw = (ccb->ccb_h.flags & CAM_DIR_IN) ? 0 : I2O_SGL_FLAGS_DIR;
1826 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (
1827 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
1828 (ccb->csio.dxfer_len)
1829 ? ((rw) ? (I2O_SCB_FLAG_XFER_TO_DEVICE
1830 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1831 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1832 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER)
1833 : (I2O_SCB_FLAG_XFER_FROM_DEVICE
1834 | I2O_SCB_FLAG_ENABLE_DISCONNECT
1835 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1836 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER))
1837 : (I2O_SCB_FLAG_ENABLE_DISCONNECT
1838 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
1839 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
1842 * Given a transfer described by a `data', fill in the SG list.
1844 sg = &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr)->SGL.u.Simple[0];
1846 len = ccb->csio.dxfer_len;
1847 v = ccb->csio.data_ptr;
1848 KASSERT(ccb->csio.dxfer_len >= 0, ("csio.dxfer_len < 0"));
1849 MessageSize = I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr);
1850 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
1851 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr, len);
1852 while ((len > 0) && (sg < &((PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
1853 Message_Ptr)->SGL.u.Simple[SG_SIZE])) {
1855 next = base = KVTOPHYS(v);
1856 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg, base);
1858 /* How far can we go contiguously */
1859 while ((len > 0) && (base == next)) {
1860 next = trunc_page(base) + PAGE_SIZE;
1871 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount), span);
1873 rw |= I2O_SGL_FLAGS_LAST_ELEMENT;
1875 I2O_FLAGS_COUNT_setFlags(&(sg->FlagsCount),
1876 I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT | rw);
1878 MessageSize += sizeof(*sg) / sizeof(U32);
1880 /* We always do the request sense ... */
1881 if ((span = ccb->csio.sense_len) == 0) {
1882 span = sizeof(ccb->csio.sense_data);
1884 SG(sg, 0, I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
1885 &(ccb->csio.sense_data), span);
1886 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
1887 MessageSize + (sizeof(*sg) / sizeof(U32)));
1888 return (Message_Ptr);
1889 } /* ASR_init_message */
1892 * Reset the adapter.
/*
 * Initialize the IOP's outbound (reply) FIFO: send EXEC_OUTBOUND_INIT
 * with host page/frame sizes, poll the reply-status word (busy-wait,
 * interrupts saved/restored via ASR_initiateCp/asr_set_intr), allocate
 * the contiguous reply-frame pool on first use, and seed the FromFIFO
 * with the physical address of every reply frame.  Returns the final
 * reply status word.
 * NOTE(review): source lines are elided in this view.
 */
1895 ASR_initOutBound(Asr_softc_t *sc)
1897 struct initOutBoundMessage {
1898 I2O_EXEC_OUTBOUND_INIT_MESSAGE M;
1901 PI2O_EXEC_OUTBOUND_INIT_MESSAGE Message_Ptr;
1902 U32 *volatile Reply_Ptr;
1906 * Build up our copy of the Message.
1908 Message_Ptr = (PI2O_EXEC_OUTBOUND_INIT_MESSAGE)ASR_fillMessage(&Message,
1909 sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE));
1910 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
1911 I2O_EXEC_OUTBOUND_INIT);
1912 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setHostPageFrameSize(Message_Ptr, PAGE_SIZE);
1913 I2O_EXEC_OUTBOUND_INIT_MESSAGE_setOutboundMFrameSize(Message_Ptr,
1914 sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME));
1916 * Reset the Reply Status
1918 *(Reply_Ptr = (U32 *)((char *)Message_Ptr
1919 + sizeof(I2O_EXEC_OUTBOUND_INIT_MESSAGE))) = 0;
1920 SG (&(Message_Ptr->SGL), 0, I2O_SGL_FLAGS_LAST_ELEMENT, Reply_Ptr,
1923 * Send the Message out
1925 if ((Old = ASR_initiateCp(sc, (PI2O_MESSAGE_FRAME)Message_Ptr)) !=
1930 * Wait for a response (Poll).
1932 while (*Reply_Ptr < I2O_EXEC_OUTBOUND_INIT_REJECTED);
1934 * Re-enable the interrupts.
1936 asr_set_intr(sc, Old);
1938 * Populate the outbound table.
1940 if (sc->ha_Msgs == NULL) {
1942 /* Allocate the reply frames */
1943 size = sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1944 * sc->ha_Msgs_Count;
1947 * contigmalloc only works reliably at
1948 * initialization time.
1950 if ((sc->ha_Msgs = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
1951 contigmalloc (size, M_DEVBUF, M_WAITOK, 0ul,
1952 0xFFFFFFFFul, (u_long)sizeof(U32), 0ul)) != NULL) {
1953 bzero(sc->ha_Msgs, size);
1954 sc->ha_Msgs_Phys = KVTOPHYS(sc->ha_Msgs);
1958 /* Initialize the outbound FIFO */
1959 if (sc->ha_Msgs != NULL)
1960 for(size = sc->ha_Msgs_Count, addr = sc->ha_Msgs_Phys;
1962 asr_set_FromFIFO(sc, addr);
1963 addr += sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME);
1965 return (*Reply_Ptr);
1968 } /* ASR_initOutBound */
1971 * Set the system table
/*
 * Send EXEC_SYS_TAB_SET to the IOP: build a system-table header whose
 * entry count equals the number of attached adapters (Asr_softc_list),
 * then an SG list carrying the header, one per-adapter SystemTable
 * entry, and two trailing NULL elements.  Returns the queueing status;
 * temporary allocations are freed before return.
 * NOTE(review): source lines are elided in this view.
 */
1974 ASR_setSysTab(Asr_softc_t *sc)
1976 PI2O_EXEC_SYS_TAB_SET_MESSAGE Message_Ptr;
1977 PI2O_SET_SYSTAB_HEADER SystemTable;
1978 Asr_softc_t * ha, *next;
1979 PI2O_SGE_SIMPLE_ELEMENT sg;
1982 if ((SystemTable = (PI2O_SET_SYSTAB_HEADER)malloc (
1983 sizeof(I2O_SET_SYSTAB_HEADER), M_TEMP, M_WAITOK | M_ZERO)) == NULL) {
1986 STAILQ_FOREACH(ha, &Asr_softc_list, ha_next) {
1987 ++SystemTable->NumberEntries;
1989 if ((Message_Ptr = (PI2O_EXEC_SYS_TAB_SET_MESSAGE)malloc (
1990 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1991 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)),
1992 M_TEMP, M_WAITOK)) == NULL) {
1993 free(SystemTable, M_TEMP);
1996 (void)ASR_fillMessage((void *)Message_Ptr,
1997 sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
1998 + ((3+SystemTable->NumberEntries) * sizeof(I2O_SGE_SIMPLE_ELEMENT)));
1999 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2001 (((sizeof(I2O_EXEC_SYS_TAB_SET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2002 / sizeof(U32)) << 4)));
2003 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2004 I2O_EXEC_SYS_TAB_SET);
2006 * Call the LCT table to determine the number of device entries
2007 * to reserve space for.
2008 * since this code is reused in several systems, code efficiency
2009 * is greater by using a shift operation rather than a divide by
2010 * sizeof(u_int32_t).
2012 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
2013 + ((I2O_MESSAGE_FRAME_getVersionOffset(
2014 &(Message_Ptr->StdMessageFrame)) & 0xF0) >> 2));
2015 SG(sg, 0, I2O_SGL_FLAGS_DIR, SystemTable, sizeof(I2O_SET_SYSTAB_HEADER));
2017 STAILQ_FOREACH_SAFE(ha, &Asr_softc_list, ha_next, next) {
2020 ? (I2O_SGL_FLAGS_DIR)
2021 : (I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER)),
2022 &(ha->ha_SystemTable), sizeof(ha->ha_SystemTable));
2025 SG(sg, 0, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2026 SG(sg, 1, I2O_SGL_FLAGS_DIR | I2O_SGL_FLAGS_LAST_ELEMENT
2027 | I2O_SGL_FLAGS_END_OF_BUFFER, NULL, 0);
2028 retVal = ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
2029 free(Message_Ptr, M_TEMP);
2030 free(SystemTable, M_TEMP);
2032 } /* ASR_setSysTab */
/*
 * Acquire the Hardware Resource Table (HRT) via EXEC_HRT_GET (single
 * simple SG element into a stack buffer sized for MAX_CHANNEL entries),
 * clamp the reported entry count to MAX_CHANNEL + 1, and use each HRT
 * entry's AdapterID to assign bus numbers to matching LCT entries,
 * updating sc->ha_MaxBus as larger (in-range) bus numbers are found.
 * NOTE(review): source lines are elided in this view.
 */
2035 ASR_acquireHrt(Asr_softc_t *sc)
2037 I2O_EXEC_HRT_GET_MESSAGE Message;
2038 I2O_EXEC_HRT_GET_MESSAGE *Message_Ptr;
2041 I2O_HRT_ENTRY Entry[MAX_CHANNEL];
2043 u_int8_t NumberOfEntries;
2044 PI2O_HRT_ENTRY Entry;
2046 bzero(&Hrt, sizeof (Hrt));
2047 Message_Ptr = (I2O_EXEC_HRT_GET_MESSAGE *)ASR_fillMessage(&Message,
2048 sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT)
2049 + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2050 I2O_MESSAGE_FRAME_setVersionOffset(&(Message_Ptr->StdMessageFrame),
2052 + (((sizeof(I2O_EXEC_HRT_GET_MESSAGE) - sizeof(I2O_SG_ELEMENT))
2053 / sizeof(U32)) << 4)));
2054 I2O_MESSAGE_FRAME_setFunction (&(Message_Ptr->StdMessageFrame),
2058 * Set up the buffers as scatter gather elements.
2060 SG(&(Message_Ptr->SGL), 0,
2061 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2063 if (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != CAM_REQ_CMP) {
2066 if ((NumberOfEntries = I2O_HRT_getNumberEntries(&Hrt.Header))
2067 > (MAX_CHANNEL + 1)) {
2068 NumberOfEntries = MAX_CHANNEL + 1;
2070 for (Entry = Hrt.Header.HRTEntry;
2071 NumberOfEntries != 0;
2072 ++Entry, --NumberOfEntries) {
2073 PI2O_LCT_ENTRY Device;
2075 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2076 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT));
2078 if (I2O_LCT_ENTRY_getLocalTID(Device)
2079 == (I2O_HRT_ENTRY_getAdapterID(Entry) & 0xFFF)) {
2080 Device->le_bus = I2O_HRT_ENTRY_getAdapterID(
2082 if ((Device->le_bus > sc->ha_MaxBus)
2083 && (Device->le_bus <= MAX_CHANNEL)) {
2084 sc->ha_MaxBus = Device->le_bus;
2090 } /* ASR_acquireHrt */
2093 * Enable the adapter.
/*
 * Enable the adapter by sending an EXEC_SYS_ENABLE message.
 * Returns non-zero when the synchronous queue call fails.
 */
2096 ASR_enableSys(Asr_softc_t *sc)
2098 I2O_EXEC_SYS_ENABLE_MESSAGE Message;
2099 PI2O_EXEC_SYS_ENABLE_MESSAGE Message_Ptr;
2101 Message_Ptr = (PI2O_EXEC_SYS_ENABLE_MESSAGE)ASR_fillMessage(&Message,
2102 sizeof(I2O_EXEC_SYS_ENABLE_MESSAGE));
2103 I2O_MESSAGE_FRAME_setFunction(&(Message_Ptr->StdMessageFrame),
2104 I2O_EXEC_SYS_ENABLE);
2105 return (ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr) != 0);
2106 } /* ASR_enableSys */
2109 * Perform the stages necessary to initialize the adapter
/*
 * Run the adapter initialization stages in order: outbound FIFO init,
 * system table set, system enable.  Short-circuits on the first
 * failure; returns non-zero (true) when any stage fails.
 */
2112 ASR_init(Asr_softc_t *sc)
2114 return ((ASR_initOutBound(sc) == 0)
2115 || (ASR_setSysTab(sc) != CAM_REQ_CMP)
2116 || (ASR_enableSys(sc) != CAM_REQ_CMP));
2120 * Send a Synchronize Cache command to the target device.
/*
 * Send a SYNCHRONIZE CACHE (CDB 0x35, 6-byte form with LUN in byte 1)
 * to the device at (bus, target, lun), but only when the device has a
 * valid cached TID and (per the visible condition) there are CCBs on
 * ha_ccb -- outstanding OS commands indicate a locked-up device, since
 * the OS normally flushes before shutdown/reset.  The message is built
 * on the stack and queued synchronously via ASR_queue_c().
 * NOTE(review): source lines are elided in this view.
 */
2123 ASR_sync(Asr_softc_t *sc, int bus, int target, int lun)
2128 * We will not synchronize the device when there are outstanding
2129 * commands issued by the OS (this is due to a locked up device,
2130 * as the OS normally would flush all outstanding commands before
2131 * issuing a shutdown or an adapter reset).
2134 && (LIST_FIRST(&(sc->ha_ccb)) != NULL)
2135 && ((TID = ASR_getTid (sc, bus, target, lun)) != (tid_t)-1)
2136 && (TID != (tid_t)0)) {
2137 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message;
2138 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
2140 Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2141 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2142 - sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2144 I2O_MESSAGE_FRAME_setVersionOffset(
2145 (PI2O_MESSAGE_FRAME)Message_Ptr,
2147 | (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2148 - sizeof(I2O_SG_ELEMENT))
2149 / sizeof(U32)) << 4));
2150 I2O_MESSAGE_FRAME_setMessageSize(
2151 (PI2O_MESSAGE_FRAME)Message_Ptr,
2152 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2153 - sizeof(I2O_SG_ELEMENT))
2155 I2O_MESSAGE_FRAME_setInitiatorAddress (
2156 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2157 I2O_MESSAGE_FRAME_setFunction(
2158 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2159 I2O_MESSAGE_FRAME_setTargetAddress(
2160 (PI2O_MESSAGE_FRAME)Message_Ptr, TID);
2161 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode (
2162 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2164 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setTID(Message_Ptr, TID);
2165 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2166 I2O_SCB_FLAG_ENABLE_DISCONNECT
2167 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2168 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
2169 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2170 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2171 DPT_ORGANIZATION_ID);
2172 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2173 Message_Ptr->CDB[0] = SYNCHRONIZE_CACHE;
2174 Message_Ptr->CDB[1] = (lun << 5);
2176 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2177 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2178 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2179 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2180 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2182 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
/*
 * Issue ASR_sync() (SYNCHRONIZE CACHE) to every (bus, target, lun)
 * triple within the adapter's configured limits.
 */
2188 ASR_synchronize(Asr_softc_t *sc)
2190 int bus, target, lun;
2192 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2193 for (target = 0; target <= sc->ha_MaxId; ++target) {
2194 for (lun = 0; lun <= sc->ha_MaxLun; ++lun) {
2195 ASR_sync(sc,bus,target,lun);
2202 * Reset the HBA, targets and BUS.
2203 * Currently this resets *all* the SCSI busses.
/*
 * Full HBA reset: flush all device caches first (ASR_synchronize),
 * then reset the adapter, ignoring the reset's return value.
 */
2205 static __inline void
2206 asr_hbareset(Asr_softc_t *sc)
2208 ASR_synchronize(sc);
2209 (void)ASR_reset(sc);
2210 } /* asr_hbareset */
2213 * A reduced copy of the real pci_map_mem, incorporating the MAX_MAP
2214 * limit and a reduction in error checking (in the pre 4.0 case).
/*
 * Map the adapter's registers: find the first memory BAR (per the I2O
 * spec), size it via the write-all-ones probe, allocate the resource,
 * and record the bus tag/handle.  Handles two quirky boards: the
 * Dominator (conjoined BAR -- force size to MAX_MAP) and the 2005S
 * "Split BAR Raptor Daptor" (device/vendor 0xA5111044), whose
 * messaging registers live in the *next* BAR and get their own
 * tag/handle; otherwise the frame handle aliases the I2O handle.
 * NOTE(review): source lines are elided in this view.
 */
2217 asr_pci_map_mem(device_t dev, Asr_softc_t *sc)
2223 * I2O specification says we must find first *memory* mapped BAR
2225 for (rid = 0; rid < 4; rid++) {
2226 p = pci_read_config(dev, PCIR_BAR(rid), sizeof(p));
2237 rid = PCIR_BAR(rid);
2238 p = pci_read_config(dev, rid, sizeof(p));
2239 pci_write_config(dev, rid, -1, sizeof(p));
2240 l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2241 pci_write_config(dev, rid, p, sizeof(p));
2246 * The 2005S Zero Channel RAID solution is not a perfect PCI
2247 * citizen. It asks for 4MB on BAR0, and 0MB on BAR1, once
2248 * enabled it rewrites the size of BAR0 to 2MB, sets BAR1 to
2249 * BAR0+2MB and sets it's size to 2MB. The IOP registers are
2250 * accessible via BAR0, the messaging registers are accessible
2251 * via BAR1. If the subdevice code is 50 to 59 decimal.
2253 s = pci_read_config(dev, PCIR_DEVVENDOR, sizeof(s));
2254 if (s != 0xA5111044) {
2255 s = pci_read_config(dev, PCIR_SUBVEND_0, sizeof(s));
2256 if ((((ADPTDOMINATOR_SUB_ID_START ^ s) & 0xF000FFFF) == 0)
2257 && (ADPTDOMINATOR_SUB_ID_START <= s)
2258 && (s <= ADPTDOMINATOR_SUB_ID_END)) {
2259 l = MAX_MAP; /* Conjoined BAR Raptor Daptor */
2263 sc->ha_mem_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2264 p, p + l, l, RF_ACTIVE);
2265 if (sc->ha_mem_res == NULL) {
2268 sc->ha_Base = rman_get_start(sc->ha_mem_res);
2269 sc->ha_i2o_bhandle = rman_get_bushandle(sc->ha_mem_res);
2270 sc->ha_i2o_btag = rman_get_bustag(sc->ha_mem_res);
2272 if (s == 0xA5111044) { /* Split BAR Raptor Daptor */
2273 if ((rid += sizeof(u_int32_t)) >= PCIR_BAR(4)) {
2276 p = pci_read_config(dev, rid, sizeof(p));
2277 pci_write_config(dev, rid, -1, sizeof(p));
2278 l = 0 - (pci_read_config(dev, rid, sizeof(l)) & ~15);
2279 pci_write_config(dev, rid, p, sizeof(p));
2284 sc->ha_mes_res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid,
2285 p, p + l, l, RF_ACTIVE);
2286 if (sc->ha_mes_res == NULL) {
2289 sc->ha_frame_bhandle = rman_get_bushandle(sc->ha_mes_res);
2290 sc->ha_frame_btag = rman_get_bustag(sc->ha_mes_res);
2292 sc->ha_frame_bhandle = sc->ha_i2o_bhandle;
2293 sc->ha_frame_btag = sc->ha_i2o_btag;
2296 } /* asr_pci_map_mem */
2299 * A simplified copy of the real pci_map_int with additional
2300 * registration requirements.
/*
 * Allocate the (shareable) IRQ resource, hook asr_intr as the
 * interrupt handler (CAM type, entropy source), and record the
 * interrupt line from PCI config space in ha_irq.
 * NOTE(review): error-return lines are elided in this view.
 */
2303 asr_pci_map_int(device_t dev, Asr_softc_t *sc)
2307 sc->ha_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
2308 RF_ACTIVE | RF_SHAREABLE);
2309 if (sc->ha_irq_res == NULL) {
2312 if (bus_setup_intr(dev, sc->ha_irq_res, INTR_TYPE_CAM | INTR_ENTROPY,
2313 NULL, (driver_intr_t *)asr_intr, (void *)sc, &(sc->ha_intr))) {
2316 sc->ha_irq = pci_read_config(dev, PCIR_INTLINE, sizeof(char));
2318 } /* asr_pci_map_int */
/*
 * bus_dmamap_load() callback for the status memory: record the bus
 * addresses of the status and rstatus words.  The addresses are
 * truncated to 32 bits because the existing accessor macros cannot
 * manipulate 64-bit addresses (see comment below).
 */
2321 asr_status_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
2328 sc = (Asr_softc_t *)arg;
2331 * The status word can be at a 64-bit address, but the existing
2332 * accessor macros simply cannot manipulate 64-bit addresses.
2334 sc->ha_status_phys = (u_int32_t)segs[0].ds_addr +
2335 offsetof(struct Asr_status_mem, status);
2336 sc->ha_rstatus_phys = (u_int32_t)segs[0].ds_addr +
2337 offsetof(struct Asr_status_mem, rstatus);
/*
 * Create the driver's DMA tags (a 32-bit-bounded parent tag and a
 * status-memory tag sized to ha_statusmem), allocate the status
 * memory, and load its map with asr_status_cb recording the physical
 * addresses.  Tears down already-created tags on each failure path.
 * NOTE(review): return-value lines are elided in this view.
 */
2341 asr_alloc_dma(Asr_softc_t *sc)
2347 if (bus_dma_tag_create(bus_get_dma_tag(dev), /* PCI parent */
2348 1, 0, /* algnmnt, boundary */
2349 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2350 BUS_SPACE_MAXADDR, /* highaddr */
2351 NULL, NULL, /* filter, filterarg */
2352 BUS_SPACE_MAXSIZE_32BIT, /* maxsize */
2353 BUS_SPACE_UNRESTRICTED, /* nsegments */
2354 BUS_SPACE_MAXSIZE_32BIT, /* maxsegsize */
2356 NULL, NULL, /* lockfunc, lockarg */
2357 &sc->ha_parent_dmat)) {
2358 device_printf(dev, "Cannot allocate parent DMA tag\n");
2362 if (bus_dma_tag_create(sc->ha_parent_dmat, /* parent */
2363 1, 0, /* algnmnt, boundary */
2364 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
2365 BUS_SPACE_MAXADDR, /* highaddr */
2366 NULL, NULL, /* filter, filterarg */
2367 sizeof(sc->ha_statusmem),/* maxsize */
2369 sizeof(sc->ha_statusmem),/* maxsegsize */
2371 NULL, NULL, /* lockfunc, lockarg */
2372 &sc->ha_statusmem_dmat)) {
2373 device_printf(dev, "Cannot allocate status DMA tag\n");
2374 bus_dma_tag_destroy(sc->ha_parent_dmat);
2378 if (bus_dmamem_alloc(sc->ha_statusmem_dmat, (void **)&sc->ha_statusmem,
2379 BUS_DMA_NOWAIT, &sc->ha_statusmem_dmamap)) {
2380 device_printf(dev, "Cannot allocate status memory\n");
2381 bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2382 bus_dma_tag_destroy(sc->ha_parent_dmat);
2385 (void)bus_dmamap_load(sc->ha_statusmem_dmat, sc->ha_statusmem_dmamap,
2386 sc->ha_statusmem, sizeof(sc->ha_statusmem), asr_status_cb, sc, 0);
/*
 * Tear down the DMA state created by asr_alloc_dma(), in reverse
 * order: unload the map (only if it was loaded — ha_rstatus_phys is
 * set by asr_status_cb()), free the status memory, then destroy the
 * child and parent tags.  Each step is guarded so the function is
 * safe to call after a partial asr_alloc_dma() failure.
 */
2392 asr_release_dma(Asr_softc_t *sc)
2395 if (sc->ha_rstatus_phys != 0)
2396 bus_dmamap_unload(sc->ha_statusmem_dmat,
2397 sc->ha_statusmem_dmamap);
2398 if (sc->ha_statusmem != NULL)
2399 bus_dmamem_free(sc->ha_statusmem_dmat, sc->ha_statusmem,
2400 sc->ha_statusmem_dmamap);
2401 if (sc->ha_statusmem_dmat != NULL)
2402 bus_dma_tag_destroy(sc->ha_statusmem_dmat);
2403 if (sc->ha_parent_dmat != NULL)
2404 bus_dma_tag_destroy(sc->ha_parent_dmat);
2408 * Attach the devices, and virtual devices to the driver list.
/*
 * Device attach: bring one adapter on line.
 *
 * Visible steps: map controller memory, enable PCI bus mastering,
 * allocate DMA resources, reset and query the IOP, size the inbound/
 * outbound message queues and SG limit from the STATUS_GET reply,
 * initialize the controller (ASR_init), acquire the LCT/HRT, derive
 * max target/LUN from the LCT, issue an INQUIRY to print the HBA
 * model, register one CAM SIM + wildcard path per channel, and create
 * the /dev control node (plus an "rdpti%d" alias).
 * NOTE(review): many error-return and closing-brace lines are elided
 * from this view; comments describe only what is shown.
 */
2411 asr_attach(device_t dev)
2413 PI2O_EXEC_STATUS_GET_REPLY status;
2414 PI2O_LCT_ENTRY Device;
2416 struct scsi_inquiry_data *iq;
2417 int bus, size, unit;
2420 sc = device_get_softc(dev);
2421 unit = device_get_unit(dev);
/* One-time (first adapter) global fixup. */
2424 if (STAILQ_EMPTY(&Asr_softc_list)) {
2426 * Fixup the OS revision as saved in the dptsig for the
2427 * engine (dptioctl.h) to pick up.
2429 bcopy(osrelease, &ASR_sig.dsDescription[16], 5);
2432 * Initialize the software structure
2434 LIST_INIT(&(sc->ha_ccb));
2435 /* Link us into the HA list */
2436 STAILQ_INSERT_TAIL(&Asr_softc_list, sc, ha_next);
2439 * This is the real McCoy!
2441 if (!asr_pci_map_mem(dev, sc)) {
2442 device_printf(dev, "could not map memory\n");
2445 /* Enable if not formerly enabled */
2446 pci_enable_busmaster(dev);
/* Record PCI bus/device coordinates (slot<<3 | function). */
2448 sc->ha_pciBusNum = pci_get_bus(dev);
2449 sc->ha_pciDeviceNum = (pci_get_slot(dev) << 3) | pci_get_function(dev);
2451 if ((error = asr_alloc_dma(sc)) != 0)
2454 /* Check if the device is there? */
2455 if (ASR_resetIOP(sc) == 0) {
2456 device_printf(dev, "Cannot reset adapter\n");
2457 asr_release_dma(sc);
2460 status = &sc->ha_statusmem->status;
2461 if (ASR_getStatus(sc) == NULL) {
2462 device_printf(dev, "could not initialize hardware\n");
2463 asr_release_dma(sc);
/* Seed the I2O system table from the STATUS_GET reply. */
2466 sc->ha_SystemTable.OrganizationID = status->OrganizationID;
2467 sc->ha_SystemTable.IOP_ID = status->IOP_ID;
2468 sc->ha_SystemTable.I2oVersion = status->I2oVersion;
2469 sc->ha_SystemTable.IopState = status->IopState;
2470 sc->ha_SystemTable.MessengerType = status->MessengerType;
2471 sc->ha_SystemTable.InboundMessageFrameSize = status->InboundMFrameSize;
2472 sc->ha_SystemTable.MessengerInfo.InboundMessagePortAddressLow =
2473 (U32)(sc->ha_Base + I2O_REG_TOFIFO); /* XXX 64-bit */
2475 if (!asr_pci_map_int(dev, (void *)sc)) {
2476 device_printf(dev, "could not map interrupt\n");
2477 asr_release_dma(sc);
2481 /* Adjust the maximum inbound count */
2482 if (((sc->ha_QueueSize =
2483 I2O_EXEC_STATUS_GET_REPLY_getMaxInboundMFrames(status)) >
2484 MAX_INBOUND) || (sc->ha_QueueSize == 0)) {
2485 sc->ha_QueueSize = MAX_INBOUND;
2488 /* Adjust the maximum outbound count */
2489 if (((sc->ha_Msgs_Count =
2490 I2O_EXEC_STATUS_GET_REPLY_getMaxOutboundMFrames(status)) >
2491 MAX_OUTBOUND) || (sc->ha_Msgs_Count == 0)) {
2492 sc->ha_Msgs_Count = MAX_OUTBOUND;
/* Outbound frames can never usefully exceed the inbound queue depth. */
2494 if (sc->ha_Msgs_Count > sc->ha_QueueSize) {
2495 sc->ha_Msgs_Count = sc->ha_QueueSize;
2498 /* Adjust the maximum SG size to adapter */
2499 if ((size = (I2O_EXEC_STATUS_GET_REPLY_getInboundMFrameSize(status) <<
2500 2)) > MAX_INBOUND_SIZE) {
2501 size = MAX_INBOUND_SIZE;
2503 sc->ha_SgSize = (size - sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2504 + sizeof(I2O_SG_ELEMENT)) / sizeof(I2O_SGE_SIMPLE_ELEMENT);
2507 * Only do a bus/HBA reset on the first time through. On this
2508 * first time through, we do not send a flush to the devices.
2510 if (ASR_init(sc) == 0) {
2512 I2O_PARAM_RESULTS_LIST_HEADER Header;
2513 I2O_PARAM_READ_OPERATION_RESULT Read;
2514 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2516 PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR Info;
2517 #define FW_DEBUG_BLED_OFFSET 8
/* Locate the firmware blink-LED (diagnostic) register offset. */
2519 if ((Info = (PI2O_DPT_EXEC_IOP_BUFFERS_SCALAR)
2520 ASR_getParams(sc, 0, I2O_DPT_EXEC_IOP_BUFFERS_GROUP_NO,
2521 &Buffer, sizeof(struct BufferInfo))) != NULL) {
2522 sc->ha_blinkLED = FW_DEBUG_BLED_OFFSET +
2523 I2O_DPT_EXEC_IOP_BUFFERS_SCALAR_getSerialOutputOffset(Info);
2525 if (ASR_acquireLct(sc) == 0) {
2526 (void)ASR_acquireHrt(sc);
2529 device_printf(dev, "failed to initialize\n");
2530 asr_release_dma(sc);
2534 * Add in additional probe responses for more channels. We
2535 * are reusing the variable `target' for a channel loop counter.
2536 * Done here because we need both the acquireLct and
/* Walk every LCT entry to size ha_MaxId/ha_MaxLun and note ports. */
2539 for (Device = sc->ha_LCT->LCTEntry; Device < (PI2O_LCT_ENTRY)
2540 (((U32 *)sc->ha_LCT)+I2O_LCT_getTableSize(sc->ha_LCT)); ++Device) {
2541 if (Device->le_type == I2O_UNKNOWN) {
2544 if (I2O_LCT_ENTRY_getUserTID(Device) == 0xFFF) {
2545 if (Device->le_target > sc->ha_MaxId) {
2546 sc->ha_MaxId = Device->le_target;
2548 if (Device->le_lun > sc->ha_MaxLun) {
2549 sc->ha_MaxLun = Device->le_lun;
/* Port entries give us the adapter's own target id per channel. */
2552 if (((Device->le_type & I2O_PORT) != 0)
2553 && (Device->le_bus <= MAX_CHANNEL)) {
2554 /* Do not increase MaxId for efficiency */
2555 sc->ha_adapter_target[Device->le_bus] =
2561 * Print the HBA model number as inquired from the card.
2564 device_printf(dev, " ");
2566 if ((iq = (struct scsi_inquiry_data *)malloc(
2567 sizeof(struct scsi_inquiry_data), M_TEMP, M_WAITOK | M_ZERO)) !=
2569 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message;
2570 PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE Message_Ptr;
/* Build a private SCB EXEC message carrying a 6-byte INQUIRY CDB. */
2573 Message_Ptr = (PRIVATE_SCSI_SCB_EXECUTE_MESSAGE *)&Message;
2574 bzero(Message_Ptr, sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2575 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT));
2577 I2O_MESSAGE_FRAME_setVersionOffset(
2578 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_VERSION_11 |
2579 (((sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE)
2580 - sizeof(I2O_SG_ELEMENT)) / sizeof(U32)) << 4));
2581 I2O_MESSAGE_FRAME_setMessageSize(
2582 (PI2O_MESSAGE_FRAME)Message_Ptr,
2583 (sizeof(PRIVATE_SCSI_SCB_EXECUTE_MESSAGE) -
2584 sizeof(I2O_SG_ELEMENT) + sizeof(I2O_SGE_SIMPLE_ELEMENT)) /
2586 I2O_MESSAGE_FRAME_setInitiatorAddress(
2587 (PI2O_MESSAGE_FRAME)Message_Ptr, 1);
2588 I2O_MESSAGE_FRAME_setFunction(
2589 (PI2O_MESSAGE_FRAME)Message_Ptr, I2O_PRIVATE_MESSAGE);
2590 I2O_PRIVATE_MESSAGE_FRAME_setXFunctionCode(
2591 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr, I2O_SCSI_SCB_EXEC);
2592 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2593 I2O_SCB_FLAG_ENABLE_DISCONNECT
2594 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2595 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER);
/* Interpret = 1: the firmware itself answers, not a SCSI device. */
2596 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setInterpret(Message_Ptr, 1);
2597 I2O_PRIVATE_MESSAGE_FRAME_setOrganizationID(
2598 (PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr,
2599 DPT_ORGANIZATION_ID);
2600 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setCDBLength(Message_Ptr, 6);
2601 Message_Ptr->CDB[0] = INQUIRY;
2602 Message_Ptr->CDB[4] =
2603 (unsigned char)sizeof(struct scsi_inquiry_data);
2604 if (Message_Ptr->CDB[4] == 0) {
2605 Message_Ptr->CDB[4] = 255;
2608 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setSCBFlags (Message_Ptr,
2609 (I2O_SCB_FLAG_XFER_FROM_DEVICE
2610 | I2O_SCB_FLAG_ENABLE_DISCONNECT
2611 | I2O_SCB_FLAG_SIMPLE_QUEUE_TAG
2612 | I2O_SCB_FLAG_SENSE_DATA_IN_BUFFER));
2614 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_setByteCount(
2615 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr,
2616 sizeof(struct scsi_inquiry_data));
2617 SG(&(Message_Ptr->SGL), 0,
2618 I2O_SGL_FLAGS_LAST_ELEMENT | I2O_SGL_FLAGS_END_OF_BUFFER,
2619 iq, sizeof(struct scsi_inquiry_data));
2620 (void)ASR_queue_c(sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
/* Print non-blank vendor/product/revision from the inquiry data. */
2622 if (iq->vendor[0] && (iq->vendor[0] != ' ')) {
2624 ASR_prstring (iq->vendor, 8);
2627 if (iq->product[0] && (iq->product[0] != ' ')) {
2629 ASR_prstring (iq->product, 16);
2632 if (iq->revision[0] && (iq->revision[0] != ' ')) {
2633 printf (" FW Rev. ");
2634 ASR_prstring (iq->revision, 4);
2642 printf (" %d channel, %d CCBs, Protocol I2O\n", sc->ha_MaxBus + 1,
2643 (sc->ha_QueueSize > MAX_INBOUND) ? MAX_INBOUND : sc->ha_QueueSize);
/* Register one SIM and one wildcard path per SCSI channel. */
2645 for (bus = 0; bus <= sc->ha_MaxBus; ++bus) {
2646 struct cam_devq * devq;
2647 int QueueSize = sc->ha_QueueSize;
2649 if (QueueSize > MAX_INBOUND) {
2650 QueueSize = MAX_INBOUND;
2654 * Create the device queue for our SIM(s).
2656 if ((devq = cam_simq_alloc(QueueSize)) == NULL) {
2661 * Construct our first channel SIM entry
2663 sc->ha_sim[bus] = cam_sim_alloc(asr_action, asr_poll, "asr", sc,
2665 1, QueueSize, devq);
2666 if (sc->ha_sim[bus] == NULL) {
2670 if (xpt_bus_register(sc->ha_sim[bus], dev, bus) != CAM_SUCCESS){
2671 cam_sim_free(sc->ha_sim[bus],
2673 sc->ha_sim[bus] = NULL;
2677 if (xpt_create_path(&(sc->ha_path[bus]), /*periph*/NULL,
2678 cam_sim_path(sc->ha_sim[bus]), CAM_TARGET_WILDCARD,
2679 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
2680 xpt_bus_deregister( cam_sim_path(sc->ha_sim[bus]));
2681 cam_sim_free(sc->ha_sim[bus], /*free_devq*/TRUE);
2682 sc->ha_sim[bus] = NULL;
2688 * Generate the device node information
2690 sc->ha_devt = make_dev(&asr_cdevsw, unit, UID_ROOT, GID_OPERATOR, 0640,
2692 if (sc->ha_devt != NULL)
2693 (void)make_dev_alias(sc->ha_devt, "rdpti%d", unit);
2694 sc->ha_devt->si_drv1 = sc;
/*
 * CAM polled-mode entry point: service any pending completions by
 * invoking the interrupt handler directly with this SIM's softc.
 */
2699 asr_poll(struct cam_sim *sim)
2701 asr_intr(cam_sim_softc(sim));
/*
 * CAM action entry point: dispatch a CCB from the transport layer.
 * Handles SCSI I/O submission (converted to an I2O message and queued
 * to the adapter), device/bus resets, transfer-settings queries,
 * geometry calculation and path inquiry; unsupported function codes
 * are completed with CAM_REQ_INVALID / CAM_FUNC_NOTAVAIL.
 * NOTE(review): break/xpt_done lines between cases are elided from
 * this view.
 */
2705 asr_action(struct cam_sim *sim, union ccb *ccb)
2707 struct Asr_softc *sc;
2709 debug_asr_printf("asr_action(%lx,%lx{%x})\n", (u_long)sim, (u_long)ccb,
2710 ccb->ccb_h.func_code);
2712 CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("asr_action\n"));
/* Stash the softc in the CCB so the completion path can find it. */
2714 ccb->ccb_h.spriv_ptr0 = sc = (struct Asr_softc *)cam_sim_softc(sim);
2716 switch ((int)ccb->ccb_h.func_code) {
2718 /* Common cases first */
2719 case XPT_SCSI_IO: /* Execute the requested I/O operation */
2722 char M[MAX_INBOUND_SIZE];
2724 PI2O_MESSAGE_FRAME Message_Ptr;
2726 /* Reject incoming commands while we are resetting the card */
2727 if (sc->ha_in_reset != HA_OPERATIONAL) {
2728 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2729 if (sc->ha_in_reset >= HA_OFF_LINE) {
2730 /* HBA is now off-line */
2731 ccb->ccb_h.status |= CAM_UNREC_HBA_ERROR;
2733 /* HBA currently resetting, try again later. */
2734 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2736 debug_asr_cmd_printf (" e\n");
2738 debug_asr_cmd_printf (" q\n");
/* Diagnostic: CCB arrived already completed — log and carry on. */
2741 if ((ccb->ccb_h.status & CAM_STATUS_MASK) != CAM_REQ_INPROG) {
2743 "asr%d WARNING: scsi_cmd(%x) already done on b%dt%du%d\n",
2744 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
2745 ccb->csio.cdb_io.cdb_bytes[0],
2747 ccb->ccb_h.target_id,
2748 ccb->ccb_h.target_lun);
2750 debug_asr_cmd_printf("(%d,%d,%d,%d)", cam_sim_unit(sim),
2751 cam_sim_bus(sim), ccb->ccb_h.target_id,
2752 ccb->ccb_h.target_lun);
2753 debug_asr_dump_ccb(ccb);
/* Translate the CCB into an I2O message and queue it to the IOP. */
2755 if ((Message_Ptr = ASR_init_message((union asr_ccb *)ccb,
2756 (PI2O_MESSAGE_FRAME)&Message)) != NULL) {
2757 debug_asr_cmd2_printf ("TID=%x:\n",
2758 PRIVATE_SCSI_SCB_EXECUTE_MESSAGE_getTID(
2759 (PPRIVATE_SCSI_SCB_EXECUTE_MESSAGE)Message_Ptr));
2760 debug_asr_cmd2_dump_message(Message_Ptr);
2761 debug_asr_cmd1_printf (" q");
2763 if (ASR_queue (sc, Message_Ptr) == EMPTY_QUEUE) {
2764 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2765 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
2766 debug_asr_cmd_printf (" E\n");
2769 debug_asr_cmd_printf(" Q\n");
2773 * We will get here if there is no valid TID for the device
2774 * referenced in the scsi command packet.
2776 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
2777 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
2778 debug_asr_cmd_printf (" B\n");
2783 case XPT_RESET_DEV: /* Bus Device Reset the specified SCSI device */
2784 /* Reset HBA device ... */
2786 ccb->ccb_h.status = CAM_REQ_CMP;
2790 #if (defined(REPORT_LUNS))
2793 case XPT_ABORT: /* Abort the specified CCB */
2795 ccb->ccb_h.status = CAM_REQ_INVALID;
2799 case XPT_SET_TRAN_SETTINGS:
2801 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2805 case XPT_GET_TRAN_SETTINGS:
2806 /* Get default/user set transfer settings for the target */
2808 struct ccb_trans_settings *cts = &(ccb->cts);
2809 struct ccb_trans_settings_scsi *scsi =
2810 &cts->proto_specific.scsi;
2811 struct ccb_trans_settings_spi *spi =
2812 &cts->xport_specific.spi;
/* Only user-settings queries are answered; fixed SPI defaults. */
2814 if (cts->type == CTS_TYPE_USER_SETTINGS) {
2815 cts->protocol = PROTO_SCSI;
2816 cts->protocol_version = SCSI_REV_2;
2817 cts->transport = XPORT_SPI;
2818 cts->transport_version = 2;
2820 scsi->flags = CTS_SCSI_FLAGS_TAG_ENB;
2821 spi->flags = CTS_SPI_FLAGS_DISC_ENB;
2822 spi->bus_width = MSG_EXT_WDTR_BUS_16_BIT;
2823 spi->sync_period = 6; /* 40MHz */
2824 spi->sync_offset = 15;
2825 spi->valid = CTS_SPI_VALID_SYNC_RATE
2826 | CTS_SPI_VALID_SYNC_OFFSET
2827 | CTS_SPI_VALID_BUS_WIDTH
2828 | CTS_SPI_VALID_DISC;
2829 scsi->valid = CTS_SCSI_VALID_TQ;
2831 ccb->ccb_h.status = CAM_REQ_CMP;
2833 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
2839 case XPT_CALC_GEOMETRY:
2841 struct ccb_calc_geometry *ccg;
2843 u_int32_t secs_per_cylinder;
2846 size_mb = ccg->volume_size
2847 / ((1024L * 1024L) / ccg->block_size);
/* Pick heads/sectors by capacity tier, then derive cylinders. */
2849 if (size_mb > 4096) {
2851 ccg->secs_per_track = 63;
2852 } else if (size_mb > 2048) {
2854 ccg->secs_per_track = 63;
2855 } else if (size_mb > 1024) {
2857 ccg->secs_per_track = 63;
2860 ccg->secs_per_track = 32;
2862 secs_per_cylinder = ccg->heads * ccg->secs_per_track;
2863 ccg->cylinders = ccg->volume_size / secs_per_cylinder;
2864 ccb->ccb_h.status = CAM_REQ_CMP;
2869 case XPT_RESET_BUS: /* Reset the specified SCSI bus */
2870 ASR_resetBus (sc, cam_sim_bus(sim));
2871 ccb->ccb_h.status = CAM_REQ_CMP;
2875 case XPT_TERM_IO: /* Terminate the I/O process */
2877 ccb->ccb_h.status = CAM_REQ_INVALID;
2881 case XPT_PATH_INQ: /* Path routing inquiry */
2883 struct ccb_pathinq *cpi = &(ccb->cpi);
2885 cpi->version_num = 1; /* XXX??? */
2886 cpi->hba_inquiry = PI_SDTR_ABLE|PI_TAG_ABLE|PI_WIDE_16;
2887 cpi->target_sprt = 0;
2888 /* Not necessary to reset bus, done by HDM initialization */
2889 cpi->hba_misc = PIM_NOBUSRESET;
2890 cpi->hba_eng_cnt = 0;
2891 cpi->max_target = sc->ha_MaxId;
2892 cpi->max_lun = sc->ha_MaxLun;
2893 cpi->initiator_id = sc->ha_adapter_target[cam_sim_bus(sim)];
2894 cpi->bus_id = cam_sim_bus(sim);
2895 cpi->base_transfer_speed = 3300;
2896 strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
2897 strncpy(cpi->hba_vid, "Adaptec", HBA_IDLEN);
2898 strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
2899 cpi->unit_number = cam_sim_unit(sim);
2900 cpi->ccb_h.status = CAM_REQ_CMP;
2901 cpi->transport = XPORT_SPI;
2902 cpi->transport_version = 2;
2903 cpi->protocol = PROTO_SCSI;
2904 cpi->protocol_version = SCSI_REV_2;
2909 ccb->ccb_h.status = CAM_REQ_INVALID;
2916 * Handle processing of current CCB as pointed to by the Status.
/*
 * Interrupt handler: drain the adapter's outbound (reply) FIFO while
 * interrupts remain asserted.  For each reply frame: recover the CCB
 * from the 64-bit initiator context; on a FAILED message, re-issue the
 * preserved MFA as a NOP and mark the CCB with a private error code;
 * otherwise map the I2O detailed status to a CAM status, copy any
 * auto-sense data, return the reply frame to the free FIFO and
 * complete the CCB via xpt_done().
 * NOTE(review): loop bodies and some closing braces are elided from
 * this view; comments describe only the visible code.
 */
2919 asr_intr(Asr_softc_t *sc)
2923 for(processed = 0; asr_get_status(sc) & Mask_InterruptsDisabled;
2928 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
/* The FIFO is read twice: one retry before giving up on this pass. */
2930 if (((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)
2931 && ((ReplyOffset = asr_get_FromFIFO(sc)) == EMPTY_QUEUE)) {
/* Convert the bus offset into a kernel virtual reply pointer. */
2934 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)(ReplyOffset
2935 - sc->ha_Msgs_Phys + (char *)(sc->ha_Msgs));
2937 * We do not need any (optional byteswapping) method access to
2938 * the Initiator context field.
2940 ccb = (union asr_ccb *)(long)
2941 I2O_MESSAGE_FRAME_getInitiatorContext64(
2942 &(Reply->StdReplyFrame.StdMessageFrame));
2943 if (I2O_MESSAGE_FRAME_getMsgFlags(
2944 &(Reply->StdReplyFrame.StdMessageFrame))
2945 & I2O_MESSAGE_FLAGS_FAIL) {
2946 I2O_UTIL_NOP_MESSAGE Message;
2947 PI2O_UTIL_NOP_MESSAGE Message_Ptr;
2950 MessageOffset = (u_long)
2951 I2O_FAILURE_REPLY_MESSAGE_FRAME_getPreservedMFA(
2952 (PI2O_FAILURE_REPLY_MESSAGE_FRAME)Reply);
2954 * Get the Original Message Frame's address, and get
2955 * it's Transaction Context into our space. (Currently
2956 * unused at original authorship, but better to be
2957 * safe than sorry). Straight copy means that we
2958 * need not concern ourselves with the (optional
2959 * byteswapping) method access.
2961 Reply->StdReplyFrame.TransactionContext =
2962 bus_space_read_4(sc->ha_frame_btag,
2963 sc->ha_frame_bhandle, MessageOffset +
2964 offsetof(I2O_SINGLE_REPLY_MESSAGE_FRAME,
2965 TransactionContext));
2967 * For 64 bit machines, we need to reconstruct the
2970 ccb = (union asr_ccb *)(long)
2971 I2O_MESSAGE_FRAME_getInitiatorContext64(
2972 &(Reply->StdReplyFrame.StdMessageFrame));
2974 * Unique error code for command failure.
2976 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
2977 &(Reply->StdReplyFrame), (u_int16_t)-2);
2979 * Modify the message frame to contain a NOP and
2980 * re-issue it to the controller.
2982 Message_Ptr = (PI2O_UTIL_NOP_MESSAGE)ASR_fillMessage(
2983 &Message, sizeof(I2O_UTIL_NOP_MESSAGE));
2984 #if (I2O_UTIL_NOP != 0)
2985 I2O_MESSAGE_FRAME_setFunction (
2986 &(Message_Ptr->StdMessageFrame),
2990 * Copy the packet out to the Original Message
2992 asr_set_frame(sc, Message_Ptr, MessageOffset,
2993 sizeof(I2O_UTIL_NOP_MESSAGE));
2997 asr_set_ToFIFO(sc, MessageOffset);
3001 * Asynchronous command with no return requirements,
3002 * and a generic handler for immunity against odd error
3003 * returns from the adapter.
3007 * Return Reply so that it can be used for the
3010 asr_set_FromFIFO(sc, ReplyOffset);
3014 /* Welease Wadjah! (and stop timeouts) */
3015 ASR_ccbRemove (sc, ccb);
/* Map I2O detailed status to SCSI status + CAM completion code. */
3017 dsc = I2O_SINGLE_REPLY_MESSAGE_FRAME_getDetailedStatusCode(
3018 &(Reply->StdReplyFrame));
3019 ccb->csio.scsi_status = dsc & I2O_SCSI_DEVICE_DSC_MASK;
3020 ccb->ccb_h.status &= ~CAM_STATUS_MASK;
3023 case I2O_SCSI_DSC_SUCCESS:
3024 ccb->ccb_h.status |= CAM_REQ_CMP;
3027 case I2O_SCSI_DSC_CHECK_CONDITION:
3028 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR |
3032 case I2O_SCSI_DSC_BUSY:
3034 case I2O_SCSI_HBA_DSC_ADAPTER_BUSY:
3036 case I2O_SCSI_HBA_DSC_SCSI_BUS_RESET:
3038 case I2O_SCSI_HBA_DSC_BUS_BUSY:
3039 ccb->ccb_h.status |= CAM_SCSI_BUSY;
3042 case I2O_SCSI_HBA_DSC_SELECTION_TIMEOUT:
3043 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
3046 case I2O_SCSI_HBA_DSC_COMMAND_TIMEOUT:
3048 case I2O_SCSI_HBA_DSC_DEVICE_NOT_PRESENT:
3050 case I2O_SCSI_HBA_DSC_LUN_INVALID:
3052 case I2O_SCSI_HBA_DSC_SCSI_TID_INVALID:
3053 ccb->ccb_h.status |= CAM_CMD_TIMEOUT;
3056 case I2O_SCSI_HBA_DSC_DATA_OVERRUN:
3058 case I2O_SCSI_HBA_DSC_REQUEST_LENGTH_ERROR:
3059 ccb->ccb_h.status |= CAM_DATA_RUN_ERR;
3063 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Compute residual from the adapter-reported transfer count. */
3066 if ((ccb->csio.resid = ccb->csio.dxfer_len) != 0) {
3068 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getTransferCount(
3072 /* Sense data in reply packet */
3073 if (ccb->ccb_h.status & CAM_AUTOSNS_VALID) {
3074 u_int16_t size = I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_getAutoSenseTransferCount(Reply);
/* Clamp sense size to all three limits before copying. */
3077 if (size > sizeof(ccb->csio.sense_data)) {
3078 size = sizeof(ccb->csio.sense_data);
3080 if (size > I2O_SCSI_SENSE_DATA_SZ) {
3081 size = I2O_SCSI_SENSE_DATA_SZ;
3083 if ((ccb->csio.sense_len)
3084 && (size > ccb->csio.sense_len)) {
3085 size = ccb->csio.sense_len;
3087 if (size < ccb->csio.sense_len) {
3088 ccb->csio.sense_resid =
3089 ccb->csio.sense_len - size;
3091 ccb->csio.sense_resid = 0;
3093 bzero(&(ccb->csio.sense_data),
3094 sizeof(ccb->csio.sense_data));
3095 bcopy(Reply->SenseData,
3096 &(ccb->csio.sense_data), size);
3101 * Return Reply so that it can be used for the next command
3102 * since we have no more need for it now
3104 asr_set_FromFIFO(sc, ReplyOffset);
3106 if (ccb->ccb_h.path) {
3107 xpt_done ((union ccb *)ccb);
3115 #undef QueueSize /* Grrrr */
3116 #undef SG_Size /* Grrrr */
3119 * Meant to be included at the bottom of asr.c !!!
3123 * Included here as hard coded. Done because other necessary include
3124 * files utilize C++ comment structures which make them a nuisance to
3125 * include here just to pick up these three typedefs.
3127 typedef U32 DPT_TAG_T;
3128 typedef U32 DPT_MSG_T;
3129 typedef U32 DPT_RTN_T;
3131 #undef SCSI_RESET /* Conflicts with "scsi/scsiconf.h" definition */
3132 #include "dev/asr/osd_unix.h"
3134 #define asr_unit(dev) dev2unit(dev)
3136 static u_int8_t ASR_ctlr_held;
/*
 * Control-device open: reject devices with no attached softc, enforce
 * single-opener exclusivity via the ASR_ctlr_held flag, and require
 * PRIV_DRIVER privilege before granting access.
 * NOTE(review): the error-return/assignment lines between these
 * conditions are elided from this view.
 */
3139 asr_open(struct cdev *dev, int32_t flags, int32_t ifmt, struct thread *td)
3144 if (dev->si_drv1 == NULL) {
3148 if (ASR_ctlr_held) {
3150 } else if ((error = priv_check(td, PRIV_DRIVER)) == 0) {
3158 asr_close(struct cdev *dev, int flags, int ifmt, struct thread *td)
3166 /*-------------------------------------------------------------------------*/
3167 /* Function ASR_queue_i */
3168 /*-------------------------------------------------------------------------*/
3169 /* The Parameters Passed To This Function Are : */
3170 /* Asr_softc_t * : HBA miniport driver's adapter data storage. */
3171 /* PI2O_MESSAGE_FRAME : Msg Structure Pointer For This Command */
3172 /* I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME following the Msg Structure */
3174 /* This Function Will Take The User Request Packet And Convert It To An */
3175 /* I2O MSG And Send It Off To The Adapter. */
3177 /* Return : 0 For OK, Error Code Otherwise */
3178 /*-------------------------------------------------------------------------*/
/*
 * Take a user-space I2O message (`Packet'), copy it into the kernel,
 * dispatch synchronous initialization functions (IOP_RESET,
 * STATUS_GET, OUTBOUND_INIT) immediately, and otherwise build
 * kernel-side scatter/gather shadow buffers, queue the message to the
 * adapter, wait for completion (watching the blink-LED for firmware
 * faults), then copy the reply frame, sense data and any inbound SG
 * buffers back out to user space.
 * Returns 0 on success, an errno otherwise.
 * NOTE(review): error returns and several closing braces are elided
 * from this view; comments describe only the visible code.
 */
3180 ASR_queue_i(Asr_softc_t *sc, PI2O_MESSAGE_FRAME Packet)
3182 union asr_ccb * ccb;
3183 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply;
3184 PI2O_MESSAGE_FRAME Message_Ptr;
3185 PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME Reply_Ptr;
3186 int MessageSizeInBytes;
3187 int ReplySizeInBytes;
3190 /* Scatter Gather buffer list */
3191 struct ioctlSgList_S {
3192 SLIST_ENTRY(ioctlSgList_S) link;
3194 I2O_FLAGS_COUNT FlagsCount;
3195 char KernelSpace[sizeof(long)];
3197 /* Generates a `first' entry */
3198 SLIST_HEAD(ioctlSgListHead_S, ioctlSgList_S) sgList;
/* Refuse to queue while firmware is in a blink-LED fault state. */
3200 if (ASR_getBlinkLedCode(sc)) {
3201 debug_usr_cmd_printf ("Adapter currently in BlinkLed %x\n",
3202 ASR_getBlinkLedCode(sc));
3205 /* Copy in the message into a local allocation */
3206 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (
3207 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3208 debug_usr_cmd_printf (
3209 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3212 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3213 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3214 free(Message_Ptr, M_TEMP);
3215 debug_usr_cmd_printf ("Can't copy in packet errno=%d\n", error);
3218 /* Acquire information to determine type of packet */
3219 MessageSizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)<<2);
3220 /* The offset of the reply information within the user packet */
3221 Reply = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)((char *)Packet
3222 + MessageSizeInBytes);
3224 /* Check if the message is a synchronous initialization command */
3225 s = I2O_MESSAGE_FRAME_getFunction(Message_Ptr);
3226 free(Message_Ptr, M_TEMP);
/* Synchronous functions are handled here and return immediately. */
3229 case I2O_EXEC_IOP_RESET:
3232 status = ASR_resetIOP(sc);
3233 ReplySizeInBytes = sizeof(status);
3234 debug_usr_cmd_printf ("resetIOP done\n");
3235 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3239 case I2O_EXEC_STATUS_GET:
3240 { PI2O_EXEC_STATUS_GET_REPLY status;
3242 status = &sc->ha_statusmem->status;
3243 if (ASR_getStatus(sc) == NULL) {
3244 debug_usr_cmd_printf ("getStatus failed\n");
3247 ReplySizeInBytes = sizeof(status);
3248 debug_usr_cmd_printf ("getStatus done\n");
3249 return (copyout ((caddr_t)status, (caddr_t)Reply,
3253 case I2O_EXEC_OUTBOUND_INIT:
3256 status = ASR_initOutBound(sc);
3257 ReplySizeInBytes = sizeof(status);
3258 debug_usr_cmd_printf ("intOutBound done\n");
3259 return (copyout ((caddr_t)&status, (caddr_t)Reply,
3264 /* Determine if the message size is valid */
3265 if ((MessageSizeInBytes < sizeof(I2O_MESSAGE_FRAME))
3266 || (MAX_INBOUND_SIZE < MessageSizeInBytes)) {
3267 debug_usr_cmd_printf ("Packet size %d incorrect\n",
3268 MessageSizeInBytes);
/* Re-copy the full message now that its true size is known. */
3272 if ((Message_Ptr = (PI2O_MESSAGE_FRAME)malloc (MessageSizeInBytes,
3273 M_TEMP, M_WAITOK)) == NULL) {
3274 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
3275 MessageSizeInBytes);
3278 if ((error = copyin ((caddr_t)Packet, (caddr_t)Message_Ptr,
3279 MessageSizeInBytes)) != 0) {
3280 free(Message_Ptr, M_TEMP);
3281 debug_usr_cmd_printf ("Can't copy in packet[%d] errno=%d\n",
3282 MessageSizeInBytes, error);
3286 /* Check the size of the reply frame, and start constructing */
3288 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3289 sizeof(I2O_MESSAGE_FRAME), M_TEMP, M_WAITOK)) == NULL) {
3290 free(Message_Ptr, M_TEMP);
3291 debug_usr_cmd_printf (
3292 "Failed to acquire I2O_MESSAGE_FRAME memory\n");
3295 if ((error = copyin ((caddr_t)Reply, (caddr_t)Reply_Ptr,
3296 sizeof(I2O_MESSAGE_FRAME))) != 0) {
3297 free(Reply_Ptr, M_TEMP);
3298 free(Message_Ptr, M_TEMP);
3299 debug_usr_cmd_printf (
3300 "Failed to copy in reply frame, errno=%d\n",
/* Reply size comes from the user-supplied reply frame header. */
3304 ReplySizeInBytes = (I2O_MESSAGE_FRAME_getMessageSize(
3305 &(Reply_Ptr->StdReplyFrame.StdMessageFrame)) << 2);
3306 free(Reply_Ptr, M_TEMP);
3307 if (ReplySizeInBytes < sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME)) {
3308 free(Message_Ptr, M_TEMP);
3309 debug_usr_cmd_printf (
3310 "Failed to copy in reply frame[%d], errno=%d\n",
3311 ReplySizeInBytes, error);
3315 if ((Reply_Ptr = (PI2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)malloc (
3316 ((ReplySizeInBytes > sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME))
3317 ? ReplySizeInBytes : sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)),
3318 M_TEMP, M_WAITOK)) == NULL) {
3319 free(Message_Ptr, M_TEMP);
3320 debug_usr_cmd_printf ("Failed to acquire frame[%d] memory\n",
/* Pre-fill the reply with contexts and the REPLY flag set. */
3324 (void)ASR_fillMessage((void *)Reply_Ptr, ReplySizeInBytes);
3325 Reply_Ptr->StdReplyFrame.StdMessageFrame.InitiatorContext
3326 = Message_Ptr->InitiatorContext;
3327 Reply_Ptr->StdReplyFrame.TransactionContext
3328 = ((PI2O_PRIVATE_MESSAGE_FRAME)Message_Ptr)->TransactionContext;
3329 I2O_MESSAGE_FRAME_setMsgFlags(
3330 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3331 I2O_MESSAGE_FRAME_getMsgFlags(
3332 &(Reply_Ptr->StdReplyFrame.StdMessageFrame))
3333 | I2O_MESSAGE_FLAGS_REPLY);
3335 /* Check if the message is a special case command */
3336 switch (I2O_MESSAGE_FRAME_getFunction(Message_Ptr)) {
3337 case I2O_EXEC_SYS_TAB_SET: /* Special Case of empty Scatter Gather */
3338 if (MessageSizeInBytes == ((I2O_MESSAGE_FRAME_getVersionOffset(
3339 Message_Ptr) & 0xF0) >> 2)) {
3340 free(Message_Ptr, M_TEMP);
3341 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3342 &(Reply_Ptr->StdReplyFrame),
3343 (ASR_setSysTab(sc) != CAM_REQ_CMP));
3344 I2O_MESSAGE_FRAME_setMessageSize(
3345 &(Reply_Ptr->StdReplyFrame.StdMessageFrame),
3346 sizeof(I2O_SINGLE_REPLY_MESSAGE_FRAME));
3347 error = copyout ((caddr_t)Reply_Ptr, (caddr_t)Reply,
3349 free(Reply_Ptr, M_TEMP);
3354 /* Deal in the general case */
3355 /* First allocate and optionally copy in each scatter gather element */
3356 SLIST_INIT(&sgList);
3357 if ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0) != 0) {
3358 PI2O_SGE_SIMPLE_ELEMENT sg;
3361 * since this code is reused in several systems, code
3362 * efficiency is greater by using a shift operation rather
3363 * than a divide by sizeof(u_int32_t).
3365 sg = (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3366 + ((I2O_MESSAGE_FRAME_getVersionOffset(Message_Ptr) & 0xF0)
/* Walk every simple SG element inside the message frame. */
3368 while (sg < (PI2O_SGE_SIMPLE_ELEMENT)(((caddr_t)Message_Ptr)
3369 + MessageSizeInBytes)) {
3373 if ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3374 & I2O_SGL_FLAGS_SIMPLE_ADDRESS_ELEMENT) == 0) {
3378 len = I2O_FLAGS_COUNT_getCount(&(sg->FlagsCount));
3379 debug_usr_cmd_printf ("SG[%d] = %x[%d]\n",
3380 sg - (PI2O_SGE_SIMPLE_ELEMENT)((char *)Message_Ptr
3381 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3382 Message_Ptr) & 0xF0) >> 2)),
3383 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg), len);
/* Shadow each user buffer with a kernel allocation. */
3385 if ((elm = (struct ioctlSgList_S *)malloc (
3386 sizeof(*elm) - sizeof(elm->KernelSpace) + len,
3387 M_TEMP, M_WAITOK)) == NULL) {
3388 debug_usr_cmd_printf (
3389 "Failed to allocate SG[%d]\n", len);
3393 SLIST_INSERT_HEAD(&sgList, elm, link);
3394 elm->FlagsCount = sg->FlagsCount;
3395 elm->UserSpace = (caddr_t)
3396 (I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg));
3397 v = elm->KernelSpace;
3398 /* Copy in outgoing data (DIR bit could be invalid) */
3399 if ((error = copyin (elm->UserSpace, (caddr_t)v, len))
3404 * If the buffer is not contiguous, lets
3405 * break up the scatter/gather entries.
3408 && (sg < (PI2O_SGE_SIMPLE_ELEMENT)
3409 (((caddr_t)Message_Ptr) + MAX_INBOUND_SIZE))) {
3410 int next, base, span;
3413 next = base = KVTOPHYS(v);
3414 I2O_SGE_SIMPLE_ELEMENT_setPhysicalAddress(sg,
3417 /* How far can we go physically contiguously */
3418 while ((len > 0) && (base == next)) {
3421 next = trunc_page(base) + PAGE_SIZE;
3432 /* Construct the Flags */
3433 I2O_FLAGS_COUNT_setCount(&(sg->FlagsCount),
3436 int flags = I2O_FLAGS_COUNT_getFlags(
3437 &(elm->FlagsCount));
3438 /* Any remaining length? */
3441 ~(I2O_SGL_FLAGS_END_OF_BUFFER
3442 | I2O_SGL_FLAGS_LAST_ELEMENT);
3444 I2O_FLAGS_COUNT_setFlags(
3445 &(sg->FlagsCount), flags);
3448 debug_usr_cmd_printf ("sg[%d] = %x[%d]\n",
3449 sg - (PI2O_SGE_SIMPLE_ELEMENT)
3450 ((char *)Message_Ptr
3451 + ((I2O_MESSAGE_FRAME_getVersionOffset(
3452 Message_Ptr) & 0xF0) >> 2)),
3453 I2O_SGE_SIMPLE_ELEMENT_getPhysicalAddress(sg),
3460 * Incrementing requires resizing of the
3461 * packet, and moving up the existing SG
/* Splitting an element grows the frame by one SG entry. */
3465 MessageSizeInBytes += sizeof(*sg);
3466 I2O_MESSAGE_FRAME_setMessageSize(Message_Ptr,
3467 I2O_MESSAGE_FRAME_getMessageSize(Message_Ptr)
3468 + (sizeof(*sg) / sizeof(U32)));
3470 PI2O_MESSAGE_FRAME NewMessage_Ptr;
3473 = (PI2O_MESSAGE_FRAME)
3474 malloc (MessageSizeInBytes,
3475 M_TEMP, M_WAITOK)) == NULL) {
3476 debug_usr_cmd_printf (
3477 "Failed to acquire frame[%d] memory\n",
3478 MessageSizeInBytes);
/* Duplicate the element being split, then swap frames. */
3482 span = ((caddr_t)sg)
3483 - (caddr_t)Message_Ptr;
3484 bcopy(Message_Ptr,NewMessage_Ptr, span);
3485 bcopy((caddr_t)(sg-1),
3486 ((caddr_t)NewMessage_Ptr) + span,
3487 MessageSizeInBytes - span);
3488 free(Message_Ptr, M_TEMP);
3489 sg = (PI2O_SGE_SIMPLE_ELEMENT)
3490 (((caddr_t)NewMessage_Ptr) + span);
3491 Message_Ptr = NewMessage_Ptr;
3495 || ((I2O_FLAGS_COUNT_getFlags(&(sg->FlagsCount))
3496 & I2O_SGL_FLAGS_LAST_ELEMENT) != 0)) {
/* Error path: release every shadow buffer built so far. */
3502 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3503 SLIST_REMOVE_HEAD(&sgList, link);
3506 free(Reply_Ptr, M_TEMP);
3507 free(Message_Ptr, M_TEMP);
3512 debug_usr_cmd_printf ("Inbound: ");
3513 debug_usr_cmd_dump_message(Message_Ptr);
3515 /* Send the command */
3516 if ((ccb = asr_alloc_ccb (sc)) == NULL) {
3517 /* Free up in-kernel buffers */
3518 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3519 SLIST_REMOVE_HEAD(&sgList, link);
3522 free(Reply_Ptr, M_TEMP);
3523 free(Message_Ptr, M_TEMP);
3528 * We do not need any (optional byteswapping) method access to
3529 * the Initiator context field.
3531 I2O_MESSAGE_FRAME_setInitiatorContext64(
3532 (PI2O_MESSAGE_FRAME)Message_Ptr, (long)ccb);
3534 (void)ASR_queue (sc, (PI2O_MESSAGE_FRAME)Message_Ptr);
3536 free(Message_Ptr, M_TEMP);
3539 * Wait for the board to report a finished instruction.
/* Poll once a second; a blink-LED code means firmware faulted. */
3542 while ((ccb->ccb_h.status & CAM_STATUS_MASK) == CAM_REQ_INPROG) {
3543 if (ASR_getBlinkLedCode(sc)) {
3545 printf ("asr%d: Blink LED 0x%x resetting adapter\n",
3546 cam_sim_unit(xpt_path_sim(ccb->ccb_h.path)),
3547 ASR_getBlinkLedCode(sc));
3548 if (ASR_reset (sc) == ENXIO) {
3549 /* Command Cleanup */
3550 ASR_ccbRemove(sc, ccb);
3553 /* Free up in-kernel buffers */
3554 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3555 SLIST_REMOVE_HEAD(&sgList, link);
3558 free(Reply_Ptr, M_TEMP);
3562 /* Check every second for BlinkLed */
3563 /* There is no PRICAM, but outwardly PRIBIO is functional */
3564 tsleep(ccb, PRIBIO, "asr", hz);
3568 debug_usr_cmd_printf ("Outbound: ");
3569 debug_usr_cmd_dump_message(Reply_Ptr);
3571 I2O_SINGLE_REPLY_MESSAGE_FRAME_setDetailedStatusCode(
3572 &(Reply_Ptr->StdReplyFrame),
3573 (ccb->ccb_h.status != CAM_REQ_CMP));
3575 if (ReplySizeInBytes >= (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3576 - I2O_SCSI_SENSE_DATA_SZ - sizeof(U32))) {
3577 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setTransferCount(Reply_Ptr,
3578 ccb->csio.dxfer_len - ccb->csio.resid);
3580 if ((ccb->ccb_h.status & CAM_AUTOSNS_VALID) && (ReplySizeInBytes
3581 > (sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3582 - I2O_SCSI_SENSE_DATA_SZ))) {
3583 int size = ReplySizeInBytes
3584 - sizeof(I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME)
3585 - I2O_SCSI_SENSE_DATA_SZ;
3587 if (size > sizeof(ccb->csio.sense_data)) {
3588 size = sizeof(ccb->csio.sense_data);
3590 if (size < ccb->csio.sense_len) {
3591 ccb->csio.sense_resid = ccb->csio.sense_len - size;
3593 ccb->csio.sense_resid = 0;
3595 bzero(&(ccb->csio.sense_data), sizeof(ccb->csio.sense_data));
3596 bcopy(&(ccb->csio.sense_data), Reply_Ptr->SenseData, size);
3597 I2O_SCSI_ERROR_REPLY_MESSAGE_FRAME_setAutoSenseTransferCount(
3601 /* Free up in-kernel buffers */
3602 while ((elm = SLIST_FIRST(&sgList)) != NULL) {
3603 /* Copy out as necessary */
3605 /* DIR bit considered `valid', error due to ignorance works */
3606 && ((I2O_FLAGS_COUNT_getFlags(&(elm->FlagsCount))
3607 & I2O_SGL_FLAGS_DIR) == 0)) {
3608 error = copyout((caddr_t)(elm->KernelSpace),
3610 I2O_FLAGS_COUNT_getCount(&(elm->FlagsCount)));
3612 SLIST_REMOVE_HEAD(&sgList, link);
3616 /* Copy reply frame to user space */
3617 error = copyout((caddr_t)Reply_Ptr, (caddr_t)Reply,
3620 free(Reply_Ptr, M_TEMP);
3626 /*----------------------------------------------------------------------*/
3627 /* Function asr_ioctl */
3628 /*----------------------------------------------------------------------*/
3629 /* The parameters passed to this function are : */
3630 /* dev : Device number. */
3631 /* cmd : Ioctl Command */
3632 /* data : User Argument Passed In. */
3633 /* flag : Mode Parameter */
3634 /*              td   : Calling thread                              */
3636 /* This function is the user interface into this adapter driver */
3638 /* Return : zero if OK, error code if not */
3639 /*----------------------------------------------------------------------*/
3642 asr_ioctl(struct cdev *dev, u_long cmd, caddr_t data, int flag, struct thread *td)
3644 Asr_softc_t *sc = dev->si_drv1;
3646 #ifdef ASR_IOCTL_COMPAT
3648 #endif /* ASR_IOCTL_COMPAT */
3654 #ifdef ASR_IOCTL_COMPAT
3655 #if (dsDescription_size != 50)
3656 case DPT_SIGNATURE + ((50 - dsDescription_size) << 16):
3658 if (cmd & 0xFFFF0000) {
3659 bcopy(&ASR_sig, data, sizeof(dpt_sig_S));
3662 /* Traditional version of the ioctl interface */
3663 case DPT_SIGNATURE & 0x0000FFFF:
3665 return (copyout((caddr_t)(&ASR_sig), *((caddr_t *)data),
3666 sizeof(dpt_sig_S)));
3668 /* Traditional version of the ioctl interface */
3669 case DPT_CTRLINFO & 0x0000FFFF:
3670 case DPT_CTRLINFO: {
3673 u_int16_t drvrHBAnum;
3675 u_int16_t blinkState;
3677 u_int8_t pciDeviceNum;
3679 u_int16_t Interrupt;
3680 u_int32_t reserved1;
3681 u_int32_t reserved2;
3682 u_int32_t reserved3;
3685 bzero(&CtlrInfo, sizeof(CtlrInfo));
3686 CtlrInfo.length = sizeof(CtlrInfo) - sizeof(u_int16_t);
3687 CtlrInfo.drvrHBAnum = asr_unit(dev);
3688 CtlrInfo.baseAddr = sc->ha_Base;
3689 i = ASR_getBlinkLedCode (sc);
3693 CtlrInfo.blinkState = i;
3694 CtlrInfo.pciBusNum = sc->ha_pciBusNum;
3695 CtlrInfo.pciDeviceNum = sc->ha_pciDeviceNum;
3696 #define FLG_OSD_PCI_VALID 0x0001
3697 #define FLG_OSD_DMA 0x0002
3698 #define FLG_OSD_I2O 0x0004
3699 CtlrInfo.hbaFlags = FLG_OSD_PCI_VALID|FLG_OSD_DMA|FLG_OSD_I2O;
3700 CtlrInfo.Interrupt = sc->ha_irq;
3701 #ifdef ASR_IOCTL_COMPAT
3702 if (cmd & 0xffff0000)
3703 bcopy(&CtlrInfo, data, sizeof(CtlrInfo));
3705 #endif /* ASR_IOCTL_COMPAT */
3706 error = copyout(&CtlrInfo, *(caddr_t *)data, sizeof(CtlrInfo));
3709 /* Traditional version of the ioctl interface */
3710 case DPT_SYSINFO & 0x0000FFFF:
3713 #ifdef ASR_IOCTL_COMPAT
3715 /* Kernel Specific ptok `hack' */
3716 #define ptok(a) ((char *)(uintptr_t)(a) + KERNBASE)
3718 bzero(&Info, sizeof(Info));
3720 /* Appears I am the only person in the Kernel doing this */
3728 Info.drive0CMOS = j;
3735 Info.drive1CMOS = j;
3737 Info.numDrives = *((char *)ptok(0x475));
3738 #else /* ASR_IOCTL_COMPAT */
3739 bzero(&Info, sizeof(Info));
3740 #endif /* ASR_IOCTL_COMPAT */
3742 Info.processorFamily = ASR_sig.dsProcessorFamily;
3743 #if defined(__i386__)
3745 case CPU_386SX: case CPU_386:
3746 Info.processorType = PROC_386; break;
3747 case CPU_486SX: case CPU_486:
3748 Info.processorType = PROC_486; break;
3750 Info.processorType = PROC_PENTIUM; break;
3752 Info.processorType = PROC_SEXIUM; break;
3756 Info.osType = OS_BSDI_UNIX;
3757 Info.osMajorVersion = osrelease[0] - '0';
3758 Info.osMinorVersion = osrelease[2] - '0';
3759 /* Info.osRevision = 0; */
3760 /* Info.osSubRevision = 0; */
3761 Info.busType = SI_PCI_BUS;
3762 Info.flags = SI_OSversionValid|SI_BusTypeValid|SI_NO_SmartROM;
3764 #ifdef ASR_IOCTL_COMPAT
3765 Info.flags |= SI_CMOS_Valid | SI_NumDrivesValid;
3766 /* Go Out And Look For I2O SmartROM */
3767 for(j = 0xC8000; j < 0xE0000; j += 2048) {
3771 if (*((unsigned short *)cp) != 0xAA55) {
3774 j += (cp[2] * 512) - 2048;
3775 if ((*((u_long *)(cp + 6))
3776 != ('S' + (' ' * 256) + (' ' * 65536L)))
3777 || (*((u_long *)(cp + 10))
3778 != ('I' + ('2' * 256) + ('0' * 65536L)))) {
3782 for (k = 0; k < 64; ++k) {
3783 if (*((unsigned short *)cp)
3784 == (' ' + ('v' * 256))) {
3789 Info.smartROMMajorVersion
3790 = *((unsigned char *)(cp += 4)) - '0';
3791 Info.smartROMMinorVersion
3792 = *((unsigned char *)(cp += 2));
3793 Info.smartROMRevision
3794 = *((unsigned char *)(++cp));
3795 Info.flags |= SI_SmartROMverValid;
3796 Info.flags &= ~SI_NO_SmartROM;
3800 /* Get The Conventional Memory Size From CMOS */
3806 Info.conventionalMemSize = j;
3808 /* Get The Extended Memory Found At Power On From CMOS */
3814 Info.extendedMemSize = j;
3815 Info.flags |= SI_MemorySizeValid;
3817 /* Copy Out The Info Structure To The User */
3818 if (cmd & 0xFFFF0000)
3819 bcopy(&Info, data, sizeof(Info));
3821 #endif /* ASR_IOCTL_COMPAT */
3822 error = copyout(&Info, *(caddr_t *)data, sizeof(Info));
3825 /* Get The BlinkLED State */
3827 i = ASR_getBlinkLedCode (sc);
3830 #ifdef ASR_IOCTL_COMPAT
3831 if (cmd & 0xffff0000)
3832 bcopy(&i, data, sizeof(i));
3834 #endif /* ASR_IOCTL_COMPAT */
3835 error = copyout(&i, *(caddr_t *)data, sizeof(i));
3838 /* Send an I2O command */
3840 return (ASR_queue_i(sc, *((PI2O_MESSAGE_FRAME *)data)));
3842 /* Reset and re-initialize the adapter */
3844 return (ASR_reset(sc));
3846 /* Rescan the LCT table and resynchronize the information */
3848 return (ASR_rescan(sc));