1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
5  * Copyright (c) 2004-05 Vinod Kashyap
6  * All rights reserved.
7  *
8  * Redistribution and use in source and binary forms, with or without
9  * modification, are permitted provided that the following conditions
10  * are met:
11  * 1. Redistributions of source code must retain the above copyright
12  *    notice, this list of conditions and the following disclaimer.
13  * 2. Redistributions in binary form must reproduce the above copyright
14  *    notice, this list of conditions and the following disclaimer in the
15  *    documentation and/or other materials provided with the distribution.
16  *
17  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27  * SUCH DAMAGE.
28  *
29  *      $FreeBSD$
30  */
31
32 /*
33  * AMCC's 3ware driver for 9000 series storage controllers.
34  *
35  * Author: Vinod Kashyap
36  * Modifications by: Adam Radford
37  * Modifications by: Manjunath Ranganathaiah
38  */
39
40
41 /*
42  * Common Layer I/O functions.
43  */
44
45
46 #include "tw_osl_share.h"
47 #include "tw_cl_share.h"
48 #include "tw_cl_fwif.h"
49 #include "tw_cl_ioctl.h"
50 #include "tw_cl.h"
51 #include "tw_cl_externs.h"
52 #include "tw_osl_ioctl.h"
53
54 #include <cam/cam.h>
55 #include <cam/cam_ccb.h>
56 #include <cam/cam_xpt_sim.h>
57
58
59
60 /*
61  * Function name:       tw_cl_start_io
62  * Description:         Interface to OS Layer for accepting SCSI requests.
63  *
64  * Input:               ctlr_handle     -- controller handle
65  *                      req_pkt         -- OSL built request packet
66  *                      req_handle      -- request handle
67  * Output:              None
68  * Return value:        0       -- success
69  *                      non-zero-- failure
70  */
71 TW_INT32
72 tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
73         struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
74 {
75         struct tw_cli_ctlr_context              *ctlr;
76         struct tw_cli_req_context               *req;
77         struct tw_cl_command_9k                 *cmd;
78         struct tw_cl_scsi_req_packet            *scsi_req;
79         TW_INT32                                error = TW_CL_ERR_REQ_SUCCESS;
80
81         tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
82
83         ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
84
85         /*
86          * If working with a firmware version that does not support multiple
87          * luns, and this request is directed at a non-zero lun, error it
88          * back right away.
89          */
90         if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
91                 (ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
92                 req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
93                         TW_CL_ERR_REQ_SCSI_ERROR);
94                 req_pkt->tw_osl_callback(req_handle);
95                 return(TW_CL_ERR_REQ_SUCCESS);
96         }
97
98         if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
100                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
101                         "Out of request context packets: returning busy");
102                 return(TW_OSL_EBUSY);
103         }
104
105         req_handle->cl_req_ctxt = req;
106         req->req_handle = req_handle;
107         req->orig_req = req_pkt;
108         req->tw_cli_callback = tw_cli_complete_io;
109
110         req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
111         req->flags |= TW_CLI_REQ_FLAGS_9K;
112
113         scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
114
115         /* Build the cmd pkt. */
116         cmd = &(req->cmd_pkt->command.cmd_pkt_9k);
117
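        /* Record the size of the command packet header, in bytes. */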
118         req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
119
120         cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
121         cmd->unit = (TW_UINT8)(scsi_req->unit);
122         cmd->lun_l4__req_id = TW_CL_SWAP16(
123                 BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
124         cmd->status = 0;
125         cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
126         tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);
127
128         if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
129                 TW_UINT32       num_sgl_entries;
130
131                 req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
132                         &num_sgl_entries);
133                 cmd->lun_h4__sgl_entries =
134                         TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
135                                 num_sgl_entries));
136         } else {
137                 cmd->lun_h4__sgl_entries =
138                         TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
139                                 scsi_req->sgl_entries));
140                 tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
141                         cmd->sg_list, scsi_req->sgl_entries);
142         }
143
144         if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
145                 (ctlr->reset_in_progress)) {
146                 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
147                 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
148                         TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
149         } else if ((error = tw_cli_submit_cmd(req))) {
150                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
151                         "Could not start request. request = %p, error = %d",
152                         req, error);
153                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
154         }
155         return(error);
156 }
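/*
 * Illustrative sketch (not part of the driver): roughly what the OS Layer is
 * expected to fill in before calling tw_cl_start_io().  Only fields that
 * tw_cl_start_io() consumes above are shown; my_osl_request, my_osl_complete,
 * target_id and nsegs are hypothetical names, and error handling is omitted.
 *
 *        struct tw_cl_req_packet         *req_pkt;
 *        struct tw_cl_scsi_req_packet    *scsi_req;
 *
 *        req_pkt = &(my_osl_request->cl_req_pkt);   // must stay valid until the
 *        tw_osl_memzero(req_pkt, sizeof(*req_pkt)); // completion callback runs
 *        req_pkt->tw_osl_callback = my_osl_complete;
 *        scsi_req = &(req_pkt->gen_req_pkt.scsi_req);
 *        scsi_req->unit = target_id;
 *        scsi_req->lun = lun;
 *        scsi_req->cdb = cdb_ptr;        // or copy, depending on how the OSL keeps the CDB
 *        scsi_req->cdb_len = cdb_len;
 *        scsi_req->sgl_entries = nsegs;  // alternatively, set TW_CL_REQ_CALLBACK_FOR_SGLIST
 *        scsi_req->sg_list = sg_list;    // in req_pkt->flags and supply tw_osl_sgl_callback
 *        error = tw_cl_start_io(ctlr_handle, req_pkt, req_handle);
 */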
157
158
159
160 /*
161  * Function name:       tw_cli_submit_cmd
162  * Description:         Submits a cmd to firmware.
163  *
164  * Input:               req     -- ptr to CL internal request context
165  * Output:              None
166  * Return value:        0       -- success
167  *                      non-zero-- failure
168  */
169 TW_INT32
170 tw_cli_submit_cmd(struct tw_cli_req_context *req)
171 {
172         struct tw_cli_ctlr_context      *ctlr = req->ctlr;
173         struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
174         TW_UINT32                       status_reg;
175         TW_INT32                        error = 0;
176
177         tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");
178
179         /* Serialize access to the controller cmd queue. */
180         tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
181
182         /* For 9650SE first write low 4 bytes */
183         if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
184             (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
185                 tw_osl_write_reg(ctlr_handle,
186                                  TWA_COMMAND_QUEUE_OFFSET_LOW,
187                                  (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
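        /*
         * Note that only the low 32 bits of the command's physical address
         * have been written at this point; the matching high 32 bits are
         * written further below, after the queue-full check.
         */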
188
189         status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
190         if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
191                 struct tw_cl_req_packet *req_pkt =
192                         (struct tw_cl_req_packet *)(req->orig_req);
193
194                 tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
195                         "Cmd queue full");
196
197                 if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
198                         || ((req_pkt) &&
199                         (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
200                         ) {
201                         if (req->state != TW_CLI_REQ_STATE_PENDING) {
202                                 tw_cli_dbg_printf(2, ctlr_handle,
203                                         tw_osl_cur_func(),
204                                         "pending internal/ioctl request");
205                                 req->state = TW_CLI_REQ_STATE_PENDING;
206                                 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
207                                 /* Unmask command interrupt. */
208                                 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
209                                         TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
210                         } else
211                                 error = TW_OSL_EBUSY;
212                 } else {
213                         error = TW_OSL_EBUSY;
214                 }
215         } else {
216                 tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
217                         "Submitting command");
218
219                 /* Insert command into busy queue */
220                 req->state = TW_CLI_REQ_STATE_BUSY;
221                 tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);
222
223                 if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
224                     (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
225                         /* Now write the high 4 bytes */
226                         tw_osl_write_reg(ctlr_handle, 
227                                          TWA_COMMAND_QUEUE_OFFSET_HIGH,
228                                          (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
229                 } else {
230                         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
231                                 /* First write the low 4 bytes, then the high 4. */
232                                 tw_osl_write_reg(ctlr_handle,
233                                                  TWA_COMMAND_QUEUE_OFFSET_LOW,
234                                                  (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
235                                 tw_osl_write_reg(ctlr_handle, 
236                                                  TWA_COMMAND_QUEUE_OFFSET_HIGH,
237                                                  (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
238                         } else
239                                 tw_osl_write_reg(ctlr_handle, 
240                                                  TWA_COMMAND_QUEUE_OFFSET,
241                                                  (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
242                 }
243         }
244
245         tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
246
247         return(error);
248 }
249
250
251
252 /*
253  * Function name:       tw_cl_fw_passthru
254  * Description:         Interface to OS Layer for accepting firmware
255  *                      passthru requests.
256  * Input:               ctlr_handle     -- controller handle
257  *                      req_pkt         -- OSL built request packet
258  *                      req_handle      -- request handle
259  * Output:              None
260  * Return value:        0       -- success
261  *                      non-zero-- failure
262  */
263 TW_INT32
264 tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
265         struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
266 {
267         struct tw_cli_ctlr_context              *ctlr;
268         struct tw_cli_req_context               *req;
269         union tw_cl_command_7k                  *cmd_7k;
270         struct tw_cl_command_9k                 *cmd_9k;
271         struct tw_cl_passthru_req_packet        *pt_req;
272         TW_UINT8                                opcode;
273         TW_UINT8                                sgl_offset;
274         TW_VOID                                 *sgl = TW_CL_NULL;
275         TW_INT32                                error = TW_CL_ERR_REQ_SUCCESS;
276
277         tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");
278
279         ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
280
281         if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL) {
283                 tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
284                         "Out of request context packets: returning busy");
285                 return(TW_OSL_EBUSY);
286         }
287
288         req_handle->cl_req_ctxt = req;
289         req->req_handle = req_handle;
290         req->orig_req = req_pkt;
291         req->tw_cli_callback = tw_cli_complete_io;
292
293         req->flags |= TW_CLI_REQ_FLAGS_PASSTHRU;
294
295         pt_req = &(req_pkt->gen_req_pkt.pt_req);
296
297         tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
298                 pt_req->cmd_pkt_length);
299         /* Build the cmd pkt. */
300         if ((opcode = GET_OPCODE(((TW_UINT8 *)
301                 (pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
302                         == TWA_FW_CMD_EXECUTE_SCSI) {
303                 TW_UINT16       lun_l4, lun_h4;
304
305                 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
306                         "passthru: 9k cmd pkt");
307                 req->flags |= TW_CLI_REQ_FLAGS_9K;
308                 cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
309                 lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
310                 lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
311                 cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
312                         BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
313                 if (pt_req->sgl_entries) {
314                         cmd_9k->lun_h4__sgl_entries =
315                                 TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
316                                         pt_req->sgl_entries));
317                         sgl = (TW_VOID *)(cmd_9k->sg_list);
318                 }
319         } else {
320                 tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
321                         "passthru: 7k cmd pkt");
322                 cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
323                 cmd_7k->generic.request_id =
324                         (TW_UINT8)(TW_CL_SWAP16(req->request_id));
325                 if ((sgl_offset =
326                         GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
327                         if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
328                                 sgl = (((TW_UINT32 *)cmd_7k) + cmd_7k->generic.size);
329                         else
330                                 sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
331                         cmd_7k->generic.size += pt_req->sgl_entries *
332                                 ((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
333                 }
334         }
335
336         if (sgl)
337                 tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
338                         sgl, pt_req->sgl_entries);
339
340         if (((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL) ||
341                 (ctlr->reset_in_progress)) {
342                 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
343                 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
344                         TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
345         } else if ((error = tw_cli_submit_cmd(req))) {
346                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
347                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
348                         0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
349                         "Failed to start passthru command",
350                         "error = %d", error);
351                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
352         }
353         return(error);
354 }
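/*
 * Note on the passthru path above: the OS Layer supplies a fully built
 * firmware command packet in pt_req->cmd_pkt (of pt_req->cmd_pkt_length
 * bytes).  The Common Layer only patches in its own request ID and, if
 * pt_req->sgl_entries is non-zero, fills in the scatter/gather list from
 * pt_req->sg_list.  Whether the packet is handled as a 9000-series or a
 * 7000-series command is decided solely by the opcode byte that follows the
 * command header.
 */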
355
356
357
358 /*
359  * Function name:       tw_cl_ioctl
360  * Description:         Handler of CL supported ioctl cmds.
361  *
362  * Input:               ctlr    -- ptr to per ctlr structure
363  *                      cmd     -- ioctl cmd
364  *                      buf     -- ptr to buffer in kernel memory, which is
365  *                                 a copy of the input buffer in user-space
366  * Output:              buf     -- ptr to buffer in kernel memory, which will
367  *                                 need to be copied to the output buffer in
368  *                                 user-space
369  * Return value:        0       -- success
370  *                      non-zero-- failure
371  */
372 TW_INT32
373 tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, u_long cmd, TW_VOID *buf)
374 {
375         struct tw_cli_ctlr_context      *ctlr =
376                 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
377         struct tw_cl_ioctl_packet       *user_buf =
378                 (struct tw_cl_ioctl_packet *)buf;
379         struct tw_cl_event_packet       event_buf;
380         TW_INT32                        event_index;
381         TW_INT32                        start_index;
382         TW_INT32                        error = TW_OSL_ESUCCESS;
383
384         tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");
385
386         /* Serialize access to the AEN queue and the ioctl lock. */
387         tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);
388
389         switch (cmd) {
390         case TW_CL_IOCTL_GET_FIRST_EVENT:
391                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
392                         "Get First Event");
393
394                 if (ctlr->aen_q_wrapped) {
395                         if (ctlr->aen_q_overflow) {
396                                 /*
397                                  * The AEN queue has wrapped, even before some
398                                  * events could be retrieved.  Let the caller
399                                  * know that some AENs were missed.
400                                  */
401                                 user_buf->driver_pkt.status =
402                                         TW_CL_ERROR_AEN_OVERFLOW;
403                                 ctlr->aen_q_overflow = TW_CL_FALSE;
404                         } else
405                                 user_buf->driver_pkt.status = 0;
406                         event_index = ctlr->aen_head;
407                 } else {
408                         if (ctlr->aen_head == ctlr->aen_tail) {
409                                 user_buf->driver_pkt.status =
410                                         TW_CL_ERROR_AEN_NO_EVENTS;
411                                 break;
412                         }
413                         user_buf->driver_pkt.status = 0;
414                         event_index = ctlr->aen_tail;   /* = 0 */
415                 }
416                 tw_osl_memcpy(user_buf->data_buf,
417                         &(ctlr->aen_queue[event_index]),
418                         sizeof(struct tw_cl_event_packet));
419
420                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
421
422                 break;
423
424
425         case TW_CL_IOCTL_GET_LAST_EVENT:
426                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
427                         "Get Last Event");
428
429                 if (ctlr->aen_q_wrapped) {
430                         if (ctlr->aen_q_overflow) {
431                                 /*
432                                  * The AEN queue has wrapped, even before some
433                                  * events could be retrieved.  Let the caller
434                                  * know that some AENs were missed.
435                                  */
436                                 user_buf->driver_pkt.status =
437                                         TW_CL_ERROR_AEN_OVERFLOW;
438                                 ctlr->aen_q_overflow = TW_CL_FALSE;
439                         } else
440                                 user_buf->driver_pkt.status = 0;
441                 } else {
442                         if (ctlr->aen_head == ctlr->aen_tail) {
443                                 user_buf->driver_pkt.status =
444                                         TW_CL_ERROR_AEN_NO_EVENTS;
445                                 break;
446                         }
447                         user_buf->driver_pkt.status = 0;
448                 }
449                 event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
450                         ctlr->max_aens_supported;
451
452                 tw_osl_memcpy(user_buf->data_buf,
453                         &(ctlr->aen_queue[event_index]),
454                         sizeof(struct tw_cl_event_packet));
455
456                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
457                 
458                 break;
459
460
461         case TW_CL_IOCTL_GET_NEXT_EVENT:
462                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
463                         "Get Next Event");
464
465                 user_buf->driver_pkt.status = 0;
466                 if (ctlr->aen_q_wrapped) {
467                         tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
468                                 "Get Next Event: wrapped");
469                         if (ctlr->aen_q_overflow) {
470                                 /*
471                                  * The AEN queue has wrapped, even before some
472                                  * events could be retrieved.  Let the caller
473                                  * know that some AENs were missed.
474                                  */
475                                 tw_cli_dbg_printf(2, ctlr_handle,
476                                         tw_osl_cur_func(),
477                                         "Get Next Event: overflow");
478                                 user_buf->driver_pkt.status =
479                                         TW_CL_ERROR_AEN_OVERFLOW;
480                                 ctlr->aen_q_overflow = TW_CL_FALSE;
481                         }
482                         start_index = ctlr->aen_head;
483                 } else {
484                         if (ctlr->aen_head == ctlr->aen_tail) {
485                                 tw_cli_dbg_printf(3, ctlr_handle,
486                                         tw_osl_cur_func(),
487                                         "Get Next Event: empty queue");
488                                 user_buf->driver_pkt.status =
489                                         TW_CL_ERROR_AEN_NO_EVENTS;
490                                 break;
491                         }
492                         start_index = ctlr->aen_tail;   /* = 0 */
493                 }
494                 tw_osl_memcpy(&event_buf, user_buf->data_buf,
495                         sizeof(struct tw_cl_event_packet));
496
497                 event_index = (start_index + event_buf.sequence_id -
498                         ctlr->aen_queue[start_index].sequence_id + 1) %
499                         ctlr->max_aens_supported;
500
501                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
502                         "Get Next Event: si = %x, ei = %x, ebsi = %x, "
503                         "sisi = %x, eisi = %x",
504                         start_index, event_index, event_buf.sequence_id,
505                         ctlr->aen_queue[start_index].sequence_id,
506                         ctlr->aen_queue[event_index].sequence_id);
507
508                 if (! (ctlr->aen_queue[event_index].sequence_id >
509                         event_buf.sequence_id)) {
510                         /*
511                          * We don't have any event matching the criterion.  So,
512                          * we have to report TW_CL_ERROR_AEN_NO_EVENTS.  If we also
513                          * encountered an overflow condition above, we cannot
514                          * report both conditions during this call.  We choose
515                          * to report NO_EVENTS this time, and an overflow the
516                          * next time we are called.
517                          */
518                         if (user_buf->driver_pkt.status ==
519                                 TW_CL_ERROR_AEN_OVERFLOW) {
520                                 /*
521                                  * Make a note so we report the overflow
522                                  * next time.
523                                  */
524                                 ctlr->aen_q_overflow = TW_CL_TRUE;
525                         }
526                         user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
527                         break;
528                 }
529                 /* Copy the event -- even if there has been an overflow. */
530                 tw_osl_memcpy(user_buf->data_buf,
531                         &(ctlr->aen_queue[event_index]),
532                         sizeof(struct tw_cl_event_packet));
533
534                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
535
536                 break;
537
538
539         case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
540                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
541                         "Get Previous Event");
542
543                 user_buf->driver_pkt.status = 0;
544                 if (ctlr->aen_q_wrapped) {
545                         if (ctlr->aen_q_overflow) {
546                                 /*
547                                  * The AEN queue has wrapped, even before some
548                                  * events could be retrieved.  Let the caller
549                                  * know that some AENs were missed.
550                                  */
551                                 user_buf->driver_pkt.status =
552                                         TW_CL_ERROR_AEN_OVERFLOW;
553                                 ctlr->aen_q_overflow = TW_CL_FALSE;
554                         }
555                         start_index = ctlr->aen_head;
556                 } else {
557                         if (ctlr->aen_head == ctlr->aen_tail) {
558                                 user_buf->driver_pkt.status =
559                                         TW_CL_ERROR_AEN_NO_EVENTS;
560                                 break;
561                         }
562                         start_index = ctlr->aen_tail;   /* = 0 */
563                 }
564                 tw_osl_memcpy(&event_buf, user_buf->data_buf,
565                         sizeof(struct tw_cl_event_packet));
566
567                 event_index = (start_index + event_buf.sequence_id -
568                         ctlr->aen_queue[start_index].sequence_id - 1) %
569                         ctlr->max_aens_supported;
570
571                 if (! (ctlr->aen_queue[event_index].sequence_id <
572                         event_buf.sequence_id)) {
573                         /*
574                          * We don't have any event matching the criterion.  So,
575                          * we have to report TW_CL_ERROR_AEN_NO_EVENTS.  If we also
576                          * encountered an overflow condition above, we cannot
577                          * report both conditions during this call.  We choose
578                          * to report NO_EVENTS this time, and an overflow the
579                          * next time we are called.
580                          */
581                         if (user_buf->driver_pkt.status ==
582                                 TW_CL_ERROR_AEN_OVERFLOW) {
583                                 /*
584                                  * Make a note so we report the overflow
585                                  * next time.
586                                  */
587                                 ctlr->aen_q_overflow = TW_CL_TRUE;
588                         }
589                         user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
590                         break;
591                 }
592                 /* Copy the event -- even if there has been an overflow. */
593                 tw_osl_memcpy(user_buf->data_buf,
594                         &(ctlr->aen_queue[event_index]),
595                         sizeof(struct tw_cl_event_packet));
596
597                 ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;
598
599                 break;
600
601
602         case TW_CL_IOCTL_GET_LOCK:
603         {
604                 struct tw_cl_lock_packet        lock_pkt;
605                 TW_TIME                         cur_time;
606
607                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
608                         "Get ioctl lock");
609
610                 cur_time = tw_osl_get_local_time();
611                 tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
612                         sizeof(struct tw_cl_lock_packet));
613
614                 if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
615                         (lock_pkt.force_flag) ||
616                         (cur_time >= ctlr->ioctl_lock.timeout)) {
617                         tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
618                                 "GET_LOCK: Getting lock!");
619                         ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
620                         ctlr->ioctl_lock.timeout =
621                                 cur_time + (lock_pkt.timeout_msec / 1000);
622                         lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
623                         user_buf->driver_pkt.status = 0;
624                 } else {
625                         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
626                                 "GET_LOCK: Lock already held!");
627                         lock_pkt.time_remaining_msec = (TW_UINT32)(
628                                 (ctlr->ioctl_lock.timeout - cur_time) * 1000);
629                         user_buf->driver_pkt.status =
630                                 TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
631                 }
632                 tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
633                         sizeof(struct tw_cl_lock_packet));
634                 break;
635         }
636
637
638         case TW_CL_IOCTL_RELEASE_LOCK:
639                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
640                         "Release ioctl lock");
641
642                 if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
643                         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
644                                 "twa_ioctl: RELEASE_LOCK: Lock not held!");
645                         user_buf->driver_pkt.status =
646                                 TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
647                 } else {
648                         tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
649                                 "RELEASE_LOCK: Releasing lock!");
650                         ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
651                         user_buf->driver_pkt.status = 0;
652                 }
653                 break;
654
655
656         case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
657         {
658                 struct tw_cl_compatibility_packet       comp_pkt;
659
660                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
661                         "Get compatibility info");
662
663                 tw_osl_memcpy(comp_pkt.driver_version,
664                         TW_OSL_DRIVER_VERSION_STRING,
665                         sizeof(TW_OSL_DRIVER_VERSION_STRING));
666                 comp_pkt.working_srl = ctlr->working_srl;
667                 comp_pkt.working_branch = ctlr->working_branch;
668                 comp_pkt.working_build = ctlr->working_build;
669                 comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
670                 comp_pkt.driver_branch_high =
671                         TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
672                 comp_pkt.driver_build_high =
673                         TWA_CURRENT_FW_BUILD(ctlr->arch_id);
674                 comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
675                 comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
676                 comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
677                 comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
678                 comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
679                 comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
680                 user_buf->driver_pkt.status = 0;
681
682                 /* Copy compatibility information to user space. */
683                 tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
684                         (sizeof(struct tw_cl_compatibility_packet) <
685                         user_buf->driver_pkt.buffer_length) ?
686                         sizeof(struct tw_cl_compatibility_packet) :
687                         user_buf->driver_pkt.buffer_length);
688                 break;
689         }
690
691         default:        
692                 /* Unknown opcode. */
693                 tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
694                         "Unknown ioctl cmd 0x%lx", cmd);
695                 error = TW_OSL_ENOTTY;
696         }
697
698         tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
699         return(error);
700 }
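/*
 * Note on the AEN ioctls above: a management application typically issues
 * TW_CL_IOCTL_GET_FIRST_EVENT once and then walks the queue with
 * TW_CL_IOCTL_GET_NEXT_EVENT, leaving the previously returned event in
 * data_buf so that its sequence_id seeds the next lookup.  A status of
 * TW_CL_ERROR_AEN_NO_EVENTS means nothing newer is available, and
 * TW_CL_ERROR_AEN_OVERFLOW means the queue wrapped and events were lost.
 * Schematic user-space loop (illustrative only; the actual call is plumbed
 * through the OS Layer's ioctl entry point):
 *
 *        ioctl(fd, <get-first-event cmd>, &ioctl_pkt);
 *        while (ioctl_pkt.driver_pkt.status == 0) {
 *                // consume ioctl_pkt.data_buf, but leave it in place so its
 *                // sequence_id drives the next lookup
 *                ioctl(fd, <get-next-event cmd>, &ioctl_pkt);
 *        }
 */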
701
702
703
704 /*
705  * Function name:       tw_cli_get_param
706  * Description:         Get a firmware parameter.
707  *
708  * Input:               ctlr            -- ptr to per ctlr structure
709  *                      table_id        -- parameter table #
710  *                      param_id        -- index of the parameter in the table
711  *                      param_size      -- size of the parameter in bytes
712  *                      callback        -- ptr to function, if any, to be called
713  *                                      back on completion; TW_CL_NULL if no callback.
714  * Output:              param_data      -- param value
715  * Return value:        0       -- success
716  *                      non-zero-- failure
717  */
718 TW_INT32
719 tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
720         TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
721         TW_VOID (* callback)(struct tw_cli_req_context *req))
722 {
723         struct tw_cli_req_context       *req;
724         union tw_cl_command_7k          *cmd;
725         struct tw_cl_param_9k           *param = TW_CL_NULL;
726         TW_INT32                        error = TW_OSL_EBUSY;
727
728         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
729
730         /* Get a request packet. */
731         if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
733                 goto out;
734
735         /* Make sure this is the only CL internal request at this time. */
736         if (ctlr->internal_req_busy) {
737                 error = TW_OSL_EBUSY;
738                 goto out;
739         }
740         ctlr->internal_req_busy = TW_CL_TRUE;
741         req->data = ctlr->internal_req_data;
742         req->data_phys = ctlr->internal_req_data_phys;
743         req->length = TW_CLI_SECTOR_SIZE;
744         req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
745
746         /* Initialize memory to read data into. */
747         param = (struct tw_cl_param_9k *)(req->data);
748         tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);
749
750         /* Build the cmd pkt. */
751         cmd = &(req->cmd_pkt->command.cmd_pkt_7k);
752
753         req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
754
755         cmd->param.sgl_off__opcode =
756                 BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
757         cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
758         cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
759         cmd->param.param_count = TW_CL_SWAP16(1);
760
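        /*
         * Build a single scatter/gather element covering the parameter
         * buffer.  As in the passthru path above, an SG descriptor costs 3
         * words of command size with 64-bit addressing and 2 words otherwise;
         * the leading 2 matches the SGL offset built into sgl_off__opcode.
         */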
761         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
762                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
763                         TW_CL_SWAP64(req->data_phys);
764                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
765                         TW_CL_SWAP32(req->length);
766                 cmd->param.size = 2 + 3;
767         } else {
768                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
769                         TW_CL_SWAP32(req->data_phys);
770                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
771                         TW_CL_SWAP32(req->length);
772                 cmd->param.size = 2 + 2;
773         }
774
775         /* Specify which parameter we need. */
776         param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
777         param->parameter_id = (TW_UINT8)(param_id);
778         param->parameter_size_bytes = TW_CL_SWAP16(param_size);
779
780         /* Submit the command. */
781         if (callback == TW_CL_NULL) {
782                 /* There's no call back; wait till the command completes. */
783                 error = tw_cli_submit_and_poll_request(req,
784                                 TW_CLI_REQUEST_TIMEOUT_PERIOD);
785                 if (error)
786                         goto out;
787                 if ((error = cmd->param.status)) {
788 #if       0
789                         tw_cli_create_ctlr_event(ctlr,
790                                 TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
791                                 &(req->cmd_pkt->cmd_hdr));
792 #endif // 0
793                         goto out;
794                 }
795                 tw_osl_memcpy(param_data, param->data, param_size);
796                 ctlr->internal_req_busy = TW_CL_FALSE;
797                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
798         } else {
799                 /* There's a call back.  Simply submit the command. */
800                 req->tw_cli_callback = callback;
801                 if ((error = tw_cli_submit_cmd(req)))
802                         goto out;
803         }
804         return(0);
805
806 out:
807         tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
808                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
809                 0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
810                 "get_param failed",
811                 "error = %d", error);
812         if (param)
813                 ctlr->internal_req_busy = TW_CL_FALSE;
814         if (req)
815                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
816         return(1);
817 }
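/*
 * Illustrative sketch (not part of the driver): a synchronous parameter read,
 * as used elsewhere in the Common Layer.  SOME_TABLE_ID and SOME_PARAM_ID are
 * placeholders for values defined in tw_cl_fwif.h; passing TW_CL_NULL as the
 * callback makes the call poll for completion.
 *
 *        TW_UINT16       value;
 *
 *        if (tw_cli_get_param(ctlr, SOME_TABLE_ID, SOME_PARAM_ID,
 *                &value, sizeof(value), TW_CL_NULL) == 0) {
 *                // value now holds the requested firmware parameter
 *        }
 */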
818
819
820
821 /*
822  * Function name:       tw_cli_set_param
823  * Description:         Set a firmware parameter.
824  *
825  * Input:               ctlr            -- ptr to per ctlr structure
826  *                      table_id        -- parameter table #
827  *                      param_id        -- index of the parameter in the table
828  *                      param_size      -- size of the parameter in bytes
829  *                      callback        -- ptr to function, if any, to be called
830  *                                      back on completion; TW_CL_NULL if no callback.
831  * Output:              None
832  * Return value:        0       -- success
833  *                      non-zero-- failure
834  */
835 TW_INT32
836 tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
837         TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
838         TW_VOID (* callback)(struct tw_cli_req_context *req))
839 {
840         struct tw_cli_req_context       *req;
841         union tw_cl_command_7k          *cmd;
842         struct tw_cl_param_9k           *param = TW_CL_NULL;
843         TW_INT32                        error = TW_OSL_EBUSY;
844
845         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
846
847         /* Get a request packet. */
848         if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
850                 goto out;
851
852         /* Make sure this is the only CL internal request at this time. */
853         if (ctlr->internal_req_busy) {
854                 error = TW_OSL_EBUSY;
855                 goto out;
856         }
857         ctlr->internal_req_busy = TW_CL_TRUE;
858         req->data = ctlr->internal_req_data;
859         req->data_phys = ctlr->internal_req_data_phys;
860         req->length = TW_CLI_SECTOR_SIZE;
861         req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
862
863         /* Initialize memory to send data using. */
864         param = (struct tw_cl_param_9k *)(req->data);
865         tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);
866
867         /* Build the cmd pkt. */
868         cmd = &(req->cmd_pkt->command.cmd_pkt_7k);
869
870         req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;
871
872         cmd->param.sgl_off__opcode =
873                 BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
874         cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
875         cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
876         cmd->param.param_count = TW_CL_SWAP16(1);
877
878         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
879                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
880                         TW_CL_SWAP64(req->data_phys);
881                 ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
882                         TW_CL_SWAP32(req->length);
883                 cmd->param.size = 2 + 3;
884         } else {
885                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
886                         TW_CL_SWAP32(req->data_phys);
887                 ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
888                         TW_CL_SWAP32(req->length);
889                 cmd->param.size = 2 + 2;
890         }
891
892         /* Specify which parameter we want to set. */
893         param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
894         param->parameter_id = (TW_UINT8)(param_id);
895         param->parameter_size_bytes = TW_CL_SWAP16(param_size);
896         tw_osl_memcpy(param->data, data, param_size);
897
898         /* Submit the command. */
899         if (callback == TW_CL_NULL) {
900                 /* There's no call back; wait till the command completes. */
901                 error = tw_cli_submit_and_poll_request(req,
902                                 TW_CLI_REQUEST_TIMEOUT_PERIOD);
903                 if (error)
904                         goto out;
905                 if ((error = cmd->param.status)) {
906 #if       0
907                         tw_cli_create_ctlr_event(ctlr,
908                                 TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
909                                 &(req->cmd_pkt->cmd_hdr));
910 #endif // 0
911                         goto out;
912                 }
913                 ctlr->internal_req_busy = TW_CL_FALSE;
914                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
915         } else {
916                 /* There's a call back.  Simply submit the command. */
917                 req->tw_cli_callback = callback;
918                 if ((error = tw_cli_submit_cmd(req)))
919                         goto out;
920         }
921         return(error);
922
923 out:
924         tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
925                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
926                 0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
927                 "set_param failed",
928                 "error = %d", error);
929         if (param)
930                 ctlr->internal_req_busy = TW_CL_FALSE;
931         if (req)
932                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
933         return(error);
934 }
935
936
937
938 /*
939  * Function name:       tw_cli_submit_and_poll_request
940  * Description:         Sends down a firmware cmd, and waits for the completion
941  *                      in a tight loop.
942  *
943  * Input:               req     -- ptr to request pkt
944  *                      timeout -- max # of seconds to wait before giving up
945  * Output:              None
946  * Return value:        0       -- success
947  *                      non-zero-- failure
948  */
949 TW_INT32
950 tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
951         TW_UINT32 timeout)
952 {
953         struct tw_cli_ctlr_context      *ctlr = req->ctlr;
954         TW_TIME                         end_time;
955         TW_INT32                        error;
956
957         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
958
959         /*
960          * If the cmd queue is full, tw_cli_submit_cmd will queue this
961          * request in the pending queue, since this is an internal request.
962          */
963         if ((error = tw_cli_submit_cmd(req))) {
964                 tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
965                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
966                         0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
967                         "Failed to start internal request",
968                         "error = %d", error);
969                 return(error);
970         }
971
972         /*
973          * Poll for the response until the command gets completed, or there's
974          * a timeout.
975          */
976         end_time = tw_osl_get_local_time() + timeout;
977         do {
978                 if ((error = req->error_code))
979                         /*
980                          * This will take care of completion due to a reset,
981                          * or a failure in tw_cli_submit_pending_queue.
982                          * The caller should do the clean-up.
983                          */
984                         return(error);
985
986                 /* See if the command completed. */
987                 tw_cli_process_resp_intr(ctlr);
988
989                 if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
990                         (req->state != TW_CLI_REQ_STATE_PENDING))
991                         return(req->state != TW_CLI_REQ_STATE_COMPLETE);
992         } while (tw_osl_get_local_time() <= end_time);
993
994         /* Time out! */
995         tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
996                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
997                 0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
998                 "Internal request timed out",
999                 "request = %p", req);
1000
1001         /*
1002          * We will reset the controller only if the request has already been
1003          * submitted, so as to not lose the request packet.  If a busy request
1004          * timed out, the reset will take care of freeing resources.  If a
1005          * pending request timed out, we will free resources for that request,
1006          * right here, thereby avoiding a reset.  So, the caller is expected
1007          * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
1008          */
1009
1010         /*
1011          * We have to make sure that this timed out request, if it were in the
1012          * pending queue, doesn't get submitted while we are here, from
1013          * tw_cli_submit_pending_queue.  There could be a race in that case.
1014          * Need to revisit.
1015          */
1016         if (req->state == TW_CLI_REQ_STATE_PENDING) {
1017                 tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
1018                         "Removing request from pending queue");
1019                 /*
1020                  * Request was never submitted.  Clean up.  Note that we did
1021                  * not do a reset.  So, we have to remove the request ourselves
1022                  * from the pending queue (as against tw_cli_drain_pending_queue
1023                  * taking care of it).
1024                  */
1025                 tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
1026                 if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) == TW_CL_NULL)
1027                         TW_CLI_WRITE_CONTROL_REGISTER(ctlr->ctlr_handle,
1028                                 TWA_CONTROL_MASK_COMMAND_INTERRUPT);
1029                 if (req->data)
1030                         ctlr->internal_req_busy = TW_CL_FALSE;
1031                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
1032         }
1033
1034         return(TW_OSL_ETIMEDOUT);
1035 }
1036
1037
1038
1039 /*
1040  * Function name:       tw_cl_reset_ctlr
1041  * Description:         Soft resets and then initializes the controller;
1042  *                      drains any incomplete requests.
1043  *
1044  * Input:               ctlr    -- ptr to per ctlr structure
1045  *                      req_handle      -- ptr to request handle
1046  * Output:              None
1047  * Return value:        0       -- success
1048  *                      non-zero-- failure
1049  */
1050 TW_INT32
1051 tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
1052 {
1053         struct tw_cli_ctlr_context      *ctlr =
1054                 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
1055         struct twa_softc                *sc = ctlr_handle->osl_ctlr_ctxt;
1056         struct tw_cli_req_context       *req;
1057         TW_INT32                        reset_attempt = 1;
1058         TW_INT32                        error = 0;
1059
1060         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");
1061
1062         ctlr->reset_in_progress = TW_CL_TRUE;
1063         twa_teardown_intr(sc);
1064
1065
1066         /*
1067          * Error back all requests in the complete, busy, and pending queues.
1068          * If any request is already on its way to getting submitted, it's in
1069          * none of these queues and so, will not be completed.  That request
1070          * will continue its course and get submitted to the controller after
1071          * the reset is done (and io_lock is released).
1072          */
1073         tw_cli_drain_complete_queue(ctlr);
1074         tw_cli_drain_busy_queue(ctlr);
1075         tw_cli_drain_pending_queue(ctlr);
1076         ctlr->internal_req_busy = TW_CL_FALSE;
1077         ctlr->get_more_aens     = TW_CL_FALSE;
1078
1079         /* Soft reset the controller. */
1080         while (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS) {
1081                 if ((error = tw_cli_soft_reset(ctlr))) {
1082                         tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1083                                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1084                                 0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1085                                 "Controller reset failed",
1086                                 "error = %d; attempt %d", error, reset_attempt);
1087                         reset_attempt++;
1088                         continue;
1089                 }
1090
1091                 /* Re-establish logical connection with the controller. */
1092                 if ((error = tw_cli_init_connection(ctlr,
1093                                 (TW_UINT16)(ctlr->max_simult_reqs),
1094                                 0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
1095                                 TW_CL_NULL, TW_CL_NULL))) {
1096                         tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1097                                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1098                                 0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1099                                 "Can't initialize connection after reset",
1100                                 "error = %d", error);
1101                         reset_attempt++;
1102                         continue;
1103                 }
1104
1105 #ifdef    TW_OSL_DEBUG
1106                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1107                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1108                         0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
1109                         "Controller reset done!", " ");
1110 #endif /* TW_OSL_DEBUG */
1111                 break;
1112         } /* End of while */
1113
1114         /* Move commands from the reset queue to the pending queue. */
1115         while ((req = tw_cli_req_q_remove_head(ctlr, TW_CLI_RESET_Q)) != TW_CL_NULL) {
1116                 tw_osl_timeout(req->req_handle);
1117                 tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
1118         }
1119
1120         twa_setup_intr(sc);
1121         tw_cli_enable_interrupts(ctlr);
1122         if ((TW_CL_Q_FIRST_ITEM(&(ctlr->req_q_head[TW_CLI_PENDING_Q]))) != TW_CL_NULL)
1123                 TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
1124                         TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);
1125         ctlr->reset_in_progress = TW_CL_FALSE;
1126         ctlr->reset_needed = TW_CL_FALSE;
1127
1128         /* Request for a bus re-scan. */
1129         tw_osl_scan_bus(ctlr_handle);
1130
1131         return(error);
1132 }
1133
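/*
 * Function name:       tw_cl_set_reset_needed
 * Description:         Marks the controller as needing a reset.
 *
 * Input:               ctlr_handle     -- controller handle
 * Output:              None
 * Return value:        None
 */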
1134 TW_VOID
1135 tw_cl_set_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
1136 {
1137         struct tw_cli_ctlr_context      *ctlr =
1138                 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
1139
1140         ctlr->reset_needed = TW_CL_TRUE;
1141 }
1142
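/*
 * Function name:       tw_cl_is_reset_needed
 * Description:         Reports whether a controller reset has been requested.
 *
 * Input:               ctlr_handle     -- controller handle
 * Output:              None
 * Return value:        TW_CL_TRUE if a reset is needed; TW_CL_FALSE otherwise
 */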
1143 TW_INT32
1144 tw_cl_is_reset_needed(struct tw_cl_ctlr_handle *ctlr_handle)
1145 {
1146         struct tw_cli_ctlr_context      *ctlr =
1147                 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
1148
1149         return(ctlr->reset_needed);
1150 }
1151
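/*
 * Function name:       tw_cl_is_active
 * Description:         Reports whether the controller is active.
 *
 * Input:               ctlr_handle     -- controller handle
 * Output:              None
 * Return value:        non-zero if the controller is active; zero otherwise
 */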
1152 TW_INT32
1153 tw_cl_is_active(struct tw_cl_ctlr_handle *ctlr_handle)
1154 {
1155         struct tw_cli_ctlr_context      *ctlr =
1156                 (struct tw_cli_ctlr_context *)
1157                 (ctlr_handle->cl_ctlr_ctxt);
1158
1159         return(ctlr->active);
1160 }
1161
1162
1163
1164 /*
1165  * Function name:       tw_cli_soft_reset
1166  * Description:         Does the actual soft reset.
1167  *
1168  * Input:               ctlr    -- ptr to per ctlr structure
1169  * Output:              None
1170  * Return value:        0       -- success
1171  *                      non-zero-- failure
1172  */
1173 TW_INT32
1174 tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
1175 {
1176         struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
1177         int                             found;
1178         int                             loop_count;
1179         TW_UINT32                       error;
1180
1181         tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");
1182
1183         tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1184                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1185                 0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
1186                 "Resetting controller...",
1187                 " ");
1188
1189         /* Don't let any new commands get submitted to the controller. */
1190         tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
1191
1192         TW_CLI_SOFT_RESET(ctlr_handle);
1193
1194         if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
1195             (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
1196             (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
1197                 /*
1198                  * There's a hardware bug in the G133 ASIC, which can lead to
1199                  * PCI parity errors and hangs, if the host accesses any
1200                  * registers when the firmware is resetting the hardware, as
1201                  * part of a hard/soft reset.  The window of time when the
1202                  * problem can occur is about 10 ms.  Here, we will handshake
1203                  * with the firmware to find out when the firmware is pulling
1204                  * down the hardware reset pin, and wait for about 500 ms to
1205                  * make sure we don't access any hardware registers (for
1206                  * polling) during that window.
1207                  */
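                /*
                 * Timing of the handshake below (derived from the loop):
                 * polling every 10 microseconds for at most 6,000,000
                 * iterations gives the 60 second ceiling noted on the loop,
                 * and the TWA_RESET_PHASE1_WAIT_TIME_MS of idle time that
                 * follows comfortably covers the ~10 ms window described
                 * above.
                 */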
1208                 ctlr->reset_phase1_in_progress = TW_CL_TRUE;
1209                 loop_count = 0;
1210                 do {
1211                         found = (tw_cli_find_response(ctlr, TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) == TW_OSL_ESUCCESS);
1212                         tw_osl_delay(10);
1213                         loop_count++;
1214                         error = 0x7888; /* reported below if the handshake never arrives */
1215                 } while (!found && (loop_count < 6000000)); /* Loop for no more than 60 seconds */
1216
1217                 if (!found) {
1218                         tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1219                                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1220                                 0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1221                                 "Missed firmware handshake after soft-reset",
1222                                 "error = %d", error);
1223                         tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1224                         return(error);
1225                 }
1226
1227                 tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
1228                 ctlr->reset_phase1_in_progress = TW_CL_FALSE;
1229         }
1230
1231         if ((error = tw_cli_poll_status(ctlr,
1232                         TWA_STATUS_MICROCONTROLLER_READY |
1233                         TWA_STATUS_ATTENTION_INTERRUPT,
1234                         TW_CLI_RESET_TIMEOUT_PERIOD))) {
1235                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1236                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1237                         0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1238                         "Micro-ctlr not ready/No attn intr after reset",
1239                         "error = %d", error);
1240                 tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1241                 return(error);
1242         }
1243
1244         TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
1245                 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
1246
1247         if ((error = tw_cli_drain_response_queue(ctlr))) {
1248                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1249                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1250                         0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1251                         "Can't drain response queue after reset",
1252                         "error = %d", error);
1253                 tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1254                 return(error);
1255         }
1256
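        /*
         * The I/O lock is released before draining the AEN queue: that drain
         * issues a fresh firmware command through the normal submission path,
         * which does its own locking.
         */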
1257         tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1258
1259         if ((error = tw_cli_drain_aen_queue(ctlr))) {
1260                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1261                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1262                         0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1263                         "Can't drain AEN queue after reset",
1264                         "error = %d", error);
1265                 return(error);
1266         }
1267
1268         if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
1269                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1270                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1271                         0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1272                         "Reset not reported by controller",
1273                         "error = %d", error);
1274                 return(error);
1275         }
1276
1277         return(TW_OSL_ESUCCESS);
1278 }
1279
1280
1281
1282 /*
1283  * Function name:       tw_cli_send_scsi_cmd
1284  * Description:         Sends a CL-internal SCSI command to the firmware.
1285  *
1286  * Input:               req     -- ptr to request pkt
1287  *                      cmd     -- opcode of scsi cmd to send
1288  * Output:              None
1289  * Return value:        0       -- success
1290  *                      non-zero-- failure
1291  */
1292 TW_INT32
1293 tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
1294 {
1295         struct tw_cl_command_packet     *cmdpkt;
1296         struct tw_cl_command_9k         *cmd9k;
1297         struct tw_cli_ctlr_context      *ctlr;
1298         TW_INT32                        error;
1299
1300         ctlr = req->ctlr;
1301         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1302
1303         /* Make sure this is the only CL internal request at this time. */
1304         if (ctlr->internal_req_busy)
1305                 return(TW_OSL_EBUSY);
1306         ctlr->internal_req_busy = TW_CL_TRUE;
1307         req->data = ctlr->internal_req_data;
1308         req->data_phys = ctlr->internal_req_data_phys;
1309         tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
1310         req->length = TW_CLI_SECTOR_SIZE;
1311
1312         /* Build the cmd pkt. */
1313         cmdpkt = req->cmd_pkt;
1314
1315         cmdpkt->cmd_hdr.header_desc.size_header = 128;
1316
1317         cmd9k = &(cmdpkt->command.cmd_pkt_9k);
1318
1319         cmd9k->res__opcode =
1320                 BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
1321         cmd9k->unit = 0;
1322         cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
1323         cmd9k->status = 0;
1324         cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
1325         cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);
1326
1327         if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
1328                 ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
1329                         TW_CL_SWAP64(req->data_phys);
1330                 ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
1331                         TW_CL_SWAP32(req->length);
1332         } else {
1333                 ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
1334                         TW_CL_SWAP32(req->data_phys);
1335                 ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
1336                         TW_CL_SWAP32(req->length);
1337         }
1338
1339         cmd9k->cdb[0] = (TW_UINT8)cmd;
1340         cmd9k->cdb[4] = 128;    /* allocation length (byte 4 of the 6-byte CDB) */
1341
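        /*
         * Submit the command.  TW_OSL_EBUSY is not treated as a failure:
         * in that case the request is expected to have been queued for a
         * later retry, so only other submission errors are logged and
         * turned into TW_OSL_EIO.
         */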
1342         if ((error = tw_cli_submit_cmd(req)))
1343                 if (error != TW_OSL_EBUSY) {
1344                         tw_cli_dbg_printf(1, ctlr->ctlr_handle,
1345                                 tw_osl_cur_func(),
1346                                 "Failed to start SCSI command",
1347                                 "request = %p, error = %d", req, error);
1348                         return(TW_OSL_EIO);
1349                 }
1350         return(TW_OSL_ESUCCESS);
1351 }
1352
1353
1354
1355 /*
1356  * Function name:       tw_cli_get_aen
1357  * Description:         Issues a REQUEST SENSE to the firmware to fetch an AEN.
1358  *
1359  * Input:               ctlr    -- ptr to per ctlr structure
1360  * Output:              None
1361  * Return value:        0       -- success
1362  *                      non-zero-- failure
1363  */
1364 TW_INT32
1365 tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
1366 {
1367         struct tw_cli_req_context       *req;
1368         TW_INT32                        error;
1369
1370         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1371
1372         if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
1374                 return(TW_OSL_EBUSY);
1375
1376         req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
1377         req->flags |= TW_CLI_REQ_FLAGS_9K;
1378         req->tw_cli_callback = tw_cli_aen_callback;
1379         if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
1380                 tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
1381                         "Could not send SCSI command",
1382                         "request = %p, error = %d", req, error);
1383                 if (req->data)
1384                         ctlr->internal_req_busy = TW_CL_FALSE;
1385                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
1386         }
1387         return(error);
1388 }
1389
1390
1391
1392 /*
1393  * Function name:       tw_cli_fill_sg_list
1394  * Description:         Fills in the scatter/gather list.
1395  *
1396  * Input:               ctlr    -- ptr to per ctlr structure
1397  *                      sgl_src -- ptr to the source sg list to copy from
1398  *                      sgl_dest -- ptr to the destination sg list
1399  *                      num_sgl_entries -- # of sg list entries to copy
1400  * Output:              None
1401  * Return value:        None
1402  */
1403 TW_VOID
1404 tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
1405         TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
1406 {
1407         TW_INT32        i;
1408
1409         tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1410
1411         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
1412                 struct tw_cl_sg_desc64 *sgl_s =
1413                         (struct tw_cl_sg_desc64 *)sgl_src;
1414                 struct tw_cl_sg_desc64 *sgl_d =
1415                         (struct tw_cl_sg_desc64 *)sgl_dest;
1416
1417                 tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
1418                         "64 bit addresses");
1419                 for (i = 0; i < num_sgl_entries; i++) {
1420                         sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
1421                         sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
1422                         sgl_s++;
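                        /*
                         * With TW_CL_64BIT_SG_LENGTH, the OSL-supplied
                         * entries carry a wider length field, so each source
                         * entry is 4 bytes larger than struct tw_cl_sg_desc64;
                         * skip the extra bytes to stay aligned with the next
                         * source entry.
                         */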
1423                         if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
1424                                 sgl_s = (struct tw_cl_sg_desc64 *)
1425                                         (((TW_INT8 *)(sgl_s)) + 4);
1426                 }
1427         } else {
1428                 struct tw_cl_sg_desc32 *sgl_s =
1429                         (struct tw_cl_sg_desc32 *)sgl_src;
1430                 struct tw_cl_sg_desc32 *sgl_d =
1431                         (struct tw_cl_sg_desc32 *)sgl_dest;
1432
1433                 tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
1434                         "32 bit addresses");
1435                 for (i = 0; i < num_sgl_entries; i++) {
1436                         sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
1437                         sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
1438                 }
1439         }
1440 }
1441