/*
 * Copyright (c) 2004-07 Applied Micro Circuits Corporation.
 * Copyright (c) 2004-05 Vinod Kashyap
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 *      $FreeBSD$
 */

/*
 * AMCC's 3ware driver for 9000 series storage controllers.
 *
 * Author: Vinod Kashyap
 * Modifications by: Adam Radford
 * Modifications by: Manjunath Ranganathaiah
 */


/*
 * Common Layer I/O functions.
 */


#include "tw_osl_share.h"
#include "tw_cl_share.h"
#include "tw_cl_fwif.h"
#include "tw_cl_ioctl.h"
#include "tw_cl.h"
#include "tw_cl_externs.h"
#include "tw_osl_ioctl.h"



/*
 * Function name:       tw_cl_start_io
 * Description:         Interface to OS Layer for accepting SCSI requests.
 *
 * Input:               ctlr_handle     -- controller handle
 *                      req_pkt         -- OSL built request packet
 *                      req_handle      -- request handle
 * Output:              None
 * Return value:        0       -- success
 *                      non-zero-- failure
 */
TW_INT32
tw_cl_start_io(struct tw_cl_ctlr_handle *ctlr_handle,
        struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
        struct tw_cli_ctlr_context              *ctlr;
        struct tw_cli_req_context               *req;
        struct tw_cl_command_9k                 *cmd;
        struct tw_cl_scsi_req_packet            *scsi_req;
        TW_INT32                                error;

        tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

        ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

        if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                        "I/O during reset: returning busy. Ctlr state = 0x%x",
                        ctlr->state);
                tw_osl_ctlr_busy(ctlr_handle, req_handle);
                return(TW_OSL_EBUSY);
        }

        /*
         * If working with a firmware version that does not support multiple
         * luns, and this request is directed at a non-zero lun, error it
         * back right away.
         */
        if ((req_pkt->gen_req_pkt.scsi_req.lun) &&
                (ctlr->working_srl < TWA_MULTI_LUN_FW_SRL)) {
                req_pkt->status |= (TW_CL_ERR_REQ_INVALID_LUN |
                        TW_CL_ERR_REQ_SCSI_ERROR);
                req_pkt->tw_osl_callback(req_handle);
                return(TW_CL_ERR_REQ_SUCCESS);
        }

        if ((req = tw_cli_get_request(ctlr
                )) == TW_CL_NULL) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                        "Out of request context packets: returning busy");
                tw_osl_ctlr_busy(ctlr_handle, req_handle);
                return(TW_OSL_EBUSY);
        }

        req_handle->cl_req_ctxt = req;
        req->req_handle = req_handle;
        req->orig_req = req_pkt;
        req->tw_cli_callback = tw_cli_complete_io;

        req->flags |= TW_CLI_REQ_FLAGS_EXTERNAL;
        req->flags |= TW_CLI_REQ_FLAGS_9K;

        scsi_req = &(req_pkt->gen_req_pkt.scsi_req);

        /* Build the cmd pkt. */
        cmd = &(req->cmd_pkt->command.cmd_pkt_9k);

        req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

        cmd->res__opcode = BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
        cmd->unit = (TW_UINT8)(scsi_req->unit);
        cmd->lun_l4__req_id = TW_CL_SWAP16(
                BUILD_LUN_L4__REQ_ID(scsi_req->lun, req->request_id));
        cmd->status = 0;
        cmd->sgl_offset = 16; /* offset from end of hdr = max cdb len */
        tw_osl_memcpy(cmd->cdb, scsi_req->cdb, scsi_req->cdb_len);

        if (req_pkt->flags & TW_CL_REQ_CALLBACK_FOR_SGLIST) {
                TW_UINT32       num_sgl_entries;

                req_pkt->tw_osl_sgl_callback(req_handle, cmd->sg_list,
                        &num_sgl_entries);
                cmd->lun_h4__sgl_entries =
                        TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
                                num_sgl_entries));
        } else {
                cmd->lun_h4__sgl_entries =
                        TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(scsi_req->lun,
                                scsi_req->sgl_entries));
                tw_cli_fill_sg_list(ctlr, scsi_req->sg_list,
                        cmd->sg_list, scsi_req->sgl_entries);
        }

        if ((error = tw_cli_submit_cmd(req))) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                        "Could not start request. request = %p, error = %d",
                        req, error);
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }
        return(error);
}
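
/*
 * Example (informal sketch, not part of the driver): how an OS Layer
 * might hand a SCSI request to tw_cl_start_io().  The osl_complete_io
 * callback and the local variable names (unit, lun, cdb, nsegs, ...) are
 * hypothetical; the tw_cl_* types, fields and return values are the ones
 * used above.
 *
 *      struct tw_cl_req_packet         req_pkt;
 *      struct tw_cl_scsi_req_packet    *scsi_req;
 *
 *      tw_osl_memzero(&req_pkt, sizeof(req_pkt));
 *      req_pkt.tw_osl_callback = osl_complete_io;
 *      scsi_req = &(req_pkt.gen_req_pkt.scsi_req);
 *      scsi_req->unit = unit;
 *      scsi_req->lun = lun;
 *      scsi_req->cdb = cdb;
 *      scsi_req->cdb_len = cdb_len;
 *      scsi_req->sgl_entries = nsegs;
 *      scsi_req->sg_list = sg_list;
 *
 *      if (tw_cl_start_io(ctlr_handle, &req_pkt, req_handle) == TW_OSL_EBUSY)
 *              (re-queue the request in the OSL and retry later);
 *
 * Alternatively, the OSL can set TW_CL_REQ_CALLBACK_FOR_SGLIST in
 * req_pkt.flags and supply req_pkt.tw_osl_sgl_callback, in which case the
 * Common Layer asks for the SG list while building the command.
 */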



/*
 * Function name:       tw_cli_submit_cmd
 * Description:         Submits a cmd to firmware.
 *
 * Input:               req     -- ptr to CL internal request context
 * Output:              None
 * Return value:        0       -- success
 *                      non-zero-- failure
 */
TW_INT32
tw_cli_submit_cmd(struct tw_cli_req_context *req)
{
        struct tw_cli_ctlr_context      *ctlr = req->ctlr;
        struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
        TW_UINT32                       status_reg;
        TW_INT32                        error;
        TW_UINT8                        notify_osl_of_ctlr_busy = TW_CL_FALSE;

        tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(), "entered");

        /* Serialize access to the controller cmd queue. */
        tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

        /* For 9650SE first write low 4 bytes */
        if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
            (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA))
                tw_osl_write_reg(ctlr_handle,
                                 TWA_COMMAND_QUEUE_OFFSET_LOW,
                                 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);

        /* Check to see if we can post a command. */
        status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
        if ((error = tw_cli_check_ctlr_state(ctlr, status_reg)))
                goto out;

        if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
                struct tw_cl_req_packet *req_pkt =
                        (struct tw_cl_req_packet *)(req->orig_req);

                tw_cli_dbg_printf(7, ctlr_handle, tw_osl_cur_func(),
                        "Cmd queue full");

                if ((req->flags & TW_CLI_REQ_FLAGS_INTERNAL)
                        || ((req_pkt) &&
                        (req_pkt->flags & TW_CL_REQ_RETRY_ON_BUSY))
                        ) {
                        if (req->state != TW_CLI_REQ_STATE_PENDING) {
                                tw_cli_dbg_printf(2, ctlr_handle,
                                        tw_osl_cur_func(),
                                        "pending internal/ioctl request");
                                req->state = TW_CLI_REQ_STATE_PENDING;
                                tw_cli_req_q_insert_tail(req, TW_CLI_PENDING_Q);
                                error = 0;
                        } else
                                error = TW_OSL_EBUSY;
                } else {
                        notify_osl_of_ctlr_busy = TW_CL_TRUE;
                        error = TW_OSL_EBUSY;
                }
        } else {
                tw_cli_dbg_printf(10, ctlr_handle, tw_osl_cur_func(),
                        "Submitting command");

                /* Insert command into busy queue */
                req->state = TW_CLI_REQ_STATE_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_BUSY_Q);

                if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
                    (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
                        /* Now write the high 4 bytes */
                        tw_osl_write_reg(ctlr_handle,
                                         TWA_COMMAND_QUEUE_OFFSET_HIGH,
                                         (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
                } else {
                        if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                                /* First write the low 4 bytes, then the high 4. */
                                tw_osl_write_reg(ctlr_handle,
                                                 TWA_COMMAND_QUEUE_OFFSET_LOW,
                                                 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
                                tw_osl_write_reg(ctlr_handle,
                                                 TWA_COMMAND_QUEUE_OFFSET_HIGH,
                                                 (TW_UINT32)(((TW_UINT64)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)))>>32), 4);
                        } else
                                tw_osl_write_reg(ctlr_handle,
                                                 TWA_COMMAND_QUEUE_OFFSET,
                                                 (TW_UINT32)(req->cmd_pkt_phys + sizeof(struct tw_cl_command_header)), 4);
                }
        }
out:
        tw_osl_free_lock(ctlr_handle, ctlr->io_lock);

        if (status_reg & TWA_STATUS_COMMAND_QUEUE_FULL) {
                if (notify_osl_of_ctlr_busy)
                        tw_osl_ctlr_busy(ctlr_handle, req->req_handle);

                /*
                 * Synchronize access between writes to command and control
                 * registers in 64-bit environments, on G66.
                 */
                if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
                        tw_osl_get_lock(ctlr_handle, ctlr->io_lock);

                /* Unmask command interrupt. */
                TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
                        TWA_CONTROL_UNMASK_COMMAND_INTERRUPT);

                if (ctlr->state & TW_CLI_CTLR_STATE_G66_WORKAROUND_NEEDED)
                        tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
        }

        return(error);
}
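
/*
 * Note on the register writes above (illustrative only): the value posted
 * to the command queue is the bus address of the command packet past its
 * header, split into two 32-bit halves for 64-bit capable controllers:
 *
 *      TW_UINT64 cmd_phys = req->cmd_pkt_phys +
 *              sizeof(struct tw_cl_command_header);
 *      TW_UINT32 low  = (TW_UINT32)(cmd_phys);
 *      TW_UINT32 high = (TW_UINT32)(cmd_phys >> 32);
 *
 * The 9650SE/9690SA (TW_CL_DEVICE_ID_9K_E/_SA) get the low half written
 * before the queue-full check and the high half only once the command is
 * actually posted; other 64-bit controllers get low then high back to
 * back, and 32-bit controllers get a single 32-bit write.
 */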



/*
 * Function name:       tw_cl_fw_passthru
 * Description:         Interface to OS Layer for accepting firmware
 *                      passthru requests.
 * Input:               ctlr_handle     -- controller handle
 *                      req_pkt         -- OSL built request packet
 *                      req_handle      -- request handle
 * Output:              None
 * Return value:        0       -- success
 *                      non-zero-- failure
 */
TW_INT32
tw_cl_fw_passthru(struct tw_cl_ctlr_handle *ctlr_handle,
        struct tw_cl_req_packet *req_pkt, struct tw_cl_req_handle *req_handle)
{
        struct tw_cli_ctlr_context              *ctlr;
        struct tw_cli_req_context               *req;
        union tw_cl_command_7k                  *cmd_7k;
        struct tw_cl_command_9k                 *cmd_9k;
        struct tw_cl_passthru_req_packet        *pt_req;
        TW_UINT8                                opcode;
        TW_UINT8                                sgl_offset;
        TW_VOID                                 *sgl = TW_CL_NULL;
        TW_INT32                                error;

        tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

        ctlr = (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);

        if (ctlr->state & TW_CLI_CTLR_STATE_RESET_IN_PROGRESS) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                        "Passthru request during reset: returning busy. "
                        "Ctlr state = 0x%x",
                        ctlr->state);
                tw_osl_ctlr_busy(ctlr_handle, req_handle);
                return(TW_OSL_EBUSY);
        }

        if ((req = tw_cli_get_request(ctlr
                )) == TW_CL_NULL) {
                tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                        "Out of request context packets: returning busy");
                tw_osl_ctlr_busy(ctlr_handle, req_handle);
                return(TW_OSL_EBUSY);
        }

        req_handle->cl_req_ctxt = req;
        req->req_handle = req_handle;
        req->orig_req = req_pkt;
        req->tw_cli_callback = tw_cli_complete_io;

        req->flags |= (TW_CLI_REQ_FLAGS_EXTERNAL | TW_CLI_REQ_FLAGS_PASSTHRU);

        pt_req = &(req_pkt->gen_req_pkt.pt_req);

        tw_osl_memcpy(req->cmd_pkt, pt_req->cmd_pkt,
                pt_req->cmd_pkt_length);
        /* Build the cmd pkt. */
        if ((opcode = GET_OPCODE(((TW_UINT8 *)
                (pt_req->cmd_pkt))[sizeof(struct tw_cl_command_header)]))
                        == TWA_FW_CMD_EXECUTE_SCSI) {
                TW_UINT16       lun_l4, lun_h4;

                tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
                        "passthru: 9k cmd pkt");
                req->flags |= TW_CLI_REQ_FLAGS_9K;
                cmd_9k = &(req->cmd_pkt->command.cmd_pkt_9k);
                lun_l4 = GET_LUN_L4(cmd_9k->lun_l4__req_id);
                lun_h4 = GET_LUN_H4(cmd_9k->lun_h4__sgl_entries);
                cmd_9k->lun_l4__req_id = TW_CL_SWAP16(
                        BUILD_LUN_L4__REQ_ID(lun_l4, req->request_id));
                if (pt_req->sgl_entries) {
                        cmd_9k->lun_h4__sgl_entries =
                                TW_CL_SWAP16(BUILD_LUN_H4__SGL_ENTRIES(lun_h4,
                                        pt_req->sgl_entries));
                        sgl = (TW_VOID *)(cmd_9k->sg_list);
                }
        } else {
                tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(),
                        "passthru: 7k cmd pkt");
                cmd_7k = &(req->cmd_pkt->command.cmd_pkt_7k);
                cmd_7k->generic.request_id =
                        (TW_UINT8)(TW_CL_SWAP16(req->request_id));
                if ((sgl_offset =
                        GET_SGL_OFF(cmd_7k->generic.sgl_off__opcode))) {
                        if (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)
                                sgl = (((TW_UINT32 *)cmd_7k) + cmd_7k->generic.size);
                        else
                                sgl = (((TW_UINT32 *)cmd_7k) + sgl_offset);
                        cmd_7k->generic.size += pt_req->sgl_entries *
                                ((ctlr->flags & TW_CL_64BIT_ADDRESSES) ? 3 : 2);
                }
        }

        if (sgl)
                tw_cli_fill_sg_list(ctlr, pt_req->sg_list,
                        sgl, pt_req->sgl_entries);

        if ((error = tw_cli_submit_cmd(req))) {
                tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1100, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Failed to start passthru command",
                        "error = %d", error);
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }
        return(error);
}
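
/*
 * Example (informal sketch, not part of the driver): an OSL ioctl path
 * might forward a user-built firmware command through tw_cl_fw_passthru().
 * The osl_complete_passthru callback and the user_cmd_* names are
 * hypothetical; the pt_req fields are the ones consumed above.
 *
 *      struct tw_cl_req_packet                 req_pkt;
 *      struct tw_cl_passthru_req_packet        *pt_req;
 *
 *      tw_osl_memzero(&req_pkt, sizeof(req_pkt));
 *      req_pkt.tw_osl_callback = osl_complete_passthru;
 *      pt_req = &(req_pkt.gen_req_pkt.pt_req);
 *      pt_req->cmd_pkt = user_cmd_pkt;
 *      pt_req->cmd_pkt_length = user_cmd_pkt_length;
 *      pt_req->sgl_entries = nsegs;
 *      pt_req->sg_list = sg_list;
 *
 *      error = tw_cl_fw_passthru(ctlr_handle, &req_pkt, req_handle);
 *
 * The Common Layer copies the command, fixes up its request id and SG
 * list (7K or 9K format, as detected above), and submits it.
 */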



/*
 * Function name:       tw_cl_ioctl
 * Description:         Handler of CL supported ioctl cmds.
 *
 * Input:               ctlr    -- ptr to per ctlr structure
 *                      cmd     -- ioctl cmd
 *                      buf     -- ptr to buffer in kernel memory, which is
 *                                 a copy of the input buffer in user-space
 * Output:              buf     -- ptr to buffer in kernel memory, which will
 *                                 need to be copied to the output buffer in
 *                                 user-space
 * Return value:        0       -- success
 *                      non-zero-- failure
 */
TW_INT32
tw_cl_ioctl(struct tw_cl_ctlr_handle *ctlr_handle, TW_INT32 cmd, TW_VOID *buf)
{
        struct tw_cli_ctlr_context      *ctlr =
                (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
        struct tw_cl_ioctl_packet       *user_buf =
                (struct tw_cl_ioctl_packet *)buf;
        struct tw_cl_event_packet       event_buf;
        TW_INT32                        event_index;
        TW_INT32                        start_index;
        TW_INT32                        error = TW_OSL_ESUCCESS;

        tw_cli_dbg_printf(5, ctlr_handle, tw_osl_cur_func(), "entered");

        /* Serialize access to the AEN queue and the ioctl lock. */
        tw_osl_get_lock(ctlr_handle, ctlr->gen_lock);

        switch (cmd) {
        case TW_CL_IOCTL_GET_FIRST_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Get First Event");

                if (ctlr->aen_q_wrapped) {
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that he missed out on some AENs.
                                 */
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        } else
                                user_buf->driver_pkt.status = 0;
                        event_index = ctlr->aen_head;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        user_buf->driver_pkt.status = 0;
                        event_index = ctlr->aen_tail;   /* = 0 */
                }
                tw_osl_memcpy(user_buf->data_buf,
                        &(ctlr->aen_queue[event_index]),
                        sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_LAST_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Get Last Event");

                if (ctlr->aen_q_wrapped) {
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that he missed out on some AENs.
                                 */
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        } else
                                user_buf->driver_pkt.status = 0;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        user_buf->driver_pkt.status = 0;
                }
                event_index = (ctlr->aen_head - 1 + ctlr->max_aens_supported) %
                        ctlr->max_aens_supported;

                tw_osl_memcpy(user_buf->data_buf,
                        &(ctlr->aen_queue[event_index]),
                        sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_NEXT_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Get Next Event");

                user_buf->driver_pkt.status = 0;
                if (ctlr->aen_q_wrapped) {
                        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                                "Get Next Event: wrapped");
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that he missed out on some AENs.
                                 */
                                tw_cli_dbg_printf(2, ctlr_handle,
                                        tw_osl_cur_func(),
                                        "Get Next Event: overflow");
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        }
                        start_index = ctlr->aen_head;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                tw_cli_dbg_printf(3, ctlr_handle,
                                        tw_osl_cur_func(),
                                        "Get Next Event: empty queue");
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        start_index = ctlr->aen_tail;   /* = 0 */
                }
                tw_osl_memcpy(&event_buf, user_buf->data_buf,
                        sizeof(struct tw_cl_event_packet));

                event_index = (start_index + event_buf.sequence_id -
                        ctlr->aen_queue[start_index].sequence_id + 1) %
                        ctlr->max_aens_supported;

                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Get Next Event: si = %x, ei = %x, ebsi = %x, "
                        "sisi = %x, eisi = %x",
                        start_index, event_index, event_buf.sequence_id,
                        ctlr->aen_queue[start_index].sequence_id,
                        ctlr->aen_queue[event_index].sequence_id);

                if (! (ctlr->aen_queue[event_index].sequence_id >
                        event_buf.sequence_id)) {
                        /*
                         * We don't have any event matching the criterion.  So,
                         * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
                         * encountered an overflow condition above, we cannot
                         * report both conditions during this call.  We choose
                         * to report NO_EVENTS this time, and an overflow the
                         * next time we are called.
                         */
                        if (user_buf->driver_pkt.status ==
                                TW_CL_ERROR_AEN_OVERFLOW) {
                                /*
                                 * Make a note so we report the overflow
                                 * next time.
                                 */
                                ctlr->aen_q_overflow = TW_CL_TRUE;
                        }
                        user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
                        break;
                }
                /* Copy the event -- even if there has been an overflow. */
                tw_osl_memcpy(user_buf->data_buf,
                        &(ctlr->aen_queue[event_index]),
                        sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_PREVIOUS_EVENT:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Get Previous Event");

                user_buf->driver_pkt.status = 0;
                if (ctlr->aen_q_wrapped) {
                        if (ctlr->aen_q_overflow) {
                                /*
                                 * The aen queue has wrapped, even before some
                                 * events have been retrieved.  Let the caller
                                 * know that he missed out on some AENs.
                                 */
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_OVERFLOW;
                                ctlr->aen_q_overflow = TW_CL_FALSE;
                        }
                        start_index = ctlr->aen_head;
                } else {
                        if (ctlr->aen_head == ctlr->aen_tail) {
                                user_buf->driver_pkt.status =
                                        TW_CL_ERROR_AEN_NO_EVENTS;
                                break;
                        }
                        start_index = ctlr->aen_tail;   /* = 0 */
                }
                tw_osl_memcpy(&event_buf, user_buf->data_buf,
                        sizeof(struct tw_cl_event_packet));

                event_index = (start_index + event_buf.sequence_id -
                        ctlr->aen_queue[start_index].sequence_id - 1) %
                        ctlr->max_aens_supported;

                if (! (ctlr->aen_queue[event_index].sequence_id <
                        event_buf.sequence_id)) {
                        /*
                         * We don't have any event matching the criterion.  So,
                         * we have to report TW_CL_ERROR_NO_EVENTS.  If we also
                         * encountered an overflow condition above, we cannot
                         * report both conditions during this call.  We choose
                         * to report NO_EVENTS this time, and an overflow the
                         * next time we are called.
                         */
                        if (user_buf->driver_pkt.status ==
                                TW_CL_ERROR_AEN_OVERFLOW) {
                                /*
                                 * Make a note so we report the overflow
                                 * next time.
                                 */
                                ctlr->aen_q_overflow = TW_CL_TRUE;
                        }
                        user_buf->driver_pkt.status = TW_CL_ERROR_AEN_NO_EVENTS;
                        break;
                }
                /* Copy the event -- even if there has been an overflow. */
                tw_osl_memcpy(user_buf->data_buf,
                        &(ctlr->aen_queue[event_index]),
                        sizeof(struct tw_cl_event_packet));

                ctlr->aen_queue[event_index].retrieved = TW_CL_AEN_RETRIEVED;

                break;


        case TW_CL_IOCTL_GET_LOCK:
        {
                struct tw_cl_lock_packet        lock_pkt;
                TW_TIME                         cur_time;

                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Get ioctl lock");

                cur_time = tw_osl_get_local_time();
                tw_osl_memcpy(&lock_pkt, user_buf->data_buf,
                        sizeof(struct tw_cl_lock_packet));

                if ((ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) ||
                        (lock_pkt.force_flag) ||
                        (cur_time >= ctlr->ioctl_lock.timeout)) {
                        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                                "GET_LOCK: Getting lock!");
                        ctlr->ioctl_lock.lock = TW_CLI_LOCK_HELD;
                        ctlr->ioctl_lock.timeout =
                                cur_time + (lock_pkt.timeout_msec / 1000);
                        lock_pkt.time_remaining_msec = lock_pkt.timeout_msec;
                        user_buf->driver_pkt.status = 0;
                } else {
                        tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                                "GET_LOCK: Lock already held!");
                        lock_pkt.time_remaining_msec = (TW_UINT32)(
                                (ctlr->ioctl_lock.timeout - cur_time) * 1000);
                        user_buf->driver_pkt.status =
                                TW_CL_ERROR_IOCTL_LOCK_ALREADY_HELD;
                }
                tw_osl_memcpy(user_buf->data_buf, &lock_pkt,
                        sizeof(struct tw_cl_lock_packet));
                break;
        }


        case TW_CL_IOCTL_RELEASE_LOCK:
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Release ioctl lock");

                if (ctlr->ioctl_lock.lock == TW_CLI_LOCK_FREE) {
                        tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
                                "twa_ioctl: RELEASE_LOCK: Lock not held!");
                        user_buf->driver_pkt.status =
                                TW_CL_ERROR_IOCTL_LOCK_NOT_HELD;
                } else {
                        tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                                "RELEASE_LOCK: Releasing lock!");
                        ctlr->ioctl_lock.lock = TW_CLI_LOCK_FREE;
                        user_buf->driver_pkt.status = 0;
                }
                break;


        case TW_CL_IOCTL_GET_COMPATIBILITY_INFO:
        {
                struct tw_cl_compatibility_packet       comp_pkt;

                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Get compatibility info");

                tw_osl_memcpy(comp_pkt.driver_version,
                        TW_OSL_DRIVER_VERSION_STRING,
                        sizeof(TW_OSL_DRIVER_VERSION_STRING));
                comp_pkt.working_srl = ctlr->working_srl;
                comp_pkt.working_branch = ctlr->working_branch;
                comp_pkt.working_build = ctlr->working_build;
                comp_pkt.driver_srl_high = TWA_CURRENT_FW_SRL;
                comp_pkt.driver_branch_high =
                        TWA_CURRENT_FW_BRANCH(ctlr->arch_id);
                comp_pkt.driver_build_high =
                        TWA_CURRENT_FW_BUILD(ctlr->arch_id);
                comp_pkt.driver_srl_low = TWA_BASE_FW_SRL;
                comp_pkt.driver_branch_low = TWA_BASE_FW_BRANCH;
                comp_pkt.driver_build_low = TWA_BASE_FW_BUILD;
                comp_pkt.fw_on_ctlr_srl = ctlr->fw_on_ctlr_srl;
                comp_pkt.fw_on_ctlr_branch = ctlr->fw_on_ctlr_branch;
                comp_pkt.fw_on_ctlr_build = ctlr->fw_on_ctlr_build;
                user_buf->driver_pkt.status = 0;

                /* Copy compatibility information to user space. */
                tw_osl_memcpy(user_buf->data_buf, &comp_pkt,
                        (sizeof(struct tw_cl_compatibility_packet) <
                        user_buf->driver_pkt.buffer_length) ?
                        sizeof(struct tw_cl_compatibility_packet) :
                        user_buf->driver_pkt.buffer_length);
                break;
        }

        default:
                /* Unknown opcode. */
                tw_cli_dbg_printf(3, ctlr_handle, tw_osl_cur_func(),
                        "Unknown ioctl cmd 0x%x", cmd);
                error = TW_OSL_ENOTTY;
        }

        tw_osl_free_lock(ctlr_handle, ctlr->gen_lock);
        return(error);
}
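
/*
 * Example (informal sketch, not part of the driver): retrieving queued
 * AENs through tw_cl_ioctl().  ioctl_pkt is assumed to point at a kernel
 * copy of the user's struct tw_cl_ioctl_packet, as described in the
 * header comment above.
 *
 *      error = tw_cl_ioctl(ctlr_handle, TW_CL_IOCTL_GET_FIRST_EVENT,
 *              ioctl_pkt);
 *      if ((error == TW_OSL_ESUCCESS) &&
 *              (ioctl_pkt->driver_pkt.status == 0))
 *              (ioctl_pkt->data_buf now holds a struct tw_cl_event_packet);
 *
 * Later events are walked with TW_CL_IOCTL_GET_NEXT_EVENT, passing back in
 * data_buf the sequence_id of the last event seen, until driver_pkt.status
 * comes back as TW_CL_ERROR_AEN_NO_EVENTS.
 */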



/*
 * Function name:       tw_cli_get_param
 * Description:         Get a firmware parameter.
 *
 * Input:               ctlr            -- ptr to per ctlr structure
 *                      table_id        -- parameter table #
 *                      param_id        -- index of the parameter in the table
 *                      param_size      -- size of the parameter in bytes
 *                      callback        -- ptr to function, if any, to be called
 *                                      back on completion; TW_CL_NULL if no callback.
 * Output:              param_data      -- param value
 * Return value:        0       -- success
 *                      non-zero-- failure
 */
TW_INT32
tw_cli_get_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
        TW_INT32 param_id, TW_VOID *param_data, TW_INT32 param_size,
        TW_VOID (* callback)(struct tw_cli_req_context *req))
{
        struct tw_cli_req_context       *req;
        union tw_cl_command_7k          *cmd;
        struct tw_cl_param_9k           *param = TW_CL_NULL;
        TW_INT32                        error = TW_OSL_EBUSY;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Get a request packet. */
        if ((req = tw_cli_get_request(ctlr
                )) == TW_CL_NULL)
                goto out;

        /* Make sure this is the only CL internal request at this time. */
        if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
                error = TW_OSL_EBUSY;
                goto out;
        }
        ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
        req->data = ctlr->internal_req_data;
        req->data_phys = ctlr->internal_req_data_phys;
        req->length = TW_CLI_SECTOR_SIZE;
        req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

        /* Initialize memory to read data into. */
        param = (struct tw_cl_param_9k *)(req->data);
        tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

        /* Build the cmd pkt. */
        cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

        req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

        cmd->param.sgl_off__opcode =
                BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_GET_PARAM);
        cmd->param.request_id =
                (TW_UINT8)(TW_CL_SWAP16(req->request_id));
        cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
        cmd->param.param_count = TW_CL_SWAP16(1);

        if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
                        TW_CL_SWAP64(req->data_phys);
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
                        TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 3;
        } else {
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
                        TW_CL_SWAP32(req->data_phys);
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
                        TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 2;
        }

        /* Specify which parameter we need. */
        param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
        param->parameter_id = (TW_UINT8)(param_id);
        param->parameter_size_bytes = TW_CL_SWAP16(param_size);

        /* Submit the command. */
        if (callback == TW_CL_NULL) {
                /* There's no call back; wait till the command completes. */
                error = tw_cli_submit_and_poll_request(req,
                                TW_CLI_REQUEST_TIMEOUT_PERIOD);
                if (error == TW_OSL_ETIMEDOUT)
                        /* Clean-up done by tw_cli_submit_and_poll_request. */
                        return(error);
                if (error)
                        goto out;
                if ((error = cmd->param.status)) {
                        tw_cli_create_ctlr_event(ctlr,
                                TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
                                &(req->cmd_pkt->cmd_hdr));
                        goto out;
                }
                tw_osl_memcpy(param_data, param->data, param_size);
                ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        } else {
                /* There's a call back.  Simply submit the command. */
                req->tw_cli_callback = callback;
                if ((error = tw_cli_submit_cmd(req)))
                        goto out;
        }
        return(0);

out:
        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                0x1101, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                "get_param failed",
                "error = %d", error);
        if (param)
                ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
        if (req)
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        return(1);
}
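
/*
 * Example (informal sketch, not part of the driver): a typical polled call
 * to tw_cli_get_param().  The table/parameter ids shown are placeholders;
 * real callers pass ids defined in tw_cl_fwif.h.
 *
 *      TW_UINT16       param_val;
 *
 *      if (tw_cli_get_param(ctlr, some_table_id, some_param_id,
 *                      &param_val, sizeof(param_val), TW_CL_NULL) == 0)
 *              (param_val now holds the value read from firmware);
 *
 * Passing a non-NULL callback instead makes the call asynchronous: the
 * command is only submitted here, and the callback runs from the response
 * path once the firmware completes it.
 */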



/*
 * Function name:       tw_cli_set_param
 * Description:         Set a firmware parameter.
 *
 * Input:               ctlr            -- ptr to per ctlr structure
 *                      table_id        -- parameter table #
 *                      param_id        -- index of the parameter in the table
 *                      param_size      -- size of the parameter in bytes
 *                      callback        -- ptr to function, if any, to be called
 *                                      back on completion; TW_CL_NULL if no callback.
 * Output:              None
 * Return value:        0       -- success
 *                      non-zero-- failure
 */
TW_INT32
tw_cli_set_param(struct tw_cli_ctlr_context *ctlr, TW_INT32 table_id,
        TW_INT32 param_id, TW_INT32 param_size, TW_VOID *data,
        TW_VOID (* callback)(struct tw_cli_req_context *req))
{
        struct tw_cli_req_context       *req;
        union tw_cl_command_7k          *cmd;
        struct tw_cl_param_9k           *param = TW_CL_NULL;
        TW_INT32                        error = TW_OSL_EBUSY;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /* Get a request packet. */
        if ((req = tw_cli_get_request(ctlr
                )) == TW_CL_NULL)
                goto out;

        /* Make sure this is the only CL internal request at this time. */
        if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY) {
                error = TW_OSL_EBUSY;
                goto out;
        }
        ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
        req->data = ctlr->internal_req_data;
        req->data_phys = ctlr->internal_req_data_phys;
        req->length = TW_CLI_SECTOR_SIZE;
        req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;

        /* Initialize memory to send data using. */
        param = (struct tw_cl_param_9k *)(req->data);
        tw_osl_memzero(param, sizeof(struct tw_cl_param_9k) - 1 + param_size);

        /* Build the cmd pkt. */
        cmd = &(req->cmd_pkt->command.cmd_pkt_7k);

        req->cmd_pkt->cmd_hdr.header_desc.size_header = 128;

        cmd->param.sgl_off__opcode =
                BUILD_SGL_OFF__OPCODE(2, TWA_FW_CMD_SET_PARAM);
        cmd->param.request_id = (TW_UINT8)(TW_CL_SWAP16(req->request_id));
        cmd->param.host_id__unit = BUILD_HOST_ID__UNIT(0, 0);
        cmd->param.param_count = TW_CL_SWAP16(1);

        if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].address =
                        TW_CL_SWAP64(req->data_phys);
                ((struct tw_cl_sg_desc64 *)(cmd->param.sgl))[0].length =
                        TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 3;
        } else {
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].address =
                        TW_CL_SWAP32(req->data_phys);
                ((struct tw_cl_sg_desc32 *)(cmd->param.sgl))[0].length =
                        TW_CL_SWAP32(req->length);
                cmd->param.size = 2 + 2;
        }

        /* Specify which parameter we want to set. */
        param->table_id = TW_CL_SWAP16(table_id | TWA_9K_PARAM_DESCRIPTOR);
        param->parameter_id = (TW_UINT8)(param_id);
        param->parameter_size_bytes = TW_CL_SWAP16(param_size);
        tw_osl_memcpy(param->data, data, param_size);

        /* Submit the command. */
        if (callback == TW_CL_NULL) {
                /* There's no call back; wait till the command completes. */
                error = tw_cli_submit_and_poll_request(req,
                        TW_CLI_REQUEST_TIMEOUT_PERIOD);
                if (error == TW_OSL_ETIMEDOUT)
                        /* Clean-up done by tw_cli_submit_and_poll_request. */
                        return(error);
                if (error)
                        goto out;
                if ((error = cmd->param.status)) {
                        tw_cli_create_ctlr_event(ctlr,
                                TW_CL_MESSAGE_SOURCE_CONTROLLER_ERROR,
                                &(req->cmd_pkt->cmd_hdr));
                        goto out;
                }
                ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        } else {
                /* There's a call back.  Simply submit the command. */
                req->tw_cli_callback = callback;
                if ((error = tw_cli_submit_cmd(req)))
                        goto out;
        }
        return(error);

out:
        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                0x1102, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                "set_param failed",
                "error = %d", error);
        if (param)
                ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
        if (req)
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        return(error);
}



/*
 * Function name:       tw_cli_submit_and_poll_request
 * Description:         Sends down a firmware cmd, and waits for the completion
 *                      in a tight loop.
 *
 * Input:               req     -- ptr to request pkt
 *                      timeout -- max # of seconds to wait before giving up
 * Output:              None
 * Return value:        0       -- success
 *                      non-zero-- failure
 */
TW_INT32
tw_cli_submit_and_poll_request(struct tw_cli_req_context *req,
        TW_UINT32 timeout)
{
        struct tw_cli_ctlr_context      *ctlr = req->ctlr;
        TW_TIME                         end_time;
        TW_INT32                        error;

        tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");

        /*
         * If the cmd queue is full, tw_cli_submit_cmd will queue this
         * request in the pending queue, since this is an internal request.
         */
        if ((error = tw_cli_submit_cmd(req))) {
                tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                        TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                        0x1103, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                        "Failed to start internal request",
                        "error = %d", error);
                return(error);
        }

        /*
         * Poll for the response until the command gets completed, or there's
         * a timeout.
         */
        end_time = tw_osl_get_local_time() + timeout;
        do {
                if ((error = req->error_code))
                        /*
                         * This will take care of completion due to a reset,
                         * or a failure in tw_cli_submit_pending_queue.
                         * The caller should do the clean-up.
                         */
                        return(error);

                /* See if the command completed. */
                tw_cli_process_resp_intr(ctlr);

                if ((req->state != TW_CLI_REQ_STATE_BUSY) &&
                        (req->state != TW_CLI_REQ_STATE_PENDING))
                        return(req->state != TW_CLI_REQ_STATE_COMPLETE);
        } while (tw_osl_get_local_time() <= end_time);

        /* Time out! */
        tw_cl_create_event(ctlr->ctlr_handle, TW_CL_FALSE,
                TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
                0x1104, 0x1, TW_CL_SEVERITY_ERROR_STRING,
                "Internal request timed out",
                "request = %p", req);

        /*
         * We will reset the controller only if the request has already been
         * submitted, so as to not lose the request packet.  If a busy request
         * timed out, the reset will take care of freeing resources.  If a
         * pending request timed out, we will free resources for that request,
         * right here, thereby avoiding a reset.  So, the caller is expected
         * to NOT cleanup when TW_OSL_ETIMEDOUT is returned.
         */

        /*
         * We have to make sure that this timed out request, if it were in the
         * pending queue, doesn't get submitted while we are here, from
         * tw_cli_submit_pending_queue.  There could be a race in that case.
         * Need to revisit.
         */
        if (req->state != TW_CLI_REQ_STATE_PENDING)
                tw_cl_reset_ctlr(ctlr->ctlr_handle);
        else {
                tw_cli_dbg_printf(3, ctlr->ctlr_handle, tw_osl_cur_func(),
                        "Removing request from pending queue");
                /*
                 * Request was never submitted.  Clean up.  Note that we did
                 * not do a reset.  So, we have to remove the request ourselves
                 * from the pending queue (as against tw_cli_drain_pending_queue
                 * taking care of it).
                 */
                tw_cli_req_q_remove_item(req, TW_CLI_PENDING_Q);
                if (req->data)
                        ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
                tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
        }

        return(TW_OSL_ETIMEDOUT);
}
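
/*
 * Note for callers (mirrors tw_cli_get_param()/tw_cli_set_param() above):
 * because a timed-out request is either cleaned up by the controller reset
 * (busy request) or freed right here (pending request), callers treat
 * TW_OSL_ETIMEDOUT specially and skip their own clean-up:
 *
 *      error = tw_cli_submit_and_poll_request(req,
 *                      TW_CLI_REQUEST_TIMEOUT_PERIOD);
 *      if (error == TW_OSL_ETIMEDOUT)
 *              return(error);          (no further clean-up by the caller)
 *      if (error)
 *              goto out;               (caller frees req / clears state)
 */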
1064
1065
1066
1067 /*
1068  * Function name:       tw_cl_reset_ctlr
1069  * Description:         Soft resets and then initializes the controller;
1070  *                      drains any incomplete requests.
1071  *
1072  * Input:               ctlr    -- ptr to per ctlr structure
1073  * Output:              None
1074  * Return value:        0       -- success
1075  *                      non-zero-- failure
1076  */
1077 TW_INT32
1078 tw_cl_reset_ctlr(struct tw_cl_ctlr_handle *ctlr_handle)
1079 {
1080         struct tw_cli_ctlr_context      *ctlr =
1081                 (struct tw_cli_ctlr_context *)(ctlr_handle->cl_ctlr_ctxt);
1082         TW_INT32                        reset_attempt = 1;
1083         TW_INT32                        error;
1084
1085         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(), "entered");
1086
1087         ctlr->state |= TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
1088
1089         /*
1090          * Error back all requests in the complete, busy, and pending queues.
1091          * If any request is already on its way to getting submitted, it's in
1092          * none of these queues and so, will not be completed.  That request
1093          * will continue its course and get submitted to the controller after
1094          * the reset is done (and io_lock is released).
1095          */
1096         tw_cli_dbg_printf(2, ctlr_handle, tw_osl_cur_func(),
1097                 "Draining all queues following reset");
1098         tw_cli_drain_complete_queue(ctlr);
1099         tw_cli_drain_busy_queue(ctlr);
1100         tw_cli_drain_pending_queue(ctlr);
1101
1102         tw_cli_disable_interrupts(ctlr);
1103
1104         /* Soft reset the controller. */
1105 try_reset:
1106         if ((error = tw_cli_soft_reset(ctlr))) {
1107                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
1108                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1109                         0x1105, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1110                         "Controller reset failed",
1111                         "error = %d; attempt %d", error, reset_attempt++);
1112                 if (reset_attempt <= TW_CLI_MAX_RESET_ATTEMPTS)
1113                         goto try_reset;
1114                 else
1115                         goto out;
1116         }
1117
1118         /* Re-establish logical connection with the controller. */
1119         if ((error = tw_cli_init_connection(ctlr,
1120                         (TW_UINT16)(ctlr->max_simult_reqs),
1121                         0, 0, 0, 0, 0, TW_CL_NULL, TW_CL_NULL, TW_CL_NULL,
1122                         TW_CL_NULL, TW_CL_NULL))) {
1123                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
1124                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1125                         0x1106, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1126                         "Can't initialize connection after reset",
1127                         "error = %d", error);
1128                 goto out;
1129         }
1130
1131         tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
1132                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1133                 0x1107, 0x3, TW_CL_SEVERITY_INFO_STRING,
1134                 "Controller reset done!",
1135                 " ");
1136
1137 out:
1138         ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_IN_PROGRESS;
1139         /*
1140          * Enable interrupts, and also clear attention and response interrupts.
1141          */
1142         tw_cli_enable_interrupts(ctlr);
1143         
1144         /* Request for a bus re-scan. */
1145         if (!error)
1146                 tw_osl_scan_bus(ctlr_handle);
1147         return(error);
1148 }
1149
1150
1151
1152 /*
1153  * Function name:       tw_cli_soft_reset
1154  * Description:         Does the actual soft reset.
1155  *
1156  * Input:               ctlr    -- ptr to per ctlr structure
1157  * Output:              None
1158  * Return value:        0       -- success
1159  *                      non-zero-- failure
1160  */
1161 TW_INT32
1162 tw_cli_soft_reset(struct tw_cli_ctlr_context *ctlr)
1163 {
1164         struct tw_cl_ctlr_handle        *ctlr_handle = ctlr->ctlr_handle;
1165         TW_UINT32                       status_reg;
1166         TW_UINT32                       error;
1167
1168         tw_cli_dbg_printf(1, ctlr_handle, tw_osl_cur_func(), "entered");
1169
1170         tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
1171                 TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1172                 0x1108, 0x3, TW_CL_SEVERITY_INFO_STRING,
1173                 "Resetting controller...",
1174                 " ");
1175
1176         /* Don't let any new commands get submitted to the controller. */
1177         tw_osl_get_lock(ctlr_handle, ctlr->io_lock);
1178
1179         TW_CLI_SOFT_RESET(ctlr_handle);
1180
1181         if ((ctlr->device_id == TW_CL_DEVICE_ID_9K_X) ||
1182             (ctlr->device_id == TW_CL_DEVICE_ID_9K_E) ||
1183             (ctlr->device_id == TW_CL_DEVICE_ID_9K_SA)) {
1184                 /*
1185                  * There's a hardware bug in the G133 ASIC, which can lead to
1186                  * PCI parity errors and hangs, if the host accesses any
1187                  * registers when the firmware is resetting the hardware, as
1188                  * part of a hard/soft reset.  The window of time when the
1189                  * problem can occur is about 10 ms.  Here, we will handshake
1190                  * with the firmware to find out when the firmware is pulling
1191                  * down the hardware reset pin, and wait for about 500 ms to
1192                  * make sure we don't access any hardware registers (for
1193                  * polling) during that window.
1194                  */
1195                 ctlr->state |= TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
1196                 while (tw_cli_find_response(ctlr,
1197                         TWA_RESET_PHASE1_NOTIFICATION_RESPONSE) != TW_OSL_ESUCCESS)
1198                         tw_osl_delay(10);
1199                 tw_osl_delay(TWA_RESET_PHASE1_WAIT_TIME_MS * 1000);
1200                 ctlr->state &= ~TW_CLI_CTLR_STATE_RESET_PHASE1_IN_PROGRESS;
1201         }
1202
1203         if ((error = tw_cli_poll_status(ctlr,
1204                         TWA_STATUS_MICROCONTROLLER_READY |
1205                         TWA_STATUS_ATTENTION_INTERRUPT,
1206                         TW_CLI_RESET_TIMEOUT_PERIOD))) {
1207                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
1208                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1209                         0x1109, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1210                         "Micro-ctlr not ready/No attn intr after reset",
1211                         "error = %d", error);
1212                 tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1213                 return(error);
1214         }
1215
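        /* Acknowledge the attention interrupt raised by the reset. */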
1216         TW_CLI_WRITE_CONTROL_REGISTER(ctlr_handle,
1217                 TWA_CONTROL_CLEAR_ATTENTION_INTERRUPT);
1218
1219         if ((error = tw_cli_drain_response_queue(ctlr))) {
1220                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1221                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1222                         0x110A, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1223                         "Can't drain response queue after reset",
1224                         "error = %d", error);
1225                 tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1226                 return(error);
1227         }
1228         
1229         tw_osl_free_lock(ctlr_handle, ctlr->io_lock);
1230
1231         if ((error = tw_cli_drain_aen_queue(ctlr))) {
1232                 tw_cl_create_event(ctlr_handle, TW_CL_FALSE,
1233                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_ERROR,
1234                         0x110B, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1235                         "Can't drain AEN queue after reset",
1236                         "error = %d", error);
1237                 return(error);
1238         }
1239         
1240         if ((error = tw_cli_find_aen(ctlr, TWA_AEN_SOFT_RESET))) {
1241                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
1242                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1243                         0x110C, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1244                         "Reset not reported by controller",
1245                         "error = %d", error);
1246                 return(error);
1247         }
1248         
1249         status_reg = TW_CLI_READ_STATUS_REGISTER(ctlr_handle);
1250         
1251         if ((error = TW_CLI_STATUS_ERRORS(status_reg)) ||
1252                         (error = tw_cli_check_ctlr_state(ctlr, status_reg))) {
1253                 tw_cl_create_event(ctlr_handle, TW_CL_TRUE,
1254                         TW_CL_MESSAGE_SOURCE_COMMON_LAYER_EVENT,
1255                         0x110D, 0x1, TW_CL_SEVERITY_ERROR_STRING,
1256                         "Controller errors detected after reset",
1257                         "error = %d", error);
1258                 return(error);
1259         }
1260         
1261         return(TW_OSL_ESUCCESS);
1262 }
1263
1264
1265
1266 /*
1267  * Function name:       tw_cli_send_scsi_cmd
1268  * Description:         Sends down a SCSI cmd to fw.
1269  *
1270  * Input:               req     -- ptr to request pkt
1271  *                      cmd     -- opcode of SCSI cmd to send
1272  * Output:              None
1273  * Return value:        0       -- success
1274  *                      non-zero-- failure
1275  */
1276 TW_INT32
1277 tw_cli_send_scsi_cmd(struct tw_cli_req_context *req, TW_INT32 cmd)
1278 {
1279         struct tw_cl_command_packet     *cmdpkt;
1280         struct tw_cl_command_9k         *cmd9k;
1281         struct tw_cli_ctlr_context      *ctlr;
1282         TW_INT32                        error;
1283
1284         ctlr = req->ctlr;
1285         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1286
1287         /* Make sure this is the only CL internal request at this time. */
1288         if (ctlr->state & TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY)
1289                 return(TW_OSL_EBUSY);
1290         ctlr->state |= TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
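        /*
         * Use the pre-allocated internal request buffer (one sector)
         * for this command's data transfer.
         */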
1291         req->data = ctlr->internal_req_data;
1292         req->data_phys = ctlr->internal_req_data_phys;
1293         tw_osl_memzero(req->data, TW_CLI_SECTOR_SIZE);
1294         req->length = TW_CLI_SECTOR_SIZE;
1295
1296         /* Build the cmd pkt. */
1297         cmdpkt = req->cmd_pkt;
1298
1299         cmdpkt->cmd_hdr.header_desc.size_header = 128;
1300                 
1301         cmd9k = &(cmdpkt->command.cmd_pkt_9k);
1302
1303         cmd9k->res__opcode =
1304                 BUILD_RES__OPCODE(0, TWA_FW_CMD_EXECUTE_SCSI);
1305         cmd9k->unit = 0;
1306         cmd9k->lun_l4__req_id = TW_CL_SWAP16(req->request_id);
1307         cmd9k->status = 0;
1308         cmd9k->sgl_offset = 16; /* offset from end of hdr = max cdb len */
1309         cmd9k->lun_h4__sgl_entries = TW_CL_SWAP16(1);
1310
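        /*
         * Build a single-entry SG list, using 64-bit or 32-bit descriptors
         * depending on the controller's addressing mode.
         */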
1311         if (req->ctlr->flags & TW_CL_64BIT_ADDRESSES) {
1312                 ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].address =
1313                         TW_CL_SWAP64(req->data_phys);
1314                 ((struct tw_cl_sg_desc64 *)(cmd9k->sg_list))[0].length =
1315                         TW_CL_SWAP32(req->length);
1316         } else {
1317                 ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].address =
1318                         TW_CL_SWAP32(req->data_phys);
1319                 ((struct tw_cl_sg_desc32 *)(cmd9k->sg_list))[0].length =
1320                         TW_CL_SWAP32(req->length);
1321         }
1322
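        /*
         * cdb[0] is the SCSI opcode; for the 6-byte CDBs sent through this
         * path (e.g. REQUEST SENSE from tw_cli_get_aen), cdb[4] is the
         * allocation length.
         */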
1323         cmd9k->cdb[0] = (TW_UINT8)cmd;
1324         cmd9k->cdb[4] = 128;
1325
1326         if ((error = tw_cli_submit_cmd(req)))
1327                 if (error != TW_OSL_EBUSY) {
1328                         tw_cli_dbg_printf(1, ctlr->ctlr_handle,
1329                                 tw_osl_cur_func(),
1330                                 "Failed to start SCSI command: "
1331                                 "request = %p, error = %d", req, error);
1332                         return(TW_OSL_EIO);
1333                 }
1334         return(TW_OSL_ESUCCESS);
1335 }
1336
1337
1338
1339 /*
1340  * Function name:       tw_cli_get_aen
1341  * Description:         Sends down a Request Sense cmd to fw to fetch an AEN.
1342  *
1343  * Input:               ctlr    -- ptr to per ctlr structure
1344  * Output:              None
1345  * Return value:        0       -- success
1346  *                      non-zero-- failure
1347  */
1348 TW_INT32
1349 tw_cli_get_aen(struct tw_cli_ctlr_context *ctlr)
1350 {
1351         struct tw_cli_req_context       *req;
1352         TW_INT32                        error;
1353
1354         tw_cli_dbg_printf(4, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1355
1356         if ((req = tw_cli_get_request(ctlr)) == TW_CL_NULL)
1358                 return(TW_OSL_EBUSY);
1359
1360         req->flags |= TW_CLI_REQ_FLAGS_INTERNAL;
1361         req->flags |= TW_CLI_REQ_FLAGS_9K;
1362         req->tw_cli_callback = tw_cli_aen_callback;
1363         if ((error = tw_cli_send_scsi_cmd(req, 0x03 /* REQUEST_SENSE */))) {
1364                 tw_cli_dbg_printf(1, ctlr->ctlr_handle, tw_osl_cur_func(),
1365                         "Could not send SCSI command: "
1366                         "request = %p, error = %d", req, error);
1367                 if (req->data)
1368                         ctlr->state &= ~TW_CLI_CTLR_STATE_INTERNAL_REQ_BUSY;
1369                 tw_cli_req_q_insert_tail(req, TW_CLI_FREE_Q);
1370         }
1371         return(error);
1372 }
1373
1374
1375
1376 /*
1377  * Function name:       tw_cli_fill_sg_list
1378  * Description:         Fills in the scatter/gather list.
1379  *
1380  * Input:               ctlr    -- ptr to per ctlr structure
1381  *                      sgl_src -- ptr to source SG list to copy from
1382  *                      sgl_dest-- ptr to destination SG list
1383  *                      num_sgl_entries -- # of SG list entries
1384  * Output:              None
1385  * Return value:        None
1386  */
1387 TW_VOID
1388 tw_cli_fill_sg_list(struct tw_cli_ctlr_context *ctlr, TW_VOID *sgl_src,
1389         TW_VOID *sgl_dest, TW_INT32 num_sgl_entries)
1390 {
1391         TW_INT32        i;
1392
1393         tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(), "entered");
1394
1395         if (ctlr->flags & TW_CL_64BIT_ADDRESSES) {
1396                 struct tw_cl_sg_desc64 *sgl_s =
1397                         (struct tw_cl_sg_desc64 *)sgl_src;
1398                 struct tw_cl_sg_desc64 *sgl_d =
1399                         (struct tw_cl_sg_desc64 *)sgl_dest;
1400
1401                 tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
1402                         "64 bit addresses");
1403                 for (i = 0; i < num_sgl_entries; i++) {
1404                         sgl_d[i].address = TW_CL_SWAP64(sgl_s->address);
1405                         sgl_d[i].length = TW_CL_SWAP32(sgl_s->length);
1406                         sgl_s++;
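                        /*
                         * If the OSL supplies 64-bit SG lengths, each source
                         * descriptor carries 4 extra bytes; skip past them.
                         */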
1407                         if (ctlr->flags & TW_CL_64BIT_SG_LENGTH)
1408                                 sgl_s = (struct tw_cl_sg_desc64 *)
1409                                         (((TW_INT8 *)(sgl_s)) + 4);
1410                 }
1411         } else {
1412                 struct tw_cl_sg_desc32 *sgl_s =
1413                         (struct tw_cl_sg_desc32 *)sgl_src;
1414                 struct tw_cl_sg_desc32 *sgl_d =
1415                         (struct tw_cl_sg_desc32 *)sgl_dest;
1416
1417                 tw_cli_dbg_printf(10, ctlr->ctlr_handle, tw_osl_cur_func(),
1418                         "32 bit addresses");
1419                 for (i = 0; i < num_sgl_entries; i++) {
1420                         sgl_d[i].address = TW_CL_SWAP32(sgl_s[i].address);
1421                         sgl_d[i].length = TW_CL_SWAP32(sgl_s[i].length);
1422                 }
1423         }
1424 }
1425