2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2010 LSI Corp.
6 * Author : Manjunath Ranganathaiah <manjunath.ranganathaiah@lsi.com>
8 * Redistribution and use in source and binary forms, with or without
9 * modification, are permitted provided that the following conditions
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in the
15 * documentation and/or other materials provided with the distribution.
17 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
18 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
19 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
20 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
21 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
22 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
23 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
24 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
25 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
26 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 #include <dev/tws/tws.h>
33 #include <dev/tws/tws_services.h>
34 #include <dev/tws/tws_hdm.h>
35 #include <dev/tws/tws_user.h>
37 #include <cam/cam_ccb.h>
38 #include <cam/cam_sim.h>
39 #include <cam/cam_xpt_sim.h>
40 #include <cam/cam_debug.h>
41 #include <cam/cam_periph.h>
43 #include <cam/scsi/scsi_all.h>
44 #include <cam/scsi/scsi_message.h>
46 static int tws_cam_depth=(TWS_MAX_REQS - TWS_RESERVED_REQS);
47 static char tws_sev_str[5][8]={"","ERROR","WARNING","INFO","DEBUG"};
49 static void tws_action(struct cam_sim *sim, union ccb *ccb);
50 static void tws_poll(struct cam_sim *sim);
51 static void tws_scsi_complete(struct tws_request *req);
55 void tws_unmap_request(struct tws_softc *sc, struct tws_request *req);
56 int32_t tws_map_request(struct tws_softc *sc, struct tws_request *req);
57 int tws_bus_scan(struct tws_softc *sc);
58 int tws_cam_attach(struct tws_softc *sc);
59 void tws_cam_detach(struct tws_softc *sc);
60 void tws_reset(void *arg);
62 static void tws_reset_cb(void *arg);
63 static void tws_reinit(void *arg);
64 static int32_t tws_execute_scsi(struct tws_softc *sc, union ccb *ccb);
65 static void tws_freeze_simq(struct tws_softc *sc, struct tws_request *req);
66 static void tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
68 static void tws_fill_sg_list(struct tws_softc *sc, void *sgl_src,
69 void *sgl_dest, u_int16_t num_sgl_entries);
70 static void tws_err_complete(struct tws_softc *sc, u_int64_t mfa);
71 static void tws_scsi_err_complete(struct tws_request *req,
72 struct tws_command_header *hdr);
73 static void tws_passthru_err_complete(struct tws_request *req,
74 struct tws_command_header *hdr);
77 void tws_timeout(void *arg);
78 static void tws_intr_attn_aen(struct tws_softc *sc);
79 static void tws_intr_attn_error(struct tws_softc *sc);
80 static void tws_intr_resp(struct tws_softc *sc);
81 void tws_intr(void *arg);
82 void tws_cmd_complete(struct tws_request *req);
83 void tws_aen_complete(struct tws_request *req);
84 int tws_send_scsi_cmd(struct tws_softc *sc, int cmd);
85 void tws_getset_param_complete(struct tws_request *req);
86 int tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
87 u_int32_t param_size, void *data);
88 int tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
89 u_int32_t param_size, void *data);
92 extern struct tws_request *tws_get_request(struct tws_softc *sc,
94 extern void *tws_release_request(struct tws_request *req);
95 extern int tws_submit_command(struct tws_softc *sc, struct tws_request *req);
96 extern boolean tws_get_response(struct tws_softc *sc,
97 u_int16_t *req_id, u_int64_t *mfa);
98 extern void tws_q_insert_tail(struct tws_softc *sc, struct tws_request *req,
100 extern struct tws_request * tws_q_remove_request(struct tws_softc *sc,
101 struct tws_request *req, u_int8_t q_type );
102 extern void tws_send_event(struct tws_softc *sc, u_int8_t event);
104 extern struct tws_sense *
105 tws_find_sense_from_mfa(struct tws_softc *sc, u_int64_t mfa);
107 extern void tws_fetch_aen(void *arg);
108 extern void tws_disable_db_intr(struct tws_softc *sc);
109 extern void tws_enable_db_intr(struct tws_softc *sc);
110 extern void tws_passthru_complete(struct tws_request *req);
111 extern void tws_aen_synctime_with_host(struct tws_softc *sc);
112 extern void tws_circular_aenq_insert(struct tws_softc *sc,
113 struct tws_circular_q *cq, struct tws_event_packet *aen);
114 extern int tws_use_32bit_sgls;
115 extern boolean tws_ctlr_reset(struct tws_softc *sc);
116 extern struct tws_request * tws_q_remove_tail(struct tws_softc *sc,
118 extern void tws_turn_off_interrupts(struct tws_softc *sc);
119 extern void tws_turn_on_interrupts(struct tws_softc *sc);
120 extern int tws_init_connect(struct tws_softc *sc, u_int16_t mc);
121 extern void tws_init_obfl_q(struct tws_softc *sc);
122 extern uint8_t tws_get_state(struct tws_softc *sc);
123 extern void tws_assert_soft_reset(struct tws_softc *sc);
124 extern boolean tws_ctlr_ready(struct tws_softc *sc);
125 extern u_int16_t tws_poll4_response(struct tws_softc *sc, u_int64_t *mfa);
126 extern int tws_setup_intr(struct tws_softc *sc, int irqs);
127 extern int tws_teardown_intr(struct tws_softc *sc);
/*
 * tws_cam_attach -- register this controller instance with CAM.
 * Clamps the tws_cam_depth tunable into a sane range, allocates a SIM
 * device queue of that depth, creates and registers the SIM, and builds
 * the wildcard path (sc->path) used later for bus rescans.
 * Error paths log via tws_log() and unwind the SIM/devq allocations.
 * NOTE(review): several original lines (returns, braces) are elided in
 * this extract; error-path return values cannot be confirmed here.
 */
132 tws_cam_attach(struct tws_softc *sc)
134 struct cam_devq *devq;
136 TWS_TRACE_DEBUG(sc, "entry", 0, sc);
137 /* Create a device queue for sim */
140 * if the user sets cam depth to less than 1
141 * cam may get confused
143 if ( tws_cam_depth < 1 )
/* Never claim more simultaneous requests than controller depth minus the reserved ones. */
145 if ( tws_cam_depth > (tws_queue_depth - TWS_RESERVED_REQS) )
146 tws_cam_depth = tws_queue_depth - TWS_RESERVED_REQS;
148 TWS_TRACE_DEBUG(sc, "depths,ctlr,cam", tws_queue_depth, tws_cam_depth);
150 if ((devq = cam_simq_alloc(tws_cam_depth)) == NULL) {
151 tws_log(sc, CAM_SIMQ_ALLOC);
156 * Create a SIM entry. Though we can support tws_cam_depth
157 * simultaneous requests, we claim to be able to handle only
158 * (tws_cam_depth), so that we always have reserved requests
159 * packet available to service ioctls and internal commands.
161 sc->sim = cam_sim_alloc(tws_action, tws_poll, "tws", sc,
162 device_get_unit(sc->tws_dev),
164 tws_cam_depth, 1, devq);
166 if (sc->sim == NULL) {
168 tws_log(sc, CAM_SIM_ALLOC);
170 /* Register the bus. */
171 mtx_lock(&sc->sim_lock);
172 if (xpt_bus_register(sc->sim,
175 cam_sim_free(sc->sim, TRUE); /* passing true will free the devq */
176 sc->sim = NULL; /* so cam_detach will not try to free it */
177 mtx_unlock(&sc->sim_lock);
178 tws_log(sc, TWS_XPT_BUS_REGISTER);
181 if (xpt_create_path(&sc->path, NULL, cam_sim_path(sc->sim),
183 CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
184 xpt_bus_deregister(cam_sim_path(sc->sim));
185 /* Passing TRUE to cam_sim_free will free the devq as well. */
186 cam_sim_free(sc->sim, TRUE);
187 tws_log(sc, TWS_XPT_CREATE_PATH);
188 mtx_unlock(&sc->sim_lock);
191 mtx_unlock(&sc->sim_lock);
/*
 * tws_cam_detach -- undo tws_cam_attach: free the rescan path,
 * deregister the bus and free the SIM (TRUE also frees the devq).
 * All XPT teardown is done under sim_lock.
 */
197 tws_cam_detach(struct tws_softc *sc)
199 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
200 mtx_lock(&sc->sim_lock);
202 xpt_free_path(sc->path);
204 xpt_bus_deregister(cam_sim_path(sc->sim));
205 cam_sim_free(sc->sim, TRUE);
207 mtx_unlock(&sc->sim_lock);
/*
 * tws_bus_scan -- kick off a full-bus rescan: allocate a CCB, build a
 * wildcard target/LUN path on our SIM, and (in code elided from this
 * extract) hand the CCB to xpt_rescan().  Path creation failure simply
 * drops the lock and bails.
 */
211 tws_bus_scan(struct tws_softc *sc)
215 TWS_TRACE_DEBUG(sc, "entry", sc, 0);
218 ccb = xpt_alloc_ccb();
219 mtx_lock(&sc->sim_lock);
220 if (xpt_create_path(&ccb->ccb_h.path, NULL, cam_sim_path(sc->sim),
221 CAM_TARGET_WILDCARD, CAM_LUN_WILDCARD) != CAM_REQ_CMP) {
222 mtx_unlock(&sc->sim_lock);
227 mtx_unlock(&sc->sim_lock);
/*
 * tws_action -- CAM action entry point for the SIM.  Dispatches on the
 * CCB function code: SCSI I/O goes to tws_execute_scsi(); transport
 * settings report a fixed SPI/SCSI-2 personality; XPT_PATH_INQ
 * advertises controller limits (targets, LUNs, maxio).  Unsupported
 * codes get CAM_FUNC_NOTAVAIL / CAM_REQ_INVALID.
 */
232 tws_action(struct cam_sim *sim, union ccb *ccb)
234 struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
237 switch( ccb->ccb_h.func_code ) {
240 if ( tws_execute_scsi(sc, ccb) )
241 TWS_TRACE_DEBUG(sc, "execute scsi failed", 0, 0);
/* Abort is not supported; report unable-to-abort. */
246 TWS_TRACE_DEBUG(sc, "abort i/o", 0, 0);
247 ccb->ccb_h.status = CAM_UA_ABORT;
253 TWS_TRACE_DEBUG(sc, "reset bus", sim, ccb);
256 case XPT_SET_TRAN_SETTINGS:
258 TWS_TRACE_DEBUG(sc, "set tran settings", sim, ccb);
259 ccb->ccb_h.status = CAM_FUNC_NOTAVAIL;
264 case XPT_GET_TRAN_SETTINGS:
266 TWS_TRACE_DEBUG(sc, "get tran settings", sim, ccb);
/* Report a fixed SPI transport / SCSI-2 protocol with disconnect and tagged queueing enabled. */
268 ccb->cts.protocol = PROTO_SCSI;
269 ccb->cts.protocol_version = SCSI_REV_2;
270 ccb->cts.transport = XPORT_SPI;
271 ccb->cts.transport_version = 2;
273 ccb->cts.xport_specific.spi.valid = CTS_SPI_VALID_DISC;
274 ccb->cts.xport_specific.spi.flags = CTS_SPI_FLAGS_DISC_ENB;
275 ccb->cts.proto_specific.scsi.valid = CTS_SCSI_VALID_TQ;
276 ccb->cts.proto_specific.scsi.flags = CTS_SCSI_FLAGS_TAG_ENB;
277 ccb->ccb_h.status = CAM_REQ_CMP;
282 case XPT_CALC_GEOMETRY:
284 TWS_TRACE_DEBUG(sc, "calc geometry(ccb,block-size)", ccb,
285 ccb->ccg.block_size);
286 cam_calc_geometry(&ccb->ccg, 1/* extended */);
/* XPT_PATH_INQ: describe the HBA's capabilities and identity to CAM. */
293 TWS_TRACE_DEBUG(sc, "path inquiry", sim, ccb);
294 ccb->cpi.version_num = 1;
295 ccb->cpi.hba_inquiry = 0;
296 ccb->cpi.target_sprt = 0;
297 ccb->cpi.hba_misc = 0;
298 ccb->cpi.hba_eng_cnt = 0;
299 ccb->cpi.max_target = TWS_MAX_NUM_UNITS;
300 ccb->cpi.max_lun = TWS_MAX_NUM_LUNS - 1;
301 ccb->cpi.unit_number = cam_sim_unit(sim);
302 ccb->cpi.bus_id = cam_sim_bus(sim);
303 ccb->cpi.initiator_id = TWS_SCSI_INITIATOR_ID;
304 ccb->cpi.base_transfer_speed = 6000000;
305 strlcpy(ccb->cpi.sim_vid, "FreeBSD", SIM_IDLEN);
306 strlcpy(ccb->cpi.hba_vid, "3ware", HBA_IDLEN);
307 strlcpy(ccb->cpi.dev_name, cam_sim_name(sim), DEV_IDLEN);
308 ccb->cpi.transport = XPORT_SPI;
309 ccb->cpi.transport_version = 2;
310 ccb->cpi.protocol = PROTO_SCSI;
311 ccb->cpi.protocol_version = SCSI_REV_2;
312 ccb->cpi.maxio = TWS_MAX_IO_SIZE;
313 ccb->ccb_h.status = CAM_REQ_CMP;
319 TWS_TRACE_DEBUG(sc, "default", sim, ccb);
320 ccb->ccb_h.status = CAM_REQ_INVALID;
/*
 * tws_scsi_complete -- successful completion callback for a SCSI I/O
 * request: pull it off the busy queue, cancel its timeout, unmap DMA,
 * complete the CCB back to CAM (under sim_lock), then return the
 * request to the free queue.
 */
327 tws_scsi_complete(struct tws_request *req)
329 struct tws_softc *sc = req->sc;
331 mtx_lock(&sc->q_lock);
332 tws_q_remove_request(sc, req, TWS_BUSY_Q);
333 mtx_unlock(&sc->q_lock);
335 callout_stop(&req->timeout);
336 tws_unmap_request(req->sc, req);
339 req->ccb_ptr->ccb_h.status = CAM_REQ_CMP;
340 mtx_lock(&sc->sim_lock);
341 xpt_done(req->ccb_ptr);
342 mtx_unlock(&sc->sim_lock);
344 mtx_lock(&sc->q_lock);
345 tws_q_insert_tail(sc, req, TWS_FREE_Q);
346 mtx_unlock(&sc->q_lock);
/*
 * tws_getset_param_complete -- completion for get/set-parameter
 * requests: cancel the timeout, unmap DMA, free the data buffer that
 * tws_set_param()/tws_get_param() allocated, and mark the reserved
 * request slot free again.
 */
350 tws_getset_param_complete(struct tws_request *req)
352 struct tws_softc *sc = req->sc;
354 TWS_TRACE_DEBUG(sc, "getset complete", req, req->request_id);
356 callout_stop(&req->timeout);
357 tws_unmap_request(sc, req);
359 free(req->data, M_TWS);
361 req->state = TWS_REQ_STATE_FREE;
/*
 * tws_aen_complete -- completion for an AEN (asynchronous event
 * notification) fetch.  Interprets the returned sense buffer as a
 * tws_command_header, extracts the AEN code, and either syncs time
 * with the host, stops on queue-empty, or packages the event into a
 * tws_event_packet and inserts it into the circular AEN queue.
 * Finally frees the request and, unless the firmware queue is empty,
 * immediately fetches the next AEN.
 */
365 tws_aen_complete(struct tws_request *req)
367 struct tws_softc *sc = req->sc;
368 struct tws_command_header *sense;
369 struct tws_event_packet event;
370 u_int16_t aen_code=0;
372 TWS_TRACE_DEBUG(sc, "aen complete", 0, req->request_id);
374 callout_stop(&req->timeout);
375 tws_unmap_request(sc, req);
377 sense = (struct tws_command_header *)req->data;
379 TWS_TRACE_DEBUG(sc,"sense code, key",sense->sense_data[0],
380 sense->sense_data[2]);
381 TWS_TRACE_DEBUG(sc,"sense rid, seve",sense->header_desc.request_id,
382 sense->status_block.res__severity);
383 TWS_TRACE_DEBUG(sc,"sense srcnum, error",sense->status_block.srcnum,
384 sense->status_block.error);
385 TWS_TRACE_DEBUG(sc,"sense shdr, ssense",sense->header_desc.size_header,
386 sense->header_desc.size_sense);
388 aen_code = sense->status_block.error;
390 switch ( aen_code ) {
391 case TWS_AEN_SYNC_TIME_WITH_HOST :
392 tws_aen_synctime_with_host(sc);
394 case TWS_AEN_QUEUE_EMPTY :
/* Default: build an event packet from the sense data and queue it for consumers. */
397 bzero(&event, sizeof(struct tws_event_packet));
398 event.sequence_id = sc->seq_id;
399 event.time_stamp_sec = (u_int32_t)TWS_LOCAL_TIME;
400 event.aen_code = sense->status_block.error;
401 event.severity = sense->status_block.res__severity & 0x7;
402 event.event_src = TWS_SRC_CTRL_EVENT;
403 strcpy(event.severity_str, tws_sev_str[event.severity]);
404 event.retrieved = TWS_AEN_NOT_RETRIEVED;
406 bcopy(sense->err_specific_desc, event.parameter_data,
407 TWS_ERROR_SPECIFIC_DESC_LEN);
408 event.parameter_data[TWS_ERROR_SPECIFIC_DESC_LEN - 1] = '\0';
409 event.parameter_len = (u_int8_t)strlen(event.parameter_data)+1;
/* The descriptor may hold a second NUL-terminated string after the first; include it. */
411 if ( event.parameter_len < TWS_ERROR_SPECIFIC_DESC_LEN ) {
412 event.parameter_len += ((u_int8_t)strlen(event.parameter_data +
413 event.parameter_len) + 1);
416 device_printf(sc->tws_dev, "%s: (0x%02X: 0x%04X): %s: %s\n",
420 event.parameter_data +
421 (strlen(event.parameter_data) + 1),
422 event.parameter_data);
424 mtx_lock(&sc->gen_lock);
425 tws_circular_aenq_insert(sc, &sc->aen_q, &event);
427 mtx_unlock(&sc->gen_lock);
432 free(req->data, M_TWS);
434 req->state = TWS_REQ_STATE_FREE;
436 if ( aen_code != TWS_AEN_QUEUE_EMPTY ) {
437 /* timeout(tws_fetch_aen, sc, 1);*/
438 sc->stats.num_aens++;
439 tws_fetch_aen((void *)sc);
/*
 * tws_cmd_complete -- generic completion: cancel the request's timeout
 * and tear down its DMA mapping.  No queue or CCB handling here.
 */
444 tws_cmd_complete(struct tws_request *req)
446 struct tws_softc *sc = req->sc;
448 callout_stop(&req->timeout);
449 tws_unmap_request(sc, req);
/*
 * tws_err_complete -- handle a response that arrived with an error MFA
 * (message frame address).  Looks up the sense buffer for the MFA,
 * recovers the owning request from the header's request id, dispatches
 * to the per-type error completion, then returns the sense frame to
 * the firmware by writing the MFA back to the outbound queue registers
 * (high then low word).  TWS_BIT13 in the status register signals an
 * outbound-free-list overrun, which is recorded in the softc.
 */
453 tws_err_complete(struct tws_softc *sc, u_int64_t mfa)
455 struct tws_command_header *hdr;
456 struct tws_sense *sen;
457 struct tws_request *req;
459 u_int32_t reg, status;
462 TWS_TRACE_DEBUG(sc, "null mfa", 0, mfa);
465 /* lookup the sense */
466 sen = tws_find_sense_from_mfa(sc, mfa);
468 TWS_TRACE_DEBUG(sc, "found null req", 0, mfa);
472 TWS_TRACE_DEBUG(sc, "sen, hdr", sen, hdr);
473 req_id = hdr->header_desc.request_id;
474 req = &sc->reqs[req_id];
475 TWS_TRACE_DEBUG(sc, "req, id", req, req_id);
476 if ( req->error_code != TWS_REQ_RET_SUBMIT_SUCCESS )
477 TWS_TRACE_DEBUG(sc, "submit failure?", 0, req->error_code);
/* Dispatch error handling by request type. */
481 case TWS_REQ_TYPE_PASSTHRU :
482 tws_passthru_err_complete(req, hdr);
484 case TWS_REQ_TYPE_GETSET_PARAM :
485 tws_getset_param_complete(req);
487 case TWS_REQ_TYPE_SCSI_IO :
488 tws_scsi_err_complete(req, hdr);
/* Return the sense frame to firmware: post MFA high/low to the host outbound queue. */
493 mtx_lock(&sc->io_lock);
494 hdr->header_desc.size_header = 128;
495 reg = (u_int32_t)( mfa>>32);
496 tws_write_reg(sc, TWS_I2O0_HOBQPH, reg, 4);
497 reg = (u_int32_t)(mfa);
498 tws_write_reg(sc, TWS_I2O0_HOBQPL, reg, 4);
500 status = tws_read_reg(sc, TWS_I2O0_STATUS, 4);
501 if ( status & TWS_BIT13 ) {
502 device_printf(sc->tws_dev, "OBFL Overrun\n");
503 sc->obfl_q_overrun = true;
505 mtx_unlock(&sc->io_lock);
/*
 * tws_scsi_err_complete -- translate a firmware error on a SCSI I/O
 * into CAM status codes.  Unsupported/offline units map to
 * CAM_DEV_NOT_THERE (nonzero LUN) or CAM_SEL_TIMEOUT (target); other
 * errors become CAM_SCSI_STATUS_ERROR, with autosense data copied from
 * the header.  The completed CCB is handed back to CAM and the request
 * is moved from the busy queue to the free queue.
 */
509 tws_scsi_err_complete(struct tws_request *req, struct tws_command_header *hdr)
511 u_int8_t *sense_data;
512 struct tws_softc *sc = req->sc;
513 union ccb *ccb = req->ccb_ptr;
515 TWS_TRACE_DEBUG(sc, "sbe, cmd_status", hdr->status_block.error,
516 req->cmd_pkt->cmd.pkt_a.status);
517 if ( hdr->status_block.error == TWS_ERROR_LOGICAL_UNIT_NOT_SUPPORTED ||
518 hdr->status_block.error == TWS_ERROR_UNIT_OFFLINE ) {
520 if ( ccb->ccb_h.target_lun ) {
521 TWS_TRACE_DEBUG(sc, "invalid lun error",0,0);
522 ccb->ccb_h.status |= CAM_DEV_NOT_THERE;
524 TWS_TRACE_DEBUG(sc, "invalid target error",0,0);
525 ccb->ccb_h.status |= CAM_SEL_TIMEOUT;
529 TWS_TRACE_DEBUG(sc, "scsi status error",0,0);
530 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR;
/* MODE SENSE(6) (opcode 0x1A) rejected as unsupported: report with autosense valid. */
531 if (((ccb->csio.cdb_io.cdb_bytes[0] == 0x1A) &&
532 (hdr->status_block.error == TWS_ERROR_NOT_SUPPORTED))) {
533 ccb->ccb_h.status |= CAM_SCSI_STATUS_ERROR | CAM_AUTOSNS_VALID;
534 TWS_TRACE_DEBUG(sc, "page mode not supported",0,0);
538 /* if there were no error simply mark complete error */
539 if (ccb->ccb_h.status == 0)
540 ccb->ccb_h.status = CAM_REQ_CMP_ERR;
542 sense_data = (u_int8_t *)&ccb->csio.sense_data;
544 memcpy(sense_data, hdr->sense_data, TWS_SENSE_DATA_LENGTH );
545 ccb->csio.sense_len = TWS_SENSE_DATA_LENGTH;
546 ccb->ccb_h.status |= CAM_AUTOSNS_VALID;
548 ccb->csio.scsi_status = req->cmd_pkt->cmd.pkt_a.status;
550 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
551 mtx_lock(&sc->sim_lock);
553 mtx_unlock(&sc->sim_lock);
555 callout_stop(&req->timeout);
556 tws_unmap_request(req->sc, req);
557 mtx_lock(&sc->q_lock);
558 tws_q_remove_request(sc, req, TWS_BUSY_Q);
559 tws_q_insert_tail(sc, req, TWS_FREE_Q);
560 mtx_unlock(&sc->q_lock);
/*
 * tws_passthru_err_complete -- error completion for an ioctl passthru
 * request: record the firmware error code, copy the returned command
 * header back into the user-visible packet, then run the normal
 * passthru completion so the waiting ioctl is woken.
 */
564 tws_passthru_err_complete(struct tws_request *req,
565 struct tws_command_header *hdr)
567 TWS_TRACE_DEBUG(req->sc, "entry", hdr, req->request_id);
568 req->error_code = hdr->status_block.error;
569 memcpy(&(req->cmd_pkt->hdr), hdr, sizeof(struct tws_command_header));
570 tws_passthru_complete(req);
/*
 * tws_drain_busy_queue -- during a controller reset, pop every request
 * off the busy queue, cancel its timeout, mark it reset, and complete
 * its CCB back to CAM with CAM_REQUEUE_REQ | CAM_SCSI_BUS_RESET so the
 * upper layer retries it.  Each drained request goes back on the free
 * queue before the next one is removed.
 */
574 tws_drain_busy_queue(struct tws_softc *sc)
576 struct tws_request *req;
578 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
580 mtx_lock(&sc->q_lock);
581 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
582 mtx_unlock(&sc->q_lock);
/* Loop body (while req != NULL, per elided lines) processes one drained request at a time. */
584 TWS_TRACE_DEBUG(sc, "moved to TWS_COMPLETE_Q", 0, req->request_id);
585 callout_stop(&req->timeout);
587 req->error_code = TWS_REQ_RET_RESET;
588 ccb = (union ccb *)(req->ccb_ptr);
590 ccb->ccb_h.status &= ~CAM_SIM_QUEUED;
591 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
592 ccb->ccb_h.status |= CAM_SCSI_BUS_RESET;
594 tws_unmap_request(req->sc, req);
596 mtx_lock(&sc->sim_lock);
597 xpt_done(req->ccb_ptr);
598 mtx_unlock(&sc->sim_lock);
600 mtx_lock(&sc->q_lock);
601 tws_q_insert_tail(sc, req, TWS_FREE_Q);
602 req = tws_q_remove_tail(sc, TWS_BUSY_Q);
603 mtx_unlock(&sc->q_lock);
/*
 * tws_drain_reserved_reqs -- during reset, clean up the three reserved
 * request slots (AEN fetch, passthru, get/set param), which live at
 * fixed indices in sc->reqs.  AEN and param requests are torn down and
 * freed; an in-flight passthru is only flagged with TWS_REQ_RET_RESET
 * (its owner ioctl handles the rest).
 */
611 struct tws_request *r;
613 r = &sc->reqs[TWS_REQ_TYPE_AEN_FETCH];
614 if ( r->state != TWS_REQ_STATE_FREE ) {
615 TWS_TRACE_DEBUG(sc, "reset aen req", 0, 0);
616 callout_stop(&r->timeout);
617 tws_unmap_request(sc, r);
618 free(r->data, M_TWS);
619 r->state = TWS_REQ_STATE_FREE;
620 r->error_code = TWS_REQ_RET_RESET;
623 r = &sc->reqs[TWS_REQ_TYPE_PASSTHRU];
624 if ( r->state == TWS_REQ_STATE_BUSY ) {
625 TWS_TRACE_DEBUG(sc, "reset passthru req", 0, 0);
626 r->error_code = TWS_REQ_RET_RESET;
629 r = &sc->reqs[TWS_REQ_TYPE_GETSET_PARAM];
630 if ( r->state != TWS_REQ_STATE_FREE ) {
631 TWS_TRACE_DEBUG(sc, "reset setparam req", 0, 0);
632 callout_stop(&r->timeout);
633 tws_unmap_request(sc, r);
634 free(r->data, M_TWS);
635 r->state = TWS_REQ_STATE_FREE;
636 r->error_code = TWS_REQ_RET_RESET;
/*
 * tws_drain_response_queue -- discard every pending firmware response
 * by spinning on tws_get_response() until it reports the queue empty.
 */
641 tws_drain_response_queue(struct tws_softc *sc)
645 while ( tws_get_response(sc, &req_id, &mfa) );
/*
 * tws_execute_scsi -- turn an XPT_SCSI_IO CCB into a firmware
 * EXECUTE_SCSI command.  Validates target/LUN and rejects physical CDB
 * addresses, grabs a request from the free pool, records the data
 * direction, builds the command packet (unit, LUN nibbles, CDB copy),
 * arms the per-request timeout from the CCB's timeout, and hands off
 * to tws_map_request() which maps the data and submits the I/O from
 * its DMA callback.  Must be called with sim_lock held.
 */
650 tws_execute_scsi(struct tws_softc *sc, union ccb *ccb)
652 struct tws_command_packet *cmd_pkt;
653 struct tws_request *req;
654 struct ccb_hdr *ccb_h = &(ccb->ccb_h);
655 struct ccb_scsiio *csio = &(ccb->csio);
659 mtx_assert(&sc->sim_lock, MA_OWNED);
660 if (ccb_h->target_id >= TWS_MAX_NUM_UNITS) {
661 TWS_TRACE_DEBUG(sc, "traget id too big", ccb_h->target_id, ccb_h->target_lun);
662 ccb_h->status |= CAM_TID_INVALID;
666 if (ccb_h->target_lun >= TWS_MAX_NUM_LUNS) {
667 TWS_TRACE_DEBUG(sc, "target lun 2 big", ccb_h->target_id, ccb_h->target_lun);
668 ccb_h->status |= CAM_LUN_INVALID;
/* Physical CDB addresses are not supported by this driver. */
673 if(ccb_h->flags & CAM_CDB_PHYS) {
674 TWS_TRACE_DEBUG(sc, "cdb phy", ccb_h->target_id, ccb_h->target_lun);
675 ccb_h->status = CAM_REQ_INVALID;
681 * We are going to work on this request. Mark it as enqueued (though
682 * we don't actually queue it...)
684 ccb_h->status |= CAM_SIM_QUEUED;
686 req = tws_get_request(sc, TWS_REQ_TYPE_SCSI_IO);
688 TWS_TRACE_DEBUG(sc, "no reqs", ccb_h->target_id, ccb_h->target_lun);
689 ccb_h->status |= CAM_REQUEUE_REQ;
/* Record data direction flags for the later DMA sync in map/unmap. */
694 if((ccb_h->flags & CAM_DIR_MASK) != CAM_DIR_NONE) {
695 if(ccb_h->flags & CAM_DIR_IN)
696 req->flags |= TWS_DIR_IN;
697 if(ccb_h->flags & CAM_DIR_OUT)
698 req->flags |= TWS_DIR_OUT;
700 req->flags = TWS_DIR_NONE; /* no data */
703 req->type = TWS_REQ_TYPE_SCSI_IO;
704 req->cb = tws_scsi_complete;
706 cmd_pkt = req->cmd_pkt;
707 /* cmd_pkt->hdr.header_desc.size_header = 128; */
708 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
709 cmd_pkt->cmd.pkt_a.unit = ccb_h->target_id;
710 cmd_pkt->cmd.pkt_a.status = 0;
711 cmd_pkt->cmd.pkt_a.sgl_offset = 16;
/* The 8-bit LUN is split: low nibble shares a field with the request id ... */
714 lun = ccb_h->target_lun & 0XF;
716 cmd_pkt->cmd.pkt_a.lun_l4__req_id = lun | req->request_id;
/* ... and the high nibble shares a field with the SGL entry count (ORed in later). */
718 lun = ccb_h->target_lun & 0XF0;
720 cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries = lun;
723 if ( csio->cdb_len > 16 )
724 TWS_TRACE(sc, "cdb len too big", ccb_h->target_id, csio->cdb_len);
727 if(ccb_h->flags & CAM_CDB_POINTER)
728 bcopy(csio->cdb_io.cdb_ptr, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
730 bcopy(csio->cdb_io.cdb_bytes, cmd_pkt->cmd.pkt_a.cdb, csio->cdb_len);
733 req->flags |= TWS_DATA_CCB;
737 * tws_map_load_data_callback will fill in the SGL,
738 * and submit the I/O.
740 sc->stats.scsi_ios++;
741 callout_reset_sbt(&req->timeout, SBT_1MS * ccb->ccb_h.timeout, 0,
742 tws_timeout, req, 0);
743 error = tws_map_request(sc, req);
/*
 * tws_send_scsi_cmd -- issue an internal SCSI command (used for AEN
 * fetches) on the reserved AEN request slot.  Builds an EXECUTE_SCSI
 * packet with the given opcode in cdb[0] and an allocation length of
 * 128 in cdb[4], allocates a sector-sized DMA-in buffer, arms the I/O
 * timeout, and maps/submits via tws_map_request().
 */
749 tws_send_scsi_cmd(struct tws_softc *sc, int cmd)
751 struct tws_request *req;
752 struct tws_command_packet *cmd_pkt;
755 TWS_TRACE_DEBUG(sc, "entry",sc, cmd);
756 req = tws_get_request(sc, TWS_REQ_TYPE_AEN_FETCH);
761 req->cb = tws_aen_complete;
763 cmd_pkt = req->cmd_pkt;
764 cmd_pkt->cmd.pkt_a.res__opcode = TWS_FW_CMD_EXECUTE_SCSI;
765 cmd_pkt->cmd.pkt_a.status = 0;
766 cmd_pkt->cmd.pkt_a.unit = 0;
767 cmd_pkt->cmd.pkt_a.sgl_offset = 16;
768 cmd_pkt->cmd.pkt_a.lun_l4__req_id = req->request_id;
770 cmd_pkt->cmd.pkt_a.cdb[0] = (u_int8_t)cmd;
771 cmd_pkt->cmd.pkt_a.cdb[4] = 128;
773 req->length = TWS_SECTOR_SIZE;
774 req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
775 if ( req->data == NULL )
777 bzero(req->data, TWS_SECTOR_SIZE);
778 req->flags = TWS_DIR_IN;
780 callout_reset(&req->timeout, (TWS_IO_TIMEOUT * hz), tws_timeout, req);
781 error = tws_map_request(sc, req);
/*
 * tws_set_param -- asynchronously write one firmware parameter.
 * Allocates a sector-sized buffer holding a tws_getset_param descriptor
 * (table id tagged with TWS_9K_PARAM_DESCRIPTOR, parameter id/size,
 * caller's data), builds a SET_PARAM "giga" command on the reserved
 * get/set slot, arms the ioctl timeout, and submits via
 * tws_map_request().  Completion/cleanup happens in
 * tws_getset_param_complete().
 */
787 tws_set_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
788 u_int32_t param_size, void *data)
790 struct tws_request *req;
791 struct tws_command_packet *cmd_pkt;
792 union tws_command_giga *cmd;
793 struct tws_getset_param *param;
796 req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
798 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
802 req->length = TWS_SECTOR_SIZE;
803 req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
804 if ( req->data == NULL )
806 bzero(req->data, TWS_SECTOR_SIZE);
807 param = (struct tws_getset_param *)req->data;
809 req->cb = tws_getset_param_complete;
810 req->flags = TWS_DIR_OUT;
811 cmd_pkt = req->cmd_pkt;
813 cmd = &cmd_pkt->cmd.pkt_g;
814 cmd->param.sgl_off__opcode =
815 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_SET_PARAM);
816 cmd->param.request_id = (u_int8_t)req->request_id;
817 cmd->param.host_id__unit = 0;
818 cmd->param.param_count = 1;
819 cmd->param.size = 2; /* map routine will add sgls */
821 /* Specify which parameter we want to set. */
822 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
823 param->parameter_id = (u_int8_t)(param_id);
824 param->parameter_size_bytes = (u_int16_t)param_size;
825 memcpy(param->data, data, param_size);
827 callout_reset(&req->timeout, (TWS_IOCTL_TIMEOUT * hz), tws_timeout, req);
828 error = tws_map_request(sc, req);
/*
 * tws_get_param -- synchronously read one firmware parameter.  Builds
 * a GET_PARAM "giga" command like tws_set_param(), submits it, then
 * POLLS for the response with tws_poll4_response() instead of using a
 * completion callback; on a matching response the parameter data is
 * copied into the caller's buffer.  The request buffer is freed and
 * the slot released before returning.
 */
834 tws_get_param(struct tws_softc *sc, u_int32_t table_id, u_int32_t param_id,
835 u_int32_t param_size, void *data)
837 struct tws_request *req;
838 struct tws_command_packet *cmd_pkt;
839 union tws_command_giga *cmd;
840 struct tws_getset_param *param;
846 req = tws_get_request(sc, TWS_REQ_TYPE_GETSET_PARAM);
848 TWS_TRACE_DEBUG(sc, "null req", 0, 0);
852 req->length = TWS_SECTOR_SIZE;
853 req->data = malloc(TWS_SECTOR_SIZE, M_TWS, M_NOWAIT);
854 if ( req->data == NULL )
856 bzero(req->data, TWS_SECTOR_SIZE);
857 param = (struct tws_getset_param *)req->data;
860 req->flags = TWS_DIR_IN;
861 cmd_pkt = req->cmd_pkt;
863 cmd = &cmd_pkt->cmd.pkt_g;
864 cmd->param.sgl_off__opcode =
865 BUILD_SGL_OFF__OPCODE(2, TWS_FW_CMD_GET_PARAM);
866 cmd->param.request_id = (u_int8_t)req->request_id;
867 cmd->param.host_id__unit = 0;
868 cmd->param.param_count = 1;
869 cmd->param.size = 2; /* map routine will add sgls */
871 /* Specify which parameter we want to set. */
872 param->table_id = (table_id | TWS_9K_PARAM_DESCRIPTOR);
873 param->parameter_id = (u_int8_t)(param_id);
874 param->parameter_size_bytes = (u_int16_t)param_size;
/* Polled mode: no callback; wait for our request id to come back. */
876 error = tws_map_request(sc, req);
878 reqid = tws_poll4_response(sc, &mfa);
879 tws_unmap_request(sc, req);
881 if ( reqid == TWS_REQ_TYPE_GETSET_PARAM ) {
882 memcpy(data, param->data, param_size);
888 free(req->data, M_TWS);
889 req->state = TWS_REQ_STATE_FREE;
/*
 * tws_unmap_request -- complete the DMA cycle for a request with a
 * data buffer: post-sync according to the recorded direction flags,
 * then unload the map under io_lock.  No-op when req->data is NULL.
 */
895 tws_unmap_request(struct tws_softc *sc, struct tws_request *req)
897 if (req->data != NULL) {
898 if ( req->flags & TWS_DIR_IN )
899 bus_dmamap_sync(sc->data_tag, req->dma_map,
900 BUS_DMASYNC_POSTREAD);
901 if ( req->flags & TWS_DIR_OUT )
902 bus_dmamap_sync(sc->data_tag, req->dma_map,
903 BUS_DMASYNC_POSTWRITE);
904 mtx_lock(&sc->io_lock);
905 bus_dmamap_unload(sc->data_tag, req->dma_map);
906 mtx_unlock(&sc->io_lock);
/*
 * tws_map_request -- map a request's data for DMA and submit it.
 * CCB-backed data uses bus_dmamap_load_ccb(); internal buffers use
 * bus_dmamap_load().  The load callback (tws_dmamap_data_load_cbfn)
 * fills the SGL and submits the command.  EINPROGRESS (deferred load,
 * possible for WAITOK SCSI I/O) freezes the simq and is treated as
 * success.  Requests with no data are submitted directly.
 */
911 tws_map_request(struct tws_softc *sc, struct tws_request *req)
916 /* If the command involves data, map that too. */
917 if (req->data != NULL) {
/* Only external SCSI I/O may sleep waiting for map resources. */
918 int my_flags = ((req->type == TWS_REQ_TYPE_SCSI_IO) ? BUS_DMA_WAITOK : BUS_DMA_NOWAIT);
921 * Map the data buffer into bus space and build the SG list.
923 mtx_lock(&sc->io_lock);
924 if (req->flags & TWS_DATA_CCB)
925 error = bus_dmamap_load_ccb(sc->data_tag, req->dma_map,
927 tws_dmamap_data_load_cbfn, req,
930 error = bus_dmamap_load(sc->data_tag, req->dma_map,
931 req->data, req->length,
932 tws_dmamap_data_load_cbfn, req,
934 mtx_unlock(&sc->io_lock);
936 if (error == EINPROGRESS) {
937 TWS_TRACE(sc, "in progress", 0, error);
938 tws_freeze_simq(sc, req);
939 error = 0; // EINPROGRESS is not a fatal error.
941 } else { /* no data involved */
942 error = tws_submit_command(sc, req);
/*
 * tws_dmamap_data_load_cbfn -- busdma load callback: receives the DMA
 * segment list, pre-syncs the map per direction, copies the segments
 * into the command's SGL (generic "giga" commands append after the
 * fixed header at gcmd->size words; EXECUTE_SCSI commands use the
 * packet's inline sg_list and OR the count into lun_h4__sgl_entries),
 * then submits the command.  EFBIG marks the CCB CAM_REQ_TOO_BIG.
 */
949 tws_dmamap_data_load_cbfn(void *arg, bus_dma_segment_t *segs,
952 struct tws_request *req = (struct tws_request *)arg;
953 struct tws_softc *sc = req->sc;
954 u_int16_t sgls = nseg;
956 struct tws_cmd_generic *gcmd;
960 TWS_TRACE(sc, "SOMETHING BAD HAPPENED! error = %d\n", error, 0);
963 if ( error == EFBIG ) {
964 TWS_TRACE(sc, "not enough data segs", 0, nseg);
965 req->error_code = error;
966 req->ccb_ptr->ccb_h.status = CAM_REQ_TOO_BIG;
970 if ( req->flags & TWS_DIR_IN )
971 bus_dmamap_sync(req->sc->data_tag, req->dma_map,
972 BUS_DMASYNC_PREREAD);
973 if ( req->flags & TWS_DIR_OUT )
974 bus_dmamap_sync(req->sc->data_tag, req->dma_map,
975 BUS_DMASYNC_PREWRITE);
/* Non-SCSI passthru and get/set-param commands carry their SGL after the generic header. */
977 if ( (req->type == TWS_REQ_TYPE_PASSTHRU &&
978 GET_OPCODE(req->cmd_pkt->cmd.pkt_a.res__opcode) !=
979 TWS_FW_CMD_EXECUTE_SCSI) ||
980 req->type == TWS_REQ_TYPE_GETSET_PARAM) {
981 gcmd = &req->cmd_pkt->cmd.pkt_g.generic;
982 sgl_ptr = (u_int32_t *)(gcmd) + gcmd->size;
/* Each 64-bit SGL entry is 4 32-bit words; 32-bit entries are 2. */
984 ((req->sc->is64bit && !tws_use_32bit_sgls) ? 4 : 2 );
985 tws_fill_sg_list(req->sc, (void *)segs, sgl_ptr, sgls);
988 tws_fill_sg_list(req->sc, (void *)segs,
989 (void *)&(req->cmd_pkt->cmd.pkt_a.sg_list), sgls);
990 req->cmd_pkt->cmd.pkt_a.lun_h4__sgl_entries |= sgls ;
995 req->error_code = tws_submit_command(req->sc, req);
/*
 * tws_fill_sg_list -- copy a bus_dma_segment_t array into the firmware
 * SGL format.  Three cases: 64-bit host writing 64-bit descriptors
 * (source advanced by sizeof(bus_dma_segment_t) since the layouts
 * differ), 64-bit host down-converting to 32-bit descriptors, and a
 * pure 32-bit copy.  Overflow beyond TWS_MAX_*_SG_ELEMENTS is only
 * traced, not truncated.
 */
1001 tws_fill_sg_list(struct tws_softc *sc, void *sgl_src, void *sgl_dest,
1002 u_int16_t num_sgl_entries)
1006 if ( sc->is64bit ) {
1007 struct tws_sg_desc64 *sgl_s = (struct tws_sg_desc64 *)sgl_src;
1009 if ( !tws_use_32bit_sgls ) {
1010 struct tws_sg_desc64 *sgl_d = (struct tws_sg_desc64 *)sgl_dest;
1011 if ( num_sgl_entries > TWS_MAX_64BIT_SG_ELEMENTS )
1012 TWS_TRACE(sc, "64bit sg overflow", num_sgl_entries, 0);
1013 for (i = 0; i < num_sgl_entries; i++) {
1014 sgl_d[i].address = sgl_s->address;
1015 sgl_d[i].length = sgl_s->length;
1017 sgl_d[i].reserved = 0;
/* Step the source by the real bus_dma_segment_t stride, not the descriptor size. */
1018 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1019 sizeof(bus_dma_segment_t));
1022 struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1023 if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1024 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1025 for (i = 0; i < num_sgl_entries; i++) {
1026 sgl_d[i].address = sgl_s->address;
1027 sgl_d[i].length = sgl_s->length;
1029 sgl_s = (struct tws_sg_desc64 *) (((u_int8_t *)sgl_s) +
1030 sizeof(bus_dma_segment_t));
1034 struct tws_sg_desc32 *sgl_s = (struct tws_sg_desc32 *)sgl_src;
1035 struct tws_sg_desc32 *sgl_d = (struct tws_sg_desc32 *)sgl_dest;
1037 if ( num_sgl_entries > TWS_MAX_32BIT_SG_ELEMENTS )
1038 TWS_TRACE(sc, "32bit sg overflow", num_sgl_entries, 0);
1041 for (i = 0; i < num_sgl_entries; i++) {
1042 sgl_d[i].address = sgl_s[i].address;
1043 sgl_d[i].length = sgl_s[i].length;
/*
 * tws_intr (body; signature line elided from this extract) -- main
 * interrupt handler.  Bails if the softc is NULL, the controller is in
 * reset, or not online.  Bit 2 of HISTAT signals a doorbell: read the
 * inbound doorbell and dispatch attention-error (bit 21) and/or AEN
 * (bit 18).  Bit 3 indicates response-queue work (handler elided;
 * presumably tws_intr_resp — confirm against full source).
 */
1053 struct tws_softc *sc = (struct tws_softc *)arg;
1054 u_int32_t histat=0, db=0;
1057 device_printf(sc->tws_dev, "null softc!!!\n");
1061 if ( tws_get_state(sc) == TWS_RESET ) {
1065 if ( tws_get_state(sc) != TWS_ONLINE ) {
1069 sc->stats.num_intrs++;
1070 histat = tws_read_reg(sc, TWS_I2O0_HISTAT, 4);
1071 if ( histat & TWS_BIT2 ) {
1072 TWS_TRACE_DEBUG(sc, "door bell :)", histat, TWS_I2O0_HISTAT);
1073 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1074 if ( db & TWS_BIT21 ) {
1075 tws_intr_attn_error(sc);
1078 if ( db & TWS_BIT18 ) {
1079 tws_intr_attn_aen(sc);
1083 if ( histat & TWS_BIT3 ) {
/*
 * tws_intr_attn_aen -- AEN doorbell handler: fetch the pending AEN,
 * clear the AEN doorbell bit (TWS_BIT18) in HOBDBC, and re-read the
 * inbound doorbell to flush the write.
 */
1089 tws_intr_attn_aen(struct tws_softc *sc)
1093 /* maskoff db intrs until all the aens are fetched */
1094 /* tws_disable_db_intr(sc); */
1095 tws_fetch_aen((void *)sc);
1096 tws_write_reg(sc, TWS_I2O0_HOBDBC, TWS_BIT18, 4);
1097 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
/*
 * tws_intr_attn_error -- attention-error doorbell handler: clear all
 * doorbell bits, flush with a read-back, and report the
 * microcontroller error on the console.
 */
1102 tws_intr_attn_error(struct tws_softc *sc)
1106 TWS_TRACE(sc, "attn error", 0, 0);
1107 tws_write_reg(sc, TWS_I2O0_HOBDBC, ~0, 4);
1108 db = tws_read_reg(sc, TWS_I2O0_IOBDB, 4);
1109 device_printf(sc->tws_dev, "Micro controller error.\n");
/*
 * tws_intr_resp -- drain the firmware response queue.  Each response
 * with a valid request id invokes that request's completion callback;
 * TWS_INVALID_REQID means an errored frame, handled via
 * tws_err_complete() using the returned MFA.
 */
1114 tws_intr_resp(struct tws_softc *sc)
1119 while ( tws_get_response(sc, &req_id, &mfa) ) {
1120 sc->stats.reqs_out++;
1121 if ( req_id == TWS_INVALID_REQID ) {
1122 TWS_TRACE_DEBUG(sc, "invalid req_id", mfa, req_id);
1123 sc->stats.reqs_errored++;
1124 tws_err_complete(sc, mfa);
1127 sc->reqs[req_id].cb(&sc->reqs[req_id]);
/*
 * tws_poll -- CAM polling entry point (used when interrupts are not
 * available, e.g. crash dumps): simply runs the interrupt handler.
 */
1134 tws_poll(struct cam_sim *sim)
1136 struct tws_softc *sc = (struct tws_softc *)cam_sim_softc(sim);
1137 TWS_TRACE_DEBUG(sc, "entry", 0, 0);
1138 tws_intr((void *) sc);
/*
 * tws_timeout -- per-request timeout callout.  If the request was
 * already failed by a reset, or a reset is in progress, do nothing
 * (the reset path completes it).  Otherwise freeze the simq, announce
 * which kind of request timed out, and perform a full controller
 * reset inline (soft reset, drain via tws_reset_cb, re-init via
 * tws_reinit), all under gen_lock.
 */
1142 tws_timeout(void *arg)
1144 struct tws_request *req = (struct tws_request *)arg;
1145 struct tws_softc *sc = req->sc;
/* Unlocked early-out; re-checked below under gen_lock to close the race. */
1148 if ( req->error_code == TWS_REQ_RET_RESET ) {
1152 mtx_lock(&sc->gen_lock);
1153 if ( req->error_code == TWS_REQ_RET_RESET ) {
1154 mtx_unlock(&sc->gen_lock);
1158 if ( tws_get_state(sc) == TWS_RESET ) {
1159 mtx_unlock(&sc->gen_lock);
1163 xpt_freeze_simq(sc->sim, 1);
1165 tws_send_event(sc, TWS_RESET_START);
1167 if (req->type == TWS_REQ_TYPE_SCSI_IO) {
1168 device_printf(sc->tws_dev, "I/O Request timed out... Resetting controller\n");
1169 } else if (req->type == TWS_REQ_TYPE_PASSTHRU) {
1170 device_printf(sc->tws_dev, "IOCTL Request timed out... Resetting controller\n");
1172 device_printf(sc->tws_dev, "Internal Request timed out... Resetting controller\n");
1175 tws_assert_soft_reset(sc);
1176 tws_turn_off_interrupts(sc);
1177 tws_reset_cb( (void*) sc );
1178 tws_reinit( (void*) sc );
1180 // device_printf(sc->tws_dev, "Controller Reset complete!\n");
1181 tws_send_event(sc, TWS_RESET_COMPLETE);
1182 mtx_unlock(&sc->gen_lock);
1184 xpt_release_simq(sc->sim, 1);
/*
 * tws_reset -- externally-triggered controller reset.  Same sequence
 * as the timeout path: skip if already resetting, freeze the simq,
 * soft-reset the hardware, drain queues (tws_reset_cb), re-initialize
 * (tws_reinit), then signal completion and thaw the simq.  Runs under
 * gen_lock.
 */
1188 tws_reset(void *arg)
1190 struct tws_softc *sc = (struct tws_softc *)arg;
1192 mtx_lock(&sc->gen_lock);
1193 if ( tws_get_state(sc) == TWS_RESET ) {
1194 mtx_unlock(&sc->gen_lock);
1198 xpt_freeze_simq(sc->sim, 1);
1200 tws_send_event(sc, TWS_RESET_START);
1202 device_printf(sc->tws_dev, "Resetting controller\n");
1204 tws_assert_soft_reset(sc);
1205 tws_turn_off_interrupts(sc);
1206 tws_reset_cb( (void*) sc );
1207 tws_reinit( (void*) sc );
1209 // device_printf(sc->tws_dev, "Controller Reset complete!\n");
1210 tws_send_event(sc, TWS_RESET_COMPLETE);
1211 mtx_unlock(&sc->gen_lock);
1213 xpt_release_simq(sc->sim, 1);
/*
 * tws_reset_cb -- first half of reset recovery: fail back every busy
 * request, clean the reserved request slots, discard stale responses,
 * then poll TWS_I2O0_SCRPD3 for the controller-ready bit (TWS_BIT13)
 * until TWS_POLL_TIMEOUT expires.  Only runs while state is TWS_RESET.
 */
1217 tws_reset_cb(void *arg)
1219 struct tws_softc *sc = (struct tws_softc *)arg;
1224 if ( tws_get_state(sc) != TWS_RESET ) {
1228 // device_printf(sc->tws_dev, "Draining Busy Queue\n");
1229 tws_drain_busy_queue(sc);
1230 // device_printf(sc->tws_dev, "Draining Reserved Reqs\n");
1231 tws_drain_reserved_reqs(sc);
1232 // device_printf(sc->tws_dev, "Draining Response Queue\n");
1233 tws_drain_response_queue(sc);
1235 // device_printf(sc->tws_dev, "Looking for controller ready flag...\n");
1236 endt = TWS_LOCAL_TIME + TWS_POLL_TIMEOUT;
1237 while ((TWS_LOCAL_TIME <= endt) && (!found)) {
1238 reg = tws_read_reg(sc, TWS_I2O0_SCRPD3, 4);
1239 if ( reg & TWS_BIT13 ) {
1241 // device_printf(sc->tws_dev, " ... Got it!\n");
1245 device_printf(sc->tws_dev, " ... Controller ready flag NOT found!\n");
/*
 * tws_reinit -- second half of reset recovery: wait (sleeping on
 * gen_lock in 5-second intervals) for the controller to report ready,
 * re-asserting soft reset when TWS_RESET_TIMEOUT elapses; once ready,
 * clear the OBFL overrun flag, re-establish the firmware connection at
 * full queue depth, rebuild the outbound free list, and re-enable
 * interrupts.
 */
1249 tws_reinit(void *arg)
1251 struct tws_softc *sc = (struct tws_softc *)arg;
1257 // device_printf(sc->tws_dev, "Waiting for Controller Ready\n");
1258 while ( !done && try ) {
1259 if ( tws_ctlr_ready(sc) ) {
/* Not ready yet: re-kick the soft reset after each full timeout period. */
1264 if ( timeout_val >= TWS_RESET_TIMEOUT ) {
1267 tws_assert_soft_reset(sc);
1270 mtx_sleep(sc, &sc->gen_lock, 0, "tws_reinit", 5*hz);
1275 device_printf(sc->tws_dev, "FAILED to get Controller Ready!\n");
1279 sc->obfl_q_overrun = false;
1280 // device_printf(sc->tws_dev, "Sending initConnect\n");
1281 if ( tws_init_connect(sc, tws_queue_depth) ) {
1282 TWS_TRACE_DEBUG(sc, "initConnect failed", 0, sc->is64bit);
1284 tws_init_obfl_q(sc);
1286 tws_turn_on_interrupts(sc);
/*
 * tws_freeze_simq -- used when a DMA map load is deferred
 * (EINPROGRESS): freeze the simq and mark the CCB so CAM requeues it
 * and releases the freeze when the CCB completes.  SCSI I/O only.
 */
1293 tws_freeze_simq(struct tws_softc *sc, struct tws_request *req)
1295 /* Only for IO commands */
1296 if (req->type == TWS_REQ_TYPE_SCSI_IO) {
1297 union ccb *ccb = (union ccb *)(req->ccb_ptr);
1299 xpt_freeze_simq(sc->sim, 1);
1300 ccb->ccb_h.status |= CAM_RELEASE_SIMQ;
1301 ccb->ccb_h.status |= CAM_REQUEUE_REQ;
/* Allow the CAM queue depth to be overridden at boot via loader.conf. */
1306 TUNABLE_INT("hw.tws.cam_depth", &tws_cam_depth);