1 /* $NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $ */
4 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
32 * DEC PDQ FDDI Controller O/S independent code
34 * This module should work on any PDQ based board. Note that changes for
35 * MIPS and Alpha architectures (or any other architecture which requires
36 * a flushing of memory or write buffers and/or has incoherent caches)
37 * have yet to be made.
39 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
40 * flushing of the write buffers.
44 #include <sys/cdefs.h>
45 __KERNEL_RCSID(0, "$NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $");
48 #define PDQ_HWSUPPORT /* for pdq.h */
50 #if defined(__FreeBSD__)
52 * What a botch having to use specific includes for FreeBSD!
54 #include <dev/pdq/pdq_freebsd.h>
55 #include <dev/pdq/pdqreg.h>
/* Round n up to the next multiple of x; x must be a power of two. */
61 #define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
/* Alignment required for command request/response DMA buffers. */
62 #define PDQ_CMD_RX_ALIGNMENT 16
/* Debug printf: expands to printf under PDQTEST/PDQVERBOSE, no-op otherwise. */
64 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
65 #define PDQ_PRINTF(x) printf x
67 #define PDQ_PRINTF(x) do { } while (0)
/*
 * Read-only tables mapping adapter-reported numeric codes to human-readable
 * strings for diagnostic printfs.  Indexed directly by the code value, so
 * entry order must match the firmware's code assignments.
 * NOTE(review): "Adapter Processer Error" and "Elasticy Buffer Error" look
 * like misspellings, but they are runtime output strings inherited from the
 * original driver — left untouched here.
 */
70 static const char * const pdq_halt_codes[] = {
71 "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
72 "Software Fault", "Hardware Fault", "PC Trace Path Test",
73 "DMA Error", "Image CRC Error", "Adapter Processer Error"
/* Adapter state names, indexed by PDQ_PSTS_ADAPTER_STATE() values. */
76 static const char * const pdq_adapter_states[] = {
77 "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
78 "Link Available", "Link Unavailable", "Halted", "Ring Member"
82 * The following are used in conjunction with
/* Entity names for unsolicited events (station / link / phy port). */
85 static const char * const pdq_entities[] = {
86 "Station", "Link", "Phy Port"
89 static const char * const pdq_station_events[] = {
94 static const char * const pdq_station_arguments[] = {
/* Link-entity event descriptions, indexed by event_code.value. */
98 static const char * const pdq_link_events[] = {
101 "Block Check Error (CRC)",
102 "Frame Status Error",
106 "Receive Data Overrun",
109 "Ring Initialization Initiated",
110 "Ring Initialization Received",
111 "Ring Beacon Initiated",
112 "Duplicate Address Failure",
113 "Duplicate Token Detected",
117 "Directed Beacon Received",
120 static const char * const pdq_link_arguments[] = {
/* PHY-port event descriptions. */
127 static const char * const pdq_phy_events[] = {
128 "LEM Error Monitor Reject",
129 "Elasticy Buffer Error",
130 "Link Confidence Test Reject"
133 static const char * const pdq_phy_arguments[] = {
/* Per-entity dispatch tables: outer index is the entity code. */
137 static const char * const * const pdq_event_arguments[] = {
138 pdq_station_arguments,
143 static const char * const * const pdq_event_codes[] = {
/* FDDI station type names (single/dual attach, concentrators). */
149 static const char * const pdq_station_types[] = {
150 "SAS", "DAC", "SAC", "NAC", "DAS"
153 static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };
/* One letter per PHY type code: A, B, S, M. */
155 static const char pdq_phy_types[] = "ABSM";
/* PMD type names; selected via pdq_pmd_types[code / 100][code % 100]. */
157 static const char * const pdq_pmd_types0[] = {
158 "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
162 static const char * const pdq_pmd_types100[] = {
163 "Low Power", "Thin Wire", "Shielded Twisted Pair",
164 "Unshielded Twisted Pair"
167 static const char * const * const pdq_pmd_types[] = {
168 pdq_pmd_types0, pdq_pmd_types100
/* Board model names, indexed by pdq->pdq_type. */
171 static const char * const pdq_descriptions[] = {
/*
 * Pretty-print the adapter's FDDI characteristics (from a STATUS_CHARS_GET
 * response): model, station type, MAC address, firmware/hardware revisions,
 * SMT version and per-port PHY/PMD types.  Finishes by letting the OS layer
 * record the status (pdq_os_update_status).
 */
180 pdq_print_fddi_chars(
182 const pdq_response_status_chars_get_t *rsp)
184 const char hexchars[] = "0123456789abcdef";
187 #if !defined(__bsdi__) && !defined(__NetBSD__)
192 "DEC %s FDDI %s Controller\n",
193 #if !defined(__bsdi__) && !defined(__NetBSD__)
196 pdq_descriptions[pdq->pdq_type],
197 pdq_station_types[rsp->status_chars_get.station_type]);
/* MAC address printed one nibble at a time via the hexchars table. */
199 printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
201 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
202 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
203 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
204 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
205 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
206 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
207 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
208 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
209 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
210 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
211 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
212 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
213 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
214 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
215 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
/* Only print the SMT version when it indexes a known table entry. */
217 if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
218 printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
/* Port A (labelled "[A]" only for dual-attach stations). */
221 printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
223 rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
224 pdq_phy_types[rsp->status_chars_get.phy_type[0]],
225 pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
/* Dual-attach stations have a second port to report. */
227 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
228 printf(", FDDI Port[B] = %c (PMD = %s)",
229 pdq_phy_types[rsp->status_chars_get.phy_type[1]],
230 pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
234 pdq_os_update_status(pdq, rsp);
/*
 * Lay out the main PDQ control/status register map: each CSR sits at a
 * fixed slot index from csr_base, scaled by the bus-specific csrsize
 * (register stride).  NOTE(review): the enclosing function's name line is
 * not visible in this view — presumably pdq_init_csrs; confirm upstream.
 */
241 pdq_bus_memaddr_t csr_base,
245 csrs->csr_base = csr_base;
246 csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
247 csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
248 csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
249 csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
250 csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
251 csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
252 csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
253 csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
254 csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
/* Slot 9 is skipped — producer registers resume at slot 10. */
255 csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
256 csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
257 csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
258 csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
/*
 * Lay out the PCI (PFI) specific CSR map — mode control, status and the
 * FIFO window registers — starting at slot 16 of the same register space.
 */
263 pdq_pci_csrs_t *csrs,
265 pdq_bus_memaddr_t csr_base,
269 csrs->csr_base = csr_base;
270 csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
271 csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
272 csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
273 csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
/*
 * Drain a databuf queue, returning every buffer to the OS allocator.
 * Used during reset/stop to discard any queued PDUs.
 */
277 pdq_flush_databuf_queue(
279 pdq_databuf_queue_t *q)
281 PDQ_OS_DATABUF_T *pdu;
283 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
286 PDQ_OS_DATABUF_FREE(pdq, pdu);
/*
 * Issue a port-control command to the adapter and busy-wait (bounded at
 * 33M spins) for the CMD_DONE interrupt bit.  Returns PDQ_TRUE on success,
 * PDQ_FALSE if the adapter flagged a command error; falls through to the
 * adapter-failure path if CMD_DONE never asserts.
 */
292 const pdq_csrs_t * const csrs,
/* Ack any stale CMD_DONE before issuing the new command. */
296 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
297 PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
298 while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
300 PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
301 if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
/* Ack the completion, then report whether the adapter signalled an error. */
302 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
303 return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
305 /* adapter failure */
/*
 * Read the adapter's MLA ("My Long Address", i.e. the FDDI hardware
 * address) via two MLA_READ port-control transactions: the first returns
 * bytes 0-3, the second (data_a = 1) bytes 4-5.  Bytes are unpacked
 * little-endian from the 32-bit host-data register.
 */
312 const pdq_csrs_t * const csrs,
313 pdq_lanaddr_t *hwaddr)
317 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
318 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
319 data = PDQ_CSR_READ(csrs, csr_host_data);
321 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
322 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
323 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
324 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
/* Second word holds the last two address bytes. */
326 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
327 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
328 data = PDQ_CSR_READ(csrs, csr_host_data);
330 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
331 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
/*
 * Read the firmware revision.  Note the byte order is reversed relative
 * to pdq_read_mla: the most significant byte of the register is the first
 * revision character.
 */
336 const pdq_csrs_t * const csrs,
341 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
342 data = PDQ_CSR_READ(csrs, csr_host_data);
344 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
345 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
346 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
347 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
/*
 * Pull the adapter's error log one 32-bit word at a time into *log_entry,
 * stopping when the structure is full or the adapter stops supplying data.
 * Returns PDQ_TRUE if at least one word was read.
 * NOTE(review): the read loop issues PDQ_PCTL_FW_REV_READ rather than an
 * error-log-read opcode — looks intentional (log words come back through
 * the same host-data register) but worth confirming against the PDQ spec.
 */
353 pdq_response_error_log_get_t *log_entry)
355 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
356 pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;
358 pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);
360 while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
361 *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
362 if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
/* PDQ_FALSE only if nothing at all was read. */
365 return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
/*
 * Query the PDQ chip revision via the REV_GET sub-command and return it.
 */
368 static pdq_chip_rev_t
370 const pdq_csrs_t * const csrs)
374 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
375 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
376 data = PDQ_CSR_READ(csrs, csr_host_data);
378 return (pdq_chip_rev_t) data;
/*
 * Per-opcode command descriptor table: request size, response size and a
 * printable name, indexed by pdq_cmd_code_t.  Row order must match the
 * opcode enumeration exactly — pdq_queue_commands indexes into it.
 * NOTE(review): the two zero-size "Counters Get"/"Counters Set" rows after
 * PDQC_COUNTERS_SET look like placeholders for unimplemented opcodes in
 * the enumeration; confirm against the opcode list in pdqreg.h.
 */
381 static const struct {
384 const char *cmd_name;
386 { sizeof(pdq_cmd_generic_t), /* 0 - PDQC_START */
387 sizeof(pdq_response_generic_t),
390 { sizeof(pdq_cmd_filter_set_t), /* 1 - PDQC_FILTER_SET */
391 sizeof(pdq_response_generic_t),
394 { sizeof(pdq_cmd_generic_t), /* 2 - PDQC_FILTER_GET */
395 sizeof(pdq_response_filter_get_t),
398 { sizeof(pdq_cmd_chars_set_t), /* 3 - PDQC_CHARS_SET */
399 sizeof(pdq_response_generic_t),
402 { sizeof(pdq_cmd_generic_t), /* 4 - PDQC_STATUS_CHARS_GET */
403 sizeof(pdq_response_status_chars_get_t),
407 { sizeof(pdq_cmd_generic_t), /* 5 - PDQC_COUNTERS_GET */
408 sizeof(pdq_response_counters_get_t),
411 { sizeof(pdq_cmd_counters_set_t), /* 6 - PDQC_COUNTERS_SET */
412 sizeof(pdq_response_generic_t),
416 { 0, 0, "Counters Get" },
417 { 0, 0, "Counters Set" },
419 { sizeof(pdq_cmd_addr_filter_set_t), /* 7 - PDQC_ADDR_FILTER_SET */
420 sizeof(pdq_response_generic_t),
423 { sizeof(pdq_cmd_generic_t), /* 8 - PDQC_ADDR_FILTER_GET */
424 sizeof(pdq_response_addr_filter_get_t),
427 { sizeof(pdq_cmd_generic_t), /* 9 - PDQC_ERROR_LOG_CLEAR */
428 sizeof(pdq_response_generic_t),
431 { sizeof(pdq_cmd_generic_t), /* 10 - PDQC_ERROR_LOG_SET */
432 sizeof(pdq_response_generic_t),
435 { sizeof(pdq_cmd_generic_t), /* 11 - PDQC_FDDI_MIB_GET */
436 sizeof(pdq_response_generic_t),
439 { sizeof(pdq_cmd_generic_t), /* 12 - PDQC_DEC_EXT_MIB_GET */
440 sizeof(pdq_response_generic_t),
443 { sizeof(pdq_cmd_generic_t), /* 13 - PDQC_DEC_SPECIFIC_GET */
444 sizeof(pdq_response_generic_t),
447 { sizeof(pdq_cmd_generic_t), /* 14 - PDQC_SNMP_SET */
448 sizeof(pdq_response_generic_t),
452 { sizeof(pdq_cmd_generic_t), /* 16 - PDQC_SMT_MIB_GET */
453 sizeof(pdq_response_generic_t),
456 { sizeof(pdq_cmd_generic_t), /* 17 - PDQC_SMT_MIB_SET */
457 sizeof(pdq_response_generic_t),
460 { 0, 0, "Bogus CMD" },
/*
 * Queue at most one pending adapter command onto the command-request ring.
 * Only one command/response pair is ever in flight: the highest-numbered
 * pending opcode wins (the mask scan below walks downward from
 * PDQC_SMT_MIB_SET).  Builds the request buffer in place, pre-syncs the
 * DMA areas, then produces both rings to the adapter.
 */
467 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
468 pdq_command_info_t * const ci = &pdq->pdq_command_info;
469 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
470 pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
472 pdq_uint32_t cmdlen, rsplen, mask;
475 * If there are commands or responses active or there aren't
476 * any pending commands, then don't queue any more.
478 if (ci->ci_command_active || ci->ci_pending_commands == 0)
482 * Determine which command needs to be queued.
/* Scan the pending bitmask from the highest opcode downward. */
484 op = PDQC_SMT_MIB_SET;
485 for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
486 op = (pdq_cmd_code_t) ((int) op - 1);
488 * Obtain the sizes needed for the command and response.
489 * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
490 * always properly aligned.
492 cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
493 rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
497 * Since only one command at a time will be queued, there will always
502 * Obtain and fill in the descriptor for the command (descriptor is
505 txd->txd_seg_len = cmdlen;
508 * Clear the command area, set the opcode, and the command from the pending
512 ci->ci_queued_commands[ci->ci_request_producer] = op;
513 #if defined(PDQVERBOSE)
/* Debug aid: poison the response opcode so a stale response is obvious. */
514 ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op = PDQC_BOGUS_CMD;
516 PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
517 *(pdq_cmd_code_t *) ci->ci_request_bufstart = op;
/* Mark this opcode as no longer pending. */
518 ci->ci_pending_commands &= ~mask;
521 * Fill in the command area, if needed.
524 case PDQC_FILTER_SET: {
/* Translate driver flag bits into the adapter's filter item list. */
525 pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
527 filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
528 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
530 filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
531 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
533 filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
534 filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
536 filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
537 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
/* Terminate the item list. */
539 filter_set->filter_set_items[idx].item_code = PDQI_EOL;
542 case PDQC_ADDR_FILTER_SET: {
/* First address slot is the broadcast address; the OS layer fills the rest. */
543 pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
544 pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
545 addr->lanaddr_bytes[0] = 0xFF;
546 addr->lanaddr_bytes[1] = 0xFF;
547 addr->lanaddr_bytes[2] = 0xFF;
548 addr->lanaddr_bytes[3] = 0xFF;
549 addr->lanaddr_bytes[4] = 0xFF;
550 addr->lanaddr_bytes[5] = 0xFF;
552 pdq_os_addr_fill(pdq, addr, 61);
555 case PDQC_SNMP_SET: {
/* item_value 1 = enable full duplex, 2 = disable. */
556 pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
558 snmp_set->snmp_set_items[idx].item_code = PDQSNMP_FULL_DUPLEX_ENABLE;
559 snmp_set->snmp_set_items[idx].item_value = (pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
560 snmp_set->snmp_set_items[idx].item_port = 0;
562 snmp_set->snmp_set_items[idx].item_code = PDQSNMP_EOL;
565 default: { /* to make gcc happy */
572 * Sync the command request buffer and descriptor, then advance
573 * the request producer index.
575 PDQ_OS_CMDRQST_PRESYNC(pdq, txd->txd_seg_len);
576 PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
577 PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
580 * Sync the command response buffer and advance the response
581 * producer index (descriptor is already pre-initialized)
583 PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
584 PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
586 * At this point the command is done. All that needs to be done is to
587 * produce it to the PDQ.
589 PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
590 pdq_cmd_info[op].cmd_name));
592 ci->ci_command_active++;
/* Producer registers carry producer index in the low byte, completion in the next. */
593 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
594 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
/*
 * Consume a completed command/response pair.  Command and response rings
 * advance in lock-step: once the consumer block shows the response was
 * consumed, the request must have been too.  Handles the two responses
 * the driver acts on (STATUS_CHARS_GET printing, DEC_EXT_MIB_GET full-
 * duplex flag), then requeues the next pending command if any.
 */
598 pdq_process_command_responses(
601 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
602 pdq_command_info_t * const ci = &pdq->pdq_command_info;
603 volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
604 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
605 const pdq_response_generic_t *rspgen;
608 * We have to process the command and response in tandem so
609 * just wait for the response to be consumed. If it has been
610 * consumed then the command must have been as well.
613 if (cbp->pdqcb_command_response == ci->ci_response_completion)
616 PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);
/* Make the DMA'd response visible to the CPU before reading it. */
618 PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
619 rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
620 PDQ_ASSERT(rspgen->generic_op == ci->ci_queued_commands[ci->ci_request_completion]);
621 PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
622 PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
623 pdq_cmd_info[rspgen->generic_op].cmd_name,
624 rspgen->generic_status, rspgen->generic_status));
/* One-shot characteristics dump requested via PDQ_PRINTCHARS. */
626 if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
627 pdq->pdq_flags &= ~PDQ_PRINTCHARS;
628 pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
629 } else if (rspgen->generic_op == PDQC_DEC_EXT_MIB_GET) {
/* Track full-duplex operational state from the extended MIB. */
630 pdq->pdq_flags &= ~PDQ_IS_FDX;
631 if (((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational)
632 pdq->pdq_flags |= PDQ_IS_FDX;
635 PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
636 PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
637 ci->ci_command_active = 0;
/* Either queue the next pending command or just update the producers. */
639 if (ci->ci_pending_commands != 0) {
640 pdq_queue_commands(pdq);
642 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
643 ci->ci_response_producer | (ci->ci_response_completion << 8));
644 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
645 ci->ci_request_producer | (ci->ci_request_completion << 8));
650 * This following routine processes unsolicited events.
651 * In addition, it also fills the unsolicited queue with
652 * event buffers so it can be used to initialize the queue
656 pdq_process_unsolicited_events(
659 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
660 pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
661 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
662 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
665 * Process each unsolicited event (if any).
/* Walk the event ring until we catch up with the adapter's consumer index. */
668 while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
669 const pdq_unsolicited_event_t *event;
670 event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
671 PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);
673 switch (event->event_type) {
674 case PDQ_UNSOLICITED_EVENT: {
/* Range-check the event code against the per-entity maximum before
 * using it to index the string tables. */
676 switch (event->event_entity) {
677 case PDQ_ENTITY_STATION: {
678 bad_event = event->event_code.value >= PDQ_STATION_EVENT_MAX;
681 case PDQ_ENTITY_LINK: {
682 bad_event = event->event_code.value >= PDQ_LINK_EVENT_MAX;
685 case PDQ_ENTITY_PHY_PORT: {
686 bad_event = event->event_code.value >= PDQ_PHY_EVENT_MAX;
697 printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
699 pdq_entities[event->event_entity],
700 pdq_event_codes[event->event_entity][event->event_code.value]);
/* PHY events identify which port via event_index. */
701 if (event->event_entity == PDQ_ENTITY_PHY_PORT)
702 printf("[%d]", event->event_index);
706 case PDQ_UNSOLICITED_COUNTERS: {
/* Hand the (now consumed) event buffer back toward the adapter. */
710 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
711 PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
716 * Now give back the event buffers back to the PDQ.
718 PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
721 PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
722 ui->ui_producer | (ui->ui_completion << 8));
/*
 * Process received PDUs up to completion_goal on a receive ring (used for
 * both the normal receive ring and the host SMT ring).  For each good PDU:
 * validate the frame-control byte and length, chain the constituent
 * segment buffers into one PDU, replace the consumed ring slots with
 * freshly allocated buffers, and pass the PDU up via pdq_os_receive_pdu.
 * Bad PDUs are logged and their buffers recycled in place.  Finally the
 * ring is replenished toward rx_target with new descriptors.
 */
726 pdq_process_received_data(
729 pdq_rxdesc_t *receives,
730 pdq_uint32_t completion_goal,
731 pdq_uint32_t ring_mask)
733 pdq_uint32_t completion = rx->rx_completion;
734 pdq_uint32_t producer = rx->rx_producer;
735 PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
739 while (completion != completion_goal) {
740 PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
741 pdq_uint8_t *dataptr;
742 pdq_uint32_t fc, datalen, pdulen, segcnt;
743 pdq_rxstatus_t status;
/* fpdu = first segment of the PDU; the RX status word is DMA'd into
 * its first 4 bytes, so sync just that much before reading it. */
745 fpdu = lpdu = buffers[completion];
746 PDQ_ASSERT(fpdu != NULL);
747 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
748 dataptr = PDQ_OS_DATABUF_PTR(fpdu);
749 status = *(pdq_rxstatus_t *) dataptr;
750 if (status.rxs_rcc_badpdu == 0) {
751 datalen = status.rxs_len;
752 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
753 PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
/* Classify the frame by its FC byte; only LLC and SMT frames with
 * sane lengths are accepted. */
754 fc = dataptr[PDQ_RX_FC_OFFSET];
755 switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
756 case PDQ_FDDI_LLC_ASYNC:
757 case PDQ_FDDI_LLC_SYNC:
758 case PDQ_FDDI_IMP_ASYNC:
759 case PDQ_FDDI_IMP_SYNC: {
760 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
761 PDQ_PRINTF(("discard: bad length %d\n", datalen));
767 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
772 PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
777 * Update the lengths of the data buffers now that we know
/* pdulen excludes the leading status/header area and the 4-byte CRC;
 * segcnt is how many ring buffers the frame spans. */
780 pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
781 segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
/* Replace each consumed ring buffer with a fresh allocation before
 * handing the old ones upstream; on allocation failure the partial
 * chain is freed and the frame is dropped. */
782 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
784 PDQ_PRINTF(("discard: no databuf #0\n"));
787 buffers[completion] = npdu;
788 for (idx = 1; idx < segcnt; idx++) {
789 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
791 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
792 PDQ_OS_DATABUF_FREE(pdq, fpdu);
795 PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
796 lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
797 buffers[(completion + idx) & ring_mask] = npdu;
799 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
/* Slide the remaining (unused) slot buffers from the completion
 * window to the producer window. */
800 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
801 buffers[(producer + idx) & ring_mask] =
802 buffers[(completion + idx) & ring_mask];
803 buffers[(completion + idx) & ring_mask] = NULL;
/* Strip the driver header area and set per-buffer lengths. */
805 PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
807 PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
809 PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
812 * Do not pass to protocol if packet was received promiscuously
814 pdq_os_receive_pdu(pdq, fpdu, pdulen,
815 status.rxs_rcc_dd < PDQ_RXS_RCC_DD_CAM_MATCH);
816 rx->rx_free += PDQ_RX_SEGCNT;
817 PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
818 PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
/* Bad-PDU path: log why, then recycle the buffers in place. */
821 PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
822 status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
823 status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
824 if (status.rxs_rcc_reason == 7)
826 if (status.rxs_rcc_reason != 0) {
828 if (status.rxs_rcc_badcrc) {
829 printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
831 dataptr[PDQ_RX_FC_OFFSET+1],
832 dataptr[PDQ_RX_FC_OFFSET+2],
833 dataptr[PDQ_RX_FC_OFFSET+3],
834 dataptr[PDQ_RX_FC_OFFSET+4],
835 dataptr[PDQ_RX_FC_OFFSET+5],
836 dataptr[PDQ_RX_FC_OFFSET+6]);
837 /* rx->rx_badcrc++; */
838 } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
839 /* rx->rx_frame_status_errors++; */
847 * Discarded frames go right back on the queue; therefore
848 * ring entries were freed.
850 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
851 buffers[producer] = buffers[completion];
852 buffers[completion] = NULL;
853 rxd = &receives[rx->rx_producer];
/* First descriptor of a segment group carries SOP + segment count. */
855 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
857 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
860 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
861 rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]);
862 PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
863 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
864 PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
865 PDQ_ADVANCE(producer, 1, ring_mask);
866 PDQ_ADVANCE(completion, 1, ring_mask);
869 rx->rx_completion = completion;
/* Replenish: build complete PDQ_RX_SEGCNT-sized descriptor groups until
 * the ring reaches its target fill level. */
871 while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
872 PDQ_OS_DATABUF_T *pdu;
874 * Allocate the needed number of data buffers.
875 * Try to obtain them from our free queue before
876 * asking the system for more.
878 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
879 if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
880 PDQ_OS_DATABUF_ALLOC(pdq, pdu);
883 buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
885 rxd = &receives[(rx->rx_producer + idx) & ring_mask];
887 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
889 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
892 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
893 rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, pdu);
894 PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
895 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
897 if (idx < PDQ_RX_SEGCNT) {
899 * We didn't get all databufs required to complete a new
900 * receive buffer. Keep the ones we got and retry a bit
901 * later for the rest.
905 PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
906 rx->rx_free -= PDQ_RX_SEGCNT;
/* Forward declaration: used by pdq_queue_transmit_data to reclaim ring
 * space before giving up on a full ring. */
910 static void pdq_process_transmitted_data(pdq_t *pdq);
/*
 * Map a PDU onto the type-2 transmit ring.  Builds one descriptor per
 * DMA segment (bus_dma map segments under PDQ_BUS_DMA, otherwise page-
 * bounded fragments of each chained databuf).  If the ring is too full,
 * tries to reclaim completed descriptors first; on failure, enables the
 * TX interrupt so the caller will be told when to retry, and returns
 * without consuming the PDU.  On success the PDU is queued on tx_txq
 * until transmit completion, and the producer is advanced.
 */
913 pdq_queue_transmit_data(
915 PDQ_OS_DATABUF_T *pdu)
917 pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
918 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
919 pdq_uint32_t producer = tx->tx_producer;
920 pdq_txdesc_t *eop = NULL;
921 PDQ_OS_DATABUF_T *pdu0;
922 pdq_uint32_t freecnt;
923 #if defined(PDQ_BUS_DMA)
/* When the FC byte shares the header offset, a separate header
 * descriptor is prepended, costing one extra ring slot. */
928 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
929 freecnt = tx->tx_free - 1;
931 freecnt = tx->tx_free;
934 * Need 2 or more descriptors to be able to send.
/* Ring too full: arm the TX-done interrupt and bail. */
937 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
938 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
942 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
943 dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
944 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
945 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
948 #if defined(PDQ_BUS_DMA)
/* bus_dma path: one descriptor per map segment. */
949 map = M_GETCTX(pdu, bus_dmamap_t);
950 if (freecnt >= map->dm_nsegs) {
952 for (idx = 0; idx < map->dm_nsegs; idx++) {
954 * Initialize the transmit descriptor
956 eop = &dbp->pdqdb_transmits[producer];
957 eop->txd_seg_len = map->dm_segs[idx].ds_len;
958 eop->txd_pa_lo = map->dm_segs[idx].ds_addr;
959 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
960 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
962 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
/* Non-bus_dma path: walk the databuf chain, splitting each buffer at
 * page boundaries since a descriptor cannot cross a page. */
969 for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
970 pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
971 const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
974 * The first segment is limited to the space remaining in
975 * page. All segments after that can be up to a full page
/* Bytes left in the current page at dataptr. */
978 fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
979 while (datalen > 0 && freecnt > 0) {
980 pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
983 * Initialize the transmit descriptor
985 eop = &dbp->pdqdb_transmits[producer];
986 eop->txd_seg_len = seglen;
987 eop->txd_pa_lo = PDQ_OS_VA_TO_BUSPA(pdq, dataptr);
988 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
989 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
992 fraglen = PDQ_OS_PAGESIZE;
994 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
996 pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
998 #endif /* defined(PDQ_BUS_DMA) */
/* Ran out of descriptors mid-PDU: try to reclaim completed ones and,
 * if the completion index moved, restart from the (old) producer. */
1000 unsigned completion = tx->tx_completion;
1001 PDQ_ASSERT(freecnt == 0);
1002 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1003 pdq_process_transmitted_data(pdq);
1004 if (completion != tx->tx_completion) {
1005 producer = tx->tx_producer;
1010 * If we still have data to process then the ring was too full
1011 * to store the PDU. Return FALSE so the caller will requeue
1012 * the PDU for later.
1014 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
1015 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1019 * Everything went fine. Finish it up.
/* Record descriptor count for completion accounting, mark SOP/EOP,
 * queue the PDU until TX-done, and publish the new producer index. */
1021 tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
1022 if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
1023 dbp->pdqdb_transmits[tx->tx_producer].txd_sop = 1;
1024 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
1025 sizeof(pdq_txdesc_t));
1028 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
1029 PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
1030 tx->tx_producer = producer;
1031 tx->tx_free = freecnt;
1032 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Reclaim transmit descriptors the adapter has consumed: for each
 * completed PDU, verify SOP/EOP bracketing, dequeue the PDU from tx_txq,
 * notify the OS layer, and return its descriptors to tx_free.  If any
 * progress was made, disable the TX interrupt and kick the transmitter.
 */
1037 pdq_process_transmitted_data(
1040 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1041 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1042 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
1043 pdq_uint32_t completion = tx->tx_completion;
1046 while (completion != cbp->pdqcb_transmits) {
1047 PDQ_OS_DATABUF_T *pdu;
1048 pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
1049 PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
1050 PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
/* PDUs complete in FIFO order, so the head of tx_txq matches. */
1051 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1052 pdq_os_transmit_done(pdq, pdu);
1053 tx->tx_free += descriptor_count;
1055 PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
1057 if (tx->tx_completion != completion) {
1058 tx->tx_completion = completion;
/* Ring has space again: stop TX-done interrupts and restart output. */
1059 pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
1060 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1061 pdq_os_restart_transmitter(pdq);
1064 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Abort all queued transmits: free every PDU on tx_txq (without the
 * transmit-done callback, since nothing was sent), reset the ring to
 * empty by aligning completion/consumer indices with the producer, and
 * re-sync the consumer block for the adapter.
 */
1068 pdq_flush_transmitter(
1071 volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1072 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1075 PDQ_OS_DATABUF_T *pdu;
1076 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1080 * Don't call transmit done since the packet never made it
1083 PDQ_OS_DATABUF_FREE(pdq, pdu);
1086 tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1087 cbp->pdqcb_transmits = tx->tx_completion = tx->tx_producer;
1088 PDQ_OS_CONSUMER_PRESYNC(pdq);
1090 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Hardware-reset the adapter and poll (up to ~100 seconds: 100000 x 1ms)
 * until it reports DMA_UNAVAILABLE.  Self-test is skipped except for a
 * halted non-DEFTA adapter.  NOTE(review): the function's name line is
 * not visible in this view — presumably pdq_hwreset; confirm upstream.
 */
1097 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1101 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
/* Already in the post-reset state: nothing to do. */
1102 if (state == PDQS_DMA_UNAVAILABLE)
1104 PDQ_CSR_WRITE(csrs, csr_port_data_a,
1105 (state == PDQS_HALTED && pdq->pdq_type != PDQ_DEFTA) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
/* Pulse the reset line. */
1106 PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
1107 PDQ_OS_USEC_DELAY(100);
1108 PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
1109 for (cnt = 100000;;cnt--) {
1110 PDQ_OS_USEC_DELAY(1000);
1111 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1112 if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
1115 PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
1116 PDQ_OS_USEC_DELAY(10000);
1117 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1118 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1119 PDQ_ASSERT(cnt > 0);
1123 * The following routine brings the PDQ from whatever state it is
1124 * in to DMA_UNAVAILABLE (i.e. like a RESET but without doing a RESET).
/*
 * NOTE(review): the caller uses the return value as the resulting
 * adapter state (see "state = pdq_stop(pdq)" in pdq_initialize).
 */
1131 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1132 int cnt, pass = 0, idx;
1133 PDQ_OS_DATABUF_T **buffers;
/* If the adapter is not already reset, force a hardware reset first. */
1136 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1137 if (state != PDQS_DMA_UNAVAILABLE) {
1139 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1140 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
/*
 * Walk the adapter down the state ladder: on-ring states are taken
 * to DMA_AVAILABLE via the LINK_UNINIT sub-command, and
 * DMA_AVAILABLE is taken to DMA_UNAVAILABLE via DMA_UNINIT.
 */
1144 case PDQS_RING_MEMBER:
1145 case PDQS_LINK_UNAVAILABLE:
1146 case PDQS_LINK_AVAILABLE: {
1147 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1148 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1149 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1150 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1151 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1154 case PDQS_DMA_AVAILABLE: {
1155 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1156 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1157 pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1158 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1159 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1162 case PDQS_DMA_UNAVAILABLE: {
1168 * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into
1173 * Obtain the hardware address and firmware revisions
1174 * (MLA = my long address which is FDDI speak for hardware address)
1176 pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1177 pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1178 pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1180 if (pdq->pdq_type == PDQ_DEFPA) {
1182 * Disable interrupts and DMA.
/* 0x10 written to PFI status -- presumably write-one-to-clear; TODO confirm bit meaning. */
1184 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1185 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1189 * Flush all the databuf queues.
1191 pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
1192 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
/* Release every receive buffer still held by the driver ... */
1193 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1194 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1195 if (buffers[idx] != NULL) {
1196 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1197 buffers[idx] = NULL;
1200 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
/* ... and the same for the host SMT ring. */
1201 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1202 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1203 if (buffers[idx] != NULL) {
1204 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1205 buffers[idx] = NULL;
1208 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1211 * Reset the consumer indexes to 0.
1213 pdq->pdq_cbp->pdqcb_receives = 0;
1214 pdq->pdq_cbp->pdqcb_transmits = 0;
1215 pdq->pdq_cbp->pdqcb_host_smt = 0;
1216 pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1217 pdq->pdq_cbp->pdqcb_command_response = 0;
1218 pdq->pdq_cbp->pdqcb_command_request = 0;
1219 PDQ_OS_CONSUMER_PRESYNC(pdq);
1222 * Reset the producer and completion indexes to 0.
1224 pdq->pdq_command_info.ci_request_producer = 0;
1225 pdq->pdq_command_info.ci_response_producer = 0;
1226 pdq->pdq_command_info.ci_request_completion = 0;
1227 pdq->pdq_command_info.ci_response_completion = 0;
1228 pdq->pdq_unsolicited_info.ui_producer = 0;
1229 pdq->pdq_unsolicited_info.ui_completion = 0;
1230 pdq->pdq_rx_info.rx_producer = 0;
1231 pdq->pdq_rx_info.rx_completion = 0;
1232 pdq->pdq_tx_info.tx_producer = 0;
1233 pdq->pdq_tx_info.tx_completion = 0;
1234 pdq->pdq_host_smt_info.rx_producer = 0;
1235 pdq->pdq_host_smt_info.rx_completion = 0;
1237 pdq->pdq_command_info.ci_command_active = 0;
1238 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1239 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1242 * Allow the DEFPA to do DMA. Then program the physical
1243 * addresses of the consumer and descriptor blocks.
1245 if (pdq->pdq_type == PDQ_DEFPA) {
1247 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1248 PDQ_PFI_MODE_DMA_ENABLE);
1250 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1251 PDQ_PFI_MODE_DMA_ENABLE
1252 /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1257 * Make sure the unsolicited queue has events ...
1259 pdq_process_unsolicited_events(pdq);
/* Rev-E DEFEA and DEFTA take 16-longword DMA bursts; everything else 8. */
1261 if ((pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1262 || pdq->pdq_type == PDQ_DEFTA)
1263 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1265 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1266 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1267 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1270 * Make sure there isn't stale information in the caches before
1271 * telling the adapter about the blocks it's going to use.
1273 PDQ_OS_CONSUMER_PRESYNC(pdq);
/* Hand the adapter the physical address of the consumer block ... */
1275 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1276 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
1277 pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
/* ... and of the descriptor block, with byte-swap flags chosen by host endianness. */
1279 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1280 #if !defined(BYTE_ORDER) || BYTE_ORDER == LITTLE_ENDIAN
1281 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
1283 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA | PDQ_DMA_INIT_LW_BSWAP_LITERAL);
1285 pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
/* Poll (1ms per spin, up to 1000 spins) for DMA_AVAILABLE; HALTED aborts the wait. */
1287 for (cnt = 0; cnt < 1000; cnt++) {
1288 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1289 if (state == PDQS_HALTED) {
1295 if (state == PDQS_DMA_AVAILABLE) {
1296 PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1299 PDQ_OS_USEC_DELAY(1000);
1301 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
/* Ack all type-0 interrupt causes; leave the interrupt mask empty for now. */
1303 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1304 pdq->pdq_intrmask = 0;
1305 /* PDQ_HOST_INT_STATE_CHANGE
1306 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1307 |PDQ_HOST_INT_UNSOL_ENABLE */;
1308 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1311 * Any other command but START should be valid.
1313 pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1314 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1315 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1316 pdq_queue_commands(pdq);
1318 if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1320 * Now wait (up to 100ms) for the command(s) to finish.
1322 for (cnt = 0; cnt < 1000; cnt++) {
1323 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1324 pdq_process_command_responses(pdq);
1325 if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1327 PDQ_OS_USEC_DELAY(1000);
1329 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
/*
 * Bring a stopped adapter into operation: re-arm the interrupt mask,
 * prime the receive rings, and queue the configuration commands
 * (filter, address filter, SNMP, and -- from DMA_AVAILABLE -- START).
 * The entry asserts document the states this must not be called in.
 */
1339 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1342 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1343 PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1344 PDQ_ASSERT(state != PDQS_RESET);
1345 PDQ_ASSERT(state != PDQS_HALTED);
1346 PDQ_ASSERT(state != PDQS_UPGRADE);
1347 PDQ_ASSERT(state != PDQS_RING_MEMBER);
1349 case PDQS_DMA_AVAILABLE: {
1351 * The PDQ after being reset screws up some of its state.
1352 * So we need to clear all the errors/interrupts so the real
1353 * ones will get through.
1355 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1356 pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
1357 |PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
1358 |PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1359 |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
1360 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1362 * Set the MAC and address filters and start up the PDQ.
1364 pdq_process_unsolicited_events(pdq);
/* Prime the normal receive ring and advance the type-2 producer. */
1365 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1366 pdq->pdq_dbp->pdqdb_receives,
1367 pdq->pdq_cbp->pdqcb_receives,
1368 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1369 PDQ_DO_TYPE2_PRODUCER(pdq);
/* Likewise the host SMT ring, if SMT frames are passed to the host. */
1370 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1371 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1372 pdq->pdq_dbp->pdqdb_host_smt,
1373 pdq->pdq_cbp->pdqcb_host_smt,
1374 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1375 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1376 pdq->pdq_host_smt_info.rx_producer
1377 | (pdq->pdq_host_smt_info.rx_completion << 8));
/* From DMA_AVAILABLE a START command is needed to join the ring. */
1379 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1380 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1381 | PDQ_BITMASK(PDQC_SNMP_SET)
1382 | PDQ_BITMASK(PDQC_START);
1383 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1384 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1385 pdq_queue_commands(pdq);
1388 case PDQS_LINK_UNAVAILABLE:
1389 case PDQS_LINK_AVAILABLE: {
/* Link already initialized: reconfigure without START. */
1390 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1391 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1392 | PDQ_BITMASK(PDQC_SNMP_SET);
1393 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1394 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1395 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1396 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1397 pdq->pdq_dbp->pdqdb_host_smt,
1398 pdq->pdq_cbp->pdqcb_host_smt,
1399 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1400 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1401 pdq->pdq_host_smt_info.rx_producer
1402 | (pdq->pdq_host_smt_info.rx_completion << 8));
1404 pdq_process_unsolicited_events(pdq);
1405 pdq_queue_commands(pdq);
1408 case PDQS_RING_MEMBER: {
1410 default: { /* to make gcc happy */
/*
 * Interrupt service routine.  Loops while port status shows any
 * pending interrupt cause, dispatching each cause bit in turn:
 * received data, host SMT frames, transmit completions, unsolicited
 * events, command responses, and the type-0 causes (state change,
 * fatal error, transmit-flush request).  On a DEFPA the PFI status
 * register is written with 0x18 on entry and after each pass --
 * presumably acking PFI interrupt bits; TODO confirm bit meanings.
 */
1420 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1424 if (pdq->pdq_type == PDQ_DEFPA)
1425 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1427 while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1429 PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
/* Pull the consumer block out of DMA space before looking at it. */
1430 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1431 if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1432 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1433 pdq->pdq_dbp->pdqdb_receives,
1434 pdq->pdq_cbp->pdqcb_receives,
1435 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1436 PDQ_DO_TYPE2_PRODUCER(pdq);
1438 if (data & PDQ_PSTS_HOST_SMT_PENDING) {
1439 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1440 pdq->pdq_dbp->pdqdb_host_smt,
1441 pdq->pdq_cbp->pdqcb_host_smt,
1442 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1443 PDQ_DO_HOST_SMT_PRODUCER(pdq);
/* Transmit completions are reaped unconditionally, not only when flagged. */
1445 /* if (data & PDQ_PSTS_XMT_DATA_PENDING) */
1446 pdq_process_transmitted_data(pdq);
1447 if (data & PDQ_PSTS_UNSOL_PENDING)
1448 pdq_process_unsolicited_events(pdq);
1449 if (data & PDQ_PSTS_CMD_RSP_PENDING)
1450 pdq_process_command_responses(pdq);
1451 if (data & PDQ_PSTS_TYPE_0_PENDING) {
1452 data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
1453 if (data & PDQ_HOST_INT_STATE_CHANGE) {
1454 pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1455 printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
1456 if (state == PDQS_LINK_UNAVAILABLE) {
1457 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
1458 } else if (state == PDQS_LINK_AVAILABLE) {
/* Link came up: optionally probe for full-duplex, then allow transmits. */
1459 if (pdq->pdq_flags & PDQ_WANT_FDX) {
1460 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
1461 pdq_queue_commands(pdq);
1463 pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
1464 pdq_os_restart_transmitter(pdq);
1465 } else if (state == PDQS_HALTED) {
/* Adapter halted: report the halt code and dump the error log entry. */
1466 pdq_response_error_log_get_t log_entry;
1467 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
1468 printf(": halt code = %d (%s)\n",
1469 halt_code, pdq_halt_codes[halt_code]);
1470 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
1471 PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1472 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1473 data & PDQ_HOST_INT_FATAL_ERROR));
1475 PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
1476 if (pdq_read_error_log(pdq, &log_entry)) {
1477 PDQ_PRINTF((" Error log Entry:\n"));
1478 PDQ_PRINTF((" CMD Status = %d (0x%x)\n",
1479 log_entry.error_log_get_status,
1480 log_entry.error_log_get_status));
1481 PDQ_PRINTF((" Event Status = %d (0x%x)\n",
1482 log_entry.error_log_get_event_status,
1483 log_entry.error_log_get_event_status));
1484 PDQ_PRINTF((" Caller Id = %d (0x%x)\n",
1485 log_entry.error_log_get_caller_id,
1486 log_entry.error_log_get_caller_id));
1487 PDQ_PRINTF((" Write Count = %d (0x%x)\n",
1488 log_entry.error_log_get_write_count,
1489 log_entry.error_log_get_write_count));
1490 PDQ_PRINTF((" FRU Implication Mask = %d (0x%x)\n",
1491 log_entry.error_log_get_fru_implication_mask,
1492 log_entry.error_log_get_fru_implication_mask));
1493 PDQ_PRINTF((" Test ID = %d (0x%x)\n",
1494 log_entry.error_log_get_test_id,
1495 log_entry.error_log_get_test_id));
1498 if (pdq->pdq_flags & PDQ_RUNNING)
/* Ack the state-change cause. */
1503 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
1505 if (data & PDQ_HOST_INT_FATAL_ERROR) {
1507 if (pdq->pdq_flags & PDQ_RUNNING)
/* Adapter asked us to flush the transmit queue: do it, signal done, ack. */
1511 if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
1512 printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
1513 pdq->pdq_flags &= ~PDQ_TXOK;
1514 pdq_flush_transmitter(pdq);
1515 pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
1516 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
1519 if (pdq->pdq_type == PDQ_DEFPA)
1520 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1528 pdq_bus_memaddr_t csr_base,
1536 pdq_descriptor_block_t *dbp;
1537 #if !defined(PDQ_BUS_DMA)
1538 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1543 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1544 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1545 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1546 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1547 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1548 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1549 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1550 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1551 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1553 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1555 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1558 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1559 pdq->pdq_type = type;
1560 pdq->pdq_unit = unit;
1561 pdq->pdq_os_ctx = (void *) ctx;
1562 pdq->pdq_os_name = name;
1563 pdq->pdq_flags = PDQ_PRINTCHARS;
1565 * Allocate the additional data structures required by
1566 * the PDQ driver. Allocate a contiguous region of memory
1567 * for the descriptor block. We need to allocate enough
1568 * to guarantee that we will get an 8KB block of memory aligned
1569 * on an 8KB boundary. This turns out to require that we allocate
1570 * (N*2 - 1 page) pages of memory. On machines with less than
1571 * an 8KB page size, it means we will allocate more memory than
1572 * we need. The extra will be used for the unsolicited event
1573 * buffers (though on machines with 8KB pages we will have to allocate
1574 * them separately since there will be nothing left over.)
1576 #if defined(PDQ_OS_MEMALLOC_CONTIG)
1577 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1580 printf("%s() - PDQ_OS_MEMALLOC_CONTIG() failed!\n", __FUNCTION__);
1583 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
1585 * Assert that we really got contiguous memory. This isn't really
1586 * needed on systems that actually have physical contiguous allocation
1587 * routines, but on those systems that don't ...
1589 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1590 if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
1591 goto cleanup_and_return;
1593 if (physaddr & 0x1FFF) {
1594 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1595 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
1596 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
1597 pdq->pdq_pa_descriptor_block = physaddr & ~0x1FFFUL;
1599 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1600 pdq->pdq_pa_descriptor_block = physaddr;
1601 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1602 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
1605 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1606 pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
1607 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1608 pdq->pdq_unsolicited_info.ui_events =
1609 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1610 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1613 if (pdq_os_memalloc_contig(pdq))
1614 goto cleanup_and_return;
1618 * Make sure everything got allocated. If not, free what did
1619 * get allocated and return.
1621 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1623 #ifdef PDQ_OS_MEMFREE_CONTIG
1624 if (p /* pdq->pdq_dbp */ != NULL)
1625 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1626 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1627 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1628 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1630 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1635 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
1636 PDQ_PRINTF((" Receive Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
1637 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
1638 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
1639 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
1640 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
1641 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1644 * Zero out the descriptor block. Not really required but
1645 * it pays to be neat. This will also zero out the consumer
1646 * block, command pool, and buffer pointers for the receive
1649 PDQ_OS_MEMZERO(dbp, sizeof(*dbp));
1652 * Initialize the CSR references.
1653 * the DEFAA (FutureBus+) skips a longword between registers
1655 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
1656 if (pdq->pdq_type == PDQ_DEFPA)
1657 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1659 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
1660 PDQ_PRINTF((" Port Reset = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1661 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1662 PDQ_PRINTF((" Host Data = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1663 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1664 PDQ_PRINTF((" Port Control = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1665 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1666 PDQ_PRINTF((" Port Data A = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1667 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1668 PDQ_PRINTF((" Port Data B = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1669 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1670 PDQ_PRINTF((" Port Status = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1671 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1672 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1673 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1674 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1675 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1676 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1677 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1678 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1679 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1680 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1681 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1682 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1683 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1684 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1685 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1688 * Initialize the command information block
1690 pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
1691 pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
1692 pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
1693 PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1694 pdq->pdq_command_info.ci_request_bufstart,
1695 pdq->pdq_command_info.ci_pa_request_bufstart));
1696 for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
1697 pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];
1699 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_request_bufstart;
1700 txd->txd_eop = txd->txd_sop = 1;
1703 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
1704 sizeof(dbp->pdqdb_command_requests));
1706 pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
1707 pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
1708 pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
1709 PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1710 pdq->pdq_command_info.ci_response_bufstart,
1711 pdq->pdq_command_info.ci_pa_response_bufstart));
1712 for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
1713 pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];
1715 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_response_bufstart;
1717 rxd->rxd_seg_cnt = 0;
1718 rxd->rxd_seg_len_lo = 0;
1719 rxd->rxd_seg_len_hi = PDQ_SIZE_COMMAND_RESPONSE / 16;
1721 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
1722 sizeof(dbp->pdqdb_command_responses));
1725 * Initialize the unsolicited event information block
1727 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1728 pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
1729 PDQ_PRINTF(("PDQ Unsolicit Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1730 pdq->pdq_unsolicited_info.ui_events,
1731 pdq->pdq_unsolicited_info.ui_pa_bufstart));
1732 for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
1733 pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
1734 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1737 rxd->rxd_seg_cnt = 0;
1738 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1739 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1740 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1742 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
1744 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
1745 sizeof(dbp->pdqdb_unsolicited_events));
1748 * Initialize the receive information blocks (normal and SMT).
1750 pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
1751 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
1752 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1753 pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);
1755 pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
1756 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
1757 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1758 pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);
1761 * Initialize the transmit information block.
1763 dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
1764 dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
1765 dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
1766 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
1767 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = 3;
1768 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1769 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr);
1770 pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);
1772 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1773 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1776 * Stop the PDQ if it is running and put it into a known state.
1778 state = pdq_stop(pdq);
1780 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1781 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1783 * If the adapter is not the state we expect, then the initialization
1784 * failed. Cleanup and exit.
1786 #if defined(PDQVERBOSE)
1787 if (state == PDQS_HALTED) {
1788 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1789 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1790 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1791 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1792 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1793 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1796 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1797 goto cleanup_and_return;
1799 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1800 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1801 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1802 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1803 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1804 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1805 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1806 PDQ_PRINTF(("PDQ Chip Revision = "));
1807 switch (pdq->pdq_chip_rev) {
1808 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1809 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1810 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1811 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));