1 /* $NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $ */
4 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * DEC PDQ FDDI Controller O/S independent code
36 * This module should work any on PDQ based board. Note that changes for
37 * MIPS and Alpha architectures (or any other architecture which requires
38 * a flushing of memory or write buffers and/or has incoherent caches)
39 * have yet to be made.
41 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
42 * flushing of the write buffers.
46 #define PDQ_HWSUPPORT /* for pdq.h */
49 * What a botch having to specific includes for FreeBSD!
51 #include <dev/pdq/pdq_freebsd.h>
52 #include <dev/pdq/pdqreg.h>
/* Round n up to the next multiple of x; the bitmask form requires x to be a power of two. */
54 #define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
/* Command/receive buffers are padded to this alignment (see pdq_queue_commands). */
55 #define PDQ_CMD_RX_ALIGNMENT 16
/* Debug printf: real printf under PDQTEST/PDQVERBOSE, otherwise a no-op.
 * NOTE(review): the #else/#endif between the two definitions is not visible
 * in this extract (interior line numbers skip 59/61+) — confirm in full source. */
57 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
58 #define PDQ_PRINTF(x) printf x
60 #define PDQ_PRINTF(x) do { } while (0)
/*
 * Read-only string tables used when printing adapter status and
 * unsolicited events to the console (indexed by the corresponding
 * hardware event/state codes).
 * NOTE(review): interior line numbers in this extract are non-contiguous;
 * several table entries and closing "};" lines are missing from this view,
 * so the tables below are only partially visible.
 */
/* Halt reason strings (adapter self-reported halt codes). */
63 static const char * const pdq_halt_codes[] = {
64 "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
65 "Software Fault", "Hardware Fault", "PC Trace Path Test",
66 "DMA Error", "Image CRC Error", "Adapter Processer Error"
/* Adapter state names, indexed by PDQS_* state codes (see pdq_hwreset/pdq_stop). */
69 static const char * const pdq_adapter_states[] = {
70 "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
71 "Link Available", "Link Unavailable", "Halted", "Ring Member"
75 * The following are used in conjunction with
/* Entity names used by pdq_process_unsolicited_events (station/link/phy). */
78 static const char * const pdq_entities[] = {
79 "Station", "Link", "Phy Port"
82 static const char * const pdq_station_events[] = {
87 static const char * const pdq_station_arguments[] = {
91 static const char * const pdq_link_events[] = {
94 "Block Check Error (CRC)",
99 "Receive Data Overrun",
102 "Ring Initialization Initiated",
103 "Ring Initialization Received",
104 "Ring Beacon Initiated",
105 "Duplicate Address Failure",
106 "Duplicate Token Detected",
110 "Directed Beacon Received",
113 static const char * const pdq_link_arguments[] = {
120 static const char * const pdq_phy_events[] = {
121 "LEM Error Monitor Reject",
/* "Elasticy" appears verbatim in the vendor strings; left as-is (runtime text). */
122 "Elasticy Buffer Error",
123 "Link Confidence Test Reject"
126 static const char * const pdq_phy_arguments[] = {
/* Per-entity indirection tables: event argument/code tables selected by entity id. */
130 static const char * const * const pdq_event_arguments[] = {
131 pdq_station_arguments,
136 static const char * const * const pdq_event_codes[] = {
/* Station attachment types, indexed by status_chars_get.station_type. */
142 static const char * const pdq_station_types[] = {
143 "SAS", "DAC", "SAC", "NAC", "DAS"
146 static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };
/* One letter per phy type code, indexed by status_chars_get.phy_type[]. */
148 static const char pdq_phy_types[] = "ABSM";
/* PMD type names: codes < 100 index pdq_pmd_types0, codes >= 100 index
 * pdq_pmd_types100 (see the /100 and %100 indexing in pdq_print_fddi_chars). */
150 static const char * const pdq_pmd_types0[] = {
151 "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
155 static const char * const pdq_pmd_types100[] = {
156 "Low Power", "Thin Wire", "Shielded Twisted Pair",
157 "Unshielded Twisted Pair"
160 static const char * const * const pdq_pmd_types[] = {
161 pdq_pmd_types0, pdq_pmd_types100
/* Board model names, indexed by pdq->pdq_type (entries not visible here). */
164 static const char * const pdq_descriptions[] = {
/*
 * pdq_print_fddi_chars -- pretty-print the adapter's FDDI characteristics
 * (board model, station type, MAC address, firmware/hardware revisions,
 * SMT version, and per-port phy/PMD types) from a STATUS_CHARS_GET
 * response, then hand the response to the OS layer via
 * pdq_os_update_status().
 * NOTE(review): this extract is missing lines (storage class, the pdq
 * parameter line, printf openers, braces); code below is a partial view.
 */
173 pdq_print_fddi_chars(
175 const pdq_response_status_chars_get_t *rsp)
177 const char hexchars[] = "0123456789abcdef";
181 "DEC %s FDDI %s Controller\n",
183 pdq_descriptions[pdq->pdq_type],
184 pdq_station_types[rsp->status_chars_get.station_type]);
/* MAC address printed one nibble at a time via the hexchars lookup table. */
186 printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
188 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
189 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
190 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
191 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
192 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
193 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
194 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
195 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
196 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
197 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
198 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
199 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
200 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
201 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
202 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
/* Only print the SMT version when the id indexes into pdq_smt_versions. */
204 if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
205 printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
/* Port A (label "[A]" only for dual-attach stations); PMD code split as
 * code/100 selecting the table and code%100 selecting the entry. */
208 printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
210 rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
211 pdq_phy_types[rsp->status_chars_get.phy_type[0]],
212 pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
/* Dual-attach stations also have a Port B. */
214 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
215 printf(", FDDI Port[B] = %c (PMD = %s)",
216 pdq_phy_types[rsp->status_chars_get.phy_type[1]],
217 pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
/* Let the OS-specific layer record/update status from the same response. */
221 pdq_os_update_status(pdq, rsp);
/*
 * CSR map initialization (function name not visible in this extract;
 * presumably pdq_init_csrs — confirm in full source). Computes the bus
 * address of each PDQ control/status register as a fixed register index
 * scaled by csrsize (register stride) from csr_base.
 * NOTE(review): register index 9 is not assigned in this view — either a
 * genuinely unused slot or a line missing from the extract; verify.
 */
228 pdq_bus_memaddr_t csr_base,
232 csrs->csr_base = csr_base;
233 csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
234 csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
235 csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
236 csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
237 csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
238 csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
239 csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
240 csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
241 csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
242 csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
243 csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
244 csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
245 csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
/*
 * PCI (PFI) CSR map initialization — same scheme as the main CSR map but
 * for the PCI host-interface registers at indexes 16..19.
 * NOTE(review): function name/return line not visible in this extract.
 */
250 pdq_pci_csrs_t *csrs,
252 pdq_bus_memaddr_t csr_base,
256 csrs->csr_base = csr_base;
257 csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
258 csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
259 csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
260 csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
/*
 * pdq_flush_databuf_queue -- drain a databuf queue, dequeueing each PDU
 * and returning it to the OS buffer pool.
 * NOTE(review): the loop construct around the dequeue/free pair is not
 * visible in this extract (interior lines missing).
 */
264 pdq_flush_databuf_queue(
266 pdq_databuf_queue_t *q)
268 PDQ_OS_DATABUF_T *pdu;
270 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
273 PDQ_OS_DATABUF_FREE(pdq, pdu);
/*
 * pdq_do_port_control -- issue a port-control command to the adapter and
 * busy-wait (bounded spin, up to 33000000 iterations) for CMD_DONE.
 * Returns PDQ_TRUE on success, PDQ_FALSE if the adapter flagged
 * PDQ_PCTL_CMD_ERROR; falls through to the adapter-failure path if
 * CMD_DONE never asserts.
 * NOTE(review): function header and loop body lines are missing from this
 * extract.
 */
279 const pdq_csrs_t * const csrs,
/* Ack any stale CMD_DONE before issuing the new command. */
283 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
284 PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
285 while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
287 PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
288 if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
/* Ack CMD_DONE, then report success/failure from the CMD_ERROR bit. */
289 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
290 return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
292 /* adapter failure */
/*
 * pdq_read_mla -- read the adapter's hardware (MAC) address.
 * MLA = "My Long Address" in FDDI terminology. The address is fetched in
 * two 32-bit host-data reads selected by writing 0 then 1 to port_data_a;
 * each word is unpacked little-endian into lanaddr_bytes[].
 */
299 const pdq_csrs_t * const csrs,
300 pdq_lanaddr_t *hwaddr)
/* First word: address bytes 0..3. */
304 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
305 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
306 data = PDQ_CSR_READ(csrs, csr_host_data);
308 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
309 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
310 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
311 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
/* Second word: address bytes 4..5 (upper 16 bits unused here). */
313 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
314 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
315 data = PDQ_CSR_READ(csrs, csr_host_data);
317 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
318 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
/*
 * pdq_read_fwrev -- read the firmware revision as one 32-bit word and
 * unpack it big-endian (byte 0 of the revision comes from the top byte).
 * NOTE(review): function header lines not visible in this extract.
 */
323 const pdq_csrs_t * const csrs,
328 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
329 data = PDQ_CSR_READ(csrs, csr_host_data);
331 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
332 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
333 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
334 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
/*
 * pdq_read_error_log -- stream the adapter's error log into *log_entry,
 * one 32-bit word per FW_REV_READ port-control cycle, stopping when the
 * structure is full or the adapter stops supplying data. Returns
 * PDQ_TRUE if at least one word was read, PDQ_FALSE otherwise.
 * NOTE(review): lines (including the loop's closing brace) are missing
 * from this extract.
 */
340 pdq_response_error_log_get_t *log_entry)
342 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
343 pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;
345 pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);
/* FW_REV_READ is reused here to clock successive log words out of host_data. */
347 while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
348 *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
/* Stop once the destination structure is completely filled. */
349 if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
352 return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
/*
 * pdq_read_chiprev -- query the PDQ chip revision via the REV_GET
 * sub-command and return the raw value cast to pdq_chip_rev_t.
 */
355 static pdq_chip_rev_t
357 const pdq_csrs_t * const csrs)
361 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
362 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
363 data = PDQ_CSR_READ(csrs, csr_host_data);
365 return (pdq_chip_rev_t) data;
/*
 * pdq_cmd_info -- per-opcode table giving the request size, expected
 * response size, and a human-readable name for each PDQ command code.
 * Indexed by pdq_cmd_code_t in pdq_queue_commands /
 * pdq_process_command_responses.
 * NOTE(review): this extract is missing lines (field declarations,
 * closing braces, some name strings). The bare { 0, 0, "Counters Get" }
 * / { 0, 0, "Counters Set" } entries appear alongside sized
 * PDQC_COUNTERS_* entries — presumably placeholder slots; verify against
 * the full source before relying on table indexes.
 */
368 static const struct {
371 const char *cmd_name;
373 { sizeof(pdq_cmd_generic_t), /* 0 - PDQC_START */
374 sizeof(pdq_response_generic_t),
377 { sizeof(pdq_cmd_filter_set_t), /* 1 - PDQC_FILTER_SET */
378 sizeof(pdq_response_generic_t),
381 { sizeof(pdq_cmd_generic_t), /* 2 - PDQC_FILTER_GET */
382 sizeof(pdq_response_filter_get_t),
385 { sizeof(pdq_cmd_chars_set_t), /* 3 - PDQC_CHARS_SET */
386 sizeof(pdq_response_generic_t),
389 { sizeof(pdq_cmd_generic_t), /* 4 - PDQC_STATUS_CHARS_GET */
390 sizeof(pdq_response_status_chars_get_t),
394 { sizeof(pdq_cmd_generic_t), /* 5 - PDQC_COUNTERS_GET */
395 sizeof(pdq_response_counters_get_t),
398 { sizeof(pdq_cmd_counters_set_t), /* 6 - PDQC_COUNTERS_SET */
399 sizeof(pdq_response_generic_t),
403 { 0, 0, "Counters Get" },
404 { 0, 0, "Counters Set" },
406 { sizeof(pdq_cmd_addr_filter_set_t), /* 7 - PDQC_ADDR_FILTER_SET */
407 sizeof(pdq_response_generic_t),
410 { sizeof(pdq_cmd_generic_t), /* 8 - PDQC_ADDR_FILTER_GET */
411 sizeof(pdq_response_addr_filter_get_t),
414 { sizeof(pdq_cmd_generic_t), /* 9 - PDQC_ERROR_LOG_CLEAR */
415 sizeof(pdq_response_generic_t),
418 { sizeof(pdq_cmd_generic_t), /* 10 - PDQC_ERROR_LOG_SET */
419 sizeof(pdq_response_generic_t),
422 { sizeof(pdq_cmd_generic_t), /* 11 - PDQC_FDDI_MIB_GET */
423 sizeof(pdq_response_generic_t),
426 { sizeof(pdq_cmd_generic_t), /* 12 - PDQC_DEC_EXT_MIB_GET */
427 sizeof(pdq_response_generic_t),
430 { sizeof(pdq_cmd_generic_t), /* 13 - PDQC_DEC_SPECIFIC_GET */
431 sizeof(pdq_response_generic_t),
434 { sizeof(pdq_cmd_generic_t), /* 14 - PDQC_SNMP_SET */
435 sizeof(pdq_response_generic_t),
439 { sizeof(pdq_cmd_generic_t), /* 16 - PDQC_SMT_MIB_GET */
440 sizeof(pdq_response_generic_t),
443 { sizeof(pdq_cmd_generic_t), /* 17 - PDQC_SMT_MIB_SET */
444 sizeof(pdq_response_generic_t),
/* Sentinel for out-of-range opcodes (see PDQC_BOGUS_CMD usage below). */
447 { 0, 0, "Bogus CMD" },
/*
 * pdq_queue_commands -- take the highest-numbered pending command from
 * ci_pending_commands (a bitmask), build its request in the command
 * request buffer, and produce it to the adapter. Only one command is in
 * flight at a time (guarded by ci_command_active).
 * NOTE(review): function header and several interior lines are missing
 * from this extract; code below is a partial view.
 */
454 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
455 pdq_command_info_t * const ci = &pdq->pdq_command_info;
456 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
457 pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
459 pdq_uint32_t cmdlen, rsplen, mask;
462 * If there are commands or responses active or there aren't
463 * any pending commands, then don't queue any more.
465 if (ci->ci_command_active || ci->ci_pending_commands == 0)
469 * Determine which command needs to be queued.
/* Scan the pending bitmask downward from the highest opcode
 * (PDQC_SMT_MIB_SET) until a set bit is found. */
471 op = PDQC_SMT_MIB_SET;
472 for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
473 op = (pdq_cmd_code_t) ((int) op - 1);
475 * Obtain the sizes needed for the command and response.
476 * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
477 * always properly aligned.
479 cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
480 rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
484 * Since only one command at a time will be queued, there will always
489 * Obtain and fill in the descriptor for the command (descriptor is
492 txd->txd_seg_len = cmdlen;
495 * Clear the command area, set the opcode, and the command from the pending
499 ci->ci_queued_commands[ci->ci_request_producer] = op;
500 #if defined(PDQVERBOSE)
/* Pre-poison the response opcode so a stale response is detectable. */
501 ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op = PDQC_BOGUS_CMD;
503 PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
504 *(pdq_cmd_code_t *) ci->ci_request_bufstart = op;
505 ci->ci_pending_commands &= ~mask;
508 * Fill in the command area, if needed.
/* FILTER_SET: translate driver flag bits into adapter filter items,
 * terminated by PDQI_EOL. */
511 case PDQC_FILTER_SET: {
512 pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
514 filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
515 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
517 filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
518 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
520 filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
521 filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
523 filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
524 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
526 filter_set->filter_set_items[idx].item_code = PDQI_EOL;
/* ADDR_FILTER_SET: first entry is the broadcast address, remaining
 * entries are filled in by the OS layer (multicast list). */
529 case PDQC_ADDR_FILTER_SET: {
530 pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
531 pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
532 addr->lanaddr_bytes[0] = 0xFF;
533 addr->lanaddr_bytes[1] = 0xFF;
534 addr->lanaddr_bytes[2] = 0xFF;
535 addr->lanaddr_bytes[3] = 0xFF;
536 addr->lanaddr_bytes[4] = 0xFF;
537 addr->lanaddr_bytes[5] = 0xFF;
/* 61 = remaining address slots after the broadcast entry — presumably
 * table size 62; confirm against pdqreg.h. */
539 pdq_os_addr_fill(pdq, addr, 61);
/* SNMP_SET: request/clear full-duplex (1 = enable, 2 = disable). */
542 case PDQC_SNMP_SET: {
543 pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
545 snmp_set->snmp_set_items[idx].item_code = PDQSNMP_FULL_DUPLEX_ENABLE;
546 snmp_set->snmp_set_items[idx].item_value = (pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
547 snmp_set->snmp_set_items[idx].item_port = 0;
549 snmp_set->snmp_set_items[idx].item_code = PDQSNMP_EOL;
552 default: { /* to make gcc happy */
559 * Sync the command request buffer and descriptor, then advance
560 * the request producer index.
562 PDQ_OS_CMDRQST_PRESYNC(pdq, txd->txd_seg_len);
563 PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
564 PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
567 * Sync the command response buffer and advance the response
568 * producer index (descriptor is already pre-initialized)
570 PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
571 PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
573 * At this point the command is done. All that needs to be done is to
574 * produce it to the PDQ.
576 PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
577 pdq_cmd_info[op].cmd_name));
579 ci->ci_command_active++;
/* Producer index in the low byte, completion index in bits 8..15. */
580 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
581 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
/*
 * pdq_process_command_responses -- consume the response for the command
 * currently in flight: validate it, perform any driver-side bookkeeping
 * (print FDDI chars, track FDX state), retire it, and queue the next
 * pending command if any.
 * NOTE(review): lines are missing from this extract (parameter list,
 * some braces); code below is a partial view.
 */
585 pdq_process_command_responses(
588 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
589 pdq_command_info_t * const ci = &pdq->pdq_command_info;
590 volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
591 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
592 const pdq_response_generic_t *rspgen;
595 * We have to process the command and response in tandem so
596 * just wait for the response to be consumed. If it has been
597 * consumed then the command must have been as well.
/* Nothing to do until the adapter has consumed the response slot. */
600 if (cbp->pdqcb_command_response == ci->ci_response_completion)
603 PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);
605 PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
606 rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
607 PDQ_ASSERT(rspgen->generic_op == ci->ci_queued_commands[ci->ci_request_completion]);
608 PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
609 PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
610 pdq_cmd_info[rspgen->generic_op].cmd_name,
611 rspgen->generic_status, rspgen->generic_status));
/* One-shot: print characteristics if a STATUS_CHARS_GET was requested
 * with PDQ_PRINTCHARS set; track full-duplex state from DEC_EXT_MIB_GET. */
613 if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
614 pdq->pdq_flags &= ~PDQ_PRINTCHARS;
615 pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
616 } else if (rspgen->generic_op == PDQC_DEC_EXT_MIB_GET) {
617 pdq->pdq_flags &= ~PDQ_IS_FDX;
618 if (((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational)
619 pdq->pdq_flags |= PDQ_IS_FDX;
/* Retire this command/response pair and clear the in-flight flag. */
622 PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
623 PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
624 ci->ci_command_active = 0;
626 if (ci->ci_pending_commands != 0) {
627 pdq_queue_commands(pdq);
/* No more pending work: just update the producer CSRs with the new
 * completion indexes. */
629 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
630 ci->ci_response_producer | (ci->ci_response_completion << 8));
631 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
632 ci->ci_request_producer | (ci->ci_request_completion << 8));
637 * This following routine processes unsolicited events.
638 * In addition, it also fills the unsolicited queue with
639 * event buffers so it can be used to initialize the queue
/*
 * NOTE(review): this extract is missing lines throughout the function
 * (switch braces, the bad-event handling path, counter-event body).
 */
643 pdq_process_unsolicited_events(
646 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
647 pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
648 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
649 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
652 * Process each unsolicited event (if any).
/* Walk events between our completion index and the adapter's consumer index. */
655 while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
656 const pdq_unsolicited_event_t *event;
657 event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
658 PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);
660 switch (event->event_type) {
661 case PDQ_UNSOLICITED_EVENT: {
/* Range-check the event code against the per-entity maximum before
 * using it to index the string tables. */
663 switch (event->event_entity) {
664 case PDQ_ENTITY_STATION: {
665 bad_event = event->event_code.value >= PDQ_STATION_EVENT_MAX;
668 case PDQ_ENTITY_LINK: {
669 bad_event = event->event_code.value >= PDQ_LINK_EVENT_MAX;
672 case PDQ_ENTITY_PHY_PORT: {
673 bad_event = event->event_code.value >= PDQ_PHY_EVENT_MAX;
684 printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
686 pdq_entities[event->event_entity],
687 pdq_event_codes[event->event_entity][event->event_code.value]);
/* Phy-port events carry a port index worth printing. */
688 if (event->event_entity == PDQ_ENTITY_PHY_PORT)
689 printf("[%d]", event->event_index);
693 case PDQ_UNSOLICITED_COUNTERS: {
697 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
698 PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
703 * Now give back the event buffers back to the PDQ.
705 PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
708 PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
709 ui->ui_producer | (ui->ui_completion << 8));
/*
 * pdq_process_received_data -- harvest completed receive descriptors up
 * to completion_goal: validate each PDU's status word and frame-control
 * byte, replace its ring buffers with freshly allocated ones, pass good
 * PDUs to the OS (pdq_os_receive_pdu), recycle bad ones in place, and
 * finally refill the ring toward rx_target.
 * NOTE(review): this extract is missing many lines (discard labels,
 * several closing braces, some descriptor-field assignments); code below
 * is a partial view — do not infer behavior for the missing paths.
 */
713 pdq_process_received_data(
716 pdq_rxdesc_t *receives,
717 pdq_uint32_t completion_goal,
718 pdq_uint32_t ring_mask)
720 pdq_uint32_t completion = rx->rx_completion;
721 pdq_uint32_t producer = rx->rx_producer;
722 PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
726 while (completion != completion_goal) {
727 PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
728 pdq_uint8_t *dataptr;
729 pdq_uint32_t fc, datalen, pdulen, segcnt;
730 pdq_rxstatus_t status;
/* fpdu = first segment of the PDU; the hardware writes the RX status
 * word at the start of that buffer. */
732 fpdu = lpdu = buffers[completion];
733 PDQ_ASSERT(fpdu != NULL);
734 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
735 dataptr = PDQ_OS_DATABUF_PTR(fpdu);
736 status = *(pdq_rxstatus_t *) dataptr;
737 if (status.rxs_rcc_badpdu == 0) {
738 datalen = status.rxs_len;
739 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
740 PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
/* Dispatch on the FDDI frame-control byte: LLC/implementer frames get
 * LLC length checks, everything else the SMT minimum. */
741 fc = dataptr[PDQ_RX_FC_OFFSET];
742 switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
743 case PDQ_FDDI_LLC_ASYNC:
744 case PDQ_FDDI_LLC_SYNC:
745 case PDQ_FDDI_IMP_ASYNC:
746 case PDQ_FDDI_IMP_SYNC: {
747 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
748 PDQ_PRINTF(("discard: bad length %d\n", datalen));
754 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
759 PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
764 * Update the lengths of the data buffers now that we know
/* pdulen excludes the 4-byte CRC; segcnt = number of ring buffers the
 * PDU spans. */
767 pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
768 segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
/* Replace every consumed ring buffer with a fresh allocation before
 * handing the old chain up the stack. */
769 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
771 PDQ_PRINTF(("discard: no databuf #0\n"));
774 buffers[completion] = npdu;
775 for (idx = 1; idx < segcnt; idx++) {
776 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
/* Allocation failure mid-chain: unlink and free what we built. */
778 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
779 PDQ_OS_DATABUF_FREE(pdq, fpdu);
782 PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
783 lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
784 buffers[(completion + idx) & ring_mask] = npdu;
786 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
/* Slide the (now fresh) buffers from the completion slots to the
 * producer slots so the ring stays dense. */
787 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
788 buffers[(producer + idx) & ring_mask] =
789 buffers[(completion + idx) & ring_mask];
790 buffers[(completion + idx) & ring_mask] = NULL;
/* Strip the driver header area and set per-segment lengths. */
792 PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
794 PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
796 PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
799 * Do not pass to protocol if packet was received promiscuously
801 pdq_os_receive_pdu(pdq, fpdu, pdulen,
802 status.rxs_rcc_dd < PDQ_RXS_RCC_DD_CAM_MATCH);
803 rx->rx_free += PDQ_RX_SEGCNT;
804 PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
805 PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
/* Bad-PDU path: log details; CRC errors get a full console message. */
808 PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
809 status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
810 status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
811 if (status.rxs_rcc_reason == 7)
813 if (status.rxs_rcc_reason != 0) {
815 if (status.rxs_rcc_badcrc) {
816 printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
818 dataptr[PDQ_RX_FC_OFFSET+1],
819 dataptr[PDQ_RX_FC_OFFSET+2],
820 dataptr[PDQ_RX_FC_OFFSET+3],
821 dataptr[PDQ_RX_FC_OFFSET+4],
822 dataptr[PDQ_RX_FC_OFFSET+5],
823 dataptr[PDQ_RX_FC_OFFSET+6]);
824 /* rx->rx_badcrc++; */
825 } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
826 /* rx->rx_frame_status_errors++; */
834 * Discarded frames go right back on the queue; therefore
835 * ring entries were freed.
/* Re-queue the discarded frame's buffers directly as new descriptors. */
837 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
838 buffers[producer] = buffers[completion];
839 buffers[completion] = NULL;
840 rxd = &receives[rx->rx_producer];
/* SOP (start-of-PDU) only on the first segment of the group. */
842 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
844 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
847 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
848 rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]);
849 PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
850 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
851 PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
852 PDQ_ADVANCE(producer, 1, ring_mask);
853 PDQ_ADVANCE(completion, 1, ring_mask);
856 rx->rx_completion = completion;
/* Refill loop: keep producing fresh PDQ_RX_SEGCNT-sized buffer groups
 * while we are above the free/target thresholds. */
858 while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
859 PDQ_OS_DATABUF_T *pdu;
861 * Allocate the needed number of data buffers.
862 * Try to obtain them from our free queue before
863 * asking the system for more.
865 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
866 if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
867 PDQ_OS_DATABUF_ALLOC(pdq, pdu);
870 buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
872 rxd = &receives[(rx->rx_producer + idx) & ring_mask];
874 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
876 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
879 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
880 rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, pdu);
881 PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
882 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
884 if (idx < PDQ_RX_SEGCNT) {
886 * We didn't get all databufs required to complete a new
887 * receive buffer. Keep the ones we got and retry a bit
888 * later for the rest.
892 PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
893 rx->rx_free -= PDQ_RX_SEGCNT;
/* Forward declaration: pdq_queue_transmit_data (below) calls it when the ring fills. */
897 static void pdq_process_transmitted_data(pdq_t *pdq);
/*
 * pdq_queue_transmit_data -- map a PDU (buffer chain) onto transmit
 * descriptors and produce them to the adapter. Two mapping paths exist:
 * a bus_dma segment walk (PDQ_BUS_DMA) and a page-boundary splitter.
 * On ring-full it enables the TX interrupt and (per the visible comment)
 * returns failure so the caller requeues the PDU.
 * NOTE(review): lines are missing throughout this extract (return
 * statements, several braces, parameter list); code below is partial.
 */
900 pdq_queue_transmit_data(
902 PDQ_OS_DATABUF_T *pdu)
904 pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
905 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
906 pdq_uint32_t producer = tx->tx_producer;
907 pdq_txdesc_t *eop = NULL;
908 PDQ_OS_DATABUF_T *pdu0;
909 pdq_uint32_t freecnt;
910 #if defined(PDQ_BUS_DMA)
/* Reserve one descriptor for the prepended header when the header is
 * not already part of the frame data. */
915 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
916 freecnt = tx->tx_free - 1;
918 freecnt = tx->tx_free;
921 * Need 2 or more descriptors to be able to send.
/* Ring too full: ask for a TX-done interrupt so we get woken up. */
924 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
925 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
/* Prepend the canned header descriptor when applicable. */
929 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
930 dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
931 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
932 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
/* bus_dma path: one descriptor per DMA segment from the PDU's map. */
935 #if defined(PDQ_BUS_DMA)
936 map = M_GETCTX(pdu, bus_dmamap_t);
937 if (freecnt >= map->dm_nsegs) {
939 for (idx = 0; idx < map->dm_nsegs; idx++) {
941 * Initialize the transmit descriptor
943 eop = &dbp->pdqdb_transmits[producer];
944 eop->txd_seg_len = map->dm_segs[idx].ds_len;
945 eop->txd_pa_lo = map->dm_segs[idx].ds_addr;
946 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
947 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
949 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
/* Non-bus_dma path: split each buffer at page boundaries. */
956 for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
957 pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
958 const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
961 * The first segment is limited to the space remaining in
962 * page. All segments after that can be up to a full page
965 fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
966 while (datalen > 0 && freecnt > 0) {
967 pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
970 * Initialize the transmit descriptor
972 eop = &dbp->pdqdb_transmits[producer];
973 eop->txd_seg_len = seglen;
974 eop->txd_pa_lo = PDQ_OS_VA_TO_BUSPA(pdq, dataptr);
975 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
976 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
979 fraglen = PDQ_OS_PAGESIZE;
981 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
983 pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
985 #endif /* defined(PDQ_BUS_DMA) */
/* Ran out of descriptors mid-PDU: try to reclaim completed transmits
 * and restart; otherwise enable the TX interrupt and bail. */
987 unsigned completion = tx->tx_completion;
988 PDQ_ASSERT(freecnt == 0);
989 PDQ_OS_CONSUMER_POSTSYNC(pdq);
990 pdq_process_transmitted_data(pdq);
991 if (completion != tx->tx_completion) {
992 producer = tx->tx_producer;
997 * If we still have data to process then the ring was too full
998 * to store the PDU. Return FALSE so the caller will requeue
1001 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
1002 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1006 * Everything went fine. Finish it up.
1008 tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
/* Mark SOP on the first descriptor when no header descriptor was used. */
1009 if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
1010 dbp->pdqdb_transmits[tx->tx_producer].txd_sop = 1;
1011 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
1012 sizeof(pdq_txdesc_t));
1015 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
/* Keep the PDU until completion (freed in pdq_process_transmitted_data). */
1016 PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
1017 tx->tx_producer = producer;
1018 tx->tx_free = freecnt;
1019 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * pdq_process_transmitted_data -- reclaim transmit descriptors the
 * adapter has consumed: free each completed PDU back to the OS, return
 * its descriptors to the free pool, and restart the transmitter if any
 * progress was made.
 */
1024 pdq_process_transmitted_data(
1027 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1028 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1029 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
1030 pdq_uint32_t completion = tx->tx_completion;
/* Walk from our completion index to the adapter's transmit consumer. */
1033 while (completion != cbp->pdqcb_transmits) {
1034 PDQ_OS_DATABUF_T *pdu;
1035 pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
/* Sanity: a completed group starts with SOP and ends with EOP. */
1036 PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
1037 PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
1038 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1039 pdq_os_transmit_done(pdq, pdu);
1040 tx->tx_free += descriptor_count;
1042 PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
/* Progress made: disable the TX interrupt and kick the OS transmitter. */
1044 if (tx->tx_completion != completion) {
1045 tx->tx_completion = completion;
1046 pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
1047 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1048 pdq_os_restart_transmitter(pdq);
1051 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * pdq_flush_transmitter -- discard all queued-but-unsent PDUs (freed,
 * not "transmit done", since they never went out), then reset the
 * transmit ring to an all-free state with consumer == completion ==
 * producer.
 * NOTE(review): the loop construct around the dequeue is not visible in
 * this extract.
 */
1055 pdq_flush_transmitter(
1058 volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1059 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1062 PDQ_OS_DATABUF_T *pdu;
1063 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1067 * Don't call transmit done since the packet never made it
1070 PDQ_OS_DATABUF_FREE(pdq, pdu);
1073 tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1074 cbp->pdqcb_transmits = tx->tx_completion = tx->tx_producer;
1075 PDQ_OS_CONSUMER_PRESYNC(pdq);
1077 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Hardware reset path (function name not visible in this extract;
 * presumably pdq_hwreset — confirm in full source). Skips the reset if
 * the adapter is already in DMA_UNAVAILABLE; otherwise pulses port_reset
 * and polls (up to 100000 x 1ms) for the adapter to reach
 * DMA_UNAVAILABLE. Self-test is skipped except for halted non-DEFTA
 * adapters.
 */
1084 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1088 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1089 if (state == PDQS_DMA_UNAVAILABLE)
/* Run the full self-test only when halted (and not a DEFTA). */
1091 PDQ_CSR_WRITE(csrs, csr_port_data_a,
1092 (state == PDQS_HALTED && pdq->pdq_type != PDQ_DEFTA) ? 0 : PDQ_PRESET_SKIP_SELFTEST);
1093 PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
1094 PDQ_OS_USEC_DELAY(100);
1095 PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
1096 for (cnt = 100000;;cnt--) {
1097 PDQ_OS_USEC_DELAY(1000);
1098 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1099 if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
1102 PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
1103 PDQ_OS_USEC_DELAY(10000);
1104 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1105 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1106 PDQ_ASSERT(cnt > 0);
1110 * The following routine brings the PDQ from whatever state it is
1111 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
1118 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1119 int cnt, pass = 0, idx;
1120 PDQ_OS_DATABUF_T **buffers;
/* If the adapter is not already in DMA_UNAVAILABLE, force it there. */
1123 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1124 if (state != PDQS_DMA_UNAVAILABLE) {
1126 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1127 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
/*
 * On-ring / link states: uninitialize the link first, which drops the
 * adapter back to DMA_AVAILABLE (asserted below).
 */
1131 case PDQS_RING_MEMBER:
1132 case PDQS_LINK_UNAVAILABLE:
1133 case PDQS_LINK_AVAILABLE: {
1134 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1135 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1136 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1137 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1138 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
/* DMA_AVAILABLE: uninitialize DMA to reach DMA_UNAVAILABLE. */
1141 case PDQS_DMA_AVAILABLE: {
1142 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1143 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1144 pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1145 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1146 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1149 case PDQS_DMA_UNAVAILABLE: {
1155 * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into
1160 * Obtain the hardware address and firmware revisions
1161 * (MLA = my long address which is FDDI speak for hardware address)
1163 pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1164 pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1165 pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1167 if (pdq->pdq_type == PDQ_DEFPA) {
1169 * Disable interrupts and DMA.
/* NOTE(review): the 0x10 written to csr_pfi_status presumably acks a PFI status bit -- confirm against the DEFPA PFI register spec. */
1171 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1172 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1176 * Flush all the databuf queues.
1178 pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
1179 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
/* Release any receive buffers still parked in the (now quiesced) receive ring. */
1180 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1181 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1182 if (buffers[idx] != NULL) {
1183 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1184 buffers[idx] = NULL;
1187 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
/* Do the same for the host SMT receive ring. */
1188 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1189 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1190 if (buffers[idx] != NULL) {
1191 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1192 buffers[idx] = NULL;
1195 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1198 * Reset the consumer indexes to 0.
1200 pdq->pdq_cbp->pdqcb_receives = 0;
1201 pdq->pdq_cbp->pdqcb_transmits = 0;
1202 pdq->pdq_cbp->pdqcb_host_smt = 0;
1203 pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1204 pdq->pdq_cbp->pdqcb_command_response = 0;
1205 pdq->pdq_cbp->pdqcb_command_request = 0;
/* Push the zeroed consumer block out to memory so the adapter sees it. */
1206 PDQ_OS_CONSUMER_PRESYNC(pdq);
1209 * Reset the producer and completion indexes to 0.
1211 pdq->pdq_command_info.ci_request_producer = 0;
1212 pdq->pdq_command_info.ci_response_producer = 0;
1213 pdq->pdq_command_info.ci_request_completion = 0;
1214 pdq->pdq_command_info.ci_response_completion = 0;
1215 pdq->pdq_unsolicited_info.ui_producer = 0;
1216 pdq->pdq_unsolicited_info.ui_completion = 0;
1217 pdq->pdq_rx_info.rx_producer = 0;
1218 pdq->pdq_rx_info.rx_completion = 0;
1219 pdq->pdq_tx_info.tx_producer = 0;
1220 pdq->pdq_tx_info.tx_completion = 0;
1221 pdq->pdq_host_smt_info.rx_producer = 0;
1222 pdq->pdq_host_smt_info.rx_completion = 0;
/* No command in flight; all unsolicited-event and transmit slots are free again. */
1224 pdq->pdq_command_info.ci_command_active = 0;
1225 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1226 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1229 * Allow the DEFPA to do DMA. Then program the physical
1230 * addresses of the consumer and descriptor blocks.
1232 if (pdq->pdq_type == PDQ_DEFPA) {
1234 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1235 PDQ_PFI_MODE_DMA_ENABLE);
1237 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1238 PDQ_PFI_MODE_DMA_ENABLE
1239 /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1244 * Make sure the unsolicited queue has events ...
1246 pdq_process_unsolicited_events(pdq);
/*
 * Select the DMA burst size: 16 longwords for a rev E DEFEA or a
 * DEFTA, 8 longwords otherwise.
 */
1248 if ((pdq->pdq_type == PDQ_DEFEA && pdq->pdq_chip_rev == PDQ_CHIP_REV_E)
1249 || pdq->pdq_type == PDQ_DEFTA)
1250 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_16LW);
1252 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1253 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1254 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1257 * Make sure there isn't stale information in the caches before
1258 * tell the adapter about the blocks it's going to use.
1260 PDQ_OS_CONSUMER_PRESYNC(pdq);
/* Hand the adapter the physical address of the consumer block. */
1262 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1263 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
1264 pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
/*
 * Hand the adapter the descriptor block, with byte-swap flags chosen
 * for the host byte order (literal-field swapping only on big endian).
 */
1266 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1267 #if !defined(BYTE_ORDER) || BYTE_ORDER == LITTLE_ENDIAN
1268 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
1270 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA | PDQ_DMA_INIT_LW_BSWAP_LITERAL);
1272 pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
/* Wait up to 1000 spins (1ms apart) for the DMA init to complete. */
1274 for (cnt = 0; cnt < 1000; cnt++) {
1275 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1276 if (state == PDQS_HALTED) {
1282 if (state == PDQS_DMA_AVAILABLE) {
1283 PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1286 PDQ_OS_USEC_DELAY(1000);
1288 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
/*
 * Ack all pending type-0 interrupts and leave every interrupt source
 * masked off; pdq_run() programs the real mask later.
 */
1290 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1291 pdq->pdq_intrmask = 0;
1292 /* PDQ_HOST_INT_STATE_CHANGE
1293 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1294 |PDQ_HOST_INT_UNSOL_ENABLE */;
1295 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1298 * Any other command but START should be valid.
1300 pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1301 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1302 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1303 pdq_queue_commands(pdq);
1305 if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1307 * Now wait (up to 100ms) for the command(s) to finish.
1309 for (cnt = 0; cnt < 1000; cnt++) {
1310 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1311 pdq_process_command_responses(pdq);
1312 if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1314 PDQ_OS_USEC_DELAY(1000);
/* Re-read the final adapter state for the caller (pdq_stop() reports it). */
1316 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1326 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
/*
 * pdq_run() is only legal after pdq_stop() has left the adapter in a
 * runnable state; all the "stopped"/transitional states are ruled out here.
 */
1329 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1330 PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1331 PDQ_ASSERT(state != PDQS_RESET);
1332 PDQ_ASSERT(state != PDQS_HALTED);
1333 PDQ_ASSERT(state != PDQS_UPGRADE);
1334 PDQ_ASSERT(state != PDQS_RING_MEMBER);
1336 case PDQS_DMA_AVAILABLE: {
1338 * The PDQ after being reset screws up some of its state.
1339 * So we need to clear all the errors/interrupts so the real
1340 * ones will get through.
1342 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
/* Enable the full working interrupt mask: state changes, errors, and all queues. */
1343 pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
1344 |PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
1345 |PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1346 |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
1347 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1349 * Set the MAC and address filters and start up the PDQ.
1351 pdq_process_unsolicited_events(pdq);
/* Prime the receive ring and publish the producer index to the adapter. */
1352 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1353 pdq->pdq_dbp->pdqdb_receives,
1354 pdq->pdq_cbp->pdqcb_receives,
1355 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1356 PDQ_DO_TYPE2_PRODUCER(pdq);
/* If SMT frames are being passed to the host, prime that ring as well. */
1357 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1358 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1359 pdq->pdq_dbp->pdqdb_host_smt,
1360 pdq->pdq_cbp->pdqcb_host_smt,
1361 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1362 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1363 pdq->pdq_host_smt_info.rx_producer
1364 | (pdq->pdq_host_smt_info.rx_completion << 8));
/* Queue filter/SNMP setup plus START to bring the adapter onto the ring. */
1366 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1367 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1368 | PDQ_BITMASK(PDQC_SNMP_SET)
1369 | PDQ_BITMASK(PDQC_START);
1370 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1371 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1372 pdq_queue_commands(pdq);
/*
 * Link already up or coming up: refresh the filters/SNMP settings
 * (no START command needed in these states).
 */
1375 case PDQS_LINK_UNAVAILABLE:
1376 case PDQS_LINK_AVAILABLE: {
1377 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1378 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1379 | PDQ_BITMASK(PDQC_SNMP_SET);
1380 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1381 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1382 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1383 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1384 pdq->pdq_dbp->pdqdb_host_smt,
1385 pdq->pdq_cbp->pdqcb_host_smt,
1386 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1387 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1388 pdq->pdq_host_smt_info.rx_producer
1389 | (pdq->pdq_host_smt_info.rx_completion << 8));
1391 pdq_process_unsolicited_events(pdq);
1392 pdq_queue_commands(pdq);
/* RING_MEMBER is asserted impossible above; default keeps gcc's -Wswitch quiet. */
1395 case PDQS_RING_MEMBER: {
1397 default: { /* to make gcc happy */
1407 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
/* NOTE(review): 0x18 written to csr_pfi_status presumably acks the PFI interrupt bits on the DEFPA -- confirm against the PFI register spec. */
1411 if (pdq->pdq_type == PDQ_DEFPA)
1412 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
/* Keep servicing as long as the port status shows any pending interrupt cause. */
1414 while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1416 PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
/* Sync the consumer block from the adapter before reading its indexes. */
1417 PDQ_OS_CONSUMER_POSTSYNC(pdq);
/* Completed receive descriptors: hand frames up, then refill the ring. */
1418 if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1419 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1420 pdq->pdq_dbp->pdqdb_receives,
1421 pdq->pdq_cbp->pdqcb_receives,
1422 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1423 PDQ_DO_TYPE2_PRODUCER(pdq);
/* Same for host SMT frames when the adapter passes them to the host. */
1425 if (data & PDQ_PSTS_HOST_SMT_PENDING) {
1426 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1427 pdq->pdq_dbp->pdqdb_host_smt,
1428 pdq->pdq_cbp->pdqcb_host_smt,
1429 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1430 PDQ_DO_HOST_SMT_PRODUCER(pdq);
/* Transmit completions are reaped unconditionally, not just on the status bit. */
1432 /* if (data & PDQ_PSTS_XMT_DATA_PENDING) */
1433 pdq_process_transmitted_data(pdq);
1434 if (data & PDQ_PSTS_UNSOL_PENDING)
1435 pdq_process_unsolicited_events(pdq);
1436 if (data & PDQ_PSTS_CMD_RSP_PENDING)
1437 pdq_process_command_responses(pdq);
/* Type-0 interrupts: state changes, fatal errors, transmit-flush requests. */
1438 if (data & PDQ_PSTS_TYPE_0_PENDING) {
1439 data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
1440 if (data & PDQ_HOST_INT_STATE_CHANGE) {
1441 pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1442 printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
1443 if (state == PDQS_LINK_UNAVAILABLE) {
/* Link went down: stop transmitting and drop the on-ring/FDX flags. */
1444 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
1445 } else if (state == PDQS_LINK_AVAILABLE) {
/* Link came up: optionally query the extended MIB (full-duplex probe), then resume transmit. */
1446 if (pdq->pdq_flags & PDQ_WANT_FDX) {
1447 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
1448 pdq_queue_commands(pdq);
1450 pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
1451 pdq_os_restart_transmitter(pdq);
1452 } else if (state == PDQS_HALTED) {
/* Adapter halted: report the halt code and dump the on-board error log. */
1453 pdq_response_error_log_get_t log_entry;
1454 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
1455 printf(": halt code = %d (%s)\n",
1456 halt_code, pdq_halt_codes[halt_code]);
1457 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA) {
1458 PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1459 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1460 data & PDQ_HOST_INT_FATAL_ERROR));
1462 PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
1463 if (pdq_read_error_log(pdq, &log_entry)) {
1464 PDQ_PRINTF((" Error log Entry:\n"));
1465 PDQ_PRINTF((" CMD Status = %d (0x%x)\n",
1466 log_entry.error_log_get_status,
1467 log_entry.error_log_get_status));
1468 PDQ_PRINTF((" Event Status = %d (0x%x)\n",
1469 log_entry.error_log_get_event_status,
1470 log_entry.error_log_get_event_status));
1471 PDQ_PRINTF((" Caller Id = %d (0x%x)\n",
1472 log_entry.error_log_get_caller_id,
1473 log_entry.error_log_get_caller_id));
1474 PDQ_PRINTF((" Write Count = %d (0x%x)\n",
1475 log_entry.error_log_get_write_count,
1476 log_entry.error_log_get_write_count));
1477 PDQ_PRINTF((" FRU Implication Mask = %d (0x%x)\n",
1478 log_entry.error_log_get_fru_implication_mask,
1479 log_entry.error_log_get_fru_implication_mask));
1480 PDQ_PRINTF((" Test ID = %d (0x%x)\n",
1481 log_entry.error_log_get_test_id,
1482 log_entry.error_log_get_test_id));
1485 if (pdq->pdq_flags & PDQ_RUNNING)
/* Ack the state-change interrupt now that it has been handled. */
1490 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
1492 if (data & PDQ_HOST_INT_FATAL_ERROR) {
1494 if (pdq->pdq_flags & PDQ_RUNNING)
/*
 * Transmit flush request: drop pending transmits, tell the adapter
 * the flush is done, and ack the interrupt.
 */
1498 if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
1499 printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
1500 pdq->pdq_flags &= ~PDQ_TXOK;
1501 pdq_flush_transmitter(pdq);
1502 pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
1503 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
/* Re-ack the DEFPA PFI interrupt bits before leaving (see note above). */
1506 if (pdq->pdq_type == PDQ_DEFPA)
1507 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1515 pdq_bus_memaddr_t csr_base,
1523 pdq_descriptor_block_t *dbp;
1524 #if !defined(PDQ_BUS_DMA)
1525 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1530 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1531 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1532 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1533 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1534 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1535 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1536 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1537 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1538 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1540 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1542 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1545 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1546 pdq->pdq_type = type;
1547 pdq->pdq_unit = unit;
1548 pdq->pdq_os_ctx = (void *) ctx;
1549 pdq->pdq_os_name = name;
1550 pdq->pdq_flags = PDQ_PRINTCHARS;
1552 * Allocate the additional data structures required by
1553 * the PDQ driver. Allocate a contiguous region of memory
1554 * for the descriptor block. We need to allocated enough
1555 * to guarantee that we will a get 8KB block of memory aligned
1556 * on a 8KB boundary. This turns to require that we allocate
1557 * (N*2 - 1 page) pages of memory. On machine with less than
1558 * a 8KB page size, it mean we will allocate more memory than
1559 * we need. The extra will be used for the unsolicited event
1560 * buffers (though on machines with 8KB pages we will to allocate
1561 * them separately since there will be nothing left overs.)
1563 #if defined(PDQ_OS_MEMALLOC_CONTIG)
1564 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1567 printf("%s() - PDQ_OS_MEMALLOC_CONTIG() failed!\n", __func__);
1570 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
1572 * Assert that we really got contiguous memory. This isn't really
1573 * needed on systems that actually have physical contiguous allocation
1574 * routines, but on those systems that don't ...
1576 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1577 if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
1578 goto cleanup_and_return;
1580 if (physaddr & 0x1FFF) {
1581 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1582 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
1583 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
1584 pdq->pdq_pa_descriptor_block = physaddr & ~0x1FFFUL;
1586 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1587 pdq->pdq_pa_descriptor_block = physaddr;
1588 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1589 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
1592 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1593 pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
1594 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1595 pdq->pdq_unsolicited_info.ui_events =
1596 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1597 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1600 if (pdq_os_memalloc_contig(pdq))
1601 goto cleanup_and_return;
1605 * Make sure everything got allocated. If not, free what did
1606 * get allocated and return.
1608 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1610 #ifdef PDQ_OS_MEMFREE_CONTIG
1611 if (p /* pdq->pdq_dbp */ != NULL)
1612 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1613 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1614 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1615 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1617 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1622 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
1623 PDQ_PRINTF((" Receive Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
1624 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
1625 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
1626 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
1627 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
1628 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1631 * Zero out the descriptor block. Not really required but
1632 * it pays to be neat. This will also zero out the consumer
1633 * block, command pool, and buffer pointers for the receive
1636 PDQ_OS_MEMZERO(dbp, sizeof(*dbp));
1639 * Initialize the CSR references.
1640 * the DEFAA (FutureBus+) skips a longword between registers
1642 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, pdq->pdq_type == PDQ_DEFAA ? 2 : 1);
1643 if (pdq->pdq_type == PDQ_DEFPA)
1644 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1646 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
1647 PDQ_PRINTF((" Port Reset = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1648 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1649 PDQ_PRINTF((" Host Data = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1650 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1651 PDQ_PRINTF((" Port Control = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1652 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1653 PDQ_PRINTF((" Port Data A = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1654 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1655 PDQ_PRINTF((" Port Data B = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1656 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1657 PDQ_PRINTF((" Port Status = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1658 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1659 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1660 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1661 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1662 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1663 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1664 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1665 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1666 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1667 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1668 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1669 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1670 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1671 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1672 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1675 * Initialize the command information block
1677 pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
1678 pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
1679 pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
1680 PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1681 pdq->pdq_command_info.ci_request_bufstart,
1682 pdq->pdq_command_info.ci_pa_request_bufstart));
1683 for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
1684 pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];
1686 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_request_bufstart;
1687 txd->txd_eop = txd->txd_sop = 1;
1690 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
1691 sizeof(dbp->pdqdb_command_requests));
1693 pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
1694 pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
1695 pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
1696 PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1697 pdq->pdq_command_info.ci_response_bufstart,
1698 pdq->pdq_command_info.ci_pa_response_bufstart));
1699 for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
1700 pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];
1702 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_response_bufstart;
1704 rxd->rxd_seg_cnt = 0;
1705 rxd->rxd_seg_len_lo = 0;
1706 rxd->rxd_seg_len_hi = PDQ_SIZE_COMMAND_RESPONSE / 16;
1708 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
1709 sizeof(dbp->pdqdb_command_responses));
1712 * Initialize the unsolicited event information block
1714 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1715 pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
1716 PDQ_PRINTF(("PDQ Unsolicit Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1717 pdq->pdq_unsolicited_info.ui_events,
1718 pdq->pdq_unsolicited_info.ui_pa_bufstart));
1719 for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
1720 pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
1721 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1724 rxd->rxd_seg_cnt = 0;
1725 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1726 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1727 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1729 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
1731 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
1732 sizeof(dbp->pdqdb_unsolicited_events));
1735 * Initialize the receive information blocks (normal and SMT).
1737 pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
1738 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
1739 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1740 pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);
1742 pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
1743 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
1744 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1745 pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);
1748 * Initialize the transmit information block.
1750 dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
1751 dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
1752 dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
1753 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
1754 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = 3;
1755 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1756 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr);
1757 pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);
1759 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1760 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1763 * Stop the PDQ if it is running and put it into a known state.
1765 state = pdq_stop(pdq);
1767 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1768 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1770 * If the adapter is not the state we expect, then the initialization
1771 * failed. Cleanup and exit.
1773 #if defined(PDQVERBOSE)
1774 if (state == PDQS_HALTED) {
1775 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1776 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1777 if (halt_code == PDQH_DMA_ERROR && pdq->pdq_type == PDQ_DEFPA)
1778 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1779 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1780 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1783 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1784 goto cleanup_and_return;
1786 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1787 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1788 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1789 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1790 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1791 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1792 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1793 PDQ_PRINTF(("PDQ Chip Revision = "));
1794 switch (pdq->pdq_chip_rev) {
1795 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1796 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1797 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1798 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));