1 /* $NetBSD: pdq.c,v 1.33 2001/11/13 13:14:43 lukem Exp $ */
4 * Copyright (c) 1995,1996 Matt Thomas <matt@3am-software.com>
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. The name of the author may not be used to endorse or promote products
13 * derived from this software without specific prior written permission
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
16 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
17 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
18 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
19 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
20 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
21 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
22 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
23 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
24 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
26 * Id: pdq.c,v 1.32 1997/06/05 01:56:35 thomas Exp
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
34 * DEC PDQ FDDI Controller O/S independent code
36 * This module should work any on PDQ based board. Note that changes for
37 * MIPS and Alpha architectures (or any other architecture which requires
38 * a flushing of memory or write buffers and/or has incoherent caches)
39 * have yet to be made.
41 * However, it is expected that the PDQ_CSR_WRITE macro will cause a
42 * flushing of the write buffers.
46 #define PDQ_HWSUPPORT /* for pdq.h */
49 * What a botch having to specific includes for FreeBSD!
51 #include <dev/pdq/pdq_freebsd.h>
52 #include <dev/pdq/pdqreg.h>
54 #define PDQ_ROUNDUP(n, x) (((n) + ((x) - 1)) & ~((x) - 1))
55 #define PDQ_CMD_RX_ALIGNMENT 16
57 #if (defined(PDQTEST) && !defined(PDQ_NOPRINTF)) || defined(PDQVERBOSE)
58 #define PDQ_PRINTF(x) printf x
60 #define PDQ_PRINTF(x) do { } while (0)
/*
 * Read-only string tables used for logging/diagnostic printfs below.
 * NOTE(review): this chunk is a partial listing -- several initializer
 * lines and closing "};" are not visible here; only comments were added.
 */
/* Reasons reported when the adapter halts (indexed by halt code). */
63 static const char * const pdq_halt_codes[] = {
64 "Selftest Timeout", "Host Bus Parity Error", "Host Directed Fault",
65 "Software Fault", "Hardware Fault", "PC Trace Path Test",
66 "DMA Error", "Image CRC Error", "Adapter Processor Error"
/* Adapter state names (indexed by PDQ_PSTS_ADAPTER_STATE() values). */
69 static const char * const pdq_adapter_states[] = {
70 "Reset", "Upgrade", "DMA Unavailable", "DMA Available",
71 "Link Available", "Link Unavailable", "Halted", "Ring Member"
75 * The following are used in conjunction with
/* Entity names for unsolicited events (indexed by event_entity). */
78 static const char * const pdq_entities[] = {
79 "Station", "Link", "Phy Port"
82 static const char * const pdq_station_events[] = {
87 static const char * const pdq_station_arguments[] = {
/* Per-entity event code strings; indexed by event_code.value. */
91 static const char * const pdq_link_events[] = {
94 "Block Check Error (CRC)",
99 "Receive Data Overrun",
102 "Ring Initialization Initiated",
103 "Ring Initialization Received",
104 "Ring Beacon Initiated",
105 "Duplicate Address Failure",
106 "Duplicate Token Detected",
110 "Directed Beacon Received",
113 static const char * const pdq_link_arguments[] = {
120 static const char * const pdq_phy_events[] = {
121 "LEM Error Monitor Reject",
122 "Elasticy Buffer Error",
123 "Link Confidence Test Reject"
126 static const char * const pdq_phy_arguments[] = {
/* Indirection tables: outer index is the entity, inner the code/arg. */
130 static const char * const * const pdq_event_arguments[] = {
131 pdq_station_arguments,
136 static const char * const * const pdq_event_codes[] = {
/* Station type names (indexed by status_chars_get.station_type). */
142 static const char * const pdq_station_types[] = {
143 "SAS", "DAC", "SAC", "NAC", "DAS"
146 static const char * const pdq_smt_versions[] = { "", "V6.2", "V7.2", "V7.3" };
/* One letter per PHY type; indexed by status_chars_get.phy_type[]. */
148 static const char pdq_phy_types[] = "ABSM";
/* PMD type names, split by pmd_type / 100 (0 vs 100 range). */
150 static const char * const pdq_pmd_types0[] = {
151 "ANSI Multi-Mode", "ANSI Single-Mode Type 1", "ANSI Single-Mode Type 2",
155 static const char * const pdq_pmd_types100[] = {
156 "Low Power", "Thin Wire", "Shielded Twisted Pair",
157 "Unshielded Twisted Pair"
160 static const char * const * const pdq_pmd_types[] = {
161 pdq_pmd_types0, pdq_pmd_types100
/* Board model names (indexed by pdq->pdq_type). */
164 static const char * const pdq_descriptions[] = {
/*
 * Print the controller description, FDDI hardware address, firmware and
 * module revisions, and per-port PHY/PMD types from a STATUS_CHARS_GET
 * response, then pass the response to the OS layer via
 * pdq_os_update_status().  NOTE(review): partial listing -- the return
 * type line, the pdq parameter line and some braces are not visible.
 */
169 pdq_print_fddi_chars(
171 const pdq_response_status_chars_get_t *rsp)
/* Lookup table for formatting the MAC address nibbles as hex. */
173 const char hexchars[] = "0123456789abcdef";
177 "DEC %s FDDI %s Controller\n",
179 pdq_descriptions[pdq->pdq_type],
180 pdq_station_types[rsp->status_chars_get.station_type]);
/* Format each of the 6 lanaddr bytes as two hex digits (hi/lo nibble). */
182 printf(PDQ_OS_PREFIX "FDDI address %c%c:%c%c:%c%c:%c%c:%c%c:%c%c, FW=%c%c%c%c, HW=%c",
184 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] >> 4],
185 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[0] & 0x0F],
186 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] >> 4],
187 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[1] & 0x0F],
188 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] >> 4],
189 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[2] & 0x0F],
190 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] >> 4],
191 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[3] & 0x0F],
192 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] >> 4],
193 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[4] & 0x0F],
194 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] >> 4],
195 hexchars[pdq->pdq_hwaddr.lanaddr_bytes[5] & 0x0F],
196 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
197 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3],
198 rsp->status_chars_get.module_rev.fwrev_bytes[0]);
/* Only print the SMT version if the id indexes into pdq_smt_versions. */
200 if (rsp->status_chars_get.smt_version_id < PDQ_ARRAY_SIZE(pdq_smt_versions)) {
201 printf(", SMT %s\n", pdq_smt_versions[rsp->status_chars_get.smt_version_id]);
/* Port A (only labelled "[A]" on dual-attach DAS stations). */
204 printf(PDQ_OS_PREFIX "FDDI Port%s = %c (PMD = %s)",
206 rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS ? "[A]" : "",
207 pdq_phy_types[rsp->status_chars_get.phy_type[0]],
208 pdq_pmd_types[rsp->status_chars_get.pmd_type[0] / 100][rsp->status_chars_get.pmd_type[0] % 100]);
/* DAS stations have a second port (B). */
210 if (rsp->status_chars_get.station_type == PDQ_STATION_TYPE_DAS)
211 printf(", FDDI Port[B] = %c (PMD = %s)",
212 pdq_phy_types[rsp->status_chars_get.phy_type[1]],
213 pdq_pmd_types[rsp->status_chars_get.pmd_type[1] / 100][rsp->status_chars_get.pmd_type[1] % 100]);
/* Let the OS-specific layer record the characteristics as well. */
217 pdq_os_update_status(pdq, rsp);
/*
 * Initialize the main CSR map: each control/status register lives at a
 * fixed multiple of csrsize from csr_base.  NOTE(review): partial
 * listing -- the function name/header line is not visible here
 * (presumably pdq_init_csrs; confirm against the full source).
 * Register index 9 is not assigned in the visible lines.
 */
224 pdq_bus_memaddr_t csr_base,
228 csrs->csr_base = csr_base;
229 csrs->csr_port_reset = PDQ_CSR_OFFSET(csr_base, 0 * csrsize);
230 csrs->csr_host_data = PDQ_CSR_OFFSET(csr_base, 1 * csrsize);
231 csrs->csr_port_control = PDQ_CSR_OFFSET(csr_base, 2 * csrsize);
232 csrs->csr_port_data_a = PDQ_CSR_OFFSET(csr_base, 3 * csrsize);
233 csrs->csr_port_data_b = PDQ_CSR_OFFSET(csr_base, 4 * csrsize);
234 csrs->csr_port_status = PDQ_CSR_OFFSET(csr_base, 5 * csrsize);
235 csrs->csr_host_int_type_0 = PDQ_CSR_OFFSET(csr_base, 6 * csrsize);
236 csrs->csr_host_int_enable = PDQ_CSR_OFFSET(csr_base, 7 * csrsize);
237 csrs->csr_type_2_producer = PDQ_CSR_OFFSET(csr_base, 8 * csrsize);
238 csrs->csr_cmd_response_producer = PDQ_CSR_OFFSET(csr_base, 10 * csrsize);
239 csrs->csr_cmd_request_producer = PDQ_CSR_OFFSET(csr_base, 11 * csrsize);
240 csrs->csr_host_smt_producer = PDQ_CSR_OFFSET(csr_base, 12 * csrsize);
241 csrs->csr_unsolicited_producer = PDQ_CSR_OFFSET(csr_base, 13 * csrsize);
/*
 * Initialize the PCI (PFI) specific CSR map; these registers start at
 * offset 16 * csrsize past the shared CSR block.  NOTE(review): partial
 * listing -- the function name/header line is not visible here.
 */
246 pdq_pci_csrs_t *csrs,
248 pdq_bus_memaddr_t csr_base,
252 csrs->csr_base = csr_base;
253 csrs->csr_pfi_mode_control = PDQ_CSR_OFFSET(csr_base, 16 * csrsize);
254 csrs->csr_pfi_status = PDQ_CSR_OFFSET(csr_base, 17 * csrsize);
255 csrs->csr_fifo_write = PDQ_CSR_OFFSET(csr_base, 18 * csrsize);
256 csrs->csr_fifo_read = PDQ_CSR_OFFSET(csr_base, 19 * csrsize);
/*
 * Drain a databuf queue, freeing every PDU back to the OS allocator.
 * NOTE(review): partial listing -- the loop construct around the
 * dequeue/free pair is not visible here.
 */
260 pdq_flush_databuf_queue(
262 pdq_databuf_queue_t *q)
264 PDQ_OS_DATABUF_T *pdu;
266 PDQ_OS_DATABUF_DEQUEUE(q, pdu);
269 PDQ_OS_DATABUF_FREE(pdq, pdu);
/*
 * Issue a port-control command and spin (bounded at 33,000,000
 * iterations) until the adapter acknowledges via the CMD_DONE
 * interrupt bit.  Returns PDQ_TRUE on success, PDQ_FALSE if the
 * adapter reported PDQ_PCTL_CMD_ERROR; falls through to the adapter
 * failure path on timeout.  NOTE(review): partial listing -- the
 * function header and the loop body are not visible here.
 */
275 const pdq_csrs_t * const csrs,
/* Ack any stale CMD_DONE, then kick off the command. */
279 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
280 PDQ_CSR_WRITE(csrs, csr_port_control, PDQ_PCTL_CMD_ERROR | cmd);
281 while ((PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) == 0 && cnt < 33000000)
283 PDQ_PRINTF(("CSR cmd spun %d times\n", cnt));
284 if (PDQ_CSR_READ(csrs, csr_host_int_type_0) & PDQ_HOST_INT_CSR_CMD_DONE) {
/* Write-one-to-clear the CMD_DONE bit before reporting the result. */
285 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_CSR_CMD_DONE);
286 return (PDQ_CSR_READ(csrs, csr_port_control) & PDQ_PCTL_CMD_ERROR) ? PDQ_FALSE : PDQ_TRUE;
288 /* adapter failure */
/*
 * Read the adapter's 6-byte hardware (MLA = "my long address") FDDI
 * address into *hwaddr.  Two MLA_READ port-control transactions are
 * needed: word 0 yields bytes 0-3, word 1 yields bytes 4-5, each
 * unpacked little-endian from csr_host_data.  NOTE(review): partial
 * listing -- the function header line is not visible here.
 */
295 const pdq_csrs_t * const csrs,
296 pdq_lanaddr_t *hwaddr)
/* Select word 0 of the MLA and fetch it. */
300 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
301 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
302 data = PDQ_CSR_READ(csrs, csr_host_data);
304 hwaddr->lanaddr_bytes[0] = (data >> 0) & 0xFF;
305 hwaddr->lanaddr_bytes[1] = (data >> 8) & 0xFF;
306 hwaddr->lanaddr_bytes[2] = (data >> 16) & 0xFF;
307 hwaddr->lanaddr_bytes[3] = (data >> 24) & 0xFF;
/* Select word 1 for the remaining two bytes. */
309 PDQ_CSR_WRITE(csrs, csr_port_data_a, 1);
310 pdq_do_port_control(csrs, PDQ_PCTL_MLA_READ);
311 data = PDQ_CSR_READ(csrs, csr_host_data);
313 hwaddr->lanaddr_bytes[4] = (data >> 0) & 0xFF;
314 hwaddr->lanaddr_bytes[5] = (data >> 8) & 0xFF;
/*
 * Read the 4-byte firmware revision; note the bytes are unpacked in
 * the opposite order to pdq_read_mla (byte 3 from the low bits).
 * NOTE(review): partial listing -- the function header line is not
 * visible here.
 */
319 const pdq_csrs_t * const csrs,
324 pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ);
325 data = PDQ_CSR_READ(csrs, csr_host_data);
327 fwrev->fwrev_bytes[3] = (data >> 0) & 0xFF;
328 fwrev->fwrev_bytes[2] = (data >> 8) & 0xFF;
329 fwrev->fwrev_bytes[1] = (data >> 16) & 0xFF;
330 fwrev->fwrev_bytes[0] = (data >> 24) & 0xFF;
/*
 * Pull the adapter's error log, one 32-bit word per FW_REV_READ port
 * control, into *log_entry until the structure is full or the adapter
 * stops supplying data.  Returns PDQ_TRUE if at least one word was
 * read.  NOTE(review): partial listing -- the function header line is
 * not visible here.
 */
336 pdq_response_error_log_get_t *log_entry)
338 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
339 pdq_uint32_t *ptr = (pdq_uint32_t *) log_entry;
341 pdq_do_port_control(csrs, PDQ_PCTL_ERROR_LOG_START);
343 while (pdq_do_port_control(csrs, PDQ_PCTL_FW_REV_READ) == PDQ_TRUE) {
344 *ptr++ = PDQ_CSR_READ(csrs, csr_host_data);
/* Stop once the destination structure has been filled. */
345 if ((pdq_uint8_t *) ptr - (pdq_uint8_t *) log_entry == sizeof(*log_entry))
348 return (ptr == (pdq_uint32_t *) log_entry) ? PDQ_FALSE : PDQ_TRUE;
/*
 * Query the PDQ chip revision via the PDQ_REV_GET sub-command and
 * return it.  NOTE(review): partial listing -- the function name line
 * is not visible here (presumably pdq_read_chiprev).
 */
351 static pdq_chip_rev_t
353 const pdq_csrs_t * const csrs)
357 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_PDQ_REV_GET);
358 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
359 data = PDQ_CSR_READ(csrs, csr_host_data);
361 return (pdq_chip_rev_t) data;
/*
 * Per-command table, indexed by pdq_cmd_code_t: request size, response
 * size and a printable name for each adapter command.  The sizes are
 * rounded up by pdq_queue_commands() before use.  NOTE(review):
 * partial listing -- the struct member declarations and several rows
 * are not visible; the two "{ 0, 0, ... }" rows after COUNTERS_SET
 * look like placeholders whose purpose cannot be confirmed from this
 * chunk (verify against the full source).
 */
364 static const struct {
367 const char *cmd_name;
369 { sizeof(pdq_cmd_generic_t), /* 0 - PDQC_START */
370 sizeof(pdq_response_generic_t),
373 { sizeof(pdq_cmd_filter_set_t), /* 1 - PDQC_FILTER_SET */
374 sizeof(pdq_response_generic_t),
377 { sizeof(pdq_cmd_generic_t), /* 2 - PDQC_FILTER_GET */
378 sizeof(pdq_response_filter_get_t),
381 { sizeof(pdq_cmd_chars_set_t), /* 3 - PDQC_CHARS_SET */
382 sizeof(pdq_response_generic_t),
385 { sizeof(pdq_cmd_generic_t), /* 4 - PDQC_STATUS_CHARS_GET */
386 sizeof(pdq_response_status_chars_get_t),
390 { sizeof(pdq_cmd_generic_t), /* 5 - PDQC_COUNTERS_GET */
391 sizeof(pdq_response_counters_get_t),
394 { sizeof(pdq_cmd_counters_set_t), /* 6 - PDQC_COUNTERS_SET */
395 sizeof(pdq_response_generic_t),
399 { 0, 0, "Counters Get" },
400 { 0, 0, "Counters Set" },
402 { sizeof(pdq_cmd_addr_filter_set_t), /* 7 - PDQC_ADDR_FILTER_SET */
403 sizeof(pdq_response_generic_t),
406 { sizeof(pdq_cmd_generic_t), /* 8 - PDQC_ADDR_FILTER_GET */
407 sizeof(pdq_response_addr_filter_get_t),
410 { sizeof(pdq_cmd_generic_t), /* 9 - PDQC_ERROR_LOG_CLEAR */
411 sizeof(pdq_response_generic_t),
414 { sizeof(pdq_cmd_generic_t), /* 10 - PDQC_ERROR_LOG_SET */
415 sizeof(pdq_response_generic_t),
418 { sizeof(pdq_cmd_generic_t), /* 11 - PDQC_FDDI_MIB_GET */
419 sizeof(pdq_response_generic_t),
422 { sizeof(pdq_cmd_generic_t), /* 12 - PDQC_DEC_EXT_MIB_GET */
423 sizeof(pdq_response_generic_t),
426 { sizeof(pdq_cmd_generic_t), /* 13 - PDQC_DEC_SPECIFIC_GET */
427 sizeof(pdq_response_generic_t),
430 { sizeof(pdq_cmd_generic_t), /* 14 - PDQC_SNMP_SET */
431 sizeof(pdq_response_generic_t),
435 { sizeof(pdq_cmd_generic_t), /* 16 - PDQC_SMT_MIB_GET */
436 sizeof(pdq_response_generic_t),
439 { sizeof(pdq_cmd_generic_t), /* 17 - PDQC_SMT_MIB_SET */
440 sizeof(pdq_response_generic_t),
443 { 0, 0, "Bogus CMD" },
/*
 * Queue the highest-numbered pending command (one at a time) to the
 * adapter: pick the command from ci_pending_commands, build its
 * request buffer, sync the request/response rings, and write the
 * producer CSRs to hand it to the PDQ.  NOTE(review): partial listing
 * -- the function header, switch statement line, several braces and
 * idx manipulations are not visible here; only comments were added.
 */
450 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
451 pdq_command_info_t * const ci = &pdq->pdq_command_info;
452 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
453 pdq_txdesc_t * const txd = &dbp->pdqdb_command_requests[ci->ci_request_producer];
455 pdq_uint32_t cmdlen, rsplen, mask;
458 * If there are commands or responses active or there aren't
459 * any pending commands, then don't queue any more.
461 if (ci->ci_command_active || ci->ci_pending_commands == 0)
465 * Determine which command needs to be queued.
/* Scan the pending bitmask from the highest opcode downward. */
467 op = PDQC_SMT_MIB_SET;
468 for (mask = 1 << ((int) op); (mask & ci->ci_pending_commands) == 0; mask >>= 1)
469 op = (pdq_cmd_code_t) ((int) op - 1);
471 * Obtain the sizes needed for the command and response.
472 * Round up to PDQ_CMD_RX_ALIGNMENT so the receive buffer is
473 * always properly aligned.
475 cmdlen = PDQ_ROUNDUP(pdq_cmd_info[op].cmd_len, PDQ_CMD_RX_ALIGNMENT);
476 rsplen = PDQ_ROUNDUP(pdq_cmd_info[op].rsp_len, PDQ_CMD_RX_ALIGNMENT);
480 * Since only one command at a time will be queued, there will always
485 * Obtain and fill in the descriptor for the command (descriptor is
488 txd->txd_seg_len = cmdlen;
491 * Clear the command area, set the opcode, and the command from the pending
495 ci->ci_queued_commands[ci->ci_request_producer] = op;
/* Debug aid: poison the response opcode so a stale one is detectable. */
496 #if defined(PDQVERBOSE)
497 ((pdq_response_generic_t *) ci->ci_response_bufstart)->generic_op = PDQC_BOGUS_CMD;
499 PDQ_OS_MEMZERO(ci->ci_request_bufstart, cmdlen);
500 *(pdq_cmd_code_t *) ci->ci_request_bufstart = op;
501 ci->ci_pending_commands &= ~mask;
504 * Fill in the command area, if needed.
/* FILTER_SET: program promiscuous/multicast/SMT filter items from flags. */
507 case PDQC_FILTER_SET: {
508 pdq_cmd_filter_set_t *filter_set = (pdq_cmd_filter_set_t *) ci->ci_request_bufstart;
510 filter_set->filter_set_items[idx].item_code = PDQI_IND_GROUP_PROM;
511 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PROMISC ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
513 filter_set->filter_set_items[idx].item_code = PDQI_GROUP_PROM;
514 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_ALLMULTI ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
516 filter_set->filter_set_items[idx].item_code = PDQI_SMT_PROM;
517 filter_set->filter_set_items[idx].filter_state = ((pdq->pdq_flags & (PDQ_PROMISC|PDQ_PASS_SMT)) == (PDQ_PROMISC|PDQ_PASS_SMT) ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
519 filter_set->filter_set_items[idx].item_code = PDQI_SMT_USER;
520 filter_set->filter_set_items[idx].filter_state = (pdq->pdq_flags & PDQ_PASS_SMT ? PDQ_FILTER_PASS : PDQ_FILTER_BLOCK);
522 filter_set->filter_set_items[idx].item_code = PDQI_EOL;
/* ADDR_FILTER_SET: broadcast address first, then OS-supplied addresses. */
525 case PDQC_ADDR_FILTER_SET: {
526 pdq_cmd_addr_filter_set_t *addr_filter_set = (pdq_cmd_addr_filter_set_t *) ci->ci_request_bufstart;
527 pdq_lanaddr_t *addr = addr_filter_set->addr_filter_set_addresses;
528 addr->lanaddr_bytes[0] = 0xFF;
529 addr->lanaddr_bytes[1] = 0xFF;
530 addr->lanaddr_bytes[2] = 0xFF;
531 addr->lanaddr_bytes[3] = 0xFF;
532 addr->lanaddr_bytes[4] = 0xFF;
533 addr->lanaddr_bytes[5] = 0xFF;
535 pdq_os_addr_fill(pdq, addr, 61);
/* SNMP_SET: request (1) or decline (2) full-duplex operation. */
538 case PDQC_SNMP_SET: {
539 pdq_cmd_snmp_set_t *snmp_set = (pdq_cmd_snmp_set_t *) ci->ci_request_bufstart;
541 snmp_set->snmp_set_items[idx].item_code = PDQSNMP_FULL_DUPLEX_ENABLE;
542 snmp_set->snmp_set_items[idx].item_value = (pdq->pdq_flags & PDQ_WANT_FDX ? 1 : 2);
543 snmp_set->snmp_set_items[idx].item_port = 0;
545 snmp_set->snmp_set_items[idx].item_code = PDQSNMP_EOL;
548 default: { /* to make gcc happy */
555 * Sync the command request buffer and descriptor, then advance
556 * the request producer index.
558 PDQ_OS_CMDRQST_PRESYNC(pdq, txd->txd_seg_len);
559 PDQ_OS_DESC_PRESYNC(pdq, txd, sizeof(pdq_txdesc_t));
560 PDQ_ADVANCE(ci->ci_request_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
563 * Sync the command response buffer and advance the response
564 * producer index (descriptor is already pre-initialized)
566 PDQ_OS_CMDRSP_PRESYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
567 PDQ_ADVANCE(ci->ci_response_producer, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
569 * At this point the command is done. All that needs to be done is to
570 * produce it to the PDQ.
572 PDQ_PRINTF(("PDQ Queue Command Request: %s queued\n",
573 pdq_cmd_info[op].cmd_name));
575 ci->ci_command_active++;
/* Producer CSRs carry the producer index in bits 0-7, completion in 8-15. */
576 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer, ci->ci_response_producer | (ci->ci_response_completion << 8));
577 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer, ci->ci_request_producer | (ci->ci_request_completion << 8));
/*
 * Process the response to the currently outstanding adapter command:
 * validate it, act on STATUS_CHARS_GET / DEC_EXT_MIB_GET results,
 * advance the completion indexes and, if more commands are pending,
 * queue the next one; otherwise update the producer CSRs directly.
 * NOTE(review): partial listing -- some braces/lines are not visible.
 */
581 pdq_process_command_responses(
584 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
585 pdq_command_info_t * const ci = &pdq->pdq_command_info;
586 volatile const pdq_consumer_block_t * const cbp = pdq->pdq_cbp;
587 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
588 const pdq_response_generic_t *rspgen;
591 * We have to process the command and response in tandem so
592 * just wait for the response to be consumed. If it has been
593 * consumed then the command must have been as well.
596 if (cbp->pdqcb_command_response == ci->ci_response_completion)
599 PDQ_ASSERT(cbp->pdqcb_command_request != ci->ci_request_completion);
601 PDQ_OS_CMDRSP_POSTSYNC(pdq, PDQ_SIZE_COMMAND_RESPONSE);
602 rspgen = (const pdq_response_generic_t *) ci->ci_response_bufstart;
603 PDQ_ASSERT(rspgen->generic_op == ci->ci_queued_commands[ci->ci_request_completion]);
604 PDQ_ASSERT(rspgen->generic_status == PDQR_SUCCESS);
605 PDQ_PRINTF(("PDQ Process Command Response: %s completed (status=%d [0x%x])\n",
606 pdq_cmd_info[rspgen->generic_op].cmd_name,
607 rspgen->generic_status, rspgen->generic_status));
/* Print characteristics once if requested, or refresh the FDX flag. */
609 if (rspgen->generic_op == PDQC_STATUS_CHARS_GET && (pdq->pdq_flags & PDQ_PRINTCHARS)) {
610 pdq->pdq_flags &= ~PDQ_PRINTCHARS;
611 pdq_print_fddi_chars(pdq, (const pdq_response_status_chars_get_t *) rspgen);
612 } else if (rspgen->generic_op == PDQC_DEC_EXT_MIB_GET) {
613 pdq->pdq_flags &= ~PDQ_IS_FDX;
614 if (((const pdq_response_dec_ext_mib_get_t *)rspgen)->dec_ext_mib_get.fdx_operational)
615 pdq->pdq_flags |= PDQ_IS_FDX;
618 PDQ_ADVANCE(ci->ci_request_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_requests));
619 PDQ_ADVANCE(ci->ci_response_completion, 1, PDQ_RING_MASK(dbp->pdqdb_command_responses));
620 ci->ci_command_active = 0;
622 if (ci->ci_pending_commands != 0) {
623 pdq_queue_commands(pdq);
625 PDQ_CSR_WRITE(csrs, csr_cmd_response_producer,
626 ci->ci_response_producer | (ci->ci_response_completion << 8));
627 PDQ_CSR_WRITE(csrs, csr_cmd_request_producer,
628 ci->ci_request_producer | (ci->ci_request_completion << 8));
633 * This following routine processes unsolicited events.
634 * In addition, it also fills the unsolicited queue with
635 * event buffers so it can be used to initialize the queue
639 pdq_process_unsolicited_events(
642 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
643 pdq_unsolicited_info_t *ui = &pdq->pdq_unsolicited_info;
644 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
645 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
648 * Process each unsolicited event (if any).
/* Consume events until our completion index catches the adapter's. */
651 while (cbp->pdqcb_unsolicited_event != ui->ui_completion) {
652 const pdq_unsolicited_event_t *event;
653 event = &ui->ui_events[ui->ui_completion & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
654 PDQ_OS_UNSOL_EVENT_POSTSYNC(pdq, event);
656 switch (event->event_type) {
657 case PDQ_UNSOLICITED_EVENT: {
/* Range-check the event code per entity before indexing the tables. */
659 switch (event->event_entity) {
660 case PDQ_ENTITY_STATION: {
661 bad_event = event->event_code.value >= PDQ_STATION_EVENT_MAX;
664 case PDQ_ENTITY_LINK: {
665 bad_event = event->event_code.value >= PDQ_LINK_EVENT_MAX;
668 case PDQ_ENTITY_PHY_PORT: {
669 bad_event = event->event_code.value >= PDQ_PHY_EVENT_MAX;
680 printf(PDQ_OS_PREFIX "Unsolicited Event: %s: %s",
682 pdq_entities[event->event_entity],
683 pdq_event_codes[event->event_entity][event->event_code.value]);
/* PHY events also identify which port they occurred on. */
684 if (event->event_entity == PDQ_ENTITY_PHY_PORT)
685 printf("[%d]", event->event_index);
689 case PDQ_UNSOLICITED_COUNTERS: {
693 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
694 PDQ_ADVANCE(ui->ui_completion, 1, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
699 * Now give back the event buffers back to the PDQ.
701 PDQ_ADVANCE(ui->ui_producer, ui->ui_free, PDQ_RING_MASK(dbp->pdqdb_unsolicited_events));
704 PDQ_CSR_WRITE(csrs, csr_unsolicited_producer,
705 ui->ui_producer | (ui->ui_completion << 8));
/*
 * Walk the receive ring from rx_completion up to completion_goal,
 * validating each PDU's status word and frame control byte, chaining
 * the segment databufs into one packet and passing good packets to
 * pdq_os_receive_pdu().  Bad/discarded PDUs are recycled directly back
 * onto the ring.  Finally, replenish the ring with fresh databufs up
 * to rx_target.  NOTE(review): partial listing -- the function header,
 * several braces and goto/continue lines are not visible; only
 * comments were added.
 */
709 pdq_process_received_data(
712 pdq_rxdesc_t *receives,
713 pdq_uint32_t completion_goal,
714 pdq_uint32_t ring_mask)
716 pdq_uint32_t completion = rx->rx_completion;
717 pdq_uint32_t producer = rx->rx_producer;
718 PDQ_OS_DATABUF_T **buffers = (PDQ_OS_DATABUF_T **) rx->rx_buffers;
722 while (completion != completion_goal) {
723 PDQ_OS_DATABUF_T *fpdu, *lpdu, *npdu;
724 pdq_uint8_t *dataptr;
725 pdq_uint32_t fc, datalen, pdulen, segcnt;
726 pdq_rxstatus_t status;
/* fpdu = first segment; the RX status word lives in its first 4 bytes. */
728 fpdu = lpdu = buffers[completion];
729 PDQ_ASSERT(fpdu != NULL);
730 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, 0, sizeof(u_int32_t));
731 dataptr = PDQ_OS_DATABUF_PTR(fpdu);
732 status = *(pdq_rxstatus_t *) dataptr;
733 if (status.rxs_rcc_badpdu == 0) {
734 datalen = status.rxs_len;
735 PDQ_OS_RXPDU_POSTSYNC(pdq, fpdu, sizeof(u_int32_t),
736 PDQ_RX_FC_OFFSET + 1 - sizeof(u_int32_t));
/* Classify the frame by its FC byte; length limits differ for LLC vs SMT. */
737 fc = dataptr[PDQ_RX_FC_OFFSET];
738 switch (fc & (PDQ_FDDIFC_C|PDQ_FDDIFC_L|PDQ_FDDIFC_F)) {
739 case PDQ_FDDI_LLC_ASYNC:
740 case PDQ_FDDI_LLC_SYNC:
741 case PDQ_FDDI_IMP_ASYNC:
742 case PDQ_FDDI_IMP_SYNC: {
743 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_LLC_MIN) {
744 PDQ_PRINTF(("discard: bad length %d\n", datalen));
750 if (datalen > PDQ_FDDI_MAX || datalen < PDQ_FDDI_SMT_MIN)
755 PDQ_PRINTF(("discard: bad fc 0x%x\n", fc));
760 * Update the lengths of the data buffers now that we know
/* pdulen excludes the trailing 4-byte CRC. */
763 pdulen = datalen + (PDQ_RX_FC_OFFSET - PDQ_OS_HDR_OFFSET) - 4 /* CRC */;
764 segcnt = (pdulen + PDQ_OS_HDR_OFFSET + PDQ_OS_DATABUF_SIZE - 1) / PDQ_OS_DATABUF_SIZE;
/* Allocate replacement databufs so the ring slots can be refilled. */
765 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
767 PDQ_PRINTF(("discard: no databuf #0\n"));
770 buffers[completion] = npdu;
771 for (idx = 1; idx < segcnt; idx++) {
772 PDQ_OS_DATABUF_ALLOC(pdq, npdu);
/* Allocation failure mid-chain: unlink and drop the partial packet. */
774 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
775 PDQ_OS_DATABUF_FREE(pdq, fpdu);
778 PDQ_OS_DATABUF_NEXT_SET(lpdu, buffers[(completion + idx) & ring_mask]);
779 lpdu = PDQ_OS_DATABUF_NEXT(lpdu);
780 buffers[(completion + idx) & ring_mask] = npdu;
782 PDQ_OS_DATABUF_NEXT_SET(lpdu, NULL);
/* Move the (replacement) buffers to the producer side of the ring. */
783 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
784 buffers[(producer + idx) & ring_mask] =
785 buffers[(completion + idx) & ring_mask];
786 buffers[(completion + idx) & ring_mask] = NULL;
788 PDQ_OS_DATABUF_ADJ(fpdu, PDQ_OS_HDR_OFFSET);
790 PDQ_OS_DATABUF_LEN_SET(fpdu, pdulen);
792 PDQ_OS_DATABUF_LEN_SET(lpdu, pdulen + PDQ_OS_HDR_OFFSET - (segcnt - 1) * PDQ_OS_DATABUF_SIZE);
795 * Do not pass to protocol if packet was received promiscuously
797 pdq_os_receive_pdu(pdq, fpdu, pdulen,
798 status.rxs_rcc_dd < PDQ_RXS_RCC_DD_CAM_MATCH);
799 rx->rx_free += PDQ_RX_SEGCNT;
800 PDQ_ADVANCE(producer, PDQ_RX_SEGCNT, ring_mask);
801 PDQ_ADVANCE(completion, PDQ_RX_SEGCNT, ring_mask);
/* Bad-PDU path: log details, then recycle the buffers onto the ring. */
804 PDQ_PRINTF(("discard: bad pdu 0x%x(%d.%d.%d.%d.%d)\n", status.rxs_status,
805 status.rxs_rcc_badpdu, status.rxs_rcc_badcrc,
806 status.rxs_rcc_reason, status.rxs_fsc, status.rxs_fsb_e));
807 if (status.rxs_rcc_reason == 7)
809 if (status.rxs_rcc_reason != 0) {
811 if (status.rxs_rcc_badcrc) {
812 printf(PDQ_OS_PREFIX " MAC CRC error (source=%x-%x-%x-%x-%x-%x)\n",
814 dataptr[PDQ_RX_FC_OFFSET+1],
815 dataptr[PDQ_RX_FC_OFFSET+2],
816 dataptr[PDQ_RX_FC_OFFSET+3],
817 dataptr[PDQ_RX_FC_OFFSET+4],
818 dataptr[PDQ_RX_FC_OFFSET+5],
819 dataptr[PDQ_RX_FC_OFFSET+6]);
820 /* rx->rx_badcrc++; */
821 } else if (status.rxs_fsc == 0 || status.rxs_fsb_e == 1) {
822 /* rx->rx_frame_status_errors++; */
830 * Discarded frames go right back on the queue; therefore
831 * ring entries were freed.
833 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
834 buffers[producer] = buffers[completion];
835 buffers[completion] = NULL;
836 rxd = &receives[rx->rx_producer];
/* SOP descriptor carries the segment count; continuation descs carry 0. */
838 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
840 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
843 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
844 rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, buffers[rx->rx_producer]);
845 PDQ_OS_RXPDU_PRESYNC(pdq, buffers[rx->rx_producer], 0, PDQ_OS_DATABUF_SIZE);
846 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
847 PDQ_ADVANCE(rx->rx_producer, 1, ring_mask);
848 PDQ_ADVANCE(producer, 1, ring_mask);
849 PDQ_ADVANCE(completion, 1, ring_mask);
852 rx->rx_completion = completion;
/* Replenish: build fresh PDQ_RX_SEGCNT-segment entries until rx_target. */
854 while (rx->rx_free > PDQ_RX_SEGCNT && rx->rx_free > rx->rx_target) {
855 PDQ_OS_DATABUF_T *pdu;
857 * Allocate the needed number of data buffers.
858 * Try to obtain them from our free queue before
859 * asking the system for more.
861 for (idx = 0; idx < PDQ_RX_SEGCNT; idx++) {
862 if ((pdu = buffers[(rx->rx_producer + idx) & ring_mask]) == NULL) {
863 PDQ_OS_DATABUF_ALLOC(pdq, pdu);
866 buffers[(rx->rx_producer + idx) & ring_mask] = pdu;
868 rxd = &receives[(rx->rx_producer + idx) & ring_mask];
870 rxd->rxd_sop = 1; rxd->rxd_seg_cnt = PDQ_RX_SEGCNT - 1;
872 rxd->rxd_sop = 0; rxd->rxd_seg_cnt = 0;
875 rxd->rxd_seg_len_hi = PDQ_OS_DATABUF_SIZE / 16;
876 rxd->rxd_pa_lo = PDQ_OS_DATABUF_BUSPA(pdq, pdu);
877 PDQ_OS_RXPDU_PRESYNC(pdq, pdu, 0, PDQ_OS_DATABUF_SIZE);
878 PDQ_OS_DESC_PRESYNC(pdq, rxd, sizeof(*rxd));
880 if (idx < PDQ_RX_SEGCNT) {
882 * We didn't get all databufs required to complete a new
883 * receive buffer. Keep the ones we got and retry a bit
884 * later for the rest.
888 PDQ_ADVANCE(rx->rx_producer, PDQ_RX_SEGCNT, ring_mask);
889 rx->rx_free -= PDQ_RX_SEGCNT;
893 static void pdq_process_transmitted_data(pdq_t *pdq);
/*
 * Queue one outbound PDU on the transmit ring.  Optionally prepends
 * the canned header descriptor (tx_hdrdesc), then maps each databuf
 * segment into transmit descriptors -- via bus_dma segments when
 * PDQ_BUS_DMA is defined, otherwise by splitting each databuf at page
 * boundaries.  On ring exhaustion it enables the TX interrupt and
 * (implicitly) signals the caller to requeue.  NOTE(review): partial
 * listing -- the function header, return statements and several
 * braces are not visible; only comments were added.
 */
896 pdq_queue_transmit_data(
898 PDQ_OS_DATABUF_T *pdu)
900 pdq_tx_info_t * const tx = &pdq->pdq_tx_info;
901 pdq_descriptor_block_t * const dbp = pdq->pdq_dbp;
902 pdq_uint32_t producer = tx->tx_producer;
903 pdq_txdesc_t *eop = NULL;
904 PDQ_OS_DATABUF_T *pdu0;
905 pdq_uint32_t freecnt;
906 #if defined(PDQ_BUS_DMA)
/* Reserve one descriptor for the header when it is sent separately. */
911 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
912 freecnt = tx->tx_free - 1;
914 freecnt = tx->tx_free;
917 * Need 2 or more descriptors to be able to send.
/* Ring too full: arm the TX-done interrupt so we get restarted. */
920 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
921 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
925 if (PDQ_RX_FC_OFFSET == PDQ_OS_HDR_OFFSET) {
926 dbp->pdqdb_transmits[producer] = tx->tx_hdrdesc;
927 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[producer], sizeof(pdq_txdesc_t));
928 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
931 #if defined(PDQ_BUS_DMA)
/* bus_dma path: one descriptor per DMA segment of the mapped PDU. */
932 map = M_GETCTX(pdu, bus_dmamap_t);
933 if (freecnt >= map->dm_nsegs) {
935 for (idx = 0; idx < map->dm_nsegs; idx++) {
937 * Initialize the transmit descriptor
939 eop = &dbp->pdqdb_transmits[producer];
940 eop->txd_seg_len = map->dm_segs[idx].ds_len;
941 eop->txd_pa_lo = map->dm_segs[idx].ds_addr;
942 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
943 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
945 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
/* Non-bus_dma path: walk the databuf chain, splitting at page edges. */
952 for (freecnt = tx->tx_free - 1, pdu0 = pdu; pdu0 != NULL && freecnt > 0;) {
953 pdq_uint32_t fraglen, datalen = PDQ_OS_DATABUF_LEN(pdu0);
954 const pdq_uint8_t *dataptr = PDQ_OS_DATABUF_PTR(pdu0);
957 * The first segment is limited to the space remaining in
958 * page. All segments after that can be up to a full page
961 fraglen = PDQ_OS_PAGESIZE - ((dataptr - (pdq_uint8_t *) NULL) & (PDQ_OS_PAGESIZE-1));
962 while (datalen > 0 && freecnt > 0) {
963 pdq_uint32_t seglen = (fraglen < datalen ? fraglen : datalen);
966 * Initialize the transmit descriptor
968 eop = &dbp->pdqdb_transmits[producer];
969 eop->txd_seg_len = seglen;
970 eop->txd_pa_lo = PDQ_OS_VA_TO_BUSPA(pdq, dataptr);
971 eop->txd_sop = eop->txd_eop = eop->txd_pa_hi = 0;
972 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
975 fraglen = PDQ_OS_PAGESIZE;
977 PDQ_ADVANCE(producer, 1, PDQ_RING_MASK(dbp->pdqdb_transmits));
979 pdu0 = PDQ_OS_DATABUF_NEXT(pdu0);
981 #endif /* defined(PDQ_BUS_DMA) */
/* Ran out of descriptors: reap completed transmits and retry once. */
983 unsigned completion = tx->tx_completion;
984 PDQ_ASSERT(freecnt == 0);
985 PDQ_OS_CONSUMER_POSTSYNC(pdq);
986 pdq_process_transmitted_data(pdq);
987 if (completion != tx->tx_completion) {
988 producer = tx->tx_producer;
993 * If we still have data to process then the ring was too full
994 * to store the PDU. Return FALSE so the caller will requeue
997 pdq->pdq_intrmask |= PDQ_HOST_INT_TX_ENABLE;
998 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1002 * Everything went fine. Finish it up.
1004 tx->tx_descriptor_count[tx->tx_producer] = tx->tx_free - freecnt;
1005 if (PDQ_RX_FC_OFFSET != PDQ_OS_HDR_OFFSET) {
1006 dbp->pdqdb_transmits[tx->tx_producer].txd_sop = 1;
1007 PDQ_OS_DESC_PRESYNC(pdq, &dbp->pdqdb_transmits[tx->tx_producer],
1008 sizeof(pdq_txdesc_t));
1011 PDQ_OS_DESC_PRESYNC(pdq, eop, sizeof(pdq_txdesc_t));
/* Keep the PDU until its transmit completes, then commit the ring state. */
1012 PDQ_OS_DATABUF_ENQUEUE(&tx->tx_txq, pdu);
1013 tx->tx_producer = producer;
1014 tx->tx_free = freecnt;
1015 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Reap completed transmits: for each PDU the adapter has consumed,
 * return it to the OS via pdq_os_transmit_done(), reclaim its ring
 * descriptors, and when anything was reclaimed, disable the TX-done
 * interrupt and restart the OS transmitter.  NOTE(review): partial
 * listing -- some braces and the trailing lines are not visible.
 */
1020 pdq_process_transmitted_data(
1023 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1024 volatile const pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1025 pdq_descriptor_block_t *dbp = pdq->pdq_dbp;
1026 pdq_uint32_t completion = tx->tx_completion;
1029 while (completion != cbp->pdqcb_transmits) {
1030 PDQ_OS_DATABUF_T *pdu;
1031 pdq_uint32_t descriptor_count = tx->tx_descriptor_count[completion];
/* Sanity: entry must start with SOP and end with EOP descriptors. */
1032 PDQ_ASSERT(dbp->pdqdb_transmits[completion].txd_sop == 1);
1033 PDQ_ASSERT(dbp->pdqdb_transmits[(completion + descriptor_count - 1) & PDQ_RING_MASK(dbp->pdqdb_transmits)].txd_eop == 1);
1034 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1035 pdq_os_transmit_done(pdq, pdu);
1036 tx->tx_free += descriptor_count;
1038 PDQ_ADVANCE(completion, descriptor_count, PDQ_RING_MASK(dbp->pdqdb_transmits));
1040 if (tx->tx_completion != completion) {
1041 tx->tx_completion = completion;
/* Descriptors were freed; TX interrupt no longer needed. */
1042 pdq->pdq_intrmask &= ~PDQ_HOST_INT_TX_ENABLE;
1043 PDQ_CSR_WRITE(&pdq->pdq_csrs, csr_host_int_enable, pdq->pdq_intrmask);
1044 pdq_os_restart_transmitter(pdq);
1047 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Abort all queued transmits: free every PDU still on tx_txq (without
 * calling pdq_os_transmit_done, since nothing was sent), reset the
 * ring free count, and resynchronize completion/consumer indexes to
 * the producer.  NOTE(review): partial listing -- the loop construct
 * around the dequeue is not visible here.
 */
1051 pdq_flush_transmitter(
1054 volatile pdq_consumer_block_t *cbp = pdq->pdq_cbp;
1055 pdq_tx_info_t *tx = &pdq->pdq_tx_info;
1058 PDQ_OS_DATABUF_T *pdu;
1059 PDQ_OS_DATABUF_DEQUEUE(&tx->tx_txq, pdu);
1063 * Don't call transmit done since the packet never made it
1066 PDQ_OS_DATABUF_FREE(pdq, pdu);
1069 tx->tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1070 cbp->pdqcb_transmits = tx->tx_completion = tx->tx_producer;
1071 PDQ_OS_CONSUMER_PRESYNC(pdq);
1073 PDQ_DO_TYPE2_PRODUCER(pdq);
/*
 * Hardware-reset the adapter (skipping selftest) and poll up to
 * 100,000 x 1ms for it to reach the DMA_UNAVAILABLE state; returns
 * early if it is already there.  NOTE(review): partial listing -- the
 * function name/header line is not visible here (presumably
 * pdq_reset; confirm against the full source).
 */
1080 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1084 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1085 if (state == PDQS_DMA_UNAVAILABLE)
/* Pulse the reset line with selftest skipped (saves boot time). */
1087 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_PRESET_SKIP_SELFTEST);
1088 PDQ_CSR_WRITE(csrs, csr_port_reset, 1);
1089 PDQ_OS_USEC_DELAY(100);
1090 PDQ_CSR_WRITE(csrs, csr_port_reset, 0);
1091 for (cnt = 100000;;cnt--) {
1092 PDQ_OS_USEC_DELAY(1000);
1093 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1094 if (state == PDQS_DMA_UNAVAILABLE || cnt == 0)
1097 PDQ_PRINTF(("PDQ Reset spun %d cycles\n", 100000 - cnt));
1098 PDQ_OS_USEC_DELAY(10000);
1099 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1100 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1101 PDQ_ASSERT(cnt > 0);
/*
 * NOTE(review): elided extract — some original lines (function header,
 * braces, break/return statements, parts of comment blocks) are missing
 * from this span.  Only comments were added/improved; all code tokens
 * are untouched.
 *
 * pdq_stop (fragment): force the adapter down to DMA_UNAVAILABLE,
 * release all host buffers, zero every ring index, then re-enable DMA
 * and hand the adapter the physical addresses of the consumer and
 * descriptor blocks.
 */
1105 * The following routine brings the PDQ from whatever state it is
1106 * in to DMA_UNAVAILABLE (ie. like a RESET but without doing a RESET).
1113 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
1114 int cnt, pass = 0, idx;
1115 PDQ_OS_DATABUF_T **buffers;
/* If not already in DMA_UNAVAILABLE, (re)reset and re-check. */
1118 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1119 if (state != PDQS_DMA_UNAVAILABLE) {
1121 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1122 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
/* On-ring / link states: first issue a LINK_UNINIT sub-command ... */
1126 case PDQS_RING_MEMBER:
1127 case PDQS_LINK_UNAVAILABLE:
1128 case PDQS_LINK_AVAILABLE: {
1129 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_LINK_UNINIT);
1130 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1131 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1132 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1133 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
/* ... then uninitialize DMA as well, reaching DMA_UNAVAILABLE. */
1136 case PDQS_DMA_AVAILABLE: {
1137 PDQ_CSR_WRITE(csrs, csr_port_data_a, 0);
1138 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1139 pdq_do_port_control(csrs, PDQ_PCTL_DMA_UNINIT);
1140 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1141 PDQ_ASSERT(state == PDQS_DMA_UNAVAILABLE);
1144 case PDQS_DMA_UNAVAILABLE: {
1150 * Now we should be in DMA_UNAVAILABLE. So bring the PDQ into
1155 * Obtain the hardware address and firmware revisions
1156 * (MLA = my long address which is FDDI speak for hardware address)
1158 pdq_read_mla(&pdq->pdq_csrs, &pdq->pdq_hwaddr);
1159 pdq_read_fwrev(&pdq->pdq_csrs, &pdq->pdq_fwrev);
1160 pdq->pdq_chip_rev = pdq_read_chiprev(&pdq->pdq_csrs);
1163 * Disable interrupts and DMA.
/* PFI (PCI interface) CSRs: mode 0 turns everything off; writing 0x10
 * to the status register clears latched status bits. */
1165 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control, 0);
1166 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x10);
1169 * Flush all the databuf queues.
1171 pdq_flush_databuf_queue(pdq, &pdq->pdq_tx_info.tx_txq);
1172 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
/* Free every receive buffer still parked in the rx ring. */
1173 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_rx_info.rx_buffers;
1174 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_receives); idx++) {
1175 if (buffers[idx] != NULL) {
1176 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1177 buffers[idx] = NULL;
1180 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives);
/* Likewise for the host SMT receive ring. */
1181 buffers = (PDQ_OS_DATABUF_T **) pdq->pdq_host_smt_info.rx_buffers;
1182 for (idx = 0; idx < PDQ_RING_SIZE(pdq->pdq_dbp->pdqdb_host_smt); idx++) {
1183 if (buffers[idx] != NULL) {
1184 PDQ_OS_DATABUF_FREE(pdq, buffers[idx]);
1185 buffers[idx] = NULL;
1188 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt);
1191 * Reset the consumer indexes to 0.
1193 pdq->pdq_cbp->pdqcb_receives = 0;
1194 pdq->pdq_cbp->pdqcb_transmits = 0;
1195 pdq->pdq_cbp->pdqcb_host_smt = 0;
1196 pdq->pdq_cbp->pdqcb_unsolicited_event = 0;
1197 pdq->pdq_cbp->pdqcb_command_response = 0;
1198 pdq->pdq_cbp->pdqcb_command_request = 0;
/* Push the zeroed consumer block to memory before the adapter reads it. */
1199 PDQ_OS_CONSUMER_PRESYNC(pdq);
1202 * Reset the producer and completion indexes to 0.
1204 pdq->pdq_command_info.ci_request_producer = 0;
1205 pdq->pdq_command_info.ci_response_producer = 0;
1206 pdq->pdq_command_info.ci_request_completion = 0;
1207 pdq->pdq_command_info.ci_response_completion = 0;
1208 pdq->pdq_unsolicited_info.ui_producer = 0;
1209 pdq->pdq_unsolicited_info.ui_completion = 0;
1210 pdq->pdq_rx_info.rx_producer = 0;
1211 pdq->pdq_rx_info.rx_completion = 0;
1212 pdq->pdq_tx_info.tx_producer = 0;
1213 pdq->pdq_tx_info.tx_completion = 0;
1214 pdq->pdq_host_smt_info.rx_producer = 0;
1215 pdq->pdq_host_smt_info.rx_completion = 0;
1217 pdq->pdq_command_info.ci_command_active = 0;
1218 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1219 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_transmits);
1222 * Allow the DEFPA to do DMA. Then program the physical
1223 * addresses of the consumer and descriptor blocks.
1226 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1227 PDQ_PFI_MODE_DMA_ENABLE);
/* NOTE(review): two mode_control writes appear back-to-back here; the
 * guarding conditionals around them were lost in extraction — confirm
 * against the full source before relying on this ordering. */
1229 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_mode_control,
1230 PDQ_PFI_MODE_DMA_ENABLE
1231 /*|PDQ_PFI_MODE_PFI_PCI_INTR*/|PDQ_PFI_MODE_PDQ_PCI_INTR);
1235 * Make sure the unsolicited queue has events ...
1237 pdq_process_unsolicited_events(pdq);
/* Program the DMA burst size (8 longwords) via a port sub-command. */
1239 PDQ_CSR_WRITE(csrs, csr_port_data_b, PDQ_DMA_BURST_8LW);
1240 PDQ_CSR_WRITE(csrs, csr_port_data_a, PDQ_SUB_CMD_DMA_BURST_SIZE_SET);
1241 pdq_do_port_control(csrs, PDQ_PCTL_SUB_CMD);
1244 * Make sure there isn't stale information in the caches before
1245 * telling the adapter about the blocks it's going to use.
1247 PDQ_OS_CONSUMER_PRESYNC(pdq);
/* Hand the adapter the consumer block's physical address. */
1249 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1250 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_consumer_block);
1251 pdq_do_port_control(csrs, PDQ_PCTL_CONSUMER_BLOCK);
/* Hand over the descriptor block address, with byte-swap flags chosen
 * by the host's byte order. */
1253 PDQ_CSR_WRITE(csrs, csr_port_data_b, 0);
1254 #if !defined(BYTE_ORDER) || BYTE_ORDER == LITTLE_ENDIAN
1255 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA);
1257 PDQ_CSR_WRITE(csrs, csr_port_data_a, pdq->pdq_pa_descriptor_block | PDQ_DMA_INIT_LW_BSWAP_DATA | PDQ_DMA_INIT_LW_BSWAP_LITERAL);
1259 pdq_do_port_control(csrs, PDQ_PCTL_DMA_INIT);
/* Wait (1000 spins x 1ms) for DMA_INIT to take us to DMA_AVAILABLE,
 * bailing out of the loop early if the adapter halts. */
1261 for (cnt = 0; cnt < 1000; cnt++) {
1262 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1263 if (state == PDQS_HALTED) {
1269 if (state == PDQS_DMA_AVAILABLE) {
1270 PDQ_PRINTF(("Transition to DMA Available took %d spins\n", cnt));
1273 PDQ_OS_USEC_DELAY(1000);
1275 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
/* Ack any stale type-0 interrupts; leave host interrupts disabled
 * (intrmask 0) until the adapter is actually run. */
1277 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
1278 pdq->pdq_intrmask = 0;
1279 /* PDQ_HOST_INT_STATE_CHANGE
1280 |PDQ_HOST_INT_FATAL_ERROR|PDQ_HOST_INT_CMD_RSP_ENABLE
1281 |PDQ_HOST_INT_UNSOL_ENABLE */
1282 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1285 * Any other command but START should be valid.
1287 pdq->pdq_command_info.ci_pending_commands &= ~(PDQ_BITMASK(PDQC_START));
1288 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1289 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1290 pdq_queue_commands(pdq);
1292 if (pdq->pdq_flags & PDQ_PRINTCHARS) {
1294 * Now wait (up to 100ms) for the command(s) to finish.
1296 for (cnt = 0; cnt < 1000; cnt++) {
1297 PDQ_OS_CONSUMER_POSTSYNC(pdq);
1298 pdq_process_command_responses(pdq);
1299 if (pdq->pdq_command_info.ci_response_producer == pdq->pdq_command_info.ci_response_completion)
1301 PDQ_OS_USEC_DELAY(1000);
/* Re-read and hand back the final adapter state. */
1303 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
/*
 * NOTE(review): elided extract — the function header, closing braces
 * and break statements are missing from this span.  Comments only;
 * all code tokens are untouched.
 *
 * pdq_run (fragment): per-state bring-up.  From DMA_AVAILABLE the
 * adapter gets its interrupt mask, primed receive rings, and a command
 * set that includes START; from the LINK_* states only the filter/SNMP
 * commands are queued (no START).
 */
1313 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
/* This routine must not be entered in any of these states. */
1316 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1317 PDQ_ASSERT(state != PDQS_DMA_UNAVAILABLE);
1318 PDQ_ASSERT(state != PDQS_RESET);
1319 PDQ_ASSERT(state != PDQS_HALTED);
1320 PDQ_ASSERT(state != PDQS_UPGRADE);
1321 PDQ_ASSERT(state != PDQS_RING_MEMBER);
1323 case PDQS_DMA_AVAILABLE: {
1325 * The PDQ after being reset screws up some of its state.
1326 * So we need to clear all the errors/interrupts so the real
1327 * ones will get through.
1329 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, 0xFF);
/* Enable the full working interrupt set. */
1330 pdq->pdq_intrmask = PDQ_HOST_INT_STATE_CHANGE
1331 |PDQ_HOST_INT_XMT_DATA_FLUSH|PDQ_HOST_INT_FATAL_ERROR
1332 |PDQ_HOST_INT_CMD_RSP_ENABLE|PDQ_HOST_INT_UNSOL_ENABLE
1333 |PDQ_HOST_INT_RX_ENABLE|PDQ_HOST_INT_HOST_SMT_ENABLE;
1334 PDQ_CSR_WRITE(csrs, csr_host_int_enable, pdq->pdq_intrmask);
1336 * Set the MAC and address filters and start up the PDQ.
1338 pdq_process_unsolicited_events(pdq);
/* Refill the receive ring and advance the type-2 producer CSR. */
1339 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1340 pdq->pdq_dbp->pdqdb_receives,
1341 pdq->pdq_cbp->pdqcb_receives,
1342 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1343 PDQ_DO_TYPE2_PRODUCER(pdq);
/* If the OS wants SMT frames, prime the host SMT ring too. */
1344 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1345 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1346 pdq->pdq_dbp->pdqdb_host_smt,
1347 pdq->pdq_cbp->pdqcb_host_smt,
1348 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1349 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1350 pdq->pdq_host_smt_info.rx_producer
1351 | (pdq->pdq_host_smt_info.rx_completion << 8));
/* Queue filter/SNMP setup plus START to put the adapter on the ring. */
1353 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1354 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1355 | PDQ_BITMASK(PDQC_SNMP_SET)
1356 | PDQ_BITMASK(PDQC_START);
1357 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1358 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1359 pdq_queue_commands(pdq);
/* Link already up/coming up: (re)apply filters only — no START. */
1362 case PDQS_LINK_UNAVAILABLE:
1363 case PDQS_LINK_AVAILABLE: {
1364 pdq->pdq_command_info.ci_pending_commands = PDQ_BITMASK(PDQC_FILTER_SET)
1365 | PDQ_BITMASK(PDQC_ADDR_FILTER_SET)
1366 | PDQ_BITMASK(PDQC_SNMP_SET);
1367 if (pdq->pdq_flags & PDQ_PRINTCHARS)
1368 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_STATUS_CHARS_GET);
1369 if (pdq->pdq_flags & PDQ_PASS_SMT) {
1370 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1371 pdq->pdq_dbp->pdqdb_host_smt,
1372 pdq->pdq_cbp->pdqcb_host_smt,
1373 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1374 PDQ_CSR_WRITE(csrs, csr_host_smt_producer,
1375 pdq->pdq_host_smt_info.rx_producer
1376 | (pdq->pdq_host_smt_info.rx_completion << 8));
1378 pdq_process_unsolicited_events(pdq);
1379 pdq_queue_commands(pdq);
/* RING_MEMBER is excluded by the asserts above; default placates gcc. */
1382 case PDQS_RING_MEMBER: {
1384 default: { /* to make gcc happy */
/*
 * NOTE(review): elided extract — the function header, several closing
 * braces and some statements are missing from this span.  Comments
 * only; all code tokens are untouched.
 *
 * Interrupt service (fragment): loop while the port status CSR shows a
 * pending interrupt, dispatching receive, host-SMT, transmit-done,
 * unsolicited-event, command-response, and type-0 (state change /
 * fatal / flush) conditions in turn.
 */
1394 const pdq_csrs_t * const csrs = &pdq->pdq_csrs;
/* Clear latched PFI status bits (0x18) before sampling port status. */
1398 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1400 while ((data = PDQ_CSR_READ(csrs, csr_port_status)) & PDQ_PSTS_INTR_PENDING) {
1402 PDQ_PRINTF(("PDQ Interrupt: Status = 0x%08x\n", data));
/* Pull the adapter-written consumer block into host view first. */
1403 PDQ_OS_CONSUMER_POSTSYNC(pdq);
/* Normal receive traffic: drain the rx ring, then refill its producer. */
1404 if (data & PDQ_PSTS_RCV_DATA_PENDING) {
1405 pdq_process_received_data(pdq, &pdq->pdq_rx_info,
1406 pdq->pdq_dbp->pdqdb_receives,
1407 pdq->pdq_cbp->pdqcb_receives,
1408 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_receives));
1409 PDQ_DO_TYPE2_PRODUCER(pdq);
/* Host SMT frames, handled identically on their own ring. */
1411 if (data & PDQ_PSTS_HOST_SMT_PENDING) {
1412 pdq_process_received_data(pdq, &pdq->pdq_host_smt_info,
1413 pdq->pdq_dbp->pdqdb_host_smt,
1414 pdq->pdq_cbp->pdqcb_host_smt,
1415 PDQ_RING_MASK(pdq->pdq_dbp->pdqdb_host_smt));
1416 PDQ_DO_HOST_SMT_PRODUCER(pdq);
/* Transmit completions are reaped unconditionally each pass. */
1418 /* if (data & PDQ_PSTS_XMT_DATA_PENDING) */
1419 pdq_process_transmitted_data(pdq);
1420 if (data & PDQ_PSTS_UNSOL_PENDING)
1421 pdq_process_unsolicited_events(pdq);
1422 if (data & PDQ_PSTS_CMD_RSP_PENDING)
1423 pdq_process_command_responses(pdq);
/* Type-0 interrupts: read the dedicated CSR (note: "data" is reused
 * here and now holds type-0 bits, not port status). */
1424 if (data & PDQ_PSTS_TYPE_0_PENDING) {
1425 data = PDQ_CSR_READ(csrs, csr_host_int_type_0);
1426 if (data & PDQ_HOST_INT_STATE_CHANGE) {
1427 pdq_state_t state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(csrs, csr_port_status));
1428 printf(PDQ_OS_PREFIX "%s", PDQ_OS_PREFIX_ARGS, pdq_adapter_states[state]);
/* Link went down: stop transmitting until it returns. */
1429 if (state == PDQS_LINK_UNAVAILABLE) {
1430 pdq->pdq_flags &= ~(PDQ_TXOK|PDQ_IS_ONRING|PDQ_IS_FDX);
1431 } else if (state == PDQS_LINK_AVAILABLE) {
/* Link up: optionally query the DEC extended MIB (full-duplex
 * negotiation info), then restart the transmitter. */
1432 if (pdq->pdq_flags & PDQ_WANT_FDX) {
1433 pdq->pdq_command_info.ci_pending_commands |= PDQ_BITMASK(PDQC_DEC_EXT_MIB_GET);
1434 pdq_queue_commands(pdq);
1436 pdq->pdq_flags |= PDQ_TXOK|PDQ_IS_ONRING;
1437 pdq_os_restart_transmitter(pdq);
/* Adapter halted: decode the halt reason and dump the error log. */
1438 } else if (state == PDQS_HALTED) {
1439 pdq_response_error_log_get_t log_entry;
1440 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(csrs, csr_port_status));
1441 printf(": halt code = %d (%s)\n",
1442 halt_code, pdq_halt_codes[halt_code]);
1443 if (halt_code == PDQH_DMA_ERROR) {
1444 PDQ_PRINTF(("\tPFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1445 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1446 data & PDQ_HOST_INT_FATAL_ERROR));
1448 PDQ_OS_MEMZERO(&log_entry, sizeof(log_entry));
1449 if (pdq_read_error_log(pdq, &log_entry)) {
1450 PDQ_PRINTF((" Error log Entry:\n"));
1451 PDQ_PRINTF((" CMD Status = %d (0x%x)\n",
1452 log_entry.error_log_get_status,
1453 log_entry.error_log_get_status));
1454 PDQ_PRINTF((" Event Status = %d (0x%x)\n",
1455 log_entry.error_log_get_event_status,
1456 log_entry.error_log_get_event_status));
1457 PDQ_PRINTF((" Caller Id = %d (0x%x)\n",
1458 log_entry.error_log_get_caller_id,
1459 log_entry.error_log_get_caller_id));
1460 PDQ_PRINTF((" Write Count = %d (0x%x)\n",
1461 log_entry.error_log_get_write_count,
1462 log_entry.error_log_get_write_count));
1463 PDQ_PRINTF((" FRU Implication Mask = %d (0x%x)\n",
1464 log_entry.error_log_get_fru_implication_mask,
1465 log_entry.error_log_get_fru_implication_mask));
1466 PDQ_PRINTF((" Test ID = %d (0x%x)\n",
1467 log_entry.error_log_get_test_id,
1468 log_entry.error_log_get_test_id));
/* If the driver believed it was running, attempt recovery
 * (the body of this conditional was lost in extraction). */
1471 if (pdq->pdq_flags & PDQ_RUNNING)
/* Ack the state-change interrupt. */
1476 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_STATE_CHANGE);
1478 if (data & PDQ_HOST_INT_FATAL_ERROR) {
1480 if (pdq->pdq_flags & PDQ_RUNNING)
/* Adapter asked us to flush the transmit queue: drop pending
 * transmits, tell the adapter we're done, and ack. */
1484 if (data & PDQ_HOST_INT_XMT_DATA_FLUSH) {
1485 printf(PDQ_OS_PREFIX "Flushing transmit queue\n", PDQ_OS_PREFIX_ARGS);
1486 pdq->pdq_flags &= ~PDQ_TXOK;
1487 pdq_flush_transmitter(pdq);
1488 pdq_do_port_control(csrs, PDQ_PCTL_XMT_DATA_FLUSH_DONE);
1489 PDQ_CSR_WRITE(csrs, csr_host_int_type_0, PDQ_HOST_INT_XMT_DATA_FLUSH);
/* Clear PFI status again before the loop re-reads port status. */
1492 PDQ_CSR_WRITE(&pdq->pdq_pci_csrs, csr_pfi_status, 0x18);
1500 pdq_bus_memaddr_t csr_base,
1508 pdq_descriptor_block_t *dbp;
1509 #if !defined(PDQ_BUS_DMA)
1510 const pdq_uint32_t contig_bytes = (sizeof(pdq_descriptor_block_t) * 2) - PDQ_OS_PAGESIZE;
1515 PDQ_ASSERT(sizeof(pdq_descriptor_block_t) == 8192);
1516 PDQ_ASSERT(sizeof(pdq_consumer_block_t) == 64);
1517 PDQ_ASSERT(sizeof(pdq_response_filter_get_t) == PDQ_SIZE_RESPONSE_FILTER_GET);
1518 PDQ_ASSERT(sizeof(pdq_cmd_addr_filter_set_t) == PDQ_SIZE_CMD_ADDR_FILTER_SET);
1519 PDQ_ASSERT(sizeof(pdq_response_addr_filter_get_t) == PDQ_SIZE_RESPONSE_ADDR_FILTER_GET);
1520 PDQ_ASSERT(sizeof(pdq_response_status_chars_get_t) == PDQ_SIZE_RESPONSE_STATUS_CHARS_GET);
1521 PDQ_ASSERT(sizeof(pdq_response_fddi_mib_get_t) == PDQ_SIZE_RESPONSE_FDDI_MIB_GET);
1522 PDQ_ASSERT(sizeof(pdq_response_dec_ext_mib_get_t) == PDQ_SIZE_RESPONSE_DEC_EXT_MIB_GET);
1523 PDQ_ASSERT(sizeof(pdq_unsolicited_event_t) == 512);
1525 pdq = (pdq_t *) PDQ_OS_MEMALLOC(sizeof(pdq_t));
1527 PDQ_PRINTF(("malloc(%d) failed\n", sizeof(*pdq)));
1530 PDQ_OS_MEMZERO(pdq, sizeof(pdq_t));
1531 pdq->pdq_type = type;
1532 pdq->pdq_unit = unit;
1533 pdq->pdq_os_ctx = (void *) ctx;
1534 pdq->pdq_os_name = name;
1535 pdq->pdq_flags = PDQ_PRINTCHARS;
1537 * Allocate the additional data structures required by
1538 * the PDQ driver. Allocate a contiguous region of memory
1539 * for the descriptor block. We need to allocate enough
1540 * to guarantee that we will get an 8KB block of memory aligned
1541 * on an 8KB boundary. This turns out to require that we allocate
1542 * (N*2 - 1 page) pages of memory. On machines with less than
1543 * an 8KB page size, it means we will allocate more memory than
1544 * we need. The extra will be used for the unsolicited event
1545 * buffers (though on machines with 8KB pages we will have to allocate
1546 * them separately since there will be nothing left over.)
1548 #if defined(PDQ_OS_MEMALLOC_CONTIG)
1549 p = (pdq_uint8_t *) PDQ_OS_MEMALLOC_CONTIG(contig_bytes);
1552 printf("%s() - PDQ_OS_MEMALLOC_CONTIG() failed!\n", __func__);
1555 pdq_physaddr_t physaddr = PDQ_OS_VA_TO_BUSPA(pdq, p);
1557 * Assert that we really got contiguous memory. This isn't really
1558 * needed on systems that actually have physical contiguous allocation
1559 * routines, but on those systems that don't ...
1561 for (idx = PDQ_OS_PAGESIZE; idx < 0x2000; idx += PDQ_OS_PAGESIZE) {
1562 if (PDQ_OS_VA_TO_BUSPA(pdq, p + idx) - physaddr != idx)
1563 goto cleanup_and_return;
1565 if (physaddr & 0x1FFF) {
1566 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) p;
1567 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr;
1568 pdq->pdq_dbp = (pdq_descriptor_block_t *) &p[0x2000 - (physaddr & 0x1FFF)];
1569 pdq->pdq_pa_descriptor_block = physaddr & ~0x1FFFUL;
1571 pdq->pdq_dbp = (pdq_descriptor_block_t *) p;
1572 pdq->pdq_pa_descriptor_block = physaddr;
1573 pdq->pdq_unsolicited_info.ui_events = (pdq_unsolicited_event_t *) &p[0x2000];
1574 pdq->pdq_unsolicited_info.ui_pa_bufstart = physaddr + 0x2000;
1577 pdq->pdq_cbp = (volatile pdq_consumer_block_t *) &pdq->pdq_dbp->pdqdb_consumer;
1578 pdq->pdq_pa_consumer_block = PDQ_DB_BUSPA(pdq, pdq->pdq_cbp);
1579 if (contig_bytes == sizeof(pdq_descriptor_block_t)) {
1580 pdq->pdq_unsolicited_info.ui_events =
1581 (pdq_unsolicited_event_t *) PDQ_OS_MEMALLOC(
1582 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1585 if (pdq_os_memalloc_contig(pdq))
1586 goto cleanup_and_return;
1590 * Make sure everything got allocated. If not, free what did
1591 * get allocated and return.
1593 if (pdq->pdq_dbp == NULL || pdq->pdq_unsolicited_info.ui_events == NULL) {
1595 #ifdef PDQ_OS_MEMFREE_CONTIG
1596 if (p /* pdq->pdq_dbp */ != NULL)
1597 PDQ_OS_MEMFREE_CONTIG(p /* pdq->pdq_dbp */, contig_bytes);
1598 if (contig_bytes == sizeof(pdq_descriptor_block_t) && pdq->pdq_unsolicited_info.ui_events != NULL)
1599 PDQ_OS_MEMFREE(pdq->pdq_unsolicited_info.ui_events,
1600 PDQ_NUM_UNSOLICITED_EVENTS * sizeof(pdq_unsolicited_event_t));
1602 PDQ_OS_MEMFREE(pdq, sizeof(pdq_t));
1607 PDQ_PRINTF(("\nPDQ Descriptor Block = " PDQ_OS_PTR_FMT " (PA = 0x%x)\n", dbp, pdq->pdq_pa_descriptor_block));
1608 PDQ_PRINTF((" Receive Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_receives));
1609 PDQ_PRINTF((" Transmit Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_transmits));
1610 PDQ_PRINTF((" Host SMT Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_host_smt));
1611 PDQ_PRINTF((" Command Response Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_responses));
1612 PDQ_PRINTF((" Command Request Queue = " PDQ_OS_PTR_FMT "\n", dbp->pdqdb_command_requests));
1613 PDQ_PRINTF(("PDQ Consumer Block = " PDQ_OS_PTR_FMT "\n", pdq->pdq_cbp));
1616 * Zero out the descriptor block. Not really required but
1617 * it pays to be neat. This will also zero out the consumer
1618 * block, command pool, and buffer pointers for the receive
1621 PDQ_OS_MEMZERO(dbp, sizeof(*dbp));
1624 * Initialize the CSR references.
1625 * the DEFAA (FutureBus+) skips a longword between registers
1627 pdq_init_csrs(&pdq->pdq_csrs, bus, csr_base, 1);
1628 pdq_init_pci_csrs(&pdq->pdq_pci_csrs, bus, csr_base, 1);
1630 PDQ_PRINTF(("PDQ CSRs: BASE = " PDQ_OS_CSR_FMT "\n", pdq->pdq_csrs.csr_base));
1631 PDQ_PRINTF((" Port Reset = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1632 pdq->pdq_csrs.csr_port_reset, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_reset)));
1633 PDQ_PRINTF((" Host Data = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1634 pdq->pdq_csrs.csr_host_data, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_data)));
1635 PDQ_PRINTF((" Port Control = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1636 pdq->pdq_csrs.csr_port_control, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_control)));
1637 PDQ_PRINTF((" Port Data A = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1638 pdq->pdq_csrs.csr_port_data_a, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_a)));
1639 PDQ_PRINTF((" Port Data B = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1640 pdq->pdq_csrs.csr_port_data_b, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_data_b)));
1641 PDQ_PRINTF((" Port Status = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1642 pdq->pdq_csrs.csr_port_status, PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status)));
1643 PDQ_PRINTF((" Host Int Type 0 = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1644 pdq->pdq_csrs.csr_host_int_type_0, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0)));
1645 PDQ_PRINTF((" Host Int Enable = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1646 pdq->pdq_csrs.csr_host_int_enable, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_enable)));
1647 PDQ_PRINTF((" Type 2 Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1648 pdq->pdq_csrs.csr_type_2_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_type_2_producer)));
1649 PDQ_PRINTF((" Command Response Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1650 pdq->pdq_csrs.csr_cmd_response_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_response_producer)));
1651 PDQ_PRINTF((" Command Request Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1652 pdq->pdq_csrs.csr_cmd_request_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_cmd_request_producer)));
1653 PDQ_PRINTF((" Host SMT Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1654 pdq->pdq_csrs.csr_host_smt_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_smt_producer)));
1655 PDQ_PRINTF((" Unsolicited Producer = " PDQ_OS_CSR_FMT " [0x%08x]\n",
1656 pdq->pdq_csrs.csr_unsolicited_producer, PDQ_CSR_READ(&pdq->pdq_csrs, csr_unsolicited_producer)));
1659 * Initialize the command information block
1661 pdq->pdq_command_info.ci_request_bufstart = dbp->pdqdb_cmd_request_buf;
1662 pdq->pdq_command_info.ci_pa_request_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_request_bufstart);
1663 pdq->pdq_command_info.ci_pa_request_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_requests);
1664 PDQ_PRINTF(("PDQ Command Request Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1665 pdq->pdq_command_info.ci_request_bufstart,
1666 pdq->pdq_command_info.ci_pa_request_bufstart));
1667 for (idx = 0; idx < sizeof(dbp->pdqdb_command_requests)/sizeof(dbp->pdqdb_command_requests[0]); idx++) {
1668 pdq_txdesc_t *txd = &dbp->pdqdb_command_requests[idx];
1670 txd->txd_pa_lo = pdq->pdq_command_info.ci_pa_request_bufstart;
1671 txd->txd_eop = txd->txd_sop = 1;
1674 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_requests,
1675 sizeof(dbp->pdqdb_command_requests));
1677 pdq->pdq_command_info.ci_response_bufstart = dbp->pdqdb_cmd_response_buf;
1678 pdq->pdq_command_info.ci_pa_response_bufstart = PDQ_DB_BUSPA(pdq, pdq->pdq_command_info.ci_response_bufstart);
1679 pdq->pdq_command_info.ci_pa_response_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_command_responses);
1680 PDQ_PRINTF(("PDQ Command Response Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1681 pdq->pdq_command_info.ci_response_bufstart,
1682 pdq->pdq_command_info.ci_pa_response_bufstart));
1683 for (idx = 0; idx < sizeof(dbp->pdqdb_command_responses)/sizeof(dbp->pdqdb_command_responses[0]); idx++) {
1684 pdq_rxdesc_t *rxd = &dbp->pdqdb_command_responses[idx];
1686 rxd->rxd_pa_lo = pdq->pdq_command_info.ci_pa_response_bufstart;
1688 rxd->rxd_seg_cnt = 0;
1689 rxd->rxd_seg_len_lo = 0;
1690 rxd->rxd_seg_len_hi = PDQ_SIZE_COMMAND_RESPONSE / 16;
1692 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_command_responses,
1693 sizeof(dbp->pdqdb_command_responses));
1696 * Initialize the unsolicited event information block
1698 pdq->pdq_unsolicited_info.ui_free = PDQ_NUM_UNSOLICITED_EVENTS;
1699 pdq->pdq_unsolicited_info.ui_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_unsolicited_events);
1700 PDQ_PRINTF(("PDQ Unsolicit Event Buffer = " PDQ_OS_PTR_FMT " (PA=0x%x)\n",
1701 pdq->pdq_unsolicited_info.ui_events,
1702 pdq->pdq_unsolicited_info.ui_pa_bufstart));
1703 for (idx = 0; idx < sizeof(dbp->pdqdb_unsolicited_events)/sizeof(dbp->pdqdb_unsolicited_events[0]); idx++) {
1704 pdq_rxdesc_t *rxd = &dbp->pdqdb_unsolicited_events[idx];
1705 pdq_unsolicited_event_t *event = &pdq->pdq_unsolicited_info.ui_events[idx & (PDQ_NUM_UNSOLICITED_EVENTS-1)];
1708 rxd->rxd_seg_cnt = 0;
1709 rxd->rxd_seg_len_hi = sizeof(pdq_unsolicited_event_t) / 16;
1710 rxd->rxd_pa_lo = pdq->pdq_unsolicited_info.ui_pa_bufstart + (const pdq_uint8_t *) event
1711 - (const pdq_uint8_t *) pdq->pdq_unsolicited_info.ui_events;
1713 PDQ_OS_UNSOL_EVENT_PRESYNC(pdq, event);
1715 PDQ_OS_DESC_PRESYNC(pdq, dbp->pdqdb_unsolicited_events,
1716 sizeof(dbp->pdqdb_unsolicited_events));
1719 * Initialize the receive information blocks (normal and SMT).
1721 pdq->pdq_rx_info.rx_buffers = pdq->pdq_receive_buffers;
1722 pdq->pdq_rx_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_receives);
1723 pdq->pdq_rx_info.rx_target = pdq->pdq_rx_info.rx_free - PDQ_RX_SEGCNT * 8;
1724 pdq->pdq_rx_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_receives);
1726 pdq->pdq_host_smt_info.rx_buffers = pdq->pdq_host_smt_buffers;
1727 pdq->pdq_host_smt_info.rx_free = PDQ_RING_MASK(dbp->pdqdb_host_smt);
1728 pdq->pdq_host_smt_info.rx_target = pdq->pdq_host_smt_info.rx_free - PDQ_RX_SEGCNT * 3;
1729 pdq->pdq_host_smt_info.rx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_host_smt);
1732 * Initialize the transmit information block.
1734 dbp->pdqdb_tx_hdr[0] = PDQ_FDDI_PH0;
1735 dbp->pdqdb_tx_hdr[1] = PDQ_FDDI_PH1;
1736 dbp->pdqdb_tx_hdr[2] = PDQ_FDDI_PH2;
1737 pdq->pdq_tx_info.tx_free = PDQ_RING_MASK(dbp->pdqdb_transmits);
1738 pdq->pdq_tx_info.tx_hdrdesc.txd_seg_len = 3;
1739 pdq->pdq_tx_info.tx_hdrdesc.txd_sop = 1;
1740 pdq->pdq_tx_info.tx_hdrdesc.txd_pa_lo = PDQ_DB_BUSPA(pdq, dbp->pdqdb_tx_hdr);
1741 pdq->pdq_tx_info.tx_pa_descriptors = PDQ_DB_BUSPA(pdq, dbp->pdqdb_transmits);
1743 state = PDQ_PSTS_ADAPTER_STATE(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1744 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1747 * Stop the PDQ if it is running and put it into a known state.
1749 state = pdq_stop(pdq);
1751 PDQ_PRINTF(("PDQ Adapter State = %s\n", pdq_adapter_states[state]));
1752 PDQ_ASSERT(state == PDQS_DMA_AVAILABLE);
1754 * If the adapter is not the state we expect, then the initialization
1755 * failed. Cleanup and exit.
1757 #if defined(PDQVERBOSE)
1758 if (state == PDQS_HALTED) {
1759 pdq_halt_code_t halt_code = PDQ_PSTS_HALT_ID(PDQ_CSR_READ(&pdq->pdq_csrs, csr_port_status));
1760 printf("Halt code = %d (%s)\n", halt_code, pdq_halt_codes[halt_code]);
1761 if (halt_code == PDQH_DMA_ERROR)
1762 PDQ_PRINTF(("PFI status = 0x%x, Host 0 Fatal Interrupt = 0x%x\n",
1763 PDQ_CSR_READ(&pdq->pdq_pci_csrs, csr_pfi_status),
1764 PDQ_CSR_READ(&pdq->pdq_csrs, csr_host_int_type_0) & PDQ_HOST_INT_FATAL_ERROR));
1767 if (state == PDQS_RESET || state == PDQS_HALTED || state == PDQS_UPGRADE)
1768 goto cleanup_and_return;
1770 PDQ_PRINTF(("PDQ Hardware Address = %02x-%02x-%02x-%02x-%02x-%02x\n",
1771 pdq->pdq_hwaddr.lanaddr_bytes[0], pdq->pdq_hwaddr.lanaddr_bytes[1],
1772 pdq->pdq_hwaddr.lanaddr_bytes[2], pdq->pdq_hwaddr.lanaddr_bytes[3],
1773 pdq->pdq_hwaddr.lanaddr_bytes[4], pdq->pdq_hwaddr.lanaddr_bytes[5]));
1774 PDQ_PRINTF(("PDQ Firmware Revision = %c%c%c%c\n",
1775 pdq->pdq_fwrev.fwrev_bytes[0], pdq->pdq_fwrev.fwrev_bytes[1],
1776 pdq->pdq_fwrev.fwrev_bytes[2], pdq->pdq_fwrev.fwrev_bytes[3]));
1777 PDQ_PRINTF(("PDQ Chip Revision = "));
1778 switch (pdq->pdq_chip_rev) {
1779 case PDQ_CHIP_REV_A_B_OR_C: PDQ_PRINTF(("Rev C or below")); break;
1780 case PDQ_CHIP_REV_D: PDQ_PRINTF(("Rev D")); break;
1781 case PDQ_CHIP_REV_E: PDQ_PRINTF(("Rev E")); break;
1782 default: PDQ_PRINTF(("Unknown Rev %d", (int) pdq->pdq_chip_rev));