/*
 * AMD 10Gb Ethernet driver
 *
 * Copyright (c) 2014-2016,2020 Advanced Micro Devices, Inc.
 *
 * This file is available to you under your choice of the following two
 * licenses:
 *
 * License 1: GPLv2
 *
 * This file is free software; you may copy, redistribute and/or modify
 * it under the terms of the GNU General Public License as published by
 * the Free Software Foundation, either version 2 of the License, or (at
 * your option) any later version.
 *
 * This file is distributed in the hope that it will be useful, but
 * WITHOUT ANY WARRANTY; without even the implied warranty of
 * MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
 * General Public License for more details.
 *
 * You should have received a copy of the GNU General Public License
 * along with this program.  If not, see <http://www.gnu.org/licenses/>.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 *
 *
 * License 2: Modified BSD
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *     * Redistributions of source code must retain the above copyright
 *       notice, this list of conditions and the following disclaimer.
 *     * Redistributions in binary form must reproduce the above copyright
 *       notice, this list of conditions and the following disclaimer in the
 *       documentation and/or other materials provided with the distribution.
 *     * Neither the name of Advanced Micro Devices, Inc. nor the
 *       names of its contributors may be used to endorse or promote products
 *       derived from this software without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL <COPYRIGHT HOLDER> BE LIABLE FOR ANY
 * DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES
 * (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES;
 * LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND
 * ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * This file incorporates work covered by the following copyright and
 * permission notice:
 *     The Synopsys DWC ETHER XGMAC Software Driver and documentation
 *     (hereinafter "Software") is an unsupported proprietary work of Synopsys,
 *     Inc. unless otherwise expressly agreed to in writing between Synopsys
 *     and you.
 *
 *     The Software IS NOT an item of Licensed Software or Licensed Product
 *     under any End User Software License Agreement or Agreement for Licensed
 *     Product with Synopsys or any supplement thereto.  Permission is hereby
 *     granted, free of charge, to any person obtaining a copy of this software
 *     annotated with this license and the Software, to deal in the Software
 *     without restriction, including without limitation the rights to use,
 *     copy, modify, merge, publish, distribute, sublicense, and/or sell copies
 *     of the Software, and to permit persons to whom the Software is furnished
 *     to do so, subject to the following conditions:
 *
 *     The above copyright notice and this permission notice shall be included
 *     in all copies or substantial portions of the Software.
 *
 *     THIS SOFTWARE IS BEING DISTRIBUTED BY SYNOPSYS SOLELY ON AN "AS IS"
 *     BASIS AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 *     TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 *     PARTICULAR PURPOSE ARE HEREBY DISCLAIMED.  IN NO EVENT SHALL SYNOPSYS
 *     BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 *     CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 *     SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 *     INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 *     CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 *     ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
 *     THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "xgbe.h"
#include "xgbe-common.h"

#include <net/if_dl.h>
static inline unsigned int xgbe_get_max_frame(struct xgbe_prv_data *pdata)
{
	return (if_getmtu(pdata->netdev) + ETH_HLEN + ETH_FCS_LEN + VLAN_HLEN);
}
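
/*
 * Worked example (illustrative): with the default MTU of 1500, the maximum
 * frame becomes 1500 + 14 (ETH_HLEN) + 4 (ETH_FCS_LEN) + 4 (VLAN_HLEN) =
 * 1522 bytes, which is the size the flow-control code below aligns against.
 */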
static unsigned int
xgbe_usec_to_riwt(struct xgbe_prv_data *pdata, unsigned int usec)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input usec value to the watchdog timer value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( usec * ( system_clock_mhz / 10^6 ) ) / 256
	 */
	ret = (usec * (rate / 1000000)) / 256;

	return (ret);
}
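
/*
 * Worked example (illustrative, assuming a 125 MHz system clock): for
 * usec = 30, ret = (30 * (125000000 / 1000000)) / 256 = 3750 / 256 = 14
 * watchdog-timer units.
 */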
static unsigned int
xgbe_riwt_to_usec(struct xgbe_prv_data *pdata, unsigned int riwt)
{
	unsigned long rate;
	unsigned int ret;

	rate = pdata->sysclk_rate;

	/*
	 * Convert the input watchdog timer value to the usec value. Each
	 * watchdog timer value is equivalent to 256 clock cycles.
	 * Calculate the required value as:
	 *   ( riwt * 256 ) / ( system_clock_mhz / 10^6 )
	 */
	ret = (riwt * 256) / (rate / 1000000);

	return (ret);
}
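
/*
 * Inverse of the example above (same 125 MHz assumption): riwt = 14 gives
 * (14 * 256) / 125 = 28 usec, showing that the round trip can lose up to
 * one timer unit to integer division.
 */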
static int
xgbe_config_pbl_val(struct xgbe_prv_data *pdata)
{
	unsigned int pblx8, pbl;
	unsigned int i;

	pblx8 = DMA_PBL_X8_DISABLE;
	pbl = pdata->pbl;

	if (pdata->pbl > 32) {
		pblx8 = DMA_PBL_X8_ENABLE;
		pbl >>= 3;
	}

	for (i = 0; i < pdata->channel_count; i++) {
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR, PBLX8,
		    pblx8);

		if (pdata->channel[i]->tx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR,
			    PBL, pbl);

		if (pdata->channel[i]->rx_ring)
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR,
			    PBL, pbl);
	}

	return (0);
}
static int
xgbe_config_osp_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, OSP,
		    pdata->tx_osp_mode);
	}

	return (0);
}
static int
xgbe_config_rsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RSF, val);

	return (0);
}

static int
xgbe_config_tsf_mode(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TSF, val);

	return (0);
}

static int
xgbe_config_rx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RTC, val);

	return (0);
}

static int
xgbe_config_tx_threshold(struct xgbe_prv_data *pdata, unsigned int val)
{
	unsigned int i;

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TTC, val);

	return (0);
}
static int
xgbe_config_rx_coalesce(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RIWT, RWT,
		    pdata->rx_riwt);
	}

	return (0);
}

static int
xgbe_config_tx_coalesce(struct xgbe_prv_data *pdata)
{
	return (0);
}
static void
xgbe_config_rx_buffer_size(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, RBSZ,
		    pdata->rx_buf_size);
	}
}
static void
xgbe_config_tso_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int tso_enabled = (if_getcapenable(pdata->netdev) & IFCAP_TSO);

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		axgbe_printf(1, "TSO in channel %d %s\n", i,
		    tso_enabled ? "enabled" : "disabled");
		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, TSE,
		    tso_enabled ? 1 : 0);
	}
}
static void
xgbe_config_sph_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int sph_enable_flag = XGMAC_IOREAD_BITS(pdata, MAC_HWF1R, SPHEN);

	axgbe_printf(1, "sph_enable %d sph feature enabled?: %d\n",
	    pdata->sph_enable, sph_enable_flag);

	if (pdata->sph_enable && sph_enable_flag)
		axgbe_printf(0, "SPH Enabled\n");

	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;
		if (pdata->sph_enable && sph_enable_flag) {
			/* Enable split header feature */
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR,
			    SPH, 1);
		} else {
			/* Disable split header feature */
			XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_CR,
			    SPH, 0);
		}

		/* per-channel confirmation of SPH being disabled/enabled */
		int val = XGMAC_DMA_IOREAD_BITS(pdata->channel[i], DMA_CH_CR,
		    SPH);
		axgbe_printf(0, "%s: SPH %s in channel %d\n", __func__,
		    (val ? "enabled" : "disabled"), i);
	}

	if (pdata->sph_enable && sph_enable_flag)
		XGMAC_IOWRITE_BITS(pdata, MAC_RCR, HDSMS, XGBE_SPH_HDSMS_SIZE);
}
static int
xgbe_write_rss_reg(struct xgbe_prv_data *pdata, unsigned int type,
    unsigned int index, unsigned int val)
{
	unsigned int wait;
	int ret = 0;

	mtx_lock(&pdata->rss_mutex);

	if (XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB)) {
		ret = -EBUSY;
		goto unlock;
	}

	XGMAC_IOWRITE(pdata, MAC_RSSDR, val);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, RSSIA, index);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, ADDRT, type);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, CT, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSAR, OB, 1);

	wait = 1000;
	while (wait--) {
		if (!XGMAC_IOREAD_BITS(pdata, MAC_RSSAR, OB))
			goto unlock;

		DELAY(1000);
	}

	ret = -EBUSY;

unlock:
	mtx_unlock(&pdata->rss_mutex);

	return (ret);
}
static int
xgbe_write_rss_hash_key(struct xgbe_prv_data *pdata)
{
	unsigned int key_regs = sizeof(pdata->rss_key) / sizeof(uint32_t);
	unsigned int *key = (unsigned int *)&pdata->rss_key;
	int ret;

	while (key_regs--) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_HASH_KEY_TYPE,
		    key_regs, *key++);
		if (ret)
			return (ret);
	}

	return (0);
}

static int
xgbe_write_rss_lookup_table(struct xgbe_prv_data *pdata)
{
	unsigned int i;
	int ret;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++) {
		ret = xgbe_write_rss_reg(pdata, XGBE_RSS_LOOKUP_TABLE_TYPE, i,
		    pdata->rss_table[i]);
		if (ret)
			return (ret);
	}

	return (0);
}

static int
xgbe_set_rss_hash_key(struct xgbe_prv_data *pdata, const uint8_t *key)
{
	memcpy(pdata->rss_key, key, sizeof(pdata->rss_key));

	return (xgbe_write_rss_hash_key(pdata));
}

static int
xgbe_set_rss_lookup_table(struct xgbe_prv_data *pdata, const uint32_t *table)
{
	unsigned int i;

	for (i = 0; i < ARRAY_SIZE(pdata->rss_table); i++)
		XGMAC_SET_BITS(pdata->rss_table[i], MAC_RSSDR, DMCH, table[i]);

	return (xgbe_write_rss_lookup_table(pdata));
}
static int
xgbe_enable_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return (-EOPNOTSUPP);

	/* Program the hash key */
	ret = xgbe_write_rss_hash_key(pdata);
	if (ret)
		return (ret);

	/* Program the lookup table */
	ret = xgbe_write_rss_lookup_table(pdata);
	if (ret)
		return (ret);

	/* Set the RSS options */
	XGMAC_IOWRITE(pdata, MAC_RSSCR, pdata->rss_options);

	/* Enable RSS */
	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 1);

	axgbe_printf(0, "RSS Enabled\n");

	return (0);
}

static int
xgbe_disable_rss(struct xgbe_prv_data *pdata)
{
	if (!pdata->hw_feat.rss)
		return (-EOPNOTSUPP);

	XGMAC_IOWRITE_BITS(pdata, MAC_RSSCR, RSSE, 0);

	axgbe_printf(0, "RSS Disabled\n");

	return (0);
}
static void
xgbe_config_rss(struct xgbe_prv_data *pdata)
{
	int ret;

	if (!pdata->hw_feat.rss)
		return;

	/* Check if the interface has RSS capability */
	if (pdata->enable_rss)
		ret = xgbe_enable_rss(pdata);
	else
		ret = xgbe_disable_rss(pdata);

	if (ret)
		axgbe_error("error configuring RSS, RSS disabled\n");
}
static int
xgbe_disable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Clear MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, 0);

	/* Clear MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 0);
		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return (0);
}
static int
xgbe_enable_tx_flow_control(struct xgbe_prv_data *pdata)
{
	unsigned int max_q_count, q_count;
	unsigned int reg, reg_val;
	unsigned int i;

	/* Set MTL flow control */
	for (i = 0; i < pdata->rx_q_count; i++) {
		unsigned int ehfc = 0;

		if (pdata->rx_rfd[i]) {
			/* Flow control thresholds are established */
			/* TODO - enable pfc/ets support */
			ehfc = 1;
		}

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, EHFC, ehfc);

		axgbe_printf(1, "flow control %s for RXq%u\n",
		    ehfc ? "enabled" : "disabled", i);
	}

	/* Set MAC flow control */
	max_q_count = XGMAC_MAX_FLOW_CONTROL_QUEUES;
	q_count = min_t(unsigned int, pdata->tx_q_count, max_q_count);
	reg = MAC_Q0TFCR;
	for (i = 0; i < q_count; i++) {
		reg_val = XGMAC_IOREAD(pdata, reg);

		/* Enable transmit flow control */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, TFE, 1);

		/* Set pause time */
		XGMAC_SET_BITS(reg_val, MAC_Q0TFCR, PT, 0xffff);

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MAC_QTFCR_INC;
	}

	return (0);
}
static int
xgbe_disable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 0);

	return (0);
}

static int
xgbe_enable_rx_flow_control(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, RFE, 1);

	return (0);
}

static int
xgbe_config_tx_flow_control(struct xgbe_prv_data *pdata)
{
	if (pdata->tx_pause)
		xgbe_enable_tx_flow_control(pdata);
	else
		xgbe_disable_tx_flow_control(pdata);

	return (0);
}

static int
xgbe_config_rx_flow_control(struct xgbe_prv_data *pdata)
{
	if (pdata->rx_pause)
		xgbe_enable_rx_flow_control(pdata);
	else
		xgbe_disable_rx_flow_control(pdata);

	return (0);
}

static int
xgbe_config_flow_control(struct xgbe_prv_data *pdata)
{
	xgbe_config_tx_flow_control(pdata);
	xgbe_config_rx_flow_control(pdata);

	XGMAC_IOWRITE_BITS(pdata, MAC_RFCR, PFCE, 0);

	return (0);
}
static void
xgbe_enable_dma_interrupts(struct xgbe_prv_data *pdata)
{
	struct xgbe_channel *channel;
	unsigned int i, ver;

	/* Set the interrupt mode if supported */
	if (pdata->channel_irq_mode)
		XGMAC_IOWRITE_BITS(pdata, DMA_MR, INTM,
		    pdata->channel_irq_mode);

	ver = XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER);

	for (i = 0; i < pdata->channel_count; i++) {
		channel = pdata->channel[i];

		/* Clear all the interrupts which are set */
		XGMAC_DMA_IOWRITE(channel, DMA_CH_SR,
		    XGMAC_DMA_IOREAD(channel, DMA_CH_SR));

		/* Clear all interrupt enable bits */
		channel->curr_ier = 0;

		/* Enable following interrupts
		 *   NIE  - Normal Interrupt Summary Enable
		 *   AIE  - Abnormal Interrupt Summary Enable
		 *   FBEE - Fatal Bus Error Enable
		 */
		if (ver < 0x21) {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE20, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE20, 1);
		} else {
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, NIE, 1);
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, AIE, 1);
		}
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);

		if (channel->tx_ring) {
			/* Enable the following Tx interrupts
			 *   TIE - Transmit Interrupt Enable (unless using
			 *         per channel interrupts in edge triggered
			 *         mode)
			 */
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
				    DMA_CH_IER, TIE, 1);
		}
		if (channel->rx_ring) {
			/* Enable following Rx interrupts
			 *   RBUE - Receive Buffer Unavailable Enable
			 *   RIE  - Receive Interrupt Enable (unless using
			 *          per channel interrupts in edge triggered
			 *          mode)
			 */
			XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
			if (!pdata->per_channel_irq || pdata->channel_irq_mode)
				XGMAC_SET_BITS(channel->curr_ier,
				    DMA_CH_IER, RIE, 1);
		}

		XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);
	}
}
static void
xgbe_enable_mtl_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mtl_q_isr;
	unsigned int q_count, i;

	q_count = max(pdata->hw_feat.tx_q_cnt, pdata->hw_feat.rx_q_cnt);
	for (i = 0; i < q_count; i++) {
		/* Clear all the interrupts which are set */
		mtl_q_isr = XGMAC_MTL_IOREAD(pdata, i, MTL_Q_ISR);
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_ISR, mtl_q_isr);

		/* No MTL interrupts to be enabled */
		XGMAC_MTL_IOWRITE(pdata, i, MTL_Q_IER, 0);
	}
}
static void
xgbe_enable_mac_interrupts(struct xgbe_prv_data *pdata)
{
	unsigned int mac_ier = 0;

	/* Enable Timestamp interrupt */
	XGMAC_SET_BITS(mac_ier, MAC_IER, TSIE, 1);

	XGMAC_IOWRITE(pdata, MAC_IER, mac_ier);

	/* Enable all counter interrupts */
	XGMAC_IOWRITE_BITS(pdata, MMC_RIER, ALL_INTERRUPTS, 0xffffffff);
	XGMAC_IOWRITE_BITS(pdata, MMC_TIER, ALL_INTERRUPTS, 0xffffffff);

	/* Enable MDIO single command completion interrupt */
	XGMAC_IOWRITE_BITS(pdata, MAC_MDIOIER, SNGLCOMPIE, 1);
}
static int
xgbe_set_speed(struct xgbe_prv_data *pdata, int speed)
{
	unsigned int ss;

	switch (speed) {
	case SPEED_10:
		ss = 0x07;
		break;
	case SPEED_1000:
		ss = 0x03;
		break;
	case SPEED_2500:
		ss = 0x02;
		break;
	case SPEED_10000:
		ss = 0x00;
		break;
	default:
		return (-EINVAL);
	}

	if (XGMAC_IOREAD_BITS(pdata, MAC_TCR, SS) != ss)
		XGMAC_IOWRITE_BITS(pdata, MAC_TCR, SS, ss);

	return (0);
}
static int
xgbe_enable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	/* Put the VLAN tag in the Rx descriptor */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLRXS, 1);

	/* Don't check the VLAN type */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, DOVLTC, 1);

	/* Check only C-TAG (0x8100) packets */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ERSVLM, 0);

	/* Don't consider an S-TAG (0x88A8) packet as a VLAN packet */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ESVL, 0);

	/* Enable VLAN tag stripping */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0x3);

	axgbe_printf(0, "VLAN Stripping Enabled\n");

	return (0);
}

static int
xgbe_disable_rx_vlan_stripping(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, EVLS, 0);

	axgbe_printf(0, "VLAN Stripping Disabled\n");

	return (0);
}
static int
xgbe_enable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Enable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 1);

	/* Enable VLAN Hash Table filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTHM, 1);

	/* Disable VLAN tag inverse matching */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VTIM, 0);

	/* Only filter on the lower 12-bits of the VLAN tag */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, ETV, 1);

	/* In order for the VLAN Hash Table filtering to be effective,
	 * the VLAN tag identifier in the VLAN Tag Register must not
	 * be zero.  Set the VLAN tag identifier to "1" to enable the
	 * VLAN Hash Table filtering.  This implies that a VLAN tag of
	 * 1 will always pass filtering.
	 */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANTR, VL, 1);

	axgbe_printf(0, "VLAN filtering Enabled\n");

	return (0);
}

static int
xgbe_disable_rx_vlan_filtering(struct xgbe_prv_data *pdata)
{
	/* Disable VLAN filtering */
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, VTFE, 0);

	axgbe_printf(0, "VLAN filtering Disabled\n");

	return (0);
}
static uint32_t
xgbe_vid_crc32_le(__le16 vid_le)
{
	uint32_t crc = ~0;
	uint32_t temp = 0;
	unsigned char *data = (unsigned char *)&vid_le;
	unsigned char data_byte = 0;
	int i, bits;

	bits = get_bitmask_order(VLAN_VID_MASK);
	for (i = 0; i < bits; i++) {
		if ((i % 8) == 0)
			data_byte = data[i / 8];

		temp = ((crc & 1) ^ data_byte) & 1;
		crc >>= 1;
		data_byte >>= 1;

		if (temp)
			crc ^= CRC32_POLY_LE;
	}

	return (crc);
}
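
/*
 * Usage note: callers do not consume this CRC directly.  As in
 * xgbe_update_vlan_hash_table() below, the result is inverted, bit-reversed
 * with bitrev32() and shifted right by 28, so only the top 4 reflected bits
 * survive.  Those 4 bits select one of the 16 bits of the VLAN hash table
 * register, i.e. every VLAN ID collapses into a 16-bucket hash.
 */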
static int
xgbe_update_vlan_hash_table(struct xgbe_prv_data *pdata)
{
	uint32_t crc;
	uint16_t vid;
	__le16 vid_le;
	uint16_t vlan_hash_table = 0;

	axgbe_printf(1, "%s: Before updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	/* Generate the VLAN Hash Table value */
	for_each_set_bit(vid, pdata->active_vlans, VLAN_NVID) {

		/* Get the CRC32 value of the VLAN ID */
		vid_le = cpu_to_le16(vid);
		crc = bitrev32(~xgbe_vid_crc32_le(vid_le)) >> 28;

		vlan_hash_table |= (1 << crc);
		axgbe_printf(1, "%s: vid 0x%x vid_le 0x%x crc 0x%x "
		    "vlan_hash_table 0x%x\n", __func__, vid, vid_le, crc,
		    vlan_hash_table);
	}

	/* Set the VLAN Hash Table filtering register */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANHTR, VLHT, vlan_hash_table);

	axgbe_printf(1, "%s: After updating VLANHTR 0x%x\n", __func__,
	    XGMAC_IOREAD(pdata, MAC_VLANHTR));

	return (0);
}
static int
xgbe_set_promiscuous_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PR) == val)
		return (0);

	axgbe_printf(1, "%s promiscuous mode\n",
	    enable ? "entering" : "leaving");

	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PR, val);

	/* Hardware will still perform VLAN filtering in promiscuous mode */
	if (enable) {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	} else {
		if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
			axgbe_printf(1, "Enabling rx vlan filtering\n");
			xgbe_enable_rx_vlan_filtering(pdata);
		}
	}

	return (0);
}
static int
xgbe_set_all_multicast_mode(struct xgbe_prv_data *pdata, unsigned int enable)
{
	unsigned int val = enable ? 1 : 0;

	if (XGMAC_IOREAD_BITS(pdata, MAC_PFR, PM) == val)
		return (0);

	axgbe_printf(1, "%s allmulti mode\n", enable ? "entering" : "leaving");
	XGMAC_IOWRITE_BITS(pdata, MAC_PFR, PM, val);

	return (0);
}
static void
xgbe_set_mac_reg(struct xgbe_prv_data *pdata, char *addr, unsigned int *mac_reg)
{
	unsigned int mac_addr_hi, mac_addr_lo;
	uint8_t *mac_addr;

	mac_addr_lo = 0;
	mac_addr_hi = 0;

	if (addr) {
		mac_addr = (uint8_t *)&mac_addr_lo;
		mac_addr[0] = addr[0];
		mac_addr[1] = addr[1];
		mac_addr[2] = addr[2];
		mac_addr[3] = addr[3];
		mac_addr = (uint8_t *)&mac_addr_hi;
		mac_addr[0] = addr[4];
		mac_addr[1] = addr[5];

		axgbe_printf(1, "adding mac address %pM at %#x\n", addr,
		    *mac_reg);

		XGMAC_SET_BITS(mac_addr_hi, MAC_MACA1HR, AE, 1);
	}

	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_hi);
	*mac_reg += MAC_MACA_INC;
	XGMAC_IOWRITE(pdata, *mac_reg, mac_addr_lo);
	*mac_reg += MAC_MACA_INC;
}
static void
xgbe_set_mac_addn_addrs(struct xgbe_prv_data *pdata)
{
	unsigned int mac_reg;
	unsigned int addn_macs;

	mac_reg = MAC_MACA1HR;
	addn_macs = pdata->hw_feat.addn_mac;

	xgbe_set_mac_reg(pdata, pdata->mac_addr, &mac_reg);
	addn_macs--;

	/* Clear remaining additional MAC address entries */
	while (addn_macs--)
		xgbe_set_mac_reg(pdata, NULL, &mac_reg);
}

static int
xgbe_add_mac_addresses(struct xgbe_prv_data *pdata)
{
	/* TODO - add support to set mac hash table */
	xgbe_set_mac_addn_addrs(pdata);

	return (0);
}
static int
xgbe_set_mac_address(struct xgbe_prv_data *pdata, uint8_t *addr)
{
	unsigned int mac_addr_hi, mac_addr_lo;

	mac_addr_hi = (addr[5] << 8) | (addr[4] << 0);
	mac_addr_lo = (addr[3] << 24) | (addr[2] << 16) |
	    (addr[1] << 8) | (addr[0] << 0);

	XGMAC_IOWRITE(pdata, MAC_MACA0HR, mac_addr_hi);
	XGMAC_IOWRITE(pdata, MAC_MACA0LR, mac_addr_lo);

	return (0);
}
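
/*
 * Worked example (illustrative): for the address 00:11:22:33:44:55,
 * mac_addr_lo becomes 0x33221100 and mac_addr_hi becomes 0x00005544,
 * i.e. the hardware expects the six bytes packed little-endian across
 * the low register (bytes 0-3) and the high register (bytes 4-5).
 */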
static int
xgbe_config_rx_mode(struct xgbe_prv_data *pdata)
{
	unsigned int pr_mode, am_mode;

	pr_mode = ((pdata->netdev->if_flags & IFF_PPROMISC) != 0);
	am_mode = ((pdata->netdev->if_flags & IFF_ALLMULTI) != 0);

	xgbe_set_promiscuous_mode(pdata, pr_mode);
	xgbe_set_all_multicast_mode(pdata, am_mode);

	xgbe_add_mac_addresses(pdata);

	return (0);
}
static int
xgbe_clr_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return (-EINVAL);

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg &= ~(1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return (0);
}

static int
xgbe_set_gpio(struct xgbe_prv_data *pdata, unsigned int gpio)
{
	unsigned int reg;

	if (gpio > 15)
		return (-EINVAL);

	reg = XGMAC_IOREAD(pdata, MAC_GPIOSR);

	reg |= (1 << (gpio + 16));
	XGMAC_IOWRITE(pdata, MAC_GPIOSR, reg);

	return (0);
}
static int
xgbe_read_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and reading 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	mmd_data = XPCS16_IOREAD(pdata, offset);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return (mmd_data);
}
static void
xgbe_write_mmd_regs_v2(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
    int mmd_data)
{
	unsigned long flags;
	unsigned int mmd_address, index, offset;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 16-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 1 bit and writing 16 bits of data.
	 */
	mmd_address <<= 1;
	index = mmd_address & ~pdata->xpcs_window_mask;
	offset = pdata->xpcs_window + (mmd_address & pdata->xpcs_window_mask);

	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, pdata->xpcs_window_sel_reg, index);
	XPCS16_IOWRITE(pdata, offset, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int
xgbe_read_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
	unsigned long flags;
	unsigned int mmd_address;
	int mmd_data;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and reading 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	mmd_data = XPCS32_IOREAD(pdata, (mmd_address & 0xff) << 2);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);

	return (mmd_data);
}
static void
xgbe_write_mmd_regs_v1(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
    int mmd_data)
{
	unsigned int mmd_address;
	unsigned long flags;

	if (mmd_reg & MII_ADDR_C45)
		mmd_address = mmd_reg & ~MII_ADDR_C45;
	else
		mmd_address = (pdata->mdio_mmd << 16) | (mmd_reg & 0xffff);

	/* The PCS registers are accessed using mmio. The underlying APB3
	 * management interface uses indirect addressing to access the MMD
	 * register sets. This requires accessing of the PCS register in two
	 * phases, an address phase and a data phase.
	 *
	 * The mmio interface is based on 32-bit offsets and values. All
	 * register offsets must therefore be adjusted by left shifting the
	 * offset 2 bits and writing 32 bits of data.
	 */
	spin_lock_irqsave(&pdata->xpcs_lock, flags);
	XPCS32_IOWRITE(pdata, PCS_V1_WINDOW_SELECT, mmd_address >> 8);
	XPCS32_IOWRITE(pdata, (mmd_address & 0xff) << 2, mmd_data);
	spin_unlock_irqrestore(&pdata->xpcs_lock, flags);
}
static int
xgbe_read_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return (xgbe_read_mmd_regs_v1(pdata, prtad, mmd_reg));

	case XGBE_XPCS_ACCESS_V2:
	default:
		return (xgbe_read_mmd_regs_v2(pdata, prtad, mmd_reg));
	}
}

static void
xgbe_write_mmd_regs(struct xgbe_prv_data *pdata, int prtad, int mmd_reg,
    int mmd_data)
{
	switch (pdata->vdata->xpcs_access) {
	case XGBE_XPCS_ACCESS_V1:
		return (xgbe_write_mmd_regs_v1(pdata, prtad, mmd_reg,
		    mmd_data));

	case XGBE_XPCS_ACCESS_V2:
	default:
		return (xgbe_write_mmd_regs_v2(pdata, prtad, mmd_reg,
		    mmd_data));
	}
}
static unsigned int
xgbe_create_mdio_sca(int port, int reg)
{
	unsigned int mdio_sca, da;

	da = (reg & MII_ADDR_C45) ? reg >> 16 : 0;

	mdio_sca = 0;
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, RA, reg);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, PA, port);
	XGMAC_SET_BITS(mdio_sca, MAC_MDIOSCAR, DA, da);

	return (mdio_sca);
}
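
/*
 * Example (illustrative): for a clause-45 access the caller encodes the
 * MMD device address in the upper bits of reg, so DA receives reg >> 16
 * (the XGMAC_SET_BITS() field macro masks it to field width) while RA
 * keeps the low 16 register bits; for a clause-22 access DA is simply 0.
 */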
static int
xgbe_write_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg,
    uint16_t val)
{
	unsigned int mdio_sca, mdio_sccd;

	mtx_lock_spin(&pdata->mdio_mutex);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, DATA, val);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 1);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) ==
	    EWOULDBLOCK) {
		axgbe_error("%s: MDIO write error\n", __func__);
		mtx_unlock_spin(&pdata->mdio_mutex);
		return (-ETIMEDOUT);
	}

	mtx_unlock_spin(&pdata->mdio_mutex);
	return (0);
}
static int
xgbe_read_ext_mii_regs(struct xgbe_prv_data *pdata, int addr, int reg)
{
	unsigned int mdio_sca, mdio_sccd;

	mtx_lock_spin(&pdata->mdio_mutex);

	mdio_sca = xgbe_create_mdio_sca(addr, reg);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCAR, mdio_sca);

	mdio_sccd = 0;
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, CMD, 3);
	XGMAC_SET_BITS(mdio_sccd, MAC_MDIOSCCDR, BUSY, 1);
	XGMAC_IOWRITE(pdata, MAC_MDIOSCCDR, mdio_sccd);

	if (msleep_spin(pdata, &pdata->mdio_mutex, "mdio_xfer", hz / 8) ==
	    EWOULDBLOCK) {
		axgbe_error("%s: MDIO read error\n", __func__);
		mtx_unlock_spin(&pdata->mdio_mutex);
		return (-ETIMEDOUT);
	}

	mtx_unlock_spin(&pdata->mdio_mutex);

	return (XGMAC_IOREAD_BITS(pdata, MAC_MDIOSCCDR, DATA));
}
static int
xgbe_set_ext_mii_mode(struct xgbe_prv_data *pdata, unsigned int port,
    enum xgbe_mdio_mode mode)
{
	unsigned int reg_val = XGMAC_IOREAD(pdata, MAC_MDIOCL22R);

	switch (mode) {
	case XGBE_MDIO_MODE_CL22:
		if (port > XGMAC_MAX_C22_PORT)
			return (-EINVAL);
		reg_val |= (1 << port);
		break;
	case XGBE_MDIO_MODE_CL45:
		break;
	default:
		return (-EINVAL);
	}

	XGMAC_IOWRITE(pdata, MAC_MDIOCL22R, reg_val);

	return (0);
}
static int
xgbe_tx_complete(struct xgbe_ring_desc *rdesc)
{
	return (!XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, OWN));
}

static int
xgbe_disable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 0);

	axgbe_printf(0, "Receive checksum offload Disabled\n");
	return (0);
}

static int
xgbe_enable_rx_csum(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, IPC, 1);

	axgbe_printf(0, "Receive checksum offload Enabled\n");
	return (0);
}
static void
xgbe_tx_desc_reset(struct xgbe_ring_data *rdata)
{
	struct xgbe_ring_desc *rdesc = rdata->rdesc;

	/* Reset the Tx descriptor
	 *   Set buffer 1 (lo) address to zero
	 *   Set buffer 1 (hi) address to zero
	 *   Reset all other control bits (IC, TTSE, B2L & B1L)
	 *   Reset all other control bits (OWN, CTXT, FD, LD, CPC, CIC, etc)
	 */
	rdesc->desc0 = 0;
	rdesc->desc1 = 0;
	rdesc->desc2 = 0;
	rdesc->desc3 = 0;

	wmb();
}
static void
xgbe_tx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->tx_ring;
	struct xgbe_ring_data *rdata;
	int i;
	int start_index = ring->cur;

	/* Initialize all descriptors */
	for (i = 0; i < ring->rdesc_count; i++) {
		rdata = XGBE_GET_DESC_DATA(ring, i);

		/* Initialize Tx descriptor */
		xgbe_tx_desc_reset(rdata);
	}

	/* Update the total number of Tx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_HI,
	    upper_32_bits(rdata->rdata_paddr));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_TDLR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}
static void
xgbe_rx_desc_init(struct xgbe_channel *channel)
{
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	unsigned int start_index = ring->cur;

	/*
	 * Just set desc_count and the starting address of the desc list
	 * here. Rest will be done as part of the txrx path.
	 */

	/* Update the total number of Rx descriptors */
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDRLR, ring->rdesc_count - 1);

	/* Update the starting address of descriptor ring */
	rdata = XGBE_GET_DESC_DATA(ring, start_index);
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_HI,
	    upper_32_bits(rdata->rdata_paddr));
	XGMAC_DMA_IOWRITE(channel, DMA_CH_RDLR_LO,
	    lower_32_bits(rdata->rdata_paddr));
}
static int
xgbe_dev_read(struct xgbe_channel *channel)
{
	struct xgbe_prv_data *pdata = channel->pdata;
	struct xgbe_ring *ring = channel->rx_ring;
	struct xgbe_ring_data *rdata;
	struct xgbe_ring_desc *rdesc;
	struct xgbe_packet_data *packet = &ring->packet_data;
	unsigned int err, etlt, l34t = 0;

	axgbe_printf(1, "-->xgbe_dev_read: cur = %d\n", ring->cur);

	rdata = XGBE_GET_DESC_DATA(ring, ring->cur);
	rdesc = rdata->rdesc;

	/* Check for data availability */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, OWN))
		return (1);

	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CTXT)) {
		/* TODO - Timestamp Context Descriptor */

		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT, 1);
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT_NEXT, 0);
		return (0);
	}

	/* Normal Descriptor, be sure Context Descriptor bit is off */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES, CONTEXT, 0);

	/* Indicate if a Context Descriptor is next */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, CDA))
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    CONTEXT_NEXT, 1);

	/* Get the header length */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, FD)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    FIRST, 1);
		rdata->rx.hdr_len = XGMAC_GET_BITS_LE(rdesc->desc2,
		    RX_NORMAL_DESC2, HL);
		if (rdata->rx.hdr_len)
			pdata->ext_stats.rx_split_header_packets++;
	} else
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    FIRST, 0);

	/* Get the RSS hash */
	if (XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, RSV)) {
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    RSS_HASH, 1);

		packet->rss_hash = le32_to_cpu(rdesc->desc1);

		l34t = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, L34T);
		switch (l34t) {
		case RX_DESC3_L34T_IPV4_TCP:
			packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV4;
			break;
		case RX_DESC3_L34T_IPV4_UDP:
			packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV4;
			break;
		case RX_DESC3_L34T_IPV6_TCP:
			packet->rss_hash_type = M_HASHTYPE_RSS_TCP_IPV6;
			break;
		case RX_DESC3_L34T_IPV6_UDP:
			packet->rss_hash_type = M_HASHTYPE_RSS_UDP_IPV6;
			break;
		default:
			packet->rss_hash_type = M_HASHTYPE_OPAQUE;
			break;
		}
	}

	/* Not all the data has been transferred for this packet */
	if (!XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, LD)) {
		/* This is not the last of the data for this packet */
		XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
		    LAST, 0);
		return (0);
	}

	/* This is the last of the data for this packet */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
	    LAST, 1);

	/* Get the packet length */
	rdata->rx.len = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, PL);

	/* Set checksum done indicator as appropriate */
	/* TODO - add tunneling support */
	XGMAC_SET_BITS(packet->attributes, RX_PACKET_ATTRIBUTES,
	    CSUM_DONE, 1);

	/* Check for errors (only valid in last descriptor) */
	err = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ES);
	etlt = XGMAC_GET_BITS_LE(rdesc->desc3, RX_NORMAL_DESC3, ETLT);
	axgbe_printf(1, "%s: err=%u, etlt=%#x\n", __func__, err, etlt);

	if (!err || !etlt) {
		/* No error if err is 0 or etlt is 0 */
		if (etlt == 0x09) {
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, VLAN_CTAG, 1);
			packet->vlan_ctag = XGMAC_GET_BITS_LE(rdesc->desc0,
			    RX_NORMAL_DESC0, OVT);
			axgbe_printf(1, "vlan-ctag=%#06x\n", packet->vlan_ctag);
		}
	} else {
		unsigned int tnp = XGMAC_GET_BITS(packet->attributes,
		    RX_PACKET_ATTRIBUTES, TNP);

		if ((etlt == 0x05) || (etlt == 0x06)) {
			axgbe_printf(1, "%s: err1 l34t %d err 0x%x etlt 0x%x\n",
			    __func__, l34t, err, etlt);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_csum_errors++;
		} else if (tnp && ((etlt == 0x09) || (etlt == 0x0a))) {
			axgbe_printf(1, "%s: err2 l34t %d err 0x%x etlt 0x%x\n",
			    __func__, l34t, err, etlt);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, CSUM_DONE, 0);
			XGMAC_SET_BITS(packet->attributes,
			    RX_PACKET_ATTRIBUTES, TNPCSUM_DONE, 0);
			pdata->ext_stats.rx_vxlan_csum_errors++;
		} else {
			axgbe_printf(1, "%s: tnp %d l34t %d err 0x%x etlt 0x%x\n",
			    __func__, tnp, l34t, err, etlt);
			axgbe_printf(1, "%s: Channel: %d SR 0x%x DSR 0x%x \n",
			    __func__, channel->queue_index,
			    XGMAC_DMA_IOREAD(channel, DMA_CH_SR),
			    XGMAC_DMA_IOREAD(channel, DMA_CH_DSR));
			axgbe_printf(1, "%s: ring cur %d dirty %d\n",
			    __func__, ring->cur, ring->dirty);
			axgbe_printf(1, "%s: Desc 0x%08x-0x%08x-0x%08x-0x%08x\n",
			    __func__, rdesc->desc0, rdesc->desc1, rdesc->desc2,
			    rdesc->desc3);
			XGMAC_SET_BITS(packet->errors, RX_PACKET_ERRORS,
			    FRAME, 1);
		}
	}

	axgbe_printf(1, "<--xgbe_dev_read: %s - descriptor=%u (cur=%d)\n",
	    channel->name, ring->cur & (ring->rdesc_count - 1), ring->cur);

	return (0);
}
static int
xgbe_is_context_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share CTXT bit, so check TDES3.CTXT bit */
	return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, CTXT));
}

static int
xgbe_is_last_desc(struct xgbe_ring_desc *rdesc)
{
	/* Rx and Tx share LD bit, so check TDES3.LD bit */
	return (XGMAC_GET_BITS_LE(rdesc->desc3, TX_NORMAL_DESC3, LD));
}
static void
xgbe_enable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	axgbe_printf(1, "enable_int: DMA_CH_IER read - 0x%x\n",
	    channel->curr_ier);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 1);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 1);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 1);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->curr_ier |= channel->saved_ier;
		break;
	default:
		return;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	axgbe_printf(1, "enable_int: DMA_CH_IER write - 0x%x\n",
	    channel->curr_ier);
}
static void
xgbe_disable_int(struct xgbe_channel *channel, enum xgbe_int int_id)
{
	struct xgbe_prv_data *pdata = channel->pdata;

	axgbe_printf(1, "disable_int: DMA_CH_IER read - 0x%x\n",
	    channel->curr_ier);

	switch (int_id) {
	case XGMAC_INT_DMA_CH_SR_TI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TXSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RBU:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RBUE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_RPS:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RSE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_TI_RI:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, TIE, 0);
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, RIE, 0);
		break;
	case XGMAC_INT_DMA_CH_SR_FBE:
		XGMAC_SET_BITS(channel->curr_ier, DMA_CH_IER, FBEE, 0);
		break;
	case XGMAC_INT_DMA_ALL:
		channel->saved_ier = channel->curr_ier;
		channel->curr_ier = 0;
		break;
	default:
		return;
	}

	XGMAC_DMA_IOWRITE(channel, DMA_CH_IER, channel->curr_ier);

	axgbe_printf(1, "disable_int: DMA_CH_IER write - 0x%x\n",
	    channel->curr_ier);
}
static int
__xgbe_exit(struct xgbe_prv_data *pdata)
{
	unsigned int count = 2000;

	/* Issue a software reset */
	XGMAC_IOWRITE_BITS(pdata, DMA_MR, SWR, 1);
	DELAY(10);

	/* Poll Until Poll Condition */
	while (--count && XGMAC_IOREAD_BITS(pdata, DMA_MR, SWR))
		DELAY(500);

	if (!count)
		return (-EBUSY);

	return (0);
}

static int
xgbe_exit(struct xgbe_prv_data *pdata)
{
	int ret;

	/* To guard against possible incorrectly generated interrupts,
	 * issue the software reset twice.
	 */
	ret = __xgbe_exit(pdata);
	if (ret) {
		axgbe_error("%s: exit error %d\n", __func__, ret);
		return (ret);
	}

	return (__xgbe_exit(pdata));
}
static int
xgbe_flush_tx_queues(struct xgbe_prv_data *pdata)
{
	unsigned int i, count;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) < 0x21)
		return (0);

	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, FTQ, 1);

	/* Poll Until Poll Condition */
	for (i = 0; i < pdata->tx_q_count; i++) {
		count = 2000;
		while (--count && XGMAC_MTL_IOREAD_BITS(pdata, i,
		    MTL_Q_TQOMR, FTQ))
			DELAY(500);

		if (!count)
			return (-EBUSY);
	}

	return (0);
}
static void
xgbe_config_dma_bus(struct xgbe_prv_data *pdata)
{
	unsigned int sbmr;

	sbmr = XGMAC_IOREAD(pdata, DMA_SBMR);

	/* Set enhanced addressing mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, EAME, 1);

	/* Set the System Bus mode */
	XGMAC_SET_BITS(sbmr, DMA_SBMR, UNDEF, 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, BLEN, pdata->blen >> 2);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, AAL, pdata->aal);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, RD_OSR_LMT, pdata->rd_osr_limit - 1);
	XGMAC_SET_BITS(sbmr, DMA_SBMR, WR_OSR_LMT, pdata->wr_osr_limit - 1);

	XGMAC_IOWRITE(pdata, DMA_SBMR, sbmr);

	/* Set descriptor fetching threshold */
	if (pdata->vdata->tx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_TXEDMACR, TDPS,
		    pdata->vdata->tx_desc_prefetch);

	if (pdata->vdata->rx_desc_prefetch)
		XGMAC_IOWRITE_BITS(pdata, DMA_RXEDMACR, RDPS,
		    pdata->vdata->rx_desc_prefetch);
}
static void
xgbe_config_dma_cache(struct xgbe_prv_data *pdata)
{
	XGMAC_IOWRITE(pdata, DMA_AXIARCR, pdata->arcr);
	XGMAC_IOWRITE(pdata, DMA_AXIAWCR, pdata->awcr);
	if (pdata->awarcr)
		XGMAC_IOWRITE(pdata, DMA_AXIAWARCR, pdata->awarcr);
}
static void
xgbe_config_mtl_mode(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Set Tx to weighted round robin scheduling algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, ETSALG, MTL_ETSALG_WRR);

	/* Set Tx traffic classes to use WRR algorithm with equal weights */
	for (i = 0; i < pdata->hw_feat.tc_cnt; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_ETSCR, TSA,
		    MTL_TSA_ETS);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_TC_QWR, QW, 1);
	}

	/* Set Rx to strict priority algorithm */
	XGMAC_IOWRITE_BITS(pdata, MTL_OMR, RAA, MTL_RAA_SP);
}
static void
xgbe_queue_flow_control_threshold(struct xgbe_prv_data *pdata,
    unsigned int queue, unsigned int q_fifo_size)
{
	unsigned int frame_fifo_size;
	unsigned int rfa, rfd;

	frame_fifo_size = XGMAC_FLOW_CONTROL_ALIGN(xgbe_get_max_frame(pdata));
	axgbe_printf(1, "%s: queue %d q_fifo_size %d frame_fifo_size 0x%x\n",
	    __func__, queue, q_fifo_size, frame_fifo_size);

	/* TODO - add pfc/ets related support */

	/* This path deals with just maximum frame sizes which are
	 * limited to a jumbo frame of 9,000 (plus headers, etc.)
	 * so we can never exceed the maximum allowable RFA/RFD
	 * values.
	 */
	if (q_fifo_size <= 2048) {
		/* rx_rfd to zero to signal no flow control */
		pdata->rx_rfa[queue] = 0;
		pdata->rx_rfd[queue] = 0;
		return;
	}

	if (q_fifo_size <= 4096) {
		/* Between 2048 and 4096 */
		pdata->rx_rfa[queue] = 0;	/* Full - 1024 bytes */
		pdata->rx_rfd[queue] = 1;	/* Full - 1536 bytes */
		return;
	}

	if (q_fifo_size <= frame_fifo_size) {
		/* Between 4096 and max-frame */
		pdata->rx_rfa[queue] = 2;	/* Full - 2048 bytes */
		pdata->rx_rfd[queue] = 5;	/* Full - 3584 bytes */
		return;
	}

	if (q_fifo_size <= (frame_fifo_size * 3)) {
		/* Between max-frame and 3 max-frames,
		 * trigger if we get just over a frame of data and
		 * resume when we have just under half a frame left.
		 */
		rfa = q_fifo_size - frame_fifo_size;
		rfd = rfa + (frame_fifo_size / 2);
	} else {
		/* Above 3 max-frames - trigger when just over
		 * 2 frames of space available
		 */
		rfa = frame_fifo_size * 2;
		rfa += XGMAC_FLOW_CONTROL_UNIT;
		rfd = rfa + frame_fifo_size;
	}

	pdata->rx_rfa[queue] = XGMAC_FLOW_CONTROL_VALUE(rfa);
	pdata->rx_rfd[queue] = XGMAC_FLOW_CONTROL_VALUE(rfd);
	axgbe_printf(1, "%s: forced queue %d rfa 0x%x rfd 0x%x\n", __func__,
	    queue, pdata->rx_rfa[queue], pdata->rx_rfd[queue]);
}
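
/*
 * Worked example (illustrative, assuming the usual 512-byte
 * XGMAC_FLOW_CONTROL_UNIT): with a 16384-byte queue fifo and a
 * frame_fifo_size of 2048, q_fifo_size > 3 * frame_fifo_size, so
 * rfa = 2 * 2048 + 512 = 4608 and rfd = 4608 + 2048 = 6656 bytes before
 * XGMAC_FLOW_CONTROL_VALUE() scales them into register units.
 */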
static void
xgbe_calculate_flow_control_threshold(struct xgbe_prv_data *pdata,
    unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		q_fifo_size = (fifo[i] + 1) * XGMAC_FIFO_UNIT;

		axgbe_printf(1, "%s: fifo[%d] - 0x%x q_fifo_size 0x%x\n",
		    __func__, i, fifo[i], q_fifo_size);
		xgbe_queue_flow_control_threshold(pdata, i, q_fifo_size);
	}
}
static void
xgbe_config_flow_control_threshold(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	for (i = 0; i < pdata->rx_q_count; i++) {
		axgbe_printf(1, "%s: queue %d rfa %d rfd %d\n", __func__, i,
		    pdata->rx_rfa[i], pdata->rx_rfd[i]);

		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFA,
		    pdata->rx_rfa[i]);
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQFCR, RFD,
		    pdata->rx_rfd[i]);

		axgbe_printf(1, "%s: MTL_Q_RQFCR 0x%x\n", __func__,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQFCR));
	}
}
static unsigned int
xgbe_get_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return (min_t(unsigned int, pdata->tx_max_fifo_size,
	    pdata->hw_feat.tx_fifo_size));
}

static unsigned int
xgbe_get_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	/* The configured value may not be the actual amount of fifo RAM */
	return (min_t(unsigned int, pdata->rx_max_fifo_size,
	    pdata->hw_feat.rx_fifo_size));
}
static void
xgbe_calculate_equal_fifo(unsigned int fifo_size, unsigned int queue_count,
    unsigned int *fifo)
{
	unsigned int q_fifo_size;
	unsigned int p_fifo;
	unsigned int i;

	q_fifo_size = fifo_size / queue_count;

	/* Calculate the fifo setting by dividing the queue's fifo size
	 * by the fifo allocation increment (with 0 representing the
	 * base allocation increment so decrement the result by 1).
	 */
	p_fifo = q_fifo_size / XGMAC_FIFO_UNIT;
	if (p_fifo)
		p_fifo--;

	/* Distribute the fifo equally amongst the queues */
	for (i = 0; i < queue_count; i++)
		fifo[i] = p_fifo;
}
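
/*
 * Worked example (illustrative, assuming a 256-byte XGMAC_FIFO_UNIT): a
 * 65536-byte fifo split over 8 queues gives q_fifo_size = 8192 and
 * p_fifo = 8192 / 256 - 1 = 31, so each queue's size field is programmed
 * as 31 ("32 units" with the hardware's off-by-one encoding).
 */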
static unsigned int
xgbe_set_nonprio_fifos(unsigned int fifo_size, unsigned int queue_count,
    unsigned int *fifo)
{
	unsigned int i;

	MPASS(powerof2(XGMAC_FIFO_MIN_ALLOC));

	if (queue_count <= IEEE_8021QAZ_MAX_TCS)
		return (fifo_size);

	/* Rx queues 9 and up are for specialized packets,
	 * such as PTP or DCB control packets, etc. and
	 * don't require a large fifo
	 */
	for (i = IEEE_8021QAZ_MAX_TCS; i < queue_count; i++) {
		fifo[i] = (XGMAC_FIFO_MIN_ALLOC / XGMAC_FIFO_UNIT) - 1;
		fifo_size -= XGMAC_FIFO_MIN_ALLOC;
	}

	return (fifo_size);
}
static void
xgbe_config_tx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int i;

	fifo_size = xgbe_get_tx_fifo_size(pdata);
	axgbe_printf(1, "%s: fifo_size 0x%x\n", __func__, fifo_size);

	xgbe_calculate_equal_fifo(fifo_size, pdata->tx_q_count, fifo);

	for (i = 0; i < pdata->tx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TQS, fifo[i]);
		axgbe_printf(1, "Tx q %d FIFO Size 0x%x\n", i,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_TQOMR));
	}

	axgbe_printf(1, "%d Tx hardware queues, %d byte fifo per queue\n",
	    pdata->tx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}
static void
xgbe_config_rx_fifo_size(struct xgbe_prv_data *pdata)
{
	unsigned int fifo_size;
	unsigned int fifo[XGBE_MAX_QUEUES];
	unsigned int prio_queues;
	unsigned int i;

	/* TODO - add pfc/ets related support */

	/* Clear any DCB related fifo/queue information */
	fifo_size = xgbe_get_rx_fifo_size(pdata);
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	axgbe_printf(1, "%s: fifo_size 0x%x rx_q_cnt %d prio %d\n", __func__,
	    fifo_size, pdata->rx_q_count, prio_queues);

	/* Assign a minimum fifo to the non-VLAN priority queues */
	fifo_size = xgbe_set_nonprio_fifos(fifo_size, pdata->rx_q_count, fifo);

	xgbe_calculate_equal_fifo(fifo_size, prio_queues, fifo);

	for (i = 0; i < pdata->rx_q_count; i++) {
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_RQOMR, RQS, fifo[i]);
		axgbe_printf(1, "Rx q %d FIFO Size 0x%x\n", i,
		    XGMAC_MTL_IOREAD(pdata, i, MTL_Q_RQOMR));
	}

	xgbe_calculate_flow_control_threshold(pdata, fifo);
	xgbe_config_flow_control_threshold(pdata);

	axgbe_printf(1, "%u Rx hardware queues, %u byte fifo/queue\n",
	    pdata->rx_q_count, ((fifo[0] + 1) * XGMAC_FIFO_UNIT));
}
static void
xgbe_config_queue_mapping(struct xgbe_prv_data *pdata)
{
	unsigned int qptc, qptc_extra, queue;
	unsigned int prio_queues;
	unsigned int ppq, ppq_extra, prio;
	unsigned int mask;
	unsigned int i, j, reg, reg_val;

	/* Map the MTL Tx Queues to Traffic Classes
	 *   Note: Tx Queues >= Traffic Classes
	 */
	qptc = pdata->tx_q_count / pdata->hw_feat.tc_cnt;
	qptc_extra = pdata->tx_q_count % pdata->hw_feat.tc_cnt;

	for (i = 0, queue = 0; i < pdata->hw_feat.tc_cnt; i++) {
		for (j = 0; j < qptc; j++) {
			axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			    Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}

		if (i < qptc_extra) {
			axgbe_printf(1, "TXq%u mapped to TC%u\n", queue, i);
			XGMAC_MTL_IOWRITE_BITS(pdata, queue, MTL_Q_TQOMR,
			    Q2TCMAP, i);
			pdata->q2tc_map[queue++] = i;
		}
	}

	/* Map the 8 VLAN priority values to available MTL Rx queues */
	prio_queues = XGMAC_PRIO_QUEUES(pdata->rx_q_count);
	ppq = IEEE_8021QAZ_MAX_TCS / prio_queues;
	ppq_extra = IEEE_8021QAZ_MAX_TCS % prio_queues;

	reg = MAC_RQC2R;
	reg_val = 0;
	for (i = 0, prio = 0; i < prio_queues;) {
		mask = 0;
		for (j = 0; j < ppq; j++) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		if (i < ppq_extra) {
			axgbe_printf(1, "PRIO%u mapped to RXq%u\n", prio, i);
			mask |= (1 << prio);
			pdata->prio2q_map[prio++] = i;
		}

		reg_val |= (mask << ((i++ % MAC_RQC2_Q_PER_REG) << 3));

		if ((i % MAC_RQC2_Q_PER_REG) && (i != prio_queues))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);
		reg += MAC_RQC2_INC;
		reg_val = 0;
	}

	/* Select dynamic mapping of MTL Rx queue to DMA Rx channel */
	reg = MTL_RQDCM0R;
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count;) {
		reg_val |= (0x80 << ((i++ % MTL_RQDCM_Q_PER_REG) << 3));

		if ((i % MTL_RQDCM_Q_PER_REG) && (i != pdata->rx_q_count))
			continue;

		XGMAC_IOWRITE(pdata, reg, reg_val);

		reg += MTL_RQDCM_INC;
		reg_val = 0;
	}
}
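
/*
 * Worked example (illustrative): if the rx queue count maps to
 * prio_queues = 2, then ppq = 8 / 2 = 4 and ppq_extra = 0, so VLAN
 * priorities 0-3 land on RXq0 and priorities 4-7 on RXq1, with each
 * queue's priority mask packed 8 bits apart into the MAC_RQC2R registers.
 */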
static void
xgbe_config_mac_address(struct xgbe_prv_data *pdata)
{
	xgbe_set_mac_address(pdata, IF_LLADDR(pdata->netdev));

	/* Filtering is done using perfect filtering and hash filtering */
	if (pdata->hw_feat.hash_table_size) {
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HPF, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HUC, 1);
		XGMAC_IOWRITE_BITS(pdata, MAC_PFR, HMC, 1);
	}
}
static void
xgbe_config_jumbo_enable(struct xgbe_prv_data *pdata)
{
	unsigned int val;

	val = (if_getmtu(pdata->netdev) > XGMAC_STD_PACKET_MTU) ? 1 : 0;

	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, JE, val);
}

static void
xgbe_config_mac_speed(struct xgbe_prv_data *pdata)
{
	xgbe_set_speed(pdata, pdata->phy_speed);
}

static void
xgbe_config_checksum_offload(struct xgbe_prv_data *pdata)
{
	if ((if_getcapenable(pdata->netdev) & IFCAP_RXCSUM))
		xgbe_enable_rx_csum(pdata);
	else
		xgbe_disable_rx_csum(pdata);
}
static void
xgbe_config_vlan_support(struct xgbe_prv_data *pdata)
{
	/* Indicate that VLAN Tx CTAGs come from context descriptors */
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, CSVL, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_VLANIR, VLTI, 1);

	/* Set the current VLAN Hash Table register value */
	xgbe_update_vlan_hash_table(pdata);

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWFILTER)) {
		axgbe_printf(1, "Enabling rx vlan filtering\n");
		xgbe_enable_rx_vlan_filtering(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan filtering\n");
		xgbe_disable_rx_vlan_filtering(pdata);
	}

	if ((if_getcapenable(pdata->netdev) & IFCAP_VLAN_HWTAGGING)) {
		axgbe_printf(1, "Enabling rx vlan stripping\n");
		xgbe_enable_rx_vlan_stripping(pdata);
	} else {
		axgbe_printf(1, "Disabling rx vlan stripping\n");
		xgbe_disable_rx_vlan_stripping(pdata);
	}
}
static uint64_t
xgbe_mmc_read(struct xgbe_prv_data *pdata, unsigned int reg_lo)
{
	bool read_hi;
	uint64_t val;

	if (pdata->vdata->mmc_64bit) {
		switch (reg_lo) {
		/* These registers are always 32 bit */
		case MMC_RXRUNTERROR:
		case MMC_RXJABBERERROR:
		case MMC_RXUNDERSIZE_G:
		case MMC_RXOVERSIZE_G:
		case MMC_RXWATCHDOGERROR:
			read_hi = false;
			break;

		default:
			read_hi = true;
		}
	} else {
		switch (reg_lo) {
		/* These registers are always 64 bit */
		case MMC_TXOCTETCOUNT_GB_LO:
		case MMC_TXOCTETCOUNT_G_LO:
		case MMC_RXOCTETCOUNT_GB_LO:
		case MMC_RXOCTETCOUNT_G_LO:
			read_hi = true;
			break;

		default:
			read_hi = false;
		}
	}

	val = XGMAC_IOREAD(pdata, reg_lo);

	if (read_hi)
		val |= ((uint64_t)XGMAC_IOREAD(pdata, reg_lo + 4) << 32);

	return (val);
}
static void
xgbe_tx_mmc_int(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
	unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_TISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_GB))
		stats->txoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_GB))
		stats->txframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_G))
		stats->txbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_G))
		stats->txmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX64OCTETS_GB))
		stats->tx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX65TO127OCTETS_GB))
		stats->tx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX128TO255OCTETS_GB))
		stats->tx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX256TO511OCTETS_GB))
		stats->tx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX512TO1023OCTETS_GB))
		stats->tx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TX1024TOMAXOCTETS_GB))
		stats->tx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNICASTFRAMES_GB))
		stats->txunicastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXMULTICASTFRAMES_GB))
		stats->txmulticastframes_gb +=
		    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXBROADCASTFRAMES_GB))
		stats->txbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXUNDERFLOWERROR))
		stats->txunderflowerror +=
		    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXOCTETCOUNT_G))
		stats->txoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXFRAMECOUNT_G))
		stats->txframecount_g +=
		    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXPAUSEFRAMES))
		stats->txpauseframes +=
		    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_TISR, TXVLANFRAMES_G))
		stats->txvlanframes_g +=
		    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);
}
2217 xgbe_rx_mmc_int(struct xgbe_prv_data *pdata)
2219 struct xgbe_mmc_stats *stats = &pdata->mmc_stats;
2220 unsigned int mmc_isr = XGMAC_IOREAD(pdata, MMC_RISR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFRAMECOUNT_GB))
		stats->rxframecount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_GB))
		stats->rxoctetcount_gb +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOCTETCOUNT_G))
		stats->rxoctetcount_g +=
		    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXBROADCASTFRAMES_G))
		stats->rxbroadcastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXMULTICASTFRAMES_G))
		stats->rxmulticastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXCRCERROR))
		stats->rxcrcerror +=
		    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXRUNTERROR))
		stats->rxrunterror +=
		    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXJABBERERROR))
		stats->rxjabbererror +=
		    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNDERSIZE_G))
		stats->rxundersize_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOVERSIZE_G))
		stats->rxoversize_g +=
		    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX64OCTETS_GB))
		stats->rx64octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX65TO127OCTETS_GB))
		stats->rx65to127octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX128TO255OCTETS_GB))
		stats->rx128to255octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX256TO511OCTETS_GB))
		stats->rx256to511octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX512TO1023OCTETS_GB))
		stats->rx512to1023octets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RX1024TOMAXOCTETS_GB))
		stats->rx1024tomaxoctets_gb +=
		    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXUNICASTFRAMES_G))
		stats->rxunicastframes_g +=
		    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXLENGTHERROR))
		stats->rxlengtherror +=
		    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXOUTOFRANGETYPE))
		stats->rxoutofrangetype +=
		    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXPAUSEFRAMES))
		stats->rxpauseframes +=
		    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXFIFOOVERFLOW))
		stats->rxfifooverflow +=
		    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXVLANFRAMES_GB))
		stats->rxvlanframes_gb +=
		    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	if (XGMAC_GET_BITS(mmc_isr, MMC_RISR, RXWATCHDOGERROR))
		stats->rxwatchdogerror +=
		    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);
}
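
/*
 * Snapshot every MMC counter into the software stats. MCF (MMC counter
 * freeze) is held across the reads so the counters cannot advance
 * mid-snapshot, and because the counters reset on read (ROR, set in
 * xgbe_config_mmc() below), the "+=" accumulation never double counts.
 */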
static void
xgbe_read_mmc_stats(struct xgbe_prv_data *pdata)
{
	struct xgbe_mmc_stats *stats = &pdata->mmc_stats;

	/* Freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 1);

	stats->txoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_GB_LO);

	stats->txframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_GB_LO);

	stats->txbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_G_LO);

	stats->txmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_G_LO);

	stats->tx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX64OCTETS_GB_LO);

	stats->tx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX65TO127OCTETS_GB_LO);

	stats->tx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX128TO255OCTETS_GB_LO);

	stats->tx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX256TO511OCTETS_GB_LO);

	stats->tx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX512TO1023OCTETS_GB_LO);

	stats->tx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_TX1024TOMAXOCTETS_GB_LO);

	stats->txunicastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXUNICASTFRAMES_GB_LO);

	stats->txmulticastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXMULTICASTFRAMES_GB_LO);

	stats->txbroadcastframes_gb +=
	    xgbe_mmc_read(pdata, MMC_TXBROADCASTFRAMES_GB_LO);

	stats->txunderflowerror +=
	    xgbe_mmc_read(pdata, MMC_TXUNDERFLOWERROR_LO);

	stats->txoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_TXOCTETCOUNT_G_LO);

	stats->txframecount_g +=
	    xgbe_mmc_read(pdata, MMC_TXFRAMECOUNT_G_LO);

	stats->txpauseframes +=
	    xgbe_mmc_read(pdata, MMC_TXPAUSEFRAMES_LO);

	stats->txvlanframes_g +=
	    xgbe_mmc_read(pdata, MMC_TXVLANFRAMES_G_LO);

	stats->rxframecount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXFRAMECOUNT_GB_LO);

	stats->rxoctetcount_gb +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_GB_LO);

	stats->rxoctetcount_g +=
	    xgbe_mmc_read(pdata, MMC_RXOCTETCOUNT_G_LO);

	stats->rxbroadcastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXBROADCASTFRAMES_G_LO);

	stats->rxmulticastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXMULTICASTFRAMES_G_LO);

	stats->rxcrcerror +=
	    xgbe_mmc_read(pdata, MMC_RXCRCERROR_LO);

	stats->rxrunterror +=
	    xgbe_mmc_read(pdata, MMC_RXRUNTERROR);

	stats->rxjabbererror +=
	    xgbe_mmc_read(pdata, MMC_RXJABBERERROR);

	stats->rxundersize_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNDERSIZE_G);

	stats->rxoversize_g +=
	    xgbe_mmc_read(pdata, MMC_RXOVERSIZE_G);

	stats->rx64octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX64OCTETS_GB_LO);

	stats->rx65to127octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX65TO127OCTETS_GB_LO);

	stats->rx128to255octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX128TO255OCTETS_GB_LO);

	stats->rx256to511octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX256TO511OCTETS_GB_LO);

	stats->rx512to1023octets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX512TO1023OCTETS_GB_LO);

	stats->rx1024tomaxoctets_gb +=
	    xgbe_mmc_read(pdata, MMC_RX1024TOMAXOCTETS_GB_LO);

	stats->rxunicastframes_g +=
	    xgbe_mmc_read(pdata, MMC_RXUNICASTFRAMES_G_LO);

	stats->rxlengtherror +=
	    xgbe_mmc_read(pdata, MMC_RXLENGTHERROR_LO);

	stats->rxoutofrangetype +=
	    xgbe_mmc_read(pdata, MMC_RXOUTOFRANGETYPE_LO);

	stats->rxpauseframes +=
	    xgbe_mmc_read(pdata, MMC_RXPAUSEFRAMES_LO);

	stats->rxfifooverflow +=
	    xgbe_mmc_read(pdata, MMC_RXFIFOOVERFLOW_LO);

	stats->rxvlanframes_gb +=
	    xgbe_mmc_read(pdata, MMC_RXVLANFRAMES_GB_LO);

	stats->rxwatchdogerror +=
	    xgbe_mmc_read(pdata, MMC_RXWATCHDOGERROR);

	/* Un-freeze counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, MCF, 0);
}
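
/*
 * One-time MMC setup: ROR makes every counter self-clearing on read,
 * which is what allows the interrupt handlers and
 * xgbe_read_mmc_stats() above to accumulate with "+=". CR then resets
 * all counters so the software stats start from a known-zero baseline.
 */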
static void
xgbe_config_mmc(struct xgbe_prv_data *pdata)
{
	/* Set counters to reset on read */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, ROR, 1);

	/* Reset the counters */
	XGMAC_IOWRITE_BITS(pdata, MMC_CR, CR, 1);
}
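
/*
 * Queue-based Tx stop wait, used on newer hardware (Synopsys version
 * above 0x20, see xgbe_prepare_tx_stop() below): poll the MTL queue
 * debug register until the read/transfer controller is idle
 * (TRCSTS != 1) and the queue FIFO reports empty (TXQSTS == 0),
 * giving up after XGBE_DMA_STOP_TIMEOUT seconds.
 */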
static void
xgbe_txq_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_status;
	unsigned long tx_timeout;

	/* The Tx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Tx queue to empty the Tx fifo. Don't
	 * wait forever though...
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_TQDR);
		if ((XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TRCSTS) != 1) &&
		    (XGMAC_GET_BITS(tx_status, MTL_Q_TQDR, TXQSTS) == 0))
			break;

		DELAY(500);	/* pause briefly between polls */
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1, "timed out waiting for Tx queue %u to empty\n",
		    queue);
}
static void
xgbe_prepare_tx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int tx_dsr, tx_pos, tx_qidx;
	unsigned int tx_status;
	unsigned long tx_timeout;

	if (XGMAC_GET_BITS(pdata->hw_feat.version, MAC_VR, SNPSVER) > 0x20)
		return (xgbe_txq_prepare_tx_stop(pdata, queue));

	/* Calculate the status register to read and the position within */
	if (queue < DMA_DSRX_FIRST_QUEUE) {
		tx_dsr = DMA_DSR0;
		tx_pos = (queue * DMA_DSR_Q_WIDTH) + DMA_DSR0_TPS_START;
	} else {
		tx_qidx = queue - DMA_DSRX_FIRST_QUEUE;

		tx_dsr = DMA_DSR1 + ((tx_qidx / DMA_DSRX_QPR) * DMA_DSRX_INC);
		tx_pos = ((tx_qidx % DMA_DSRX_QPR) * DMA_DSR_Q_WIDTH) +
		    DMA_DSRX_TPS_START;
	}
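
	/*
	 * Worked example, assuming the usual xgbe-common.h values
	 * (DMA_DSRX_FIRST_QUEUE == 3, DMA_DSRX_QPR == 4,
	 * DMA_DSR_Q_WIDTH == 8, DMA_DSRX_TPS_START == 4): queue 5 yields
	 * tx_qidx = 2, so tx_dsr is DMA_DSR1 and
	 * tx_pos = (2 * 8) + 4 = 20, i.e. bits 23:20 of DMA_DSR1 hold
	 * that channel's Tx process state.
	 */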

	/* The Tx engine cannot be stopped if it is actively processing
	 * descriptors. Wait for the Tx engine to enter the stopped or
	 * suspended state. Don't wait forever though...
	 */
	tx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < tx_timeout) {
		tx_status = XGMAC_IOREAD(pdata, tx_dsr);
		tx_status = GET_BITS(tx_status, tx_pos, DMA_DSR_TPS_WIDTH);
		if ((tx_status == DMA_TPS_STOPPED) ||
		    (tx_status == DMA_TPS_SUSPENDED))
			break;

		DELAY(500);	/* pause briefly between polls */
	}

	if (ticks >= tx_timeout)
		axgbe_printf(1, "timed out waiting for Tx DMA channel %u to stop\n",
		    queue);
}
static void
xgbe_enable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN,
		    MTL_Q_ENABLED);

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
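
/*
 * Note the bring-up order above: Tx DMA channels first, then the MTL
 * queues, and the MAC transmitter (TE) last, so each stage a frame
 * reaches is already able to hand it on. xgbe_disable_tx() below first
 * waits for in-flight descriptors to drain via xgbe_prepare_tx_stop()
 * before tearing the path down.
 */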
static void
xgbe_disable_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx queue */
	for (i = 0; i < pdata->tx_q_count; i++)
		XGMAC_MTL_IOWRITE_BITS(pdata, i, MTL_Q_TQOMR, TXQEN, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}
static void
xgbe_prepare_rx_stop(struct xgbe_prv_data *pdata, unsigned int queue)
{
	unsigned int rx_status;
	unsigned long rx_timeout;

	/* The Rx engine cannot be stopped if it is actively processing
	 * packets. Wait for the Rx queue to empty the Rx fifo. Don't
	 * wait forever though...
	 */
	rx_timeout = ticks + (XGBE_DMA_STOP_TIMEOUT * hz);
	while (ticks < rx_timeout) {
		rx_status = XGMAC_MTL_IOREAD(pdata, queue, MTL_Q_RQDR);
		if ((XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, PRXQ) == 0) &&
		    (XGMAC_GET_BITS(rx_status, MTL_Q_RQDR, RXQSTS) == 0))
			break;

		DELAY(500);	/* pause briefly between polls */
	}

	if (ticks >= rx_timeout)
		axgbe_printf(1, "timed out waiting for Rx queue %u to empty\n",
		    queue);
}
static void
xgbe_enable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int reg_val, i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}

	/* Enable each Rx queue */
	reg_val = 0;
	for (i = 0; i < pdata->rx_q_count; i++)
		reg_val |= (0x02 << (i << 1));
	XGMAC_IOWRITE(pdata, MAC_RQC0R, reg_val);
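
	/*
	 * MAC_RQC0R carries a 2-bit enable field per Rx queue, so queue
	 * i occupies bits [2i+1:2i] (hence the i << 1 shift). Writing
	 * 0x02 enables a queue for generic/DCB routed traffic; 0x00
	 * would leave it disabled (0x01 is the AV encoding in the
	 * Synopsys databook).
	 */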

	/* Enable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 1);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 1);
}
static void
xgbe_disable_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable MAC Rx */
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, DCRCC, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, CST, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, ACS, 0);
	XGMAC_IOWRITE_BITS(pdata, MAC_RCR, RE, 0);

	/* Prepare for Rx DMA channel stop */
	for (i = 0; i < pdata->rx_q_count; i++)
		xgbe_prepare_rx_stop(pdata, i);

	/* Disable each Rx queue */
	XGMAC_IOWRITE(pdata, MAC_RQC0R, 0);

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}
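
/*
 * Rx teardown order: the MAC receiver is disabled first so no new
 * frames enter, each queue is then given time to drain via
 * xgbe_prepare_rx_stop(), and only afterwards are the queue enables
 * and the Rx DMA channels cleared.
 */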
static void
xgbe_powerup_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 1);
	}

	/* Enable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 1);
}
static void
xgbe_powerdown_tx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Prepare for Tx DMA channel stop */
	for (i = 0; i < pdata->tx_q_count; i++)
		xgbe_prepare_tx_stop(pdata, i);

	/* Disable MAC Tx */
	XGMAC_IOWRITE_BITS(pdata, MAC_TCR, TE, 0);

	/* Disable each Tx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->tx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_TCR, ST, 0);
	}
}
static void
xgbe_powerup_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Enable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 1);
	}
}
static void
xgbe_powerdown_rx(struct xgbe_prv_data *pdata)
{
	unsigned int i;

	/* Disable each Rx DMA channel */
	for (i = 0; i < pdata->channel_count; i++) {
		if (!pdata->channel[i]->rx_ring)
			break;

		XGMAC_DMA_IOWRITE_BITS(pdata->channel[i], DMA_CH_RCR, SR, 0);
	}
}
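
/*
 * The powerup/powerdown variants above are the lightweight power
 * management counterparts of enable/disable: they start or stop only
 * the DMA channels (plus the MAC transmitter on the Tx side) and leave
 * the MTL queue configuration intact, so traffic can resume without a
 * full reconfiguration.
 */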
static int
xgbe_init(struct xgbe_prv_data *pdata)
{
	struct xgbe_desc_if *desc_if = &pdata->desc_if;
	int ret;

	/* Flush Tx queues */
	ret = xgbe_flush_tx_queues(pdata);
	if (ret) {
		axgbe_error("error flushing TX queues\n");
		return (ret);
	}

	/*
	 * Initialize DMA related features
	 */
	xgbe_config_dma_bus(pdata);
	xgbe_config_dma_cache(pdata);
	xgbe_config_osp_mode(pdata);
	xgbe_config_pbl_val(pdata);
	xgbe_config_rx_coalesce(pdata);
	xgbe_config_tx_coalesce(pdata);
	xgbe_config_rx_buffer_size(pdata);
	xgbe_config_tso_mode(pdata);
	xgbe_config_sph_mode(pdata);
	xgbe_config_rss(pdata);
	desc_if->wrapper_tx_desc_init(pdata);
	desc_if->wrapper_rx_desc_init(pdata);
	xgbe_enable_dma_interrupts(pdata);

	/*
	 * Initialize MTL related features
	 */
	xgbe_config_mtl_mode(pdata);
	xgbe_config_queue_mapping(pdata);
	xgbe_config_tsf_mode(pdata, pdata->tx_sf_mode);
	xgbe_config_rsf_mode(pdata, pdata->rx_sf_mode);
	xgbe_config_tx_threshold(pdata, pdata->tx_threshold);
	xgbe_config_rx_threshold(pdata, pdata->rx_threshold);
	xgbe_config_tx_fifo_size(pdata);
	xgbe_config_rx_fifo_size(pdata);
	/* TODO: Error Packet and undersized good Packet forwarding enable
	 * (FEP and FUP)
	 */
	xgbe_enable_mtl_interrupts(pdata);

	/*
	 * Initialize MAC related features
	 */
	xgbe_config_mac_address(pdata);
	xgbe_config_rx_mode(pdata);
	xgbe_config_jumbo_enable(pdata);
	xgbe_config_flow_control(pdata);
	xgbe_config_mac_speed(pdata);
	xgbe_config_checksum_offload(pdata);
	xgbe_config_vlan_support(pdata);
	xgbe_config_mmc(pdata);
	xgbe_enable_mac_interrupts(pdata);

	return (0);
}
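
/*
 * xgbe_init() programs the device bottom-up (DMA, then MTL, then MAC)
 * and returns 0 on success, or the xgbe_flush_tx_queues() error if the
 * initial flush failed. The function pointer table filled in below is
 * how the rest of the driver reaches these routines.
 */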
void
xgbe_init_function_ptrs_dev(struct xgbe_hw_if *hw_if)
{
	hw_if->tx_complete = xgbe_tx_complete;

	hw_if->set_mac_address = xgbe_set_mac_address;
	hw_if->config_rx_mode = xgbe_config_rx_mode;

	hw_if->enable_rx_csum = xgbe_enable_rx_csum;
	hw_if->disable_rx_csum = xgbe_disable_rx_csum;

	hw_if->enable_rx_vlan_stripping = xgbe_enable_rx_vlan_stripping;
	hw_if->disable_rx_vlan_stripping = xgbe_disable_rx_vlan_stripping;
	hw_if->enable_rx_vlan_filtering = xgbe_enable_rx_vlan_filtering;
	hw_if->disable_rx_vlan_filtering = xgbe_disable_rx_vlan_filtering;
	hw_if->update_vlan_hash_table = xgbe_update_vlan_hash_table;

	hw_if->read_mmd_regs = xgbe_read_mmd_regs;
	hw_if->write_mmd_regs = xgbe_write_mmd_regs;

	hw_if->set_speed = xgbe_set_speed;

	hw_if->set_ext_mii_mode = xgbe_set_ext_mii_mode;
	hw_if->read_ext_mii_regs = xgbe_read_ext_mii_regs;
	hw_if->write_ext_mii_regs = xgbe_write_ext_mii_regs;

	hw_if->set_gpio = xgbe_set_gpio;
	hw_if->clr_gpio = xgbe_clr_gpio;

	hw_if->enable_tx = xgbe_enable_tx;
	hw_if->disable_tx = xgbe_disable_tx;
	hw_if->enable_rx = xgbe_enable_rx;
	hw_if->disable_rx = xgbe_disable_rx;

	hw_if->powerup_tx = xgbe_powerup_tx;
	hw_if->powerdown_tx = xgbe_powerdown_tx;
	hw_if->powerup_rx = xgbe_powerup_rx;
	hw_if->powerdown_rx = xgbe_powerdown_rx;

	hw_if->dev_read = xgbe_dev_read;
	hw_if->enable_int = xgbe_enable_int;
	hw_if->disable_int = xgbe_disable_int;
	hw_if->init = xgbe_init;
	hw_if->exit = xgbe_exit;

	/* Descriptor related sequences have to be initialized here */
	hw_if->tx_desc_init = xgbe_tx_desc_init;
	hw_if->rx_desc_init = xgbe_rx_desc_init;
	hw_if->tx_desc_reset = xgbe_tx_desc_reset;
	hw_if->is_last_desc = xgbe_is_last_desc;
	hw_if->is_context_desc = xgbe_is_context_desc;

	/* For FLOW ctrl */
	hw_if->config_tx_flow_control = xgbe_config_tx_flow_control;
	hw_if->config_rx_flow_control = xgbe_config_rx_flow_control;

	/* For RX coalescing */
	hw_if->config_rx_coalesce = xgbe_config_rx_coalesce;
	hw_if->config_tx_coalesce = xgbe_config_tx_coalesce;
	hw_if->usec_to_riwt = xgbe_usec_to_riwt;
	hw_if->riwt_to_usec = xgbe_riwt_to_usec;

	/* For RX and TX threshold config */
	hw_if->config_rx_threshold = xgbe_config_rx_threshold;
	hw_if->config_tx_threshold = xgbe_config_tx_threshold;

	/* For RX and TX Store and Forward Mode config */
	hw_if->config_rsf_mode = xgbe_config_rsf_mode;
	hw_if->config_tsf_mode = xgbe_config_tsf_mode;

	/* For TX DMA Operating on Second Frame config */
	hw_if->config_osp_mode = xgbe_config_osp_mode;

	/* For MMC statistics support */
	hw_if->tx_mmc_int = xgbe_tx_mmc_int;
	hw_if->rx_mmc_int = xgbe_rx_mmc_int;
	hw_if->read_mmc_stats = xgbe_read_mmc_stats;

	/* For Receive Side Scaling */
	hw_if->enable_rss = xgbe_enable_rss;
	hw_if->disable_rss = xgbe_disable_rss;
	hw_if->set_rss_hash_key = xgbe_set_rss_hash_key;
	hw_if->set_rss_lookup_table = xgbe_set_rss_lookup_table;