2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2002-2007 Neterion, Inc.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
16 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
31 #include <dev/nxge/include/xgehal-device.h>
32 #include <dev/nxge/include/xgehal-channel.h>
33 #include <dev/nxge/include/xgehal-fifo.h>
34 #include <dev/nxge/include/xgehal-ring.h>
35 #include <dev/nxge/include/xgehal-driver.h>
36 #include <dev/nxge/include/xgehal-mgmt.h>
/*
 * Sentinel values used by the register-initialization tables below:
 * END_SIGN terminates a table, SWITCH_SIGN separates programming phases.
 * NOTE(review): END_SIGN and the closing #endif were lost in this span
 * during extraction and are restored here; they are required by the
 * table walkers (e.g. __hal_device_xena_fix_mac) further down.
 */
#define END_SIGN	0x0
#define SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL

#ifdef XGE_HAL_HERC_EMULATION
#undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
#endif

/*
 * Jenkins hash key length (in bytes)
 */
#define XGE_HAL_JHASH_MSG_LEN 50
/*
 * mix(a,b,c) used in Jenkins hash algorithm (Bob Jenkins' lookup2 mix
 * step): reversibly mixes three integer lanes so that every input bit
 * affects every output lane. Evaluates each argument multiple times -
 * callers must pass plain lvalues, never expressions with side effects.
 * NOTE(review): the closing brace of this macro was lost in extraction
 * and is restored here.
 */
#define mix(a,b,c) { \
	a -= b; a -= c; a ^= (c>>13); \
	b -= c; b -= a; b ^= (a<<8); \
	c -= a; c -= b; c ^= (b>>13); \
	a -= b; a -= c; a ^= (c>>12); \
	b -= c; b -= a; b ^= (a<<16); \
	c -= a; c -= b; c ^= (b>>5); \
	a -= b; a -= c; a ^= (c>>3); \
	b -= c; b -= a; b ^= (a<<10); \
	c -= a; c -= b; c ^= (b>>15); \
}
67 * __hal_device_event_queued
68 * @data: pointer to xge_hal_device_t structure
70 * Will be called when new event succesfully queued.
73 __hal_device_event_queued(void *data, int event_type)
75 xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
76 if (g_xge_hal_driver->uld_callbacks.event_queued) {
77 g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
82 * __hal_pio_mem_write32_upper
84 * Endiann-aware implementation of xge_os_pio_mem_write32().
85 * Since Xframe has 64bit registers, we differintiate uppper and lower
89 __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
91 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
92 xge_os_pio_mem_write32(pdev, regh, val, addr);
94 xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
99 * __hal_pio_mem_write32_upper
101 * Endiann-aware implementation of xge_os_pio_mem_write32().
102 * Since Xframe has 64bit registers, we differintiate uppper and lower
106 __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
109 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
110 xge_os_pio_mem_write32(pdev, regh, val,
111 (void *) ((char *)addr + 4));
113 xge_os_pio_mem_write32(pdev, regh, val, addr);
118 * __hal_device_register_poll
119 * @hldev: pointer to xge_hal_device_t structure
120 * @reg: register to poll for
121 * @op: 0 - bit reset, 1 - bit set
122 * @mask: mask for logical "and" condition based on %op
123 * @max_millis: maximum time to try to poll in milliseconds
125 * Will poll certain register for specified amount of time.
126 * Will poll until masked bit is not cleared.
129 __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
130 int op, u64 mask, int max_millis)
134 xge_hal_status_e ret = XGE_HAL_FAIL;
139 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
140 if (op == 0 && !(val64 & mask))
142 else if (op == 1 && (val64 & mask) == mask)
148 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
149 if (op == 0 && !(val64 & mask))
151 else if (op == 1 && (val64 & mask) == mask)
154 } while (++i < max_millis);
160 * __hal_device_wait_quiescent
162 * @hw_status: hw_status in case of error
164 * Will wait until device is quiescent for some blocks.
166 static xge_hal_status_e
167 __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
169 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
171 /* poll and wait first */
172 #ifdef XGE_HAL_HERC_EMULATION
173 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
174 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
175 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
176 XGE_HAL_ADAPTER_STATUS_PFC_READY |
177 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
178 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
179 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
180 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
181 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
182 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
184 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
185 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
186 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
187 XGE_HAL_ADAPTER_STATUS_PFC_READY |
188 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
189 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
190 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
191 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
192 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
193 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
194 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
197 return xge_hal_device_status(hldev, hw_status);
201 * xge_hal_device_is_slot_freeze
204 * Returns non-zero if the slot is freezed.
205 * The determination is made based on the adapter_status
206 * register which will never give all FFs, unless PCI read
210 xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
212 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
213 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
216 xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
217 &bar0->adapter_status);
218 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
219 xge_offsetof(xge_hal_pci_config_le_t, device_id),
222 if (adapter_status == XGE_HAL_ALL_FOXES)
225 dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
227 printf(">>> Slot is frozen!\n");
231 return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
236 * __hal_device_led_actifity_fix
237 * @hldev: pointer to xge_hal_device_t structure
239 * SXE-002: Configure link and activity LED to turn it off
242 __hal_device_led_actifity_fix(xge_hal_device_t *hldev)
244 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
248 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
249 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);
252 * In the case of Herc, there is a new register named beacon control
253 * is added which was not present in Xena.
254 * Beacon control register in Herc is at the same offset as
255 * gpio control register in Xena. It means they are one and same in
256 * the case of Xena. Also, gpio control register offset in Herc and
258 * The current register map represents Herc(It means we have
259 * both beacon and gpio control registers in register map).
260 * WRT transition from Xena to Herc, all the code in Xena which was
261 * using gpio control register for LED handling would have to
262 * use beacon control register in Herc and the rest of the code
263 * which uses gpio control in Xena would use the same register
265 * WRT LED handling(following code), In the case of Herc, beacon
266 * control register has to be used. This is applicable for Xena also,
267 * since it represents the gpio control register in Xena.
269 if ((subid & 0xFF) >= 0x07) {
270 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
271 &bar0->beacon_control);
272 val64 |= 0x0000800000000000ULL;
273 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
274 val64, &bar0->beacon_control);
275 val64 = 0x0411040400000000ULL;
276 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
277 (void *) ((u8 *)bar0 + 0x2700));
281 /* Constants for Fixing the MacAddress problem seen mostly on
284 static u64 xena_fix_mac[] = {
285 0x0060000000000000ULL, 0x0060600000000000ULL,
286 0x0040600000000000ULL, 0x0000600000000000ULL,
287 0x0020600000000000ULL, 0x0060600000000000ULL,
288 0x0020600000000000ULL, 0x0060600000000000ULL,
289 0x0020600000000000ULL, 0x0060600000000000ULL,
290 0x0020600000000000ULL, 0x0060600000000000ULL,
291 0x0020600000000000ULL, 0x0060600000000000ULL,
292 0x0020600000000000ULL, 0x0060600000000000ULL,
293 0x0020600000000000ULL, 0x0060600000000000ULL,
294 0x0020600000000000ULL, 0x0060600000000000ULL,
295 0x0020600000000000ULL, 0x0060600000000000ULL,
296 0x0020600000000000ULL, 0x0060600000000000ULL,
297 0x0020600000000000ULL, 0x0000600000000000ULL,
298 0x0040600000000000ULL, 0x0060600000000000ULL,
303 * __hal_device_fix_mac
304 * @hldev: HAL device handle.
306 * Fix for all "FFs" MAC address problems observed on Alpha platforms.
309 __hal_device_xena_fix_mac(xge_hal_device_t *hldev)
312 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
315 * In the case of Herc, there is a new register named beacon control
316 * is added which was not present in Xena.
317 * Beacon control register in Herc is at the same offset as
318 * gpio control register in Xena. It means they are one and same in
319 * the case of Xena. Also, gpio control register offset in Herc and
321 * The current register map represents Herc(It means we have
322 * both beacon and gpio control registers in register map).
323 * WRT transition from Xena to Herc, all the code in Xena which was
324 * using gpio control register for LED handling would have to
325 * use beacon control register in Herc and the rest of the code
326 * which uses gpio control in Xena would use the same register
328 * In the following code(xena_fix_mac), beacon control register has
329 * to be used in the case of Xena, since it represents gpio control
330 * register. In the case of Herc, there is no change required.
332 while (xena_fix_mac[i] != END_SIGN) {
333 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
334 xena_fix_mac[i++], &bar0->beacon_control);
340 * xge_hal_device_bcast_enable
341 * @hldev: HAL device handle.
343 * Enable receiving broadcasts.
344 * The host must first write RMAC_CFG_KEY "key"
345 * register, and then - MAC_CFG register.
348 xge_hal_device_bcast_enable(xge_hal_device_h devh)
350 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
351 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
354 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
356 val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;
358 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
359 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
361 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
362 (u32)(val64 >> 32), &bar0->mac_cfg);
364 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
365 (unsigned long long)val64,
366 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
370 * xge_hal_device_bcast_disable
371 * @hldev: HAL device handle.
373 * Disable receiving broadcasts.
374 * The host must first write RMAC_CFG_KEY "key"
375 * register, and then - MAC_CFG register.
378 xge_hal_device_bcast_disable(xge_hal_device_h devh)
380 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
381 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
384 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
387 val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE);
388 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
389 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
391 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
392 (u32)(val64 >> 32), &bar0->mac_cfg);
394 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
395 (unsigned long long)val64,
396 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
400 * __hal_device_shared_splits_configure
401 * @hldev: HAL device handle.
403 * TxDMA will stop Read request if the number of read split had exceeded
404 * the limit set by shared_splits
407 __hal_device_shared_splits_configure(xge_hal_device_t *hldev)
409 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
412 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
415 XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits);
416 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
418 xge_debug_device(XGE_TRACE, "%s", "shared splits configured");
422 * __hal_device_rmac_padding_configure
423 * @hldev: HAL device handle.
425 * Configure RMAC frame padding. Depends on configuration, it
426 * can be send to host or removed by MAC.
429 __hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
431 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
434 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
435 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
436 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
438 val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE );
439 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
440 val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD;
443 * If the RTH enable bit is not set, strip the FCS
445 if (!hldev->config.rth_en ||
446 !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
447 &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) {
448 val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS;
451 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD );
452 val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM;
454 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
455 (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
458 xge_debug_device(XGE_TRACE,
459 "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
460 (unsigned long long)val64);
464 * __hal_device_pause_frames_configure
465 * @hldev: HAL device handle.
467 * Set Pause threshold.
469 * Pause frame is generated if the amount of data outstanding
470 * on any queue exceeded the ratio of
471 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
474 __hal_device_pause_frames_configure(xge_hal_device_t *hldev)
476 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
480 switch (hldev->config.mac.media) {
481 case XGE_HAL_MEDIA_SR:
482 case XGE_HAL_MEDIA_SW:
483 val64=0xfffbfffbfffbfffbULL;
485 case XGE_HAL_MEDIA_LR:
486 case XGE_HAL_MEDIA_LW:
487 val64=0xffbbffbbffbbffbbULL;
489 case XGE_HAL_MEDIA_ER:
490 case XGE_HAL_MEDIA_EW:
492 val64=0xffbbffbbffbbffbbULL;
496 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
497 val64, &bar0->mc_pause_thresh_q0q3);
498 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
499 val64, &bar0->mc_pause_thresh_q4q7);
501 /* Set the time value to be inserted in the pause frame generated
503 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
504 &bar0->rmac_pause_cfg);
505 if (hldev->config.mac.rmac_pause_gen_en)
506 val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
508 val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
509 if (hldev->config.mac.rmac_pause_rcv_en)
510 val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
512 val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
513 val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
514 val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
515 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
516 &bar0->rmac_pause_cfg);
519 for (i = 0; i<4; i++) {
521 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
524 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
525 &bar0->mc_pause_thresh_q0q3);
528 for (i = 0; i<4; i++) {
530 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
533 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
534 &bar0->mc_pause_thresh_q4q7);
535 xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
539 * Herc's clock rate doubled, unless the slot is 33MHz.
541 unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
542 unsigned int time_ival)
544 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
547 xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);
549 if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
550 hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
558 * __hal_device_bus_master_disable
559 * @hldev: HAL device handle.
561 * Disable bus mastership.
564 __hal_device_bus_master_disable (xge_hal_device_t *hldev)
569 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
570 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
572 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
573 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
577 * __hal_device_bus_master_enable
578 * @hldev: HAL device handle.
580 * Disable bus mastership.
583 __hal_device_bus_master_enable (xge_hal_device_t *hldev)
588 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
589 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
591 /* already enabled? do nothing */
592 if (cmd & bus_master)
596 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
597 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
600 * __hal_device_intr_mgmt
601 * @hldev: HAL device handle.
602 * @mask: mask indicating which Intr block must be modified.
603 * @flag: if true - enable, otherwise - disable interrupts.
605 * Disable or enable device interrupts. Mask is used to specify
606 * which hardware blocks should produce interrupts. For details
607 * please refer to Xframe User Guide.
610 __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
612 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
613 u64 val64 = 0, temp64 = 0;
616 gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev,
617 hldev->regh0, &bar0->general_int_mask);
619 /* Top level interrupt classification */
621 if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
622 /* Enable PIC Intrs in the general intr mask register */
623 val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
625 gim &= ~((u64) val64);
626 temp64 = xge_os_pio_mem_read64(hldev->pdev,
627 hldev->regh0, &bar0->pic_int_mask);
629 temp64 &= ~XGE_HAL_PIC_INT_TX;
630 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
631 if (xge_hal_device_check_id(hldev) ==
633 temp64 &= ~XGE_HAL_PIC_INT_MISC;
636 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
637 temp64, &bar0->pic_int_mask);
638 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
639 if (xge_hal_device_check_id(hldev) ==
642 * Unmask only Link Up interrupt
644 temp64 = xge_os_pio_mem_read64(hldev->pdev,
645 hldev->regh0, &bar0->misc_int_mask);
646 temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
647 xge_os_pio_mem_write64(hldev->pdev,
648 hldev->regh0, temp64,
649 &bar0->misc_int_mask);
650 xge_debug_device(XGE_TRACE,
651 "unmask link up flag "XGE_OS_LLXFMT,
652 (unsigned long long)temp64);
655 } else { /* flag == 0 */
657 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
658 if (xge_hal_device_check_id(hldev) ==
661 * Mask both Link Up and Down interrupts
663 temp64 = xge_os_pio_mem_read64(hldev->pdev,
664 hldev->regh0, &bar0->misc_int_mask);
665 temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
666 temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
667 xge_os_pio_mem_write64(hldev->pdev,
668 hldev->regh0, temp64,
669 &bar0->misc_int_mask);
670 xge_debug_device(XGE_TRACE,
671 "mask link up/down flag "XGE_OS_LLXFMT,
672 (unsigned long long)temp64);
675 /* Disable PIC Intrs in the general intr mask
677 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
678 XGE_HAL_ALL_INTRS_DIS,
679 &bar0->pic_int_mask);
685 /* Enabling/Disabling Tx DMA interrupts */
686 if (mask & XGE_HAL_TX_DMA_INTR) {
687 /* Enable TxDMA Intrs in the general intr mask register */
688 val64 = XGE_HAL_TXDMA_INT_M;
690 gim &= ~((u64) val64);
691 /* Enable all TxDMA interrupts */
692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
693 0x0, &bar0->txdma_int_mask);
694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
695 0x0, &bar0->pfc_err_mask);
696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
697 0x0, &bar0->tda_err_mask);
698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
699 0x0, &bar0->pcc_err_mask);
700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
701 0x0, &bar0->tti_err_mask);
702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
703 0x0, &bar0->lso_err_mask);
704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
705 0x0, &bar0->tpa_err_mask);
706 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
707 0x0, &bar0->sm_err_mask);
709 } else { /* flag == 0 */
711 /* Disable TxDMA Intrs in the general intr mask
713 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
714 XGE_HAL_ALL_INTRS_DIS,
715 &bar0->txdma_int_mask);
716 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
717 XGE_HAL_ALL_INTRS_DIS,
718 &bar0->pfc_err_mask);
724 /* Enabling/Disabling Rx DMA interrupts */
725 if (mask & XGE_HAL_RX_DMA_INTR) {
726 /* Enable RxDMA Intrs in the general intr mask register */
727 val64 = XGE_HAL_RXDMA_INT_M;
730 gim &= ~((u64) val64);
731 /* All RxDMA block interrupts are disabled for now
733 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
734 XGE_HAL_ALL_INTRS_DIS,
735 &bar0->rxdma_int_mask);
737 } else { /* flag == 0 */
739 /* Disable RxDMA Intrs in the general intr mask
741 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
742 XGE_HAL_ALL_INTRS_DIS,
743 &bar0->rxdma_int_mask);
750 /* Enabling/Disabling MAC interrupts */
751 if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
752 val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
755 gim &= ~((u64) val64);
757 /* All MAC block error inter. are disabled for now. */
758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
759 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
760 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
761 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
763 } else { /* flag == 0 */
765 /* Disable MAC Intrs in the general intr mask
767 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
768 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
769 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
770 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
776 /* XGXS Interrupts */
777 if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
778 val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
781 gim &= ~((u64) val64);
782 /* All XGXS block error interrupts are disabled for now
784 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
785 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
787 } else { /* flag == 0 */
789 /* Disable MC Intrs in the general intr mask register */
790 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
791 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
797 /* Memory Controller(MC) interrupts */
798 if (mask & XGE_HAL_MC_INTR) {
799 val64 = XGE_HAL_MC_INT_M;
802 gim &= ~((u64) val64);
804 /* Enable all MC blocks error interrupts */
805 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
806 0x0ULL, &bar0->mc_int_mask);
808 } else { /* flag == 0 */
810 /* Disable MC Intrs in the general intr mask
812 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
813 XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);
820 /* Tx traffic interrupts */
821 if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
822 val64 = XGE_HAL_TXTRAFFIC_INT_M;
825 gim &= ~((u64) val64);
827 /* Enable all the Tx side interrupts */
828 /* '0' Enables all 64 TX interrupt levels. */
829 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
830 &bar0->tx_traffic_mask);
832 } else { /* flag == 0 */
834 /* Disable Tx Traffic Intrs in the general intr mask
836 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
837 XGE_HAL_ALL_INTRS_DIS,
838 &bar0->tx_traffic_mask);
843 /* Rx traffic interrupts */
844 if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
845 val64 = XGE_HAL_RXTRAFFIC_INT_M;
847 gim &= ~((u64) val64);
848 /* '0' Enables all 8 RX interrupt levels. */
849 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
850 &bar0->rx_traffic_mask);
852 } else { /* flag == 0 */
854 /* Disable Rx Traffic Intrs in the general intr mask
857 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
858 XGE_HAL_ALL_INTRS_DIS,
859 &bar0->rx_traffic_mask);
865 /* Sched Timer interrupt */
866 if (mask & XGE_HAL_SCHED_INTR) {
868 temp64 = xge_os_pio_mem_read64(hldev->pdev,
869 hldev->regh0, &bar0->txpic_int_mask);
870 temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
871 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
872 temp64, &bar0->txpic_int_mask);
874 xge_hal_device_sched_timer(hldev,
875 hldev->config.sched_timer_us,
876 hldev->config.sched_timer_one_shot);
878 temp64 = xge_os_pio_mem_read64(hldev->pdev,
879 hldev->regh0, &bar0->txpic_int_mask);
880 temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;
882 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
883 temp64, &bar0->txpic_int_mask);
885 xge_hal_device_sched_timer(hldev,
886 XGE_HAL_SCHED_TIMER_DISABLED,
887 XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
891 if (gim != gim_saved) {
892 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
893 &bar0->general_int_mask);
894 xge_debug_device(XGE_TRACE, "general_int_mask updated "
895 XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
896 (unsigned long long)gim_saved, (unsigned long long)gim);
/*
 * NOTE(review): this definition lost several lines in extraction (the
 * return type, braces, `continue`, and the tti/rti urange_b/c and
 * ufc_b/c initializers are missing, and each surviving line carries a
 * stray pasted line number). The surviving text is kept verbatim; do
 * not trust it to compile. Restore from the stock FreeBSD
 * sys/dev/nxge/xgehal-device.c before use.
 *
 * Purpose (from the visible code): for every configured ring queue,
 * derive a per-ring bimodal TTI configuration from the device's
 * bimodal_* settings and tighten that ring's RTI coalescing values.
 */
901 * __hal_device_bimodal_configure
902 * @hldev: HAL device handle.
904 * Bimodal parameters initialization.
907 __hal_device_bimodal_configure(xge_hal_device_t *hldev)
911 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
912 xge_hal_tti_config_t *tti;
913 xge_hal_rti_config_t *rti;
915 if (!hldev->config.ring.queue[i].configured)
917 rti = &hldev->config.ring.queue[i].rti;
918 tti = &hldev->bimodal_tti[i];
/* tti urange_a/ufc_a scale with bimodal_urange_a_en (0 or 1). */
921 tti->urange_a = hldev->bimodal_urange_a_en * 10;
924 tti->ufc_a = hldev->bimodal_urange_a_en * 8;
928 tti->timer_val_us = hldev->bimodal_timer_val_us;
929 tti->timer_ac_en = 1;
930 tti->timer_ci_en = 0;
935 rti->ufc_a = 1; /* <= for netpipe type of tests */
938 rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */
939 rti->timer_ac_en = 1;
940 rti->timer_val_us = 5; /* for optimal bus efficiency usage */
945 * __hal_device_tti_apply
946 * @hldev: HAL device handle.
948 * apply TTI configuration.
950 static xge_hal_status_e
951 __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
952 int num, int runtime)
954 u64 val64, data1 = 0, data2 = 0;
955 xge_hal_pci_bar0_t *bar0;
958 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
960 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
962 if (tti->timer_val_us) {
963 unsigned int tx_interval;
965 if (hldev->config.pci_freq_mherz) {
966 tx_interval = hldev->config.pci_freq_mherz *
967 tti->timer_val_us / 64;
969 __hal_fix_time_ival_herc(hldev,
972 tx_interval = tti->timer_val_us;
974 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
975 if (tti->timer_ac_en) {
976 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
978 if (tti->timer_ci_en) {
979 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
983 xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
984 num, tx_interval, tti->timer_ci_en ?
985 "enabled": "disabled");
996 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
997 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
998 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
1000 data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
1001 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
1002 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
1003 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
1006 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
1007 &bar0->tti_data1_mem);
1008 (void)xge_os_pio_mem_read64(hldev->pdev,
1009 hldev->regh0, &bar0->tti_data1_mem);
1010 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
1011 &bar0->tti_data2_mem);
1012 (void)xge_os_pio_mem_read64(hldev->pdev,
1013 hldev->regh0, &bar0->tti_data2_mem);
1016 val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
1017 XGE_HAL_TTI_CMD_MEM_OFFSET(num);
1018 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1019 &bar0->tti_command_mem);
1021 if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
1022 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
1023 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1024 /* upper layer may require to repeat */
1025 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1029 xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
1031 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1032 hldev->regh0, &bar0->tti_data1_mem));
1039 * __hal_device_tti_configure
1040 * @hldev: HAL device handle.
1042 * TTI Initialization.
1043 * Initialize Transmit Traffic Interrupt Scheme.
1045 static xge_hal_status_e
1046 __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
1050 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
1053 if (!hldev->config.fifo.queue[i].configured)
1056 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
1057 xge_hal_status_e status;
1059 if (!hldev->config.fifo.queue[i].tti[j].enabled)
1062 /* at least some TTI enabled. Record it. */
1063 hldev->tti_enabled = 1;
1065 status = __hal_device_tti_apply(hldev,
1066 &hldev->config.fifo.queue[i].tti[j],
1067 i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
1068 if (status != XGE_HAL_OK)
1073 /* processing bimodal TTIs */
1074 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1075 xge_hal_status_e status;
1077 if (!hldev->bimodal_tti[i].enabled)
1080 /* at least some bimodal TTI enabled. Record it. */
1081 hldev->tti_enabled = 1;
1083 status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
1084 XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
1085 if (status != XGE_HAL_OK)
1094 * __hal_device_rti_configure
1095 * @hldev: HAL device handle.
1097 * RTI Initialization.
1098 * Initialize Receive Traffic Interrupt Scheme.
1101 __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
1103 xge_hal_pci_bar0_t *bar0;
1104 u64 val64, data1 = 0, data2 = 0;
1109 * we don't want to re-configure RTI in case when
1110 * bimodal interrupts are in use. Instead reconfigure TTI
1111 * with new RTI values.
1113 if (hldev->config.bimodal_interrupts) {
1114 __hal_device_bimodal_configure(hldev);
1115 return __hal_device_tti_configure(hldev, 1);
1117 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
1119 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1121 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1122 xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;
1124 if (!hldev->config.ring.queue[i].configured)
1127 if (rti->timer_val_us) {
1128 unsigned int rx_interval;
1130 if (hldev->config.pci_freq_mherz) {
1131 rx_interval = hldev->config.pci_freq_mherz *
1132 rti->timer_val_us / 8;
1134 __hal_fix_time_ival_herc(hldev,
1137 rx_interval = rti->timer_val_us;
1139 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
1140 if (rti->timer_ac_en) {
1141 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
1143 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
1146 if (rti->urange_a ||
1153 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
1154 XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
1155 XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);
1157 data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
1158 XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
1159 XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
1160 XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
1163 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
1164 &bar0->rti_data1_mem);
1165 (void)xge_os_pio_mem_read64(hldev->pdev,
1166 hldev->regh0, &bar0->rti_data1_mem);
1167 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
1168 &bar0->rti_data2_mem);
1169 (void)xge_os_pio_mem_read64(hldev->pdev,
1170 hldev->regh0, &bar0->rti_data2_mem);
1173 val64 = XGE_HAL_RTI_CMD_MEM_WE |
1174 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
1175 val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
1176 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1177 &bar0->rti_command_mem);
1179 if (!runtime && __hal_device_register_poll(hldev,
1180 &bar0->rti_command_mem, 0,
1181 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
1182 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1183 /* upper layer may require to repeat */
1184 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1188 xge_debug_device(XGE_TRACE,
1189 "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
1191 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1192 hldev->regh0, &bar0->rti_data1_mem));
1200 /* Constants to be programmed into the Xena's registers to configure
/*
 * MDIO configuration sequence for the Xena XAUI interface. The values
 * are written serially to the mdio_control register by
 * __hal_device_xaui_configure(); SWITCH_SIGN/END_SIGN sentinels (in the
 * elided tail of the table) control switching/termination.
 */
1202 static u64 default_xena_mdio_cfg[] = {
/* NOTE(review): presumably asserts PMA PLL reset (mirrors the "Remove
 * Reset" sequence below) -- confirm against the Xframe datasheet. */
1204 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1205 0xC0010100008000E4ULL,
1206 /* Remove Reset from PMA PLL */
1207 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1208 0xC0010100000000E4ULL,
1212 static u64 default_herc_mdio_cfg[] = {
/*
 * DTX configuration sequence for the Xena XAUI interface; written
 * serially to the dtx_control register by __hal_device_xaui_configure().
 */
1216 static u64 default_xena_dtx_cfg[] = {
1217 0x8000051500000000ULL, 0x80000515000000E0ULL,
1218 0x80000515D93500E4ULL, 0x8001051500000000ULL,
1219 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
1220 0x8002051500000000ULL, 0x80020515000000E0ULL,
1221 0x80020515F21000E4ULL,
1222 /* Set PADLOOPBACKN */
1223 0x8002051500000000ULL, 0x80020515000000E0ULL,
1224 0x80020515B20000E4ULL, 0x8003051500000000ULL,
1225 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
1226 0x8004051500000000ULL, 0x80040515000000E0ULL,
1227 0x80040515B20000E4ULL, 0x8005051500000000ULL,
1228 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
1230 /* Remove PADLOOPBACKN */
1231 0x8002051500000000ULL, 0x80020515000000E0ULL,
1232 0x80020515F20000E4ULL, 0x8003051500000000ULL,
1233 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
1234 0x8004051500000000ULL, 0x80040515000000E0ULL,
1235 0x80040515F20000E4ULL, 0x8005051500000000ULL,
1236 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
/*
 * DTX configuration sequence for the Herc card (first build variant;
 * NOTE(review): two definitions of this table exist, presumably selected
 * by an elided preprocessor conditional -- confirm in the full source).
 */
1241 static u64 default_herc_dtx_cfg[] = {
1242 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
1243 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
1244 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1245 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1246 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1247 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/*
 * DTX configuration sequence for the Herc card (alternate build variant
 * of the table above; the selecting preprocessor conditional is outside
 * this view).
 */
1252 static u64 default_herc_dtx_cfg[] = {
1253 0x8000051536750000ULL, 0x80000515367500E0ULL,
1254 0x8000051536750004ULL, 0x80000515367500E4ULL,
1256 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1257 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1259 0x801205150D440000ULL, 0x801205150D4400E0ULL,
1260 0x801205150D440004ULL, 0x801205150D4400E4ULL,
1262 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1263 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/*
 * __hal_serial_mem_write64 - write a 64-bit value as two 32-bit PIO
 * accesses: upper 32 bits first, then lower 32 bits, to the same
 * register. Used where the hardware requires serialized half-word
 * register programming (e.g. dtx/mdio control during XAUI setup).
 */
1269 __hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
1271 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
1272 (u32)(value>>32), reg);
1274 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
/*
 * __hal_serial_mem_read64 - read a 64-bit register via PIO; counterpart
 * of __hal_serial_mem_write64.
 */
1281 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
1283 u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1290 * __hal_device_xaui_configure
1291 * @hldev: HAL device handle.
1293 * Configure XAUI Interface of Xena.
1295 * To Configure the Xena's XAUI, one has to write a series
1296 * of 64 bit values into two registers in a particular
1297 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
1298 * which will be defined in the array of configuration values
1299 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
1300 * to switch writing from one register to another. We continue
1301 * writing these values until we encounter the 'END_SIGN' macro.
1302 * For example, After making a series of 21 writes into
1303 * dtx_control register the 'SWITCH_SIGN' appears and hence we
1304 * start writing into mdio_control until we encounter END_SIGN.
1307 __hal_device_xaui_configure(xge_hal_device_t *hldev)
1309 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1310 int mdio_cnt = 0, dtx_cnt = 0;
1311 u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;
/* Select the per-card configuration tables; only Xena and Herc are
 * handled, anything else trips the assert below. */
1313 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
1314 default_dtx_cfg = default_xena_dtx_cfg;
1315 default_mdio_cfg = default_xena_mdio_cfg;
1316 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
1317 default_dtx_cfg = default_herc_dtx_cfg;
1318 default_mdio_cfg = default_herc_mdio_cfg;
1320 xge_assert(default_dtx_cfg);
/* Stream dtx_control values until SWITCH_SIGN (hand off to mdio)
 * or END_SIGN (table exhausted). */
1326 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
1327 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
1331 __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt],
1332 &bar0->dtx_control);
/* Same streaming for mdio_control. */
1336 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
1337 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
1341 __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt],
1342 &bar0->mdio_control);
/* Keep alternating between the two tables until both hit END_SIGN. */
1345 } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
1346 (default_mdio_cfg[mdio_cnt] == END_SIGN)) );
1348 xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
1352 * __hal_device_mac_link_util_set
1353 * @hldev: HAL device handle.
1355 * Set sampling rate to calculate link utilization.
1358 __hal_device_mac_link_util_set(xge_hal_device_t *hldev)
1360 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Combine the configured TMAC and RMAC utilization sampling periods
 * into the single mac_link_util register. */
1363 val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
1364 hldev->config.mac.tmac_util_period) |
1365 XGE_HAL_MAC_RX_LINK_UTIL_VAL(
1366 hldev->config.mac.rmac_util_period);
1367 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1368 &bar0->mac_link_util);
1369 xge_debug_device(XGE_TRACE, "%s",
1370 "bandwidth link utilization configured");
1374 * __hal_device_set_swapper
1375 * @hldev: HAL device handle.
1377 * Set the Xframe's byte "swapper" in accordance with
1378 * endianness of the host.
1381 __hal_device_set_swapper(xge_hal_device_t *hldev)
1383 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1387 * from 32bit errata:
1389 * The SWAPPER_CONTROL register determines how the adapter accesses
1390 * host memory as well as how it responds to read and write requests
1391 * from the host system. Writes to this register should be performed
1392 * carefully, since the byte swappers could reverse the order of bytes.
1393 * When configuring this register keep in mind that writes to the PIF
1394 * read and write swappers could reverse the order of the upper and
1395 * lower 32-bit words. This means that the driver may have to write
1396 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
1397 * configure the entire register. */
1400 * The device by default set to a big endian format, so a big endian
1401 * driver need not set anything.
1404 #if defined(XGE_HAL_CUSTOM_HW_SWAPPER)
/* Platform supplied its own swapper value: open all bits first, then
 * program the custom setting. */
1406 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1407 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1409 val64 = XGE_HAL_CUSTOM_HW_SWAPPER;
1412 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1413 &bar0->swapper_ctrl);
1415 xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
1416 (unsigned long long)val64);
1418 #elif !defined(XGE_OS_HOST_BIG_ENDIAN)
1421 * Initially we enable all bits to make it accessible by the driver,
1422 * then we selectively enable only those bits that we want to set.
1423 * i.e. force swapper to swap for the first time since second write
1424 * will overwrite with the final settings.
1426 * Use only for little endian platforms.
1428 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1429 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1431 val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
1432 XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
1433 XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
1434 XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
1435 XGE_HAL_SWAPPER_CTRL_RTH_FE |
1436 XGE_HAL_SWAPPER_CTRL_RTH_SE |
1437 XGE_HAL_SWAPPER_CTRL_TXP_FE |
1438 XGE_HAL_SWAPPER_CTRL_TXP_SE |
1439 XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
1440 XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
1441 XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
1442 XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
1443 XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
1444 XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
1445 XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
1446 XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
1447 XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
1448 XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
1449 XGE_HAL_SWAPPER_CTRL_XMSI_FE |
1450 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);
1453 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
1454 val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
1456 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
1457 &bar0->swapper_ctrl);
1459 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1460 &bar0->swapper_ctrl);
/* Deliberate second write of the upper 32 bits -- see the errata note
 * above: the first write may land word-swapped. */
1462 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1463 &bar0->swapper_ctrl);
1464 xge_debug_device(XGE_TRACE, "%s", "using little endian set");
1467 /* Verifying if endian settings are accurate by reading a feedback
1469 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1470 &bar0->pif_rd_swapper_fb);
1471 if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
1472 xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
1473 (unsigned long long) val64);
1474 return XGE_HAL_ERR_SWAPPER_CTRL;
1477 xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");
1483 * __hal_device_rts_mac_configure - Configure RTS steering based on
1484 * destination mac address.
1485 * @hldev: HAL device handle.
1489 __hal_device_rts_mac_configure(xge_hal_device_t *hldev)
1491 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Nothing to do when MAC-based steering is not enabled in config. */
1494 if (!hldev->config.rts_mac_en) {
1499 * Set the receive traffic steering mode from default(classic)
/* Read-modify-write rts_ctrl to select enhanced steering mode. */
1502 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1504 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1505 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1506 val64, &bar0->rts_ctrl);
1511 * __hal_device_rts_port_configure - Configure RTS steering based on
1512 * destination or source port number.
1513 * @hldev: HAL device handle.
1517 __hal_device_rts_port_configure(xge_hal_device_t *hldev)
1519 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Nothing to do when port-based steering is not enabled in config. */
1523 if (!hldev->config.rts_port_en) {
1528 * Set the receive traffic steering mode from default(classic)
1531 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1533 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1534 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1535 val64, &bar0->rts_ctrl);
1538 * Initiate port steering according to per-ring configuration
1540 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1542 xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum];
1544 if (!queue->configured || queue->rts_port_en)
1547 for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) {
1548 xge_hal_rts_port_t *port = &queue->rts_ports[pnum];
1551 * Skip and clear empty ports
1557 xge_os_pio_mem_write64(hldev->pdev,
1559 &bar0->rts_pn_cam_data);
1561 val64 = BIT(7) | BIT(15);
1564 * Assign new Port values according
1567 val64 = vBIT(port->num,8,16) |
1568 vBIT(rnum,37,3) | BIT(63);
/* Write the CAM data entry, then strobe the command via the
 * CAM control register and wait for completion. */
1573 xge_os_pio_mem_write64(hldev->pdev,
1574 hldev->regh0, val64,
1575 &bar0->rts_pn_cam_data);
1577 val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8);
1580 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1581 val64, &bar0->rts_pn_cam_ctrl);
1583 /* poll until done */
1584 if (__hal_device_register_poll(hldev,
1585 &bar0->rts_pn_cam_ctrl, 0,
1586 XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED,
1587 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) !=
1589 /* upper layer may require to repeat */
1590 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1598 * __hal_device_rts_qos_configure - Configure RTS steering based on
1600 * @hldev: HAL device handle.
1604 __hal_device_rts_qos_configure(xge_hal_device_t *hldev)
1606 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Nothing to do when QoS-based steering is not enabled in config. */
1610 if (!hldev->config.rts_qos_en) {
1614 /* First clear the RTS_DS_MEM_DATA */
1616 for (j = 0; j < 64; j++ )
1618 /* First clear the value */
1619 val64 = XGE_HAL_RTS_DS_MEM_DATA(0);
1621 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1622 &bar0->rts_ds_mem_data);
1624 val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE |
1625 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
1626 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j );
1628 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1629 &bar0->rts_ds_mem_ctrl);
1632 /* poll until done */
1633 if (__hal_device_register_poll(hldev,
1634 &bar0->rts_ds_mem_ctrl, 0,
1635 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
1636 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1637 /* upper layer may require to repeat */
1638 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* Count the configured rings, then program the rx weighted
 * round-robin priority registers with a ring-id pattern that
 * spreads traffic across exactly that many rings. */
1644 for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) {
1645 if (hldev->config.ring.queue[j].configured)
1649 switch (rx_ring_num) {
1652 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1653 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1654 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1655 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1656 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1659 val64 = 0x0001000100010001ULL;
1660 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1661 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1662 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1663 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1664 val64 = 0x0001000100000000ULL;
1665 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1668 val64 = 0x0001020001020001ULL;
1669 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1670 val64 = 0x0200010200010200ULL;
1671 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1672 val64 = 0x0102000102000102ULL;
1673 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1674 val64 = 0x0001020001020001ULL;
1675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1676 val64 = 0x0200010200000000ULL;
1677 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1680 val64 = 0x0001020300010203ULL;
1681 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1682 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1683 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1684 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1685 val64 = 0x0001020300000000ULL;
1686 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1689 val64 = 0x0001020304000102ULL;
1690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1691 val64 = 0x0304000102030400ULL;
1692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1693 val64 = 0x0102030400010203ULL;
1694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1695 val64 = 0x0400010203040001ULL;
1696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1697 val64 = 0x0203040000000000ULL;
1698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1701 val64 = 0x0001020304050001ULL;
1702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1703 val64 = 0x0203040500010203ULL;
1704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1705 val64 = 0x0405000102030405ULL;
1706 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1707 val64 = 0x0001020304050001ULL;
1708 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1709 val64 = 0x0203040500000000ULL;
1710 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1713 val64 = 0x0001020304050600ULL;
1714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1715 val64 = 0x0102030405060001ULL;
1716 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1717 val64 = 0x0203040506000102ULL;
1718 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1719 val64 = 0x0304050600010203ULL;
1720 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1721 val64 = 0x0405060000000000ULL;
1722 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1725 val64 = 0x0001020304050607ULL;
1726 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1727 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1728 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1729 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1730 val64 = 0x0001020300000000ULL;
1731 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1739 * xge_hal_device_rts_mac_enable
1741 * @devh: HAL device handle.
1742 * @index: index number where the MAC addr will be stored
1743 * @macaddr: MAC address
1745 * - Enable RTS steering for the given MAC address. This function has to be
1746 * called with lock acquired.
1749 * 1. ULD has to call this function with the index value which
1750 * satisfies the following condition:
1751 * ring_num = (index % 8)
1752 * 2.ULD also needs to make sure that the index is not
1753 * occupied by any MAC address. If that index has any MAC address
1754 * it will be overwritten and HAL will not check for it.
1758 xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
1760 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1761 xge_hal_status_e status;
1763 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
/* Herc supports a larger MAC address table than Xena. */
1765 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1766 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1768 if ( index >= max_addr )
1769 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1772 * Set the MAC address at the given location marked by index.
1774 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1775 if (status != XGE_HAL_OK) {
1776 xge_debug_device(XGE_ERR, "%s",
1777 "Not able to set the mac addr");
/* Address programmed; now enable steering for its RTS section. */
1781 return xge_hal_device_rts_section_enable(hldev, index);
1785 * xge_hal_device_rts_mac_disable
1786 * @hldev: HAL device handle.
1787 * @index: index number where to disable the MAC addr
1789 * Disable RTS Steering based on the MAC address.
1790 * This function should be called with lock acquired.
1794 xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
1796 xge_hal_status_e status;
/* Broadcast address is used to "erase" the entry at @index. */
1797 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1798 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1800 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1802 xge_debug_ll(XGE_TRACE, "the index value is %d ", index);
/* Herc supports a larger MAC address table than Xena. */
1804 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1805 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1807 if ( index >= max_addr )
1808 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1811 * Disable MAC address @ given index location
1813 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1814 if (status != XGE_HAL_OK) {
1815 xge_debug_device(XGE_ERR, "%s",
1816 "Not able to set the mac addr");
1825 * __hal_device_rth_it_configure - Configure RTH for the device
1826 * @hldev: HAL device handle.
1828 * Using IT (Indirection Table).
1831 __hal_device_rth_it_configure(xge_hal_device_t *hldev)
1833 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1835 int rings[XGE_HAL_MAX_RING_NUM]={0};
/* Nothing to do when RTH is not enabled in config. */
1841 if (!hldev->config.rth_en) {
1846 * Set the receive traffic steering mode from default(classic)
1849 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1851 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1852 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1853 val64, &bar0->rts_ctrl);
1855 buckets_num = (1 << hldev->config.rth_bucket_size);
/* Collect the rings that are both configured and RTH-enabled. */
1858 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1859 if (hldev->config.ring.queue[rnum].configured &&
1860 hldev->config.ring.queue[rnum].rth_en)
1861 rings[rmax++] = rnum;
1865 /* for starters: fill in all the buckets with rings "equally" */
1866 for (bucket = 0; bucket < buckets_num; bucket++) {
/* Program one indirection-table entry: data first, then strobe
 * the write command and poll for completion. */
1872 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
1873 XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
1874 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1875 &bar0->rts_rth_map_mem_data);
1878 val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
1879 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
1880 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
1881 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1882 &bar0->rts_rth_map_mem_ctrl);
1884 /* poll until done */
1885 if (__hal_device_register_poll(hldev,
1886 &bar0->rts_rth_map_mem_ctrl, 0,
1887 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
1888 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1889 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* Finally enable RTH for every supported packet class. */
1895 val64 = XGE_HAL_RTS_RTH_EN;
1896 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
1897 val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN |
1898 XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
1899 XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN;
1901 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1902 &bar0->rts_rth_cfg);
1904 xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
1905 hldev->config.rth_bucket_size);
1912 * __hal_spdm_entry_add - Add a new entry to the SPDM table.
1914 * Add a new entry to the SPDM table
1916 * This function add a new entry to the SPDM table.
1919 * This function should be called with spdm_lock.
1921 * See also: xge_hal_spdm_entry_add , xge_hal_spdm_entry_remove.
1923 static xge_hal_status_e
1924 __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
1925 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
1926 u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
1928 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1930 u64 spdm_line_arr[8];
1934 * Clear the SPDM READY bit
1936 val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
1937 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1938 &bar0->rxpic_int_reg);
1940 xge_debug_device(XGE_TRACE,
1941 "L4 SP %x:DP %x: hash %x tgt_queue %d ",
1942 l4_sp, l4_dp, jhash_value, tgt_queue);
1944 xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));
1947 * Construct the SPDM entry.
/* Line 0 packs the L4 ports, target queue and flags. */
1949 spdm_line_arr[0] = vBIT(l4_sp,0,16) |
1951 vBIT(tgt_queue,53,3) |
/* IPv4: both addresses fit one 64-bit line; IPv6: the 128-bit
 * source/destination addresses occupy lines 1-4. */
1957 spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
1958 vBIT(dst_ip->ipv4.addr,32,32);
1961 xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
1962 xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
1963 xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
1964 xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
1967 spdm_line_arr[7] = vBIT(jhash_value,0,32) |
1968 BIT(63); /* entry enable bit */
1971 * Add the entry to the SPDM table
1973 for(line_no = 0; line_no < 8; line_no++) {
1974 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1975 spdm_line_arr[line_no],
1976 (void *)((char *)hldev->spdm_mem_base +
1982 * Wait for the operation to be completed.
1984 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
1985 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
1986 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1987 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1991 * Add this information to a local SPDM table. The purpose of
1992 * maintaining a local SPDM table is to avoid a search in the
1993 * adapter SPDM table for spdm entry lookup which is very costly
1996 hldev->spdm_table[spdm_entry]->in_use = 1;
1997 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
1998 sizeof(xge_hal_ipaddr_t));
1999 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
2000 sizeof(xge_hal_ipaddr_t));
2001 hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
2002 hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
2003 hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
2004 hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
2005 hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
2006 hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
2007 hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;
2013 * __hal_device_rth_spdm_configure - Configure RTH for the device
2014 * @hldev: HAL device handle.
2016 * Using SPDM (Socket-Pair Direct Match).
2019 __hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
2021 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
2024 u32 spdm_bar_offset;
2025 int spdm_table_size;
/* Nothing to do when SPDM-based RTH is not enabled in config. */
2028 if (!hldev->config.rth_spdm_en) {
2033 * Retrieve the base address of SPDM Table.
2035 val64 = xge_os_pio_mem_read64(hldev->pdev,
2036 hldev->regh0, &bar0->spdm_bir_offset);
2038 spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
2039 spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);
2043 * spdm_bar_num specifies the PCI bar num register used to
2044 * address the memory space. spdm_bar_offset specifies the offset
2045 * of the SPDM memory with in the bar num memory space.
2047 switch (spdm_bar_num) {
2050 hldev->spdm_mem_base = (char *)bar0 +
2051 (spdm_bar_offset * 8);
2056 char *bar1 = (char *)hldev->bar1;
2057 hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
/* Only BAR 0 and BAR 1 are valid SPDM locations. */
2061 xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1)));
2065 * Retrieve the size of SPDM table(number of entries).
2067 val64 = xge_os_pio_mem_read64(hldev->pdev,
2068 hldev->regh0, &bar0->spdm_structure);
2069 hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);
2072 spdm_table_size = hldev->spdm_max_entries *
2073 sizeof(xge_hal_spdm_entry_t);
/* First-time initialization: allocate the pointer array plus one
 * contiguous chunk for the shadow entries, then the spdm lock. */
2074 if (hldev->spdm_table == NULL) {
2078 * Allocate memory to hold the copy of SPDM table.
2080 if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
2083 (sizeof(xge_hal_spdm_entry_t *) *
2084 hldev->spdm_max_entries))) == NULL) {
2085 return XGE_HAL_ERR_OUT_OF_MEMORY;
2088 if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL)
2090 xge_os_free(hldev->pdev, hldev->spdm_table,
2091 (sizeof(xge_hal_spdm_entry_t *) *
2092 hldev->spdm_max_entries));
2093 return XGE_HAL_ERR_OUT_OF_MEMORY;
2096 xge_os_memzero(mem, spdm_table_size);
2097 for (i = 0; i < hldev->spdm_max_entries; i++) {
2098 hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
2100 i * sizeof(xge_hal_spdm_entry_t));
2102 xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
2105 * We are here because the host driver tries to
2106 * do a soft reset on the device.
2107 * Since the device soft reset clears the SPDM table, copy
2108 * the entries from the local SPDM table to the actual one.
2110 xge_os_spin_lock(&hldev->spdm_lock);
2111 for (i = 0; i < hldev->spdm_max_entries; i++) {
2112 xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];
2114 if (spdm_entry->in_use) {
2115 if (__hal_spdm_entry_add(hldev,
2116 &spdm_entry->src_ip,
2117 &spdm_entry->dst_ip,
2121 spdm_entry->is_ipv4,
2122 spdm_entry->tgt_queue,
2123 spdm_entry->jhash_value,
2124 spdm_entry->spdm_entry)
2126 /* Log a warning */
2127 xge_debug_device(XGE_ERR,
2128 "SPDM table update from local"
2133 xge_os_spin_unlock(&hldev->spdm_lock);
2137 * Set the receive traffic steering mode from default(classic)
2140 val64 = xge_os_pio_mem_read64(hldev->pdev,
2141 hldev->regh0, &bar0->rts_ctrl);
2142 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
2143 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2144 val64, &bar0->rts_ctrl);
2147 * We may not need to configure rts_rth_jhash_cfg register as the
2148 * default values are good enough to calculate the hash.
2152 * As of now, set all the rth mask registers to zero. TODO.
2154 for(i = 0; i < 5; i++) {
2155 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2156 0, &bar0->rts_rth_hash_mask[i]);
2159 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2160 0, &bar0->rts_rth_hash_mask_5);
2162 if (hldev->config.rth_spdm_use_l4) {
2163 val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
2164 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2165 val64, &bar0->rts_rth_status);
2168 val64 = XGE_HAL_RTS_RTH_EN;
2169 val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
2170 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2171 &bar0->rts_rth_cfg);
2178 * __hal_device_pci_init
2179 * @hldev: HAL device handle.
2181 * Initialize certain PCI/PCI-X configuration registers
2182 * with recommended values. Save config space for future hw resets.
2185 __hal_device_pci_init(xge_hal_device_t *hldev)
2191 /* Store PCI device ID and revision for future references where in we
2192 * decide Xena revision using PCI sub system ID */
2193 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
2194 xge_offsetof(xge_hal_pci_config_le_t, device_id),
2196 xge_os_pci_read8(hldev->pdev,hldev->cfgh,
2197 xge_offsetof(xge_hal_pci_config_le_t, revision),
/* Config-space size to snapshot differs per card type. */
2200 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
2201 pcisize = XGE_HAL_PCISIZE_HERC;
2202 else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
2203 pcisize = XGE_HAL_PCISIZE_XENA;
2205 /* save original PCI config space to restore it on device_terminate() */
2206 for (i = 0; i < pcisize; i++) {
2207 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2208 (u32*)&hldev->pci_config_space_bios + i);
2211 /* Set the PErr Response bit and SERR in PCI command register. */
2212 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2213 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
2215 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2216 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
2218 /* Set user specified value for the PCI Latency Timer */
2219 if (hldev->config.latency_timer &&
2220 hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
2221 xge_os_pci_write8(hldev->pdev, hldev->cfgh,
2222 xge_offsetof(xge_hal_pci_config_le_t,
2224 (u8)hldev->config.latency_timer);
2226 /* Read back latency timer to reflect it into user level */
2227 xge_os_pci_read8(hldev->pdev, hldev->cfgh,
2228 xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
2229 hldev->config.latency_timer = val;
2231 /* Enable Data Parity Error Recovery in PCI-X command register. */
2232 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2233 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2235 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2236 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2238 /* Set MMRB count in PCI-X command register. */
2239 if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
2241 cmd |= hldev->config.mmrb_count << 2;
2242 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2243 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2246 /* Read back MMRB count to reflect it into user level */
2247 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2248 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2251 hldev->config.mmrb_count = cmd>>2;
2253 /* Setting Maximum outstanding splits based on system type. */
2254 if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
2255 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2256 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2259 cmd |= hldev->config.max_splits_trans << 4;
2260 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2261 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2265 /* Read back max split trans to reflect it into user level */
2266 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2267 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2269 hldev->config.max_splits_trans = cmd>>4;
2271 /* Forcibly disabling relaxed ordering capability of the card. */
2272 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2273 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2275 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2276 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2278 /* save PCI config space for future resets */
2279 for (i = 0; i < pcisize; i++) {
2280 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2281 (u32*)&hldev->pci_config_space + i);
2286 * __hal_device_pci_info_get - Get PCI bus information such as width, frequency
2288 * @devh: HAL device handle.
2289 * @pci_mode: pointer to a variable of enumerated type
2290 * xge_hal_pci_mode_e{}.
2291 * @bus_frequency: pointer to a variable of enumerated type
2292 * xge_hal_pci_bus_frequency_e{}.
2293 * @bus_width: pointer to a variable of enumerated type
2294 * xge_hal_pci_bus_width_e{}.
2296 * Get pci mode, frequency, and PCI bus width.
2298 * Returns: one of the xge_hal_status_e{} enumerated types.
2299 * XGE_HAL_OK - for success.
2300 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
2301 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card.
2303 * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e, xge_hal_pci_bus_width_e.
/*
 * __hal_device_pci_info_get - decode PCI mode, bus frequency and bus width
 * for the detected card.
 *   HERC:  decoded from the BAR0 pci_info register (bit XGE_HAL_PCI_32_BIT
 *          selects 32- vs 64-bit width; bits 63:60 encode the mode).
 *   XENA:  only the mode is derived (from PCI-X status device/bus numbers);
 *          frequency and width are reported as UNKNOWN.
 *   TITAN: hardcoded to 64-bit / 250 MHz.
 * On success the detected bus frequency is also latched into
 * hldev->config.pci_freq_mherz when it was left at the "use hardcode" default.
 *
 * NOTE(review): this extracted listing skips source lines (the embedded line
 * numbers jump), so some statements -- e.g. the "*bus_frequency =" left-hand
 * sides of the switch arms, break statements and closing braces -- are not
 * visible here.  Comments describe only the visible code.
 */
2305 static xge_hal_status_e
2306 __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
2307 xge_hal_pci_bus_frequency_e *bus_frequency,
2308 xge_hal_pci_bus_width_e *bus_width)
2310 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2311 xge_hal_status_e rc_status = XGE_HAL_OK;
2312 xge_hal_card_e card_id = xge_hal_device_check_id (devh);
2314 #ifdef XGE_HAL_HERC_EMULATION
/* Emulation: report a fixed 66 MHz PCI configuration. */
2315 hldev->config.pci_freq_mherz =
2316 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2318 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2319 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2321 if (card_id == XGE_HAL_CARD_HERC) {
2322 xge_hal_pci_bar0_t *bar0 =
2323 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2324 u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
/* Width: a set XGE_HAL_PCI_32_BIT bit means a 32-bit bus. */
2326 if (XGE_HAL_PCI_32_BIT & pci_info)
2327 *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
2329 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
/* Mode/frequency: decode the mode field from bits 63:60 of pci_info. */
2330 switch((pci_info & XGE_HAL_PCI_INFO)>>60)
2332 case XGE_HAL_PCI_33MHZ_MODE:
2334 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
2335 *pci_mode = XGE_HAL_PCI_33MHZ_MODE;
2337 case XGE_HAL_PCI_66MHZ_MODE:
2339 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2340 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2342 case XGE_HAL_PCIX_M1_66MHZ_MODE:
2344 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2345 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
2347 case XGE_HAL_PCIX_M1_100MHZ_MODE:
2349 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
2350 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
2352 case XGE_HAL_PCIX_M1_133MHZ_MODE:
2354 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2355 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
2357 case XGE_HAL_PCIX_M2_66MHZ_MODE:
/* PCI-X Mode 2 runs double data rate: 66 MHz clock reports 133. */
2359 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2360 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
2362 case XGE_HAL_PCIX_M2_100MHZ_MODE:
2364 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
2365 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
2367 case XGE_HAL_PCIX_M2_133MHZ_MODE:
2369 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
2370 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
/* Reserved / not-supported encodings are treated as invalid PCI info. */
2372 case XGE_HAL_PCIX_M1_RESERVED:
2373 case XGE_HAL_PCIX_M1_66MHZ_NS:
2374 case XGE_HAL_PCIX_M1_100MHZ_NS:
2375 case XGE_HAL_PCIX_M1_133MHZ_NS:
2376 case XGE_HAL_PCIX_M2_RESERVED:
2377 case XGE_HAL_PCIX_533_RESERVED:
2379 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
2380 xge_debug_device(XGE_ERR,
2381 "invalid pci info "XGE_OS_LLXFMT,
2382 (unsigned long long)pci_info);
2385 if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
2386 xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
2387 "%d frequency %d", *pci_mode, *bus_width,
/* Latch the detected frequency if config still says "use hardcode". */
2389 if (hldev->config.pci_freq_mherz ==
2390 XGE_HAL_DEFAULT_USE_HARDCODE) {
2391 hldev->config.pci_freq_mherz = *bus_frequency;
2394 /* for XENA, we report PCI mode, only. PCI bus frequency, and bus width
2395 * are set to unknown */
2396 else if (card_id == XGE_HAL_CARD_XENA) {
2398 u8 dev_num, bus_num;
2399 /* initialize defaults for XENA */
2400 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2401 *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2402 xge_os_pci_read32(hldev->pdev, hldev->cfgh,
2403 xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
/* PCI-X status: device number in bits 7:3, bus number in bits 15:8.
 * Both zero implies the device sits on a plain PCI bus. */
2405 dev_num = (u8)((pcix_status & 0xF8) >> 3);
2406 bus_num = (u8)((pcix_status & 0xFF00) >> 8);
2407 if (dev_num == 0 && bus_num == 0)
2408 *pci_mode = XGE_HAL_PCI_BASIC_MODE;
2410 *pci_mode = XGE_HAL_PCIX_BASIC_MODE;
2411 xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
2412 if (hldev->config.pci_freq_mherz ==
2413 XGE_HAL_DEFAULT_USE_HARDCODE) {
2415 * There is no way to detect BUS frequency on Xena,
2416 * so, in case of automatic configuration we hopelessly
2419 hldev->config.pci_freq_mherz =
2420 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2422 } else if (card_id == XGE_HAL_CARD_TITAN) {
/* TITAN: fixed 64-bit, 250 MHz bus. */
2423 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
2424 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ;
2425 if (hldev->config.pci_freq_mherz ==
2426 XGE_HAL_DEFAULT_USE_HARDCODE) {
2427 hldev->config.pci_freq_mherz = *bus_frequency;
/* Unrecognized card id: report bad device. */
2430 rc_status = XGE_HAL_ERR_BAD_DEVICE_ID;
2431 xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
2439 * __hal_device_handle_link_up_ind
2440 * @hldev: HAL device handle.
2442 * Link up indication handler. The function is invoked by HAL when
2443 * Xframe indicates that the link is up for programmable amount of time.
/*
 * Handle a link-up indication: debounce redundant indications, re-enable
 * the adapter (hardware may have dropped the enable bit due to link noise),
 * turn the laser/LED on, verify via adapter_status that no RMAC fault
 * remains, then mark the link up and notify the ULD via its link_up
 * callback and a queued XGE_HAL_EVENT_LINK_IS_UP event.
 * NOTE(review): listing is non-contiguous; some statements/braces between
 * the visible lines are missing from this view.
 */
2446 __hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
2448 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2452 * If the previous link state is not down, return.
2454 if (hldev->link_state == XGE_HAL_LINK_UP) {
2455 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
/* Already up: on HERC just re-mask the link-up interrupt and
 * keep link-down unmasked, then ignore the indication. */
2456 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2457 val64 = xge_os_pio_mem_read64(
2458 hldev->pdev, hldev->regh0,
2459 &bar0->misc_int_mask);
2460 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2461 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2462 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2463 val64, &bar0->misc_int_mask);
2466 xge_debug_device(XGE_TRACE,
2467 "link up indication while link is up, ignoring..");
2471 /* Now re-enable it as due to noise, hardware turned it off */
2472 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2473 &bar0->adapter_control);
2474 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
2475 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* clears the ECC enable bit */
2476 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2477 &bar0->adapter_control);
2479 /* Turn on the Laser */
2480 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2481 &bar0->adapter_control);
2482 val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
2483 XGE_HAL_ADAPTER_LED_ON);
2484 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2485 &bar0->adapter_control);
2487 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2488 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
/* Verify the link really came up: any RMAC fault bit still set
 * means the transition failed. */
2489 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2490 &bar0->adapter_status);
2491 if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2492 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
2493 xge_debug_device(XGE_TRACE, "%s",
2494 "fail to transition link to up...");
2499 * Mask the Link Up interrupt and unmask the Link Down
2502 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2503 &bar0->misc_int_mask);
2504 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2505 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2506 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2507 &bar0->misc_int_mask);
2508 xge_debug_device(XGE_TRACE, "calling link up..");
2509 hldev->link_state = XGE_HAL_LINK_UP;
/* Notify the upper-layer driver directly via its callback. */
2512 if (g_xge_hal_driver->uld_callbacks.link_up) {
2513 g_xge_hal_driver->uld_callbacks.link_up(
2514 hldev->upper_layer_info);
/* Non-ISR path: poll adapter_status until the RMAC fault bits clear
 * (== 0) before queueing the LINK_IS_UP event. */
2521 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2522 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2523 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2524 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2527 (void) xge_queue_produce_context(hldev->queueh,
2528 XGE_HAL_EVENT_LINK_IS_UP,
2530 /* link is up after being enabled */
2533 xge_debug_device(XGE_TRACE, "%s",
2534 "fail to transition link to up...");
2540 * __hal_device_handle_link_down_ind
2541 * @hldev: HAL device handle.
2543 * Link down indication handler. The function is invoked by HAL when
2544 * Xframe indicates that the link is down.
/*
 * Handle a link-down indication: ignore redundant indications, debounce
 * (if the adapter is enabled and adapter_status shows no RMAC fault the
 * link is actually up -- treat as noise), otherwise turn the LED off,
 * flip interrupt masks (HERC/ISR path), mark the link down and notify the
 * ULD via its link_down callback and a queued XGE_HAL_EVENT_LINK_IS_DOWN.
 * NOTE(review): listing is non-contiguous; some statements/braces between
 * the visible lines are missing from this view.
 */
2547 __hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
2549 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2553 * If the previous link state is not up, return.
2555 if (hldev->link_state == XGE_HAL_LINK_DOWN) {
2556 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
/* Already down: on HERC re-mask the link-down interrupt and keep
 * link-up unmasked, then ignore the indication. */
2557 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2558 val64 = xge_os_pio_mem_read64(
2559 hldev->pdev, hldev->regh0,
2560 &bar0->misc_int_mask);
2561 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2562 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2563 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2564 val64, &bar0->misc_int_mask);
2567 xge_debug_device(XGE_TRACE,
2568 "link down indication while link is down, ignoring..");
2573 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2574 &bar0->adapter_control);
2576 /* try to debounce the link only if the adapter is enabled. */
2577 if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
2578 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2579 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2580 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2581 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2582 xge_debug_device(XGE_TRACE,
2583 "link is actually up (possible noisy link?), ignoring.");
/* Genuine link-down: switch the LED off. */
2588 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2589 &bar0->adapter_control);
2591 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
2592 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2593 &bar0->adapter_control);
2595 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2596 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2598 * Mask the Link Down interrupt and unmask the Link up
2601 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2602 &bar0->misc_int_mask);
2603 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2604 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2605 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2606 &bar0->misc_int_mask);
2609 xge_debug_device(XGE_TRACE, "calling link down..");
2610 hldev->link_state = XGE_HAL_LINK_DOWN;
/* Notify the upper-layer driver directly via its callback. */
2613 if (g_xge_hal_driver->uld_callbacks.link_down) {
2614 g_xge_hal_driver->uld_callbacks.link_down(
2615 hldev->upper_layer_info);
/* Queue the LINK_IS_DOWN event for deferred processing. */
2621 (void) xge_queue_produce_context(hldev->queueh,
2622 XGE_HAL_EVENT_LINK_IS_DOWN,
2628 * __hal_device_handle_link_state_change
2629 * @hldev: HAL device handle.
2631 * Link state change handler. The function is invoked by HAL when
2632 * Xframe indicates link state change condition. The code here makes sure to
2633 * 1) ignore redundant state change indications;
2634 * 2) execute link-up sequence, and handle the failure to bring the link up;
2635 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
2636 * upper-layer driver (ULD).
/*
 * Sample the hardware link state (derived from the RMAC fault bits of
 * adapter_status), re-sampling up to config.link_valid_cnt times to filter
 * noise, and dispatch to the link-up / link-down handlers only on a real
 * state change.  A disabled adapter with a stale "up" software state is
 * forced down immediately.
 * NOTE(review): listing is non-contiguous; some statements/braces between
 * the visible lines are missing from this view.
 */
2639 __hal_device_handle_link_state_change(xge_hal_device_t *hldev)
2644 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2648 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2649 &bar0->adapter_control);
2651 /* If the adapter is not enabled but the hal thinks we are in the up
2652 * state then transition to the down state.
2654 if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
2655 (hldev->link_state == XGE_HAL_LINK_UP) ) {
2656 return(__hal_device_handle_link_down_ind(hldev));
/* Any RMAC fault (remote or local) means the link is down. */
2661 (void) xge_hal_device_status(hldev, &hw_status);
2662 hw_link_state = (hw_status &
2663 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2664 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
2665 XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
2667 /* check if the current link state is still considered
2668 * to be changed. This way we will make sure that this is
2669 * not a noise which needs to be filtered out */
2670 if (hldev->link_state == hw_link_state)
2672 } while (i++ < hldev->config.link_valid_cnt);
2674 /* If the current link state is same as previous, just return */
2675 if (hldev->link_state == hw_link_state)
2677 /* detected state change */
2678 else if (hw_link_state == XGE_HAL_LINK_UP)
2679 retcode = __hal_device_handle_link_up_ind(hldev);
2681 retcode = __hal_device_handle_link_down_ind(hldev);
/*
 * System-error (SERR) handler: bump the SERR counter, optionally dump the
 * device state (dump_on_serr + MGMT_AUX), queue an XGE_HAL_EVENT_SERR
 * carrying the offending register value, and log which register reported it.
 * @reg: name of the register that flagged the error (for the log message).
 * @value: value read from that register; copied into the queued event.
 */
2689 __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
2691 hldev->stats.sw_dev_err_stats.serr_cnt++;
2692 if (hldev->config.dump_on_serr) {
2693 #ifdef XGE_HAL_USE_MGMT_AUX
2694 (void) xge_hal_aux_device_dump(hldev);
2698 (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
2699 1, sizeof(u64), (void *)&value);
2701 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2702 (unsigned long long) value);
/*
 * ECC-error handler: optionally dump device state (dump_on_eccerr +
 * MGMT_AUX).  Only XENA queues an XGE_HAL_EVENT_ECCERR for recovery --
 * per the original comment, Herc recovers on its own.  Always logs the
 * reporting register and value.
 */
2709 __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
2711 if (hldev->config.dump_on_eccerr) {
2712 #ifdef XGE_HAL_USE_MGMT_AUX
2713 (void) xge_hal_aux_device_dump(hldev);
2717 /* Herc smart enough to recover on its own! */
2718 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
2719 (void) xge_queue_produce(hldev->queueh,
2720 XGE_HAL_EVENT_ECCERR, hldev,
2721 1, sizeof(u64), (void *)&value);
2724 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2725 (unsigned long long) value);
/*
 * Parity-error handler: optionally dump device state (dump_on_parityerr +
 * MGMT_AUX), queue an XGE_HAL_EVENT_PARITYERR (context event -- no payload,
 * unlike the SERR/ECC handlers), and log the reporting register and value.
 */
2732 __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
2734 if (hldev->config.dump_on_parityerr) {
2735 #ifdef XGE_HAL_USE_MGMT_AUX
2736 (void) xge_hal_aux_device_dump(hldev);
2739 (void) xge_queue_produce_context(hldev->queueh,
2740 XGE_HAL_EVENT_PARITYERR, hldev);
2742 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2743 (unsigned long long) value);
/*
 * Target-abort handler: simply queues an XGE_HAL_EVENT_TARGETABORT context
 * event for deferred processing.
 */
2750 __hal_device_handle_targetabort(xge_hal_device_t *hldev)
2752 (void) xge_queue_produce_context(hldev->queueh,
2753 XGE_HAL_EVENT_TARGETABORT, hldev);
2758 * __hal_device_hw_initialize
2759 * @hldev: HAL device handle.
2761 * Initialize Xframe hardware.
/*
 * One-shot hardware bring-up sequence: byte-swapper setup, PCI bus probing
 * and per-bus-mode tuning, XGXS release from reset, MTU/steering setup,
 * XAUI/MAC/interrupt-scheme configuration (TTI/RTI/RTH/RTS), errata
 * workarounds, fifo/ring init, and a final quiescence check.  Sets
 * hldev->hw_is_initialized on success.
 * NOTE(review): this extracted listing skips source lines (embedded line
 * numbers jump), so error-return statements, closing braces and some
 * assignments are not visible; comments cover only the visible code.
 */
2763 static xge_hal_status_e
2764 __hal_device_hw_initialize(xge_hal_device_t *hldev)
2766 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2767 xge_hal_status_e status;
2770 /* Set proper endian settings and verify the same by reading the PIF
2771 * Feed-back register. */
2772 status = __hal_device_set_swapper(hldev);
2773 if (status != XGE_HAL_OK) {
2777 /* update the pci mode, frequency, and width */
2778 if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
2779 &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
2780 hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE;
2781 hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2782 hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2784 * FIXME: this cannot happen.
2785 * But if it happens we cannot continue just like that
2787 xge_debug_device(XGE_ERR, "unable to get pci info");
2790 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) ||
2791 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) ||
2792 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) {
2793 /* PCI optimization: set TxReqTimeOut
2794 * register (0x800+0x120) to 0x1ff or
2795 * something close to this.
2796 * Note: not to be used for PCI-X! */
2798 val64 = XGE_HAL_TXREQTO_VAL(0x1FF);
2799 val64 |= XGE_HAL_TXREQTO_EN;
2800 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2801 &bar0->txreqtimeout);
/* Plain PCI: no read/write retry delays. */
2803 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2804 &bar0->read_retry_delay);
2806 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2807 &bar0->write_retry_delay);
2809 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
2812 if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ ||
2813 hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) {
2815 /* Optimizing for PCI-X 266/250 */
2817 val64 = XGE_HAL_TXREQTO_VAL(0x7F);
2818 val64 |= XGE_HAL_TXREQTO_EN;
2819 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2820 &bar0->txreqtimeout);
2822 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes");
2825 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
/* HERC-specific retry delays. */
2826 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2827 &bar0->read_retry_delay);
2829 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2830 &bar0->write_retry_delay);
2833 /* added this to set the no of bytes used to update lso_bytes_sent
2835 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2836 &bar0->pic_control_2);
2837 val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2);
2838 val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
2839 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2840 &bar0->pic_control_2);
2841 /* added this to clear the EOI_RESET field while leaving XGXS_RESET
2842 * in reset, then a 1-second delay */
2843 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2844 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset);
2845 xge_os_mdelay(1000);
2847 /* Clear the XGXS_RESET field of the SW_RESET register in order to
2848 * release the XGXS from reset. Its reset value is 0xA5; write 0x00
2849 * to activate the XGXS. The core requires a minimum 500 us reset.*/
2850 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset);
2851 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2855 /* read registers in all blocks */
2856 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2857 &bar0->mac_int_mask);
2858 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2859 &bar0->mc_int_mask);
2860 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2861 &bar0->xgxs_int_mask);
2863 /* set default MTU and steer based on length*/
2864 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Always set 22 bytes extra for steering to work
2866 if (hldev->config.mac.rmac_bcast_en) {
2867 xge_hal_device_bcast_enable(hldev);
2869 xge_hal_device_bcast_disable(hldev);
2872 #ifndef XGE_HAL_HERC_EMULATION
2873 __hal_device_xaui_configure(hldev);
2875 __hal_device_mac_link_util_set(hldev);
2877 __hal_device_mac_link_util_set(hldev);
2880 * Keep its PCI REQ# line asserted during a write
2881 * transaction up to the end of the transaction
2883 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2884 &bar0->misc_control);
2886 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN;
2888 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2889 val64, &bar0->misc_control);
2891 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
/* HERC: also enable link-fault detection in misc_control. */
2892 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2893 &bar0->misc_control);
2895 val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT;
2897 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2898 val64, &bar0->misc_control);
2902 * bimodal interrupts is when all Rx traffic interrupts
2903 * will go to TTI, so we need to adjust RTI settings and
2904 * use adaptive TTI timer. We need to make sure RTI is
2905 * properly configured to sane value which will not
2906 * disrupt bimodal behavior.
2908 if (hldev->config.bimodal_interrupts) {
2911 /* force polling_cnt to be "0", otherwise
2912 * IRQ workload statistics will be screwed. This could
2913 * be worked out in TXPIC handler later. */
2914 hldev->config.isr_polling_cnt = 0;
2915 hldev->config.sched_timer_us = 10000;
2917 /* disable all TTI < 56 */
2918 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
2920 if (!hldev->config.fifo.queue[i].configured)
2922 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
2923 if (hldev->config.fifo.queue[i].tti[j].enabled)
2924 hldev->config.fifo.queue[i].tti[j].enabled = 0;
2928 /* now configure bimodal interrupts */
2929 __hal_device_bimodal_configure(hldev);
/* Interrupt / steering configuration; each step's error path
 * (not fully visible in this listing) aborts initialization. */
2932 status = __hal_device_tti_configure(hldev, 0);
2933 if (status != XGE_HAL_OK)
2936 status = __hal_device_rti_configure(hldev, 0);
2937 if (status != XGE_HAL_OK)
2940 status = __hal_device_rth_it_configure(hldev);
2941 if (status != XGE_HAL_OK)
2944 status = __hal_device_rth_spdm_configure(hldev);
2945 if (status != XGE_HAL_OK)
2948 status = __hal_device_rts_mac_configure(hldev);
2949 if (status != XGE_HAL_OK) {
2950 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed ");
2954 status = __hal_device_rts_port_configure(hldev);
2955 if (status != XGE_HAL_OK) {
2956 xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed ");
2960 status = __hal_device_rts_qos_configure(hldev);
2961 if (status != XGE_HAL_OK) {
2962 xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed ");
2966 __hal_device_pause_frames_configure(hldev);
2967 __hal_device_rmac_padding_configure(hldev);
2968 __hal_device_shared_splits_configure(hldev);
2970 /* make sure all interrupts going to be disabled at the moment */
2971 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
2973 /* SXE-008 Transmit DMA arbitration issue */
2974 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
2975 hldev->revision < 4) {
2976 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0,
2977 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR,
2980 #if 0 // Removing temporarily as FreeBSD is seeing lower performance
2981 // attributable to this fix.
2983 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2984 /* Turn off the ECC error reporting for RLDRAM interface */
2985 if ((status = xge_hal_fix_rldram_ecc_error(hldev)) != XGE_HAL_OK)
2989 __hal_fifo_hw_initialize(hldev);
2990 __hal_ring_hw_initialize(hldev);
2992 if (__hal_device_wait_quiescent(hldev, &val64)) {
2993 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
2996 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
2997 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
2998 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
2999 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
3000 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3003 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
3004 (unsigned long long)(ulong_t)hldev);
3006 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX ||
3007 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) {
3009 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL
3012 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3013 &bar0->pic_control);
3014 val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
3015 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
3016 &bar0->pic_control);
3019 hldev->hw_is_initialized = 1;
3020 hldev->terminating = 0;
3025 * __hal_device_reset - Reset device only.
3026 * @hldev: HAL device handle.
3028 * Reset the device, and subsequently restore
3029 * the previously saved PCI configuration space.
3031 #define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
/*
 * Software-reset the chip and restore state afterwards: save the MSI-X
 * vector table (HERC, MSI-X mode), issue SW_RESET_ALL (upper 32 bits,
 * byte-swapped by hand if the swapper had not yet been programmed),
 * re-write the saved PCI config space until the device id reads back
 * correctly, verify the sw_reset register reads back its expected
 * card-specific raw value, and restore the MSI-X table.  Leaves
 * link_state = DOWN and hw_is_initialized = 0.
 * NOTE(review): this extracted listing skips source lines (embedded line
 * numbers jump) -- loop increments, some braces and the retry/return
 * statements are not visible; comments cover only the visible code.
 */
3032 static xge_hal_status_e
3033 __hal_device_reset(xge_hal_device_t *hldev)
3035 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3036 int i, j, swap_done, pcisize = 0;
3037 u64 val64, rawval = 0ULL;
3039 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3040 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3041 if ( hldev->bar2 ) {
/* Save the MSI-X table (in BAR2) -- reset wipes it. */
3042 u64 *msix_vetor_table = (u64 *)hldev->bar2;
3044 // 2 64bit words for each entry
3045 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3047 hldev->msix_vector_table[i] =
3048 xge_os_pio_mem_read64(hldev->pdev,
3049 hldev->regh2, &msix_vetor_table[i]);
/* Determine whether the read swapper was already programmed; if so a
 * plain upper-32-bit write works, otherwise swap the bytes manually. */
3054 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3055 &bar0->pif_rd_swapper_fb);
3056 swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);
3059 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
3060 (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
3062 u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
3063 #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
3065 val = (((val & (u32)0x000000ffUL) << 24) |
3066 ((val & (u32)0x0000ff00UL) << 8) |
3067 ((val & (u32)0x00ff0000UL) >> 8) |
3068 ((val & (u32)0xff000000UL) >> 24));
3070 xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
3074 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
3075 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
3077 xge_os_mdelay(20); /* Wait for 20 ms after reset */
3080 /* Poll for no more than 1 second */
3081 for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
/* Re-write the PCI config space saved before reset, one dword at
 * a time, then probe the device id to see if the card is back. */
3083 for (j = 0; j < pcisize; j++) {
3084 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
3085 *((u32*)&hldev->pci_config_space + j));
3088 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
3089 xge_offsetof(xge_hal_pci_config_le_t, device_id),
3092 if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
3098 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN)
3100 xge_debug_device(XGE_ERR, "device reset failed");
3101 return XGE_HAL_ERR_RESET_FAILED;
3104 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3107 rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
3108 pcisize = XGE_HAL_PCISIZE_HERC;
3111 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3113 if (val64 != rawval) {
3117 xge_os_mdelay(1); /* Wait for 1ms before retry */
3119 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
3120 rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
3121 pcisize = XGE_HAL_PCISIZE_XENA;
3122 xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
3125 /* Restore MSI-X vector table */
3126 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3127 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3128 if ( hldev->bar2 ) {
3130 * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 )
3131 * 98: PBATable 00000404 ( BIR:4 Offset:0x400 )
3133 u64 *msix_vetor_table = (u64 *)hldev->bar2;
3135 /* 2 64bit words for each entry */
3136 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3138 xge_os_pio_mem_write64(hldev->pdev,
3140 hldev->msix_vector_table[i],
3141 &msix_vetor_table[i]);
3147 hldev->link_state = XGE_HAL_LINK_DOWN;
3148 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
/* Final sanity check: sw_reset must read back the expected raw value. */
3151 if (val64 != rawval) {
3152 xge_debug_device(XGE_ERR, "device has not been reset "
3153 "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
3154 (unsigned long long)val64, (unsigned long long)rawval);
3155 return XGE_HAL_ERR_RESET_FAILED;
3158 hldev->hw_is_initialized = 0;
3163 * __hal_device_poll - General private routine to poll the device.
3164 * @hldev: HAL device handle.
3166 * Returns: one of the xge_hal_status_e{} enumerated types.
3167 * XGE_HAL_OK - for success.
3168 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
/*
 * Periodic device health poll: checks SERR sources and data-path parity
 * errors (both critical), processes link state changes (non-HERC or
 * non-ISR path), and services the fault-injection hooks (inject_serr,
 * inject_ecc, inject_bad_tcode) used for testing.  Returns
 * XGE_HAL_ERR_CRITICAL on any critical error.
 * NOTE(review): listing is non-contiguous; some statements/braces between
 * the visible lines are missing from this view.
 */
3170 static xge_hal_status_e
3171 __hal_device_poll(xge_hal_device_t *hldev)
3173 xge_hal_pci_bar0_t *bar0;
3176 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3178 /* Handling SERR errors by forcing a H/W reset. */
3179 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3180 &bar0->serr_source);
3181 if (err_reg & XGE_HAL_SERR_SOURCE_ANY) {
3182 __hal_device_handle_serr(hldev, "serr_source", err_reg);
3183 return XGE_HAL_ERR_CRITICAL;
3186 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3187 &bar0->misc_int_reg);
3189 if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) {
3190 hldev->stats.sw_dev_err_stats.parity_err_cnt++;
3191 __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg);
3192 return XGE_HAL_ERR_CRITICAL;
3195 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3196 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
3200 /* Handling link status change error Intr */
3201 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3202 &bar0->mac_rmac_err_reg);
3203 if (__hal_device_handle_link_state_change(hldev))
3204 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3205 err_reg, &bar0->mac_rmac_err_reg);
/* Test hooks: consume each injected fault once, then handle it as if
 * the hardware had reported it. */
3208 if (hldev->inject_serr != 0) {
3209 err_reg = hldev->inject_serr;
3210 hldev->inject_serr = 0;
3211 __hal_device_handle_serr(hldev, "inject_serr", err_reg);
3212 return XGE_HAL_ERR_CRITICAL;
3215 if (hldev->inject_ecc != 0) {
3216 err_reg = hldev->inject_ecc;
3217 hldev->inject_ecc = 0;
3218 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3219 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg);
3220 return XGE_HAL_ERR_CRITICAL;
3223 if (hldev->inject_bad_tcode != 0) {
/* Build a stack-local dummy channel/descriptor and push the injected
 * transfer code through the regular tcode handler. */
3224 u8 t_code = hldev->inject_bad_tcode;
3225 xge_hal_channel_t channel;
3226 xge_hal_fifo_txd_t txd;
3227 xge_hal_ring_rxd_1_t rxd;
3229 channel.devh = hldev;
3231 if (hldev->inject_bad_tcode_for_chan_type ==
3232 XGE_HAL_CHANNEL_TYPE_FIFO) {
3233 channel.type = XGE_HAL_CHANNEL_TYPE_FIFO;
3236 channel.type = XGE_HAL_CHANNEL_TYPE_RING;
3239 hldev->inject_bad_tcode = 0;
3241 if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO)
3242 return xge_hal_device_handle_tcode(&channel, &txd,
3245 return xge_hal_device_handle_tcode(&channel, &rxd,
3253 * __hal_verify_pcc_idle - Verify All Enabled PCC are IDLE or not
3254 * @hldev: HAL device handle.
3255 * @adp_status: Adapter Status value
3256 * Usage: See xge_hal_device_enable{}.
/*
 * Verify that the enabled PCCs report IDLE in the given adapter-status
 * value.  Early Xena (revision < 4) only has 4 PCCs enabled (SXE-008
 * workaround), so a narrower idle mask is checked for those chips.
 * Returns XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT if the relevant idle bits
 * are not all set.
 */
3259 __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
3261 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
3262 hldev->revision < 4) {
3264 * For Xena 1,2,3 we enable only 4 PCCs Due to
3265 * SXE-008 (Transmit DMA arbitration issue)
3267 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE)
3268 != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) {
3269 xge_debug_device(XGE_TRACE, "%s",
3270 "PCC is not IDLE after adapter enabled!");
3271 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3274 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) !=
3275 XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) {
3276 xge_debug_device(XGE_TRACE, "%s",
3277 "PCC is not IDLE after adapter enabled!");
3278 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
/*
 * Adaptive bimodal-interrupt timer tuning for one ring: compare recent
 * per-interrupt throughput (interrupt count x avg bytes) against a rolling
 * ~0.5 s history and step the TTI timer value up/down toward the best
 * throughput, clamped to [bimodal_timer_lo_us, bimodal_timer_hi_us].
 * Resets the per-ring IRQ workload counters and reprograms RTI/TTI with
 * the new timer value.
 * NOTE(review): listing is non-contiguous; some statements (e.g. the
 * alternate branches around the _STEP adjustments) are missing from view.
 * NOTE(review): the function-scope static history arrays make this
 * routine non-reentrant across rings/devices -- presumably intentional
 * for a single polling context; verify against callers.
 */
3285 __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
3287 int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
3288 int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
3291 #define _HIST_SIZE 50 /* 0.5 sec history */
3292 #define _HIST_ADJ_TIMER 1
3295 static int bytes_avg_history[_HIST_SIZE] = {0};
3296 static int d_avg_history[_HIST_SIZE] = {0};
3297 static int history_idx = 0;
3298 static int pstep = 1;
3299 static int hist_adj_timer = 0;
3302 * tval - current value of this bimodal timer
3304 tval = hldev->bimodal_tti[ring_no].timer_val_us;
3307 * d - how many interrupts we were getting since last
3308 * bimodal timer tick.
3310 d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
3311 hldev->bimodal_intr_cnt;
3313 /* advance bimodal interrupt counter */
3314 hldev->bimodal_intr_cnt =
3315 hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
3318 * iwl_cnt - how many interrupts we've got since last
3319 * bimodal timer tick.
3321 iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
3322 hldev->irq_workload_rxcnt[ring_no] : 1);
3323 iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
3324 hldev->irq_workload_txcnt[ring_no] : 1);
3325 iwl_cnt = iwl_rxcnt + iwl_txcnt;
3328 * we need to take hldev->config.isr_polling_cnt into account
3329 * but for some reason this line causing GCC to produce wrong
3330 * code on Solaris. As of now, if bimodal_interrupts is configured
3331 * hldev->config.isr_polling_cnt is forced to be "0".
3333 * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */
3336 * iwl_avg - how many RXDs on average been processed since
3337 * last bimodal timer tick. This indirectly includes
3340 iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
3341 iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
3342 iwl_avg = iwl_rxavg + iwl_txavg;
3343 iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;
3346 * len_avg - how many bytes on average been processed since
3347 * last bimodal timer tick. i.e. average frame size.
3349 len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
3350 (hldev->irq_workload_rxd[ring_no] ?
3351 hldev->irq_workload_rxd[ring_no] : 1);
3352 len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
3353 (hldev->irq_workload_txd[ring_no] ?
3354 hldev->irq_workload_txd[ring_no] : 1);
3355 len_avg = len_rxavg + len_txavg;
3359 /* align on low boundary */
3360 if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
3361 tval = hldev->config.bimodal_timer_lo_us;
/* Low-traffic path (branch header not visible): fall back to the low
 * boundary and clear the gathered history. */
3365 tval = hldev->config.bimodal_timer_lo_us;
3367 for (i = 0; i < _HIST_SIZE; i++)
3368 bytes_avg_history[i] = d_avg_history[i] = 0;
3374 /* always try to adjust timer to the best throughput value */
3375 bytes_avg = iwl_avg * len_avg;
3376 history_idx %= _HIST_SIZE;
3377 bytes_avg_history[history_idx] = bytes_avg;
3378 d_avg_history[history_idx] = d;
3380 d_hist = bytes_hist = 0;
3381 for (i = 0; i < _HIST_SIZE; i++) {
3382 /* do not re-configure until history is gathered */
3383 if (!bytes_avg_history[i]) {
3384 tval = hldev->config.bimodal_timer_lo_us;
3387 bytes_hist += bytes_avg_history[i];
3388 d_hist += d_avg_history[i];
3390 bytes_hist /= _HIST_SIZE;
3391 d_hist /= _HIST_SIZE;
3393 // xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
3394 // d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
3395 // d_hist*bytes_hist, pstep);
3397 /* make an adaptive step */
3398 if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
/* Step the timer in the current direction, staying within the
 * configured [lo_us, hi_us] bounds, and count the adjustment. */
3404 (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
3406 hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
3407 } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
3409 hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
3412 /* enable TTI range A for better latencies */
3413 hldev->bimodal_urange_a_en = 0;
3414 if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
3415 hldev->bimodal_urange_a_en = 1;
3418 /* reset workload statistics counters */
3419 hldev->irq_workload_rxcnt[ring_no] = 0;
3420 hldev->irq_workload_rxd[ring_no] = 0;
3421 hldev->irq_workload_rxlen[ring_no] = 0;
3422 hldev->irq_workload_txcnt[ring_no] = 0;
3423 hldev->irq_workload_txd[ring_no] = 0;
3424 hldev->irq_workload_txlen[ring_no] = 0;
3426 /* reconfigure TTI56 + ring_no with new timer value */
3427 hldev->bimodal_timer_val_us = tval;
3428 (void) __hal_device_rti_configure(hldev, 1);
/*
 * __hal_update_rxufca - Adaptive Rx interrupt coalescing ("urange_a").
 * Every rxufca_lbolt_period "lbolt" ticks, compare the Rx traffic
 * interrupt count against rxufca_intr_thres and nudge the RTI ufc_a
 * (urange_a frame count) for ALL rings up toward rxufca_hi_lim (fewer
 * interrupts) or down toward rxufca_lo_lim, then re-program the RTI.
 * NOTE(review): this listing is lossy -- the return-type line, the
 * local declarations (i, ufc, ic), the ufc increment/decrement lines
 * and several braces are not visible in this chunk.
 */
3432 __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
3436 	ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
3437 	ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
3439 	/* urange_a adaptive coalescing */
3440 	if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
/* interrupt rate above threshold: raise ufc_a (coalesce more) */
3441 	    if (ic > hldev->rxufca_intr_thres) {
3442 	        if (ufc < hldev->config.rxufca_hi_lim) {
/* apply the new ufc_a to every ring, then reconfigure RTI */
3444 	        for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3445 	            hldev->config.ring.queue[i].rti.ufc_a = ufc;
3446 	        (void) __hal_device_rti_configure(hldev, 1);
3447 	        hldev->stats.sw_dev_info_stats.
3448 	            rxufca_hi_adjust_cnt++;
/* re-arm the threshold relative to the current interrupt count */
3450 	        hldev->rxufca_intr_thres = ic +
3451 	            hldev->config.rxufca_intr_thres; /* def: 30 */
/* interrupt rate below threshold: lower ufc_a (coalesce less) */
3453 	        if (ufc > hldev->config.rxufca_lo_lim) {
3455 	        for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3456 	            hldev->config.ring.queue[i].rti.ufc_a = ufc;
3457 	        (void) __hal_device_rti_configure(hldev, 1);
3458 	        hldev->stats.sw_dev_info_stats.
3459 	            rxufca_lo_adjust_cnt++;
/* schedule the next adjustment window */
3462 	    hldev->rxufca_lbolt_time = hldev->rxufca_lbolt +
3463 	        hldev->config.rxufca_lbolt_period;
3465 	hldev->rxufca_lbolt++;
/*
 * Reads mc_int_status and, when the MC interrupt bit is set, reads the
 * latched mc_err_reg and writes the same value back (presumably
 * write-to-clear semantics -- verify against the Xframe PRM).
 * Single-bit (correctable) and double-bit ECC errors bump the relevant
 * error counters; MIRI double-bit ECC errors are fatal and the function
 * returns XGE_HAL_ERR_CRITICAL to request a device reset.
 * NOTE(review): this listing is lossy -- closing braces and the final
 * success return are not visible in this chunk.
 */
3469 * __hal_device_handle_mc - Handle MC interrupt reason
3470 * @hldev: HAL device handle.
3471 * @reason: interrupt reason
3474 __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
3476 	xge_hal_pci_bar0_t *isrbar0 =
3477 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
/* nothing to do if this is not an MC interrupt */
3480 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3481 	    &isrbar0->mc_int_status);
3482 	if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
/* read latched error bits, then write them back to clear them */
3485 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3486 	    &isrbar0->mc_err_reg);
3487 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3488 	    val64, &isrbar0->mc_err_reg);
/* single-bit ECC; ITQ/RLD bits are checked on non-Xena cards only */
3490 	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
3491 	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
3492 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
3493 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
3494 	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3495 	     (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
3496 	      val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
3497 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
3498 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
3499 	    hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
3500 	    hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
/* double-bit ECC: counted here; only MIRI double-bit is fatal below */
3503 	if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
3504 	    val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
3505 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3506 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
3507 	    (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3508 	     (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
3509 	      val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
3510 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
3511 	      val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
3512 	    hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
3513 	    hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3516 	if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
3517 	    hldev->stats.sw_dev_err_stats.sm_err_cnt++;
3520 	/* those two should result in device reset */
3521 	if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3522 	    val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
3523 	    __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
3524 	    return XGE_HAL_ERR_CRITICAL;
/*
 * For each asserted PIC sub-reason (FLSH, MDIO, IIC, MISC) the latched
 * interrupt register is read and the same value written back
 * (presumably write-to-clear -- verify against the Xframe PRM).
 * When XGE_HAL_PROCESS_LINK_INT_IN_ISR is defined, Herc link up/down
 * interrupts found in misc_int_reg are dispatched to the link state
 * handlers directly from here.
 * NOTE(review): lossy listing -- braces and the final return are not
 * visible in this chunk.
 */
3531 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
3532 * @hldev: HAL device handle.
3533 * @reason: interrupt reason
3536 __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
3538 	xge_hal_pci_bar0_t *isrbar0 =
3539 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3542 	if (reason & XGE_HAL_PIC_INT_FLSH) {
3543 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3544 	        &isrbar0->flsh_int_reg);
3545 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3546 	        val64, &isrbar0->flsh_int_reg);
3547 	    /* FIXME: handle register */
3549 	if (reason & XGE_HAL_PIC_INT_MDIO) {
3550 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3551 	        &isrbar0->mdio_int_reg);
3552 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3553 	        val64, &isrbar0->mdio_int_reg);
3554 	    /* FIXME: handle register */
3556 	if (reason & XGE_HAL_PIC_INT_IIC) {
3557 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3558 	        &isrbar0->iic_int_reg);
3559 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3560 	        val64, &isrbar0->iic_int_reg);
3561 	    /* FIXME: handle register */
3563 	if (reason & XGE_HAL_PIC_INT_MISC) {
3564 	    val64 = xge_os_pio_mem_read64(hldev->pdev,
3565 	        hldev->regh0, &isrbar0->misc_int_reg);
3566 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3567 	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3568 	        /* Check for Link interrupts. If both Link Up/Down
3569 	         * bits are set, clear both and check adapter status
3571 	        if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
3572 	            (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
/* ambiguous: both edges latched -- clear both, let poll decide */
3575 	            xge_debug_device(XGE_TRACE,
3576 	            "both link up and link down detected "XGE_OS_LLXFMT,
3577 	            (unsigned long long)val64);
3579 	            temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
3580 	                  XGE_HAL_MISC_INT_REG_LINK_UP_INT);
3581 	            xge_os_pio_mem_write64(hldev->pdev,
3582 	                       hldev->regh0, temp64,
3583 	                       &isrbar0->misc_int_reg);
3585 	        else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
3586 	            xge_debug_device(XGE_TRACE,
3587 	            "link up call request, misc_int "XGE_OS_LLXFMT,
3588 	            (unsigned long long)val64);
3589 	            __hal_device_handle_link_up_ind(hldev);
3591 	        else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
3592 	            xge_debug_device(XGE_TRACE,
3593 	            "link down request, misc_int "XGE_OS_LLXFMT,
3594 	            (unsigned long long)val64);
3595 	            __hal_device_handle_link_down_ind(hldev);
/* clear whatever misc bits were latched */
3600 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3601 	        val64, &isrbar0->misc_int_reg);
/*
 * Reads pic_int_status, delegates non-traffic PIC sub-reasons to
 * __hal_device_handle_pic(), then (for PIC_INT_TX) clears
 * txpic_int_reg.  On a scheduler-timer interrupt it invokes the ULD
 * sched_timer callback and runs the two adaptive tuning passes:
 * __hal_update_rxufca() and __hal_update_bimodal().
 * NOTE(review): lossy listing -- braces, local declarations (i) and
 * the final return are not visible in this chunk.
 */
3609 * __hal_device_handle_txpic - Handle TxPIC interrupt reason
3610 * @hldev: HAL device handle.
3611 * @reason: interrupt reason
3614 __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
3616 	xge_hal_status_e status = XGE_HAL_OK;
3617 	xge_hal_pci_bar0_t *isrbar0 =
3618 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3621 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3622 	    &isrbar0->pic_int_status);
3623 	if ( val64 & (XGE_HAL_PIC_INT_FLSH |
3624 	          XGE_HAL_PIC_INT_MDIO |
3625 	          XGE_HAL_PIC_INT_IIC |
3626 	          XGE_HAL_PIC_INT_MISC) ) {
3627 	    status =  __hal_device_handle_pic(hldev, val64);
3631 	if (!(val64 & XGE_HAL_PIC_INT_TX))
/* read and write back txpic_int_reg to clear the latched bits */
3634 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3635 	    &isrbar0->txpic_int_reg);
3636 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3637 	    val64, &isrbar0->txpic_int_reg);
3640 	if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
/* notify the upper-layer driver's scheduler timer, if registered */
3643 	    if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
3644 	        g_xge_hal_driver->uld_callbacks.sched_timer(
3645 	                  hldev, hldev->upper_layer_info);
3647 	     * This feature implements adaptive receive interrupt
3648 	     * coalescing. It is disabled by default. To enable it
3649 	     * set hldev->config.rxufca_lo_lim to be not equal to
3650 	     * hldev->config.rxufca_hi_lim.
3652 	     * We are using HW timer for this feature, so
3653 	     * user needs to configure hldev->config.rxufca_lbolt_period
3654 	     * which is essentially a time slice of timer.
3656 	     * For those who familiar with Linux, lbolt means jiffies
3657 	     * of this timer. I.e. timer tick.
3659 	    if (hldev->config.rxufca_lo_lim !=
3660 	            hldev->config.rxufca_hi_lim &&
3661 	        hldev->config.rxufca_lo_lim != 0) {
3662 	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3663 	            if (!hldev->config.ring.queue[i].configured)
3665 	            if (hldev->config.ring.queue[i].rti.urange_a)
3666 	                __hal_update_rxufca(hldev, i);
3671 	     * This feature implements adaptive TTI timer re-calculation
3672 	     * based on host utilization, number of interrupt processed,
3673 	     * number of RXD per tick and average length of packets per
3676 	    if (hldev->config.bimodal_interrupts) {
3677 	        for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3678 	            if (!hldev->config.ring.queue[i].configured)
3680 	            if (hldev->bimodal_tti[i].enabled)
3681 	                __hal_update_bimodal(hldev, i);
/*
 * For each asserted TxDMA sub-unit (PFC, TDA, PCC, TTI, LSO, TPA, SM)
 * the handler reads the unit's latched error register, writes it back
 * to clear it, bumps the unit's error counter and builds a mask
 * (temp64) of fatal error bits.  NOTE(review): lossy listing -- the
 * "if (err & temp64) goto reset;" lines that consume each temp64 mask,
 * plus braces and the final return, are not visible in this chunk;
 * the "reset" label below is the fatal path: full device reset,
 * re-enable and interrupt re-enable.
 */
3690 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
3691 * @hldev: HAL device handle.
3692 * @reason: interrupt reason
3695 __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
3697 	xge_hal_pci_bar0_t *isrbar0 =
3698 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3699 	u64 val64, temp64, err;
3701 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3702 	    &isrbar0->txdma_int_status);
3703 	if (val64 & XGE_HAL_TXDMA_PFC_INT) {
3704 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3705 	        &isrbar0->pfc_err_reg);
3706 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3707 	        err, &isrbar0->pfc_err_reg);
3708 	    hldev->stats.sw_dev_info_stats.pfc_err_cnt++;
3709 	    temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM
3710 	        |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR
3711 	        |XGE_HAL_PFC_PCIX_ERR;
3715 	if (val64 & XGE_HAL_TXDMA_TDA_INT) {
3716 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3717 	        &isrbar0->tda_err_reg);
3718 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3719 	        err, &isrbar0->tda_err_reg);
3720 	    hldev->stats.sw_dev_info_stats.tda_err_cnt++;
3721 	    temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM
3722 	        |XGE_HAL_TDA_SM1_ERR_ALARM;
3726 	if (val64 & XGE_HAL_TXDMA_PCC_INT) {
3727 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3728 	        &isrbar0->pcc_err_reg);
3729 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3730 	        err, &isrbar0->pcc_err_reg);
3731 	    hldev->stats.sw_dev_info_stats.pcc_err_cnt++;
3732 	    temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR
3733 	        |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM
3734 	        |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR
3735 	        |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR
3736 	        |XGE_HAL_PCC_7_LSO_OV_ERR;
3740 	if (val64 & XGE_HAL_TXDMA_TTI_INT) {
3741 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3742 	        &isrbar0->tti_err_reg);
3743 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3744 	        err, &isrbar0->tti_err_reg);
3745 	    hldev->stats.sw_dev_info_stats.tti_err_cnt++;
3746 	    temp64 = XGE_HAL_TTI_SM_ERR_ALARM;
3750 	if (val64 & XGE_HAL_TXDMA_LSO_INT) {
3751 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3752 	        &isrbar0->lso_err_reg);
3753 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3754 	        err, &isrbar0->lso_err_reg);
3755 	    hldev->stats.sw_dev_info_stats.lso_err_cnt++;
3756 	    temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT
3757 	        |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM;
3761 	if (val64 & XGE_HAL_TXDMA_TPA_INT) {
3762 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3763 	        &isrbar0->tpa_err_reg);
3764 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3765 	        err, &isrbar0->tpa_err_reg);
3766 	    hldev->stats.sw_dev_info_stats.tpa_err_cnt++;
3767 	    temp64 = XGE_HAL_TPA_SM_ERR_ALARM;
3771 	if (val64 & XGE_HAL_TXDMA_SM_INT) {
3772 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3773 	        &isrbar0->sm_err_reg);
3774 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3775 	        err, &isrbar0->sm_err_reg);
3776 	    hldev->stats.sw_dev_info_stats.sm_err_cnt++;
3777 	    temp64 = XGE_HAL_SM_SM_ERR_ALARM;
/* fatal-error recovery path: full reset + re-enable sequence */
3784 reset : xge_hal_device_reset(hldev);
3785 	xge_hal_device_enable(hldev);
3786 	xge_hal_device_intr_enable(hldev);
/*
 * Reads mac_int_status; when the TMAC bit is set, reads and
 * write-clears mac_tmac_err_reg, bumps the TMAC error counter, and on
 * a Tx buffer overrun or TMAC state-machine error performs a full
 * device reset / enable / interrupt-enable sequence.
 * NOTE(review): lossy listing -- braces and the final return are not
 * visible in this chunk.
 */
3791 * __hal_device_handle_txmac - Handle TxMAC interrupt reason
3792 * @hldev: HAL device handle.
3793 * @reason: interrupt reason
3796 __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
3798 	xge_hal_pci_bar0_t *isrbar0 =
3799 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3802 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3803 	    &isrbar0->mac_int_status);
3804 	if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
3807 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3808 	    &isrbar0->mac_tmac_err_reg);
3809 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3810 	    val64, &isrbar0->mac_tmac_err_reg);
3811 	hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++;
/* fatal TMAC conditions -> full recovery */
3812 	temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR;
3813 	if (val64 & temp64) {
3814 	    xge_hal_device_reset(hldev);
3815 	    xge_hal_device_enable(hldev);
3816 	    xge_hal_device_intr_enable(hldev);
/*
 * Reads xgxs_int_status; when the TxXGXS bit is set, reads and
 * write-clears xgxs_txgxs_err_reg, bumps the TxXGXS error counter,
 * and on an elastic-store underflow or Tx state-machine error performs
 * the full reset / enable / interrupt-enable recovery.
 * NOTE(review): lossy listing -- braces and the final return are not
 * visible in this chunk.
 */
3823 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
3824 * @hldev: HAL device handle.
3825 * @reason: interrupt reason
3828 __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason)
3830 	xge_hal_pci_bar0_t *isrbar0 =
3831 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3834 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3835 	    &isrbar0->xgxs_int_status);
3836 	if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS))
3839 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3840 	    &isrbar0->xgxs_txgxs_err_reg);
3841 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3842 	    val64, &isrbar0->xgxs_txgxs_err_reg);
3843 	hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++;
/* fatal TxXGXS conditions -> full recovery */
3844 	temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR;
3845 	if (val64 & temp64) {
3846 	    xge_hal_device_reset(hldev);
3847 	    xge_hal_device_enable(hldev);
3848 	    xge_hal_device_intr_enable(hldev);
/*
 * Placeholder handler: the RxPIC interrupt register is not processed
 * yet (see FIXME).  NOTE(review): lossy listing -- return type, braces
 * and return statement are not visible in this chunk.
 */
3855 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason
3856 * @hldev: HAL device handle.
3857 * @reason: interrupt reason
3860 __hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason)
3862 	/* FIXME: handle register */
/*
 * Rx counterpart of __hal_device_handle_txdma(): for each asserted
 * RxDMA sub-unit (RC, RPA, RDA, RTI) the latched error register is
 * read and written back to clear it, the unit's error counter is
 * bumped and a fatal-bit mask (temp64) is built.  NOTE(review): lossy
 * listing -- the "if (err & temp64) goto reset;" consumers of each
 * mask, braces and the final return are not visible; the "reset"
 * label is the fatal recovery path.
 */
3868 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason
3869 * @hldev: HAL device handle.
3870 * @reason: interrupt reason
3873 __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
3875 	xge_hal_pci_bar0_t *isrbar0 =
3876 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3877 	u64 val64, err, temp64;
3879 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3880 	    &isrbar0->rxdma_int_status);
3881 	if (val64 & XGE_HAL_RXDMA_RC_INT) {
3882 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3883 	        &isrbar0->rc_err_reg);
3884 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3885 	        err, &isrbar0->rc_err_reg);
3886 	    hldev->stats.sw_dev_info_stats.rc_err_cnt++;
3887 	    temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR
3888 	        |XGE_HAL_RC_PRCn_SM_ERR_ALARM
3889 	        |XGE_HAL_RC_FTC_SM_ERR_ALARM;
3893 	if (val64 & XGE_HAL_RXDMA_RPA_INT) {
3894 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3895 	        &isrbar0->rpa_err_reg);
3896 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3897 	        err, &isrbar0->rpa_err_reg);
3898 	    hldev->stats.sw_dev_info_stats.rpa_err_cnt++;
3899 	    temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR;
3903 	if (val64 & XGE_HAL_RXDMA_RDA_INT) {
3904 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3905 	        &isrbar0->rda_err_reg);
3906 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3907 	        err, &isrbar0->rda_err_reg);
3908 	    hldev->stats.sw_dev_info_stats.rda_err_cnt++;
3909 	    temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR
3910 	        |XGE_HAL_RDA_FRM_ECC_DB_N_AERR
3911 	        |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM
3912 	        |XGE_HAL_RDA_RXD_ECC_DB_SERR;
3916 	if (val64 & XGE_HAL_RXDMA_RTI_INT) {
3917 	    err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3918 	        &isrbar0->rti_err_reg);
3919 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3920 	        err, &isrbar0->rti_err_reg);
3921 	    hldev->stats.sw_dev_info_stats.rti_err_cnt++;
3922 	    temp64 = XGE_HAL_RTI_SM_ERR_ALARM;
/* fatal-error recovery path: full reset + re-enable sequence */
3929 reset : xge_hal_device_reset(hldev);
3930 	xge_hal_device_enable(hldev);
3931 	xge_hal_device_intr_enable(hldev);
/*
 * Reads mac_int_status; when the RMAC bit is set, reads and
 * write-clears mac_rmac_err_reg, bumps the RMAC error counter, and on
 * an Rx buffer overrun or RMAC state-machine error performs the full
 * reset / enable / interrupt-enable recovery.
 * NOTE(review): lossy listing -- braces and the final return are not
 * visible in this chunk.
 */
3936 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
3937 * @hldev: HAL device handle.
3938 * @reason: interrupt reason
3941 __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
3943 	xge_hal_pci_bar0_t *isrbar0 =
3944 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3947 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3948 	    &isrbar0->mac_int_status);
3949 	if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
3952 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3953 	    &isrbar0->mac_rmac_err_reg);
3954 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3955 	    val64, &isrbar0->mac_rmac_err_reg);
3956 	hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++;
/* fatal RMAC conditions -> full recovery */
3957 	temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR;
3958 	if (val64 & temp64) {
3959 	    xge_hal_device_reset(hldev);
3960 	    xge_hal_device_enable(hldev);
3961 	    xge_hal_device_intr_enable(hldev);
/*
 * Reads xgxs_int_status; when the RxXGXS bit is set, reads and
 * write-clears xgxs_rxgxs_err_reg, bumps the RxXGXS error counter,
 * and on an elastic-store overflow or Rx state-machine error performs
 * the full reset / enable / interrupt-enable recovery.
 * NOTE(review): lossy listing -- braces and the final return are not
 * visible in this chunk.
 */
3968 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
3969 * @hldev: HAL device handle.
3970 * @reason: interrupt reason
3973 __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
3975 	xge_hal_pci_bar0_t *isrbar0 =
3976 	    (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3979 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3980 	    &isrbar0->xgxs_int_status);
3981 	if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS))
3984 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3985 	    &isrbar0->xgxs_rxgxs_err_reg);
3986 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3987 	    val64, &isrbar0->xgxs_rxgxs_err_reg);
3988 	hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++;
/* fatal RxXGXS conditions -> full recovery */
3989 	temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR;
3990 	if (val64 & temp64) {
3991 	    xge_hal_device_reset(hldev);
3992 	    xge_hal_device_enable(hldev);
3993 	    xge_hal_device_intr_enable(hldev);
/*
 * Sequence: (lazy) HW init, re-assert PCI bus mastership, configure
 * Herc link-stability period, clear stale link interrupts, wait for
 * quiescence, turn the laser on, enable the adapter and spin waiting
 * for a stable link, then force an XGXS soft reset so a link state
 * change indication is generated, and finally enable stats refresh.
 * NOTE(review): lossy listing -- loop headers (do/while, j counter),
 * several braces and returns are not visible in this chunk.
 */
4000 * xge_hal_device_enable - Enable device.
4001 * @hldev: HAL device handle.
4003 * Enable the specified device: bring up the link/interface.
4004 * Returns: XGE_HAL_OK - success.
4005 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device
4006 * to a "quiescent" state.
4008 * See also: xge_hal_status_e{}.
4010 * Usage: See ex_open{}.
4013 xge_hal_device_enable(xge_hal_device_t *hldev)
4015 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* lazily initialize the hardware on first enable */
4020 	if (!hldev->hw_is_initialized) {
4021 	    xge_hal_status_e status;
4023 	    status = __hal_device_hw_initialize(hldev);
4024 	    if (status != XGE_HAL_OK) {
4030 	 * Not needed in most cases, i.e.
4031 	 * when device_disable() is followed by reset -
4032 	 * the latter copies back PCI config space, along with
4033 	 * the bus mastership - see __hal_device_reset().
4034 	 * However, there are/may-in-future be other cases, and
4037 	__hal_device_bus_master_enable(hldev);
4039 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4041 	     * Configure the link stability period.
4043 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4044 	                      &bar0->misc_control);
4045 	    if (hldev->config.link_stability_period !=
4046 	            XGE_HAL_DEFAULT_USE_HARDCODE) {
4048 	        val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4049 	                hldev->config.link_stability_period);
4052 	         * Use the link stability period 1 ms as default
4054 	        val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4055 	                XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
4057 	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4058 	                   val64, &bar0->misc_control);
4061 	     * Clearing any possible Link up/down interrupts that
4062 	     * could have popped up just before Enabling the card.
4064 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4065 	                      &bar0->misc_int_reg);
4067 	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4068 	                       val64, &bar0->misc_int_reg);
4069 	        xge_debug_device(XGE_TRACE, "%s","link state cleared");
4071 	} else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
4073 	     * Clearing any possible Link state change interrupts that
4074 	     * could have popped up just before Enabling the card.
4076 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4077 	        &bar0->mac_rmac_err_reg);
4079 	        xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4080 	                       val64, &bar0->mac_rmac_err_reg);
4081 	        xge_debug_device(XGE_TRACE, "%s", "link state cleared");
4085 	if (__hal_device_wait_quiescent(hldev, &val64)) {
4086 	    return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4089 	/* Enabling Laser. */
4090 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4091 	                  &bar0->adapter_control);
4092 	val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
4093 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4094 	             &bar0->adapter_control);
4096 	/* let link establish */
4099 	/* set link down until poll() routine will set it up (maybe) */
4100 	hldev->link_state = XGE_HAL_LINK_DOWN;
4102 	/* If link is UP (adapter is connected) then enable the adapter */
4103 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4104 	                  &bar0->adapter_status);
4105 	if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4106 	         XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
/* fault reported: keep the LED off for now */
4107 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4108 	                      &bar0->adapter_control);
4109 	    val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
4111 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4112 	                      &bar0->adapter_control);
4113 	    val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
4114 	              XGE_HAL_ADAPTER_LED_ON );
4117 	val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN;    /* adapter enable */
/* clears the ECC_EN bit -- original comment said "ECC enable";
 * bit polarity/intent should be verified against the Xframe PRM */
4118 	val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
4119 	xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
4120 	              &bar0->adapter_control);
4122 	/* We spin here waiting for the Link to come up.
4123 	 * This is the fix for the Link being unstable after the reset. */
4128 	    adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4129 	                  &bar0->adapter_status);
4131 	    /* Read the adapter control register for Adapter_enable bit */
4132 	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4133 	                  &bar0->adapter_control);
4134 	    if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4135 	                XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
4136 	        (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
/* link has been fault-free for link_valid_cnt consecutive samples */
4138 	        if (j >= hldev->config.link_valid_cnt) {
4139 	            if (xge_hal_device_status(hldev, &adp_status) ==
4141 	                if (__hal_verify_pcc_idle(hldev,
4142 	                      adp_status) != XGE_HAL_OK) {
4144 	                       XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4146 	                xge_debug_device(XGE_TRACE,
4147 	                      "adp_status: "XGE_OS_LLXFMT
4150 	                      (unsigned long long)adp_status);
4151 	                val64 = xge_os_pio_mem_read64(
4154 	                        &bar0->adapter_control);
4156 	                    (XGE_HAL_ADAPTER_EOI_TX_ON |
4157 	                     XGE_HAL_ADAPTER_LED_ON );
4158 	                xge_os_pio_mem_write64(hldev->pdev,
4159 	                            hldev->regh0, val64,
4160 	                            &bar0->adapter_control);
4163 	                val64 = xge_os_pio_mem_read64(
4166 	                          &bar0->adapter_control);
4167 	                break;  /* out of for loop */
4170 	                   XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
/* link still faulting: restart the stability count and retry */
4174 	        j = 0;  /* Reset the count */
4175 	        /* Turn on the Laser */
4176 	        val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4177 	                &bar0->adapter_control);
4178 	        val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
4179 	        xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
4180 	                val64, &bar0->adapter_control);
4184 	            /* Now re-enable it as due to noise, hardware
4186 	            val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4187 	                    &bar0->adapter_control);
4188 	            val64 |= XGE_HAL_ADAPTER_CNTL_EN;
/* clears ECC_EN (original comment said "ECC enable"; verify polarity) */
4189 	            val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
4190 	            xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4191 	                    &bar0->adapter_control);
4193 	    xge_os_mdelay(1); /* Sleep for 1 msec */
4195 	} while (i < hldev->config.link_retry_cnt);
4197 	__hal_device_led_actifity_fix(hldev);
4199 #ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
4200 	/* Here we are performing soft reset on XGXS to force link down.
4201 	 * Since link is already up, we will get link state change
4202 	 * poll notification after adapter is enabled */
4204 	__hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
4205 	              &bar0->dtx_control);
4206 	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4208 	__hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
4209 	              &bar0->dtx_control);
4210 	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4212 	__hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
4213 	              &bar0->dtx_control);
4214 	(void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4216 	xge_os_mdelay(100); /* sleep 100 msec (original comment said 500) */
4218 	if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4222 	 * With some switches the link state change interrupt does not
4223 	 * occur even though the xgxs reset is done as per SPN-006. So,
4224 	 * poll the adapter status register and check if the link state
4227 	adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4228 	          &bar0->adapter_status);
4229 	if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4230 	            XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
4232 	    xge_debug_device(XGE_TRACE, "%s",
4233 	              "enable device causing link state change ind..");
4234 	    (void) __hal_device_handle_link_state_change(hldev);
4238 	if (hldev->config.stats_refresh_time_sec !=
4239 	    XGE_HAL_STATS_REFRESH_DISABLE)
4240 	        __hal_stats_enable(&hldev->stats);
/*
 * Implementation: clear ADAPTER_CNTL_EN (turn the adapter/laser off),
 * wait for the device and then the PRC to reach the quiescent state,
 * stop the stats refresh timer, and (unless compiled out) drop PCI
 * bus mastership.  NOTE(review): lossy listing -- braces and the
 * final "return status;" are not visible in this chunk.
 */
4246 * xge_hal_device_disable - Disable Xframe adapter.
4247 * @hldev: Device handle.
4249 * Disable this device. To gracefully reset the adapter, the host should:
4251 * - call xge_hal_device_disable();
4253 * - call xge_hal_device_intr_disable();
4255 * - close all opened channels and clean up outstanding resources;
4257 * - do some work (error recovery, change mtu, reset, etc);
4259 * - call xge_hal_device_enable();
4261 * - open channels, replenish RxDs, etc.
4263 * - call xge_hal_device_intr_enable().
4265 * Note: Disabling the device does _not_ include disabling of interrupts.
4266 * After disabling the device stops receiving new frames but those frames
4267 * that were already in the pipe will keep coming for some few milliseconds.
4269 * Returns:  XGE_HAL_OK - success.
4270 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
4271 * a "quiescent" state.
4273 * See also: xge_hal_status_e{}.
4276 xge_hal_device_disable(xge_hal_device_t *hldev)
4278 	xge_hal_status_e status = XGE_HAL_OK;
4279 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4282 	xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");
/* clear the adapter-enable bit */
4284 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4285 	                  &bar0->adapter_control);
4286 	val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
4287 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4288 	             &bar0->adapter_control);
/* quiescence failures are recorded in status but do not abort */
4290 	if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
4291 	    status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4294 	if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
4295 	     XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
4296 	     XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4297 	    xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
4298 	    status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4301 	if (hldev->config.stats_refresh_time_sec !=
4302 	    XGE_HAL_STATS_REFRESH_DISABLE)
4303 	        __hal_stats_disable(&hldev->stats);
4304 #ifdef XGE_DEBUG_ASSERT
4306 	        xge_assert(!hldev->stats.is_enabled);
4309 #ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
4310 	__hal_device_bus_master_disable(hldev);
/*
 * Implementation: snapshot soft_reset_cnt so it survives the stats
 * reset, perform the soft reset, reset all stats (including saved),
 * restore the bumped counter, re-seed rxufca_intr_thres from config
 * and clear reset_needed_after_close.  NOTE(review): lossy listing --
 * the final "return status;" is not visible in this chunk.
 */
4317 * xge_hal_device_reset - Reset device.
4318 * @hldev: HAL device handle.
4320 * Soft-reset the device, reset the device stats except reset_cnt.
4322 * After reset is done, will try to re-initialize HW.
4324 * Returns:  XGE_HAL_OK - success.
4325 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
4326 * XGE_HAL_ERR_RESET_FAILED - Reset failed.
4328 * See also: xge_hal_status_e{}.
4331 xge_hal_device_reset(xge_hal_device_t *hldev)
4333 	xge_hal_status_e status;
4335 	/* increment the soft reset counter */
4336 	u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;
4338 	xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);
4340 	if (!hldev->is_initialized)
4341 	    return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;
4343 	/* actual "soft" reset of the adapter */
4344 	status = __hal_device_reset(hldev);
4346 	/* reset all stats including saved */
4347 	__hal_stats_soft_reset(hldev, 1);
4349 	/* increment reset counter */
4350 	hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;
4352 	/* re-initialize rxufca_intr_thres */
4353 	hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
4355 	    hldev->reset_needed_after_close = 0;
/*
 * Implementation: single read of adapter_status, then a series of
 * individual readiness-bit checks, each failing fast with a trace
 * message and XGE_HAL_FAIL.  The P_PLL lock check applies to Herc
 * only, and is skipped in PCI 33 MHz mode (P_PLL unused there) and
 * under Herc emulation.  NOTE(review): lossy listing -- braces and
 * the final XGE_HAL_OK return are not visible in this chunk.
 */
4361 * xge_hal_device_status - Check whether Xframe hardware is ready for
4363 * @hldev: HAL device handle.
4364 * @hw_status: Xframe status register. Returned by HAL.
4366 * Check whether Xframe hardware is ready for operation.
4367 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest
4368 * hardware functional blocks.
4370 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
4371 * returns XGE_HAL_FAIL. Also, fills in  adapter status (in @hw_status).
4373 * See also: xge_hal_status_e{}.
4374 * Usage: See ex_open{}.
4377 xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
4379 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4382 	tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4383 	              &bar0->adapter_status);
4387 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
4388 	    xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
4389 	    return XGE_HAL_FAIL;
4391 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
4392 	    xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
4393 	    return XGE_HAL_FAIL;
4395 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
4396 	    xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
4397 	    return XGE_HAL_FAIL;
4399 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
4400 	    xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
4401 	    return XGE_HAL_FAIL;
4403 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
4404 	    xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
4405 	    return XGE_HAL_FAIL;
4407 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
4408 	    xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
4409 	    return XGE_HAL_FAIL;
4411 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
4412 	    xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
4413 	    return XGE_HAL_FAIL;
4415 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
4416 	    xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
4417 	    return XGE_HAL_FAIL;
4419 #ifndef XGE_HAL_HERC_EMULATION
4421 	 * Andrew: in PCI 33 mode, the P_PLL is not used, and therefore,
4422 	 * the P_PLL_LOCK bit in the adapter_status register will
4425 	if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
4426 	    xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
4427 	    hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
4428 	    xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
4429 	    return XGE_HAL_FAIL;
/*
 * __hal_device_msi_intr_endis - Enable (flag != 0) or disable the MSI
 * capability by toggling bit 0 of the MSI Message Control word in the
 * device's PCI configuration space (read-modify-write).
 * NOTE(review): lossy listing -- return type, the if/else around the
 * two bit operations, and braces are not visible in this chunk.
 */
4437 __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
4439 	u16 msi_control_reg;
4441 	xge_os_pci_read16(hldev->pdev, hldev->cfgh,
4442 	          xge_offsetof(xge_hal_pci_config_le_t,
4443 	                 msi_control), &msi_control_reg);
/* bit 0 of msi_control is the MSI enable bit */
4446 	    msi_control_reg |= 0x1;
4448 	    msi_control_reg &= ~0x1;
4450 	xge_os_pci_write16(hldev->pdev, hldev->cfgh,
4451 	          xge_offsetof(xge_hal_pci_config_le_t,
4452 	                 msi_control), msi_control_reg);
/*
 * __hal_device_msix_intr_endis - Enable or disable the MSI-X vector of
 * @channel by clearing (enable) or setting (disable) its bit in
 * xmsi_mask_reg; the vector's bit position is (63 - msix_idx).
 * NOTE(review): lossy listing -- return type, the if (flag)/else
 * guarding the two bit operations, and braces are not visible here.
 */
4456 __hal_device_msix_intr_endis(xge_hal_device_t *hldev,
4457 	              xge_hal_channel_t *channel, int flag)
4460 	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
4462 	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4463 	&bar0->xmsi_mask_reg);
/* clear mask bit = vector enabled; set mask bit = vector masked */
4466 	    val64 &= ~(1LL << ( 63 - channel->msix_idx ));
4468 	    val64 |= (1LL << ( 63 - channel->msix_idx ));
4469 	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4470 	&bar0->xmsi_mask_reg);
/*
 * Implementation: enable the PRC on every ring channel, then program
 * the traffic interrupt mask -- Tx traffic only when TTI is enabled,
 * Rx traffic unless bimodal interrupts are on (Xena always gets Rx),
 * plus PIC/DMA and the optional scheduler interrupt.  In MSI-X mode,
 * also set the MSI enable bit (Herc errata) and unmask the per-channel
 * MSI-X vectors (vector 0 is reserved for alarms).
 * NOTE(review): lossy listing -- braces, local declarations (item,
 * val64) and several "continue"-style lines are not visible here.
 */
4474 * xge_hal_device_intr_enable - Enable Xframe interrupts.
4475 * @hldev: HAL device handle.
4476 * @op: One of the xge_hal_device_intr_e enumerated values specifying
4477 * the type(s) of interrupts to enable.
4479 * Enable Xframe interrupts. The function is to be executed the last in
4480 * Xframe initialization sequence.
4482 * See also: xge_hal_device_intr_disable()
4485 xge_hal_device_intr_enable(xge_hal_device_t *hldev)
4490 	/* PRC initialization and configuration */
4491 	xge_list_for_each(item, &hldev->ring_channels) {
4492 	    xge_hal_channel_h channel;
4493 	    channel = xge_container_of(item, xge_hal_channel_t, item);
4494 	    __hal_ring_prc_enable(channel);
4497 	/* enable traffic only interrupts */
4498 	if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
4500 	     * make sure all interrupts going to be disabled if MSI
4503 	    __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
4506 	     * Enable the Tx traffic interrupts only if the TTI feature is
4510 	    if (hldev->tti_enabled)
4511 	        val64 = XGE_HAL_TX_TRAFFIC_INTR;
4513 	    if (!hldev->config.bimodal_interrupts)
4514 	        val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4516 	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4517 	        val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4519 	    val64 |=XGE_HAL_TX_PIC_INTR |
4521 	        XGE_HAL_TX_DMA_INTR |
4522 	        (hldev->config.sched_timer_us !=
4523 	        XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
4524 	    __hal_device_intr_mgmt(hldev, val64, 1);
4528 	 * Enable MSI-X interrupts
4530 	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4532 	    if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4534 	         *  To enable MSI-X, MSI also needs to be enabled,
4535 	         *  due to a bug in the herc NIC.
4537 	        __hal_device_msi_intr_endis(hldev, 1);
4541 	    /* Enable the MSI-X interrupt for each configured channel */
4542 	    xge_list_for_each(item, &hldev->fifo_channels) {
4543 	        xge_hal_channel_t *channel;
4545 	        channel = xge_container_of(item,
4546 	                   xge_hal_channel_t, item);
4548 	        /* 0 vector is reserved for alarms */
4549 	        if (!channel->msix_idx)
4552 	        __hal_device_msix_intr_endis(hldev, channel, 1);
4555 	    xge_list_for_each(item, &hldev->ring_channels) {
4556 	        xge_hal_channel_t *channel;
4558 	        channel = xge_container_of(item,
4559 	                   xge_hal_channel_t, item);
4561 	        /* 0 vector is reserved for alarms */
4562 	        if (!channel->msix_idx)
4565 	        __hal_device_msix_intr_endis(hldev, channel, 1);
4569 	xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
4574 * xge_hal_device_intr_disable - Disable Xframe interrupts.
4575 * @hldev: HAL device handle.
4576 * Note: interrupt types to disable are derived from the device
4577 * configuration (hldev->config); no separate type argument is taken.
4579 * Disable Xframe interrupts.
4581 * See also: xge_hal_device_intr_enable()
4584 xge_hal_device_intr_disable(xge_hal_device_t *hldev)
4587 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4590 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4592 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4594 * To disable MSI-X, MSI also needs to be disabled,
4595 * due to a bug in the herc NIC.
4597 __hal_device_msi_intr_endis(hldev, 0);
4600 /* Disable the MSI-X interrupt for each configured channel */
4601 xge_list_for_each(item, &hldev->fifo_channels) {
4602 xge_hal_channel_t *channel;
4604 channel = xge_container_of(item,
4605 xge_hal_channel_t, item);
4607 /* 0 vector is reserved for alarms */
4608 if (!channel->msix_idx)
4611 __hal_device_msix_intr_endis(hldev, channel, 0);
/* mask all Tx traffic interrupt sources */
4615 xge_os_pio_mem_write64(hldev->pdev,
4616 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4617 &bar0->tx_traffic_mask);
/* same masking pass for the receive-side channels */
4619 xge_list_for_each(item, &hldev->ring_channels) {
4620 xge_hal_channel_t *channel;
4622 channel = xge_container_of(item,
4623 xge_hal_channel_t, item);
4625 /* 0 vector is reserved for alarms */
4626 if (!channel->msix_idx)
4629 __hal_device_msix_intr_endis(hldev, channel, 0);
/* mask all Rx traffic interrupt sources */
4632 xge_os_pio_mem_write64(hldev->pdev,
4633 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4634 &bar0->rx_traffic_mask);
4638 * Disable traffic only interrupts.
4639 * Tx traffic interrupts are used only if the TTI feature is
4643 if (hldev->tti_enabled)
4644 val64 = XGE_HAL_TX_TRAFFIC_INTR;
4646 val64 |= XGE_HAL_RX_TRAFFIC_INTR |
4647 XGE_HAL_TX_PIC_INTR |
4649 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
4650 XGE_HAL_SCHED_INTR : 0);
4651 __hal_device_intr_mgmt(hldev, val64, 0);
/* finally mask everything at the top-level general interrupt register */
4653 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4654 0xFFFFFFFFFFFFFFFFULL,
4655 &bar0->general_int_mask);
4658 /* disable all configured PRCs */
4659 xge_list_for_each(item, &hldev->ring_channels) {
4660 xge_hal_channel_h channel;
4661 channel = xge_container_of(item, xge_hal_channel_t, item);
4662 __hal_ring_prc_disable(channel);
4665 xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
4670 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
4671 * @hldev: HAL device handle.
4673 * Enable Xframe multicast addresses.
4674 * Returns: XGE_HAL_OK on success.
4675 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast
4676 * feature within the time(timeout).
4678 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
4681 xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
4684 xge_hal_pci_bar0_t *bar0;
4685 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4688 return XGE_HAL_ERR_INVALID_DEVICE;
/* already enabled - nothing to do */
4690 if (hldev->mcast_refcnt)
/* Herc uses a different all-multicast slot in the RMAC address table */
4693 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4694 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4696 hldev->mcast_refcnt = 1;
4698 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4700 /* Enable all Multicast addresses */
4701 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4702 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
4703 &bar0->rmac_addr_data0_mem);
4704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4705 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
4706 &bar0->rmac_addr_data1_mem);
/* issue the write-entry command for the all-multicast slot */
4707 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4708 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4709 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4710 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4711 &bar0->rmac_addr_cmd_mem);
/* wait for the command-memory strobe to complete */
4713 if (__hal_device_register_poll(hldev,
4714 &bar0->rmac_addr_cmd_mem, 0,
4715 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4716 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4717 /* upper layer may require to repeat */
4718 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4725 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
4726 * @hldev: HAL device handle.
4728 * Disable Xframe multicast addresses.
4729 * Returns: XGE_HAL_OK - success.
4730 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast
4731 * feature within the time(timeout).
4733 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
4736 xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
4739 xge_hal_pci_bar0_t *bar0;
4740 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4743 return XGE_HAL_ERR_INVALID_DEVICE;
/* already disabled - nothing to do */
4745 if (hldev->mcast_refcnt == 0)
/* Herc uses a different all-multicast slot in the RMAC address table */
4748 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4749 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4751 hldev->mcast_refcnt = 0;
4753 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4755 /* Disable all Multicast addresses */
4756 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4757 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
4758 &bar0->rmac_addr_data0_mem);
4759 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4760 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
4761 &bar0->rmac_addr_data1_mem);
/* issue the write-entry command for the all-multicast slot */
4763 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4764 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4765 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4766 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4767 &bar0->rmac_addr_cmd_mem);
/* wait for the command-memory strobe to complete */
4769 if (__hal_device_register_poll(hldev,
4770 &bar0->rmac_addr_cmd_mem, 0,
4771 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4772 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4773 /* upper layer may require to repeat */
4774 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4781 * xge_hal_device_promisc_enable - Enable promiscuous mode.
4782 * @hldev: HAL device handle.
4784 * Enable promiscuous mode of Xframe operation.
4786 * See also: xge_hal_device_promisc_disable().
4789 xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
4792 xge_hal_pci_bar0_t *bar0;
4796 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4798 if (!hldev->is_promisc) {
4799 /* Put the NIC into promiscuous mode */
4800 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4802 val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: unlock it before the upper-word write */
4804 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4805 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4806 &bar0->rmac_cfg_key);
4808 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4812 hldev->is_promisc = 1;
4813 xge_debug_device(XGE_TRACE,
4814 "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
4815 (unsigned long long)val64);
4820 * xge_hal_device_promisc_disable - Disable promiscuous mode.
4821 * @hldev: HAL device handle.
4823 * Disable promiscuous mode of Xframe operation.
4825 * See also: xge_hal_device_promisc_enable().
4828 xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
4831 xge_hal_pci_bar0_t *bar0;
4835 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4837 if (hldev->is_promisc) {
4838 /* Remove the NIC from promiscuous mode */
4839 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4841 val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: unlock it before the upper-word write */
4843 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4844 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4845 &bar0->rmac_cfg_key);
4847 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4851 hldev->is_promisc = 0;
4852 xge_debug_device(XGE_TRACE,
4853 "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
4854 (unsigned long long)val64);
4859 * xge_hal_device_macaddr_get - Get MAC addresses.
4860 * @hldev: HAL device handle.
4861 * @index: MAC address index, in the range from 0 to
4862 * XGE_HAL_MAX_MAC_ADDRESSES.
4863 * @macaddr: MAC address. Returned by HAL.
4865 * Retrieve one of the stored MAC addresses by reading non-volatile
4866 * memory on the chip.
4868 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
4870 * Returns: XGE_HAL_OK - success.
4871 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
4872 * address within the time(timeout).
4873 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4875 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
4878 xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
4881 xge_hal_pci_bar0_t *bar0;
4885 if (hldev == NULL) {
4886 return XGE_HAL_ERR_INVALID_DEVICE;
4889 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4891 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
4892 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
4895 #ifdef XGE_HAL_HERC_EMULATION
/* emulation path: seed the data registers before issuing the read */
4896 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
4897 &bar0->rmac_addr_data0_mem);
4898 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
4899 &bar0->rmac_addr_data1_mem);
4900 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4901 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4902 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
4903 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4904 &bar0->rmac_addr_cmd_mem);
4906 /* poll until done */
4907 __hal_device_register_poll(hldev,
4908 &bar0->rmac_addr_cmd_mem, 0,
4909 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
4910 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
/* real hardware path: issue read-entry command for the given slot */
4914 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4915 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4916 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4917 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4918 &bar0->rmac_addr_cmd_mem);
4920 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4921 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4922 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4923 /* upper layer may require to repeat */
4924 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* unpack the 6 address octets from the big-endian 64-bit data word */
4927 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4928 &bar0->rmac_addr_data0_mem);
4929 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4930 (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
4933 #ifdef XGE_HAL_HERC_EMULATION
/* emulation: fabricate a fixed 00:01:00:00:00:00 address */
4934 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4935 (*macaddr)[i] = (u8)0;
4937 (*macaddr)[1] = (u8)1;
4945 * xge_hal_device_macaddr_set - Set MAC address.
4946 * @hldev: HAL device handle.
4947 * @index: MAC address index, in the range from 0 to
4948 * XGE_HAL_MAX_MAC_ADDRESSES.
4949 * @macaddr: New MAC address to configure.
4951 * Configure one of the available MAC address "slots".
4953 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
4955 * Returns: XGE_HAL_OK - success.
4956 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
4957 * address within the time(timeout).
4958 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4960 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
4963 xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
4966 xge_hal_pci_bar0_t *bar0 =
4967 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4971 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
4972 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
/* pack the 6 address octets into a big-endian 64-bit word */
4975 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4976 temp64 |= macaddr[i];
4981 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4982 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
4983 &bar0->rmac_addr_data0_mem);
/* mask of 0: match all octets of the programmed address */
4985 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4986 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4987 &bar0->rmac_addr_data1_mem);
/* issue the write-entry command for the selected slot */
4989 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4990 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4991 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4993 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4994 &bar0->rmac_addr_cmd_mem);
/* wait for the command-memory strobe to complete */
4996 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4997 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4998 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4999 /* upper layer may require to repeat */
5000 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5007 * xge_hal_device_macaddr_clear - Clear MAC address.
5008 * @hldev: HAL device handle.
5009 * @index: MAC address index, in the range from 0 to
5010 * XGE_HAL_MAX_MAC_ADDRESSES.
5012 * Clear one of the available MAC address "slots".
5014 * Returns: XGE_HAL_OK - success.
5015 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
5016 * address within the time(timeout).
5017 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
5019 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
5022 xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
5024 xge_hal_status_e status;
/* "cleared" means programming the broadcast address into the slot */
5025 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
5027 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
5028 if (status != XGE_HAL_OK) {
5029 xge_debug_device(XGE_ERR, "%s",
5030 "Not able to set the mac addr");
5038 * xge_hal_device_macaddr_find - Finds index in the rmac table.
5039 * @hldev: HAL device handle.
5040 * @wanted: Wanted MAC address.
5042 * See also: xge_hal_device_macaddr_set().
5045 xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
5049 if (hldev == NULL) {
5050 return XGE_HAL_ERR_INVALID_DEVICE;
/* slot 0 holds the primary address, so the scan starts at index 1 */
5053 for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
5055 (void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
5056 if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
5065 * xge_hal_device_mtu_set - Set MTU.
5066 * @hldev: HAL device handle.
5067 * @new_mtu: New MTU size to configure.
5069 * Set new MTU value. Example, to use jumbo frames:
5070 * xge_hal_device_mtu_set(my_device, my_channel, 9600);
5072 * Returns: XGE_HAL_OK on success.
5073 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control
5075 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI
5077 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
5078 * a "quiescent" state.
5081 xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu)
5083 xge_hal_status_e status;
5086 * reset needed if 1) new MTU differs, and
5087 * 2a) device was closed or
5088 * 2b) device is being upped for first time.
5090 if (hldev->config.mtu != new_mtu) {
5091 if (hldev->reset_needed_after_close ||
5092 !hldev->mtu_first_time_set) {
5093 status = xge_hal_device_reset(hldev);
5094 if (status != XGE_HAL_OK) {
5095 xge_debug_device(XGE_TRACE, "%s",
5096 "fatal: can not reset the device");
5100 /* store the new MTU in device, reset will use it */
5101 hldev->config.mtu = new_mtu;
5102 xge_debug_device(XGE_TRACE, "new MTU %d applied",
/* remember that the MTU has been set at least once */
5106 if (!hldev->mtu_first_time_set)
5107 hldev->mtu_first_time_set = 1;
5113 * xge_hal_device_initialize - Initialize Xframe device.
5114 * @hldev: HAL device handle.
5115 * @attr: pointer to xge_hal_device_attr_t structure
5116 * @device_config: Configuration to be _applied_ to the device,
5117 * For the Xframe configuration "knobs" please
5118 * refer to xge_hal_device_config_t and Xframe
5121 * Initialize Xframe device. Note that all the arguments of this public API
5122 * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with
5123 * OS to find new Xframe device, locate its PCI and memory spaces.
5125 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL
5126 * to enable the latter to perform Xframe hardware initialization.
5128 * Returns: XGE_HAL_OK - success.
5129 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized.
5130 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not
5132 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
5133 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid.
5134 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address in not valid.
5135 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
5136 * address within the time(timeout) or TTI/RTI initialization failed.
5137 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control.
5138 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT -Device is not queiscent.
5140 * See also: xge_hal_device_terminate(), xge_hal_status_e{}
5141 * xge_hal_device_attr_t{}.
5144 xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
5145 xge_hal_device_config_t *device_config)
5148 xge_hal_status_e status;
5149 xge_hal_channel_t *channel;
5152 int total_dram_size, ring_auto_dram_cfg, left_dram_size;
5153 int total_dram_size_max = 0;
5155 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing",
5156 (unsigned long long)(ulong_t)hldev);
/* HAL driver must have been initialized before any device */
5159 if (g_xge_hal_driver == NULL ||
5160 !g_xge_hal_driver->is_initialized) {
5161 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED;
5164 xge_os_memzero(hldev, sizeof(xge_hal_device_t));
5167 * validate a common part of Xframe-I/II configuration
5168 * (and run check_card() later, once PCI inited - see below)
5170 status = __hal_device_config_check_common(device_config);
5171 if (status != XGE_HAL_OK)
/* keep a private copy of the configuration */
5175 xge_os_memcpy(&hldev->config, device_config,
5176 sizeof(xge_hal_device_config_t));
5178 /* save original attr */
5179 xge_os_memcpy(&hldev->orig_attr, attr,
5180 sizeof(xge_hal_device_attr_t));
5182 /* initialize rxufca_intr_thres */
5183 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
/* cache register handles and BAR mappings supplied by the ULD */
5185 hldev->regh0 = attr->regh0;
5186 hldev->regh1 = attr->regh1;
5187 hldev->regh2 = attr->regh2;
5188 hldev->isrbar0 = hldev->bar0 = attr->bar0;
5189 hldev->bar1 = attr->bar1;
5190 hldev->bar2 = attr->bar2;
5191 hldev->pdev = attr->pdev;
5192 hldev->irqh = attr->irqh;
5193 hldev->cfgh = attr->cfgh;
5195 /* set initial bimodal timer for bimodal adaptive schema */
5196 hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us;
/* create the serialized event queue used for async device events */
5198 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh,
5199 g_xge_hal_driver->config.queue_size_initial,
5200 g_xge_hal_driver->config.queue_size_max,
5201 __hal_device_event_queued, hldev);
5202 if (hldev->queueh == NULL)
5203 return XGE_HAL_ERR_OUT_OF_MEMORY;
5205 hldev->magic = XGE_HAL_MAGIC;
5207 xge_assert(hldev->regh0);
5208 xge_assert(hldev->regh1);
5209 xge_assert(hldev->bar0);
5210 xge_assert(hldev->bar1);
5211 xge_assert(hldev->pdev);
5212 xge_assert(hldev->irqh);
5213 xge_assert(hldev->cfgh);
5215 /* initialize some PCI/PCI-X fields of this PCI device. */
5216 __hal_device_pci_init(hldev);
5219 * initialize lists to properly handle a potential
5222 xge_list_init(&hldev->free_channels);
5223 xge_list_init(&hldev->fifo_channels);
5224 xge_list_init(&hldev->ring_channels);
/* per-card fixups and configuration checks */
5226 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
5227 /* fixups for xena */
5228 hldev->config.rth_en = 0;
5229 hldev->config.rth_spdm_en = 0;
5230 hldev->config.rts_mac_en = 0;
5231 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA;
5233 status = __hal_device_config_check_xena(device_config);
5234 if (status != XGE_HAL_OK) {
5235 xge_hal_device_terminate(hldev);
5238 if (hldev->config.bimodal_interrupts == 1) {
5239 xge_hal_device_terminate(hldev);
5240 return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED;
5241 } else if (hldev->config.bimodal_interrupts ==
5242 XGE_HAL_DEFAULT_USE_HARDCODE)
5243 hldev->config.bimodal_interrupts = 0;
5244 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
5245 /* fixups for herc */
5246 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC;
5247 status = __hal_device_config_check_herc(device_config);
5248 if (status != XGE_HAL_OK) {
5249 xge_hal_device_terminate(hldev);
5252 if (hldev->config.bimodal_interrupts ==
5253 XGE_HAL_DEFAULT_USE_HARDCODE)
5254 hldev->config.bimodal_interrupts = 1;
5256 xge_debug_device(XGE_ERR,
5257 "detected unknown device_id 0x%x", hldev->device_id);
5258 xge_hal_device_terminate(hldev);
5259 return XGE_HAL_ERR_BAD_DEVICE_ID;
5262 /* allocate and initialize FIFO types of channels according to
5264 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
5265 if (!device_config->fifo.queue[i].configured)
5268 channel = __hal_channel_allocate(hldev, i,
5269 XGE_HAL_CHANNEL_TYPE_FIFO);
5270 if (channel == NULL) {
5271 xge_debug_device(XGE_ERR,
5272 "fifo: __hal_channel_allocate failed");
5273 xge_hal_device_terminate(hldev);
5274 return XGE_HAL_ERR_OUT_OF_MEMORY;
5276 /* add new channel to the device */
5277 xge_list_insert(&channel->item, &hldev->free_channels);
5281 * automatic DRAM adjustment
/* sum explicitly-sized ring queues; count those left for auto-sizing */
5283 total_dram_size = 0;
5284 ring_auto_dram_cfg = 0;
5285 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5286 if (!device_config->ring.queue[i].configured)
5288 if (device_config->ring.queue[i].dram_size_mb ==
5289 XGE_HAL_DEFAULT_USE_HARDCODE) {
5290 ring_auto_dram_cfg++;
5293 total_dram_size += device_config->ring.queue[i].dram_size_mb;
/* fail if explicit sizes overflow DRAM or leave <1MB per auto queue */
5295 left_dram_size = total_dram_size_max - total_dram_size;
5296 if (left_dram_size < 0 ||
5297 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) {
5298 xge_debug_device(XGE_ERR,
5299 "ring config: exceeded DRAM size %d MB",
5300 total_dram_size_max);
5301 xge_hal_device_terminate(hldev);
5302 return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
5306 * allocate and initialize RING types of channels according to
5309 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5310 if (!device_config->ring.queue[i].configured)
/* split the remaining DRAM evenly among auto-sized queues */
5313 if (device_config->ring.queue[i].dram_size_mb ==
5314 XGE_HAL_DEFAULT_USE_HARDCODE) {
5315 hldev->config.ring.queue[i].dram_size_mb =
5316 device_config->ring.queue[i].dram_size_mb =
5317 left_dram_size / ring_auto_dram_cfg;
5320 channel = __hal_channel_allocate(hldev, i,
5321 XGE_HAL_CHANNEL_TYPE_RING);
5322 if (channel == NULL) {
5323 xge_debug_device(XGE_ERR,
5324 "ring: __hal_channel_allocate failed");
5325 xge_hal_device_terminate(hldev);
5326 return XGE_HAL_ERR_OUT_OF_MEMORY;
5328 /* add new channel to the device */
5329 xge_list_insert(&channel->item, &hldev->free_channels);
5332 /* get subsystem IDs */
5333 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5334 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
5336 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5337 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
5339 xge_debug_device(XGE_TRACE,
5340 "subsystem_id %04x:%04x",
5341 subsys_vendor, subsys_device);
5343 /* reset device initially */
5344 (void) __hal_device_reset(hldev);
5346 /* set host endian before, to assure proper action */
5347 status = __hal_device_set_swapper(hldev);
5348 if (status != XGE_HAL_OK) {
5349 xge_debug_device(XGE_ERR,
5350 "__hal_device_set_swapper failed");
5351 xge_hal_device_terminate(hldev);
5352 (void) __hal_device_reset(hldev);
5356 #ifndef XGE_HAL_HERC_EMULATION
5357 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
5358 __hal_device_xena_fix_mac(hldev);
5361 /* MAC address initialization.
5362 * For now only one mac address will be read and used. */
5363 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]);
5364 if (status != XGE_HAL_OK) {
5365 xge_debug_device(XGE_ERR,
5366 "xge_hal_device_macaddr_get failed");
5367 xge_hal_device_terminate(hldev);
/* all-FF (broadcast) address means the slot was never programmed */
5371 if (hldev->macaddr[0][0] == 0xFF &&
5372 hldev->macaddr[0][1] == 0xFF &&
5373 hldev->macaddr[0][2] == 0xFF &&
5374 hldev->macaddr[0][3] == 0xFF &&
5375 hldev->macaddr[0][4] == 0xFF &&
5376 hldev->macaddr[0][5] == 0xFF) {
5377 xge_debug_device(XGE_ERR,
5378 "xge_hal_device_macaddr_get returns all FFs");
5379 xge_hal_device_terminate(hldev);
5380 return XGE_HAL_ERR_INVALID_MAC_ADDRESS;
5383 xge_debug_device(XGE_TRACE,
5384 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
5385 hldev->macaddr[0][0], hldev->macaddr[0][1],
5386 hldev->macaddr[0][2], hldev->macaddr[0][3],
5387 hldev->macaddr[0][4], hldev->macaddr[0][5]);
5389 status = __hal_stats_initialize(&hldev->stats, hldev);
5390 if (status != XGE_HAL_OK) {
5391 xge_debug_device(XGE_ERR,
5392 "__hal_stats_initialize failed");
5393 xge_hal_device_terminate(hldev);
5397 status = __hal_device_hw_initialize(hldev);
5398 if (status != XGE_HAL_OK) {
5399 xge_debug_device(XGE_ERR,
5400 "__hal_device_hw_initialize failed");
5401 xge_hal_device_terminate(hldev);
5404 hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
5405 if (hldev->dump_buf == NULL) {
/* NOTE(review): message below is copy-pasted from the hw-initialize
 * path; this failure is actually the dump buffer allocation. */
5406 xge_debug_device(XGE_ERR,
5407 "__hal_device_hw_initialize failed");
5408 xge_hal_device_terminate(hldev);
5409 return XGE_HAL_ERR_OUT_OF_MEMORY;
5413 /* Xena-only: need to serialize fifo posts across all device fifos */
5414 #if defined(XGE_HAL_TX_MULTI_POST)
5415 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev);
5416 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5417 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh);
5419 /* Getting VPD data */
5420 __hal_device_get_vpd_data(hldev);
5422 hldev->is_initialized = 1;
5428 * xge_hal_device_terminating - Mark the device as 'terminating'.
5429 * @devh: HAL device handle.
5431 * Mark the device as 'terminating', going to terminate. Can be used
5432 * to serialize termination with other running processes/contexts.
5434 * See also: xge_hal_device_terminate().
5437 xge_hal_device_terminating(xge_hal_device_h devh)
5439 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
5441 xge_hal_channel_t *channel;
5442 #if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5443 unsigned long flags=0;
5447 * go through each opened tx channel and acquire
5448 * lock, so it will serialize with HAL termination flag
5450 xge_list_for_each(item, &hldev->fifo_channels) {
5451 channel = xge_container_of(item, xge_hal_channel_t, item);
5452 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5453 xge_os_spin_lock(&channel->reserve_lock);
5454 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5455 xge_os_spin_lock_irq(&channel->reserve_lock, flags);
/* set per-channel flag while holding the reserve lock */
5458 channel->terminating = 1;
5460 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5461 xge_os_spin_unlock(&channel->reserve_lock);
5462 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5463 xge_os_spin_unlock_irq(&channel->reserve_lock, flags);
/* finally mark the whole device as terminating */
5467 hldev->terminating = 1;
5471 * xge_hal_device_terminate - Terminate Xframe device.
5472 * @hldev: HAL device handle.
5474 * Terminate HAL device.
5476 * See also: xge_hal_device_initialize().
5479 xge_hal_device_terminate(xge_hal_device_t *hldev)
5481 xge_assert(g_xge_hal_driver != NULL);
5482 xge_assert(hldev != NULL);
5483 xge_assert(hldev->magic == XGE_HAL_MAGIC);
/* drain any queued (not yet dispatched) device events */
5485 xge_queue_flush(hldev->queueh);
5487 hldev->terminating = 1;
5488 hldev->is_initialized = 0;
/* poison the magic so stale handles are caught by asserts */
5490 hldev->magic = XGE_HAL_DEAD;
5492 #if defined(XGE_HAL_TX_MULTI_POST)
5493 xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
5494 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5495 xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
5498 xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
5499 (unsigned long long)(ulong_t)hldev);
/* caller must have closed all channels already */
5501 xge_assert(xge_list_is_empty(&hldev->fifo_channels));
5502 xge_assert(xge_list_is_empty(&hldev->ring_channels));
5504 if (hldev->stats.is_initialized) {
5505 __hal_stats_terminate(&hldev->stats);
5508 /* close if open and free all channels */
5509 while (!xge_list_is_empty(&hldev->free_channels)) {
5510 xge_hal_channel_t *channel = (xge_hal_channel_t*)
5511 hldev->free_channels.next;
5513 xge_assert(!channel->is_open);
5514 xge_list_remove(&channel->item);
5515 __hal_channel_free(channel);
5518 if (hldev->queueh) {
5519 xge_queue_destroy(hldev->queueh);
/* free the SPDM entry array and its pointer table, if allocated */
5522 if (hldev->spdm_table) {
5523 xge_os_free(hldev->pdev,
5524 hldev->spdm_table[0],
5525 (sizeof(xge_hal_spdm_entry_t) *
5526 hldev->spdm_max_entries));
5527 xge_os_free(hldev->pdev,
5529 (sizeof(xge_hal_spdm_entry_t *) *
5530 hldev->spdm_max_entries));
5531 xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
5532 hldev->spdm_table = NULL;
5535 if (hldev->dump_buf) {
5536 xge_os_free(hldev->pdev, hldev->dump_buf,
5537 XGE_HAL_DUMP_BUF_SIZE);
5538 hldev->dump_buf = NULL;
/* restore the BIOS-saved PCI config space before letting the card go */
5541 if (hldev->device_id != 0) {
5544 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
5545 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
5546 for (j = 0; j < pcisize; j++) {
5547 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
5548 *((u32*)&hldev->pci_config_space_bios + j));
5553 * __hal_device_get_vpd_data - Getting vpd_data.
5555 * @hldev: HAL device handle.
5557 * Getting product name and serial number from vpd capabilities structure
5561 __hal_device_get_vpd_data(xge_hal_device_t *hldev)
5565 int index = 0, count, fail = 0;
/* VPD capability lives at a card-specific config-space offset */
5566 u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR;
5567 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
5568 vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR;
/* defaults used when the VPD read fails or data is malformed */
5570 xge_os_strcpy((char *) hldev->vpd_data.product_name,
5571 "10 Gigabit Ethernet Adapter");
5572 xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available");
5574 vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16);
5575 if ( vpd_data == NULL )
/* read VPD 4 bytes at a time through the VPD Address/Data registers */
5578 for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) {
5579 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index);
5580 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data);
5581 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0);
/* poll (bounded retries) for the read-complete flag */
5582 for (count = 0; count < 5; count++ ) {
5584 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data);
5585 if (data == XGE_HAL_VPD_READ_COMPLETE)
5590 xge_os_printf("ERR, Reading VPD data failed");
5595 xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4),
5596 (u32 *)&vpd_data[index]);
5601 /* read serial number of adapter */
5602 for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) {
/* look for the "SN" keyword; next byte is the field length */
5603 if ((vpd_data[count] == 'S') &&
5604 (vpd_data[count + 1] == 'N') &&
5605 (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) {
5606 memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH);
5607 memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3],
5608 vpd_data[count + 2]);
/* product name: length at vpd_data[1], string starting at vpd_data[3] */
5613 if (vpd_data[1] < XGE_HAL_VPD_LENGTH) {
5614 memset(hldev->vpd_data.product_name, 0, vpd_data[1]);
5615 memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]);
5620 xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16);
5625 * xge_hal_device_handle_tcode - Handle transfer code.
5626 * @channelh: Channel handle.
5627 * @dtrh: Descriptor handle.
5628 * @t_code: One of the enumerated (and documented in the Xframe user guide)
5631 * Handle descriptor's transfer code. The latter comes with each completed
5632 * descriptor, see xge_hal_fifo_dtr_next_completed() and
5633 * xge_hal_ring_dtr_next_completed().
5634 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
5636 * Returns: one of the xge_hal_status_e{} enumerated types.
5637 * XGE_HAL_OK - for success.
5638 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
/*
 * Classify a completed descriptor's transfer code and react: bump the
 * per-t_code error counter, handle fatal codes (link loss, target abort,
 * ECC/parity) immediately, and tell the caller whether to drop the packet.
 * NOTE(review): this chunk is a lossy extraction -- braces, locals and some
 * guard lines are missing between the numbered lines below.
 */
5641 xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
5642 xge_hal_dtr_h dtrh, u8 t_code)
5644 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
5645 xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
/* out-of-range t_code: log it (the bounds check itself is on missing lines) */
5648 xge_os_printf("invalid t_code %d", t_code);
/* ---- transmit (FIFO) side ---- */
5652 if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
5653 hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;
5655 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5656 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
5657 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
5658 XGE_OS_LLXFMT":"XGE_OS_LLXFMT,
5659 txdp->control_1, txdp->control_2, txdp->buffer_pointer,
5660 txdp->host_control);
5663 /* handle link "down" immediately without going through
5664 * xge_hal_device_poll() routine. */
5665 if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
5667 if (hldev->link_state != XGE_HAL_LINK_DOWN) {
5668 xge_hal_pci_bar0_t *bar0 =
5669 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5672 hldev->link_state = XGE_HAL_LINK_DOWN;
5674 val64 = xge_os_pio_mem_read64(hldev->pdev,
5675 hldev->regh0, &bar0->adapter_control);
/* turn the adapter LED off to reflect the lost link */
5678 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
5679 xge_os_pio_mem_write64(hldev->pdev,
5680 hldev->regh0, val64,
5681 &bar0->adapter_control);
/* notify the upper-layer driver synchronously */
5683 g_xge_hal_driver->uld_callbacks.link_down(
5684 hldev->upper_layer_info);
5686 } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
5687 t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
5688 __hal_device_handle_targetabort(hldev);
5689 return XGE_HAL_ERR_CRITICAL;
/* any other Tx t_code: packet is dropped but the device survives */
5691 return XGE_HAL_ERR_PKT_DROP;
/* ---- receive (RING) side ---- */
5692 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
5693 hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;
5695 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5696 xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
5697 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
5698 ":"XGE_OS_LLXFMT, rxdp->control_1,
5699 rxdp->control_2, rxdp->buffer0_ptr,
5700 rxdp->host_control);
5702 if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
5703 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
5704 __hal_device_handle_eccerr(hldev, "rxd_t_code",
5706 return XGE_HAL_ERR_CRITICAL;
5707 } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
5708 t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
5709 hldev->stats.sw_dev_err_stats.parity_err_cnt++;
5710 __hal_device_handle_parityerr(hldev, "rxd_t_code",
5712 return XGE_HAL_ERR_CRITICAL;
5713 /* do not drop if detected unknown IPv6 extension */
5714 } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
5715 return XGE_HAL_ERR_PKT_DROP;
5722 * xge_hal_device_link_state - Get link state.
5723 * @devh: HAL device handle.
5724 * @ls: Link state, see xge_hal_device_link_state_e{}.
5727 * Returns: XGE_HAL_OK.
5728 * See also: xge_hal_device_link_state_e{}.
/* Copy the cached link state into *ls; no hardware access is performed. */
5730 xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
5731 xge_hal_device_link_state_e *ls)
5733 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5735 xge_assert(ls != NULL);
5736 *ls = hldev->link_state;
5741 * xge_hal_device_sched_timer - Configure scheduled device interrupt.
5742 * @devh: HAL device handle.
5743 * @interval_us: Time interval, in microseconds.
5744 * Unlike transmit and receive interrupts,
5745 * the scheduled interrupt is generated independently of
5746 * traffic, but purely based on time.
5747 * @one_shot: 1 - generate scheduled interrupt only once.
5748 * 0 - generate scheduled interrupt periodically at the specified
5749 * @interval_us interval.
5751 * (Re-)configure scheduled interrupt. Can be called at runtime to change
5752 * the setting, generate one-shot interrupts based on the resource and/or
5753 * traffic conditions, other purposes.
5754 * See also: xge_hal_device_config_t{}.
/*
 * Program the scheduled_int_ctrl register: convert interval_us to PCI
 * clock ticks, apply the Herc interval fixup, then write period and
 * one-shot/enable control bits.
 * NOTE(review): the conditional lines selecting ONE_SHOT vs. periodic and
 * enable vs. disable are missing from this extraction (gaps 5772/5774/5776).
 */
5756 void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
5760 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5761 xge_hal_pci_bar0_t *bar0 =
5762 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* ticks = PCI frequency (MHz) * microseconds */
5763 unsigned int interval = hldev->config.pci_freq_mherz * interval_us;
5765 interval = __hal_fix_time_ival_herc(hldev, interval);
5767 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5768 &bar0->scheduled_int_ctrl);
5770 val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK;
5771 val64 |= XGE_HAL_SCHED_INT_PERIOD(interval);
5773 val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT;
5775 val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
5777 val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
5780 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
5781 val64, &bar0->scheduled_int_ctrl);
5783 xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
5784 (unsigned long long)val64,
5785 interval ? "enabled" : "disabled");
5789 * xge_hal_device_check_id - Verify device ID.
5790 * @devh: HAL device handle.
5793 * Returns: one of the xge_hal_card_e{} enumerated types.
5794 * See also: xge_hal_card_e{}.
/* Map the stored PCI device id to a card family (Xena/Herc/Titan). */
5797 xge_hal_device_check_id(xge_hal_device_h devh)
5799 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5800 switch (hldev->device_id) {
5801 case XGE_PCI_DEVICE_ID_XENA_1:
5802 case XGE_PCI_DEVICE_ID_XENA_2:
5803 return XGE_HAL_CARD_XENA;
5804 case XGE_PCI_DEVICE_ID_HERC_1:
5805 case XGE_PCI_DEVICE_ID_HERC_2:
5806 return XGE_HAL_CARD_HERC;
5807 case XGE_PCI_DEVICE_ID_TITAN_1:
5808 case XGE_PCI_DEVICE_ID_TITAN_2:
5809 return XGE_HAL_CARD_TITAN;
/* default (keyword on a missing line): unrecognized device id */
5811 return XGE_HAL_CARD_UNKNOWN;
5816 * xge_hal_device_pci_info_get - Get PCI bus informations such as width,
5817 * frequency, and mode from previously stored values.
5818 * @devh: HAL device handle.
5819 * @pci_mode: pointer to a variable of enumerated type
5820 * xge_hal_pci_mode_e{}.
5821 * @bus_frequency: pointer to a variable of enumerated type
5822 * xge_hal_pci_bus_frequency_e{}.
5823 * @bus_width: pointer to a variable of enumerated type
5824 * xge_hal_pci_bus_width_e{}.
5826 * Get pci mode, frequency, and PCI bus width.
5827 * Returns: one of the xge_hal_status_e{} enumerated types.
5828 * XGE_HAL_OK - for success.
5829 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle.
5830 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e.
/*
 * Return the cached PCI mode, bus frequency and bus width.
 * Validates the handle (non-NULL, initialized, magic matches) before use.
 */
5833 xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
5834 xge_hal_pci_bus_frequency_e *bus_frequency,
5835 xge_hal_pci_bus_width_e *bus_width)
5837 xge_hal_status_e rc_status;
5838 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5840 if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) {
5841 rc_status = XGE_HAL_ERR_INVALID_DEVICE;
5842 xge_debug_device(XGE_ERR,
5843 "xge_hal_device_pci_info_get error, rc %d for device %p",
/* valid device: report values captured earlier during initialization */
5849 *pci_mode = hldev->pci_mode;
5850 *bus_frequency = hldev->bus_frequency;
5851 *bus_width = hldev->bus_width;
5852 rc_status = XGE_HAL_OK;
5857 * xge_hal_reinitialize_hw
5858 * @hldev: private member of the device structure.
5860 * This function will soft reset the NIC and re-initialize all the
5861 * I/O registers to the values they had after its initial initialization
5862 * through the probe function.
/*
 * Soft-reset the NIC and redo the hardware register initialization.
 * On failure, tear the device down and reset again (return lines missing
 * from this extraction).
 */
5864 int xge_hal_reinitialize_hw(xge_hal_device_t * hldev)
5866 (void) xge_hal_device_reset(hldev);
5867 if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) {
5868 xge_hal_device_terminate(hldev);
5869 (void) __hal_device_reset(hldev);
5877 * __hal_read_spdm_entry_line
5878 * @hldev: pointer to xge_hal_device_t structure
5879 * @spdm_line: spdm line in the spdm entry to be read.
5880 * @spdm_entry: spdm entry of the spdm_line in the SPDM table.
5881 * @spdm_line_val: Contains the value stored in the spdm line.
5883 * SPDM table contains upto a maximum of 256 spdm entries.
5884 * Each spdm entry contains 8 lines and each line stores 8 bytes.
5885 * This function reads the spdm line(addressed by @spdm_line)
5886 * of the spdm entry(addressed by @spdm_entry) in
/*
 * Read one 8-byte line of an SPDM table entry: issue a strobe command
 * selecting (line, entry), poll until the strobe bit clears, then read
 * the data register into *spdm_line_val.
 */
5890 __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
5891 u16 spdm_entry, u64 *spdm_line_val)
5893 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5896 val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE |
5897 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) |
5898 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry);
5900 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5901 &bar0->rts_rth_spdm_mem_ctrl);
5903 /* poll until done */
5904 if (__hal_device_register_poll(hldev,
5905 &bar0->rts_rth_spdm_mem_ctrl, 0,
5906 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE,
5907 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
/* timed out: hardware still executing the strobe command */
5909 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5912 *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev,
5913 hldev->regh0, &bar0->rts_rth_spdm_mem_data);
5919 * __hal_get_free_spdm_entry
5920 * @hldev: pointer to xge_hal_device_t structure
5921 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table.
5923 * This function returns an index of unused spdm entry in the SPDM
/*
 * Find a free slot by scanning the driver's local shadow of the SPDM
 * table, then cross-check against the hardware: line 7 of the entry
 * carries the entry_enable bit, which must be clear for a free slot.
 */
5926 static xge_hal_status_e
5927 __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry)
5929 xge_hal_status_e status;
5930 u64 spdm_line_val=0;
/*
5933 * Search in the local SPDM table for a free slot.
*/
5936 for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) {
5937 if (hldev->spdm_table[*spdm_entry]->in_use) {
5942 if (*spdm_entry >= hldev->spdm_max_entries) {
5943 return XGE_HAL_ERR_SPDM_TABLE_FULL;
/*
5947 * Make sure that the corresponding spdm entry in the SPDM
5949 * Seventh line of the spdm entry contains information about
5950 * whether the entry is free or not.
*/
5952 if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry,
5953 &spdm_line_val)) != XGE_HAL_OK) {
5957 /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */
5958 if ((spdm_line_val & BIT(63))) {
/* hardware says in-use but local shadow said free: inconsistency */
5962 xge_debug_device(XGE_ERR, "Local SPDM table is not "
5963 "consistent with the actual one for the spdm "
5964 "entry %d", *spdm_entry);
5965 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
5973 * __hal_calc_jhash - Calculate Jenkins hash.
5974 * @msg: Jenkins hash algorithm key.
5975 * @length: Length of the key.
5976 * @golden_ratio: Jenkins hash golden ratio.
5977 * @init_value: Jenkins hash initial value.
5979 * This function implements the Jenkins based algorithm used for the
5980 * calculation of the RTH hash.
5981 * Returns: Jenkins hash value.
/*
 * Jenkins (lookup2-style) hash over the byte-array key, used for RTH.
 * NOTE(review): the main 12-bytes-at-a-time while loop header, the mix()
 * invocations, and the final length-add/return lines fall on lines missing
 * from this extraction.
 */
5985 __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value)
5988 register u32 a,b,c,len;
/*
5991 * Set up the internal state
*/
5994 a = b = golden_ratio; /* the golden ratio; an arbitrary value */
5995 c = init_value; /* the previous hash value */
5997 /* handle most of the key */
/* consume the key 12 bytes per iteration, little-endian word assembly */
6000 a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16)
6001 + ((u32)msg[3]<<24));
6002 b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16)
6003 + ((u32)msg[7]<<24));
6004 c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16)
6005 + ((u32)msg[11]<<24));
6007 msg += 12; len -= 12;
6010 /* handle the last 11 bytes */
6012 switch(len) /* all the case statements fall through */
6014 case 11: c+= ((u32)msg[10]<<24);
6016 case 10: c+= ((u32)msg[9]<<16);
6018 case 9 : c+= ((u32)msg[8]<<8);
6020 /* the first byte of c is reserved for the length */
6021 case 8 : b+= ((u32)msg[7]<<24);
6023 case 7 : b+= ((u32)msg[6]<<16);
6025 case 6 : b+= ((u32)msg[5]<<8);
6027 case 5 : b+= msg[4];
6029 case 4 : a+= ((u32)msg[3]<<24);
6031 case 3 : a+= ((u32)msg[2]<<16);
6033 case 2 : a+= ((u32)msg[1]<<8);
6035 case 1 : a+= msg[0];
6037 /* case 0: nothing left to add */
6042 /* report the result */
6048 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table.
6049 * @devh: HAL device handle.
6050 * @src_ip: Source ip address(IPv4/IPv6).
6051 * @dst_ip: Destination ip address(IPv4/IPv6).
6052 * @l4_sp: L4 source port.
6053 * @l4_dp: L4 destination port.
6054 * @is_tcp: Set to 1, if the protocol is TCP.
6055 * 0, if the protocol is UDP.
6056 * @is_ipv4: Set to 1, if the protocol is IPv4.
6057 * 0, if the protocol is IPv6.
6058 * @tgt_queue: Target queue to route the receive packet.
6060 * This function adds a new entry to the SPDM table.
6062 * Returns: XGE_HAL_OK - success.
6063 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
6064 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry with in
6065 * the time(timeout).
6066 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full.
6067 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry.
6069 * See also: xge_hal_spdm_entry_remove{}.
/*
 * Build the Jenkins hash key from the flow tuple, compute the RTH hash,
 * find a free SPDM slot under the spdm_lock, and program the new entry.
 */
6072 xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
6073 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
6074 u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
6077 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6078 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6081 u32 jhash_golden_ratio;
6085 u8 msg[XGE_HAL_JHASH_MSG_LEN];
6087 xge_hal_status_e status;
/* SPDM steering must be enabled in the device configuration */
6090 if (!hldev->config.rth_spdm_en) {
6091 return XGE_HAL_ERR_SPDM_NOT_ENABLED;
/* the target ring must be a valid ring number */
6094 if ((tgt_queue < XGE_HAL_MIN_RING_NUM) ||
6095 (tgt_queue > XGE_HAL_MAX_RING_NUM)) {
6096 return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
/*
6101 * Calculate the jenkins hash.
6104 * Create the Jenkins hash algorithm key.
6105 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
6106 * use L4 information. Otherwise key = {L3SA, L3DA}.
*/
6110 ipaddr_len = 4; // In bytes
/*
6116 * Jenkins hash algorithm expects the key in the big endian
6117 * format. Since key is the byte array, memcpy won't work in the
6118 * case of little endian. So, the current code extracts each
6119 * byte starting from MSB and store it in the key.
*/
6122 for (off = 0; off < ipaddr_len; off++) {
6123 u32 mask = vBIT32(0xff,(off*8),8);
6124 int shift = 32-(off+1)*8;
6125 msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
6126 msg[off+ipaddr_len] =
6127 (u8)((dst_ip->ipv4.addr & mask) >> shift);
/* IPv6 variant: extract bytes from each 64-bit half of the address */
6130 for (off = 0; off < ipaddr_len; off++) {
6132 u64 mask = vBIT(0xff,(loc*8),8);
6133 int shift = 64-(loc+1)*8;
6135 msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
6137 msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]
/* key so far holds src+dst addresses */
6142 off = (2*ipaddr_len);
6144 if (hldev->config.rth_spdm_use_l4) {
/* append L4 source/destination ports, big-endian */
6145 msg[off] = (u8)((l4_sp & 0xff00) >> 8);
6146 msg[off + 1] = (u8)(l4_sp & 0xff);
6147 msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
6148 msg[off + 3] = (u8)(l4_dp & 0xff);
/*
6153 * Calculate jenkins hash for this configuration
*/
6155 val64 = xge_os_pio_mem_read64(hldev->pdev,
6157 &bar0->rts_rth_jhash_cfg);
/* golden ratio lives in the upper 32 bits, init value in the lower */
6158 jhash_golden_ratio = (u32)(val64 >> 32);
6159 jhash_init_val = (u32)(val64 & 0xffffffff);
6161 jhash_value = __hal_calc_jhash(msg, off,
6165 xge_os_spin_lock(&hldev->spdm_lock);
/*
6168 * Locate a free slot in the SPDM table. To avoid a search in the
6169 * actual SPDM table, which is very expensive in terms of time,
6170 * we are maintaining a local copy of the table and the search for
6171 * the free entry is performed in the local table.
*/
6173 if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
6175 xge_os_spin_unlock(&hldev->spdm_lock);
/*
6180 * Add this entry to the SPDM table
*/
6182 status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
6183 is_tcp, is_ipv4, tgt_queue,
6184 jhash_value, /* calculated jhash */
6187 xge_os_spin_unlock(&hldev->spdm_lock);
6193 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
6194 * @devh: HAL device handle.
6195 * @src_ip: Source ip address(IPv4/IPv6).
6196 * @dst_ip: Destination ip address(IPv4/IPv6).
6197 * @l4_sp: L4 source port.
6198 * @l4_dp: L4 destination port.
6199 * @is_tcp: Set to 1, if the protocol is TCP.
6200 * 0, if the protocol is UDP.
6201 * @is_ipv4: Set to 1, if the protocol is IPv4.
6202 * 0, if the protocol is IPv6.
6204 * This function removes an entry from the SPDM table.
6206 * Returns: XGE_HAL_OK - success.
6207 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
6208 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry with in
6209 * the time(timeout).
6210 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
6213 * See also: xge_hal_spdm_entry_add{}.
/*
 * Remove an SPDM entry: locate it in the local shadow table, read the
 * hardware entry line-by-line, verify it matches the requested tuple,
 * then clear the entry_enable bit (line 7, BIT(63)) and wait for the
 * SPDM-ready indication.  Protected end-to-end by spdm_lock.
 */
6216 xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
6217 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
6218 u8 is_tcp, u8 is_ipv4)
6221 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6222 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6225 xge_hal_status_e status;
6226 u64 spdm_line_arr[8];
6233 if (!hldev->config.rth_spdm_en) {
6234 return XGE_HAL_ERR_SPDM_NOT_ENABLED;
6237 xge_os_spin_lock(&hldev->spdm_lock);
/*
6240 * Poll the rxpic_int_reg register until spdm ready bit is set or
*/
6243 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
6244 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
6245 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
6247 /* upper layer may require to repeat */
6248 xge_os_spin_unlock(&hldev->spdm_lock);
6249 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/*
6253 * Clear the SPDM READY bit.
*/
6255 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6256 &bar0->rxpic_int_reg);
6257 val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
6258 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6259 &bar0->rxpic_int_reg);
/*
6262 * Search in the local SPDM table to get the index of the
6263 * corresponding entry in the SPDM table.
*/
6266 for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
/* skip slots that are free or whose protocol/port tuple differs */
6267 if ((!hldev->spdm_table[spdm_entry]->in_use) ||
6268 (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
6269 (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
6270 (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
6271 (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
/*
6276 * Compare the src/dst IP addresses of source and target
*/
6279 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
6280 != src_ip->ipv4.addr) ||
6281 (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
6282 != dst_ip->ipv4.addr)) {
/* IPv6 comparison covers both 64-bit halves of src and dst */
6286 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
6287 != src_ip->ipv6.addr[0]) ||
6288 (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
6289 != src_ip->ipv6.addr[1]) ||
6290 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
6291 != dst_ip->ipv6.addr[0]) ||
6292 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
6293 != dst_ip->ipv6.addr[1])) {
/* no matching local entry found */
6300 if (spdm_entry >= hldev->spdm_max_entries) {
6301 xge_os_spin_unlock(&hldev->spdm_lock);
6302 return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
/*
6306 * Retrieve the corresponding entry from the SPDM table and
6307 * make sure that the data is consistent.
*/
6309 for(line_no = 0; line_no < 8; line_no++) {
/*
6312 * SPDM line 2,3,4 are valid only for IPv6 entry.
6313 * SPDM line 5 & 6 are reserved. We don't have to
6314 * read these entries in the above cases.
*/
6317 ((line_no == 2)||(line_no == 3)||(line_no == 4)) ||
6323 if ((status = __hal_read_spdm_entry_line(
6327 &spdm_line_arr[line_no]))
6329 xge_os_spin_unlock(&hldev->spdm_lock);
/*
6335 * Seventh line of the spdm entry contains the entry_enable
6336 * bit. Make sure that the entry_enable bit of this spdm entry
6338 * To remove an entry from the SPDM table, reset this
*/
6341 if (!(spdm_line_arr[7] & BIT(63))) {
/* hardware entry already disabled: shadow table is stale */
6345 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6346 "consistent with the actual one for the spdm "
6347 "entry %d ", spdm_entry);
/*
6352 * Retrieve the L4 SP/DP, src/dst ip addresses from the SPDM
6353 * table and do a comparison.
*/
6355 spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
6356 spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
6357 spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
6358 spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);
6361 if ((spdm_is_tcp != is_tcp) ||
6362 (spdm_is_ipv4 != is_ipv4) ||
6363 (spdm_l4_sp != l4_sp) ||
6364 (spdm_l4_dp != l4_dp)) {
6368 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6369 "consistent with the actual one for the spdm "
6370 "entry %d ", spdm_entry);
6375 /* Upper 32 bits of spdm_line(64 bit) contains the
6376 * src IPv4 address. Lower 32 bits of spdm_line
6377 * contains the destination IPv4 address.
*/
6379 u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
6380 u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);
6382 if ((temp_src_ip != src_ip->ipv4.addr) ||
6383 (temp_dst_ip != dst_ip->ipv4.addr)) {
6384 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6385 "consistent with the actual one for the spdm "
6386 "entry %d ", spdm_entry);
/*
6392 * SPDM line 1 & 2 contains the src IPv6 address.
6393 * SPDM line 3 & 4 contains the dst IPv6 address.
*/
6395 if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
6396 (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
6397 (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
6398 (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {
6403 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6404 "consistent with the actual one for the spdm "
6405 "entry %d ", spdm_entry);
/*
6411 * Reset the entry_enable bit to zero
*/
6413 spdm_line_arr[7] &= ~BIT(63);
/* write line 7 back into SPDM memory (entries are 64 bytes, 8 lines of 8) */
6415 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6417 (void *)((char *)hldev->spdm_mem_base +
6418 (spdm_entry * 64) + (7 * 8)));
/*
6421 * Wait for the operation to be completed.
*/
6423 if (__hal_device_register_poll(hldev,
6424 &bar0->rxpic_int_reg, 1,
6425 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
6426 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
6427 xge_os_spin_unlock(&hldev->spdm_lock);
6428 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/*
6432 * Make the corresponding spdm entry in the local SPDM table
6433 * available for future use.
*/
6435 hldev->spdm_table[spdm_entry]->in_use = 0;
6436 xge_os_spin_unlock(&hldev->spdm_lock);
/* inconsistency exit path (label lines missing from this extraction) */
6441 xge_os_spin_unlock(&hldev->spdm_lock);
6442 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
6446 * __hal_device_rti_set
6447 * @ring: The post_qid of the ring.
6448 * @channel: HAL channel of the ring.
6450 * This function stores the RTI value associated for the MSI and
6451 * also unmasks this particular RTI in the rti_mask register.
/*
 * Remember the RTI number on the channel (MSI/MSI-X modes) and unmask
 * the corresponding ring's bit in rx_traffic_mask.
 */
6453 static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
6455 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6456 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6459 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
6460 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
6461 channel->rti = (u8)ring_qid;
6463 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6464 &bar0->rx_traffic_mask);
/* clearing the bit unmasks rx traffic interrupts for this ring */
6465 val64 &= ~BIT(ring_qid);
6466 xge_os_pio_mem_write64(hldev->pdev,
6467 hldev->regh0, val64,
6468 &bar0->rx_traffic_mask);
6472 * __hal_device_tti_set
6473 * @ring: The post_qid of the FIFO.
6474 * @channel: HAL channel the FIFO.
6476 * This function stores the TTI value associated for the MSI and
6477 * also unmasks this particular TTI in the tti_mask register.
/*
 * Remember the TTI number on the channel (MSI/MSI-X modes) and unmask
 * the corresponding FIFO's bit in tx_traffic_mask.
 */
6479 static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
6481 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6482 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6485 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
6486 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
6487 channel->tti = (u8)fifo_qid;
6489 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6490 &bar0->tx_traffic_mask);
/* clearing the bit unmasks tx traffic interrupts for this FIFO */
6491 val64 &= ~BIT(fifo_qid);
6492 xge_os_pio_mem_write64(hldev->pdev,
6493 hldev->regh0, val64,
6494 &bar0->tx_traffic_mask);
6498 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a
6499 * FIFO for a given MSI.
6500 * @channelh: HAL channel handle.
6501 * @msi: MSI Number associated with the channel.
6502 * @msi_msg: The MSI message associated with the MSI number above.
6504 * This API will associate a given channel (either Ring or FIFO) with the
6505 * given MSI number. It will alo program the Tx_Mat/Rx_Mat tables in the
6506 * hardware to indicate this association to the hardware.
/*
 * Bind a channel to an MSI: record the MSI message on the channel and
 * program the Rx_Mat (ring) or Tx_Mat (FIFO) table so the hardware routes
 * the channel's interrupts to that MSI, then set up RTI/TTI.
 * NOTE(review): the Rx_Mat/Tx_Mat register operands fall on lines missing
 * from this extraction (gaps after 6521/6524 and 6531/6534).
 */
6509 xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
6511 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
6512 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6513 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6516 channel->msi_msg = msi_msg;
6517 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
6518 int ring = channel->post_qid;
6519 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
6520 " MSI: %d", channel->msi_msg, ring, msi);
6521 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6523 val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
6524 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6526 __hal_device_rti_set(ring, channel);
6528 int fifo = channel->post_qid;
6529 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
6530 " MSI: %d", channel->msi_msg, fifo, msi);
6531 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6533 val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
6534 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6536 __hal_device_tti_set(fifo, channel);
6543 * xge_hal_mask_msix - Begin IRQ processing.
6544 * @hldev: HAL device handle.
6547 * The function masks the msix interrupt for the given msi_id
6552 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range
/*
 * Mask one MSI-X vector via a read-modify-write of its Vector Control
 * dword in the BAR2 MSI-X table (each table entry is 4 dwords; dword 3
 * is Vector Control).  The line that sets the mask bit in val32 falls on
 * a line missing from this extraction (gap 6567).
 */
6557 xge_hal_mask_msix(xge_hal_device_h devh, int msi_id)
6559 xge_hal_status_e status = XGE_HAL_OK;
6560 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6561 u32 *bar2 = (u32 *)hldev->bar2;
6564 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
6566 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
6568 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
6573 * xge_hal_unmask_msix - End IRQ processing.
6574 * @hldev: HAL device handle.
6577 * The function unmasks the msix interrupt for the given msi_id
6582 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range
/*
 * Unmask one MSI-X vector via a read-modify-write of its Vector Control
 * dword in the BAR2 MSI-X table.  The line that clears the mask bit in
 * val32 falls on a line missing from this extraction (gap 6597).
 */
6587 xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id)
6589 xge_hal_status_e status = XGE_HAL_OK;
6590 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6591 u32 *bar2 = (u32 *)hldev->bar2;
6594 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
6596 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
6598 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
6603 * __hal_set_msix_vals
6604 * @devh: HAL device handle.
6605 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
6606 * Filled in by this function.
6607 * @msix_address: 32bit MSI-X DMA address.
6608 * Filled in by this function.
6609 * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
6610 * entry in the table of MSI-X (value, address) pairs.
6612 * This function will program the hardware associating the given
6613 * address/value combination to the specified msi number.
/*
 * Read back the (address, data) pair programmed for an MSI-X index:
 * strobe the xmsi_access register with the index, wait for the strobe
 * bit to clear (retry/loop lines missing here), then fetch xmsi_data
 * and xmsi_address.
 */
6615 static void __hal_set_msix_vals (xge_hal_device_h devh,
6622 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
6623 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6626 val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
6627 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
6628 (u32)(val64 >> 32), &bar0->xmsi_access);
6629 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
6630 (u32)(val64), &bar0->xmsi_access);
6632 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6633 &bar0->xmsi_access);
/* strobe still set means the access has not completed yet */
6634 if (val64 & XGE_HAL_XMSI_STROBE)
6639 *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6641 *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6642 &bar0->xmsi_address);
6646 * xge_hal_channel_msix_set - Associate MSI-X with a channel.
6647 * @channelh: HAL channel handle.
6648 * @msix_idx: index that corresponds to a particular (@msix_value,
6649 * @msix_address) entry in the MSI-X table.
6651 * This API associates a given channel (either Ring or FIFO) with the
6652 * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
6653 * to indicate this association.
/*
 * Bind a channel to an MSI-X index: program Rx_Mat (ring) or Tx_Mat
 * (FIFO), set up RTI/TTI, record the index, then read back the MSI-X
 * address/data pair into the channel.
 * NOTE(review): the ring branch stores into config.fifo.queue[] and the
 * FIFO branch into config.ring.queue[] -- these look swapped; confirm
 * against the upstream driver before relying on intr_vector here.
 */
6656 xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
6658 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
6659 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6660 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6663 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
6664 /* Currently Ring and RTI is one on one. */
6665 int ring = channel->post_qid;
6666 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6668 val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx);
6669 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6671 __hal_device_rti_set(ring, channel);
6672 hldev->config.fifo.queue[channel->post_qid].intr_vector =
6674 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
6675 int fifo = channel->post_qid;
6676 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6678 val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx);
6679 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6681 __hal_device_tti_set(fifo, channel);
6682 hldev->config.ring.queue[channel->post_qid].intr_vector =
6685 channel->msix_idx = msix_idx;
6686 __hal_set_msix_vals(hldev, &channel->msix_data,
6687 &channel->msix_address,
6693 #if defined(XGE_HAL_CONFIG_LRO)
6695 * xge_hal_lro_terminate - Terminate lro resources.
6696 * @lro_scale: Amount of lro memory.
6697 * @hldev: Hal device structure.
/* Release LRO resources; the body (if any) falls on missing lines. */
6701 xge_hal_lro_terminate(u32 lro_scale,
6702 xge_hal_device_t *hldev)
6707 * xge_hal_lro_init - Initiate lro resources.
6708 * @lro_scale: Amount of lro memory.
6709 * @hldev: Hal device structure.
6710 * Note: For time being I am using only one LRO per device. Later on size
6711 * will be increased.
/*
 * Initialize per-ring LRO state: apply hard-coded defaults for segment
 * size and frame length if unset, zero each ring's bucket pool, and
 * reset the next-index/most-recent bookkeeping.
 */
6715 xge_hal_lro_init(u32 lro_scale,
6716 xge_hal_device_t *hldev)
6720 if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE)
6721 hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;
6723 if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE)
6724 hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;
6726 for (i=0; i < XGE_HAL_MAX_RING_NUM; i++)
6728 xge_os_memzero(hldev->lro_desc[i].lro_pool,
6729 sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS);
6731 hldev->lro_desc[i].lro_next_idx = 0;
6732 hldev->lro_desc[i].lro_recent = NULL;
6741 * xge_hal_device_poll - HAL device "polling" entry point.
6742 * @devh: HAL device.
6744 * HAL "polling" entry point. Note that this is part of HAL public API.
6745 * Upper-Layer driver _must_ periodically poll HAL via
6746 * xge_hal_device_poll().
6748 * HAL uses caller's execution context to serially process accumulated
6749 * slow-path events, such as link state changes and hardware error
6752 * The rate of polling could be somewhere between 500us to 10ms,
6753 * depending on requirements (e.g., the requirement to support fail-over
6754 * could mean that 500us or even 100us polling interval need to be used).
6756 * The need and motivation for external polling includes
6758 * - remove the error-checking "burden" from the HAL interrupt handler
6759 * (see xge_hal_device_handle_irq());
6761 * - remove the potential source of portability issues by _not_
6762 * implementing separate polling thread within HAL itself.
6764 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
6765 * Usage: See ex_slow_path{}.
/*
 * Periodic slow-path entry point called by the ULD: drains the event
 * queue (link changes, critical errors), runs XPAK stat bookkeeping,
 * and invokes __hal_device_poll() bracketed by the ULD's before/after
 * callbacks.  If a critical error surfaces, the queue is walked again
 * to deliver the first critical event this cycle.
 */
6768 xge_hal_device_poll(xge_hal_device_h devh)
6770 unsigned char item_buf[sizeof(xge_queue_item_t) +
6771 XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
6772 xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
6773 xge_queue_status_e qstatus;
6774 xge_hal_status_e hstatus;
6776 int queue_has_critical_event = 0;
6777 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
6779 xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
6780 XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
/* bail out on an uninitialized, terminating, or corrupted device */
6783 if (!hldev->is_initialized ||
6784 hldev->terminating ||
6785 hldev->magic != XGE_HAL_MAGIC)
/* XPAK transceiver stats are refreshed once every 72000 ticks */
6788 if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
6793 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
/*
6796 * Logging Error messages in the excess temperature,
6797 * Bias current, laser output for three cycle
*/
6799 __hal_updt_stats_xpak(hldev);
6800 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
6803 if (!queue_has_critical_event)
6804 queue_has_critical_event =
6805 __queue_get_reset_critical(hldev->queueh);
/* consume at most QUEUE_CONSUME_MAX events, unless a critical is pending */
6808 while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {
6810 qstatus = xge_queue_consume(hldev->queueh,
6811 XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
6813 if (qstatus == XGE_QUEUE_IS_EMPTY)
6816 xge_debug_queue(XGE_TRACE,
6817 "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
6818 XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
6819 (u64)(ulong_t)item->context);
6821 if (!hldev->is_initialized ||
6822 hldev->magic != XGE_HAL_MAGIC) {
6827 switch (item->event_type) {
6828 case XGE_HAL_EVENT_LINK_IS_UP: {
/* link events are suppressed while a critical event is pending */
6829 if (!queue_has_critical_event &&
6830 g_xge_hal_driver->uld_callbacks.link_up) {
6831 g_xge_hal_driver->uld_callbacks.link_up(
6832 hldev->upper_layer_info);
6833 hldev->link_state = XGE_HAL_LINK_UP;
6836 case XGE_HAL_EVENT_LINK_IS_DOWN: {
6837 if (!queue_has_critical_event &&
6838 g_xge_hal_driver->uld_callbacks.link_down) {
6839 g_xge_hal_driver->uld_callbacks.link_down(
6840 hldev->upper_layer_info);
6841 hldev->link_state = XGE_HAL_LINK_DOWN;
6844 case XGE_HAL_EVENT_SERR:
6845 case XGE_HAL_EVENT_ECCERR:
6846 case XGE_HAL_EVENT_PARITYERR:
6847 case XGE_HAL_EVENT_TARGETABORT:
6848 case XGE_HAL_EVENT_SLOT_FREEZE: {
6849 void *item_data = xge_queue_item_data(item);
6850 xge_hal_event_e event_type = item->event_type;
6851 u64 val64 = *((u64*)item_data);
/* a slot freeze may masquerade as another error; reclassify */
6853 if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
6854 if (xge_hal_device_is_slot_freeze(hldev))
6855 event_type = XGE_HAL_EVENT_SLOT_FREEZE;
6856 if (g_xge_hal_driver->uld_callbacks.crit_err) {
6857 g_xge_hal_driver->uld_callbacks.crit_err(
6858 hldev->upper_layer_info,
6861 /* handle one critical event per poll cycle */
6867 xge_debug_queue(XGE_TRACE,
6868 "got non-HAL event %d",
6873 /* broadcast this event */
6874 if (g_xge_hal_driver->uld_callbacks.event)
6875 g_xge_hal_driver->uld_callbacks.event(item);
6878 if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
6879 if (g_xge_hal_driver->uld_callbacks.before_device_poll(
6886 hstatus = __hal_device_poll(hldev);
6887 if (g_xge_hal_driver->uld_callbacks.after_device_poll)
6888 g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);
/*
6891 * handle critical error right away:
6892 * - walk the device queue again
6893 * - drop non-critical events, if any
6894 * - look for the 1st critical
*/
6896 if (hstatus == XGE_HAL_ERR_CRITICAL) {
6897 queue_has_critical_event = 1;
6905 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
6906 * @hldev: HAL device handle.
6908 * This function is used to set the adapter to enhanced mode.
6910 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
6913 xge_hal_rts_rth_init(xge_hal_device_t *hldev)
6915 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6919 * Set the receive traffic steering mode from default(classic)
6922 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6924 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6925 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6926 val64, &bar0->rts_ctrl);
6930 * xge_hal_rts_rth_clr - Clear RTS hashing.
6931 * @hldev: HAL device handle.
6933 * This function is used to clear all RTS hashing related stuff.
6934 * It brings the adapter out from enhanced mode to classic mode.
6935 * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
6937 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
6940 xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
6942 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6946 * Set the receive traffic steering mode from default(classic)
6949 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6951 val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6952 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6953 val64, &bar0->rts_ctrl);
6955 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6956 &bar0->rts_rth_cfg);
6960 * xge_hal_rts_rth_set - Set/configure RTS hashing.
6961 * @hldev: HAL device handle.
6962 * @def_q: default queue
6963 * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc.
6964 * @bucket_size: no of least significant bits to be used for hashing.
6966 * Used to set/configure all RTS hashing related stuff.
6967 * - set the steering mode to enhanced.
6968 * - set hash function i.e algo selection.
6969 * - set the default queue.
6971 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
6974 xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
6977 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6980 val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
6981 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6982 &bar0->rts_default_q);
6985 val64 |= XGE_HAL_RTS_RTH_EN;
6986 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
6987 val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
6988 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6989 &bar0->rts_rth_cfg);
6993 * xge_hal_rts_rth_start - Start RTS hashing.
6994 * @hldev: HAL device handle.
6996 * Used to Start RTS hashing .
6998 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start.
7001 xge_hal_rts_rth_start(xge_hal_device_t *hldev)
7003 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7007 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7008 &bar0->rts_rth_cfg);
7009 val64 |= XGE_HAL_RTS_RTH_EN;
7010 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7011 &bar0->rts_rth_cfg);
7015 * xge_hal_rts_rth_stop - Stop the RTS hashing.
7016 * @hldev: HAL device handle.
7018 * Used to Staop RTS hashing .
7020 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start.
7023 xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
7025 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7028 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7029 &bar0->rts_rth_cfg);
7030 val64 &= ~XGE_HAL_RTS_RTH_EN;
7031 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7032 &bar0->rts_rth_cfg);
7036 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
7037 * @hldev: HAL device handle.
7038 * @itable: Pointer to the indirection table
7039 * @itable_size: no of least significant bits to be used for hashing
7041 * Used to set/configure indirection table.
7042 * It enables the required no of entries in the IT.
7043 * It adds entries to the IT.
7045 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
7048 xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
7050 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7054 for (idx = 0; idx < itable_size; idx++) {
7055 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
7056 XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);
7058 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7059 &bar0->rts_rth_map_mem_data);
7062 val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
7063 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
7064 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));
7065 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7066 &bar0->rts_rth_map_mem_ctrl);
7068 /* poll until done */
7069 if (__hal_device_register_poll(hldev,
7070 &bar0->rts_rth_map_mem_ctrl, 0,
7071 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
7072 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
7073 /* upper layer may require to repeat */
7074 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* NOTE(review): extraction dropped this function's local declarations,
 * the outer loop over the key words, and the bound of the tail-fill
 * loop; only the surviving lines are annotated below. */
7083 * xge_hal_device_rts_rth_key_set - Configure 40byte secret for hash calc.
7085 * @hldev: HAL device handle.
7086 * @KeySize: Number of 64-bit words
7087 * @Key: upto 40-byte array of 8-bit values
7088 * This function configures the 40-byte secret which is used for hash
7091 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
7094 xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
7096 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
/* Pack 8 consecutive key bytes into a single 64-bit word for the
 * 'nreg'-th hash-mask register. */
7105 for ( i = 0; i < 8 ; i++) {
7106 /* Prepare 64-bit word for 'nreg' containing 8 keys. */
7109 val64 |= Key[entry++];
7114 /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/
/* Write the assembled word into the next hash-mask register. */
7115 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7116 &bar0->rts_rth_hash_mask[nreg++]);
7120 /* Clear the rest if key is less than 40 bytes */
/* presumably val64 has been reset to zero so the remaining mask
 * registers are cleared — the reset is among the dropped lines;
 * TODO(review): confirm against the full source. */
7122 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7123 &bar0->rts_rth_hash_mask[nreg++]);
7129 * xge_hal_device_is_closed - Device is closed
7131 * @devh: HAL device handle.
7134 xge_hal_device_is_closed(xge_hal_device_h devh)
7136 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7138 if (xge_list_is_empty(&hldev->fifo_channels) &&
7139 xge_list_is_empty(&hldev->ring_channels))
/* Enables the 32-entry section of the RTS MAC-address table that
 * contains MAC address slot 'index'.
 * NOTE(review): the switch(section) skeleton (case labels, breaks,
 * the default branch's return and the final return) was lost in
 * extraction; only the surviving lines are annotated below. */
7146 xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
/* Xena supports XGE_HAL_MAX_MAC_ADDRESSES entries; Herc supports more. */
7150 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
7152 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7153 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7155 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
7156 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
/* Reject indices beyond the per-card MAC-address capacity. */
7158 if ( index >= max_addr )
7159 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
7162 * Calculate the section value
/* Each section covers 32 consecutive MAC-address entries. */
7164 section = index / 32;
7166 xge_debug_device(XGE_TRACE, "the Section value is %d ", section);
/* Read-modify-write rts_mac_cfg, setting the matching SECTn_EN bit
 * (the selecting switch/case lines were dropped in extraction). */
7168 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7169 &bar0->rts_mac_cfg);
7173 val64 |= XGE_HAL_RTS_MAC_SECT0_EN;
7176 val64 |= XGE_HAL_RTS_MAC_SECT1_EN;
7179 val64 |= XGE_HAL_RTS_MAC_SECT2_EN;
7182 val64 |= XGE_HAL_RTS_MAC_SECT3_EN;
7185 val64 |= XGE_HAL_RTS_MAC_SECT4_EN;
7188 val64 |= XGE_HAL_RTS_MAC_SECT5_EN;
7191 val64 |= XGE_HAL_RTS_MAC_SECT6_EN;
7194 val64 |= XGE_HAL_RTS_MAC_SECT7_EN;
/* Out-of-range section: log the error (the accompanying return is
 * among the dropped lines). */
7197 xge_debug_device(XGE_ERR, "Invalid Section value %d "
7201 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
7202 val64, &bar0->rts_mac_cfg);
7208 * xge_hal_fix_rldram_ecc_error
7209 * @hldev: private member of the device structure.
7211 * SXE-02-010. This function will turn OFF the ECC error reporting for the
7212 * interface bet'n external Micron RLDRAM II device and memory controller.
7213 * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U
7214 * fields of MC_ERR_REG register. Issue reported by HP-Unix folks during the
7215 * qualification of Herc.
7218 xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev)
7220 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
7224 val64 = XGE_HAL_MC_RLDRAM_TEST_MODE;
7225 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7226 &bar0->mc_rldram_test_ctrl);
7228 // Enable fg/bg tests.
7229 val64 = 0x0100000000000000ULL;
7230 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7233 // Enable RLDRAM configuration.
7234 val64 = 0x0000000000017B00ULL;
7235 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7236 &bar0->mc_rldram_mrs);
7238 // Enable RLDRAM queues.
7239 val64 = 0x0000000001017B00ULL;
7240 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7241 &bar0->mc_rldram_mrs);
7243 // Setup test ranges
7244 val64 = 0x00000000001E0100ULL;
7245 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7246 &bar0->mc_rldram_test_add);
7248 val64 = 0x00000100001F0100ULL;
7249 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7250 &bar0->mc_rldram_test_add_bkg);
7252 val64 = 0x0001000000010000ULL;
7253 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7254 &bar0->mc_rldram_test_ctrl);
7256 if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1,
7257 XGE_HAL_MC_RLDRAM_TEST_DONE,
7258 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK){
7259 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
7263 val64 = 0x0000000000000000ULL;
7264 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7265 &bar0->mc_rldram_test_ctrl);