2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/nxge/include/xgehal-device.h>
30 #include <dev/nxge/include/xgehal-channel.h>
31 #include <dev/nxge/include/xgehal-fifo.h>
32 #include <dev/nxge/include/xgehal-ring.h>
33 #include <dev/nxge/include/xgehal-driver.h>
34 #include <dev/nxge/include/xgehal-mgmt.h>
/* Sentinel pattern used to terminate/flag register command tables.
 * NOTE(review): exact consumers are not visible in this chunk — confirm. */
36 #define SWITCH_SIGN 0xA5A5A5A5A5A5A5A5ULL
/* On the Herc emulation platform the link interrupt cannot be processed
 * inside the ISR, so the feature is forcibly compiled out. */
39 #ifdef XGE_HAL_HERC_EMULATION
40 #undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
44 * Jenkins hash key length(in bytes)
46 #define XGE_HAL_JHASH_MSG_LEN 50
49 * mix(a,b,c) used in Jenkins hash algorithm
/* Classic Bob Jenkins "lookup" mixing step: reversibly mixes three words
 * via subtract/xor/shift rounds.  Kept as a macro so it expands inline in
 * the hash loop.  Beware: evaluates a, b and c many times — arguments must
 * be plain lvalues with no side effects. */
51 #define mix(a,b,c) { \
52 a -= b; a -= c; a ^= (c>>13); \
53 b -= c; b -= a; b ^= (a<<8); \
54 c -= a; c -= b; c ^= (b>>13); \
55 a -= b; a -= c; a ^= (c>>12); \
56 b -= c; b -= a; b ^= (a<<16); \
57 c -= a; c -= b; c ^= (b>>5); \
58 a -= b; a -= c; a ^= (c>>3); \
59 b -= c; b -= a; b ^= (a<<10); \
60 c -= a; c -= b; c ^= (b>>15); \
65 * __hal_device_event_queued
66 * @data: pointer to xge_hal_device_t structure
 * @event_type: type code of the event that was queued.
68 * Will be called when new event successfully queued.
71 __hal_device_event_queued(void *data, int event_type)
73 xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
	/* Forward the notification to the ULD only if it registered an
	 * event_queued callback; the callback is optional. */
74 if (g_xge_hal_driver->uld_callbacks.event_queued) {
75 g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
80 * __hal_pio_mem_write32_upper
82 * Endian-aware implementation of xge_os_pio_mem_write32().
83 * Since Xframe has 64bit registers, we differentiate upper and lower
 * halves: on a big-endian host (without PIO little-endian swapping) the
 * upper 32 bits live at the register's base offset, otherwise at +4.
87 __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
89 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
90 xge_os_pio_mem_write32(pdev, regh, val, addr);
92 xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
97 * __hal_pio_mem_write32_lower
99 * Endian-aware implementation of xge_os_pio_mem_write32().
100 * Since Xframe has 64bit registers, we differentiate upper and lower
 * halves: mirror image of __hal_pio_mem_write32_upper — on a big-endian
 * host the lower 32 bits are at +4, otherwise at the base offset.
104 __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
107 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
108 xge_os_pio_mem_write32(pdev, regh, val,
109 (void *) ((char *)addr + 4));
111 xge_os_pio_mem_write32(pdev, regh, val, addr);
116 * __hal_device_register_poll
117 * @hldev: pointer to xge_hal_device_t structure
118 * @reg: register to poll for
119 * @op: 0 - bit reset, 1 - bit set
120 * @mask: mask for logical "and" condition based on %op
121 * @max_millis: maximum time to try to poll in milliseconds
123 * Will poll certain register for specified amount of time.
124 * Will poll until masked bit is not cleared.
 * NOTE(review): ret starts as XGE_HAL_FAIL; presumably it is set to
 * XGE_HAL_OK when the condition is met before the timeout — confirm.
127 __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
128 int op, u64 mask, int max_millis)
132 xge_hal_status_e ret = XGE_HAL_FAIL;
	/* Fast path: sample once before entering the timed loop. */
137 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
138 if (op == 0 && !(val64 & mask))
140 else if (op == 1 && (val64 & mask) == mask)
	/* Slow path: re-read until the masked bits are all cleared (op == 0)
	 * or all set (op == 1), bounded by max_millis iterations. */
146 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
147 if (op == 0 && !(val64 & mask))
149 else if (op == 1 && (val64 & mask) == mask)
152 } while (++i < max_millis);
158 * __hal_device_wait_quiescent
 * @hldev: HAL device handle.
160 * @hw_status: hw_status in case of error
162 * Will wait until device is quiescent for some blocks.
164 static xge_hal_status_e
165 __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
167 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
169 /* poll and wait first */
170 #ifdef XGE_HAL_HERC_EMULATION
	/* Emulation build: same readiness mask but without P_PLL_LOCK.
	 * NOTE(review): rationale inferred from the mask difference — the
	 * emulator presumably never asserts P_PLL lock; confirm. */
171 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
172 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
173 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
174 XGE_HAL_ADAPTER_STATUS_PFC_READY |
175 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
176 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
177 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
178 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
179 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
180 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
182 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
183 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
184 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
185 XGE_HAL_ADAPTER_STATUS_PFC_READY |
186 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
187 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
188 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
189 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
190 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
191 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
192 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
	/* The poll result is intentionally discarded; the final verdict is
	 * re-derived from the adapter status by xge_hal_device_status(). */
195 return xge_hal_device_status(hldev, hw_status);
199 * xge_hal_device_is_slot_freeze
 * @devh: HAL device handle.
202 * Returns non-zero if the slot is freezed.
203 * The determination is made based on the adapter_status
204 * register which will never give all FFs, unless PCI read
 * itself fails (frozen/dead slot).
208 xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
210 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
211 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
214 xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
215 &bar0->adapter_status);
	/* Cross-check with a PCI config-space read: a device_id of 0xffff
	 * also indicates the slot is not responding. */
216 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
217 xge_offsetof(xge_hal_pci_config_le_t, device_id),
220 if (adapter_status == XGE_HAL_ALL_FOXES)
223 dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
225 printf(">>> Slot is frozen!\n");
229 return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
234 * __hal_device_led_actifity_fix
235 * @hldev: pointer to xge_hal_device_t structure
237 * SXE-002: Configure link and activity LED to turn it off
 * (hardware erratum workaround; applied only for qualifying subsystems).
240 __hal_device_led_actifity_fix(xge_hal_device_t *hldev)
242 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
246 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
247 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);
250 * In the case of Herc, there is a new register named beacon control
251 * is added which was not present in Xena.
252 * Beacon control register in Herc is at the same offset as
253 * gpio control register in Xena. It means they are one and same in
254 * the case of Xena. Also, gpio control register offset in Herc and
256 * The current register map represents Herc(It means we have
257 * both beacon and gpio control registers in register map).
258 * WRT transition from Xena to Herc, all the code in Xena which was
259 * using gpio control register for LED handling would have to
260 * use beacon control register in Herc and the rest of the code
261 * which uses gpio control in Xena would use the same register
263 * WRT LED handling(following code), In the case of Herc, beacon
264 * control register has to be used. This is applicable for Xena also,
265 * since it represents the gpio control register in Xena.
267 if ((subid & 0xFF) >= 0x07) {
268 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
269 &bar0->beacon_control);
		/* NOTE(review): magic bit presumably forces the LED off via
		 * beacon/gpio control — confirm against the register spec. */
270 val64 |= 0x0000800000000000ULL;
271 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
272 val64, &bar0->beacon_control);
273 val64 = 0x0411040400000000ULL;
		/* Raw write at BAR0 offset 0x2700 — register not present in the
		 * bar0 struct; value/offset per the SXE-002 workaround. */
274 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
275 (void *) ((u8 *)bar0 + 0x2700));
279 /* Constants for Fixing the MacAddress problem seen mostly on
 * Alpha platforms.  This table is replayed into beacon_control (the Xena
 * gpio control register) by __hal_device_xena_fix_mac(), which walks the
 * array until it hits the END_SIGN terminator entry. */
282 static u64 xena_fix_mac[] = {
283 0x0060000000000000ULL, 0x0060600000000000ULL,
284 0x0040600000000000ULL, 0x0000600000000000ULL,
285 0x0020600000000000ULL, 0x0060600000000000ULL,
286 0x0020600000000000ULL, 0x0060600000000000ULL,
287 0x0020600000000000ULL, 0x0060600000000000ULL,
288 0x0020600000000000ULL, 0x0060600000000000ULL,
289 0x0020600000000000ULL, 0x0060600000000000ULL,
290 0x0020600000000000ULL, 0x0060600000000000ULL,
291 0x0020600000000000ULL, 0x0060600000000000ULL,
292 0x0020600000000000ULL, 0x0060600000000000ULL,
293 0x0020600000000000ULL, 0x0060600000000000ULL,
294 0x0020600000000000ULL, 0x0060600000000000ULL,
295 0x0020600000000000ULL, 0x0000600000000000ULL,
296 0x0040600000000000ULL, 0x0060600000000000ULL,
301 * __hal_device_xena_fix_mac
302 * @hldev: HAL device handle.
304 * Fix for all "FFs" MAC address problems observed on Alpha platforms.
 * Replays the xena_fix_mac[] bit-bang sequence into beacon_control.
307 __hal_device_xena_fix_mac(xge_hal_device_t *hldev)
310 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
313 * In the case of Herc, there is a new register named beacon control
314 * is added which was not present in Xena.
315 * Beacon control register in Herc is at the same offset as
316 * gpio control register in Xena. It means they are one and same in
317 * the case of Xena. Also, gpio control register offset in Herc and
319 * The current register map represents Herc(It means we have
320 * both beacon and gpio control registers in register map).
321 * WRT transition from Xena to Herc, all the code in Xena which was
322 * using gpio control register for LED handling would have to
323 * use beacon control register in Herc and the rest of the code
324 * which uses gpio control in Xena would use the same register
326 * In the following code(xena_fix_mac), beacon control register has
327 * to be used in the case of Xena, since it represents gpio control
328 * register. In the case of Herc, there is no change required.
330 while (xena_fix_mac[i] != END_SIGN) {
331 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
332 xena_fix_mac[i++], &bar0->beacon_control);
338 * xge_hal_device_bcast_enable
339 * @hldev: HAL device handle.
341 * Enable receiving broadcasts.
342 * The host must first write RMAC_CFG_KEY "key"
343 * register, and then - MAC_CFG register.
346 xge_hal_device_bcast_enable(xge_hal_device_h devh)
348 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
349 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
352 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
354 val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;
	/* Unlock mac_cfg: the hardware requires the 0x4C0D key to be
	 * written to rmac_cfg_key immediately before each mac_cfg write. */
356 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
357 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
	/* Only the upper 32 bits of mac_cfg are written here; the broadcast
	 * enable bit lives in that half. */
359 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
360 (u32)(val64 >> 32), &bar0->mac_cfg);
362 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
363 (unsigned long long)val64,
364 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
368 * xge_hal_device_bcast_disable
369 * @hldev: HAL device handle.
371 * Disable receiving broadcasts.
372 * The host must first write RMAC_CFG_KEY "key"
373 * register, and then - MAC_CFG register.
376 xge_hal_device_bcast_disable(xge_hal_device_h devh)
378 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
379 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
382 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
385 val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE);
	/* Unlock mac_cfg with the 0x4C0D key, then update only the upper
	 * 32 bits (mirror of xge_hal_device_bcast_enable). */
386 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
387 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
389 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
390 (u32)(val64 >> 32), &bar0->mac_cfg);
392 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
393 (unsigned long long)val64,
394 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
398 * __hal_device_shared_splits_configure
399 * @hldev: HAL device handle.
401 * TxDMA will stop Read request if the number of read split had exceeded
402 * the limit set by shared_splits
 * (read-modify-write of the PIC control register with the configured value).
405 __hal_device_shared_splits_configure(xge_hal_device_t *hldev)
407 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
410 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
413 XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits);
414 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
416 xge_debug_device(XGE_TRACE, "%s", "shared splits configured");
420 * __hal_device_rmac_padding_configure
421 * @hldev: HAL device handle.
423 * Configure RMAC frame padding. Depends on configuration, it
424 * can be send to host or removed by MAC.
427 __hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
429 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	/* Unlock mac_cfg with the 0x4C0D key before modifying it. */
432 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
433 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
434 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	/* Clear promiscuous/all-address receive, enable TMAC pad append. */
436 val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE );
437 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
438 val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD;
441 * If the RTH enable bit is not set, strip the FCS
 * (when RTH is active the FCS must be kept for the hash).
443 if (!hldev->config.rth_en ||
444 !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
445 &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) {
446 val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS;
449 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD );
450 val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM;
	/* All touched bits are in the upper 32 bits of mac_cfg. */
452 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
453 (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
456 xge_debug_device(XGE_TRACE,
457 "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
458 (unsigned long long)val64);
462 * __hal_device_pause_frames_configure
463 * @hldev: HAL device handle.
465 * Set Pause threshold.
467 * Pause frame is generated if the amount of data outstanding
468 * on any queue exceeded the ratio of
469 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
472 __hal_device_pause_frames_configure(xge_hal_device_t *hldev)
474 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	/* Media-dependent default thresholds; SR/SW optics get a tighter
	 * value than LR/LW. ER/EW (and the default case) reuse the LR value. */
478 switch (hldev->config.mac.media) {
479 case XGE_HAL_MEDIA_SR:
480 case XGE_HAL_MEDIA_SW:
481 val64=0xfffbfffbfffbfffbULL;
483 case XGE_HAL_MEDIA_LR:
484 case XGE_HAL_MEDIA_LW:
485 val64=0xffbbffbbffbbffbbULL;
487 case XGE_HAL_MEDIA_ER:
488 case XGE_HAL_MEDIA_EW:
490 val64=0xffbbffbbffbbffbbULL;
494 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
495 val64, &bar0->mc_pause_thresh_q0q3);
496 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
497 val64, &bar0->mc_pause_thresh_q4q7);
499 /* Set the time value to be inserted in the pause frame generated
 * by the RMAC, and enable/disable generation/reception per config. */
501 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
502 &bar0->rmac_pause_cfg);
503 if (hldev->config.mac.rmac_pause_gen_en)
504 val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
506 val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
507 if (hldev->config.mac.rmac_pause_rcv_en)
508 val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
510 val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
511 val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
512 val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
513 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
514 &bar0->rmac_pause_cfg);
	/* Program the per-queue pause thresholds (one byte pair per queue)
	 * for queues 0-3 from mc_pause_threshold_q0q3. */
517 for (i = 0; i<4; i++) {
519 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
522 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
523 &bar0->mc_pause_thresh_q0q3);
	/* Same for queues 4-7 from mc_pause_threshold_q4q7. */
526 for (i = 0; i<4; i++) {
528 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
531 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
532 &bar0->mc_pause_thresh_q4q7);
533 xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
537 * Herc's clock rate doubled, unless the slot is 33MHz.
 * Returns the (possibly scaled) timer interval; Xena values pass through
 * unchanged.
539 unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
540 unsigned int time_ival)
542 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
545 xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);
	/* Scale only when the bus frequency is known and faster than 33MHz. */
547 if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
548 hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
556 * __hal_device_bus_master_disable
557 * @hldev: HAL device handle.
559 * Disable bus mastership.
 * Read-modify-write of the PCI command register in config space.
562 __hal_device_bus_master_disable (xge_hal_device_t *hldev)
567 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
568 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
570 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
571 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
575 * __hal_device_bus_master_enable
576 * @hldev: HAL device handle.
578 * Enable bus mastership.
 * Read-modify-write of the PCI command register; no-op if already enabled.
581 __hal_device_bus_master_enable (xge_hal_device_t *hldev)
586 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
587 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
589 /* already enabled? do nothing */
590 if (cmd & bus_master)
594 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
595 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
598 * __hal_device_intr_mgmt
599 * @hldev: HAL device handle.
600 * @mask: mask indicating which Intr block must be modified.
601 * @flag: if true - enable, otherwise - disable interrupts.
603 * Disable or enable device interrupts. Mask is used to specify
604 * which hardware blocks should produce interrupts. For details
605 * please refer to Xframe User Guide.
 *
 * Structure: general_int_mask is read once, updated per-block below, and
 * written back only if it actually changed.
608 __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
610 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
611 u64 val64 = 0, temp64 = 0;
614 gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev,
615 hldev->regh0, &bar0->general_int_mask);
617 /* Top level interrupt classification */
619 if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
620 /* Enable PIC Intrs in the general intr mask register */
621 val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
623 gim &= ~((u64) val64);
624 temp64 = xge_os_pio_mem_read64(hldev->pdev,
625 hldev->regh0, &bar0->pic_int_mask);
627 temp64 &= ~XGE_HAL_PIC_INT_TX;
628 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
629 if (xge_hal_device_check_id(hldev) ==
631 temp64 &= ~XGE_HAL_PIC_INT_MISC;
634 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
635 temp64, &bar0->pic_int_mask);
636 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
637 if (xge_hal_device_check_id(hldev) ==
640 * Unmask only Link Up interrupt
642 temp64 = xge_os_pio_mem_read64(hldev->pdev,
643 hldev->regh0, &bar0->misc_int_mask);
644 temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
645 xge_os_pio_mem_write64(hldev->pdev,
646 hldev->regh0, temp64,
647 &bar0->misc_int_mask);
648 xge_debug_device(XGE_TRACE,
649 "unmask link up flag "XGE_OS_LLXFMT,
650 (unsigned long long)temp64);
653 } else { /* flag == 0 */
655 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
656 if (xge_hal_device_check_id(hldev) ==
659 * Mask both Link Up and Down interrupts
661 temp64 = xge_os_pio_mem_read64(hldev->pdev,
662 hldev->regh0, &bar0->misc_int_mask);
663 temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
664 temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
665 xge_os_pio_mem_write64(hldev->pdev,
666 hldev->regh0, temp64,
667 &bar0->misc_int_mask);
668 xge_debug_device(XGE_TRACE,
669 "mask link up/down flag "XGE_OS_LLXFMT,
670 (unsigned long long)temp64);
673 /* Disable PIC Intrs in the general intr mask
675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
676 XGE_HAL_ALL_INTRS_DIS,
677 &bar0->pic_int_mask);
683 /* Enabling/Disabling Tx DMA interrupts */
684 if (mask & XGE_HAL_TX_DMA_INTR) {
685 /* Enable TxDMA Intrs in the general intr mask register */
686 val64 = XGE_HAL_TXDMA_INT_M;
688 gim &= ~((u64) val64);
689 /* Enable all TxDMA interrupts */
690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
691 0x0, &bar0->txdma_int_mask);
692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
693 0x0, &bar0->pfc_err_mask);
694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
695 0x0, &bar0->tda_err_mask);
696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
697 0x0, &bar0->pcc_err_mask);
698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
699 0x0, &bar0->tti_err_mask);
700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
701 0x0, &bar0->lso_err_mask);
702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
703 0x0, &bar0->tpa_err_mask);
704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
705 0x0, &bar0->sm_err_mask);
707 } else { /* flag == 0 */
709 /* Disable TxDMA Intrs in the general intr mask
711 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
712 XGE_HAL_ALL_INTRS_DIS,
713 &bar0->txdma_int_mask);
714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
715 XGE_HAL_ALL_INTRS_DIS,
716 &bar0->pfc_err_mask);
722 /* Enabling/Disabling Rx DMA interrupts */
723 if (mask & XGE_HAL_RX_DMA_INTR) {
724 /* Enable RxDMA Intrs in the general intr mask register */
725 val64 = XGE_HAL_RXDMA_INT_M;
728 gim &= ~((u64) val64);
729 /* All RxDMA block interrupts are disabled for now
731 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
732 XGE_HAL_ALL_INTRS_DIS,
733 &bar0->rxdma_int_mask);
735 } else { /* flag == 0 */
737 /* Disable RxDMA Intrs in the general intr mask
739 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
740 XGE_HAL_ALL_INTRS_DIS,
741 &bar0->rxdma_int_mask);
748 /* Enabling/Disabling MAC interrupts */
749 if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
750 val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
753 gim &= ~((u64) val64);
755 /* All MAC block error inter. are disabled for now. */
756 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
757 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
759 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
761 } else { /* flag == 0 */
763 /* Disable MAC Intrs in the general intr mask
765 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
766 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
767 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
768 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
774 /* XGXS Interrupts */
775 if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
776 val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
779 gim &= ~((u64) val64);
780 /* All XGXS block error interrupts are disabled for now
782 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
783 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
785 } else { /* flag == 0 */
787 /* Disable MC Intrs in the general intr mask register */
788 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
789 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
795 /* Memory Controller(MC) interrupts */
796 if (mask & XGE_HAL_MC_INTR) {
797 val64 = XGE_HAL_MC_INT_M;
800 gim &= ~((u64) val64);
802 /* Enable all MC blocks error interrupts */
803 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
804 0x0ULL, &bar0->mc_int_mask);
806 } else { /* flag == 0 */
808 /* Disable MC Intrs in the general intr mask
810 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
811 XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);
818 /* Tx traffic interrupts */
819 if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
820 val64 = XGE_HAL_TXTRAFFIC_INT_M;
823 gim &= ~((u64) val64);
825 /* Enable all the Tx side interrupts */
826 /* '0' Enables all 64 TX interrupt levels. */
827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
828 &bar0->tx_traffic_mask);
830 } else { /* flag == 0 */
832 /* Disable Tx Traffic Intrs in the general intr mask
834 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
835 XGE_HAL_ALL_INTRS_DIS,
836 &bar0->tx_traffic_mask);
841 /* Rx traffic interrupts */
842 if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
843 val64 = XGE_HAL_RXTRAFFIC_INT_M;
845 gim &= ~((u64) val64);
846 /* '0' Enables all 8 RX interrupt levels. */
847 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
848 &bar0->rx_traffic_mask);
850 } else { /* flag == 0 */
852 /* Disable Rx Traffic Intrs in the general intr mask
855 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
856 XGE_HAL_ALL_INTRS_DIS,
857 &bar0->rx_traffic_mask);
863 /* Sched Timer interrupt */
864 if (mask & XGE_HAL_SCHED_INTR) {
866 temp64 = xge_os_pio_mem_read64(hldev->pdev,
867 hldev->regh0, &bar0->txpic_int_mask);
868 temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
869 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
870 temp64, &bar0->txpic_int_mask);
872 xge_hal_device_sched_timer(hldev,
873 hldev->config.sched_timer_us,
874 hldev->config.sched_timer_one_shot);
876 temp64 = xge_os_pio_mem_read64(hldev->pdev,
877 hldev->regh0, &bar0->txpic_int_mask);
878 temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;
880 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
881 temp64, &bar0->txpic_int_mask);
883 xge_hal_device_sched_timer(hldev,
884 XGE_HAL_SCHED_TIMER_DISABLED,
885 XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
	/* Commit general_int_mask only when some block above changed it. */
889 if (gim != gim_saved) {
890 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
891 &bar0->general_int_mask);
892 xge_debug_device(XGE_TRACE, "general_int_mask updated "
893 XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
894 (unsigned long long)gim_saved, (unsigned long long)gim);
899 * __hal_device_bimodal_configure
900 * @hldev: HAL device handle.
902 * Bimodal parameters initialization.
 * For each configured ring, derives a per-ring TTI from the bimodal
 * settings and overrides the ring's RTI with fixed tuning values.
905 __hal_device_bimodal_configure(xge_hal_device_t *hldev)
909 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
910 xge_hal_tti_config_t *tti;
911 xge_hal_rti_config_t *rti;
913 if (!hldev->config.ring.queue[i].configured)
915 rti = &hldev->config.ring.queue[i].rti;
916 tti = &hldev->bimodal_tti[i];
		/* urange_a/ufc_a scale with the bimodal enable flag (0 or 1),
		 * so they collapse to 0 when range A is disabled. */
919 tti->urange_a = hldev->bimodal_urange_a_en * 10;
922 tti->ufc_a = hldev->bimodal_urange_a_en * 8;
926 tti->timer_val_us = hldev->bimodal_timer_val_us;
927 tti->timer_ac_en = 1;
928 tti->timer_ci_en = 0;
933 rti->ufc_a = 1; /* <= for netpipe type of tests */
936 rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */
937 rti->timer_ac_en = 1;
938 rti->timer_val_us = 5; /* for optimal bus efficiency usage */
943 * __hal_device_tti_apply
944 * @hldev: HAL device handle.
 * @tti: TTI configuration to program.
 * @num: TTI memory slot index.
 * @runtime: non-zero when invoked after start (uses isrbar0, skips poll).
946 * apply TTI configuration.
 * Returns XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING if the strobe poll times
 * out (caller may retry); otherwise the configured slot is committed.
948 static xge_hal_status_e
949 __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
950 int num, int runtime)
952 u64 val64, data1 = 0, data2 = 0;
953 xge_hal_pci_bar0_t *bar0;
956 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
958 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
960 if (tti->timer_val_us) {
961 unsigned int tx_interval;
		/* Convert microseconds to timer ticks when the PCI clock is
		 * known; Herc parts need the additional clock-rate fixup. */
963 if (hldev->config.pci_freq_mherz) {
964 tx_interval = hldev->config.pci_freq_mherz *
965 tti->timer_val_us / 64;
967 __hal_fix_time_ival_herc(hldev,
970 tx_interval = tti->timer_val_us;
972 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
973 if (tti->timer_ac_en) {
974 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
976 if (tti->timer_ci_en) {
977 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
981 xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
982 num, tx_interval, tti->timer_ci_en ?
983 "enabled": "disabled");
	/* Pack utilization ranges and frame counts into data1/data2. */
994 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
995 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
996 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
998 data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
999 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
1000 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
1001 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
	/* Write each data register and read it back to flush the posted
	 * write before issuing the command strobe. */
1004 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
1005 &bar0->tti_data1_mem);
1006 (void)xge_os_pio_mem_read64(hldev->pdev,
1007 hldev->regh0, &bar0->tti_data1_mem);
1008 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
1009 &bar0->tti_data2_mem);
1010 (void)xge_os_pio_mem_read64(hldev->pdev,
1011 hldev->regh0, &bar0->tti_data2_mem);
1014 val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
1015 XGE_HAL_TTI_CMD_MEM_OFFSET(num);
1016 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1017 &bar0->tti_command_mem);
	/* At init time wait for the strobe bit to clear; at runtime the
	 * wait is skipped and the upper layer may have to repeat. */
1019 if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
1020 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
1021 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1022 /* upper layer may require to repeat */
1023 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1027 xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
1029 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1030 hldev->regh0, &bar0->tti_data1_mem));
1037 * __hal_device_tti_configure
1038 * @hldev: HAL device handle.
 * @runtime: non-zero when reconfiguring a running device.
1040 * TTI Initialization.
1041 * Initialize Transmit Traffic Interrupt Scheme.
 * Applies every enabled per-fifo TTI, then every enabled bimodal TTI.
1043 static xge_hal_status_e
1044 __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
1048 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
1051 if (!hldev->config.fifo.queue[i].configured)
1054 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
1055 xge_hal_status_e status;
1057 if (!hldev->config.fifo.queue[i].tti[j].enabled)
1060 /* at least some TTI enabled. Record it. */
1061 hldev->tti_enabled = 1;
			/* Slot index is linear: fifo i, sub-index j. */
1063 status = __hal_device_tti_apply(hldev,
1064 &hldev->config.fifo.queue[i].tti[j],
1065 i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
1066 if (status != XGE_HAL_OK)
1071 /* processing bimodal TTIs */
1072 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1073 xge_hal_status_e status;
1075 if (!hldev->bimodal_tti[i].enabled)
1078 /* at least some bimodal TTI enabled. Record it. */
1079 hldev->tti_enabled = 1;
		/* Bimodal slots start after the per-fifo TTI slots. */
1081 status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
1082 XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
1083 if (status != XGE_HAL_OK)
1092 * __hal_device_rti_configure
1093 * @hldev: HAL device handle.
 * @runtime: non-zero when reconfiguring a running device.
1095 * RTI Initialization.
1096 * Initialize Receive Traffic Interrupt Scheme.
1099 __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
1101 xge_hal_pci_bar0_t *bar0;
1102 u64 val64, data1 = 0, data2 = 0;
1107 * we don't want to re-configure RTI in case when
1108 * bimodal interrupts are in use. Instead reconfigure TTI
1109 * with new RTI values.
1111 if (hldev->config.bimodal_interrupts) {
1112 __hal_device_bimodal_configure(hldev);
1113 return __hal_device_tti_configure(hldev, 1);
1115 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
1117 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1119 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1120 xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;
1122 if (!hldev->config.ring.queue[i].configured)
1125 if (rti->timer_val_us) {
1126 unsigned int rx_interval;
			/* Convert microseconds to timer ticks when the PCI
			 * clock is known; Herc needs the clock-rate fixup. */
1128 if (hldev->config.pci_freq_mherz) {
1129 rx_interval = hldev->config.pci_freq_mherz *
1130 rti->timer_val_us / 8;
1132 __hal_fix_time_ival_herc(hldev,
1135 rx_interval = rti->timer_val_us;
1137 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
1138 if (rti->timer_ac_en) {
1139 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
1141 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
1144 if (rti->urange_a ||
		/* Pack utilization ranges and frame counts into data1/data2. */
1151 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
1152 XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
1153 XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);
1155 data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
1156 XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
1157 XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
1158 XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
		/* Write-then-readback flushes each posted data write before
		 * the command strobe (same pattern as the TTI path). */
1161 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
1162 &bar0->rti_data1_mem);
1163 (void)xge_os_pio_mem_read64(hldev->pdev,
1164 hldev->regh0, &bar0->rti_data1_mem);
1165 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
1166 &bar0->rti_data2_mem);
1167 (void)xge_os_pio_mem_read64(hldev->pdev,
1168 hldev->regh0, &bar0->rti_data2_mem);
1171 val64 = XGE_HAL_RTI_CMD_MEM_WE |
1172 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
1173 val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
1174 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1175 &bar0->rti_command_mem);
		/* At init time wait for the strobe to clear; at runtime let
		 * the upper layer repeat on EXECUTING. */
1177 if (!runtime && __hal_device_register_poll(hldev,
1178 &bar0->rti_command_mem, 0,
1179 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
1180 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1181 /* upper layer may require to repeat */
1182 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1186 xge_debug_device(XGE_TRACE,
1187 "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
1189 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1190 hldev->regh0, &bar0->rti_data1_mem));
/* MDIO register write sequence for the Xena adapter's XAUI transceiver.
 * Values are written one-by-one to bar0->mdio_control by
 * __hal_device_xaui_configure(). NOTE(review): the table's trailing
 * entries (END_SIGN terminator) are not visible in this view. */
1198 /* Constants to be programmed into the Xena's registers to configure
1200 static u64 default_xena_mdio_cfg[] = {
1202 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1203 0xC0010100008000E4ULL,
1204 /* Remove Reset from PMA PLL */
1205 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1206 0xC0010100000000E4ULL,
1210 static u64 default_herc_mdio_cfg[] = {
/* DTX control write sequence for the Xena adapter's XAUI interface.
 * Values are written serially to bar0->dtx_control by
 * __hal_device_xaui_configure(). NOTE(review): the END_SIGN terminator
 * entry is not visible in this view. */
1214 static u64 default_xena_dtx_cfg[] = {
1215 0x8000051500000000ULL, 0x80000515000000E0ULL,
1216 0x80000515D93500E4ULL, 0x8001051500000000ULL,
1217 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
1218 0x8002051500000000ULL, 0x80020515000000E0ULL,
1219 0x80020515F21000E4ULL,
1220 /* Set PADLOOPBACKN */
1221 0x8002051500000000ULL, 0x80020515000000E0ULL,
1222 0x80020515B20000E4ULL, 0x8003051500000000ULL,
1223 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
1224 0x8004051500000000ULL, 0x80040515000000E0ULL,
1225 0x80040515B20000E4ULL, 0x8005051500000000ULL,
1226 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
1228 /* Remove PADLOOPBACKN */
1229 0x8002051500000000ULL, 0x80020515000000E0ULL,
1230 0x80020515F20000E4ULL, 0x8003051500000000ULL,
1231 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
1232 0x8004051500000000ULL, 0x80040515000000E0ULL,
1233 0x80040515F20000E4ULL, 0x8005051500000000ULL,
1234 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
/* DTX control write sequence for the Herc adapter — first variant.
 * NOTE(review): a second definition of the same array follows below,
 * so these two tables are presumably the two arms of an #ifdef whose
 * preprocessor directives are not visible in this view. */
1239 static u64 default_herc_dtx_cfg[] = {
1240 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
1241 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
1242 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1243 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1244 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1245 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* DTX control write sequence for the Herc adapter — alternate variant
 * (see the duplicate definition above; the selecting #ifdef/#else is
 * not visible in this view). Written serially to bar0->dtx_control by
 * __hal_device_xaui_configure(). */
1250 static u64 default_herc_dtx_cfg[] = {
1251 0x8000051536750000ULL, 0x80000515367500E0ULL,
1252 0x8000051536750004ULL, 0x80000515367500E4ULL,
1254 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1255 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1257 0x801205150D440000ULL, 0x801205150D4400E0ULL,
1258 0x801205150D440004ULL, 0x801205150D4400E4ULL,
1260 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1261 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/* Write a 64-bit value to a device register as two serialized 32-bit
 * PIO writes, upper half first, then the lower half. Used where a
 * single atomic 64-bit write is not appropriate (see the swapper
 * errata note further below). */
1267 __hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
1269 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
1270 (u32)(value>>32), reg);
1272 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
/* Read a 64-bit device register via a PIO read. NOTE(review): the
 * remainder of this function (any second read / combine step and the
 * return) is not visible in this view. */
1279 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
1281 u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1288 * __hal_device_xaui_configure
1289 * @hldev: HAL device handle.
1291 * Configure XAUI Interface of Xena.
1293 * To Configure the Xena's XAUI, one has to write a series
1294 * of 64 bit values into two registers in a particular
1295 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
1296 * which will be defined in the array of configuration values
1297 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
1298 * to switch writing from one register to another. We continue
1299 * writing these values until we encounter the 'END_SIGN' macro.
1300 * For example, After making a series of 21 writes into
1301 * dtx_control register the 'SWITCH_SIGN' appears and hence we
1302 * start writing into mdio_control until we encounter END_SIGN.
1305 __hal_device_xaui_configure(xge_hal_device_t *hldev)
1307 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1308 int mdio_cnt = 0, dtx_cnt = 0;
1309 u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;
/* Select the per-card configuration tables defined above. */
1311 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
1312 default_dtx_cfg = default_xena_dtx_cfg;
1313 default_mdio_cfg = default_xena_mdio_cfg;
1314 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
1315 default_dtx_cfg = default_herc_dtx_cfg;
1316 default_mdio_cfg = default_herc_mdio_cfg;
/* Fires for unrecognized card ids, which leave the table NULL. */
1318 xge_assert(default_dtx_cfg);
/* Interleave writes: dtx_control entries until SWITCH_SIGN, then
 * mdio_control entries, repeating until both tables hit END_SIGN. */
1324 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
1325 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
1329 __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt],
1330 &bar0->dtx_control);
1334 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
1335 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
1339 __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt],
1340 &bar0->mdio_control);
1343 } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
1344 (default_mdio_cfg[mdio_cnt] == END_SIGN)) );
1346 xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
1350 * __hal_device_mac_link_util_set
1351 * @hldev: HAL device handle.
1353 * Set sampling rate to calculate link utilization.
1356 __hal_device_mac_link_util_set(xge_hal_device_t *hldev)
1358 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Program the TMAC/RMAC utilization sampling periods from the
 * user-supplied MAC configuration in a single register write. */
1361 val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
1362 hldev->config.mac.tmac_util_period) |
1363 XGE_HAL_MAC_RX_LINK_UTIL_VAL(
1364 hldev->config.mac.rmac_util_period);
1365 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1366 &bar0->mac_link_util);
1367 xge_debug_device(XGE_TRACE, "%s",
1368 "bandwidth link utilization configured");
1372 * __hal_device_set_swapper
1373 * @hldev: HAL device handle.
1375 * Set the Xframe's byte "swapper" in accordance with
1376 * endianness of the host.
1379 __hal_device_set_swapper(xge_hal_device_t *hldev)
1381 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1385 * from 32bit errata:
1387 * The SWAPPER_CONTROL register determines how the adapter accesses
1388 * host memory as well as how it responds to read and write requests
1389 * from the host system. Writes to this register should be performed
1390 * carefully, since the byte swappers could reverse the order of bytes.
1391 * When configuring this register keep in mind that writes to the PIF
1392 * read and write swappers could reverse the order of the upper and
1393 * lower 32-bit words. This means that the driver may have to write
1394 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
1395 * configure the entire register. */
1398 * The device by default set to a big endian format, so a big endian
1399 * driver need not set anything.
1402 #if defined(XGE_HAL_CUSTOM_HW_SWAPPER)
/* Platform provided its own swapper value: first open up the register
 * with all-ones, then program the custom setting. */
1404 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1405 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1407 val64 = XGE_HAL_CUSTOM_HW_SWAPPER;
1410 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1411 &bar0->swapper_ctrl);
1413 xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
1414 (unsigned long long)val64);
1416 #elif !defined(XGE_OS_HOST_BIG_ENDIAN)
1419 * Initially we enable all bits to make it accessible by the driver,
1420 * then we selectively enable only those bits that we want to set.
1421 * i.e. force swapper to swap for the first time since second write
1422 * will overwrite with the final settings.
1424 * Use only for little endian platforms.
1426 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1427 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1429 val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
1430 XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
1431 XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
1432 XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
1433 XGE_HAL_SWAPPER_CTRL_RTH_FE |
1434 XGE_HAL_SWAPPER_CTRL_RTH_SE |
1435 XGE_HAL_SWAPPER_CTRL_TXP_FE |
1436 XGE_HAL_SWAPPER_CTRL_TXP_SE |
1437 XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
1438 XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
1439 XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
1440 XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
1441 XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
1442 XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
1443 XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
1444 XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
1445 XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
1446 XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
1447 XGE_HAL_SWAPPER_CTRL_XMSI_FE |
1448 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);
1451 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
1452 val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
/* Lower half first, then the upper half written TWICE — the double
 * upper write works around the word-swap errata quoted above. */
1454 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
1455 &bar0->swapper_ctrl);
1457 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1458 &bar0->swapper_ctrl);
1460 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1461 &bar0->swapper_ctrl);
1462 xge_debug_device(XGE_TRACE, "%s", "using little endian set");
1465 /* Verifying if endian settings are accurate by reading a feedback
/* The feedback register must read back the known signature; anything
 * else means the swapper is misprogrammed and PIO access is unsafe. */
1467 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1468 &bar0->pif_rd_swapper_fb);
1469 if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
1470 xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
1471 (unsigned long long) val64);
1472 return XGE_HAL_ERR_SWAPPER_CTRL;
1475 xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");
1481 * __hal_device_rts_mac_configure - Configure RTS steering based on
1482 * destination mac address.
1483 * @hldev: HAL device handle.
1487 __hal_device_rts_mac_configure(xge_hal_device_t *hldev)
1489 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* No-op when MAC-based receive steering is disabled in the config. */
1492 if (!hldev->config.rts_mac_en) {
1497 * Set the receive traffic steering mode from default(classic)
/* Read-modify-write of rts_ctrl to switch into enhanced steering. */
1500 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1502 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1503 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1504 val64, &bar0->rts_ctrl);
1509 * __hal_device_rts_port_configure - Configure RTS steering based on
1510 * destination or source port number.
1511 * @hldev: HAL device handle.
1515 __hal_device_rts_port_configure(xge_hal_device_t *hldev)
1517 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* No-op when port-based receive steering is disabled in the config. */
1521 if (!hldev->config.rts_port_en) {
1526 * Set the receive traffic steering mode from default(classic)
1529 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1531 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1533 val64, &bar0->rts_ctrl);
1536 * Initiate port steering according to per-ring configuration
/* Walk every configured ring and program its steerable ports into
 * the port-number CAM, one CAM entry per (ring, port) pair. */
1538 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1540 xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum];
1542 if (!queue->configured || queue->rts_port_en)
1545 for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) {
1546 xge_hal_rts_port_t *port = &queue->rts_ports[pnum];
1549 * Skip and clear empty ports
1555 xge_os_pio_mem_write64(hldev->pdev,
1557 &bar0->rts_pn_cam_data);
1559 val64 = BIT(7) | BIT(15);
1562 * Assign new Port values according
/* CAM data: port number, owning ring, and an enable bit. */
1565 val64 = vBIT(port->num,8,16) |
1566 vBIT(rnum,37,3) | BIT(63);
1571 xge_os_pio_mem_write64(hldev->pdev,
1572 hldev->regh0, val64,
1573 &bar0->rts_pn_cam_data);
/* CAM control: write-enable + strobe + entry offset pnum. */
1575 val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8);
1578 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1579 val64, &bar0->rts_pn_cam_ctrl);
1581 /* poll until done */
1582 if (__hal_device_register_poll(hldev,
1583 &bar0->rts_pn_cam_ctrl, 0,
1584 XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED,
1585 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) !=
1587 /* upper layer may require to repeat */
1588 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1596 * __hal_device_rts_qos_configure - Configure RTS steering based on
1598 * @hldev: HAL device handle.
1602 __hal_device_rts_qos_configure(xge_hal_device_t *hldev)
1604 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* No-op when QoS-based receive steering is disabled in the config. */
1608 if (!hldev->config.rts_qos_en) {
1612 /* First clear the RTS_DS_MEM_DATA */
/* Zero all 64 entries of the DS (differentiated services) steering
 * memory, strobing and polling each entry write to completion. */
1614 for (j = 0; j < 64; j++ )
1616 /* First clear the value */
1617 val64 = XGE_HAL_RTS_DS_MEM_DATA(0);
1619 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1620 &bar0->rts_ds_mem_data);
1622 val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE |
1623 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
1624 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j );
1626 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1627 &bar0->rts_ds_mem_ctrl);
1630 /* poll until done */
1631 if (__hal_device_register_poll(hldev,
1632 &bar0->rts_ds_mem_ctrl, 0,
1633 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
1634 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1635 /* upper layer may require to repeat */
1636 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* Count configured rings to pick a round-robin priority pattern. */
1642 for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) {
1643 if (hldev->config.ring.queue[j].configured)
/* Each case programs the five rx_w_round_robin registers with a
 * byte pattern that cycles ring numbers 0..rx_ring_num-1. */
1647 switch (rx_ring_num) {
1650 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1651 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1652 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1653 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1654 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1657 val64 = 0x0001000100010001ULL;
1658 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1659 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1660 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1661 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1662 val64 = 0x0001000100000000ULL;
1663 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1666 val64 = 0x0001020001020001ULL;
1667 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1668 val64 = 0x0200010200010200ULL;
1669 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1670 val64 = 0x0102000102000102ULL;
1671 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1672 val64 = 0x0001020001020001ULL;
1673 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1674 val64 = 0x0200010200000000ULL;
1675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1678 val64 = 0x0001020300010203ULL;
1679 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1680 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1681 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1682 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1683 val64 = 0x0001020300000000ULL;
1684 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1687 val64 = 0x0001020304000102ULL;
1688 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1689 val64 = 0x0304000102030400ULL;
1690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1691 val64 = 0x0102030400010203ULL;
1692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1693 val64 = 0x0400010203040001ULL;
1694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1695 val64 = 0x0203040000000000ULL;
1696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1699 val64 = 0x0001020304050001ULL;
1700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1701 val64 = 0x0203040500010203ULL;
1702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1703 val64 = 0x0405000102030405ULL;
1704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1705 val64 = 0x0001020304050001ULL;
1706 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1707 val64 = 0x0203040500000000ULL;
1708 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1711 val64 = 0x0001020304050600ULL;
1712 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1713 val64 = 0x0102030405060001ULL;
1714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1715 val64 = 0x0203040506000102ULL;
1716 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1717 val64 = 0x0304050600010203ULL;
1718 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1719 val64 = 0x0405060000000000ULL;
1720 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1723 val64 = 0x0001020304050607ULL;
1724 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1725 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1726 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1727 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
/* NOTE(review): the 8-ring tail pattern 0x0001020300000000 differs in
 * shape from the other cases' tails — confirm against hardware docs. */
1728 val64 = 0x0001020300000000ULL;
1729 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1737 * xge_hal_device_rts_mac_enable
1739 * @devh: HAL device handle.
1740 * @index: index number where the MAC addr will be stored
1741 * @macaddr: MAC address
1743 * - Enable RTS steering for the given MAC address. This function has to be
1744 * called with lock acquired.
1747 * 1. ULD has to call this function with the index value which
1748 * satisfies the following condition:
1749 * ring_num = (index % 8)
1750 * 2.ULD also needs to make sure that the index is not
1751 * occupied by any MAC address. If that index has any MAC address
1752 * it will be overwritten and HAL will not check for it.
1756 xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
1758 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1759 xge_hal_status_e status;
1761 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
/* Herc supports a larger MAC address table than Xena. */
1763 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1764 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1766 if ( index >= max_addr )
1767 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1770 * Set the MAC address at the given location marked by index.
1772 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1773 if (status != XGE_HAL_OK) {
1774 xge_debug_device(XGE_ERR, "%s",
1775 "Not able to set the mac addr");
/* Enable the RTS section that corresponds to this index. */
1779 return xge_hal_device_rts_section_enable(hldev, index);
1783 * xge_hal_device_rts_mac_disable
1784 * @hldev: HAL device handle.
1785 * @index: index number where to disable the MAC addr
1787 * Disable RTS Steering based on the MAC address.
1788 * This function should be called with lock acquired.
1792 xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
1794 xge_hal_status_e status;
/* The all-ones (broadcast) address marks the table slot as unused. */
1795 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1796 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1798 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1800 xge_debug_ll(XGE_TRACE, "the index value is %d ", index);
/* Herc supports a larger MAC address table than Xena. */
1802 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1803 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1805 if ( index >= max_addr )
1806 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1809 * Disable MAC address @ given index location
1811 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1812 if (status != XGE_HAL_OK) {
1813 xge_debug_device(XGE_ERR, "%s",
1814 "Not able to set the mac addr");
1823 * __hal_device_rth_it_configure - Configure RTH for the device
1824 * @hldev: HAL device handle.
1826 * Using IT (Indirection Table).
1829 __hal_device_rth_it_configure(xge_hal_device_t *hldev)
1831 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1833 int rings[XGE_HAL_MAX_RING_NUM]={0};
/* No-op when receive-traffic hashing is disabled in the config. */
1839 if (!hldev->config.rth_en) {
1844 * Set the receive traffic steering mode from default(classic)
1847 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1849 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1850 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1851 val64, &bar0->rts_ctrl);
/* Indirection table size is 2^rth_bucket_size buckets. */
1853 buckets_num = (1 << hldev->config.rth_bucket_size);
/* Collect the ring numbers that are both configured and RTH-enabled. */
1856 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1857 if (hldev->config.ring.queue[rnum].configured &&
1858 hldev->config.ring.queue[rnum].rth_en)
1859 rings[rmax++] = rnum;
1863 /* for starters: fill in all the buckets with rings "equally" */
1864 for (bucket = 0; bucket < buckets_num; bucket++) {
/* Program one indirection-table entry, then strobe and poll. */
1870 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
1871 XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
1872 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1873 &bar0->rts_rth_map_mem_data);
1876 val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
1877 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
1878 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
1879 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1880 &bar0->rts_rth_map_mem_ctrl);
1882 /* poll until done */
1883 if (__hal_device_register_poll(hldev,
1884 &bar0->rts_rth_map_mem_ctrl, 0,
1885 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
1886 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1887 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* Finally enable RTH for all supported IPv4/IPv6 TCP/UDP flows. */
1893 val64 = XGE_HAL_RTS_RTH_EN;
1894 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
1895 val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN |
1896 XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
1897 XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN;
1899 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1900 &bar0->rts_rth_cfg);
1902 xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
1903 hldev->config.rth_bucket_size);
1910 * __hal_spdm_entry_add - Add a new entry to the SPDM table.
1912 * Add a new entry to the SPDM table
1914 * This function add a new entry to the SPDM table.
1917 * This function should be called with spdm_lock.
1919 * See also: xge_hal_spdm_entry_add , xge_hal_spdm_entry_remove.
1921 static xge_hal_status_e
1922 __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
1923 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
1924 u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
1926 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1928 u64 spdm_line_arr[8];
1932 * Clear the SPDM READY bit
/* Write-1-to-clear the READY bit; the adapter re-asserts it once the
 * new entry has been consumed (polled for below). */
1934 val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
1935 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1936 &bar0->rxpic_int_reg);
1938 xge_debug_device(XGE_TRACE,
1939 "L4 SP %x:DP %x: hash %x tgt_queue %d ",
1940 l4_sp, l4_dp, jhash_value, tgt_queue);
1942 xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));
1945 * Construct the SPDM entry.
/* Line 0: L4 source port, target queue and flags packed into bits. */
1947 spdm_line_arr[0] = vBIT(l4_sp,0,16) |
1949 vBIT(tgt_queue,53,3) |
/* IPv4 packs both addresses into line 1; IPv6 uses lines 1-4. */
1955 spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
1956 vBIT(dst_ip->ipv4.addr,32,32);
1959 xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
1960 xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
1961 xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
1962 xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
1965 spdm_line_arr[7] = vBIT(jhash_value,0,32) |
1966 BIT(63); /* entry enable bit */
1969 * Add the entry to the SPDM table
1971 for(line_no = 0; line_no < 8; line_no++) {
1972 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1973 spdm_line_arr[line_no],
1974 (void *)((char *)hldev->spdm_mem_base +
1980 * Wait for the operation to be completed.
1982 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
1983 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
1984 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1985 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1989 * Add this information to a local SPDM table. The purpose of
1990 * maintaining a local SPDM table is to avoid a search in the
1991 * adapter SPDM table for spdm entry lookup which is very costly
/* Shadow copy enables cheap lookups and post-reset re-programming
 * (see __hal_device_rth_spdm_configure). */
1994 hldev->spdm_table[spdm_entry]->in_use = 1;
1995 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
1996 sizeof(xge_hal_ipaddr_t));
1997 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
1998 sizeof(xge_hal_ipaddr_t));
1999 hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
2000 hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
2001 hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
2002 hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
2003 hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
2004 hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
2005 hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;
2011 * __hal_device_rth_spdm_configure - Configure RTH for the device
2012 * @hldev: HAL device handle.
2014 * Using SPDM (Socket-Pair Direct Match).
2017 __hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
2019 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
2022 u32 spdm_bar_offset;
2023 int spdm_table_size;
/* No-op when SPDM-based RTH is disabled in the config. */
2026 if (!hldev->config.rth_spdm_en) {
2031 * Retrieve the base address of SPDM Table.
2033 val64 = xge_os_pio_mem_read64(hldev->pdev,
2034 hldev->regh0, &bar0->spdm_bir_offset);
2036 spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
2037 spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);
2041 * spdm_bar_num specifies the PCI bar num register used to
2042 * address the memory space. spdm_bar_offset specifies the offset
2043 * of the SPDM memory with in the bar num memory space.
2045 switch (spdm_bar_num) {
/* Offset is in 8-byte units, hence the *8 scaling below. */
2048 hldev->spdm_mem_base = (char *)bar0 +
2049 (spdm_bar_offset * 8);
2054 char *bar1 = (char *)hldev->bar1;
2055 hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
/* Only BARs 0 and 1 are valid hosts for the SPDM memory. */
2059 xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1)));
2063 * Retrieve the size of SPDM table(number of entries).
2065 val64 = xge_os_pio_mem_read64(hldev->pdev,
2066 hldev->regh0, &bar0->spdm_structure);
2067 hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);
2070 spdm_table_size = hldev->spdm_max_entries *
2071 sizeof(xge_hal_spdm_entry_t);
/* First-time initialization: build the local shadow table. */
2072 if (hldev->spdm_table == NULL) {
2076 * Allocate memory to hold the copy of SPDM table.
2078 if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
2081 (sizeof(xge_hal_spdm_entry_t *) *
2082 hldev->spdm_max_entries))) == NULL) {
2083 return XGE_HAL_ERR_OUT_OF_MEMORY;
/* Free the pointer array on failure to avoid leaking it. */
2086 if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL)
2088 xge_os_free(hldev->pdev, hldev->spdm_table,
2089 (sizeof(xge_hal_spdm_entry_t *) *
2090 hldev->spdm_max_entries));
2091 return XGE_HAL_ERR_OUT_OF_MEMORY;
/* Carve the flat allocation into per-entry pointers. */
2094 xge_os_memzero(mem, spdm_table_size);
2095 for (i = 0; i < hldev->spdm_max_entries; i++) {
2096 hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
2098 i * sizeof(xge_hal_spdm_entry_t));
2100 xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
2103 * We are here because the host driver tries to
2104 * do a soft reset on the device.
2105 * Since the device soft reset clears the SPDM table, copy
2106 * the entries from the local SPDM table to the actual one.
2108 xge_os_spin_lock(&hldev->spdm_lock);
2109 for (i = 0; i < hldev->spdm_max_entries; i++) {
2110 xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];
2112 if (spdm_entry->in_use) {
2113 if (__hal_spdm_entry_add(hldev,
2114 &spdm_entry->src_ip,
2115 &spdm_entry->dst_ip,
2119 spdm_entry->is_ipv4,
2120 spdm_entry->tgt_queue,
2121 spdm_entry->jhash_value,
2122 spdm_entry->spdm_entry)
2124 /* Log a warning */
2125 xge_debug_device(XGE_ERR,
2126 "SPDM table update from local"
2131 xge_os_spin_unlock(&hldev->spdm_lock);
2135 * Set the receive traffic steering mode from default(classic)
2138 val64 = xge_os_pio_mem_read64(hldev->pdev,
2139 hldev->regh0, &bar0->rts_ctrl);
2140 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
2141 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2142 val64, &bar0->rts_ctrl);
2145 * We may not need to configure rts_rth_jhash_cfg register as the
2146 * default values are good enough to calculate the hash.
2150 * As of now, set all the rth mask registers to zero. TODO.
2152 for(i = 0; i < 5; i++) {
2153 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2154 0, &bar0->rts_rth_hash_mask[i]);
2157 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2158 0, &bar0->rts_rth_hash_mask_5);
2160 if (hldev->config.rth_spdm_use_l4) {
2161 val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
2162 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2163 val64, &bar0->rts_rth_status);
/* SPDM mode only enables IPv4/TCP-IPv4 hashing (contrast with the
 * broader flow set enabled in __hal_device_rth_it_configure). */
2166 val64 = XGE_HAL_RTS_RTH_EN;
2167 val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
2168 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2169 &bar0->rts_rth_cfg);
2176 * __hal_device_pci_init
2177 * @hldev: HAL device handle.
2179 * Initialize certain PCI/PCI-X configuration registers
2180 * with recommended values. Save config space for future hw resets.
2183 __hal_device_pci_init(xge_hal_device_t *hldev)
2189 /* Store PCI device ID and revision for future references where in we
2190 * decide Xena revision using PCI sub system ID */
2191 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
2192 xge_offsetof(xge_hal_pci_config_le_t, device_id),
2194 xge_os_pci_read8(hldev->pdev,hldev->cfgh,
2195 xge_offsetof(xge_hal_pci_config_le_t, revision),
/* Config-space size (in 32-bit words) differs per card generation. */
2198 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
2199 pcisize = XGE_HAL_PCISIZE_HERC;
2200 else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
2201 pcisize = XGE_HAL_PCISIZE_XENA;
2203 /* save original PCI config space to restore it on device_terminate() */
2204 for (i = 0; i < pcisize; i++) {
2205 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2206 (u32*)&hldev->pci_config_space_bios + i);
2209 /* Set the PErr Response bit and SERR in PCI command register. */
2210 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2211 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
2213 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2214 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
2216 /* Set user specified value for the PCI Latency Timer */
2217 if (hldev->config.latency_timer &&
2218 hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
2219 xge_os_pci_write8(hldev->pdev, hldev->cfgh,
2220 xge_offsetof(xge_hal_pci_config_le_t,
2222 (u8)hldev->config.latency_timer);
2224 /* Read back latency timer to reflect it into user level */
2225 xge_os_pci_read8(hldev->pdev, hldev->cfgh,
2226 xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
2227 hldev->config.latency_timer = val;
2229 /* Enable Data Parity Error Recovery in PCI-X command register. */
2230 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2231 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2233 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2234 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2236 /* Set MMRB count in PCI-X command register. */
/* MMRB (maximum memory read byte count) lives at bits 3:2, hence the
 * shift by 2 on write and read-back. */
2237 if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
2239 cmd |= hldev->config.mmrb_count << 2;
2240 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2241 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2244 /* Read back MMRB count to reflect it into user level */
2245 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2246 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2249 hldev->config.mmrb_count = cmd>>2;
2251 /* Setting Maximum outstanding splits based on system type. */
/* Max outstanding split transactions field starts at bit 4. */
2252 if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
2253 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2254 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2257 cmd |= hldev->config.max_splits_trans << 4;
2258 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2259 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2263 /* Read back max split trans to reflect it into user level */
2264 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2265 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2267 hldev->config.max_splits_trans = cmd>>4;
2269 /* Forcibly disabling relaxed ordering capability of the card. */
2270 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2271 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2273 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2274 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2276 /* save PCI config space for future resets */
2277 for (i = 0; i < pcisize; i++) {
2278 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2279 (u32*)&hldev->pci_config_space + i);
2284 * __hal_device_pci_info_get - Get PCI bus informations such as width, frequency
2286 * @devh: HAL device handle.
2287 * @pci_mode: pointer to a variable of enumerated type
2288 * xge_hal_pci_mode_e{}.
2289 * @bus_frequency: pointer to a variable of enumerated type
2290 * xge_hal_pci_bus_frequency_e{}.
2291 * @bus_width: pointer to a variable of enumerated type
2292 * xge_hal_pci_bus_width_e{}.
2294 * Get pci mode, frequency, and PCI bus width.
2296 * Returns: one of the xge_hal_status_e{} enumerated types.
2297 * XGE_HAL_OK - for success.
2298 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
2299 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card.
2301 * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e, xge_hal_pci_bus_width_e.
2303 static xge_hal_status_e
2304 __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
2305 xge_hal_pci_bus_frequency_e *bus_frequency,
2306 xge_hal_pci_bus_width_e *bus_width)
/* Determine PCI mode, bus frequency and bus width; the method used
 * depends on the card type (Herc probes hardware, Xena/Titan do not). */
2308 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2309 xge_hal_status_e rc_status = XGE_HAL_OK;
2310 xge_hal_card_e card_id = xge_hal_device_check_id (devh);
2312 #ifdef XGE_HAL_HERC_EMULATION
/* Emulation build: hard-code a 66 MHz PCI profile instead of probing. */
2313 hldev->config.pci_freq_mherz =
2314 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2316 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2317 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2319 if (card_id == XGE_HAL_CARD_HERC) {
/* Herc reports width and mode in a BAR0 pci_info register. */
2320 xge_hal_pci_bar0_t *bar0 =
2321 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2322 u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2324 if (XGE_HAL_PCI_32_BIT & pci_info)
2325 *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
2327 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
/* Decode the mode field from the top nibble (bits 63:60) of pci_info. */
2328 switch((pci_info & XGE_HAL_PCI_INFO)>>60)
2330 case XGE_HAL_PCI_33MHZ_MODE:
2332 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
2333 *pci_mode = XGE_HAL_PCI_33MHZ_MODE;
2335 case XGE_HAL_PCI_66MHZ_MODE:
2337 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2338 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2340 case XGE_HAL_PCIX_M1_66MHZ_MODE:
2342 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2343 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
2345 case XGE_HAL_PCIX_M1_100MHZ_MODE:
2347 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
2348 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
2350 case XGE_HAL_PCIX_M1_133MHZ_MODE:
2352 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2353 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
2355 case XGE_HAL_PCIX_M2_66MHZ_MODE:
/* PCI-X Mode 2 entries report a bus frequency higher than the base
 * clock (e.g. M2 66 MHz -> 133), reflecting the doubled data rate. */
2357 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2358 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
2360 case XGE_HAL_PCIX_M2_100MHZ_MODE:
2362 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
2363 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
2365 case XGE_HAL_PCIX_M2_133MHZ_MODE:
2367 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
2368 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
2370 case XGE_HAL_PCIX_M1_RESERVED:
2371 case XGE_HAL_PCIX_M1_66MHZ_NS:
2372 case XGE_HAL_PCIX_M1_100MHZ_NS:
2373 case XGE_HAL_PCIX_M1_133MHZ_NS:
2374 case XGE_HAL_PCIX_M2_RESERVED:
2375 case XGE_HAL_PCIX_533_RESERVED:
/* Reserved / non-standard encodings: flag invalid PCI info. */
2377 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
2378 xge_debug_device(XGE_ERR,
2379 "invalid pci info "XGE_OS_LLXFMT,
2380 (unsigned long long)pci_info);
2383 if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
2384 xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
2385 "%d frequency %d", *pci_mode, *bus_width,
/* Auto-configuration: adopt the detected bus frequency. */
2387 if (hldev->config.pci_freq_mherz ==
2388 XGE_HAL_DEFAULT_USE_HARDCODE) {
2389 hldev->config.pci_freq_mherz = *bus_frequency;
2392 /* for XENA, we report PCI mode, only. PCI bus frequency, and bus width
2393 * are set to unknown */
2394 else if (card_id == XGE_HAL_CARD_XENA) {
2396 u8 dev_num, bus_num;
2397 /* initialize defaults for XENA */
2398 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2399 *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2400 xge_os_pci_read32(hldev->pdev, hldev->cfgh,
2401 xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
/* PCI-X status encodes our device/bus number; both zero implies the
 * bus never ran the PCI-X initialization pattern, i.e. plain PCI. */
2403 dev_num = (u8)((pcix_status & 0xF8) >> 3);
2404 bus_num = (u8)((pcix_status & 0xFF00) >> 8);
2405 if (dev_num == 0 && bus_num == 0)
2406 *pci_mode = XGE_HAL_PCI_BASIC_MODE;
2408 *pci_mode = XGE_HAL_PCIX_BASIC_MODE;
2409 xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
2410 if (hldev->config.pci_freq_mherz ==
2411 XGE_HAL_DEFAULT_USE_HARDCODE) {
2413 * There is no way to detect BUS frequency on Xena,
2414 * so, in case of automatic configuration we hopelessly
2417 hldev->config.pci_freq_mherz =
2418 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2420 } else if (card_id == XGE_HAL_CARD_TITAN) {
/* Titan: fixed 64-bit / 250 MHz bus characteristics. */
2421 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
2422 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ;
2423 if (hldev->config.pci_freq_mherz ==
2424 XGE_HAL_DEFAULT_USE_HARDCODE) {
2425 hldev->config.pci_freq_mherz = *bus_frequency;
/* Any other card id is not supported by this HAL. */
2428 rc_status = XGE_HAL_ERR_BAD_DEVICE_ID;
2429 xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
2437 * __hal_device_handle_link_up_ind
2438 * @hldev: HAL device handle.
2440 * Link up indication handler. The function is invoked by HAL when
2441 * Xframe indicates that the link is up for programmable amount of time.
2444 __hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
2446 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2450 * If the previous link state is not down, return.
2452 if (hldev->link_state == XGE_HAL_LINK_UP) {
2453 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
/* Already up: just re-arm the masks on Herc (mask link-up interrupt,
 * unmask link-down) and ignore the redundant indication. */
2454 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2455 val64 = xge_os_pio_mem_read64(
2456 hldev->pdev, hldev->regh0,
2457 &bar0->misc_int_mask);
2458 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2459 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2460 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2461 val64, &bar0->misc_int_mask);
2464 xge_debug_device(XGE_TRACE,
2465 "link up indication while link is up, ignoring..");
2469 /* Now re-enable it as due to noise, hardware turned it off */
2470 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2471 &bar0->adapter_control);
2472 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
2473 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* clears the ECC_EN bit; NOTE(review): original comment said "ECC enable" — confirm intent */
2474 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2475 &bar0->adapter_control);
2477 /* Turn on the Laser */
2478 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2479 &bar0->adapter_control);
2480 val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
2481 XGE_HAL_ADAPTER_LED_ON);
2482 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2483 &bar0->adapter_control);
2485 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2486 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
/* If either RMAC fault bit is still set, the link failed to come up. */
2487 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2488 &bar0->adapter_status);
2489 if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2490 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
2491 xge_debug_device(XGE_TRACE, "%s",
2492 "fail to transition link to up...");
2497 * Mask the Link Up interrupt and unmask the Link Down
2500 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2501 &bar0->misc_int_mask);
2502 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2503 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2504 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2505 &bar0->misc_int_mask);
2506 xge_debug_device(XGE_TRACE, "calling link up..");
2507 hldev->link_state = XGE_HAL_LINK_UP;
/* Notify the upper-layer driver (ULD) that the link is up. */
2510 if (g_xge_hal_driver->uld_callbacks.link_up) {
2511 g_xge_hal_driver->uld_callbacks.link_up(
2512 hldev->upper_layer_info);
/* Non-ISR path: poll for the RMAC fault bits to clear, then queue a
 * LINK_IS_UP event for deferred (context) processing. */
2519 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2520 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2521 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2522 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2525 (void) xge_queue_produce_context(hldev->queueh,
2526 XGE_HAL_EVENT_LINK_IS_UP,
2528 /* link is up after been enabled */
2531 xge_debug_device(XGE_TRACE, "%s",
2532 "fail to transition link to up...");
2538 * __hal_device_handle_link_down_ind
2539 * @hldev: HAL device handle.
2541 * Link down indication handler. The function is invoked by HAL when
2542 * Xframe indicates that the link is down.
2545 __hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
2547 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2551 * If the previous link state is not up, return.
2553 if (hldev->link_state == XGE_HAL_LINK_DOWN) {
2554 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
/* Already down: re-arm masks on Herc (mask link-down interrupt,
 * unmask link-up) and ignore the redundant indication. */
2555 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2556 val64 = xge_os_pio_mem_read64(
2557 hldev->pdev, hldev->regh0,
2558 &bar0->misc_int_mask);
2559 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2560 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2561 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2562 val64, &bar0->misc_int_mask);
2565 xge_debug_device(XGE_TRACE,
2566 "link down indication while link is down, ignoring..");
2571 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2572 &bar0->adapter_control);
2574 /* try to debounce the link only if the adapter is enabled. */
2575 if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
2576 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2577 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2578 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2579 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2580 xge_debug_device(XGE_TRACE,
2581 "link is actually up (possible noisy link?), ignoring.");
/* Turn the LED off to reflect the down state. */
2586 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2587 &bar0->adapter_control);
2589 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
2590 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2591 &bar0->adapter_control);
2593 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2594 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2596 * Mask the Link Down interrupt and unmask the Link up
2599 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2600 &bar0->misc_int_mask);
2601 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2602 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2603 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2604 &bar0->misc_int_mask);
2607 xge_debug_device(XGE_TRACE, "calling link down..");
2608 hldev->link_state = XGE_HAL_LINK_DOWN;
/* Notify the upper-layer driver (ULD) that the link is down. */
2611 if (g_xge_hal_driver->uld_callbacks.link_down) {
2612 g_xge_hal_driver->uld_callbacks.link_down(
2613 hldev->upper_layer_info);
/* Queue a LINK_IS_DOWN event for deferred (context) processing. */
2619 (void) xge_queue_produce_context(hldev->queueh,
2620 XGE_HAL_EVENT_LINK_IS_DOWN,
2626 * __hal_device_handle_link_state_change
2627 * @hldev: HAL device handle.
2629 * Link state change handler. The function is invoked by HAL when
2630 * Xframe indicates link state change condition. The code here makes sure to
2631 * 1) ignore redundant state change indications;
2632 * 2) execute link-up sequence, and handle the failure to bring the link up;
2633 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
2634 * upper-layer driver (ULD).
2637 __hal_device_handle_link_state_change(xge_hal_device_t *hldev)
2642 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2646 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2647 &bar0->adapter_control);
2649 /* If the adapter is not enabled but the hal thinks we are in the up
2650 * state then transition to the down state.
2652 if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
2653 (hldev->link_state == XGE_HAL_LINK_UP) ) {
2654 return(__hal_device_handle_link_down_ind(hldev));
/* Sample the hardware link state; either RMAC fault bit (remote or
 * local) being set means the link is down.  Re-sample up to
 * link_valid_cnt times to debounce a noisy link. */
2659 (void) xge_hal_device_status(hldev, &hw_status);
2660 hw_link_state = (hw_status &
2661 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2662 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
2663 XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
2665 /* check if the current link state is still considered
2666 * to be changed. This way we will make sure that this is
2667 * not a noise which needs to be filtered out */
2668 if (hldev->link_state == hw_link_state)
2670 } while (i++ < hldev->config.link_valid_cnt);
2672 /* If the current link state is same as previous, just return */
2673 if (hldev->link_state == hw_link_state)
2675 /* detected state change */
2676 else if (hw_link_state == XGE_HAL_LINK_UP)
2677 retcode = __hal_device_handle_link_up_ind(hldev);
2679 retcode = __hal_device_handle_link_down_ind(hldev);
2687 __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
/* System-error (SERR) handler: bump the SERR counter, optionally dump
 * the device state, and queue a critical XGE_HAL_EVENT_SERR event
 * carrying the offending register value.  @reg names the source
 * register for the debug log; @value is the value read from it. */
2689 hldev->stats.sw_dev_err_stats.serr_cnt++;
2690 if (hldev->config.dump_on_serr) {
2691 #ifdef XGE_HAL_USE_MGMT_AUX
2692 (void) xge_hal_aux_device_dump(hldev);
2696 (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
2697 1, sizeof(u64), (void *)&value);
2699 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2700 (unsigned long long) value);
2707 __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
/* ECC-error handler: optionally dump the device state, then queue an
 * XGE_HAL_EVENT_ECCERR event — but only on Xena; Herc recovers from
 * ECC errors without software help (see comment below). */
2709 if (hldev->config.dump_on_eccerr) {
2710 #ifdef XGE_HAL_USE_MGMT_AUX
2711 (void) xge_hal_aux_device_dump(hldev);
2715 /* Herc smart enough to recover on its own! */
2716 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
2717 (void) xge_queue_produce(hldev->queueh,
2718 XGE_HAL_EVENT_ECCERR, hldev,
2719 1, sizeof(u64), (void *)&value);
2722 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2723 (unsigned long long) value);
2730 __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
/* Parity-error handler: optionally dump the device state, then queue a
 * PARITYERR context event (no payload, unlike SERR/ECC which carry the
 * register value in the event data). */
2732 if (hldev->config.dump_on_parityerr) {
2733 #ifdef XGE_HAL_USE_MGMT_AUX
2734 (void) xge_hal_aux_device_dump(hldev);
2737 (void) xge_queue_produce_context(hldev->queueh,
2738 XGE_HAL_EVENT_PARITYERR, hldev);
2740 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2741 (unsigned long long) value);
2748 __hal_device_handle_targetabort(xge_hal_device_t *hldev)
/* PCI target-abort handler: queue a TARGETABORT context event so the
 * upper layer can recover (typically via device reset). */
2750 (void) xge_queue_produce_context(hldev->queueh,
2751 XGE_HAL_EVENT_TARGETABORT, hldev);
2756 * __hal_device_hw_initialize
2757 * @hldev: HAL device handle.
2759 * Initialize Xframe hardware.
2761 static xge_hal_status_e
2762 __hal_device_hw_initialize(xge_hal_device_t *hldev)
2764 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2765 xge_hal_status_e status;
2768 /* Set proper endian settings and verify the same by reading the PIF
2769 * Feed-back register. */
2770 status = __hal_device_set_swapper(hldev);
2771 if (status != XGE_HAL_OK) {
2775 /* update the pci mode, frequency, and width */
2776 if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
2777 &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
2778 hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE;
2779 hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2780 hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2782 * FIXME: this cannot happen.
2783 * But if it happens we cannot continue just like that
2785 xge_debug_device(XGE_ERR, "unable to get pci info");
/* Plain-PCI bus: program conservative retry/timeout values. */
2788 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) ||
2789 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) ||
2790 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) {
2791 /* PCI optimization: set TxReqTimeOut
2792 * register (0x800+0x120) to 0x1ff or
2793 * something close to this.
2794 * Note: not to be used for PCI-X! */
2796 val64 = XGE_HAL_TXREQTO_VAL(0x1FF);
2797 val64 |= XGE_HAL_TXREQTO_EN;
2798 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2799 &bar0->txreqtimeout);
2801 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2802 &bar0->read_retry_delay);
2804 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2805 &bar0->write_retry_delay);
2807 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
2810 if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ ||
2811 hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) {
2813 /* Optimizing for PCI-X 266/250 */
2815 val64 = XGE_HAL_TXREQTO_VAL(0x7F);
2816 val64 |= XGE_HAL_TXREQTO_EN;
2817 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2818 &bar0->txreqtimeout);
2820 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes");
2823 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2824 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2825 &bar0->read_retry_delay);
2827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2828 &bar0->write_retry_delay);
2831 /* added this to set the no of bytes used to update lso_bytes_sent
2833 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2834 &bar0->pic_control_2);
2835 val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2);
2836 val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
2837 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2838 &bar0->pic_control_2);
2839 /* added this to clear the EOI_RESET field while leaving XGXS_RESET
2840 * in reset, then a 1-second delay */
2841 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2842 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset);
2843 xge_os_mdelay(1000);
2845 /* Clear the XGXS_RESET field of the SW_RESET register in order to
2846 * release the XGXS from reset. Its reset value is 0xA5; write 0x00
2847 * to activate the XGXS. The core requires a minimum 500 us reset.*/
2848 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset);
2849 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2853 /* read registers in all blocks */
2854 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2855 &bar0->mac_int_mask);
2856 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2857 &bar0->mc_int_mask);
2858 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2859 &bar0->xgxs_int_mask);
2861 /* set default MTU and steer based on length*/
2862 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Always set 22 bytes extra for steering to work
2864 if (hldev->config.mac.rmac_bcast_en) {
2865 xge_hal_device_bcast_enable(hldev);
2867 xge_hal_device_bcast_disable(hldev);
2870 #ifndef XGE_HAL_HERC_EMULATION
2871 __hal_device_xaui_configure(hldev);
2873 __hal_device_mac_link_util_set(hldev);
2875 __hal_device_mac_link_util_set(hldev);
2878 * Keep its PCI REQ# line asserted during a write
2879 * transaction up to the end of the transaction
2881 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2882 &bar0->misc_control);
2884 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN;
2886 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2887 val64, &bar0->misc_control);
2889 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2890 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2891 &bar0->misc_control);
2893 val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT;
2895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2896 val64, &bar0->misc_control);
2900 * bimodal interrupts is when all Rx traffic interrupts
2901 * will go to TTI, so we need to adjust RTI settings and
2902 * use adaptive TTI timer. We need to make sure RTI is
2903 * properly configured to sane value which will not
2904 * disrupt bimodal behavior.
2906 if (hldev->config.bimodal_interrupts) {
2909 /* force polling_cnt to be "0", otherwise
2910 * IRQ workload statistics will be screwed. This could
2911 * be worked out in TXPIC handler later. */
2912 hldev->config.isr_polling_cnt = 0;
2913 hldev->config.sched_timer_us = 10000;
2915 /* disable all TTI < 56 */
2916 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
2918 if (!hldev->config.fifo.queue[i].configured)
2920 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
2921 if (hldev->config.fifo.queue[i].tti[j].enabled)
2922 hldev->config.fifo.queue[i].tti[j].enabled = 0;
2926 /* now configure bimodal interrupts */
2927 __hal_device_bimodal_configure(hldev);
/* Configure interrupt moderation (TTI/RTI) and receive steering
 * (RTH indirection table, SPDM, RTS MAC/port/QoS); each step aborts
 * initialization on failure. */
2930 status = __hal_device_tti_configure(hldev, 0);
2931 if (status != XGE_HAL_OK)
2934 status = __hal_device_rti_configure(hldev, 0);
2935 if (status != XGE_HAL_OK)
2938 status = __hal_device_rth_it_configure(hldev);
2939 if (status != XGE_HAL_OK)
2942 status = __hal_device_rth_spdm_configure(hldev);
2943 if (status != XGE_HAL_OK)
2946 status = __hal_device_rts_mac_configure(hldev);
2947 if (status != XGE_HAL_OK) {
2948 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed ");
2952 status = __hal_device_rts_port_configure(hldev);
2953 if (status != XGE_HAL_OK) {
2954 xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed ");
2958 status = __hal_device_rts_qos_configure(hldev);
2959 if (status != XGE_HAL_OK) {
2960 xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed ");
2964 __hal_device_pause_frames_configure(hldev);
2965 __hal_device_rmac_padding_configure(hldev);
2966 __hal_device_shared_splits_configure(hldev);
2968 /* make sure all interrupts going to be disabled at the moment */
2969 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
2971 /* SXE-008 Transmit DMA arbitration issue */
2972 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
2973 hldev->revision < 4) {
2974 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0,
2975 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR,
2978 #if 0 // Removing temporarily as FreeBSD is seeing lower performance
2979 // attributable to this fix.
2981 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2982 /* Turn off the ECC error reporting for RLDRAM interface */
2983 if ((status = xge_hal_fix_rldram_ecc_error(hldev)) != XGE_HAL_OK)
2987 __hal_fifo_hw_initialize(hldev);
2988 __hal_ring_hw_initialize(hldev);
/* Wait for the adapter to quiesce before declaring init complete. */
2990 if (__hal_device_wait_quiescent(hldev, &val64)) {
2991 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
2994 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
2995 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
2996 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
2997 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
2998 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3001 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
3002 (unsigned long long)(ulong_t)hldev);
3004 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX ||
3005 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) {
3007 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL
3010 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3011 &bar0->pic_control);
3012 val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
3013 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
3014 &bar0->pic_control);
3017 hldev->hw_is_initialized = 1;
3018 hldev->terminating = 0;
3023 * __hal_device_reset - Reset device only.
3024 * @hldev: HAL device handle.
3026 * Reset the device, and subsequently restore
3027 * the previously saved PCI configuration space.
3029 #define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
3030 static xge_hal_status_e
3031 __hal_device_reset(xge_hal_device_t *hldev)
3033 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3034 int i, j, swap_done, pcisize = 0;
3035 u64 val64, rawval = 0ULL;
/* Save the MSI-X vector table (lives in BAR2 on Herc) so it can be
 * restored after the software reset wipes it. */
3037 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3038 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3039 if ( hldev->bar2 ) {
/* NOTE(review): "msix_vetor_table" is a local-name misspelling of
 * "vector"; renaming would be cosmetic only. */
3040 u64 *msix_vetor_table = (u64 *)hldev->bar2;
3042 // 2 64bit words for each entry
3043 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3045 hldev->msix_vector_table[i] =
3046 xge_os_pio_mem_read64(hldev->pdev,
3047 hldev->regh2, &msix_vetor_table[i]);
/* Determine whether byte-swapping is already configured, so the reset
 * value can be written with the correct byte order below. */
3052 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3053 &bar0->pif_rd_swapper_fb);
3054 swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);
3057 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
3058 (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
3060 u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
3061 #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
/* Swapper is not set yet: byte-swap the reset value by hand. */
3063 val = (((val & (u32)0x000000ffUL) << 24) |
3064 ((val & (u32)0x0000ff00UL) << 8) |
3065 ((val & (u32)0x00ff0000UL) >> 8) |
3066 ((val & (u32)0xff000000UL) >> 24));
3068 xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
3072 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
3073 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
3075 xge_os_mdelay(20); /* Wait for 20 ms after reset */
3078 /* Poll for no more than 1 second */
3079 for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
/* Rewrite the saved PCI config space, then check whether the device
 * identifies itself again — that means it came back from reset. */
3081 for (j = 0; j < pcisize; j++) {
3082 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
3083 *((u32*)&hldev->pci_config_space + j));
3086 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
3087 xge_offsetof(xge_hal_pci_config_le_t, device_id),
3090 if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
3096 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN)
3098 xge_debug_device(XGE_ERR, "device reset failed");
3099 return XGE_HAL_ERR_RESET_FAILED;
3102 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3105 rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
3106 pcisize = XGE_HAL_PCISIZE_HERC;
3109 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3111 if (val64 != rawval) {
3115 xge_os_mdelay(1); /* Wait for 1ms before retry */
3117 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
3118 rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
3119 pcisize = XGE_HAL_PCISIZE_XENA;
3120 xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
3123 /* Restore MSI-X vector table */
3124 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3125 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3126 if ( hldev->bar2 ) {
3128 * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 )
3129 * 98: PBATable 00000404 ( BIR:4 Offset:0x400 )
3131 u64 *msix_vetor_table = (u64 *)hldev->bar2;
3133 /* 2 64bit words for each entry */
3134 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3136 xge_os_pio_mem_write64(hldev->pdev,
3138 hldev->msix_vector_table[i],
3139 &msix_vetor_table[i]);
/* After reset the link is down; verify the expected post-reset raw
 * value before declaring the reset successful. */
3145 hldev->link_state = XGE_HAL_LINK_DOWN;
3146 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3149 if (val64 != rawval) {
3150 xge_debug_device(XGE_ERR, "device has not been reset "
3151 "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
3152 (unsigned long long)val64, (unsigned long long)rawval);
3153 return XGE_HAL_ERR_RESET_FAILED;
3156 hldev->hw_is_initialized = 0;
3161 * __hal_device_poll - General private routine to poll the device.
3162 * @hldev: HAL device handle.
3164 * Returns: one of the xge_hal_status_e{} enumerated types.
3165 * XGE_HAL_OK - for success.
3166 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
3168 static xge_hal_status_e
3169 __hal_device_poll(xge_hal_device_t *hldev)
3171 xge_hal_pci_bar0_t *bar0;
3174 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3176 /* Handling SERR errors by forcing a H/W reset. */
3177 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3178 &bar0->serr_source);
3179 if (err_reg & XGE_HAL_SERR_SOURCE_ANY) {
3180 __hal_device_handle_serr(hldev, "serr_source", err_reg);
3181 return XGE_HAL_ERR_CRITICAL;
/* Check misc_int_reg for data-path parity errors. */
3184 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3185 &bar0->misc_int_reg);
3187 if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) {
3188 hldev->stats.sw_dev_err_stats.parity_err_cnt++;
3189 __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg);
3190 return XGE_HAL_ERR_CRITICAL;
3193 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3194 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
3198 /* Handling link status change error Intr */
3199 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3200 &bar0->mac_rmac_err_reg);
3201 if (__hal_device_handle_link_state_change(hldev))
3202 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3203 err_reg, &bar0->mac_rmac_err_reg);
/* The inject_* fields below are test hooks: software-injected error
 * values, consumed (cleared) on first poll after being set. */
3206 if (hldev->inject_serr != 0) {
3207 err_reg = hldev->inject_serr;
3208 hldev->inject_serr = 0;
3209 __hal_device_handle_serr(hldev, "inject_serr", err_reg);
3210 return XGE_HAL_ERR_CRITICAL;
3213 if (hldev->inject_ecc != 0) {
3214 err_reg = hldev->inject_ecc;
3215 hldev->inject_ecc = 0;
3216 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3217 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg);
3218 return XGE_HAL_ERR_CRITICAL;
3221 if (hldev->inject_bad_tcode != 0) {
/* Build a throwaway channel/descriptor pair and feed the injected
 * transfer code through the normal tcode handler. */
3222 u8 t_code = hldev->inject_bad_tcode;
3223 xge_hal_channel_t channel;
3224 xge_hal_fifo_txd_t txd;
3225 xge_hal_ring_rxd_1_t rxd;
3227 channel.devh = hldev;
3229 if (hldev->inject_bad_tcode_for_chan_type ==
3230 XGE_HAL_CHANNEL_TYPE_FIFO) {
3231 channel.type = XGE_HAL_CHANNEL_TYPE_FIFO;
3234 channel.type = XGE_HAL_CHANNEL_TYPE_RING;
3237 hldev->inject_bad_tcode = 0;
3239 if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO)
3240 return xge_hal_device_handle_tcode(&channel, &txd,
3243 return xge_hal_device_handle_tcode(&channel, &rxd,
3251 * __hal_verify_pcc_idle - Verify All Enabled PCC are IDLE or not
3252 * @hldev: HAL device handle.
3253 * @adp_status: Adapter Status value
3254 * Usage: See xge_hal_device_enable{}.
3257 __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
/* On early Xena revisions only 4 PCCs are enabled (SXE-008 errata),
 * so only the corresponding idle bits are checked; otherwise all PCC
 * idle bits must be set. */
3259 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
3260 hldev->revision < 4) {
3262 * For Xena 1,2,3 we enable only 4 PCCs Due to
3263 * SXE-008 (Transmit DMA arbitration issue)
3265 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE)
3266 != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) {
3267 xge_debug_device(XGE_TRACE, "%s",
3268 "PCC is not IDLE after adapter enabled!");
3269 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3272 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) !=
3273 XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) {
3274 xge_debug_device(XGE_TRACE, "%s",
3275 "PCC is not IDLE after adapter enabled!");
3276 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3283 __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
/* Adaptive bimodal-interrupt tuner: adjusts the TTI timer for @ring_no
 * toward the best recent throughput, using a short history of
 * bytes-per-tick and interrupts-per-tick.
 * NOTE(review): the history/step state below is function-static, i.e.
 * shared across rings and devices — confirm single-ring usage. */
3285 int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
3286 int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
3289 #define _HIST_SIZE 50 /* 0.5 sec history */
3290 #define _HIST_ADJ_TIMER 1
3293 static int bytes_avg_history[_HIST_SIZE] = {0};
3294 static int d_avg_history[_HIST_SIZE] = {0};
3295 static int history_idx = 0;
3296 static int pstep = 1;
3297 static int hist_adj_timer = 0;
3300 * tval - current value of this bimodal timer
3302 tval = hldev->bimodal_tti[ring_no].timer_val_us;
3305 * d - how many interrupts we were getting since last
3306 * bimodal timer tick.
3308 d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
3309 hldev->bimodal_intr_cnt;
3311 /* advance bimodal interrupt counter */
3312 hldev->bimodal_intr_cnt =
3313 hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
3316 * iwl_cnt - how many interrupts we've got since last
3317 * bimodal timer tick.
3319 iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
3320 hldev->irq_workload_rxcnt[ring_no] : 1);
3321 iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
3322 hldev->irq_workload_txcnt[ring_no] : 1);
3323 iwl_cnt = iwl_rxcnt + iwl_txcnt;
3324 iwl_cnt = iwl_cnt; /* just to remove the lint warning */
3327 * we need to take hldev->config.isr_polling_cnt into account
3328 * but for some reason this line causing GCC to produce wrong
3329 * code on Solaris. As of now, if bimodal_interrupts is configured
3330 * hldev->config.isr_polling_cnt is forced to be "0".
3332 * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */
3335 * iwl_avg - how many RXDs on average been processed since
3336 * last bimodal timer tick. This indirectly includes
3339 iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
3340 iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
3341 iwl_avg = iwl_rxavg + iwl_txavg;
3342 iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;
3345 * len_avg - how many bytes on average been processed since
3346 * last bimodal timer tick. i.e. average frame size.
3348 len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
3349 (hldev->irq_workload_rxd[ring_no] ?
3350 hldev->irq_workload_rxd[ring_no] : 1);
3351 len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
3352 (hldev->irq_workload_txd[ring_no] ?
3353 hldev->irq_workload_txd[ring_no] : 1);
3354 len_avg = len_rxavg + len_txavg;
3358 /* align on low boundary */
3359 if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
3360 tval = hldev->config.bimodal_timer_lo_us;
/* Idle: reset to the low timer and clear the gathered history. */
3364 tval = hldev->config.bimodal_timer_lo_us;
3366 for (i = 0; i < _HIST_SIZE; i++)
3367 bytes_avg_history[i] = d_avg_history[i] = 0;
3373 /* always try to adjust timer to the best throughput value */
3374 bytes_avg = iwl_avg * len_avg;
3375 history_idx %= _HIST_SIZE;
3376 bytes_avg_history[history_idx] = bytes_avg;
3377 d_avg_history[history_idx] = d;
3379 d_hist = bytes_hist = 0;
3380 for (i = 0; i < _HIST_SIZE; i++) {
3381 /* do not re-configure until history is gathered */
3382 if (!bytes_avg_history[i]) {
3383 tval = hldev->config.bimodal_timer_lo_us;
3386 bytes_hist += bytes_avg_history[i];
3387 d_hist += d_avg_history[i];
3389 bytes_hist /= _HIST_SIZE;
3390 d_hist /= _HIST_SIZE;
3392 // xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
3393 // d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
3394 // d_hist*bytes_hist, pstep);
3396 /* make an adaptive step */
3397 if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
/* Step the timer within [bimodal_timer_lo_us, bimodal_timer_hi_us]
 * and account the direction of the adjustment in the stats. */
3403 (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
3405 hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
3406 } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
3408 hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
3411 /* enable TTI range A for better latencies */
3412 hldev->bimodal_urange_a_en = 0;
3413 if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
3414 hldev->bimodal_urange_a_en = 1;
3417 /* reset workload statistics counters */
3418 hldev->irq_workload_rxcnt[ring_no] = 0;
3419 hldev->irq_workload_rxd[ring_no] = 0;
3420 hldev->irq_workload_rxlen[ring_no] = 0;
3421 hldev->irq_workload_txcnt[ring_no] = 0;
3422 hldev->irq_workload_txd[ring_no] = 0;
3423 hldev->irq_workload_txlen[ring_no] = 0;
3425 /* reconfigure TTI56 + ring_no with new timer value */
3426 hldev->bimodal_timer_val_us = tval;
3427 (void) __hal_device_rti_configure(hldev, 1);
/*
 * __hal_update_rxufca - adaptive urange_a coalescing: raise or lower the
 * RTI ufc_a value for ALL rings based on the rx traffic interrupt count
 * observed over one rxufca_lbolt_period, then reprogram RTI.
 * NOTE(review): this chunk is line-sampled (original numbering is
 * non-contiguous); braces/else-arms are missing here — verify against
 * the full source file.
 */
3431 __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
/* current ufc_a for this ring and cumulative rx traffic interrupt count */
3435 ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
3436 ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
3438 /* urange_a adaptive coalescing */
/* only act once per lbolt period (software tick) */
3439 if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
3440 if (ic > hldev->rxufca_intr_thres) {
/* interrupt rate above threshold: push ufc_a up (more coalescing),
 * bounded by rxufca_hi_lim, and apply to every ring */
3441 if (ufc < hldev->config.rxufca_hi_lim) {
3443 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3444 hldev->config.ring.queue[i].rti.ufc_a = ufc;
3445 (void) __hal_device_rti_configure(hldev, 1);
3446 hldev->stats.sw_dev_info_stats.
3447 rxufca_hi_adjust_cnt++;
/* re-arm the threshold relative to the current count */
3449 hldev->rxufca_intr_thres = ic +
3450 hldev->config.rxufca_intr_thres; /* def: 30 */
/* interrupt rate below threshold: back ufc_a down toward
 * rxufca_lo_lim for better latency */
3452 if (ufc > hldev->config.rxufca_lo_lim) {
3454 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3455 hldev->config.ring.queue[i].rti.ufc_a = ufc;
3456 (void) __hal_device_rti_configure(hldev, 1);
3457 hldev->stats.sw_dev_info_stats.
3458 rxufca_lo_adjust_cnt++;
/* schedule next adjustment one lbolt period from now */
3461 hldev->rxufca_lbolt_time = hldev->rxufca_lbolt +
3462 hldev->config.rxufca_lbolt_period;
/* software tick, incremented on every call */
3464 hldev->rxufca_lbolt++;
3468 * __hal_device_handle_mc - Handle MC interrupt reason
3469 * @hldev: HAL device handle.
3470 * @reason: interrupt reason
/*
 * Reads mc_int_status; if MC_INT is set, reads and writes back mc_err_reg
 * (presumably write-1-to-clear — confirm against register spec), bumps the
 * single/double ECC error counters, and treats MIRI double-bit ECC errors
 * as critical (device reset required).
 * NOTE(review): lines are elided in this chunk (early-return body, closing
 * brace, final return) — verify against the full source.
 */
3473 __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
3475 xge_hal_pci_bar0_t *isrbar0 =
3476 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3479 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3480 &isrbar0->mc_int_status);
/* nothing to do unless the MC interrupt bit is set */
3481 if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
/* read the error register and write the value back (ack/clear) */
3484 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3485 &isrbar0->mc_err_reg);
3486 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3487 val64, &isrbar0->mc_err_reg);
/* single-bit (correctable) ECC errors; ITQ/RLD bits only checked on
 * non-Xena cards */
3489 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
3490 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
3491 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
3492 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
3493 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3494 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
3495 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
3496 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
3497 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
3498 hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
3499 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
/* double-bit (uncorrectable) ECC errors, same Xena carve-out */
3502 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
3503 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
3504 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3505 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
3506 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3507 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
3508 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
3509 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
3510 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
3511 hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
3512 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3515 if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
3516 hldev->stats.sw_dev_err_stats.sm_err_cnt++;
3519 /* those two should result in device reset */
3520 if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3521 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
3522 __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
3523 return XGE_HAL_ERR_CRITICAL;
3530 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
3531 * @hldev: HAL device handle.
3532 * @reason: interrupt reason
/*
 * For each sub-source flagged in @reason (FLSH, MDIO, IIC, MISC): read the
 * corresponding int register and write the value back (ack). FLSH/MDIO/IIC
 * are only acknowledged (FIXME markers below). MISC optionally dispatches
 * Herc link up/down indications when XGE_HAL_PROCESS_LINK_INT_IN_ISR is
 * defined.
 * NOTE(review): chunk is line-sampled; closing braces and the return path
 * are not visible here — verify against the full source.
 */
3535 __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
3537 xge_hal_pci_bar0_t *isrbar0 =
3538 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
/* flash interrupt: read-and-writeback only */
3541 if (reason & XGE_HAL_PIC_INT_FLSH) {
3542 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3543 &isrbar0->flsh_int_reg);
3544 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3545 val64, &isrbar0->flsh_int_reg);
3546 /* FIXME: handle register */
/* MDIO interrupt: read-and-writeback only */
3548 if (reason & XGE_HAL_PIC_INT_MDIO) {
3549 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3550 &isrbar0->mdio_int_reg);
3551 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3552 val64, &isrbar0->mdio_int_reg);
3553 /* FIXME: handle register */
/* IIC interrupt: read-and-writeback only */
3555 if (reason & XGE_HAL_PIC_INT_IIC) {
3556 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3557 &isrbar0->iic_int_reg);
3558 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3559 val64, &isrbar0->iic_int_reg);
3560 /* FIXME: handle register */
3562 if (reason & XGE_HAL_PIC_INT_MISC) {
3563 val64 = xge_os_pio_mem_read64(hldev->pdev,
3564 hldev->regh0, &isrbar0->misc_int_reg);
3565 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
/* in-ISR link processing is Herc-only */
3566 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3567 /* Check for Link interrupts. If both Link Up/Down
3568 * bits are set, clear both and check adapter status
3570 if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
3571 (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
3574 xge_debug_device(XGE_TRACE,
3575 "both link up and link down detected "XGE_OS_LLXFMT,
3576 (unsigned long long)val64);
/* clear both latched link bits at once */
3578 temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
3579 XGE_HAL_MISC_INT_REG_LINK_UP_INT);
3580 xge_os_pio_mem_write64(hldev->pdev,
3581 hldev->regh0, temp64,
3582 &isrbar0->misc_int_reg);
3584 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
3585 xge_debug_device(XGE_TRACE,
3586 "link up call request, misc_int "XGE_OS_LLXFMT,
3587 (unsigned long long)val64);
3588 __hal_device_handle_link_up_ind(hldev);
3590 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
3591 xge_debug_device(XGE_TRACE,
3592 "link down request, misc_int "XGE_OS_LLXFMT,
3593 (unsigned long long)val64);
3594 __hal_device_handle_link_down_ind(hldev);
/* acknowledge MISC by writing back the value read */
3599 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3600 val64, &isrbar0->misc_int_reg);
3608 * __hal_device_handle_txpic - Handle TxPIC interrupt reason
3609 * @hldev: HAL device handle.
3610 * @reason: interrupt reason
/*
 * Delegates FLSH/MDIO/IIC/MISC bits to __hal_device_handle_pic(), then on
 * PIC_INT_TX acknowledges txpic_int_reg and, for the scheduler interrupt,
 * (a) invokes the ULD sched_timer callback, (b) runs the optional adaptive
 * rx coalescing (__hal_update_rxufca) and bimodal TTI re-calculation
 * (__hal_update_bimodal) over the configured rings.
 * NOTE(review): chunk is line-sampled; early-return bodies and the final
 * return of `status` are not visible — verify against the full source.
 */
3613 __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
3615 xge_hal_status_e status = XGE_HAL_OK;
3616 xge_hal_pci_bar0_t *isrbar0 =
3617 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3620 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3621 &isrbar0->pic_int_status);
/* non-traffic PIC sub-sources are handled by the PIC handler */
3622 if ( val64 & (XGE_HAL_PIC_INT_FLSH |
3623 XGE_HAL_PIC_INT_MDIO |
3624 XGE_HAL_PIC_INT_IIC |
3625 XGE_HAL_PIC_INT_MISC) ) {
3626 status = __hal_device_handle_pic(hldev, val64);
3630 if (!(val64 & XGE_HAL_PIC_INT_TX))
/* acknowledge txpic_int_reg by writing back the value read */
3633 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3634 &isrbar0->txpic_int_reg);
3635 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3636 val64, &isrbar0->txpic_int_reg);
3639 if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
/* notify the upper-layer driver's scheduler timer, if registered */
3642 if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
3643 g_xge_hal_driver->uld_callbacks.sched_timer(
3644 hldev, hldev->upper_layer_info);
3646 * This feature implements adaptive receive interrupt
3647 * coalecing. It is disabled by default. To enable it
3648 * set hldev->config.rxufca_lo_lim to be not equal to
3649 * hldev->config.rxufca_hi_lim.
3651 * We are using HW timer for this feature, so
3652 * use needs to configure hldev->config.rxufca_lbolt_period
3653 * which is essentially a time slice of timer.
3655 * For those who familiar with Linux, lbolt means jiffies
3656 * of this timer. I.e. timer tick.
3658 if (hldev->config.rxufca_lo_lim !=
3659 hldev->config.rxufca_hi_lim &&
3660 hldev->config.rxufca_lo_lim != 0) {
3661 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3662 if (!hldev->config.ring.queue[i].configured)
/* only rings with urange_a configured participate */
3664 if (hldev->config.ring.queue[i].rti.urange_a)
3665 __hal_update_rxufca(hldev, i);
3670 * This feature implements adaptive TTI timer re-calculation
3671 * based on host utilization, number of interrupt processed,
3672 * number of RXD per tick and avarage length of packets per
3675 if (hldev->config.bimodal_interrupts) {
3676 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3677 if (!hldev->config.ring.queue[i].configured)
3679 if (hldev->bimodal_tti[i].enabled)
3680 __hal_update_bimodal(hldev, i);
3689 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
3690 * @hldev: HAL device handle.
3691 * @reason: interrupt reason
/*
 * For each TxDMA sub-block flagged in txdma_int_status (PFC, TDA, PCC,
 * TTI, LSO, TPA, SM): read the sub-block's error register, write it back
 * (ack), bump the per-block error counter, and build a mask (temp64) of
 * the fatal bits for that block. The `reset` label below performs the
 * full reset/enable/intr_enable recovery sequence.
 * NOTE(review): chunk is line-sampled; the `if (err & temp64) goto reset;`
 * style checks between sections and the normal return are not visible —
 * verify against the full source.
 */
3694 __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
3696 xge_hal_pci_bar0_t *isrbar0 =
3697 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3698 u64 val64, temp64, err;
3700 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3701 &isrbar0->txdma_int_status);
/* PFC errors */
3702 if (val64 & XGE_HAL_TXDMA_PFC_INT) {
3703 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3704 &isrbar0->pfc_err_reg);
3705 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3706 err, &isrbar0->pfc_err_reg);
3707 hldev->stats.sw_dev_info_stats.pfc_err_cnt++;
3708 temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM
3709 |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR
3710 |XGE_HAL_PFC_PCIX_ERR;
/* TDA errors */
3714 if (val64 & XGE_HAL_TXDMA_TDA_INT) {
3715 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3716 &isrbar0->tda_err_reg);
3717 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3718 err, &isrbar0->tda_err_reg);
3719 hldev->stats.sw_dev_info_stats.tda_err_cnt++;
3720 temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM
3721 |XGE_HAL_TDA_SM1_ERR_ALARM;
/* PCC errors */
3725 if (val64 & XGE_HAL_TXDMA_PCC_INT) {
3726 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3727 &isrbar0->pcc_err_reg);
3728 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3729 err, &isrbar0->pcc_err_reg);
3730 hldev->stats.sw_dev_info_stats.pcc_err_cnt++;
3731 temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR
3732 |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM
3733 |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR
3734 |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR
3735 |XGE_HAL_PCC_7_LSO_OV_ERR;
/* TTI errors */
3739 if (val64 & XGE_HAL_TXDMA_TTI_INT) {
3740 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3741 &isrbar0->tti_err_reg);
3742 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3743 err, &isrbar0->tti_err_reg);
3744 hldev->stats.sw_dev_info_stats.tti_err_cnt++;
3745 temp64 = XGE_HAL_TTI_SM_ERR_ALARM;
/* LSO errors */
3749 if (val64 & XGE_HAL_TXDMA_LSO_INT) {
3750 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3751 &isrbar0->lso_err_reg);
3752 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3753 err, &isrbar0->lso_err_reg);
3754 hldev->stats.sw_dev_info_stats.lso_err_cnt++;
3755 temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT
3756 |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM;
/* TPA errors */
3760 if (val64 & XGE_HAL_TXDMA_TPA_INT) {
3761 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3762 &isrbar0->tpa_err_reg);
3763 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3764 err, &isrbar0->tpa_err_reg);
3765 hldev->stats.sw_dev_info_stats.tpa_err_cnt++;
3766 temp64 = XGE_HAL_TPA_SM_ERR_ALARM;
/* SM errors */
3770 if (val64 & XGE_HAL_TXDMA_SM_INT) {
3771 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3772 &isrbar0->sm_err_reg);
3773 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3774 err, &isrbar0->sm_err_reg);
3775 hldev->stats.sw_dev_info_stats.sm_err_cnt++;
3776 temp64 = XGE_HAL_SM_SM_ERR_ALARM;
/* fatal-error recovery: full device reset + re-enable + re-arm intrs */
3783 reset : xge_hal_device_reset(hldev);
3784 xge_hal_device_enable(hldev);
3785 xge_hal_device_intr_enable(hldev);
3790 * __hal_device_handle_txmac - Handle TxMAC interrupt reason
3791 * @hldev: HAL device handle.
3792 * @reason: interrupt reason
/*
 * If TMAC_INT is pending: acknowledge mac_tmac_err_reg (write back the
 * value read), count the error, and on a fatal bit (TX_BUF_OVRN or
 * TX_SM_ERR) reset and re-enable the device.
 * NOTE(review): chunk is line-sampled; early-return body / final return
 * are not visible — verify against the full source.
 */
3795 __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
3797 xge_hal_pci_bar0_t *isrbar0 =
3798 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3801 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3802 &isrbar0->mac_int_status);
3803 if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
3806 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3807 &isrbar0->mac_tmac_err_reg);
3808 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3809 val64, &isrbar0->mac_tmac_err_reg);
3810 hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++;
/* fatal TMAC conditions trigger full recovery */
3811 temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR;
3812 if (val64 & temp64) {
3813 xge_hal_device_reset(hldev);
3814 xge_hal_device_enable(hldev);
3815 xge_hal_device_intr_enable(hldev);
3822 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
3823 * @hldev: HAL device handle.
3824 * @reason: interrupt reason
/*
 * If TXGXS is pending: acknowledge xgxs_txgxs_err_reg, count the error,
 * and on a fatal bit (ESTORE_UFLOW or TX_SM_ERR) reset and re-enable.
 * NOTE(review): chunk is line-sampled — verify against the full source.
 */
3827 __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason)
3829 xge_hal_pci_bar0_t *isrbar0 =
3830 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3833 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3834 &isrbar0->xgxs_int_status);
3835 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS))
3838 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3839 &isrbar0->xgxs_txgxs_err_reg);
3840 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3841 val64, &isrbar0->xgxs_txgxs_err_reg);
3842 hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++;
/* fatal TxXGXS conditions trigger full recovery */
3843 temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR;
3844 if (val64 & temp64) {
3845 xge_hal_device_reset(hldev);
3846 xge_hal_device_enable(hldev);
3847 xge_hal_device_intr_enable(hldev);
3854 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason
3855 * @hldev: HAL device handle.
3856 * @reason: interrupt reason
/* Stub: RxPIC handling is not implemented (see FIXME). */
3859 __hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason)
3861 /* FIXME: handle register */
3867 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason
3868 * @hldev: HAL device handle.
3869 * @reason: interrupt reason
/*
 * Mirror of the TxDMA handler for the receive side: for each RxDMA
 * sub-block flagged in rxdma_int_status (RC, RPA, RDA, RTI), acknowledge
 * the sub-block error register, count the error, and build the fatal-bit
 * mask (temp64). The `reset` label performs full recovery.
 * NOTE(review): chunk is line-sampled; the per-section fatal checks and
 * the normal return are not visible — verify against the full source.
 */
3872 __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
3874 xge_hal_pci_bar0_t *isrbar0 =
3875 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3876 u64 val64, err, temp64;
3878 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3879 &isrbar0->rxdma_int_status);
/* RC errors */
3880 if (val64 & XGE_HAL_RXDMA_RC_INT) {
3881 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3882 &isrbar0->rc_err_reg);
3883 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3884 err, &isrbar0->rc_err_reg);
3885 hldev->stats.sw_dev_info_stats.rc_err_cnt++;
3886 temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR
3887 |XGE_HAL_RC_PRCn_SM_ERR_ALARM
3888 |XGE_HAL_RC_FTC_SM_ERR_ALARM;
/* RPA errors */
3892 if (val64 & XGE_HAL_RXDMA_RPA_INT) {
3893 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3894 &isrbar0->rpa_err_reg);
3895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3896 err, &isrbar0->rpa_err_reg);
3897 hldev->stats.sw_dev_info_stats.rpa_err_cnt++;
3898 temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR;
/* RDA errors */
3902 if (val64 & XGE_HAL_RXDMA_RDA_INT) {
3903 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3904 &isrbar0->rda_err_reg);
3905 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3906 err, &isrbar0->rda_err_reg);
3907 hldev->stats.sw_dev_info_stats.rda_err_cnt++;
3908 temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR
3909 |XGE_HAL_RDA_FRM_ECC_DB_N_AERR
3910 |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM
3911 |XGE_HAL_RDA_RXD_ECC_DB_SERR;
/* RTI errors */
3915 if (val64 & XGE_HAL_RXDMA_RTI_INT) {
3916 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3917 &isrbar0->rti_err_reg);
3918 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3919 err, &isrbar0->rti_err_reg);
3920 hldev->stats.sw_dev_info_stats.rti_err_cnt++;
3921 temp64 = XGE_HAL_RTI_SM_ERR_ALARM;
/* fatal-error recovery: full device reset + re-enable + re-arm intrs */
3928 reset : xge_hal_device_reset(hldev);
3929 xge_hal_device_enable(hldev);
3930 xge_hal_device_intr_enable(hldev);
3935 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
3936 * @hldev: HAL device handle.
3937 * @reason: interrupt reason
/*
 * If RMAC_INT is pending: acknowledge mac_rmac_err_reg, count the error,
 * and on a fatal bit (RX_BUFF_OVRN or RX_SM_ERR) reset and re-enable.
 * NOTE(review): chunk is line-sampled — verify against the full source.
 */
3940 __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
3942 xge_hal_pci_bar0_t *isrbar0 =
3943 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3946 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3947 &isrbar0->mac_int_status);
3948 if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
3951 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3952 &isrbar0->mac_rmac_err_reg);
3953 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3954 val64, &isrbar0->mac_rmac_err_reg);
3955 hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++;
/* fatal RMAC conditions trigger full recovery */
3956 temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR;
3957 if (val64 & temp64) {
3958 xge_hal_device_reset(hldev);
3959 xge_hal_device_enable(hldev);
3960 xge_hal_device_intr_enable(hldev);
3967 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
3968 * @hldev: HAL device handle.
3969 * @reason: interrupt reason
/*
 * If RXGXS is pending: acknowledge xgxs_rxgxs_err_reg, count the error,
 * and on a fatal bit (ESTORE_OFLOW or RX_SM_ERR) reset and re-enable.
 * NOTE(review): chunk is line-sampled — verify against the full source.
 */
3972 __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
3974 xge_hal_pci_bar0_t *isrbar0 =
3975 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3978 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3979 &isrbar0->xgxs_int_status);
3980 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS))
3983 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3984 &isrbar0->xgxs_rxgxs_err_reg);
3985 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3986 val64, &isrbar0->xgxs_rxgxs_err_reg);
3987 hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++;
/* fatal RxXGXS conditions trigger full recovery */
3988 temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR;
3989 if (val64 & temp64) {
3990 xge_hal_device_reset(hldev);
3991 xge_hal_device_enable(hldev);
3992 xge_hal_device_intr_enable(hldev);
3999 * xge_hal_device_enable - Enable device.
4000 * @hldev: HAL device handle.
4002 * Enable the specified device: bring up the link/interface.
4003 * Returns: XGE_HAL_OK - success.
4004 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device
4005 * to a "quiescent" state.
4007 * See also: xge_hal_status_e{}.
4009 * Usage: See ex_open{}.
/*
 * Sequence: hw-initialize if needed; enable bus mastership; per-card link
 * housekeeping (Herc link-stability period, clearing stale link intrs);
 * wait for quiescence; turn on the laser (EOI_TX_ON); set CNTL_EN; spin
 * waiting for link; then (when link intr is not processed in ISR) soft-
 * reset XGXS via dtx_control writes to force a link state change poll.
 * NOTE(review): chunk is line-sampled (loop headers, else branches and
 * several returns are missing) — verify against the full source.
 */
4012 xge_hal_device_enable(xge_hal_device_t *hldev)
4014 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* lazily run HW initialization on first enable */
4019 if (!hldev->hw_is_initialized) {
4020 xge_hal_status_e status;
4022 status = __hal_device_hw_initialize(hldev);
4023 if (status != XGE_HAL_OK) {
4029 * Not needed in most cases, i.e.
4030 * when device_disable() is followed by reset -
4031 * the latter copies back PCI config space, along with
4032 * the bus mastership - see __hal_device_reset().
4033 * However, there are/may-in-future be other cases, and
4036 __hal_device_bus_master_enable(hldev);
4038 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4040 * Configure the link stability period.
4042 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4043 &bar0->misc_control);
4044 if (hldev->config.link_stability_period !=
4045 XGE_HAL_DEFAULT_USE_HARDCODE) {
4047 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4048 hldev->config.link_stability_period);
4051 * Use the link stability period 1 ms as default
4053 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4054 XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
4056 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4057 val64, &bar0->misc_control);
4060 * Clearing any possible Link up/down interrupts that
4061 * could have popped up just before Enabling the card.
4063 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4064 &bar0->misc_int_reg);
4066 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4067 val64, &bar0->misc_int_reg);
4068 xge_debug_device(XGE_TRACE, "%s","link state cleared");
4070 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
4072 * Clearing any possible Link state change interrupts that
4073 * could have popped up just before Enabling the card.
4075 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4076 &bar0->mac_rmac_err_reg);
4078 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4079 val64, &bar0->mac_rmac_err_reg);
4080 xge_debug_device(XGE_TRACE, "%s", "link state cleared");
4084 if (__hal_device_wait_quiescent(hldev, &val64)) {
4085 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4088 /* Enabling Laser. */
4089 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4090 &bar0->adapter_control);
4091 val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
4092 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4093 &bar0->adapter_control);
4095 /* let link establish */
4098 /* set link down untill poll() routine will set it up (maybe) */
4099 hldev->link_state = XGE_HAL_LINK_DOWN;
4101 /* If link is UP (adpter is connected) then enable the adapter */
4102 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4103 &bar0->adapter_status);
/* on a remote/local RMAC fault: keep the LED off */
4104 if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4105 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
4106 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4107 &bar0->adapter_control);
4108 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
4110 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4111 &bar0->adapter_control);
4112 val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
4113 XGE_HAL_ADAPTER_LED_ON );
4116 val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */
/* NOTE(review): this CLEARS ADAPTER_ECC_EN although the original comment
 * said "ECC enable" — confirm whether the bit is active-low or this is a
 * latent bug */
4117 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
4118 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
4119 &bar0->adapter_control);
4121 /* We spin here waiting for the Link to come up.
4122 * This is the fix for the Link being unstable after the reset. */
4127 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4128 &bar0->adapter_status);
4130 /* Read the adapter control register for Adapter_enable bit */
4131 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4132 &bar0->adapter_control);
/* link looks good only when no RMAC fault AND the enable bit stuck */
4133 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4134 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
4135 (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
/* require link_valid_cnt consecutive good polls before trusting it */
4137 if (j >= hldev->config.link_valid_cnt) {
4138 if (xge_hal_device_status(hldev, &adp_status) ==
4140 if (__hal_verify_pcc_idle(hldev,
4141 adp_status) != XGE_HAL_OK) {
4143 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4145 xge_debug_device(XGE_TRACE,
4146 "adp_status: "XGE_OS_LLXFMT
4149 (unsigned long long)adp_status);
4150 val64 = xge_os_pio_mem_read64(
4153 &bar0->adapter_control);
4155 (XGE_HAL_ADAPTER_EOI_TX_ON |
4156 XGE_HAL_ADAPTER_LED_ON );
4157 xge_os_pio_mem_write64(hldev->pdev,
4158 hldev->regh0, val64,
4159 &bar0->adapter_control);
4162 val64 = xge_os_pio_mem_read64(
4165 &bar0->adapter_control);
4166 break; /* out of for loop */
4169 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
/* link not yet valid: restart the count and re-kick the laser */
4173 j = 0; /* Reset the count */
4174 /* Turn on the Laser */
4175 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4176 &bar0->adapter_control);
4177 val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
4178 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
4179 val64, &bar0->adapter_control);
4183 /* Now re-enable it as due to noise, hardware
4185 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4186 &bar0->adapter_control);
4187 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
/* NOTE(review): same as above — clears ECC_EN despite old "ECC enable"
 * comment; confirm intent */
4188 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);
4189 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4190 &bar0->adapter_control);
4192 xge_os_mdelay(1); /* Sleep for 1 msec */
4194 } while (i < hldev->config.link_retry_cnt);
4196 __hal_device_led_actifity_fix(hldev);
4198 #ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
4199 /* Here we are performing soft reset on XGXS to force link down.
4200 * Since link is already up, we will get link state change
4201 * poll notificatoin after adapter is enabled */
4203 __hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
4204 &bar0->dtx_control);
4205 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4207 __hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
4208 &bar0->dtx_control);
4209 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4211 __hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
4212 &bar0->dtx_control);
4213 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
/* NOTE(review): delay is 100 ms though the original comment said
 * "Sleep for 500 msec" — confirm which is intended */
4215 xge_os_mdelay(100);
4217 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4221 * With some switches the link state change interrupt does not
4222 * occur even though the xgxs reset is done as per SPN-006. So,
4223 * poll the adapter status register and check if the link state
4226 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4227 &bar0->adapter_status);
4228 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4229 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
4231 xge_debug_device(XGE_TRACE, "%s",
4232 "enable device causing link state change ind..");
4233 (void) __hal_device_handle_link_state_change(hldev);
/* start periodic stats collection unless disabled by config */
4237 if (hldev->config.stats_refresh_time_sec !=
4238 XGE_HAL_STATS_REFRESH_DISABLE)
4239 __hal_stats_enable(&hldev->stats);
4245 * xge_hal_device_disable - Disable Xframe adapter.
4246 * @hldev: Device handle.
4248 * Disable this device. To gracefully reset the adapter, the host should:
4250 * - call xge_hal_device_disable();
4252 * - call xge_hal_device_intr_disable();
4254 * - close all opened channels and clean up outstanding resources;
4256 * - do some work (error recovery, change mtu, reset, etc);
4258 * - call xge_hal_device_enable();
4260 * - open channels, replenish RxDs, etc.
4262 * - call xge_hal_device_intr_enable().
4264 * Note: Disabling the device does _not_ include disabling of interrupts.
4265 * After disabling the device stops receiving new frames but those frames
4266 * that were already in the pipe will keep coming for some few milliseconds.
4268 * Returns: XGE_HAL_OK - success.
4269 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
4270 * a "quiescent" state.
4272 * See also: xge_hal_status_e{}.
/*
 * Clears ADAPTER_CNTL_EN, waits for quiescence (including the PRC
 * quiescent bit), stops stats collection, and optionally disables bus
 * mastership. Accumulates a status rather than returning at first failure.
 * NOTE(review): chunk is line-sampled; the final return is not visible —
 * verify against the full source.
 */
4275 xge_hal_device_disable(xge_hal_device_t *hldev)
4277 xge_hal_status_e status = XGE_HAL_OK;
4278 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4281 xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");
/* drop the adapter enable bit */
4283 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4284 &bar0->adapter_control);
4285 val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
4286 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4287 &bar0->adapter_control);
4289 if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
4290 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
/* also wait for the receive PRC to go quiescent */
4293 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
4294 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
4295 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4296 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
4297 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4300 if (hldev->config.stats_refresh_time_sec !=
4301 XGE_HAL_STATS_REFRESH_DISABLE)
4302 __hal_stats_disable(&hldev->stats);
4303 #ifdef XGE_DEBUG_ASSERT
4305 xge_assert(!hldev->stats.is_enabled);
4308 #ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
4309 __hal_device_bus_master_disable(hldev);
4316 * xge_hal_device_reset - Reset device.
4317 * @hldev: HAL device handle.
4319 * Soft-reset the device, reset the device stats except reset_cnt.
4321 * After reset is done, will try to re-initialize HW.
4323 * Returns: XGE_HAL_OK - success.
4324 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
4325 * XGE_HAL_ERR_RESET_FAILED - Reset failed.
4327 * See also: xge_hal_status_e{}.
/*
 * Saves soft_reset_cnt across the stats wipe, performs the soft reset,
 * restores the (incremented) counter, and re-seeds rxufca_intr_thres.
 * NOTE(review): chunk is line-sampled; the final `return status;` is not
 * visible — verify against the full source.
 */
4330 xge_hal_device_reset(xge_hal_device_t *hldev)
4332 xge_hal_status_e status;
4334 /* increment the soft reset counter */
/* snapshot the counter before stats are wiped below */
4335 u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;
4337 xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);
4339 if (!hldev->is_initialized)
4340 return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;
4342 /* actual "soft" reset of the adapter */
4343 status = __hal_device_reset(hldev);
4345 /* reset all stats including saved */
4346 __hal_stats_soft_reset(hldev, 1);
4348 /* increment reset counter */
4349 hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;
4351 /* re-initialize rxufca_intr_thres */
4352 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
4354 hldev->reset_needed_after_close = 0;
4360 * xge_hal_device_status - Check whether Xframe hardware is ready for
4362 * @hldev: HAL device handle.
4363 * @hw_status: Xframe status register. Returned by HAL.
4365 * Check whether Xframe hardware is ready for operation.
4366 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest
4367 * hardware functional blocks.
4369 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
4370 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status).
4372 * See also: xge_hal_status_e{}.
4373 * Usage: See ex_open{}.
/*
 * Reads adapter_status once and fails fast on the first not-ready block.
 * NOTE(review): chunk is line-sampled; the *hw_status assignment and the
 * XGE_HAL_OK return are not visible — verify against the full source.
 */
4376 xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
4378 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4381 tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4382 &bar0->adapter_status);
4386 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
4387 xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
4388 return XGE_HAL_FAIL;
4390 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
4391 xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
4392 return XGE_HAL_FAIL;
4394 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
4395 xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
4396 return XGE_HAL_FAIL;
4398 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
4399 xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
4400 return XGE_HAL_FAIL;
4402 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
4403 xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
4404 return XGE_HAL_FAIL;
4406 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
4407 xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
4408 return XGE_HAL_FAIL;
4410 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
4411 xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
4412 return XGE_HAL_FAIL;
4414 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
4415 xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
4416 return XGE_HAL_FAIL;
4418 #ifndef XGE_HAL_HERC_EMULATION
4420 * Andrew: in PCI 33 mode, the P_PLL is not used, and therefore,
4421 * the P_PLL_LOCK bit in the adapter_status register will
4424 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
4425 xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
4426 hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
4427 xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
4428 return XGE_HAL_FAIL;
/*
 * __hal_device_msi_intr_endis - enable (@flag != 0) or disable (@flag == 0)
 * MSI by toggling bit 0 (MSI Enable) of the PCI MSI Message Control word.
 */
4436 __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
4438 u16 msi_control_reg;
4440 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
4441 xge_offsetof(xge_hal_pci_config_le_t,
4442 msi_control), &msi_control_reg);
4445 msi_control_reg |= 0x1;
4447 msi_control_reg &= ~0x1;
4449 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
4450 xge_offsetof(xge_hal_pci_config_le_t,
4451 msi_control), msi_control_reg);
/*
 * __hal_device_msix_intr_endis - unmask (@flag != 0) or mask (@flag == 0)
 * the MSI-X vector of @channel in xmsi_mask_reg. Bit 63 corresponds to
 * vector 0, hence the (63 - msix_idx) shift.
 */
4455 __hal_device_msix_intr_endis(xge_hal_device_t *hldev,
4456 xge_hal_channel_t *channel, int flag)
4459 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
4461 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4462 &bar0->xmsi_mask_reg);
/* clear the mask bit to enable, set it to disable */
4465 val64 &= ~(1LL << ( 63 - channel->msix_idx ));
4467 val64 |= (1LL << ( 63 - channel->msix_idx ));
4468 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4469 &bar0->xmsi_mask_reg);
4473 * xge_hal_device_intr_enable - Enable Xframe interrupts.
4474 * @hldev: HAL device handle.
4475 * @op: One of the xge_hal_device_intr_e enumerated values specifying
4476 * the type(s) of interrupts to enable.
4478 * Enable Xframe interrupts. The function is to be executed the last in
4479 * Xframe initialization sequence.
4481 * See also: xge_hal_device_intr_disable()
/*
 * Enables the PRC on every ring channel, programs the traffic interrupt
 * mask (TTI/RX traffic, TX PIC/DMA, optional scheduler), and, in MSI-X
 * mode, unmasks the per-channel vectors (vector 0 is reserved for alarms).
 * NOTE(review): chunk is line-sampled; `continue` statements after the
 * msix_idx checks and some braces are not visible — verify against the
 * full source.
 */
4484 xge_hal_device_intr_enable(xge_hal_device_t *hldev)
4489 /* PRC initialization and configuration */
4490 xge_list_for_each(item, &hldev->ring_channels) {
4491 xge_hal_channel_h channel;
4492 channel = xge_container_of(item, xge_hal_channel_t, item);
4493 __hal_ring_prc_enable(channel);
4496 /* enable traffic only interrupts */
4497 if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
4499 * make sure all interrupts going to be disabled if MSI
4502 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
4505 * Enable the Tx traffic interrupts only if the TTI feature is
4509 if (hldev->tti_enabled)
4510 val64 = XGE_HAL_TX_TRAFFIC_INTR;
/* rx traffic intr is used unless bimodal interrupts handle rx */
4512 if (!hldev->config.bimodal_interrupts)
4513 val64 |= XGE_HAL_RX_TRAFFIC_INTR;
/* Xena always gets the rx traffic interrupt */
4515 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4516 val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4518 val64 |=XGE_HAL_TX_PIC_INTR |
4520 XGE_HAL_TX_DMA_INTR |
4521 (hldev->config.sched_timer_us !=
4522 XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
4523 __hal_device_intr_mgmt(hldev, val64, 1);
4527 * Enable MSI-X interrupts
4529 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4531 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4533 * To enable MSI-X, MSI also needs to be enabled,
4534 * due to a bug in the herc NIC.
4536 __hal_device_msi_intr_endis(hldev, 1);
4540 /* Enable the MSI-X interrupt for each configured channel */
4541 xge_list_for_each(item, &hldev->fifo_channels) {
4542 xge_hal_channel_t *channel;
4544 channel = xge_container_of(item,
4545 xge_hal_channel_t, item);
4547 /* 0 vector is reserved for alarms */
4548 if (!channel->msix_idx)
4551 __hal_device_msix_intr_endis(hldev, channel, 1);
4554 xge_list_for_each(item, &hldev->ring_channels) {
4555 xge_hal_channel_t *channel;
4557 channel = xge_container_of(item,
4558 xge_hal_channel_t, item);
4560 /* 0 vector is reserved for alarms */
4561 if (!channel->msix_idx)
4564 __hal_device_msix_intr_endis(hldev, channel, 1);
4568 xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
4573 * xge_hal_device_intr_disable - Disable Xframe interrupts.
4574 * @hldev: HAL device handle.
4575 * @op: One of the xge_hal_device_intr_e enumerated values specifying
4576 * the type(s) of interrupts to disable.
4578 * Disable Xframe interrupts.
4580 * See also: xge_hal_device_intr_enable()
4583 xge_hal_device_intr_disable(xge_hal_device_t *hldev)
4586 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4589 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4591 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4593 * To disable MSI-X, MSI also needs to be disabled,
4594 * due to a bug in the herc NIC.
4596 __hal_device_msi_intr_endis(hldev, 0);
4599 /* Disable the MSI-X interrupt for each configured channel */
4600 xge_list_for_each(item, &hldev->fifo_channels) {
4601 xge_hal_channel_t *channel;
4603 channel = xge_container_of(item,
4604 xge_hal_channel_t, item);
4606 /* 0 vector is reserved for alarms */
4607 if (!channel->msix_idx)
4610 __hal_device_msix_intr_endis(hldev, channel, 0);
/* mask all Tx traffic interrupts at the device level */
4614 xge_os_pio_mem_write64(hldev->pdev,
4615 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4616 &bar0->tx_traffic_mask);
4618 xge_list_for_each(item, &hldev->ring_channels) {
4619 xge_hal_channel_t *channel;
4621 channel = xge_container_of(item,
4622 xge_hal_channel_t, item);
4624 /* 0 vector is reserved for alarms */
4625 if (!channel->msix_idx)
4628 __hal_device_msix_intr_endis(hldev, channel, 0);
/* mask all Rx traffic interrupts at the device level */
4631 xge_os_pio_mem_write64(hldev->pdev,
4632 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4633 &bar0->rx_traffic_mask);
4637 * Disable traffic only interrupts.
4638 * Tx traffic interrupts are used only if the TTI feature is
4642 if (hldev->tti_enabled)
4643 val64 = XGE_HAL_TX_TRAFFIC_INTR;
4645 val64 |= XGE_HAL_RX_TRAFFIC_INTR |
4646 XGE_HAL_TX_PIC_INTR |
4648 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
4649 XGE_HAL_SCHED_INTR : 0);
4650 __hal_device_intr_mgmt(hldev, val64, 0);
/* finally mask everything via the general interrupt mask register */
4652 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4653 0xFFFFFFFFFFFFFFFFULL,
4654 &bar0->general_int_mask);
4657 /* disable all configured PRCs */
4658 xge_list_for_each(item, &hldev->ring_channels) {
4659 xge_hal_channel_h channel;
4660 channel = xge_container_of(item, xge_hal_channel_t, item);
4661 __hal_ring_prc_disable(channel);
4664 xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
4669 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
4670 * @hldev: HAL device handle.
4672 * Enable Xframe multicast addresses.
4673 * Returns: XGE_HAL_OK on success.
4674 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast
4675 * feature within the time(timeout).
4677 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
4680 xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
4683 xge_hal_pci_bar0_t *bar0;
4684 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4687 return XGE_HAL_ERR_INVALID_DEVICE;
/* already enabled - refcount guard (elided early-return path) */
4689 if (hldev->mcast_refcnt)
/* Herc keeps the all-multicast entry at a different RMAC offset */
4692 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4693 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4695 hldev->mcast_refcnt = 1;
4697 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4699 /* Enable all Multicast addresses */
4700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4701 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
4702 &bar0->rmac_addr_data0_mem);
4703 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4704 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
4705 &bar0->rmac_addr_data1_mem);
/* issue the write-entry strobe command and wait for completion */
4706 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4707 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4708 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4709 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4710 &bar0->rmac_addr_cmd_mem);
4712 if (__hal_device_register_poll(hldev,
4713 &bar0->rmac_addr_cmd_mem, 0,
4714 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4715 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4716 /* upper layer may require to repeat */
4717 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4724 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
4725 * @hldev: HAL device handle.
4727 * Disable Xframe multicast addresses.
4728 * Returns: XGE_HAL_OK - success.
4729 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast
4730 * feature within the time(timeout).
4732 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
4735 xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
4738 xge_hal_pci_bar0_t *bar0;
4739 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4742 return XGE_HAL_ERR_INVALID_DEVICE;
/* not enabled - refcount guard (elided early-return path) */
4744 if (hldev->mcast_refcnt == 0)
/* Herc keeps the all-multicast entry at a different RMAC offset */
4747 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4748 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4750 hldev->mcast_refcnt = 0;
4752 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4754 /* Disable all Multicast addresses */
4755 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4756 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
4757 &bar0->rmac_addr_data0_mem);
4758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4759 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
4760 &bar0->rmac_addr_data1_mem);
/* issue the write-entry strobe command and wait for completion */
4762 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4763 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4764 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4765 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4766 &bar0->rmac_addr_cmd_mem);
4768 if (__hal_device_register_poll(hldev,
4769 &bar0->rmac_addr_cmd_mem, 0,
4770 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4771 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4772 /* upper layer may require to repeat */
4773 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
4780 * xge_hal_device_promisc_enable - Enable promiscuous mode.
4781 * @hldev: HAL device handle.
4783 * Enable promiscuous mode of Xframe operation.
4785 * See also: xge_hal_device_promisc_disable().
4788 xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
4791 xge_hal_pci_bar0_t *bar0;
4795 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* no-op if already promiscuous */
4797 if (!hldev->is_promisc) {
4798 /* Put the NIC into promiscuous mode */
4799 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4801 val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg writes must be unlocked via the RMAC cfg key first */
4803 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4804 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4805 &bar0->rmac_cfg_key);
4807 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4811 hldev->is_promisc = 1;
4812 xge_debug_device(XGE_TRACE,
4813 "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
4814 (unsigned long long)val64);
4819 * xge_hal_device_promisc_disable - Disable promiscuous mode.
4820 * @hldev: HAL device handle.
4822 * Disable promiscuous mode of Xframe operation.
4824 * See also: xge_hal_device_promisc_enable().
4827 xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
4830 xge_hal_pci_bar0_t *bar0;
4834 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* no-op if not currently promiscuous */
4836 if (hldev->is_promisc) {
4837 /* Remove the NIC from promiscuous mode */
4838 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4840 val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg writes must be unlocked via the RMAC cfg key first */
4842 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4843 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4844 &bar0->rmac_cfg_key);
4846 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4850 hldev->is_promisc = 0;
4851 xge_debug_device(XGE_TRACE,
4852 "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
4853 (unsigned long long)val64);
4858 * xge_hal_device_macaddr_get - Get MAC addresses.
4859 * @hldev: HAL device handle.
4860 * @index: MAC address index, in the range from 0 to
4861 * XGE_HAL_MAX_MAC_ADDRESSES.
4862 * @macaddr: MAC address. Returned by HAL.
4864 * Retrieve one of the stored MAC addresses by reading non-volatile
4865 * memory on the chip.
4867 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
4869 * Returns: XGE_HAL_OK - success.
4870 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
4871 * address within the time(timeout).
4872 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4874 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
4877 xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
4880 xge_hal_pci_bar0_t *bar0;
4884 if (hldev == NULL) {
4885 return XGE_HAL_ERR_INVALID_DEVICE;
4888 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4890 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
4891 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
4894 #ifdef XGE_HAL_HERC_EMULATION
/* emulation path: seed data registers before issuing the read */
4895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
4896 &bar0->rmac_addr_data0_mem);
4897 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
4898 &bar0->rmac_addr_data1_mem);
4899 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4900 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4901 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
4902 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4903 &bar0->rmac_addr_cmd_mem);
4905 /* poll until done */
4906 __hal_device_register_poll(hldev,
4907 &bar0->rmac_addr_cmd_mem, 0,
4908 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
4909 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
/* normal path: issue a read-entry strobe command for slot @index */
4913 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4914 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4915 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4916 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4917 &bar0->rmac_addr_cmd_mem);
4919 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4920 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4921 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4922 /* upper layer may require to repeat */
4923 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* unpack the 6 address bytes from the top of the 64-bit data word */
4926 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4927 &bar0->rmac_addr_data0_mem);
4928 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4929 (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
4932 #ifdef XGE_HAL_HERC_EMULATION
/* emulation returns a fixed 00:01:00:00:00:00 address */
4933 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4934 (*macaddr)[i] = (u8)0;
4936 (*macaddr)[1] = (u8)1;
4944 * xge_hal_device_macaddr_set - Set MAC address.
4945 * @hldev: HAL device handle.
4946 * @index: MAC address index, in the range from 0 to
4947 * XGE_HAL_MAX_MAC_ADDRESSES.
4948 * @macaddr: New MAC address to configure.
4950 * Configure one of the available MAC address "slots".
4952 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
4954 * Returns: XGE_HAL_OK - success.
4955 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
4956 * address within the time(timeout).
4957 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4959 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
4962 xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
4965 xge_hal_pci_bar0_t *bar0 =
4966 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4970 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
4971 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
/* pack the 6 address bytes into a 64-bit word, MSB first
 * (shift between iterations is elided in this listing) */
4974 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4975 temp64 |= macaddr[i];
4980 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4981 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
4982 &bar0->rmac_addr_data0_mem);
4984 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4985 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4986 &bar0->rmac_addr_data1_mem);
/* issue the write-entry strobe command for slot @index */
4988 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4989 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4990 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4992 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4993 &bar0->rmac_addr_cmd_mem);
4995 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4996 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4997 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4998 /* upper layer may require to repeat */
4999 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5006 * xge_hal_device_macaddr_clear - Set MAC address.
5007 * @hldev: HAL device handle.
5008 * @index: MAC address index, in the range from 0 to
5009 * XGE_HAL_MAX_MAC_ADDRESSES.
5011 * Clear one of the available MAC address "slots".
5013 * Returns: XGE_HAL_OK - success.
5014 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
5015 * address within the time(timeout).
5016 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
5018 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
5021 xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
5023 xge_hal_status_e status;
/* "clear" == program the broadcast address into the slot */
5024 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
5026 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
5027 if (status != XGE_HAL_OK) {
5028 xge_debug_device(XGE_ERR, "%s",
5029 "Not able to set the mac addr");
5037 * xge_hal_device_macaddr_find - Finds index in the rmac table.
5038 * @hldev: HAL device handle.
5039 * @wanted: Wanted MAC address.
5041 * See also: xge_hal_device_macaddr_set().
5044 xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
5048 if (hldev == NULL) {
5049 return XGE_HAL_ERR_INVALID_DEVICE;
/* linear scan; slot 0 is the primary address and is skipped */
5052 for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
/* read errors are ignored here; the comparison simply won't match */
5054 (void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
5055 if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
5064 * xge_hal_device_mtu_set - Set MTU.
5065 * @hldev: HAL device handle.
5066 * @new_mtu: New MTU size to configure.
5068 * Set new MTU value. Example, to use jumbo frames:
5069 * xge_hal_device_mtu_set(my_device, my_channel, 9600);
5071 * Returns: XGE_HAL_OK on success.
5072 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control
5074 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI
5076 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
5077 * a "quiescent" state.
5080 xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu)
5082 xge_hal_status_e status;
5085 * reset needed if 1) new MTU differs, and
5086 * 2a) device was closed or
5087 * 2b) device is being upped for first time.
5089 if (hldev->config.mtu != new_mtu) {
5090 if (hldev->reset_needed_after_close ||
5091 !hldev->mtu_first_time_set) {
5092 status = xge_hal_device_reset(hldev);
5093 if (status != XGE_HAL_OK) {
5094 xge_debug_device(XGE_TRACE, "%s",
5095 "fatal: can not reset the device");
5099 /* store the new MTU in device, reset will use it */
5100 hldev->config.mtu = new_mtu;
5101 xge_debug_device(XGE_TRACE, "new MTU %d applied",
/* remember that an MTU has been applied at least once */
5105 if (!hldev->mtu_first_time_set)
5106 hldev->mtu_first_time_set = 1;
5112 * xge_hal_device_initialize - Initialize Xframe device.
5113 * @hldev: HAL device handle.
5114 * @attr: pointer to xge_hal_device_attr_t structure
5115 * @device_config: Configuration to be _applied_ to the device,
5116 * For the Xframe configuration "knobs" please
5117 * refer to xge_hal_device_config_t and Xframe
5120 * Initialize Xframe device. Note that all the arguments of this public API
5121 * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with
5122 * OS to find new Xframe device, locate its PCI and memory spaces.
5124 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL
5125 * to enable the latter to perform Xframe hardware initialization.
5127 * Returns: XGE_HAL_OK - success.
5128 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized.
5129 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not
5131 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
5132 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid.
5133 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address in not valid.
5134 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
5135 * address within the time(timeout) or TTI/RTI initialization failed.
5136 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control.
5137 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT -Device is not queiscent.
5139 * See also: xge_hal_device_terminate(), xge_hal_status_e{}
5140 * xge_hal_device_attr_t{}.
5143 xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
5144 xge_hal_device_config_t *device_config)
5147 xge_hal_status_e status;
5148 xge_hal_channel_t *channel;
5151 int total_dram_size, ring_auto_dram_cfg, left_dram_size;
5152 int total_dram_size_max = 0;
5154 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing",
5155 (unsigned long long)(ulong_t)hldev);
/* HAL driver singleton must be up before any device init */
5158 if (g_xge_hal_driver == NULL ||
5159 !g_xge_hal_driver->is_initialized) {
5160 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED;
/* start from a clean slate; caller allocated hldev */
5163 xge_os_memzero(hldev, sizeof(xge_hal_device_t));
5166 * validate a common part of Xframe-I/II configuration
5167 * (and run check_card() later, once PCI inited - see below)
5169 status = __hal_device_config_check_common(device_config);
5170 if (status != XGE_HAL_OK)
/* keep private copies of config and attributes */
5174 xge_os_memcpy(&hldev->config, device_config,
5175 sizeof(xge_hal_device_config_t));
5177 /* save original attr */
5178 xge_os_memcpy(&hldev->orig_attr, attr,
5179 sizeof(xge_hal_device_attr_t));
5181 /* initialize rxufca_intr_thres */
5182 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
5184 hldev->regh0 = attr->regh0;
5185 hldev->regh1 = attr->regh1;
5186 hldev->regh2 = attr->regh2;
5187 hldev->isrbar0 = hldev->bar0 = attr->bar0;
5188 hldev->bar1 = attr->bar1;
5189 hldev->bar2 = attr->bar2;
5190 hldev->pdev = attr->pdev;
5191 hldev->irqh = attr->irqh;
5192 hldev->cfgh = attr->cfgh;
5194 /* set initial bimodal timer for bimodal adaptive schema */
5195 hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us;
/* per-device event queue for deferred/serialized event handling */
5197 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh,
5198 g_xge_hal_driver->config.queue_size_initial,
5199 g_xge_hal_driver->config.queue_size_max,
5200 __hal_device_event_queued, hldev);
5201 if (hldev->queueh == NULL)
5202 return XGE_HAL_ERR_OUT_OF_MEMORY;
5204 hldev->magic = XGE_HAL_MAGIC;
5206 xge_assert(hldev->regh0);
5207 xge_assert(hldev->regh1);
5208 xge_assert(hldev->bar0);
5209 xge_assert(hldev->bar1);
5210 xge_assert(hldev->pdev);
5211 xge_assert(hldev->irqh);
5212 xge_assert(hldev->cfgh);
5214 /* initialize some PCI/PCI-X fields of this PCI device. */
5215 __hal_device_pci_init(hldev);
5218 * initlialize lists to properly handling a potential
5221 xge_list_init(&hldev->free_channels);
5222 xge_list_init(&hldev->fifo_channels);
5223 xge_list_init(&hldev->ring_channels);
/* card-specific fixups and config validation (Xena vs Herc) */
5225 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
5226 /* fixups for xena */
5227 hldev->config.rth_en = 0;
5228 hldev->config.rth_spdm_en = 0;
5229 hldev->config.rts_mac_en = 0;
5230 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA;
5232 status = __hal_device_config_check_xena(device_config);
5233 if (status != XGE_HAL_OK) {
5234 xge_hal_device_terminate(hldev);
5237 if (hldev->config.bimodal_interrupts == 1) {
5238 xge_hal_device_terminate(hldev);
5239 return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED;
5240 } else if (hldev->config.bimodal_interrupts ==
5241 XGE_HAL_DEFAULT_USE_HARDCODE)
5242 hldev->config.bimodal_interrupts = 0;
5243 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
5244 /* fixups for herc */
5245 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC;
5246 status = __hal_device_config_check_herc(device_config);
5247 if (status != XGE_HAL_OK) {
5248 xge_hal_device_terminate(hldev);
5251 if (hldev->config.bimodal_interrupts ==
5252 XGE_HAL_DEFAULT_USE_HARDCODE)
5253 hldev->config.bimodal_interrupts = 1;
5255 xge_debug_device(XGE_ERR,
5256 "detected unknown device_id 0x%x", hldev->device_id);
5257 xge_hal_device_terminate(hldev);
5258 return XGE_HAL_ERR_BAD_DEVICE_ID;
5261 /* allocate and initialize FIFO types of channels according to
5263 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
5264 if (!device_config->fifo.queue[i].configured)
5267 channel = __hal_channel_allocate(hldev, i,
5268 XGE_HAL_CHANNEL_TYPE_FIFO);
5269 if (channel == NULL) {
5270 xge_debug_device(XGE_ERR,
5271 "fifo: __hal_channel_allocate failed");
5272 xge_hal_device_terminate(hldev);
5273 return XGE_HAL_ERR_OUT_OF_MEMORY;
5275 /* add new channel to the device */
5276 xge_list_insert(&channel->item, &hldev->free_channels);
5280 * automatic DRAM adjustment
5282 total_dram_size = 0;
5283 ring_auto_dram_cfg = 0;
/* first pass: sum explicitly-configured DRAM, count "auto" rings */
5284 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5285 if (!device_config->ring.queue[i].configured)
5287 if (device_config->ring.queue[i].dram_size_mb ==
5288 XGE_HAL_DEFAULT_USE_HARDCODE) {
5289 ring_auto_dram_cfg++;
5292 total_dram_size += device_config->ring.queue[i].dram_size_mb;
5294 left_dram_size = total_dram_size_max - total_dram_size;
/* fail if over budget, or if the auto rings would each get 0 MB */
5295 if (left_dram_size < 0 ||
5296 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) {
5297 xge_debug_device(XGE_ERR,
5298 "ring config: exceeded DRAM size %d MB",
5299 total_dram_size_max);
5300 xge_hal_device_terminate(hldev);
5301 return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
5305 * allocate and initialize RING types of channels according to
5308 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5309 if (!device_config->ring.queue[i].configured)
/* second pass: split the leftover DRAM evenly among auto rings */
5312 if (device_config->ring.queue[i].dram_size_mb ==
5313 XGE_HAL_DEFAULT_USE_HARDCODE) {
5314 hldev->config.ring.queue[i].dram_size_mb =
5315 device_config->ring.queue[i].dram_size_mb =
5316 left_dram_size / ring_auto_dram_cfg;
5319 channel = __hal_channel_allocate(hldev, i,
5320 XGE_HAL_CHANNEL_TYPE_RING);
5321 if (channel == NULL) {
5322 xge_debug_device(XGE_ERR,
5323 "ring: __hal_channel_allocate failed");
5324 xge_hal_device_terminate(hldev);
5325 return XGE_HAL_ERR_OUT_OF_MEMORY;
5327 /* add new channel to the device */
5328 xge_list_insert(&channel->item, &hldev->free_channels);
5331 /* get subsystem IDs */
5332 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5333 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
5335 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5336 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
5338 xge_debug_device(XGE_TRACE,
5339 "subsystem_id %04x:%04x",
5340 subsys_vendor, subsys_device);
5342 /* reset device initially */
5343 (void) __hal_device_reset(hldev);
5345 /* set host endian before, to assure proper action */
5346 status = __hal_device_set_swapper(hldev);
5347 if (status != XGE_HAL_OK) {
5348 xge_debug_device(XGE_ERR,
5349 "__hal_device_set_swapper failed");
5350 xge_hal_device_terminate(hldev);
5351 (void) __hal_device_reset(hldev);
5355 #ifndef XGE_HAL_HERC_EMULATION
5356 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
5357 __hal_device_xena_fix_mac(hldev);
5360 /* MAC address initialization.
5361 * For now only one mac address will be read and used. */
5362 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]);
5363 if (status != XGE_HAL_OK) {
5364 xge_debug_device(XGE_ERR,
5365 "xge_hal_device_macaddr_get failed");
5366 xge_hal_device_terminate(hldev);
/* all-FF address means the NVRAM slot is empty/invalid */
5370 if (hldev->macaddr[0][0] == 0xFF &&
5371 hldev->macaddr[0][1] == 0xFF &&
5372 hldev->macaddr[0][2] == 0xFF &&
5373 hldev->macaddr[0][3] == 0xFF &&
5374 hldev->macaddr[0][4] == 0xFF &&
5375 hldev->macaddr[0][5] == 0xFF) {
5376 xge_debug_device(XGE_ERR,
5377 "xge_hal_device_macaddr_get returns all FFs");
5378 xge_hal_device_terminate(hldev);
5379 return XGE_HAL_ERR_INVALID_MAC_ADDRESS;
5382 xge_debug_device(XGE_TRACE,
5383 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
5384 hldev->macaddr[0][0], hldev->macaddr[0][1],
5385 hldev->macaddr[0][2], hldev->macaddr[0][3],
5386 hldev->macaddr[0][4], hldev->macaddr[0][5]);
5388 status = __hal_stats_initialize(&hldev->stats, hldev);
5389 if (status != XGE_HAL_OK) {
5390 xge_debug_device(XGE_ERR,
5391 "__hal_stats_initialize failed");
5392 xge_hal_device_terminate(hldev);
5396 status = __hal_device_hw_initialize(hldev);
5397 if (status != XGE_HAL_OK) {
5398 xge_debug_device(XGE_ERR,
5399 "__hal_device_hw_initialize failed");
5400 xge_hal_device_terminate(hldev);
5403 hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
5404 if (hldev->dump_buf == NULL) {
/* BUG(review): copy-pasted message - this path is a dump_buf
 * allocation failure, not a hw_initialize failure. Fix the
 * string when the full function body is available. */
5405 xge_debug_device(XGE_ERR,
5406 "__hal_device_hw_initialize failed");
5407 xge_hal_device_terminate(hldev);
5408 return XGE_HAL_ERR_OUT_OF_MEMORY;
5412 /* Xena-only: need to serialize fifo posts across all device fifos */
5413 #if defined(XGE_HAL_TX_MULTI_POST)
5414 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev);
5415 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5416 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh);
5418 /* Getting VPD data */
5419 __hal_device_get_vpd_data(hldev);
5421 hldev->is_initialized = 1;
5427 * xge_hal_device_terminating - Mark the device as 'terminating'.
5428 * @devh: HAL device handle.
5430 * Mark the device as 'terminating', going to terminate. Can be used
5431 * to serialize termination with other running processes/contexts.
5433 * See also: xge_hal_device_terminate().
5436 xge_hal_device_terminating(xge_hal_device_h devh)
5438 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
5440 xge_hal_channel_t *channel;
5441 #if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5442 unsigned long flags=0;
5446 * go through each opened tx channel and aquire
5447 * lock, so it will serialize with HAL termination flag
5449 xge_list_for_each(item, &hldev->fifo_channels) {
5450 channel = xge_container_of(item, xge_hal_channel_t, item);
/* take the channel's reserve lock so no dtr reservation races
 * with setting the terminating flag */
5451 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5452 xge_os_spin_lock(&channel->reserve_lock);
5453 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5454 xge_os_spin_lock_irq(&channel->reserve_lock, flags);
5457 channel->terminating = 1;
5459 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5460 xge_os_spin_unlock(&channel->reserve_lock);
5461 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5462 xge_os_spin_unlock_irq(&channel->reserve_lock, flags);
/* finally flag the device itself */
5466 hldev->terminating = 1;
5470 * xge_hal_device_terminate - Terminate Xframe device.
5471 * @hldev: HAL device handle.
5473 * Terminate HAL device.
5475 * See also: xge_hal_device_initialize().
5478 xge_hal_device_terminate(xge_hal_device_t *hldev)
5480 xge_assert(g_xge_hal_driver != NULL);
5481 xge_assert(hldev != NULL);
5482 xge_assert(hldev->magic == XGE_HAL_MAGIC);
5484 xge_queue_flush(hldev->queueh);
5486 hldev->terminating = 1;
5487 hldev->is_initialized = 0;
/* poison the magic so stale handles are caught by asserts */
5489 hldev->magic = XGE_HAL_DEAD;
5491 #if defined(XGE_HAL_TX_MULTI_POST)
5492 xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
5493 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5494 xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
5497 xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
5498 (unsigned long long)(ulong_t)hldev);
/* all channels must already be closed (moved to free_channels) */
5500 xge_assert(xge_list_is_empty(&hldev->fifo_channels));
5501 xge_assert(xge_list_is_empty(&hldev->ring_channels));
5503 if (hldev->stats.is_initialized) {
5504 __hal_stats_terminate(&hldev->stats);
5507 /* close if open and free all channels */
5508 while (!xge_list_is_empty(&hldev->free_channels)) {
5509 xge_hal_channel_t *channel = (xge_hal_channel_t*)
5510 hldev->free_channels.next;
5512 xge_assert(!channel->is_open);
5513 xge_list_remove(&channel->item);
5514 __hal_channel_free(channel);
5517 if (hldev->queueh) {
5518 xge_queue_destroy(hldev->queueh);
/* release the SPDM table (entries array + pointer array + lock) */
5521 if (hldev->spdm_table) {
5522 xge_os_free(hldev->pdev,
5523 hldev->spdm_table[0],
5524 (sizeof(xge_hal_spdm_entry_t) *
5525 hldev->spdm_max_entries));
5526 xge_os_free(hldev->pdev,
5528 (sizeof(xge_hal_spdm_entry_t *) *
5529 hldev->spdm_max_entries));
5530 xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
5531 hldev->spdm_table = NULL;
5534 if (hldev->dump_buf) {
5535 xge_os_free(hldev->pdev, hldev->dump_buf,
5536 XGE_HAL_DUMP_BUF_SIZE);
5537 hldev->dump_buf = NULL;
/* restore the BIOS-saved PCI config space word-by-word */
5540 if (hldev->device_id != 0) {
5543 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
5544 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
5545 for (j = 0; j < pcisize; j++) {
5546 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
5547 *((u32*)&hldev->pci_config_space_bios + j));
5552 * __hal_device_get_vpd_data - Getting vpd_data.
5554 * @hldev: HAL device handle.
5556 * Getting product name and serial number from vpd capabilites structure
5560 __hal_device_get_vpd_data(xge_hal_device_t *hldev)
5564 int index = 0, count, fail = 0;
/* VPD capability offset differs between Xena and Herc cards */
5565 u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR;
5566 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
5567 vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR;
/* defaults used if the VPD read fails below */
5569 xge_os_strcpy((char *) hldev->vpd_data.product_name,
5570 "10 Gigabit Ethernet Adapter");
5571 xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available");
5573 vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16);
5574 if ( vpd_data == 0 )
/* read the VPD area 4 bytes at a time through the VPD capability:
 * write address, then poll the flag byte for read-complete */
5577 for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) {
5578 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index);
5579 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data);
5580 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0);
5581 for (count = 0; count < 5; count++ ) {
5583 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data);
5584 if (data == XGE_HAL_VPD_READ_COMPLETE)
5589 xge_os_printf("ERR, Reading VPD data failed");
5594 xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4),
5595 (u32 *)&vpd_data[index]);
5600 /* read serial number of adapter */
5601 for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) {
5602 if ((vpd_data[count] == 'S') &&
5603 (vpd_data[count + 1] == 'N') &&
5604 (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) {
5605 memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH);
5606 memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3],
5607 vpd_data[count + 2]);
/* product name: length byte at vpd_data[1], text from vpd_data[3] */
5612 if (vpd_data[1] < XGE_HAL_VPD_LENGTH) {
5613 memset(hldev->vpd_data.product_name, 0, vpd_data[1]);
5614 memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]);
5619 xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16);
/**
 * xge_hal_device_handle_tcode - Handle transfer code.
 * @channelh: Channel handle.
 * @dtrh: Descriptor handle.
 * @t_code: One of the enumerated (and documented in the Xframe user guide)
 *          transfer codes.
 *
 * Handle descriptor's transfer code. The latter comes with each completed
 * descriptor, see xge_hal_fifo_dtr_next_completed() and
 * xge_hal_ring_dtr_next_completed().
 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
 *
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK - for success.
 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
 */
xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
               xge_hal_dtr_h dtrh, u8 t_code)
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;

	    /* t_code is outside the range this channel type understands */
	    xge_os_printf("invalid t_code %d", t_code);

	if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
	    /* bump per-t_code transmit error counter */
	    hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	/* debug build: dump the four 64-bit words of the offending TxD */
	xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
	XGE_OS_LLXFMT":"XGE_OS_LLXFMT,
	txdp->control_1, txdp->control_2, txdp->buffer_pointer,
	txdp->host_control);

	    /* handle link "down" immediately without going through
	     * xge_hal_device_poll() routine. */
	    if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
	        if (hldev->link_state != XGE_HAL_LINK_DOWN) {
	            xge_hal_pci_bar0_t *bar0 =
	            (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	            hldev->link_state = XGE_HAL_LINK_DOWN;
	            /* turn the adapter LED off to reflect link loss */
	            val64 = xge_os_pio_mem_read64(hldev->pdev,
	                hldev->regh0, &bar0->adapter_control);
	            val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
	            xge_os_pio_mem_write64(hldev->pdev,
	                hldev->regh0, val64,
	                &bar0->adapter_control);
	            /* notify ULD synchronously, bypassing the event queue */
	            g_xge_hal_driver->uld_callbacks.link_down(
	                    hldev->upper_layer_info);
	    } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
	           t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
	        /* target abort is unrecoverable from here */
	        __hal_device_handle_targetabort(hldev);
	        return XGE_HAL_ERR_CRITICAL;
	    return XGE_HAL_ERR_PKT_DROP;
	} else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
	    /* bump per-t_code receive error counter */
	    hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;

#if defined(XGE_HAL_DEBUG_BAD_TCODE)
	/* debug build: dump the offending RxD */
	xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
	xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
	    ":"XGE_OS_LLXFMT, rxdp->control_1,
	    rxdp->control_2, rxdp->buffer0_ptr,
	    rxdp->host_control);

	    if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
	        hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
	        __hal_device_handle_eccerr(hldev, "rxd_t_code",
	        return XGE_HAL_ERR_CRITICAL;
	    } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
	           t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
	        hldev->stats.sw_dev_err_stats.parity_err_cnt++;
	        __hal_device_handle_parityerr(hldev, "rxd_t_code",
	        return XGE_HAL_ERR_CRITICAL;
	    /* do not drop if detected unknown IPv6 extension */
	    } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
	        return XGE_HAL_ERR_PKT_DROP;
/**
 * xge_hal_device_link_state - Get link state.
 * @devh: HAL device handle.
 * @ls: Link state, see xge_hal_device_link_state_e{}. Filled in by this
 *      function from the cached hldev->link_state.
 *
 * Returns: XGE_HAL_OK.
 * See also: xge_hal_device_link_state_e{}.
 */
xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
            xge_hal_device_link_state_e *ls)
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	xge_assert(ls != NULL);
	/* report the last state recorded by the HAL, no hardware access */
	*ls = hldev->link_state;
/**
 * xge_hal_device_sched_timer - Configure scheduled device interrupt.
 * @devh: HAL device handle.
 * @interval_us: Time interval, in microseconds.
 *              Unlike transmit and receive interrupts,
 *              the scheduled interrupt is generated independently of
 *              traffic, but purely based on time.
 * @one_shot: 1 - generate scheduled interrupt only once.
 *            0 - generate scheduled interrupt periodically at the specified
 *            @interval_us interval.
 *
 * (Re-)configure scheduled interrupt. Can be called at runtime to change
 * the setting, generate one-shot interrupts based on the resource and/or
 * traffic conditions, other purposes.
 * See also: xge_hal_device_config_t{}.
 */
void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 =
	    (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	/* convert microseconds into PCI clock ticks */
	unsigned int interval = hldev->config.pci_freq_mherz * interval_us;

	/* Herc needs its timer intervals adjusted; no-op otherwise
	 * (see __hal_fix_time_ival_herc) */
	interval = __hal_fix_time_ival_herc(hldev, interval);

	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	              &bar0->scheduled_int_ctrl);
	/* replace the period field, preserve the control bits */
	val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK;
	val64 |= XGE_HAL_SCHED_INT_PERIOD(interval);
	    val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT;
	    val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
	    val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN;

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	         val64, &bar0->scheduled_int_ctrl);

	xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
	          (unsigned long long)val64,
	          interval ? "enabled" : "disabled");
/**
 * xge_hal_device_check_id - Verify device ID.
 * @devh: HAL device handle.
 *
 * Map the PCI device id cached in hldev->device_id onto the adapter
 * family (Xena / Herc / Titan).
 *
 * Returns: one of the xge_hal_card_e{} enumerated types.
 * See also: xge_hal_card_e{}.
 */
xge_hal_device_check_id(xge_hal_device_h devh)
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	switch (hldev->device_id) {
	case XGE_PCI_DEVICE_ID_XENA_1:
	case XGE_PCI_DEVICE_ID_XENA_2:
	    return XGE_HAL_CARD_XENA;
	case XGE_PCI_DEVICE_ID_HERC_1:
	case XGE_PCI_DEVICE_ID_HERC_2:
	    return XGE_HAL_CARD_HERC;
	case XGE_PCI_DEVICE_ID_TITAN_1:
	case XGE_PCI_DEVICE_ID_TITAN_2:
	    return XGE_HAL_CARD_TITAN;
	/* unrecognized id: report UNKNOWN rather than guessing */
	    return XGE_HAL_CARD_UNKNOWN;
/**
 * xge_hal_device_pci_info_get - Get PCI bus information such as width,
 * frequency, and mode from previously stored values.
 * @devh: HAL device handle.
 * @pci_mode: pointer to a variable of enumerated type
 *            xge_hal_pci_mode_e{}.
 * @bus_frequency: pointer to a variable of enumerated type
 *                 xge_hal_pci_bus_frequency_e{}.
 * @bus_width: pointer to a variable of enumerated type
 *             xge_hal_pci_bus_width_e{}.
 *
 * Get pci mode, frequency, and PCI bus width. The values are served from
 * the hldev cache; the hardware is not touched.
 * Returns: one of the xge_hal_status_e{} enumerated types.
 * XGE_HAL_OK - for success.
 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle.
 * See Also: xge_hal_pci_mode_e, xge_hal_pci_bus_frequency_e,
 * xge_hal_pci_bus_width_e.
 */
xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
        xge_hal_pci_bus_frequency_e *bus_frequency,
        xge_hal_pci_bus_width_e *bus_width)
	xge_hal_status_e rc_status;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;

	/* validate the handle before dereferencing the cached fields */
	if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) {
	    rc_status = XGE_HAL_ERR_INVALID_DEVICE;
	    xge_debug_device(XGE_ERR,
	        "xge_hal_device_pci_info_get error, rc %d for device %p",
	    *pci_mode = hldev->pci_mode;
	    *bus_frequency = hldev->bus_frequency;
	    *bus_width = hldev->bus_width;
	    rc_status = XGE_HAL_OK;
/**
 * xge_hal_reinitialize_hw
 * @hldev: private member of the device structure.
 *
 * This function will soft reset the NIC and re-initialize all the
 * I/O registers to the values they had after its initial initialization
 * through the probe function. On hardware init failure the device is
 * terminated and reset again.
 */
int xge_hal_reinitialize_hw(xge_hal_device_t * hldev)
	(void) xge_hal_device_reset(hldev);
	if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) {
	    /* could not bring the hardware back up: tear down and reset */
	    xge_hal_device_terminate(hldev);
	    (void) __hal_device_reset(hldev);
/*
 * __hal_read_spdm_entry_line
 * @hldev: pointer to xge_hal_device_t structure
 * @spdm_line: spdm line in the spdm entry to be read.
 * @spdm_entry: spdm entry of the spdm_line in the SPDM table.
 * @spdm_line_val: Contains the value stored in the spdm line.
 *
 * SPDM table contains up to a maximum of 256 spdm entries.
 * Each spdm entry contains 8 lines and each line stores 8 bytes.
 * This function reads the spdm line (addressed by @spdm_line)
 * of the spdm entry (addressed by @spdm_entry) in
 * the SPDM table.
 */
__hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
            u16 spdm_entry, u64 *spdm_line_val)
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;

	/* issue a strobed read command for (entry, line) */
	val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE |
	    XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) |
	    XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry);

	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	          &bar0->rts_rth_spdm_mem_ctrl);

	/* poll until done */
	if (__hal_device_register_poll(hldev,
	    &bar0->rts_rth_spdm_mem_ctrl, 0,
	    XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    /* strobe never cleared: caller may retry later */
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;

	*spdm_line_val = xge_os_pio_mem_read64(hldev->pdev,
	        hldev->regh0, &bar0->rts_rth_spdm_mem_data);
/*
 * __hal_get_free_spdm_entry
 * @hldev: pointer to xge_hal_device_t structure
 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM
 *              table, on success.
 *
 * This function returns an index of an unused spdm entry in the SPDM
 * table, cross-checked against the hardware table.
 */
static xge_hal_status_e
__hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry)
	xge_hal_status_e status;
	u64 spdm_line_val=0;

	/*
	 * Search in the local SPDM table for a free slot.
	 */
	for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) {
	    if (hldev->spdm_table[*spdm_entry]->in_use) {

	if (*spdm_entry >= hldev->spdm_max_entries) {
	    return XGE_HAL_ERR_SPDM_TABLE_FULL;

	/*
	 * Make sure that the corresponding spdm entry in the SPDM
	 * table is actually free.
	 * Seventh line of the spdm entry contains information about
	 * whether the entry is free or not.
	 */
	if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry,
	            &spdm_line_val)) != XGE_HAL_OK) {

	/* BIT(63) in spdm_line 7 corresponds to entry_enable bit */
	if ((spdm_line_val & BIT(63))) {
	    /* hardware says "in use" but local table said "free" */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	          "consistent with the actual one for the spdm "
	          "entry %d", *spdm_entry);
	    return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
/*
 * __hal_calc_jhash - Calculate Jenkins hash.
 * @msg: Jenkins hash algorithm key.
 * @length: Length of the key.
 * @golden_ratio: Jenkins hash golden ratio.
 * @init_value: Jenkins hash initial value.
 *
 * This function implements the Jenkins based algorithm used for the
 * calculation of the RTH hash. The key is consumed 12 bytes per round
 * with a tail switch for the remaining 0-11 bytes.
 * Returns: Jenkins hash value.
 */
__hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value)
	register u32 a,b,c,len;

	/*
	 * Set up the internal state
	 */
	a = b = golden_ratio;  /* the golden ratio; an arbitrary value */
	c = init_value;        /* the previous hash value */

	/* handle most of the key, 12 bytes per iteration */
	    a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16)
	         + ((u32)msg[3]<<24));
	    b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16)
	         + ((u32)msg[7]<<24));
	    c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16)
	         + ((u32)msg[11]<<24));
	    msg += 12; len -= 12;

	/* handle the last 11 bytes */
	switch(len)  /* all the case statements fall through */
	case 11: c+= ((u32)msg[10]<<24);
	case 10: c+= ((u32)msg[9]<<16);
	case 9 : c+= ((u32)msg[8]<<8);
	/* the first byte of c is reserved for the length */
	case 8 : b+= ((u32)msg[7]<<24);
	case 7 : b+= ((u32)msg[6]<<16);
	case 6 : b+= ((u32)msg[5]<<8);
	case 5 : b+= msg[4];
	case 4 : a+= ((u32)msg[3]<<24);
	case 3 : a+= ((u32)msg[2]<<16);
	case 2 : a+= ((u32)msg[1]<<8);
	case 1 : a+= msg[0];
	/* case 0: nothing left to add */

	/* report the result */
/**
 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table.
 * @devh: HAL device handle.
 * @src_ip: Source ip address(IPv4/IPv6).
 * @dst_ip: Destination ip address(IPv4/IPv6).
 * @l4_sp: L4 source port.
 * @l4_dp: L4 destination port.
 * @is_tcp: Set to 1, if the protocol is TCP.
 *          0, if the protocol is UDP.
 * @is_ipv4: Set to 1, if the protocol is IPv4.
 *           0, if the protocol is IPv6.
 * @tgt_queue: Target queue to route the receive packet.
 *
 * This function adds a new entry to the SPDM table.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry within
 * the time(timeout).
 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full.
 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry.
 *
 * See also: xge_hal_spdm_entry_remove{}.
 */
xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
        xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
        u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	u32 jhash_golden_ratio;
	u8 msg[XGE_HAL_JHASH_MSG_LEN];
	xge_hal_status_e status;

	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;

	/* reject target queues outside the valid ring range */
	if ((tgt_queue < XGE_HAL_MIN_RING_NUM) ||
	    (tgt_queue > XGE_HAL_MAX_RING_NUM)) {
	    return XGE_HAL_ERR_SPDM_INVALID_ENTRY;

	/*
	 * Calculate the jenkins hash.
	 */
	/*
	 * Create the Jenkins hash algorithm key.
	 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
	 * use L4 information. Otherwise key = {L3SA, L3DA}.
	 */
	    ipaddr_len = 4;   /* in bytes (IPv4 address) */

	/*
	 * Jenkins hash algorithm expects the key in the big endian
	 * format. Since key is the byte array, memcpy won't work in the
	 * case of little endian. So, the current code extracts each
	 * byte starting from MSB and stores it in the key.
	 */
	    for (off = 0; off < ipaddr_len; off++) {
	        u32 mask = vBIT32(0xff,(off*8),8);
	        int shift = 32-(off+1)*8;
	        msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
	        msg[off+ipaddr_len] =
	            (u8)((dst_ip->ipv4.addr & mask) >> shift);
	    /* IPv6: same MSB-first extraction over 64-bit words */
	    for (off = 0; off < ipaddr_len; off++) {
	        u64 mask = vBIT(0xff,(loc*8),8);
	        int shift = 64-(loc+1)*8;
	        msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
	        msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]

	/* key length so far: src + dst address bytes */
	off = (2*ipaddr_len);

	if (hldev->config.rth_spdm_use_l4) {
	    /* append L4 ports, big-endian */
	    msg[off] = (u8)((l4_sp & 0xff00) >> 8);
	    msg[off + 1] = (u8)(l4_sp & 0xff);
	    msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
	    msg[off + 3] = (u8)(l4_dp & 0xff);

	/*
	 * Calculate jenkins hash for this configuration.
	 * The hardware supplies the golden ratio (upper 32 bits) and the
	 * initial value (lower 32 bits) in rts_rth_jhash_cfg.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev,
	        &bar0->rts_rth_jhash_cfg);
	jhash_golden_ratio = (u32)(val64 >> 32);
	jhash_init_val = (u32)(val64 & 0xffffffff);

	jhash_value = __hal_calc_jhash(msg, off,

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Locate a free slot in the SPDM table. To avoid a search in the
	 * actual SPDM table, which is very expensive in terms of time,
	 * we are maintaining a local copy of the table and the search for
	 * the free entry is performed in the local table.
	 */
	if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
	    xge_os_spin_unlock(&hldev->spdm_lock);

	/*
	 * Add this entry to the SPDM table
	 */
	status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
	             is_tcp, is_ipv4, tgt_queue,
	             jhash_value, /* calculated jhash */

	xge_os_spin_unlock(&hldev->spdm_lock);
/**
 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
 * @devh: HAL device handle.
 * @src_ip: Source ip address(IPv4/IPv6).
 * @dst_ip: Destination ip address(IPv4/IPv6).
 * @l4_sp: L4 source port.
 * @l4_dp: L4 destination port.
 * @is_tcp: Set to 1, if the protocol is TCP.
 *          0, if the protocol is UDP.
 * @is_ipv4: Set to 1, if the protocol is IPv4.
 *           0, if the protocol is IPv6.
 *
 * This function removes an entry from the SPDM table.
 *
 * Returns: XGE_HAL_OK - success.
 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry within
 * the time(timeout).
 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
 * table.
 *
 * See also: xge_hal_spdm_entry_add{}.
 */
xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
            xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
            u8 is_tcp, u8 is_ipv4)
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	xge_hal_status_e status;
	u64 spdm_line_arr[8];

	if (!hldev->config.rth_spdm_en) {
	    return XGE_HAL_ERR_SPDM_NOT_ENABLED;

	xge_os_spin_lock(&hldev->spdm_lock);

	/*
	 * Poll the rxpic_int_reg register until spdm ready bit is set or
	 * timeout expires.
	 */
	if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
	        XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	        XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {

	    /* upper layer may require to repeat */
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;

	/*
	 * Clear the SPDM READY bit.
	 */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	               &bar0->rxpic_int_reg);
	val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	               &bar0->rxpic_int_reg);

	/*
	 * Search in the local SPDM table to get the index of the
	 * corresponding entry in the SPDM table.
	 */
	for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
	    /* skip entries that are free or differ in protocol/ports */
	    if ((!hldev->spdm_table[spdm_entry]->in_use) ||
	        (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
	        (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
	        (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
	        (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {

	    /*
	     * Compare the src/dst IP addresses of source and target
	     */
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
	             != src_ip->ipv4.addr) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
	             != dst_ip->ipv4.addr)) {

	        /* IPv6: compare both 64-bit halves of src and dst */
	        if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
	             != src_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
	             != src_ip->ipv6.addr[1]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
	             != dst_ip->ipv6.addr[0]) ||
	            (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
	             != dst_ip->ipv6.addr[1])) {

	if (spdm_entry >= hldev->spdm_max_entries) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;

	/*
	 * Retrieve the corresponding entry from the SPDM table and
	 * make sure that the data is consistent.
	 */
	for(line_no = 0; line_no < 8; line_no++) {

	    /*
	     * SPDM lines 2,3,4 are valid only for an IPv6 entry.
	     * SPDM lines 5 & 6 are reserved. We don't have to
	     * read these entries in the above cases.
	     */
	        ((line_no == 2)||(line_no == 3)||(line_no == 4))) ||

	    if ((status = __hal_read_spdm_entry_line(
	            &spdm_line_arr[line_no]))
	        xge_os_spin_unlock(&hldev->spdm_lock);

	/*
	 * Seventh line of the spdm entry contains the entry_enable
	 * bit. Make sure that the entry_enable bit of this spdm entry
	 * is set.
	 * To remove an entry from the SPDM table, reset this
	 * bit.
	 */
	if (!(spdm_line_arr[7] & BIT(63))) {
	    /* local table said in-use but hardware says disabled */
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	        "consistent with the actual one for the spdm "
	        "entry %d ", spdm_entry);

	/*
	 * Retrieve the L4 SP/DP, src/dst ip addresses from the SPDM
	 * table and do a comparison.
	 */
	spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
	spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
	spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
	spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);

	if ((spdm_is_tcp != is_tcp) ||
	    (spdm_is_ipv4 != is_ipv4) ||
	    (spdm_l4_sp != l4_sp) ||
	    (spdm_l4_dp != l4_dp)) {
	    xge_debug_device(XGE_ERR, "Local SPDM table is not "
	        "consistent with the actual one for the spdm "
	        "entry %d ", spdm_entry);

	    /* Upper 32 bits of spdm_line(64 bit) contains the
	     * src IPv4 address. Lower 32 bits of spdm_line
	     * contains the destination IPv4 address.
	     */
	    u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
	    u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);

	    if ((temp_src_ip != src_ip->ipv4.addr) ||
	        (temp_dst_ip != dst_ip->ipv4.addr)) {
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);

	    /*
	     * SPDM line 1 & 2 contains the src IPv6 address.
	     * SPDM line 3 & 4 contains the dst IPv6 address.
	     */
	    if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
	        (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
	        (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {
	        xge_debug_device(XGE_ERR, "Local SPDM table is not "
	            "consistent with the actual one for the spdm "
	            "entry %d ", spdm_entry);

	/*
	 * Reset the entry_enable bit to zero
	 */
	spdm_line_arr[7] &= ~BIT(63);

	/* write line 7 back: entries are 64 bytes, line 7 at offset 7*8 */
	xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
	    (void *)((char *)hldev->spdm_mem_base +
	        (spdm_entry * 64) + (7 * 8)));

	/*
	 * Wait for the operation to be completed.
	 */
	if (__hal_device_register_poll(hldev,
	    &bar0->rxpic_int_reg, 1,
	    XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
	    XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
	    xge_os_spin_unlock(&hldev->spdm_lock);
	    return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;

	/*
	 * Make the corresponding spdm entry in the local SPDM table
	 * available for future use.
	 */
	hldev->spdm_table[spdm_entry]->in_use = 0;
	xge_os_spin_unlock(&hldev->spdm_lock);

	xge_os_spin_unlock(&hldev->spdm_lock);
	return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
/*
 * __hal_device_rti_set
 * @ring_qid: The post_qid of the ring.
 * @channel: HAL channel of the ring.
 *
 * This function stores the RTI value associated for the MSI and
 * also unmasks this particular RTI in the rti_mask register.
 */
static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	/* remember the RTI only when running in MSI/MSI-X mode */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
	    channel->rti = (u8)ring_qid;

	/* clear this ring's bit in rx_traffic_mask to unmask its RTI */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->rx_traffic_mask);
	val64 &= ~BIT(ring_qid);
	xge_os_pio_mem_write64(hldev->pdev,
	    hldev->regh0, val64,
	    &bar0->rx_traffic_mask);
/*
 * __hal_device_tti_set
 * @fifo_qid: The post_qid of the FIFO.
 * @channel: HAL channel of the FIFO.
 *
 * This function stores the TTI value associated for the MSI and
 * also unmasks this particular TTI in the tti_mask register.
 */
static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	/* remember the TTI only when running in MSI/MSI-X mode */
	if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
	    hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
	    channel->tti = (u8)fifo_qid;

	/* clear this fifo's bit in tx_traffic_mask to unmask its TTI */
	val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    &bar0->tx_traffic_mask);
	val64 &= ~BIT(fifo_qid);
	xge_os_pio_mem_write64(hldev->pdev,
	    hldev->regh0, val64,
	    &bar0->tx_traffic_mask);
/*
 * xge_hal_channel_msi_set - Associate an RTI with a ring or a TTI with a
 * FIFO for a given MSI.
 * @channelh: HAL channel handle.
 * @msi: MSI Number associated with the channel.
 * @msi_msg: The MSI message associated with the MSI number above.
 *
 * This API will associate a given channel (either Ring or FIFO) with the
 * given MSI number. It will also program the Tx_Mat/Rx_Mat tables in the
 * hardware to indicate this association to the hardware.
 */
xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	channel->msi_msg = msi_msg;
	if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
	    int ring = channel->post_qid;
	    xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
	          " MSI: %d", channel->msi_msg, ring, msi);
	    /* route this ring's interrupts to the given MSI via Rx_Mat */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    __hal_device_rti_set(ring, channel);
	    int fifo = channel->post_qid;
	    xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
	          " MSI: %d", channel->msi_msg, fifo, msi);
	    /* route this fifo's interrupts to the given MSI via Tx_Mat */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    __hal_device_tti_set(fifo, channel);
/*
 * xge_hal_mask_msix - Mask an MSI-X vector.
 * @devh: HAL device handle.
 * @msi_id: index of the MSI-X vector to mask.
 *
 * The function masks the msix interrupt for the given msi_id.
 *
 * Returns: XGE_HAL_OK on success.
 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range.
 */
xge_hal_mask_msix(xge_hal_device_h devh, int msi_id)
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	u32 *bar2 = (u32 *)hldev->bar2;

	xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);

	/* bar2 maps the MSI-X table: each entry is 4 dwords, dword 3 is
	 * the Vector Control word (read-modify-write to set the mask bit
	 * — NOTE(review): the modify step is on a line not shown here). */
	val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
	xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
/*
 * xge_hal_unmask_msix - Unmask an MSI-X vector.
 * @devh: HAL device handle.
 * @msi_id: index of the MSI-X vector to unmask.
 *
 * The function unmasks the msix interrupt for the given msi_id.
 * (The original header comment named xge_hal_mask_msix — copy-paste slip.)
 *
 * Returns: XGE_HAL_OK on success.
 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range.
 */
xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id)
	xge_hal_status_e status = XGE_HAL_OK;
	xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	u32 *bar2 = (u32 *)hldev->bar2;

	xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);

	/* read-modify-write the Vector Control dword (entry dword 3) to
	 * clear the mask bit — NOTE(review): the modify step is on a line
	 * not shown here. */
	val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
	xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
/*
 * __hal_set_msix_vals
 * @devh: HAL device handle.
 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
 *              Filled in by this function.
 * @msix_address: 32bit MSI-X DMA address.
 *                Filled in by this function.
 * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
 *            entry in the table of MSI-X (value, address) pairs.
 *
 * This function will program the hardware associating the given
 * address/value combination to the specified msi number.
 */
static void __hal_set_msix_vals (xge_hal_device_h devh,
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	/* strobe a read of MSI-X pair @msix_idx through xmsi_access;
	 * the two 32-bit halves must be written upper-first */
	val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
	__hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
	        (u32)(val64 >> 32), &bar0->xmsi_access);
	__hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
	        (u32)(val64), &bar0->xmsi_access);

	    /* poll until the strobe bit self-clears */
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	               &bar0->xmsi_access);
	    if (val64 & XGE_HAL_XMSI_STROBE)

	*msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	*msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	        &bar0->xmsi_address);
/*
 * xge_hal_channel_msix_set - Associate MSI-X with a channel.
 * @channelh: HAL channel handle.
 * @msix_idx: index that corresponds to a particular (@msix_value,
 *            @msix_address) entry in the MSI-X table.
 *
 * This API associates a given channel (either Ring or FIFO) with the
 * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
 * to indicate this association.
 */
xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
	xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
	xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
	xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;

	if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
	    /* Currently Ring and RTI is one on one. */
	    int ring = channel->post_qid;
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    __hal_device_rti_set(ring, channel);
	    /* NOTE(review): this RING branch records the vector in
	     * config.fifo.queue[] while the FIFO branch below records it
	     * in config.ring.queue[] — these look swapped; confirm
	     * against the config structure before relying on it. */
	    hldev->config.fifo.queue[channel->post_qid].intr_vector =
	} else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
	    int fifo = channel->post_qid;
	    val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
	    val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx);
	    xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
	    __hal_device_tti_set(fifo, channel);
	    hldev->config.ring.queue[channel->post_qid].intr_vector =

	channel->msix_idx = msix_idx;
	/* cache the (value, address) pair read back from the hardware */
	__hal_set_msix_vals(hldev, &channel->msix_data,
	        &channel->msix_address,
#if defined(XGE_HAL_CONFIG_LRO)
/**
 * xge_hal_lro_terminate - Terminate lro resources.
 * @lro_scale: Amount of lro memory.
 * @hldev: Hal device structure.
 *
 * Currently a no-op: LRO state lives inside hldev and needs no
 * explicit teardown.
 */
xge_hal_lro_terminate(u32 lro_scale,
        xge_hal_device_t *hldev)
/**
 * xge_hal_lro_init - Initiate lro resources.
 * @lro_scale: Amount of lro memory.
 * @hldev: Hal device structure.
 * Note: For the time being only one LRO per device is used. Later on the
 * size will be increased.
 */
xge_hal_lro_init(u32 lro_scale,
         xge_hal_device_t *hldev)
	/* fall back to hard-coded defaults where config says so */
	if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE)
	    hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;

	if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE)
	    hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;

	/* reset the per-ring LRO session pools */
	for (i=0; i < XGE_HAL_MAX_RING_NUM; i++)
	    xge_os_memzero(hldev->lro_desc[i].lro_pool,
	           sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS);

	    hldev->lro_desc[i].lro_next_idx = 0;
	    hldev->lro_desc[i].lro_recent = NULL;
/**
 * xge_hal_device_poll - HAL device "polling" entry point.
 * @devh: HAL device.
 *
 * HAL "polling" entry point. Note that this is part of HAL public API.
 * Upper-Layer driver _must_ periodically poll HAL via
 * xge_hal_device_poll().
 *
 * HAL uses caller's execution context to serially process accumulated
 * slow-path events, such as link state changes and hardware error
 * indications.
 *
 * The rate of polling could be somewhere between 500us to 10ms,
 * depending on requirements (e.g., the requirement to support fail-over
 * could mean that 500us or even 100us polling interval need to be used).
 *
 * The need and motivation for external polling includes
 *
 *   - remove the error-checking "burden" from the HAL interrupt handler
 *     (see xge_hal_device_handle_irq());
 *
 *   - remove the potential source of portability issues by _not_
 *     implementing separate polling thread within HAL itself.
 *
 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
 * Usage: See ex_slow_path{}.
 */
xge_hal_device_poll(xge_hal_device_h devh)
	unsigned char item_buf[sizeof(xge_queue_item_t) +
	            XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
	xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
	xge_queue_status_e qstatus;
	xge_hal_status_e hstatus;
	int queue_has_critical_event = 0;
	xge_hal_device_t *hldev = (xge_hal_device_t*)devh;

	xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
	             XGE_DEFAULT_EVENT_MAX_DATA_SIZE));

	/* refuse to poll an uninitialized / dying device */
	if (!hldev->is_initialized ||
	    hldev->terminating ||
	    hldev->magic != XGE_HAL_MAGIC)

	/* XPAK transceiver stats are refreshed only every 72000 ticks */
	if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
	    hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
	    /*
	     * Logging Error messages in the excess temperature,
	     * Bias current, laser output for three cycle
	     */
	    __hal_updt_stats_xpak(hldev);
	    hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;

	if (!queue_has_critical_event)
	    queue_has_critical_event =
	        __queue_get_reset_critical(hldev->queueh);

	/* drain the event queue, bounded per poll cycle unless a
	 * critical event forces us to keep walking */
	while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {
	    qstatus = xge_queue_consume(hldev->queueh,
	              XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
	    if (qstatus == XGE_QUEUE_IS_EMPTY)

	    xge_debug_queue(XGE_TRACE,
	         "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
	         XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
	         (u64)(ulong_t)item->context);

	    /* device may have been torn down while we were consuming */
	    if (!hldev->is_initialized ||
	        hldev->magic != XGE_HAL_MAGIC) {

	    switch (item->event_type) {
	    case XGE_HAL_EVENT_LINK_IS_UP: {
	        /* suppress link-up notification while a critical event
	         * is pending */
	        if (!queue_has_critical_event &&
	            g_xge_hal_driver->uld_callbacks.link_up) {
	            g_xge_hal_driver->uld_callbacks.link_up(
	                    hldev->upper_layer_info);
	            hldev->link_state = XGE_HAL_LINK_UP;
	    case XGE_HAL_EVENT_LINK_IS_DOWN: {
	        if (!queue_has_critical_event &&
	            g_xge_hal_driver->uld_callbacks.link_down) {
	            g_xge_hal_driver->uld_callbacks.link_down(
	                    hldev->upper_layer_info);
	            hldev->link_state = XGE_HAL_LINK_DOWN;
	    case XGE_HAL_EVENT_SERR:
	    case XGE_HAL_EVENT_ECCERR:
	    case XGE_HAL_EVENT_PARITYERR:
	    case XGE_HAL_EVENT_TARGETABORT:
	    case XGE_HAL_EVENT_SLOT_FREEZE: {
	        void *item_data = xge_queue_item_data(item);
	        xge_hal_event_e event_type = item->event_type;
	        u64 val64 = *((u64*)item_data);

	        /* any critical error may actually be a slot freeze;
	         * re-classify before reporting */
	        if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
	            if (xge_hal_device_is_slot_freeze(hldev))
	                event_type = XGE_HAL_EVENT_SLOT_FREEZE;
	        if (g_xge_hal_driver->uld_callbacks.crit_err) {
	            g_xge_hal_driver->uld_callbacks.crit_err(
	                hldev->upper_layer_info,
	        /* handle one critical event per poll cycle */
	        xge_debug_queue(XGE_TRACE,
	            "got non-HAL event %d",

	    /* broadcast this event */
	    if (g_xge_hal_driver->uld_callbacks.event)
	        g_xge_hal_driver->uld_callbacks.event(item);

	if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
	    if (g_xge_hal_driver->uld_callbacks.before_device_poll(

	hstatus = __hal_device_poll(hldev);
	if (g_xge_hal_driver->uld_callbacks.after_device_poll)
	    g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);

	/*
	 * handle critical error right away:
	 *   - walk the device queue again
	 *   - drop non-critical events, if any
	 *   - look for the 1st critical
	 */
	if (hstatus == XGE_HAL_ERR_CRITICAL) {
	    queue_has_critical_event = 1;
6904 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
6905 * @hldev: HAL device handle.
6907 * This function is used to set the adapter to enhanced mode.
6909 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
/*
 * xge_hal_rts_rth_init - Switch receive traffic steering from classic
 * to enhanced mode via a read-modify-write of the rts_ctrl register.
 */
6912 xge_hal_rts_rth_init(xge_hal_device_t *hldev)
6914 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/*
 * Set the receive traffic steering mode from default (classic)
 * to enhanced.
 */
6921 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6923 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6924 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6925 val64, &bar0->rts_ctrl);
6929 * xge_hal_rts_rth_clr - Clear RTS hashing.
6930 * @hldev: HAL device handle.
6932 * This function is used to clear all RTS hashing related stuff.
6933 * It brings the adapter out from enhanced mode to classic mode.
6934 * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
6936 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
/*
 * xge_hal_rts_rth_clr - Take the adapter out of enhanced steering mode
 * (back to classic) and clear the RTS_RTH_CFG register (hash type,
 * function, etc.).
 */
6939 xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
6941 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Bring the steering mode back from enhanced to classic. */
6948 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6950 val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6951 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6952 val64, &bar0->rts_ctrl);
/*
 * Clear rts_rth_cfg.
 * NOTE(review): val64 appears to be written as-is here; presumably the
 * original file zeroes it first on a line not shown in this excerpt -
 * confirm against the full source.
 */
6954 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6955 &bar0->rts_rth_cfg);
6959 * xge_hal_rts_rth_set - Set/configure RTS hashing.
6960 * @hldev: HAL device handle.
6961 * @def_q: default queue
6962 * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc.
6963 * @bucket_size: no of least significant bits to be used for hashing.
6965 * Used to set/configure all RTS hashing related stuff.
6966 * - set the steering mode to enhanced.
6967 * - set hash function i.e algo selection.
6968 * - set the default queue.
6970 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
/*
 * xge_hal_rts_rth_set - Program the RTH default queue, then enable RTH
 * with the requested bucket size and hash-algorithm selection.
 */
6973 xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
6976 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Program the default receive queue. */
6979 val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
6980 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6981 &bar0->rts_default_q);
/*
 * Build the RTH configuration word: enable RTH, select the bucket
 * size and the hash algorithm.
 * NOTE(review): hash_type is presumably folded into val64 on a line
 * not visible in this excerpt - confirm against the full source.
 */
6984 val64 |= XGE_HAL_RTS_RTH_EN;
6985 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
6986 val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
6987 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6988 &bar0->rts_rth_cfg);
6992 * xge_hal_rts_rth_start - Start RTS hashing.
6993 * @hldev: HAL device handle.
6995 * Used to start RTS hashing.
6997 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start.
/*
 * xge_hal_rts_rth_start - Turn receive-side hashing on by setting the
 * RTH enable bit in rts_rth_cfg (read-modify-write; all other
 * previously configured fields are preserved).
 */
7000 xge_hal_rts_rth_start(xge_hal_device_t *hldev)
7002 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7006 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7007 &bar0->rts_rth_cfg);
7008 val64 |= XGE_HAL_RTS_RTH_EN;
7009 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7010 &bar0->rts_rth_cfg);
7014 * xge_hal_rts_rth_stop - Stop the RTS hashing.
7015 * @hldev: HAL device handle.
7017 * Used to stop RTS hashing.
7019 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start.
/*
 * xge_hal_rts_rth_stop - Turn receive-side hashing off by clearing the
 * RTH enable bit in rts_rth_cfg (read-modify-write; the remaining
 * configuration fields are left intact for a later restart).
 */
7022 xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
7024 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7027 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7028 &bar0->rts_rth_cfg);
7029 val64 &= ~XGE_HAL_RTS_RTH_EN;
7030 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7031 &bar0->rts_rth_cfg);
7035 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
7036 * @hldev: HAL device handle.
7037 * @itable: Pointer to the indirection table
7038 * @itable_size: number of entries in the indirection table
7040 * Used to set/configure indirection table.
7041 * It enables the required no of entries in the IT.
7042 * It adds entries to the IT.
7044 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
/*
 * xge_hal_rts_rth_itable_set - Load the RTH indirection table.
 * For each of the itable_size entries: write the entry payload to
 * map-memory data, issue a write+strobe command at that offset, and
 * poll until the strobe bit self-clears. On timeout, returns
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING so the caller may retry.
 */
7047 xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
7049 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7053 for (idx = 0; idx < itable_size; idx++) {
/* Entry payload: enable bit plus the table value for this bucket. */
7054 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
7055 XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);
7057 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7058 &bar0->rts_rth_map_mem_data);
/* Kick off the write into map memory at offset 'idx'. */
7061 val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
7062 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
7063 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));
7064 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7065 &bar0->rts_rth_map_mem_ctrl);
7067 /* poll until done */
7068 if (__hal_device_register_poll(hldev,
7069 &bar0->rts_rth_map_mem_ctrl, 0,
7070 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
7071 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
7072 /* upper layer may require to repeat */
7073 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
7082 * xge_hal_device_rts_rth_key_set - Configure 40byte secret for hash calc.
7084 * @hldev: HAL device handle.
7085 * @KeySize: Number of 64-bit words
7086 * @Key: up to 40-byte array of 8-bit values
7087 * This function configures the 40-byte secret which is used for hash
7090 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
/*
 * xge_hal_device_rts_rth_key_set - Program the 40-byte RTH secret key.
 * Packs the caller's key bytes, eight at a time, into 64-bit words and
 * writes them into the rts_rth_hash_mask register array; any remaining
 * registers are then cleared when the key is shorter than 40 bytes.
 * NOTE(review): several loop-control and bookkeeping lines of the
 * original (e.g. the val64 shift/reset and the outer/clear loop
 * headers) are not visible in this excerpt.
 */
7093 xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
7095 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
7104 for ( i = 0; i < 8 ; i++) {
7105 /* Prepare 64-bit word for 'nreg' containing 8 keys. */
7108 val64 |= Key[entry++];
7113 /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/
7114 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7115 &bar0->rts_rth_hash_mask[nreg++]);
7119 /* Clear the rest if key is less than 40 bytes */
7121 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7122 &bar0->rts_rth_hash_mask[nreg++]);
7128 * xge_hal_device_is_closed - Device is closed
7130 * @devh: HAL device handle.
/*
 * xge_hal_device_is_closed - Report whether the device has no open
 * channels, i.e. both its fifo (Tx) and ring (Rx) channel lists are
 * empty.
 */
7133 xge_hal_device_is_closed(xge_hal_device_h devh)
7135 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7137 if (xge_list_is_empty(&hldev->fifo_channels) &&
7138 xge_list_is_empty(&hldev->ring_channels))
/*
 * xge_hal_device_rts_section_enable - Enable the RTS MAC-address
 * section (32 addresses per section) that contains the given index,
 * via a read-modify-write of rts_mac_cfg.
 * Returns XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES when the index exceeds the
 * per-card address limit (Herc cards allow a higher limit).
 */
7145 xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
7149 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
7151 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7152 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7154 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
7155 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
7157 if ( index >= max_addr )
7158 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
/*
 * Calculate the section value: 32 MAC addresses per section.
 */
7163 section = index / 32;
7165 xge_debug_device(XGE_TRACE, "the Section value is %d ", section);
7167 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7168 &bar0->rts_mac_cfg);
/*
 * Set the enable bit for the computed section (0..7).
 * NOTE(review): the switch/case framing lines selecting among these
 * assignments are not visible in this excerpt.
 */
7172 val64 |= XGE_HAL_RTS_MAC_SECT0_EN;
7175 val64 |= XGE_HAL_RTS_MAC_SECT1_EN;
7178 val64 |= XGE_HAL_RTS_MAC_SECT2_EN;
7181 val64 |= XGE_HAL_RTS_MAC_SECT3_EN;
7184 val64 |= XGE_HAL_RTS_MAC_SECT4_EN;
7187 val64 |= XGE_HAL_RTS_MAC_SECT5_EN;
7190 val64 |= XGE_HAL_RTS_MAC_SECT6_EN;
7193 val64 |= XGE_HAL_RTS_MAC_SECT7_EN;
7196 xge_debug_device(XGE_ERR, "Invalid Section value %d "
/* Write the updated section-enable mask back to the device. */
7200 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
7201 val64, &bar0->rts_mac_cfg);
7207 * xge_hal_fix_rldram_ecc_error
7208 * @hldev: private member of the device structure.
7210 * SXE-02-010. This function will turn OFF the ECC error reporting for the
7211 * interface bet'n external Micron RLDRAM II device and memory controller.
7212 * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U
7213 * fields of MC_ERR_REG register. Issue reported by HP-Unix folks during the
7214 * qualification of Herc.
7217 xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev)
7219 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
7223 val64 = XGE_HAL_MC_RLDRAM_TEST_MODE;
7224 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7225 &bar0->mc_rldram_test_ctrl);
7227 // Enable fg/bg tests.
7228 val64 = 0x0100000000000000ULL;
7229 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7232 // Enable RLDRAM configuration.
7233 val64 = 0x0000000000017B00ULL;
7234 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7235 &bar0->mc_rldram_mrs);
7237 // Enable RLDRAM queues.
7238 val64 = 0x0000000001017B00ULL;
7239 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7240 &bar0->mc_rldram_mrs);
7242 // Setup test ranges
7243 val64 = 0x00000000001E0100ULL;
7244 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7245 &bar0->mc_rldram_test_add);
7247 val64 = 0x00000100001F0100ULL;
7248 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7249 &bar0->mc_rldram_test_add_bkg);
7251 val64 = 0x0001000000010000ULL;
7252 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7253 &bar0->mc_rldram_test_ctrl);
7255 if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1,
7256 XGE_HAL_MC_RLDRAM_TEST_DONE,
7257 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK){
7258 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
7262 val64 = 0x0000000000000000ULL;
7263 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7264 &bar0->mc_rldram_test_ctrl);