2 * Copyright (c) 2002-2007 Neterion, Inc.
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions and the following disclaimer.
10 * 2. Redistributions in binary form must reproduce the above copyright
11 * notice, this list of conditions and the following disclaimer in the
12 * documentation and/or other materials provided with the distribution.
14 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
15 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
16 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
17 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
18 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
19 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
20 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
21 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
22 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
23 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
29 #include <dev/nxge/include/xgehal-device.h>
30 #include <dev/nxge/include/xgehal-channel.h>
31 #include <dev/nxge/include/xgehal-fifo.h>
32 #include <dev/nxge/include/xgehal-ring.h>
33 #include <dev/nxge/include/xgehal-driver.h>
34 #include <dev/nxge/include/xgehal-mgmt.h>
/* Signature written between the "set" and "remove" phases of the DTX
 * configuration tables, and the terminator for all of the u64 config
 * tables below. */
#define SWITCH_SIGN	0xA5A5A5A5A5A5A5A5ULL
#define END_SIGN	0x0

/* Link interrupts cannot be processed in the ISR when running on the
 * Herc emulation platform. */
#ifdef XGE_HAL_HERC_EMULATION
#undef XGE_HAL_PROCESS_LINK_INT_IN_ISR
#endif

/*
 * Jenkins hash key length (in bytes).
 */
#define XGE_HAL_JHASH_MSG_LEN 50
49 * mix(a,b,c) used in Jenkins hash algorithm
51 #define mix(a,b,c) { \
52 a -= b; a -= c; a ^= (c>>13); \
53 b -= c; b -= a; b ^= (a<<8); \
54 c -= a; c -= b; c ^= (b>>13); \
55 a -= b; a -= c; a ^= (c>>12); \
56 b -= c; b -= a; b ^= (a<<16); \
57 c -= a; c -= b; c ^= (b>>5); \
58 a -= b; a -= c; a ^= (c>>3); \
59 b -= c; b -= a; b ^= (a<<10); \
60 c -= a; c -= b; c ^= (b>>15); \
65 * __hal_device_event_queued
66 * @data: pointer to xge_hal_device_t structure
68 * Will be called when new event succesfully queued.
71 __hal_device_event_queued(void *data, int event_type)
73 xge_assert(((xge_hal_device_t*)data)->magic == XGE_HAL_MAGIC);
74 if (g_xge_hal_driver->uld_callbacks.event_queued) {
75 g_xge_hal_driver->uld_callbacks.event_queued(data, event_type);
80 * __hal_pio_mem_write32_upper
82 * Endiann-aware implementation of xge_os_pio_mem_write32().
83 * Since Xframe has 64bit registers, we differintiate uppper and lower
87 __hal_pio_mem_write32_upper(pci_dev_h pdev, pci_reg_h regh, u32 val, void *addr)
89 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
90 xge_os_pio_mem_write32(pdev, regh, val, addr);
92 xge_os_pio_mem_write32(pdev, regh, val, (void *)((char *)addr + 4));
97 * __hal_pio_mem_write32_upper
99 * Endiann-aware implementation of xge_os_pio_mem_write32().
100 * Since Xframe has 64bit registers, we differintiate uppper and lower
104 __hal_pio_mem_write32_lower(pci_dev_h pdev, pci_reg_h regh, u32 val,
107 #if defined(XGE_OS_HOST_BIG_ENDIAN) && !defined(XGE_OS_PIO_LITTLE_ENDIAN)
108 xge_os_pio_mem_write32(pdev, regh, val,
109 (void *) ((char *)addr + 4));
111 xge_os_pio_mem_write32(pdev, regh, val, addr);
116 * __hal_device_register_poll
117 * @hldev: pointer to xge_hal_device_t structure
118 * @reg: register to poll for
119 * @op: 0 - bit reset, 1 - bit set
120 * @mask: mask for logical "and" condition based on %op
121 * @max_millis: maximum time to try to poll in milliseconds
123 * Will poll certain register for specified amount of time.
124 * Will poll until masked bit is not cleared.
127 __hal_device_register_poll(xge_hal_device_t *hldev, u64 *reg,
128 int op, u64 mask, int max_millis)
132 xge_hal_status_e ret = XGE_HAL_FAIL;
137 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
138 if (op == 0 && !(val64 & mask))
140 else if (op == 1 && (val64 & mask) == mask)
146 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0, reg);
147 if (op == 0 && !(val64 & mask))
149 else if (op == 1 && (val64 & mask) == mask)
152 } while (++i < max_millis);
158 * __hal_device_wait_quiescent
160 * @hw_status: hw_status in case of error
162 * Will wait until device is quiescent for some blocks.
164 static xge_hal_status_e
165 __hal_device_wait_quiescent(xge_hal_device_t *hldev, u64 *hw_status)
167 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
169 /* poll and wait first */
170 #ifdef XGE_HAL_HERC_EMULATION
171 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
172 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
173 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
174 XGE_HAL_ADAPTER_STATUS_PFC_READY |
175 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
176 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
177 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
178 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
179 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK),
180 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
182 (void) __hal_device_register_poll(hldev, &bar0->adapter_status, 1,
183 (XGE_HAL_ADAPTER_STATUS_TDMA_READY |
184 XGE_HAL_ADAPTER_STATUS_RDMA_READY |
185 XGE_HAL_ADAPTER_STATUS_PFC_READY |
186 XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY |
187 XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT |
188 XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY |
189 XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY |
190 XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK |
191 XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK),
192 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS);
195 return xge_hal_device_status(hldev, hw_status);
199 * xge_hal_device_is_slot_freeze
202 * Returns non-zero if the slot is freezed.
203 * The determination is made based on the adapter_status
204 * register which will never give all FFs, unless PCI read
208 xge_hal_device_is_slot_freeze(xge_hal_device_h devh)
210 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
211 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
214 xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
215 &bar0->adapter_status);
216 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
217 xge_offsetof(xge_hal_pci_config_le_t, device_id),
220 if (adapter_status == XGE_HAL_ALL_FOXES)
223 dummy = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
225 printf(">>> Slot is frozen!\n");
229 return((adapter_status == XGE_HAL_ALL_FOXES) || (device_id == 0xffff));
234 * __hal_device_led_actifity_fix
235 * @hldev: pointer to xge_hal_device_t structure
237 * SXE-002: Configure link and activity LED to turn it off
240 __hal_device_led_actifity_fix(xge_hal_device_t *hldev)
242 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
246 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
247 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id), &subid);
250 * In the case of Herc, there is a new register named beacon control
251 * is added which was not present in Xena.
252 * Beacon control register in Herc is at the same offset as
253 * gpio control register in Xena. It means they are one and same in
254 * the case of Xena. Also, gpio control register offset in Herc and
256 * The current register map represents Herc(It means we have
257 * both beacon and gpio control registers in register map).
258 * WRT transition from Xena to Herc, all the code in Xena which was
259 * using gpio control register for LED handling would have to
260 * use beacon control register in Herc and the rest of the code
261 * which uses gpio control in Xena would use the same register
263 * WRT LED handling(following code), In the case of Herc, beacon
264 * control register has to be used. This is applicable for Xena also,
265 * since it represents the gpio control register in Xena.
267 if ((subid & 0xFF) >= 0x07) {
268 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
269 &bar0->beacon_control);
270 val64 |= 0x0000800000000000ULL;
271 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
272 val64, &bar0->beacon_control);
273 val64 = 0x0411040400000000ULL;
274 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
275 (void *) ((u8 *)bar0 + 0x2700));
279 /* Constants for Fixing the MacAddress problem seen mostly on
282 static u64 xena_fix_mac[] = {
283 0x0060000000000000ULL, 0x0060600000000000ULL,
284 0x0040600000000000ULL, 0x0000600000000000ULL,
285 0x0020600000000000ULL, 0x0060600000000000ULL,
286 0x0020600000000000ULL, 0x0060600000000000ULL,
287 0x0020600000000000ULL, 0x0060600000000000ULL,
288 0x0020600000000000ULL, 0x0060600000000000ULL,
289 0x0020600000000000ULL, 0x0060600000000000ULL,
290 0x0020600000000000ULL, 0x0060600000000000ULL,
291 0x0020600000000000ULL, 0x0060600000000000ULL,
292 0x0020600000000000ULL, 0x0060600000000000ULL,
293 0x0020600000000000ULL, 0x0060600000000000ULL,
294 0x0020600000000000ULL, 0x0060600000000000ULL,
295 0x0020600000000000ULL, 0x0000600000000000ULL,
296 0x0040600000000000ULL, 0x0060600000000000ULL,
301 * __hal_device_fix_mac
302 * @hldev: HAL device handle.
304 * Fix for all "FFs" MAC address problems observed on Alpha platforms.
307 __hal_device_xena_fix_mac(xge_hal_device_t *hldev)
310 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
313 * In the case of Herc, there is a new register named beacon control
314 * is added which was not present in Xena.
315 * Beacon control register in Herc is at the same offset as
316 * gpio control register in Xena. It means they are one and same in
317 * the case of Xena. Also, gpio control register offset in Herc and
319 * The current register map represents Herc(It means we have
320 * both beacon and gpio control registers in register map).
321 * WRT transition from Xena to Herc, all the code in Xena which was
322 * using gpio control register for LED handling would have to
323 * use beacon control register in Herc and the rest of the code
324 * which uses gpio control in Xena would use the same register
326 * In the following code(xena_fix_mac), beacon control register has
327 * to be used in the case of Xena, since it represents gpio control
328 * register. In the case of Herc, there is no change required.
330 while (xena_fix_mac[i] != END_SIGN) {
331 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
332 xena_fix_mac[i++], &bar0->beacon_control);
338 * xge_hal_device_bcast_enable
339 * @hldev: HAL device handle.
341 * Enable receiving broadcasts.
342 * The host must first write RMAC_CFG_KEY "key"
343 * register, and then - MAC_CFG register.
346 xge_hal_device_bcast_enable(xge_hal_device_h devh)
348 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
349 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
352 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
354 val64 |= XGE_HAL_MAC_RMAC_BCAST_ENABLE;
356 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
357 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
359 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
360 (u32)(val64 >> 32), &bar0->mac_cfg);
362 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
363 (unsigned long long)val64,
364 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
368 * xge_hal_device_bcast_disable
369 * @hldev: HAL device handle.
371 * Disable receiving broadcasts.
372 * The host must first write RMAC_CFG_KEY "key"
373 * register, and then - MAC_CFG register.
376 xge_hal_device_bcast_disable(xge_hal_device_h devh)
378 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
379 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
382 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
385 val64 &= ~(XGE_HAL_MAC_RMAC_BCAST_ENABLE);
386 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
387 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
389 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
390 (u32)(val64 >> 32), &bar0->mac_cfg);
392 xge_debug_device(XGE_TRACE, "mac_cfg 0x"XGE_OS_LLXFMT": broadcast %s",
393 (unsigned long long)val64,
394 hldev->config.mac.rmac_bcast_en ? "enabled" : "disabled");
398 * __hal_device_shared_splits_configure
399 * @hldev: HAL device handle.
401 * TxDMA will stop Read request if the number of read split had exceeded
402 * the limit set by shared_splits
405 __hal_device_shared_splits_configure(xge_hal_device_t *hldev)
407 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
410 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
413 XGE_HAL_PIC_CNTL_SHARED_SPLITS(hldev->config.shared_splits);
414 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
416 xge_debug_device(XGE_TRACE, "%s", "shared splits configured");
420 * __hal_device_rmac_padding_configure
421 * @hldev: HAL device handle.
423 * Configure RMAC frame padding. Depends on configuration, it
424 * can be send to host or removed by MAC.
427 __hal_device_rmac_padding_configure(xge_hal_device_t *hldev)
429 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
432 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
433 XGE_HAL_RMAC_CFG_KEY(0x4C0D), &bar0->rmac_cfg_key);
434 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
436 val64 &= ( ~XGE_HAL_MAC_RMAC_ALL_ADDR_ENABLE );
437 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE );
438 val64 |= XGE_HAL_MAC_CFG_TMAC_APPEND_PAD;
441 * If the RTH enable bit is not set, strip the FCS
443 if (!hldev->config.rth_en ||
444 !(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
445 &bar0->rts_rth_cfg) & XGE_HAL_RTS_RTH_EN)) {
446 val64 |= XGE_HAL_MAC_CFG_RMAC_STRIP_FCS;
449 val64 &= ( ~XGE_HAL_MAC_CFG_RMAC_STRIP_PAD );
450 val64 |= XGE_HAL_MAC_RMAC_DISCARD_PFRM;
452 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
453 (u32)(val64 >> 32), (char*)&bar0->mac_cfg);
456 xge_debug_device(XGE_TRACE,
457 "mac_cfg 0x"XGE_OS_LLXFMT": frame padding configured",
458 (unsigned long long)val64);
462 * __hal_device_pause_frames_configure
463 * @hldev: HAL device handle.
465 * Set Pause threshold.
467 * Pause frame is generated if the amount of data outstanding
468 * on any queue exceeded the ratio of
469 * (mac_control.mc_pause_threshold_q0q3 or q4q7)/256
472 __hal_device_pause_frames_configure(xge_hal_device_t *hldev)
474 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
478 switch (hldev->config.mac.media) {
479 case XGE_HAL_MEDIA_SR:
480 case XGE_HAL_MEDIA_SW:
481 val64=0xfffbfffbfffbfffbULL;
483 case XGE_HAL_MEDIA_LR:
484 case XGE_HAL_MEDIA_LW:
485 val64=0xffbbffbbffbbffbbULL;
487 case XGE_HAL_MEDIA_ER:
488 case XGE_HAL_MEDIA_EW:
490 val64=0xffbbffbbffbbffbbULL;
494 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
495 val64, &bar0->mc_pause_thresh_q0q3);
496 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
497 val64, &bar0->mc_pause_thresh_q4q7);
499 /* Set the time value to be inserted in the pause frame generated
501 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
502 &bar0->rmac_pause_cfg);
503 if (hldev->config.mac.rmac_pause_gen_en)
504 val64 |= XGE_HAL_RMAC_PAUSE_GEN_EN;
506 val64 &= ~(XGE_HAL_RMAC_PAUSE_GEN_EN);
507 if (hldev->config.mac.rmac_pause_rcv_en)
508 val64 |= XGE_HAL_RMAC_PAUSE_RCV_EN;
510 val64 &= ~(XGE_HAL_RMAC_PAUSE_RCV_EN);
511 val64 &= ~(XGE_HAL_RMAC_PAUSE_HG_PTIME(0xffff));
512 val64 |= XGE_HAL_RMAC_PAUSE_HG_PTIME(hldev->config.mac.rmac_pause_time);
513 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
514 &bar0->rmac_pause_cfg);
517 for (i = 0; i<4; i++) {
519 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q0q3)
522 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
523 &bar0->mc_pause_thresh_q0q3);
526 for (i = 0; i<4; i++) {
528 (((u64)0xFF00|hldev->config.mac.mc_pause_threshold_q4q7)
531 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
532 &bar0->mc_pause_thresh_q4q7);
533 xge_debug_device(XGE_TRACE, "%s", "pause frames configured");
537 * Herc's clock rate doubled, unless the slot is 33MHz.
539 unsigned int __hal_fix_time_ival_herc(xge_hal_device_t *hldev,
540 unsigned int time_ival)
542 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
545 xge_assert(xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC);
547 if (hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN &&
548 hldev->bus_frequency != XGE_HAL_PCI_BUS_FREQUENCY_33MHZ)
556 * __hal_device_bus_master_disable
557 * @hldev: HAL device handle.
559 * Disable bus mastership.
562 __hal_device_bus_master_disable (xge_hal_device_t *hldev)
567 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
568 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
570 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
571 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
575 * __hal_device_bus_master_enable
576 * @hldev: HAL device handle.
578 * Disable bus mastership.
581 __hal_device_bus_master_enable (xge_hal_device_t *hldev)
586 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
587 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
589 /* already enabled? do nothing */
590 if (cmd & bus_master)
594 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
595 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
598 * __hal_device_intr_mgmt
599 * @hldev: HAL device handle.
600 * @mask: mask indicating which Intr block must be modified.
601 * @flag: if true - enable, otherwise - disable interrupts.
603 * Disable or enable device interrupts. Mask is used to specify
604 * which hardware blocks should produce interrupts. For details
605 * please refer to Xframe User Guide.
608 __hal_device_intr_mgmt(xge_hal_device_t *hldev, u64 mask, int flag)
610 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
611 u64 val64 = 0, temp64 = 0;
614 gim_saved = gim = xge_os_pio_mem_read64(hldev->pdev,
615 hldev->regh0, &bar0->general_int_mask);
617 /* Top level interrupt classification */
619 if ((mask & (XGE_HAL_TX_PIC_INTR/* | XGE_HAL_RX_PIC_INTR*/))) {
620 /* Enable PIC Intrs in the general intr mask register */
621 val64 = XGE_HAL_TXPIC_INT_M/* | XGE_HAL_PIC_RX_INT_M*/;
623 gim &= ~((u64) val64);
624 temp64 = xge_os_pio_mem_read64(hldev->pdev,
625 hldev->regh0, &bar0->pic_int_mask);
627 temp64 &= ~XGE_HAL_PIC_INT_TX;
628 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
629 if (xge_hal_device_check_id(hldev) ==
631 temp64 &= ~XGE_HAL_PIC_INT_MISC;
634 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
635 temp64, &bar0->pic_int_mask);
636 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
637 if (xge_hal_device_check_id(hldev) ==
640 * Unmask only Link Up interrupt
642 temp64 = xge_os_pio_mem_read64(hldev->pdev,
643 hldev->regh0, &bar0->misc_int_mask);
644 temp64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
645 xge_os_pio_mem_write64(hldev->pdev,
646 hldev->regh0, temp64,
647 &bar0->misc_int_mask);
648 xge_debug_device(XGE_TRACE,
649 "unmask link up flag "XGE_OS_LLXFMT,
650 (unsigned long long)temp64);
653 } else { /* flag == 0 */
655 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
656 if (xge_hal_device_check_id(hldev) ==
659 * Mask both Link Up and Down interrupts
661 temp64 = xge_os_pio_mem_read64(hldev->pdev,
662 hldev->regh0, &bar0->misc_int_mask);
663 temp64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
664 temp64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
665 xge_os_pio_mem_write64(hldev->pdev,
666 hldev->regh0, temp64,
667 &bar0->misc_int_mask);
668 xge_debug_device(XGE_TRACE,
669 "mask link up/down flag "XGE_OS_LLXFMT,
670 (unsigned long long)temp64);
673 /* Disable PIC Intrs in the general intr mask
675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
676 XGE_HAL_ALL_INTRS_DIS,
677 &bar0->pic_int_mask);
683 /* Enabling/Disabling Tx DMA interrupts */
684 if (mask & XGE_HAL_TX_DMA_INTR) {
685 /* Enable TxDMA Intrs in the general intr mask register */
686 val64 = XGE_HAL_TXDMA_INT_M;
688 gim &= ~((u64) val64);
689 /* Enable all TxDMA interrupts */
690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
691 0x0, &bar0->txdma_int_mask);
692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
693 0x0, &bar0->pfc_err_mask);
694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
695 0x0, &bar0->tda_err_mask);
696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
697 0x0, &bar0->pcc_err_mask);
698 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
699 0x0, &bar0->tti_err_mask);
700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
701 0x0, &bar0->lso_err_mask);
702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
703 0x0, &bar0->tpa_err_mask);
704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
705 0x0, &bar0->sm_err_mask);
707 } else { /* flag == 0 */
709 /* Disable TxDMA Intrs in the general intr mask
711 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
712 XGE_HAL_ALL_INTRS_DIS,
713 &bar0->txdma_int_mask);
714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
715 XGE_HAL_ALL_INTRS_DIS,
716 &bar0->pfc_err_mask);
722 /* Enabling/Disabling Rx DMA interrupts */
723 if (mask & XGE_HAL_RX_DMA_INTR) {
724 /* Enable RxDMA Intrs in the general intr mask register */
725 val64 = XGE_HAL_RXDMA_INT_M;
728 gim &= ~((u64) val64);
729 /* All RxDMA block interrupts are disabled for now
731 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
732 XGE_HAL_ALL_INTRS_DIS,
733 &bar0->rxdma_int_mask);
735 } else { /* flag == 0 */
737 /* Disable RxDMA Intrs in the general intr mask
739 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
740 XGE_HAL_ALL_INTRS_DIS,
741 &bar0->rxdma_int_mask);
748 /* Enabling/Disabling MAC interrupts */
749 if (mask & (XGE_HAL_TX_MAC_INTR | XGE_HAL_RX_MAC_INTR)) {
750 val64 = XGE_HAL_TXMAC_INT_M | XGE_HAL_RXMAC_INT_M;
753 gim &= ~((u64) val64);
755 /* All MAC block error inter. are disabled for now. */
756 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
757 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
758 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
759 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
761 } else { /* flag == 0 */
763 /* Disable MAC Intrs in the general intr mask
765 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
766 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_int_mask);
767 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
768 XGE_HAL_ALL_INTRS_DIS, &bar0->mac_rmac_err_mask);
774 /* XGXS Interrupts */
775 if (mask & (XGE_HAL_TX_XGXS_INTR | XGE_HAL_RX_XGXS_INTR)) {
776 val64 = XGE_HAL_TXXGXS_INT_M | XGE_HAL_RXXGXS_INT_M;
779 gim &= ~((u64) val64);
780 /* All XGXS block error interrupts are disabled for now
782 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
783 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
785 } else { /* flag == 0 */
787 /* Disable MC Intrs in the general intr mask register */
788 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
789 XGE_HAL_ALL_INTRS_DIS, &bar0->xgxs_int_mask);
795 /* Memory Controller(MC) interrupts */
796 if (mask & XGE_HAL_MC_INTR) {
797 val64 = XGE_HAL_MC_INT_M;
800 gim &= ~((u64) val64);
802 /* Enable all MC blocks error interrupts */
803 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
804 0x0ULL, &bar0->mc_int_mask);
806 } else { /* flag == 0 */
808 /* Disable MC Intrs in the general intr mask
810 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
811 XGE_HAL_ALL_INTRS_DIS, &bar0->mc_int_mask);
818 /* Tx traffic interrupts */
819 if (mask & XGE_HAL_TX_TRAFFIC_INTR) {
820 val64 = XGE_HAL_TXTRAFFIC_INT_M;
823 gim &= ~((u64) val64);
825 /* Enable all the Tx side interrupts */
826 /* '0' Enables all 64 TX interrupt levels. */
827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
828 &bar0->tx_traffic_mask);
830 } else { /* flag == 0 */
832 /* Disable Tx Traffic Intrs in the general intr mask
834 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
835 XGE_HAL_ALL_INTRS_DIS,
836 &bar0->tx_traffic_mask);
841 /* Rx traffic interrupts */
842 if (mask & XGE_HAL_RX_TRAFFIC_INTR) {
843 val64 = XGE_HAL_RXTRAFFIC_INT_M;
845 gim &= ~((u64) val64);
846 /* '0' Enables all 8 RX interrupt levels. */
847 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x0,
848 &bar0->rx_traffic_mask);
850 } else { /* flag == 0 */
852 /* Disable Rx Traffic Intrs in the general intr mask
855 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
856 XGE_HAL_ALL_INTRS_DIS,
857 &bar0->rx_traffic_mask);
863 /* Sched Timer interrupt */
864 if (mask & XGE_HAL_SCHED_INTR) {
866 temp64 = xge_os_pio_mem_read64(hldev->pdev,
867 hldev->regh0, &bar0->txpic_int_mask);
868 temp64 &= ~XGE_HAL_TXPIC_INT_SCHED_INTR;
869 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
870 temp64, &bar0->txpic_int_mask);
872 xge_hal_device_sched_timer(hldev,
873 hldev->config.sched_timer_us,
874 hldev->config.sched_timer_one_shot);
876 temp64 = xge_os_pio_mem_read64(hldev->pdev,
877 hldev->regh0, &bar0->txpic_int_mask);
878 temp64 |= XGE_HAL_TXPIC_INT_SCHED_INTR;
880 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
881 temp64, &bar0->txpic_int_mask);
883 xge_hal_device_sched_timer(hldev,
884 XGE_HAL_SCHED_TIMER_DISABLED,
885 XGE_HAL_SCHED_TIMER_ON_SHOT_ENABLE);
889 if (gim != gim_saved) {
890 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, gim,
891 &bar0->general_int_mask);
892 xge_debug_device(XGE_TRACE, "general_int_mask updated "
893 XGE_OS_LLXFMT" => "XGE_OS_LLXFMT,
894 (unsigned long long)gim_saved, (unsigned long long)gim);
899 * __hal_device_bimodal_configure
900 * @hldev: HAL device handle.
902 * Bimodal parameters initialization.
905 __hal_device_bimodal_configure(xge_hal_device_t *hldev)
909 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
910 xge_hal_tti_config_t *tti;
911 xge_hal_rti_config_t *rti;
913 if (!hldev->config.ring.queue[i].configured)
915 rti = &hldev->config.ring.queue[i].rti;
916 tti = &hldev->bimodal_tti[i];
919 tti->urange_a = hldev->bimodal_urange_a_en * 10;
922 tti->ufc_a = hldev->bimodal_urange_a_en * 8;
926 tti->timer_val_us = hldev->bimodal_timer_val_us;
927 tti->timer_ac_en = 1;
928 tti->timer_ci_en = 0;
933 rti->ufc_a = 1; /* <= for netpipe type of tests */
936 rti->ufc_d = 4; /* <= 99% of a bandwidth traffic counts here */
937 rti->timer_ac_en = 1;
938 rti->timer_val_us = 5; /* for optimal bus efficiency usage */
943 * __hal_device_tti_apply
944 * @hldev: HAL device handle.
946 * apply TTI configuration.
948 static xge_hal_status_e
949 __hal_device_tti_apply(xge_hal_device_t *hldev, xge_hal_tti_config_t *tti,
950 int num, int runtime)
952 u64 val64, data1 = 0, data2 = 0;
953 xge_hal_pci_bar0_t *bar0;
956 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
958 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
960 if (tti->timer_val_us) {
961 unsigned int tx_interval;
963 if (hldev->config.pci_freq_mherz) {
964 tx_interval = hldev->config.pci_freq_mherz *
965 tti->timer_val_us / 64;
967 __hal_fix_time_ival_herc(hldev,
970 tx_interval = tti->timer_val_us;
972 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_VAL(tx_interval);
973 if (tti->timer_ac_en) {
974 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_AC_EN;
976 if (tti->timer_ci_en) {
977 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_TIMER_CI_EN;
981 xge_debug_device(XGE_TRACE, "TTI[%d] timer enabled to %d, ci %s",
982 num, tx_interval, tti->timer_ci_en ?
983 "enabled": "disabled");
994 data1 |= XGE_HAL_TTI_DATA1_MEM_TX_URNG_A(tti->urange_a) |
995 XGE_HAL_TTI_DATA1_MEM_TX_URNG_B(tti->urange_b) |
996 XGE_HAL_TTI_DATA1_MEM_TX_URNG_C(tti->urange_c);
998 data2 |= XGE_HAL_TTI_DATA2_MEM_TX_UFC_A(tti->ufc_a) |
999 XGE_HAL_TTI_DATA2_MEM_TX_UFC_B(tti->ufc_b) |
1000 XGE_HAL_TTI_DATA2_MEM_TX_UFC_C(tti->ufc_c) |
1001 XGE_HAL_TTI_DATA2_MEM_TX_UFC_D(tti->ufc_d);
1004 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
1005 &bar0->tti_data1_mem);
1006 (void)xge_os_pio_mem_read64(hldev->pdev,
1007 hldev->regh0, &bar0->tti_data1_mem);
1008 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
1009 &bar0->tti_data2_mem);
1010 (void)xge_os_pio_mem_read64(hldev->pdev,
1011 hldev->regh0, &bar0->tti_data2_mem);
1014 val64 = XGE_HAL_TTI_CMD_MEM_WE | XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD |
1015 XGE_HAL_TTI_CMD_MEM_OFFSET(num);
1016 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1017 &bar0->tti_command_mem);
1019 if (!runtime && __hal_device_register_poll(hldev, &bar0->tti_command_mem,
1020 0, XGE_HAL_TTI_CMD_MEM_STROBE_NEW_CMD,
1021 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1022 /* upper layer may require to repeat */
1023 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1027 xge_debug_device(XGE_TRACE, "TTI[%d] configured: tti_data1_mem 0x"
1029 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1030 hldev->regh0, &bar0->tti_data1_mem));
1037 * __hal_device_tti_configure
1038 * @hldev: HAL device handle.
1040 * TTI Initialization.
1041 * Initialize Transmit Traffic Interrupt Scheme.
1043 static xge_hal_status_e
1044 __hal_device_tti_configure(xge_hal_device_t *hldev, int runtime)
1048 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
1051 if (!hldev->config.fifo.queue[i].configured)
1054 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
1055 xge_hal_status_e status;
1057 if (!hldev->config.fifo.queue[i].tti[j].enabled)
1060 /* at least some TTI enabled. Record it. */
1061 hldev->tti_enabled = 1;
1063 status = __hal_device_tti_apply(hldev,
1064 &hldev->config.fifo.queue[i].tti[j],
1065 i * XGE_HAL_MAX_FIFO_TTI_NUM + j, runtime);
1066 if (status != XGE_HAL_OK)
1071 /* processing bimodal TTIs */
1072 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1073 xge_hal_status_e status;
1075 if (!hldev->bimodal_tti[i].enabled)
1078 /* at least some bimodal TTI enabled. Record it. */
1079 hldev->tti_enabled = 1;
1081 status = __hal_device_tti_apply(hldev, &hldev->bimodal_tti[i],
1082 XGE_HAL_MAX_FIFO_TTI_RING_0 + i, runtime);
1083 if (status != XGE_HAL_OK)
1092 * __hal_device_rti_configure
1093 * @hldev: HAL device handle.
1095 * RTI Initialization.
1096 * Initialize Receive Traffic Interrupt Scheme.
1099 __hal_device_rti_configure(xge_hal_device_t *hldev, int runtime)
1101 xge_hal_pci_bar0_t *bar0;
1102 u64 val64, data1 = 0, data2 = 0;
1107 * we don't want to re-configure RTI in case when
1108 * bimodal interrupts are in use. Instead reconfigure TTI
1109 * with new RTI values.
1111 if (hldev->config.bimodal_interrupts) {
1112 __hal_device_bimodal_configure(hldev);
1113 return __hal_device_tti_configure(hldev, 1);
1115 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
1117 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1119 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++) {
1120 xge_hal_rti_config_t *rti = &hldev->config.ring.queue[i].rti;
1122 if (!hldev->config.ring.queue[i].configured)
1125 if (rti->timer_val_us) {
1126 unsigned int rx_interval;
1128 if (hldev->config.pci_freq_mherz) {
1129 rx_interval = hldev->config.pci_freq_mherz *
1130 rti->timer_val_us / 8;
1132 __hal_fix_time_ival_herc(hldev,
1135 rx_interval = rti->timer_val_us;
1137 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_TIMER_VAL(rx_interval);
1138 if (rti->timer_ac_en) {
1139 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_AC_EN;
1141 data1 |= XGE_HAL_RTI_DATA1_MEM_RX_TIMER_CI_EN;
1144 if (rti->urange_a ||
1151 data1 |=XGE_HAL_RTI_DATA1_MEM_RX_URNG_A(rti->urange_a) |
1152 XGE_HAL_RTI_DATA1_MEM_RX_URNG_B(rti->urange_b) |
1153 XGE_HAL_RTI_DATA1_MEM_RX_URNG_C(rti->urange_c);
1155 data2 |= XGE_HAL_RTI_DATA2_MEM_RX_UFC_A(rti->ufc_a) |
1156 XGE_HAL_RTI_DATA2_MEM_RX_UFC_B(rti->ufc_b) |
1157 XGE_HAL_RTI_DATA2_MEM_RX_UFC_C(rti->ufc_c) |
1158 XGE_HAL_RTI_DATA2_MEM_RX_UFC_D(rti->ufc_d);
1161 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data1,
1162 &bar0->rti_data1_mem);
1163 (void)xge_os_pio_mem_read64(hldev->pdev,
1164 hldev->regh0, &bar0->rti_data1_mem);
1165 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, data2,
1166 &bar0->rti_data2_mem);
1167 (void)xge_os_pio_mem_read64(hldev->pdev,
1168 hldev->regh0, &bar0->rti_data2_mem);
1171 val64 = XGE_HAL_RTI_CMD_MEM_WE |
1172 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD;
1173 val64 |= XGE_HAL_RTI_CMD_MEM_OFFSET(i);
1174 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1175 &bar0->rti_command_mem);
1177 if (!runtime && __hal_device_register_poll(hldev,
1178 &bar0->rti_command_mem, 0,
1179 XGE_HAL_RTI_CMD_MEM_STROBE_NEW_CMD,
1180 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1181 /* upper layer may require to repeat */
1182 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1186 xge_debug_device(XGE_TRACE,
1187 "RTI[%d] configured: rti_data1_mem 0x"XGE_OS_LLXFMT,
1189 (unsigned long long)xge_os_pio_mem_read64(hldev->pdev,
1190 hldev->regh0, &bar0->rti_data1_mem));
/*
 * MDIO write sequence for the Xena XAUI interface. Each 64-bit word is
 * streamed by __hal_device_xaui_configure() into bar0->mdio_control;
 * SWITCH_SIGN/END_SIGN sentinels (declared elsewhere) delimit the table.
 * NOTE(review): some table entries, including the END_SIGN terminator and
 * the closing brace, are not visible in this excerpt.
 */
1198 /* Constants to be programmed into the Xena's registers to configure
1200 static u64 default_xena_mdio_cfg[] = {
1202 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1203 0xC0010100008000E4ULL,
1204 /* Remove Reset from PMA PLL */
1205 0xC001010000000000ULL, 0xC0010100000000E0ULL,
1206 0xC0010100000000E4ULL,
/*
 * MDIO write sequence for the Herc XAUI interface; consumed the same way
 * as default_xena_mdio_cfg by __hal_device_xaui_configure().
 * NOTE(review): the table contents are not visible in this excerpt.
 */
1210 static u64 default_herc_mdio_cfg[] = {
/*
 * DTX write sequence for the Xena XAUI interface. Each 64-bit word is
 * streamed by __hal_device_xaui_configure() into bar0->dtx_control.
 * NOTE(review): the END_SIGN terminator and closing brace are not visible
 * in this excerpt.
 */
1214 static u64 default_xena_dtx_cfg[] = {
1215 0x8000051500000000ULL, 0x80000515000000E0ULL,
1216 0x80000515D93500E4ULL, 0x8001051500000000ULL,
1217 0x80010515000000E0ULL, 0x80010515001E00E4ULL,
1218 0x8002051500000000ULL, 0x80020515000000E0ULL,
1219 0x80020515F21000E4ULL,
1220 /* Set PADLOOPBACKN */
1221 0x8002051500000000ULL, 0x80020515000000E0ULL,
1222 0x80020515B20000E4ULL, 0x8003051500000000ULL,
1223 0x80030515000000E0ULL, 0x80030515B20000E4ULL,
1224 0x8004051500000000ULL, 0x80040515000000E0ULL,
1225 0x80040515B20000E4ULL, 0x8005051500000000ULL,
1226 0x80050515000000E0ULL, 0x80050515B20000E4ULL,
1228 /* Remove PADLOOPBACKN */
1229 0x8002051500000000ULL, 0x80020515000000E0ULL,
1230 0x80020515F20000E4ULL, 0x8003051500000000ULL,
1231 0x80030515000000E0ULL, 0x80030515F20000E4ULL,
1232 0x8004051500000000ULL, 0x80040515000000E0ULL,
1233 0x80040515F20000E4ULL, 0x8005051500000000ULL,
1234 0x80050515000000E0ULL, 0x80050515F20000E4ULL,
/*
 * DTX write sequence for the Herc XAUI interface, streamed into
 * bar0->dtx_control by __hal_device_xaui_configure().
 * NOTE(review): two definitions of default_herc_dtx_cfg appear below —
 * presumably alternate branches of a preprocessor conditional (e.g. the
 * XGE_HAL_HERC_EMULATION build) whose #if/#else/#endif lines are not
 * visible in this excerpt; confirm against the full source.
 */
1239 static u64 default_herc_dtx_cfg[] = {
1240 0x80000515BA750000ULL, 0x80000515BA7500E0ULL,
1241 0x80000515BA750004ULL, 0x80000515BA7500E4ULL,
1242 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1243 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1244 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1245 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
1250 static u64 default_herc_dtx_cfg[] = {
1251 0x8000051536750000ULL, 0x80000515367500E0ULL,
1252 0x8000051536750004ULL, 0x80000515367500E4ULL,
1254 0x80010515003F0000ULL, 0x80010515003F00E0ULL,
1255 0x80010515003F0004ULL, 0x80010515003F00E4ULL,
1257 0x801205150D440000ULL, 0x801205150D4400E0ULL,
1258 0x801205150D440004ULL, 0x801205150D4400E4ULL,
1260 0x80020515F2100000ULL, 0x80020515F21000E0ULL,
1261 0x80020515F2100004ULL, 0x80020515F21000E4ULL,
/*
 * __hal_serial_mem_write64 - Write a 64-bit value to @reg as two serialized
 * 32-bit PIO writes, upper half first, then lower half. Used where a single
 * 64-bit PIO write is not appropriate (see the SWAPPER_CONTROL errata note
 * elsewhere in this file).
 */
1267 __hal_serial_mem_write64(xge_hal_device_t *hldev, u64 value, u64 *reg)
1269 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
1270 (u32)(value>>32), reg);
1272 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
/*
 * __hal_serial_mem_read64 - Read a 64-bit register value via PIO.
 * Counterpart of __hal_serial_mem_write64().
 */
1279 __hal_serial_mem_read64(xge_hal_device_t *hldev, u64 *reg)
1281 u64 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1288 * __hal_device_xaui_configure
1289 * @hldev: HAL device handle.
1291 * Configure XAUI Interface of Xena.
1293 * To Configure the Xena's XAUI, one has to write a series
1294 * of 64 bit values into two registers in a particular
1295 * sequence. Hence a macro 'SWITCH_SIGN' has been defined
1296 * which will be defined in the array of configuration values
1297 * (default_dtx_cfg & default_mdio_cfg) at appropriate places
1298 * to switch writing from one register to another. We continue
1299 * writing these values until we encounter the 'END_SIGN' macro.
1300 * For example, After making a series of 21 writes into
1301 * dtx_control register the 'SWITCH_SIGN' appears and hence we
1302 * start writing into mdio_control until we encounter END_SIGN.
1305 __hal_device_xaui_configure(xge_hal_device_t *hldev)
1307 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1308 int mdio_cnt = 0, dtx_cnt = 0;
1309 u64 *default_dtx_cfg = NULL, *default_mdio_cfg = NULL;
	/* Pick the per-card configuration tables defined above. */
1311 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
1312 default_dtx_cfg = default_xena_dtx_cfg;
1313 default_mdio_cfg = default_xena_mdio_cfg;
1314 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
1315 default_dtx_cfg = default_herc_dtx_cfg;
1316 default_mdio_cfg = default_herc_mdio_cfg;
1318 xge_assert(default_dtx_cfg);
	/* Stream DTX words until END_SIGN; SWITCH_SIGN hands control to
	 * the MDIO loop below. */
1324 while (default_dtx_cfg[dtx_cnt] != END_SIGN) {
1325 if (default_dtx_cfg[dtx_cnt] == SWITCH_SIGN) {
1329 __hal_serial_mem_write64(hldev, default_dtx_cfg[dtx_cnt],
1330 &bar0->dtx_control);
1334 while (default_mdio_cfg[mdio_cnt] != END_SIGN) {
1335 if (default_mdio_cfg[mdio_cnt] == SWITCH_SIGN) {
1339 __hal_serial_mem_write64(hldev, default_mdio_cfg[mdio_cnt],
1340 &bar0->mdio_control);
	/* Loop until BOTH tables have reached their END_SIGN markers. */
1343 } while ( !((default_dtx_cfg[dtx_cnt] == END_SIGN) &&
1344 (default_mdio_cfg[mdio_cnt] == END_SIGN)) );
1346 xge_debug_device(XGE_TRACE, "%s", "XAUI interface configured");
1350 * __hal_device_mac_link_util_set
1351 * @hldev: HAL device handle.
1353 * Set sampling rate to calculate link utilization.
1356 __hal_device_mac_link_util_set(xge_hal_device_t *hldev)
1358 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
	/* Program TX and RX utilization sampling periods from the
	 * user-supplied MAC configuration into mac_link_util. */
1361 val64 = XGE_HAL_MAC_TX_LINK_UTIL_VAL(
1362 hldev->config.mac.tmac_util_period) |
1363 XGE_HAL_MAC_RX_LINK_UTIL_VAL(
1364 hldev->config.mac.rmac_util_period);
1365 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1366 &bar0->mac_link_util);
1367 xge_debug_device(XGE_TRACE, "%s",
1368 "bandwidth link utilization configured");
1372 * __hal_device_set_swapper
1373 * @hldev: HAL device handle.
1375 * Set the Xframe's byte "swapper" in accordance with
1376 * endianness of the host.
 *
 * Returns XGE_HAL_ERR_SWAPPER_CTRL if the feedback register does not
 * read back the expected signature after programming.
1379 __hal_device_set_swapper(xge_hal_device_t *hldev)
1381 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1385 * from 32bit errata:
1387 * The SWAPPER_CONTROL register determines how the adapter accesses
1388 * host memory as well as how it responds to read and write requests
1389 * from the host system. Writes to this register should be performed
1390 * carefully, since the byte swappers could reverse the order of bytes.
1391 * When configuring this register keep in mind that writes to the PIF
1392 * read and write swappers could reverse the order of the upper and
1393 * lower 32-bit words. This means that the driver may have to write
1394 * to the upper 32 bits of the SWAPPER_CONTROL twice in order to
1395 * configure the entire register. */
1398 * The device by default set to a big endian format, so a big endian
1399 * driver need not set anything.
1402 #if defined(XGE_HAL_CUSTOM_HW_SWAPPER)
	/* Platform override: force-enable all swapper bits first, then
	 * program the custom value. */
1404 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1405 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1407 val64 = XGE_HAL_CUSTOM_HW_SWAPPER;
1410 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1411 &bar0->swapper_ctrl);
1413 xge_debug_device(XGE_TRACE, "using custom HW swapper 0x"XGE_OS_LLXFMT,
1414 (unsigned long long)val64);
1416 #elif !defined(XGE_OS_HOST_BIG_ENDIAN)
1419 * Initially we enable all bits to make it accessible by the driver,
1420 * then we selectively enable only those bits that we want to set.
1421 * i.e. force swapper to swap for the first time since second write
1422 * will overwrite with the final settings.
1424 * Use only for little endian platforms.
1426 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1427 0xffffffffffffffffULL, &bar0->swapper_ctrl);
1429 val64 = (XGE_HAL_SWAPPER_CTRL_PIF_R_FE |
1430 XGE_HAL_SWAPPER_CTRL_PIF_R_SE |
1431 XGE_HAL_SWAPPER_CTRL_PIF_W_FE |
1432 XGE_HAL_SWAPPER_CTRL_PIF_W_SE |
1433 XGE_HAL_SWAPPER_CTRL_RTH_FE |
1434 XGE_HAL_SWAPPER_CTRL_RTH_SE |
1435 XGE_HAL_SWAPPER_CTRL_TXP_FE |
1436 XGE_HAL_SWAPPER_CTRL_TXP_SE |
1437 XGE_HAL_SWAPPER_CTRL_TXD_R_FE |
1438 XGE_HAL_SWAPPER_CTRL_TXD_R_SE |
1439 XGE_HAL_SWAPPER_CTRL_TXD_W_FE |
1440 XGE_HAL_SWAPPER_CTRL_TXD_W_SE |
1441 XGE_HAL_SWAPPER_CTRL_TXF_R_FE |
1442 XGE_HAL_SWAPPER_CTRL_RXD_R_FE |
1443 XGE_HAL_SWAPPER_CTRL_RXD_R_SE |
1444 XGE_HAL_SWAPPER_CTRL_RXD_W_FE |
1445 XGE_HAL_SWAPPER_CTRL_RXD_W_SE |
1446 XGE_HAL_SWAPPER_CTRL_RXF_W_FE |
1447 XGE_HAL_SWAPPER_CTRL_XMSI_FE |
1448 XGE_HAL_SWAPPER_CTRL_STATS_FE | XGE_HAL_SWAPPER_CTRL_STATS_SE);
1451 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
1452 val64 |= XGE_HAL_SWAPPER_CTRL_XMSI_SE;
	/* Per the errata comment above, the upper 32 bits are written
	 * twice to guarantee the full register is configured. */
1454 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0, (u32)val64,
1455 &bar0->swapper_ctrl);
1457 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1458 &bar0->swapper_ctrl);
1460 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0, (u32)(val64>>32),
1461 &bar0->swapper_ctrl);
1462 xge_debug_device(XGE_TRACE, "%s", "using little endian set");
1465 /* Verifying if endian settings are accurate by reading a feedback
1467 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1468 &bar0->pif_rd_swapper_fb);
1469 if (val64 != XGE_HAL_IF_RD_SWAPPER_FB) {
1470 xge_debug_device(XGE_ERR, "pif_rd_swapper_fb read "XGE_OS_LLXFMT,
1471 (unsigned long long) val64);
1472 return XGE_HAL_ERR_SWAPPER_CTRL;
1475 xge_debug_device(XGE_TRACE, "%s", "be/le swapper enabled");
1481 * __hal_device_rts_mac_configure - Configure RTS steering based on
1482 * destination mac address.
1483 * @hldev: HAL device handle.
 *
 * No-op unless rts_mac_en is set in the device configuration.
1487 __hal_device_rts_mac_configure(xge_hal_device_t *hldev)
1489 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1492 if (!hldev->config.rts_mac_en) {
1497 * Set the receive traffic steering mode from default(classic)
	/* Read-modify-write rts_ctrl to turn on enhanced steering mode. */
1500 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1502 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1503 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1504 val64, &bar0->rts_ctrl);
1509 * __hal_device_rts_port_configure - Configure RTS steering based on
1510 * destination or source port number.
1511 * @hldev: HAL device handle.
 *
 * No-op unless rts_port_en is set. May return
 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING if a CAM strobe does not
 * complete in time; the caller may repeat the operation.
1515 __hal_device_rts_port_configure(xge_hal_device_t *hldev)
1517 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1521 if (!hldev->config.rts_port_en) {
1526 * Set the receive traffic steering mode from default(classic)
1529 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1531 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1533 val64, &bar0->rts_ctrl);
1536 * Initiate port steering according to per-ring configuration
1538 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1540 xge_hal_ring_queue_t *queue = &hldev->config.ring.queue[rnum];
	/* Skip rings that are not configured for port steering. */
1542 if (!queue->configured || queue->rts_port_en)
1545 for (pnum = 0; pnum < XGE_HAL_MAX_STEERABLE_PORTS; pnum++) {
1546 xge_hal_rts_port_t *port = &queue->rts_ports[pnum];
1549 * Skip and clear empty ports
1555 xge_os_pio_mem_write64(hldev->pdev,
1557 &bar0->rts_pn_cam_data);
1559 val64 = BIT(7) | BIT(15);
1562 * Assign new Port values according
1565 val64 = vBIT(port->num,8,16) |
1566 vBIT(rnum,37,3) | BIT(63);
1571 xge_os_pio_mem_write64(hldev->pdev,
1572 hldev->regh0, val64,
1573 &bar0->rts_pn_cam_data);
	/* Kick the CAM entry write for this (ring, port) slot. */
1575 val64 = BIT(7) | BIT(15) | vBIT(pnum,24,8);
1578 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1579 val64, &bar0->rts_pn_cam_ctrl);
1581 /* poll until done */
1582 if (__hal_device_register_poll(hldev,
1583 &bar0->rts_pn_cam_ctrl, 0,
1584 XGE_HAL_RTS_PN_CAM_CTRL_STROBE_BEING_EXECUTED,
1585 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) !=
1587 /* upper layer may require to repeat */
1588 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1596 * __hal_device_rts_qos_configure - Configure RTS steering based on
1598 * @hldev: HAL device handle.
 *
 * No-op unless rts_qos_en is set. Clears the RTS_DS memory, counts the
 * configured rings, then programs the rx_w_round_robin_0..4 weight
 * registers with a per-ring-count pattern.
1602 __hal_device_rts_qos_configure(xge_hal_device_t *hldev)
1604 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1608 if (!hldev->config.rts_qos_en) {
1612 /* First clear the RTS_DS_MEM_DATA */
1614 for (j = 0; j < 64; j++ )
1616 /* First clear the value */
1617 val64 = XGE_HAL_RTS_DS_MEM_DATA(0);
1619 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1620 &bar0->rts_ds_mem_data);
1622 val64 = XGE_HAL_RTS_DS_MEM_CTRL_WE |
1623 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_NEW_CMD |
1624 XGE_HAL_RTS_DS_MEM_CTRL_OFFSET ( j );
1626 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1627 &bar0->rts_ds_mem_ctrl);
1630 /* poll until done */
1631 if (__hal_device_register_poll(hldev,
1632 &bar0->rts_ds_mem_ctrl, 0,
1633 XGE_HAL_RTS_DS_MEM_CTRL_STROBE_CMD_BEING_EXECUTED,
1634 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1635 /* upper layer may require to repeat */
1636 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	/* Count rings that are actually configured. */
1642 for (j = 0; j < XGE_HAL_MAX_RING_NUM; j++) {
1643 if (hldev->config.ring.queue[j].configured)
	/* Each byte of the round-robin registers names a ring; the
	 * patterns below spread 1..8 rings evenly over 40 slots. */
1647 switch (rx_ring_num) {
1650 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1651 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1652 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1653 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1654 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1657 val64 = 0x0001000100010001ULL;
1658 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1659 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1660 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1661 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1662 val64 = 0x0001000100000000ULL;
1663 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1666 val64 = 0x0001020001020001ULL;
1667 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1668 val64 = 0x0200010200010200ULL;
1669 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1670 val64 = 0x0102000102000102ULL;
1671 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1672 val64 = 0x0001020001020001ULL;
1673 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1674 val64 = 0x0200010200000000ULL;
1675 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1678 val64 = 0x0001020300010203ULL;
1679 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1680 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1681 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1682 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1683 val64 = 0x0001020300000000ULL;
1684 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1687 val64 = 0x0001020304000102ULL;
1688 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1689 val64 = 0x0304000102030400ULL;
1690 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1691 val64 = 0x0102030400010203ULL;
1692 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1693 val64 = 0x0400010203040001ULL;
1694 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1695 val64 = 0x0203040000000000ULL;
1696 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1699 val64 = 0x0001020304050001ULL;
1700 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1701 val64 = 0x0203040500010203ULL;
1702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1703 val64 = 0x0405000102030405ULL;
1704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1705 val64 = 0x0001020304050001ULL;
1706 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1707 val64 = 0x0203040500000000ULL;
1708 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1711 val64 = 0x0001020304050600ULL;
1712 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1713 val64 = 0x0102030405060001ULL;
1714 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1715 val64 = 0x0203040506000102ULL;
1716 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1717 val64 = 0x0304050600010203ULL;
1718 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
1719 val64 = 0x0405060000000000ULL;
1720 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1723 val64 = 0x0001020304050607ULL;
1724 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_0);
1725 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_1);
1726 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_2);
1727 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_3);
	/* NOTE(review): the 8-ring case ends with the 4-ring tail pattern
	 * (0x0001020300000000) — verify against the hardware manual; the
	 * analogous Linux s2io driver uses the same value here. */
1728 val64 = 0x0001020300000000ULL;
1729 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64, &bar0->rx_w_round_robin_4);
1737 * xge_hal_device_rts_mac_enable
1739 * @devh: HAL device handle.
1740 * @index: index number where the MAC addr will be stored
1741 * @macaddr: MAC address
1743 * - Enable RTS steering for the given MAC address. This function has to be
1744 * called with lock acquired.
1747 * 1. ULD has to call this function with the index value which
1748 * satisfies the following condition:
1749 * ring_num = (index % 8)
1750 * 2.ULD also needs to make sure that the index is not
1751 * occupied by any MAC address. If that index has any MAC address
1752 * it will be overwritten and HAL will not check for it.
1756 xge_hal_device_rts_mac_enable(xge_hal_device_h devh, int index, macaddr_t macaddr)
1758 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1759 xge_hal_status_e status;
1761 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
	/* Herc supports a larger MAC address table than Xena. */
1763 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1764 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1766 if ( index >= max_addr )
1767 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1770 * Set the MAC address at the given location marked by index.
1772 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1773 if (status != XGE_HAL_OK) {
1774 xge_debug_device(XGE_ERR, "%s",
1775 "Not able to set the mac addr");
	/* Turn on steering for the RTS section holding this index. */
1779 return xge_hal_device_rts_section_enable(hldev, index);
1783 * xge_hal_device_rts_mac_disable
1784 * @hldev: HAL device handle.
1785 * @index: index number where to disable the MAC addr
1787 * Disable RTS Steering based on the MAC address.
1788 * This function should be called with lock acquired.
1792 xge_hal_device_rts_mac_disable(xge_hal_device_h devh, int index)
1794 xge_hal_status_e status;
	/* All-ones (broadcast) pattern overwrites — i.e. disables — the
	 * steering entry at @index. */
1795 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
1796 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
1798 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
1800 xge_debug_ll(XGE_TRACE, "the index value is %d ", index);
1802 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
1803 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
1805 if ( index >= max_addr )
1806 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
1809 * Disable MAC address @ given index location
1811 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
1812 if (status != XGE_HAL_OK) {
1813 xge_debug_device(XGE_ERR, "%s",
1814 "Not able to set the mac addr");
1823 * __hal_device_rth_configure - Configure RTH for the device
1824 * @hldev: HAL device handle.
1826 * Using IT (Indirection Table).
 *
 * No-op unless rth_en is set. Fills the RTH indirection table buckets
 * round-robin with the RTH-enabled rings, then enables RTH for the
 * supported IPv4/IPv6 TCP/UDP hash types.
1829 __hal_device_rth_it_configure(xge_hal_device_t *hldev)
1831 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1833 int rings[XGE_HAL_MAX_RING_NUM]={0};
1839 if (!hldev->config.rth_en) {
1844 * Set the receive traffic steering mode from default(classic)
1847 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
1849 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
1850 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1851 val64, &bar0->rts_ctrl);
	/* Number of hash buckets is 2^rth_bucket_size. */
1853 buckets_num = (1 << hldev->config.rth_bucket_size);
	/* Collect the rings participating in RTH. */
1856 for (rnum = 0; rnum < XGE_HAL_MAX_RING_NUM; rnum++) {
1857 if (hldev->config.ring.queue[rnum].configured &&
1858 hldev->config.ring.queue[rnum].rth_en)
1859 rings[rmax++] = rnum;
1863 /* for starters: fill in all the buckets with rings "equally" */
1864 for (bucket = 0; bucket < buckets_num; bucket++) {
1870 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
1871 XGE_HAL_RTS_RTH_MAP_MEM_DATA(rings[rnum]);
1872 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1873 &bar0->rts_rth_map_mem_data);
1876 val64 = XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
1877 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
1878 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(bucket);
1879 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1880 &bar0->rts_rth_map_mem_ctrl);
1882 /* poll until done */
1883 if (__hal_device_register_poll(hldev,
1884 &bar0->rts_rth_map_mem_ctrl, 0,
1885 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
1886 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1887 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
	/* Enable RTH for all supported IPv4/IPv6 hash types. */
1893 val64 = XGE_HAL_RTS_RTH_EN;
1894 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(hldev->config.rth_bucket_size);
1895 val64 |= XGE_HAL_RTS_RTH_TCP_IPV4_EN | XGE_HAL_RTS_RTH_UDP_IPV4_EN | XGE_HAL_RTS_RTH_IPV4_EN |
1896 XGE_HAL_RTS_RTH_TCP_IPV6_EN |XGE_HAL_RTS_RTH_UDP_IPV6_EN | XGE_HAL_RTS_RTH_IPV6_EN |
1897 XGE_HAL_RTS_RTH_TCP_IPV6_EX_EN | XGE_HAL_RTS_RTH_UDP_IPV6_EX_EN | XGE_HAL_RTS_RTH_IPV6_EX_EN;
1899 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1900 &bar0->rts_rth_cfg);
1902 xge_debug_device(XGE_TRACE, "RTH configured, bucket_size %d",
1903 hldev->config.rth_bucket_size);
1910 * __hal_spdm_entry_add - Add a new entry to the SPDM table.
1912 * Add a new entry to the SPDM table
1914 * This function add a new entry to the SPDM table.
1917 * This function should be called with spdm_lock.
1919 * See also: xge_hal_spdm_entry_add , xge_hal_spdm_entry_remove.
 *
 * Builds the 8x64-bit SPDM line image for the given socket pair, writes it
 * to the adapter SPDM memory, waits for the SPDM READY interrupt bit, and
 * mirrors the entry into the local hldev->spdm_table[spdm_entry] cache.
1921 static xge_hal_status_e
1922 __hal_spdm_entry_add(xge_hal_device_t *hldev, xge_hal_ipaddr_t *src_ip,
1923 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp, u8 is_tcp,
1924 u8 is_ipv4, u8 tgt_queue, u32 jhash_value, u16 spdm_entry)
1926 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
1928 u64 spdm_line_arr[8];
1932 * Clear the SPDM READY bit
1934 val64 = XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
1935 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
1936 &bar0->rxpic_int_reg);
1938 xge_debug_device(XGE_TRACE,
1939 "L4 SP %x:DP %x: hash %x tgt_queue %d ",
1940 l4_sp, l4_dp, jhash_value, tgt_queue);
1942 xge_os_memzero(&spdm_line_arr, sizeof(spdm_line_arr));
1945 * Construct the SPDM entry.
1947 spdm_line_arr[0] = vBIT(l4_sp,0,16) |
1949 vBIT(tgt_queue,53,3) |
	/* IPv4 packs both addresses into one line; IPv6 copies the four
	 * 64-bit address halves into lines 1-4. */
1955 spdm_line_arr[1] = vBIT(src_ip->ipv4.addr,0,32) |
1956 vBIT(dst_ip->ipv4.addr,32,32);
1959 xge_os_memcpy(&spdm_line_arr[1], &src_ip->ipv6.addr[0], 8);
1960 xge_os_memcpy(&spdm_line_arr[2], &src_ip->ipv6.addr[1], 8);
1961 xge_os_memcpy(&spdm_line_arr[3], &dst_ip->ipv6.addr[0], 8);
1962 xge_os_memcpy(&spdm_line_arr[4], &dst_ip->ipv6.addr[1], 8);
1965 spdm_line_arr[7] = vBIT(jhash_value,0,32) |
1966 BIT(63); /* entry enable bit */
1969 * Add the entry to the SPDM table
1971 for(line_no = 0; line_no < 8; line_no++) {
1972 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
1973 spdm_line_arr[line_no],
1974 (void *)((char *)hldev->spdm_mem_base +
1980 * Wait for the operation to be completed.
1982 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
1983 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
1984 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
1985 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
1989 * Add this information to a local SPDM table. The purpose of
1990 * maintaining a local SPDM table is to avoid a search in the
1991 * adapter SPDM table for spdm entry lookup which is very costly
1994 hldev->spdm_table[spdm_entry]->in_use = 1;
1995 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->src_ip, src_ip,
1996 sizeof(xge_hal_ipaddr_t));
1997 xge_os_memcpy(&hldev->spdm_table[spdm_entry]->dst_ip, dst_ip,
1998 sizeof(xge_hal_ipaddr_t));
1999 hldev->spdm_table[spdm_entry]->l4_sp = l4_sp;
2000 hldev->spdm_table[spdm_entry]->l4_dp = l4_dp;
2001 hldev->spdm_table[spdm_entry]->is_tcp = is_tcp;
2002 hldev->spdm_table[spdm_entry]->is_ipv4 = is_ipv4;
2003 hldev->spdm_table[spdm_entry]->tgt_queue = tgt_queue;
2004 hldev->spdm_table[spdm_entry]->jhash_value = jhash_value;
2005 hldev->spdm_table[spdm_entry]->spdm_entry = spdm_entry;
2011 * __hal_device_rth_spdm_configure - Configure RTH for the device
2012 * @hldev: HAL device handle.
2014 * Using SPDM (Socket-Pair Direct Match).
 *
 * No-op unless rth_spdm_en is set. Locates the adapter SPDM memory from
 * spdm_bir_offset, allocates (on first call) a local shadow table, or — on
 * a soft reset — replays cached entries back into the adapter, then enables
 * enhanced steering and RTH with IPv4/TCP-IPv4 hashing.
2017 __hal_device_rth_spdm_configure(xge_hal_device_t *hldev)
2019 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
2022 u32 spdm_bar_offset;
2023 int spdm_table_size;
2026 if (!hldev->config.rth_spdm_en) {
2031 * Retrieve the base address of SPDM Table.
2033 val64 = xge_os_pio_mem_read64(hldev->pdev,
2034 hldev->regh0, &bar0->spdm_bir_offset);
2036 spdm_bar_num = XGE_HAL_SPDM_PCI_BAR_NUM(val64);
2037 spdm_bar_offset = XGE_HAL_SPDM_PCI_BAR_OFFSET(val64);
2041 * spdm_bar_num specifies the PCI bar num register used to
2042 * address the memory space. spdm_bar_offset specifies the offset
2043 * of the SPDM memory with in the bar num memory space.
2045 switch (spdm_bar_num) {
2048 hldev->spdm_mem_base = (char *)bar0 +
2049 (spdm_bar_offset * 8);
2054 char *bar1 = (char *)hldev->bar1;
2055 hldev->spdm_mem_base = bar1 + (spdm_bar_offset * 8);
	/* Only BAR 0 and BAR 1 are valid homes for the SPDM memory. */
2059 xge_assert(((spdm_bar_num != 0) && (spdm_bar_num != 1)));
2063 * Retrieve the size of SPDM table(number of entries).
2065 val64 = xge_os_pio_mem_read64(hldev->pdev,
2066 hldev->regh0, &bar0->spdm_structure);
2067 hldev->spdm_max_entries = XGE_HAL_SPDM_MAX_ENTRIES(val64);
2070 spdm_table_size = hldev->spdm_max_entries *
2071 sizeof(xge_hal_spdm_entry_t);
2072 if (hldev->spdm_table == NULL) {
2076 * Allocate memory to hold the copy of SPDM table.
2078 if ((hldev->spdm_table = (xge_hal_spdm_entry_t **)
2081 (sizeof(xge_hal_spdm_entry_t *) *
2082 hldev->spdm_max_entries))) == NULL) {
2083 return XGE_HAL_ERR_OUT_OF_MEMORY;
	/* Second allocation failed: release the pointer array so the
	 * device is left with no half-built shadow table. */
2086 if ((mem = xge_os_malloc(hldev->pdev, spdm_table_size)) == NULL)
2088 xge_os_free(hldev->pdev, hldev->spdm_table,
2089 (sizeof(xge_hal_spdm_entry_t *) *
2090 hldev->spdm_max_entries));
2091 return XGE_HAL_ERR_OUT_OF_MEMORY;
2094 xge_os_memzero(mem, spdm_table_size);
2095 for (i = 0; i < hldev->spdm_max_entries; i++) {
2096 hldev->spdm_table[i] = (xge_hal_spdm_entry_t *)
2098 i * sizeof(xge_hal_spdm_entry_t));
2100 xge_os_spin_lock_init(&hldev->spdm_lock, hldev->pdev);
2103 * We are here because the host driver tries to
2104 * do a soft reset on the device.
2105 * Since the device soft reset clears the SPDM table, copy
2106 * the entries from the local SPDM table to the actual one.
2108 xge_os_spin_lock(&hldev->spdm_lock);
2109 for (i = 0; i < hldev->spdm_max_entries; i++) {
2110 xge_hal_spdm_entry_t *spdm_entry = hldev->spdm_table[i];
2112 if (spdm_entry->in_use) {
2113 if (__hal_spdm_entry_add(hldev,
2114 &spdm_entry->src_ip,
2115 &spdm_entry->dst_ip,
2119 spdm_entry->is_ipv4,
2120 spdm_entry->tgt_queue,
2121 spdm_entry->jhash_value,
2122 spdm_entry->spdm_entry)
2124 /* Log a warning */
2125 xge_debug_device(XGE_ERR,
2126 "SPDM table update from local"
2131 xge_os_spin_unlock(&hldev->spdm_lock);
2135 * Set the receive traffic steering mode from default(classic)
2138 val64 = xge_os_pio_mem_read64(hldev->pdev,
2139 hldev->regh0, &bar0->rts_ctrl);
2140 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
2141 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2142 val64, &bar0->rts_ctrl);
2145 * We may not need to configure rts_rth_jhash_cfg register as the
2146 * default values are good enough to calculate the hash.
2150 * As of now, set all the rth mask registers to zero. TODO.
2152 for(i = 0; i < 5; i++) {
2153 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2154 0, &bar0->rts_rth_hash_mask[i]);
2157 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2158 0, &bar0->rts_rth_hash_mask_5);
2160 if (hldev->config.rth_spdm_use_l4) {
2161 val64 = XGE_HAL_RTH_STATUS_SPDM_USE_L4;
2162 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2163 val64, &bar0->rts_rth_status);
2166 val64 = XGE_HAL_RTS_RTH_EN;
2167 val64 |= XGE_HAL_RTS_RTH_IPV4_EN | XGE_HAL_RTS_RTH_TCP_IPV4_EN;
2168 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2169 &bar0->rts_rth_cfg);
2176 * __hal_device_pci_init
2177 * @hldev: HAL device handle.
2179 * Initialize certain PCI/PCI-X configuration registers
2180 * with recommended values. Save config space for future hw resets.
 *
 * Snapshots the BIOS-programmed config space, enables PERR/SERR
 * reporting, applies user-specified latency timer / MMRB count / max
 * outstanding splits (PCI-X command register), disables relaxed
 * ordering, then saves the final config space for later restores.
2183 __hal_device_pci_init(xge_hal_device_t *hldev)
2189 /* Store PCI device ID and revision for future references where in we
2190 * decide Xena revision using PCI sub system ID */
2191 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
2192 xge_offsetof(xge_hal_pci_config_le_t, device_id),
2194 xge_os_pci_read8(hldev->pdev,hldev->cfgh,
2195 xge_offsetof(xge_hal_pci_config_le_t, revision),
	/* Config-space size (in 32-bit words) differs per card type. */
2198 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
2199 pcisize = XGE_HAL_PCISIZE_HERC;
2200 else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
2201 pcisize = XGE_HAL_PCISIZE_XENA;
2203 /* save original PCI config space to restore it on device_terminate() */
2204 for (i = 0; i < pcisize; i++) {
2205 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2206 (u32*)&hldev->pci_config_space_bios + i);
2209 /* Set the PErr Response bit and SERR in PCI command register. */
2210 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2211 xge_offsetof(xge_hal_pci_config_le_t, command), &cmd);
2213 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2214 xge_offsetof(xge_hal_pci_config_le_t, command), cmd);
2216 /* Set user specified value for the PCI Latency Timer */
2217 if (hldev->config.latency_timer &&
2218 hldev->config.latency_timer != XGE_HAL_USE_BIOS_DEFAULT_LATENCY) {
2219 xge_os_pci_write8(hldev->pdev, hldev->cfgh,
2220 xge_offsetof(xge_hal_pci_config_le_t,
2222 (u8)hldev->config.latency_timer);
2224 /* Read back latency timer to reflect it into user level */
2225 xge_os_pci_read8(hldev->pdev, hldev->cfgh,
2226 xge_offsetof(xge_hal_pci_config_le_t, latency_timer), &val);
2227 hldev->config.latency_timer = val;
2229 /* Enable Data Parity Error Recovery in PCI-X command register. */
2230 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2231 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2233 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2234 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2236 /* Set MMRB count in PCI-X command register. */
2237 if (hldev->config.mmrb_count != XGE_HAL_DEFAULT_BIOS_MMRB_COUNT) {
2239 cmd |= hldev->config.mmrb_count << 2;
2240 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2241 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2244 /* Read back MMRB count to reflect it into user level */
2245 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2246 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
	/* MMRB field occupies bits [3:2] of the PCI-X command register. */
2249 hldev->config.mmrb_count = cmd>>2;
2251 /* Setting Maximum outstanding splits based on system type. */
2252 if (hldev->config.max_splits_trans != XGE_HAL_USE_BIOS_DEFAULT_SPLITS) {
2253 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2254 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2257 cmd |= hldev->config.max_splits_trans << 4;
2258 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2259 xge_offsetof(xge_hal_pci_config_le_t, pcix_command),
2263 /* Read back max split trans to reflect it into user level */
2264 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2265 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2267 hldev->config.max_splits_trans = cmd>>4;
2269 /* Forcibly disabling relaxed ordering capability of the card. */
2270 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
2271 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), &cmd);
2273 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
2274 xge_offsetof(xge_hal_pci_config_le_t, pcix_command), cmd);
2276 /* save PCI config space for future resets */
2277 for (i = 0; i < pcisize; i++) {
2278 xge_os_pci_read32(hldev->pdev, hldev->cfgh, i*4,
2279 (u32*)&hldev->pci_config_space + i);
2284 * __hal_device_pci_info_get - Get PCI bus informations such as width, frequency
2286 * @devh: HAL device handle.
2287 * @pci_mode: pointer to a variable of enumerated type
2288 * xge_hal_pci_mode_e{}.
2289 * @bus_frequency: pointer to a variable of enumerated type
2290 * xge_hal_pci_bus_frequency_e{}.
2291 * @bus_width: pointer to a variable of enumerated type
2292 * xge_hal_pci_bus_width_e{}.
2294 * Get pci mode, frequency, and PCI bus width.
2296 * Returns: one of the xge_hal_status_e{} enumerated types.
2297 * XGE_HAL_OK - for success.
2298 * XGE_HAL_ERR_INVALID_PCI_INFO - for invalid PCI information from the card.
2299 * XGE_HAL_ERR_BAD_DEVICE_ID - for invalid card.
2301 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e.
2303 static xge_hal_status_e
2304 __hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
2305 xge_hal_pci_bus_frequency_e *bus_frequency,
2306 xge_hal_pci_bus_width_e *bus_width)
2308 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
2309 xge_hal_status_e rc_status = XGE_HAL_OK;
2310 xge_hal_card_e card_id = xge_hal_device_check_id (devh);
/* Under Herc emulation, bus parameters cannot be probed from hardware;
 * hard-code 66MHz instead of reading the pci_info register. */
2312 #ifdef XGE_HAL_HERC_EMULATION
2313 hldev->config.pci_freq_mherz =
2314 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2316 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2317 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
/* Herc: width and mode are reported by hardware via the pci_info
 * field in BAR0; decode them into the caller's out-parameters. */
2319 if (card_id == XGE_HAL_CARD_HERC) {
2320 xge_hal_pci_bar0_t *bar0 =
2321 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2322 u64 pci_info = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2324 if (XGE_HAL_PCI_32_BIT & pci_info)
2325 *bus_width = XGE_HAL_PCI_BUS_WIDTH_32BIT;
2327 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
/* The mode field lives in the top nibble of pci_info (hence >>60);
 * each case reports both the raw mode and the derived bus frequency. */
2328 switch((pci_info & XGE_HAL_PCI_INFO)>>60)
2330 case XGE_HAL_PCI_33MHZ_MODE:
2332 XGE_HAL_PCI_BUS_FREQUENCY_33MHZ;
2333 *pci_mode = XGE_HAL_PCI_33MHZ_MODE;
2335 case XGE_HAL_PCI_66MHZ_MODE:
2337 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2338 *pci_mode = XGE_HAL_PCI_66MHZ_MODE;
2340 case XGE_HAL_PCIX_M1_66MHZ_MODE:
2342 XGE_HAL_PCI_BUS_FREQUENCY_66MHZ;
2343 *pci_mode = XGE_HAL_PCIX_M1_66MHZ_MODE;
2345 case XGE_HAL_PCIX_M1_100MHZ_MODE:
2347 XGE_HAL_PCI_BUS_FREQUENCY_100MHZ;
2348 *pci_mode = XGE_HAL_PCIX_M1_100MHZ_MODE;
2350 case XGE_HAL_PCIX_M1_133MHZ_MODE:
2352 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2353 *pci_mode = XGE_HAL_PCIX_M1_133MHZ_MODE;
/* NOTE(review): PCI-X Mode 2 is double-clocked, so a 66MHz M2 mode maps
 * to a 133MHz effective bus frequency, 100 -> 200, 133 -> 266. */
2355 case XGE_HAL_PCIX_M2_66MHZ_MODE:
2357 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
2358 *pci_mode = XGE_HAL_PCIX_M2_66MHZ_MODE;
2360 case XGE_HAL_PCIX_M2_100MHZ_MODE:
2362 XGE_HAL_PCI_BUS_FREQUENCY_200MHZ;
2363 *pci_mode = XGE_HAL_PCIX_M2_100MHZ_MODE;
2365 case XGE_HAL_PCIX_M2_133MHZ_MODE:
2367 XGE_HAL_PCI_BUS_FREQUENCY_266MHZ;
2368 *pci_mode = XGE_HAL_PCIX_M2_133MHZ_MODE;
/* Reserved / not-supported encodings: report the failure but keep the
 * raw value in the log so the field encoding can be inspected. */
2370 case XGE_HAL_PCIX_M1_RESERVED:
2371 case XGE_HAL_PCIX_M1_66MHZ_NS:
2372 case XGE_HAL_PCIX_M1_100MHZ_NS:
2373 case XGE_HAL_PCIX_M1_133MHZ_NS:
2374 case XGE_HAL_PCIX_M2_RESERVED:
2375 case XGE_HAL_PCIX_533_RESERVED:
2377 rc_status = XGE_HAL_ERR_INVALID_PCI_INFO;
2378 xge_debug_device(XGE_ERR,
2379 "invalid pci info "XGE_OS_LLXFMT,
2380 (unsigned long long)pci_info);
2383 if (rc_status != XGE_HAL_ERR_INVALID_PCI_INFO)
2384 xge_debug_device(XGE_TRACE, "PCI info: mode %d width "
2385 "%d frequency %d", *pci_mode, *bus_width,
/* Only override the configured frequency when the user asked for
 * auto-detection (XGE_HAL_DEFAULT_USE_HARDCODE). */
2387 if (hldev->config.pci_freq_mherz ==
2388 XGE_HAL_DEFAULT_USE_HARDCODE) {
2389 hldev->config.pci_freq_mherz = *bus_frequency;
2392 /* for XENA, we report PCI mode, only. PCI bus frequency, and bus width
2393 * are set to unknown */
2394 else if (card_id == XGE_HAL_CARD_XENA) {
2396 u8 dev_num, bus_num;
2397 /* initialize defaults for XENA */
2398 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2399 *bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2400 xge_os_pci_read32(hldev->pdev, hldev->cfgh,
2401 xge_offsetof(xge_hal_pci_config_le_t, pcix_status),
/* Xena exposes its PCI-X device/bus number in pcix_status; a zero
 * device and bus number indicates conventional PCI, otherwise PCI-X. */
2403 dev_num = (u8)((pcix_status & 0xF8) >> 3);
2404 bus_num = (u8)((pcix_status & 0xFF00) >> 8);
2405 if (dev_num == 0 && bus_num == 0)
2406 *pci_mode = XGE_HAL_PCI_BASIC_MODE;
2408 *pci_mode = XGE_HAL_PCIX_BASIC_MODE;
2409 xge_debug_device(XGE_TRACE, "PCI info: mode %d", *pci_mode);
2410 if (hldev->config.pci_freq_mherz ==
2411 XGE_HAL_DEFAULT_USE_HARDCODE) {
2413 * There is no way to detect BUS frequency on Xena,
2414 * so, in case of automatic configuration we hopelessly
2417 hldev->config.pci_freq_mherz =
2418 XGE_HAL_PCI_BUS_FREQUENCY_133MHZ;
/* Titan: PCI-E style fixed parameters — always 64-bit / 250MHz here. */
2420 } else if (card_id == XGE_HAL_CARD_TITAN) {
2421 *bus_width = XGE_HAL_PCI_BUS_WIDTH_64BIT;
2422 *bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_250MHZ;
2423 if (hldev->config.pci_freq_mherz ==
2424 XGE_HAL_DEFAULT_USE_HARDCODE) {
2425 hldev->config.pci_freq_mherz = *bus_frequency;
/* Unrecognized adapter: fail with BAD_DEVICE_ID per the contract above. */
2428 rc_status = XGE_HAL_ERR_BAD_DEVICE_ID;
2429 xge_debug_device(XGE_ERR, "invalid device id %d", card_id);
2437 * __hal_device_handle_link_up_ind
2438 * @hldev: HAL device handle.
2440 * Link up indication handler. The function is invoked by HAL when
2441 * Xframe indicates that the link is up for programmable amount of time.
2444 __hal_device_handle_link_up_ind(xge_hal_device_t *hldev)
2446 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2450 * If the previous link state is not down, return.
/* Redundant link-up indication: already up, so just re-balance the
 * misc interrupt mask (Herc, ISR-driven link ints only) and bail. */
2452 if (hldev->link_state == XGE_HAL_LINK_UP) {
2453 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2454 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2455 val64 = xge_os_pio_mem_read64(
2456 hldev->pdev, hldev->regh0,
2457 &bar0->misc_int_mask);
/* Setting a bit in misc_int_mask masks that interrupt; clearing
 * unmasks it. Here: mask link-up, unmask link-down. */
2458 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2459 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2460 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2461 val64, &bar0->misc_int_mask);
2464 xge_debug_device(XGE_TRACE,
2465 "link up indication while link is up, ignoring..");
2469 /* Now re-enable it as due to noise, hardware turned it off */
2470 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2471 &bar0->adapter_control);
2472 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
/* NOTE(review): the mask here CLEARS the ECC_EN bit despite the
 * "ECC enable" comment — presumably ECC reporting is deliberately
 * suppressed during this transition; confirm against the original
 * (full) source before changing. */
2473 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */
2474 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2475 &bar0->adapter_control);
2477 /* Turn on the Laser */
2478 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2479 &bar0->adapter_control);
2480 val64 = val64|(XGE_HAL_ADAPTER_EOI_TX_ON |
2481 XGE_HAL_ADAPTER_LED_ON);
2482 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2483 &bar0->adapter_control);
2485 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
/* Herc/ISR path: verify via adapter_status that no RMAC fault remains;
 * if faults persist the link failed to come up. */
2486 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2487 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2488 &bar0->adapter_status);
2489 if (val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2490 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) {
2491 xge_debug_device(XGE_TRACE, "%s",
2492 "fail to transition link to up...");
2497 * Mask the Link Up interrupt and unmask the Link Down
2500 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2501 &bar0->misc_int_mask);
2502 val64 |= XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2503 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2504 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2505 &bar0->misc_int_mask);
/* Commit the new state and notify the upper-layer driver, if it
 * registered a link_up callback. */
2506 xge_debug_device(XGE_TRACE, "calling link up..");
2507 hldev->link_state = XGE_HAL_LINK_UP;
2510 if (g_xge_hal_driver->uld_callbacks.link_up) {
2511 g_xge_hal_driver->uld_callbacks.link_up(
2512 hldev->upper_layer_info);
/* Non-ISR path: poll adapter_status until RMAC faults clear (target
 * value 0), then queue a LINK_IS_UP event for deferred processing. */
2519 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2520 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2521 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2522 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2525 (void) xge_queue_produce_context(hldev->queueh,
2526 XGE_HAL_EVENT_LINK_IS_UP,
2528 /* link is up after been enabled */
2531 xge_debug_device(XGE_TRACE, "%s",
2532 "fail to transition link to up...");
2538 * __hal_device_handle_link_down_ind
2539 * @hldev: HAL device handle.
2541 * Link down indication handler. The function is invoked by HAL when
2542 * Xframe indicates that the link is down.
2545 __hal_device_handle_link_down_ind(xge_hal_device_t *hldev)
2547 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2551 * If the previous link state is not up, return.
/* Redundant link-down indication: already down — only re-balance the
 * misc interrupt mask (Herc, ISR-driven link ints) and return. */
2553 if (hldev->link_state == XGE_HAL_LINK_DOWN) {
2554 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2555 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC){
2556 val64 = xge_os_pio_mem_read64(
2557 hldev->pdev, hldev->regh0,
2558 &bar0->misc_int_mask);
/* Mask link-down, unmask link-up — mirror image of the link-up path. */
2559 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2560 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2561 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2562 val64, &bar0->misc_int_mask);
2565 xge_debug_device(XGE_TRACE,
2566 "link down indication while link is down, ignoring..");
2571 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2572 &bar0->adapter_control);
2574 /* try to debounce the link only if the adapter is enabled. */
2575 if (val64 & XGE_HAL_ADAPTER_CNTL_EN) {
/* Poll for RMAC faults to clear; if they do, the "down" indication
 * was noise and is ignored. */
2576 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 0,
2577 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2578 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT),
2579 XGE_HAL_DEVICE_FAULT_WAIT_MAX_MILLIS) == XGE_HAL_OK) {
2580 xge_debug_device(XGE_TRACE,
2581 "link is actually up (possible noisy link?), ignoring.");
/* Genuine link loss: turn the LED off via adapter_control. */
2586 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2587 &bar0->adapter_control);
2589 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
2590 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2591 &bar0->adapter_control);
2593 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
2594 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2596 * Mask the Link Down interrupt and unmask the Link up
2599 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2600 &bar0->misc_int_mask);
2601 val64 |= XGE_HAL_MISC_INT_REG_LINK_DOWN_INT;
2602 val64 &= ~XGE_HAL_MISC_INT_REG_LINK_UP_INT;
2603 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2604 &bar0->misc_int_mask);
/* Commit new state and notify the ULD through its link_down callback
 * (ISR path) or via a queued LINK_IS_DOWN event (deferred path). */
2607 xge_debug_device(XGE_TRACE, "calling link down..");
2608 hldev->link_state = XGE_HAL_LINK_DOWN;
2611 if (g_xge_hal_driver->uld_callbacks.link_down) {
2612 g_xge_hal_driver->uld_callbacks.link_down(
2613 hldev->upper_layer_info);
2619 (void) xge_queue_produce_context(hldev->queueh,
2620 XGE_HAL_EVENT_LINK_IS_DOWN,
2626 * __hal_device_handle_link_state_change
2627 * @hldev: HAL device handle.
2629 * Link state change handler. The function is invoked by HAL when
2630 * Xframe indicates link state change condition. The code here makes sure to
2631 * 1) ignore redundant state change indications;
2632 * 2) execute link-up sequence, and handle the failure to bring the link up;
2633 * 3) generate XGE_HAL_LINK_UP/DOWN event for the subsequent handling by
2634 * upper-layer driver (ULD).
2637 __hal_device_handle_link_state_change(xge_hal_device_t *hldev)
2642 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2646 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2647 &bar0->adapter_control);
2649 /* If the adapter is not enabled but the hal thinks we are in the up
2650 * state then transition to the down state.
2652 if ( !(val64 & XGE_HAL_ADAPTER_CNTL_EN) &&
2653 (hldev->link_state == XGE_HAL_LINK_UP) ) {
2654 return(__hal_device_handle_link_down_ind(hldev));
/* Debounce loop: sample adapter_status repeatedly (up to
 * config.link_valid_cnt iterations) and derive the hardware link
 * state from the RMAC remote/local fault bits — any fault => DOWN. */
2659 (void) xge_hal_device_status(hldev, &hw_status);
2660 hw_link_state = (hw_status &
2661 (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
2662 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) ?
2663 XGE_HAL_LINK_DOWN : XGE_HAL_LINK_UP;
2665 /* check if the current link state is still considered
2666 * to be changed. This way we will make sure that this is
2667 * not a noise which needs to be filtered out */
2668 if (hldev->link_state == hw_link_state)
2670 } while (i++ < hldev->config.link_valid_cnt);
2672 /* If the current link state is same as previous, just return */
2673 if (hldev->link_state == hw_link_state)
2675 /* detected state change */
/* Dispatch to the matching direction handler; its return value is
 * propagated to the caller. */
2676 else if (hw_link_state == XGE_HAL_LINK_UP)
2677 retcode = __hal_device_handle_link_up_ind(hldev);
2679 retcode = __hal_device_handle_link_down_ind(hldev);
/* Record a System Error (SERR): bump the counter, optionally dump the
 * full device state, queue a critical SERR event carrying the raw
 * register value for the ULD, and log the offending register. */
2687 __hal_device_handle_serr(xge_hal_device_t *hldev, char *reg, u64 value)
2689 hldev->stats.sw_dev_err_stats.serr_cnt++;
2690 if (hldev->config.dump_on_serr) {
2691 #ifdef XGE_HAL_USE_MGMT_AUX
2692 (void) xge_hal_aux_device_dump(hldev);
/* is_critical=1: the event will force recovery handling upstream. */
2696 (void) xge_queue_produce(hldev->queueh, XGE_HAL_EVENT_SERR, hldev,
2697 1, sizeof(u64), (void *)&value);
2699 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2700 (unsigned long long) value);
/* Handle a double-bit/uncorrectable ECC error indication: optionally
 * dump device state, and for Xena only, queue a critical ECCERR event
 * (Herc recovers on its own, see comment below). Always logs the
 * register name and raw value. */
2707 __hal_device_handle_eccerr(xge_hal_device_t *hldev, char *reg, u64 value)
2709 if (hldev->config.dump_on_eccerr) {
2710 #ifdef XGE_HAL_USE_MGMT_AUX
2711 (void) xge_hal_aux_device_dump(hldev);
2715 /* Herc smart enough to recover on its own! */
2716 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
2717 (void) xge_queue_produce(hldev->queueh,
2718 XGE_HAL_EVENT_ECCERR, hldev,
2719 1, sizeof(u64), (void *)&value);
2722 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2723 (unsigned long long) value);
/* Handle a data-path parity error: optionally dump device state, queue
 * a PARITYERR context event (no payload, unlike SERR/ECC), and log the
 * register name and raw value. */
2730 __hal_device_handle_parityerr(xge_hal_device_t *hldev, char *reg, u64 value)
2732 if (hldev->config.dump_on_parityerr) {
2733 #ifdef XGE_HAL_USE_MGMT_AUX
2734 (void) xge_hal_aux_device_dump(hldev);
2737 (void) xge_queue_produce_context(hldev->queueh,
2738 XGE_HAL_EVENT_PARITYERR, hldev);
2740 xge_debug_device(XGE_ERR, "%s: read "XGE_OS_LLXFMT, reg,
2741 (unsigned long long) value);
/* Handle a PCI target abort: simply queue a TARGETABORT context event
 * for deferred processing by the ULD. */
2748 __hal_device_handle_targetabort(xge_hal_device_t *hldev)
2750 (void) xge_queue_produce_context(hldev->queueh,
2751 XGE_HAL_EVENT_TARGETABORT, hldev);
2756 * __hal_device_hw_initialize
2757 * @hldev: HAL device handle.
2759 * Initialize Xframe hardware.
2761 static xge_hal_status_e
2762 __hal_device_hw_initialize(xge_hal_device_t *hldev)
2764 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
2765 xge_hal_status_e status;
2768 /* Set proper endian settings and verify the same by reading the PIF
2769 * Feed-back register. */
2770 status = __hal_device_set_swapper(hldev);
2771 if (status != XGE_HAL_OK) {
2775 /* update the pci mode, frequency, and width */
2776 if (__hal_device_pci_info_get(hldev, &hldev->pci_mode,
2777 &hldev->bus_frequency, &hldev->bus_width) != XGE_HAL_OK){
2778 hldev->pci_mode = XGE_HAL_PCI_INVALID_MODE;
2779 hldev->bus_frequency = XGE_HAL_PCI_BUS_FREQUENCY_UNKNOWN;
2780 hldev->bus_width = XGE_HAL_PCI_BUS_WIDTH_UNKNOWN;
2782 * FIXME: this cannot happen.
2783 * But if it happens we cannot continue just like that
2785 xge_debug_device(XGE_ERR, "unable to get pci info");
/* Conventional-PCI modes get a long TxReqTimeOut; the retry-delay
 * registers are zeroed. PCI-X must NOT use these values (see note). */
2788 if ((hldev->pci_mode == XGE_HAL_PCI_33MHZ_MODE) ||
2789 (hldev->pci_mode == XGE_HAL_PCI_66MHZ_MODE) ||
2790 (hldev->pci_mode == XGE_HAL_PCI_BASIC_MODE)) {
2791 /* PCI optimization: set TxReqTimeOut
2792 * register (0x800+0x120) to 0x1ff or
2793 * something close to this.
2794 * Note: not to be used for PCI-X! */
2796 val64 = XGE_HAL_TXREQTO_VAL(0x1FF);
2797 val64 |= XGE_HAL_TXREQTO_EN;
2798 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2799 &bar0->txreqtimeout);
2801 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2802 &bar0->read_retry_delay);
2804 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0ULL,
2805 &bar0->write_retry_delay);
2807 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI mode");
/* Fast PCI-X buses (266/250MHz effective) get a shorter timeout. */
2810 if (hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_266MHZ ||
2811 hldev->bus_frequency == XGE_HAL_PCI_BUS_FREQUENCY_250MHZ) {
2813 /* Optimizing for PCI-X 266/250 */
2815 val64 = XGE_HAL_TXREQTO_VAL(0x7F);
2816 val64 |= XGE_HAL_TXREQTO_EN;
2817 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2818 &bar0->txreqtimeout);
2820 xge_debug_device(XGE_TRACE, "%s", "optimizing for PCI-X 266/250 modes");
/* Herc-specific retry delays (magic value 0x4000000000000ULL —
 * presumably from the hardware errata/PRM; confirm before changing). */
2823 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2824 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2825 &bar0->read_retry_delay);
2827 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0x4000000000000ULL,
2828 &bar0->write_retry_delay);
2831 /* added this to set the no of bytes used to update lso_bytes_sent
2833 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2834 &bar0->pic_control_2);
/* Switch TXD write byte-count field from 0x2 to 0x4. */
2835 val64 &= ~XGE_HAL_TXD_WRITE_BC(0x2);
2836 val64 |= XGE_HAL_TXD_WRITE_BC(0x4);
2837 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
2838 &bar0->pic_control_2);
2839 /* added this to clear the EOI_RESET field while leaving XGXS_RESET
2840 * in reset, then a 1-second delay */
2841 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2842 XGE_HAL_SW_RESET_XGXS, &bar0->sw_reset);
2843 xge_os_mdelay(1000);
2845 /* Clear the XGXS_RESET field of the SW_RESET register in order to
2846 * release the XGXS from reset. Its reset value is 0xA5; write 0x00
2847 * to activate the XGXS. The core requires a minimum 500 us reset.*/
2848 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, 0, &bar0->sw_reset);
2849 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2853 /* read registers in all blocks */
2854 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2855 &bar0->mac_int_mask);
2856 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2857 &bar0->mc_int_mask);
2858 (void) xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2859 &bar0->xgxs_int_mask);
2861 /* set default MTU and steer based on length*/
2862 __hal_ring_mtu_set(hldev, hldev->config.mtu+22); // Alway set 22 bytes extra for steering to work
/* Broadcast reception on/off per mac configuration. */
2864 if (hldev->config.mac.rmac_bcast_en) {
2865 xge_hal_device_bcast_enable(hldev);
2867 xge_hal_device_bcast_disable(hldev);
2870 #ifndef XGE_HAL_HERC_EMULATION
2871 __hal_device_xaui_configure(hldev);
2873 __hal_device_mac_link_util_set(hldev);
2875 __hal_device_mac_link_util_set(hldev);
2878 * Keep its PCI REQ# line asserted during a write
2879 * transaction up to the end of the transaction
2881 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2882 &bar0->misc_control);
2884 val64 |= XGE_HAL_MISC_CONTROL_EXT_REQ_EN;
2886 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2887 val64, &bar0->misc_control);
/* Herc: also enable link-fault detection in misc_control. */
2889 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2890 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
2891 &bar0->misc_control);
2893 val64 |= XGE_HAL_MISC_CONTROL_LINK_FAULT;
2895 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
2896 val64, &bar0->misc_control);
2900 * bimodal interrupts is when all Rx traffic interrupts
2901 * will go to TTI, so we need to adjust RTI settings and
2902 * use adaptive TTI timer. We need to make sure RTI is
2903 * properly configured to sane value which will not
2904 * distrupt bimodal behavior.
2906 if (hldev->config.bimodal_interrupts) {
2909 /* force polling_cnt to be "0", otherwise
2910 * IRQ workload statistics will be screwed. This could
2911 * be worked out in TXPIC handler later. */
2912 hldev->config.isr_polling_cnt = 0;
2913 hldev->config.sched_timer_us = 10000;
2915 /* disable all TTI < 56 */
2916 for (i=0; i<XGE_HAL_MAX_FIFO_NUM; i++) {
2918 if (!hldev->config.fifo.queue[i].configured)
2920 for (j=0; j<XGE_HAL_MAX_FIFO_TTI_NUM; j++) {
2921 if (hldev->config.fifo.queue[i].tti[j].enabled)
2922 hldev->config.fifo.queue[i].tti[j].enabled = 0;
2926 /* now configure bimodal interrupts */
2927 __hal_device_bimodal_configure(hldev);
/* Interrupt-moderation and receive-steering sub-blocks; each returns
 * a status and aborts initialization on failure. */
2930 status = __hal_device_tti_configure(hldev, 0);
2931 if (status != XGE_HAL_OK)
2934 status = __hal_device_rti_configure(hldev, 0);
2935 if (status != XGE_HAL_OK)
2938 status = __hal_device_rth_it_configure(hldev);
2939 if (status != XGE_HAL_OK)
2942 status = __hal_device_rth_spdm_configure(hldev);
2943 if (status != XGE_HAL_OK)
2946 status = __hal_device_rts_mac_configure(hldev);
2947 if (status != XGE_HAL_OK) {
2948 xge_debug_device(XGE_ERR, "__hal_device_rts_mac_configure Failed ");
2952 status = __hal_device_rts_port_configure(hldev);
2953 if (status != XGE_HAL_OK) {
2954 xge_debug_device(XGE_ERR, "__hal_device_rts_port_configure Failed ");
2958 status = __hal_device_rts_qos_configure(hldev);
2959 if (status != XGE_HAL_OK) {
2960 xge_debug_device(XGE_ERR, "__hal_device_rts_qos_configure Failed ");
2964 __hal_device_pause_frames_configure(hldev);
2965 __hal_device_rmac_padding_configure(hldev);
2966 __hal_device_shared_splits_configure(hldev);
2968 /* make sure all interrupts going to be disabled at the moment */
2969 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
2971 /* SXE-008 Transmit DMA arbitration issue */
2972 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
2973 hldev->revision < 4) {
2974 xge_os_pio_mem_write64(hldev->pdev,hldev->regh0,
2975 XGE_HAL_ADAPTER_PCC_ENABLE_FOUR,
2978 #if 0 // Removing temporarily as FreeBSD is seeing lower performance
2979 // attributable to this fix.
2981 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
2982 /* Turn off the ECC error reporting for RLDRAM interface */
2983 if ((status = xge_hal_fix_rldram_ecc_error(hldev)) != XGE_HAL_OK)
/* Bring up fifo/ring engines, then wait for the adapter to become
 * quiescent before declaring hardware init complete. */
2987 __hal_fifo_hw_initialize(hldev);
2988 __hal_ring_hw_initialize(hldev);
2990 if (__hal_device_wait_quiescent(hldev, &val64)) {
2991 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
2994 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
2995 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
2996 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
2997 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
2998 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
3001 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is quiescent",
3002 (unsigned long long)(ulong_t)hldev);
/* MSI/MSI-X: clear the one-shot TINT bit in pic_control. */
3004 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX ||
3005 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI) {
3007 * If MSI is enabled, ensure that One Shot for MSI in PCI_CTRL
3010 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3011 &bar0->pic_control);
3012 val64 &= ~(XGE_HAL_PIC_CNTL_ONE_SHOT_TINT);
3013 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
3014 &bar0->pic_control);
3017 hldev->hw_is_initialized = 1;
3018 hldev->terminating = 0;
3023 * __hal_device_reset - Reset device only.
3024 * @hldev: HAL device handle.
3026 * Reset the device, and subsequently restore
3027 * the previously saved PCI configuration space.
3029 #define XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT 50
3030 static xge_hal_status_e
3031 __hal_device_reset(xge_hal_device_t *hldev)
3033 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3034 int i, j, swap_done, pcisize = 0;
3035 u64 val64, rawval = 0ULL;
/* Herc MSI-X: the vector table in BAR2 is wiped by reset, so snapshot
 * it into hldev->msix_vector_table first (restored further below).
 * NOTE(review): "msix_vetor_table" is a long-standing typo for
 * "msix_vector_table"; harmless but renaming would touch both loops. */
3037 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3038 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3039 if ( hldev->bar2 ) {
3040 u64 *msix_vetor_table = (u64 *)hldev->bar2;
3042 // 2 64bit words for each entry
3043 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3045 hldev->msix_vector_table[i] =
3046 xge_os_pio_mem_read64(hldev->pdev,
3047 hldev->regh2, &msix_vetor_table[i]);
/* Determine whether the byte-swapper was already configured, which
 * decides how the reset word must be written below. */
3052 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3053 &bar0->pif_rd_swapper_fb);
3054 swap_done = (val64 == XGE_HAL_IF_RD_SWAPPER_FB);
3057 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
3058 (u32)(XGE_HAL_SW_RESET_ALL>>32), (char *)&bar0->sw_reset);
/* Swapper not set up yet: byte-swap the reset word by hand on
 * little-endian hosts before the raw 32-bit write. */
3060 u32 val = (u32)(XGE_HAL_SW_RESET_ALL >> 32);
3061 #if defined(XGE_OS_HOST_LITTLE_ENDIAN) || defined(XGE_OS_PIO_LITTLE_ENDIAN)
3063 val = (((val & (u32)0x000000ffUL) << 24) |
3064 ((val & (u32)0x0000ff00UL) << 8) |
3065 ((val & (u32)0x00ff0000UL) >> 8) |
3066 ((val & (u32)0xff000000UL) >> 24));
3068 xge_os_pio_mem_write32(hldev->pdev, hldev->regh0, val,
3072 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
3073 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
3075 xge_os_mdelay(20); /* Wait for 20 ms after reset */
3078 /* Poll for no more than 1 second */
/* Re-write the saved PCI config space until the device answers with a
 * recognizable device id again (reset wipes config space). */
3079 for (i = 0; i < XGE_HAL_MAX_PCI_CONFIG_SPACE_REINIT; i++)
3081 for (j = 0; j < pcisize; j++) {
3082 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
3083 *((u32*)&hldev->pci_config_space + j));
3086 xge_os_pci_read16(hldev->pdev,hldev->cfgh,
3087 xge_offsetof(xge_hal_pci_config_le_t, device_id),
3090 if (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_UNKNOWN)
3096 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_UNKNOWN)
3098 xge_debug_device(XGE_ERR, "device reset failed");
3099 return XGE_HAL_ERR_RESET_FAILED;
/* Per-card expected raw sw_reset readback value; Herc additionally
 * retries with a 1ms delay until the register matches. */
3102 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3105 rawval = XGE_HAL_SW_RESET_RAW_VAL_HERC;
3106 pcisize = XGE_HAL_PCISIZE_HERC;
3109 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3111 if (val64 != rawval) {
3115 xge_os_mdelay(1); /* Wait for 1ms before retry */
3117 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
3118 rawval = XGE_HAL_SW_RESET_RAW_VAL_XENA;
3119 pcisize = XGE_HAL_PCISIZE_XENA;
3120 xge_os_mdelay(XGE_HAL_DEVICE_RESET_WAIT_MAX_MILLIS);
3123 /* Restore MSI-X vector table */
3124 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
3125 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3126 if ( hldev->bar2 ) {
3128 * 94: MSIXTable 00000004 ( BIR:4 Offset:0x0 )
3129 * 98: PBATable 00000404 ( BIR:4 Offset:0x400 )
3131 u64 *msix_vetor_table = (u64 *)hldev->bar2;
3133 /* 2 64bit words for each entry */
3134 for (i = 0; i < XGE_HAL_MAX_MSIX_MESSAGES * 2;
3136 xge_os_pio_mem_write64(hldev->pdev,
3138 hldev->msix_vector_table[i],
3139 &msix_vetor_table[i]);
/* Reset forces link down; verify final sw_reset readback against the
 * expected raw value and fail loudly on mismatch. */
3145 hldev->link_state = XGE_HAL_LINK_DOWN;
3146 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3149 if (val64 != rawval) {
3150 xge_debug_device(XGE_ERR, "device has not been reset "
3151 "got 0x"XGE_OS_LLXFMT", expected 0x"XGE_OS_LLXFMT,
3152 (unsigned long long)val64, (unsigned long long)rawval);
3153 return XGE_HAL_ERR_RESET_FAILED;
3156 hldev->hw_is_initialized = 0;
3161 * __hal_device_poll - General private routine to poll the device.
3162 * @hldev: HAL device handle.
3164 * Returns: one of the xge_hal_status_e{} enumerated types.
3165 * XGE_HAL_OK - for success.
3166 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
3168 static xge_hal_status_e
3169 __hal_device_poll(xge_hal_device_t *hldev)
3171 xge_hal_pci_bar0_t *bar0;
3174 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
3176 /* Handling SERR errors by forcing a H/W reset. */
3177 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3178 &bar0->serr_source);
3179 if (err_reg & XGE_HAL_SERR_SOURCE_ANY) {
3180 __hal_device_handle_serr(hldev, "serr_source", err_reg);
3181 return XGE_HAL_ERR_CRITICAL;
/* Data-path parity errors surface via misc_int_reg; count and report
 * them as critical so the caller triggers recovery. */
3184 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3185 &bar0->misc_int_reg);
3187 if (err_reg & XGE_HAL_MISC_INT_REG_DP_ERR_INT) {
3188 hldev->stats.sw_dev_err_stats.parity_err_cnt++;
3189 __hal_device_handle_parityerr(hldev, "misc_int_reg", err_reg);
3190 return XGE_HAL_ERR_CRITICAL;
/* When link ints are ISR-driven (Herc), poll-based link tracking is
 * done only for Xena; otherwise it runs for all cards. */
3193 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3194 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
3198 /* Handling link status change error Intr */
3199 err_reg = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3200 &bar0->mac_rmac_err_reg);
3201 if (__hal_device_handle_link_state_change(hldev))
3202 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3203 err_reg, &bar0->mac_rmac_err_reg);
/* Fault-injection hooks (test support): injected SERR/ECC/tcode values
 * are consumed (reset to 0) and routed through the real handlers. */
3206 if (hldev->inject_serr != 0) {
3207 err_reg = hldev->inject_serr;
3208 hldev->inject_serr = 0;
3209 __hal_device_handle_serr(hldev, "inject_serr", err_reg);
3210 return XGE_HAL_ERR_CRITICAL;
3213 if (hldev->inject_ecc != 0) {
3214 err_reg = hldev->inject_ecc;
3215 hldev->inject_ecc = 0;
3216 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3217 __hal_device_handle_eccerr(hldev, "inject_ecc", err_reg);
3218 return XGE_HAL_ERR_CRITICAL;
3221 if (hldev->inject_bad_tcode != 0) {
3222 u8 t_code = hldev->inject_bad_tcode;
3223 xge_hal_channel_t channel;
3224 xge_hal_fifo_txd_t txd;
3225 xge_hal_ring_rxd_1_t rxd;
3227 channel.devh = hldev;
/* Build a stack-local dummy channel/descriptor of the requested type
 * so the tcode handler can be exercised without real traffic. */
3229 if (hldev->inject_bad_tcode_for_chan_type ==
3230 XGE_HAL_CHANNEL_TYPE_FIFO) {
3231 channel.type = XGE_HAL_CHANNEL_TYPE_FIFO;
3234 channel.type = XGE_HAL_CHANNEL_TYPE_RING;
3237 hldev->inject_bad_tcode = 0;
3239 if (channel.type == XGE_HAL_CHANNEL_TYPE_FIFO)
3240 return xge_hal_device_handle_tcode(&channel, &txd,
3243 return xge_hal_device_handle_tcode(&channel, &rxd,
3251 * __hal_verify_pcc_idle - Verify All Enbled PCC are IDLE or not
3252 * @hldev: HAL device handle.
3253 * @adp_status: Adapter Status value
3254 * Usage: See xge_hal_device_enable{}.
/* Check that all enabled PCCs report IDLE in the adapter status word;
 * returns XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT when any is busy. */
3257 __hal_verify_pcc_idle(xge_hal_device_t *hldev, u64 adp_status)
3259 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA &&
3260 hldev->revision < 4) {
3262 * For Xena 1,2,3 we enable only 4 PCCs Due to
3263 * SXE-008 (Transmit DMA arbitration issue)
3265 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE)
3266 != XGE_HAL_ADAPTER_STATUS_RMAC_PCC_4_IDLE) {
3267 xge_debug_device(XGE_TRACE, "%s",
3268 "PCC is not IDLE after adapter enabled!");
3269 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
/* All other revisions/cards: check the full PCC-idle mask. */
3272 if ((adp_status & XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) !=
3273 XGE_HAL_ADAPTER_STATUS_RMAC_PCC_IDLE) {
3274 xge_debug_device(XGE_TRACE, "%s",
3275 "PCC is not IDLE after adapter enabled!");
3276 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
/* Adaptive bimodal-interrupt timer tuner, invoked per tick for a ring.
 * Uses a 0.5s sliding history of throughput (bytes * interrupt count)
 * to step the TTI timer up or down toward the best-throughput value.
 * NOTE(review): the per-call state is function-static, so the history
 * is effectively shared across rings/devices — assumed acceptable for
 * the single-device/single-ring bimodal configuration; confirm. */
3283 __hal_update_bimodal(xge_hal_device_t *hldev, int ring_no)
3285 int tval, d, iwl_avg, len_avg, bytes_avg, bytes_hist, d_hist;
3286 int iwl_rxcnt, iwl_txcnt, iwl_txavg, len_rxavg, iwl_rxavg, len_txavg;
3289 #define _HIST_SIZE 50 /* 0.5 sec history */
3290 #define _HIST_ADJ_TIMER 1
3293 static int bytes_avg_history[_HIST_SIZE] = {0};
3294 static int d_avg_history[_HIST_SIZE] = {0};
3295 static int history_idx = 0;
3296 static int pstep = 1;
3297 static int hist_adj_timer = 0;
3300 * tval - current value of this bimodal timer
3302 tval = hldev->bimodal_tti[ring_no].timer_val_us;
3305 * d - how many interrupts we were getting since last
3306 * bimodal timer tick.
3308 d = hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt -
3309 hldev->bimodal_intr_cnt;
3311 /* advance bimodal interrupt counter */
3312 hldev->bimodal_intr_cnt =
3313 hldev->stats.sw_dev_info_stats.tx_traffic_intr_cnt;
3316 * iwl_cnt - how many interrupts we've got since last
3317 * bimodal timer tick.
/* Clamp zero counts to 1 to avoid division by zero below. */
3319 iwl_rxcnt = (hldev->irq_workload_rxcnt[ring_no] ?
3320 hldev->irq_workload_rxcnt[ring_no] : 1);
3321 iwl_txcnt = (hldev->irq_workload_txcnt[ring_no] ?
3322 hldev->irq_workload_txcnt[ring_no] : 1);
3323 iwl_cnt = iwl_rxcnt + iwl_txcnt;
3326 * we need to take hldev->config.isr_polling_cnt into account
3327 * but for some reason this line causing GCC to produce wrong
3328 * code on Solaris. As of now, if bimodal_interrupts is configured
3329 * hldev->config.isr_polling_cnt is forced to be "0".
3331 * iwl_cnt = iwl_cnt / (hldev->config.isr_polling_cnt + 1); */
3334 * iwl_avg - how many RXDs on avarage been processed since
3335 * last bimodal timer tick. This indirectly includes
3338 iwl_rxavg = hldev->irq_workload_rxd[ring_no] / iwl_rxcnt;
3339 iwl_txavg = hldev->irq_workload_txd[ring_no] / iwl_txcnt;
3340 iwl_avg = iwl_rxavg + iwl_txavg;
3341 iwl_avg = iwl_avg == 0 ? 1 : iwl_avg;
3344 * len_avg - how many bytes on avarage been processed since
3345 * last bimodal timer tick. i.e. avarage frame size.
3347 len_rxavg = 1 + hldev->irq_workload_rxlen[ring_no] /
3348 (hldev->irq_workload_rxd[ring_no] ?
3349 hldev->irq_workload_rxd[ring_no] : 1);
3350 len_txavg = 1 + hldev->irq_workload_txlen[ring_no] /
3351 (hldev->irq_workload_txd[ring_no] ?
3352 hldev->irq_workload_txd[ring_no] : 1);
3353 len_avg = len_rxavg + len_txavg;
3357 /* align on low boundary */
3358 if ((tval -_STEP) < hldev->config.bimodal_timer_lo_us)
3359 tval = hldev->config.bimodal_timer_lo_us;
/* Quiet period: snap back to the low timer and wipe history so stale
 * samples cannot influence the next busy interval. */
3363 tval = hldev->config.bimodal_timer_lo_us;
3365 for (i = 0; i < _HIST_SIZE; i++)
3366 bytes_avg_history[i] = d_avg_history[i] = 0;
3372 /* always try to ajust timer to the best throughput value */
3373 bytes_avg = iwl_avg * len_avg;
3374 history_idx %= _HIST_SIZE;
3375 bytes_avg_history[history_idx] = bytes_avg;
3376 d_avg_history[history_idx] = d;
3378 d_hist = bytes_hist = 0;
3379 for (i = 0; i < _HIST_SIZE; i++) {
3380 /* do not re-configure until history is gathered */
3381 if (!bytes_avg_history[i]) {
3382 tval = hldev->config.bimodal_timer_lo_us;
3385 bytes_hist += bytes_avg_history[i];
3386 d_hist += d_avg_history[i];
3388 bytes_hist /= _HIST_SIZE;
3389 d_hist /= _HIST_SIZE;
3391 // xge_os_printf("d %d iwl_avg %d len_avg %d:%d:%d tval %d avg %d hist %d pstep %d",
3392 // d, iwl_avg, len_txavg, len_rxavg, len_avg, tval, d*bytes_avg,
3393 // d_hist*bytes_hist, pstep);
3395 /* make an adaptive step */
/* Throughput dropped vs. history (after a grace period): step the
 * timer toward whichever bound improves it, bounded by hi/lo limits. */
3396 if (d * bytes_avg < d_hist * bytes_hist && hist_adj_timer++ > _HIST_ADJ_TIMER) {
3402 (tval + _STEP) <= hldev->config.bimodal_timer_hi_us) {
3404 hldev->stats.sw_dev_info_stats.bimodal_hi_adjust_cnt++;
3405 } else if ((tval - _STEP) >= hldev->config.bimodal_timer_lo_us) {
3407 hldev->stats.sw_dev_info_stats.bimodal_lo_adjust_cnt++;
3410 /* enable TTI range A for better latencies */
3411 hldev->bimodal_urange_a_en = 0;
3412 if (tval <= hldev->config.bimodal_timer_lo_us && iwl_avg > 2)
3413 hldev->bimodal_urange_a_en = 1;
3416 /* reset workload statistics counters */
3417 hldev->irq_workload_rxcnt[ring_no] = 0;
3418 hldev->irq_workload_rxd[ring_no] = 0;
3419 hldev->irq_workload_rxlen[ring_no] = 0;
3420 hldev->irq_workload_txcnt[ring_no] = 0;
3421 hldev->irq_workload_txd[ring_no] = 0;
3422 hldev->irq_workload_txlen[ring_no] = 0;
3424 /* reconfigure TTI56 + ring_no with new timer value */
3425 hldev->bimodal_timer_val_us = tval;
3426 (void) __hal_device_rti_configure(hldev, 1);
3430 __hal_update_rxufca(xge_hal_device_t *hldev, int ring_no)
3434 ufc = hldev->config.ring.queue[ring_no].rti.ufc_a;
3435 ic = hldev->stats.sw_dev_info_stats.rx_traffic_intr_cnt;
3437 /* urange_a adaptive coalescing */
3438 if (hldev->rxufca_lbolt > hldev->rxufca_lbolt_time) {
3439 if (ic > hldev->rxufca_intr_thres) {
3440 if (ufc < hldev->config.rxufca_hi_lim) {
3442 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3443 hldev->config.ring.queue[i].rti.ufc_a = ufc;
3444 (void) __hal_device_rti_configure(hldev, 1);
3445 hldev->stats.sw_dev_info_stats.
3446 rxufca_hi_adjust_cnt++;
3448 hldev->rxufca_intr_thres = ic +
3449 hldev->config.rxufca_intr_thres; /* def: 30 */
3451 if (ufc > hldev->config.rxufca_lo_lim) {
3453 for (i=0; i<XGE_HAL_MAX_RING_NUM; i++)
3454 hldev->config.ring.queue[i].rti.ufc_a = ufc;
3455 (void) __hal_device_rti_configure(hldev, 1);
3456 hldev->stats.sw_dev_info_stats.
3457 rxufca_lo_adjust_cnt++;
3460 hldev->rxufca_lbolt_time = hldev->rxufca_lbolt +
3461 hldev->config.rxufca_lbolt_period;
3463 hldev->rxufca_lbolt++;
3467 * __hal_device_handle_mc - Handle MC interrupt reason
3468 * @hldev: HAL device handle.
3469 * @reason: interrupt reason
3472 __hal_device_handle_mc(xge_hal_device_t *hldev, u64 reason)
3474 xge_hal_pci_bar0_t *isrbar0 =
3475 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3478 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3479 &isrbar0->mc_int_status);
3480 if (!(val64 & XGE_HAL_MC_INT_STATUS_MC_INT))
3483 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3484 &isrbar0->mc_err_reg);
3485 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3486 val64, &isrbar0->mc_err_reg);
3488 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_L ||
3489 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_SG_ERR_U ||
3490 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_0 ||
3491 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_SG_ERR_1 ||
3492 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3493 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_L ||
3494 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_SG_ERR_U ||
3495 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_L ||
3496 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_SG_ERR_U))) {
3497 hldev->stats.sw_dev_err_stats.single_ecc_err_cnt++;
3498 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3501 if (val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_L ||
3502 val64 & XGE_HAL_MC_ERR_REG_ETQ_ECC_DB_ERR_U ||
3503 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3504 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1 ||
3505 (xge_hal_device_check_id(hldev) != XGE_HAL_CARD_XENA &&
3506 (val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_L ||
3507 val64 & XGE_HAL_MC_ERR_REG_ITQ_ECC_DB_ERR_U ||
3508 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_L ||
3509 val64 & XGE_HAL_MC_ERR_REG_RLD_ECC_DB_ERR_U))) {
3510 hldev->stats.sw_dev_err_stats.double_ecc_err_cnt++;
3511 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
3514 if (val64 & XGE_HAL_MC_ERR_REG_SM_ERR) {
3515 hldev->stats.sw_dev_err_stats.sm_err_cnt++;
3518 /* those two should result in device reset */
3519 if (val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_0 ||
3520 val64 & XGE_HAL_MC_ERR_REG_MIRI_ECC_DB_ERR_1) {
3521 __hal_device_handle_eccerr(hldev, "mc_err_reg", val64);
3522 return XGE_HAL_ERR_CRITICAL;
3529 * __hal_device_handle_pic - Handle non-traffic PIC interrupt reason
3530 * @hldev: HAL device handle.
3531 * @reason: interrupt reason
3534 __hal_device_handle_pic(xge_hal_device_t *hldev, u64 reason)
3536 xge_hal_pci_bar0_t *isrbar0 =
3537 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3540 if (reason & XGE_HAL_PIC_INT_FLSH) {
3541 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3542 &isrbar0->flsh_int_reg);
3543 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3544 val64, &isrbar0->flsh_int_reg);
3545 /* FIXME: handle register */
3547 if (reason & XGE_HAL_PIC_INT_MDIO) {
3548 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3549 &isrbar0->mdio_int_reg);
3550 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3551 val64, &isrbar0->mdio_int_reg);
3552 /* FIXME: handle register */
3554 if (reason & XGE_HAL_PIC_INT_IIC) {
3555 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3556 &isrbar0->iic_int_reg);
3557 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3558 val64, &isrbar0->iic_int_reg);
3559 /* FIXME: handle register */
3561 if (reason & XGE_HAL_PIC_INT_MISC) {
3562 val64 = xge_os_pio_mem_read64(hldev->pdev,
3563 hldev->regh0, &isrbar0->misc_int_reg);
3564 #ifdef XGE_HAL_PROCESS_LINK_INT_IN_ISR
3565 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
3566 /* Check for Link interrupts. If both Link Up/Down
3567 * bits are set, clear both and check adapter status
3569 if ((val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) &&
3570 (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT)) {
3573 xge_debug_device(XGE_TRACE,
3574 "both link up and link down detected "XGE_OS_LLXFMT,
3575 (unsigned long long)val64);
3577 temp64 = (XGE_HAL_MISC_INT_REG_LINK_DOWN_INT |
3578 XGE_HAL_MISC_INT_REG_LINK_UP_INT);
3579 xge_os_pio_mem_write64(hldev->pdev,
3580 hldev->regh0, temp64,
3581 &isrbar0->misc_int_reg);
3583 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_UP_INT) {
3584 xge_debug_device(XGE_TRACE,
3585 "link up call request, misc_int "XGE_OS_LLXFMT,
3586 (unsigned long long)val64);
3587 __hal_device_handle_link_up_ind(hldev);
3589 else if (val64 & XGE_HAL_MISC_INT_REG_LINK_DOWN_INT){
3590 xge_debug_device(XGE_TRACE,
3591 "link down request, misc_int "XGE_OS_LLXFMT,
3592 (unsigned long long)val64);
3593 __hal_device_handle_link_down_ind(hldev);
3598 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3599 val64, &isrbar0->misc_int_reg);
3607 * __hal_device_handle_txpic - Handle TxPIC interrupt reason
3608 * @hldev: HAL device handle.
3609 * @reason: interrupt reason
3612 __hal_device_handle_txpic(xge_hal_device_t *hldev, u64 reason)
3614 xge_hal_status_e status = XGE_HAL_OK;
3615 xge_hal_pci_bar0_t *isrbar0 =
3616 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3619 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3620 &isrbar0->pic_int_status);
3621 if ( val64 & (XGE_HAL_PIC_INT_FLSH |
3622 XGE_HAL_PIC_INT_MDIO |
3623 XGE_HAL_PIC_INT_IIC |
3624 XGE_HAL_PIC_INT_MISC) ) {
3625 status = __hal_device_handle_pic(hldev, val64);
3629 if (!(val64 & XGE_HAL_PIC_INT_TX))
3632 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3633 &isrbar0->txpic_int_reg);
3634 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3635 val64, &isrbar0->txpic_int_reg);
3638 if (val64 & XGE_HAL_TXPIC_INT_SCHED_INTR) {
3641 if (g_xge_hal_driver->uld_callbacks.sched_timer != NULL)
3642 g_xge_hal_driver->uld_callbacks.sched_timer(
3643 hldev, hldev->upper_layer_info);
3645 * This feature implements adaptive receive interrupt
3646 * coalecing. It is disabled by default. To enable it
3647 * set hldev->config.rxufca_lo_lim to be not equal to
3648 * hldev->config.rxufca_hi_lim.
3650 * We are using HW timer for this feature, so
3651 * use needs to configure hldev->config.rxufca_lbolt_period
3652 * which is essentially a time slice of timer.
3654 * For those who familiar with Linux, lbolt means jiffies
3655 * of this timer. I.e. timer tick.
3657 if (hldev->config.rxufca_lo_lim !=
3658 hldev->config.rxufca_hi_lim &&
3659 hldev->config.rxufca_lo_lim != 0) {
3660 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3661 if (!hldev->config.ring.queue[i].configured)
3663 if (hldev->config.ring.queue[i].rti.urange_a)
3664 __hal_update_rxufca(hldev, i);
3669 * This feature implements adaptive TTI timer re-calculation
3670 * based on host utilization, number of interrupt processed,
3671 * number of RXD per tick and avarage length of packets per
3674 if (hldev->config.bimodal_interrupts) {
3675 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
3676 if (!hldev->config.ring.queue[i].configured)
3678 if (hldev->bimodal_tti[i].enabled)
3679 __hal_update_bimodal(hldev, i);
3688 * __hal_device_handle_txdma - Handle TxDMA interrupt reason
3689 * @hldev: HAL device handle.
3690 * @reason: interrupt reason
3693 __hal_device_handle_txdma(xge_hal_device_t *hldev, u64 reason)
3695 xge_hal_pci_bar0_t *isrbar0 =
3696 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3697 u64 val64, temp64, err;
3699 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3700 &isrbar0->txdma_int_status);
3701 if (val64 & XGE_HAL_TXDMA_PFC_INT) {
3702 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3703 &isrbar0->pfc_err_reg);
3704 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3705 err, &isrbar0->pfc_err_reg);
3706 hldev->stats.sw_dev_info_stats.pfc_err_cnt++;
3707 temp64 = XGE_HAL_PFC_ECC_DB_ERR|XGE_HAL_PFC_SM_ERR_ALARM
3708 |XGE_HAL_PFC_MISC_0_ERR|XGE_HAL_PFC_MISC_1_ERR
3709 |XGE_HAL_PFC_PCIX_ERR;
3713 if (val64 & XGE_HAL_TXDMA_TDA_INT) {
3714 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3715 &isrbar0->tda_err_reg);
3716 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3717 err, &isrbar0->tda_err_reg);
3718 hldev->stats.sw_dev_info_stats.tda_err_cnt++;
3719 temp64 = XGE_HAL_TDA_Fn_ECC_DB_ERR|XGE_HAL_TDA_SM0_ERR_ALARM
3720 |XGE_HAL_TDA_SM1_ERR_ALARM;
3724 if (val64 & XGE_HAL_TXDMA_PCC_INT) {
3725 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3726 &isrbar0->pcc_err_reg);
3727 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3728 err, &isrbar0->pcc_err_reg);
3729 hldev->stats.sw_dev_info_stats.pcc_err_cnt++;
3730 temp64 = XGE_HAL_PCC_FB_ECC_DB_ERR|XGE_HAL_PCC_TXB_ECC_DB_ERR
3731 |XGE_HAL_PCC_SM_ERR_ALARM|XGE_HAL_PCC_WR_ERR_ALARM
3732 |XGE_HAL_PCC_N_SERR|XGE_HAL_PCC_6_COF_OV_ERR
3733 |XGE_HAL_PCC_7_COF_OV_ERR|XGE_HAL_PCC_6_LSO_OV_ERR
3734 |XGE_HAL_PCC_7_LSO_OV_ERR;
3738 if (val64 & XGE_HAL_TXDMA_TTI_INT) {
3739 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3740 &isrbar0->tti_err_reg);
3741 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3742 err, &isrbar0->tti_err_reg);
3743 hldev->stats.sw_dev_info_stats.tti_err_cnt++;
3744 temp64 = XGE_HAL_TTI_SM_ERR_ALARM;
3748 if (val64 & XGE_HAL_TXDMA_LSO_INT) {
3749 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3750 &isrbar0->lso_err_reg);
3751 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3752 err, &isrbar0->lso_err_reg);
3753 hldev->stats.sw_dev_info_stats.lso_err_cnt++;
3754 temp64 = XGE_HAL_LSO6_ABORT|XGE_HAL_LSO7_ABORT
3755 |XGE_HAL_LSO6_SM_ERR_ALARM|XGE_HAL_LSO7_SM_ERR_ALARM;
3759 if (val64 & XGE_HAL_TXDMA_TPA_INT) {
3760 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3761 &isrbar0->tpa_err_reg);
3762 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3763 err, &isrbar0->tpa_err_reg);
3764 hldev->stats.sw_dev_info_stats.tpa_err_cnt++;
3765 temp64 = XGE_HAL_TPA_SM_ERR_ALARM;
3769 if (val64 & XGE_HAL_TXDMA_SM_INT) {
3770 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3771 &isrbar0->sm_err_reg);
3772 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3773 err, &isrbar0->sm_err_reg);
3774 hldev->stats.sw_dev_info_stats.sm_err_cnt++;
3775 temp64 = XGE_HAL_SM_SM_ERR_ALARM;
3782 reset : xge_hal_device_reset(hldev);
3783 xge_hal_device_enable(hldev);
3784 xge_hal_device_intr_enable(hldev);
3789 * __hal_device_handle_txmac - Handle TxMAC interrupt reason
3790 * @hldev: HAL device handle.
3791 * @reason: interrupt reason
3794 __hal_device_handle_txmac(xge_hal_device_t *hldev, u64 reason)
3796 xge_hal_pci_bar0_t *isrbar0 =
3797 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3800 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3801 &isrbar0->mac_int_status);
3802 if (!(val64 & XGE_HAL_MAC_INT_STATUS_TMAC_INT))
3805 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3806 &isrbar0->mac_tmac_err_reg);
3807 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3808 val64, &isrbar0->mac_tmac_err_reg);
3809 hldev->stats.sw_dev_info_stats.mac_tmac_err_cnt++;
3810 temp64 = XGE_HAL_TMAC_TX_BUF_OVRN|XGE_HAL_TMAC_TX_SM_ERR;
3811 if (val64 & temp64) {
3812 xge_hal_device_reset(hldev);
3813 xge_hal_device_enable(hldev);
3814 xge_hal_device_intr_enable(hldev);
3821 * __hal_device_handle_txxgxs - Handle TxXGXS interrupt reason
3822 * @hldev: HAL device handle.
3823 * @reason: interrupt reason
3826 __hal_device_handle_txxgxs(xge_hal_device_t *hldev, u64 reason)
3828 xge_hal_pci_bar0_t *isrbar0 =
3829 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3832 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3833 &isrbar0->xgxs_int_status);
3834 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_TXGXS))
3837 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3838 &isrbar0->xgxs_txgxs_err_reg);
3839 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3840 val64, &isrbar0->xgxs_txgxs_err_reg);
3841 hldev->stats.sw_dev_info_stats.xgxs_txgxs_err_cnt++;
3842 temp64 = XGE_HAL_TXGXS_ESTORE_UFLOW|XGE_HAL_TXGXS_TX_SM_ERR;
3843 if (val64 & temp64) {
3844 xge_hal_device_reset(hldev);
3845 xge_hal_device_enable(hldev);
3846 xge_hal_device_intr_enable(hldev);
3853 * __hal_device_handle_rxpic - Handle RxPIC interrupt reason
3854 * @hldev: HAL device handle.
3855 * @reason: interrupt reason
3858 __hal_device_handle_rxpic(xge_hal_device_t *hldev, u64 reason)
3860 /* FIXME: handle register */
3866 * __hal_device_handle_rxdma - Handle RxDMA interrupt reason
3867 * @hldev: HAL device handle.
3868 * @reason: interrupt reason
3871 __hal_device_handle_rxdma(xge_hal_device_t *hldev, u64 reason)
3873 xge_hal_pci_bar0_t *isrbar0 =
3874 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3875 u64 val64, err, temp64;
3877 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3878 &isrbar0->rxdma_int_status);
3879 if (val64 & XGE_HAL_RXDMA_RC_INT) {
3880 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3881 &isrbar0->rc_err_reg);
3882 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3883 err, &isrbar0->rc_err_reg);
3884 hldev->stats.sw_dev_info_stats.rc_err_cnt++;
3885 temp64 = XGE_HAL_RC_PRCn_ECC_DB_ERR|XGE_HAL_RC_FTC_ECC_DB_ERR
3886 |XGE_HAL_RC_PRCn_SM_ERR_ALARM
3887 |XGE_HAL_RC_FTC_SM_ERR_ALARM;
3891 if (val64 & XGE_HAL_RXDMA_RPA_INT) {
3892 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3893 &isrbar0->rpa_err_reg);
3894 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3895 err, &isrbar0->rpa_err_reg);
3896 hldev->stats.sw_dev_info_stats.rpa_err_cnt++;
3897 temp64 = XGE_HAL_RPA_SM_ERR_ALARM|XGE_HAL_RPA_CREDIT_ERR;
3901 if (val64 & XGE_HAL_RXDMA_RDA_INT) {
3902 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3903 &isrbar0->rda_err_reg);
3904 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3905 err, &isrbar0->rda_err_reg);
3906 hldev->stats.sw_dev_info_stats.rda_err_cnt++;
3907 temp64 = XGE_HAL_RDA_RXDn_ECC_DB_ERR
3908 |XGE_HAL_RDA_FRM_ECC_DB_N_AERR
3909 |XGE_HAL_RDA_SM1_ERR_ALARM|XGE_HAL_RDA_SM0_ERR_ALARM
3910 |XGE_HAL_RDA_RXD_ECC_DB_SERR;
3914 if (val64 & XGE_HAL_RXDMA_RTI_INT) {
3915 err = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3916 &isrbar0->rti_err_reg);
3917 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3918 err, &isrbar0->rti_err_reg);
3919 hldev->stats.sw_dev_info_stats.rti_err_cnt++;
3920 temp64 = XGE_HAL_RTI_SM_ERR_ALARM;
3927 reset : xge_hal_device_reset(hldev);
3928 xge_hal_device_enable(hldev);
3929 xge_hal_device_intr_enable(hldev);
3934 * __hal_device_handle_rxmac - Handle RxMAC interrupt reason
3935 * @hldev: HAL device handle.
3936 * @reason: interrupt reason
3939 __hal_device_handle_rxmac(xge_hal_device_t *hldev, u64 reason)
3941 xge_hal_pci_bar0_t *isrbar0 =
3942 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3945 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3946 &isrbar0->mac_int_status);
3947 if (!(val64 & XGE_HAL_MAC_INT_STATUS_RMAC_INT))
3950 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3951 &isrbar0->mac_rmac_err_reg);
3952 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3953 val64, &isrbar0->mac_rmac_err_reg);
3954 hldev->stats.sw_dev_info_stats.mac_rmac_err_cnt++;
3955 temp64 = XGE_HAL_RMAC_RX_BUFF_OVRN|XGE_HAL_RMAC_RX_SM_ERR;
3956 if (val64 & temp64) {
3957 xge_hal_device_reset(hldev);
3958 xge_hal_device_enable(hldev);
3959 xge_hal_device_intr_enable(hldev);
3966 * __hal_device_handle_rxxgxs - Handle RxXGXS interrupt reason
3967 * @hldev: HAL device handle.
3968 * @reason: interrupt reason
3971 __hal_device_handle_rxxgxs(xge_hal_device_t *hldev, u64 reason)
3973 xge_hal_pci_bar0_t *isrbar0 =
3974 (xge_hal_pci_bar0_t *)(void *)hldev->isrbar0;
3977 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3978 &isrbar0->xgxs_int_status);
3979 if (!(val64 & XGE_HAL_XGXS_INT_STATUS_RXGXS))
3982 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
3983 &isrbar0->xgxs_rxgxs_err_reg);
3984 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
3985 val64, &isrbar0->xgxs_rxgxs_err_reg);
3986 hldev->stats.sw_dev_info_stats.xgxs_rxgxs_err_cnt++;
3987 temp64 = XGE_HAL_RXGXS_ESTORE_OFLOW|XGE_HAL_RXGXS_RX_SM_ERR;
3988 if (val64 & temp64) {
3989 xge_hal_device_reset(hldev);
3990 xge_hal_device_enable(hldev);
3991 xge_hal_device_intr_enable(hldev);
3998 * xge_hal_device_enable - Enable device.
3999 * @hldev: HAL device handle.
4001 * Enable the specified device: bring up the link/interface.
4002 * Returns: XGE_HAL_OK - success.
4003 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device
4004 * to a "quiescent" state.
4006 * See also: xge_hal_status_e{}.
4008 * Usage: See ex_open{}.
4011 xge_hal_device_enable(xge_hal_device_t *hldev)
4013 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4018 if (!hldev->hw_is_initialized) {
4019 xge_hal_status_e status;
4021 status = __hal_device_hw_initialize(hldev);
4022 if (status != XGE_HAL_OK) {
4028 * Not needed in most cases, i.e.
4029 * when device_disable() is followed by reset -
4030 * the latter copies back PCI config space, along with
4031 * the bus mastership - see __hal_device_reset().
4032 * However, there are/may-in-future be other cases, and
4035 __hal_device_bus_master_enable(hldev);
4037 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4039 * Configure the link stability period.
4041 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4042 &bar0->misc_control);
4043 if (hldev->config.link_stability_period !=
4044 XGE_HAL_DEFAULT_USE_HARDCODE) {
4046 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4047 hldev->config.link_stability_period);
4050 * Use the link stability period 1 ms as default
4052 val64 |= XGE_HAL_MISC_CONTROL_LINK_STABILITY_PERIOD(
4053 XGE_HAL_DEFAULT_LINK_STABILITY_PERIOD);
4055 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4056 val64, &bar0->misc_control);
4059 * Clearing any possible Link up/down interrupts that
4060 * could have popped up just before Enabling the card.
4062 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4063 &bar0->misc_int_reg);
4065 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4066 val64, &bar0->misc_int_reg);
4067 xge_debug_device(XGE_TRACE, "%s","link state cleared");
4069 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
4071 * Clearing any possible Link state change interrupts that
4072 * could have popped up just before Enabling the card.
4074 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4075 &bar0->mac_rmac_err_reg);
4077 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4078 val64, &bar0->mac_rmac_err_reg);
4079 xge_debug_device(XGE_TRACE, "%s", "link state cleared");
4083 if (__hal_device_wait_quiescent(hldev, &val64)) {
4084 return XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4087 /* Enabling Laser. */
4088 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4089 &bar0->adapter_control);
4090 val64 |= XGE_HAL_ADAPTER_EOI_TX_ON;
4091 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4092 &bar0->adapter_control);
4094 /* let link establish */
4097 /* set link down untill poll() routine will set it up (maybe) */
4098 hldev->link_state = XGE_HAL_LINK_DOWN;
4100 /* If link is UP (adpter is connected) then enable the adapter */
4101 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4102 &bar0->adapter_status);
4103 if( val64 & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4104 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT) ) {
4105 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4106 &bar0->adapter_control);
4107 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
4109 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4110 &bar0->adapter_control);
4111 val64 = val64 | ( XGE_HAL_ADAPTER_EOI_TX_ON |
4112 XGE_HAL_ADAPTER_LED_ON );
4115 val64 = val64 | XGE_HAL_ADAPTER_CNTL_EN; /* adapter enable */
4116 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN); /* ECC enable */
4117 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0, val64,
4118 &bar0->adapter_control);
4120 /* We spin here waiting for the Link to come up.
4121 * This is the fix for the Link being unstable after the reset. */
4126 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4127 &bar0->adapter_status);
4129 /* Read the adapter control register for Adapter_enable bit */
4130 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4131 &bar0->adapter_control);
4132 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4133 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)) &&
4134 (val64 & XGE_HAL_ADAPTER_CNTL_EN)) {
4136 if (j >= hldev->config.link_valid_cnt) {
4137 if (xge_hal_device_status(hldev, &adp_status) ==
4139 if (__hal_verify_pcc_idle(hldev,
4140 adp_status) != XGE_HAL_OK) {
4142 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4144 xge_debug_device(XGE_TRACE,
4145 "adp_status: "XGE_OS_LLXFMT
4148 (unsigned long long)adp_status);
4149 val64 = xge_os_pio_mem_read64(
4152 &bar0->adapter_control);
4154 (XGE_HAL_ADAPTER_EOI_TX_ON |
4155 XGE_HAL_ADAPTER_LED_ON );
4156 xge_os_pio_mem_write64(hldev->pdev,
4157 hldev->regh0, val64,
4158 &bar0->adapter_control);
4161 val64 = xge_os_pio_mem_read64(
4164 &bar0->adapter_control);
4165 break; /* out of for loop */
4168 XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4172 j = 0; /* Reset the count */
4173 /* Turn on the Laser */
4174 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4175 &bar0->adapter_control);
4176 val64 = val64 | XGE_HAL_ADAPTER_EOI_TX_ON;
4177 xge_os_pio_mem_write64 (hldev->pdev, hldev->regh0,
4178 val64, &bar0->adapter_control);
4182 /* Now re-enable it as due to noise, hardware
4184 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4185 &bar0->adapter_control);
4186 val64 |= XGE_HAL_ADAPTER_CNTL_EN;
4187 val64 = val64 & (~XGE_HAL_ADAPTER_ECC_EN);/*ECC enable*/
4188 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4189 &bar0->adapter_control);
4191 xge_os_mdelay(1); /* Sleep for 1 msec */
4193 } while (i < hldev->config.link_retry_cnt);
4195 __hal_device_led_actifity_fix(hldev);
4197 #ifndef XGE_HAL_PROCESS_LINK_INT_IN_ISR
4198 /* Here we are performing soft reset on XGXS to force link down.
4199 * Since link is already up, we will get link state change
4200 * poll notificatoin after adapter is enabled */
4202 __hal_serial_mem_write64(hldev, 0x80010515001E0000ULL,
4203 &bar0->dtx_control);
4204 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4206 __hal_serial_mem_write64(hldev, 0x80010515001E00E0ULL,
4207 &bar0->dtx_control);
4208 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4210 __hal_serial_mem_write64(hldev, 0x80070515001F00E4ULL,
4211 &bar0->dtx_control);
4212 (void) __hal_serial_mem_read64(hldev, &bar0->dtx_control);
4214 xge_os_mdelay(100); /* Sleep for 500 msec */
4216 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4220 * With some switches the link state change interrupt does not
4221 * occur even though the xgxs reset is done as per SPN-006. So,
4222 * poll the adapter status register and check if the link state
4225 adp_status = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4226 &bar0->adapter_status);
4227 if (!(adp_status & (XGE_HAL_ADAPTER_STATUS_RMAC_REMOTE_FAULT |
4228 XGE_HAL_ADAPTER_STATUS_RMAC_LOCAL_FAULT)))
4230 xge_debug_device(XGE_TRACE, "%s",
4231 "enable device causing link state change ind..");
4232 (void) __hal_device_handle_link_state_change(hldev);
4236 if (hldev->config.stats_refresh_time_sec !=
4237 XGE_HAL_STATS_REFRESH_DISABLE)
4238 __hal_stats_enable(&hldev->stats);
4244 * xge_hal_device_disable - Disable Xframe adapter.
4245 * @hldev: Device handle.
4247 * Disable this device. To gracefully reset the adapter, the host should:
4249 * - call xge_hal_device_disable();
4251 * - call xge_hal_device_intr_disable();
4253 * - close all opened channels and clean up outstanding resources;
4255 * - do some work (error recovery, change mtu, reset, etc);
4257 * - call xge_hal_device_enable();
4259 * - open channels, replenish RxDs, etc.
4261 * - call xge_hal_device_intr_enable().
4263 * Note: Disabling the device does _not_ include disabling of interrupts.
4264 * After disabling the device stops receiving new frames but those frames
4265 * that were already in the pipe will keep coming for some few milliseconds.
4267 * Returns: XGE_HAL_OK - success.
4268 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
4269 * a "quiescent" state.
4271 * See also: xge_hal_status_e{}.
4274 xge_hal_device_disable(xge_hal_device_t *hldev)
4276 xge_hal_status_e status = XGE_HAL_OK;
4277 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4280 xge_debug_device(XGE_TRACE, "%s", "turn off laser, cleanup hardware");
4282 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4283 &bar0->adapter_control);
4284 val64 = val64 & (~XGE_HAL_ADAPTER_CNTL_EN);
4285 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4286 &bar0->adapter_control);
4288 if (__hal_device_wait_quiescent(hldev, &val64) != XGE_HAL_OK) {
4289 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4292 if (__hal_device_register_poll(hldev, &bar0->adapter_status, 1,
4293 XGE_HAL_ADAPTER_STATUS_RC_PRC_QUIESCENT,
4294 XGE_HAL_DEVICE_QUIESCENT_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4295 xge_debug_device(XGE_TRACE, "%s", "PRC is not QUIESCENT!");
4296 status = XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT;
4299 if (hldev->config.stats_refresh_time_sec !=
4300 XGE_HAL_STATS_REFRESH_DISABLE)
4301 __hal_stats_disable(&hldev->stats);
4302 #ifdef XGE_DEBUG_ASSERT
4304 xge_assert(!hldev->stats.is_enabled);
4307 #ifndef XGE_HAL_DONT_DISABLE_BUS_MASTER_ON_STOP
4308 __hal_device_bus_master_disable(hldev);
4315 * xge_hal_device_reset - Reset device.
4316 * @hldev: HAL device handle.
4318 * Soft-reset the device, reset the device stats except reset_cnt.
4320 * After reset is done, will try to re-initialize HW.
4322 * Returns: XGE_HAL_OK - success.
4323 * XGE_HAL_ERR_DEVICE_NOT_INITIALIZED - Device is not initialized.
4324 * XGE_HAL_ERR_RESET_FAILED - Reset failed.
4326 * See also: xge_hal_status_e{}.
4329 xge_hal_device_reset(xge_hal_device_t *hldev)
4331 xge_hal_status_e status;
4333 /* increment the soft reset counter */
4334 u32 reset_cnt = hldev->stats.sw_dev_info_stats.soft_reset_cnt;
4336 xge_debug_device(XGE_TRACE, "%s (%d)", "resetting the device", reset_cnt);
4338 if (!hldev->is_initialized)
4339 return XGE_HAL_ERR_DEVICE_NOT_INITIALIZED;
4341 /* actual "soft" reset of the adapter */
4342 status = __hal_device_reset(hldev);
4344 /* reset all stats including saved */
4345 __hal_stats_soft_reset(hldev, 1);
4347 /* increment reset counter */
4348 hldev->stats.sw_dev_info_stats.soft_reset_cnt = reset_cnt + 1;
4350 /* re-initialize rxufca_intr_thres */
4351 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
4353 hldev->reset_needed_after_close = 0;
4359 * xge_hal_device_status - Check whether Xframe hardware is ready for
4361 * @hldev: HAL device handle.
4362 * @hw_status: Xframe status register. Returned by HAL.
4364 * Check whether Xframe hardware is ready for operation.
4365 * The checking includes TDMA, RDMA, PFC, PIC, MC_DRAM, and the rest
4366 * hardware functional blocks.
4368 * Returns: XGE_HAL_OK if the device is ready for operation. Otherwise
4369 * returns XGE_HAL_FAIL. Also, fills in adapter status (in @hw_status).
4371 * See also: xge_hal_status_e{}.
4372 * Usage: See ex_open{}.
4375 xge_hal_device_status(xge_hal_device_t *hldev, u64 *hw_status)
4377 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4380 tmp64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4381 &bar0->adapter_status);
4385 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TDMA_READY)) {
4386 xge_debug_device(XGE_TRACE, "%s", "TDMA is not ready!");
4387 return XGE_HAL_FAIL;
4389 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_RDMA_READY)) {
4390 xge_debug_device(XGE_TRACE, "%s", "RDMA is not ready!");
4391 return XGE_HAL_FAIL;
4393 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PFC_READY)) {
4394 xge_debug_device(XGE_TRACE, "%s", "PFC is not ready!");
4395 return XGE_HAL_FAIL;
4397 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_TMAC_BUF_EMPTY)) {
4398 xge_debug_device(XGE_TRACE, "%s", "TMAC BUF is not empty!");
4399 return XGE_HAL_FAIL;
4401 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_PIC_QUIESCENT)) {
4402 xge_debug_device(XGE_TRACE, "%s", "PIC is not QUIESCENT!");
4403 return XGE_HAL_FAIL;
4405 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_DRAM_READY)) {
4406 xge_debug_device(XGE_TRACE, "%s", "MC_DRAM is not ready!");
4407 return XGE_HAL_FAIL;
4409 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_MC_QUEUES_READY)) {
4410 xge_debug_device(XGE_TRACE, "%s", "MC_QUEUES is not ready!");
4411 return XGE_HAL_FAIL;
4413 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_M_PLL_LOCK)) {
4414 xge_debug_device(XGE_TRACE, "%s", "M_PLL is not locked!");
4415 return XGE_HAL_FAIL;
4417 #ifndef XGE_HAL_HERC_EMULATION
4419 * Andrew: in PCI 33 mode, the P_PLL is not used, and therefore,
4420 * the P_PLL_LOCK bit in the adapter_status register will
4423 if (!(tmp64 & XGE_HAL_ADAPTER_STATUS_P_PLL_LOCK) &&
4424 xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC &&
4425 hldev->pci_mode != XGE_HAL_PCI_33MHZ_MODE) {
4426 xge_debug_device(XGE_TRACE, "%s", "P_PLL is not locked!");
4427 return XGE_HAL_FAIL;
4435 __hal_device_msi_intr_endis(xge_hal_device_t *hldev, int flag)
4437 u16 msi_control_reg;
4439 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
4440 xge_offsetof(xge_hal_pci_config_le_t,
4441 msi_control), &msi_control_reg);
4444 msi_control_reg |= 0x1;
4446 msi_control_reg &= ~0x1;
4448 xge_os_pci_write16(hldev->pdev, hldev->cfgh,
4449 xge_offsetof(xge_hal_pci_config_le_t,
4450 msi_control), msi_control_reg);
4454 __hal_device_msix_intr_endis(xge_hal_device_t *hldev,
4455 xge_hal_channel_t *channel, int flag)
4458 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
4460 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4461 &bar0->xmsi_mask_reg);
4464 val64 &= ~(1LL << ( 63 - channel->msix_idx ));
4466 val64 |= (1LL << ( 63 - channel->msix_idx ));
4467 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4468 &bar0->xmsi_mask_reg);
/*
 * NOTE(review): partial extract - braces, else branches and the comment
 * openers/closers of this function are missing from this view; the added
 * comments below are hedged accordingly.
 */
4472 * xge_hal_device_intr_enable - Enable Xframe interrupts.
4473 * @hldev: HAL device handle.
4474 * @op: One of the xge_hal_device_intr_e enumerated values specifying
4475 * the type(s) of interrupts to enable.
4477 * Enable Xframe interrupts. The function is to be executed the last in
4478 * Xframe initialization sequence.
4480 * See also: xge_hal_device_intr_disable()
4483 xge_hal_device_intr_enable(xge_hal_device_t *hldev)
4488 /* PRC initialization and configuration */
4489 xge_list_for_each(item, &hldev->ring_channels) {
4490 xge_hal_channel_h channel;
4491 channel = xge_container_of(item, xge_hal_channel_t, item);
4492 __hal_ring_prc_enable(channel);
4495 /* enable traffic only interrupts */
4496 if (hldev->config.intr_mode != XGE_HAL_INTR_MODE_IRQLINE) {
4498 * make sure all interrupts going to be disabled if MSI
4501 __hal_device_intr_mgmt(hldev, XGE_HAL_ALL_INTRS, 0);
4504 * Enable the Tx traffic interrupts only if the TTI feature is
4508 if (hldev->tti_enabled)
4509 val64 = XGE_HAL_TX_TRAFFIC_INTR;
/* bimodal mode handles Rx traffic via alarms; presumably only non-bimodal
 * (and always Xena) needs the explicit Rx traffic interrupt - TODO confirm */
4511 if (!hldev->config.bimodal_interrupts)
4512 val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4514 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
4515 val64 |= XGE_HAL_RX_TRAFFIC_INTR;
4517 val64 |=XGE_HAL_TX_PIC_INTR |
4519 XGE_HAL_TX_DMA_INTR |
4520 (hldev->config.sched_timer_us !=
4521 XGE_HAL_SCHED_TIMER_DISABLED ? XGE_HAL_SCHED_INTR : 0);
4522 __hal_device_intr_mgmt(hldev, val64, 1);
4526 * Enable MSI-X interrupts
4528 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4530 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4532 * To enable MSI-X, MSI also needs to be enabled,
4533 * due to a bug in the herc NIC.
4535 __hal_device_msi_intr_endis(hldev, 1);
4539 /* Enable the MSI-X interrupt for each configured channel */
4540 xge_list_for_each(item, &hldev->fifo_channels) {
4541 xge_hal_channel_t *channel;
4543 channel = xge_container_of(item,
4544 xge_hal_channel_t, item);
4546 /* 0 vector is reserved for alarms */
4547 if (!channel->msix_idx)
4550 __hal_device_msix_intr_endis(hldev, channel, 1);
/* same unmasking pass for the ring (Rx) channels */
4553 xge_list_for_each(item, &hldev->ring_channels) {
4554 xge_hal_channel_t *channel;
4556 channel = xge_container_of(item,
4557 xge_hal_channel_t, item);
4559 /* 0 vector is reserved for alarms */
4560 if (!channel->msix_idx)
4563 __hal_device_msix_intr_endis(hldev, channel, 1);
4567 xge_debug_device(XGE_TRACE, "%s", "interrupts are enabled");
/*
 * NOTE(review): partial extract - braces and comment delimiters are
 * missing from this view; added comments are hedged accordingly.
 */
4572 * xge_hal_device_intr_disable - Disable Xframe interrupts.
4573 * @hldev: HAL device handle.
4574 * @op: One of the xge_hal_device_intr_e enumerated values specifying
4575 * the type(s) of interrupts to disable.
4577 * Disable Xframe interrupts.
4579 * See also: xge_hal_device_intr_enable()
4582 xge_hal_device_intr_disable(xge_hal_device_t *hldev)
4585 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4588 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX) {
4590 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
4592 * To disable MSI-X, MSI also needs to be disabled,
4593 * due to a bug in the herc NIC.
4595 __hal_device_msi_intr_endis(hldev, 0);
4598 /* Disable the MSI-X interrupt for each configured channel */
4599 xge_list_for_each(item, &hldev->fifo_channels) {
4600 xge_hal_channel_t *channel;
4602 channel = xge_container_of(item,
4603 xge_hal_channel_t, item);
4605 /* 0 vector is reserved for alarms */
4606 if (!channel->msix_idx)
4609 __hal_device_msix_intr_endis(hldev, channel, 0);
/* mask all Tx traffic interrupts (all-ones into tx_traffic_mask) */
4613 xge_os_pio_mem_write64(hldev->pdev,
4614 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4615 &bar0->tx_traffic_mask);
4617 xge_list_for_each(item, &hldev->ring_channels) {
4618 xge_hal_channel_t *channel;
4620 channel = xge_container_of(item,
4621 xge_hal_channel_t, item);
4623 /* 0 vector is reserved for alarms */
4624 if (!channel->msix_idx)
4627 __hal_device_msix_intr_endis(hldev, channel, 0);
/* mask all Rx traffic interrupts as well */
4630 xge_os_pio_mem_write64(hldev->pdev,
4631 hldev->regh0, 0xFFFFFFFFFFFFFFFFULL,
4632 &bar0->rx_traffic_mask);
4636 * Disable traffic only interrupts.
4637 * Tx traffic interrupts are used only if the TTI feature is
4641 if (hldev->tti_enabled)
4642 val64 = XGE_HAL_TX_TRAFFIC_INTR;
4644 val64 |= XGE_HAL_RX_TRAFFIC_INTR |
4645 XGE_HAL_TX_PIC_INTR |
4647 (hldev->config.sched_timer_us != XGE_HAL_SCHED_TIMER_DISABLED ?
4648 XGE_HAL_SCHED_INTR : 0);
4649 __hal_device_intr_mgmt(hldev, val64, 0);
/* finally mask everything at the top-level general interrupt mask */
4651 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4652 0xFFFFFFFFFFFFFFFFULL,
4653 &bar0->general_int_mask);
4656 /* disable all configured PRCs */
4657 xge_list_for_each(item, &hldev->ring_channels) {
4658 xge_hal_channel_h channel;
4659 channel = xge_container_of(item, xge_hal_channel_t, item);
4660 __hal_ring_prc_disable(channel);
4663 xge_debug_device(XGE_TRACE, "%s", "interrupts are disabled");
/*
 * NOTE(review): partial extract - braces and some guard lines are
 * missing from this view.
 */
4668 * xge_hal_device_mcast_enable - Enable Xframe multicast addresses.
4669 * @hldev: HAL device handle.
4671 * Enable Xframe multicast addresses.
4672 * Returns: XGE_HAL_OK on success.
4673 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to enable mcast
4674 * feature within the time(timeout).
4676 * See also: xge_hal_device_mcast_disable(), xge_hal_status_e{}.
4679 xge_hal_device_mcast_enable(xge_hal_device_t *hldev)
4682 xge_hal_pci_bar0_t *bar0;
4683 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4686 return XGE_HAL_ERR_INVALID_DEVICE;
/* already enabled - refcount guard (early return presumed; lines missing) */
4688 if (hldev->mcast_refcnt)
4691 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4692 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4694 hldev->mcast_refcnt = 1;
4696 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4698 /* Enable all Multicast addresses */
4699 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4700 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0x010203040506ULL),
4701 &bar0->rmac_addr_data0_mem);
4702 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4703 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0xfeffffffffffULL),
4704 &bar0->rmac_addr_data1_mem);
/* write-enable strobe into the RMAC address command memory at mc_offset */
4705 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4706 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4707 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4708 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4709 &bar0->rmac_addr_cmd_mem);
/* wait for the strobe to complete before returning */
4711 if (__hal_device_register_poll(hldev,
4712 &bar0->rmac_addr_cmd_mem, 0,
4713 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4714 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4715 /* upper layer may require to repeat */
4716 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/*
 * NOTE(review): partial extract - braces and some guard lines are
 * missing from this view.  Mirrors xge_hal_device_mcast_enable().
 */
4723 * xge_hal_device_mcast_disable - Disable Xframe multicast addresses.
4724 * @hldev: HAL device handle.
4726 * Disable Xframe multicast addresses.
4727 * Returns: XGE_HAL_OK - success.
4728 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to disable mcast
4729 * feature within the time(timeout).
4731 * See also: xge_hal_device_mcast_enable(), xge_hal_status_e{}.
4734 xge_hal_device_mcast_disable(xge_hal_device_t *hldev)
4737 xge_hal_pci_bar0_t *bar0;
4738 int mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET;
4741 return XGE_HAL_ERR_INVALID_DEVICE;
/* already disabled - refcount guard (early return presumed; lines missing) */
4743 if (hldev->mcast_refcnt == 0)
4746 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
4747 mc_offset = XGE_HAL_MAC_MC_ALL_MC_ADDR_OFFSET_HERC;
4749 hldev->mcast_refcnt = 0;
4751 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4753 /* Disable all Multicast addresses */
4754 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4755 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(0xffffffffffffULL),
4756 &bar0->rmac_addr_data0_mem);
4757 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4758 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0),
4759 &bar0->rmac_addr_data1_mem);
/* write-enable strobe into the RMAC address command memory at mc_offset */
4761 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4762 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4763 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET(mc_offset);
4764 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4765 &bar0->rmac_addr_cmd_mem);
4767 if (__hal_device_register_poll(hldev,
4768 &bar0->rmac_addr_cmd_mem, 0,
4769 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4770 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4771 /* upper layer may require to repeat */
4772 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/*
 * NOTE(review): partial extract - the register operand of the read at
 * orig line 4798 and the write32_upper arguments are truncated here.
 */
4779 * xge_hal_device_promisc_enable - Enable promiscuous mode.
4780 * @hldev: HAL device handle.
4782 * Enable promiscuous mode of Xframe operation.
4784 * See also: xge_hal_device_promisc_disable().
4787 xge_hal_device_promisc_enable(xge_hal_device_t *hldev)
4790 xge_hal_pci_bar0_t *bar0;
4794 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* no-op if already promiscuous - is_promisc tracks current state */
4796 if (!hldev->is_promisc) {
4797 /* Put the NIC into promiscuous mode */
4798 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4800 val64 |= XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: write the unlock key before updating it */
4802 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4803 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4804 &bar0->rmac_cfg_key);
4806 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4810 hldev->is_promisc = 1;
4811 xge_debug_device(XGE_TRACE,
4812 "mac_cfg 0x"XGE_OS_LLXFMT": promisc enabled",
4813 (unsigned long long)val64);
/*
 * NOTE(review): partial extract - mirrors xge_hal_device_promisc_enable()
 * with the PROM_ENABLE bit cleared instead of set.
 */
4818 * xge_hal_device_promisc_disable - Disable promiscuous mode.
4819 * @hldev: HAL device handle.
4821 * Disable promiscuous mode of Xframe operation.
4823 * See also: xge_hal_device_promisc_enable().
4826 xge_hal_device_promisc_disable(xge_hal_device_t *hldev)
4829 xge_hal_pci_bar0_t *bar0;
4833 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* no-op if not currently promiscuous */
4835 if (hldev->is_promisc) {
4836 /* Remove the NIC from promiscuous mode */
4837 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4839 val64 &= ~XGE_HAL_MAC_CFG_RMAC_PROM_ENABLE;
/* mac_cfg is key-protected: write the unlock key before updating it */
4841 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4842 XGE_HAL_RMAC_CFG_KEY(0x4C0D),
4843 &bar0->rmac_cfg_key);
4845 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
4849 hldev->is_promisc = 0;
4850 xge_debug_device(XGE_TRACE,
4851 "mac_cfg 0x"XGE_OS_LLXFMT": promisc disabled",
4852 (unsigned long long)val64);
/*
 * NOTE(review): partial extract - braces, #else/#endif partners of the
 * HERC_EMULATION conditionals and the final return are not all visible.
 */
4857 * xge_hal_device_macaddr_get - Get MAC addresses.
4858 * @hldev: HAL device handle.
4859 * @index: MAC address index, in the range from 0 to
4860 * XGE_HAL_MAX_MAC_ADDRESSES.
4861 * @macaddr: MAC address. Returned by HAL.
4863 * Retrieve one of the stored MAC addresses by reading non-volatile
4864 * memory on the chip.
4866 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
4868 * Returns: XGE_HAL_OK - success.
4869 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
4870 * address within the time(timeout).
4871 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4873 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
4876 xge_hal_device_macaddr_get(xge_hal_device_t *hldev, int index,
4879 xge_hal_pci_bar0_t *bar0;
4883 if (hldev == NULL) {
4884 return XGE_HAL_ERR_INVALID_DEVICE;
4887 bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4889 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES ) {
4890 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
/* emulation path: seed the data registers with a fixed pattern */
4893 #ifdef XGE_HAL_HERC_EMULATION
4894 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000010000000000,
4895 &bar0->rmac_addr_data0_mem);
4896 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,0x0000000000000000,
4897 &bar0->rmac_addr_data1_mem);
4898 val64 = XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4899 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4900 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index));
4901 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4902 &bar0->rmac_addr_cmd_mem);
4904 /* poll until done */
4905 __hal_device_register_poll(hldev,
4906 &bar0->rmac_addr_cmd_mem, 0,
4907 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD,
4908 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS);
/* real hardware path: issue a read strobe at the requested slot */
4912 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_RD |
4913 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4914 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4915 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4916 &bar0->rmac_addr_cmd_mem);
4918 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4919 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4920 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4921 /* upper layer may require to repeat */
4922 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
/* unpack the 6 address bytes, MSB first, from the 64-bit data register */
4925 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
4926 &bar0->rmac_addr_data0_mem);
4927 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4928 (*macaddr)[i] = (u8)(val64 >> ((64 - 8) - (i * 8)));
4931 #ifdef XGE_HAL_HERC_EMULATION
4932 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4933 (*macaddr)[i] = (u8)0;
4935 (*macaddr)[1] = (u8)1;
/*
 * NOTE(review): partial extract - the second parameter line (macaddr),
 * temp64 declaration/shift inside the pack loop, and closing braces are
 * not visible here.
 */
4943 * xge_hal_device_macaddr_set - Set MAC address.
4944 * @hldev: HAL device handle.
4945 * @index: MAC address index, in the range from 0 to
4946 * XGE_HAL_MAX_MAC_ADDRESSES.
4947 * @macaddr: New MAC address to configure.
4949 * Configure one of the available MAC address "slots".
4951 * Up to %XGE_HAL_MAX_MAC_ADDRESSES addresses is supported.
4953 * Returns: XGE_HAL_OK - success.
4954 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
4955 * address within the time(timeout).
4956 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
4958 * See also: xge_hal_device_macaddr_get(), xge_hal_status_e{}.
4961 xge_hal_device_macaddr_set(xge_hal_device_t *hldev, int index,
4964 xge_hal_pci_bar0_t *bar0 =
4965 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
4969 if ( index >= XGE_HAL_MAX_MAC_ADDRESSES )
4970 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
/* pack the 6 address bytes into a 64-bit value (shift lines not visible) */
4973 for (i=0; i < XGE_HAL_ETH_ALEN; i++) {
4974 temp64 |= macaddr[i];
4979 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4980 XGE_HAL_RMAC_ADDR_DATA0_MEM_ADDR(temp64),
4981 &bar0->rmac_addr_data0_mem);
4983 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
4984 XGE_HAL_RMAC_ADDR_DATA1_MEM_MASK(0ULL),
4985 &bar0->rmac_addr_data1_mem);
/* write-enable strobe at the chosen slot */
4987 val64 = ( XGE_HAL_RMAC_ADDR_CMD_MEM_WE |
4988 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_NEW_CMD |
4989 XGE_HAL_RMAC_ADDR_CMD_MEM_OFFSET((index)) );
4991 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
4992 &bar0->rmac_addr_cmd_mem);
4994 if (__hal_device_register_poll(hldev, &bar0->rmac_addr_cmd_mem, 0,
4995 XGE_HAL_RMAC_ADDR_CMD_MEM_STROBE_CMD_EXECUTING,
4996 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
4997 /* upper layer may require to repeat */
4998 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5005 * xge_hal_device_macaddr_clear - Clear MAC address.
5006 * @hldev: HAL device handle.
5007 * @index: MAC address index, in the range from 0 to
5008 * XGE_HAL_MAX_MAC_ADDRESSES.
5010 * Clear one of the available MAC address "slots".
5012 * Returns: XGE_HAL_OK - success.
5013 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to set the new mac
5014 * address within the time(timeout).
5015 * XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES - Invalid MAC address index.
5017 * See also: xge_hal_device_macaddr_set(), xge_hal_status_e{}.
5020 xge_hal_device_macaddr_clear(xge_hal_device_t *hldev, int index)
5022 xge_hal_status_e status;
/* clearing == programming the broadcast (all-FF) pattern into the slot */
5023 u8 macaddr[6] = {0xFF, 0xFF, 0xFF, 0xFF, 0xFF, 0xFF};
5025 status = xge_hal_device_macaddr_set(hldev, index, macaddr);
5026 if (status != XGE_HAL_OK) {
5027 xge_debug_device(XGE_ERR, "%s",
5028 "Not able to set the mac addr");
/*
 * NOTE(review): partial extract - local declarations (i, macaddr), the
 * "return i" on match and the final not-found return are not visible.
 */
5036 * xge_hal_device_macaddr_find - Finds index in the rmac table.
5037 * @hldev: HAL device handle.
5038 * @wanted: Wanted MAC address.
5040 * See also: xge_hal_device_macaddr_set().
5043 xge_hal_device_macaddr_find(xge_hal_device_t *hldev, macaddr_t wanted)
5047 if (hldev == NULL) {
5048 return XGE_HAL_ERR_INVALID_DEVICE;
/* slot 0 holds the primary address, so the scan starts at 1 */
5051 for (i=1; i<XGE_HAL_MAX_MAC_ADDRESSES; i++) {
5053 (void) xge_hal_device_macaddr_get(hldev, i, &macaddr);
5054 if (!xge_os_memcmp(macaddr, wanted, sizeof(macaddr_t))) {
/*
 * NOTE(review): partial extract - error-path return after the failed
 * reset and the final return are not visible here.
 */
5063 * xge_hal_device_mtu_set - Set MTU.
5064 * @hldev: HAL device handle.
5065 * @new_mtu: New MTU size to configure.
5067 * Set new MTU value. Example, to use jumbo frames:
5068 * xge_hal_device_mtu_set(my_device, my_channel, 9600);
5070 * Returns: XGE_HAL_OK on success.
5071 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control
5073 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to initialize TTI/RTI
5075 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT - Failed to restore the device to
5076 * a "quiescent" state.
5079 xge_hal_device_mtu_set(xge_hal_device_t *hldev, int new_mtu)
5081 xge_hal_status_e status;
5084 * reset needed if 1) new MTU differs, and
5085 * 2a) device was closed or
5086 * 2b) device is being upped for first time.
5088 if (hldev->config.mtu != new_mtu) {
5089 if (hldev->reset_needed_after_close ||
5090 !hldev->mtu_first_time_set) {
5091 status = xge_hal_device_reset(hldev);
5092 if (status != XGE_HAL_OK) {
5093 xge_debug_device(XGE_TRACE, "%s",
5094 "fatal: can not reset the device");
5098 /* store the new MTU in device, reset will use it */
5099 hldev->config.mtu = new_mtu;
5100 xge_debug_device(XGE_TRACE, "new MTU %d applied",
/* remember that the first MTU set has happened (affects reset decision) */
5104 if (!hldev->mtu_first_time_set)
5105 hldev->mtu_first_time_set = 1;
/*
 * NOTE(review): partial extract - many braces, declarations (i, val64,
 * subsys_device, subsys_vendor) and return statements are missing from
 * this view; only the mis-attributed error string below was changed.
 * Fix: the dump_buf allocation-failure path logged the copy-pasted
 * message "__hal_device_hw_initialize failed", misattributing the
 * failure; it now names the dump_buf allocation.
 */
5111 * xge_hal_device_initialize - Initialize Xframe device.
5112 * @hldev: HAL device handle.
5113 * @attr: pointer to xge_hal_device_attr_t structure
5114 * @device_config: Configuration to be _applied_ to the device,
5115 * For the Xframe configuration "knobs" please
5116 * refer to xge_hal_device_config_t and Xframe
5119 * Initialize Xframe device. Note that all the arguments of this public API
5120 * are 'IN', including @hldev. Upper-layer driver (ULD) cooperates with
5121 * OS to find new Xframe device, locate its PCI and memory spaces.
5123 * When done, the ULD allocates sizeof(xge_hal_device_t) bytes for HAL
5124 * to enable the latter to perform Xframe hardware initialization.
5126 * Returns: XGE_HAL_OK - success.
5127 * XGE_HAL_ERR_DRIVER_NOT_INITIALIZED - Driver is not initialized.
5128 * XGE_HAL_ERR_BAD_DEVICE_CONFIG - Device configuration params are not
5130 * XGE_HAL_ERR_OUT_OF_MEMORY - Memory allocation failed.
5131 * XGE_HAL_ERR_BAD_SUBSYSTEM_ID - Device subsystem id is invalid.
5132 * XGE_HAL_ERR_INVALID_MAC_ADDRESS - Device mac address in not valid.
5133 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to retrieve the mac
5134 * address within the time(timeout) or TTI/RTI initialization failed.
5135 * XGE_HAL_ERR_SWAPPER_CTRL - Failed to configure swapper control.
5136 * XGE_HAL_ERR_DEVICE_IS_NOT_QUIESCENT -Device is not queiscent.
5138 * See also: xge_hal_device_terminate(), xge_hal_status_e{}
5139 * xge_hal_device_attr_t{}.
5142 xge_hal_device_initialize(xge_hal_device_t *hldev, xge_hal_device_attr_t *attr,
5143 xge_hal_device_config_t *device_config)
5146 xge_hal_status_e status;
5147 xge_hal_channel_t *channel;
5150 int total_dram_size, ring_auto_dram_cfg, left_dram_size;
5151 int total_dram_size_max = 0;
5153 xge_debug_device(XGE_TRACE, "device 0x"XGE_OS_LLXFMT" is initializing",
5154 (unsigned long long)(ulong_t)hldev);
/* driver singleton must be up before any device can be initialized */
5157 if (g_xge_hal_driver == NULL ||
5158 !g_xge_hal_driver->is_initialized) {
5159 return XGE_HAL_ERR_DRIVER_NOT_INITIALIZED;
5162 xge_os_memzero(hldev, sizeof(xge_hal_device_t));
5165 * validate a common part of Xframe-I/II configuration
5166 * (and run check_card() later, once PCI inited - see below)
5168 status = __hal_device_config_check_common(device_config);
5169 if (status != XGE_HAL_OK)
5173 xge_os_memcpy(&hldev->config, device_config,
5174 sizeof(xge_hal_device_config_t));
5176 /* save original attr */
5177 xge_os_memcpy(&hldev->orig_attr, attr,
5178 sizeof(xge_hal_device_attr_t));
5180 /* initialize rxufca_intr_thres */
5181 hldev->rxufca_intr_thres = hldev->config.rxufca_intr_thres;
/* cache the ULD-provided register handles and BAR/IRQ/config handles */
5183 hldev->regh0 = attr->regh0;
5184 hldev->regh1 = attr->regh1;
5185 hldev->regh2 = attr->regh2;
5186 hldev->isrbar0 = hldev->bar0 = attr->bar0;
5187 hldev->bar1 = attr->bar1;
5188 hldev->bar2 = attr->bar2;
5189 hldev->pdev = attr->pdev;
5190 hldev->irqh = attr->irqh;
5191 hldev->cfgh = attr->cfgh;
5193 /* set initial bimodal timer for bimodal adaptive schema */
5194 hldev->bimodal_timer_val_us = hldev->config.bimodal_timer_lo_us;
5196 hldev->queueh = xge_queue_create(hldev->pdev, hldev->irqh,
5197 g_xge_hal_driver->config.queue_size_initial,
5198 g_xge_hal_driver->config.queue_size_max,
5199 __hal_device_event_queued, hldev);
5200 if (hldev->queueh == NULL)
5201 return XGE_HAL_ERR_OUT_OF_MEMORY;
5203 hldev->magic = XGE_HAL_MAGIC;
5205 xge_assert(hldev->regh0);
5206 xge_assert(hldev->regh1);
5207 xge_assert(hldev->bar0);
5208 xge_assert(hldev->bar1);
5209 xge_assert(hldev->pdev);
5210 xge_assert(hldev->irqh);
5211 xge_assert(hldev->cfgh);
5213 /* initialize some PCI/PCI-X fields of this PCI device. */
5214 __hal_device_pci_init(hldev);
5217 * initlialize lists to properly handling a potential
5220 xge_list_init(&hldev->free_channels);
5221 xge_list_init(&hldev->fifo_channels);
5222 xge_list_init(&hldev->ring_channels);
/* per-card fixups and config validation (Xena vs Herc) */
5224 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA) {
5225 /* fixups for xena */
5226 hldev->config.rth_en = 0;
5227 hldev->config.rth_spdm_en = 0;
5228 hldev->config.rts_mac_en = 0;
5229 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_XENA;
5231 status = __hal_device_config_check_xena(device_config);
5232 if (status != XGE_HAL_OK) {
5233 xge_hal_device_terminate(hldev);
5236 if (hldev->config.bimodal_interrupts == 1) {
5237 xge_hal_device_terminate(hldev);
5238 return XGE_HAL_BADCFG_BIMODAL_XENA_NOT_ALLOWED;
5239 } else if (hldev->config.bimodal_interrupts ==
5240 XGE_HAL_DEFAULT_USE_HARDCODE)
5241 hldev->config.bimodal_interrupts = 0;
5242 } else if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC) {
5243 /* fixups for herc */
5244 total_dram_size_max = XGE_HAL_MAX_RING_QUEUE_SIZE_HERC;
5245 status = __hal_device_config_check_herc(device_config);
5246 if (status != XGE_HAL_OK) {
5247 xge_hal_device_terminate(hldev);
5250 if (hldev->config.bimodal_interrupts ==
5251 XGE_HAL_DEFAULT_USE_HARDCODE)
5252 hldev->config.bimodal_interrupts = 1;
5254 xge_debug_device(XGE_ERR,
5255 "detected unknown device_id 0x%x", hldev->device_id);
5256 xge_hal_device_terminate(hldev);
5257 return XGE_HAL_ERR_BAD_DEVICE_ID;
5260 /* allocate and initialize FIFO types of channels according to
5262 for (i = 0; i < XGE_HAL_MAX_FIFO_NUM; i++) {
5263 if (!device_config->fifo.queue[i].configured)
5266 channel = __hal_channel_allocate(hldev, i,
5267 XGE_HAL_CHANNEL_TYPE_FIFO);
5268 if (channel == NULL) {
5269 xge_debug_device(XGE_ERR,
5270 "fifo: __hal_channel_allocate failed");
5271 xge_hal_device_terminate(hldev);
5272 return XGE_HAL_ERR_OUT_OF_MEMORY;
5274 /* add new channel to the device */
5275 xge_list_insert(&channel->item, &hldev->free_channels);
5279 * automatic DRAM adjustment
5281 total_dram_size = 0;
5282 ring_auto_dram_cfg = 0;
5283 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5284 if (!device_config->ring.queue[i].configured)
5286 if (device_config->ring.queue[i].dram_size_mb ==
5287 XGE_HAL_DEFAULT_USE_HARDCODE) {
5288 ring_auto_dram_cfg++;
5291 total_dram_size += device_config->ring.queue[i].dram_size_mb;
5293 left_dram_size = total_dram_size_max - total_dram_size;
5294 if (left_dram_size < 0 ||
5295 (ring_auto_dram_cfg && left_dram_size / ring_auto_dram_cfg == 0)) {
5296 xge_debug_device(XGE_ERR,
5297 "ring config: exceeded DRAM size %d MB",
5298 total_dram_size_max);
5299 xge_hal_device_terminate(hldev);
5300 return XGE_HAL_BADCFG_RING_QUEUE_SIZE;
5304 * allocate and initialize RING types of channels according to
5307 for (i = 0; i < XGE_HAL_MAX_RING_NUM; i++) {
5308 if (!device_config->ring.queue[i].configured)
/* auto-sized rings split the leftover DRAM evenly */
5311 if (device_config->ring.queue[i].dram_size_mb ==
5312 XGE_HAL_DEFAULT_USE_HARDCODE) {
5313 hldev->config.ring.queue[i].dram_size_mb =
5314 device_config->ring.queue[i].dram_size_mb =
5315 left_dram_size / ring_auto_dram_cfg;
5318 channel = __hal_channel_allocate(hldev, i,
5319 XGE_HAL_CHANNEL_TYPE_RING);
5320 if (channel == NULL) {
5321 xge_debug_device(XGE_ERR,
5322 "ring: __hal_channel_allocate failed");
5323 xge_hal_device_terminate(hldev);
5324 return XGE_HAL_ERR_OUT_OF_MEMORY;
5326 /* add new channel to the device */
5327 xge_list_insert(&channel->item, &hldev->free_channels);
5330 /* get subsystem IDs */
5331 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5332 xge_offsetof(xge_hal_pci_config_le_t, subsystem_id),
5334 xge_os_pci_read16(hldev->pdev, hldev->cfgh,
5335 xge_offsetof(xge_hal_pci_config_le_t, subsystem_vendor_id),
5337 xge_debug_device(XGE_TRACE,
5338 "subsystem_id %04x:%04x",
5339 subsys_vendor, subsys_device);
5341 /* reset device initially */
5342 (void) __hal_device_reset(hldev);
5344 /* set host endian before, to assure proper action */
5345 status = __hal_device_set_swapper(hldev);
5346 if (status != XGE_HAL_OK) {
5347 xge_debug_device(XGE_ERR,
5348 "__hal_device_set_swapper failed");
5349 xge_hal_device_terminate(hldev);
5350 (void) __hal_device_reset(hldev);
5354 #ifndef XGE_HAL_HERC_EMULATION
5355 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_XENA)
5356 __hal_device_xena_fix_mac(hldev);
5359 /* MAC address initialization.
5360 * For now only one mac address will be read and used. */
5361 status = xge_hal_device_macaddr_get(hldev, 0, &hldev->macaddr[0]);
5362 if (status != XGE_HAL_OK) {
5363 xge_debug_device(XGE_ERR,
5364 "xge_hal_device_macaddr_get failed");
5365 xge_hal_device_terminate(hldev);
/* reject an unprogrammed EEPROM (all-FF address is not usable) */
5369 if (hldev->macaddr[0][0] == 0xFF &&
5370 hldev->macaddr[0][1] == 0xFF &&
5371 hldev->macaddr[0][2] == 0xFF &&
5372 hldev->macaddr[0][3] == 0xFF &&
5373 hldev->macaddr[0][4] == 0xFF &&
5374 hldev->macaddr[0][5] == 0xFF) {
5375 xge_debug_device(XGE_ERR,
5376 "xge_hal_device_macaddr_get returns all FFs");
5377 xge_hal_device_terminate(hldev);
5378 return XGE_HAL_ERR_INVALID_MAC_ADDRESS;
5381 xge_debug_device(XGE_TRACE,
5382 "default macaddr: 0x%02x-%02x-%02x-%02x-%02x-%02x",
5383 hldev->macaddr[0][0], hldev->macaddr[0][1],
5384 hldev->macaddr[0][2], hldev->macaddr[0][3],
5385 hldev->macaddr[0][4], hldev->macaddr[0][5]);
5387 status = __hal_stats_initialize(&hldev->stats, hldev);
5388 if (status != XGE_HAL_OK) {
5389 xge_debug_device(XGE_ERR,
5390 "__hal_stats_initialize failed");
5391 xge_hal_device_terminate(hldev);
5395 status = __hal_device_hw_initialize(hldev);
5396 if (status != XGE_HAL_OK) {
5397 xge_debug_device(XGE_ERR,
5398 "__hal_device_hw_initialize failed");
5399 xge_hal_device_terminate(hldev);
5402 hldev->dump_buf=(char*)xge_os_malloc(hldev->pdev, XGE_HAL_DUMP_BUF_SIZE);
5403 if (hldev->dump_buf == NULL) {
5404 xge_debug_device(XGE_ERR,
5405 "xge_os_malloc of dump_buf failed");
5406 xge_hal_device_terminate(hldev);
5407 return XGE_HAL_ERR_OUT_OF_MEMORY;
5411 /* Xena-only: need to serialize fifo posts across all device fifos */
5412 #if defined(XGE_HAL_TX_MULTI_POST)
5413 xge_os_spin_lock_init(&hldev->xena_post_lock, hldev->pdev);
5414 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5415 xge_os_spin_lock_init_irq(&hldev->xena_post_lock, hldev->irqh);
5417 /* Getting VPD data */
5418 __hal_device_get_vpd_data(hldev);
5420 hldev->is_initialized = 1;
/*
 * NOTE(review): partial extract - #endif partners of the spinlock
 * conditionals and closing braces are not visible here.
 */
5426 * xge_hal_device_terminating - Mark the device as 'terminating'.
5427 * @devh: HAL device handle.
5429 * Mark the device as 'terminating', going to terminate. Can be used
5430 * to serialize termination with other running processes/contexts.
5432 * See also: xge_hal_device_terminate().
5435 xge_hal_device_terminating(xge_hal_device_h devh)
5437 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
5439 xge_hal_channel_t *channel;
5440 #if defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5441 unsigned long flags=0;
5445 * go through each opened tx channel and aquire
5446 * lock, so it will serialize with HAL termination flag
5448 xge_list_for_each(item, &hldev->fifo_channels) {
5449 channel = xge_container_of(item, xge_hal_channel_t, item);
/* take the channel's reserve lock so the flag flip is seen atomically
 * by any concurrent descriptor-reserve path */
5450 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5451 xge_os_spin_lock(&channel->reserve_lock);
5452 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5453 xge_os_spin_lock_irq(&channel->reserve_lock, flags);
5456 channel->terminating = 1;
5458 #if defined(XGE_HAL_TX_MULTI_RESERVE)
5459 xge_os_spin_unlock(&channel->reserve_lock);
5460 #elif defined(XGE_HAL_TX_MULTI_RESERVE_IRQ)
5461 xge_os_spin_unlock_irq(&channel->reserve_lock, flags);
/* finally flag the device itself */
5465 hldev->terminating = 1;
/*
 * NOTE(review): partial extract - local declarations (pcisize, j), the
 * second xge_os_free argument at orig 5526 and closing braces are not
 * visible here.
 */
5469 * xge_hal_device_terminate - Terminate Xframe device.
5470 * @hldev: HAL device handle.
5472 * Terminate HAL device.
5474 * See also: xge_hal_device_initialize().
5477 xge_hal_device_terminate(xge_hal_device_t *hldev)
5479 xge_assert(g_xge_hal_driver != NULL);
5480 xge_assert(hldev != NULL);
5481 xge_assert(hldev->magic == XGE_HAL_MAGIC);
5483 xge_queue_flush(hldev->queueh);
5485 hldev->terminating = 1;
5486 hldev->is_initialized = 0;
/* poison the magic so stale handles are caught by asserts */
5488 hldev->magic = XGE_HAL_DEAD;
5490 #if defined(XGE_HAL_TX_MULTI_POST)
5491 xge_os_spin_lock_destroy(&hldev->xena_post_lock, hldev->pdev);
5492 #elif defined(XGE_HAL_TX_MULTI_POST_IRQ)
5493 xge_os_spin_lock_destroy_irq(&hldev->xena_post_lock, hldev->pdev);
5496 xge_debug_device(XGE_TRACE, "device "XGE_OS_LLXFMT" is terminating",
5497 (unsigned long long)(ulong_t)hldev);
/* all channels must already be closed (moved back to free_channels) */
5499 xge_assert(xge_list_is_empty(&hldev->fifo_channels));
5500 xge_assert(xge_list_is_empty(&hldev->ring_channels));
5502 if (hldev->stats.is_initialized) {
5503 __hal_stats_terminate(&hldev->stats);
5506 /* close if open and free all channels */
5507 while (!xge_list_is_empty(&hldev->free_channels)) {
5508 xge_hal_channel_t *channel = (xge_hal_channel_t*)
5509 hldev->free_channels.next;
5511 xge_assert(!channel->is_open);
5512 xge_list_remove(&channel->item);
5513 __hal_channel_free(channel);
5516 if (hldev->queueh) {
5517 xge_queue_destroy(hldev->queueh);
/* free the SPDM entry array and its pointer table, then the lock */
5520 if (hldev->spdm_table) {
5521 xge_os_free(hldev->pdev,
5522 hldev->spdm_table[0],
5523 (sizeof(xge_hal_spdm_entry_t) *
5524 hldev->spdm_max_entries));
5525 xge_os_free(hldev->pdev,
5527 (sizeof(xge_hal_spdm_entry_t *) *
5528 hldev->spdm_max_entries));
5529 xge_os_spin_lock_destroy(&hldev->spdm_lock, hldev->pdev);
5530 hldev->spdm_table = NULL;
5533 if (hldev->dump_buf) {
5534 xge_os_free(hldev->pdev, hldev->dump_buf,
5535 XGE_HAL_DUMP_BUF_SIZE);
5536 hldev->dump_buf = NULL;
/* restore the BIOS-saved PCI config space before letting the device go */
5539 if (hldev->device_id != 0) {
5542 pcisize = (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)?
5543 XGE_HAL_PCISIZE_HERC : XGE_HAL_PCISIZE_XENA;
5544 for (j = 0; j < pcisize; j++) {
5545 xge_os_pci_write32(hldev->pdev, hldev->cfgh, j * 4,
5546 *((u32*)&hldev->pci_config_space_bios + j));
/*
 * NOTE(review): partial extract - declarations of vpd_data/data, the
 * fail-flag handling after the retry loop and closing braces are not
 * visible here.
 */
5551 * __hal_device_get_vpd_data - Getting vpd_data.
5553 * @hldev: HAL device handle.
5555 * Getting product name and serial number from vpd capabilites structure
5559 __hal_device_get_vpd_data(xge_hal_device_t *hldev)
5563 int index = 0, count, fail = 0;
5564 u8 vpd_addr = XGE_HAL_CARD_XENA_VPD_ADDR;
5565 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
5566 vpd_addr = XGE_HAL_CARD_HERC_VPD_ADDR;
/* defaults used if the VPD read fails or the buffer can't be allocated */
5568 xge_os_strcpy((char *) hldev->vpd_data.product_name,
5569 "10 Gigabit Ethernet Adapter");
5570 xge_os_strcpy((char *) hldev->vpd_data.serial_num, "not available");
5572 vpd_data = ( u8*) xge_os_malloc(hldev->pdev, XGE_HAL_VPD_BUFFER_SIZE + 16);
5573 if ( vpd_data == NULL )
/* read the VPD area 4 bytes at a time through the VPD capability:
 * write the address, then poll the flag byte until the read completes */
5576 for (index = 0; index < XGE_HAL_VPD_BUFFER_SIZE; index +=4 ) {
5577 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 2), (u8)index);
5578 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 2), &data);
5579 xge_os_pci_write8(hldev->pdev, hldev->cfgh, (vpd_addr + 3), 0);
5580 for (count = 0; count < 5; count++ ) {
5582 xge_os_pci_read8(hldev->pdev, hldev->cfgh,(vpd_addr + 3), &data);
5583 if (data == XGE_HAL_VPD_READ_COMPLETE)
5588 xge_os_printf("ERR, Reading VPD data failed");
5593 xge_os_pci_read32(hldev->pdev, hldev->cfgh,(vpd_addr + 4),
5594 (u32 *)&vpd_data[index]);
5599 /* read serial number of adapter */
5600 for (count = 0; count < XGE_HAL_VPD_BUFFER_SIZE; count++) {
/* look for an "SN" tag followed by a length byte within bounds */
5601 if ((vpd_data[count] == 'S') &&
5602 (vpd_data[count + 1] == 'N') &&
5603 (vpd_data[count + 2] < XGE_HAL_VPD_LENGTH)) {
5604 memset(hldev->vpd_data.serial_num, 0, XGE_HAL_VPD_LENGTH);
5605 memcpy(hldev->vpd_data.serial_num, &vpd_data[count + 3],
5606 vpd_data[count + 2]);
/* product name: length byte at offset 1, string starts at offset 3 */
5611 if (vpd_data[1] < XGE_HAL_VPD_LENGTH) {
5612 memset(hldev->vpd_data.product_name, 0, vpd_data[1]);
5613 memcpy(hldev->vpd_data.product_name, &vpd_data[3], vpd_data[1]);
5618 xge_os_free(hldev->pdev, vpd_data, XGE_HAL_VPD_BUFFER_SIZE + 16);
5623 * xge_hal_device_handle_tcode - Handle transfer code.
5624 * @channelh: Channel handle.
5625 * @dtrh: Descriptor handle.
5626 * @t_code: One of the enumerated (and documented in the Xframe user guide)
5629 * Handle descriptor's transfer code. The latter comes with each completed
5630 * descriptor, see xge_hal_fifo_dtr_next_completed() and
5631 * xge_hal_ring_dtr_next_completed().
5632 * Transfer codes are enumerated in xgehal-fifo.h and xgehal-ring.h.
5634 * Returns: one of the xge_hal_status_e{} enumerated types.
5635 * XGE_HAL_OK - for success.
5636 * XGE_HAL_ERR_CRITICAL - when encounters critical error.
5639 xge_hal_device_handle_tcode (xge_hal_channel_h channelh,
5640 xge_hal_dtr_h dtrh, u8 t_code)
5642 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
5643 xge_hal_device_t *hldev = (xge_hal_device_t *)channel->devh;
5646 xge_os_printf("invalid t_code %d", t_code);
5650 if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
5651 hldev->stats.sw_dev_err_stats.txd_t_code_err_cnt[t_code]++;
5653 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5654 xge_hal_fifo_txd_t *txdp = (xge_hal_fifo_txd_t *)dtrh;
5655 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"
5656 XGE_OS_LLXFMT":"XGE_OS_LLXFMT,
5657 txdp->control_1, txdp->control_2, txdp->buffer_pointer,
5658 txdp->host_control);
5661 /* handle link "down" immediately without going through
5662 * xge_hal_device_poll() routine. */
5663 if (t_code == XGE_HAL_TXD_T_CODE_LOSS_OF_LINK) {
5665 if (hldev->link_state != XGE_HAL_LINK_DOWN) {
5666 xge_hal_pci_bar0_t *bar0 =
5667 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5670 hldev->link_state = XGE_HAL_LINK_DOWN;
5672 val64 = xge_os_pio_mem_read64(hldev->pdev,
5673 hldev->regh0, &bar0->adapter_control);
5676 val64 = val64 & (~XGE_HAL_ADAPTER_LED_ON);
5677 xge_os_pio_mem_write64(hldev->pdev,
5678 hldev->regh0, val64,
5679 &bar0->adapter_control);
5681 g_xge_hal_driver->uld_callbacks.link_down(
5682 hldev->upper_layer_info);
5684 } else if (t_code == XGE_HAL_TXD_T_CODE_ABORT_BUFFER ||
5685 t_code == XGE_HAL_TXD_T_CODE_ABORT_DTOR) {
5686 __hal_device_handle_targetabort(hldev);
5687 return XGE_HAL_ERR_CRITICAL;
5689 return XGE_HAL_ERR_PKT_DROP;
5690 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
5691 hldev->stats.sw_dev_err_stats.rxd_t_code_err_cnt[t_code]++;
5693 #if defined(XGE_HAL_DEBUG_BAD_TCODE)
5694 xge_hal_ring_rxd_1_t *rxdp = (xge_hal_ring_rxd_1_t *)dtrh;
5695 xge_os_printf(""XGE_OS_LLXFMT":"XGE_OS_LLXFMT":"XGE_OS_LLXFMT
5696 ":"XGE_OS_LLXFMT, rxdp->control_1,
5697 rxdp->control_2, rxdp->buffer0_ptr,
5698 rxdp->host_control);
5700 if (t_code == XGE_HAL_RXD_T_CODE_BAD_ECC) {
5701 hldev->stats.sw_dev_err_stats.ecc_err_cnt++;
5702 __hal_device_handle_eccerr(hldev, "rxd_t_code",
5704 return XGE_HAL_ERR_CRITICAL;
5705 } else if (t_code == XGE_HAL_RXD_T_CODE_PARITY ||
5706 t_code == XGE_HAL_RXD_T_CODE_PARITY_ABORT) {
5707 hldev->stats.sw_dev_err_stats.parity_err_cnt++;
5708 __hal_device_handle_parityerr(hldev, "rxd_t_code",
5710 return XGE_HAL_ERR_CRITICAL;
5711 /* do not drop if detected unknown IPv6 extension */
5712 } else if (t_code != XGE_HAL_RXD_T_CODE_UNKNOWN_PROTO) {
5713 return XGE_HAL_ERR_PKT_DROP;
5720 * xge_hal_device_link_state - Get link state.
5721 * @devh: HAL device handle.
5722 * @ls: Link state, see xge_hal_device_link_state_e{}.
5725 * Returns: XGE_HAL_OK.
5726 * See also: xge_hal_device_link_state_e{}.
/* Copy the cached link state into *ls; trailing return elided from dump. */
5728 xge_hal_status_e xge_hal_device_link_state(xge_hal_device_h devh,
5729 xge_hal_device_link_state_e *ls)
5731 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5733 xge_assert(ls != NULL);
5734 *ls = hldev->link_state;
5739 * xge_hal_device_sched_timer - Configure scheduled device interrupt.
5740 * @devh: HAL device handle.
5741 * @interval_us: Time interval, in microseconds.
5742 * Unlike transmit and receive interrupts,
5743 * the scheduled interrupt is generated independently of
5744 * traffic, but purely based on time.
5745 * @one_shot: 1 - generate scheduled interrupt only once.
5746 * 0 - generate scheduled interrupt periodically at the specified
5747 * @interval_us interval.
5749 * (Re-)configure scheduled interrupt. Can be called at runtime to change
5750 * the setting, generate one-shot interrupts based on the resource and/or
5751 * traffic conditions, other purposes.
5752 * See also: xge_hal_device_config_t{}.
/* NOTE(review): dump is non-contiguous — the one_shot parameter, val64
 * declaration, and the if/else keywords around lines 5770-5774 are missing. */
5754 void xge_hal_device_sched_timer(xge_hal_device_h devh, int interval_us,
5758 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5759 xge_hal_pci_bar0_t *bar0 =
5760 (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Convert microseconds to PCI clock ticks using the stored bus frequency. */
5761 unsigned int interval = hldev->config.pci_freq_mherz * interval_us;
5763 interval = __hal_fix_time_ival_herc(hldev, interval);
5765 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
5766 &bar0->scheduled_int_ctrl);
/* Rewrite the period field, then set one-shot/enable bits as requested. */
5768 val64 &= XGE_HAL_SCHED_INT_PERIOD_MASK;
5769 val64 |= XGE_HAL_SCHED_INT_PERIOD(interval);
5771 val64 |= XGE_HAL_SCHED_INT_CTRL_ONE_SHOT;
5773 val64 |= XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
/* interval == 0 disables the timer (branch keyword elided from dump). */
5775 val64 &= ~XGE_HAL_SCHED_INT_CTRL_TIMER_EN;
5778 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
5779 val64, &bar0->scheduled_int_ctrl);
5781 xge_debug_device(XGE_TRACE, "sched_timer 0x"XGE_OS_LLXFMT": %s",
5782 (unsigned long long)val64,
5783 interval ? "enabled" : "disabled");
5787 * xge_hal_device_check_id - Verify device ID.
5788 * @devh: HAL device handle.
5791 * Returns: one of the xge_hal_card_e{} enumerated types.
5792 * See also: xge_hal_card_e{}.
/* Map the stored PCI device ID to a card family (Xena/Herc/Titan). */
5795 xge_hal_device_check_id(xge_hal_device_h devh)
5797 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5798 switch (hldev->device_id) {
5799 case XGE_PCI_DEVICE_ID_XENA_1:
5800 case XGE_PCI_DEVICE_ID_XENA_2:
5801 return XGE_HAL_CARD_XENA;
5802 case XGE_PCI_DEVICE_ID_HERC_1:
5803 case XGE_PCI_DEVICE_ID_HERC_2:
5804 return XGE_HAL_CARD_HERC;
5805 case XGE_PCI_DEVICE_ID_TITAN_1:
5806 case XGE_PCI_DEVICE_ID_TITAN_2:
5807 return XGE_HAL_CARD_TITAN;
/* default label elided from dump; anything else is unknown. */
5809 return XGE_HAL_CARD_UNKNOWN;
5814 * xge_hal_device_pci_info_get - Get PCI bus information such as width,
5815 * frequency, and mode from previously stored values.
5816 * @devh: HAL device handle.
5817 * @pci_mode: pointer to a variable of enumerated type
5818 * xge_hal_pci_mode_e{}.
5819 * @bus_frequency: pointer to a variable of enumerated type
5820 * xge_hal_pci_bus_frequency_e{}.
5821 * @bus_width: pointer to a variable of enumerated type
5822 * xge_hal_pci_bus_width_e{}.
5824 * Get pci mode, frequency, and PCI bus width.
5825 * Returns: one of the xge_hal_status_e{} enumerated types.
5826 * XGE_HAL_OK - for success.
5827 * XGE_HAL_ERR_INVALID_DEVICE - for invalid device handle.
5828 * See Also: xge_hal_pci_mode_e, xge_hal_pci_mode_e, xge_hal_pci_width_e.
/* Return cached PCI mode/frequency/width; validates the device handle and
 * magic before dereferencing. Final return elided from this dump. */
5831 xge_hal_device_pci_info_get(xge_hal_device_h devh, xge_hal_pci_mode_e *pci_mode,
5832 xge_hal_pci_bus_frequency_e *bus_frequency,
5833 xge_hal_pci_bus_width_e *bus_width)
5835 xge_hal_status_e rc_status;
5836 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
5838 if (!hldev || !hldev->is_initialized || hldev->magic != XGE_HAL_MAGIC) {
5839 rc_status = XGE_HAL_ERR_INVALID_DEVICE;
5840 xge_debug_device(XGE_ERR,
5841 "xge_hal_device_pci_info_get error, rc %d for device %p",
/* Values below were captured at init time, not re-read from hardware. */
5847 *pci_mode = hldev->pci_mode;
5848 *bus_frequency = hldev->bus_frequency;
5849 *bus_width = hldev->bus_width;
5850 rc_status = XGE_HAL_OK;
5855 * xge_hal_reinitialize_hw
5856 * @hldev: private member of the device structure.
5858 * This function will soft reset the NIC and re-initialize all the
5859 * I/O registers to the values they had after its initial initialization
5860 * through the probe function.
/* Soft-reset the NIC, then redo HW init; on failure tear the device down.
 * NOTE(review): success-path return elided from this dump. */
5862 int xge_hal_reinitialize_hw(xge_hal_device_t * hldev)
5864 (void) xge_hal_device_reset(hldev);
5865 if (__hal_device_hw_initialize(hldev) != XGE_HAL_OK) {
5866 xge_hal_device_terminate(hldev);
5867 (void) __hal_device_reset(hldev);
5875 * __hal_read_spdm_entry_line
5876 * @hldev: pointer to xge_hal_device_t structure
5877 * @spdm_line: spdm line in the spdm entry to be read.
5878 * @spdm_entry: spdm entry of the spdm_line in the SPDM table.
5879 * @spdm_line_val: Contains the value stored in the spdm line.
5881 * SPDM table contains up to a maximum of 256 spdm entries.
5882 * Each spdm entry contains 8 lines and each line stores 8 bytes.
5883 * This function reads the spdm line(addressed by @spdm_line)
5884 * of the spdm entry(addressed by @spdm_entry) in
/* Read one 8-byte line of an SPDM entry: program the mem_ctrl register with
 * line/entry selectors plus a strobe, poll for strobe clear, then read data. */
5888 __hal_read_spdm_entry_line(xge_hal_device_t *hldev, u8 spdm_line,
5889 u16 spdm_entry, u64 *spdm_line_val)
5891 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
5894 val64 = XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE |
5895 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_LINE_SEL(spdm_line) |
5896 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_OFFSET(spdm_entry);
5898 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
5899 &bar0->rts_rth_spdm_mem_ctrl);
5901 /* poll until done */
5902 if (__hal_device_register_poll(hldev,
5903 &bar0->rts_rth_spdm_mem_ctrl, 0,
5904 XGE_HAL_RTS_RTH_SPDM_MEM_CTRL_STROBE,
5905 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
/* Timed out waiting for the strobe bit to clear. */
5907 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
5910 *spdm_line_val = xge_os_pio_mem_read64(hldev->pdev,
5911 hldev->regh0, &bar0->rts_rth_spdm_mem_data);
5917 * __hal_get_free_spdm_entry
5918 * @hldev: pointer to xge_hal_device_t structure
5919 * @spdm_entry: Contains an index to the unused spdm entry in the SPDM table.
5921 * This function returns an index of unused spdm entry in the SPDM
5924 static xge_hal_status_e
5925 __hal_get_free_spdm_entry(xge_hal_device_t *hldev, u16 *spdm_entry)
5927 xge_hal_status_e status;
5928 u64 spdm_line_val=0;
5931 * Search in the local SPDM table for a free slot.
/* Scan the shadow table for the first entry not marked in_use.
 * NOTE(review): *spdm_entry initialization elided from this dump. */
5934 for(; *spdm_entry < hldev->spdm_max_entries; (*spdm_entry)++) {
5935 if (hldev->spdm_table[*spdm_entry]->in_use) {
5940 if (*spdm_entry >= hldev->spdm_max_entries) {
5941 return XGE_HAL_ERR_SPDM_TABLE_FULL;
5945 * Make sure that the corresponding spdm entry in the SPDM
5947 * Seventh line of the spdm entry contains information about
5948 * whether the entry is free or not.
5950 if ((status = __hal_read_spdm_entry_line(hldev, 7, *spdm_entry,
5951 &spdm_line_val)) != XGE_HAL_OK) {
5955 /* BIT(63) in spdm_line 7 corresponds to entry_enable bit */
/* Hardware says the slot is still enabled while the shadow table says
 * free -> the two copies have diverged; report inconsistency. */
5956 if ((spdm_line_val & BIT(63))) {
5960 xge_debug_device(XGE_ERR, "Local SPDM table is not "
5961 "consistent with the actual one for the spdm "
5962 "entry %d", *spdm_entry);
5963 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
5971 * __hal_calc_jhash - Calculate Jenkins hash.
5972 * @msg: Jenkins hash algorithm key.
5973 * @length: Length of the key.
5974 * @golden_ratio: Jenkins hash golden ratio.
5975 * @init_value: Jenkins hash initial value.
5977 * This function implements the Jenkins based algorithm used for the
5978 * calculation of the RTH hash.
5979 * Returns: Jenkins hash value.
/* Jenkins (lookup2-style) hash over the key bytes: 12-byte rounds with
 * mix(), then a tail switch over the remaining <=11 bytes.
 * NOTE(review): the while-loop header, mix() calls, len init, and the final
 * return are elided from this dump. */
5983 __hal_calc_jhash(u8 *msg, u32 length, u32 golden_ratio, u32 init_value)
5986 register u32 a,b,c,len;
5989 * Set up the internal state
5992 a = b = golden_ratio; /* the golden ratio; an arbitrary value */
5993 c = init_value; /* the previous hash value */
5995 /* handle most of the key */
/* Fold 12 key bytes (little-endian assembled) per round into a, b, c. */
5998 a += (msg[0] + ((u32)msg[1]<<8) + ((u32)msg[2]<<16)
5999 + ((u32)msg[3]<<24));
6000 b += (msg[4] + ((u32)msg[5]<<8) + ((u32)msg[6]<<16)
6001 + ((u32)msg[7]<<24));
6002 c += (msg[8] + ((u32)msg[9]<<8) + ((u32)msg[10]<<16)
6003 + ((u32)msg[11]<<24));
6005 msg += 12; len -= 12;
6008 /* handle the last 11 bytes */
6010 switch(len) /* all the case statements fall through */
6012 case 11: c+= ((u32)msg[10]<<24);
6014 case 10: c+= ((u32)msg[9]<<16);
6016 case 9 : c+= ((u32)msg[8]<<8);
6018 /* the first byte of c is reserved for the length */
6019 case 8 : b+= ((u32)msg[7]<<24);
6021 case 7 : b+= ((u32)msg[6]<<16);
6023 case 6 : b+= ((u32)msg[5]<<8);
6025 case 5 : b+= msg[4];
6027 case 4 : a+= ((u32)msg[3]<<24);
6029 case 3 : a+= ((u32)msg[2]<<16);
6031 case 2 : a+= ((u32)msg[1]<<8);
6033 case 1 : a+= msg[0];
6035 /* case 0: nothing left to add */
6040 /* report the result */
6046 * xge_hal_spdm_entry_add - Add a new entry to the SPDM table.
6047 * @devh: HAL device handle.
6048 * @src_ip: Source ip address(IPv4/IPv6).
6049 * @dst_ip: Destination ip address(IPv4/IPv6).
6050 * @l4_sp: L4 source port.
6051 * @l4_dp: L4 destination port.
6052 * @is_tcp: Set to 1, if the protocol is TCP.
6053 * 0, if the protocol is UDP.
6054 * @is_ipv4: Set to 1, if the protocol is IPv4.
6055 * 0, if the protocol is IPv6.
6056 * @tgt_queue: Target queue to route the receive packet.
6058 * This function add a new entry to the SPDM table.
6060 * Returns: XGE_HAL_OK - success.
6061 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
6062 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to add a new entry with in
6063 * the time(timeout).
6064 * XGE_HAL_ERR_SPDM_TABLE_FULL - SPDM table is full.
6065 * XGE_HAL_ERR_SPDM_INVALID_ENTRY - Invalid SPDM entry.
6067 * See also: xge_hal_spdm_entry_remove{}.
/* NOTE(review): dump is non-contiguous — the is_ipv4/is_tcp branch keywords,
 * several local declarations, and the IPv6 ipaddr_len assignment are missing
 * between the visible lines. */
6070 xge_hal_spdm_entry_add(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
6071 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
6072 u8 is_tcp, u8 is_ipv4, u8 tgt_queue)
6075 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6076 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6079 u32 jhash_golden_ratio;
6083 u8 msg[XGE_HAL_JHASH_MSG_LEN];
6085 xge_hal_status_e status;
6088 if (!hldev->config.rth_spdm_en) {
6089 return XGE_HAL_ERR_SPDM_NOT_ENABLED;
/* Target ring must fall within the valid ring range. */
6092 if ((tgt_queue < XGE_HAL_MIN_RING_NUM) ||
6093 (tgt_queue > XGE_HAL_MAX_RING_NUM)) {
6094 return XGE_HAL_ERR_SPDM_INVALID_ENTRY;
6099 * Calculate the jenkins hash.
6102 * Create the Jenkins hash algorithm key.
6103 * key = {L3SA, L3DA, L4SP, L4DP}, if SPDM is configured to
6104 * use L4 information. Otherwise key = {L3SA, L3DA}.
/* IPv4 path (branch keyword elided from dump). */
6108 ipaddr_len = 4; // In bytes
6114 * Jenkins hash algorithm expects the key in the big endian
6115 * format. Since key is the byte array, memcpy won't work in the
6116 * case of little endian. So, the current code extracts each
6117 * byte starting from MSB and store it in the key.
6120 for (off = 0; off < ipaddr_len; off++) {
6121 u32 mask = vBIT32(0xff,(off*8),8);
6122 int shift = 32-(off+1)*8;
6123 msg[off] = (u8)((src_ip->ipv4.addr & mask) >> shift);
6124 msg[off+ipaddr_len] =
6125 (u8)((dst_ip->ipv4.addr & mask) >> shift);
/* IPv6 path: same MSB-first extraction over two 64-bit words. */
6128 for (off = 0; off < ipaddr_len; off++) {
6130 u64 mask = vBIT(0xff,(loc*8),8);
6131 int shift = 64-(loc+1)*8;
6133 msg[off] = (u8)((src_ip->ipv6.addr[off/8] & mask)
6135 msg[off+ipaddr_len] = (u8)((dst_ip->ipv6.addr[off/8]
6140 off = (2*ipaddr_len);
/* Optionally append L4 source/destination ports, big-endian. */
6142 if (hldev->config.rth_spdm_use_l4) {
6143 msg[off] = (u8)((l4_sp & 0xff00) >> 8);
6144 msg[off + 1] = (u8)(l4_sp & 0xff);
6145 msg[off + 2] = (u8)((l4_dp & 0xff00) >> 8);
6146 msg[off + 3] = (u8)(l4_dp & 0xff);
6151 * Calculate jenkins hash for this configuration
/* Golden ratio and init value come from the rts_rth_jhash_cfg register. */
6153 val64 = xge_os_pio_mem_read64(hldev->pdev,
6155 &bar0->rts_rth_jhash_cfg);
6156 jhash_golden_ratio = (u32)(val64 >> 32);
6157 jhash_init_val = (u32)(val64 & 0xffffffff);
6159 jhash_value = __hal_calc_jhash(msg, off,
6163 xge_os_spin_lock(&hldev->spdm_lock);
6166 * Locate a free slot in the SPDM table. To avoid a search in the
6167 * actual SPDM table, which is very expensive in terms of time,
6168 * we are maintaining a local copy of the table and the search for
6169 * the free entry is performed in the local table.
6171 if ((status = __hal_get_free_spdm_entry(hldev,&spdm_entry))
6173 xge_os_spin_unlock(&hldev->spdm_lock);
6178 * Add this entry to the SPDM table
6180 status = __hal_spdm_entry_add(hldev, src_ip, dst_ip, l4_sp, l4_dp,
6181 is_tcp, is_ipv4, tgt_queue,
6182 jhash_value, /* calculated jhash */
6185 xge_os_spin_unlock(&hldev->spdm_lock);
6191 * xge_hal_spdm_entry_remove - Remove an entry from the SPDM table.
6192 * @devh: HAL device handle.
6193 * @src_ip: Source ip address(IPv4/IPv6).
6194 * @dst_ip: Destination ip address(IPv4/IPv6).
6195 * @l4_sp: L4 source port.
6196 * @l4_dp: L4 destination port.
6197 * @is_tcp: Set to 1, if the protocol is TCP.
6198 * 0, if the protocol is UDP.
6199 * @is_ipv4: Set to 1, if the protocol is IPv4.
6200 * 0, if the protocol is IPv6.
6202 * This function remove an entry from the SPDM table.
6204 * Returns: XGE_HAL_OK - success.
6205 * XGE_HAL_ERR_SPDM_NOT_ENABLED - SPDM support is not enabled.
6206 * XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING - Failed to remove an entry with in
6207 * the time(timeout).
6208 * XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND - Unable to locate the entry in the SPDM
6211 * See also: xge_hal_spdm_entry_add{}.
/* NOTE(review): dump is non-contiguous — several local declarations, the
 * is_ipv4 branch keywords, loop-skip `continue`s, and `break`s are elided
 * between the visible lines; do not edit logic from this view. */
6214 xge_hal_spdm_entry_remove(xge_hal_device_h devh, xge_hal_ipaddr_t *src_ip,
6215 xge_hal_ipaddr_t *dst_ip, u16 l4_sp, u16 l4_dp,
6216 u8 is_tcp, u8 is_ipv4)
6219 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6220 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6223 xge_hal_status_e status;
6224 u64 spdm_line_arr[8];
6231 if (!hldev->config.rth_spdm_en) {
6232 return XGE_HAL_ERR_SPDM_NOT_ENABLED;
6235 xge_os_spin_lock(&hldev->spdm_lock);
6238 * Poll the rxpic_int_reg register until spdm ready bit is set or
6241 if (__hal_device_register_poll(hldev, &bar0->rxpic_int_reg, 1,
6242 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
6243 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
6245 /* upper layer may require to repeat */
6246 xge_os_spin_unlock(&hldev->spdm_lock);
6247 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
6251 * Clear the SPDM READY bit.
6253 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6254 &bar0->rxpic_int_reg);
6255 val64 &= ~XGE_HAL_RX_PIC_INT_REG_SPDM_READY;
6256 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6257 &bar0->rxpic_int_reg);
6260 * Search in the local SPDM table to get the index of the
6261 * corresponding entry in the SPDM table.
/* Shadow-table scan: skip slots that are free or whose protocol/port
 * tuple does not match, then compare IP addresses per family. */
6264 for (;spdm_entry < hldev->spdm_max_entries; spdm_entry++) {
6265 if ((!hldev->spdm_table[spdm_entry]->in_use) ||
6266 (hldev->spdm_table[spdm_entry]->is_tcp != is_tcp) ||
6267 (hldev->spdm_table[spdm_entry]->l4_sp != l4_sp) ||
6268 (hldev->spdm_table[spdm_entry]->l4_dp != l4_dp) ||
6269 (hldev->spdm_table[spdm_entry]->is_ipv4 != is_ipv4)) {
6274 * Compare the src/dst IP addresses of source and target
6277 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv4.addr
6278 != src_ip->ipv4.addr) ||
6279 (hldev->spdm_table[spdm_entry]->dst_ip.ipv4.addr
6280 != dst_ip->ipv4.addr)) {
/* IPv6 comparison (branch keyword elided from dump). */
6284 if ((hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[0]
6285 != src_ip->ipv6.addr[0]) ||
6286 (hldev->spdm_table[spdm_entry]->src_ip.ipv6.addr[1]
6287 != src_ip->ipv6.addr[1]) ||
6288 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[0]
6289 != dst_ip->ipv6.addr[0]) ||
6290 (hldev->spdm_table[spdm_entry]->dst_ip.ipv6.addr[1]
6291 != dst_ip->ipv6.addr[1])) {
6298 if (spdm_entry >= hldev->spdm_max_entries) {
6299 xge_os_spin_unlock(&hldev->spdm_lock);
6300 return XGE_HAL_ERR_SPDM_ENTRY_NOT_FOUND;
6304 * Retrieve the corresponding entry from the SPDM table and
6305 * make sure that the data is consistent.
6307 for(line_no = 0; line_no < 8; line_no++) {
6310 * SPDM line 2,3,4 are valid only for IPv6 entry.
6311 * SPDM line 5 & 6 are reserved. We don't have to
6312 * read these entries in the above cases.
6315 ((line_no == 2)||(line_no == 3)||(line_no == 4))) ||
6321 if ((status = __hal_read_spdm_entry_line(
6325 &spdm_line_arr[line_no]))
6327 xge_os_spin_unlock(&hldev->spdm_lock);
6333 * Seventh line of the spdm entry contains the entry_enable
6334 * bit. Make sure that the entry_enable bit of this spdm entry
6336 * To remove an entry from the SPDM table, reset this
6339 if (!(spdm_line_arr[7] & BIT(63))) {
6343 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6344 "consistent with the actual one for the spdm "
6345 "entry %d ", spdm_entry);
6350 * Retrieve the L4 SP/DP, src/dst ip addresses from the SPDM
6351 * table and do a comparison.
/* Unpack protocol flags and ports from SPDM line 0. */
6353 spdm_is_tcp = (u8)((spdm_line_arr[0] & BIT(59)) >> 4);
6354 spdm_is_ipv4 = (u8)(spdm_line_arr[0] & BIT(63));
6355 spdm_l4_sp = (u16)(spdm_line_arr[0] >> 48);
6356 spdm_l4_dp = (u16)((spdm_line_arr[0] >> 32) & 0xffff);
6359 if ((spdm_is_tcp != is_tcp) ||
6360 (spdm_is_ipv4 != is_ipv4) ||
6361 (spdm_l4_sp != l4_sp) ||
6362 (spdm_l4_dp != l4_dp)) {
6366 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6367 "consistent with the actual one for the spdm "
6368 "entry %d ", spdm_entry);
6373 /* Upper 32 bits of spdm_line(64 bit) contains the
6374 * src IPv4 address. Lower 32 bits of spdm_line
6375 * contains the destination IPv4 address.
6377 u32 temp_src_ip = (u32)(spdm_line_arr[1] >> 32);
6378 u32 temp_dst_ip = (u32)(spdm_line_arr[1] & 0xffffffff);
6380 if ((temp_src_ip != src_ip->ipv4.addr) ||
6381 (temp_dst_ip != dst_ip->ipv4.addr)) {
6382 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6383 "consistent with the actual one for the spdm "
6384 "entry %d ", spdm_entry);
6390 * SPDM line 1 & 2 contains the src IPv6 address.
6391 * SPDM line 3 & 4 contains the dst IPv6 address.
6393 if ((spdm_line_arr[1] != src_ip->ipv6.addr[0]) ||
6394 (spdm_line_arr[2] != src_ip->ipv6.addr[1]) ||
6395 (spdm_line_arr[3] != dst_ip->ipv6.addr[0]) ||
6396 (spdm_line_arr[4] != dst_ip->ipv6.addr[1])) {
6401 xge_debug_device(XGE_ERR, "Local SPDM table is not "
6402 "consistent with the actual one for the spdm "
6403 "entry %d ", spdm_entry);
6409 * Reset the entry_enable bit to zero
6411 spdm_line_arr[7] &= ~BIT(63);
/* Write line 7 back directly into SPDM memory (entry*64 + line 7*8). */
6413 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6415 (void *)((char *)hldev->spdm_mem_base +
6416 (spdm_entry * 64) + (7 * 8)));
6419 * Wait for the operation to be completed.
6421 if (__hal_device_register_poll(hldev,
6422 &bar0->rxpic_int_reg, 1,
6423 XGE_HAL_RX_PIC_INT_REG_SPDM_READY,
6424 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
6425 xge_os_spin_unlock(&hldev->spdm_lock);
6426 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
6430 * Make the corresponding spdm entry in the local SPDM table
6431 * available for future use.
6433 hldev->spdm_table[spdm_entry]->in_use = 0;
6434 xge_os_spin_unlock(&hldev->spdm_lock);
/* Shared error exit: shadow table and hardware disagree. */
6439 xge_os_spin_unlock(&hldev->spdm_lock);
6440 return XGE_HAL_ERR_SPDM_TABLE_DATA_INCONSISTENT;
6444 * __hal_device_rti_set
6445 * @ring: The post_qid of the ring.
6446 * @channel: HAL channel of the ring.
6448 * This function stores the RTI value associated for the MSI and
6449 * also unmasks this particular RTI in the rti_mask register.
/* Record the RTI (ring id) on the channel for MSI/MSI-X modes and clear
 * the ring's bit in rx_traffic_mask to unmask its interrupt. */
6451 static void __hal_device_rti_set(int ring_qid, xge_hal_channel_t *channel)
6453 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6454 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6457 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
6458 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
6459 channel->rti = (u8)ring_qid;
6461 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6462 &bar0->rx_traffic_mask);
/* 0 = unmasked for this ring. */
6463 val64 &= ~BIT(ring_qid);
6464 xge_os_pio_mem_write64(hldev->pdev,
6465 hldev->regh0, val64,
6466 &bar0->rx_traffic_mask);
6470 * __hal_device_tti_set
6471 * @ring: The post_qid of the FIFO.
6472 * @channel: HAL channel the FIFO.
6474 * This function stores the TTI value associated for the MSI and
6475 * also unmasks this particular TTI in the tti_mask register.
/* Mirror of __hal_device_rti_set for the transmit side: record the TTI
 * (fifo id) and clear the fifo's bit in tx_traffic_mask. */
6477 static void __hal_device_tti_set(int fifo_qid, xge_hal_channel_t *channel)
6479 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6480 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6483 if (hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSI ||
6484 hldev->config.intr_mode == XGE_HAL_INTR_MODE_MSIX)
6485 channel->tti = (u8)fifo_qid;
6487 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6488 &bar0->tx_traffic_mask);
/* 0 = unmasked for this fifo. */
6489 val64 &= ~BIT(fifo_qid);
6490 xge_os_pio_mem_write64(hldev->pdev,
6491 hldev->regh0, val64,
6492 &bar0->tx_traffic_mask);
6496 * xge_hal_channel_msi_set - Associate a RTI with a ring or TTI with a
6497 * FIFO for a given MSI.
6498 * @channelh: HAL channel handle.
6499 * @msi: MSI Number associated with the channel.
6500 * @msi_msg: The MSI message associated with the MSI number above.
6502 * This API will associate a given channel (either Ring or FIFO) with the
6503 * given MSI number. It will also program the Tx_Mat/Rx_Mat tables in the
6504 * hardware to indicate this association to the hardware.
/* Associate the channel with an MSI: program Rx_Mat (rings) or Tx_Mat
 * (fifos), then unmask the matching RTI/TTI.
 * NOTE(review): the rx_mat/tx_mat register operands on the read/write
 * lines are elided from this dump. */
6507 xge_hal_channel_msi_set(xge_hal_channel_h channelh, int msi, u32 msi_msg)
6509 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
6510 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6511 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6514 channel->msi_msg = msi_msg;
6515 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
6516 int ring = channel->post_qid;
6517 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Ring: %d,"
6518 " MSI: %d", channel->msi_msg, ring, msi);
6519 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6521 val64 |= XGE_HAL_SET_RX_MAT(ring, msi);
6522 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6524 __hal_device_rti_set(ring, channel);
/* FIFO branch (else keyword partly elided from dump). */
6526 int fifo = channel->post_qid;
6527 xge_debug_osdep(XGE_TRACE, "MSI Data: 0x%4x, Fifo: %d,"
6528 " MSI: %d", channel->msi_msg, fifo, msi);
6529 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6531 val64 |= XGE_HAL_SET_TX_MAT(fifo, msi);
6532 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6534 __hal_device_tti_set(fifo, channel);
6541 * xge_hal_mask_msix - Begin IRQ processing.
6542 * @hldev: HAL device handle.
6545 * The function masks the msix interrupt for the given msi_id
6550 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range
/* Mask one MSI-X vector via its BAR2 vector-control dword (entry stride is
 * 4 dwords; dword 3 is vector control). The bit-set between the read and
 * the write is elided from this dump. */
6555 xge_hal_mask_msix(xge_hal_device_h devh, int msi_id)
6557 xge_hal_status_e status = XGE_HAL_OK;
6558 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6559 u32 *bar2 = (u32 *)hldev->bar2;
6562 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
6564 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
6566 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
6571 * xge_hal_unmask_msix - Begin IRQ processing.
6572 * @hldev: HAL device handle.
6575 * The function unmasks the msix interrupt for the given msi_id
6580 * Otherwise, XGE_HAL_ERR_WRONG_IRQ if the msix index is out of range
/* Unmask one MSI-X vector: read-modify-write of the BAR2 vector-control
 * dword; the bit-clear between the read and write is elided from this dump. */
6585 xge_hal_unmask_msix(xge_hal_device_h devh, int msi_id)
6587 xge_hal_status_e status = XGE_HAL_OK;
6588 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
6589 u32 *bar2 = (u32 *)hldev->bar2;
6592 xge_assert(msi_id < XGE_HAL_MAX_MSIX_MESSAGES);
6594 val32 = xge_os_pio_mem_read32(hldev->pdev, hldev->regh2, &bar2[msi_id*4+3]);
6596 xge_os_pio_mem_write32(hldev->pdev, hldev->regh2, val32, &bar2[msi_id*4+3]);
6601 * __hal_set_msix_vals
6602 * @devh: HAL device handle.
6603 * @msix_value: 32bit MSI-X value transferred across PCI to @msix_address.
6604 * Filled in by this function.
6605 * @msix_address: 32bit MSI-X DMA address.
6606 * Filled in by this function.
6607 * @msix_idx: index that corresponds to the (@msix_value, @msix_address)
6608 * entry in the table of MSI-X (value, address) pairs.
6610 * This function will program the hardware associating the given
6611 * address/value combination to the specified msi number.
/* Read back the (value, address) pair for an MSI-X slot via the xmsi_access
 * window: write index+strobe, wait for strobe clear, then read data/address.
 * NOTE(review): remaining parameters and the polling-loop header are elided
 * from this dump. */
6613 static void __hal_set_msix_vals (xge_hal_device_h devh,
6620 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
6621 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6624 val64 = XGE_HAL_XMSI_NO(msix_idx) | XGE_HAL_XMSI_STROBE;
6625 __hal_pio_mem_write32_upper(hldev->pdev, hldev->regh0,
6626 (u32)(val64 >> 32), &bar0->xmsi_access);
6627 __hal_pio_mem_write32_lower(hldev->pdev, hldev->regh0,
6628 (u32)(val64), &bar0->xmsi_access);
6630 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6631 &bar0->xmsi_access);
/* Strobe still set means the access has not completed yet. */
6632 if (val64 & XGE_HAL_XMSI_STROBE)
6637 *msix_value = (u32)(xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6639 *msix_addr = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6640 &bar0->xmsi_address);
6644 * xge_hal_channel_msix_set - Associate MSI-X with a channel.
6645 * @channelh: HAL channel handle.
6646 * @msix_idx: index that corresponds to a particular (@msix_value,
6647 * @msix_address) entry in the MSI-X table.
6649 * This API associates a given channel (either Ring or FIFO) with the
6650 * given MSI-X number. It programs the Xframe's Tx_Mat/Rx_Mat tables
6651 * to indicate this association.
/* Associate the channel with an MSI-X vector: program Rx_Mat/Tx_Mat, unmask
 * the RTI/TTI, record intr_vector in config, then cache the vector's
 * (data, address) pair on the channel.
 * NOTE(review): rx_mat/tx_mat register operands are elided from this dump.
 * NOTE(review): the ring branch writes config.fifo.queue[...] and the fifo
 * branch writes config.ring.queue[...] — looks swapped; verify against the
 * full source before relying on this. */
6654 xge_hal_channel_msix_set(xge_hal_channel_h channelh, int msix_idx)
6656 xge_hal_channel_t *channel = (xge_hal_channel_t *)channelh;
6657 xge_hal_device_t *hldev = (xge_hal_device_t*)channel->devh;
6658 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
6661 if (channel->type == XGE_HAL_CHANNEL_TYPE_RING) {
6662 /* Currently Ring and RTI is one on one. */
6663 int ring = channel->post_qid;
6664 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6666 val64 |= XGE_HAL_SET_RX_MAT(ring, msix_idx);
6667 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6669 __hal_device_rti_set(ring, channel);
6670 hldev->config.fifo.queue[channel->post_qid].intr_vector =
6672 } else if (channel->type == XGE_HAL_CHANNEL_TYPE_FIFO) {
6673 int fifo = channel->post_qid;
6674 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6676 val64 |= XGE_HAL_SET_TX_MAT(fifo, msix_idx);
6677 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6679 __hal_device_tti_set(fifo, channel);
6680 hldev->config.ring.queue[channel->post_qid].intr_vector =
6683 channel->msix_idx = msix_idx;
6684 __hal_set_msix_vals(hldev, &channel->msix_data,
6685 &channel->msix_address,
6691 #if defined(XGE_HAL_CONFIG_LRO)
6693 * xge_hal_lro_terminate - Terminate lro resources.
6694 * @lro_scale: Amount of lro memory.
6695 * @hldev: Hal device structure.
/* LRO teardown; body (empty in this excerpt) elided from the dump. */
6699 xge_hal_lro_terminate(u32 lro_scale,
6700 xge_hal_device_t *hldev)
6705 * xge_hal_lro_init - Initiate lro resources.
6706 * @lro_scale: Amount of lro memory.
6707 * @hldev: Hal device structure.
6708 * Note: For time being I am using only one LRO per device. Later on size
6709 * will be increased.
/* Apply default LRO segment size / frame length when configured to
 * "hardcode", then zero each ring's LRO bucket pool and reset its cursor. */
6713 xge_hal_lro_init(u32 lro_scale,
6714 xge_hal_device_t *hldev)
6718 if (hldev->config.lro_sg_size == XGE_HAL_DEFAULT_USE_HARDCODE)
6719 hldev->config.lro_sg_size = XGE_HAL_LRO_DEFAULT_SG_SIZE;
6721 if (hldev->config.lro_frm_len == XGE_HAL_DEFAULT_USE_HARDCODE)
6722 hldev->config.lro_frm_len = XGE_HAL_LRO_DEFAULT_FRM_LEN;
6724 for (i=0; i < XGE_HAL_MAX_RING_NUM; i++)
6726 xge_os_memzero(hldev->lro_desc[i].lro_pool,
6727 sizeof(lro_t) * XGE_HAL_LRO_MAX_BUCKETS);
6729 hldev->lro_desc[i].lro_next_idx = 0;
6730 hldev->lro_desc[i].lro_recent = NULL;
6739 * xge_hal_device_poll - HAL device "polling" entry point.
6740 * @devh: HAL device.
6742 * HAL "polling" entry point. Note that this is part of HAL public API.
6743 * Upper-Layer driver _must_ periodically poll HAL via
6744 * xge_hal_device_poll().
6746 * HAL uses caller's execution context to serially process accumulated
6747 * slow-path events, such as link state changes and hardware error
6750 * The rate of polling could be somewhere between 500us to 10ms,
6751 * depending on requirements (e.g., the requirement to support fail-over
6752 * could mean that 500us or even 100us polling interval need to be used).
6754 * The need and motivation for external polling includes
6756 * - remove the error-checking "burden" from the HAL interrupt handler
6757 * (see xge_hal_device_handle_irq());
6759 * - remove the potential source of portability issues by _not_
6760 * implementing separate polling thread within HAL itself.
6762 * See also: xge_hal_event_e{}, xge_hal_driver_config_t{}.
6763 * Usage: See ex_slow_path{}.
/* NOTE(review): dump is non-contiguous — several braces, `break`s, the
 * `_again:` style loop target (if any), and some statements between the
 * visible lines are missing; treat flow below as indicative only. */
6766 xge_hal_device_poll(xge_hal_device_h devh)
/* Stack buffer sized to hold a queue item plus its max payload. */
6768 unsigned char item_buf[sizeof(xge_queue_item_t) +
6769 XGE_DEFAULT_EVENT_MAX_DATA_SIZE];
6770 xge_queue_item_t *item = (xge_queue_item_t *)(void *)item_buf;
6771 xge_queue_status_e qstatus;
6772 xge_hal_status_e hstatus;
6774 int queue_has_critical_event = 0;
6775 xge_hal_device_t *hldev = (xge_hal_device_t*)devh;
6777 xge_os_memzero(item_buf, (sizeof(xge_queue_item_t) +
6778 XGE_DEFAULT_EVENT_MAX_DATA_SIZE));
/* Bail out if the device is gone, terminating, or corrupted. */
6781 if (!hldev->is_initialized ||
6782 hldev->terminating ||
6783 hldev->magic != XGE_HAL_MAGIC)
/* XPAK transceiver stats are refreshed only once per 72000 poll ticks. */
6786 if(hldev->stats.sw_dev_err_stats.xpak_counter.tick_period < 72000)
6791 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period++;
6794 * Logging Error messages in the excess temperature,
6795 * Bias current, laser output for three cycle
6797 __hal_updt_stats_xpak(hldev);
6798 hldev->stats.sw_dev_err_stats.xpak_counter.tick_period = 0;
6801 if (!queue_has_critical_event)
6802 queue_has_critical_event =
6803 __queue_get_reset_critical(hldev->queueh);
/* Drain up to QUEUE_CONSUME_MAX events per poll, or keep going while a
 * critical event is pending. */
6806 while (i++ < XGE_HAL_DRIVER_QUEUE_CONSUME_MAX || queue_has_critical_event) {
6808 qstatus = xge_queue_consume(hldev->queueh,
6809 XGE_DEFAULT_EVENT_MAX_DATA_SIZE,
6811 if (qstatus == XGE_QUEUE_IS_EMPTY)
6814 xge_debug_queue(XGE_TRACE,
6815 "queueh 0x"XGE_OS_LLXFMT" consumed event: %d ctxt 0x"
6816 XGE_OS_LLXFMT, (u64)(ulong_t)hldev->queueh, item->event_type,
6817 (u64)(ulong_t)item->context);
6819 if (!hldev->is_initialized ||
6820 hldev->magic != XGE_HAL_MAGIC) {
6825 switch (item->event_type) {
6826 case XGE_HAL_EVENT_LINK_IS_UP: {
/* Link transitions are suppressed when a critical event is queued. */
6827 if (!queue_has_critical_event &&
6828 g_xge_hal_driver->uld_callbacks.link_up) {
6829 g_xge_hal_driver->uld_callbacks.link_up(
6830 hldev->upper_layer_info);
6831 hldev->link_state = XGE_HAL_LINK_UP;
6834 case XGE_HAL_EVENT_LINK_IS_DOWN: {
6835 if (!queue_has_critical_event &&
6836 g_xge_hal_driver->uld_callbacks.link_down) {
6837 g_xge_hal_driver->uld_callbacks.link_down(
6838 hldev->upper_layer_info);
6839 hldev->link_state = XGE_HAL_LINK_DOWN;
6842 case XGE_HAL_EVENT_SERR:
6843 case XGE_HAL_EVENT_ECCERR:
6844 case XGE_HAL_EVENT_PARITYERR:
6845 case XGE_HAL_EVENT_TARGETABORT:
6846 case XGE_HAL_EVENT_SLOT_FREEZE: {
6847 void *item_data = xge_queue_item_data(item);
6848 xge_hal_event_e event_type = item->event_type;
6849 u64 val64 = *((u64*)item_data);
/* A slot freeze masks the original error; reclassify if detected. */
6851 if (event_type != XGE_HAL_EVENT_SLOT_FREEZE)
6852 if (xge_hal_device_is_slot_freeze(hldev))
6853 event_type = XGE_HAL_EVENT_SLOT_FREEZE;
6854 if (g_xge_hal_driver->uld_callbacks.crit_err) {
6855 g_xge_hal_driver->uld_callbacks.crit_err(
6856 hldev->upper_layer_info,
6859 /* handle one critical event per poll cycle */
6865 xge_debug_queue(XGE_TRACE,
6866 "got non-HAL event %d",
6871 /* broadcast this event */
6872 if (g_xge_hal_driver->uld_callbacks.event)
6873 g_xge_hal_driver->uld_callbacks.event(item);
/* Bracket the hardware poll with the ULD's before/after hooks. */
6876 if (g_xge_hal_driver->uld_callbacks.before_device_poll) {
6877 if (g_xge_hal_driver->uld_callbacks.before_device_poll(
6884 hstatus = __hal_device_poll(hldev);
6885 if (g_xge_hal_driver->uld_callbacks.after_device_poll)
6886 g_xge_hal_driver->uld_callbacks.after_device_poll(hldev);
6889 * handle critical error right away:
6890 * - walk the device queue again
6891 * - drop non-critical events, if any
6892 * - look for the 1st critical
6894 if (hstatus == XGE_HAL_ERR_CRITICAL) {
6895 queue_has_critical_event = 1;
6903 * xge_hal_rts_rth_init - Set enhanced mode for RTS hashing.
6904 * @hldev: HAL device handle.
6906 * This function is used to set the adapter to enhanced mode.
6908 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
/*
 * Switch the adapter's receive traffic steering from the default
 * (classic) mode to enhanced mode: read-modify-write of the BAR0
 * rts_ctrl register, setting XGE_HAL_RTS_CTRL_ENHANCED_MODE.
 */
6911 xge_hal_rts_rth_init(xge_hal_device_t *hldev)
6913 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6917 * Set the receive traffic steering mode from default(classic)
6920 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6922 val64 |= XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6923 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6924 val64, &bar0->rts_ctrl);
6928 * xge_hal_rts_rth_clr - Clear RTS hashing.
6929 * @hldev: HAL device handle.
6931 * This function is used to clear all RTS hashing related stuff.
6932 * It brings the adapter out from enhanced mode to classic mode.
6933 * It also clears RTS_RTH_CFG register i.e clears hash type, function etc.
6935 * See also: xge_hal_rts_rth_set(), xge_hal_rts_rth_itable_set().
/*
 * Bring the adapter back from enhanced receive-steering mode to classic
 * mode (clear RTS_CTRL.ENHANCED_MODE), then clear the rts_rth_cfg
 * register (hash type, hash function, etc.).
 */
6938 xge_hal_rts_rth_clr(xge_hal_device_t *hldev)
6940 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6944 * Set the receive traffic steering mode from default(classic)
6947 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
6949 val64 &= ~XGE_HAL_RTS_CTRL_ENHANCED_MODE;
6950 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
6951 val64, &bar0->rts_ctrl);
/*
 * NOTE(review): as written, this stores the modified rts_ctrl value into
 * rts_rth_cfg. Clearing the register requires val64 to be zeroed first;
 * a "val64 = 0;" line appears to have been lost here — verify against
 * the upstream source before relying on this path.
 */
6953 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6954 &bar0->rts_rth_cfg);
6958 * xge_hal_rts_rth_set - Set/configure RTS hashing.
6959 * @hldev: HAL device handle.
6960 * @def_q: default queue
6961 * @hash_type: hash type i.e TcpIpV4, TcpIpV6 etc.
6962 * @bucket_size: no of least significant bits to be used for hashing.
6964 * Used to set/configure all RTS hashing related stuff.
6965 * - set the steering mode to enhanced.
6966 * - set hash function i.e algo selection.
6967 * - set the default queue.
6969 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set().
/*
 * Configure RTS hashing: program the default queue, then enable RTH with
 * the requested bucket size and the MS algorithm selection in rts_rth_cfg.
 * @def_q: default receive queue for unmatched traffic.
 * @hash_type: hash type selector (TcpIpV4, TcpIpV6, ...).
 * @bucket_size: number of least-significant hash bits used for bucketing.
 */
6972 xge_hal_rts_rth_set(xge_hal_device_t *hldev, u8 def_q, u64 hash_type,
6975 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
6978 val64 = XGE_HAL_RTS_DEFAULT_Q(def_q);
6979 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6980 &bar0->rts_default_q);
/*
 * NOTE(review): val64 is OR-ed into below without a visible fresh
 * assignment; the line seeding val64 from @hash_type appears to have
 * been lost in this extract — confirm against the upstream source.
 */
6983 val64 |= XGE_HAL_RTS_RTH_EN;
6984 val64 |= XGE_HAL_RTS_RTH_BUCKET_SIZE(bucket_size);
6985 val64 |= XGE_HAL_RTS_RTH_ALG_SEL_MS;
6986 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
6987 &bar0->rts_rth_cfg);
6991 * xge_hal_rts_rth_start - Start RTS hashing.
6992 * @hldev: HAL device handle.
6994 * Used to start RTS hashing.
6996 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_stop().
/*
 * Start RTS hashing: read-modify-write of rts_rth_cfg, setting the
 * XGE_HAL_RTS_RTH_EN bit while preserving the rest of the configuration.
 */
6999 xge_hal_rts_rth_start(xge_hal_device_t *hldev)
7001 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7005 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7006 &bar0->rts_rth_cfg);
7007 val64 |= XGE_HAL_RTS_RTH_EN;
7008 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7009 &bar0->rts_rth_cfg);
7013 * xge_hal_rts_rth_stop - Stop the RTS hashing.
7014 * @hldev: HAL device handle.
7016 * Used to stop RTS hashing.
7018 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_itable_set(), xge_hal_rts_rth_start().
/*
 * Stop RTS hashing: read-modify-write of rts_rth_cfg, clearing the
 * XGE_HAL_RTS_RTH_EN bit while leaving the rest of the configuration
 * (hash type, bucket size, algorithm) intact for a later restart.
 */
7021 xge_hal_rts_rth_stop(xge_hal_device_t *hldev)
7023 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7026 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7027 &bar0->rts_rth_cfg);
7028 val64 &= ~XGE_HAL_RTS_RTH_EN;
7029 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7030 &bar0->rts_rth_cfg);
7034 * xge_hal_rts_rth_itable_set - Set/configure indirection table (IT).
7035 * @hldev: HAL device handle.
7036 * @itable: Pointer to the indirection table
7037 * @itable_size: no of least significant bits to be used for hashing
7039 * Used to set/configure indirection table.
7040 * It enables the required no of entries in the IT.
7041 * It adds entries to the IT.
7043 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
/*
 * Program the RTH indirection table (IT): for each of @itable_size
 * entries, write the ring/queue number into rts_rth_map_mem_data, then
 * kick a write-strobe command via rts_rth_map_mem_ctrl and poll for the
 * strobe to clear. Returns XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING if the
 * strobe does not complete within the command-memory timeout, in which
 * case the caller may retry.
 */
7046 xge_hal_rts_rth_itable_set(xge_hal_device_t *hldev, u8 *itable, u32 itable_size)
7048 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
7052 for (idx = 0; idx < itable_size; idx++) {
/* data word: entry-enable bit plus the table value for this slot */
7053 val64 = XGE_HAL_RTS_RTH_MAP_MEM_DATA_ENTRY_EN |
7054 XGE_HAL_RTS_RTH_MAP_MEM_DATA(itable[idx]);
7056 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7057 &bar0->rts_rth_map_mem_data);
/* control word: write-enable + strobe + destination offset */
7060 val64 = (XGE_HAL_RTS_RTH_MAP_MEM_CTRL_WE |
7061 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE |
7062 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_OFFSET(idx));
7063 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7064 &bar0->rts_rth_map_mem_ctrl);
7066 /* poll until done */
7067 if (__hal_device_register_poll(hldev,
7068 &bar0->rts_rth_map_mem_ctrl, 0,
7069 XGE_HAL_RTS_RTH_MAP_MEM_CTRL_STROBE,
7070 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK) {
7071 /* upper layer may require to repeat */
7072 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
7081 * xge_hal_device_rts_rth_key_set - Configure 40byte secret for hash calc.
7083 * @hldev: HAL device handle.
7084 * @KeySize: Number of 64-bit words
7085 * @Key: up to 40-byte array of 8-bit values
7086 * This function configures the 40-byte secret which is used for hash
7089 * See also: xge_hal_rts_rth_clr(), xge_hal_rts_rth_set().
/*
 * Program the 40-byte RTH secret key: pack the key bytes eight at a time
 * into 64-bit words and write them to the rts_rth_hash_mask registers;
 * any remaining registers (for keys shorter than 40 bytes) are cleared.
 * @KeySize: number of 64-bit words supplied.
 * @Key: up to 40 bytes of key material.
 * NOTE(review): inner loop lines (the per-byte shift and the KeySize
 * bound checks) appear elided in this extract; comments describe only
 * the visible skeleton.
 */
7092 xge_hal_device_rts_rth_key_set(xge_hal_device_t *hldev, u8 KeySize, u8 *Key)
7094 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *) hldev->bar0;
7103 for ( i = 0; i < 8 ; i++) {
7104 /* Prepare 64-bit word for 'nreg' containing 8 keys. */
7107 val64 |= Key[entry++];
7112 /* temp64 = XGE_HAL_RTH_HASH_MASK_n(val64, (n<<3), (n<<3)+7);*/
7113 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7114 &bar0->rts_rth_hash_mask[nreg++]);
7118 /* Clear the rest if key is less than 40 bytes */
7120 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7121 &bar0->rts_rth_hash_mask[nreg++]);
7127 * xge_hal_device_is_closed - Device is closed
7129 * @devh: HAL device handle.
/*
 * Report whether the device has no open channels: true only when both
 * the fifo (Tx) and ring (Rx) channel lists are empty.
 */
7132 xge_hal_device_is_closed(xge_hal_device_h devh)
7134 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7136 if (xge_list_is_empty(&hldev->fifo_channels) &&
7137 xge_list_is_empty(&hldev->ring_channels))
/*
 * Enable the RTS MAC-address section that contains @index: each section
 * covers 32 MAC addresses, so section = index / 32. Sets the matching
 * SECTn_EN bit in rts_mac_cfg via read-modify-write.
 * Returns XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES when @index exceeds the
 * per-card maximum (larger on Herc than on Xena).
 * NOTE(review): the switch/case scaffolding selecting between the SECTn
 * bits appears elided in this extract; only the OR lines are visible.
 */
7144 xge_hal_device_rts_section_enable(xge_hal_device_h devh, int index)
7148 int max_addr = XGE_HAL_MAX_MAC_ADDRESSES;
7150 xge_hal_device_t *hldev = (xge_hal_device_t *)devh;
7151 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)(void *)hldev->bar0;
/* Herc supports more MAC addresses than the default (Xena) */
7153 if (xge_hal_device_check_id(hldev) == XGE_HAL_CARD_HERC)
7154 max_addr = XGE_HAL_MAX_MAC_ADDRESSES_HERC;
7156 if ( index >= max_addr )
7157 return XGE_HAL_ERR_OUT_OF_MAC_ADDRESSES;
7160 * Calculate the section value
7162 section = index / 32;
7164 xge_debug_device(XGE_TRACE, "the Section value is %d ", section);
7166 val64 = xge_os_pio_mem_read64(hldev->pdev, hldev->regh0,
7167 &bar0->rts_mac_cfg);
7171 val64 |= XGE_HAL_RTS_MAC_SECT0_EN;
7174 val64 |= XGE_HAL_RTS_MAC_SECT1_EN;
7177 val64 |= XGE_HAL_RTS_MAC_SECT2_EN;
7180 val64 |= XGE_HAL_RTS_MAC_SECT3_EN;
7183 val64 |= XGE_HAL_RTS_MAC_SECT4_EN;
7186 val64 |= XGE_HAL_RTS_MAC_SECT5_EN;
7189 val64 |= XGE_HAL_RTS_MAC_SECT6_EN;
7192 val64 |= XGE_HAL_RTS_MAC_SECT7_EN;
7195 xge_debug_device(XGE_ERR, "Invalid Section value %d "
7199 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0,
7200 val64, &bar0->rts_mac_cfg);
7206 * xge_hal_fix_rldram_ecc_error
7207 * @hldev: private member of the device structure.
7209 * SXE-02-010. This function will turn OFF the ECC error reporting for the
7210 * interface bet'n external Micron RLDRAM II device and memory controller.
7211 * The error would have been reported in RLD_ECC_DB_ERR_L and RLD_ECC_DB_ERR_U
7212 * fields of MC_ERR_REG register. Issue reported by HP-Unix folks during the
7213 * qualification of Herc.
7216 xge_hal_fix_rldram_ecc_error(xge_hal_device_t * hldev)
7218 xge_hal_pci_bar0_t *bar0 = (xge_hal_pci_bar0_t *)hldev->bar0;
7222 val64 = XGE_HAL_MC_RLDRAM_TEST_MODE;
7223 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7224 &bar0->mc_rldram_test_ctrl);
7226 // Enable fg/bg tests.
7227 val64 = 0x0100000000000000ULL;
7228 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7231 // Enable RLDRAM configuration.
7232 val64 = 0x0000000000017B00ULL;
7233 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7234 &bar0->mc_rldram_mrs);
7236 // Enable RLDRAM queues.
7237 val64 = 0x0000000001017B00ULL;
7238 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7239 &bar0->mc_rldram_mrs);
7241 // Setup test ranges
7242 val64 = 0x00000000001E0100ULL;
7243 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7244 &bar0->mc_rldram_test_add);
7246 val64 = 0x00000100001F0100ULL;
7247 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7248 &bar0->mc_rldram_test_add_bkg);
7250 val64 = 0x0001000000010000ULL;
7251 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7252 &bar0->mc_rldram_test_ctrl);
7254 if (__hal_device_register_poll(hldev, &bar0->mc_rldram_test_ctrl, 1,
7255 XGE_HAL_MC_RLDRAM_TEST_DONE,
7256 XGE_HAL_DEVICE_CMDMEM_WAIT_MAX_MILLIS) != XGE_HAL_OK){
7257 return XGE_HAL_INF_MEM_STROBE_CMD_EXECUTING;
7261 val64 = 0x0000000000000000ULL;
7262 xge_os_pio_mem_write64(hldev->pdev, hldev->regh0, val64,
7263 &bar0->mc_rldram_test_ctrl);