2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2010-2016 Solarflare Communications Inc.
7 * This software was developed in part by Philip Paeps under contract for
8 * Solarflare Communications, Inc.
10 * Redistribution and use in source and binary forms, with or without
11 * modification, are permitted provided that the following conditions are met:
13 * 1. Redistributions of source code must retain the above copyright notice,
14 * this list of conditions and the following disclaimer.
15 * 2. Redistributions in binary form must reproduce the above copyright notice,
16 * this list of conditions and the following disclaimer in the documentation
17 * and/or other materials provided with the distribution.
19 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
20 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO,
21 * THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
22 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR
23 * CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL,
24 * EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO,
25 * PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS;
26 * OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY,
27 * WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR
28 * OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE,
29 * EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
31 * The views and conclusions contained in the software and documentation are
32 * those of the authors and should not be interpreted as representing official
33 * policies, either expressed or implied, of the FreeBSD Project.
45 #include <sys/param.h>
47 #include <sys/endian.h>
49 #include <sys/malloc.h>
51 #include <sys/mutex.h>
52 #include <sys/rwlock.h>
54 #include <sys/systm.h>
56 #include <machine/bus.h>
57 #include <machine/endian.h>
59 #define EFSYS_HAS_UINT64 1
60 #if defined(__x86_64__)
61 #define EFSYS_USE_UINT64 1
63 #define EFSYS_USE_UINT64 0
65 #define EFSYS_HAS_SSE2_M128 0
66 #if _BYTE_ORDER == _BIG_ENDIAN
67 #define EFSYS_IS_BIG_ENDIAN 1
68 #define EFSYS_IS_LITTLE_ENDIAN 0
69 #elif _BYTE_ORDER == _LITTLE_ENDIAN
70 #define EFSYS_IS_BIG_ENDIAN 0
71 #define EFSYS_IS_LITTLE_ENDIAN 1
73 #include "efx_types.h"
75 /* Common code requires this */
76 #if __FreeBSD_version < 800068
77 #define memmove(d, s, l) bcopy(s, d, l)
80 /* FreeBSD equivalents of Solaris things */
/*
 * Solaris-style power-of-2 arithmetic helpers.  The alignment argument
 * must be a power of 2 for the bit tricks below to be valid.
 */
/* Nonzero if (v) is aligned on an (a)-byte boundary. */
#define IS_P2ALIGNED(v, a) ((((uintptr_t)(v)) & ((uintptr_t)(a) - 1)) == 0)
/* Round (x) up to the next multiple of (align): -(-x & -a) == (x+a-1) & ~(a-1). */
#define P2ROUNDUP(x, align) (-(-(x) & -(align)))
/* Round (_x) down to the previous multiple of (_a). */
#define P2ALIGN(_x, _a) ((_x) & -(_a))
/* Nonzero if (x) is a power of 2.  NOTE(review): also nonzero for x == 0. */
#define ISP2(x) (((x) & ((x) - 1)) == 0)
108 #if defined(__x86_64__) && __FreeBSD_version >= 1000000
110 #define SFXGE_USE_BUS_SPACE_8 1
112 #if !defined(bus_space_read_stream_8)
114 #define bus_space_read_stream_8(t, h, o) \
115 bus_space_read_8((t), (h), (o))
117 #define bus_space_write_stream_8(t, h, o, v) \
118 bus_space_write_8((t), (h), (o), (v))
/* Solaris errno with no FreeBSD equivalent; map it to EINVAL. */
#define ENOTACTIVE EINVAL
126 /* Memory type to use on FreeBSD */
127 MALLOC_DECLARE(M_SFXGE);
129 /* Machine dependend prefetch wrappers */
130 #if defined(__i386__) || defined(__amd64__)
132 prefetch_read_many(void *addr)
142 prefetch_read_once(void *addr)
150 #elif defined(__sparc64__)
152 prefetch_read_many(void *addr)
162 prefetch_read_once(void *addr)
172 prefetch_read_many(void *addr)
178 prefetch_read_once(void *addr)
184 #if defined(__i386__) || defined(__amd64__)
189 sfxge_map_mbuf_fast(bus_dma_tag_t tag, bus_dmamap_t map,
190 struct mbuf *m, bus_dma_segment_t *seg)
192 #if defined(__i386__) || defined(__amd64__)
193 seg->ds_addr = pmap_kextract(mtod(m, vm_offset_t));
194 seg->ds_len = m->m_len;
198 bus_dmamap_load_mbuf_sg(tag, map, m, seg, &nsegstmp, 0);
202 /* Modifiers used for Windows builds */
205 #define __in_ecount(_n)
206 #define __in_ecount_opt(_n)
207 #define __in_bcount(_n)
208 #define __in_bcount_opt(_n)
212 #define __out_ecount(_n)
213 #define __out_ecount_opt(_n)
214 #define __out_bcount(_n)
215 #define __out_bcount_opt(_n)
216 #define __out_bcount_part(_n, _l)
217 #define __out_bcount_part_opt(_n, _l)
223 #define __inout_ecount(_n)
224 #define __inout_ecount_opt(_n)
225 #define __inout_bcount(_n)
226 #define __inout_bcount_opt(_n)
227 #define __inout_bcount_full_opt(_n)
229 #define __deref_out_bcount_opt(n)
231 #define __checkReturn
232 #define __success(_x)
234 #define __drv_when(_p, _c)
236 /* Code inclusion options */
239 #define EFSYS_OPT_NAMES 1
241 #define EFSYS_OPT_SIENA 1
242 #define EFSYS_OPT_HUNTINGTON 1
243 #define EFSYS_OPT_MEDFORD 1
245 #define EFSYS_OPT_CHECK_REG 1
247 #define EFSYS_OPT_CHECK_REG 0
250 #define EFSYS_OPT_MCDI 1
251 #define EFSYS_OPT_MCDI_LOGGING 0
252 #define EFSYS_OPT_MCDI_PROXY_AUTH 0
254 #define EFSYS_OPT_MAC_STATS 1
256 #define EFSYS_OPT_LOOPBACK 0
258 #define EFSYS_OPT_MON_MCDI 0
259 #define EFSYS_OPT_MON_STATS 0
261 #define EFSYS_OPT_PHY_STATS 1
262 #define EFSYS_OPT_BIST 1
263 #define EFSYS_OPT_PHY_LED_CONTROL 1
264 #define EFSYS_OPT_PHY_FLAGS 0
266 #define EFSYS_OPT_VPD 1
267 #define EFSYS_OPT_NVRAM 1
268 #define EFSYS_OPT_BOOTCFG 0
270 #define EFSYS_OPT_DIAG 0
271 #define EFSYS_OPT_RX_SCALE 1
272 #define EFSYS_OPT_QSTATS 1
273 #define EFSYS_OPT_FILTER 1
274 #define EFSYS_OPT_RX_SCATTER 0
276 #define EFSYS_OPT_EV_PREFETCH 0
278 #define EFSYS_OPT_DECODE_INTR_FATAL 1
280 #define EFSYS_OPT_LICENSING 0
282 #define EFSYS_OPT_ALLOW_UNCONFIGURED_NIC 0
/*
 * Opaque handle identifying the driver instance to common-code callouts
 * (see sfxge_err() below).  Never dereferenced by the common code.
 */
typedef struct __efsys_identifier_s efsys_identifier_t;
292 #define EFSYS_PROBE(_name)
294 #define EFSYS_PROBE1(_name, _type1, _arg1)
296 #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2)
298 #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
301 #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
302 _type3, _arg3, _type4, _arg4)
304 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
305 _type3, _arg3, _type4, _arg4, _type5, _arg5)
307 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
308 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
311 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
312 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
313 _type6, _arg6, _type7, _arg7)
315 #else /* DTRACE_PROBE */
317 #define EFSYS_PROBE(_name) \
320 #define EFSYS_PROBE1(_name, _type1, _arg1) \
321 DTRACE_PROBE1(_name, _type1, _arg1)
323 #define EFSYS_PROBE2(_name, _type1, _arg1, _type2, _arg2) \
324 DTRACE_PROBE2(_name, _type1, _arg1, _type2, _arg2)
326 #define EFSYS_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
328 DTRACE_PROBE3(_name, _type1, _arg1, _type2, _arg2, \
331 #define EFSYS_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
332 _type3, _arg3, _type4, _arg4) \
333 DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
334 _type3, _arg3, _type4, _arg4)
337 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
338 _type3, _arg3, _type4, _arg4, _type5, _arg5) \
339 DTRACE_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
340 _type3, _arg3, _type4, _arg4, _type5, _arg5)
342 #define EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
343 _type3, _arg3, _type4, _arg4, _type5, _arg5) \
344 DTRACE_PROBE4(_name, _type1, _arg1, _type2, _arg2, \
345 _type3, _arg3, _type4, _arg4)
349 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
350 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
352 DTRACE_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
353 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
356 #define EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
357 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
359 EFSYS_PROBE5(_name, _type1, _arg1, _type2, _arg2, \
360 _type3, _arg3, _type4, _arg4, _type5, _arg5)
364 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
365 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
366 _type6, _arg6, _type7, _arg7) \
367 DTRACE_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
368 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
369 _type6, _arg6, _type7, _arg7)
371 #define EFSYS_PROBE7(_name, _type1, _arg1, _type2, _arg2, \
372 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
373 _type6, _arg6, _type7, _arg7) \
374 EFSYS_PROBE6(_name, _type1, _arg1, _type2, _arg2, \
375 _type3, _arg3, _type4, _arg4, _type5, _arg5, \
379 #endif /* DTRACE_PROBE */
/* DMA (bus) address as seen by the NIC; 64-bit regardless of host width. */
typedef uint64_t efsys_dma_addr_t;
385 typedef struct efsys_mem_s {
386 bus_dma_tag_t esm_tag;
387 bus_dmamap_t esm_map;
389 efsys_dma_addr_t esm_addr;
393 #define EFSYS_MEM_ZERO(_esmp, _size) \
395 (void) memset((_esmp)->esm_base, 0, (_size)); \
397 _NOTE(CONSTANTCONDITION) \
400 #define EFSYS_MEM_READD(_esmp, _offset, _edp) \
404 _NOTE(CONSTANTCONDITION) \
405 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
406 ("not power of 2 aligned")); \
408 addr = (void *)((_esmp)->esm_base + (_offset)); \
410 (_edp)->ed_u32[0] = *addr; \
412 EFSYS_PROBE2(mem_readd, unsigned int, (_offset), \
413 uint32_t, (_edp)->ed_u32[0]); \
415 _NOTE(CONSTANTCONDITION) \
418 #if defined(__x86_64__)
419 #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
423 _NOTE(CONSTANTCONDITION) \
424 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
425 ("not power of 2 aligned")); \
427 addr = (void *)((_esmp)->esm_base + (_offset)); \
429 (_eqp)->eq_u64[0] = *addr; \
431 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
432 uint32_t, (_eqp)->eq_u32[1], \
433 uint32_t, (_eqp)->eq_u32[0]); \
435 _NOTE(CONSTANTCONDITION) \
438 #define EFSYS_MEM_READQ(_esmp, _offset, _eqp) \
442 _NOTE(CONSTANTCONDITION) \
443 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
444 ("not power of 2 aligned")); \
446 addr = (void *)((_esmp)->esm_base + (_offset)); \
448 (_eqp)->eq_u32[0] = *addr++; \
449 (_eqp)->eq_u32[1] = *addr; \
451 EFSYS_PROBE3(mem_readq, unsigned int, (_offset), \
452 uint32_t, (_eqp)->eq_u32[1], \
453 uint32_t, (_eqp)->eq_u32[0]); \
455 _NOTE(CONSTANTCONDITION) \
459 #if defined(__x86_64__)
460 #define EFSYS_MEM_READO(_esmp, _offset, _eop) \
464 _NOTE(CONSTANTCONDITION) \
465 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
466 ("not power of 2 aligned")); \
468 addr = (void *)((_esmp)->esm_base + (_offset)); \
470 (_eop)->eo_u64[0] = *addr++; \
471 (_eop)->eo_u64[1] = *addr; \
473 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
474 uint32_t, (_eop)->eo_u32[3], \
475 uint32_t, (_eop)->eo_u32[2], \
476 uint32_t, (_eop)->eo_u32[1], \
477 uint32_t, (_eop)->eo_u32[0]); \
479 _NOTE(CONSTANTCONDITION) \
482 #define EFSYS_MEM_READO(_esmp, _offset, _eop) \
486 _NOTE(CONSTANTCONDITION) \
487 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
488 ("not power of 2 aligned")); \
490 addr = (void *)((_esmp)->esm_base + (_offset)); \
492 (_eop)->eo_u32[0] = *addr++; \
493 (_eop)->eo_u32[1] = *addr++; \
494 (_eop)->eo_u32[2] = *addr++; \
495 (_eop)->eo_u32[3] = *addr; \
497 EFSYS_PROBE5(mem_reado, unsigned int, (_offset), \
498 uint32_t, (_eop)->eo_u32[3], \
499 uint32_t, (_eop)->eo_u32[2], \
500 uint32_t, (_eop)->eo_u32[1], \
501 uint32_t, (_eop)->eo_u32[0]); \
503 _NOTE(CONSTANTCONDITION) \
507 #define EFSYS_MEM_WRITED(_esmp, _offset, _edp) \
511 _NOTE(CONSTANTCONDITION) \
512 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
513 ("not power of 2 aligned")); \
515 EFSYS_PROBE2(mem_writed, unsigned int, (_offset), \
516 uint32_t, (_edp)->ed_u32[0]); \
518 addr = (void *)((_esmp)->esm_base + (_offset)); \
520 *addr = (_edp)->ed_u32[0]; \
522 _NOTE(CONSTANTCONDITION) \
525 #if defined(__x86_64__)
526 #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
530 _NOTE(CONSTANTCONDITION) \
531 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
532 ("not power of 2 aligned")); \
534 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
535 uint32_t, (_eqp)->eq_u32[1], \
536 uint32_t, (_eqp)->eq_u32[0]); \
538 addr = (void *)((_esmp)->esm_base + (_offset)); \
540 *addr = (_eqp)->eq_u64[0]; \
542 _NOTE(CONSTANTCONDITION) \
546 #define EFSYS_MEM_WRITEQ(_esmp, _offset, _eqp) \
550 _NOTE(CONSTANTCONDITION) \
551 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
552 ("not power of 2 aligned")); \
554 EFSYS_PROBE3(mem_writeq, unsigned int, (_offset), \
555 uint32_t, (_eqp)->eq_u32[1], \
556 uint32_t, (_eqp)->eq_u32[0]); \
558 addr = (void *)((_esmp)->esm_base + (_offset)); \
560 *addr++ = (_eqp)->eq_u32[0]; \
561 *addr = (_eqp)->eq_u32[1]; \
563 _NOTE(CONSTANTCONDITION) \
567 #if defined(__x86_64__)
568 #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
572 _NOTE(CONSTANTCONDITION) \
573 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
574 ("not power of 2 aligned")); \
576 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
577 uint32_t, (_eop)->eo_u32[3], \
578 uint32_t, (_eop)->eo_u32[2], \
579 uint32_t, (_eop)->eo_u32[1], \
580 uint32_t, (_eop)->eo_u32[0]); \
582 addr = (void *)((_esmp)->esm_base + (_offset)); \
584 *addr++ = (_eop)->eo_u64[0]; \
585 *addr = (_eop)->eo_u64[1]; \
587 _NOTE(CONSTANTCONDITION) \
590 #define EFSYS_MEM_WRITEO(_esmp, _offset, _eop) \
594 _NOTE(CONSTANTCONDITION) \
595 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
596 ("not power of 2 aligned")); \
598 EFSYS_PROBE5(mem_writeo, unsigned int, (_offset), \
599 uint32_t, (_eop)->eo_u32[3], \
600 uint32_t, (_eop)->eo_u32[2], \
601 uint32_t, (_eop)->eo_u32[1], \
602 uint32_t, (_eop)->eo_u32[0]); \
604 addr = (void *)((_esmp)->esm_base + (_offset)); \
606 *addr++ = (_eop)->eo_u32[0]; \
607 *addr++ = (_eop)->eo_u32[1]; \
608 *addr++ = (_eop)->eo_u32[2]; \
609 *addr = (_eop)->eo_u32[3]; \
611 _NOTE(CONSTANTCONDITION) \
615 #define EFSYS_MEM_ADDR(_esmp) \
/* True when the DMA memory descriptor has no kernel virtual mapping. */
#define EFSYS_MEM_IS_NULL(_esmp) \
	((_esmp)->esm_base == NULL)
/* Maximum length (including NUL) of the names given to mutexes below. */
#define SFXGE_LOCK_NAME_MAX 16
625 typedef struct efsys_bar_s {
627 char esb_lock_name[SFXGE_LOCK_NAME_MAX];
628 bus_space_tag_t esb_tag;
629 bus_space_handle_t esb_handle;
631 struct resource *esb_res;
634 #define SFXGE_BAR_LOCK_INIT(_esbp, _ifname) \
636 snprintf((_esbp)->esb_lock_name, \
637 sizeof((_esbp)->esb_lock_name), \
638 "%s:bar", (_ifname)); \
639 mtx_init(&(_esbp)->esb_lock, (_esbp)->esb_lock_name, \
641 _NOTE(CONSTANTCONDITION) \
/* Tear down the BAR access mutex (set up by SFXGE_BAR_LOCK_INIT above). */
#define SFXGE_BAR_LOCK_DESTROY(_esbp) \
	mtx_destroy(&(_esbp)->esb_lock)
/* Acquire the BAR access mutex: multi-dword BAR accesses must not interleave. */
#define SFXGE_BAR_LOCK(_esbp) \
	mtx_lock(&(_esbp)->esb_lock)
/* Release the BAR access mutex. */
#define SFXGE_BAR_UNLOCK(_esbp) \
	mtx_unlock(&(_esbp)->esb_lock)
650 #define EFSYS_BAR_READD(_esbp, _offset, _edp, _lock) \
652 _NOTE(CONSTANTCONDITION) \
653 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
654 ("not power of 2 aligned")); \
656 _NOTE(CONSTANTCONDITION) \
658 SFXGE_BAR_LOCK(_esbp); \
660 (_edp)->ed_u32[0] = bus_space_read_stream_4( \
661 (_esbp)->esb_tag, (_esbp)->esb_handle, \
664 EFSYS_PROBE2(bar_readd, unsigned int, (_offset), \
665 uint32_t, (_edp)->ed_u32[0]); \
667 _NOTE(CONSTANTCONDITION) \
669 SFXGE_BAR_UNLOCK(_esbp); \
670 _NOTE(CONSTANTCONDITION) \
673 #if defined(SFXGE_USE_BUS_SPACE_8)
674 #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
676 _NOTE(CONSTANTCONDITION) \
677 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
678 ("not power of 2 aligned")); \
680 SFXGE_BAR_LOCK(_esbp); \
682 (_eqp)->eq_u64[0] = bus_space_read_stream_8( \
683 (_esbp)->esb_tag, (_esbp)->esb_handle, \
686 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
687 uint32_t, (_eqp)->eq_u32[1], \
688 uint32_t, (_eqp)->eq_u32[0]); \
690 SFXGE_BAR_UNLOCK(_esbp); \
691 _NOTE(CONSTANTCONDITION) \
694 #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
696 _NOTE(CONSTANTCONDITION) \
697 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
698 ("not power of 2 aligned")); \
700 _NOTE(CONSTANTCONDITION) \
702 SFXGE_BAR_LOCK(_esbp); \
704 (_eop)->eo_u64[0] = bus_space_read_stream_8( \
705 (_esbp)->esb_tag, (_esbp)->esb_handle, \
707 (_eop)->eo_u64[1] = bus_space_read_stream_8( \
708 (_esbp)->esb_tag, (_esbp)->esb_handle, \
711 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
712 uint32_t, (_eop)->eo_u32[3], \
713 uint32_t, (_eop)->eo_u32[2], \
714 uint32_t, (_eop)->eo_u32[1], \
715 uint32_t, (_eop)->eo_u32[0]); \
717 _NOTE(CONSTANTCONDITION) \
719 SFXGE_BAR_UNLOCK(_esbp); \
720 _NOTE(CONSTANTCONDITION) \
724 #define EFSYS_BAR_READQ(_esbp, _offset, _eqp) \
726 _NOTE(CONSTANTCONDITION) \
727 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
728 ("not power of 2 aligned")); \
730 SFXGE_BAR_LOCK(_esbp); \
732 (_eqp)->eq_u32[0] = bus_space_read_stream_4( \
733 (_esbp)->esb_tag, (_esbp)->esb_handle, \
735 (_eqp)->eq_u32[1] = bus_space_read_stream_4( \
736 (_esbp)->esb_tag, (_esbp)->esb_handle, \
739 EFSYS_PROBE3(bar_readq, unsigned int, (_offset), \
740 uint32_t, (_eqp)->eq_u32[1], \
741 uint32_t, (_eqp)->eq_u32[0]); \
743 SFXGE_BAR_UNLOCK(_esbp); \
744 _NOTE(CONSTANTCONDITION) \
747 #define EFSYS_BAR_READO(_esbp, _offset, _eop, _lock) \
749 _NOTE(CONSTANTCONDITION) \
750 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
751 ("not power of 2 aligned")); \
753 _NOTE(CONSTANTCONDITION) \
755 SFXGE_BAR_LOCK(_esbp); \
757 (_eop)->eo_u32[0] = bus_space_read_stream_4( \
758 (_esbp)->esb_tag, (_esbp)->esb_handle, \
760 (_eop)->eo_u32[1] = bus_space_read_stream_4( \
761 (_esbp)->esb_tag, (_esbp)->esb_handle, \
763 (_eop)->eo_u32[2] = bus_space_read_stream_4( \
764 (_esbp)->esb_tag, (_esbp)->esb_handle, \
766 (_eop)->eo_u32[3] = bus_space_read_stream_4( \
767 (_esbp)->esb_tag, (_esbp)->esb_handle, \
770 EFSYS_PROBE5(bar_reado, unsigned int, (_offset), \
771 uint32_t, (_eop)->eo_u32[3], \
772 uint32_t, (_eop)->eo_u32[2], \
773 uint32_t, (_eop)->eo_u32[1], \
774 uint32_t, (_eop)->eo_u32[0]); \
776 _NOTE(CONSTANTCONDITION) \
778 SFXGE_BAR_UNLOCK(_esbp); \
779 _NOTE(CONSTANTCONDITION) \
783 #define EFSYS_BAR_WRITED(_esbp, _offset, _edp, _lock) \
785 _NOTE(CONSTANTCONDITION) \
786 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_dword_t)), \
787 ("not power of 2 aligned")); \
789 _NOTE(CONSTANTCONDITION) \
791 SFXGE_BAR_LOCK(_esbp); \
793 EFSYS_PROBE2(bar_writed, unsigned int, (_offset), \
794 uint32_t, (_edp)->ed_u32[0]); \
797 * Make sure that previous writes to the dword have \
798 * been done. It should be cheaper than barrier just \
799 * after the write below. \
801 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
802 (_offset), sizeof (efx_dword_t), \
803 BUS_SPACE_BARRIER_WRITE); \
804 bus_space_write_stream_4((_esbp)->esb_tag, \
805 (_esbp)->esb_handle, \
806 (_offset), (_edp)->ed_u32[0]); \
808 _NOTE(CONSTANTCONDITION) \
810 SFXGE_BAR_UNLOCK(_esbp); \
811 _NOTE(CONSTANTCONDITION) \
814 #if defined(SFXGE_USE_BUS_SPACE_8)
815 #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
817 _NOTE(CONSTANTCONDITION) \
818 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
819 ("not power of 2 aligned")); \
821 SFXGE_BAR_LOCK(_esbp); \
823 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
824 uint32_t, (_eqp)->eq_u32[1], \
825 uint32_t, (_eqp)->eq_u32[0]); \
828 * Make sure that previous writes to the qword have \
829 * been done. It should be cheaper than barrier just \
830 * after the write below. \
832 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
833 (_offset), sizeof (efx_qword_t), \
834 BUS_SPACE_BARRIER_WRITE); \
835 bus_space_write_stream_8((_esbp)->esb_tag, \
836 (_esbp)->esb_handle, \
837 (_offset), (_eqp)->eq_u64[0]); \
839 SFXGE_BAR_UNLOCK(_esbp); \
840 _NOTE(CONSTANTCONDITION) \
843 #define EFSYS_BAR_WRITEQ(_esbp, _offset, _eqp) \
845 _NOTE(CONSTANTCONDITION) \
846 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
847 ("not power of 2 aligned")); \
849 SFXGE_BAR_LOCK(_esbp); \
851 EFSYS_PROBE3(bar_writeq, unsigned int, (_offset), \
852 uint32_t, (_eqp)->eq_u32[1], \
853 uint32_t, (_eqp)->eq_u32[0]); \
856 * Make sure that previous writes to the qword have \
857 * been done. It should be cheaper than barrier just \
858 * after the last write below. \
860 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
861 (_offset), sizeof (efx_qword_t), \
862 BUS_SPACE_BARRIER_WRITE); \
863 bus_space_write_stream_4((_esbp)->esb_tag, \
864 (_esbp)->esb_handle, \
865 (_offset), (_eqp)->eq_u32[0]); \
867 * It should be guaranteed that the last dword comes \
868 * the last, so barrier entire qword to be sure that \
869 * neither above nor below writes are reordered. \
871 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
872 (_offset), sizeof (efx_qword_t), \
873 BUS_SPACE_BARRIER_WRITE); \
874 bus_space_write_stream_4((_esbp)->esb_tag, \
875 (_esbp)->esb_handle, \
876 (_offset) + 4, (_eqp)->eq_u32[1]); \
878 SFXGE_BAR_UNLOCK(_esbp); \
879 _NOTE(CONSTANTCONDITION) \
884 * Guarantees 64bit aligned 64bit writes to write combined BAR mapping
885 * (required by PIO hardware)
887 #define EFSYS_BAR_WC_WRITEQ(_esbp, _offset, _eqp) \
889 _NOTE(CONSTANTCONDITION) \
890 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_qword_t)), \
891 ("not power of 2 aligned")); \
895 /* FIXME: Perform a 64-bit write */ \
896 KASSERT(0, ("not implemented")); \
898 _NOTE(CONSTANTCONDITION) \
901 #if defined(SFXGE_USE_BUS_SPACE_8)
902 #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
904 _NOTE(CONSTANTCONDITION) \
905 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
906 ("not power of 2 aligned")); \
908 _NOTE(CONSTANTCONDITION) \
910 SFXGE_BAR_LOCK(_esbp); \
912 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
913 uint32_t, (_eop)->eo_u32[3], \
914 uint32_t, (_eop)->eo_u32[2], \
915 uint32_t, (_eop)->eo_u32[1], \
916 uint32_t, (_eop)->eo_u32[0]); \
919 * Make sure that previous writes to the oword have \
920 * been done. It should be cheaper than barrier just \
921 * after the last write below. \
923 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
924 (_offset), sizeof (efx_oword_t), \
925 BUS_SPACE_BARRIER_WRITE); \
926 bus_space_write_stream_8((_esbp)->esb_tag, \
927 (_esbp)->esb_handle, \
928 (_offset), (_eop)->eo_u64[0]); \
930 * It should be guaranteed that the last qword comes \
931 * the last, so barrier entire oword to be sure that \
932 * neither above nor below writes are reordered. \
934 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
935 (_offset), sizeof (efx_oword_t), \
936 BUS_SPACE_BARRIER_WRITE); \
937 bus_space_write_stream_8((_esbp)->esb_tag, \
938 (_esbp)->esb_handle, \
939 (_offset) + 8, (_eop)->eo_u64[1]); \
941 _NOTE(CONSTANTCONDITION) \
943 SFXGE_BAR_UNLOCK(_esbp); \
944 _NOTE(CONSTANTCONDITION) \
948 #define EFSYS_BAR_WRITEO(_esbp, _offset, _eop, _lock) \
950 _NOTE(CONSTANTCONDITION) \
951 KASSERT(IS_P2ALIGNED(_offset, sizeof (efx_oword_t)), \
952 ("not power of 2 aligned")); \
954 _NOTE(CONSTANTCONDITION) \
956 SFXGE_BAR_LOCK(_esbp); \
958 EFSYS_PROBE5(bar_writeo, unsigned int, (_offset), \
959 uint32_t, (_eop)->eo_u32[3], \
960 uint32_t, (_eop)->eo_u32[2], \
961 uint32_t, (_eop)->eo_u32[1], \
962 uint32_t, (_eop)->eo_u32[0]); \
965 * Make sure that previous writes to the oword have \
966 * been done. It should be cheaper than barrier just \
967 * after the last write below. \
969 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
970 (_offset), sizeof (efx_oword_t), \
971 BUS_SPACE_BARRIER_WRITE); \
972 bus_space_write_stream_4((_esbp)->esb_tag, \
973 (_esbp)->esb_handle, \
974 (_offset), (_eop)->eo_u32[0]); \
975 bus_space_write_stream_4((_esbp)->esb_tag, \
976 (_esbp)->esb_handle, \
977 (_offset) + 4, (_eop)->eo_u32[1]); \
978 bus_space_write_stream_4((_esbp)->esb_tag, \
979 (_esbp)->esb_handle, \
980 (_offset) + 8, (_eop)->eo_u32[2]); \
982 * It should be guaranteed that the last dword comes \
983 * the last, so barrier entire oword to be sure that \
984 * neither above nor below writes are reordered. \
986 bus_space_barrier((_esbp)->esb_tag, (_esbp)->esb_handle,\
987 (_offset), sizeof (efx_oword_t), \
988 BUS_SPACE_BARRIER_WRITE); \
989 bus_space_write_stream_4((_esbp)->esb_tag, \
990 (_esbp)->esb_handle, \
991 (_offset) + 12, (_eop)->eo_u32[3]); \
993 _NOTE(CONSTANTCONDITION) \
995 SFXGE_BAR_UNLOCK(_esbp); \
996 _NOTE(CONSTANTCONDITION) \
1000 /* Use the standard octo-word write for doorbell writes */
1001 #define EFSYS_BAR_DOORBELL_WRITEO(_esbp, _offset, _eop) \
1003 EFSYS_BAR_WRITEO((_esbp), (_offset), (_eop), B_FALSE); \
1004 _NOTE(CONSTANTCONDITION) \
1009 #define EFSYS_SPIN(_us) \
1012 _NOTE(CONSTANTCONDITION) \
/* Common code may call this in non-sleepable contexts: busy-wait instead. */
#define EFSYS_SLEEP EFSYS_SPIN

/* Read barrier for DMA-coherent memory. */
#define EFSYS_MEM_READ_BARRIER() rmb()
/*
 * NOTE(review): PIO write barrier is intentionally a no-op here --
 * presumably ordering is supplied by the explicit bus_space_barrier()
 * calls in the EFSYS_BAR_WRITE* macros above; confirm before relying on it.
 */
#define EFSYS_PIO_WRITE_BARRIER()
1023 #define EFSYS_DMA_SYNC_FOR_KERNEL(_esmp, _offset, _size) \
1025 bus_dmamap_sync((_esmp)->esm_tag, \
1027 BUS_DMASYNC_POSTREAD); \
1028 _NOTE(CONSTANTCONDITION) \
1031 #define EFSYS_DMA_SYNC_FOR_DEVICE(_esmp, _offset, _size) \
1033 bus_dmamap_sync((_esmp)->esm_tag, \
1035 BUS_DMASYNC_PREWRITE); \
1036 _NOTE(CONSTANTCONDITION) \
/* Timestamp type produced by EFSYS_TIMESTAMP (derived from 'ticks'). */
typedef clock_t efsys_timestamp_t;
1043 #define EFSYS_TIMESTAMP(_usp) \
1048 *(_usp) = now * hz / 1000000; \
1049 _NOTE(CONSTANTCONDITION) \
1054 #define EFSYS_KMEM_ALLOC(_esip, _size, _p) \
1056 (_esip) = (_esip); \
1058 * The macro is used in non-sleepable contexts, for \
1059 * example, holding a mutex. \
1061 (_p) = malloc((_size), M_SFXGE, M_NOWAIT|M_ZERO); \
1062 _NOTE(CONSTANTCONDITION) \
1065 #define EFSYS_KMEM_FREE(_esip, _size, _p) \
1069 free((_p), M_SFXGE); \
1070 _NOTE(CONSTANTCONDITION) \
1075 typedef struct efsys_lock_s {
1077 char lock_name[SFXGE_LOCK_NAME_MAX];
1080 #define SFXGE_EFSYS_LOCK_INIT(_eslp, _ifname, _label) \
1082 efsys_lock_t *__eslp = (_eslp); \
1084 snprintf((__eslp)->lock_name, \
1085 sizeof((__eslp)->lock_name), \
1086 "%s:%s", (_ifname), (_label)); \
1087 mtx_init(&(__eslp)->lock, (__eslp)->lock_name, \
/* Tear down a common-code lock created by SFXGE_EFSYS_LOCK_INIT above. */
#define SFXGE_EFSYS_LOCK_DESTROY(_eslp) \
	mtx_destroy(&(_eslp)->lock)
/* Acquire a common-code lock. */
#define SFXGE_EFSYS_LOCK(_eslp) \
	mtx_lock(&(_eslp)->lock)
/* Release a common-code lock. */
#define SFXGE_EFSYS_UNLOCK(_eslp) \
	mtx_unlock(&(_eslp)->lock)
/* Assert that the lock is held by the current thread. */
#define SFXGE_EFSYS_LOCK_ASSERT_OWNED(_eslp) \
	mtx_assert(&(_eslp)->lock, MA_OWNED)

/*
 * Lock-state token: EFSYS_LOCK stores EFSYS_LOCK_MAGIC into it and
 * EFSYS_UNLOCK asserts it, catching unbalanced lock/unlock pairs.
 */
typedef int efsys_lock_state_t;

#define EFSYS_LOCK_MAGIC 0x000010c4
1103 #define EFSYS_LOCK(_lockp, _state) \
1105 SFXGE_EFSYS_LOCK(_lockp); \
1106 (_state) = EFSYS_LOCK_MAGIC; \
1107 _NOTE(CONSTANTCONDITION) \
1110 #define EFSYS_UNLOCK(_lockp, _state) \
1112 if ((_state) != EFSYS_LOCK_MAGIC) \
1113 KASSERT(B_FALSE, ("not locked")); \
1114 SFXGE_EFSYS_UNLOCK(_lockp); \
1115 _NOTE(CONSTANTCONDITION) \
1120 typedef uint64_t efsys_stat_t;
1122 #define EFSYS_STAT_INCR(_knp, _delta) \
1124 *(_knp) += (_delta); \
1125 _NOTE(CONSTANTCONDITION) \
1128 #define EFSYS_STAT_DECR(_knp, _delta) \
1130 *(_knp) -= (_delta); \
1131 _NOTE(CONSTANTCONDITION) \
1134 #define EFSYS_STAT_SET(_knp, _val) \
1137 _NOTE(CONSTANTCONDITION) \
1140 #define EFSYS_STAT_SET_QWORD(_knp, _valp) \
1142 *(_knp) = le64toh((_valp)->eq_u64[0]); \
1143 _NOTE(CONSTANTCONDITION) \
1146 #define EFSYS_STAT_SET_DWORD(_knp, _valp) \
1148 *(_knp) = le32toh((_valp)->ed_u32[0]); \
1149 _NOTE(CONSTANTCONDITION) \
1152 #define EFSYS_STAT_INCR_QWORD(_knp, _valp) \
1154 *(_knp) += le64toh((_valp)->eq_u64[0]); \
1155 _NOTE(CONSTANTCONDITION) \
1158 #define EFSYS_STAT_SUBR_QWORD(_knp, _valp) \
1160 *(_knp) -= le64toh((_valp)->eq_u64[0]); \
1161 _NOTE(CONSTANTCONDITION) \
/*
 * Fatal-error reporting hook implemented by the driver proper; called by
 * EFSYS_ERR with an error code and two raw dwords of diagnostic data.
 */
extern void sfxge_err(efsys_identifier_t *, unsigned int,
	uint32_t, uint32_t);
1169 #if EFSYS_OPT_DECODE_INTR_FATAL
1170 #define EFSYS_ERR(_esip, _code, _dword0, _dword1) \
1172 sfxge_err((_esip), (_code), (_dword0), (_dword1)); \
1173 _NOTE(CONSTANTCONDITION) \
1179 #define EFSYS_ASSERT(_exp) do { \
1181 panic("%s", #_exp); \
1184 #define EFSYS_ASSERT3(_x, _op, _y, _t) do { \
1185 const _t __x = (_t)(_x); \
1186 const _t __y = (_t)(_y); \
1187 if (!(__x _op __y)) \
1188 panic("assertion failed at %s:%u", __FILE__, __LINE__); \
/* Typed three-way assertions: unsigned, signed and pointer comparisons. */
#define EFSYS_ASSERT3U(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uint64_t)
#define EFSYS_ASSERT3S(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, int64_t)
#define EFSYS_ASSERT3P(_x, _op, _y) EFSYS_ASSERT3(_x, _op, _y, uintptr_t)

/* This port does not supply a rotate-left-dword helper to the common code. */
#define EFSYS_HAS_ROTL_DWORD 0
1203 #endif /* _SYS_EFSYS_H */