/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (c) 2011, Bryan Venteicher <bryanv@FreeBSD.org>
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */

#ifndef _IF_VTNETVAR_H
#define _IF_VTNETVAR_H

struct vtnet_softc;

struct vtnet_statistics {
	uint64_t	mbuf_alloc_failed;

	uint64_t	rx_frame_too_large;
	uint64_t	rx_enq_replacement_failed;
	uint64_t	rx_mergeable_failed;
	uint64_t	rx_csum_bad_ethtype;
	uint64_t	rx_csum_bad_ipproto;
	uint64_t	rx_csum_bad_offset;
	uint64_t	rx_csum_bad_proto;
	uint64_t	tx_csum_bad_ethtype;
	uint64_t	tx_tso_bad_ethtype;
	uint64_t	tx_tso_not_tcp;
	uint64_t	tx_defragged;
	uint64_t	tx_defrag_failed;

	/*
	 * These are accumulated from each Rx/Tx queue.
	 */
	uint64_t	rx_csum_failed;
	uint64_t	rx_csum_offloaded;
	uint64_t	rx_task_rescheduled;
	uint64_t	tx_csum_offloaded;
	uint64_t	tx_tso_offloaded;
	uint64_t	tx_task_rescheduled;
};

struct vtnet_rxq_stats {
	uint64_t	vrxs_ipackets;	/* if_ipackets */
	uint64_t	vrxs_ibytes;	/* if_ibytes */
	uint64_t	vrxs_iqdrops;	/* if_iqdrops */
	uint64_t	vrxs_ierrors;	/* if_ierrors */
	uint64_t	vrxs_csum;
	uint64_t	vrxs_csum_failed;
	uint64_t	vrxs_rescheduled;
};

struct vtnet_rxq {
	struct mtx		 vtnrx_mtx;
	struct vtnet_softc	*vtnrx_sc;
	struct virtqueue	*vtnrx_vq;
	struct sglist		*vtnrx_sg;
	int			 vtnrx_id;
	struct vtnet_rxq_stats	 vtnrx_stats;
	struct taskqueue	*vtnrx_tq;
	struct task		 vtnrx_intrtask;
#ifdef DEV_NETMAP
	struct virtio_net_hdr_mrg_rxbuf vtnrx_shrhdr;
#endif /* DEV_NETMAP */
	char			 vtnrx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_RXQ_LOCK(_rxq)	mtx_lock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_UNLOCK(_rxq)	mtx_unlock(&(_rxq)->vtnrx_mtx)
#define VTNET_RXQ_LOCK_ASSERT(_rxq)		\
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_OWNED)
#define VTNET_RXQ_LOCK_ASSERT_NOTOWNED(_rxq)	\
    mtx_assert(&(_rxq)->vtnrx_mtx, MA_NOTOWNED)

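/*
 * Illustrative sketch only (the real handlers live in if_vtnet.c): a
 * taskqueue or interrupt handler is expected to hold the queue lock
 * around any work on vtnrx_vq. The handler name below is hypothetical:
 *
 *	static void
 *	example_rxq_task(void *xrxq, int pending)
 *	{
 *		struct vtnet_rxq *rxq = xrxq;
 *
 *		VTNET_RXQ_LOCK(rxq);
 *		// ... drain completed buffers from rxq->vtnrx_vq ...
 *		VTNET_RXQ_UNLOCK(rxq);
 *	}
 */
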
struct vtnet_txq_stats {
	uint64_t	vtxs_opackets;	/* if_opackets */
	uint64_t	vtxs_obytes;	/* if_obytes */
	uint64_t	vtxs_omcasts;	/* if_omcasts */
	uint64_t	vtxs_csum;
	uint64_t	vtxs_tso;
	uint64_t	vtxs_rescheduled;
};

struct vtnet_txq {
	struct mtx		 vtntx_mtx;
	struct vtnet_softc	*vtntx_sc;
	struct virtqueue	*vtntx_vq;
	struct sglist		*vtntx_sg;
#ifndef VTNET_LEGACY_TX
	struct buf_ring		*vtntx_br;
#endif
	int			 vtntx_id;
	struct vtnet_txq_stats	 vtntx_stats;
	struct taskqueue	*vtntx_tq;
	struct task		 vtntx_intrtask;
#ifndef VTNET_LEGACY_TX
	struct task		 vtntx_defrtask;
#endif
#ifdef DEV_NETMAP
	struct virtio_net_hdr_mrg_rxbuf vtntx_shrhdr;
#endif /* DEV_NETMAP */
	char			 vtntx_name[16];
} __aligned(CACHE_LINE_SIZE);

#define VTNET_TXQ_LOCK(_txq)	mtx_lock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_TRYLOCK(_txq)	mtx_trylock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_UNLOCK(_txq)	mtx_unlock(&(_txq)->vtntx_mtx)
#define VTNET_TXQ_LOCK_ASSERT(_txq)		\
    mtx_assert(&(_txq)->vtntx_mtx, MA_OWNED)
#define VTNET_TXQ_LOCK_ASSERT_NOTOWNED(_txq)	\
    mtx_assert(&(_txq)->vtntx_mtx, MA_NOTOWNED)

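/*
 * Illustrative sketch only (hypothetical, not the driver's actual code):
 * the trylock variant lets a transmit entry point defer contended work
 * instead of blocking, e.g.:
 *
 *	if (VTNET_TXQ_TRYLOCK(txq) != 0) {
 *		// ... enqueue frames onto txq->vtntx_vq ...
 *		VTNET_TXQ_UNLOCK(txq);
 *	} else {
 *		// Lock contended; let the queue's taskqueue finish up.
 *		taskqueue_enqueue(txq->vtntx_tq, &txq->vtntx_intrtask);
 *	}
 */
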
struct vtnet_softc {
	device_t		 vtnet_dev;
	struct ifnet		*vtnet_ifp;
	struct vtnet_rxq	*vtnet_rxqs;
	struct vtnet_txq	*vtnet_txqs;

	uint32_t		 vtnet_flags;
#define VTNET_FLAG_SUSPENDED	 0x0001
#define VTNET_FLAG_MAC		 0x0002
#define VTNET_FLAG_CTRL_VQ	 0x0004
#define VTNET_FLAG_CTRL_RX	 0x0008
#define VTNET_FLAG_CTRL_MAC	 0x0010
#define VTNET_FLAG_VLAN_FILTER	 0x0020
#define VTNET_FLAG_TSO_ECN	 0x0040
#define VTNET_FLAG_MRG_RXBUFS	 0x0080
#define VTNET_FLAG_LRO_NOMRG	 0x0100
#define VTNET_FLAG_MULTIQ	 0x0200
#define VTNET_FLAG_INDIRECT	 0x0400
#define VTNET_FLAG_EVENT_IDX	 0x0800

	int			 vtnet_link_active;
	int			 vtnet_rx_process_limit;
	int			 vtnet_rx_new_clsize;
	int			 vtnet_tx_intr_thresh;
	int			 vtnet_act_vq_pairs;
	int			 vtnet_max_vq_pairs;
	int			 vtnet_requested_vq_pairs;

	struct virtqueue	*vtnet_ctrl_vq;
	struct vtnet_mac_filter	*vtnet_mac_filter;
	uint32_t		*vtnet_vlan_filter;

	uint64_t		 vtnet_features;
	struct vtnet_statistics	 vtnet_stats;
	struct callout		 vtnet_tick_ch;
	struct ifmedia		 vtnet_media;
	eventhandler_tag	 vtnet_vlan_attach;
	eventhandler_tag	 vtnet_vlan_detach;

	struct mtx		 vtnet_mtx;
	char			 vtnet_mtx_name[16];
	char			 vtnet_hwaddr[ETHER_ADDR_LEN];
};

/*
 * Maximum number of queue pairs we will autoconfigure to.
 */
#define VTNET_MAX_QUEUE_PAIRS	8

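/*
 * Illustrative sketch of how such a cap is typically applied when sizing
 * the active queue pairs (hypothetical; the real logic is in if_vtnet.c
 * and also considers the CPU count):
 *
 *	npairs = MIN(sc->vtnet_max_vq_pairs, VTNET_MAX_QUEUE_PAIRS);
 */
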
/*
 * Additional completed entries can appear in a virtqueue before we can
 * reenable interrupts. Number of times to retry before scheduling the
 * taskqueue to process the completed entries.
 */
#define VTNET_INTR_DISABLE_RETRIES	4

/*
 * Similarly, additional completed entries can appear in a virtqueue
 * between when last checked and before notifying the host. Number
 * of times to retry before scheduling the taskqueue to process the
 * completed entries.
 */
#define VTNET_NOTIFY_RETRIES		4

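/*
 * Illustrative sketch of the retry pattern these constants support
 * (hypothetical code; vtnet_rxq_enable_intr() is a stand-in for the
 * driver's helper): keep draining until interrupts can be reenabled with
 * the queue empty, but bound the attempts and fall back to the taskqueue:
 *
 *	tries = 0;
 *	while (vtnet_rxq_enable_intr(rxq) != 0) {
 *		if (++tries >= VTNET_INTR_DISABLE_RETRIES) {
 *			taskqueue_enqueue(rxq->vtnrx_tq, &rxq->vtnrx_intrtask);
 *			break;
 *		}
 *		// ... process the entries that raced in ...
 *	}
 */
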
/*
 * Fake the media type. The host does not provide us with any real media
 * information.
 */
#define VTNET_MEDIATYPE		(IFM_ETHER | IFM_10G_T | IFM_FDX)

/*
 * Number of words to allocate for the VLAN shadow table. There is one
 * bit for each VLAN ID.
 */
#define VTNET_VLAN_FILTER_NWORDS	(4096 / 32)

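/*
 * With one bit per VLAN ID, a 12-bit tag indexes the shadow table as in
 * this illustrative sketch (vid is the 0-4095 VLAN ID):
 *
 *	sc->vtnet_vlan_filter[vid >> 5] |= 1 << (vid & 0x1F);
 */
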
/*
 * When mergeable buffers are not negotiated, the vtnet_rx_header structure
 * below is placed at the beginning of the mbuf data. Use 4 bytes of pad to
 * both keep the VirtIO header and the data non-contiguous and to keep the
 * frame's payload 4 byte aligned.
 *
 * When mergeable buffers are negotiated, the host puts the VirtIO header in
 * the beginning of the first mbuf's data.
 */
#define VTNET_RX_HEADER_PAD	4
struct vtnet_rx_header {
	struct virtio_net_hdr	vrh_hdr;
	char			vrh_pad[VTNET_RX_HEADER_PAD];
} __packed;

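/*
 * Layout arithmetic (assuming the 10-byte legacy virtio_net_hdr): the
 * packed receive header is 10 + 4 = 14 bytes, so after the 14-byte
 * Ethernet header the IP payload begins at offset 28, 4-byte aligned.
 */
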
/*
 * For each outgoing frame, the vtnet_tx_header below is allocated from
 * the vtnet_tx_header_zone.
 */
struct vtnet_tx_header {
	union {
		struct virtio_net_hdr		hdr;
		struct virtio_net_hdr_mrg_rxbuf	mhdr;
	} vth_uhdr;

	struct mbuf *vth_mbuf;
} __packed;

/*
 * The VirtIO specification does not place a limit on the number of MAC
 * addresses the guest driver may request to be filtered. In practice,
 * the host is constrained by available resources. To simplify this driver,
 * impose a reasonably high limit of MAC addresses we will filter before
 * falling back to promiscuous or all-multicast modes.
 */
#define VTNET_MAX_MAC_ENTRIES	128

struct vtnet_mac_table {
	uint32_t	nentries;
	uint8_t		macs[VTNET_MAX_MAC_ENTRIES][ETHER_ADDR_LEN];
} __packed;

struct vtnet_mac_filter {
	struct vtnet_mac_table	vmf_unicast;
	uint32_t		vmf_pad;	/* Make tables non-contiguous. */
	struct vtnet_mac_table	vmf_multicast;
};

/*
 * The MAC filter table is malloc(9)'d when needed. Ensure it will
 * always fit in one segment.
 */
CTASSERT(sizeof(struct vtnet_mac_filter) <= PAGE_SIZE);

#define VTNET_TX_TIMEOUT	5
#define VTNET_CSUM_OFFLOAD	(CSUM_TCP | CSUM_UDP)
#define VTNET_CSUM_OFFLOAD_IPV6	(CSUM_TCP_IPV6 | CSUM_UDP_IPV6)

#define VTNET_CSUM_ALL_OFFLOAD	\
    (VTNET_CSUM_OFFLOAD | VTNET_CSUM_OFFLOAD_IPV6 | CSUM_TSO)

/* Features desired/implemented by this driver. */
#define VTNET_FEATURES \
    (VIRTIO_NET_F_MAC			| \
     VIRTIO_NET_F_STATUS		| \
     VIRTIO_NET_F_CTRL_VQ		| \
     VIRTIO_NET_F_CTRL_RX		| \
     VIRTIO_NET_F_CTRL_MAC_ADDR		| \
     VIRTIO_NET_F_CTRL_VLAN		| \
     VIRTIO_NET_F_CSUM			| \
     VIRTIO_NET_F_GSO			| \
     VIRTIO_NET_F_HOST_TSO4		| \
     VIRTIO_NET_F_HOST_TSO6		| \
     VIRTIO_NET_F_HOST_ECN		| \
     VIRTIO_NET_F_GUEST_CSUM		| \
     VIRTIO_NET_F_GUEST_TSO4		| \
     VIRTIO_NET_F_GUEST_TSO6		| \
     VIRTIO_NET_F_GUEST_ECN		| \
     VIRTIO_NET_F_MRG_RXBUF		| \
     VIRTIO_NET_F_MQ			| \
     VIRTIO_RING_F_EVENT_IDX		| \
     VIRTIO_RING_F_INDIRECT_DESC)

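/*
 * Illustrative sketch of attach-time feature negotiation (hypothetical
 * placement; the real code in if_vtnet.c also masks features according
 * to tunables before negotiating):
 *
 *	sc->vtnet_features = virtio_negotiate_features(dev, VTNET_FEATURES);
 *	if (virtio_with_feature(dev, VIRTIO_NET_F_MRG_RXBUF))
 *		sc->vtnet_flags |= VTNET_FLAG_MRG_RXBUFS;
 */
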
/*
 * The VIRTIO_NET_F_HOST_TSO[46] features permit us to send frames
 * larger than 1514 bytes to the host.
 */
#define VTNET_TSO_FEATURES (VIRTIO_NET_F_GSO | VIRTIO_NET_F_HOST_TSO4 | \
    VIRTIO_NET_F_HOST_TSO6 | VIRTIO_NET_F_HOST_ECN)

/*
 * The VIRTIO_NET_F_GUEST_TSO[46] features permit the host to send us
 * frames larger than 1514 bytes. We do not yet support software LRO
 * via tcp_lro_rx().
 */
#define VTNET_LRO_FEATURES (VIRTIO_NET_F_GUEST_TSO4 | \
    VIRTIO_NET_F_GUEST_TSO6 | VIRTIO_NET_F_GUEST_ECN)

#define VTNET_MAX_MTU		65536
#define VTNET_MAX_RX_SIZE	65550

/*
 * Used to preallocate the Vq indirect descriptors. The first segment
 * is reserved for the header, except for mergeable buffers since the
 * header is placed inline with the data.
 */
#define VTNET_MRG_RX_SEGS	1
#define VTNET_MIN_RX_SEGS	2
#define VTNET_MAX_RX_SEGS	34
#define VTNET_MIN_TX_SEGS	32
#define VTNET_MAX_TX_SEGS	64

/*
 * Assert we can receive and transmit the maximum with regular
 * size clusters.
 */
CTASSERT(((VTNET_MAX_RX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_RX_SIZE);
CTASSERT(((VTNET_MAX_TX_SEGS - 1) * MCLBYTES) >= VTNET_MAX_MTU);

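/*
 * Worked check with 2K clusters (MCLBYTES == 2048): on the receive side
 * (34 - 1) * 2048 = 67584 >= 65550, and on the transmit side
 * (64 - 1) * 2048 = 129024 >= 65536, so both assertions hold.
 */
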
/*
 * Number of slots in the Tx bufrings. This value matches most other
 * multiqueue drivers.
 */
#define VTNET_DEFAULT_BUFRING_SIZE	4096

/*
 * Determine how many mbufs are in each receive buffer. For LRO without
 * mergeable buffers, we must allocate an mbuf chain large enough to
 * hold both the vtnet_rx_header and the maximum receivable data.
 */
#define VTNET_NEEDED_RX_MBUFS(_sc, _clsize)				\
	((_sc)->vtnet_flags & VTNET_FLAG_LRO_NOMRG) == 0 ? 1 :		\
	    howmany(sizeof(struct vtnet_rx_header) + VTNET_MAX_RX_SIZE,	\
	        (_clsize))

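/*
 * Worked example: with VTNET_FLAG_LRO_NOMRG set and 2K clusters, this is
 * howmany(14 + 65550, 2048) = howmany(65564, 2048) = 33 mbufs, which fits
 * the VTNET_MAX_RX_SEGS budget of 34 (one segment reserved for the header).
 */
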
#define VTNET_CORE_MTX(_sc)		&(_sc)->vtnet_mtx
#define VTNET_CORE_LOCK(_sc)		mtx_lock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_UNLOCK(_sc)		mtx_unlock(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_DESTROY(_sc)	mtx_destroy(VTNET_CORE_MTX((_sc)))
#define VTNET_CORE_LOCK_ASSERT(_sc)		\
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_OWNED)
#define VTNET_CORE_LOCK_ASSERT_NOTOWNED(_sc)	\
    mtx_assert(VTNET_CORE_MTX((_sc)), MA_NOTOWNED)

#define VTNET_CORE_LOCK_INIT(_sc) do {					\
    snprintf((_sc)->vtnet_mtx_name, sizeof((_sc)->vtnet_mtx_name),	\
        "%s", device_get_nameunit((_sc)->vtnet_dev));			\
    mtx_init(VTNET_CORE_MTX((_sc)), (_sc)->vtnet_mtx_name,		\
        "VTNET Core Lock", MTX_DEF);					\
} while (0)

#endif /* _IF_VTNETVAR_H */