2 * Copyright (c) 2009-2011 Spectra Logic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * Authors: Justin T. Gibbs (Spectra Logic Corporation)
31 * Alan Somers (Spectra Logic Corporation)
32 * John Suykerbuyk (Spectra Logic Corporation)
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 * \file netback_unit_tests.c
41 * \brief Unit tests for the Xen netback driver.
43 * Due to the driver's use of static functions, these tests cannot be compiled
44 * standalone; they must be #include'd from the driver's .c file.
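 *
 * An illustrative sketch of the include, under the assumption that the
 * driver wraps it in a debug option (the option name XNB_DEBUG is an
 * assumption; check netback.c for the real guard):
 * \code
 * // at the bottom of netback.c
 * #if defined(XNB_DEBUG)
 * #include "netback_unit_tests.c"
 * #endif
 * \endcode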
48 /** Helper macro used to snprintf to a buffer and update the buffer pointer */
49 #define SNCATF(buffer, buflen, ...) do { \
50 size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__); \
51 buffer += new_chars; \
52 /* be careful; snprintf's return value can be > buflen */ \
53 buflen -= MIN(buflen, new_chars); \
56 /* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
57 #define STRINGIFY(x) #x
58 #define TOSTRING(x) STRINGIFY(x)
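/**
 * Illustrative sketch (not part of the suite) of how SNCATF and TOSTRING
 * combine; all names are local to the example:
 * \code
 * char msg[128];
 * char *cursor = msg;
 * size_t space = sizeof(msg);
 * SNCATF(cursor, space, "line %s: ", TOSTRING(__LINE__));
 * SNCATF(cursor, space, "%d tests\n", 5);
 * // cursor advanced and space shrank after each call, so the second
 * // SNCATF appended to, rather than overwrote, the first message.
 * \endcode
 */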
61 * Writes an error message to buffer if cond is false
62 * Note the implied parameters buffer and buflen.
65 #define XNB_ASSERT(cond) ({ \
66 int passed = (cond); \
67 char *_buffer = (buffer); \
68 size_t _buflen = (buflen); \
70 strlcat(_buffer, __func__, _buflen); \
71 strlcat(_buffer, ":" TOSTRING(__LINE__) \
72 " Assertion Error: " #cond "\n", _buflen); \
78 * The signature used by all testcases. If the test writes anything
79 * to buffer, then it will be considered a failure
80 * \param buffer Return storage for error messages
81 * \param buflen The space available in the buffer
83 typedef void testcase_t(char *buffer, size_t buflen);
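/**
 * Illustrative sketch of a minimal testcase (hypothetical; not in the tests
 * array below). XNB_ASSERT expands to code that references the parameters
 * buffer and buflen, so every testcase_t must use exactly those names:
 * \code
 * static void
 * example_testcase(char *buffer, size_t buflen)
 * {
 * 	XNB_ASSERT(2 + 2 == 4);	// passes; writes nothing to buffer
 * 	XNB_ASSERT(2 + 2 == 5);	// fails; appends "example_testcase:<line>
 * 				//	 Assertion Error: 2 + 2 == 5"
 * }
 * \endcode
 */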
86 * Signature used by setup functions
87 * \return nonzero on error
89 typedef int setup_t(void);
91 typedef void teardown_t(void);
93 /** A simple test fixture comprising setup, teardown, and test */
95 /** Will be run before the test to allocate and initialize variables */
98 /** Will be run if setup succeeds */
101 * Cleans up test data whether or not the setup succeeded */
102 teardown_t *teardown;
105 typedef struct test_fixture test_fixture_t;
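/**
 * Illustrative sketch of building a fixture table and running it
 * (hypothetical array; mirrors what xnb_unit_test_main does below):
 * \code
 * test_fixture_t const example_tests[] = {
 * 	{null_setup, xnb_sscanf_llu, null_teardown},
 * 	{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
 * };
 * char results[1024] = "";
 * int failures = xnb_unit_test_runner(example_tests, 2, results,
 *     sizeof(results));
 * \endcode
 */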
107 static void xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
108 uint16_t ip_id, uint16_t ip_p,
109 uint16_t ip_off, uint16_t ip_sum);
110 static void xnb_fill_tcp(struct mbuf *m);
111 static int xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
112 static int xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
113 char *buffer, size_t buflen);
116 null_setup(void) { return 0; }
119 null_teardown(void) { }
121 static setup_t setup_pvt_data;
122 static teardown_t teardown_pvt_data;
123 static testcase_t xnb_ring2pkt_emptyring;
124 static testcase_t xnb_ring2pkt_1req;
125 static testcase_t xnb_ring2pkt_2req;
126 static testcase_t xnb_ring2pkt_3req;
127 static testcase_t xnb_ring2pkt_extra;
128 static testcase_t xnb_ring2pkt_partial;
129 static testcase_t xnb_ring2pkt_wraps;
130 static testcase_t xnb_txpkt2rsp_emptypkt;
131 static testcase_t xnb_txpkt2rsp_1req;
132 static testcase_t xnb_txpkt2rsp_extra;
133 static testcase_t xnb_txpkt2rsp_long;
134 static testcase_t xnb_txpkt2rsp_invalid;
135 static testcase_t xnb_txpkt2rsp_error;
136 static testcase_t xnb_txpkt2rsp_wraps;
137 static testcase_t xnb_pkt2mbufc_empty;
138 static testcase_t xnb_pkt2mbufc_short;
139 static testcase_t xnb_pkt2mbufc_csum;
140 static testcase_t xnb_pkt2mbufc_1cluster;
141 static testcase_t xnb_pkt2mbufc_largecluster;
142 static testcase_t xnb_pkt2mbufc_2cluster;
143 static testcase_t xnb_txpkt2gnttab_empty;
144 static testcase_t xnb_txpkt2gnttab_short;
145 static testcase_t xnb_txpkt2gnttab_2req;
146 static testcase_t xnb_txpkt2gnttab_2cluster;
147 static testcase_t xnb_update_mbufc_short;
148 static testcase_t xnb_update_mbufc_2req;
149 static testcase_t xnb_update_mbufc_2cluster;
150 static testcase_t xnb_mbufc2pkt_empty;
151 static testcase_t xnb_mbufc2pkt_short;
152 static testcase_t xnb_mbufc2pkt_1cluster;
153 static testcase_t xnb_mbufc2pkt_2short;
154 static testcase_t xnb_mbufc2pkt_long;
155 static testcase_t xnb_mbufc2pkt_extra;
156 static testcase_t xnb_mbufc2pkt_nospace;
157 static testcase_t xnb_rxpkt2gnttab_empty;
158 static testcase_t xnb_rxpkt2gnttab_short;
159 static testcase_t xnb_rxpkt2gnttab_2req;
160 static testcase_t xnb_rxpkt2rsp_empty;
161 static testcase_t xnb_rxpkt2rsp_short;
162 static testcase_t xnb_rxpkt2rsp_extra;
163 static testcase_t xnb_rxpkt2rsp_2short;
164 static testcase_t xnb_rxpkt2rsp_2slots;
165 static testcase_t xnb_rxpkt2rsp_copyerror;
166 /* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
167 static testcase_t xnb_add_mbuf_cksum_arp;
168 static testcase_t xnb_add_mbuf_cksum_tcp;
169 static testcase_t xnb_add_mbuf_cksum_udp;
170 static testcase_t xnb_add_mbuf_cksum_icmp;
171 static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
172 static testcase_t xnb_sscanf_llu;
173 static testcase_t xnb_sscanf_lld;
174 static testcase_t xnb_sscanf_hhu;
175 static testcase_t xnb_sscanf_hhd;
176 static testcase_t xnb_sscanf_hhn;
178 /** Private data used by unit tests */
180 gnttab_copy_table gnttab;
181 netif_rx_back_ring_t rxb;
182 netif_rx_front_ring_t rxf;
183 netif_tx_back_ring_t txb;
184 netif_tx_front_ring_t txf;
186 netif_rx_sring_t* rxs;
187 netif_tx_sring_t* txs;
190 static inline void safe_m_freem(struct mbuf **ppMbuf) {
191 if (*ppMbuf != NULL) {
198 * The unit test runner. It will run every supplied test and return an
199 * output message as a string
200 * \param tests An array of tests. Every test will be attempted.
201 * \param ntests The length of tests
202 * \param buffer Return storage for the result string
203 * \param buflen The length of buffer
204 * \return The number of tests that failed
207 xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
214 for (i = 0; i < ntests; i++) {
215 int error = tests[i].setup();
217 SNCATF(buffer, buflen,
218 "Setup failed for test idx %d\n", i);
223 tests[i].test(buffer, buflen);
224 new_chars = strnlen(buffer, buflen);
235 n_passes = ntests - n_failures;
237 SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
239 if (n_failures > 0) {
240 SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
246 /** Number of unit tests. Must match the length of the tests array below */
247 #define TOTAL_TESTS (53)
249 * Max memory available for returning results. 400 chars/test should give
250 * enough space for a five-line error message for every test
252 #define TOTAL_BUFLEN (400 * TOTAL_TESTS + 2)
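/**
 * Illustrative userspace sketch for reading the results. The sysctl OID
 * string below is an assumption; the actual node name is whatever
 * netback.c registers for the handler that follows.
 * \code
 * #include <sys/types.h>
 * #include <sys/sysctl.h>
 * #include <stdio.h>
 * char buf[400 * 53 + 2];
 * size_t len = sizeof(buf);
 * if (sysctlbyname("dev.xnb.0.unit_test_results", buf, &len, NULL, 0) == 0)
 * 	printf("%s", buf);
 * \endcode
 */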
255 * Called from userspace by a sysctl. Runs all internal unit tests, and
256 * returns the results to userspace as a string
258 * \param arg1 pointer to an xnb_softc for a specific xnb device
260 * \param req sysctl access structure
261 * \return a string via the special SYSCTL_OUT macro.
265 xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
266 test_fixture_t const tests[TOTAL_TESTS] = {
267 {setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
268 {setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
269 {setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
270 {setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
271 {setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
272 {setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
273 {setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
274 {setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
275 {setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
276 {setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
277 {setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
278 {setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
279 {setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
280 {setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
281 {setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
282 {setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
283 {setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
284 {setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
285 {setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
286 {setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
287 {setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
288 {setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
289 {setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
290 {setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
291 {setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
292 {setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
293 {setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
294 {setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
295 {setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
296 {setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
297 {setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
298 {setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
299 {setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
300 {setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
301 {setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
302 {setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
303 {setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
304 {setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
305 {setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
306 {setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
307 {setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
308 {setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
309 {setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
310 {null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
311 {null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
312 {null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
313 {null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
314 {null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
315 {null_setup, xnb_sscanf_hhd, null_teardown},
316 {null_setup, xnb_sscanf_hhu, null_teardown},
317 {null_setup, xnb_sscanf_lld, null_teardown},
318 {null_setup, xnb_sscanf_llu, null_teardown},
319 {null_setup, xnb_sscanf_hhn, null_teardown},
322 * results is static so that the data will persist after this function
323 * returns. The sysctl code expects us to return a constant string.
324 * \todo: the static variable is not thread safe. Put a mutex around it.
327 static char results[TOTAL_BUFLEN];
329 /* empty the result strings */
331 xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);
333 return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
341 bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));
343 xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
344 if (xnb_unit_pvt.txs != NULL) {
345 SHARED_RING_INIT(xnb_unit_pvt.txs);
346 BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
347 FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
352 xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
353 if (xnb_unit_pvt.ifp == NULL) {
357 xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
358 if (xnb_unit_pvt.rxs != NULL) {
359 SHARED_RING_INIT(xnb_unit_pvt.rxs);
360 BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
361 FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
370 teardown_pvt_data(void)
372 if (xnb_unit_pvt.txs != NULL) {
373 free(xnb_unit_pvt.txs, M_XENNETBACK);
375 if (xnb_unit_pvt.rxs != NULL) {
376 free(xnb_unit_pvt.rxs, M_XENNETBACK);
378 if (xnb_unit_pvt.ifp != NULL) {
379 if_free(xnb_unit_pvt.ifp);
384 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
387 xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
392 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
393 xnb_unit_pvt.txb.req_cons);
394 XNB_ASSERT(num_consumed == 0);
398 * Verify that xnb_ring2pkt can convert a single request packet correctly
401 xnb_ring2pkt_1req(char *buffer, size_t buflen)
405 struct netif_tx_request *req;
407 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
408 xnb_unit_pvt.txf.req_prod_pvt);
411 req->size = 69; /* arbitrary number for test */
412 xnb_unit_pvt.txf.req_prod_pvt++;
414 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
416 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
417 xnb_unit_pvt.txb.req_cons);
418 XNB_ASSERT(num_consumed == 1);
419 XNB_ASSERT(pkt.size == 69);
420 XNB_ASSERT(pkt.car_size == 69);
421 XNB_ASSERT(pkt.flags == 0);
422 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
423 XNB_ASSERT(pkt.list_len == 1);
424 XNB_ASSERT(pkt.car == 0);
428 * Verify that xnb_ring2pkt can convert a two request packet correctly.
429 * This tests handling of the MORE_DATA flag and cdr
432 xnb_ring2pkt_2req(char *buffer, size_t buflen)
436 struct netif_tx_request *req;
437 RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
439 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
440 xnb_unit_pvt.txf.req_prod_pvt);
441 req->flags = NETTXF_more_data;
443 xnb_unit_pvt.txf.req_prod_pvt++;
445 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
446 xnb_unit_pvt.txf.req_prod_pvt);
449 xnb_unit_pvt.txf.req_prod_pvt++;
451 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
453 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
454 xnb_unit_pvt.txb.req_cons);
455 XNB_ASSERT(num_consumed == 2);
456 XNB_ASSERT(pkt.size == 100);
457 XNB_ASSERT(pkt.car_size == 60);
458 XNB_ASSERT(pkt.flags == 0);
459 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
460 XNB_ASSERT(pkt.list_len == 2);
461 XNB_ASSERT(pkt.car == start_idx);
462 XNB_ASSERT(pkt.cdr == start_idx + 1);
466 * Verify that xnb_ring2pkt can convert a three request packet correctly
469 xnb_ring2pkt_3req(char *buffer, size_t buflen)
473 struct netif_tx_request *req;
474 RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
476 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
477 xnb_unit_pvt.txf.req_prod_pvt);
478 req->flags = NETTXF_more_data;
480 xnb_unit_pvt.txf.req_prod_pvt++;
482 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
483 xnb_unit_pvt.txf.req_prod_pvt);
484 req->flags = NETTXF_more_data;
486 xnb_unit_pvt.txf.req_prod_pvt++;
488 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
489 xnb_unit_pvt.txf.req_prod_pvt);
492 xnb_unit_pvt.txf.req_prod_pvt++;
494 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
496 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
497 xnb_unit_pvt.txb.req_cons);
498 XNB_ASSERT(num_consumed == 3);
499 XNB_ASSERT(pkt.size == 200);
500 XNB_ASSERT(pkt.car_size == 110);
501 XNB_ASSERT(pkt.flags == 0);
502 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
503 XNB_ASSERT(pkt.list_len == 3);
504 XNB_ASSERT(pkt.car == start_idx);
505 XNB_ASSERT(pkt.cdr == start_idx + 1);
506 XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
510 * Verify that xnb_ring2pkt can read extra info
513 xnb_ring2pkt_extra(char *buffer, size_t buflen)
517 struct netif_tx_request *req;
518 struct netif_extra_info *ext;
519 RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
521 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
522 xnb_unit_pvt.txf.req_prod_pvt);
523 req->flags = NETTXF_extra_info | NETTXF_more_data;
525 xnb_unit_pvt.txf.req_prod_pvt++;
527 ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
528 xnb_unit_pvt.txf.req_prod_pvt);
530 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
531 ext->u.gso.size = 250;
532 ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
533 ext->u.gso.features = 0;
534 xnb_unit_pvt.txf.req_prod_pvt++;
536 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
537 xnb_unit_pvt.txf.req_prod_pvt);
540 xnb_unit_pvt.txf.req_prod_pvt++;
542 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
544 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
545 xnb_unit_pvt.txb.req_cons);
546 XNB_ASSERT(num_consumed == 3);
547 XNB_ASSERT(pkt.extra.flags == 0);
548 XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
549 XNB_ASSERT(pkt.extra.u.gso.size == 250);
550 XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
551 XNB_ASSERT(pkt.size == 150);
552 XNB_ASSERT(pkt.car_size == 100);
553 XNB_ASSERT(pkt.flags == NETTXF_extra_info);
554 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
555 XNB_ASSERT(pkt.list_len == 2);
556 XNB_ASSERT(pkt.car == start_idx);
557 XNB_ASSERT(pkt.cdr == start_idx + 2);
558 XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
562 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
563 * not yet in the ring
566 xnb_ring2pkt_partial(char *buffer, size_t buflen)
570 struct netif_tx_request *req;
572 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
573 xnb_unit_pvt.txf.req_prod_pvt);
574 req->flags = NETTXF_more_data;
576 xnb_unit_pvt.txf.req_prod_pvt++;
578 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
580 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
581 xnb_unit_pvt.txb.req_cons);
582 XNB_ASSERT(num_consumed == 0);
583 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
587 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
588 * the end of the ring
591 xnb_ring2pkt_wraps(char *buffer, size_t buflen)
595 struct netif_tx_request *req;
599 * Manually tweak the ring indices to create a ring with no responses
600 * and the next request slot at position 2 from the end
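 * of the ring. A three-request packet pushed from there occupies slots
 * rsize-2, rsize-1, and rsize; RING_GET_REQUEST masks the index with the
 * ring size, so the third request wraps around to slot 0.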
602 rsize = RING_SIZE(&xnb_unit_pvt.txf);
603 xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
604 xnb_unit_pvt.txf.rsp_cons = rsize - 2;
605 xnb_unit_pvt.txs->req_prod = rsize - 2;
606 xnb_unit_pvt.txs->req_event = rsize - 1;
607 xnb_unit_pvt.txs->rsp_prod = rsize - 2;
608 xnb_unit_pvt.txs->rsp_event = rsize - 1;
609 xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
610 xnb_unit_pvt.txb.req_cons = rsize - 2;
612 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
613 xnb_unit_pvt.txf.req_prod_pvt);
614 req->flags = NETTXF_more_data;
616 xnb_unit_pvt.txf.req_prod_pvt++;
618 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
619 xnb_unit_pvt.txf.req_prod_pvt);
620 req->flags = NETTXF_more_data;
622 xnb_unit_pvt.txf.req_prod_pvt++;
624 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
625 xnb_unit_pvt.txf.req_prod_pvt);
628 xnb_unit_pvt.txf.req_prod_pvt++;
630 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
632 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
633 xnb_unit_pvt.txb.req_cons);
634 XNB_ASSERT(num_consumed == 3);
635 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
636 XNB_ASSERT(pkt.list_len == 3);
637 XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
642 * xnb_txpkt2rsp should do nothing for an empty packet
645 xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
649 netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
650 netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
653 /* must call xnb_ring2pkt just to initialize pkt */
654 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
655 xnb_unit_pvt.txb.req_cons);
656 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
658 memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
660 memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
664 * xnb_txpkt2rsp responding to one request
667 xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
669 uint16_t num_consumed;
671 struct netif_tx_request *req;
672 struct netif_tx_response *rsp;
674 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
675 xnb_unit_pvt.txf.req_prod_pvt);
678 xnb_unit_pvt.txf.req_prod_pvt++;
680 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
682 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
683 xnb_unit_pvt.txb.req_cons);
684 xnb_unit_pvt.txb.req_cons += num_consumed;
686 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
687 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
690 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
691 XNB_ASSERT(rsp->id == req->id);
692 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
696 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
699 xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
701 uint16_t num_consumed;
703 struct netif_tx_request *req;
704 netif_extra_info_t *ext;
705 struct netif_tx_response *rsp;
707 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
708 xnb_unit_pvt.txf.req_prod_pvt);
710 req->flags = NETTXF_extra_info;
712 xnb_unit_pvt.txf.req_prod_pvt++;
714 ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
715 xnb_unit_pvt.txf.req_prod_pvt);
716 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
718 xnb_unit_pvt.txf.req_prod_pvt++;
720 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
722 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
723 xnb_unit_pvt.txb.req_cons);
724 xnb_unit_pvt.txb.req_cons += num_consumed;
726 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
729 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
731 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
732 XNB_ASSERT(rsp->id == req->id);
733 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
735 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
736 xnb_unit_pvt.txf.rsp_cons + 1);
737 XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
741 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
744 xnb_txpkt2rsp_long(char *buffer, size_t buflen)
746 uint16_t num_consumed;
748 struct netif_tx_request *req;
749 netif_extra_info_t *ext;
750 struct netif_tx_response *rsp;
752 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
753 xnb_unit_pvt.txf.req_prod_pvt);
755 req->flags = NETTXF_extra_info | NETTXF_more_data;
757 xnb_unit_pvt.txf.req_prod_pvt++;
759 ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
760 xnb_unit_pvt.txf.req_prod_pvt);
761 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
763 xnb_unit_pvt.txf.req_prod_pvt++;
765 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
766 xnb_unit_pvt.txf.req_prod_pvt);
768 req->flags = NETTXF_more_data;
770 xnb_unit_pvt.txf.req_prod_pvt++;
772 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
773 xnb_unit_pvt.txf.req_prod_pvt);
777 xnb_unit_pvt.txf.req_prod_pvt++;
779 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
781 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
782 xnb_unit_pvt.txb.req_cons);
783 xnb_unit_pvt.txb.req_cons += num_consumed;
785 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
788 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
790 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
791 XNB_ASSERT(rsp->id ==
792 RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
793 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
795 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
796 xnb_unit_pvt.txf.rsp_cons + 1);
797 XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
799 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
800 xnb_unit_pvt.txf.rsp_cons + 2);
801 XNB_ASSERT(rsp->id ==
802 RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
803 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
805 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
806 xnb_unit_pvt.txf.rsp_cons + 3);
807 XNB_ASSERT(rsp->id ==
808 RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
809 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
813 * xnb_txpkt2rsp responding to an invalid packet.
814 * Note: this test will result in an error message being printed to the console
816 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255. Discarding packet
819 xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
821 uint16_t num_consumed;
823 struct netif_tx_request *req;
824 netif_extra_info_t *ext;
825 struct netif_tx_response *rsp;
827 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
828 xnb_unit_pvt.txf.req_prod_pvt);
830 req->flags = NETTXF_extra_info;
832 xnb_unit_pvt.txf.req_prod_pvt++;
834 ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
835 xnb_unit_pvt.txf.req_prod_pvt);
836 ext->type = 0xFF; /* Invalid extra type */
838 xnb_unit_pvt.txf.req_prod_pvt++;
840 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
842 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
843 xnb_unit_pvt.txb.req_cons);
844 xnb_unit_pvt.txb.req_cons += num_consumed;
845 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
847 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
850 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
852 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
853 XNB_ASSERT(rsp->id == req->id);
854 XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
856 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
857 xnb_unit_pvt.txf.rsp_cons + 1);
858 XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
862 * xnb_txpkt2rsp responding to one request which caused an error
865 xnb_txpkt2rsp_error(char *buffer, size_t buflen)
867 uint16_t num_consumed;
869 struct netif_tx_request *req;
870 struct netif_tx_response *rsp;
872 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
873 xnb_unit_pvt.txf.req_prod_pvt);
876 xnb_unit_pvt.txf.req_prod_pvt++;
878 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
880 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
881 xnb_unit_pvt.txb.req_cons);
882 xnb_unit_pvt.txb.req_cons += num_consumed;
884 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
885 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
888 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
889 XNB_ASSERT(rsp->id == req->id);
890 XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
894 * xnb_txpkt2rsp's responses wrap around the end of the ring
897 xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
901 struct netif_tx_request *req;
902 struct netif_tx_response *rsp;
906 * Manually tweak the ring indices to create a ring with no responses
907 * and the next request slot at position 2 from the end
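 * of the ring. The three responses generated here land in slots rsize-2,
 * rsize-1, and rsize, so the last response wraps around to slot 0 of the
 * shared ring.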
909 rsize = RING_SIZE(&xnb_unit_pvt.txf);
910 xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
911 xnb_unit_pvt.txf.rsp_cons = rsize - 2;
912 xnb_unit_pvt.txs->req_prod = rsize - 2;
913 xnb_unit_pvt.txs->req_event = rsize - 1;
914 xnb_unit_pvt.txs->rsp_prod = rsize - 2;
915 xnb_unit_pvt.txs->rsp_event = rsize - 1;
916 xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
917 xnb_unit_pvt.txb.req_cons = rsize - 2;
919 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
920 xnb_unit_pvt.txf.req_prod_pvt);
921 req->flags = NETTXF_more_data;
924 xnb_unit_pvt.txf.req_prod_pvt++;
926 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
927 xnb_unit_pvt.txf.req_prod_pvt);
928 req->flags = NETTXF_more_data;
931 xnb_unit_pvt.txf.req_prod_pvt++;
933 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
934 xnb_unit_pvt.txf.req_prod_pvt);
938 xnb_unit_pvt.txf.req_prod_pvt++;
940 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
942 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
943 xnb_unit_pvt.txb.req_cons);
945 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
948 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
949 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
950 xnb_unit_pvt.txf.rsp_cons + 2);
951 XNB_ASSERT(rsp->id == req->id);
952 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
957 * Helper function used to set up pkt2mbufc tests
958 * \param size size in bytes of the single request to push to the ring
959 * \param flags optional flags to put in the netif request
960 * \param[out] pkt the returned packet object
961 * \return number of requests consumed from the ring
964 xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
966 struct netif_tx_request *req;
968 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
969 xnb_unit_pvt.txf.req_prod_pvt);
972 xnb_unit_pvt.txf.req_prod_pvt++;
974 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
976 return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
977 xnb_unit_pvt.txb.req_cons);
981 * xnb_pkt2mbufc on an empty packet
984 xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
991 /* must call xnb_ring2pkt just to initialize pkt */
992 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
993 xnb_unit_pvt.txb.req_cons);
995 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
996 safe_m_freem(&pMbuf);
1000 * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
1003 xnb_pkt2mbufc_short(char *buffer, size_t buflen)
1005 const size_t size = MINCLSIZE - 1;
1009 xnb_get1pkt(&pkt, size, 0);
1011 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1012 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1013 safe_m_freem(&pMbuf);
1017 * xnb_pkt2mbufc on short packet whose checksum was validated by the netfront
1020 xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
1022 const size_t size = MINCLSIZE - 1;
1026 xnb_get1pkt(&pkt, size, NETTXF_data_validated);
1028 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1029 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1030 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
1031 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
1032 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
1033 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
1034 safe_m_freem(&pMbuf);
1038 * xnb_pkt2mbufc on packet that can fit in one cluster
1041 xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
1043 const size_t size = MINCLSIZE;
1047 xnb_get1pkt(&pkt, size, 0);
1049 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1050 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1051 safe_m_freem(&pMbuf);
1055 * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
1058 xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
1060 const size_t size = MCLBYTES + 1;
1064 xnb_get1pkt(&pkt, size, 0);
1066 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1067 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1068 safe_m_freem(&pMbuf);
1072 * xnb_pkt2mbufc on packet that cannot fit in two clusters
1075 xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
1077 const size_t size = 2 * MCLBYTES + 1;
1083 xnb_get1pkt(&pkt, size, 0);
1085 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1087 for (m = pMbuf; m != NULL; m = m->m_next) {
1088 space += M_TRAILINGSPACE(m);
1090 XNB_ASSERT(space >= size);
1091 safe_m_freem(&pMbuf);
1095 * xnb_txpkt2gnttab on an empty packet. Should return empty gnttab
1098 xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
1105 /* must call xnb_ring2pkt just to initialize pkt */
1106 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1108 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1109 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1110 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1111 XNB_ASSERT(n_entries == 0);
1112 safe_m_freem(&pMbuf);
1116 * xnb_txpkt2gnttab on a short packet that can fit in one mbuf internal buffer
1117 * and has one request
1120 xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
1122 const size_t size = MINCLSIZE - 1;
1127 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1128 xnb_unit_pvt.txf.req_prod_pvt);
1133 xnb_unit_pvt.txf.req_prod_pvt++;
1135 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1137 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1139 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1140 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1141 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
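/*
 * On the tx path data flows from the frontend to the backend: the
 * frontend's granted page is the copy source (hence GNTCOPY_source_gref
 * below) and the local mbuf is the copy destination.
 */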
1142 XNB_ASSERT(n_entries == 1);
1143 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1144 /* flags should indicate gref's for source */
1145 XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
1146 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
1147 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_FIRST_RESERVED);
1148 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1149 mtod(pMbuf, vm_offset_t)));
1150 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
1151 virt_to_mfn(mtod(pMbuf, vm_offset_t)));
1152 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_SELF);
1153 safe_m_freem(&pMbuf);
1157 * xnb_txpkt2gnttab on a packet with two requests that can fit into a single mbuf cluster
1161 xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
1167 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1168 xnb_unit_pvt.txf.req_prod_pvt);
1169 req->flags = NETTXF_more_data;
1173 xnb_unit_pvt.txf.req_prod_pvt++;
1175 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1176 xnb_unit_pvt.txf.req_prod_pvt);
1181 xnb_unit_pvt.txf.req_prod_pvt++;
1183 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1185 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1187 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1188 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1189 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1191 XNB_ASSERT(n_entries == 2);
1192 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
1193 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1194 mtod(pMbuf, vm_offset_t)));
1196 XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
1197 XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1198 mtod(pMbuf, vm_offset_t) + 1400));
1199 safe_m_freem(&pMbuf);
1203 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
1206 xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
1211 const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1213 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1214 xnb_unit_pvt.txf.req_prod_pvt);
1216 req->size = data_this_transaction;
1219 xnb_unit_pvt.txf.req_prod_pvt++;
1221 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1222 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1224 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1225 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1226 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1228 if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
1229 /* there should be three mbufs and three gnttab entries */
1230 XNB_ASSERT(n_entries == 3);
1231 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
1233 xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1234 mtod(pMbuf, vm_offset_t)));
1235 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1237 XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
1239 xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1240 mtod(pMbuf->m_next, vm_offset_t)));
1241 XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);
1243 XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
1245 xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
1246 mtod(pMbuf->m_next, vm_offset_t)));
1247 XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
1249 } else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
1250 /* there should be two mbufs and two gnttab entries */
1251 XNB_ASSERT(n_entries == 2);
1252 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
1254 xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1255 mtod(pMbuf, vm_offset_t)));
1256 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1258 XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
1260 xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1261 mtod(pMbuf->m_next, vm_offset_t)));
1263 xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);
1266 /* should never get here */
1275 * xnb_update_mbufc on a short packet that only has one gnttab entry
1278 xnb_update_mbufc_short(char *buffer, size_t buflen)
1280 const size_t size = MINCLSIZE - 1;
1285 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1286 xnb_unit_pvt.txf.req_prod_pvt);
1291 xnb_unit_pvt.txf.req_prod_pvt++;
1293 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1295 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1297 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1298 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1299 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1301 /* Update grant table's status fields as the hypervisor call would */
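/*
 * (In the driver proper, HYPERVISOR_grant_table_op(GNTTABOP_copy, ...)
 * performs the copies and fills in these status fields; the test fakes an
 * entirely successful copy.)
 */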
1302 xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1304 xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1305 XNB_ASSERT(pMbuf->m_len == size);
1306 XNB_ASSERT(pMbuf->m_pkthdr.len == size);
1307 safe_m_freem(&pMbuf);
1311 * xnb_update_mbufc on a packet with two requests that can fit into a single mbuf cluster
1315 xnb_update_mbufc_2req(char *buffer, size_t buflen)
1321 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1322 xnb_unit_pvt.txf.req_prod_pvt);
1323 req->flags = NETTXF_more_data;
1327 xnb_unit_pvt.txf.req_prod_pvt++;
1329 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1330 xnb_unit_pvt.txf.req_prod_pvt);
1335 xnb_unit_pvt.txf.req_prod_pvt++;
1337 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1339 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1341 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1342 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1343 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1345 /* Update grant table's status fields as the hypervisor call would */
1346 xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1347 xnb_unit_pvt.gnttab[1].status = GNTST_okay;
1349 xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1350 XNB_ASSERT(n_entries == 2);
1351 XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
1352 XNB_ASSERT(pMbuf->m_len == 1900);
1354 safe_m_freem(&pMbuf);
1358 * xnb_update_mbufc on a single request that spans two mbuf clusters
1361 xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
1367 const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1369 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1370 xnb_unit_pvt.txf.req_prod_pvt);
1372 req->size = data_this_transaction;
1375 xnb_unit_pvt.txf.req_prod_pvt++;
1377 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1378 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1380 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1381 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1382 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1384 /* Update grant table's status fields */
1385 for (i = 0; i < n_entries; i++) {
1386 xnb_unit_pvt.gnttab[i].status = GNTST_okay;
1388 xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1390 if (n_entries == 3) {
1391 /* there should be three mbufs and three gnttab entries */
1392 XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1393 XNB_ASSERT(pMbuf->m_len == MCLBYTES);
1394 XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
1395 XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
1396 } else if (n_entries == 2) {
1397 /* there should be two mbufs and two gnttab entries */
1398 XNB_ASSERT(n_entries == 2);
1399 XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1400 XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
1401 XNB_ASSERT(pMbuf->m_next->m_len == 1);
1403 /* should never get here */
1406 safe_m_freem(&pMbuf);
1409 /** xnb_mbufc2pkt on an empty mbufc */
1411 xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
1413 int free_slots = 64;
1416 mbuf = m_get(M_WAITOK, MT_DATA);
1418 * note: it is illegal to set M_PKTHDR on an mbuf with no data. Doing so
1419 * will cause m_freem to segfault
1421 XNB_ASSERT(mbuf->m_len == 0);
1423 xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1424 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1426 safe_m_freem(&mbuf);
1429 /** xnb_mbufc2pkt on a short mbufc */
1431 xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
1434 int free_slots = 64;
1438 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1439 mbuf->m_flags |= M_PKTHDR;
1440 mbuf->m_pkthdr.len = size;
1443 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1444 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1445 XNB_ASSERT(pkt.size == size);
1446 XNB_ASSERT(pkt.car_size == size);
1447 XNB_ASSERT(! (pkt.flags &
1448 (NETRXF_more_data | NETRXF_extra_info)));
1449 XNB_ASSERT(pkt.list_len == 1);
1450 XNB_ASSERT(pkt.car == start);
1452 safe_m_freem(&mbuf);
1455 /** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
1457 xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
1459 size_t size = MCLBYTES;
1460 int free_slots = 32;
1461 RING_IDX start = 12;
1464 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1465 mbuf->m_flags |= M_PKTHDR;
1466 mbuf->m_pkthdr.len = size;
1469 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1470 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1471 XNB_ASSERT(pkt.size == size);
1472 XNB_ASSERT(pkt.car_size == size);
1473 XNB_ASSERT(! (pkt.flags &
1474 (NETRXF_more_data | NETRXF_extra_info)));
1475 XNB_ASSERT(pkt.list_len == 1);
1476 XNB_ASSERT(pkt.car == start);
1478 safe_m_freem(&mbuf);
1481 /** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
1483 xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
1485 size_t size1 = MHLEN - 5;
1486 size_t size2 = MHLEN - 15;
1487 int free_slots = 32;
1488 RING_IDX start = 14;
1489 struct mbuf *mbufc, *mbufc2;
1491 mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1492 mbufc->m_flags |= M_PKTHDR;
1493 if (mbufc == NULL) {
1494 XNB_ASSERT(mbufc != NULL);
1498 mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1499 if (mbufc2 == NULL) {
1500 XNB_ASSERT(mbufc2 != NULL);
1501 safe_m_freem(&mbufc);
1504 mbufc2->m_pkthdr.len = size1 + size2;
1505 mbufc2->m_len = size1;
1507 xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
1508 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1509 XNB_ASSERT(pkt.size == size1 + size2);
1510 XNB_ASSERT(pkt.car == start);
1512 * The second m_getm may allocate a new mbuf and append
1513 * it to the chain, or it may simply extend the first mbuf.
1515 if (mbufc2->m_next != NULL) {
1516 XNB_ASSERT(pkt.car_size == size1);
1517 XNB_ASSERT(pkt.list_len == 1);
1518 XNB_ASSERT(pkt.cdr == start + 1);
1521 safe_m_freem(&mbufc2);
1524 /** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster */
1526 xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
1528 size_t size = 14 * MCLBYTES / 3;
1529 size_t size_remaining;
1530 int free_slots = 15;
1532 struct mbuf *mbufc, *m;
1534 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1535 mbufc->m_flags |= M_PKTHDR;
1536 if (mbufc == NULL) {
1537 XNB_ASSERT(mbufc != NULL);
1541 mbufc->m_pkthdr.len = size;
1542 size_remaining = size;
1543 for (m = mbufc; m != NULL; m = m->m_next) {
1544 m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1545 size_remaining -= m->m_len;
1548 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1549 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1550 XNB_ASSERT(pkt.size == size);
1551 XNB_ASSERT(pkt.car == start);
1552 XNB_ASSERT(pkt.car_size == mbufc->m_len);
1554 * There should be >1 response in the packet, and there is no extra info.
1557 XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
1558 XNB_ASSERT(pkt.cdr == pkt.car + 1);
1560 safe_m_freem(&mbufc);
1563 /** xnb_mbufc2pkt on an mbuf chain with >1 mbuf cluster and extra info */
1565 xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
1567 size_t size = 14 * MCLBYTES / 3;
1568 size_t size_remaining;
1569 int free_slots = 15;
1571 struct mbuf *mbufc, *m;
1573 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1574 if (mbufc == NULL) {
1575 XNB_ASSERT(mbufc != NULL);
1579 mbufc->m_flags |= M_PKTHDR;
1580 mbufc->m_pkthdr.len = size;
1581 mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1582 mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
1583 size_remaining = size;
1584 for (m = mbufc; m != NULL; m = m->m_next) {
1585 m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1586 size_remaining -= m->m_len;
1589 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1590 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1591 XNB_ASSERT(pkt.size == size);
1592 XNB_ASSERT(pkt.car == start);
1593 XNB_ASSERT(pkt.car_size == mbufc->m_len);
1594 /* There should be >1 response in the packet, there is extra info */
1595 XNB_ASSERT(pkt.flags & NETRXF_extra_info);
1596 XNB_ASSERT(pkt.flags & NETRXF_data_validated);
1597 XNB_ASSERT(pkt.cdr == pkt.car + 2);
1598 XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
1599 XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
1600 XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));
1602 safe_m_freem(&mbufc);
1605 /** xnb_mbufc2pkt with insufficient space in the ring */
1607 xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
1609 size_t size = 14 * MCLBYTES / 3;
1610 size_t size_remaining;
1613 struct mbuf *mbufc, *m;
1616 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1617 mbufc->m_flags |= M_PKTHDR;
1618 if (mbufc == NULL) {
1619 XNB_ASSERT(mbufc != NULL);
1623 mbufc->m_pkthdr.len = size;
1624 size_remaining = size;
1625 for (m = mbufc; m != NULL; m = m->m_next) {
1626 m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1627 size_remaining -= m->m_len;
1630 error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1631 XNB_ASSERT(error == EAGAIN);
1632 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1634 safe_m_freem(&mbufc);
1638 * xnb_rxpkt2gnttab on an empty packet. Should return empty gnttab
1641 xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
1645 int free_slots = 60;
1648 mbuf = m_get(M_WAITOK, MT_DATA);
1650 xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1651 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1652 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1654 XNB_ASSERT(nr_entries == 0);
1656 safe_m_freem(&mbuf);
1659 /** xnb_rxpkt2gnttab on a short packet without extra data */
1661 xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
1665 int free_slots = 60;
1667 struct netif_rx_request *req;
1670 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1671 mbuf->m_flags |= M_PKTHDR;
1672 mbuf->m_pkthdr.len = size;
1675 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1676 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1680 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1681 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
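/*
 * On the rx path data flows from the backend to the frontend: the local
 * mbuf is the copy source and the frontend's granted page is the copy
 * destination (hence GNTCOPY_dest_gref below).
 */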
1683 XNB_ASSERT(nr_entries == 1);
1684 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1685 /* flags should indicate gref's for dest */
1686 XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
1687 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
1688 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1689 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
1690 mtod(mbuf, vm_offset_t)));
1691 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
1692 virt_to_mfn(mtod(mbuf, vm_offset_t)));
1693 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1695 safe_m_freem(&mbuf);
1699 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
1702 xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1707 size_t total_granted_size = 0;
1708 size_t size = MJUMPAGESIZE + 1;
1709 int free_slots = 60;
1710 RING_IDX start = 11;
1711 struct netif_rx_request *req;
1712 struct mbuf *mbuf, *m;
1714 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1715 mbuf->m_flags |= M_PKTHDR;
1716 mbuf->m_pkthdr.len = size;
1719 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1721 for (i = 0, m = mbuf; m != NULL; i++, m = m->m_next) {
1722 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + i);
1729 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1730 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1732 XNB_ASSERT(nr_entries >= num_mbufs);
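/*
 * A single gnttab_copy entry cannot cross a page boundary, so an mbuf
 * whose data straddles a page is split across multiple entries; that is
 * why nr_entries may exceed num_mbufs, and why each entry is bounds-checked
 * against PAGE_SIZE below.
 */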
1733 for (i = 0; i < nr_entries; i++) {
1734 int end_offset = xnb_unit_pvt.gnttab[i].len +
1735 xnb_unit_pvt.gnttab[i].dest.offset;
1736 XNB_ASSERT(end_offset <= PAGE_SIZE);
1737 total_granted_size += xnb_unit_pvt.gnttab[i].len;
1739 XNB_ASSERT(total_granted_size == size);
1743 * xnb_rxpkt2rsp on an empty packet. Shouldn't make any response
1746 xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1751 int free_slots = 60;
1752 netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1753 netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1756 mbuf = m_get(M_WAITOK, MT_DATA);
1758 xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1759 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1760 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1762 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1764 XNB_ASSERT(nr_reqs == 0);
1766 memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1768 memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1770 safe_m_freem(&mbuf);
1774 * xnb_rxpkt2rsp on a short packet with no extras
1777 xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1780 int nr_entries, nr_reqs;
1782 int free_slots = 60;
1784 struct netif_rx_request *req;
1785 struct netif_rx_response *rsp;
1788 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1789 mbuf->m_flags |= M_PKTHDR;
1790 mbuf->m_pkthdr.len = size;
1793 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1794 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1796 xnb_unit_pvt.rxb.req_cons = start;
1797 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1798 xnb_unit_pvt.rxs->req_prod = start + 1;
1799 xnb_unit_pvt.rxs->rsp_prod = start;
1801 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1802 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1804 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1807 XNB_ASSERT(nr_reqs == 1);
1808 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1809 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1810 XNB_ASSERT(rsp->id == req->id);
1811 XNB_ASSERT(rsp->offset == 0);
1812 XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1813 XNB_ASSERT(rsp->status == size);
1815 safe_m_freem(&mbuf);
1819 * xnb_rxpkt2rsp with extra data
1822 xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1825 int nr_entries, nr_reqs;
1827 int free_slots = 15;
1831 uint16_t mss = TCP_MSS - 40;
1833 struct netif_rx_request *req;
1834 struct netif_rx_response *rsp;
1835 struct netif_extra_info *ext;
1837 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1838 if (mbufc == NULL) {
1839 XNB_ASSERT(mbufc != NULL);
1843 mbufc->m_flags |= M_PKTHDR;
1844 mbufc->m_pkthdr.len = size;
1845 mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1846 mbufc->m_pkthdr.tso_segsz = mss;
1847 mbufc->m_len = size;
1849 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1850 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1853 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1855 req->gref = gref + 1;
1856 xnb_unit_pvt.rxb.req_cons = start;
1857 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1858 xnb_unit_pvt.rxs->req_prod = start + 2;
1859 xnb_unit_pvt.rxs->rsp_prod = start;
1861 nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1862 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1864 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1867 XNB_ASSERT(nr_reqs == 2);
1868 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1869 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1870 XNB_ASSERT(rsp->id == id);
1871 XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1872 XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1873 XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1874 XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1875 XNB_ASSERT(rsp->status == size);
1877 ext = (struct netif_extra_info*)
1878 RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1879 XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1880 XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1881 XNB_ASSERT(ext->u.gso.size == mss);
1882 XNB_ASSERT(ext->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
1884 safe_m_freem(&mbufc);
1888 * xnb_rxpkt2rsp on a packet with more than a page's worth of data. It should
1889 * generate two response slots
1892 xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1895 int nr_entries, nr_reqs;
1896 size_t size = PAGE_SIZE + 100;
1900 uint16_t gref1 = 24;
1901 uint16_t gref2 = 34;
1902 RING_IDX start = 15;
1903 struct netif_rx_request *req;
1904 struct netif_rx_response *rsp;
1907 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1908 mbuf->m_flags |= M_PKTHDR;
1909 mbuf->m_pkthdr.len = size;
1910 if (mbuf->m_next != NULL) {
1911 size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
1912 mbuf->m_len = first_len;
1913 mbuf->m_next->m_len = size - first_len;
1919 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1920 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1923 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1926 xnb_unit_pvt.rxb.req_cons = start;
1927 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1928 xnb_unit_pvt.rxs->req_prod = start + 2;
1929 xnb_unit_pvt.rxs->rsp_prod = start;
1931 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1932 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1934 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1937 XNB_ASSERT(nr_reqs == 2);
1938 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1939 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1940 XNB_ASSERT(rsp->id == id1);
1941 XNB_ASSERT(rsp->offset == 0);
1942 XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1943 XNB_ASSERT(rsp->flags & NETRXF_more_data);
1944 XNB_ASSERT(rsp->status == PAGE_SIZE);
1946 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1947 XNB_ASSERT(rsp->id == id2);
1948 XNB_ASSERT(rsp->offset == 0);
1949 XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1950 XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1951 XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1953 safe_m_freem(&mbuf);
1956 /** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1958 xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
1960 int nr_reqs, nr_entries;
1961 size_t size1 = MHLEN - 5;
1962 size_t size2 = MHLEN - 15;
1963 int free_slots = 32;
1964 RING_IDX start = 14;
1967 struct netif_rx_request *req;
1968 struct netif_rx_response *rsp;
1971 mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1972 mbufc->m_flags |= M_PKTHDR;
1973 if (mbufc == NULL) {
1974 XNB_ASSERT(mbufc != NULL);
1978 m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1979 XNB_ASSERT(mbufc->m_next != NULL);
1980 mbufc->m_pkthdr.len = size1 + size2;
1981 mbufc->m_len = size1;
1982 mbufc->m_next->m_len = size2;
1984 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1986 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1989 xnb_unit_pvt.rxb.req_cons = start;
1990 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1991 xnb_unit_pvt.rxs->req_prod = start + 1;
1992 xnb_unit_pvt.rxs->rsp_prod = start;
1994 nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1995 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1997 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2000 XNB_ASSERT(nr_entries == 2);
2001 XNB_ASSERT(nr_reqs == 1);
2002 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2003 XNB_ASSERT(rsp->id == id);
2004 XNB_ASSERT(rsp->status == size1 + size2);
2005 XNB_ASSERT(rsp->offset == 0);
2006 XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
2008 safe_m_freem(&mbufc);
2012 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2013 * Note: this test will result in an error message being printed to the console
2015 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2018 xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2021 int nr_entries, nr_reqs;
2024 uint16_t canary = 6859;
2025 size_t size = 7 * MCLBYTES;
2028 struct netif_rx_request *req;
2029 struct netif_rx_response *rsp;
2032 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2033 mbuf->m_flags |= M_PKTHDR;
2034 mbuf->m_pkthdr.len = size;
2037 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2038 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2041 xnb_unit_pvt.rxb.req_cons = start;
2042 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2043 xnb_unit_pvt.rxs->req_prod = start + 1;
2044 xnb_unit_pvt.rxs->rsp_prod = start;
2045 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2049 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2050 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2051 /* Inject the error */
2052 xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2054 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2057 XNB_ASSERT(nr_reqs == 1);
2058 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2059 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2060 XNB_ASSERT(rsp->id == id);
2061 XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2062 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2063 XNB_ASSERT(req->gref == canary);
2064 XNB_ASSERT(req->id == canary);
2066 safe_m_freem(&mbuf);
2070 * xnb_add_mbuf_cksum on an ARP request packet
2073 xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
2075 const size_t pkt_len = sizeof(struct ether_header) +
2076 sizeof(struct ether_arp);
2078 struct ether_header *eh;
2079 struct ether_arp *ep;
2080 unsigned char pkt_orig[pkt_len];
2082 mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2083 /* Fill in an example ARP request */
2084 eh = mtod(mbufc, struct ether_header*);
2085 eh->ether_dhost[0] = 0xff;
2086 eh->ether_dhost[1] = 0xff;
2087 eh->ether_dhost[2] = 0xff;
2088 eh->ether_dhost[3] = 0xff;
2089 eh->ether_dhost[4] = 0xff;
2090 eh->ether_dhost[5] = 0xff;
2091 eh->ether_shost[0] = 0x00;
2092 eh->ether_shost[1] = 0x15;
2093 eh->ether_shost[2] = 0x17;
2094 eh->ether_shost[3] = 0xe9;
2095 eh->ether_shost[4] = 0x30;
2096 eh->ether_shost[5] = 0x68;
2097 eh->ether_type = htons(ETHERTYPE_ARP);
2098 ep = (struct ether_arp*)(eh + 1);
2099 ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
2100 ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
2101 ep->ea_hdr.ar_hln = 6;
2102 ep->ea_hdr.ar_pln = 4;
2103 ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
2104 ep->arp_sha[0] = 0x00;
2105 ep->arp_sha[1] = 0x15;
2106 ep->arp_sha[2] = 0x17;
2107 ep->arp_sha[3] = 0xe9;
2108 ep->arp_sha[4] = 0x30;
2109 ep->arp_sha[5] = 0x68;
2110 ep->arp_spa[0] = 0xc0;
2111 ep->arp_spa[1] = 0xa8;
2112 ep->arp_spa[2] = 0x0a;
2113 ep->arp_spa[3] = 0x04;
2114 bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
2115 ep->arp_tpa[0] = 0xc0;
2116 ep->arp_tpa[1] = 0xa8;
2117 ep->arp_tpa[2] = 0x0a;
2118 ep->arp_tpa[3] = 0x06;
2120 /* fill in the length field */
2121 mbufc->m_len = pkt_len;
2122 mbufc->m_pkthdr.len = pkt_len;
2123 /* indicate that the netfront uses hw-assisted checksums */
2124 mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
2125 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2127 /* Make a backup copy of the packet */
2128 bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
2130 /* Function under test */
2131 xnb_add_mbuf_cksum(mbufc);
2133 /* Verify that the packet's data did not change */
2134 XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);

/**
 * Helper function that populates the ethernet header and IP header used by
 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
 * and must be large enough
 */
static void
xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
{
	struct ether_header *eh;
	struct ip *iph;

	eh = mtod(m, struct ether_header*);
	eh->ether_dhost[0] = 0x00;
	eh->ether_dhost[1] = 0x16;
	eh->ether_dhost[2] = 0x3e;
	eh->ether_dhost[3] = 0x23;
	eh->ether_dhost[4] = 0x50;
	eh->ether_dhost[5] = 0x0b;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x16;
	eh->ether_shost[2] = 0x30;
	eh->ether_shost[3] = 0x00;
	eh->ether_shost[4] = 0x00;
	eh->ether_shost[5] = 0x00;
	eh->ether_type = htons(ETHERTYPE_IP);
	iph = (struct ip*)(eh + 1);
	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
	iph->ip_v = 4;		/* IP v4 */
	iph->ip_tos = 0;
	iph->ip_len = htons(ip_len);
	iph->ip_id = htons(ip_id);
	iph->ip_off = htons(ip_off);
	iph->ip_ttl = 64;
	iph->ip_p = ip_p;
	iph->ip_sum = htons(ip_sum);
	iph->ip_src.s_addr = htonl(0xc0a80a04);
	iph->ip_dst.s_addr = htonl(0xc0a80a05);
}
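
/*
 * Illustrative sketch, not part of the driver under test: the IP_CSUM
 * reference constants used by the tests below can be reproduced with a
 * standard RFC 1071 ones'-complement sum over the 20-byte IP header with
 * ip_sum zeroed.  The name ip_cksum_ref is an assumption made for this
 * example only; the function is uncalled and exists purely to document
 * where the magic constants come from.
 */
static uint16_t
ip_cksum_ref(const void *hdr, size_t len)
{
	const uint16_t *words = hdr;
	uint32_t sum = 0;

	/* Sum 16-bit words; the caller must pass a 16-bit aligned buffer */
	for (; len > 1; len -= 2)
		sum += *words++;
	if (len == 1)
		sum += *(const uint8_t *)words;
	/* Fold the carries back in, then take the ones' complement */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (~sum & 0xffff);
}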

/**
 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
 * ICMP packet
 */
static void
xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
{
	const size_t icmp_len = 64;	/* set by ping(1) */
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + icmp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct icmp *icmph;
	unsigned char pkt_orig[icmp_len];
	uint32_t *tv_field;
	uint8_t *data_payload;
	int i;
	const uint16_t ICMP_CSUM = 0xaed7;
	const uint16_t IP_CSUM = 0xe533;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example ICMP ping request */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
	iph = (struct ip*)(eh + 1);
	icmph = (struct icmp*)(iph + 1);
	icmph->icmp_type = ICMP_ECHO;
	icmph->icmp_code = 0;
	icmph->icmp_cksum = htons(ICMP_CSUM);
	icmph->icmp_id = htons(31492);
	icmph->icmp_seq = htons(0);
	/*
	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
	 * For this test, we will set the bytes individually for portability.
	 */
	tv_field = (uint32_t*)(&(icmph->icmp_hun));
	tv_field[0] = 0x4f02cfac;
	tv_field[1] = 0x0007c46a;
	/*
	 * Remainder of packet is an incrementing 8 bit integer, starting
	 * with 8
	 */
	data_payload = (uint8_t*)(&tv_field[2]);
	for (i = 8; i < 37; i++) {
		*data_payload++ = i;
	}

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the ICMP portion of the packet */
	bcopy(icmph, pkt_orig, icmp_len);
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the IP checksum */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));

	/* Check that the ICMP packet did not change */
	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);

	safe_m_freem(&mbufc);
}

/**
 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
 * UDP packet
 */
static void
xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
{
	const size_t udp_len = 16;
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + udp_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct udphdr *udp;
	uint8_t *data_payload;
	const uint16_t IP_CSUM = 0xe56b;
	const uint16_t UDP_CSUM = 0xdde2;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
	iph = (struct ip*)(eh + 1);
	udp = (struct udphdr*)(iph + 1);
	udp->uh_sport = htons(0x51ae);
	udp->uh_dport = htons(0x08ae);
	udp->uh_ulen = htons(udp_len);
	udp->uh_sum = htons(0xbaad);	/* xnb_add_mbuf_cksum will fill this in */
	data_payload = (uint8_t*)(udp + 1);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));

	safe_m_freem(&mbufc);
}
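
/*
 * Illustrative sketch, not part of the driver under test: the UDP_CSUM and
 * TCP_CSUM reference constants can be reproduced by summing an IPv4 pseudo
 * header (source address, destination address, zero pad, protocol, and L4
 * length) ahead of the L4 header and payload, using the same RFC 1071 fold
 * as ip_cksum_ref() above.  The name l4_cksum_ref is an assumption made for
 * this example only; the function is uncalled and exists purely to document
 * where the magic constants come from.
 */
static uint16_t
l4_cksum_ref(const struct ip *iph, const void *l4, size_t l4_len)
{
	const uint16_t *w = (const uint16_t *)&iph->ip_src;
	uint32_t sum = 0;
	size_t i;

	/* Pseudo header: src and dst addresses as 16-bit words */
	for (i = 0; i < 4; i++)
		sum += w[i];
	/* Zero pad byte, protocol, and L4 length, in network order */
	sum += htons(iph->ip_p);
	sum += htons(l4_len);
	/* L4 header plus payload */
	for (w = l4; l4_len > 1; l4_len -= 2)
		sum += *w++;
	if (l4_len == 1)
		sum += *(const uint8_t *)w;
	/* Fold the carries back in, then take the ones' complement */
	while (sum >> 16)
		sum = (sum & 0xffff) + (sum >> 16);
	return (~sum & 0xffff);
}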

/**
 * Helper function that populates a TCP packet used by all of the
 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
 * large enough
 */
static void
xnb_fill_tcp(struct mbuf *m)
{
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	uint32_t *options;
	uint8_t *data_payload;

	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	eh = mtod(m, struct ether_header*);
	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sport = htons(0x9cd9);
	tcp->th_dport = htons(2222);
	tcp->th_seq = htonl(0x00f72b10);
	tcp->th_ack = htonl(0x7f37ba6c);
	tcp->th_x2 = 0;
	tcp->th_off = 8;	/* 20-byte header plus 12 bytes of options */
	tcp->th_flags = 0x18;
	tcp->th_win = htons(0x410);
	/* th_sum is incorrect; will be inserted by function under test */
	tcp->th_sum = htons(0xbaad);
	tcp->th_urp = htons(0);
	/*
	 * The following 12 bytes of options encode:
	 * [nop, nop, TS val 33247 ecr 3457687679]
	 */
	options = (uint32_t*)(tcp + 1);
	options[0] = htonl(0x0101080a);
	options[1] = htonl(0x000081df);
	options[2] = htonl(0xce18207f);
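	/*
	 * Decoded: 0x01 0x01 are NOPs, 0x08 0x0a marks a timestamp option
	 * (kind 8, length 10), followed by the 4-byte TS value 0x000081df
	 * (33247) and the 4-byte TS echo reply 0xce18207f (3457687679).
	 */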
	data_payload = (uint8_t*)(&options[3]);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';
}

/**
 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
 * packet
 */
static void
xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	const uint16_t IP_CSUM = 0xa55a;
	const uint16_t TCP_CSUM = 0x2f64;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
				CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	safe_m_freem(&mbufc);
}

/**
 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
 */
static void
xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
{
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct mbuf *mbufc;
	struct ether_header *eh;
	struct ip *iph;
	struct tcphdr *tcp;
	/*
	 * Use deliberately bad checksums and verify that they don't get
	 * corrected by xnb_add_mbuf_cksum
	 */
	const uint16_t IP_CSUM = 0xdead;
	const uint16_t TCP_CSUM = 0xbeef;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	iph->ip_sum = htons(IP_CSUM);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sum = htons(TCP_CSUM);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront does not use hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = 0;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check that the checksums didn't change */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

	safe_m_freem(&mbufc);
}

/**
 * sscanf on unsigned chars
 */
static void
xnb_sscanf_hhu(char *buffer, size_t buflen)
{
	const char mystr[] = "137";
	unsigned char dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	sscanf(mystr, "%hhu", &dest[4]);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));
}

/**
 * sscanf on signed chars
 */
static void
xnb_sscanf_hhd(char *buffer, size_t buflen)
{
	const char mystr[] = "-27";
	signed char dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	sscanf(mystr, "%hhd", &dest[4]);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));
}

/**
 * sscanf on signed long longs
 */
static void
xnb_sscanf_lld(char *buffer, size_t buflen)
{
	const char mystr[] = "-123456789012345";	/* about -2**47 */
	long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = (long long)0xdeadbeefdeadbeef;

	sscanf(mystr, "%lld", &dest[1]);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
		    -123456789012345ll));
}

/**
 * sscanf on unsigned long longs
 */
static void
xnb_sscanf_llu(char *buffer, size_t buflen)
{
	const char mystr[] = "12802747070103273189";
	unsigned long long dest[3];
	int i;

	for (i = 0; i < 3; i++)
		dest[i] = (unsigned long long)0xdeadbeefdeadbeef;

	sscanf(mystr, "%llu", &dest[1]);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ?
		    (unsigned long long)0xdeadbeefdeadbeef :
		    12802747070103273189ull));
}

/**
 * sscanf with the %hhn conversion, which stores the number of characters
 * consumed so far into an unsigned char
 */
static void
xnb_sscanf_hhn(char *buffer, size_t buflen)
{
	const char mystr[] =
	"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	"202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	"404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
	unsigned char dest[12];
	int i;

	for (i = 0; i < 12; i++)
		dest[i] = (unsigned char)'X';

	sscanf(mystr,
	"000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	"202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	"404142434445464748494a4b4c4d4e4f%hhn", &dest[4]);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));