2 * Copyright (c) 2009-2011 Spectra Logic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * Authors: Justin T. Gibbs (Spectra Logic Corporation)
31 * Alan Somers (Spectra Logic Corporation)
32 * John Suykerbuyk (Spectra Logic Corporation)
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 * \file netback_unit_tests.c
41 * \brief Unit tests for the Xen netback driver.
43 * Due to the driver's use of static functions, these tests cannot be compiled
44 * standalone; they must be #include'd from the driver's .c file.
/**
 * Helper macro used to snprintf to a buffer and then advance the buffer
 * pointer past the newly written text, shrinking the remaining length.
 * Implied side effects: both buffer and buflen are modified in place.
 */
#define SNCATF(buffer, buflen, ...) do {				\
	size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__);	\
	buffer += new_chars;						\
	/* be careful; snprintf's return value can be > buflen */	\
	buflen -= MIN(buflen, new_chars);				\
/*
 * STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string.
 * Two levels are needed so the argument is macro-expanded (to the actual
 * line number) before being stringified.
 */
#define STRINGIFY(x) #x
#define TOSTRING(x) STRINGIFY(x)
 * Writes an error message to buffer if cond is false.
 * Note the implied parameters buffer and buflen, read from the enclosing
 * testcase's scope.
#define XNB_ASSERT(cond) ({						\
	int passed = (cond);						\
	char *_buffer = (buffer);					\
	size_t _buflen = (buflen);					\
	/* on failure, append "func:line Assertion Error: cond" */	\
	strlcat(_buffer, __func__, _buflen);				\
	strlcat(_buffer, ":" TOSTRING(__LINE__)				\
	    " Assertion Error: " #cond "\n", _buflen);			\
78 * The signature used by all testcases. If the test writes anything
79 * to buffer, then it will be considered a failure
80 * \param buffer Return storage for error messages
81 * \param buflen The space available in the buffer
83 typedef void testcase_t(char *buffer, size_t buflen);
86 * Signature used by setup functions
87 * \return nonzero on error
89 typedef int setup_t(void);
91 typedef void teardown_t(void);
93 /** A simple test fixture comprising setup, teardown, and test */
95 /** Will be run before the test to allocate and initialize variables */
98 /** Will be run if setup succeeds */
/** Cleans up test data whether or not the setup succeeded */
102 teardown_t *teardown;
105 typedef struct test_fixture test_fixture_t;
107 static int xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
108 static int xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
109 char *buffer, size_t buflen);
/* No-op fixture hooks for tests that need no private ring data */
null_setup(void) { return 0; }

null_teardown(void) { }
117 static setup_t setup_pvt_data;
118 static teardown_t teardown_pvt_data;
119 static testcase_t xnb_ring2pkt_emptyring;
120 static testcase_t xnb_ring2pkt_1req;
121 static testcase_t xnb_ring2pkt_2req;
122 static testcase_t xnb_ring2pkt_3req;
123 static testcase_t xnb_ring2pkt_extra;
124 static testcase_t xnb_ring2pkt_partial;
125 static testcase_t xnb_ring2pkt_wraps;
126 static testcase_t xnb_txpkt2rsp_emptypkt;
127 static testcase_t xnb_txpkt2rsp_1req;
128 static testcase_t xnb_txpkt2rsp_extra;
129 static testcase_t xnb_txpkt2rsp_long;
130 static testcase_t xnb_txpkt2rsp_invalid;
131 static testcase_t xnb_txpkt2rsp_error;
132 static testcase_t xnb_txpkt2rsp_wraps;
133 static testcase_t xnb_pkt2mbufc_empty;
134 static testcase_t xnb_pkt2mbufc_short;
135 static testcase_t xnb_pkt2mbufc_csum;
136 static testcase_t xnb_pkt2mbufc_1cluster;
137 static testcase_t xnb_pkt2mbufc_largecluster;
138 static testcase_t xnb_pkt2mbufc_2cluster;
139 static testcase_t xnb_txpkt2gnttab_empty;
140 static testcase_t xnb_txpkt2gnttab_short;
141 static testcase_t xnb_txpkt2gnttab_2req;
142 static testcase_t xnb_txpkt2gnttab_2cluster;
143 static testcase_t xnb_update_mbufc_short;
144 static testcase_t xnb_update_mbufc_2req;
145 static testcase_t xnb_update_mbufc_2cluster;
146 static testcase_t xnb_mbufc2pkt_empty;
147 static testcase_t xnb_mbufc2pkt_short;
148 static testcase_t xnb_mbufc2pkt_1cluster;
149 static testcase_t xnb_mbufc2pkt_2short;
150 static testcase_t xnb_mbufc2pkt_long;
151 static testcase_t xnb_mbufc2pkt_extra;
152 static testcase_t xnb_mbufc2pkt_nospace;
153 static testcase_t xnb_rxpkt2gnttab_empty;
154 static testcase_t xnb_rxpkt2gnttab_short;
155 static testcase_t xnb_rxpkt2gnttab_2req;
156 static testcase_t xnb_rxpkt2rsp_empty;
157 static testcase_t xnb_rxpkt2rsp_short;
158 static testcase_t xnb_rxpkt2rsp_extra;
159 static testcase_t xnb_rxpkt2rsp_2short;
160 static testcase_t xnb_rxpkt2rsp_2slots;
161 static testcase_t xnb_rxpkt2rsp_copyerror;
162 static testcase_t xnb_sscanf_llu;
163 static testcase_t xnb_sscanf_lld;
164 static testcase_t xnb_sscanf_hhu;
165 static testcase_t xnb_sscanf_hhd;
166 static testcase_t xnb_sscanf_hhn;
168 #if defined(INET) || defined(INET6)
169 /* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
170 static testcase_t xnb_add_mbuf_cksum_arp;
171 static testcase_t xnb_add_mbuf_cksum_tcp;
172 static testcase_t xnb_add_mbuf_cksum_udp;
173 static testcase_t xnb_add_mbuf_cksum_icmp;
174 static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
175 static void xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
176 uint16_t ip_id, uint16_t ip_p,
177 uint16_t ip_off, uint16_t ip_sum);
178 static void xnb_fill_tcp(struct mbuf *m);
179 #endif /* INET || INET6 */
181 /** Private data used by unit tests */
	gnttab_copy_table gnttab;	/* scratch grant-copy table filled by *2gnttab tests */
	netif_rx_back_ring_t rxb;	/* netback's view of the RX ring */
	netif_rx_front_ring_t rxf;	/* simulated netfront view of the RX ring */
	netif_tx_back_ring_t txb;	/* netback's view of the TX ring */
	netif_tx_front_ring_t txf;	/* simulated netfront view of the TX ring */
	netif_rx_sring_t* rxs;		/* shared RX ring page (one PAGE_SIZE allocation) */
	netif_tx_sring_t* txs;		/* shared TX ring page (one PAGE_SIZE allocation) */
/* Frees the mbuf chain pointed to by *ppMbuf, if any; NULL-safe */
static inline void safe_m_freem(struct mbuf **ppMbuf) {
	if (*ppMbuf != NULL) {
201 * The unit test runner. It will run every supplied test and return an
202 * output message as a string
203 * \param tests An array of tests. Every test will be attempted.
204 * \param ntests The length of tests
205 * \param buffer Return storage for the result string
206 * \param buflen The length of buffer
207 * \return The number of tests that failed
xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
	/* Run each fixture in order: setup, then test, then teardown */
	for (i = 0; i < ntests; i++) {
		int error = tests[i].setup();
			/* a setup failure counts against the test */
			SNCATF(buffer, buflen,
			    "Setup failed for test idx %d\n", i);
		tests[i].test(buffer, buflen);
		/* any text the test wrote to buffer means it failed */
		new_chars = strnlen(buffer, buflen);
	n_passes = ntests - n_failures;
	/* append the summary that userland will see */
	SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
	if (n_failures > 0) {
		SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
249 /** Number of unit tests. Must match the length of the tests array below */
250 #define TOTAL_TESTS (53)
252 * Max memory available for returning results. 400 chars/test should give
253 * enough space for a five line error message for every test
255 #define TOTAL_BUFLEN (400 * TOTAL_TESTS + 2)
258 * Called from userspace by a sysctl. Runs all internal unit tests, and
259 * returns the results to userspace as a string
261 * \param arg1 pointer to an xnb_softc for a specific xnb device
263 * \param req sysctl access structure
264 * \return a string via the special SYSCTL_OUT macro.
xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
	/*
	 * Fixture table: ring-manipulating tests use setup_pvt_data /
	 * teardown_pvt_data; the checksum and sscanf tests need no fixture.
	 * The array length must stay equal to TOTAL_TESTS.
	 */
	test_fixture_t const tests[TOTAL_TESTS] = {
	{setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
	{setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
	{setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
	{setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
	{setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
	{setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
	{setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
	{setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
	{setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
	{setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
	{setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
	{setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
	{setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
	{setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
	{setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
	{setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
	{setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
	{setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
	{setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
	{setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
	{setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
	{setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
	{setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
	{setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
	{setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
#if defined(INET) || defined(INET6)
	{null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
	{null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
	{null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
	{null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
	{null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
	{null_setup, xnb_sscanf_hhd, null_teardown},
	{null_setup, xnb_sscanf_hhu, null_teardown},
	{null_setup, xnb_sscanf_lld, null_teardown},
	{null_setup, xnb_sscanf_llu, null_teardown},
	{null_setup, xnb_sscanf_hhn, null_teardown},
	 * results is static so that the data will persist after this function
	 * returns.  The sysctl code expects us to return a constant string.
	 * \todo: the static variable is not thread safe.  Put a mutex around
	static char results[TOTAL_BUFLEN];
	/* empty the result strings */
	xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);
	return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
	bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));

	/* Allocate and initialize the shared TX ring plus both views of it */
	xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.txs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.txs);
		BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);

	/* A fake ethernet interface for the mbuf-allocating tests */
	xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
	if (xnb_unit_pvt.ifp == NULL) {

	/* Allocate and initialize the shared RX ring plus both views of it */
	xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
	if (xnb_unit_pvt.rxs != NULL) {
		SHARED_RING_INIT(xnb_unit_pvt.rxs);
		BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
		FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
teardown_pvt_data(void)
	/* Release whichever resources setup_pvt_data managed to allocate */
	if (xnb_unit_pvt.txs != NULL) {
		free(xnb_unit_pvt.txs, M_XENNETBACK);
	if (xnb_unit_pvt.rxs != NULL) {
		free(xnb_unit_pvt.rxs, M_XENNETBACK);
	if (xnb_unit_pvt.ifp != NULL) {
		if_free(xnb_unit_pvt.ifp);
389 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
	/* nothing was produced on the ring, so nothing may be consumed */
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 0);
403 * Verify that xnb_ring2pkt can convert a single request packet correctly
xnb_ring2pkt_1req(char *buffer, size_t buflen)
	struct netif_tx_request *req;
	/* produce a single self-contained request on the frontend ring */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = 69;	/* arbitrary number for test */
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	/* exactly one slot consumed; pkt must mirror the request */
	XNB_ASSERT(num_consumed == 1);
	XNB_ASSERT(pkt.size == 69);
	XNB_ASSERT(pkt.car_size == 69);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == 0);
433 * Verify that xnb_ring2pkt can convert a two request packet correctly.
434 * This tests handling of the MORE_DATA flag and cdr
xnb_ring2pkt_2req(char *buffer, size_t buflen)
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
	/* first request: MORE_DATA set, so the packet continues */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	/* second request: no flags, terminates the packet */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 2);
	/* size is the whole packet; car_size is the first request's share */
	XNB_ASSERT(pkt.size == 100);
	XNB_ASSERT(pkt.car_size == 60);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 2);
	/* car points at the first slot, cdr at the second */
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
471 * Verify that xnb_ring2pkt can convert a three request packet correctly
xnb_ring2pkt_3req(char *buffer, size_t buflen)
	struct netif_tx_request *req;
	RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
	/* two continuation requests followed by a terminating one */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(pkt.size == 200);
	XNB_ASSERT(pkt.car_size == 110);
	XNB_ASSERT(pkt.flags == 0);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	XNB_ASSERT(pkt.car == start_idx);
	XNB_ASSERT(pkt.cdr == start_idx + 1);
	/* req still points at the last request pushed above */
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
 * Verify that xnb_ring2pkt can read extra info
518 xnb_ring2pkt_extra(char *buffer, size_t buflen)
522 struct netif_tx_request *req;
523 struct netif_extra_info *ext;
524 RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
526 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
527 xnb_unit_pvt.txf.req_prod_pvt);
528 req->flags = NETTXF_extra_info | NETTXF_more_data;
530 xnb_unit_pvt.txf.req_prod_pvt++;
532 ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
533 xnb_unit_pvt.txf.req_prod_pvt);
535 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
536 ext->u.gso.size = 250;
537 ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
538 ext->u.gso.features = 0;
539 xnb_unit_pvt.txf.req_prod_pvt++;
541 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
542 xnb_unit_pvt.txf.req_prod_pvt);
545 xnb_unit_pvt.txf.req_prod_pvt++;
547 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
549 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
550 xnb_unit_pvt.txb.req_cons);
551 XNB_ASSERT(num_consumed == 3);
552 XNB_ASSERT(pkt.extra.flags == 0);
553 XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
554 XNB_ASSERT(pkt.extra.u.gso.size == 250);
555 XNB_ASSERT(pkt.extra.u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4);
556 XNB_ASSERT(pkt.size == 150);
557 XNB_ASSERT(pkt.car_size == 100);
558 XNB_ASSERT(pkt.flags == NETTXF_extra_info);
559 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
560 XNB_ASSERT(pkt.list_len == 2);
561 XNB_ASSERT(pkt.car == start_idx);
562 XNB_ASSERT(pkt.cdr == start_idx + 2);
563 XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
567 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
568 * not yet in the ring
xnb_ring2pkt_partial(char *buffer, size_t buflen)
	struct netif_tx_request *req;
	/* push one request that promises MORE_DATA but never delivers it */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	/* the incomplete packet must be neither consumed nor valid */
	XNB_ASSERT(num_consumed == 0);
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
593 * the end of the ring
xnb_ring2pkt_wraps(char *buffer, size_t buflen)
	struct netif_tx_request *req;
	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;
	/* a 3-request packet: slot 3 lands past the ring wrap point */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	XNB_ASSERT(num_consumed == 3);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.list_len == 3);
	/* req still points at the last (wrapped) request pushed above */
	XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
647 * xnb_txpkt2rsp should do nothing for an empty packet
xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
	/* snapshot ring state so we can prove nothing changed */
	netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
	netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
	/* must call xnb_ring2pkt just to initialize pkt */
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	/* neither the back ring nor the shared ring may have changed */
	    memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
	    memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
669 * xnb_txpkt2rsp responding to one request
xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
	uint16_t num_consumed;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	/* produce and consume a single-request packet, then respond to it */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	/* a response must exist for every consumed request */
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
701 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
	uint16_t num_consumed;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;
	/* one data request followed by one GSO extra-info slot */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_extra_info;
	xnb_unit_pvt.txf.req_prod_pvt++;
	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	/* data request gets an OKAY response */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
	/* extra-info slot gets a NULL placeholder response */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
xnb_txpkt2rsp_long(char *buffer, size_t buflen)
	uint16_t num_consumed;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;
	/* slot 0: data request with extra_info and more_data */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_extra_info | NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	/* slot 1: GSO extra info */
	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
	xnb_unit_pvt.txf.req_prod_pvt++;
	/* slot 2: continuation data request */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	/* slot 3: final data request */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	/* data slots get OKAY responses; the extra-info slot gets NULL */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 3);
	XNB_ASSERT(rsp->id ==
	    RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
818 * xnb_txpkt2rsp responding to an invalid packet.
819 * Note: this test will result in an error message being printed to the console
821 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255. Discarding packet
xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
	uint16_t num_consumed;
	struct netif_tx_request *req;
	netif_extra_info_t *ext;
	struct netif_tx_response *rsp;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_extra_info;
	xnb_unit_pvt.txf.req_prod_pvt++;
	ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	ext->type = 0xFF;	/* Invalid extra type */
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	/* the bogus extra type must invalidate the whole packet */
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	/* the invalid packet is answered with ERROR + NULL */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 1);
	XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
867 * xnb_txpkt2rsp responding to one request which caused an error
xnb_txpkt2rsp_error(char *buffer, size_t buflen)
	uint16_t num_consumed;
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_unit_pvt.txb.req_cons += num_consumed;
	/* the nonzero final argument flags the packet as errored */
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
899 * xnb_txpkt2rsp's responses wrap around the end of the ring
xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
	struct netif_tx_request *req;
	struct netif_tx_response *rsp;
	/*
	 * Manually tweak the ring indices to create a ring with no responses
	 * and the next request slot at position 2 from the end
	 */
	rsize = RING_SIZE(&xnb_unit_pvt.txf);
	xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
	xnb_unit_pvt.txf.rsp_cons = rsize - 2;
	xnb_unit_pvt.txs->req_prod = rsize - 2;
	xnb_unit_pvt.txs->req_event = rsize - 1;
	xnb_unit_pvt.txs->rsp_prod = rsize - 2;
	xnb_unit_pvt.txs->rsp_event = rsize - 1;
	xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
	xnb_unit_pvt.txb.req_cons = rsize - 2;
	/* a 3-request packet whose responses must wrap past the ring end */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
	    xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
	/* the third response (wrapped) matches the last request */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
	    xnb_unit_pvt.txf.rsp_cons + 2);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
962 * Helper function used to setup pkt2mbufc tests
963 * \param size size in bytes of the single request to push to the ring
964 * \param flags optional flags to put in the netif request
965 * \param[out] pkt the returned packet object
966 * \return number of requests consumed from the ring
xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
	struct netif_tx_request *req;
	/* push a single request with the caller's size and flags */
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	/* let xnb_ring2pkt turn it into the caller's pkt */
	return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
986 * xnb_pkt2mbufc on an empty packet
xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
	/* must call xnb_ring2pkt just to initialize pkt */
	num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
	    xnb_unit_pvt.txb.req_cons);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	safe_m_freem(&pMbuf);
1005 * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
xnb_pkt2mbufc_short(char *buffer, size_t buflen)
	/* just under the cluster threshold: fits in the mbuf's own storage */
	const size_t size = MINCLSIZE - 1;
	xnb_get1pkt(&pkt, size, 0);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
 * xnb_pkt2mbufc on a short packet whose checksum was validated by the netfront
xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
	const size_t size = MINCLSIZE - 1;
	/* NETTXF_data_validated marks the checksum as already verified */
	xnb_get1pkt(&pkt, size, NETTXF_data_validated);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	/* all four checksum-ok flags must be propagated to the mbuf */
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
	XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
	safe_m_freem(&pMbuf);
1043 * xnb_pkt2mbufc on packet that can fit in one cluster
xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
	/* exactly at the threshold that forces a cluster allocation */
	const size_t size = MINCLSIZE;
	xnb_get1pkt(&pkt, size, 0);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
1060 * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
	/* one byte too large for a regular MCLBYTES cluster */
	const size_t size = MCLBYTES + 1;
	xnb_get1pkt(&pkt, size, 0);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
	safe_m_freem(&pMbuf);
 * xnb_pkt2mbufc on a packet that cannot fit in two clusters
xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
	/* too large for two clusters: forces a multi-mbuf chain */
	const size_t size = 2 * MCLBYTES + 1;
	xnb_get1pkt(&pkt, size, 0);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	/* sum trailing space across the whole chain */
	for (m = pMbuf; m != NULL; m = m->m_next) {
		space += M_TRAILINGSPACE(m);
	XNB_ASSERT(space >= size);
	safe_m_freem(&pMbuf);
1100 * xnb_txpkt2gnttab on an empty packet. Should return empty gnttab
xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
	/* must call xnb_ring2pkt just to initialize pkt */
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/* an empty packet yields no grant-copy entries */
	XNB_ASSERT(n_entries == 0);
	safe_m_freem(&pMbuf);
1121 * xnb_txpkt2gnttab on a short packet, that can fit in one mbuf internal buffer
1122 * and has one request
xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
	const size_t size = MINCLSIZE - 1;
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/* one request maps to exactly one grant-copy entry */
	XNB_ASSERT(n_entries == 1);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
	/* flags should indicate gref's for source */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
	/* destination is the mbuf's data area in the other domain */
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	    mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
	    virt_to_mfn(mtod(pMbuf, vm_offset_t)));
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
	safe_m_freem(&pMbuf);
 * xnb_txpkt2gnttab on a packet with two requests, that can fit into a single
xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	/* First request announces a continuation */
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/* Two ring requests should yield two grant-copy entries */
	XNB_ASSERT(n_entries == 2);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
	XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
	    mtod(pMbuf, vm_offset_t)));
	/* Second copy lands immediately after the first 1400 bytes */
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
	XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
	    mtod(pMbuf, vm_offset_t) + 1400));
	safe_m_freem(&pMbuf);
 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
	const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	req->size = data_this_transaction;
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	XNB_ASSERT(pMbuf != NULL);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/*
	 * The allocator may return either a chain of standard clusters or a
	 * single larger buffer; accept both layouts.
	 */
	if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
		/* there should be three mbufs and three gnttab entries */
		XNB_ASSERT(n_entries == 3);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		    mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		    mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);
		/* Final single byte goes into the third entry */
		XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
		    xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
		    mtod(pMbuf->m_next, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
	} else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
		/* there should be two mbufs and two gnttab entries */
		XNB_ASSERT(n_entries == 2);
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
		    xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
		    mtod(pMbuf, vm_offset_t)));
		XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
		XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
		    xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
		    mtod(pMbuf->m_next, vm_offset_t)));
		    xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);
		/* should never get here */
 * xnb_update_mbufc on a short packet that only has one gnttab entry
xnb_update_mbufc_short(char *buffer, size_t buflen)
	const size_t size = MINCLSIZE - 1;
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	/* Both per-mbuf and packet-header lengths must reflect the copy */
	XNB_ASSERT(pMbuf->m_len == size);
	XNB_ASSERT(pMbuf->m_pkthdr.len == size);
	safe_m_freem(&pMbuf);
 * xnb_update_mbufc on a packet with two requests, that can fit into a single
xnb_update_mbufc_2req(char *buffer, size_t buflen)
	struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	/* First request chains to a second */
	req->flags = NETTXF_more_data;
	xnb_unit_pvt.txf.req_prod_pvt++;
	req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
	    xnb_unit_pvt.txf.req_prod_pvt);
	xnb_unit_pvt.txf.req_prod_pvt++;
	RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
	xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
	pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
	n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
	/* Update grant table's status fields as the hypervisor call would */
	xnb_unit_pvt.gnttab[0].status = GNTST_okay;
	xnb_unit_pvt.gnttab[1].status = GNTST_okay;
	xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
	XNB_ASSERT(n_entries == 2);
	/* 1400 + 500 bytes land in a single mbuf */
	XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
	XNB_ASSERT(pMbuf->m_len == 1900);
	safe_m_freem(&pMbuf);
1366 * xnb_update_mbufc on a single request that spans two mbuf clusters
1369 xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
1375 const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1377 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1378 xnb_unit_pvt.txf.req_prod_pvt);
1380 req->size = data_this_transaction;
1383 xnb_unit_pvt.txf.req_prod_pvt++;
1385 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1386 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1388 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1389 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1390 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1392 /* Update grant table's status fields */
1393 for (i = 0; i < n_entries; i++) {
1394 xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1396 xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1398 if (n_entries == 3) {
1399 /* there should be three mbufs and three gnttab entries */
1400 XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1401 XNB_ASSERT(pMbuf->m_len == MCLBYTES);
1402 XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
1403 XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
1404 } else if (n_entries == 2) {
1405 /* there should be two mbufs and two gnttab entries */
1406 XNB_ASSERT(n_entries == 2);
1407 XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1408 XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
1409 XNB_ASSERT(pMbuf->m_next->m_len == 1);
1411 /* should never get here */
1414 safe_m_freem(&pMbuf);
/** xnb_mbufc2pkt on an empty mbufc */
xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
	int free_slots = 64;
	mbuf = m_get(M_WAITOK, MT_DATA);
	/*
	 * note: it is illegal to set M_PKTHDR on an mbuf with no data.  Doing
	 * so will cause m_freem to segfault.
	 */
	XNB_ASSERT(mbuf->m_len == 0);
	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	/* An empty chain must not produce a valid packet */
	XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
	safe_m_freem(&mbuf);
/** xnb_mbufc2pkt on a short mbufc */
xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
	int free_slots = 64;
	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	/* Entire payload fits in the first ("car") response slot */
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);
	safe_m_freem(&mbuf);
/** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
	size_t size = MCLBYTES;
	int free_slots = 32;
	RING_IDX start = 12;
	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size);
	/* A full cluster still maps to a single response slot */
	XNB_ASSERT(pkt.car_size == size);
	XNB_ASSERT(! (pkt.flags &
	      (NETRXF_more_data | NETRXF_extra_info)));
	XNB_ASSERT(pkt.list_len == 1);
	XNB_ASSERT(pkt.car == start);
	safe_m_freem(&mbuf);
/** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	struct mbuf *mbufc, *mbufc2;
	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	mbufc->m_flags |= M_PKTHDR;
	mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc2 != NULL);
	if (mbufc2 == NULL) {
		safe_m_freem(&mbufc);
	mbufc2->m_pkthdr.len = size1 + size2;
	mbufc2->m_len = size1;
	xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
	XNB_ASSERT(xnb_pkt_is_valid(&pkt));
	XNB_ASSERT(pkt.size == size1 + size2);
	XNB_ASSERT(pkt.car == start);
	/*
	 * The second m_getm may allocate a new mbuf and append
	 * it to the chain, or it may simply extend the first mbuf.
	 */
	if (mbufc2->m_next != NULL) {
		/* Chained layout: car holds only the first region */
		XNB_ASSERT(pkt.car_size == size1);
		XNB_ASSERT(pkt.list_len == 1);
		XNB_ASSERT(pkt.cdr == start + 1);
	safe_m_freem(&mbufc2);
1531 /** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
1533 xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
1535 size_t size = 14 * MCLBYTES / 3;
1536 size_t size_remaining;
1537 int free_slots = 15;
1539 struct mbuf *mbufc, *m;
1541 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1542 XNB_ASSERT(mbufc != NULL);
1545 mbufc->m_flags |= M_PKTHDR;
1547 mbufc->m_pkthdr.len = size;
1548 size_remaining = size;
1549 for (m = mbufc; m != NULL; m = m->m_next) {
1550 m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1551 size_remaining -= m->m_len;
1554 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1555 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1556 XNB_ASSERT(pkt.size == size);
1557 XNB_ASSERT(pkt.car == start);
1558 XNB_ASSERT(pkt.car_size = mbufc->m_len);
1560 * There should be >1 response in the packet, and there is no
1563 XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
1564 XNB_ASSERT(pkt.cdr == pkt.car + 1);
1566 safe_m_freem(&mbufc);
1569 /** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
1571 xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
1573 size_t size = 14 * MCLBYTES / 3;
1574 size_t size_remaining;
1575 int free_slots = 15;
1577 struct mbuf *mbufc, *m;
1579 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1580 XNB_ASSERT(mbufc != NULL);
1584 mbufc->m_flags |= M_PKTHDR;
1585 mbufc->m_pkthdr.len = size;
1586 mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1587 mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
1588 size_remaining = size;
1589 for (m = mbufc; m != NULL; m = m->m_next) {
1590 m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1591 size_remaining -= m->m_len;
1594 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1595 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1596 XNB_ASSERT(pkt.size == size);
1597 XNB_ASSERT(pkt.car == start);
1598 XNB_ASSERT(pkt.car_size = mbufc->m_len);
1599 /* There should be >1 response in the packet, there is extra info */
1600 XNB_ASSERT(pkt.flags & NETRXF_extra_info);
1601 XNB_ASSERT(pkt.flags & NETRXF_data_validated);
1602 XNB_ASSERT(pkt.cdr == pkt.car + 2);
1603 XNB_ASSERT(pkt.extra.u.gso.size = mbufc->m_pkthdr.tso_segsz);
1604 XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
1605 XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));
1607 safe_m_freem(&mbufc);
1610 /** xnb_mbufc2pkt with insufficient space in the ring */
1612 xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
1614 size_t size = 14 * MCLBYTES / 3;
1615 size_t size_remaining;
1618 struct mbuf *mbufc, *m;
1621 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1622 XNB_ASSERT(mbufc != NULL);
1625 mbufc->m_flags |= M_PKTHDR;
1627 mbufc->m_pkthdr.len = size;
1628 size_remaining = size;
1629 for (m = mbufc; m != NULL; m = m->m_next) {
1630 m->m_len = MAX(M_TRAILINGSPACE(m), size_remaining);
1631 size_remaining -= m->m_len;
1634 error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1635 XNB_ASSERT(error == EAGAIN);
1636 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1638 safe_m_freem(&mbufc);
 * xnb_rxpkt2gnttab on an empty packet.  Should return an empty gnttab.
xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
	int free_slots = 60;
	mbuf = m_get(M_WAITOK, MT_DATA);
	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	/* No data -> no grant-copy entries */
	XNB_ASSERT(nr_entries == 0);
	safe_m_freem(&mbuf);
1663 /** xnb_rxpkt2gnttab on a short packet without extra data */
1665 xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
1669 int free_slots = 60;
1671 struct netif_rx_request *req;
1674 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1675 mbuf->m_flags |= M_PKTHDR;
1676 mbuf->m_pkthdr.len = size;
1679 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1680 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1681 xnb_unit_pvt.txf.req_prod_pvt);
1684 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1685 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1687 XNB_ASSERT(nr_entries == 1);
1688 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1689 /* flags should indicate gref's for dest */
1690 XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
1691 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
1692 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1693 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
1694 mtod(mbuf, vm_offset_t)));
1695 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
1696 virt_to_mfn(mtod(mbuf, vm_offset_t)));
1697 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1699 safe_m_freem(&mbuf);
1703 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chai
1706 xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1711 size_t total_granted_size = 0;
1712 size_t size = MJUMPAGESIZE + 1;
1713 int free_slots = 60;
1714 RING_IDX start = 11;
1715 struct netif_rx_request *req;
1716 struct mbuf *mbuf, *m;
1718 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1719 mbuf->m_flags |= M_PKTHDR;
1720 mbuf->m_pkthdr.len = size;
1723 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1725 for (i = 0, m=mbuf; m != NULL; i++, m = m->m_next) {
1726 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1727 xnb_unit_pvt.txf.req_prod_pvt);
1733 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1734 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1736 XNB_ASSERT(nr_entries >= num_mbufs);
1737 for (i = 0; i < nr_entries; i++) {
1738 int end_offset = xnb_unit_pvt.gnttab[i].len +
1739 xnb_unit_pvt.gnttab[i].dest.offset;
1740 XNB_ASSERT(end_offset <= PAGE_SIZE);
1741 total_granted_size += xnb_unit_pvt.gnttab[i].len;
1743 XNB_ASSERT(total_granted_size == size);
 * xnb_rxpkt2rsp on an empty packet.  Shouldn't make any response.
xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
	int free_slots = 60;
	/* Snapshot the rings so we can prove they were not modified */
	netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
	netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
	mbuf = m_get(M_WAITOK, MT_DATA);
	xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	XNB_ASSERT(nr_reqs == 0);
	/* Neither the back ring nor the shared ring should have changed */
	    memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
	    memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
	safe_m_freem(&mbuf);
 * xnb_rxpkt2rsp on a short packet with no extras
xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
	int nr_entries, nr_reqs;
	int free_slots = 60;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	/* Prime the ring indices: one outstanding request at `start` */
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	/* One request consumed, one response produced */
	XNB_ASSERT(nr_reqs == 1);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == req->id);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
	/* status doubles as the byte count on success */
	XNB_ASSERT(rsp->status == size);
	safe_m_freem(&mbuf);
 * xnb_rxpkt2rsp with extra data
xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
	int nr_entries, nr_reqs;
	int free_slots = 15;
	uint16_t mss = TCP_MSS - 40;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	struct netif_extra_info *ext;
	mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	mbufc->m_flags |= M_PKTHDR;
	mbufc->m_pkthdr.len = size;
	/* TSO metadata forces an extra-info slot in the response */
	mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
	mbufc->m_pkthdr.tso_segsz = mss;
	mbufc->m_len = size;
	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	req->gref = gref + 1;
	/* Two outstanding requests: data slot plus extra-info slot */
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info));
	XNB_ASSERT((rsp->flags & NETRXF_data_validated));
	XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
	XNB_ASSERT(rsp->status == size);
	/* Second slot is the GSO extra-info record, not a data response */
	ext = (struct netif_extra_info*)
	    RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
	XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
	XNB_ASSERT(ext->u.gso.size == mss);
	XNB_ASSERT(ext->u.gso.type == XEN_NETIF_EXTRA_TYPE_GSO);
	safe_m_freem(&mbufc);
 * xnb_rxpkt2rsp on a packet with more than a page's worth of data.  It should
 * generate two response slots.
xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
	int nr_entries, nr_reqs;
	size_t size = PAGE_SIZE + 100;
	uint16_t gref1 = 24;
	uint16_t gref2 = 34;
	RING_IDX start = 15;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	/* Split the payload across the chain if m_getm chained mbufs */
	if (mbuf->m_next != NULL) {
		size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
		mbuf->m_len = first_len;
		mbuf->m_next->m_len = size - first_len;
	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 2;
	xnb_unit_pvt.rxs->rsp_prod = start;
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	XNB_ASSERT(nr_reqs == 2);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
	/* First response carries a full page and chains to the next */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id1);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(rsp->flags & NETRXF_more_data);
	XNB_ASSERT(rsp->status == PAGE_SIZE);
	/* Second response carries the 100-byte remainder and ends the chain */
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
	XNB_ASSERT(rsp->id == id2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
	XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
	XNB_ASSERT(rsp->status == size - PAGE_SIZE);
	safe_m_freem(&mbuf);
/** xnb_rxpkt2rsp on a grant table with two sub-page entries */
xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
	int nr_reqs, nr_entries;
	size_t size1 = MHLEN - 5;
	size_t size2 = MHLEN - 15;
	int free_slots = 32;
	RING_IDX start = 14;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc != NULL);
	mbufc->m_flags |= M_PKTHDR;
	/* Force a two-mbuf chain with two short data regions */
	m_getm(mbufc, size2, M_WAITOK, MT_DATA);
	XNB_ASSERT(mbufc->m_next != NULL);
	mbufc->m_pkthdr.len = size1 + size2;
	mbufc->m_len = size1;
	mbufc->m_next->m_len = size2;
	xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	/* Two grant copies, but both fit in one response slot */
	XNB_ASSERT(nr_entries == 2);
	XNB_ASSERT(nr_reqs == 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == size1 + size2);
	XNB_ASSERT(rsp->offset == 0);
	XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
	safe_m_freem(&mbufc);
 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
 * Note: this test will result in an error message being printed to the console
 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
	int nr_entries, nr_reqs;
	/* Sentinel value used to verify the next request is untouched */
	uint16_t canary = 6859;
	size_t size = 7 * MCLBYTES;
	struct netif_rx_request *req;
	struct netif_rx_response *rsp;
	mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
	mbuf->m_flags |= M_PKTHDR;
	mbuf->m_pkthdr.len = size;
	xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
	xnb_unit_pvt.rxb.req_cons = start;
	xnb_unit_pvt.rxb.rsp_prod_pvt = start;
	xnb_unit_pvt.rxs->req_prod = start + 1;
	xnb_unit_pvt.rxs->rsp_prod = start;
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
	    &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
	/* Inject the error */
	xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
	nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
	/* A single error response replaces the whole packet */
	XNB_ASSERT(nr_reqs == 1);
	XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
	rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
	XNB_ASSERT(rsp->id == id);
	XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
	/* The following request must not have been consumed or clobbered */
	req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
	XNB_ASSERT(req->gref == canary);
	XNB_ASSERT(req->id == canary);
	safe_m_freem(&mbuf);
2071 #if defined(INET) || defined(INET6)
 * xnb_add_mbuf_cksum on an ARP request packet
xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
	const size_t pkt_len = sizeof(struct ether_header) +
	    sizeof(struct ether_arp);
	struct ether_header *eh;
	struct ether_arp *ep;
	unsigned char pkt_orig[pkt_len];
	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example arp request */
	eh = mtod(mbufc, struct ether_header*);
	/* Broadcast destination MAC */
	eh->ether_dhost[0] = 0xff;
	eh->ether_dhost[1] = 0xff;
	eh->ether_dhost[2] = 0xff;
	eh->ether_dhost[3] = 0xff;
	eh->ether_dhost[4] = 0xff;
	eh->ether_dhost[5] = 0xff;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x15;
	eh->ether_shost[2] = 0x17;
	eh->ether_shost[3] = 0xe9;
	eh->ether_shost[4] = 0x30;
	eh->ether_shost[5] = 0x68;
	eh->ether_type = htons(ETHERTYPE_ARP);
	ep = (struct ether_arp*)(eh + 1);
	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
	ep->ea_hdr.ar_hln = 6;
	ep->ea_hdr.ar_pln = 4;
	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
	ep->arp_sha[0] = 0x00;
	ep->arp_sha[1] = 0x15;
	ep->arp_sha[2] = 0x17;
	ep->arp_sha[3] = 0xe9;
	ep->arp_sha[4] = 0x30;
	ep->arp_sha[5] = 0x68;
	/* Sender 192.168.10.4, target 192.168.10.6 */
	ep->arp_spa[0] = 0xc0;
	ep->arp_spa[1] = 0xa8;
	ep->arp_spa[2] = 0x0a;
	ep->arp_spa[3] = 0x04;
	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
	ep->arp_tpa[0] = 0xc0;
	ep->arp_tpa[1] = 0xa8;
	ep->arp_tpa[2] = 0x0a;
	ep->arp_tpa[3] = 0x06;
	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	/* Make a backup copy of the packet */
	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);
	/* Verify that the packet's data did not change (ARP has no csum) */
	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
 * Helper function that populates the ethernet header and IP header used by
 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
 * and must be large enough.
xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
    uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
	struct ether_header *eh;
	eh = mtod(m, struct ether_header*);
	eh->ether_dhost[0] = 0x00;
	eh->ether_dhost[1] = 0x16;
	eh->ether_dhost[2] = 0x3e;
	eh->ether_dhost[3] = 0x23;
	eh->ether_dhost[4] = 0x50;
	eh->ether_dhost[5] = 0x0b;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x16;
	eh->ether_shost[2] = 0x30;
	eh->ether_shost[3] = 0x00;
	eh->ether_shost[4] = 0x00;
	eh->ether_shost[5] = 0x00;
	eh->ether_type = htons(ETHERTYPE_IP);
	iph = (struct ip*)(eh + 1);
	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
	iph->ip_v = 4;	/* IP v4 */
	/* Caller-supplied fields are stored in network byte order */
	iph->ip_len = htons(ip_len);
	iph->ip_id = htons(ip_id);
	iph->ip_off = htons(ip_off);
	iph->ip_sum = htons(ip_sum);
	/* 192.168.10.4 -> 192.168.10.5 */
	iph->ip_src.s_addr = htonl(0xc0a80a04);
	iph->ip_dst.s_addr = htonl(0xc0a80a05);
2182 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
2186 xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
2188 const size_t icmp_len = 64; /* set by ping(1) */
2189 const size_t pkt_len = sizeof(struct ether_header) +
2190 sizeof(struct ip) + icmp_len;
2192 struct ether_header *eh;
2195 unsigned char pkt_orig[icmp_len];
2197 uint8_t *data_payload;
2199 const uint16_t ICMP_CSUM = 0xaed7;
2200 const uint16_t IP_CSUM = 0xe533;
2202 mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2203 /* Fill in an example ICMP ping request */
2204 eh = mtod(mbufc, struct ether_header*);
2205 xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
2206 iph = (struct ip*)(eh + 1);
2207 icmph = (struct icmp*)(iph + 1);
2208 icmph->icmp_type = ICMP_ECHO;
2209 icmph->icmp_code = 0;
2210 icmph->icmp_cksum = htons(ICMP_CSUM);
2211 icmph->icmp_id = htons(31492);
2212 icmph->icmp_seq = htons(0);
2214 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
2215 * For this test, we will set the bytes individually for portability.
2217 tv_field = (uint32_t*)(&(icmph->icmp_hun));
2218 tv_field[0] = 0x4f02cfac;
2219 tv_field[1] = 0x0007c46a;
2221 * Remainder of packet is an incrmenting 8 bit integer, starting with 8
2223 data_payload = (uint8_t*)(&tv_field[2]);
2224 for (i = 8; i < 37; i++) {
2225 *data_payload++ = i;
2228 /* fill in the length field */
2229 mbufc->m_len = pkt_len;
2230 mbufc->m_pkthdr.len = pkt_len;
2231 /* indicate that the netfront uses hw-assisted checksums */
2232 mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
2233 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2235 bcopy(mtod(mbufc, const void*), pkt_orig, icmp_len);
2236 /* Function under test */
2237 xnb_add_mbuf_cksum(mbufc);
2239 /* Check the IP checksum */
2240 XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2242 /* Check that the ICMP packet did not change */
2243 XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len));
 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
	const size_t udp_len = 16;
	const size_t pkt_len = sizeof(struct ether_header) +
	    sizeof(struct ip) + udp_len;
	struct ether_header *eh;
	uint8_t *data_payload;
	/* Expected checksums after xnb_add_mbuf_cksum runs */
	const uint16_t IP_CSUM = 0xe56b;
	const uint16_t UDP_CSUM = 0xdde2;
	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222 */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
	iph = (struct ip*)(eh + 1);
	udp = (struct udphdr*)(iph + 1);
	udp->uh_sport = htons(0x51ae);
	udp->uh_dport = htons(0x08ae);
	udp->uh_ulen = htons(udp_len);
	udp->uh_sum = htons(0xbaad);	/* xnb_add_mbuf_cksum will fill this in */
	data_payload = (uint8_t*)(udp + 1);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';
	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);
	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));
 * Helper function that populates a TCP packet used by all of the
 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
xnb_fill_tcp(struct mbuf *m)
	struct ether_header *eh;
	uint8_t *data_payload;
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	eh = mtod(m, struct ether_header*);
	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sport = htons(0x9cd9);
	tcp->th_dport = htons(2222);
	tcp->th_seq = htonl(0x00f72b10);
	tcp->th_ack = htonl(0x7f37ba6c);
	/* PSH|ACK */
	tcp->th_flags = 0x18;
	tcp->th_win = htons(0x410);
	/* th_sum is incorrect; will be inserted by function under test */
	tcp->th_sum = htons(0xbaad);
	tcp->th_urp = htons(0);
	/*
	 * The following 12 bytes of options encode:
	 * [nop, nop, TS val 33247 ecr 3457687679]
	 */
	options = (uint32_t*)(tcp + 1);
	options[0] = htonl(0x0101080a);
	options[1] = htonl(0x000081df);
	options[2] = htonl(0xce18207f);
	data_payload = (uint8_t*)(&options[3]);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';
2352 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
2356 xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
2358 const size_t payload_len = 8;
2359 const size_t tcp_options_len = 12;
2360 const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2361 sizeof(struct tcphdr) + tcp_options_len + payload_len;
2363 struct ether_header *eh;
2366 const uint16_t IP_CSUM = 0xa55a;
2367 const uint16_t TCP_CSUM = 0x2f64;
2369 mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2370 /* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2371 xnb_fill_tcp(mbufc);
2372 eh = mtod(mbufc, struct ether_header*);
2373 iph = (struct ip*)(eh + 1);
2374 tcp = (struct tcphdr*)(iph + 1);
2376 /* fill in the length field */
2377 mbufc->m_len = pkt_len;
2378 mbufc->m_pkthdr.len = pkt_len;
2379 /* indicate that the netfront uses hw-assisted checksums */
2380 mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
2381 CSUM_DATA_VALID | CSUM_PSEUDO_HDR;
2383 /* Function under test */
2384 xnb_add_mbuf_cksum(mbufc);
2386 /* Check the checksums */
2387 XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2388 XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2394 * xnb_add_mbuf_cksum on a TCP packet that does not use HW assisted checksums
2397 xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
2399 const size_t payload_len = 8;
2400 const size_t tcp_options_len = 12;
2401 const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
2402 sizeof(struct tcphdr) + tcp_options_len + payload_len;
2404 struct ether_header *eh;
2407 /* Use deliberately bad checksums, and verify that they don't get */
2408 /* corrected by xnb_add_mbuf_cksum */
2409 const uint16_t IP_CSUM = 0xdead;
2410 const uint16_t TCP_CSUM = 0xbeef;
2412 mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
2413 /* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
2414 xnb_fill_tcp(mbufc);
2415 eh = mtod(mbufc, struct ether_header*);
2416 iph = (struct ip*)(eh + 1);
2417 iph->ip_sum = htons(IP_CSUM);
2418 tcp = (struct tcphdr*)(iph + 1);
2419 tcp->th_sum = htons(TCP_CSUM);
2421 /* fill in the length field */
2422 mbufc->m_len = pkt_len;
2423 mbufc->m_pkthdr.len = pkt_len;
2424 /* indicate that the netfront does not use hw-assisted checksums */
2425 mbufc->m_pkthdr.csum_flags = 0;
2427 /* Function under test */
2428 xnb_add_mbuf_cksum(mbufc);
2430 /* Check that the checksums didn't change */
2431 XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
2432 XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));
2436 #endif /* INET || INET6 */
2439 * sscanf on unsigned chars
xnb_sscanf_hhu(char *buffer, size_t buflen)
{
	/*
	 * Verify that sscanf's "%hhu" conversion parses an unsigned char
	 * and writes exactly one byte of the destination array.
	 */
	const char mystr[] = "137";
	unsigned char dest[12];
	int idx;

	/* Poison every slot so any stray write is detectable */
	for (idx = 0; idx < 12; idx++)
		dest[idx] = (unsigned char)'X';

	XNB_ASSERT(sscanf(mystr, "%hhu", &dest[4]) == 1);

	/* Only dest[4] may have changed, and it must now hold 137 */
	for (idx = 0; idx < 12; idx++) {
		if (idx == 4)
			XNB_ASSERT(dest[idx] == 137);
		else
			XNB_ASSERT(dest[idx] == 'X');
	}
}
2457 * sscanf on signed chars
xnb_sscanf_hhd(char *buffer, size_t buflen)
{
	/*
	 * Verify that sscanf's "%hhd" conversion parses a signed char
	 * and writes exactly one byte of the destination array.
	 */
	const char mystr[] = "-27";
	signed char dest[12];
	int idx;

	/* Poison every slot so any stray write is detectable */
	for (idx = 0; idx < 12; idx++)
		dest[idx] = 'X';

	XNB_ASSERT(sscanf(mystr, "%hhd", &dest[4]) == 1);

	/* Only dest[4] may have changed, and it must now hold -27 */
	for (idx = 0; idx < 12; idx++) {
		if (idx == 4)
			XNB_ASSERT(dest[idx] == -27);
		else
			XNB_ASSERT(dest[idx] == 'X');
	}
}
2475 * sscanf on signed long longs
xnb_sscanf_lld(char *buffer, size_t buflen)
{
	/*
	 * Verify that sscanf's "%lld" conversion parses a signed long long
	 * and writes exactly one element of the destination array.
	 */
	const char mystr[] = "-123456789012345"; /* about -2**47 */
	long long dest[3];
	int idx;

	/* Sentinel pattern in every slot to catch stray writes */
	for (idx = 0; idx < 3; idx++)
		dest[idx] = (long long)0xdeadbeefdeadbeef;

	XNB_ASSERT(sscanf(mystr, "%lld", &dest[1]) == 1);

	/* Only dest[1] may have changed, and it must hold the parsed value */
	for (idx = 0; idx < 3; idx++) {
		if (idx == 1)
			XNB_ASSERT(dest[idx] == -123456789012345ll);
		else
			XNB_ASSERT(dest[idx] == (long long)0xdeadbeefdeadbeef);
	}
}
2494 * sscanf on unsigned long longs
xnb_sscanf_llu(char *buffer, size_t buflen)
{
	/*
	 * Verify that sscanf's "%llu" conversion parses an unsigned long
	 * long (here one larger than LLONG_MAX) and writes exactly one
	 * element of the destination array.
	 */
	const char mystr[] = "12802747070103273189";
	unsigned long long dest[3];
	int i;

	/*
	 * Sentinel pattern in every slot to catch stray writes.  Use an
	 * unsigned constant: the array is unsigned long long, so the old
	 * (long long) cast only invited sign-conversion warnings.
	 */
	for (i = 0; i < 3; i++)
		dest[i] = 0xdeadbeefdeadbeefull;

	XNB_ASSERT(sscanf(mystr, "%llu", &dest[1]) == 1);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? 0xdeadbeefdeadbeefull :
		    12802747070103273189ull));
}
2513 * sscanf on unsigned short short n's
2516 xnb_sscanf_hhn(char *buffer, size_t buflen)
2518 const char mystr[] =
2519 "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2520 "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2521 "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
2522 unsigned char dest[12];
2525 for (i = 0; i < 12; i++)
2526 dest[i] = (unsigned char)'X';
2528 XNB_ASSERT(sscanf(mystr,
2529 "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
2530 "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
2531 "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]) == 0);
2532 for (i = 0; i < 12; i++)
2533 XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));