2 * Copyright (c) 2009-2011 Spectra Logic Corporation
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions
8 * 1. Redistributions of source code must retain the above copyright
9 * notice, this list of conditions, and the following disclaimer,
10 * without modification.
11 * 2. Redistributions in binary form must reproduce at minimum a disclaimer
12 * substantially similar to the "NO WARRANTY" disclaimer below
13 * ("Disclaimer") and any redistribution must be conditioned upon
14 * including a substantially similar Disclaimer requirement for further
15 * binary redistribution.
18 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
19 * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
20 * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTIBILITY AND FITNESS FOR
21 * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
22 * HOLDERS OR CONTRIBUTORS BE LIABLE FOR SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT,
26 * STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING
27 * IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
28 * POSSIBILITY OF SUCH DAMAGES.
30 * Authors: Justin T. Gibbs (Spectra Logic Corporation)
31 * Alan Somers (Spectra Logic Corporation)
32 * John Suykerbuyk (Spectra Logic Corporation)
35 #include <sys/cdefs.h>
36 __FBSDID("$FreeBSD$");
39 * \file netback_unit_tests.c
41 * \brief Unit tests for the Xen netback driver.
43 * Due to the driver's use of static functions, these tests cannot be compiled
44 * standalone; they must be #include'd from the driver's .c file.
48 /** Helper macro used to snprintf to a buffer and update the buffer pointer */
49 #define SNCATF(buffer, buflen, ...) do { \
50 size_t new_chars = snprintf(buffer, buflen, __VA_ARGS__); \
51 buffer += new_chars; \
52 /* be careful; snprintf's return value can be > buflen */ \
53 buflen -= MIN(buflen, new_chars); \
56 /* STRINGIFY and TOSTRING are used only to help turn __LINE__ into a string */
57 #define STRINGIFY(x) #x
58 #define TOSTRING(x) STRINGIFY(x)
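/*
 * Illustration (hypothetical line number): if __LINE__ expands to 123, then
 * STRINGIFY(__LINE__) yields the literal "__LINE__", because # stringizes
 * its argument without expanding it, while TOSTRING(__LINE__) expands the
 * argument first and yields "123".  XNB_ASSERT() below relies on this to
 * embed the failing line number in its error message.
 */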
61 * Writes an error message to buffer if cond is false
62 * Note the implied parameters buffer and buflen.
65 #define XNB_ASSERT(cond) ({ \
66 int passed = (cond); \
67 char *_buffer = (buffer); \
68 size_t _buflen = (buflen); \
70 strlcat(_buffer, __func__, _buflen); \
71 strlcat(_buffer, ":" TOSTRING(__LINE__) \
72 " Assertion Error: " #cond "\n", _buflen); \
78 * The signature used by all testcases. If the test writes anything
79 * to buffer, then it will be considered a failure
80 * \param buffer Return storage for error messages
81 * \param buflen The space available in the buffer
83 typedef void testcase_t(char *buffer, size_t buflen);
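/*
 * Sketch of the testcase convention (illustrative only; not one of the real
 * tests below): a testcase signals failure solely by appending to buffer,
 * which it normally does through XNB_ASSERT():
 *
 *	static void
 *	example_testcase(char *buffer, size_t buflen)
 *	{
 *		XNB_ASSERT(2 + 2 == 4);		// passes; writes nothing
 *		XNB_ASSERT(2 + 2 == 5);		// fails; appends a message
 *	}
 */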
86 * Signature used by setup functions
87 * \return nonzero on error
89 typedef int setup_t(void);
91 typedef void teardown_t(void);
93 /** A simple test fixture comprising setup, teardown, and test */
95 /** Will be run before the test to allocate and initialize variables */
98 /** Will be run if setup succeeds */
101 * Cleans up test data whether or not the setup succeeded */
102 teardown_t *teardown;
105 typedef struct test_fixture test_fixture_t;
107 static int xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags);
108 static int xnb_unit_test_runner(test_fixture_t const tests[], int ntests,
109 char *buffer, size_t buflen);
112 null_setup(void) { return 0; }
115 null_teardown(void) { }
117 static setup_t setup_pvt_data;
118 static teardown_t teardown_pvt_data;
119 static testcase_t xnb_ring2pkt_emptyring;
120 static testcase_t xnb_ring2pkt_1req;
121 static testcase_t xnb_ring2pkt_2req;
122 static testcase_t xnb_ring2pkt_3req;
123 static testcase_t xnb_ring2pkt_extra;
124 static testcase_t xnb_ring2pkt_partial;
125 static testcase_t xnb_ring2pkt_wraps;
126 static testcase_t xnb_txpkt2rsp_emptypkt;
127 static testcase_t xnb_txpkt2rsp_1req;
128 static testcase_t xnb_txpkt2rsp_extra;
129 static testcase_t xnb_txpkt2rsp_long;
130 static testcase_t xnb_txpkt2rsp_invalid;
131 static testcase_t xnb_txpkt2rsp_error;
132 static testcase_t xnb_txpkt2rsp_wraps;
133 static testcase_t xnb_pkt2mbufc_empty;
134 static testcase_t xnb_pkt2mbufc_short;
135 static testcase_t xnb_pkt2mbufc_csum;
136 static testcase_t xnb_pkt2mbufc_1cluster;
137 static testcase_t xnb_pkt2mbufc_largecluster;
138 static testcase_t xnb_pkt2mbufc_2cluster;
139 static testcase_t xnb_txpkt2gnttab_empty;
140 static testcase_t xnb_txpkt2gnttab_short;
141 static testcase_t xnb_txpkt2gnttab_2req;
142 static testcase_t xnb_txpkt2gnttab_2cluster;
143 static testcase_t xnb_update_mbufc_short;
144 static testcase_t xnb_update_mbufc_2req;
145 static testcase_t xnb_update_mbufc_2cluster;
146 static testcase_t xnb_mbufc2pkt_empty;
147 static testcase_t xnb_mbufc2pkt_short;
148 static testcase_t xnb_mbufc2pkt_1cluster;
149 static testcase_t xnb_mbufc2pkt_2short;
150 static testcase_t xnb_mbufc2pkt_long;
151 static testcase_t xnb_mbufc2pkt_extra;
152 static testcase_t xnb_mbufc2pkt_nospace;
153 static testcase_t xnb_rxpkt2gnttab_empty;
154 static testcase_t xnb_rxpkt2gnttab_short;
155 static testcase_t xnb_rxpkt2gnttab_2req;
156 static testcase_t xnb_rxpkt2rsp_empty;
157 static testcase_t xnb_rxpkt2rsp_short;
158 static testcase_t xnb_rxpkt2rsp_extra;
159 static testcase_t xnb_rxpkt2rsp_2short;
160 static testcase_t xnb_rxpkt2rsp_2slots;
161 static testcase_t xnb_rxpkt2rsp_copyerror;
162 static testcase_t xnb_sscanf_llu;
163 static testcase_t xnb_sscanf_lld;
164 static testcase_t xnb_sscanf_hhu;
165 static testcase_t xnb_sscanf_hhd;
166 static testcase_t xnb_sscanf_hhn;
168 #if defined(INET) || defined(INET6)
169 /* TODO: add test cases for xnb_add_mbuf_cksum for IPV6 tcp and udp */
170 static testcase_t xnb_add_mbuf_cksum_arp;
171 static testcase_t xnb_add_mbuf_cksum_tcp;
172 static testcase_t xnb_add_mbuf_cksum_udp;
173 static testcase_t xnb_add_mbuf_cksum_icmp;
174 static testcase_t xnb_add_mbuf_cksum_tcp_swcksum;
175 static void xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len,
176 uint16_t ip_id, uint16_t ip_p,
177 uint16_t ip_off, uint16_t ip_sum);
178 static void xnb_fill_tcp(struct mbuf *m);
179 #endif /* INET || INET6 */
181 /** Private data used by unit tests */
183 gnttab_copy_table gnttab;
184 netif_rx_back_ring_t rxb;
185 netif_rx_front_ring_t rxf;
186 netif_tx_back_ring_t txb;
187 netif_tx_front_ring_t txf;
189 netif_rx_sring_t* rxs;
190 netif_tx_sring_t* txs;
193 static inline void safe_m_freem(struct mbuf **ppMbuf) {
194 if (*ppMbuf != NULL) {
201 * The unit test runner. It will run every supplied test and return an
202 * output message as a string
203 * \param tests An array of tests. Every test will be attempted.
204 * \param ntests The length of tests
205 * \param buffer Return storage for the result string
206 * \param buflen The length of buffer
207 * \return The number of tests that failed
210 xnb_unit_test_runner(test_fixture_t const tests[], int ntests, char *buffer,
217 for (i = 0; i < ntests; i++) {
218 int error = tests[i].setup();
220 SNCATF(buffer, buflen,
221 "Setup failed for test idx %d\n", i);
226 tests[i].test(buffer, buflen);
227 new_chars = strnlen(buffer, buflen);
238 n_passes = ntests - n_failures;
240 SNCATF(buffer, buflen, "%d Tests Passed\n", n_passes);
242 if (n_failures > 0) {
243 SNCATF(buffer, buflen, "%d Tests FAILED\n", n_failures);
249 /** Number of unit tests. Must match the length of the tests array below */
250 #define TOTAL_TESTS (53)
252 * Max memory available for returning results. 400 chars/test should give
253 * enough space for a five line error message for every test
255 #define TOTAL_BUFLEN (400 * TOTAL_TESTS + 2)
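/*
 * For reference, that works out to 400 * 53 + 2 = 21202 bytes of statically
 * allocated kernel memory for the results buffer below.
 */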
258 * Called from userspace by a sysctl. Runs all internal unit tests, and
259 * returns the results to userspace as a string
261 * \param arg1 pointer to an xnb_softc for a specific xnb device
263 * \param req sysctl access structure
264 * \return a string via the special SYSCTL_OUT macro.
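 *
 * Illustrative invocation from userland (the OID name here is hypothetical;
 * the actual name depends on where the driver attaches this handler):
 *
 *	# sysctl dev.xnb.0.unit_test_results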
268 xnb_unit_test_main(SYSCTL_HANDLER_ARGS) {
269 test_fixture_t const tests[TOTAL_TESTS] = {
270 {setup_pvt_data, xnb_ring2pkt_emptyring, teardown_pvt_data},
271 {setup_pvt_data, xnb_ring2pkt_1req, teardown_pvt_data},
272 {setup_pvt_data, xnb_ring2pkt_2req, teardown_pvt_data},
273 {setup_pvt_data, xnb_ring2pkt_3req, teardown_pvt_data},
274 {setup_pvt_data, xnb_ring2pkt_extra, teardown_pvt_data},
275 {setup_pvt_data, xnb_ring2pkt_partial, teardown_pvt_data},
276 {setup_pvt_data, xnb_ring2pkt_wraps, teardown_pvt_data},
277 {setup_pvt_data, xnb_txpkt2rsp_emptypkt, teardown_pvt_data},
278 {setup_pvt_data, xnb_txpkt2rsp_1req, teardown_pvt_data},
279 {setup_pvt_data, xnb_txpkt2rsp_extra, teardown_pvt_data},
280 {setup_pvt_data, xnb_txpkt2rsp_long, teardown_pvt_data},
281 {setup_pvt_data, xnb_txpkt2rsp_invalid, teardown_pvt_data},
282 {setup_pvt_data, xnb_txpkt2rsp_error, teardown_pvt_data},
283 {setup_pvt_data, xnb_txpkt2rsp_wraps, teardown_pvt_data},
284 {setup_pvt_data, xnb_pkt2mbufc_empty, teardown_pvt_data},
285 {setup_pvt_data, xnb_pkt2mbufc_short, teardown_pvt_data},
286 {setup_pvt_data, xnb_pkt2mbufc_csum, teardown_pvt_data},
287 {setup_pvt_data, xnb_pkt2mbufc_1cluster, teardown_pvt_data},
288 {setup_pvt_data, xnb_pkt2mbufc_largecluster, teardown_pvt_data},
289 {setup_pvt_data, xnb_pkt2mbufc_2cluster, teardown_pvt_data},
290 {setup_pvt_data, xnb_txpkt2gnttab_empty, teardown_pvt_data},
291 {setup_pvt_data, xnb_txpkt2gnttab_short, teardown_pvt_data},
292 {setup_pvt_data, xnb_txpkt2gnttab_2req, teardown_pvt_data},
293 {setup_pvt_data, xnb_txpkt2gnttab_2cluster, teardown_pvt_data},
294 {setup_pvt_data, xnb_update_mbufc_short, teardown_pvt_data},
295 {setup_pvt_data, xnb_update_mbufc_2req, teardown_pvt_data},
296 {setup_pvt_data, xnb_update_mbufc_2cluster, teardown_pvt_data},
297 {setup_pvt_data, xnb_mbufc2pkt_empty, teardown_pvt_data},
298 {setup_pvt_data, xnb_mbufc2pkt_short, teardown_pvt_data},
299 {setup_pvt_data, xnb_mbufc2pkt_1cluster, teardown_pvt_data},
300 {setup_pvt_data, xnb_mbufc2pkt_2short, teardown_pvt_data},
301 {setup_pvt_data, xnb_mbufc2pkt_long, teardown_pvt_data},
302 {setup_pvt_data, xnb_mbufc2pkt_extra, teardown_pvt_data},
303 {setup_pvt_data, xnb_mbufc2pkt_nospace, teardown_pvt_data},
304 {setup_pvt_data, xnb_rxpkt2gnttab_empty, teardown_pvt_data},
305 {setup_pvt_data, xnb_rxpkt2gnttab_short, teardown_pvt_data},
306 {setup_pvt_data, xnb_rxpkt2gnttab_2req, teardown_pvt_data},
307 {setup_pvt_data, xnb_rxpkt2rsp_empty, teardown_pvt_data},
308 {setup_pvt_data, xnb_rxpkt2rsp_short, teardown_pvt_data},
309 {setup_pvt_data, xnb_rxpkt2rsp_extra, teardown_pvt_data},
310 {setup_pvt_data, xnb_rxpkt2rsp_2short, teardown_pvt_data},
311 {setup_pvt_data, xnb_rxpkt2rsp_2slots, teardown_pvt_data},
312 {setup_pvt_data, xnb_rxpkt2rsp_copyerror, teardown_pvt_data},
313 #if defined(INET) || defined(INET6)
314 {null_setup, xnb_add_mbuf_cksum_arp, null_teardown},
315 {null_setup, xnb_add_mbuf_cksum_icmp, null_teardown},
316 {null_setup, xnb_add_mbuf_cksum_tcp, null_teardown},
317 {null_setup, xnb_add_mbuf_cksum_tcp_swcksum, null_teardown},
318 {null_setup, xnb_add_mbuf_cksum_udp, null_teardown},
320 {null_setup, xnb_sscanf_hhd, null_teardown},
321 {null_setup, xnb_sscanf_hhu, null_teardown},
322 {null_setup, xnb_sscanf_lld, null_teardown},
323 {null_setup, xnb_sscanf_llu, null_teardown},
324 {null_setup, xnb_sscanf_hhn, null_teardown},
327 * results is static so that the data will persist after this function
328 * returns. The sysctl code expects us to return a constant string.
329 * \todo: the static variable is not thread safe. Put a mutex around it.
332 static char results[TOTAL_BUFLEN];
334 /* empty the result strings */
336 xnb_unit_test_runner(tests, TOTAL_TESTS, results, TOTAL_BUFLEN);
338 return (SYSCTL_OUT(req, results, strnlen(results, TOTAL_BUFLEN)));
346 bzero(xnb_unit_pvt.gnttab, sizeof(xnb_unit_pvt.gnttab));
348 xnb_unit_pvt.txs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
349 if (xnb_unit_pvt.txs != NULL) {
350 SHARED_RING_INIT(xnb_unit_pvt.txs);
351 BACK_RING_INIT(&xnb_unit_pvt.txb, xnb_unit_pvt.txs, PAGE_SIZE);
352 FRONT_RING_INIT(&xnb_unit_pvt.txf, xnb_unit_pvt.txs, PAGE_SIZE);
357 xnb_unit_pvt.ifp = if_alloc(IFT_ETHER);
358 if (xnb_unit_pvt.ifp == NULL) {
362 xnb_unit_pvt.rxs = malloc(PAGE_SIZE, M_XENNETBACK, M_WAITOK|M_ZERO);
363 if (xnb_unit_pvt.rxs != NULL) {
364 SHARED_RING_INIT(xnb_unit_pvt.rxs);
365 BACK_RING_INIT(&xnb_unit_pvt.rxb, xnb_unit_pvt.rxs, PAGE_SIZE);
366 FRONT_RING_INIT(&xnb_unit_pvt.rxf, xnb_unit_pvt.rxs, PAGE_SIZE);
375 teardown_pvt_data(void)
377 if (xnb_unit_pvt.txs != NULL) {
378 free(xnb_unit_pvt.txs, M_XENNETBACK);
380 if (xnb_unit_pvt.rxs != NULL) {
381 free(xnb_unit_pvt.rxs, M_XENNETBACK);
383 if (xnb_unit_pvt.ifp != NULL) {
384 if_free(xnb_unit_pvt.ifp);
389 * Verify that xnb_ring2pkt will not consume any requests from an empty ring
392 xnb_ring2pkt_emptyring(char *buffer, size_t buflen)
397 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
398 xnb_unit_pvt.txb.req_cons);
399 XNB_ASSERT(num_consumed == 0);
403 * Verify that xnb_ring2pkt can convert a single request packet correctly
406 xnb_ring2pkt_1req(char *buffer, size_t buflen)
410 struct netif_tx_request *req;
412 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
413 xnb_unit_pvt.txf.req_prod_pvt);
416 req->size = 69; /* arbitrary number for test */
417 xnb_unit_pvt.txf.req_prod_pvt++;
419 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
421 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
422 xnb_unit_pvt.txb.req_cons);
423 XNB_ASSERT(num_consumed == 1);
424 XNB_ASSERT(pkt.size == 69);
425 XNB_ASSERT(pkt.car_size == 69);
426 XNB_ASSERT(pkt.flags == 0);
427 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
428 XNB_ASSERT(pkt.list_len == 1);
429 XNB_ASSERT(pkt.car == 0);
433 * Verify that xnb_ring2pkt can convert a two request packet correctly.
434 * This tests handling of the MORE_DATA flag and cdr
437 xnb_ring2pkt_2req(char *buffer, size_t buflen)
441 struct netif_tx_request *req;
442 RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
444 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
445 xnb_unit_pvt.txf.req_prod_pvt);
446 req->flags = NETTXF_more_data;
448 xnb_unit_pvt.txf.req_prod_pvt++;
450 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
451 xnb_unit_pvt.txf.req_prod_pvt);
454 xnb_unit_pvt.txf.req_prod_pvt++;
456 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
458 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
459 xnb_unit_pvt.txb.req_cons);
460 XNB_ASSERT(num_consumed == 2);
461 XNB_ASSERT(pkt.size == 100);
462 XNB_ASSERT(pkt.car_size == 60);
463 XNB_ASSERT(pkt.flags == 0);
464 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
465 XNB_ASSERT(pkt.list_len == 2);
466 XNB_ASSERT(pkt.car == start_idx);
467 XNB_ASSERT(pkt.cdr == start_idx + 1);
471 * Verify that xnb_ring2pkt can convert a three request packet correctly
474 xnb_ring2pkt_3req(char *buffer, size_t buflen)
478 struct netif_tx_request *req;
479 RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
481 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
482 xnb_unit_pvt.txf.req_prod_pvt);
483 req->flags = NETTXF_more_data;
485 xnb_unit_pvt.txf.req_prod_pvt++;
487 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
488 xnb_unit_pvt.txf.req_prod_pvt);
489 req->flags = NETTXF_more_data;
491 xnb_unit_pvt.txf.req_prod_pvt++;
493 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
494 xnb_unit_pvt.txf.req_prod_pvt);
497 xnb_unit_pvt.txf.req_prod_pvt++;
499 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
501 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
502 xnb_unit_pvt.txb.req_cons);
503 XNB_ASSERT(num_consumed == 3);
504 XNB_ASSERT(pkt.size == 200);
505 XNB_ASSERT(pkt.car_size == 110);
506 XNB_ASSERT(pkt.flags == 0);
507 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
508 XNB_ASSERT(pkt.list_len == 3);
509 XNB_ASSERT(pkt.car == start_idx);
510 XNB_ASSERT(pkt.cdr == start_idx + 1);
511 XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
515 * Verify that xnb_ring2pkt can read extra info
518 xnb_ring2pkt_extra(char *buffer, size_t buflen)
522 struct netif_tx_request *req;
523 struct netif_extra_info *ext;
524 RING_IDX start_idx = xnb_unit_pvt.txf.req_prod_pvt;
526 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
527 xnb_unit_pvt.txf.req_prod_pvt);
528 req->flags = NETTXF_extra_info | NETTXF_more_data;
530 xnb_unit_pvt.txf.req_prod_pvt++;
532 ext = (struct netif_extra_info*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
533 xnb_unit_pvt.txf.req_prod_pvt);
535 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
536 ext->u.gso.size = 250;
537 ext->u.gso.type = XEN_NETIF_GSO_TYPE_TCPV4;
538 ext->u.gso.features = 0;
539 xnb_unit_pvt.txf.req_prod_pvt++;
541 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
542 xnb_unit_pvt.txf.req_prod_pvt);
545 xnb_unit_pvt.txf.req_prod_pvt++;
547 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
549 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
550 xnb_unit_pvt.txb.req_cons);
551 XNB_ASSERT(num_consumed == 3);
552 XNB_ASSERT(pkt.extra.flags == 0);
553 XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
554 XNB_ASSERT(pkt.extra.u.gso.size == 250);
555 XNB_ASSERT(pkt.extra.u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
556 XNB_ASSERT(pkt.size == 150);
557 XNB_ASSERT(pkt.car_size == 100);
558 XNB_ASSERT(pkt.flags == NETTXF_extra_info);
559 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
560 XNB_ASSERT(pkt.list_len == 2);
561 XNB_ASSERT(pkt.car == start_idx);
562 XNB_ASSERT(pkt.cdr == start_idx + 2);
563 XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr) == req);
567 * Verify that xnb_ring2pkt will consume no requests if the entire packet is
568 * not yet in the ring
571 xnb_ring2pkt_partial(char *buffer, size_t buflen)
575 struct netif_tx_request *req;
577 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
578 xnb_unit_pvt.txf.req_prod_pvt);
579 req->flags = NETTXF_more_data;
581 xnb_unit_pvt.txf.req_prod_pvt++;
583 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
585 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
586 xnb_unit_pvt.txb.req_cons);
587 XNB_ASSERT(num_consumed == 0);
588 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
592 * Verify that xnb_ring2pkt can read a packet whose requests wrap around
593 * the end of the ring
596 xnb_ring2pkt_wraps(char *buffer, size_t buflen)
600 struct netif_tx_request *req;
604 * Manually tweak the ring indices to create a ring with no responses
605 * and the next request slot at position 2 from the end
607 rsize = RING_SIZE(&xnb_unit_pvt.txf);
608 xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
609 xnb_unit_pvt.txf.rsp_cons = rsize - 2;
610 xnb_unit_pvt.txs->req_prod = rsize - 2;
611 xnb_unit_pvt.txs->req_event = rsize - 1;
612 xnb_unit_pvt.txs->rsp_prod = rsize - 2;
613 xnb_unit_pvt.txs->rsp_event = rsize - 1;
614 xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
615 xnb_unit_pvt.txb.req_cons = rsize - 2;
617 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
618 xnb_unit_pvt.txf.req_prod_pvt);
619 req->flags = NETTXF_more_data;
621 xnb_unit_pvt.txf.req_prod_pvt++;
623 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
624 xnb_unit_pvt.txf.req_prod_pvt);
625 req->flags = NETTXF_more_data;
627 xnb_unit_pvt.txf.req_prod_pvt++;
629 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
630 xnb_unit_pvt.txf.req_prod_pvt);
633 xnb_unit_pvt.txf.req_prod_pvt++;
635 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
637 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
638 xnb_unit_pvt.txb.req_cons);
639 XNB_ASSERT(num_consumed == 3);
640 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
641 XNB_ASSERT(pkt.list_len == 3);
642 XNB_ASSERT(RING_GET_REQUEST(&xnb_unit_pvt.txb, pkt.cdr + 1) == req);
647 * xnb_txpkt2rsp should do nothing for an empty packet
650 xnb_txpkt2rsp_emptypkt(char *buffer, size_t buflen)
654 netif_tx_back_ring_t txb_backup = xnb_unit_pvt.txb;
655 netif_tx_sring_t txs_backup = *xnb_unit_pvt.txs;
658 /* must call xnb_ring2pkt just to initialize pkt */
659 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
660 xnb_unit_pvt.txb.req_cons);
661 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
663 memcmp(&txb_backup, &xnb_unit_pvt.txb, sizeof(txb_backup)) == 0);
665 memcmp(&txs_backup, xnb_unit_pvt.txs, sizeof(txs_backup)) == 0);
669 * xnb_txpkt2rsp responding to one request
672 xnb_txpkt2rsp_1req(char *buffer, size_t buflen)
674 uint16_t num_consumed;
676 struct netif_tx_request *req;
677 struct netif_tx_response *rsp;
679 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
680 xnb_unit_pvt.txf.req_prod_pvt);
683 xnb_unit_pvt.txf.req_prod_pvt++;
685 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
687 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
688 xnb_unit_pvt.txb.req_cons);
689 xnb_unit_pvt.txb.req_cons += num_consumed;
691 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
692 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
695 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
696 XNB_ASSERT(rsp->id == req->id);
697 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
701 * xnb_txpkt2rsp responding to 1 data request and 1 extra info
704 xnb_txpkt2rsp_extra(char *buffer, size_t buflen)
706 uint16_t num_consumed;
708 struct netif_tx_request *req;
709 netif_extra_info_t *ext;
710 struct netif_tx_response *rsp;
712 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
713 xnb_unit_pvt.txf.req_prod_pvt);
715 req->flags = NETTXF_extra_info;
717 xnb_unit_pvt.txf.req_prod_pvt++;
719 ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
720 xnb_unit_pvt.txf.req_prod_pvt);
721 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
723 xnb_unit_pvt.txf.req_prod_pvt++;
725 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
727 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
728 xnb_unit_pvt.txb.req_cons);
729 xnb_unit_pvt.txb.req_cons += num_consumed;
731 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
734 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
736 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
737 XNB_ASSERT(rsp->id == req->id);
738 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
740 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
741 xnb_unit_pvt.txf.rsp_cons + 1);
742 XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
746 * xnb_txpkt2rsp responding to 3 data requests and 1 extra info
749 xnb_txpkt2rsp_long(char *buffer, size_t buflen)
751 uint16_t num_consumed;
753 struct netif_tx_request *req;
754 netif_extra_info_t *ext;
755 struct netif_tx_response *rsp;
757 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
758 xnb_unit_pvt.txf.req_prod_pvt);
760 req->flags = NETTXF_extra_info | NETTXF_more_data;
762 xnb_unit_pvt.txf.req_prod_pvt++;
764 ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
765 xnb_unit_pvt.txf.req_prod_pvt);
766 ext->type = XEN_NETIF_EXTRA_TYPE_GSO;
768 xnb_unit_pvt.txf.req_prod_pvt++;
770 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
771 xnb_unit_pvt.txf.req_prod_pvt);
773 req->flags = NETTXF_more_data;
775 xnb_unit_pvt.txf.req_prod_pvt++;
777 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
778 xnb_unit_pvt.txf.req_prod_pvt);
782 xnb_unit_pvt.txf.req_prod_pvt++;
784 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
786 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
787 xnb_unit_pvt.txb.req_cons);
788 xnb_unit_pvt.txb.req_cons += num_consumed;
790 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
793 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
795 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
796 XNB_ASSERT(rsp->id ==
797 RING_GET_REQUEST(&xnb_unit_pvt.txf, 0)->id);
798 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
800 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
801 xnb_unit_pvt.txf.rsp_cons + 1);
802 XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
804 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
805 xnb_unit_pvt.txf.rsp_cons + 2);
806 XNB_ASSERT(rsp->id ==
807 RING_GET_REQUEST(&xnb_unit_pvt.txf, 2)->id);
808 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
810 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
811 xnb_unit_pvt.txf.rsp_cons + 3);
812 XNB_ASSERT(rsp->id ==
813 RING_GET_REQUEST(&xnb_unit_pvt.txf, 3)->id);
814 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
818 * xnb_txpkt2rsp responding to an invalid packet.
819 * Note: this test will result in an error message being printed to the console
821 * xnb(xnb_ring2pkt:1306): Unknown extra info type 255. Discarding packet
824 xnb_txpkt2rsp_invalid(char *buffer, size_t buflen)
826 uint16_t num_consumed;
828 struct netif_tx_request *req;
829 netif_extra_info_t *ext;
830 struct netif_tx_response *rsp;
832 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
833 xnb_unit_pvt.txf.req_prod_pvt);
835 req->flags = NETTXF_extra_info;
837 xnb_unit_pvt.txf.req_prod_pvt++;
839 ext = (netif_extra_info_t*) RING_GET_REQUEST(&xnb_unit_pvt.txf,
840 xnb_unit_pvt.txf.req_prod_pvt);
841 ext->type = 0xFF; /* Invalid extra type */
843 xnb_unit_pvt.txf.req_prod_pvt++;
845 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
847 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
848 xnb_unit_pvt.txb.req_cons);
849 xnb_unit_pvt.txb.req_cons += num_consumed;
850 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
852 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
855 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
857 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
858 XNB_ASSERT(rsp->id == req->id);
859 XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
861 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
862 xnb_unit_pvt.txf.rsp_cons + 1);
863 XNB_ASSERT(rsp->status == NETIF_RSP_NULL);
867 * xnb_txpkt2rsp responding to one request which caused an error
870 xnb_txpkt2rsp_error(char *buffer, size_t buflen)
872 uint16_t num_consumed;
874 struct netif_tx_request *req;
875 struct netif_tx_response *rsp;
877 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
878 xnb_unit_pvt.txf.req_prod_pvt);
881 xnb_unit_pvt.txf.req_prod_pvt++;
883 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
885 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
886 xnb_unit_pvt.txb.req_cons);
887 xnb_unit_pvt.txb.req_cons += num_consumed;
889 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 1);
890 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb, xnb_unit_pvt.txf.rsp_cons);
893 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
894 XNB_ASSERT(rsp->id == req->id);
895 XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
899 * xnb_txpkt2rsp's responses wrap around the end of the ring
902 xnb_txpkt2rsp_wraps(char *buffer, size_t buflen)
906 struct netif_tx_request *req;
907 struct netif_tx_response *rsp;
911 * Manually tweak the ring indices to create a ring with no responses
912 * and the next request slot at position 2 from the end
914 rsize = RING_SIZE(&xnb_unit_pvt.txf);
915 xnb_unit_pvt.txf.req_prod_pvt = rsize - 2;
916 xnb_unit_pvt.txf.rsp_cons = rsize - 2;
917 xnb_unit_pvt.txs->req_prod = rsize - 2;
918 xnb_unit_pvt.txs->req_event = rsize - 1;
919 xnb_unit_pvt.txs->rsp_prod = rsize - 2;
920 xnb_unit_pvt.txs->rsp_event = rsize - 1;
921 xnb_unit_pvt.txb.rsp_prod_pvt = rsize - 2;
922 xnb_unit_pvt.txb.req_cons = rsize - 2;
924 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
925 xnb_unit_pvt.txf.req_prod_pvt);
926 req->flags = NETTXF_more_data;
929 xnb_unit_pvt.txf.req_prod_pvt++;
931 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
932 xnb_unit_pvt.txf.req_prod_pvt);
933 req->flags = NETTXF_more_data;
936 xnb_unit_pvt.txf.req_prod_pvt++;
938 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
939 xnb_unit_pvt.txf.req_prod_pvt);
943 xnb_unit_pvt.txf.req_prod_pvt++;
945 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
947 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
948 xnb_unit_pvt.txb.req_cons);
950 xnb_txpkt2rsp(&pkt, &xnb_unit_pvt.txb, 0);
953 xnb_unit_pvt.txb.rsp_prod_pvt == xnb_unit_pvt.txs->req_prod);
954 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.txb,
955 xnb_unit_pvt.txf.rsp_cons + 2);
956 XNB_ASSERT(rsp->id == req->id);
957 XNB_ASSERT(rsp->status == NETIF_RSP_OKAY);
962 * Helper function used to set up pkt2mbufc tests
963 * \param size size in bytes of the single request to push to the ring
964 * \param flags optional flags to put in the netif request
965 * \param[out] pkt the returned packet object
966 * \return number of requests consumed from the ring
969 xnb_get1pkt(struct xnb_pkt *pkt, size_t size, uint16_t flags)
971 struct netif_tx_request *req;
973 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
974 xnb_unit_pvt.txf.req_prod_pvt);
977 xnb_unit_pvt.txf.req_prod_pvt++;
979 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
981 return xnb_ring2pkt(pkt, &xnb_unit_pvt.txb,
982 xnb_unit_pvt.txb.req_cons);
986 * xnb_pkt2mbufc on an empty packet
989 xnb_pkt2mbufc_empty(char *buffer, size_t buflen)
996 /* must call xnb_ring2pkt just to initialize pkt */
997 num_consumed = xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb,
998 xnb_unit_pvt.txb.req_cons);
1000 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1001 safe_m_freem(&pMbuf);
1005 * xnb_pkt2mbufc on short packet that can fit in an mbuf internal buffer
1008 xnb_pkt2mbufc_short(char *buffer, size_t buflen)
1010 const size_t size = MINCLSIZE - 1;
1014 xnb_get1pkt(&pkt, size, 0);
1016 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1017 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1018 safe_m_freem(&pMbuf);
1022 * xnb_pkt2mbufc on a short packet whose checksum was validated by the netfront
1025 xnb_pkt2mbufc_csum(char *buffer, size_t buflen)
1027 const size_t size = MINCLSIZE - 1;
1031 xnb_get1pkt(&pkt, size, NETTXF_data_validated);
1033 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1034 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1035 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_CHECKED);
1036 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_IP_VALID);
1037 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_DATA_VALID);
1038 XNB_ASSERT(pMbuf->m_pkthdr.csum_flags & CSUM_PSEUDO_HDR);
1039 safe_m_freem(&pMbuf);
1043 * xnb_pkt2mbufc on packet that can fit in one cluster
1046 xnb_pkt2mbufc_1cluster(char *buffer, size_t buflen)
1048 const size_t size = MINCLSIZE;
1052 xnb_get1pkt(&pkt, size, 0);
1054 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1055 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1056 safe_m_freem(&pMbuf);
1060 * xnb_pkt2mbufc on packet that cannot fit in one regular cluster
1063 xnb_pkt2mbufc_largecluster(char *buffer, size_t buflen)
1065 const size_t size = MCLBYTES + 1;
1069 xnb_get1pkt(&pkt, size, 0);
1071 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1072 XNB_ASSERT(M_TRAILINGSPACE(pMbuf) >= size);
1073 safe_m_freem(&pMbuf);
1077 * xnb_pkt2mbufc on a packet that cannot fit in two clusters
1080 xnb_pkt2mbufc_2cluster(char *buffer, size_t buflen)
1082 const size_t size = 2 * MCLBYTES + 1;
1088 xnb_get1pkt(&pkt, size, 0);
1090 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1092 for (m = pMbuf; m != NULL; m = m->m_next) {
1093 space += M_TRAILINGSPACE(m);
1095 XNB_ASSERT(space >= size);
1096 safe_m_freem(&pMbuf);
1100 * xnb_txpkt2gnttab on an empty packet. Should return empty gnttab
1103 xnb_txpkt2gnttab_empty(char *buffer, size_t buflen)
1110 /* must call xnb_ring2pkt just to initialize pkt */
1111 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1113 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1114 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1115 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1116 XNB_ASSERT(n_entries == 0);
1117 safe_m_freem(&pMbuf);
1121 * xnb_txpkt2gnttab on a short packet that can fit in one mbuf internal buffer
1122 * and has one request
1125 xnb_txpkt2gnttab_short(char *buffer, size_t buflen)
1127 const size_t size = MINCLSIZE - 1;
1132 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1133 xnb_unit_pvt.txf.req_prod_pvt);
1138 xnb_unit_pvt.txf.req_prod_pvt++;
1140 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1142 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1144 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1145 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1146 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1147 XNB_ASSERT(n_entries == 1);
1148 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1149 /* flags should indicate gref's for source */
1150 XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_source_gref);
1151 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == req->offset);
1152 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1153 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1154 mtod(pMbuf, vm_offset_t)));
1155 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.u.gmfn ==
1156 virt_to_mfn(mtod(pMbuf, vm_offset_t)));
1157 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1158 safe_m_freem(&pMbuf);
1162 * xnb_txpkt2gnttab on a two-request packet that can fit into a single
1166 xnb_txpkt2gnttab_2req(char *buffer, size_t buflen)
1172 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1173 xnb_unit_pvt.txf.req_prod_pvt);
1174 req->flags = NETTXF_more_data;
1178 xnb_unit_pvt.txf.req_prod_pvt++;
1180 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1181 xnb_unit_pvt.txf.req_prod_pvt);
1186 xnb_unit_pvt.txf.req_prod_pvt++;
1188 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1190 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1192 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1193 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1194 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1196 XNB_ASSERT(n_entries == 2);
1197 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 1400);
1198 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1199 mtod(pMbuf, vm_offset_t)));
1201 XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 500);
1202 XNB_ASSERT(xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1203 mtod(pMbuf, vm_offset_t) + 1400));
1204 safe_m_freem(&pMbuf);
1208 * xnb_txpkt2gnttab on a single request that spans two mbuf clusters
1211 xnb_txpkt2gnttab_2cluster(char *buffer, size_t buflen)
1216 const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1218 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1219 xnb_unit_pvt.txf.req_prod_pvt);
1221 req->size = data_this_transaction;
1224 xnb_unit_pvt.txf.req_prod_pvt++;
1226 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1227 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1229 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1230 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1231 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1233 if (M_TRAILINGSPACE(pMbuf) == MCLBYTES) {
1234 /* there should be three mbufs and three gnttab entries */
1235 XNB_ASSERT(n_entries == 3);
1236 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == MCLBYTES);
1238 xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1239 mtod(pMbuf, vm_offset_t)));
1240 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1242 XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == MCLBYTES);
1244 xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1245 mtod(pMbuf->m_next, vm_offset_t)));
1246 XNB_ASSERT(xnb_unit_pvt.gnttab[1].source.offset == MCLBYTES);
1248 XNB_ASSERT(xnb_unit_pvt.gnttab[2].len == 1);
1250 xnb_unit_pvt.gnttab[2].dest.offset == virt_to_offset(
1251 mtod(pMbuf->m_next, vm_offset_t)));
1252 XNB_ASSERT(xnb_unit_pvt.gnttab[2].source.offset == 2 *
1254 } else if (M_TRAILINGSPACE(pMbuf) == 2 * MCLBYTES) {
1255 /* there should be two mbufs and two gnttab entries */
1256 XNB_ASSERT(n_entries == 2);
1257 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == 2 * MCLBYTES);
1259 xnb_unit_pvt.gnttab[0].dest.offset == virt_to_offset(
1260 mtod(pMbuf, vm_offset_t)));
1261 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == 0);
1263 XNB_ASSERT(xnb_unit_pvt.gnttab[1].len == 1);
1265 xnb_unit_pvt.gnttab[1].dest.offset == virt_to_offset(
1266 mtod(pMbuf->m_next, vm_offset_t)));
1268 xnb_unit_pvt.gnttab[1].source.offset == 2 * MCLBYTES);
1271 /* should never get here */
1280 * xnb_update_mbufc on a short packet that only has one gnttab entry
1283 xnb_update_mbufc_short(char *buffer, size_t buflen)
1285 const size_t size = MINCLSIZE - 1;
1290 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1291 xnb_unit_pvt.txf.req_prod_pvt);
1296 xnb_unit_pvt.txf.req_prod_pvt++;
1298 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1300 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1302 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1303 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1304 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1306 /* Update grant table's status fields as the hypervisor call would */
1307 xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1309 xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1310 XNB_ASSERT(pMbuf->m_len == size);
1311 XNB_ASSERT(pMbuf->m_pkthdr.len == size);
1312 safe_m_freem(&pMbuf);
1316 * xnb_update_mbufc on a two-request packet that can fit into a single
1320 xnb_update_mbufc_2req(char *buffer, size_t buflen)
1326 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1327 xnb_unit_pvt.txf.req_prod_pvt);
1328 req->flags = NETTXF_more_data;
1332 xnb_unit_pvt.txf.req_prod_pvt++;
1334 req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1335 xnb_unit_pvt.txf.req_prod_pvt);
1340 xnb_unit_pvt.txf.req_prod_pvt++;
1342 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1344 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1346 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1347 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1348 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1350 /* Update grant table's status fields as the hypervisor call would */
1351 xnb_unit_pvt.gnttab[0].status = GNTST_okay;
1352 xnb_unit_pvt.gnttab[1].status = GNTST_okay;
1354 xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1355 XNB_ASSERT(n_entries == 2);
1356 XNB_ASSERT(pMbuf->m_pkthdr.len == 1900);
1357 XNB_ASSERT(pMbuf->m_len == 1900);
1359 safe_m_freem(&pMbuf);
1363 * xnb_update_mbufc on a single request that spans two mbuf clusters
1366 xnb_update_mbufc_2cluster(char *buffer, size_t buflen)
1372 const uint16_t data_this_transaction = (MCLBYTES*2) + 1;
1374 struct netif_tx_request *req = RING_GET_REQUEST(&xnb_unit_pvt.txf,
1375 xnb_unit_pvt.txf.req_prod_pvt);
1377 req->size = data_this_transaction;
1380 xnb_unit_pvt.txf.req_prod_pvt++;
1382 RING_PUSH_REQUESTS(&xnb_unit_pvt.txf);
1383 xnb_ring2pkt(&pkt, &xnb_unit_pvt.txb, xnb_unit_pvt.txb.req_cons);
1385 pMbuf = xnb_pkt2mbufc(&pkt, xnb_unit_pvt.ifp);
1386 n_entries = xnb_txpkt2gnttab(&pkt, pMbuf, xnb_unit_pvt.gnttab,
1387 &xnb_unit_pvt.txb, DOMID_FIRST_RESERVED);
1389 /* Update grant table's status fields */
1390 for (i = 0; i < n_entries; i++) {
1391 xnb_unit_pvt.gnttab[i].status = GNTST_okay;
1393 xnb_update_mbufc(pMbuf, xnb_unit_pvt.gnttab, n_entries);
1395 if (n_entries == 3) {
1396 /* there should be three mbufs and three gnttab entries */
1397 XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1398 XNB_ASSERT(pMbuf->m_len == MCLBYTES);
1399 XNB_ASSERT(pMbuf->m_next->m_len == MCLBYTES);
1400 XNB_ASSERT(pMbuf->m_next->m_next->m_len == 1);
1401 } else if (n_entries == 2) {
1402 /* there should be two mbufs and two gnttab entries */
1403 XNB_ASSERT(n_entries == 2);
1404 XNB_ASSERT(pMbuf->m_pkthdr.len == data_this_transaction);
1405 XNB_ASSERT(pMbuf->m_len == 2 * MCLBYTES);
1406 XNB_ASSERT(pMbuf->m_next->m_len == 1);
1408 /* should never get here */
1411 safe_m_freem(&pMbuf);
1414 /** xnb_mbufc2pkt on an empty mbufc */
1416 xnb_mbufc2pkt_empty(char *buffer, size_t buflen) {
1418 int free_slots = 64;
1421 mbuf = m_get(M_WAITOK, MT_DATA);
1423 * note: it is illegal to set M_PKTHDR on a mbuf with no data. Doing so
1424 * will cause m_freem to segfault
1426 XNB_ASSERT(mbuf->m_len == 0);
1428 xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1429 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1431 safe_m_freem(&mbuf);
1434 /** xnb_mbufc2pkt on a short mbufc */
1436 xnb_mbufc2pkt_short(char *buffer, size_t buflen) {
1439 int free_slots = 64;
1443 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1444 mbuf->m_flags |= M_PKTHDR;
1445 mbuf->m_pkthdr.len = size;
1448 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1449 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1450 XNB_ASSERT(pkt.size == size);
1451 XNB_ASSERT(pkt.car_size == size);
1452 XNB_ASSERT(! (pkt.flags &
1453 (NETRXF_more_data | NETRXF_extra_info)));
1454 XNB_ASSERT(pkt.list_len == 1);
1455 XNB_ASSERT(pkt.car == start);
1457 safe_m_freem(&mbuf);
1460 /** xnb_mbufc2pkt on a single mbuf with an mbuf cluster */
1462 xnb_mbufc2pkt_1cluster(char *buffer, size_t buflen) {
1464 size_t size = MCLBYTES;
1465 int free_slots = 32;
1466 RING_IDX start = 12;
1469 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1470 mbuf->m_flags |= M_PKTHDR;
1471 mbuf->m_pkthdr.len = size;
1474 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1475 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1476 XNB_ASSERT(pkt.size == size);
1477 XNB_ASSERT(pkt.car_size == size);
1478 XNB_ASSERT(! (pkt.flags &
1479 (NETRXF_more_data | NETRXF_extra_info)));
1480 XNB_ASSERT(pkt.list_len == 1);
1481 XNB_ASSERT(pkt.car == start);
1483 safe_m_freem(&mbuf);
1486 /** xnb_mbufc2pkt on a two-mbuf chain with short data regions */
1488 xnb_mbufc2pkt_2short(char *buffer, size_t buflen) {
1490 size_t size1 = MHLEN - 5;
1491 size_t size2 = MHLEN - 15;
1492 int free_slots = 32;
1493 RING_IDX start = 14;
1494 struct mbuf *mbufc, *mbufc2;
1496 mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1497 mbufc->m_flags |= M_PKTHDR;
1498 if (mbufc == NULL) {
1499 XNB_ASSERT(mbufc != NULL);
1503 mbufc2 = m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1504 if (mbufc2 == NULL) {
1505 XNB_ASSERT(mbufc2 != NULL);
1506 safe_m_freem(&mbufc);
1509 mbufc2->m_pkthdr.len = size1 + size2;
1510 mbufc2->m_len = size1;
1512 xnb_mbufc2pkt(mbufc2, &pkt, start, free_slots);
1513 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1514 XNB_ASSERT(pkt.size == size1 + size2);
1515 XNB_ASSERT(pkt.car == start);
1517 * The second m_getm may allocate a new mbuf and append
1518 * it to the chain, or it may simply extend the first mbuf.
1520 if (mbufc2->m_next != NULL) {
1521 XNB_ASSERT(pkt.car_size == size1);
1522 XNB_ASSERT(pkt.list_len == 1);
1523 XNB_ASSERT(pkt.cdr == start + 1);
1526 safe_m_freem(&mbufc2);
1529 /** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster */
1531 xnb_mbufc2pkt_long(char *buffer, size_t buflen) {
1533 size_t size = 14 * MCLBYTES / 3;
1534 size_t size_remaining;
1535 int free_slots = 15;
1537 struct mbuf *mbufc, *m;
1539 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1540 mbufc->m_flags |= M_PKTHDR;
1541 if (mbufc == NULL) {
1542 XNB_ASSERT(mbufc != NULL);
1546 mbufc->m_pkthdr.len = size;
1547 size_remaining = size;
1548 for (m = mbufc; m != NULL; m = m->m_next) {
1549 m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1550 size_remaining -= m->m_len;
1553 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1554 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1555 XNB_ASSERT(pkt.size == size);
1556 XNB_ASSERT(pkt.car == start);
1557 XNB_ASSERT(pkt.car_size == mbufc->m_len);
1559 * There should be >1 response in the packet, and there is no extra info.
1562 XNB_ASSERT(! (pkt.flags & NETRXF_extra_info));
1563 XNB_ASSERT(pkt.cdr == pkt.car + 1);
1565 safe_m_freem(&mbufc);
1568 /** xnb_mbufc2pkt on a mbuf chain with >1 mbuf cluster and extra info */
1570 xnb_mbufc2pkt_extra(char *buffer, size_t buflen) {
1572 size_t size = 14 * MCLBYTES / 3;
1573 size_t size_remaining;
1574 int free_slots = 15;
1576 struct mbuf *mbufc, *m;
1578 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1579 if (mbufc == NULL) {
1580 XNB_ASSERT(mbufc != NULL);
1584 mbufc->m_flags |= M_PKTHDR;
1585 mbufc->m_pkthdr.len = size;
1586 mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1587 mbufc->m_pkthdr.tso_segsz = TCP_MSS - 40;
1588 size_remaining = size;
1589 for (m = mbufc; m != NULL; m = m->m_next) {
1590 m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1591 size_remaining -= m->m_len;
1594 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1595 XNB_ASSERT(xnb_pkt_is_valid(&pkt));
1596 XNB_ASSERT(pkt.size == size);
1597 XNB_ASSERT(pkt.car == start);
1598 XNB_ASSERT(pkt.car_size == mbufc->m_len);
1599 /* There should be >1 response in the packet, there is extra info */
1600 XNB_ASSERT(pkt.flags & NETRXF_extra_info);
1601 XNB_ASSERT(pkt.flags & NETRXF_data_validated);
1602 XNB_ASSERT(pkt.cdr == pkt.car + 2);
1603 XNB_ASSERT(pkt.extra.u.gso.size == mbufc->m_pkthdr.tso_segsz);
1604 XNB_ASSERT(pkt.extra.type == XEN_NETIF_EXTRA_TYPE_GSO);
1605 XNB_ASSERT(! (pkt.extra.flags & XEN_NETIF_EXTRA_FLAG_MORE));
1607 safe_m_freem(&mbufc);
1610 /** xnb_mbufc2pkt with insufficient space in the ring */
1612 xnb_mbufc2pkt_nospace(char *buffer, size_t buflen) {
1614 size_t size = 14 * MCLBYTES / 3;
1615 size_t size_remaining;
1618 struct mbuf *mbufc, *m;
1621 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1622 mbufc->m_flags |= M_PKTHDR;
1623 if (mbufc == NULL) {
1624 XNB_ASSERT(mbufc != NULL);
1628 mbufc->m_pkthdr.len = size;
1629 size_remaining = size;
1630 for (m = mbufc; m != NULL; m = m->m_next) {
1631 m->m_len = MIN(M_TRAILINGSPACE(m), size_remaining);
1632 size_remaining -= m->m_len;
1635 error = xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1636 XNB_ASSERT(error == EAGAIN);
1637 XNB_ASSERT(! xnb_pkt_is_valid(&pkt));
1639 safe_m_freem(&mbufc);
1643 * xnb_rxpkt2gnttab on an empty packet. Should return empty gnttab
1646 xnb_rxpkt2gnttab_empty(char *buffer, size_t buflen)
1650 int free_slots = 60;
1653 mbuf = m_get(M_WAITOK, MT_DATA);
1655 xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1656 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1657 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1659 XNB_ASSERT(nr_entries == 0);
1661 safe_m_freem(&mbuf);
1664 /** xnb_rxpkt2gnttab on a short packet without extra data */
1666 xnb_rxpkt2gnttab_short(char *buffer, size_t buflen) {
1670 int free_slots = 60;
1672 struct netif_rx_request *req;
1675 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1676 mbuf->m_flags |= M_PKTHDR;
1677 mbuf->m_pkthdr.len = size;
1680 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1681 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1682 xnb_unit_pvt.txf.req_prod_pvt);
1685 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1686 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1688 XNB_ASSERT(nr_entries == 1);
1689 XNB_ASSERT(xnb_unit_pvt.gnttab[0].len == size);
1690 /* flags should indicate gref's for dest */
1691 XNB_ASSERT(xnb_unit_pvt.gnttab[0].flags & GNTCOPY_dest_gref);
1692 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.offset == 0);
1693 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.domid == DOMID_SELF);
1694 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.offset == virt_to_offset(
1695 mtod(mbuf, vm_offset_t)));
1696 XNB_ASSERT(xnb_unit_pvt.gnttab[0].source.u.gmfn ==
1697 virt_to_mfn(mtod(mbuf, vm_offset_t)));
1698 XNB_ASSERT(xnb_unit_pvt.gnttab[0].dest.domid == DOMID_FIRST_RESERVED);
1700 safe_m_freem(&mbuf);
1704 * xnb_rxpkt2gnttab on a packet with two different mbufs in a single chain
1707 xnb_rxpkt2gnttab_2req(char *buffer, size_t buflen)
1712 size_t total_granted_size = 0;
1713 size_t size = MJUMPAGESIZE + 1;
1714 int free_slots = 60;
1715 RING_IDX start = 11;
1716 struct netif_rx_request *req;
1717 struct mbuf *mbuf, *m;
1719 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1720 mbuf->m_flags |= M_PKTHDR;
1721 mbuf->m_pkthdr.len = size;
1724 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1726 for (i = 0, m=mbuf; m != NULL; i++, m = m->m_next) {
1727 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf,
1728 xnb_unit_pvt.txf.req_prod_pvt);
1734 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1735 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1737 XNB_ASSERT(nr_entries >= num_mbufs);
1738 for (i = 0; i < nr_entries; i++) {
1739 int end_offset = xnb_unit_pvt.gnttab[i].len +
1740 xnb_unit_pvt.gnttab[i].dest.offset;
1741 XNB_ASSERT(end_offset <= PAGE_SIZE);
1742 total_granted_size += xnb_unit_pvt.gnttab[i].len;
1744 XNB_ASSERT(total_granted_size == size);
1748 * xnb_rxpkt2rsp on an empty packet. Shouldn't make any response
1751 xnb_rxpkt2rsp_empty(char *buffer, size_t buflen)
1756 int free_slots = 60;
1757 netif_rx_back_ring_t rxb_backup = xnb_unit_pvt.rxb;
1758 netif_rx_sring_t rxs_backup = *xnb_unit_pvt.rxs;
1761 mbuf = m_get(M_WAITOK, MT_DATA);
1763 xnb_mbufc2pkt(mbuf, &pkt, 0, free_slots);
1764 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1765 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1767 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1769 XNB_ASSERT(nr_reqs == 0);
1771 memcmp(&rxb_backup, &xnb_unit_pvt.rxb, sizeof(rxb_backup)) == 0);
1773 memcmp(&rxs_backup, xnb_unit_pvt.rxs, sizeof(rxs_backup)) == 0);
1775 safe_m_freem(&mbuf);
1779 * xnb_rxpkt2rsp on a short packet with no extras
1782 xnb_rxpkt2rsp_short(char *buffer, size_t buflen)
1785 int nr_entries, nr_reqs;
1787 int free_slots = 60;
1789 struct netif_rx_request *req;
1790 struct netif_rx_response *rsp;
1793 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1794 mbuf->m_flags |= M_PKTHDR;
1795 mbuf->m_pkthdr.len = size;
1798 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1799 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1801 xnb_unit_pvt.rxb.req_cons = start;
1802 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1803 xnb_unit_pvt.rxs->req_prod = start + 1;
1804 xnb_unit_pvt.rxs->rsp_prod = start;
1806 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1807 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1809 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1812 XNB_ASSERT(nr_reqs == 1);
1813 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
1814 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1815 XNB_ASSERT(rsp->id == req->id);
1816 XNB_ASSERT(rsp->offset == 0);
1817 XNB_ASSERT((rsp->flags & (NETRXF_more_data | NETRXF_extra_info)) == 0);
1818 XNB_ASSERT(rsp->status == size);
1820 safe_m_freem(&mbuf);
1824 * xnb_rxpkt2rsp with extra data
1827 xnb_rxpkt2rsp_extra(char *buffer, size_t buflen)
1830 int nr_entries, nr_reqs;
1832 int free_slots = 15;
1836 uint16_t mss = TCP_MSS - 40;
1838 struct netif_rx_request *req;
1839 struct netif_rx_response *rsp;
1840 struct netif_extra_info *ext;
1842 mbufc = m_getm(NULL, size, M_WAITOK, MT_DATA);
1843 if (mbufc == NULL) {
1844 XNB_ASSERT(mbufc != NULL);
1848 mbufc->m_flags |= M_PKTHDR;
1849 mbufc->m_pkthdr.len = size;
1850 mbufc->m_pkthdr.csum_flags |= CSUM_TSO;
1851 mbufc->m_pkthdr.tso_segsz = mss;
1852 mbufc->m_len = size;
1854 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1855 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1858 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1860 req->gref = gref + 1;
1861 xnb_unit_pvt.rxb.req_cons = start;
1862 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1863 xnb_unit_pvt.rxs->req_prod = start + 2;
1864 xnb_unit_pvt.rxs->rsp_prod = start;
1866 nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
1867 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1869 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1872 XNB_ASSERT(nr_reqs == 2);
1873 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1874 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1875 XNB_ASSERT(rsp->id == id);
1876 XNB_ASSERT((rsp->flags & NETRXF_more_data) == 0);
1877 XNB_ASSERT((rsp->flags & NETRXF_extra_info));
1878 XNB_ASSERT((rsp->flags & NETRXF_data_validated));
1879 XNB_ASSERT((rsp->flags & NETRXF_csum_blank));
1880 XNB_ASSERT(rsp->status == size);
1882 ext = (struct netif_extra_info*)
1883 RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1884 XNB_ASSERT(ext->type == XEN_NETIF_EXTRA_TYPE_GSO);
1885 XNB_ASSERT(! (ext->flags & XEN_NETIF_EXTRA_FLAG_MORE));
1886 XNB_ASSERT(ext->u.gso.size == mss);
1887 XNB_ASSERT(ext->u.gso.type == XEN_NETIF_GSO_TYPE_TCPV4);
1889 safe_m_freem(&mbufc);
1893 * xnb_rxpkt2rsp on a packet with more than a page's worth of data. It should
1894 * generate two response slots
1897 xnb_rxpkt2rsp_2slots(char *buffer, size_t buflen)
1900 int nr_entries, nr_reqs;
1901 size_t size = PAGE_SIZE + 100;
1905 uint16_t gref1 = 24;
1906 uint16_t gref2 = 34;
1907 RING_IDX start = 15;
1908 struct netif_rx_request *req;
1909 struct netif_rx_response *rsp;
1912 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
1913 mbuf->m_flags |= M_PKTHDR;
1914 mbuf->m_pkthdr.len = size;
1915 if (mbuf->m_next != NULL) {
1916 size_t first_len = MIN(M_TRAILINGSPACE(mbuf), size);
1917 mbuf->m_len = first_len;
1918 mbuf->m_next->m_len = size - first_len;
1924 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
1925 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1928 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
1931 xnb_unit_pvt.rxb.req_cons = start;
1932 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1933 xnb_unit_pvt.rxs->req_prod = start + 2;
1934 xnb_unit_pvt.rxs->rsp_prod = start;
1936 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
1937 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
1939 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
1942 XNB_ASSERT(nr_reqs == 2);
1943 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 2);
1944 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
1945 XNB_ASSERT(rsp->id == id1);
1946 XNB_ASSERT(rsp->offset == 0);
1947 XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1948 XNB_ASSERT(rsp->flags & NETRXF_more_data);
1949 XNB_ASSERT(rsp->status == PAGE_SIZE);
1951 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start + 1);
1952 XNB_ASSERT(rsp->id == id2);
1953 XNB_ASSERT(rsp->offset == 0);
1954 XNB_ASSERT((rsp->flags & NETRXF_extra_info) == 0);
1955 XNB_ASSERT(! (rsp->flags & NETRXF_more_data));
1956 XNB_ASSERT(rsp->status == size - PAGE_SIZE);
1958 safe_m_freem(&mbuf);
1961 /** xnb_rxpkt2rsp on a grant table with two sub-page entries */
1963 xnb_rxpkt2rsp_2short(char *buffer, size_t buflen) {
1965 int nr_reqs, nr_entries;
1966 size_t size1 = MHLEN - 5;
1967 size_t size2 = MHLEN - 15;
1968 int free_slots = 32;
1969 RING_IDX start = 14;
1972 struct netif_rx_request *req;
1973 struct netif_rx_response *rsp;
1976 mbufc = m_getm(NULL, size1, M_WAITOK, MT_DATA);
1977 mbufc->m_flags |= M_PKTHDR;
1978 if (mbufc == NULL) {
1979 XNB_ASSERT(mbufc != NULL);
1983 m_getm(mbufc, size2, M_WAITOK, MT_DATA);
1984 XNB_ASSERT(mbufc->m_next != NULL);
1985 mbufc->m_pkthdr.len = size1 + size2;
1986 mbufc->m_len = size1;
1987 mbufc->m_next->m_len = size2;
1989 xnb_mbufc2pkt(mbufc, &pkt, start, free_slots);
1991 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
1994 xnb_unit_pvt.rxb.req_cons = start;
1995 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
1996 xnb_unit_pvt.rxs->req_prod = start + 1;
1997 xnb_unit_pvt.rxs->rsp_prod = start;
1999 nr_entries = xnb_rxpkt2gnttab(&pkt, mbufc, xnb_unit_pvt.gnttab,
2000 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2002 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2005 XNB_ASSERT(nr_entries == 2);
2006 XNB_ASSERT(nr_reqs == 1);
2007 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2008 XNB_ASSERT(rsp->id == id);
2009 XNB_ASSERT(rsp->status == size1 + size2);
2010 XNB_ASSERT(rsp->offset == 0);
2011 XNB_ASSERT(! (rsp->flags & (NETRXF_more_data | NETRXF_extra_info)));
2013 safe_m_freem(&mbufc);
2017 * xnb_rxpkt2rsp on a long packet with a hypervisor gnttab_copy error
2018 * Note: this test will result in an error message being printed to the console
2020 * xnb(xnb_rxpkt2rsp:1720): Got error -1 for hypervisor gnttab_copy status
2023 xnb_rxpkt2rsp_copyerror(char *buffer, size_t buflen)
2026 int nr_entries, nr_reqs;
2029 uint16_t canary = 6859;
2030 size_t size = 7 * MCLBYTES;
2033 struct netif_rx_request *req;
2034 struct netif_rx_response *rsp;
2037 mbuf = m_getm(NULL, size, M_WAITOK, MT_DATA);
2038 mbuf->m_flags |= M_PKTHDR;
2039 mbuf->m_pkthdr.len = size;
2042 xnb_mbufc2pkt(mbuf, &pkt, start, free_slots);
2043 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start);
2046 xnb_unit_pvt.rxb.req_cons = start;
2047 xnb_unit_pvt.rxb.rsp_prod_pvt = start;
2048 xnb_unit_pvt.rxs->req_prod = start + 1;
2049 xnb_unit_pvt.rxs->rsp_prod = start;
2050 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2054 nr_entries = xnb_rxpkt2gnttab(&pkt, mbuf, xnb_unit_pvt.gnttab,
2055 &xnb_unit_pvt.rxb, DOMID_FIRST_RESERVED);
2056 /* Inject the error*/
2057 xnb_unit_pvt.gnttab[2].status = GNTST_general_error;
2059 nr_reqs = xnb_rxpkt2rsp(&pkt, xnb_unit_pvt.gnttab, nr_entries,
2062 XNB_ASSERT(nr_reqs == 1);
2063 XNB_ASSERT(xnb_unit_pvt.rxb.rsp_prod_pvt == start + 1);
2064 rsp = RING_GET_RESPONSE(&xnb_unit_pvt.rxb, start);
2065 XNB_ASSERT(rsp->id == id);
2066 XNB_ASSERT(rsp->status == NETIF_RSP_ERROR);
2067 req = RING_GET_REQUEST(&xnb_unit_pvt.rxf, start + 1);
2068 XNB_ASSERT(req->gref == canary);
2069 XNB_ASSERT(req->id == canary);
2071 safe_m_freem(&mbuf);
2074 #if defined(INET) || defined(INET6)
2076 * xnb_add_mbuf_cksum on an ARP request packet
2079 xnb_add_mbuf_cksum_arp(char *buffer, size_t buflen)
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ether_arp);
	struct ether_header *eh;
	struct ether_arp *ep;
	unsigned char pkt_orig[pkt_len];

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example arp request */
	eh = mtod(mbufc, struct ether_header*);
	eh->ether_dhost[0] = 0xff;
	eh->ether_dhost[1] = 0xff;
	eh->ether_dhost[2] = 0xff;
	eh->ether_dhost[3] = 0xff;
	eh->ether_dhost[4] = 0xff;
	eh->ether_dhost[5] = 0xff;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x15;
	eh->ether_shost[2] = 0x17;
	eh->ether_shost[3] = 0xe9;
	eh->ether_shost[4] = 0x30;
	eh->ether_shost[5] = 0x68;
	eh->ether_type = htons(ETHERTYPE_ARP);
	ep = (struct ether_arp*)(eh + 1);
	ep->ea_hdr.ar_hrd = htons(ARPHRD_ETHER);
	ep->ea_hdr.ar_pro = htons(ETHERTYPE_IP);
	ep->ea_hdr.ar_hln = 6;
	ep->ea_hdr.ar_pln = 4;
	ep->ea_hdr.ar_op = htons(ARPOP_REQUEST);
	ep->arp_sha[0] = 0x00;
	ep->arp_sha[1] = 0x15;
	ep->arp_sha[2] = 0x17;
	ep->arp_sha[3] = 0xe9;
	ep->arp_sha[4] = 0x30;
	ep->arp_sha[5] = 0x68;
	ep->arp_spa[0] = 0xc0;
	ep->arp_spa[1] = 0xa8;
	ep->arp_spa[2] = 0x0a;
	ep->arp_spa[3] = 0x04;
	bzero(&(ep->arp_tha), ETHER_ADDR_LEN);
	ep->arp_tpa[0] = 0xc0;
	ep->arp_tpa[1] = 0xa8;
	ep->arp_tpa[2] = 0x0a;
	ep->arp_tpa[3] = 0x06;

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the packet */
	bcopy(mtod(mbufc, const void*), pkt_orig, pkt_len);

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Verify that the packet's data did not change */
	XNB_ASSERT(bcmp(mtod(mbufc, const void*), pkt_orig, pkt_len) == 0);
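
	/*
	 * For a non-IP ethertype such as ARP, xnb_add_mbuf_cksum is expected
	 * to pass the frame through untouched; the bcmp above verifies that
	 * nothing was modified.
	 */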

/**
 * Helper function that populates the ethernet header and IP header used by
 * some of the xnb_add_mbuf_cksum unit tests.  m must already be allocated
 * and must be large enough to hold the headers.
 */
xnb_fill_eh_and_ip(struct mbuf *m, uint16_t ip_len, uint16_t ip_id,
		   uint16_t ip_p, uint16_t ip_off, uint16_t ip_sum)
	struct ether_header *eh;

	eh = mtod(m, struct ether_header*);
	eh->ether_dhost[0] = 0x00;
	eh->ether_dhost[1] = 0x16;
	eh->ether_dhost[2] = 0x3e;
	eh->ether_dhost[3] = 0x23;
	eh->ether_dhost[4] = 0x50;
	eh->ether_dhost[5] = 0x0b;
	eh->ether_shost[0] = 0x00;
	eh->ether_shost[1] = 0x16;
	eh->ether_shost[2] = 0x30;
	eh->ether_shost[3] = 0x00;
	eh->ether_shost[4] = 0x00;
	eh->ether_shost[5] = 0x00;
	eh->ether_type = htons(ETHERTYPE_IP);
	iph = (struct ip*)(eh + 1);
	iph->ip_hl = 0x5;	/* 5 dwords == 20 bytes */
	iph->ip_v = 4;		/* IP v4 */
	iph->ip_len = htons(ip_len);
	iph->ip_id = htons(ip_id);
	iph->ip_off = htons(ip_off);
	iph->ip_p = ip_p;
	iph->ip_sum = htons(ip_sum);
	iph->ip_src.s_addr = htonl(0xc0a80a04);
	iph->ip_dst.s_addr = htonl(0xc0a80a05);

/**
 * xnb_add_mbuf_cksum on an ICMP packet, based on a tcpdump of an actual
 * ICMP packet
 */
xnb_add_mbuf_cksum_icmp(char *buffer, size_t buflen)
	const size_t icmp_len = 64;	/* set by ping(1) */
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + icmp_len;
	struct ether_header *eh;
	unsigned char pkt_orig[icmp_len];
	uint8_t *data_payload;
	const uint16_t ICMP_CSUM = 0xaed7;
	const uint16_t IP_CSUM = 0xe533;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example ICMP ping request */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 84, 28, IPPROTO_ICMP, 0, 0);
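	/*
	 * The helper has now written the Ethernet header and a 20-byte IPv4
	 * header with total length 84, IP id 28, protocol ICMP, no fragment
	 * offset, and a zero checksum for xnb_add_mbuf_cksum to fill in.
	 */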
	iph = (struct ip*)(eh + 1);
	icmph = (struct icmp*)(iph + 1);
	icmph->icmp_type = ICMP_ECHO;
	icmph->icmp_code = 0;
	icmph->icmp_cksum = htons(ICMP_CSUM);
	icmph->icmp_id = htons(31492);
	icmph->icmp_seq = htons(0);
	/*
	 * ping(1) uses bcopy to insert a native-endian timeval after icmp_seq.
	 * For this test, we will set the bytes individually for portability.
	 */
	tv_field = (uint32_t*)(&(icmph->icmp_hun));
	tv_field[0] = 0x4f02cfac;
	tv_field[1] = 0x0007c46a;
	/*
	 * The remainder of the packet is an incrementing 8-bit integer,
	 * starting with 8.
	 */
	data_payload = (uint8_t*)(&tv_field[2]);
	for (i = 8; i < 37; i++) {
		*data_payload++ = i;
	}

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Make a backup copy of the ICMP portion of the packet */
	bcopy(icmph, pkt_orig, icmp_len);
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the IP checksum */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));

	/* Check that the ICMP packet did not change */
	XNB_ASSERT(bcmp(icmph, pkt_orig, icmp_len) == 0);

/**
 * xnb_add_mbuf_cksum on a UDP packet, based on a tcpdump of an actual
 * UDP packet
 */
xnb_add_mbuf_cksum_udp(char *buffer, size_t buflen)
	const size_t udp_len = 16;
	const size_t pkt_len = sizeof(struct ether_header) +
		sizeof(struct ip) + udp_len;
	struct ether_header *eh;
	uint8_t *data_payload;
	const uint16_t IP_CSUM = 0xe56b;
	const uint16_t UDP_CSUM = 0xdde2;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example UDP packet made by 'uname | nc -u <host> 2222' */
	eh = mtod(mbufc, struct ether_header*);
	xnb_fill_eh_and_ip(mbufc, 36, 4, IPPROTO_UDP, 0, 0xbaad);
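	/*
	 * 0xbaad is a deliberately bogus IP checksum placeholder; like uh_sum
	 * below, it is expected to be replaced by xnb_add_mbuf_cksum (see the
	 * IP_CSUM and UDP_CSUM assertions at the end of the test).
	 */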
	iph = (struct ip*)(eh + 1);
	udp = (struct udphdr*)(iph + 1);
	udp->uh_sport = htons(0x51ae);
	udp->uh_dport = htons(0x08ae);
	udp->uh_ulen = htons(udp_len);
	udp->uh_sum = htons(0xbaad);	/* xnb_add_mbuf_cksum will fill this in */
	data_payload = (uint8_t*)(udp + 1);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(udp->uh_sum == htons(UDP_CSUM));

/**
 * Helper function that populates a TCP packet used by all of the
 * xnb_add_mbuf_cksum tcp unit tests.  m must already be allocated and must be
 * large enough to hold the packet.
 */
xnb_fill_tcp(struct mbuf *m)
	struct ether_header *eh;
	uint8_t *data_payload;

	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	eh = mtod(m, struct ether_header*);
	xnb_fill_eh_and_ip(m, 60, 8, IPPROTO_TCP, IP_DF, 0);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sport = htons(0x9cd9);
	tcp->th_dport = htons(2222);
	tcp->th_seq = htonl(0x00f72b10);
	tcp->th_ack = htonl(0x7f37ba6c);
	tcp->th_flags = 0x18;	/* PSH | ACK */
	tcp->th_win = htons(0x410);
	/* th_sum is incorrect; will be inserted by function under test */
	tcp->th_sum = htons(0xbaad);
	tcp->th_urp = htons(0);
	/*
	 * The following 12 bytes of options encode:
	 * [nop, nop, TS val 33247 ecr 3457687679]
	 */
	options = (uint32_t*)(tcp + 1);
	options[0] = htonl(0x0101080a);
	options[1] = htonl(0x000081df);
	options[2] = htonl(0xce18207f);
	data_payload = (uint8_t*)(&options[3]);
	data_payload[0] = 'F';
	data_payload[1] = 'r';
	data_payload[2] = 'e';
	data_payload[3] = 'e';
	data_payload[4] = 'B';
	data_payload[5] = 'S';
	data_payload[6] = 'D';
	data_payload[7] = '\n';
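
	/*
	 * The resulting frame is an Ethernet header, a 20-byte IPv4 header,
	 * a 20-byte TCP header, 12 bytes of options, and an 8-byte payload
	 * ("FreeBSD\n"); the callers size pkt_len to match this layout.
	 */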

/**
 * xnb_add_mbuf_cksum on a TCP packet, based on a tcpdump of an actual TCP
 * packet
 */
xnb_add_mbuf_cksum_tcp(char *buffer, size_t buflen)
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct ether_header *eh;
	const uint16_t IP_CSUM = 0xa55a;
	const uint16_t TCP_CSUM = 0x2f64;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	tcp = (struct tcphdr*)(iph + 1);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront uses hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = CSUM_IP_CHECKED | CSUM_IP_VALID |
	    CSUM_DATA_VALID | CSUM_PSEUDO_HDR;

	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check the checksums */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

/**
 * xnb_add_mbuf_cksum on a TCP packet that does not use HW-assisted checksums
 */
xnb_add_mbuf_cksum_tcp_swcksum(char *buffer, size_t buflen)
	const size_t payload_len = 8;
	const size_t tcp_options_len = 12;
	const size_t pkt_len = sizeof(struct ether_header) + sizeof(struct ip) +
	    sizeof(struct tcphdr) + tcp_options_len + payload_len;
	struct ether_header *eh;
	/*
	 * Use deliberately bad checksums, and verify that they don't get
	 * corrected by xnb_add_mbuf_cksum.
	 */
	const uint16_t IP_CSUM = 0xdead;
	const uint16_t TCP_CSUM = 0xbeef;

	mbufc = m_getm(NULL, pkt_len, M_WAITOK, MT_DATA);
	/* Fill in an example TCP packet made by 'uname | nc <host> 2222' */
	xnb_fill_tcp(mbufc);
	eh = mtod(mbufc, struct ether_header*);
	iph = (struct ip*)(eh + 1);
	iph->ip_sum = htons(IP_CSUM);
	tcp = (struct tcphdr*)(iph + 1);
	tcp->th_sum = htons(TCP_CSUM);

	/* fill in the length field */
	mbufc->m_len = pkt_len;
	mbufc->m_pkthdr.len = pkt_len;
	/* indicate that the netfront does not use hw-assisted checksums */
	mbufc->m_pkthdr.csum_flags = 0;
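
	/*
	 * With no checksum offload flags set, xnb_add_mbuf_cksum should treat
	 * the packet as already checksummed by the frontend and leave both the
	 * IP and TCP checksums alone.
	 */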
	/* Function under test */
	xnb_add_mbuf_cksum(mbufc);

	/* Check that the checksums didn't change */
	XNB_ASSERT(iph->ip_sum == htons(IP_CSUM));
	XNB_ASSERT(tcp->th_sum == htons(TCP_CSUM));

#endif /* INET || INET6 */

/**
 * sscanf on unsigned chars
 */
xnb_sscanf_hhu(char *buffer, size_t buflen)
	const char mystr[] = "137";

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	sscanf(mystr, "%hhu", &dest[4]);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 137 : 'X'));

/**
 * sscanf on signed chars
 */
xnb_sscanf_hhd(char *buffer, size_t buflen)
	const char mystr[] = "-27";

	for (i = 0; i < 12; i++)
		dest[i] = 'X';

	sscanf(mystr, "%hhd", &dest[4]);
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? -27 : 'X'));

/**
 * sscanf on signed long longs
 */
xnb_sscanf_lld(char *buffer, size_t buflen)
	const char mystr[] = "-123456789012345";	/* about -2**47 */

	for (i = 0; i < 3; i++)
		dest[i] = (long long)0xdeadbeefdeadbeef;

	sscanf(mystr, "%lld", &dest[1]);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
		    -123456789012345));

/**
 * sscanf on unsigned long longs
 */
xnb_sscanf_llu(char *buffer, size_t buflen)
	const char mystr[] = "12802747070103273189";
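	/*
	 * Note: 12802747070103273189 is larger than LLONG_MAX
	 * (9223372036854775807), so this exercises a value that only fits in
	 * an unsigned long long.
	 */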
	unsigned long long dest[3];

	for (i = 0; i < 3; i++)
		dest[i] = (long long)0xdeadbeefdeadbeef;

	sscanf(mystr, "%llu", &dest[1]);
	for (i = 0; i < 3; i++)
		XNB_ASSERT(dest[i] == (i != 1 ? (long long)0xdeadbeefdeadbeef :
		    12802747070103273189ull));

/**
 * sscanf with the %hhn conversion (character count stored in an unsigned char)
 */
xnb_sscanf_hhn(char *buffer, size_t buflen)
	const char mystr[] =
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f505152535455565758595a5b5c5d5e5f";
	unsigned char dest[12];

	for (i = 0; i < 12; i++)
		dest[i] = (unsigned char)'X';

	sscanf(mystr,
	    "000102030405060708090a0b0c0d0e0f101112131415161718191a1b1c1d1e1f"
	    "202122232425262728292a2b2c2d2e2f303132333435363738393a3b3c3d3e3f"
	    "404142434445464748494a4b4c4d4e4f%hhn", &dest[4]);
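	/*
	 * 64 + 64 + 32 = 160 literal characters precede the %hhn conversion,
	 * so sscanf should store 160 in dest[4].
	 */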
	for (i = 0; i < 12; i++)
		XNB_ASSERT(dest[i] == (i == 4 ? 160 : 'X'));