2 * Copyright (c) 2016-2018, Intel Corporation
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright notice,
10 * this list of conditions and the following disclaimer in the documentation
11 * and/or other materials provided with the distribution.
12 * * Neither the name of Intel Corporation nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
#include "ptunit_threads.h"

#include "pt_block_cache.h"

#include <string.h>
36 /* A test fixture optionally providing a block cache and automatically freeing
39 struct bcache_fixture {
40 /* Threading support. */
41 struct ptunit_thrd_fixture thrd;
43 /* The cache - it will be freed automatically. */
44 struct pt_block_cache *bcache;
46 /* The test fixture initialization and finalization functions. */
47 struct ptunit_result (*init)(struct bcache_fixture *);
48 struct ptunit_result (*fini)(struct bcache_fixture *);
enum {
	/* The number of entries in fixture-provided caches. */
	bfix_nentries = 0x10000,

#if defined(FEATURE_THREADS)

	/* The number of additional threads to use for stress testing. */
	bfix_threads = 3,

#endif /* defined(FEATURE_THREADS) */

	/* The number of iterations in stress testing. */
	bfix_iterations = 0x10
};
66 static struct ptunit_result cfix_init(struct bcache_fixture *bfix)
68 ptu_test(ptunit_thrd_init, &bfix->thrd);
75 static struct ptunit_result bfix_init(struct bcache_fixture *bfix)
77 ptu_test(cfix_init, bfix);
79 bfix->bcache = pt_bcache_alloc(bfix_nentries);
80 ptu_ptr(bfix->bcache);
85 static struct ptunit_result bfix_fini(struct bcache_fixture *bfix)
89 ptu_test(ptunit_thrd_fini, &bfix->thrd);
91 for (thrd = 0; thrd < bfix->thrd.nthreads; ++thrd)
92 ptu_int_eq(bfix->thrd.result[thrd], 0);
94 pt_bcache_free(bfix->bcache);
99 static struct ptunit_result bcache_entry_size(void)
101 ptu_uint_eq(sizeof(struct pt_bcache_entry), sizeof(uint32_t));
106 static struct ptunit_result bcache_size(void)
108 ptu_uint_le(sizeof(struct pt_block_cache),
109 2 * sizeof(struct pt_bcache_entry));
114 static struct ptunit_result free_null(void)
116 pt_bcache_free(NULL);
121 static struct ptunit_result add_null(void)
123 struct pt_bcache_entry bce;
126 memset(&bce, 0, sizeof(bce));
128 errcode = pt_bcache_add(NULL, 0ull, bce);
129 ptu_int_eq(errcode, -pte_internal);
134 static struct ptunit_result lookup_null(void)
136 struct pt_bcache_entry bce;
137 struct pt_block_cache bcache;
140 errcode = pt_bcache_lookup(&bce, NULL, 0ull);
141 ptu_int_eq(errcode, -pte_internal);
143 errcode = pt_bcache_lookup(NULL, &bcache, 0ull);
144 ptu_int_eq(errcode, -pte_internal);
149 static struct ptunit_result alloc(struct bcache_fixture *bfix)
151 bfix->bcache = pt_bcache_alloc(0x10000ull);
152 ptu_ptr(bfix->bcache);
157 static struct ptunit_result alloc_min(struct bcache_fixture *bfix)
159 bfix->bcache = pt_bcache_alloc(1ull);
160 ptu_ptr(bfix->bcache);
165 static struct ptunit_result alloc_too_big(struct bcache_fixture *bfix)
167 bfix->bcache = pt_bcache_alloc(UINT32_MAX + 1ull);
168 ptu_null(bfix->bcache);
173 static struct ptunit_result alloc_zero(struct bcache_fixture *bfix)
175 bfix->bcache = pt_bcache_alloc(0ull);
176 ptu_null(bfix->bcache);
181 static struct ptunit_result initially_empty(struct bcache_fixture *bfix)
185 for (index = 0; index < bfix_nentries; ++index) {
186 struct pt_bcache_entry bce;
189 memset(&bce, 0xff, sizeof(bce));
191 status = pt_bcache_lookup(&bce, bfix->bcache, index);
192 ptu_int_eq(status, 0);
194 status = pt_bce_is_valid(bce);
195 ptu_int_eq(status, 0);
201 static struct ptunit_result add_bad_index(struct bcache_fixture *bfix)
203 struct pt_bcache_entry bce;
206 memset(&bce, 0, sizeof(bce));
208 errcode = pt_bcache_add(bfix->bcache, bfix_nentries, bce);
209 ptu_int_eq(errcode, -pte_internal);
214 static struct ptunit_result lookup_bad_index(struct bcache_fixture *bfix)
216 struct pt_bcache_entry bce;
219 errcode = pt_bcache_lookup(&bce, bfix->bcache, bfix_nentries);
220 ptu_int_eq(errcode, -pte_internal);
225 static struct ptunit_result add(struct bcache_fixture *bfix, uint64_t index)
227 struct pt_bcache_entry bce, exp;
230 memset(&bce, 0xff, sizeof(bce));
231 memset(&exp, 0x00, sizeof(exp));
234 exp.displacement = 7;
235 exp.mode = ptem_64bit;
236 exp.qualifier = ptbq_decode;
239 errcode = pt_bcache_add(bfix->bcache, index, exp);
240 ptu_int_eq(errcode, 0);
242 errcode = pt_bcache_lookup(&bce, bfix->bcache, index);
243 ptu_int_eq(errcode, 0);
245 ptu_uint_eq(bce.ninsn, exp.ninsn);
246 ptu_int_eq(bce.displacement, exp.displacement);
247 ptu_uint_eq(pt_bce_exec_mode(bce), pt_bce_exec_mode(exp));
248 ptu_uint_eq(pt_bce_qualifier(bce), pt_bce_qualifier(exp));
249 ptu_uint_eq(bce.isize, exp.isize);
254 static int worker(void *arg)
256 struct pt_bcache_entry exp;
257 struct pt_block_cache *bcache;
258 uint64_t iter, index;
262 return -pte_internal;
264 memset(&exp, 0x00, sizeof(exp));
266 exp.displacement = 28;
267 exp.mode = ptem_64bit;
268 exp.qualifier = ptbq_again;
271 for (index = 0; index < bfix_nentries; ++index) {
272 for (iter = 0; iter < bfix_iterations; ++iter) {
273 struct pt_bcache_entry bce;
276 memset(&bce, 0xff, sizeof(bce));
278 errcode = pt_bcache_lookup(&bce, bcache, index);
282 if (!pt_bce_is_valid(bce)) {
283 errcode = pt_bcache_add(bcache, index, exp);
288 errcode = pt_bcache_lookup(&bce, bcache, index);
292 if (!pt_bce_is_valid(bce))
295 if (bce.ninsn != exp.ninsn)
298 if (bce.displacement != exp.displacement)
301 if (pt_bce_exec_mode(bce) != pt_bce_exec_mode(exp))
304 if (pt_bce_qualifier(bce) != pt_bce_qualifier(exp))
307 if (bce.isize != exp.isize)
315 static struct ptunit_result stress(struct bcache_fixture *bfix)
319 #if defined(FEATURE_THREADS)
323 for (thrd = 0; thrd < bfix_threads; ++thrd)
324 ptu_test(ptunit_thrd_create, &bfix->thrd, worker,
327 #endif /* defined(FEATURE_THREADS) */
329 errcode = worker(bfix->bcache);
330 ptu_int_eq(errcode, 0);
335 int main(int argc, char **argv)
337 struct bcache_fixture bfix, cfix;
338 struct ptunit_suite suite;
340 bfix.init = bfix_init;
341 bfix.fini = bfix_fini;
343 cfix.init = cfix_init;
344 cfix.fini = bfix_fini;
346 suite = ptunit_mk_suite(argc, argv);
348 ptu_run(suite, bcache_entry_size);
349 ptu_run(suite, bcache_size);
351 ptu_run(suite, free_null);
352 ptu_run(suite, add_null);
353 ptu_run(suite, lookup_null);
355 ptu_run_f(suite, alloc, cfix);
356 ptu_run_f(suite, alloc_min, cfix);
357 ptu_run_f(suite, alloc_too_big, cfix);
358 ptu_run_f(suite, alloc_zero, cfix);
360 ptu_run_f(suite, initially_empty, bfix);
362 ptu_run_f(suite, add_bad_index, bfix);
363 ptu_run_f(suite, lookup_bad_index, bfix);
365 ptu_run_fp(suite, add, bfix, 0ull);
366 ptu_run_fp(suite, add, bfix, bfix_nentries - 1ull);
367 ptu_run_f(suite, stress, bfix);
369 return ptunit_report(&suite);