]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - sys/kern/subr_coverage.c
bhnd(9): Fix a few mandoc related issues
[FreeBSD/FreeBSD.git] / sys / kern / subr_coverage.c
1 /*-
2  * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
3  *
4  * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
5  * Copyright (C) 2018, 2019 Andrew Turner
6  *
7  * This software was developed by Mitchell Horne under sponsorship of
8  * the FreeBSD Foundation.
9  *
10  * This software was developed by SRI International and the University of
11  * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
12  * ("CTSRD"), as part of the DARPA CRASH research programme.
13  *
14  * Redistribution and use in source and binary forms, with or without
15  * modification, are permitted provided that the following conditions
16  * are met:
17  * 1. Redistributions of source code must retain the above copyright
18  *    notice, this list of conditions and the following disclaimer.
19  * 2. Redistributions in binary form must reproduce the above copyright
20  *    notice, this list of conditions and the following disclaimer in the
21  *    documentation and/or other materials provided with the distribution.
22  *
23  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
24  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
25  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
26  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
27  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
28  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
29  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
30  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
31  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
32  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33  * SUCH DAMAGE.
34  *
35  * $FreeBSD$
36  */
37
38 #define KCSAN_RUNTIME
39
40 #include <sys/cdefs.h>
41 __FBSDID("$FreeBSD$");
42
43 #include <sys/param.h>
44 #include <sys/coverage.h>
45
46 #include <machine/atomic.h>
47
48 void __sanitizer_cov_trace_pc(void);
49 void __sanitizer_cov_trace_cmp1(uint8_t, uint8_t);
50 void __sanitizer_cov_trace_cmp2(uint16_t, uint16_t);
51 void __sanitizer_cov_trace_cmp4(uint32_t, uint32_t);
52 void __sanitizer_cov_trace_cmp8(uint64_t, uint64_t);
53 void __sanitizer_cov_trace_const_cmp1(uint8_t, uint8_t);
54 void __sanitizer_cov_trace_const_cmp2(uint16_t, uint16_t);
55 void __sanitizer_cov_trace_const_cmp4(uint32_t, uint32_t);
56 void __sanitizer_cov_trace_const_cmp8(uint64_t, uint64_t);
57 void __sanitizer_cov_trace_switch(uint64_t, uint64_t *);
58
59 static cov_trace_pc_t cov_trace_pc;
60 static cov_trace_cmp_t cov_trace_cmp;
61
/*
 * Install the PC-trace hook.  __sanitizer_cov_trace_pc() invokes it on
 * every instrumented edge while it remains registered.  The store is
 * atomic so a tracer racing with registration observes either the old
 * or the new pointer, never a torn value.
 */
void
cov_register_pc(cov_trace_pc_t trace_pc)
{

	atomic_store_ptr(&cov_trace_pc, trace_pc);
}
68
/*
 * Remove the PC-trace hook.  After the atomic store becomes visible,
 * __sanitizer_cov_trace_pc() reverts to a no-op.  Note there is no
 * synchronization with in-flight calls that already loaded the old
 * pointer; the consumer must tolerate late callbacks.
 */
void
cov_unregister_pc(void)
{

	atomic_store_ptr(&cov_trace_pc, NULL);
}
75
/*
 * Install the comparison-trace hook.  The __sanitizer_cov_trace_cmp*,
 * _const_cmp* and _switch entry points invoke it for every instrumented
 * comparison while it remains registered.  Atomic store: see
 * cov_register_pc().
 */
void
cov_register_cmp(cov_trace_cmp_t trace_cmp)
{

	atomic_store_ptr(&cov_trace_cmp, trace_cmp);
}
82
/*
 * Remove the comparison-trace hook; the comparison entry points revert
 * to no-ops.  As with cov_unregister_pc(), callbacks already in flight
 * may still complete after this returns.
 */
void
cov_unregister_cmp(void)
{

	atomic_store_ptr(&cov_trace_cmp, NULL);
}
89
90 /*
91  * Main entry point. A call to this function will be inserted
92  * at every edge, and if coverage is enabled for the thread
93  * this function will add the PC to the buffer.
94  */
95 void
96 __sanitizer_cov_trace_pc(void)
97 {
98         cov_trace_pc_t trace_pc;
99
100         trace_pc = atomic_load_ptr(&cov_trace_pc);
101         if (trace_pc != NULL)
102                 trace_pc((uint64_t)__builtin_return_address(0));
103 }
104
105 /*
106  * Comparison entry points. When the kernel performs a comparison
107  * operation the compiler inserts a call to one of the following
108  * functions to record the operation.
109  */
110 void
111 __sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
112 {
113         cov_trace_cmp_t trace_cmp;
114
115         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
116         if (trace_cmp != NULL)
117                 trace_cmp(COV_CMP_SIZE(0), arg1, arg2,
118                     (uint64_t)__builtin_return_address(0));
119 }
120
121 void
122 __sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
123 {
124         cov_trace_cmp_t trace_cmp;
125
126         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
127         if (trace_cmp != NULL)
128                 trace_cmp(COV_CMP_SIZE(1), arg1, arg2,
129                     (uint64_t)__builtin_return_address(0));
130 }
131
132 void
133 __sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
134 {
135         cov_trace_cmp_t trace_cmp;
136
137         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
138         if (trace_cmp != NULL)
139                 trace_cmp(COV_CMP_SIZE(2), arg1, arg2,
140                     (uint64_t)__builtin_return_address(0));
141 }
142
143 void
144 __sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
145 {
146         cov_trace_cmp_t trace_cmp;
147
148         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
149         if (trace_cmp != NULL)
150                 trace_cmp(COV_CMP_SIZE(3), arg1, arg2,
151                     (uint64_t)__builtin_return_address(0));
152 }
153
154 void
155 __sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
156 {
157         cov_trace_cmp_t trace_cmp;
158
159         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
160         if (trace_cmp != NULL)
161                 trace_cmp(COV_CMP_SIZE(0) | COV_CMP_CONST, arg1, arg2,
162                     (uint64_t)__builtin_return_address(0));
163 }
164
165 void
166 __sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
167 {
168         cov_trace_cmp_t trace_cmp;
169
170         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
171         if (trace_cmp != NULL)
172                 trace_cmp(COV_CMP_SIZE(1) | COV_CMP_CONST, arg1, arg2,
173                     (uint64_t)__builtin_return_address(0));
174 }
175
176 void
177 __sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
178 {
179         cov_trace_cmp_t trace_cmp;
180
181         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
182         if (trace_cmp != NULL)
183                 trace_cmp(COV_CMP_SIZE(2) | COV_CMP_CONST, arg1, arg2,
184                     (uint64_t)__builtin_return_address(0));
185 }
186
187 void
188 __sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
189 {
190         cov_trace_cmp_t trace_cmp;
191
192         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
193         if (trace_cmp != NULL)
194                 trace_cmp(COV_CMP_SIZE(3) | COV_CMP_CONST, arg1, arg2,
195                     (uint64_t)__builtin_return_address(0));
196 }
197
198 /*
199  * val is the switch operand
200  * cases[0] is the number of case constants
201  * cases[1] is the size of val in bits
202  * cases[2..n] are the case constants
203  */
204 void
205 __sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
206 {
207         uint64_t i, count, ret, type;
208         cov_trace_cmp_t trace_cmp;
209
210         trace_cmp = atomic_load_ptr(&cov_trace_cmp);
211         if (trace_cmp == NULL)
212                 return;
213
214         count = cases[0];
215         ret = (uint64_t)__builtin_return_address(0);
216
217         switch (cases[1]) {
218         case 8:
219                 type = COV_CMP_SIZE(0);
220                 break;
221         case 16:
222                 type = COV_CMP_SIZE(1);
223                 break;
224         case 32:
225                 type = COV_CMP_SIZE(2);
226                 break;
227         case 64:
228                 type = COV_CMP_SIZE(3);
229                 break;
230         default:
231                 return;
232         }
233
234         val |= COV_CMP_CONST;
235
236         for (i = 0; i < count; i++)
237                 if (!trace_cmp(type, val, cases[i + 2], ret))
238                         return;
239 }