/*-
 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
 *
 * Copyright (C) 2018 The FreeBSD Foundation. All rights reserved.
 * Copyright (C) 2018, 2019 Andrew Turner
 *
 * This software was developed by Mitchell Horne under sponsorship of
 * the FreeBSD Foundation.
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 *
 * $FreeBSD$
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/coverage.h>

#include <machine/atomic.h>
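
/*
 * The compiler emits calls to the __sanitizer_cov_* functions below when
 * the kernel is built with SanitizerCoverage instrumentation (typically
 * -fsanitize-coverage=trace-pc and trace-cmp, as enabled by the COVERAGE
 * kernel option).  They are declared locally so that the definitions below
 * have prototypes.
 */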
void __sanitizer_cov_trace_pc(void);
void __sanitizer_cov_trace_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_const_cmp1(uint8_t, uint8_t);
void __sanitizer_cov_trace_const_cmp2(uint16_t, uint16_t);
void __sanitizer_cov_trace_const_cmp4(uint32_t, uint32_t);
void __sanitizer_cov_trace_const_cmp8(uint64_t, uint64_t);
void __sanitizer_cov_trace_switch(uint64_t, uint64_t *);
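
/*
 * The currently registered coverage handlers (e.g. those installed by
 * kcov(4)), or NULL when no consumer is attached.  They are read and
 * written with atomic_load_ptr()/atomic_store_ptr() so instrumented code
 * racing with (un)registration sees either the old or the new pointer.
 */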
static cov_trace_pc_t cov_trace_pc;
static cov_trace_cmp_t cov_trace_cmp;

void
cov_register_pc(cov_trace_pc_t trace_pc)
{

        atomic_store_ptr(&cov_trace_pc, trace_pc);
}

void
cov_unregister_pc(void)
{

        atomic_store_ptr(&cov_trace_pc, NULL);
}

void
cov_register_cmp(cov_trace_cmp_t trace_cmp)
{

        atomic_store_ptr(&cov_trace_cmp, trace_cmp);
}

void
cov_unregister_cmp(void)
{

        atomic_store_ptr(&cov_trace_cmp, NULL);
}
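
/*
 * Illustrative sketch (not part of the in-tree code): a consumer such as
 * kcov(4) attaches by registering handlers through the functions above and
 * detaches by unregistering them.  The names below are hypothetical and the
 * recording logic is left out.
 *
 *        static void
 *        example_trace_pc(uint64_t pc)
 *        {
 *                // Store pc into a per-thread buffer when tracing is enabled.
 *        }
 *
 *        static void
 *        example_attach(void)
 *        {
 *                cov_register_pc(example_trace_pc);
 *        }
 *
 *        static void
 *        example_detach(void)
 *        {
 *                cov_unregister_pc();
 *        }
 */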

/*
 * Main entry point. The compiler inserts a call to this function at
 * every edge. If coverage is enabled for the thread, the registered
 * handler adds the PC to the buffer.
 */
void
__sanitizer_cov_trace_pc(void)
{
        cov_trace_pc_t trace_pc;

        trace_pc = (cov_trace_pc_t)atomic_load_ptr(&cov_trace_pc);
        if (trace_pc != NULL)
                trace_pc((uint64_t)__builtin_return_address(0));
}
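
/*
 * For illustration only (the exact code generation varies by compiler):
 * with trace-pc instrumentation enabled, every basic-block edge gains a
 * call such as
 *
 *        if (error != 0) {
 *                __sanitizer_cov_trace_pc();     (inserted by the compiler)
 *                return (error);
 *        }
 *        __sanitizer_cov_trace_pc();             (inserted by the compiler)
 *
 * so the return address captured above identifies which edge executed.
 */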

/*
 * Comparison entry points. When the kernel performs a comparison
 * operation, the compiler inserts a call to one of the following
 * functions to record the operation.
 */
void
__sanitizer_cov_trace_cmp1(uint8_t arg1, uint8_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(0), arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp2(uint16_t arg1, uint16_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(1), arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp4(uint32_t arg1, uint32_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(2), arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_cmp8(uint64_t arg1, uint64_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(3), arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp1(uint8_t arg1, uint8_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(0) | COV_CMP_CONST, arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp2(uint16_t arg1, uint16_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(1) | COV_CMP_CONST, arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp4(uint32_t arg1, uint32_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(2) | COV_CMP_CONST, arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}

void
__sanitizer_cov_trace_const_cmp8(uint64_t arg1, uint64_t arg2)
{
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp != NULL)
                trace_cmp(COV_CMP_SIZE(3) | COV_CMP_CONST, arg1, arg2,
                    (uint64_t)__builtin_return_address(0));
}
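
/*
 * Illustrative sketch (not part of the in-tree code): a handler registered
 * with cov_register_cmp() receives the encoded operand size and constant
 * flag, both operand values, and the return address of the instrumented
 * comparison.  The handler name is hypothetical; the signature and the
 * false-means-stop return convention are inferred from the early return in
 * __sanitizer_cov_trace_switch() below, and the exact bit layout of
 * COV_CMP_SIZE()/COV_CMP_CONST is defined in <sys/coverage.h>.
 *
 *        static bool
 *        example_trace_cmp(uint64_t type, uint64_t arg1, uint64_t arg2,
 *            uint64_t ret)
 *        {
 *                if ((type & COV_CMP_CONST) != 0) {
 *                        // One operand was a compile-time constant.
 *                }
 *                // Record the entry; return false once no more records
 *                // fit, which stops the switch tracer early.
 *                return (true);
 *        }
 */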

/*
 * val is the switch operand
 * cases[0] is the number of case constants
 * cases[1] is the size of val in bits
 * cases[2..n] are the case constants
 */
void
__sanitizer_cov_trace_switch(uint64_t val, uint64_t *cases)
{
        uint64_t i, count, ret, type;
        cov_trace_cmp_t trace_cmp;

        trace_cmp = (cov_trace_cmp_t)atomic_load_ptr(&cov_trace_cmp);
        if (trace_cmp == NULL)
                return;

        count = cases[0];
        ret = (uint64_t)__builtin_return_address(0);

        switch (cases[1]) {
        case 8:
                type = COV_CMP_SIZE(0);
                break;
        case 16:
                type = COV_CMP_SIZE(1);
                break;
        case 32:
                type = COV_CMP_SIZE(2);
                break;
        case 64:
                type = COV_CMP_SIZE(3);
                break;
        default:
                return;
        }

        type |= COV_CMP_CONST;

        for (i = 0; i < count; i++)
                if (!trace_cmp(type, val, cases[i + 2], ret))
                        return;
}
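
/*
 * Worked example (illustrative only): for
 *
 *        uint32_t x;
 *        ...
 *        switch (x) {
 *        case 3:
 *        case 7:
 *                ...
 *        }
 *
 * the instrumentation passes a table equivalent to
 *
 *        uint64_t cases[] = { 2, 32, 3, 7 };
 *        __sanitizer_cov_trace_switch(x, cases);
 *
 * i.e. two case constants, a 32-bit operand, then the constants themselves,
 * matching the layout described in the comment above.
 */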