/*
 * Coverity Scan model
 * https://scan.coverity.com/models
 *
 * This is a modeling file for Coverity Scan.
 * Modeling helps to avoid false positives.
 *
 * - Modeling doesn't need full structs and typedefs. Rudimentary structs
 *   and similar types are sufficient.
 * - An uninitialized local pointer is not an error. It signifies that the
 *   variable could be either NULL or have some data.
 *
 * Coverity Scan doesn't pick up modifications automatically. The model file
 * must be uploaded by an admin in the analysis settings.
 *
 * Some of this was initially cribbed from:
 *
 * https://github.com/kees/coverity-linux/blob/trunk/model.c
 *
 * The model below is based on the original model written by Brian Behlendorf
 * for the zfsonlinux/zfs repository. Some inspiration was taken from
 * kees/coverity-linux, specifically involving memory copies.
 */
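
/*
 * Rough meaning of the Coverity modeling primitives used below, per
 * Coverity's modeling documentation:
 *
 * - __coverity_alloc__(size) and __coverity_alloc_nosize__() return memory
 *   the analyzer treats as freshly allocated (of known or unknown size).
 * - __coverity_free__(ptr) marks ptr as released.
 * - __coverity_writeall__(ptr) marks the pointed-to object as fully written.
 * - __coverity_tainted_data_argument__(ptr) marks the data as tainted
 *   (untrusted) input.
 * - __coverity_sleep__() marks a point where the caller may block.
 * - __coverity_exclusive_lock_acquire__()/__coverity_exclusive_lock_release__()
 *   and the __coverity_recursive_lock_*__() variants model lock operations.
 *
 * A non-void model function with no return statement leaves its return value
 * unconstrained, which is presumably why several int functions below return
 * nothing.
 */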

#include <stdarg.h>

#define UMEM_DEFAULT            0x0000  /* normal -- may fail */
#define UMEM_NOFAIL             0x0100  /* Never fails */

#define NULL    (0)

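/*
 * Deliberately uninitialized: Coverity treats the values of these globals as
 * unknown, so branching on them models "either outcome is possible" (e.g.,
 * an allocation that may succeed or fail).
 */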
int condition0, condition1;

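/*
 * ddi_copyin(): data crossing in from user space is marked tainted and the
 * destination buffer is treated as fully written.
 */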
int
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
        __coverity_tainted_data_argument__(from);
        __coverity_tainted_data_argument__(to);
        __coverity_writeall__(to);
}

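/*
 * The memory routines below only need to tell the analyzer that the
 * destination buffer ends up fully written.
 */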
void *
memset(void *dst, int c, size_t len)
{
        __coverity_writeall__(dst);
        return (dst);
}

void *
memmove(void *dst, void *src, size_t len)
{
        __coverity_writeall__(dst);
        return (dst);
}

void *
memcpy(void *dst, void *src, size_t len)
{
        __coverity_writeall__(dst);
        return (dst);
}

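/*
 * umem allocators: with UMEM_NOFAIL the allocation always succeeds; otherwise
 * it may return either fresh memory or NULL.
 */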
void *
umem_alloc_aligned(size_t size, size_t align, int kmflags)
{
        (void) align;

        if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
                return (__coverity_alloc__(size));
        else if (condition0)
                return (__coverity_alloc__(size));
        else
                return (NULL);
}

void *
umem_alloc(size_t size, int kmflags)
{
        if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
                return (__coverity_alloc__(size));
        else if (condition0)
                return (__coverity_alloc__(size));
        else
                return (NULL);
}

void *
umem_zalloc(size_t size, int kmflags)
{
        if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
                return (__coverity_alloc__(size));
        else if (condition0)
                return (__coverity_alloc__(size));
        else
                return (NULL);
}

void
umem_free(void *buf, size_t size)
{
        (void) size;

        __coverity_free__(buf);
}

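/*
 * umem object caches: umem_cache_t only needs to exist as a type. Cache
 * allocations may sleep, have a size unknown to the analyzer, and may fail
 * unless UMEM_NOFAIL is set.
 */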
typedef struct {} umem_cache_t;

void *
umem_cache_alloc(umem_cache_t *skc, int flags)
{
        (void) skc;

        if (condition1)
                __coverity_sleep__();

        if ((UMEM_NOFAIL & flags) == UMEM_NOFAIL)
                return (__coverity_alloc_nosize__());
        else if (condition0)
                return (__coverity_alloc_nosize__());
        else
                return (NULL);
}

void
umem_cache_free(umem_cache_t *skc, void *obj)
{
        (void) skc;

        __coverity_free__(obj);
}

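/*
 * SPL kmem allocators: these may sleep, always succeed when fl == 0
 * (presumably KM_SLEEP), and may otherwise return NULL.
 */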
void *
spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
{
        (void) func;
        (void) line;

        if (condition1)
                __coverity_sleep__();

        if (fl == 0)
                return (__coverity_alloc__(sz));
        else if (condition0)
                return (__coverity_alloc__(sz));
        else
                return (NULL);
}

void *
spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
{
        (void) func;
        (void) line;

        if (condition1)
                __coverity_sleep__();

        if (fl == 0)
                return (__coverity_alloc__(sz));
        else if (condition0)
                return (__coverity_alloc__(sz));
        else
                return (NULL);
}

void
spl_kmem_free(const void *ptr, size_t sz)
{
        (void) sz;

        __coverity_free__(ptr);
}

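/*
 * SPL object caches mirror the umem caches above: allocations may sleep, have
 * an unknown size, and fail only when flags != 0.
 */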
typedef struct {} spl_kmem_cache_t;

void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
        (void) skc;

        if (condition1)
                __coverity_sleep__();

        if (flags == 0)
                return (__coverity_alloc_nosize__());
        else if (condition0)
                return (__coverity_alloc_nosize__());
        else
                return (NULL);
}

void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
        (void) skc;

        __coverity_free__(obj);
}

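/*
 * Standard C allocator entry points, modeled directly as Coverity allocation
 * and free events.
 */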
void *
malloc(size_t size)
{
        return (__coverity_alloc__(size));
}

void
free(void *buf)
{
        __coverity_free__(buf);
}

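/* sched_yield() is modeled purely as a point at which the caller may block. */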
int
sched_yield(void)
{
        __coverity_sleep__();
}

typedef struct {} kmutex_t;
typedef struct {} krwlock_t;
typedef int krw_t;

/*
 * Coverity reportedly does not support macros, so this only works for
 * userspace.
 */

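/*
 * illumos-style locks: mutex_enter() may sleep and takes the mutex
 * exclusively; the rwlock is modeled as a recursive lock, presumably because
 * readers may hold it concurrently.
 */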
void
mutex_enter(kmutex_t *mp)
{
        if (condition0)
                __coverity_sleep__();

        __coverity_exclusive_lock_acquire__(mp);
}

int
mutex_tryenter(kmutex_t *mp)
{
        if (condition0) {
                __coverity_exclusive_lock_acquire__(mp);
                return (1);
        }

        return (0);
}

void
mutex_exit(kmutex_t *mp)
{
        __coverity_exclusive_lock_release__(mp);
}

void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
        (void) rw;

        if (condition0)
                __coverity_sleep__();

        __coverity_recursive_lock_acquire__(rwlp);
}

void
rw_exit(krwlock_t *rwlp)
{
        __coverity_recursive_lock_release__(rwlp);
}

int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
        (void) rw;

        if (condition0) {
                __coverity_recursive_lock_acquire__(rwlp);
                return (1);
        }

        return (0);
}

/* Thus, we fall back to the Linux kernel locks. */
struct {} mutex;
struct {} rw_semaphore;

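/*
 * Linux kernel locks: mutex_lock()/mutex_unlock() are exclusive, while the
 * rw_semaphore operations are modeled as recursive locks, again presumably
 * because multiple readers may hold the semaphore at once.
 */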
void
mutex_lock(struct mutex *lock)
{
        if (condition0) {
                __coverity_sleep__();
        }
        __coverity_exclusive_lock_acquire__(lock);
}

void
mutex_unlock(struct mutex *lock)
{
        __coverity_exclusive_lock_release__(lock);
}

void
down_read(struct rw_semaphore *sem)
{
        if (condition0) {
                __coverity_sleep__();
        }
        __coverity_recursive_lock_acquire__(sem);
}

void
down_write(struct rw_semaphore *sem)
{
        if (condition0) {
                __coverity_sleep__();
        }
        __coverity_recursive_lock_acquire__(sem);
}

int
down_read_trylock(struct rw_semaphore *sem)
{
        if (condition0) {
                __coverity_recursive_lock_acquire__(sem);
                return (1);
        }

        return (0);
}

int
down_write_trylock(struct rw_semaphore *sem)
{
        if (condition0) {
                __coverity_recursive_lock_acquire__(sem);
                return (1);
        }

        return (0);
}

void
up_read(struct rw_semaphore *sem)
{
        __coverity_recursive_lock_release__(sem);
}

void
up_write(struct rw_semaphore *sem)
{
        __coverity_recursive_lock_release__(sem);
}

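/* __cond_resched() marks a point at which the task may voluntarily sleep. */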
int
__cond_resched(void)
{
        if (condition0) {
                __coverity_sleep__();
        }
}