/*
 * https://scan.coverity.com/models
 *
 * This is a modeling file for Coverity Scan.
 * Modeling helps to avoid false positives.
 *
 * - Modeling doesn't need full structs and typedefs. Rudimentary structs
 *   and similar types are sufficient.
 * - An uninitialized local pointer is not an error. It signifies that the
 *   variable could be either NULL or have some data.
 *
 * Coverity Scan doesn't pick up modifications automatically. The model file
 * must be uploaded by an admin in the analysis settings.
 *
 * Some of this was initially cribbed from:
 *
 * https://github.com/kees/coverity-linux/blob/trunk/model.c
 *
 * The model below was based on the original model by Brian Behlendorf for
 * the original zfsonlinux/zfs repository. Some inspiration was taken from
 * kees/coverity-linux, specifically involving memory copies.
 */
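/*
 * To illustrate that last point (an illustration only, not part of the
 * model proper; the function name is made up): returning an uninitialized
 * local from a model tells the analyzer the result is an unknown value,
 * e.g. possibly NULL:
 *
 *	void *
 *	example_lookup(void)
 *	{
 *		void *result;
 *		return (result);
 *	}
 */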
#define	UMEM_DEFAULT	0x0000	/* normal -- may fail */
#define	UMEM_NOFAIL	0x0100	/* Never fails */

#define	KM_NOSLEEP	0x0001	/* cannot block for memory; may fail */

#define	NULL	(0)
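/*
 * These are deliberately never initialized: Coverity treats a read of an
 * uninitialized global as an unknown value, so branching on condition0 or
 * condition1 lets a model take either path nondeterministically (e.g.
 * "this allocation may or may not fail").
 */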
int condition0, condition1;
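/*
 * ddi_copyin() copies data in from userspace; the model marks both buffers
 * as tainted and tells the analyzer the destination has been fully written,
 * so subsequent reads of it are not flagged as uses of uninitialized memory.
 */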
void
ddi_copyin(const void *from, void *to, size_t len, int flags)
{
	__coverity_tainted_data_argument__(from);
	__coverity_tainted_data_argument__(to);
	__coverity_writeall__(to);
}
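/*
 * memset()/memmove()/memcpy() are modeled so that the destination buffer is
 * treated as fully initialized afterwards; without this, Coverity can report
 * spurious uninitialized-read defects on copied-into buffers.
 */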
void *
memset(void *dst, int c, size_t len)
{
	__coverity_writeall__(dst);
	return (dst);
}

void *
memmove(void *dst, void *src, size_t len)
{
	__coverity_writeall__(dst);
	return (dst);
}

void *
memcpy(void *dst, void *src, size_t len)
{
	__coverity_writeall__(dst);
	return (dst);
}
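/*
 * Allocator models: when the caller passes UMEM_NOFAIL the allocation is
 * modeled as always succeeding; otherwise the unknown value of condition0
 * decides, so the analyzer checks that callers handle a NULL return.
 */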
void *
umem_alloc_aligned(size_t size, size_t align, int kmflags)
{
	if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
		return (__coverity_alloc__(size));
	else if (condition0)
		return (__coverity_alloc__(size));

	return (NULL);
}
void *
umem_alloc(size_t size, int kmflags)
{
	if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
		return (__coverity_alloc__(size));
	else if (condition0)
		return (__coverity_alloc__(size));

	return (NULL);
}
void *
umem_zalloc(size_t size, int kmflags)
{
	if ((UMEM_NOFAIL & kmflags) == UMEM_NOFAIL)
		return (__coverity_alloc__(size));
	else if (condition0)
		return (__coverity_alloc__(size));

	return (NULL);
}
void
umem_free(void *buf, size_t size)
{
	__coverity_free__(buf);
}
typedef struct {} umem_cache_t;
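/*
 * Object caches hand out fixed-size objects whose size the model does not
 * know, so __coverity_alloc_nosize__() is used here instead of
 * __coverity_alloc__().
 */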
void *
umem_cache_alloc(umem_cache_t *skc, int flags)
{
	if (condition1)
		__coverity_sleep__();

	if ((UMEM_NOFAIL & flags) == UMEM_NOFAIL)
		return (__coverity_alloc_nosize__());
	else if (condition0)
		return (__coverity_alloc_nosize__());

	return (NULL);
}
void
umem_cache_free(umem_cache_t *skc, void *obj)
{
	__coverity_free__(obj);
}
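/*
 * Like umem_cache_alloc() above, the spl_kmem_*() models call
 * __coverity_sleep__() behind the unknown condition1, telling the analyzer
 * that these functions may block, so they can be flagged when reached from
 * contexts that must not sleep.
 */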
void *
spl_kmem_alloc(size_t sz, int fl, const char *func, int line)
{
	if (condition1)
		__coverity_sleep__();

	if ((fl & KM_NOSLEEP) != KM_NOSLEEP) {
		return (__coverity_alloc__(sz));
	} else if (condition0)
		return (__coverity_alloc__(sz));

	return (NULL);
}
void *
spl_kmem_zalloc(size_t sz, int fl, const char *func, int line)
{
	if (condition1)
		__coverity_sleep__();

	if ((fl & KM_NOSLEEP) != KM_NOSLEEP) {
		return (__coverity_alloc__(sz));
	} else if (condition0)
		return (__coverity_alloc__(sz));

	return (NULL);
}
void
spl_kmem_free(const void *ptr, size_t sz)
{
	__coverity_free__(ptr);
}
typedef struct {} spl_kmem_cache_t;
void *
spl_kmem_cache_alloc(spl_kmem_cache_t *skc, int flags)
{
	if (condition1)
		__coverity_sleep__();

	if ((flags & KM_NOSLEEP) != KM_NOSLEEP) {
		return (__coverity_alloc_nosize__());
	} else if (condition0)
		return (__coverity_alloc_nosize__());

	return (NULL);
}
void
spl_kmem_cache_free(spl_kmem_cache_t *skc, void *obj)
{
	__coverity_free__(obj);
}
	__coverity_alloc__(size);

	__coverity_free__(buf);

	__coverity_sleep__();
typedef struct {} kmutex_t;
typedef struct {} krwlock_t;
typedef int krw_t;
/*
 * Coverity reportedly does not support macros, so this only works for
 * userspace.
 */
void
mutex_enter(kmutex_t *mp)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(mp);
}
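/*
 * Trylock models acquire the lock only on the unknown condition0 and report
 * success or failure accordingly, so the analyzer explores both outcomes.
 */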
int
mutex_tryenter(kmutex_t *mp)
{
	if (condition0) {
		__coverity_exclusive_lock_acquire__(mp);
		return (1);
	}

	return (0);
}
void
mutex_exit(kmutex_t *mp)
{
	__coverity_exclusive_lock_release__(mp);
}
void
rw_enter(krwlock_t *rwlp, krw_t rw)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(rwlp);
}
void
rw_exit(krwlock_t *rwlp)
{
	__coverity_recursive_lock_release__(rwlp);
}
int
rw_tryenter(krwlock_t *rwlp, krw_t rw)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(rwlp);
		return (1);
	}

	return (0);
}
/* Thus, we fall back to the Linux kernel locks */
struct mutex {};
struct rw_semaphore {};
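/*
 * Empty definitions are enough here: the lock primitives only care about
 * the identity of the object pointed to, not its contents.
 */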
void
mutex_lock(struct mutex *lock)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_exclusive_lock_acquire__(lock);
}
void
mutex_unlock(struct mutex *lock)
{
	__coverity_exclusive_lock_release__(lock);
}
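/*
 * Reader-writer semaphores can be held by multiple readers at once, which
 * an exclusive lock cannot express, so (presumably for that reason) they
 * are modeled with the recursive-lock primitives instead.
 */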
void
down_read(struct rw_semaphore *sem)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(sem);
}
void
down_write(struct rw_semaphore *sem)
{
	if (condition0)
		__coverity_sleep__();

	__coverity_recursive_lock_acquire__(sem);
}
int
down_read_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}
int
down_write_trylock(struct rw_semaphore *sem)
{
	if (condition0) {
		__coverity_recursive_lock_acquire__(sem);
		return (1);
	}

	return (0);
}
void
up_read(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}
void
up_write(struct rw_semaphore *sem)
{
	__coverity_recursive_lock_release__(sem);
}
	__coverity_sleep__();