/*
 * Copyright (c) 2009 Pawel Jakub Dawidek <pjd@FreeBSD.org>
 * All rights reserved.
 *
 * Copyright (c) 2012 Spectra Logic Corporation.  All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHORS AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHORS OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/kmem.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/queue.h>
#include <sys/taskqueue.h>
#include <sys/taskq.h>
#include <sys/zfs_context.h>
#include <sys/ck.h>
#include <sys/epoch.h>

#include <vm/uma.h>

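/*
 * FreeBSD versions predating taskqueue_start_threads_in_proc() fall back
 * to plain taskqueue_start_threads(), ignoring the requested proc.
 */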
#if __FreeBSD_version < 1201522
#define	taskqueue_start_threads_in_proc(tqp, count, pri, proc, name, ...) \
    taskqueue_start_threads(tqp, count, pri, name, __VA_ARGS__)
#endif

static uint_t taskq_tsd;
static uma_zone_t taskq_zone;

taskq_t *system_taskq = NULL;
taskq_t *system_delay_taskq = NULL;
taskq_t *dynamic_taskq = NULL;

proc_t *system_proc;

extern int uma_align_cache;

static MALLOC_DEFINE(M_TASKQ, "taskq", "taskq structures");

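/*
 * Dispatched entries are hashed by task id so that taskq_cancel_id() and
 * taskq_wait_id() can translate an id back into a taskq_ent_t.  The bucket
 * array is protected by a smaller array of sx locks, each lock covering a
 * group of buckets.
 */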
static CK_LIST_HEAD(tqenthashhead, taskq_ent) *tqenthashtbl;
static unsigned long tqenthash;
static unsigned long tqenthashlock;
static struct sx *tqenthashtbl_lock;

static taskqid_t tqidnext;

#define	TQIDHASH(tqid) (&tqenthashtbl[(tqid) & tqenthash])
#define	TQIDHASHLOCK(tqid) (&tqenthashtbl_lock[((tqid) & tqenthashlock)])

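/*
 * Two kinds of dispatched entry: TIMEOUT_TASK entries were queued with a
 * delay via taskq_dispatch_delay(), NORMAL_TASK entries run as soon as a
 * worker thread is available.
 */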
#define	TIMEOUT_TASK 1
#define	NORMAL_TASK 2

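/*
 * Size the id hash at roughly eight buckets per CPU and share one sx lock
 * among every eight buckets.  Both counts are kept as masks so that
 * TQIDHASH() and TQIDHASHLOCK() can index with a plain bitwise AND.
 */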
static void
system_taskq_init(void *arg)
{
	int i;

	tsd_create(&taskq_tsd, NULL);
	tqenthashtbl = hashinit(mp_ncpus * 8, M_TASKQ, &tqenthash);
	tqenthashlock = (tqenthash + 1) / 8;
	if (tqenthashlock > 0)
		tqenthashlock--;
	tqenthashtbl_lock =
	    malloc(sizeof (*tqenthashtbl_lock) * (tqenthashlock + 1),
	    M_TASKQ, M_WAITOK | M_ZERO);
	for (i = 0; i < tqenthashlock + 1; i++)
		sx_init_flags(&tqenthashtbl_lock[i], "tqenthash", SX_DUPOK);
	taskq_zone = uma_zcreate("taskq_zone", sizeof (taskq_ent_t),
	    NULL, NULL, NULL, NULL,
	    UMA_ALIGN_CACHE, 0);
	system_taskq = taskq_create("system_taskq", mp_ncpus, minclsyspri,
	    0, 0, 0);
	system_delay_taskq = taskq_create("system_delay_taskq", mp_ncpus,
	    minclsyspri, 0, 0, 0);
}
SYSINIT(system_taskq_init, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_init,
    NULL);

static void
system_taskq_fini(void *arg)
{
	int i;

	taskq_destroy(system_delay_taskq);
	taskq_destroy(system_taskq);
	uma_zdestroy(taskq_zone);
	tsd_destroy(&taskq_tsd);
	for (i = 0; i < tqenthashlock + 1; i++)
		sx_destroy(&tqenthashtbl_lock[i]);
	for (i = 0; i < tqenthash + 1; i++)
		VERIFY(CK_LIST_EMPTY(&tqenthashtbl[i]));
	free(tqenthashtbl_lock, M_TASKQ);
	free(tqenthashtbl, M_TASKQ);
}
SYSUNINIT(system_taskq_fini, SI_SUB_CONFIGURE, SI_ORDER_ANY, system_taskq_fini,
    NULL);

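/*
 * Generate a new task id.  Id zero is reserved to mean "no task", so on
 * 32-bit platforms the counter is retried when it wraps to zero.  On LP64
 * the 64-bit counter is not expected to wrap within the lifetime of the
 * system, so the bare increment suffices.
 */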
#ifdef __LP64__
static taskqid_t
__taskq_genid(void)
{

	return (atomic_fetchadd_long(&tqidnext, 1) + 1);
}
#else
static taskqid_t
__taskq_genid(void)
{
	taskqid_t tqid;

	for (;;) {
		tqid = atomic_fetchadd_int(&tqidnext, 1) + 1;
		if (__predict_true(tqid != 0))
			break;
	}
	return (tqid);
}
#endif

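/*
 * Find the entry with the given id and take a reference on it; the caller
 * is responsible for dropping that reference with taskq_free().  Returns
 * NULL if the id is no longer registered.
 */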
static taskq_ent_t *
taskq_lookup(taskqid_t tqid)
{
	taskq_ent_t *ent = NULL;

	sx_xlock(TQIDHASHLOCK(tqid));
	CK_LIST_FOREACH(ent, TQIDHASH(tqid), tqent_hash) {
		if (ent->tqent_id == tqid)
			break;
	}
	if (ent != NULL)
		refcount_acquire(&ent->tqent_rc);
	sx_xunlock(TQIDHASHLOCK(tqid));
	return (ent);
}

static taskqid_t
taskq_insert(taskq_ent_t *ent)
{
	taskqid_t tqid;

	tqid = __taskq_genid();
	VERIFY(tqid);
	ent->tqent_id = tqid;
	ent->tqent_registered = B_TRUE;
	sx_xlock(TQIDHASHLOCK(tqid));
	CK_LIST_INSERT_HEAD(TQIDHASH(tqid), ent, tqent_hash);
	sx_xunlock(TQIDHASHLOCK(tqid));
	return (tqid);
}

static void
taskq_remove(taskq_ent_t *ent)
{
	taskqid_t tqid = ent->tqent_id;

	if (!ent->tqent_registered)
		return;

	sx_xlock(TQIDHASHLOCK(tqid));
	CK_LIST_REMOVE(ent, tqent_hash);
	sx_xunlock(TQIDHASHLOCK(tqid));
	ent->tqent_registered = B_FALSE;
}

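/*
 * Queue init/shutdown callback: record which taskq the current worker
 * thread belongs to in thread-specific data so taskq_of_curthread() works.
 * On amd64 and arm64 the worker is also marked as an FPU-using kernel
 * thread so that task functions may use SIMD registers.
 */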
static void
taskq_tsd_set(void *context)
{
	taskq_t *tq = context;

#if defined(__amd64__) || defined(__aarch64__)
	if (context != NULL && tsd_get(taskq_tsd) == NULL)
		fpu_kern_thread(FPU_KERN_NORMAL);
#endif
	tsd_set(taskq_tsd, tq);
}

static taskq_t *
taskq_create_impl(const char *name, int nthreads, pri_t pri,
    proc_t *proc __maybe_unused, uint_t flags)
{
	taskq_t *tq;

	if ((flags & TASKQ_THREADS_CPU_PCT) != 0)
		nthreads = MAX((mp_ncpus * nthreads) / 100, 1);

	tq = kmem_alloc(sizeof (*tq), KM_SLEEP);
	tq->tq_queue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &tq->tq_queue);
	taskqueue_set_callback(tq->tq_queue, TASKQUEUE_CALLBACK_TYPE_INIT,
	    taskq_tsd_set, tq);
	taskqueue_set_callback(tq->tq_queue, TASKQUEUE_CALLBACK_TYPE_SHUTDOWN,
	    taskq_tsd_set, NULL);
	(void) taskqueue_start_threads_in_proc(&tq->tq_queue, nthreads, pri,
	    proc, "%s", name);

	return ((taskq_t *)tq);
}

taskq_t *
taskq_create(const char *name, int nthreads, pri_t pri, int minalloc __unused,
    int maxalloc __unused, uint_t flags)
{
	return (taskq_create_impl(name, nthreads, pri, system_proc, flags));
}

taskq_t *
taskq_create_proc(const char *name, int nthreads, pri_t pri,
    int minalloc __unused, int maxalloc __unused, proc_t *proc, uint_t flags)
{
	return (taskq_create_impl(name, nthreads, pri, proc, flags));
}

void
taskq_destroy(taskq_t *tq)
{

	taskqueue_free(tq->tq_queue);
	kmem_free(tq, sizeof (*tq));
}

int
taskq_member(taskq_t *tq, kthread_t *thread)
{

	return (taskqueue_member(tq->tq_queue, thread));
}

taskq_t *
taskq_of_curthread(void)
{
	return (tsd_get(taskq_tsd));
}

static void
taskq_free(taskq_ent_t *task)
{
	taskq_remove(task);
	if (refcount_release(&task->tqent_rc))
		uma_zfree(taskq_zone, task);
}

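/*
 * Cancel a dispatched task.  The entry is marked cancelled before the
 * underlying taskqueue cancel is attempted; if the task is already running
 * (EBUSY), wait for it to finish instead.  A pending task that was
 * successfully dequeued will never run, so the queue's reference must be
 * dropped here, and the reference taken by taskq_lookup() is dropped in
 * all cases.
 */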
int
taskq_cancel_id(taskq_t *tq, taskqid_t tid)
{
	uint32_t pend;
	int rc;
	taskq_ent_t *ent;

	if (tid == 0)
		return (0);

	if ((ent = taskq_lookup(tid)) == NULL)
		return (0);

	ent->tqent_cancelled = B_TRUE;
	if (ent->tqent_type == TIMEOUT_TASK) {
		rc = taskqueue_cancel_timeout(tq->tq_queue,
		    &ent->tqent_timeout_task, &pend);
	} else
		rc = taskqueue_cancel(tq->tq_queue, &ent->tqent_task, &pend);
	if (rc == EBUSY) {
		taskqueue_drain(tq->tq_queue, &ent->tqent_task);
	} else if (pend) {
		/*
		 * Tasks normally free themselves when run, but here the task
		 * was cancelled so it did not free itself.
		 */
		taskq_free(ent);
	}
	/* Free the extra reference we added with taskq_lookup. */
	taskq_free(ent);
	return (rc);
}

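/*
 * Trampoline executed by the FreeBSD taskqueue: invoke the dispatched
 * function unless the entry was cancelled, then drop the queue's reference.
 */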
static void
taskq_run(void *arg, int pending __unused)
{
	taskq_ent_t *task = arg;

	if (!task->tqent_cancelled)
		task->tqent_func(task->tqent_arg);
	taskq_free(task);
}

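/*
 * Dispatch func(arg) at an absolute lbolt time.  The expiration is
 * converted to a relative tick count; if it is already in the past the
 * task is handed to taskq_dispatch() and runs immediately.
 */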
taskqid_t
taskq_dispatch_delay(taskq_t *tq, task_func_t func, void *arg,
    uint_t flags, clock_t expire_time)
{
	taskq_ent_t *task;
	taskqid_t tqid;
	clock_t timo;
	int mflag;

	timo = expire_time - ddi_get_lbolt();
	if (timo <= 0)
		return (taskq_dispatch(tq, func, arg, flags));

	if ((flags & (TQ_SLEEP | TQ_NOQUEUE)) == TQ_SLEEP)
		mflag = M_WAITOK;
	else
		mflag = M_NOWAIT;

	task = uma_zalloc(taskq_zone, mflag);
	if (task == NULL)
		return (0);
	task->tqent_func = func;
	task->tqent_arg = arg;
	task->tqent_type = TIMEOUT_TASK;
	task->tqent_cancelled = B_FALSE;
	refcount_init(&task->tqent_rc, 1);
	tqid = taskq_insert(task);
	TIMEOUT_TASK_INIT(tq->tq_queue, &task->tqent_timeout_task, 0,
	    taskq_run, task);

	taskqueue_enqueue_timeout(tq->tq_queue, &task->tqent_timeout_task,
	    timo);
	return (tqid);
}

taskqid_t
taskq_dispatch(taskq_t *tq, task_func_t func, void *arg, uint_t flags)
{
	taskq_ent_t *task;
	int mflag, prio;
	taskqid_t tqid;

	if ((flags & (TQ_SLEEP | TQ_NOQUEUE)) == TQ_SLEEP)
		mflag = M_WAITOK;
	else
		mflag = M_NOWAIT;
	/*
	 * If TQ_FRONT is given, we want higher priority for this task, so it
	 * can go at the front of the queue.
	 */
	prio = !!(flags & TQ_FRONT);

	task = uma_zalloc(taskq_zone, mflag);
	if (task == NULL)
		return (0);
	refcount_init(&task->tqent_rc, 1);
	task->tqent_func = func;
	task->tqent_arg = arg;
	task->tqent_cancelled = B_FALSE;
	task->tqent_type = NORMAL_TASK;
	tqid = taskq_insert(task);
	TASK_INIT(&task->tqent_task, prio, taskq_run, task);
	taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
	return (tqid);
}

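/*
 * Example usage (illustrative only; my_func and my_arg are hypothetical
 * caller-side names, not part of this file):
 *
 *	taskqid_t id = taskq_dispatch(tq, my_func, my_arg, TQ_SLEEP);
 *	if (id != 0)
 *		taskq_wait_id(tq, id);	wait for my_func to complete
 */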
static void
taskq_run_ent(void *arg, int pending __unused)
{
	taskq_ent_t *task = arg;

	task->tqent_func(task->tqent_arg);
}

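/*
 * Dispatch using a caller-supplied, preallocated entry.  Such entries are
 * not registered in the id hash (tqent_id stays 0), so they cannot be
 * cancelled or waited on by id, and nothing is freed when they complete.
 */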
void
taskq_dispatch_ent(taskq_t *tq, task_func_t func, void *arg, uint32_t flags,
    taskq_ent_t *task)
{
	int prio;

	/*
	 * If TQ_FRONT is given, we want higher priority for this task, so it
	 * can go at the front of the queue.
	 */
	prio = !!(flags & TQ_FRONT);
	task->tqent_cancelled = B_FALSE;
	task->tqent_registered = B_FALSE;
	task->tqent_id = 0;
	task->tqent_func = func;
	task->tqent_arg = arg;

	TASK_INIT(&task->tqent_task, prio, taskq_run_ent, task);
	taskqueue_enqueue(tq->tq_queue, &task->tqent_task);
}

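/*
 * Wait for the whole queue to become idle: taskqueue_quiesce() returns
 * only once no tasks are queued or running.
 */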
void
taskq_wait(taskq_t *tq)
{
	taskqueue_quiesce(tq->tq_queue);
}

void
taskq_wait_id(taskq_t *tq, taskqid_t tid)
{
	taskq_ent_t *ent;

	if (tid == 0)
		return;
	if ((ent = taskq_lookup(tid)) == NULL)
		return;

	taskqueue_drain(tq->tq_queue, &ent->tqent_task);
	taskq_free(ent);
}

void
taskq_wait_outstanding(taskq_t *tq, taskqid_t id __unused)
{
	taskqueue_drain_all(tq->tq_queue);
}

int
taskq_empty_ent(taskq_ent_t *t)
{
	return (t->tqent_task.ta_pending == 0);
}