/*-
 * Copyright (c) 2017 Hans Petter Selasky
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <linux/workqueue.h>
#include <linux/wait.h>
#include <linux/compat.h>
#include <linux/spinlock.h>

#include <sys/kernel.h>
/*
 * Define all work struct states
 */
enum {
	WORK_ST_IDLE,			/* idle - not started */
	WORK_ST_TIMER,			/* timer is being started */
	WORK_ST_TASK,			/* taskqueue is being queued */
	WORK_ST_EXEC,			/* callback is being called */
	WORK_ST_CANCEL,			/* cancel is being requested */
	WORK_ST_MAX,			/* number of states - keep last */
};
50 * Define global workqueues
52 static struct workqueue_struct *linux_system_short_wq;
53 static struct workqueue_struct *linux_system_long_wq;
55 struct workqueue_struct *system_wq;
56 struct workqueue_struct *system_long_wq;
57 struct workqueue_struct *system_unbound_wq;
58 struct workqueue_struct *system_power_efficient_wq;
60 static void linux_delayed_work_timer_fn(void *);
63 * This function atomically updates the work state and returns the
64 * previous state at the time of update.
67 linux_update_state(atomic_t *v, const uint8_t *pstate)
73 while ((old = atomic_cmpxchg(v, c, pstate[c])) != c)
80 * A LinuxKPI task is allowed to free itself inside the callback function
81 * and cannot safely be referred after the callback function has
82 * completed. This function gives the linux_work_fn() function a hint,
83 * that the task is not going away and can have its state checked
84 * again. Without this extra hint LinuxKPI tasks cannot be serialized
85 * accross multiple worker threads.
88 linux_work_exec_unblock(struct work_struct *work)
90 struct workqueue_struct *wq;
91 struct work_exec *exec;
94 wq = work->work_queue;
95 if (unlikely(wq == NULL))
99 TAILQ_FOREACH(exec, &wq->exec_head, entry) {
100 if (exec->target == work) {
112 linux_delayed_work_enqueue(struct delayed_work *dwork)
114 struct taskqueue *tq;
116 tq = dwork->work.work_queue->taskqueue;
117 taskqueue_enqueue(tq, &dwork->work.work_task);
121 * This function queues the given work structure on the given
122 * workqueue. It returns non-zero if the work was successfully
123 * [re-]queued. Else the work is already pending for completion.
126 linux_queue_work_on(int cpu __unused, struct workqueue_struct *wq,
127 struct work_struct *work)
129 static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
130 [WORK_ST_IDLE] = WORK_ST_TASK, /* start queuing task */
131 [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */
132 [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */
133 [WORK_ST_EXEC] = WORK_ST_TASK, /* queue task another time */
134 [WORK_ST_CANCEL] = WORK_ST_TASK, /* start queuing task again */
137 if (atomic_read(&wq->draining) != 0)
138 return (!work_pending(work));
140 switch (linux_update_state(&work->state, states)) {
143 if (linux_work_exec_unblock(work) != 0)
147 work->work_queue = wq;
148 taskqueue_enqueue(wq->taskqueue, &work->work_task);
151 return (0); /* already on a queue */
156 * This function queues the given work structure on the given
157 * workqueue after a given delay in ticks. It returns non-zero if the
158 * work was successfully [re-]queued. Else the work is already pending
162 linux_queue_delayed_work_on(int cpu, struct workqueue_struct *wq,
163 struct delayed_work *dwork, unsigned delay)
165 static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
166 [WORK_ST_IDLE] = WORK_ST_TIMER, /* start timeout */
167 [WORK_ST_TIMER] = WORK_ST_TIMER, /* NOP */
168 [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */
169 [WORK_ST_EXEC] = WORK_ST_TIMER, /* start timeout */
170 [WORK_ST_CANCEL] = WORK_ST_TIMER, /* start timeout */
173 if (atomic_read(&wq->draining) != 0)
174 return (!work_pending(&dwork->work));
176 switch (linux_update_state(&dwork->work.state, states)) {
179 if (delay == 0 && linux_work_exec_unblock(&dwork->work) != 0) {
180 dwork->timer.expires = jiffies;
185 dwork->work.work_queue = wq;
186 dwork->timer.expires = jiffies + delay;
189 linux_delayed_work_enqueue(dwork);
190 } else if (unlikely(cpu != WORK_CPU_UNBOUND)) {
191 mtx_lock(&dwork->timer.mtx);
192 callout_reset_on(&dwork->timer.callout, delay,
193 &linux_delayed_work_timer_fn, dwork, cpu);
194 mtx_unlock(&dwork->timer.mtx);
196 mtx_lock(&dwork->timer.mtx);
197 callout_reset(&dwork->timer.callout, delay,
198 &linux_delayed_work_timer_fn, dwork);
199 mtx_unlock(&dwork->timer.mtx);
203 return (0); /* already on a queue */
208 linux_work_fn(void *context, int pending)
210 static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
211 [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
212 [WORK_ST_TIMER] = WORK_ST_EXEC, /* delayed work w/o timeout */
213 [WORK_ST_TASK] = WORK_ST_EXEC, /* call callback */
214 [WORK_ST_EXEC] = WORK_ST_IDLE, /* complete callback */
215 [WORK_ST_CANCEL] = WORK_ST_IDLE, /* complete cancel */
217 struct work_struct *work;
218 struct workqueue_struct *wq;
219 struct work_exec exec;
221 linux_set_current(curthread);
223 /* setup local variables */
225 wq = work->work_queue;
227 /* store target pointer */
230 /* insert executor into list */
232 TAILQ_INSERT_TAIL(&wq->exec_head, &exec, entry);
234 switch (linux_update_state(&work->state, states)) {
239 /* call work function */
243 /* check if unblocked */
244 if (exec.target != work) {
255 /* remove executor from list */
256 TAILQ_REMOVE(&wq->exec_head, &exec, entry);
261 linux_delayed_work_timer_fn(void *arg)
263 static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
264 [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
265 [WORK_ST_TIMER] = WORK_ST_TASK, /* start queueing task */
266 [WORK_ST_TASK] = WORK_ST_TASK, /* NOP */
267 [WORK_ST_EXEC] = WORK_ST_TASK, /* queue task another time */
268 [WORK_ST_CANCEL] = WORK_ST_IDLE, /* complete cancel */
270 struct delayed_work *dwork = arg;
272 switch (linux_update_state(&dwork->work.state, states)) {
274 linux_delayed_work_enqueue(dwork);
282 * This function cancels the given work structure in a synchronous
283 * fashion. It returns non-zero if the work was successfully
284 * cancelled. Else the work was already cancelled.
287 linux_cancel_work_sync(struct work_struct *work)
289 static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
290 [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
291 [WORK_ST_TIMER] = WORK_ST_IDLE, /* idle */
292 [WORK_ST_TASK] = WORK_ST_IDLE, /* idle */
293 [WORK_ST_EXEC] = WORK_ST_IDLE, /* idle */
294 [WORK_ST_CANCEL] = WORK_ST_IDLE, /* idle */
296 struct taskqueue *tq;
298 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
299 "linux_cancel_work_sync() might sleep");
301 switch (linux_update_state(&work->state, states)) {
305 tq = work->work_queue->taskqueue;
306 if (taskqueue_cancel(tq, &work->work_task, NULL) != 0)
307 taskqueue_drain(tq, &work->work_task);
313 * This function atomically stops the timer and callback. The timer
314 * callback will not be called after this function returns. This
315 * functions returns true when the timeout was cancelled. Else the
316 * timeout was not started or has already been called.
319 linux_cancel_timer(struct delayed_work *dwork, bool drain)
323 mtx_lock(&dwork->timer.mtx);
324 cancelled = (callout_stop(&dwork->timer.callout) == 1);
325 mtx_unlock(&dwork->timer.mtx);
327 /* check if we should drain */
329 callout_drain(&dwork->timer.callout);
334 * This function cancels the given delayed work structure in a
335 * non-blocking fashion. It returns non-zero if the work was
336 * successfully cancelled. Else the work may still be busy or already
340 linux_cancel_delayed_work(struct delayed_work *dwork)
342 static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
343 [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
344 [WORK_ST_TIMER] = WORK_ST_CANCEL, /* cancel */
345 [WORK_ST_TASK] = WORK_ST_CANCEL, /* cancel */
346 [WORK_ST_EXEC] = WORK_ST_CANCEL, /* cancel */
347 [WORK_ST_CANCEL] = WORK_ST_CANCEL, /* cancel */
349 struct taskqueue *tq;
351 switch (linux_update_state(&dwork->work.state, states)) {
353 if (linux_cancel_timer(dwork, 0))
358 tq = dwork->work.work_queue->taskqueue;
359 if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) == 0)
368 * This function cancels the given work structure in a synchronous
369 * fashion. It returns non-zero if the work was successfully
370 * cancelled. Else the work was already cancelled.
373 linux_cancel_delayed_work_sync(struct delayed_work *dwork)
375 static const uint8_t states[WORK_ST_MAX] __aligned(8) = {
376 [WORK_ST_IDLE] = WORK_ST_IDLE, /* NOP */
377 [WORK_ST_TIMER] = WORK_ST_IDLE, /* idle */
378 [WORK_ST_TASK] = WORK_ST_IDLE, /* idle */
379 [WORK_ST_EXEC] = WORK_ST_IDLE, /* idle */
380 [WORK_ST_CANCEL] = WORK_ST_IDLE, /* idle */
382 struct taskqueue *tq;
384 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
385 "linux_cancel_delayed_work_sync() might sleep");
387 switch (linux_update_state(&dwork->work.state, states)) {
391 if (linux_cancel_timer(dwork, 1)) {
393 * Make sure taskqueue is also drained before
396 tq = dwork->work.work_queue->taskqueue;
397 taskqueue_drain(tq, &dwork->work.work_task);
402 tq = dwork->work.work_queue->taskqueue;
403 if (taskqueue_cancel(tq, &dwork->work.work_task, NULL) != 0)
404 taskqueue_drain(tq, &dwork->work.work_task);
410 * This function waits until the given work structure is completed.
411 * It returns non-zero if the work was successfully
412 * waited for. Else the work was not waited for.
415 linux_flush_work(struct work_struct *work)
417 struct taskqueue *tq;
419 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
420 "linux_flush_work() might sleep");
422 switch (atomic_read(&work->state)) {
426 tq = work->work_queue->taskqueue;
427 taskqueue_drain(tq, &work->work_task);
433 * This function waits until the given delayed work structure is
434 * completed. It returns non-zero if the work was successfully waited
435 * for. Else the work was not waited for.
438 linux_flush_delayed_work(struct delayed_work *dwork)
440 struct taskqueue *tq;
442 WITNESS_WARN(WARN_GIANTOK | WARN_SLEEPOK, NULL,
443 "linux_flush_delayed_work() might sleep");
445 switch (atomic_read(&dwork->work.state)) {
449 if (linux_cancel_timer(dwork, 1))
450 linux_delayed_work_enqueue(dwork);
453 tq = dwork->work.work_queue->taskqueue;
454 taskqueue_drain(tq, &dwork->work.work_task);
460 * This function returns true if the given work is pending, and not
464 linux_work_pending(struct work_struct *work)
466 switch (atomic_read(&work->state)) {
476 * This function returns true if the given work is busy.
479 linux_work_busy(struct work_struct *work)
481 struct taskqueue *tq;
483 switch (atomic_read(&work->state)) {
488 tq = work->work_queue->taskqueue;
489 return (taskqueue_poll_is_busy(tq, &work->work_task));
495 struct workqueue_struct *
496 linux_create_workqueue_common(const char *name, int cpus)
498 struct workqueue_struct *wq;
500 wq = kmalloc(sizeof(*wq), M_WAITOK | M_ZERO);
501 wq->taskqueue = taskqueue_create(name, M_WAITOK,
502 taskqueue_thread_enqueue, &wq->taskqueue);
503 atomic_set(&wq->draining, 0);
504 taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);
505 TAILQ_INIT(&wq->exec_head);
506 mtx_init(&wq->exec_mtx, "linux_wq_exec", NULL, MTX_DEF);
512 linux_destroy_workqueue(struct workqueue_struct *wq)
514 atomic_inc(&wq->draining);
516 taskqueue_free(wq->taskqueue);
517 mtx_destroy(&wq->exec_mtx);
522 linux_init_delayed_work(struct delayed_work *dwork, work_func_t func)
524 memset(dwork, 0, sizeof(*dwork));
525 INIT_WORK(&dwork->work, func);
526 mtx_init(&dwork->timer.mtx, spin_lock_name("lkpi-dwork"), NULL,
527 MTX_DEF | MTX_NOWITNESS);
528 callout_init_mtx(&dwork->timer.callout, &dwork->timer.mtx, 0);
532 linux_work_init(void *arg)
534 int max_wq_cpus = mp_ncpus + 1;
536 /* avoid deadlock when there are too few threads */
540 linux_system_short_wq = alloc_workqueue("linuxkpi_short_wq", 0, max_wq_cpus);
541 linux_system_long_wq = alloc_workqueue("linuxkpi_long_wq", 0, max_wq_cpus);
543 /* populate the workqueue pointers */
544 system_long_wq = linux_system_long_wq;
545 system_wq = linux_system_short_wq;
546 system_power_efficient_wq = linux_system_short_wq;
547 system_unbound_wq = linux_system_short_wq;
549 SYSINIT(linux_work_init, SI_SUB_INIT_IF, SI_ORDER_THIRD, linux_work_init, NULL);
552 linux_work_uninit(void *arg)
554 destroy_workqueue(linux_system_short_wq);
555 destroy_workqueue(linux_system_long_wq);
557 /* clear workqueue pointers */
558 system_long_wq = NULL;
560 system_power_efficient_wq = NULL;
561 system_unbound_wq = NULL;
563 SYSUNINIT(linux_work_uninit, SI_SUB_INIT_IF, SI_ORDER_THIRD, linux_work_uninit, NULL);