sys/compat/linuxkpi/common/include/linux/workqueue.h
/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013-2017 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _LINUX_WORKQUEUE_H_
#define _LINUX_WORKQUEUE_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <asm/atomic.h>

#include <sys/param.h>
#include <sys/kernel.h>
#include <sys/taskqueue.h>
#include <sys/mutex.h>

#define WORK_CPU_UNBOUND MAXCPU
#define WQ_UNBOUND (1 << 0)
#define WQ_HIGHPRI (1 << 1)

struct work_struct;
typedef void (*work_func_t)(struct work_struct *);

struct work_exec {
        TAILQ_ENTRY(work_exec) entry;
        struct work_struct *target;
};

struct workqueue_struct {
        struct taskqueue *taskqueue;
        struct mtx exec_mtx;
        TAILQ_HEAD(, work_exec) exec_head;
        atomic_t draining;
};

#define WQ_EXEC_LOCK(wq) mtx_lock(&(wq)->exec_mtx)
#define WQ_EXEC_UNLOCK(wq) mtx_unlock(&(wq)->exec_mtx)

struct work_struct {
        struct task work_task;
        struct workqueue_struct *work_queue;
        work_func_t func;
        atomic_t state;
};

#define DECLARE_WORK(name, fn)                                          \
        struct work_struct name;                                        \
        static void name##_init(void *arg)                              \
        {                                                               \
                INIT_WORK(&name, fn);                                   \
        }                                                               \
        SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND, name##_init, NULL)

struct delayed_work {
        struct work_struct work;
        struct {
                struct callout callout;
                struct mtx mtx;
                int     expires;
        } timer;
};

#define DECLARE_DELAYED_WORK(name, fn)                                  \
        struct delayed_work name;                                       \
        static void __linux_delayed_ ## name ## _init(void *arg)        \
        {                                                               \
                linux_init_delayed_work(&name, fn);                     \
        }                                                               \
        SYSINIT(name, SI_SUB_LOCK, SI_ORDER_SECOND,                     \
            __linux_delayed_ ## name##_init, NULL)

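/*
 * Usage note (not part of the original header; names below are hypothetical):
 * DECLARE_WORK() and DECLARE_DELAYED_WORK() both define the work item and
 * register a SYSINIT, so statically declared items are initialized at boot,
 * roughly as in this sketch:
 *
 *	static void my_global_fn(struct work_struct *);
 *	DECLARE_WORK(my_global_work, my_global_fn);
 *	...
 *	schedule_work(&my_global_work);
 */
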
static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{
        return (container_of(work, struct delayed_work, work));
}

#define INIT_WORK(work, fn)                                             \
do {                                                                    \
        (work)->func = (fn);                                            \
        (work)->work_queue = NULL;                                      \
        atomic_set(&(work)->state, 0);                                  \
        TASK_INIT(&(work)->work_task, 0, linux_work_fn, (work));        \
} while (0)

#define INIT_WORK_ONSTACK(work, fn) \
        INIT_WORK(work, fn)

#define INIT_DELAYED_WORK(dwork, fn) \
        linux_init_delayed_work(dwork, fn)

#define INIT_DELAYED_WORK_ONSTACK(dwork, fn) \
        linux_init_delayed_work(dwork, fn)

#define INIT_DEFERRABLE_WORK(dwork, fn) \
        INIT_DELAYED_WORK(dwork, fn)

#define flush_scheduled_work() \
        taskqueue_drain_all(system_wq->taskqueue)

#define queue_work(wq, work) \
        linux_queue_work_on(WORK_CPU_UNBOUND, wq, work)

#define schedule_work(work) \
        linux_queue_work_on(WORK_CPU_UNBOUND, system_wq, work)

#define queue_delayed_work(wq, dwork, delay) \
        linux_queue_delayed_work_on(WORK_CPU_UNBOUND, wq, dwork, delay)

#define schedule_delayed_work_on(cpu, dwork, delay) \
        linux_queue_delayed_work_on(cpu, system_wq, dwork, delay)

#define queue_work_on(cpu, wq, work) \
        linux_queue_work_on(cpu, wq, work)

#define schedule_delayed_work(dwork, delay) \
        linux_queue_delayed_work_on(WORK_CPU_UNBOUND, system_wq, dwork, delay)

#define queue_delayed_work_on(cpu, wq, dwork, delay) \
        linux_queue_delayed_work_on(cpu, wq, dwork, delay)

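/*
 * Example (sketch, not part of the original header): a driver typically
 * embeds the work items in its own structure and drives them with the
 * macros above.  "my_softc", "my_work_fn" and "my_timeout_fn" are
 * hypothetical names; msecs_to_jiffies() is provided by <linux/jiffies.h>.
 *
 *	struct my_softc {
 *		struct work_struct work;
 *		struct delayed_work dwork;
 *	};
 *
 *	static void
 *	my_work_fn(struct work_struct *work)
 *	{
 *		struct my_softc *sc =
 *		    container_of(work, struct my_softc, work);
 *		// runs in taskqueue (process) context
 *	}
 *
 *	INIT_WORK(&sc->work, my_work_fn);
 *	schedule_work(&sc->work);		// run on system_wq
 *
 *	INIT_DELAYED_WORK(&sc->dwork, my_timeout_fn);
 *	schedule_delayed_work(&sc->dwork, msecs_to_jiffies(100));
 */
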
#define create_singlethread_workqueue(name) \
        linux_create_workqueue_common(name, 1)

#define create_workqueue(name) \
        linux_create_workqueue_common(name, mp_ncpus)

#define alloc_ordered_workqueue(name, flags) \
        linux_create_workqueue_common(name, 1)

#define alloc_workqueue(name, flags, max_active) \
        linux_create_workqueue_common(name, max_active)

#define flush_workqueue(wq) \
        taskqueue_drain_all((wq)->taskqueue)

#define drain_workqueue(wq) do {                \
        atomic_inc(&(wq)->draining);            \
        taskqueue_drain_all((wq)->taskqueue);   \
        atomic_dec(&(wq)->draining);            \
} while (0)

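/*
 * Example (sketch, hypothetical names): a dedicated workqueue.  As the
 * macros above show, the WQ_* flags are ignored by this compat layer and
 * the queue is backed by a taskqueue sized by the "max_active" argument
 * (fixed at 1 for the single-threaded/ordered variants).
 *
 *	struct workqueue_struct *my_wq;
 *
 *	my_wq = alloc_workqueue("my_wq", WQ_UNBOUND, 1);
 *	queue_work(my_wq, &sc->work);
 *	...
 *	flush_workqueue(my_wq);		// wait for queued work to complete
 *	destroy_workqueue(my_wq);	// declared further below
 */
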
#define mod_delayed_work(wq, dwork, delay) ({           \
        bool __retval;                                  \
        __retval = linux_cancel_delayed_work(dwork);    \
        linux_queue_delayed_work_on(WORK_CPU_UNBOUND,   \
            wq, dwork, delay);                          \
        __retval;                                       \
})

#define delayed_work_pending(dwork) \
        linux_work_pending(&(dwork)->work)

#define cancel_delayed_work(dwork) \
        linux_cancel_delayed_work(dwork)

#define cancel_work_sync(work) \
        linux_cancel_work_sync(work)

#define cancel_delayed_work_sync(dwork) \
        linux_cancel_delayed_work_sync(dwork)

#define flush_work(work) \
        linux_flush_work(work)

#define flush_delayed_work(dwork) \
        linux_flush_delayed_work(dwork)

#define work_pending(work) \
        linux_work_pending(work)

#define work_busy(work) \
        linux_work_busy(work)

#define destroy_work_on_stack(work) \
        do { } while (0)

#define destroy_delayed_work_on_stack(dwork) \
        do { } while (0)

#define destroy_workqueue(wq) \
        linux_destroy_workqueue(wq)

#define current_work() \
        linux_current_work()

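/*
 * Example (sketch, hypothetical names): a typical detach/teardown order.
 * Work should be cancelled or flushed before the structure embedding it is
 * freed or the queue it may still be running on is destroyed.
 *
 *	cancel_delayed_work_sync(&sc->dwork);	// stop the timer, wait for the handler
 *	cancel_work_sync(&sc->work);		// wait for a running handler to finish
 *	destroy_workqueue(my_wq);		// tear down the queue and its taskqueue
 *	free(sc, M_MYDRIVER);			// M_MYDRIVER is a hypothetical malloc type
 */
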
/* prototypes */

extern struct workqueue_struct *system_wq;
extern struct workqueue_struct *system_long_wq;
extern struct workqueue_struct *system_unbound_wq;
extern struct workqueue_struct *system_highpri_wq;
extern struct workqueue_struct *system_power_efficient_wq;

extern void linux_init_delayed_work(struct delayed_work *, work_func_t);
extern void linux_work_fn(void *, int);
extern void linux_delayed_work_fn(void *, int);
extern struct workqueue_struct *linux_create_workqueue_common(const char *, int);
extern void linux_destroy_workqueue(struct workqueue_struct *);
extern bool linux_queue_work_on(int cpu, struct workqueue_struct *, struct work_struct *);
extern bool linux_queue_delayed_work_on(int cpu, struct workqueue_struct *,
    struct delayed_work *, unsigned delay);
extern bool linux_cancel_delayed_work(struct delayed_work *);
extern bool linux_cancel_work_sync(struct work_struct *);
extern bool linux_cancel_delayed_work_sync(struct delayed_work *);
extern bool linux_flush_work(struct work_struct *);
extern bool linux_flush_delayed_work(struct delayed_work *);
extern bool linux_work_pending(struct work_struct *);
extern bool linux_work_busy(struct work_struct *);
extern struct work_struct *linux_current_work(void);

#endif                                  /* _LINUX_WORKQUEUE_H_ */