/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 *
 * $FreeBSD$
 */
#ifndef _LINUX_WORKQUEUE_H_
#define _LINUX_WORKQUEUE_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <sys/taskqueue.h>

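/*
 * This header maps the Linux workqueue API onto FreeBSD's taskqueue(9):
 * a workqueue_struct wraps a taskqueue, a work_struct wraps a task, and
 * delayed work adds a callout that enqueues the task when it fires.
 *
 * Illustrative sketch of typical use (my_handler and my_work are
 * hypothetical names, not part of this header):
 *
 *	static void my_handler(struct work_struct *w);
 *	static struct work_struct my_work;
 *
 *	INIT_WORK(&my_work, my_handler);
 *	schedule_work(&my_work);	run on the system taskqueue
 *	...
 *	cancel_work_sync(&my_work);	cancel and wait for completion
 */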
struct workqueue_struct {
	struct taskqueue	*taskqueue;
};

struct work_struct {
	struct	task		work_task;
	struct	taskqueue	*taskqueue;
	void			(*fn)(struct work_struct *);
};

struct delayed_work {
	struct work_struct	work;
	struct callout		timer;
};

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

	return container_of(work, struct delayed_work, work);
}

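/*
 * taskqueue(9) handler that bridges to the Linux-style work function
 * stored in the work_struct.
 */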
static inline void
_work_fn(void *context, int pending)
{
	struct work_struct *work;

	work = context;
	work->fn(work);
}

#define INIT_WORK(work, func)						\
do {									\
	(work)->fn = (func);						\
	(work)->taskqueue = NULL;					\
	TASK_INIT(&(work)->work_task, 0, _work_fn, (work));		\
} while (0)

#define INIT_DELAYED_WORK(_work, func)					\
do {									\
	INIT_WORK(&(_work)->work, func);				\
	callout_init(&(_work)->timer, 1);				\
} while (0)

#define INIT_DEFERRABLE_WORK	INIT_DELAYED_WORK

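/*
 * schedule_work() and flush_scheduled_work() operate on the system-wide
 * taskqueue_thread queue, standing in for Linux's system workqueue.
 */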
#define schedule_work(work)						\
do {									\
	(work)->taskqueue = taskqueue_thread;				\
	taskqueue_enqueue(taskqueue_thread, &(work)->work_task);	\
} while (0)

#define flush_scheduled_work()	flush_taskqueue(taskqueue_thread)

static inline int
queue_work(struct workqueue_struct *q, struct work_struct *work)
{
	work->taskqueue = q->taskqueue;
	/*
	 * taskqueue_enqueue() returns 0 on success; invert the result so
	 * that, as on Linux, queueing the work yields a non-zero value.
	 */
	return (!taskqueue_enqueue(q->taskqueue, &work->work_task));
}

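/*
 * Callout handler for delayed work: once the timeout expires, enqueue the
 * underlying task on the taskqueue selected at queue time.
 */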
static inline void
_delayed_work_fn(void *arg)
{
	struct delayed_work *work;

	work = arg;
	taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
	int pending;

	pending = work->work.work_task.ta_pending;
	work->work.taskqueue = wq->taskqueue;
	if (delay != 0)
		callout_reset(&work->timer, delay, _delayed_work_fn, work);
	else
		_delayed_work_fn((void *)work);

	return (!pending);
}

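/*
 * schedule_delayed_work() targets taskqueue_thread; the on-stack
 * workqueue_struct only serves to pass that taskqueue pointer along.
 */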
static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
	struct workqueue_struct wq;

	wq.taskqueue = taskqueue_thread;
	return (queue_delayed_work(&wq, dwork, delay));
}

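/*
 * Common backend for the workqueue constructors below: allocate the
 * wrapper, create a taskqueue and start the requested number of threads.
 */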
static inline struct workqueue_struct *
_create_workqueue_common(char *name, int cpus)
{
	struct workqueue_struct *wq;

	wq = kmalloc(sizeof(*wq), M_WAITOK);
	wq->taskqueue = taskqueue_create(name, M_WAITOK,
	    taskqueue_thread_enqueue, &wq->taskqueue);
	taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);

	return (wq);
}

#define create_singlethread_workqueue(name)				\
	_create_workqueue_common(name, 1)

#define create_workqueue(name)						\
	_create_workqueue_common(name, MAXCPU)

#define alloc_ordered_workqueue(name, flags)				\
	_create_workqueue_common(name, 1)

#define alloc_workqueue(name, flags, max_active)			\
	_create_workqueue_common(name, max_active)

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
	taskqueue_free(wq->taskqueue);
	kfree(wq);
}

#define flush_workqueue(wq)	flush_taskqueue((wq)->taskqueue)

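/*
 * Flushing is emulated by enqueueing a no-op task and draining it with
 * taskqueue_drain(); PHOLD/PRELE keep the current process resident while
 * waiting.
 */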
static inline void
_flush_fn(void *context, int pending)
{
}

static inline void
flush_taskqueue(struct taskqueue *tq)
{
	struct task flushtask;

	PHOLD(curproc);
	TASK_INIT(&flushtask, 0, _flush_fn, NULL);
	taskqueue_enqueue(tq, &flushtask);
	taskqueue_drain(tq, &flushtask);
	PRELE(curproc);
}

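/*
 * Cancel the work if pending and, if it is currently running, wait for it
 * to finish.  Unlike Linux, this always returns 0.
 */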
static inline int
cancel_work_sync(struct work_struct *work)
{
	if (work->taskqueue &&
	    taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
		taskqueue_drain(work->taskqueue, &work->work_task);
	return 0;
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

	callout_stop(&work->timer);
	if (work->work.taskqueue)
		return (taskqueue_cancel(work->work.taskqueue,
		    &work->work.work_task, NULL) == 0);
	return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

	callout_drain(&work->timer);
	if (work->work.taskqueue &&
	    taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
		taskqueue_drain(work->work.taskqueue, &work->work.work_task);
	return 0;
}

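/*
 * Reschedule the delayed work.  Always returns false, so callers cannot
 * tell whether the work was previously pending (Linux reports that via
 * the return value).
 */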
static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
	cancel_delayed_work(dwork);
	queue_delayed_work(wq, dwork, delay);
	return false;
}

#endif	/* _LINUX_WORKQUEUE_H_ */