/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * Copyright (c) 2013, 2014 Mellanox Technologies, Ltd.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
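/*
 * Linux workqueue compatibility shim for the OFED stack: Linux work items
 * are mapped onto taskqueue(9) tasks, workqueues onto taskqueues, and
 * delayed work onto a callout(9) timer that enqueues the task when it fires.
 */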
#ifndef _LINUX_WORKQUEUE_H_
#define _LINUX_WORKQUEUE_H_

#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <sys/taskqueue.h>

struct workqueue_struct {
        struct taskqueue        *taskqueue;
};

struct work_struct {
        struct  task            work_task;
        struct  taskqueue       *taskqueue;
        void                    (*fn)(struct work_struct *);
};

struct delayed_work {
        struct work_struct      work;
        struct callout          timer;
};
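/*
 * A work_struct wraps a taskqueue(9) task together with the Linux-style
 * handler it should invoke; a delayed_work additionally carries a
 * callout(9) timer used to defer enqueueing the task.
 */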

static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

        return container_of(work, struct delayed_work, work);
}

/*
 * Adapter invoked by the taskqueue; it calls the Linux-style work handler.
 */
static inline void
_work_fn(void *context, int pending)
{
        struct work_struct *work;

        work = context;
        work->fn(work);
}

#define INIT_WORK(work, func)                                           \
do {                                                                    \
        (work)->fn = (func);                                            \
        (work)->taskqueue = NULL;                                       \
        TASK_INIT(&(work)->work_task, 0, _work_fn, (work));             \
} while (0)

#define INIT_DELAYED_WORK(_work, func)                                  \
do {                                                                    \
        INIT_WORK(&(_work)->work, func);                                \
        callout_init(&(_work)->timer, CALLOUT_MPSAFE);                  \
} while (0)

#define INIT_DEFERRABLE_WORK    INIT_DELAYED_WORK
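/*
 * Typical use (sketch only; my_handler is a hypothetical consumer function):
 *
 *      static void my_handler(struct work_struct *work) { ... }
 *
 *      struct work_struct w;
 *      INIT_WORK(&w, my_handler);
 *      schedule_work(&w);
 */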

#define schedule_work(work)                                             \
do {                                                                    \
        (work)->taskqueue = taskqueue_thread;                           \
        taskqueue_enqueue(taskqueue_thread, &(work)->work_task);        \
} while (0)

#define flush_scheduled_work()  flush_taskqueue(taskqueue_thread)

static inline int
queue_work(struct workqueue_struct *q, struct work_struct *work)
{
        work->taskqueue = q->taskqueue;
        /* Invert taskqueue_enqueue()'s return value to match the Linux
         * convention of returning non-zero when the work is queued. */
        return !taskqueue_enqueue(q->taskqueue, &work->work_task);
}

/*
 * Callout handler: hand the work off to its taskqueue once the timer fires.
 */
static inline void
_delayed_work_fn(void *arg)
{
        struct delayed_work *work;

        work = arg;
        taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
}

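/*
 * Queue delayed work on a specific workqueue.  The delay is handed straight
 * to callout_reset(9) and is therefore interpreted in ticks; the call
 * returns non-zero if the task was not already pending.
 */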
static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
        int pending;

        pending = work->work.work_task.ta_pending;
        work->work.taskqueue = wq->taskqueue;
        if (delay != 0)
                callout_reset(&work->timer, delay, _delayed_work_fn, work);
        else
                _delayed_work_fn((void *)work);

        return (!pending);
}

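/*
 * schedule_delayed_work() targets taskqueue_thread.  The temporary
 * workqueue_struct below lives on the stack, which is safe because
 * queue_delayed_work() only copies its taskqueue pointer.
 */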
static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
        struct workqueue_struct wq;

        wq.taskqueue = taskqueue_thread;
        return queue_delayed_work(&wq, dwork, delay);
}

static inline struct workqueue_struct *
_create_workqueue_common(char *name, int cpus)
{
        struct workqueue_struct *wq;

        wq = kmalloc(sizeof(*wq), M_WAITOK);
        wq->taskqueue = taskqueue_create(name, M_WAITOK,
            taskqueue_thread_enqueue, &wq->taskqueue);
        taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);

        return (wq);
}

#define create_singlethread_workqueue(name)                             \
        _create_workqueue_common(name, 1)

#define create_workqueue(name)                                          \
        _create_workqueue_common(name, MAXCPU)
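/*
 * Typical lifecycle (sketch only; "mydrv" and my_handler are hypothetical):
 *
 *      struct workqueue_struct *wq = create_singlethread_workqueue("mydrv");
 *      struct work_struct w;
 *      INIT_WORK(&w, my_handler);
 *      queue_work(wq, &w);
 *      flush_workqueue(wq);
 *      destroy_workqueue(wq);
 */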

static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
        taskqueue_free(wq->taskqueue);
        kfree(wq);
}

#define flush_workqueue(wq)     flush_taskqueue((wq)->taskqueue)

static inline void
_flush_fn(void *context, int pending)
{
}

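/*
 * Flush by enqueueing a no-op barrier task (_flush_fn) and draining it;
 * taskqueue_drain() returns once that barrier task has run.  PHOLD()/PRELE()
 * keep the current process held across the wait.
 */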
static inline void
flush_taskqueue(struct taskqueue *tq)
{
        struct task flushtask;

        PHOLD(curproc);
        TASK_INIT(&flushtask, 0, _flush_fn, NULL);
        taskqueue_enqueue(tq, &flushtask);
        taskqueue_drain(tq, &flushtask);
        PRELE(curproc);
}

static inline int
cancel_work_sync(struct work_struct *work)
{
        if (work->taskqueue &&
            taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
                taskqueue_drain(work->taskqueue, &work->work_task);
        return 0;
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

        callout_stop(&work->timer);
        if (work->work.taskqueue)
                return (taskqueue_cancel(work->work.taskqueue,
                    &work->work.work_task, NULL) == 0);
        return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

        callout_drain(&work->timer);
        if (work->work.taskqueue &&
            taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
                taskqueue_drain(work->work.taskqueue, &work->work.work_task);
        return 0;
}

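/*
 * Unlike Linux's mod_delayed_work(), which reports whether a pending item
 * was modified, this implementation always returns false.
 */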
static inline bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dwork,
    unsigned long delay)
{
        cancel_delayed_work(dwork);
        queue_delayed_work(wq, dwork, delay);
        return false;
}

#endif  /* _LINUX_WORKQUEUE_H_ */