/*-
 * Copyright (c) 2010 Isilon Systems, Inc.
 * Copyright (c) 2010 iX Systems, Inc.
 * Copyright (c) 2010 Panasas, Inc.
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice unmodified, this list of conditions, and the following
 *    disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
#ifndef _LINUX_WORKQUEUE_H_
#define _LINUX_WORKQUEUE_H_

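/*
 * Minimal emulation of the Linux workqueue API for the OFED code, built on
 * FreeBSD's taskqueue(9): work items are backed by tasks, workqueues by
 * taskqueues, and delayed work is driven by a callout(9) timer.
 */
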
#include <linux/types.h>
#include <linux/kernel.h>
#include <linux/timer.h>
#include <linux/slab.h>

#include <sys/taskqueue.h>

struct workqueue_struct {
        struct taskqueue        *taskqueue;
};

struct work_struct {
        struct  task            work_task;
        struct  taskqueue       *taskqueue;
        void                    (*fn)(struct work_struct *);
};

struct delayed_work {
        struct work_struct      work;
        struct callout          timer;
};

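/*
 * A work_struct remembers the taskqueue it was last enqueued on so that the
 * cancel and flush helpers below know where to look; delayed_work adds the
 * callout used to defer the enqueue.
 */
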
static inline struct delayed_work *
to_delayed_work(struct work_struct *work)
{

        return container_of(work, struct delayed_work, work);
}

static inline void
_work_fn(void *context, int pending)
{
        struct work_struct *work;

        work = context;
        work->fn(work);
}

#define INIT_WORK(work, func)                                           \
do {                                                                    \
        (work)->fn = (func);                                            \
        (work)->taskqueue = NULL;                                       \
        TASK_INIT(&(work)->work_task, 0, _work_fn, (work));             \
} while (0)

#define INIT_DELAYED_WORK(_work, func)                                  \
do {                                                                    \
        INIT_WORK(&(_work)->work, func);                                \
        callout_init(&(_work)->timer, CALLOUT_MPSAFE);                  \
} while (0)

#define INIT_DEFERRABLE_WORK    INIT_DELAYED_WORK

#define schedule_work(work)                                             \
do {                                                                    \
        (work)->taskqueue = taskqueue_thread;                           \
        taskqueue_enqueue(taskqueue_thread, &(work)->work_task);        \
} while (0)

#define flush_scheduled_work()  flush_taskqueue(taskqueue_thread)

#define queue_work(q, work)                                             \
do {                                                                    \
        (work)->taskqueue = (q)->taskqueue;                             \
        taskqueue_enqueue((q)->taskqueue, &(work)->work_task);          \
} while (0)

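/*
 * Example usage (illustrative sketch only; my_softc, sc_work and
 * my_work_handler are hypothetical names).  A caller initializes the
 * work_struct once and then schedules it on the shared taskqueue_thread
 * queue, or on a private queue via queue_work():
 *
 *      static void
 *      my_work_handler(struct work_struct *work)
 *      {
 *              struct my_softc *sc;
 *
 *              sc = container_of(work, struct my_softc, sc_work);
 *              // ... deferred processing runs in taskqueue context ...
 *      }
 *
 *      INIT_WORK(&sc->sc_work, my_work_handler);
 *      schedule_work(&sc->sc_work);
 *      ...
 *      cancel_work_sync(&sc->sc_work);  // waits out a running handler
 */
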
static inline void
_delayed_work_fn(void *arg)
{
        struct delayed_work *work;

        work = arg;
        taskqueue_enqueue(work->work.taskqueue, &work->work.work_task);
}

static inline int
queue_delayed_work(struct workqueue_struct *wq, struct delayed_work *work,
    unsigned long delay)
{
        int pending;

        pending = work->work.work_task.ta_pending;
        work->work.taskqueue = wq->taskqueue;
        if (delay != 0)
                callout_reset(&work->timer, delay, _delayed_work_fn, work);
        else
                _delayed_work_fn((void *)work);

        return (!pending);
}

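/*
 * Example for delayed work (illustrative sketch; sc_dwork and
 * my_timeout_handler are hypothetical names).  The delay is handed straight
 * to callout_reset(), so it is expressed in ticks here, and
 * queue_delayed_work() returns nonzero when the task was not already pending:
 *
 *      INIT_DELAYED_WORK(&sc->sc_dwork, my_timeout_handler);
 *      queue_delayed_work(wq, &sc->sc_dwork, 2 * hz);  // about two seconds
 */
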
static inline bool
schedule_delayed_work(struct delayed_work *dwork, unsigned long delay)
{
        struct workqueue_struct wq;

        wq.taskqueue = taskqueue_thread;
        return (queue_delayed_work(&wq, dwork, delay));
}

static inline struct workqueue_struct *
_create_workqueue_common(char *name, int cpus)
{
        struct workqueue_struct *wq;

        wq = kmalloc(sizeof(*wq), M_WAITOK);
        wq->taskqueue = taskqueue_create((name), M_WAITOK,
            taskqueue_thread_enqueue, &wq->taskqueue);
        taskqueue_start_threads(&wq->taskqueue, cpus, PWAIT, "%s", name);

        return (wq);
}

#define create_singlethread_workqueue(name)                             \
        _create_workqueue_common(name, 1)

#define create_workqueue(name)                                          \
        _create_workqueue_common(name, MAXCPU)

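/*
 * Example of a private queue (illustrative sketch; "my_wq" and sc_work are
 * hypothetical names).  create_workqueue() asks for MAXCPU worker threads,
 * create_singlethread_workqueue() for exactly one:
 *
 *      struct workqueue_struct *wq;
 *
 *      wq = create_singlethread_workqueue("my_wq");
 *      queue_work(wq, &sc->sc_work);
 *      ...
 *      destroy_workqueue(wq);
 */
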
static inline void
destroy_workqueue(struct workqueue_struct *wq)
{
        taskqueue_free(wq->taskqueue);
        kfree(wq);
}

#define flush_workqueue(wq)     flush_taskqueue((wq)->taskqueue)

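/*
 * Flushing enqueues a no-op task (_flush_fn) and drains it, blocking until
 * that task has run; PHOLD/PRELE keep the current process from being swapped
 * out while it sleeps.
 */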
static inline void
_flush_fn(void *context, int pending)
{
}

static inline void
flush_taskqueue(struct taskqueue *tq)
{
        struct task flushtask;

        PHOLD(curproc);
        TASK_INIT(&flushtask, 0, _flush_fn, NULL);
        taskqueue_enqueue(tq, &flushtask);
        taskqueue_drain(tq, &flushtask);
        PRELE(curproc);
}

static inline int
cancel_work_sync(struct work_struct *work)
{
        if (work->taskqueue &&
            taskqueue_cancel(work->taskqueue, &work->work_task, NULL))
                taskqueue_drain(work->taskqueue, &work->work_task);
        return 0;
}

/*
 * This may leave work running on another CPU as it does on Linux.
 */
static inline int
cancel_delayed_work(struct delayed_work *work)
{

        callout_stop(&work->timer);
        if (work->work.taskqueue)
                return (taskqueue_cancel(work->work.taskqueue,
                    &work->work.work_task, NULL) == 0);
        return 0;
}

static inline int
cancel_delayed_work_sync(struct delayed_work *work)
{

        callout_drain(&work->timer);
        if (work->work.taskqueue &&
            taskqueue_cancel(work->work.taskqueue, &work->work.work_task, NULL))
                taskqueue_drain(work->work.taskqueue, &work->work.work_task);
        return 0;
}

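/*
 * Teardown sketch (illustrative; sc_dwork and wq are hypothetical names).
 * cancel_delayed_work() only stops the callout and removes a pending task,
 * while cancel_delayed_work_sync() also drains both the callout and the
 * task, so the handler is no longer running when it returns:
 *
 *      cancel_delayed_work_sync(&sc->sc_dwork);
 *      destroy_workqueue(wq);
 */
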
#endif  /* _LINUX_WORKQUEUE_H_ */