]> CyberLeo.Net >> Repos - FreeBSD/releng/7.2.git/blob - sys/dev/cxgb/sys/cxgb_support.c
Create releng/7.2 from stable/7 in preparation for 7.2-RELEASE.
[FreeBSD/releng/7.2.git] / sys / dev / cxgb / sys / cxgb_support.c
1 /**************************************************************************
2
3 Copyright (c) 2007, Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15  
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28 ***************************************************************************/
29
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/queue.h>
38
39
40 #include <sys/proc.h>
41 #include <sys/sched.h>
42 #include <sys/smp.h>
43 #include <sys/systm.h>
44 #include <vm/vm.h>
45 #include <vm/pmap.h>
46
47 #ifdef CONFIG_DEFINED
48 #include <cxgb_include.h>
49 #include <sys/mvec.h>
50 #else
51 #include <dev/cxgb/cxgb_include.h>
52 #include <dev/cxgb/sys/mvec.h>
53 #endif
54
/* Tunable defined elsewhere in the driver: non-zero selects 16K jumbo clusters. */
extern int cxgb_use_16k_clusters;
/* Runtime switch for the per-CPU cluster caches below (1 = enabled). */
int cxgb_pcpu_cache_enable = 1;
57
/*
 * Simple LIFO free-list of buffer pointers.  bs_head indexes the most
 * recently pushed entry; -1 means the stack is empty.  Callers
 * serialize access externally (critical sections in this file).
 */
struct buf_stack {
        caddr_t            *bs_stack;
        volatile int        bs_head;
        int                 bs_size;
};

/*
 * Push a buffer onto the stack.  Returns ENOSPC when all bs_size
 * slots are already in use, 0 otherwise.
 */
static __inline int
buf_stack_push(struct buf_stack *bs, caddr_t buf)
{
        int next = bs->bs_head + 1;

        if (next >= bs->bs_size)
                return (ENOSPC);

        bs->bs_stack[next] = buf;
        bs->bs_head = next;
        return (0);
}

/*
 * Pop the most recently pushed buffer, or NULL when the stack is
 * empty.
 */
static __inline caddr_t
buf_stack_pop(struct buf_stack *bs)
{
        caddr_t buf;

        if (bs->bs_head < 0)
                return (NULL);

        buf = bs->bs_stack[bs->bs_head];
        bs->bs_head--;
        return (buf);
}

/*
 * Return the number of free slots remaining in the stack (0 when
 * full).  Note: despite the original comment, this reports available
 * space, not fullness.
 */
static __inline int
buf_stack_avail(struct buf_stack *bs)
{
        return (bs->bs_size - (bs->bs_head + 1));
}
92
/*
 * Per-CPU buffer cache: one free stack of jumbo clusters, one of 2K
 * clusters, and the UMA zone cached jumbo buffers belong to (so they
 * can be returned to the right zone on teardown).
 */
struct cxgb_cache_pcpu {
        struct buf_stack        ccp_jumbo_free;
        struct buf_stack        ccp_cluster_free;
        uma_zone_t              ccp_jumbo_zone;
};
98
/*
 * Variable-length array of per-CPU caches; mp_ncpus entries are
 * allocated in cxgb_cache_init().  ccs_array[0] is the GNU
 * zero-length-array idiom; kept as-is because a struct containing
 * only a C99 flexible array member is not valid ISO C.
 */
struct cxgb_cache_system {
        struct cxgb_cache_pcpu ccs_array[0];
} *cxgb_caches;
102
103 static int
104 buf_stack_init(struct buf_stack *bs, int size)
105 {
106         bs->bs_size = size;
107         bs->bs_head = -1;
108         if((bs->bs_stack = malloc(sizeof(caddr_t)*size, M_DEVBUF, M_NOWAIT)) == NULL)
109                 return (ENOMEM);
110
111         return (0);
112 }
113
114 static void
115 buf_stack_deinit(struct buf_stack *bs)
116 {
117         if (bs->bs_stack != NULL)
118                 free(bs->bs_stack, M_DEVBUF);
119 }
120
/*
 * Initialize one CPU's cache: allocate the two free stacks (each sized
 * to a quarter of the corresponding free-list queue) and select the
 * jumbo zone matching the configured cluster size.  Returns errno on
 * failure; a partially initialized entry is cleaned up by the caller
 * via cxgb_cache_flush() -> cxgb_cache_pcpu_deinit().
 */
static int
cxgb_cache_pcpu_init(struct cxgb_cache_pcpu *ccp)
{
        int err;
        
        if ((err = buf_stack_init(&ccp->ccp_jumbo_free, (JUMBO_Q_SIZE >> 2))))
                return (err);
        
        if ((err = buf_stack_init(&ccp->ccp_cluster_free, (FL_Q_SIZE >> 2))))
                return (err);

#if __FreeBSD_version > 800000          
        if (cxgb_use_16k_clusters) 
                ccp->ccp_jumbo_zone = zone_jumbo16;
        else
                ccp->ccp_jumbo_zone = zone_jumbo9;
#else
        /* Pre-8.x: only the page-size jumbo zone exists, assign unconditionally. */
                ccp->ccp_jumbo_zone = zone_jumbop;
#endif
        return (0);
}
142
143 static void
144 cxgb_cache_pcpu_deinit(struct cxgb_cache_pcpu *ccp)
145 {
146         void *cl;
147
148         while ((cl = buf_stack_pop(&ccp->ccp_jumbo_free)) != NULL)
149                 uma_zfree(ccp->ccp_jumbo_zone, cl);
150         while ((cl = buf_stack_pop(&ccp->ccp_cluster_free)) != NULL)
151                 uma_zfree(zone_clust, cl);
152
153         buf_stack_deinit(&ccp->ccp_jumbo_free);
154         buf_stack_deinit(&ccp->ccp_cluster_free);
155         
156 }
157
158 static int inited = 0;
159
160 int
161 cxgb_cache_init(void)
162 {
163         int i, err;
164         
165         if (inited++ > 0)
166                 return (0);
167
168         if ((cxgb_caches = malloc(sizeof(struct cxgb_cache_pcpu)*mp_ncpus, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
169                 return (ENOMEM);
170         
171         for (i = 0; i < mp_ncpus; i++) 
172                 if ((err = cxgb_cache_pcpu_init(&cxgb_caches->ccs_array[i])))
173                         goto err;
174
175         return (0);
176 err:
177         cxgb_cache_flush();
178
179         return (err);
180 }
181
182 void
183 cxgb_cache_flush(void)
184 {
185         int i;
186         
187         if (--inited > 0) 
188                 return;
189
190         if (cxgb_caches == NULL)
191                 return;
192         
193         for (i = 0; i < mp_ncpus; i++) 
194                 cxgb_cache_pcpu_deinit(&cxgb_caches->ccs_array[i]);
195
196         free(cxgb_caches, M_DEVBUF);
197         cxgb_caches = NULL;
198 }
199
200 caddr_t
201 cxgb_cache_get(uma_zone_t zone)
202 {
203         caddr_t cl = NULL;
204         struct cxgb_cache_pcpu *ccp;
205
206         if (cxgb_pcpu_cache_enable) {
207                 critical_enter();
208                 ccp = &cxgb_caches->ccs_array[curcpu];
209                 if (zone == zone_clust) {
210                         cl = buf_stack_pop(&ccp->ccp_cluster_free);
211                 } else if (zone == ccp->ccp_jumbo_zone) {
212                         cl = buf_stack_pop(&ccp->ccp_jumbo_free);
213                 }
214                 critical_exit();
215         }
216         
217         if (cl == NULL) 
218                 cl = uma_zalloc(zone, M_NOWAIT);
219         else
220                 cxgb_cached_allocations++;
221         
222         return (cl);
223 }
224
225 void
226 cxgb_cache_put(uma_zone_t zone, void *cl)
227 {
228         struct cxgb_cache_pcpu *ccp;
229         int err = ENOSPC;
230
231         if (cxgb_pcpu_cache_enable) {
232                 critical_enter();
233                 ccp = &cxgb_caches->ccs_array[curcpu];
234                 if (zone == zone_clust) {
235                         err = buf_stack_push(&ccp->ccp_cluster_free, cl);
236                 } else if (zone == ccp->ccp_jumbo_zone){
237                         err = buf_stack_push(&ccp->ccp_jumbo_free, cl);
238                 }
239                 critical_exit();
240         }
241         
242         if (err)
243                 uma_zfree(zone, cl);
244         else
245                 cxgb_cached++;
246 }
247
248 void
249 cxgb_cache_refill(void)
250 {
251         struct cxgb_cache_pcpu *ccp;
252         caddr_t vec[8];
253         uma_zone_t zone;
254         int i, count;
255
256
257         return;
258 restart:
259         critical_enter();
260         ccp = &cxgb_caches->ccs_array[curcpu];
261         zone = ccp->ccp_jumbo_zone;
262         if (!buf_stack_avail(&ccp->ccp_jumbo_free) &&
263             !buf_stack_avail(&ccp->ccp_cluster_free)) {
264                 critical_exit();
265                 return;
266         }
267         critical_exit();
268
269
270         
271         for (i = 0; i < 8; i++)
272                 if ((vec[i] = uma_zalloc(zone, M_NOWAIT)) == NULL) 
273                         goto free;
274
275         critical_enter();
276         ccp = &cxgb_caches->ccs_array[curcpu];
277         for (i = 0; i < 8 && buf_stack_avail(&ccp->ccp_jumbo_free); i++)
278                 if (buf_stack_push(&ccp->ccp_jumbo_free, vec[i]))
279                         break;
280         critical_exit();
281
282         for (; i < 8; i++)
283                 uma_zfree(zone, vec[i]);
284
285
286         
287         zone = zone_clust;
288         for (i = 0; i < 8; i++)
289                 if ((vec[i] = uma_zalloc(zone, M_NOWAIT)) == NULL) 
290                         goto free;
291
292         critical_enter();
293         ccp = &cxgb_caches->ccs_array[curcpu];
294         for (i = 0; i < 8 && buf_stack_avail(&ccp->ccp_cluster_free); i++)
295                 if (buf_stack_push(&ccp->ccp_cluster_free, vec[i]))
296                         break;
297         critical_exit();
298         
299         for (; i < 8; i++)
300                 uma_zfree(zone, vec[i]);
301
302         goto restart;
303
304
305 free:
306         count = i;
307         for (; i < count; i++)
308                 uma_zfree(zone, vec[i]);
309 }
310         
311 struct buf_ring *
312 buf_ring_alloc(int count, int flags)
313 {
314         struct buf_ring *br;
315
316         KASSERT(powerof2(count), ("buf ring must be size power of 2"));
317         
318         br = malloc(sizeof(struct buf_ring), M_DEVBUF, flags|M_ZERO);
319         if (br == NULL)
320                 return (NULL);
321         
322         br->br_ring = malloc(sizeof(caddr_t)*count, M_DEVBUF, flags|M_ZERO);
323         if (br->br_ring == NULL) {
324                 free(br, M_DEVBUF);
325                 return (NULL);
326         }
327         
328         mtx_init(&br->br_lock, "buf ring", NULL, MTX_DUPOK|MTX_DEF);
329         br->br_size = count;
330         br->br_prod = br->br_cons = 0;
331
332         return (br);
333 }
334
335 void
336 buf_ring_free(struct buf_ring *br)
337 {
338         free(br->br_ring, M_DEVBUF);
339         free(br, M_DEVBUF);
340 }