1 /**************************************************************************
3 Copyright (c) 2007, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
28 ***************************************************************************/
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
33 #include <sys/param.h>
34 #include <sys/systm.h>
35 #include <sys/kernel.h>
36 #include <sys/malloc.h>
37 #include <sys/queue.h>
41 #include <sys/sched.h>
43 #include <sys/systm.h>
48 #include <cxgb_include.h>
51 #include <dev/cxgb/cxgb_include.h>
52 #include <dev/cxgb/sys/mvec.h>
/*
 * cxgb_use_16k_clusters: tunable defined elsewhere in the driver; when set,
 * cxgb_cache_pcpu_init() selects the 16 KB jumbo zone instead of the 9 KB one.
 * cxgb_pcpu_cache_enable: runtime switch for the per-CPU buffer caches —
 * cxgb_cache_get()/cxgb_cache_put() bypass the caches when it is clear.
 */
55 extern int cxgb_use_16k_clusters;
56 int cxgb_pcpu_cache_enable = 1;
/*
 * Push a buffer onto a per-CPU free stack.  bs_head indexes the current top
 * element, so bs_head + 1 == bs_size means the stack is full and the push
 * must fail.
 * NOTE(review): this numbered listing elides the return type, braces and
 * return statements (presumably ENOSPC-style failure / 0 on success —
 * confirm against the full source); only comments were added here.
 */
65 buf_stack_push(struct buf_stack *bs, caddr_t buf)
67 if (bs->bs_head + 1 >= bs->bs_size)
70 bs->bs_stack[++(bs->bs_head)] = buf;
/*
 * Pop and return the top buffer, post-decrementing the top index.
 * NOTE(review): the empty-stack guard is elided from this listing;
 * the drain loops in cxgb_cache_pcpu_deinit() compare the result
 * against NULL, so an empty stack presumably yields NULL — confirm.
 */
74 static __inline caddr_t
75 buf_stack_pop(struct buf_stack *bs)
80 return (bs->bs_stack[(bs->bs_head)--]);
/*
 * Number of free slots left in the stack.  bs_head is the index of the
 * top element (so -1 when empty), hence capacity minus occupancy is
 * bs_size - (bs_head + 1).
 */
88 buf_stack_avail(struct buf_stack *bs)
90 return (bs->bs_size - bs->bs_head - 1);
/*
 * Per-CPU buffer cache: free stacks of cached jumbo and regular mbuf
 * clusters, plus the jumbo UMA zone chosen for this configuration in
 * cxgb_cache_pcpu_init() (16k/9k/page-sized).
 * NOTE(review): any trailing members and the closing brace are elided
 * from this listing.
 */
93 struct cxgb_cache_pcpu {
94 struct buf_stack ccp_jumbo_free;
95 struct buf_stack ccp_cluster_free;
96 uma_zone_t ccp_jumbo_zone;
/*
 * System-wide cache: one cxgb_cache_pcpu per CPU.  ccs_array[0] is the
 * pre-C99 "zero-length array" idiom for a flexible array member; the
 * structure is allocated with sizeof(struct cxgb_cache_pcpu) * mp_ncpus
 * bytes in cxgb_cache_init().
 */
99 struct cxgb_cache_system {
100 struct cxgb_cache_pcpu ccs_array[0];
/*
 * Allocate backing storage for a buf_stack holding 'size' buffer pointers.
 * NOTE(review): the bs_head/bs_size initialization and the error return
 * (presumably ENOMEM on allocation failure — confirm against the full
 * source) are elided from this listing.
 */
104 buf_stack_init(struct buf_stack *bs, int size)
108 if((bs->bs_stack = malloc(sizeof(caddr_t)*size, M_DEVBUF, M_NOWAIT)) == NULL)
/*
 * Release a buf_stack's backing array if it was ever allocated.
 * (free(9) on NULL would also be a no-op; the explicit check is
 * merely defensive.)
 */
115 buf_stack_deinit(struct buf_stack *bs)
117 if (bs->bs_stack != NULL)
118 free(bs->bs_stack, M_DEVBUF);
/*
 * Initialize one CPU's cache.  Each free stack is sized at a quarter of
 * the corresponding free-list queue depth (JUMBO_Q_SIZE / FL_Q_SIZE).
 * On FreeBSD newer than 8.0 the jumbo zone is 16 KB or 9 KB clusters
 * depending on the cxgb_use_16k_clusters tunable; older kernels fall
 * back to page-sized jumbos (zone_jumbop).
 * NOTE(review): the error-unwind paths and the #else/#endif lines of
 * the version conditional are elided from this listing.
 */
122 cxgb_cache_pcpu_init(struct cxgb_cache_pcpu *ccp)
126 if ((err = buf_stack_init(&ccp->ccp_jumbo_free, (JUMBO_Q_SIZE >> 2))))
129 if ((err = buf_stack_init(&ccp->ccp_cluster_free, (FL_Q_SIZE >> 2))))
132 #if __FreeBSD_version > 800000
133 if (cxgb_use_16k_clusters)
134 ccp->ccp_jumbo_zone = zone_jumbo16;
136 ccp->ccp_jumbo_zone = zone_jumbo9;
138 ccp->ccp_jumbo_zone = zone_jumbop;
/*
 * Drain and tear down one CPU's cache: return every cached buffer to
 * its UMA zone (jumbos to the configured jumbo zone, regular clusters
 * to zone_clust), then free the stacks' backing storage.
 */
144 cxgb_cache_pcpu_deinit(struct cxgb_cache_pcpu *ccp)
148 while ((cl = buf_stack_pop(&ccp->ccp_jumbo_free)) != NULL)
149 uma_zfree(ccp->ccp_jumbo_zone, cl);
150 while ((cl = buf_stack_pop(&ccp->ccp_cluster_free)) != NULL)
151 uma_zfree(zone_clust, cl);
153 buf_stack_deinit(&ccp->ccp_jumbo_free);
154 buf_stack_deinit(&ccp->ccp_cluster_free);
/*
 * NOTE(review): presumably guards against repeated cache
 * initialization/teardown; the code that reads or sets it is elided
 * from this listing — confirm against the full source.
 */
158 static int inited = 0;
/*
 * Allocate the system-wide cache (one zeroed cxgb_cache_pcpu per CPU —
 * note the allocation sizes only the flexible array, which is the
 * struct's sole member) and initialize each per-CPU entry.
 * NOTE(review): the NULL check is redundant under M_WAITOK, and the
 * error-return/unwind lines for a failed per-CPU init are elided from
 * this listing.
 */
161 cxgb_cache_init(void)
168 if ((cxgb_caches = malloc(sizeof(struct cxgb_cache_pcpu)*mp_ncpus, M_DEVBUF, M_WAITOK|M_ZERO)) == NULL)
171 for (i = 0; i < mp_ncpus; i++)
172 if ((err = cxgb_cache_pcpu_init(&cxgb_caches->ccs_array[i])))
/*
 * Tear down the global cache: drain every per-CPU cache back to its UMA
 * zones, then free the cache array itself.  A NULL cxgb_caches (never
 * initialized, or already flushed) makes this a no-op.
 */
183 cxgb_cache_flush(void)
190 if (cxgb_caches == NULL)
193 for (i = 0; i < mp_ncpus; i++)
194 cxgb_cache_pcpu_deinit(&cxgb_caches->ccs_array[i]);
196 free(cxgb_caches, M_DEVBUF);
/*
 * Allocate a buffer from 'zone', trying the current CPU's cache first
 * when per-CPU caching is enabled: regular clusters come from
 * ccp_cluster_free, buffers from the CPU's jumbo zone from
 * ccp_jumbo_free.  On a cache miss the buffer comes from
 * uma_zalloc(M_NOWAIT); the cxgb_cached_allocations counter tallies
 * cache hits.
 * NOTE(review): the critical_enter()/critical_exit() pairing implied by
 * the unlocked curcpu access, the cl NULL test, and the return are all
 * elided from this listing — confirm against the full source.
 */
201 cxgb_cache_get(uma_zone_t zone)
204 struct cxgb_cache_pcpu *ccp;
206 if (cxgb_pcpu_cache_enable) {
208 ccp = &cxgb_caches->ccs_array[curcpu];
209 if (zone == zone_clust) {
210 cl = buf_stack_pop(&ccp->ccp_cluster_free);
211 } else if (zone == ccp->ccp_jumbo_zone) {
212 cl = buf_stack_pop(&ccp->ccp_jumbo_free);
218 cl = uma_zalloc(zone, M_NOWAIT);
220 cxgb_cached_allocations++;
/*
 * Return a buffer to the current CPU's cache when caching is enabled,
 * pushing onto the stack that matches the buffer's zone.  'err' records
 * a failed push (stack full); the elided tail presumably frees 'cl'
 * back to 'zone' via uma_zfree() in that case — confirm against the
 * full source.
 * NOTE(review): critical-section and fall-through lines are elided from
 * this listing.
 */
226 cxgb_cache_put(uma_zone_t zone, void *cl)
228 struct cxgb_cache_pcpu *ccp;
231 if (cxgb_pcpu_cache_enable) {
233 ccp = &cxgb_caches->ccs_array[curcpu];
234 if (zone == zone_clust) {
235 err = buf_stack_push(&ccp->ccp_cluster_free, cl);
236 } else if (zone == ccp->ccp_jumbo_zone){
237 err = buf_stack_push(&ccp->ccp_jumbo_free, cl);
/*
 * Top up the current CPU's cache from the UMA zones.  Buffers are
 * allocated in batches of up to 8 into 'vec' (stopping early if the
 * zone runs dry), then pushed onto the matching free stack while it
 * has room; any buffer whose push fails, and the unused tail of 'vec'
 * (indices i..count-1), are freed straight back to the zone.  'ccp' is
 * re-fetched before each push loop because curcpu may have changed
 * while allocating outside the (elided) critical section.
 * NOTE(review): this listing elides the critical sections, the branch
 * bodies around the initial buf_stack_avail() test, and the apparent
 * reassignment of 'zone' to zone_clust before the second (cluster)
 * batch — confirm all three against the full source.
 */
249 cxgb_cache_refill(void)
251 struct cxgb_cache_pcpu *ccp;
260 ccp = &cxgb_caches->ccs_array[curcpu];
261 zone = ccp->ccp_jumbo_zone;
262 if (!buf_stack_avail(&ccp->ccp_jumbo_free) &&
263 !buf_stack_avail(&ccp->ccp_cluster_free)) {
271 for (i = 0; i < 8; i++)
272 if ((vec[i] = uma_zalloc(zone, M_NOWAIT)) == NULL)
276 ccp = &cxgb_caches->ccs_array[curcpu];
277 for (i = 0; i < 8 && buf_stack_avail(&ccp->ccp_jumbo_free); i++)
278 if (buf_stack_push(&ccp->ccp_jumbo_free, vec[i]))
283 uma_zfree(zone, vec[i]);
288 for (i = 0; i < 8; i++)
289 if ((vec[i] = uma_zalloc(zone, M_NOWAIT)) == NULL)
293 ccp = &cxgb_caches->ccs_array[curcpu];
294 for (i = 0; i < 8 && buf_stack_avail(&ccp->ccp_cluster_free); i++)
295 if (buf_stack_push(&ccp->ccp_cluster_free, vec[i]))
300 uma_zfree(zone, vec[i]);
307 for (; i < count; i++)
308 uma_zfree(zone, vec[i]);
/*
 * Allocate a buf_ring with 'count' slots; count must be a power of two
 * (asserted), presumably so the producer/consumer indices can wrap by
 * masking.  The ring array is a separate allocation; the lock is
 * created MTX_DUPOK so a thread may hold several buf_ring locks at
 * once.  'flags' is the malloc(9) wait flag (M_WAITOK/M_NOWAIT).
 * NOTE(review): the NULL checks' bodies, the cleanup path for a failed
 * ring allocation, and the final return are elided from this listing.
 */
312 buf_ring_alloc(int count, int flags)
316 KASSERT(powerof2(count), ("buf ring must be size power of 2"));
318 br = malloc(sizeof(struct buf_ring), M_DEVBUF, flags|M_ZERO);
322 br->br_ring = malloc(sizeof(caddr_t)*count, M_DEVBUF, flags|M_ZERO);
323 if (br->br_ring == NULL) {
328 mtx_init(&br->br_lock, "buf ring", NULL, MTX_DUPOK|MTX_DEF);
330 br->br_prod = br->br_cons = 0;
336 buf_ring_free(struct buf_ring *br)
338 free(br->br_ring, M_DEVBUF);