/* sys/dev/cxgb/ulp/iw_cxgb/iw_cxgb_mem.c — FreeBSD releng/10.0 (copy of stable/10 r259064) */
1 /**************************************************************************
2
3 Copyright (c) 2007, Chelsio Inc.
4 All rights reserved.
5
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
8
9  1. Redistributions of source code must retain the above copyright notice,
10     this list of conditions and the following disclaimer.
11
12  2. Neither the name of the Chelsio Corporation nor the names of its
13     contributors may be used to endorse or promote products derived from
14     this software without specific prior written permission.
15
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
27
28 ***************************************************************************/
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include "opt_inet.h"
33
34 #ifdef TCP_OFFLOAD
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
38 #include <sys/bus.h>
39 #include <sys/pciio.h>
40 #include <sys/conf.h>
41 #include <machine/bus.h>
42 #include <machine/resource.h>
43 #include <sys/bus_dma.h>
44 #include <sys/rman.h>
45 #include <sys/ioccom.h>
46 #include <sys/mbuf.h>
47 #include <sys/mutex.h>
48 #include <sys/rwlock.h>
49 #include <sys/linker.h>
50 #include <sys/firmware.h>
51 #include <sys/socket.h>
52 #include <sys/sockio.h>
53 #include <sys/smp.h>
54 #include <sys/sysctl.h>
55 #include <sys/syslog.h>
56 #include <sys/queue.h>
57 #include <sys/taskqueue.h>
58 #include <sys/proc.h>
59 #include <sys/queue.h>
60 #include <sys/libkern.h>
61
62 #include <netinet/in.h>
63
64 #include <rdma/ib_verbs.h>
65 #include <rdma/ib_umem.h>
66 #include <rdma/ib_user_verbs.h>
67 #include <linux/idr.h>
68 #include <ulp/iw_cxgb/iw_cxgb_ib_intfc.h>
69
70 #include <cxgb_include.h>
71 #include <ulp/iw_cxgb/iw_cxgb_wr.h>
72 #include <ulp/iw_cxgb/iw_cxgb_hal.h>
73 #include <ulp/iw_cxgb/iw_cxgb_provider.h>
74 #include <ulp/iw_cxgb/iw_cxgb_cm.h>
75 #include <ulp/iw_cxgb/iw_cxgb.h>
76 #include <ulp/iw_cxgb/iw_cxgb_resource.h>
77 #include <ulp/iw_cxgb/iw_cxgb_user.h>
78
79 static int iwch_finish_mem_reg(struct iwch_mr *mhp, u32 stag)
80 {
81         u32 mmid;
82
83         mhp->attr.state = 1;
84         mhp->attr.stag = stag;
85         mmid = stag >> 8;
86         mhp->ibmr.rkey = mhp->ibmr.lkey = stag;
87         CTR3(KTR_IW_CXGB, "%s mmid 0x%x mhp %p", __func__, mmid, mhp);
88         return insert_handle(mhp->rhp, &mhp->rhp->mmidr, mhp, mmid);
89 }
90
91 int iwch_register_mem(struct iwch_dev *rhp, struct iwch_pd *php,
92                                         struct iwch_mr *mhp,
93                                         int shift)
94 {
95         u32 stag;
96         int ret;
97
98         if (cxio_register_phys_mem(&rhp->rdev,
99                                    &stag, mhp->attr.pdid,
100                                    mhp->attr.perms,
101                                    mhp->attr.zbva,
102                                    mhp->attr.va_fbo,
103                                    mhp->attr.len,
104                                    shift - 12,
105                                    mhp->attr.pbl_size, mhp->attr.pbl_addr))
106                 return (-ENOMEM);
107
108         ret = iwch_finish_mem_reg(mhp, stag);
109         if (ret)
110                 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
111                         mhp->attr.pbl_addr);
112         return ret;
113 }
114
115 int iwch_reregister_mem(struct iwch_dev *rhp, struct iwch_pd *php,
116                                         struct iwch_mr *mhp,
117                                         int shift,
118                                         int npages)
119 {
120         u32 stag;
121         int ret;
122
123         /* We could support this... */
124         if (npages > mhp->attr.pbl_size)
125                 return (-ENOMEM);
126
127         stag = mhp->attr.stag;
128         if (cxio_reregister_phys_mem(&rhp->rdev,
129                                    &stag, mhp->attr.pdid,
130                                    mhp->attr.perms,
131                                    mhp->attr.zbva,
132                                    mhp->attr.va_fbo,
133                                    mhp->attr.len,
134                                    shift - 12,
135                                    mhp->attr.pbl_size, mhp->attr.pbl_addr))
136                 return (-ENOMEM);
137         
138         ret = iwch_finish_mem_reg(mhp, stag);
139         if (ret)
140                 cxio_dereg_mem(&rhp->rdev, mhp->attr.stag, mhp->attr.pbl_size,
141                         mhp->attr.pbl_addr);
142         return ret;
143 }
144
145 int iwch_alloc_pbl(struct iwch_mr *mhp, int npages)
146 {
147         mhp->attr.pbl_addr = cxio_hal_pblpool_alloc(&mhp->rhp->rdev,
148                                                     npages << 3);
149
150         if (!mhp->attr.pbl_addr)
151                 return -ENOMEM;
152
153         mhp->attr.pbl_size = npages;
154
155         return 0;
156  }
157
158 void iwch_free_pbl(struct iwch_mr *mhp)
159 {
160         cxio_hal_pblpool_free(&mhp->rhp->rdev, mhp->attr.pbl_addr,
161                               mhp->attr.pbl_size << 3);
162 }
163
164 int iwch_write_pbl(struct iwch_mr *mhp, __be64 *pages, int npages, int offset)
165 {
166         return cxio_write_pbl(&mhp->rhp->rdev, pages,
167                               mhp->attr.pbl_addr + (offset << 3), npages);
168 }
169
/*
 * Flatten an array of physical buffer descriptors into one page list
 * suitable for a PBL.
 *
 * Outputs: *total_size (sum of buffer sizes, must fit in 32 bits),
 * *shift (log2 of the page size chosen to cover every buffer, starting
 * at PAGE_SHIFT and capped below 27), *npages (count of (1 << *shift)
 * pages), and *page_list (a kmalloc'd array of *npages big-endian page
 * addresses that the caller owns and must free).  Also rounds
 * buffer_list[0] down to a page boundary in place.
 *
 * Returns 0 on success, -EINVAL for a misaligned interior buffer or an
 * empty list, -ENOMEM if the total is too large or allocation fails.
 *
 * NOTE(review): the alignment tests below use Linux-style PAGE_MASK
 * semantics (~(PAGE_SIZE - 1) selects the offset bits); this relies on
 * the iw_cxgb compat headers defining PAGE_MASK that way -- confirm,
 * since native FreeBSD PAGE_MASK is (PAGE_SIZE - 1).
 */
int build_phys_page_list(struct ib_phys_buf *buffer_list,
					int num_phys_buf,
					u64 *iova_start,
					u64 *total_size,
					int *npages,
					int *shift,
					__be64 **page_list)
{
	u64 mask;
	int i, j, n;

	mask = 0;
	*total_size = 0;
	for (i = 0; i < num_phys_buf; ++i) {
		/* Every buffer after the first must start page-aligned. */
		if (i != 0 && buffer_list[i].addr & ~PAGE_MASK)
			return (-EINVAL);
		/* Interior buffers must be a whole number of pages long. */
		if (i != 0 && i != num_phys_buf - 1 &&
		    (buffer_list[i].size & ~PAGE_MASK))
			return (-EINVAL);
		*total_size += buffer_list[i].size;
		/*
		 * Fold every buffer start/end boundary into "mask"; any
		 * low-order bit set there limits how large a page size can
		 * cover the buffers.  The first buffer's in-page offset is
		 * deliberately excluded (it is absorbed below), and the last
		 * buffer's end is rounded up to a page boundary.
		 */
		if (i > 0)
			mask |= buffer_list[i].addr;
		else
			mask |= buffer_list[i].addr & PAGE_MASK;
		if (i != num_phys_buf - 1)
			mask |= buffer_list[i].addr + buffer_list[i].size;
		else
			mask |= (buffer_list[i].addr + buffer_list[i].size +
				PAGE_SIZE - 1) & PAGE_MASK;
	}

	/* Lengths are expressed in 32 bits by the hardware. */
	if (*total_size > 0xFFFFFFFFULL)
		return (-ENOMEM);

	/* Find largest page shift we can use to cover buffers */
	for (*shift = PAGE_SHIFT; *shift < 27; ++(*shift))
		if ((1ULL << *shift) & mask)
			break;

	/* Round the first buffer down to a page boundary, growing its size. */
	buffer_list[0].size += buffer_list[0].addr & ((1ULL << *shift) - 1);
	buffer_list[0].addr &= ~0ull << *shift;

	*npages = 0;
	for (i = 0; i < num_phys_buf; ++i)
		*npages += (buffer_list[i].size +
			(1ULL << *shift) - 1) >> *shift;

	if (!*npages)
		return (-EINVAL);

	*page_list = kmalloc(sizeof(u64) * *npages, M_NOWAIT);
	if (!*page_list)
		return (-ENOMEM);

	/* Emit one big-endian page address per page of each buffer. */
	n = 0;
	for (i = 0; i < num_phys_buf; ++i)
		for (j = 0;
		     j < (buffer_list[i].size + (1ULL << *shift) - 1) >> *shift;
		     ++j)
			(*page_list)[n++] = htobe64(buffer_list[i].addr +
			    ((u64) j << *shift));

	CTR6(KTR_IW_CXGB, "%s va 0x%llx mask 0x%llx shift %d len %lld pbl_size %d",
	     __FUNCTION__, (unsigned long long) *iova_start,
	     (unsigned long long) mask, *shift, (unsigned long long) *total_size,
	     *npages);

	return 0;

}
240 #endif