/**************************************************************************

Copyright (c) 2007-2008, Chelsio Inc.

Redistribution and use in source and binary forms, with or without
modification, are permitted provided that the following conditions are met:

 1. Redistributions of source code must retain the above copyright notice,
    this list of conditions and the following disclaimer.

 2. Neither the name of the Chelsio Corporation nor the names of its
    contributors may be used to endorse or promote products derived from
    this software without specific prior written permission.

THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
POSSIBILITY OF SUCH DAMAGE.

***************************************************************************/
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/pciio.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
46 #include <sys/ioccom.h>
48 #include <sys/linker.h>
49 #include <sys/firmware.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/queue.h>
56 #include <sys/taskqueue.h>
59 #include <cxgb_include.h>
61 #include <net/route.h>
63 #define VALIDATE_TID 0
64 MALLOC_DEFINE(M_CXGB, "cxgb", "Chelsio 10 Gigabit Ethernet and services");
66 TAILQ_HEAD(, cxgb_client) client_list;
67 TAILQ_HEAD(, t3cdev) ofld_dev_list;
70 static struct mtx cxgb_db_lock;
73 static int inited = 0;
76 offload_activated(struct t3cdev *tdev)
78 struct adapter *adapter = tdev2adap(tdev);
80 return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
84 register_tdev(struct t3cdev *tdev)
88 mtx_lock(&cxgb_db_lock);
89 snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
90 TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, entry);
91 mtx_unlock(&cxgb_db_lock);
95 unregister_tdev(struct t3cdev *tdev)
100 mtx_lock(&cxgb_db_lock);
101 TAILQ_REMOVE(&ofld_dev_list, tdev, entry);
102 mtx_unlock(&cxgb_db_lock);
105 #ifndef TCP_OFFLOAD_DISABLE
107 * cxgb_register_client - register an offload client
108 * @client: the client
110 * Add the client to the client list,
111 * and call backs the client for each activated offload device
114 cxgb_register_client(struct cxgb_client *client)
118 mtx_lock(&cxgb_db_lock);
119 TAILQ_INSERT_TAIL(&client_list, client, client_entry);
122 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
123 if (offload_activated(tdev)) {
127 "cxgb_register_client: %p not activated", tdev);
131 mtx_unlock(&cxgb_db_lock);
135 * cxgb_unregister_client - unregister an offload client
136 * @client: the client
138 * Remove the client to the client list,
139 * and call backs the client for each activated offload device.
142 cxgb_unregister_client(struct cxgb_client *client)
146 mtx_lock(&cxgb_db_lock);
147 TAILQ_REMOVE(&client_list, client, client_entry);
149 if (client->remove) {
150 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
151 if (offload_activated(tdev))
152 client->remove(tdev);
155 mtx_unlock(&cxgb_db_lock);
159 * cxgb_add_clients - activate register clients for an offload device
160 * @tdev: the offload device
162 * Call backs all registered clients once a offload device is activated
165 cxgb_add_clients(struct t3cdev *tdev)
167 struct cxgb_client *client;
169 mtx_lock(&cxgb_db_lock);
170 TAILQ_FOREACH(client, &client_list, client_entry) {
174 mtx_unlock(&cxgb_db_lock);
178 * cxgb_remove_clients - activate register clients for an offload device
179 * @tdev: the offload device
181 * Call backs all registered clients once a offload device is deactivated
184 cxgb_remove_clients(struct t3cdev *tdev)
186 struct cxgb_client *client;
188 mtx_lock(&cxgb_db_lock);
189 TAILQ_FOREACH(client, &client_list, client_entry) {
191 client->remove(tdev);
193 mtx_unlock(&cxgb_db_lock);
198 * cxgb_ofld_recv - process n received offload packets
199 * @dev: the offload device
200 * @m: an array of offload packets
201 * @n: the number of offload packets
203 * Process an array of ingress offload packets. Each packet is forwarded
204 * to any active network taps and then passed to the offload device's receive
205 * method. We optimize passing packets to the receive method by passing
206 * it the whole array at once except when there are active taps.
209 cxgb_ofld_recv(struct t3cdev *dev, struct mbuf **m, int n)
212 return dev->recv(dev, m, n);
/*
 * Dummy handler for Rx offload packets in case we get an offload packet
 * before proper processing is setup.  This complains and drops the packet
 * as it isn't normal to get offload packets at this stage.
 *
 * NOTE(review): body reconstructed (free every mbuf, report all consumed) —
 * the listing dropped it; verify against driver history.
 */
static int
rx_offload_blackhole(struct t3cdev *dev, struct mbuf **m, int n)
{
        while (n--)
                m_freem(*m++);
        return (0);
}
229 dummy_neigh_update(struct t3cdev *dev, struct rtentry *neigh, uint8_t *enaddr,
235 cxgb_set_dummy_ops(struct t3cdev *dev)
237 dev->recv = rx_offload_blackhole;
238 dev->arp_update = dummy_neigh_update;
242 do_smt_write_rpl(struct t3cdev *dev, struct mbuf *m)
244 struct cpl_smt_write_rpl *rpl = cplhdr(m);
246 if (rpl->status != CPL_ERR_NONE)
248 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
249 rpl->status, GET_TID(rpl));
251 return CPL_RET_BUF_DONE;
255 do_l2t_write_rpl(struct t3cdev *dev, struct mbuf *m)
257 struct cpl_l2t_write_rpl *rpl = cplhdr(m);
259 if (rpl->status != CPL_ERR_NONE)
261 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
262 rpl->status, GET_TID(rpl));
264 return CPL_RET_BUF_DONE;
268 do_rte_write_rpl(struct t3cdev *dev, struct mbuf *m)
270 struct cpl_rte_write_rpl *rpl = cplhdr(m);
272 if (rpl->status != CPL_ERR_NONE)
274 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
275 rpl->status, GET_TID(rpl));
277 return CPL_RET_BUF_DONE;
281 do_set_tcb_rpl(struct t3cdev *dev, struct mbuf *m)
283 struct cpl_set_tcb_rpl *rpl = cplhdr(m);
285 if (rpl->status != CPL_ERR_NONE)
287 "Unexpected SET_TCB_RPL status %u for tid %u\n",
288 rpl->status, GET_TID(rpl));
289 return CPL_RET_BUF_DONE;
/*
 * Handler for CPL_TRACE_PKT.  The body is leftover Linux sk_buff code from
 * the original driver port and cannot compile on FreeBSD; it is disabled
 * with #if 0 (as in the upstream FreeBSD driver) so trace packets are
 * simply accepted and ignored.
 */
static int
do_trace(struct t3cdev *dev, struct mbuf *m)
{
#if 0
        struct cpl_trace_pkt *p = cplhdr(m);

        skb->protocol = 0xffff;
        skb->dev = dev->lldev;
        skb_pull(skb, sizeof(*p));
        skb->mac.raw = mtod(m, (char *));
        netif_receive_skb(skb);
#endif
        return (0);
}
309 * Process a received packet with an unknown/unexpected CPL opcode.
312 do_bad_cpl(struct t3cdev *dev, struct mbuf *m)
314 log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
315 0xFF & *mtod(m, uint32_t *));
316 return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
320 * Handlers for each CPL opcode
322 static cpl_handler_func cpl_handlers[256];
325 * T3CDEV's receive method.
328 process_rx(struct t3cdev *dev, struct mbuf **m, int n)
331 struct mbuf *m0 = *m++;
332 unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
335 DPRINTF("processing op=0x%x m=%p data=%p\n", opcode, m0, m0->m_data);
337 ret = cpl_handlers[opcode] (dev, m0);
340 if (ret & CPL_RET_UNKNOWN_TID) {
341 union opcode_tid *p = cplhdr(m0);
343 log(LOG_ERR, "%s: CPL message (opcode %u) had "
344 "unknown TID %u\n", dev->name, opcode,
345 G_TID(ntohl(p->opcode_tid)));
348 if (ret & CPL_RET_BUF_DONE)
355 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
356 * to unregister an existing handler.
359 t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
361 if (opcode < NUM_CPL_CMDS)
362 cpl_handlers[opcode] = h ? h : do_bad_cpl;
364 log(LOG_ERR, "T3C: handler registration for "
365 "opcode %x failed\n", opcode);
369 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
370 * The allocated memory is cleared.
373 cxgb_alloc_mem(unsigned long size)
376 return malloc(size, M_CXGB, M_ZERO|M_NOWAIT);
380 * Free memory allocated through t3_alloc_mem().
383 cxgb_free_mem(void *addr)
389 adap2type(struct adapter *adapter)
393 switch (adapter->params.rev) {
409 cxgb_adapter_ofld(struct adapter *adapter)
411 struct t3cdev *tdev = &adapter->tdev;
413 cxgb_set_dummy_ops(tdev);
414 tdev->type = adap2type(adapter);
415 tdev->adapter = adapter;
421 cxgb_adapter_unofld(struct adapter *adapter)
423 struct t3cdev *tdev = &adapter->tdev;
426 tdev->arp_update = NULL;
427 unregister_tdev(tdev);
431 cxgb_offload_init(void)
438 mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
440 TAILQ_INIT(&client_list);
441 TAILQ_INIT(&ofld_dev_list);
443 for (i = 0; i < 0x100; ++i)
444 cpl_handlers[i] = do_bad_cpl;
446 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
447 t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
448 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
450 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
451 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
456 cxgb_offload_exit(void)
462 mtx_destroy(&cxgb_db_lock);
465 MODULE_VERSION(if_cxgb, 1);