1 /**************************************************************************
3 Copyright (c) 2007-2008, Chelsio Inc.
6 Redistribution and use in source and binary forms, with or without
7 modification, are permitted provided that the following conditions are met:
9 1. Redistributions of source code must retain the above copyright notice,
10 this list of conditions and the following disclaimer.
12 2. Neither the name of the Chelsio Corporation nor the names of its
13 contributors may be used to endorse or promote products derived from
14 this software without specific prior written permission.
16 THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 POSSIBILITY OF SUCH DAMAGE.
29 ***************************************************************************/
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
35 #include <sys/param.h>
36 #include <sys/systm.h>
37 #include <sys/kernel.h>
39 #include <sys/module.h>
40 #include <sys/pciio.h>
42 #include <machine/bus.h>
43 #include <machine/resource.h>
44 #include <sys/bus_dma.h>
46 #include <sys/ioccom.h>
48 #include <sys/linker.h>
49 #include <sys/firmware.h>
50 #include <sys/socket.h>
51 #include <sys/sockio.h>
53 #include <sys/sysctl.h>
54 #include <sys/syslog.h>
55 #include <sys/queue.h>
56 #include <sys/taskqueue.h>
60 #include <cxgb_include.h>
62 #include <dev/cxgb/cxgb_include.h>
65 #include <net/route.h>
67 #define VALIDATE_TID 0
68 MALLOC_DEFINE(M_CXGB, "cxgb", "Chelsio 10 Gigabit Ethernet and services");
70 TAILQ_HEAD(, cxgb_client) client_list;
71 TAILQ_HEAD(, t3cdev) ofld_dev_list;
74 static struct mtx cxgb_db_lock;
77 static int inited = 0;
80 offload_activated(struct t3cdev *tdev)
82 struct adapter *adapter = tdev2adap(tdev);
84 return (isset(&adapter->open_device_map, OFFLOAD_DEVMAP_BIT));
88 register_tdev(struct t3cdev *tdev)
92 mtx_lock(&cxgb_db_lock);
93 snprintf(tdev->name, sizeof(tdev->name), "ofld_dev%d", unit++);
94 TAILQ_INSERT_TAIL(&ofld_dev_list, tdev, entry);
95 mtx_unlock(&cxgb_db_lock);
99 unregister_tdev(struct t3cdev *tdev)
101 mtx_lock(&cxgb_db_lock);
102 TAILQ_REMOVE(&ofld_dev_list, tdev, entry);
103 mtx_unlock(&cxgb_db_lock);
106 #ifndef TCP_OFFLOAD_DISABLE
108 * cxgb_register_client - register an offload client
109 * @client: the client
111 * Add the client to the client list,
112 * and call backs the client for each activated offload device
115 cxgb_register_client(struct cxgb_client *client)
119 mtx_lock(&cxgb_db_lock);
120 TAILQ_INSERT_TAIL(&client_list, client, client_entry);
123 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
124 if (offload_activated(tdev)) {
128 "cxgb_register_client: %p not activated", tdev);
132 mtx_unlock(&cxgb_db_lock);
136 * cxgb_unregister_client - unregister an offload client
137 * @client: the client
139 * Remove the client to the client list,
140 * and call backs the client for each activated offload device.
143 cxgb_unregister_client(struct cxgb_client *client)
147 mtx_lock(&cxgb_db_lock);
148 TAILQ_REMOVE(&client_list, client, client_entry);
150 if (client->remove) {
151 TAILQ_FOREACH(tdev, &ofld_dev_list, entry) {
152 if (offload_activated(tdev))
153 client->remove(tdev);
156 mtx_unlock(&cxgb_db_lock);
160 * cxgb_add_clients - activate register clients for an offload device
161 * @tdev: the offload device
163 * Call backs all registered clients once a offload device is activated
166 cxgb_add_clients(struct t3cdev *tdev)
168 struct cxgb_client *client;
170 mtx_lock(&cxgb_db_lock);
171 TAILQ_FOREACH(client, &client_list, client_entry) {
175 mtx_unlock(&cxgb_db_lock);
179 * cxgb_remove_clients - activate register clients for an offload device
180 * @tdev: the offload device
182 * Call backs all registered clients once a offload device is deactivated
185 cxgb_remove_clients(struct t3cdev *tdev)
187 struct cxgb_client *client;
189 mtx_lock(&cxgb_db_lock);
190 TAILQ_FOREACH(client, &client_list, client_entry) {
192 client->remove(tdev);
194 mtx_unlock(&cxgb_db_lock);
199 * cxgb_ofld_recv - process n received offload packets
200 * @dev: the offload device
201 * @m: an array of offload packets
202 * @n: the number of offload packets
204 * Process an array of ingress offload packets. Each packet is forwarded
205 * to any active network taps and then passed to the offload device's receive
206 * method. We optimize passing packets to the receive method by passing
207 * it the whole array at once except when there are active taps.
210 cxgb_ofld_recv(struct t3cdev *dev, struct mbuf **m, int n)
213 return dev->recv(dev, m, n);
/*
 * Dummy handler for Rx offload packets in case we get an offload packet before
 * proper processing is setup. This drops the packets as it isn't
 * normal to get offload packets at this stage.
 */
static int
rx_offload_blackhole(struct t3cdev *dev, struct mbuf **m, int n)
{
	/*
	 * NOTE(review): body reconstructed (original lines lost) — frees
	 * every mbuf in the batch; confirm against repository history.
	 */
	while (n--)
		m_freem(m[n]);
	return 0;
}
230 dummy_neigh_update(struct t3cdev *dev, struct rtentry *neigh, uint8_t *enaddr,
236 cxgb_set_dummy_ops(struct t3cdev *dev)
238 dev->recv = rx_offload_blackhole;
239 dev->arp_update = dummy_neigh_update;
243 do_smt_write_rpl(struct t3cdev *dev, struct mbuf *m)
245 struct cpl_smt_write_rpl *rpl = cplhdr(m);
247 if (rpl->status != CPL_ERR_NONE)
249 "Unexpected SMT_WRITE_RPL status %u for entry %u\n",
250 rpl->status, GET_TID(rpl));
252 return CPL_RET_BUF_DONE;
256 do_l2t_write_rpl(struct t3cdev *dev, struct mbuf *m)
258 struct cpl_l2t_write_rpl *rpl = cplhdr(m);
260 if (rpl->status != CPL_ERR_NONE)
262 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
263 rpl->status, GET_TID(rpl));
265 return CPL_RET_BUF_DONE;
269 do_rte_write_rpl(struct t3cdev *dev, struct mbuf *m)
271 struct cpl_rte_write_rpl *rpl = cplhdr(m);
273 if (rpl->status != CPL_ERR_NONE)
275 "Unexpected L2T_WRITE_RPL status %u for entry %u\n",
276 rpl->status, GET_TID(rpl));
278 return CPL_RET_BUF_DONE;
282 do_set_tcb_rpl(struct t3cdev *dev, struct mbuf *m)
284 struct cpl_set_tcb_rpl *rpl = cplhdr(m);
286 if (rpl->status != CPL_ERR_NONE)
288 "Unexpected SET_TCB_RPL status %u for tid %u\n",
289 rpl->status, GET_TID(rpl));
290 return CPL_RET_BUF_DONE;
/*
 * Handle CPL_TRACE_PKT.  The body is dead Linux-derived code left over
 * from the port (skb APIs do not exist on FreeBSD) and is compiled out;
 * trace packets are currently just dropped by the caller's
 * CPL_RET_BUF_DONE-free path being skipped (we return 0).
 */
static int
do_trace(struct t3cdev *dev, struct mbuf *m)
{
#if 0
	struct cpl_trace_pkt *p = cplhdr(m);

	skb->protocol = 0xffff;
	skb->dev = dev->lldev;
	skb_pull(skb, sizeof(*p));
	skb->mac.raw = mtod(m, (char *));
	netif_receive_skb(skb);
#endif
	return 0;
}
310 * Process a received packet with an unknown/unexpected CPL opcode.
313 do_bad_cpl(struct t3cdev *dev, struct mbuf *m)
315 log(LOG_ERR, "%s: received bad CPL command 0x%x\n", dev->name,
316 0xFF & *mtod(m, uint32_t *));
317 return (CPL_RET_BUF_DONE | CPL_RET_BAD_MSG);
321 * Handlers for each CPL opcode
323 static cpl_handler_func cpl_handlers[256];
326 * T3CDEV's receive method.
329 process_rx(struct t3cdev *dev, struct mbuf **m, int n)
332 struct mbuf *m0 = *m++;
333 unsigned int opcode = G_OPCODE(ntohl(m0->m_pkthdr.csum_data));
336 DPRINTF("processing op=0x%x m=%p data=%p\n", opcode, m0, m0->m_data);
338 ret = cpl_handlers[opcode] (dev, m0);
341 if (ret & CPL_RET_UNKNOWN_TID) {
342 union opcode_tid *p = cplhdr(m0);
344 log(LOG_ERR, "%s: CPL message (opcode %u) had "
345 "unknown TID %u\n", dev->name, opcode,
346 G_TID(ntohl(p->opcode_tid)));
349 if (ret & CPL_RET_BUF_DONE)
356 * Add a new handler to the CPL dispatch table. A NULL handler may be supplied
357 * to unregister an existing handler.
360 t3_register_cpl_handler(unsigned int opcode, cpl_handler_func h)
362 if (opcode < NUM_CPL_CMDS)
363 cpl_handlers[opcode] = h ? h : do_bad_cpl;
365 log(LOG_ERR, "T3C: handler registration for "
366 "opcode %x failed\n", opcode);
370 * Allocate a chunk of memory using kmalloc or, if that fails, vmalloc.
371 * The allocated memory is cleared.
374 cxgb_alloc_mem(unsigned long size)
377 return malloc(size, M_CXGB, M_ZERO|M_NOWAIT);
381 * Free memory allocated through t3_alloc_mem().
384 cxgb_free_mem(void *addr)
390 adap2type(struct adapter *adapter)
394 switch (adapter->params.rev) {
410 cxgb_adapter_ofld(struct adapter *adapter)
412 struct t3cdev *tdev = &adapter->tdev;
414 cxgb_set_dummy_ops(tdev);
415 tdev->type = adap2type(adapter);
416 tdev->adapter = adapter;
422 cxgb_adapter_unofld(struct adapter *adapter)
424 struct t3cdev *tdev = &adapter->tdev;
427 tdev->arp_update = NULL;
428 unregister_tdev(tdev);
432 cxgb_offload_init(void)
439 mtx_init(&cxgb_db_lock, "ofld db", NULL, MTX_DEF);
441 TAILQ_INIT(&client_list);
442 TAILQ_INIT(&ofld_dev_list);
444 for (i = 0; i < 0x100; ++i)
445 cpl_handlers[i] = do_bad_cpl;
447 t3_register_cpl_handler(CPL_SMT_WRITE_RPL, do_smt_write_rpl);
448 t3_register_cpl_handler(CPL_RTE_WRITE_RPL, do_rte_write_rpl);
449 t3_register_cpl_handler(CPL_L2T_WRITE_RPL, do_l2t_write_rpl);
451 t3_register_cpl_handler(CPL_SET_TCB_RPL, do_set_tcb_rpl);
452 t3_register_cpl_handler(CPL_TRACE_PKT, do_trace);
457 cxgb_offload_exit(void)
463 mtx_destroy(&cxgb_db_lock);
466 MODULE_VERSION(if_cxgb, 1);