4 * Copyright (c) 2002-2003
5 * Hidetoshi Shimokawa. All rights reserved.
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions
10 * 1. Redistributions of source code must retain the above copyright
11 * notice, this list of conditions and the following disclaimer.
12 * 2. Redistributions in binary form must reproduce the above copyright
13 * notice, this list of conditions and the following disclaimer in the
14 * documentation and/or other materials provided with the distribution.
15 * 3. All advertising materials mentioning features or use of this software
16 * must display the following acknowledgement:
18 * This product includes software developed by Hidetoshi Shimokawa.
20 * 4. Neither the name of the author nor the names of its contributors
21 * may be used to endorse or promote products derived from this software
22 * without specific prior written permission.
24 * THIS SOFTWARE IS PROVIDED BY THE REGENTS AND CONTRIBUTORS ``AS IS'' AND
25 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
26 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
27 * ARE DISCLAIMED. IN NO EVENT SHALL THE REGENTS OR CONTRIBUTORS BE LIABLE
28 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
29 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
30 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
31 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
32 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
33 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
39 #ifdef HAVE_KERNEL_OPTION_HEADERS
40 #include "opt_device_polling.h"
44 #include <sys/param.h>
45 #include <sys/kernel.h>
46 #include <sys/malloc.h>
48 #include <sys/socket.h>
49 #include <sys/sockio.h>
50 #include <sys/sysctl.h>
51 #include <sys/systm.h>
52 #include <sys/taskqueue.h>
53 #include <sys/module.h>
55 #include <machine/bus.h>
59 #include <net/if_var.h>
60 #include <net/firewire.h>
61 #include <net/if_arp.h>
62 #include <net/if_types.h>
63 #include <dev/firewire/firewire.h>
64 #include <dev/firewire/firewirereg.h>
65 #include <dev/firewire/iec13213.h>
66 #include <dev/firewire/if_fwipvar.h>
69 * We really need a mechanism for allocating regions in the FIFO
70 * address space. We pick a address in the OHCI controller's 'middle'
71 * address space. This means that the controller will automatically
72 * send responses for us, which is fine since we don't have any
73 * important information to put in the response anyway.
/*
 * Fixed address in the OHCI controller's "middle" FIFO address space
 * at which we receive unicast IP datagrams (see comment above).
 */
75 #define INET_FIFO 0xfffe00000000LL
/* Debug printf, gated on the debug.if_fwip_debug sysctl below. */
77 #define FWIPDEBUG if (fwipdebug) if_printf
/* Leave one slot of headroom in the firewire transmit queue. */
78 #define TX_MAX_QUEUE (FWMAXQUEUE - 1)
80 /* network interface */
81 static void fwip_start (struct ifnet *);
82 static int fwip_ioctl (struct ifnet *, u_long, caddr_t);
83 static void fwip_init (void *);
85 static void fwip_post_busreset (void *);
86 static void fwip_output_callback (struct fw_xfer *);
87 static void fwip_async_output (struct fwip_softc *, struct ifnet *);
88 static void fwip_start_send (void *, int);
89 static void fwip_stream_input (struct fw_xferq *);
90 static void fwip_unicast_input(struct fw_xfer *);
/* Module tunables; broadcast_channel packs GASP tag | channel number. */
92 static int fwipdebug = 0;
93 static int broadcast_channel = 0xc0 | 0x1f; /* tag | channel(XXX) */
94 static int tx_speed = 2;
95 static int rx_queue_len = FWMAXQUEUE;
/* Malloc type and sysctl knobs under debug.* and hw.firewire.fwip.* */
97 static MALLOC_DEFINE(M_FWIP, "if_fwip", "IP over FireWire interface");
98 SYSCTL_INT(_debug, OID_AUTO, if_fwip_debug, CTLFLAG_RW, &fwipdebug, 0, "");
99 SYSCTL_DECL(_hw_firewire);
100 static SYSCTL_NODE(_hw_firewire, OID_AUTO, fwip, CTLFLAG_RD, 0,
101 "Firewire ip subsystem");
102 SYSCTL_INT(_hw_firewire_fwip, OID_AUTO, rx_queue_len, CTLFLAG_RWTUN, &rx_queue_len,
103 0, "Length of the receive queue");
105 #ifdef DEVICE_POLLING
106 static poll_handler_t fwip_poll;
/*
 * polling(4) handler: forward the poll to the underlying firewire
 * controller.  NOTE(review): several lines of this function are
 * missing from this excerpt (braces, return, fc assignment).
 */
109 fwip_poll(struct ifnet *ifp, enum poll_cmd cmd, int count)
111 struct fwip_softc *fwip;
112 struct firewire_comm *fc;
/* Nothing to do unless the interface is up and running. */
114 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
117 fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
/* POLL_AND_CHECK_STATUS selects the "quick" argument 0, else 1. */
119 fc->poll(fc, (cmd == POLL_AND_CHECK_STATUS)?0:1, count);
122 #endif /* DEVICE_POLLING */
/*
 * Bus identify method: add one fwip child per firewire bus, reusing
 * the parent's unit number.
 */
125 fwip_identify(driver_t *driver, device_t parent)
127 BUS_ADD_CHILD(parent, 0, "fwip", device_get_unit(parent));
/*
 * Probe: only match the child whose unit equals the parent bus unit
 * (one fwip instance per firewire bus).  NOTE(review): return
 * statements are missing from this excerpt.
 */
131 fwip_probe(device_t dev)
135 pa = device_get_parent(dev);
136 if (device_get_unit(dev) != device_get_unit(pa)) {
140 device_set_desc(dev, "IP over FireWire");
/*
 * Attach: allocate the ifnet, initialize the softc, publish our
 * EUI-64/FIFO address as the link-level address, and attach the
 * interface.  NOTE(review): some lines are missing from this excerpt.
 */
145 fwip_attach(device_t dev)
147 struct fwip_softc *fwip;
150 struct fw_hwaddr *hwaddr;
152 fwip = ((struct fwip_softc *)device_get_softc(dev));
153 unit = device_get_unit(dev);
154 ifp = fwip->fw_softc.fwip_ifp = if_alloc(IFT_IEEE1394);
158 mtx_init(&fwip->mtx, "fwip", NULL, MTX_DEF);
/* The parent firewire bus hands us its firewire_comm via ivars. */
162 fwip->fd.fc = device_get_ivars(dev);
164 tx_speed = fwip->fd.fc->speed;
167 fwip->fd.post_explore = NULL;
168 fwip->fd.post_busreset = fwip_post_busreset;
169 fwip->fw_softc.fwip = fwip;
/* Deferred transmit kick, run from a taskqueue. */
170 TASK_INIT(&fwip->start_send, 0, fwip_start_send, fwip);
173 * Encode our hardware the way that arp likes it.
/* RFC2734 hardware address: EUI-64, maxrec, speed, unicast FIFO. */
175 hwaddr = &IFP2FWC(fwip->fw_softc.fwip_ifp)->fc_hwaddr;
176 hwaddr->sender_unique_ID_hi = htonl(fwip->fd.fc->eui.hi);
177 hwaddr->sender_unique_ID_lo = htonl(fwip->fd.fc->eui.lo);
178 hwaddr->sender_max_rec = fwip->fd.fc->maxrec;
179 hwaddr->sspd = fwip->fd.fc->speed;
180 hwaddr->sender_unicast_FIFO_hi = htons((uint16_t)(INET_FIFO >> 32));
181 hwaddr->sender_unicast_FIFO_lo = htonl((uint32_t)INET_FIFO);
183 /* fill the rest and attach interface */
184 ifp->if_softc = &fwip->fw_softc;
186 if_initname(ifp, device_get_name(dev), unit);
187 ifp->if_init = fwip_init;
188 ifp->if_start = fwip_start;
189 ifp->if_ioctl = fwip_ioctl;
190 ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
191 ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;
192 #ifdef DEVICE_POLLING
193 ifp->if_capabilities |= IFCAP_POLLING;
197 firewire_ifattach(ifp, hwaddr);
200 FWIPDEBUG(ifp, "interface created\n");
/*
 * Stop the interface: disable isochronous receive DMA, tear down the
 * receive buffers and the unicast FIFO binding, and release all
 * pre-allocated xfers.  NOTE(review): lines (fc assignment, loop
 * increments, xfer free calls, braces) are missing from this excerpt.
 */
205 fwip_stop(struct fwip_softc *fwip)
207 struct firewire_comm *fc;
208 struct fw_xferq *xferq;
209 struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
210 struct fw_xfer *xfer, *next;
215 if (fwip->dma_ch >= 0) {
216 xferq = fc->ir[fwip->dma_ch];
218 if (xferq->flag & FWXFERQ_RUNNING)
219 fc->irx_disable(fc, fwip->dma_ch);
/* Clear every mode/ownership flag we set in fwip_init. */
221 ~(FWXFERQ_MODEMASK | FWXFERQ_OPEN | FWXFERQ_STREAM |
222 FWXFERQ_EXTBUF | FWXFERQ_HANDLER | FWXFERQ_CHTAGMASK);
/* Free the receive mbuf clusters and the bulkxfer array. */
225 for (i = 0; i < xferq->bnchunk; i++)
226 m_freem(xferq->bulkxfer[i].mbuf);
227 free(xferq->bulkxfer, M_FWIP);
/* Drop the unicast FIFO binding and its pre-allocated xfers. */
229 fw_bindremove(fc, &fwip->fwb);
230 for (xfer = STAILQ_FIRST(&fwip->fwb.xferlist); xfer != NULL;
232 next = STAILQ_NEXT(xfer, link);
/* Release the transmit xfer pool as well. */
236 for (xfer = STAILQ_FIRST(&fwip->xferlist); xfer != NULL;
238 next = STAILQ_NEXT(xfer, link);
241 STAILQ_INIT(&fwip->xferlist);
243 xferq->bulkxfer = NULL;
247 ifp->if_drv_flags &= ~(IFF_DRV_RUNNING | IFF_DRV_OACTIVE);
/*
 * Detach: deregister polling if enabled, detach the interface and
 * destroy the softc mutex.  NOTE(review): the fwip_stop call, if_free
 * and return are missing from this excerpt.
 */
251 fwip_detach(device_t dev)
253 struct fwip_softc *fwip;
257 fwip = (struct fwip_softc *)device_get_softc(dev);
258 ifp = fwip->fw_softc.fwip_ifp;
260 #ifdef DEVICE_POLLING
261 if (ifp->if_capenable & IFCAP_POLLING)
262 ether_poll_deregister(ifp);
268 firewire_ifdetach(ifp);
270 mtx_destroy(&fwip->mtx);
/*
 * Body of fwip_init (the function header line is missing from this
 * excerpt).  Opens an isochronous receive DMA channel for broadcast
 * GASP traffic, binds the unicast FIFO address, pre-allocates receive
 * and transmit xfers, and marks the interface running.
 */
279 struct fwip_softc *fwip = ((struct fwip_eth_softc *)arg)->fwip;
280 struct firewire_comm *fc;
281 struct ifnet *ifp = fwip->fw_softc.fwip_ifp;
282 struct fw_xferq *xferq;
283 struct fw_xfer *xfer;
287 FWIPDEBUG(ifp, "initializing\n");
/* First-time setup: claim a receive DMA channel for the broadcast
 * channel and wire up the stream input handler. */
291 if (fwip->dma_ch < 0) {
292 fwip->dma_ch = fw_open_isodma(fc, /* tx */0);
293 if (fwip->dma_ch < 0)
295 xferq = fc->ir[fwip->dma_ch];
296 xferq->flag |= FWXFERQ_EXTBUF |
297 FWXFERQ_HANDLER | FWXFERQ_STREAM;
/* Low byte of the flags selects the iso channel/tag to listen on. */
298 xferq->flag &= ~0xff;
299 xferq->flag |= broadcast_channel & 0xff;
300 /* register fwip_input handler */
301 xferq->sc = (caddr_t) fwip;
302 xferq->hand = fwip_stream_input;
303 xferq->bnchunk = rx_queue_len;
305 xferq->psize = MCLBYTES;
308 xferq->bulkxfer = (struct fw_bulkxfer *) malloc(
309 sizeof(struct fw_bulkxfer) * xferq->bnchunk,
311 if (xferq->bulkxfer == NULL) {
312 printf("if_fwip: malloc failed\n");
/* Populate the free list with one cluster mbuf per chunk. */
315 STAILQ_INIT(&xferq->stvalid);
316 STAILQ_INIT(&xferq->stfree);
317 STAILQ_INIT(&xferq->stdma);
318 xferq->stproc = NULL;
319 for (i = 0; i < xferq->bnchunk; i++) {
320 m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
321 xferq->bulkxfer[i].mbuf = m;
322 m->m_len = m->m_pkthdr.len = m->m_ext.ext_size;
323 STAILQ_INSERT_TAIL(&xferq->stfree,
324 &xferq->bulkxfer[i], link);
/* Bind the unicast FIFO address range for incoming block writes. */
327 fwip->fwb.start = INET_FIFO;
328 fwip->fwb.end = INET_FIFO + 16384; /* S3200 packet size */
330 /* pre-allocate xfer */
331 STAILQ_INIT(&fwip->fwb.xferlist);
332 for (i = 0; i < rx_queue_len; i++) {
333 xfer = fw_xfer_alloc(M_FWIP);
336 m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
337 xfer->recv.payload = mtod(m, uint32_t *);
338 xfer->recv.pay_len = MCLBYTES;
339 xfer->hand = fwip_unicast_input;
341 xfer->sc = (caddr_t)fwip;
343 STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
345 fw_bindadd(fc, &fwip->fwb);
/* Pre-allocate the transmit xfer pool. */
347 STAILQ_INIT(&fwip->xferlist);
348 for (i = 0; i < TX_MAX_QUEUE; i++) {
349 xfer = fw_xfer_alloc(M_FWIP);
352 xfer->send.spd = tx_speed;
353 xfer->fc = fwip->fd.fc;
354 xfer->sc = (caddr_t)fwip;
355 xfer->hand = fwip_output_callback;
356 STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
359 xferq = fc->ir[fwip->dma_ch];
/* Invalidate the cached unicast destination (see fwip_async_output). */
361 fwip->last_dest.hi = 0;
362 fwip->last_dest.lo = 0;
365 if ((xferq->flag & FWXFERQ_RUNNING) == 0)
366 fc->irx_enable(fc, fwip->dma_ch);
368 ifp->if_drv_flags |= IFF_DRV_RUNNING;
369 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
372 /* attempt to start output */
/*
 * ioctl handler: bring the interface up/down on SIOCSIFFLAGS, manage
 * polling(4) capability toggling, and pass everything else to the
 * generic firewire_ioctl.  NOTE(review): the switch statement, case
 * labels and several lines are missing from this excerpt.
 */
378 fwip_ioctl(struct ifnet *ifp, u_long cmd, caddr_t data)
380 struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
/* IFF_UP set: start if not running; IFF_UP clear: stop if running. */
386 if (ifp->if_flags & IFF_UP) {
387 if (!(ifp->if_drv_flags & IFF_DRV_RUNNING))
388 fwip_init(&fwip->fw_softc);
390 if (ifp->if_drv_flags & IFF_DRV_RUNNING)
399 #ifdef DEVICE_POLLING
401 struct ifreq *ifr = (struct ifreq *) data;
402 struct firewire_comm *fc = fwip->fd.fc;
/* Enable polling: register handler, then mask controller interrupts. */
404 if (ifr->ifr_reqcap & IFCAP_POLLING &&
405 !(ifp->if_capenable & IFCAP_POLLING)) {
406 error = ether_poll_register(fwip_poll, ifp);
409 /* Disable interrupts */
411 ifp->if_capenable |= IFCAP_POLLING |
412 IFCAP_POLLING_NOCOUNT;
/* Disable polling: deregister and re-enable interrupts. */
415 if (!(ifr->ifr_reqcap & IFCAP_POLLING) &&
416 ifp->if_capenable & IFCAP_POLLING) {
417 error = ether_poll_deregister(ifp);
418 /* Enable interrupts. */
420 ifp->if_capenable &= ~IFCAP_POLLING;
421 ifp->if_capenable &= ~IFCAP_POLLING_NOCOUNT;
425 #endif /* DEVICE_POLLING */
/* Everything else (addresses, MTU, ...) is handled generically. */
429 error = firewire_ioctl(ifp, cmd, data);
/*
 * Bus-reset callback: re-advertise our RFC2734 (IPv4) and RFC3146
 * (IPv6) unit directories in the configuration ROM, invalidate the
 * cached unicast destination, and notify the generic layer.
 */
438 fwip_post_busreset(void *arg)
440 struct fwip_softc *fwip = arg;
441 struct crom_src *src;
442 struct crom_chunk *root;
444 src = fwip->fd.fc->crom_src;
445 root = fwip->fd.fc->crom_root;
447 /* RFC2734 IPv4 over IEEE1394 */
448 bzero(&fwip->unit4, sizeof(struct crom_chunk));
449 crom_add_chunk(src, root, &fwip->unit4, CROM_UDIR);
450 crom_add_entry(&fwip->unit4, CSRKEY_SPEC, CSRVAL_IETF);
451 crom_add_simple_text(src, &fwip->unit4, &fwip->spec4, "IANA");
452 crom_add_entry(&fwip->unit4, CSRKEY_VER, 1);
453 crom_add_simple_text(src, &fwip->unit4, &fwip->ver4, "IPv4");
455 /* RFC3146 IPv6 over IEEE1394 */
456 bzero(&fwip->unit6, sizeof(struct crom_chunk));
457 crom_add_chunk(src, root, &fwip->unit6, CROM_UDIR);
458 crom_add_entry(&fwip->unit6, CSRKEY_SPEC, CSRVAL_IETF);
459 crom_add_simple_text(src, &fwip->unit6, &fwip->spec6, "IANA");
460 crom_add_entry(&fwip->unit6, CSRKEY_VER, 2);
461 crom_add_simple_text(src, &fwip->unit6, &fwip->ver6, "IPv6");
/* Node IDs may have changed; force a fresh header next transmit. */
463 fwip->last_dest.hi = 0;
464 fwip->last_dest.lo = 0;
465 firewire_busreset(fwip->fw_softc.fwip_ifp);
/*
 * Completion handler for transmit xfers: count errors, unload the
 * xfer and return it to the free pool, then restart output if more
 * packets are queued.  NOTE(review): the mutex lock/unlock and some
 * lines are missing from this excerpt.
 */
469 fwip_output_callback(struct fw_xfer *xfer)
471 struct fwip_softc *fwip;
475 fwip = (struct fwip_softc *)xfer->sc;
476 ifp = fwip->fw_softc.fwip_ifp;
477 /* XXX error check */
478 FWIPDEBUG(ifp, "resp = %d\n", xfer->resp);
480 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
482 fw_xfer_unload(xfer);
/* Recycle the xfer for the next packet. */
486 STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
/* Kick the transmit path again if the send queue is non-empty. */
491 if (ifp->if_snd.ifq_head != NULL) {
/*
 * if_start handler: if the DMA channel is not open yet, drain and
 * drop queued packets; otherwise mark the interface active and hand
 * the queue to fwip_async_output.  NOTE(review): loop braces and
 * m_freem/return lines are missing from this excerpt.
 */
497 fwip_start(struct ifnet *ifp)
499 struct fwip_softc *fwip = ((struct fwip_eth_softc *)ifp->if_softc)->fwip;
502 FWIPDEBUG(ifp, "starting\n");
/* Not initialized yet: drop everything queued for transmit. */
504 if (fwip->dma_ch < 0) {
505 struct mbuf *m = NULL;
507 FWIPDEBUG(ifp, "not ready\n");
511 IF_DEQUEUE(&ifp->if_snd, m);
514 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
522 ifp->if_drv_flags |= IFF_DRV_OACTIVE;
524 if (ifp->if_snd.ifq_len != 0)
525 fwip_async_output(fwip, ifp);
527 ifp->if_drv_flags &= ~IFF_DRV_OACTIVE;
531 /* Async. stream output */
/*
 * Drain the interface send queue: broadcast/multicast packets go out
 * as GASP stream packets on the broadcast channel; unicast packets go
 * out as block writes to the destination's advertised FIFO address.
 * NOTE(review): numerous lines (locals, braces, mbuf attach, queue
 * restart) are missing from this excerpt.
 */
533 fwip_async_output(struct fwip_softc *fwip, struct ifnet *ifp)
535 struct firewire_comm *fc = fwip->fd.fc;
538 struct fw_hwaddr *destfw;
539 struct fw_xfer *xfer;
540 struct fw_xferq *xferq;
/* Keep one slot free in the controller queue to avoid overrun. */
548 while ((xferq->queued < xferq->maxq - 1) &&
549 (ifp->if_snd.ifq_head != NULL)) {
551 xfer = STAILQ_FIRST(&fwip->xferlist);
555 printf("if_fwip: lack of xfer\n");
559 STAILQ_REMOVE_HEAD(&fwip->xferlist, link);
562 IF_DEQUEUE(&ifp->if_snd, m);
/* Queue drained: put the xfer back and stop. */
565 STAILQ_INSERT_HEAD(&fwip->xferlist, xfer, link);
571 * Dig out the link-level address which
572 * firewire_output got via arp or neighbour
573 * discovery. If we don't have a link-level address,
574 * just stick the thing on the broadcast channel.
576 mtag = m_tag_locate(m, MTAG_FIREWIRE, MTAG_FIREWIRE_HWADDR, 0);
580 destfw = (struct fw_hwaddr *) (mtag + 1);
584 * We don't do any bpf stuff here - the generic code
585 * in firewire_output gives the packet to bpf before
586 * it adds the link-level encapsulation.
590 * Put the mbuf in the xfer early in case we hit an
591 * error case below - fwip_output_callback will free
597 * We use the arp result (if any) to add a suitable firewire
598 * packet header before handing off to the bus.
600 fp = &xfer->send.hdr;
601 nodeid = FWLOCALBUS | fc->nodeid;
602 if ((m->m_flags & M_BCAST) || !destfw) {
604 * Broadcast packets are sent as GASP packets with
605 * specifier ID 0x00005e, version 1 on the broadcast
606 * channel. To be conservative, we send at the
607 * slowest possible speed.
/* Prepend the two-word GASP header. */
611 M_PREPEND(m, 2*sizeof(uint32_t), M_NOWAIT);
612 p = mtod(m, uint32_t *);
613 fp->mode.stream.len = m->m_pkthdr.len;
614 fp->mode.stream.chtag = broadcast_channel;
615 fp->mode.stream.tcode = FWTCODE_STREAM;
616 fp->mode.stream.sy = 0;
/* GASP header: source node ID, then specifier 0x00005e / version 1. */
618 p[0] = htonl(nodeid << 16);
619 p[1] = htonl((0x5e << 24) | 1);
622 * Unicast packets are sent as block writes to the
623 * target's unicast fifo address. If we can't
624 * find the node address, we just give up. We
625 * could broadcast it but that might overflow
626 * the packet size limitations due to the
627 * extra GASP header. Note: the hardware
628 * address is stored in network byte order to
629 * make life easier for ARP.
631 struct fw_device *fd;
634 eui.hi = ntohl(destfw->sender_unique_ID_hi);
635 eui.lo = ntohl(destfw->sender_unique_ID_lo);
/* Cache the write-request header per destination EUI-64; only
 * rebuild it when the destination changes. */
636 if (fwip->last_dest.hi != eui.hi ||
637 fwip->last_dest.lo != eui.lo) {
638 fd = fw_noderesolve_eui64(fc, &eui);
641 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
642 /* XXX set error code */
643 fwip_output_callback(xfer);
647 fwip->last_hdr.mode.wreqb.dst = FWLOCALBUS | fd->dst;
648 fwip->last_hdr.mode.wreqb.tlrt = 0;
649 fwip->last_hdr.mode.wreqb.tcode = FWTCODE_WREQB;
650 fwip->last_hdr.mode.wreqb.pri = 0;
651 fwip->last_hdr.mode.wreqb.src = nodeid;
652 fwip->last_hdr.mode.wreqb.dest_hi =
653 ntohs(destfw->sender_unicast_FIFO_hi);
654 fwip->last_hdr.mode.wreqb.dest_lo =
655 ntohl(destfw->sender_unicast_FIFO_lo);
656 fwip->last_hdr.mode.wreqb.extcode = 0;
657 fwip->last_dest = eui;
660 fp->mode.wreqb = fwip->last_hdr.mode.wreqb;
661 fp->mode.wreqb.len = m->m_pkthdr.len;
/* Never exceed the slower of our and the peer's speed. */
662 xfer->send.spd = min(destfw->sspd, fc->speed);
665 xfer->send.pay_len = m->m_pkthdr.len;
667 error = fw_asyreq(fc, -1, xfer);
668 if (error == EAGAIN) {
670 * We ran out of tlabels - requeue the packet
671 * for later transmission.
675 STAILQ_INSERT_TAIL(&fwip->xferlist, xfer, link);
677 IF_PREPEND(&ifp->if_snd, m);
682 if_inc_counter(ifp, IFCOUNTER_OERRORS, 1);
683 /* XXX set error code */
684 fwip_output_callback(xfer);
687 if_inc_counter(ifp, IFCOUNTER_OPACKETS, 1);
693 printf("%d queued\n", i);
/*
 * Taskqueue handler: kick the controller's async transmit queue so
 * queued xfers actually go out on the wire.
 */
700 fwip_start_send (void *arg, int count)
702 struct fwip_softc *fwip = arg;
704 fwip->fd.fc->atq->start(fwip->fd.fc);
707 /* Async. stream output */
/*
 * Receive handler for the broadcast (GASP) stream channel: for each
 * completed chunk, validate the GASP header, tag the sender EUI-64
 * for bpf, strip the encapsulation and feed the packet to
 * firewire_input.  NOTE(review): many lines (locals, braces, m_dup /
 * continue paths) are missing from this excerpt.
 */
709 fwip_stream_input(struct fw_xferq *xferq)
714 struct fwip_softc *fwip;
715 struct fw_bulkxfer *sxfer;
721 fwip = (struct fwip_softc *)xferq->sc;
722 ifp = fwip->fw_softc.fwip_ifp;
724 while ((sxfer = STAILQ_FIRST(&xferq->stvalid)) != NULL) {
725 STAILQ_REMOVE_HEAD(&xferq->stvalid, link);
726 fp = mtod(sxfer->mbuf, struct fw_pkt *);
/* Give the controller a chance to post-process the chunk. */
727 if (fwip->fd.fc->irx_post != NULL)
728 fwip->fd.fc->irx_post(fwip->fd.fc, fp->mode.ld);
731 /* insert new rbuf */
732 sxfer->mbuf = m0 = m_getcl(M_NOWAIT, MT_DATA, M_PKTHDR);
734 m0->m_len = m0->m_pkthdr.len = m0->m_ext.ext_size;
735 STAILQ_INSERT_TAIL(&xferq->stfree, sxfer, link);
737 printf("fwip_as_input: m_getcl failed\n");
740 * We must have a GASP header - leave the
741 * encapsulation sanity checks to the generic
742 * code. Remember that we also have the firewire async
743 * stream header even though that isn't accounted for
744 * in mode.stream.len.
746 if (sxfer->resp != 0 || fp->mode.stream.len <
747 2*sizeof(uint32_t)) {
749 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
752 m->m_len = m->m_pkthdr.len = fp->mode.stream.len
753 + sizeof(fp->mode.stream);
756 * If we received the packet on the broadcast channel,
757 * mark it as broadcast, otherwise we assume it must
760 if (fp->mode.stream.chtag == broadcast_channel)
761 m->m_flags |= M_BCAST;
763 m->m_flags |= M_MCAST;
766 * Make sure we recognise the GASP specifier and
/* Specifier ID must be 0x00005e (IANA) and version must be 1. */
769 p = mtod(m, uint32_t *);
770 if ((((ntohl(p[1]) & 0xffff) << 8) | ntohl(p[2]) >> 24) != 0x00005e
771 || (ntohl(p[2]) & 0xffffff) != 1) {
772 FWIPDEBUG(ifp, "Unrecognised GASP header %#08x %#08x\n",
773 ntohl(p[1]), ntohl(p[2]));
775 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
780 * Record the sender ID for possible BPF usage.
782 src = ntohl(p[1]) >> 16;
783 if (bpf_peers_present(ifp->if_bpf)) {
784 mtag = m_tag_alloc(MTAG_FIREWIRE,
785 MTAG_FIREWIRE_SENDER_EUID,
786 2*sizeof(uint32_t), M_NOWAIT);
788 /* bpf wants it in network byte order */
789 struct fw_device *fd;
790 uint32_t *p = (uint32_t *) (mtag + 1);
791 fd = fw_noderesolve_nodeid(fwip->fd.fc,
794 p[0] = htonl(fd->eui.hi);
795 p[1] = htonl(fd->eui.lo);
800 m_tag_prepend(m, mtag);
805 * Trim off the GASP header
807 m_adj(m, 3*sizeof(uint32_t));
808 m->m_pkthdr.rcvif = ifp;
809 firewire_input(ifp, m, src);
810 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
/* Re-arm receive DMA if we returned any buffers to the free list. */
812 if (STAILQ_FIRST(&xferq->stfree) != NULL)
813 fwip->fd.fc->irx_enable(fwip->fd.fc, fwip->dma_ch);
/*
 * Re-arm a unicast receive xfer with a fresh cluster mbuf and put it
 * back on the FIFO binding's xfer list.  NOTE(review): the mbuf
 * declaration and the xfer->mbuf assignment are missing from this
 * excerpt.
 */
817 fwip_unicast_input_recycle(struct fwip_softc *fwip, struct fw_xfer *xfer)
822 * We have finished with a unicast xfer. Allocate a new
823 * cluster and stick it on the back of the input queue.
825 m = m_getcl(M_WAITOK, MT_DATA, M_PKTHDR);
827 xfer->recv.payload = mtod(m, uint32_t *);
828 xfer->recv.pay_len = MCLBYTES;
830 STAILQ_INSERT_TAIL(&fwip->fwb.xferlist, xfer, link);
/*
 * Receive handler for unicast block writes to our INET_FIFO address:
 * validate the tcode and destination address, recycle the xfer,
 * optionally tag the sender EUI-64 for bpf, and pass the payload to
 * firewire_input.  NOTE(review): several lines (locals, rtcode use in
 * the response, braces) are missing from this excerpt.
 */
834 fwip_unicast_input(struct fw_xfer *xfer)
840 struct fwip_softc *fwip;
842 //struct fw_pkt *sfp;
845 fwip = (struct fwip_softc *)xfer->sc;
846 ifp = fwip->fw_softc.fwip_ifp;
849 fp = &xfer->recv.hdr;
852 * Check the fifo address - we only accept addresses of
/* Only block writes addressed exactly at INET_FIFO are accepted. */
855 address = ((uint64_t)fp->mode.wreqb.dest_hi << 32)
856 | fp->mode.wreqb.dest_lo;
857 if (fp->mode.wreqb.tcode != FWTCODE_WREQB) {
858 rtcode = FWRCODE_ER_TYPE;
859 } else if (address != INET_FIFO) {
860 rtcode = FWRCODE_ER_ADDR;
862 rtcode = FWRCODE_COMPLETE;
866 * Pick up a new mbuf and stick it on the back of the receive
869 fwip_unicast_input_recycle(fwip, xfer);
872 * If we've already rejected the packet, give up now.
874 if (rtcode != FWRCODE_COMPLETE) {
876 if_inc_counter(ifp, IFCOUNTER_IERRORS, 1);
880 if (bpf_peers_present(ifp->if_bpf)) {
882 * Record the sender ID for possible BPF usage.
884 mtag = m_tag_alloc(MTAG_FIREWIRE, MTAG_FIREWIRE_SENDER_EUID,
885 2*sizeof(uint32_t), M_NOWAIT);
887 /* bpf wants it in network byte order */
888 struct fw_device *fd;
889 uint32_t *p = (uint32_t *) (mtag + 1);
890 fd = fw_noderesolve_nodeid(fwip->fd.fc,
891 fp->mode.wreqb.src & 0x3f);
893 p[0] = htonl(fd->eui.hi);
894 p[1] = htonl(fd->eui.lo);
899 m_tag_prepend(m, mtag);
904 * Hand off to the generic encapsulation code. We don't use
905 * ifp->if_input so that we can pass the source nodeid as an
906 * argument to facilitate link-level fragment reassembly.
908 m->m_len = m->m_pkthdr.len = fp->mode.wreqb.len;
909 m->m_pkthdr.rcvif = ifp;
910 firewire_input(ifp, m, fp->mode.wreqb.src);
911 if_inc_counter(ifp, IFCOUNTER_IPACKETS, 1);
/* Newbus driver glue: device methods, driver declaration and module
 * registration.  NOTE(review): DEVMETHOD_END and some driver_t fields
 * are missing from this excerpt. */
914 static devclass_t fwip_devclass;
916 static device_method_t fwip_methods[] = {
917 /* device interface */
918 DEVMETHOD(device_identify, fwip_identify),
919 DEVMETHOD(device_probe, fwip_probe),
920 DEVMETHOD(device_attach, fwip_attach),
921 DEVMETHOD(device_detach, fwip_detach),
925 static driver_t fwip_driver = {
928 sizeof(struct fwip_softc),
/* Attach to the firewire bus; depends on the firewire module. */
932 DRIVER_MODULE(fwip, firewire, fwip_driver, fwip_devclass, 0, 0);
933 MODULE_VERSION(fwip, 1);
934 MODULE_DEPEND(fwip, firewire, 1, 1, 1);