2 * Copyright (c) 2001-2003
3 * Fraunhofer Institute for Open Communication Systems (FhG Fokus).
6 * Redistribution and use in source and binary forms, with or without
7 * modification, are permitted provided that the following conditions
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in the
13 * documentation and/or other materials provided with the distribution.
15 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
16 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
17 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
18 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
19 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
20 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
21 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
22 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
23 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
24 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
27 * Author: Hartmut Brandt <harti@freebsd.org>
31 * Fore HE driver for NATM
35 * Debug statistics of the HE driver
/* Debug-statistics counters (fragment; neighbouring counters not visible here). */
50 uint32_t bug_no_irq_upd;	/* NOTE(review): name suggests an interrupt without a queue update was seen — confirm in the ISR */
57 uint32_t itype_unknown;	/* NOTE(review): presumably counts interrupt entries with an unrecognized type — confirm */
68 /* Card memory layout parameters */
/*
 * NOTE(review): two alternative parameter sets follow (20 cells/row vs.
 * 40 cells/row).  Presumably one is selected per adapter variant (cf.
 * the he622 flag in the softc); the selecting code is not visible in
 * this chunk, and the macro's brace/separator lines are missing here.
 */
69 #define HE_CONFIG_MEM_LAYOUT { \
71 20, /* cells_per_row */ \
72 1024, /* bytes_per_row */ \
73 512, /* r0_numrows */ \
74 1018, /* tx_numrows */ \
75 512, /* r1_numrows */ \
76 6, /* r0_startrow */ \
77 2 /* cells_per_lbuf */ \
79 40, /* cells_per_row */ \
80 2048, /* bytes_per_row */ \
81 256, /* r0_numrows */ \
82 512, /* tx_numrows */ \
83 256, /* r1_numrows */ \
84 0, /* r0_startrow */ \
85 4 /* cells_per_lbuf */ \
89 /*********************************************************************/
93 * A chunk of DMA-able memory
96 u_int size; /* in bytes */
97 u_int align; /* alignment */
98 bus_dma_tag_t tag; /* DMA tag */
99 void *base; /* the memory (kernel virtual address) */
100 bus_addr_t paddr; /* physical address */
101 bus_dmamap_t map; /* the MAP */
105 * RBP (Receive Buffer Pool) queue entry and queue.
108 u_int size; /* RBP number of entries (power of two) */
109 u_int thresh; /* interrupt threshold */
110 uint32_t bsize; /* buffer size in bytes */
111 u_int offset; /* free space at start for small bufs */
112 uint32_t mask; /* mask for index (presumably size - 1 — confirm) */
113 struct dmamem mem; /* the queue area */
114 struct he_rbpen *rbp;	/* queue entries (presumably within mem) */
115 uint32_t head, tail; /* head and tail */
119 * RBRQ (Receive Buffer Return Queue) entry and queue.
122 u_int size; /* number of entries */
123 u_int thresh; /* interrupt threshold */
124 u_int tout; /* timeout value */
125 u_int pcnt; /* packet count threshold */
126 struct dmamem mem; /* memory */
127 struct he_rbrqen *rbrq;	/* queue entries (presumably within mem) */
128 uint32_t head; /* driver end */
132 * TPDRQ (Transmit Packet Descriptor Ready Queue) entry and queue
135 u_int size; /* number of entries */
136 struct dmamem mem; /* memory */
137 struct he_tpdrqen *tpdrq;	/* queue entries (presumably within mem) */
138 u_int head; /* head (copy of adapter) */
139 u_int tail; /* written back to adapter */
143 * TBRQ (Transmit Buffer Return Queue) entry and queue
146 u_int size; /* number of entries */
147 u_int thresh; /* interrupt threshold */
148 struct dmamem mem; /* memory */
149 struct he_tbrqen *tbrq;	/* queue entries (presumably within mem) */
150 u_int head; /* adapter end */
153 /*==================================================================*/
156 * TPDs are 32 byte and must be aligned on 64 byte boundaries. That means,
157 * that half of the space is free. We use this space to plug in a link for
158 * the list of free TPDs. Note, that the m_act member of the mbufs contain
159 * a pointer to the dmamap.
161 * The maximum number of TPDs is the size of the common transmit packet
162 * descriptor ready queue plus the sizes of the transmit buffer return queues
163 * (currently only queue 0). We allocate and map these TPDs when initializing
164 * the card. We also allocate one DMA map for each TPD. Only the map in the
165 * last TPD of a packet is used when a packet is transmitted.
166 * This is signalled by having the mbuf member of this TPD non-zero and
167 * pointing to the mbuf.
169 #define HE_TPD_SIZE 64	/* bytes reserved per TPD: 32-byte descriptor plus link space (see comment above) */
171 struct he_tpd tpd; /* at beginning */
172 SLIST_ENTRY(tpd) link; /* free cid list link */
173 struct mbuf *mbuf; /* the buf chain */
174 bus_dmamap_t map; /* map */
175 uint32_t cid; /* CID */
176 uint16_t no; /* number of this tpd */
178 SLIST_HEAD(tpd_list, tpd);
/* Used-TPD bitmap helpers: one bit per TPD in the softc's tpd_used array. */
180 #define TPD_SET_USED(SC, I) do { \
181 (SC)->tpd_used[(I) / 8] |= (1 << ((I) % 8)); \
184 #define TPD_CLR_USED(SC, I) do { \
185 (SC)->tpd_used[(I) / 8] &= ~(1 << ((I) % 8)); \
188 #define TPD_TST_USED(SC, I) ((SC)->tpd_used[(I) / 8] & (1 << ((I) % 8)))
/* NOTE(review): body uses lowercase 'sc', not the SC parameter — only works where the caller's local is named sc. */
190 #define TPD_ADDR(SC, I) ((struct tpd *)((char *)sc->tpds.base + \
193 /*==================================================================*/
196 * External MBUFs. The card needs a lot of mbufs in the pools for high
197 * performance. The problem with using mbufs directly is that we would need
198 * a dmamap for each of the mbufs. This can exhaust iommu space on the sparc
199 * and it eats also a lot of processing time. So we use external mbufs
200 * for the small buffers and clusters for the large buffers.
201 * For receive group 0 we use 5 ATM cells, for group 1 one (52 byte) ATM
202 * cell. The mbuf storage is allocated pagewise and one dmamap is used per
205 * The handle we give to the card for the small buffers is a word combined
206 * of the page number and the number of the chunk in the page. This restricts
207 * the number of chunks per page to 256 (8 bit) and the number of pages to
210 * A chunk may be in one of three states: free, on the card and floating around
211 * in the system. If it is free, it is on one of the two free lists and
212 * starts with a struct mbufx_free. Each page has a bitmap that tracks where
215 * For large buffers we use mbuf clusters. Here we have two problems: we need
216 * to track the buffers on the card (in the case we want to stop it) and
217 * we need to map the 64bit mbuf address to a 26bit handle for 64-bit machines.
218 * The card uses the buffers in the order we give them to the card. Therefore
219 * we can use a private array holding pointers to the mbufs as a circular
220 * queue for both tasks. This is done with the lbufs member of softc. The
221 * handle for these buffers is the lbufs index ored with a flag.
224 /* data space in each external mbuf */
225 #define MBUF0_SIZE (5 * 48) /* 240 */
226 #define MBUF1_SIZE (52) /* 1 raw cell */
228 /* size of the buffer. Must fit data, offset and header */
229 #define MBUF0_CHUNK 256 /* 16 free bytes */
230 #define MBUF1_CHUNK 96 /* 44 free bytes */
232 /* start of actual data in buffer */
233 #define MBUF0_OFFSET 0
234 #define MBUF1_OFFSET 16
236 #define MBUFL_OFFSET 16 /* two pointers for HARP */
/*
 * NOTE(review): two alternative definitions of MBUF_ALLOC_SIZE follow;
 * presumably they were selected by a preprocessor conditional that is
 * not visible in this chunk.
 */
239 #define MBUF_ALLOC_SIZE (8192)
241 #define MBUF_ALLOC_SIZE (PAGE_SIZE)
244 /* each allocated page has one of these structures at its very end. */
245 struct mbuf_page_hdr {
246 uint16_t nchunks; /* chunks on this page */
247 bus_dmamap_t map; /* the DMA MAP */
248 uint32_t phys; /* physical base address */
249 uint32_t hdroff; /* chunk header offset */
250 uint32_t chunksize; /* chunk size */
251 u_int pool; /* pool number */
/* The page itself: chunk storage followed by the header at the very end. */
254 char storage[MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)];
255 struct mbuf_page_hdr hdr;
258 /* numbers per page */
259 #define MBUF0_PER_PAGE ((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \
261 #define MBUF1_PER_PAGE ((MBUF_ALLOC_SIZE - sizeof(struct mbuf_page_hdr)) / \
265 * Convert to/from handles
/*
 * Small-buffer handle layout (cf. MBUF_PARSE_HANDLE below): chunk number
 * in the low 10 bits, page number above it, the whole value shifted left
 * by HE_REGS_RBRQ_ADDR.  Large-buffer handles are an index shifted the
 * same way and tagged with MBUF_LARGE_FLAG.
 */
268 #define MBUF_MAKE_HANDLE(PAGENO, CHUNKNO) \
269 ((((PAGENO) << 10) | (CHUNKNO)) << HE_REGS_RBRQ_ADDR)
270 #define MBUF_MAKE_LHANDLE(INDEX) \
271 (MBUF_LARGE_FLAG | ((INDEX) << HE_REGS_RBRQ_ADDR))
274 #define MBUF_PARSE_HANDLE(HANDLE, PAGENO, CHUNKNO) do { \
275 (CHUNKNO) = ((HANDLE) >> HE_REGS_RBRQ_ADDR) & 0x3ff; \
276 (PAGENO) = (((HANDLE) >> 10) >> HE_REGS_RBRQ_ADDR) & 0x3fff; \
278 #define MBUF_PARSE_LHANDLE(HANDLE, INDEX) do { \
279 (INDEX) = ((HANDLE) >> HE_REGS_RBRQ_ADDR) & 0xffffff; \
282 #define MBUF_LARGE_FLAG 0x80000000
284 /* chunks have the following structure at the end (8 byte) */
285 struct mbuf_chunk_hdr {
291 #define MBUF_CARD 0x01 /* buffer is on card */
292 #define MBUF_USED 0x02 /* buffer is somewhere in the system */
294 #define MBUFX_STORAGE_SIZE(X) (MBUF##X##_CHUNK \
295 - sizeof(struct mbuf_chunk_hdr))
/* Small-buffer chunk layouts: storage followed by the chunk header. */
298 char storage[MBUFX_STORAGE_SIZE(0)];
299 struct mbuf_chunk_hdr hdr;
303 char storage[MBUFX_STORAGE_SIZE(1)];
304 struct mbuf_chunk_hdr hdr;
/* A free chunk starts with this link (see the free-list comment above). */
308 struct mbufx_free *link;
311 /*==================================================================*/
/* Fields of the interrupt-queue state (struct heirq; cf. irq_0 in the softc). */
317 u_int size; /* number of entries */
318 u_int thresh; /* re-interrupt threshold */
319 u_int line; /* interrupt line to use */
320 struct dmamem mem; /* interrupt queues */
321 uint32_t * irq; /* interrupt queue */
322 uint32_t head; /* head index */
323 uint32_t * tailp; /* pointer to tail */
324 struct hatm_softc *sc; /* back pointer */
325 u_int group; /* interrupt group */
329 * This structure describes all information for a VCC open on the card.
330 * The array of these structures is indexed by the compressed connection ID
331 * (CID). This structure must begin with the atmio_vcc.
334 struct atmio_vcc param; /* traffic parameters */
335 void * rxhand; /* NATM protocol block */
336 u_int vflags; /* private flags */
342 u_int rc; /* rate control group for CBR */
343 struct mbuf * chain; /* partial received PDU */
344 struct mbuf * last; /* last mbuf in chain */
345 u_int ntpds; /* number of active TPDs */
/* vflags values; HE_VCC_OPEN is the union of the four RX/TX open/closing bits. */
347 #define HE_VCC_OPEN 0x000f0000
348 #define HE_VCC_RX_OPEN 0x00010000
349 #define HE_VCC_RX_CLOSING 0x00020000
350 #define HE_VCC_TX_OPEN 0x00040000
351 #define HE_VCC_TX_CLOSING 0x00080000
352 #define HE_VCC_FLOW_CTRL 0x00100000
/* One CBR rate-control group (struct herg; cf. rate_ctrl[] in the softc). */
358 u_int refcnt; /* how many connections reference this group */
359 u_int rate; /* the value */
/*
 * Fields of the per-device softc (struct hatm_softc; the structure's
 * opening line and several members are not visible in this chunk).
 */
367 struct mtx mtx; /* lock */
368 struct ifmedia media; /* media */
369 device_t dev; /* device */
370 int memid; /* resource id for memory */
371 struct resource * memres; /* memory resource */
372 bus_space_handle_t memh; /* handle */
373 bus_space_tag_t memt; /* ... and tag */
374 bus_dma_tag_t parent_tag; /* global restriction */
375 struct cv vcc_cv; /* condition variable */
376 int irqid; /* resource id */
377 struct resource * irqres; /* resource */
378 void * ih; /* interrupt handle */
379 struct utopia utopia; /* utopia state */
381 /* rest has to be reset by stop */
382 int he622; /* this is a HE622 */
383 int pci64; /* 64bit bus */
384 char prod_id[HE_EEPROM_PROD_ID_LEN + 1];
385 char rev[HE_EEPROM_REV_LEN + 1];
386 struct heirq irq_0; /* interrupt queues 0 */
388 /* generic network controller state */
397 u_int cells_per_lbuf;	/* cells per large buffer (cf. HE_CONFIG_MEM_LAYOUT) */
404 struct dmamem hsp_mem;	/* NOTE(review): presumably the host status page — confirm */
407 struct hetbrq tbrq; /* TBRQ 0 */
408 struct hetpdrq tpdrq; /* TPDRQ */
409 struct tpd_list tpd_free; /* Free TPDs */
410 u_int tpd_nfree; /* number of free TPDs */
411 u_int tpd_total; /* total TPDs */
412 uint8_t *tpd_used; /* bitmap of used TPDs */
413 struct dmamem tpds; /* TPD memory */
414 bus_dma_tag_t tx_tag; /* DMA tag for all tx mbufs */
417 /* receive/transmit groups */
418 struct herbp rbp_s0; /* RBPS0 */
419 struct herbp rbp_l0; /* RBPL0 */
420 struct herbp rbp_s1; /* RBPS1 */
421 struct herbrq rbrq_0; /* RBRQ0 */
422 struct herbrq rbrq_1; /* RBRQ1 */
424 /* list of external mbuf storage */
425 bus_dma_tag_t mbuf_tag;
426 struct mbuf_page **mbuf_pages;
428 u_int mbuf_max_pages;
429 struct mbufx_free *mbuf_list[2];
431 /* mbuf cluster tracking and mapping for group 0 */
432 struct mbuf **lbufs; /* mbufs */
433 bus_dmamap_t *rmaps; /* DMA maps */
438 struct hevcc *vccs[HE_MAX_VCCS];
439 u_int cbr_bw; /* BW allocated to CBR */
440 u_int max_tpd; /* per VCC */
445 struct herg rate_ctrl[HE_REGN_CS_STPER];
448 u_int tsrb, tsrc, tsrd;	/* area offsets used by READ_TSR/WRITE_TSR below */
451 struct cv cv_rcclose; /* condition variable */
452 uint32_t rate_grid[16][16]; /* our copy */
455 struct sysctl_ctx_list sysctl_ctx;
456 struct sysctl_oid *sysctl_tree;
458 /* internal statistics */
459 struct istats istats;
467 /* transmit mbuf count */
/* Register access helpers (memt/memh are the softc's bus-space tag/handle). */
472 #define READ4(SC,OFF) bus_space_read_4(SC->memt, SC->memh, (OFF))
473 #define READ2(SC,OFF) bus_space_read_2(SC->memt, SC->memh, (OFF))
474 #define READ1(SC,OFF) bus_space_read_1(SC->memt, SC->memh, (OFF))
476 #define WRITE4(SC,OFF,VAL) bus_space_write_4(SC->memt, SC->memh, (OFF), (VAL))
477 #define WRITE2(SC,OFF,VAL) bus_space_write_2(SC->memt, SC->memh, (OFF), (VAL))
478 #define WRITE1(SC,OFF,VAL) bus_space_write_1(SC->memt, SC->memh, (OFF), (VAL))
480 #define BARRIER_R(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
481 BUS_SPACE_BARRIER_READ)
482 #define BARRIER_W(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
483 BUS_SPACE_BARRIER_WRITE)
484 #define BARRIER_RW(SC) bus_space_barrier(SC->memt, SC->memh, 0, HE_REGO_END, \
485 BUS_SPACE_BARRIER_READ | BUS_SPACE_BARRIER_WRITE)
487 #define READ_SUNI(SC,OFF) READ4(SC, HE_REGO_SUNI + 4 * (OFF))
488 #define WRITE_SUNI(SC,OFF,VAL) WRITE4(SC, HE_REGO_SUNI + 4 * (OFF), (VAL))
/* Access to the card's local buffer memory via the LB_MEM handshake registers. */
490 #define READ_LB4(SC,OFF) \
492 WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF)); \
493 WRITE4(SC, HE_REGO_LB_MEM_ACCESS, \
494 (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_READ)); \
495 while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\
497 READ4(SC, HE_REGO_LB_MEM_DATA); \
499 #define WRITE_LB4(SC,OFF,VAL) \
501 WRITE4(SC, HE_REGO_LB_MEM_ADDR, (OFF)); \
502 WRITE4(SC, HE_REGO_LB_MEM_DATA, (VAL)); \
503 WRITE4(SC, HE_REGO_LB_MEM_ACCESS, \
504 (HE_REGM_LB_MEM_HNDSHK | HE_REGM_LB_MEM_WRITE)); \
505 while((READ4(SC, HE_REGO_LB_MEM_ACCESS) & HE_REGM_LB_MEM_HNDSHK))\
509 #define WRITE_MEM4(SC,OFF,VAL,SPACE) \
511 WRITE4(SC, HE_REGO_CON_DAT, (VAL)); \
512 WRITE4(SC, HE_REGO_CON_CTL, \
513 (SPACE | HE_REGM_CON_WE | HE_REGM_CON_STATUS | (OFF))); \
514 while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0) \
518 #define READ_MEM4(SC,OFF,SPACE) \
520 WRITE4(SC, HE_REGO_CON_CTL, \
521 (SPACE | HE_REGM_CON_STATUS | (OFF))); \
522 while((READ4(SC, HE_REGO_CON_CTL) & HE_REGM_CON_STATUS) != 0) \
524 READ4(SC, HE_REGO_CON_DAT); \
527 #define WRITE_TCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_TCM)
528 #define WRITE_RCM4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_RCM)
529 #define WRITE_MBOX4(SC,OFF,VAL) WRITE_MEM4(SC,(OFF),(VAL),HE_REGM_CON_MBOX)
531 #define READ_TCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_TCM)
532 #define READ_RCM4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_RCM)
533 #define READ_MBOX4(SC,OFF) READ_MEM4(SC,(OFF),HE_REGM_CON_MBOX)
/* Partial-word variants: BYTES is a byte-enable mask, inverted into the disable bits. */
535 #define WRITE_TCM(SC,OFF,BYTES,VAL) \
536 WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS), \
537 (VAL), HE_REGM_CON_TCM)
538 #define WRITE_RCM(SC,OFF,BYTES,VAL) \
539 WRITE_MEM4(SC,(OFF) | ((~(BYTES) & 0xf) << HE_REGS_CON_DIS), \
540 (VAL), HE_REGM_CON_RCM)
/* Transmit/receive state registers, spread over the TSRA-TSRD / RSRA-RSRB areas. */
542 #define READ_TSR(SC,CID,NR) \
546 _v = READ_TCM4(SC, HE_REGO_TSRA(0,CID,NR)); \
547 } else if((NR) <= 11) { \
548 _v = READ_TCM4(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)));\
549 } else if((NR) <= 13) { \
550 _v = READ_TCM4(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)));\
552 _v = READ_TCM4(SC, HE_REGO_TSRD((SC)->tsrd,CID)); \
557 #define WRITE_TSR(SC,CID,NR,BEN,VAL) \
560 WRITE_TCM(SC, HE_REGO_TSRA(0,CID,NR),BEN,VAL); \
561 } else if((NR) <= 11) { \
562 WRITE_TCM(SC, HE_REGO_TSRB((SC)->tsrb,CID,(NR-8)),BEN,VAL);\
563 } else if((NR) <= 13) { \
564 WRITE_TCM(SC, HE_REGO_TSRC((SC)->tsrc,CID,(NR-12)),BEN,VAL);\
566 WRITE_TCM(SC, HE_REGO_TSRD((SC)->tsrd,CID),BEN,VAL); \
570 #define READ_RSR(SC,CID,NR) \
574 _v = READ_RCM4(SC, HE_REGO_RSRA(0,CID,NR)); \
576 _v = READ_RCM4(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)));\
581 #define WRITE_RSR(SC,CID,NR,BEN,VAL) \
584 WRITE_RCM(SC, HE_REGO_RSRA(0,CID,NR),BEN,VAL); \
586 WRITE_RCM(SC, HE_REGO_RSRB((SC)->rsrb,CID,(NR-8)),BEN,VAL);\
591 #define DBG(SC, FL, PRINT) do { \
592 if((SC)->debug & DBG_##FL) { \
593 if_printf((SC)->ifp, "%s: ", __func__); \
600 DBG_DUMMY = 0x0001, /* default value for -DHATM_DEBUG */
/* No-op DBG variant when debugging is compiled out (presumably under an #else not visible here). */
615 #define DBG(SC, FL, PRINT)
/* Prototypes of the driver's global entry points (declarations only). */
618 u_int hatm_cps2atmf(uint32_t);
619 u_int hatm_atmf2cps(uint32_t);
621 void hatm_intr(void *);
622 int hatm_ioctl(struct ifnet *, u_long, caddr_t);
623 void hatm_initialize(struct hatm_softc *);
624 void hatm_stop(struct hatm_softc *sc);
625 void hatm_start(struct ifnet *);
627 void hatm_rx(struct hatm_softc *sc, u_int cid, u_int flags, struct mbuf *m,
629 void hatm_tx_complete(struct hatm_softc *sc, struct tpd *tpd, uint32_t);
631 int hatm_tx_vcc_can_open(struct hatm_softc *sc, u_int cid, struct hevcc *);
632 void hatm_tx_vcc_open(struct hatm_softc *sc, u_int cid);
633 void hatm_rx_vcc_open(struct hatm_softc *sc, u_int cid);
634 void hatm_tx_vcc_close(struct hatm_softc *sc, u_int cid);
635 void hatm_rx_vcc_close(struct hatm_softc *sc, u_int cid);
636 void hatm_tx_vcc_closed(struct hatm_softc *sc, u_int cid);
637 void hatm_vcc_closed(struct hatm_softc *sc, u_int cid);
638 void hatm_load_vc(struct hatm_softc *sc, u_int cid, int reopen);
640 void hatm_ext_free(struct mbufx_free **, struct mbufx_free *);