2 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
5 * Redistribution and use in source and binary forms, with or without
6 * modification, are permitted provided that the following conditions are
9 * 1. Redistributions of source code must retain the above copyright
10 * notice, this list of conditions and the following disclaimer.
11 * 2. Redistributions in binary form must reproduce the above copyright
12 * notice, this list of conditions and the following disclaimer in
13 * the documentation and/or other materials provided with the
16 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
17 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
19 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
26 * THE POSSIBILITY OF SUCH DAMAGE.
30 #include <sys/cdefs.h>
31 __FBSDID("$FreeBSD$");
32 #include <sys/types.h>
33 #include <sys/systm.h>
34 #include <sys/param.h>
36 #include <sys/mutex.h>
38 #include <sys/limits.h>
43 #include <sys/kernel.h>
44 #include <sys/kthread.h>
46 #include <sys/resourcevar.h>
47 #include <sys/sched.h>
48 #include <sys/unistd.h>
49 #include <sys/sysctl.h>
50 #include <sys/malloc.h>
52 #include <machine/reg.h>
53 #include <machine/cpu.h>
54 #include <machine/hwfunc.h>
55 #include <machine/mips_opcode.h>
56 #include <machine/param.h>
57 #include <machine/intr_machdep.h>
59 #include <mips/nlm/hal/mips-extns.h>
60 #include <mips/nlm/hal/haldefs.h>
61 #include <mips/nlm/hal/iomap.h>
62 #include <mips/nlm/hal/cop2.h>
63 #include <mips/nlm/hal/fmn.h>
64 #include <mips/nlm/hal/pic.h>
66 #include <mips/nlm/msgring.h>
67 #include <mips/nlm/interrupt.h>
68 #include <mips/nlm/xlp.h>
70 #define MSGRNG_NSTATIONS 1024
/*
72 * Keep track of our message ring handler threads, each core has a
73 * different message station. Ideally we will need to start a few
74 * message handling threads every core, and wake them up depending on
 * load.
 */
77 struct msgring_thread {
78 struct thread *thread; /* msgring handler threads */
79 int needed; /* thread needs to wake up */
/* One slot per hardware thread in the system. */
81 static struct msgring_thread msgring_threads[XLP_MAX_CORES * XLP_MAX_THREADS];
82 static struct proc *msgring_proc; /* all threads are under a proc */
/*
85 * The device drivers can register a handler for the messages sent
86 * from a station (corresponding to the device).
 */
88 struct tx_stn_handler {
89 msgring_handler action;
/* Per-station dispatch table, indexed by FMN source station id. */
92 static struct tx_stn_handler msgmap[MSGRNG_NSTATIONS];
/* Protects updates to msgmap[] (see register_msgring_handler()). */
93 static struct mtx msgmap_lock;
/* Mask of hw threads that run a message handler thread. */
94 uint32_t xlp_msg_thread_mask;
95 static int xlp_msg_threads_per_core = XLP_MAX_THREADS;
/* Forward declarations. */
97 static void create_msgring_thread(int hwtid);
98 static int msgring_process_fast_intr(void *arg);
/*
 * Debug/statistics counters, one entry per hardware thread
 * (exported through the debug.msgring sysctl below).
 */
101 static int msgring_nintr[XLP_MAX_CORES * XLP_MAX_THREADS];
102 static int msgring_wakeup_sleep[XLP_MAX_CORES * XLP_MAX_THREADS];
103 static int msgring_wakeup_nosleep[XLP_MAX_CORES * XLP_MAX_THREADS];
/* Messages handled per hw thread, per VC (4 VCs per thread). */
104 static int fmn_msgcount[XLP_MAX_CORES * XLP_MAX_THREADS][4];
105 static int fmn_loops[XLP_MAX_CORES * XLP_MAX_THREADS];
107 /* Whether polled driver implementation */
108 static int polled = 0;
110 /* We do only i/o device credit setup here. CPU credit setup is now
111 * moved to xlp_msgring_cpu_init() so that the credits get setup
112 * only if the CPU exists. xlp_msgring_cpu_init() gets called from
113 * platform_init_ap; and this makes it easy for us to setup CMS
114 * credits for various types of XLP chips, with varying number of
 * cpus and io devices.
 */
118 xlp_cms_credit_setup(int credit)
120 uint64_t cmspcibase, cmsbase, pcibase;
/* Walk every node; skip nodes whose CMS device is absent. */
125 for (i = 0; i < XLP_MAX_NODES; i++) {
126 cmspcibase = nlm_get_cms_pcibase(i);
127 if (!nlm_dev_exists(XLP_IO_CMS_OFFSET(i)))
129 cmsbase = nlm_get_cms_regbase(i);
/* Number of CMS output queues on this node. */
130 maxqid = nlm_read_reg(cmspcibase, XLP_PCI_DEVINFO_REG0);
/* Scan the node's PCI-e dev/fn space for FMN-capable devices. */
131 for (dev = 0; dev < 8; dev++) {
132 for (fn = 0; fn < 8; fn++) {
133 devoffset = XLP_HDR_OFFSET(i, 0, dev, fn);
134 if (nlm_dev_exists(devoffset) == 0)
136 pcibase = nlm_pcicfg_base(devoffset);
/* First FMN queue id owned by this device. */
137 src = nlm_qidstart(pcibase);
141 printf("Setup CMS credits for queues ");
142 printf("[%d to %d] from src %d\n", 0,
/* Give 'credit' credits from this source to every queue. */
145 for (qid = 0; qid < maxqid; qid++)
146 nlm_cms_setup_credits(cmsbase, qid,
/*
 * Set up CMS credits for one CPU's FMN source queues.
 * Called per-CPU (from platform AP init) so credits are only
 * programmed for CPUs that actually exist.
 */
154 xlp_msgring_cpu_init(int node, int cpu, int credit)
156 uint64_t cmspcibase = nlm_get_cms_pcibase(node);
157 uint64_t cmsbase = nlm_get_cms_regbase(node);
158 int qid, maxqid, src;
160 maxqid = nlm_read_reg(cmspcibase, XLP_PCI_DEVINFO_REG0);
162 /* cpu credit setup is done only from thread-0 of each core */
164 src = cpu << 2; /* each thread has 4 vc's */
165 for (qid = 0; qid < maxqid; qid++)
166 nlm_cms_setup_credits(cmsbase, qid, src, credit);
171 * Drain out max_messages for the buckets set in the bucket mask.
172 * Use max_msgs = 0 to drain out all messages.
 */
175 xlp_handle_msg_vc(u_int vcmask, int max_msgs)
177 struct nlm_fmn_msg msg;
178 int srcid = 0, size = 0, code = 0;
179 struct tx_stn_handler *he;
180 uint32_t mflags, status;
181 int n_msgs = 0, vc, m, hwtid;
186 /* check if VC empty */
/* COP2 access must be bracketed by save/restore of CP2 enable. */
187 mflags = nlm_save_flags_cop2();
188 status = nlm_read_c2_msgstatus1();
189 nlm_restore_flags(mflags);
/* Bits 24..27 of msgstatus1 are "VC empty"; invert to get non-empty. */
191 msgmask = ((status >> 24) & 0xf) ^ 0xf;
196 for (vc = 0; vc < 4; vc++) {
197 if ((msgmask & (1 << vc)) == 0)
200 mflags = nlm_save_flags_cop2();
201 status = nlm_fmn_msgrcv(vc, &srcid, &size, &code,
203 nlm_restore_flags(mflags);
204 if (status != 0) /* no msg or error */
/*
 * BUG FIX: was "srcid < 0 && srcid >= 1024", which is
 * always false and never rejected a bad station id;
 * the range check needs '||'.
 */
206 if (srcid < 0 || srcid >= 1024) {
207 printf("[%s]: bad src id %d\n", __func__,
/* Dispatch to the handler registered for this station. */
212 if (he->action != NULL)
213 (he->action)(vc, size, code, srcid, &msg,
217 printf("[%s]: No Handler for msg from stn %d,"
218 " vc=%d, size=%d, msg0=%jx, dropping\n",
219 __func__, srcid, vc, size,
220 (uintmax_t)msg.msg[0]);
222 fmn_msgcount[hwtid][vc] += 1;
223 m++; /* msgs handled in this iter */
226 break; /* nothing done in this iter */
228 if (max_msgs > 0 && n_msgs >= max_msgs)
/*
 * Receive and throw away all pending messages on the VCs selected
 * by vcmask (used to flush stale messages before (re)initializing
 * a CPU's FMN state).
 */
236 xlp_discard_msg_vc(u_int vcmask)
238 struct nlm_fmn_msg msg;
239 int srcid = 0, size = 0, code = 0, vc;
240 uint32_t mflags, status;
242 for (vc = 0; vc < 4; vc++) {
/* COP2 access must be bracketed by save/restore of CP2 enable. */
244 mflags = nlm_save_flags_cop2();
245 status = nlm_fmn_msgrcv(vc, &srcid,
247 nlm_restore_flags(mflags);
249 /* break if there is no msg or error */
/*
 * Enable CMS interrupts for the 4 output queues belonging to 'cpu'
 * on 'node', with the given interrupt type and level watermark.
 */
257 xlp_cms_enable_intr(int node, int cpu, int type, int watermark)
262 cmsbase = nlm_get_cms_regbase(node);
264 for (i = 0; i < 4; i++) {
/* 4 queues per cpu; queue ids wrap within the 0..127 range. */
265 qid = (i + (cpu * 4)) & 0x7f;
266 nlm_cms_per_queue_level_intr(cmsbase, qid, type, watermark);
/* Timer interrupt disabled (timer value 0). */
267 nlm_cms_per_queue_timer_intr(cmsbase, qid, 0x1, 0);
/*
 * Fast interrupt filter for the message ring interrupt: ack the
 * interrupt and wake the per-CPU handler thread if it is sleeping.
 * Runs in filter (interrupt) context, so it must not sleep.
 */
272 msgring_process_fast_intr(void *arg)
274 struct msgring_thread *mthd;
279 mthd = &msgring_threads[cpu];
280 msgring_nintr[cpu]++;
283 /* clear pending interrupts */
284 nlm_write_c0_eirr(1ULL << IRQ_MSGRING);
286 /* wake up the target thread */
289 if (TD_AWAITING_INTR(td)) {
290 msgring_wakeup_sleep[cpu]++;
292 sched_add(td, SRQ_INTR);
/* Thread already running; it will pick the work up itself. */
294 msgring_wakeup_nosleep[cpu]++;
298 return (FILTER_HANDLED);
/*
 * Per-CPU message handler thread body.  arg is the hardware thread
 * id; the thread binds itself to that CPU, initializes the CPU's
 * FMN state, then loops handling messages, sleeping as an ithread
 * between interrupts.
 */
302 msgring_process(void * arg)
304 volatile struct msgring_thread *mthd;
306 uint32_t mflags, msgstatus1;
309 hwtid = (intptr_t)arg;
310 mthd = &msgring_threads[hwtid];
312 KASSERT(curthread == td,
313 ("%s:msg_ithread and proc linkage out of sync", __func__));
315 /* First bind this thread to the right CPU */
317 sched_bind(td, xlp_hwtid_to_cpuid[hwtid]);
/* Sanity check: after binding we must be on the expected hw thread. */
320 if (hwtid != nlm_cpuid())
321 printf("Misscheduled hwtid %d != cpuid %d\n", hwtid,
/* Flush stale messages, then set up credits and FMN for this CPU. */
324 xlp_discard_msg_vc(0xf);
325 xlp_msgring_cpu_init(nlm_nodeid(), nlm_cpuid(), CMS_DEFAULT_CREDIT);
327 mflags = nlm_save_flags_cop2();
328 nlm_fmn_cpu_init(IRQ_MSGRING, 0, 0, 0, 0, 0);
329 nlm_restore_flags(mflags);
330 xlp_cms_enable_intr(nlm_nodeid(), nlm_cpuid(), 0x2, 0);
331 /* clear pending interrupts.
332 * they will get re-raised if still valid */
333 nlm_write_c0_eirr(1ULL << IRQ_MSGRING);
336 /* start processing messages */
338 atomic_store_rel_int(&mthd->needed, 0);
/* Drain all VCs completely (max_msgs = 0). */
339 nmsgs = xlp_handle_msg_vc(0xf, 0);
343 /* clear VC-pend bits */
344 mflags = nlm_save_flags_cop2();
345 msgstatus1 = nlm_read_c2_msgstatus1();
346 msgstatus1 |= (0xf << 16);
347 nlm_write_c2_msgstatus1(msgstatus1);
348 nlm_restore_flags(mflags);
/* Go back to sleep as an interrupt thread until the next wakeup. */
355 sched_class(td, PRI_ITHD);
357 mi_switch(SW_VOL, NULL);
/*
 * Create the message handler kthread for hardware thread 'hwtid'
 * under msgring_proc, and schedule it as an interrupt-class thread.
 */
367 create_msgring_thread(int hwtid)
369 struct msgring_thread *mthd;
373 mthd = &msgring_threads[hwtid];
/* RFSTOPPED: thread is created stopped; we class and add it below. */
374 error = kproc_kthread_add(msgring_process, (void *)(uintptr_t)hwtid,
375 &msgring_proc, &td, RFSTOPPED, 2, "msgrngproc",
378 panic("kproc_kthread_add() failed with %d", error);
382 sched_class(td, PRI_ITHD);
383 sched_add(td, SRQ_INTR);
/*
 * Register 'action' as the handler for messages arriving from
 * stations [startb, endb] (inclusive).  A bucket may only be
 * registered once; re-registration is a KASSERT failure.
 */
388 register_msgring_handler(int startb, int endb, msgring_handler action,
394 printf("Register handler %d-%d %p(%p)\n",
395 startb, endb, action, arg);
396 KASSERT(startb >= 0 && startb <= endb && endb < MSGRNG_NSTATIONS,
397 ("Invalid value for bucket range %d,%d", startb, endb));
/* Spin lock: this table is consulted from interrupt context. */
399 mtx_lock_spin(&msgmap_lock);
400 for (i = startb; i <= endb; i++) {
401 KASSERT(msgmap[i].action == NULL,
402 ("Bucket %d already used [action %p]", i, msgmap[i].action));
403 msgmap[i].action = action;
406 mtx_unlock_spin(&msgmap_lock);
411 * Initialize the messaging subsystem.
 *
413 * Message Stations are shared among all threads in a cpu core, this
414 * has to be called once from every core which is online.
 */
417 xlp_msgring_config(void *arg)
420 unsigned int thrmask, mask;
423 /* used polled handler for Ax silion */
424 if (nlm_is_xlp8xx_ax())
427 /* Don't poll on all threads, if polled */
429 xlp_msg_threads_per_core -= 1;
431 mtx_init(&msgmap_lock, "msgring", NULL, MTX_SPIN);
/* Never run more msg threads per core than actual hw threads. */
432 if (xlp_threads_per_core < xlp_msg_threads_per_core)
433 xlp_msg_threads_per_core = xlp_threads_per_core;
434 thrmask = ((1 << xlp_msg_threads_per_core) - 1);
/* Replicate the per-core thread mask across all cores. */
436 for (i = 0; i < XLP_MAX_CORES; i++) {
437 mask <<= XLP_MAX_THREADS;
/* Restrict to hw threads that actually exist. */
440 xlp_msg_thread_mask = xlp_hw_thread_mask & mask;
442 printf("CMS Message handler thread mask %#jx\n",
443 (uintmax_t)xlp_msg_thread_mask);
445 xlp_cms_credit_setup(CMS_DEFAULT_CREDIT);
/* Boot CPU (hwtid 0) gets its handler thread here; the rest
 * are created after SMP startup (see start_msgring_threads). */
446 create_msgring_thread(0);
447 cpu_establish_hardintr("msgring", msgring_process_fast_intr, NULL,
448 NULL, IRQ_MSGRING, INTR_TYPE_NET, &cookie);
/*
452 * Start message ring processing threads on other CPUs, after SMP start
 */
455 start_msgring_threads(void *arg)
/* hwt 0 was started earlier from xlp_msgring_config(). */
459 for (hwt = 1; hwt < XLP_MAX_CORES * XLP_MAX_THREADS; hwt++) {
460 if ((xlp_msg_thread_mask & (1 << hwt)) == 0)
462 create_msgring_thread(hwt);
/* Config runs early (drivers); thread startup waits for SMP. */
466 SYSINIT(xlp_msgring_config, SI_SUB_DRIVERS, SI_ORDER_FIRST,
467 xlp_msgring_config, NULL);
468 SYSINIT(start_msgring_threads, SI_SUB_SMP, SI_ORDER_MIDDLE,
469 start_msgring_threads, NULL);
/*
472 * DEBUG support, XXX: static buffer, not locked
 *
 * Sysctl handler: dump per-hw-thread, per-VC message counts and
 * loop counters into an sbuf for debug.msgring.
 */
475 sys_print_debug(SYSCTL_HANDLER_ARGS)
480 sbuf_new_for_sysctl(&sb, NULL, 64, req);
482 "\nID vc0 vc1 vc2 vc3 loops\n");
/* NOTE(review): fixed bound of 32 hw threads here; elsewhere the
 * arrays are sized XLP_MAX_CORES * XLP_MAX_THREADS — confirm equal. */
483 for (i = 0; i < 32; i++) {
484 if ((xlp_hw_thread_mask & (1 << i)) == 0)
486 sbuf_printf(&sb, "%2d: %8d %8d %8d %8d %8d\n", i,
487 fmn_msgcount[i][0], fmn_msgcount[i][1],
488 fmn_msgcount[i][2], fmn_msgcount[i][3],
491 error = sbuf_finish(&sb);
496 SYSCTL_PROC(_debug, OID_AUTO, msgring, CTLTYPE_STRING | CTLFLAG_RD, 0, 0,
497 sys_print_debug, "A", "msgring debug info");