2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright 2003-2011 Netlogic Microsystems (Netlogic). All rights
7 * Redistribution and use in source and binary forms, with or without
8 * modification, are permitted provided that the following conditions are
11 * 1. Redistributions of source code must retain the above copyright
12 * notice, this list of conditions and the following disclaimer.
13 * 2. Redistributions in binary form must reproduce the above copyright
14 * notice, this list of conditions and the following disclaimer in
15 * the documentation and/or other materials provided with the
18 * THIS SOFTWARE IS PROVIDED BY Netlogic Microsystems ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
21 * PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL NETLOGIC OR CONTRIBUTORS BE
22 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
23 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
24 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
25 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
26 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
27 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF
28 * THE POSSIBILITY OF SUCH DAMAGE.
32 #include <sys/cdefs.h>
33 __FBSDID("$FreeBSD$");
34 #include <sys/types.h>
35 #include <sys/systm.h>
36 #include <sys/param.h>
38 #include <sys/mutex.h>
40 #include <sys/limits.h>
45 #include <sys/kernel.h>
46 #include <sys/kthread.h>
48 #include <sys/resourcevar.h>
49 #include <sys/sched.h>
50 #include <sys/unistd.h>
51 #include <sys/sysctl.h>
52 #include <sys/malloc.h>
54 #include <machine/reg.h>
55 #include <machine/cpu.h>
56 #include <machine/hwfunc.h>
57 #include <machine/mips_opcode.h>
58 #include <machine/intr_machdep.h>
60 #include <mips/nlm/hal/mips-extns.h>
61 #include <mips/nlm/hal/haldefs.h>
62 #include <mips/nlm/hal/iomap.h>
63 #include <mips/nlm/hal/cop2.h>
64 #include <mips/nlm/hal/fmn.h>
65 #include <mips/nlm/hal/pic.h>
67 #include <mips/nlm/msgring.h>
68 #include <mips/nlm/interrupt.h>
69 #include <mips/nlm/xlp.h>
/* Total number of FMN message stations addressable as handler slots. */
71 #define MSGRNG_NSTATIONS 1024
73 * Keep track of our message ring handler threads, each core has a
74 * different message station. Ideally we will need to start a few
75 * message handling threads every core, and wake them up depending on
/*
 * Per-hardware-thread handler state; one entry per possible hw thread
 * (XLP_MAX_CORES * XLP_MAX_THREADS), indexed by hw thread id.
 */
78 struct msgring_thread {
79 struct thread *thread; /* msgring handler threads */
80 int needed; /* thread needs to wake up */
82 static struct msgring_thread msgring_threads[XLP_MAX_CORES * XLP_MAX_THREADS];
83 static struct proc *msgring_proc; /* all threads are under a proc */
86 * The device drivers can register a handler for the messages sent
87 * from a station (corresponding to the device).
/* One callback slot per source station id (0..MSGRNG_NSTATIONS-1). */
89 struct tx_stn_handler {
90 msgring_handler action;
93 static struct tx_stn_handler msgmap[MSGRNG_NSTATIONS];
/* Spin mutex protecting registration into msgmap[] (see register_msgring_handler). */
94 static struct mtx msgmap_lock;
/* Bitmask of hw threads that run a message-handling thread; set in xlp_msgring_config. */
95 uint32_t xlp_msg_thread_mask;
96 static int xlp_msg_threads_per_core = XLP_MAX_THREADS;
98 static void create_msgring_thread(int hwtid);
99 static int msgring_process_fast_intr(void *arg);
/* Per-hw-thread debug counters, exported via the debug.msgring sysctl below. */
102 static int msgring_nintr[XLP_MAX_CORES * XLP_MAX_THREADS];
103 static int msgring_wakeup_sleep[XLP_MAX_CORES * XLP_MAX_THREADS];
104 static int msgring_wakeup_nosleep[XLP_MAX_CORES * XLP_MAX_THREADS];
/* Message counts per hw thread, per virtual channel (4 VCs). */
105 static int fmn_msgcount[XLP_MAX_CORES * XLP_MAX_THREADS][4];
106 static int fmn_loops[XLP_MAX_CORES * XLP_MAX_THREADS];
108 /* Whether polled driver implementation */
109 static int polled = 0;
111 /* We do only i/o device credit setup here. CPU credit setup is now
112 * moved to xlp_msgring_cpu_init() so that the credits get setup
113 * only if the CPU exists. xlp_msgring_cpu_init() gets called from
114 * platform_init_ap; and this makes it easy for us to setup CMS
115 * credits for various types of XLP chips, with varying number of
/*
 * Walk every node and every PCI dev/fn under it, and program 'credit'
 * CMS credits from each existing device's starting queue id toward all
 * queues [0, maxqid).  Devices (and nodes) that do not exist are skipped.
 * NOTE(review): several lines appear elided from this extract (loop
 * braces, 'continue' statements, local declarations) — original line
 * numbering in the file skips here.
 */
119 xlp_cms_credit_setup(int credit)
121 uint64_t cmspcibase, cmsbase, pcibase;
126 for (i = 0; i < XLP_MAX_NODES; i++) {
127 cmspcibase = nlm_get_cms_pcibase(i);
/* Skip nodes whose CMS block is not present. */
128 if (!nlm_dev_exists(XLP_IO_CMS_OFFSET(i)))
130 cmsbase = nlm_get_cms_regbase(i);
/* Highest queue id comes from the CMS PCI device-info register. */
131 maxqid = nlm_read_reg(cmspcibase, XLP_PCI_DEVINFO_REG0);
132 for (dev = 0; dev < 8; dev++) {
133 for (fn = 0; fn < 8; fn++) {
134 devoffset = XLP_HDR_OFFSET(i, 0, dev, fn);
135 if (nlm_dev_exists(devoffset) == 0)
137 pcibase = nlm_pcicfg_base(devoffset);
/* First source queue id owned by this device. */
138 src = nlm_qidstart(pcibase);
142 printf("Setup CMS credits for queues ");
143 printf("[%d to %d] from src %d\n", 0,
146 for (qid = 0; qid < maxqid; qid++)
147 nlm_cms_setup_credits(cmsbase, qid,
/*
 * Set up CMS credits for one CPU: program 'credit' credits from this
 * cpu's first VC source queue (cpu << 2; 4 VCs per thread) toward every
 * queue on 'node'.  Called per-CPU from platform AP startup so credits
 * are configured only for CPUs that actually exist.
 * NOTE(review): the comment below says this runs only from thread-0 of
 * each core, but the guard itself is elided from this extract — confirm
 * against the full file.
 */
155 xlp_msgring_cpu_init(int node, int cpu, int credit)
157 uint64_t cmspcibase = nlm_get_cms_pcibase(node);
158 uint64_t cmsbase = nlm_get_cms_regbase(node);
159 int qid, maxqid, src;
161 maxqid = nlm_read_reg(cmspcibase, XLP_PCI_DEVINFO_REG0);
163 /* cpu credit setup is done only from thread-0 of each core */
165 src = cpu << 2; /* each thread has 4 vc's */
166 for (qid = 0; qid < maxqid; qid++)
167 nlm_cms_setup_credits(cmsbase, qid, src, credit);
172 * Drain out max_messages for the buckets set in the bucket mask.
173 * Use max_msgs = 0 to drain out all messages.
/*
 * Receive and dispatch pending FMN messages on the VCs selected by
 * 'vcmask'.  For each message, the handler registered for the source
 * station (msgmap[srcid].action) is invoked; messages from stations
 * with no handler are logged and dropped.  COP2 access is bracketed by
 * nlm_save_flags_cop2()/nlm_restore_flags() around every FMN register
 * access.  Returns (presumably) the number of messages handled —
 * the return statement is elided from this extract.
 */
176 xlp_handle_msg_vc(u_int vcmask, int max_msgs)
178 struct nlm_fmn_msg msg;
179 int srcid = 0, size = 0, code = 0;
180 struct tx_stn_handler *he;
181 uint32_t mflags, status;
182 int n_msgs = 0, vc, m, hwtid;
187 /* check if VC empty */
188 mflags = nlm_save_flags_cop2();
189 status = nlm_read_c2_msgstatus1();
190 nlm_restore_flags(mflags);
/* Bits 24..27 of msgstatus1 are per-VC "empty" flags; invert to get "has msgs". */
192 msgmask = ((status >> 24) & 0xf) ^ 0xf;
197 for (vc = 0; vc < 4; vc++) {
198 if ((msgmask & (1 << vc)) == 0)
201 mflags = nlm_save_flags_cop2();
202 status = nlm_fmn_msgrcv(vc, &srcid, &size, &code,
204 nlm_restore_flags(mflags);
205 if (status != 0) /* no msg or error */
/* Sanity-check source id before indexing msgmap[] (1024 == MSGRNG_NSTATIONS). */
207 if (srcid < 0 || srcid >= 1024) {
208 printf("[%s]: bad src id %d\n", __func__,
213 if(he->action != NULL)
214 (he->action)(vc, size, code, srcid, &msg,
/* NOTE(review): "droppinge" in the log string below is a typo for
 * "dropping" — runtime string, left untouched here. */
218 printf("[%s]: No Handler for msg from stn %d,"
219 " vc=%d, size=%d, msg0=%jx, droppinge\n",
220 __func__, srcid, vc, size,
221 (uintmax_t)msg.msg[0]);
223 fmn_msgcount[hwtid][vc] += 1;
224 m++; /* msgs handled in this iter */
227 break; /* nothing done in this iter */
/* Stop once the caller's quota is reached; max_msgs == 0 means drain all. */
229 if (max_msgs > 0 && n_msgs >= max_msgs)
/*
 * Drain and discard all pending messages on the VCs in 'vcmask'
 * without dispatching them to any handler.  Used before (re)initializing
 * per-CPU FMN state in msgring_process().  COP2 access is bracketed by
 * save/restore of the COP2 enable flags.
 * NOTE(review): the per-VC loop body's inner receive loop and its
 * termination are partially elided from this extract.
 */
237 xlp_discard_msg_vc(u_int vcmask)
239 struct nlm_fmn_msg msg;
240 int srcid = 0, size = 0, code = 0, vc;
241 uint32_t mflags, status;
243 for (vc = 0; vc < 4; vc++) {
245 mflags = nlm_save_flags_cop2();
246 status = nlm_fmn_msgrcv(vc, &srcid,
248 nlm_restore_flags(mflags);
250 /* break if there is no msg or error */
/*
 * Enable CMS level interrupts (of 'type', with 'watermark') on the four
 * VC queues belonging to 'cpu' on 'node'; the per-queue timer interrupt
 * is enabled with a zero timeout.  Queue ids wrap at 0x7f.
 */
258 xlp_cms_enable_intr(int node, int cpu, int type, int watermark)
263 cmsbase = nlm_get_cms_regbase(node);
265 for (i = 0; i < 4; i++) {
/* Each hw thread owns 4 consecutive queues starting at cpu*4. */
266 qid = (i + (cpu * 4)) & 0x7f;
267 nlm_cms_per_queue_level_intr(cmsbase, qid, type, watermark);
268 nlm_cms_per_queue_timer_intr(cmsbase, qid, 0x1, 0);
/*
 * Fast interrupt filter for IRQ_MSGRING: acknowledge the interrupt and
 * wake this CPU's message-handling kthread if it is waiting; otherwise
 * just count a no-sleep wakeup (the thread is already running and will
 * pick up the messages).  Always returns FILTER_HANDLED.
 * NOTE(review): the lines that fetch 'cpu'/'td' and set mthd->needed
 * are elided from this extract.
 */
273 msgring_process_fast_intr(void *arg)
275 struct msgring_thread *mthd;
280 mthd = &msgring_threads[cpu];
281 msgring_nintr[cpu]++;
284 /* clear pending interrupts */
285 nlm_write_c0_eirr(1ULL << IRQ_MSGRING);
287 /* wake up the target thread */
290 if (TD_AWAITING_INTR(td)) {
291 msgring_wakeup_sleep[cpu]++;
/* Thread was in interrupt-wait; put it back on a run queue. */
293 sched_add(td, SRQ_INTR);
296 msgring_wakeup_nosleep[cpu]++;
299 return (FILTER_HANDLED);
/*
 * Per-hw-thread message handler kthread body.  'arg' carries the hw
 * thread id.  The thread binds itself to its CPU, discards stale
 * messages, initializes per-CPU FMN state and CMS credits/interrupts,
 * then loops handling messages; when nothing is pending it clears the
 * VC-pend bits and goes to interrupt-wait until the fast intr filter
 * wakes it (see msgring_process_fast_intr).
 * NOTE(review): the for(;;) loop header, the sleep/thread_lock sequence
 * around sched_class/TD_SET_IWAIT, and the loop tail are elided from
 * this extract — original line numbering skips around them.
 */
303 msgring_process(void * arg)
305 volatile struct msgring_thread *mthd;
307 uint32_t mflags, msgstatus1;
310 hwtid = (intptr_t)arg;
311 mthd = &msgring_threads[hwtid];
313 KASSERT(curthread == td,
314 ("%s:msg_ithread and proc linkage out of sync", __func__));
316 /* First bind this thread to the right CPU */
318 sched_bind(td, xlp_hwtid_to_cpuid[hwtid]);
/* Diagnostic only: verify the bind actually landed us on our hw thread. */
321 if (hwtid != nlm_cpuid())
322 printf("Misscheduled hwtid %d != cpuid %d\n", hwtid,
/* Flush anything queued before we were ready, then set up credits. */
325 xlp_discard_msg_vc(0xf);
326 xlp_msgring_cpu_init(nlm_nodeid(), nlm_cpuid(), CMS_DEFAULT_CREDIT);
328 mflags = nlm_save_flags_cop2();
329 nlm_fmn_cpu_init(IRQ_MSGRING, 0, 0, 0, 0, 0);
330 nlm_restore_flags(mflags);
331 xlp_cms_enable_intr(nlm_nodeid(), nlm_cpuid(), 0x2, 0);
332 /* clear pending interrupts.
333 * they will get re-raised if still valid */
334 nlm_write_c0_eirr(1ULL << IRQ_MSGRING);
337 /* start processing messages */
/* Release-store so the intr filter's wakeup flag ordering is correct. */
339 atomic_store_rel_int(&mthd->needed, 0);
340 nmsgs = xlp_handle_msg_vc(0xf, 0);
344 /* clear VC-pend bits */
345 mflags = nlm_save_flags_cop2();
346 msgstatus1 = nlm_read_c2_msgstatus1();
347 msgstatus1 |= (0xf << 16);
348 nlm_write_c2_msgstatus1(msgstatus1);
349 nlm_restore_flags(mflags);
/* Demote to interrupt-thread priority class before sleeping. */
356 sched_class(td, PRI_ITHD);
/*
 * Create the message-handling kthread for hw thread 'hwtid' under the
 * shared msgring_proc, record it in msgring_threads[], give it
 * interrupt-thread class and queue it to run.  Panics if the kthread
 * cannot be created.  Threads are named "msgrngproc" with a per-thread
 * suffix (elided here).
 */
367 create_msgring_thread(int hwtid)
369 struct msgring_thread *mthd;
373 mthd = &msgring_threads[hwtid];
/* RFSTOPPED: created stopped so we can set class before it runs. */
374 error = kproc_kthread_add(msgring_process, (void *)(uintptr_t)hwtid,
375 &msgring_proc, &td, RFSTOPPED, 2, "msgrngproc",
378 panic("kproc_kthread_add() failed with %d", error);
382 sched_class(td, PRI_ITHD);
383 sched_add(td, SRQ_INTR);
/*
 * Register 'action' as the message handler for every source station in
 * the inclusive bucket range [startb, endb].  Asserts the range is
 * valid and that no bucket is already claimed.  msgmap[] updates are
 * serialized with the msgmap_lock spin mutex.  The handler argument
 * ('arg') is stored alongside the action (assignment elided from this
 * extract — confirm against the full file).
 */
387 register_msgring_handler(int startb, int endb, msgring_handler action,
393 printf("Register handler %d-%d %p(%p)\n",
394 startb, endb, action, arg);
395 KASSERT(startb >= 0 && startb <= endb && endb < MSGRNG_NSTATIONS,
396 ("Invalid value for bucket range %d,%d", startb, endb));
398 mtx_lock_spin(&msgmap_lock);
399 for (i = startb; i <= endb; i++) {
400 KASSERT(msgmap[i].action == NULL,
401 ("Bucket %d already used [action %p]", i, msgmap[i].action));
402 msgmap[i].action = action;
405 mtx_unlock_spin(&msgmap_lock);
410 * Initialize the messaging subsystem.
412 * Message Stations are shared among all threads in a cpu core, this
413 * has to be called once from every core which is online.
/*
 * SYSINIT hook (SI_SUB_DRIVERS): decide polled vs. interrupt mode
 * (polled for XLP 8xx Ax silicon), compute the mask of hw threads that
 * will run message handlers, set up CMS credits, start the hw-thread-0
 * handler, and install the fast interrupt handler for IRQ_MSGRING.
 * NOTE(review): lines are elided here (the 'polled' assignment, the
 * core-mask accumulation inside the loop, local declarations).
 */
416 xlp_msgring_config(void *arg)
419 unsigned int thrmask, mask;
422 /* used polled handler for Ax silion */
423 if (nlm_is_xlp8xx_ax())
426 /* Don't poll on all threads, if polled */
428 xlp_msg_threads_per_core -= 1;
430 mtx_init(&msgmap_lock, "msgring", NULL, MTX_SPIN);
/* Never configure more handler threads than actual threads per core. */
431 if (xlp_threads_per_core < xlp_msg_threads_per_core)
432 xlp_msg_threads_per_core = xlp_threads_per_core;
433 thrmask = ((1 << xlp_msg_threads_per_core) - 1);
435 for (i = 0; i < XLP_MAX_CORES; i++) {
436 mask <<= XLP_MAX_THREADS;
/* Only hw threads that exist can run handlers. */
439 xlp_msg_thread_mask = xlp_hw_thread_mask & mask;
441 printf("CMS Message handler thread mask %#jx\n",
442 (uintmax_t)xlp_msg_thread_mask);
444 xlp_cms_credit_setup(CMS_DEFAULT_CREDIT);
/* hw thread 0 is started here; the rest start after SMP (see below). */
445 create_msgring_thread(0);
446 cpu_establish_hardintr("msgring", msgring_process_fast_intr, NULL,
447 NULL, IRQ_MSGRING, INTR_TYPE_NET, &cookie);
451 * Start message ring processing threads on other CPUs, after SMP start
/*
 * SYSINIT hook (SI_SUB_SMP): create handler kthreads for every hw
 * thread in xlp_msg_thread_mask except hw thread 0, which was already
 * started in xlp_msgring_config().
 */
454 start_msgring_threads(void *arg)
458 for (hwt = 1; hwt < XLP_MAX_CORES * XLP_MAX_THREADS; hwt++) {
459 if ((xlp_msg_thread_mask & (1 << hwt)) == 0)
461 create_msgring_thread(hwt);
/* Boot ordering: config runs at driver init, remaining threads after SMP. */
465 SYSINIT(xlp_msgring_config, SI_SUB_DRIVERS, SI_ORDER_FIRST,
466 xlp_msgring_config, NULL);
467 SYSINIT(start_msgring_threads, SI_SUB_SMP, SI_ORDER_MIDDLE,
468 start_msgring_threads, NULL);
471 * DEBUG support, XXX: static buffer, not locked
/*
 * sysctl handler for debug.msgring: dump per-hw-thread VC message
 * counts and loop counts into an sbuf.  Counters are read without
 * locking (debug-only, per the comment above).  Only the first 32 hw
 * threads are reported (fixed loop bound below).
 * NOTE(review): the sbuf_delete()/return lines are elided from this
 * extract.
 */
474 sys_print_debug(SYSCTL_HANDLER_ARGS)
479 sbuf_new_for_sysctl(&sb, NULL, 64, req);
481 "\nID vc0 vc1 vc2 vc3 loops\n");
482 for (i = 0; i < 32; i++) {
/* Skip hw threads that don't exist on this chip. */
483 if ((xlp_hw_thread_mask & (1 << i)) == 0)
485 sbuf_printf(&sb, "%2d: %8d %8d %8d %8d %8d\n", i,
486 fmn_msgcount[i][0], fmn_msgcount[i][1],
487 fmn_msgcount[i][2], fmn_msgcount[i][3],
490 error = sbuf_finish(&sb);
/* Exposed read-only as debug.msgring (string output). */
495 SYSCTL_PROC(_debug, OID_AUTO, msgring,
496 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_NEEDGIANT, 0, 0,
497 sys_print_debug, "A",
498 "msgring debug info");