2 * This file is provided under a dual BSD/GPLv2 license. When using or
3 * redistributing this file, you may do so under either license.
7 * Copyright (C) 2019 Advanced Micro Devices, Inc.
9 * This program is free software; you can redistribute it and/or modify
10 * it under the terms of version 2 of the GNU General Public License as
11 * published by the Free Software Foundation.
15 * Copyright (c) 2019 Advanced Micro Devices, Inc.
17 * Redistribution and use in source and binary forms, with or without
18 * modification, are permitted provided that the following conditions
20 * 1. Redistributions of source code must retain the above copyright
21 * notice, this list of conditions and the following disclaimer.
22 * 2. Redistributions in binary form must reproduce the above copyright
23 * notice, this list of conditions and the following disclaimer in the
24 * documentation and/or other materials provided with the distribution.
25 * 3. Neither the name of AMD corporation nor the names of its
26 * contributors may be used to endorse or promote products derived
27 * from this software without specific prior written permission.
29 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
30 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
31 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
32 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
33 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
34 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
35 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
36 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
37 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
38 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
41 * Contact Information :
42 * Rajesh Kumar <rajesh1.kumar@amd.com>
46 * The Non-Transparent Bridge (NTB) is a device that allows you to connect
47 * two or more systems using a PCI-e links, providing remote memory access.
49 * This module contains a driver for NTB hardware in AMD CPUs
51 * Much of the code in this module is shared with Linux. Any patches may
52 * be picked up and redistributed in Linux with a dual GPL/BSD license.
55 #include <sys/cdefs.h>
56 __FBSDID("$FreeBSD$");
58 #include <sys/param.h>
59 #include <sys/kernel.h>
60 #include <sys/systm.h>
63 #include <sys/malloc.h>
64 #include <sys/module.h>
65 #include <sys/mutex.h>
68 #include <sys/sysctl.h>
73 #include <machine/bus.h>
75 #include <dev/pci/pcireg.h>
76 #include <dev/pci/pcivar.h>
78 #include "ntb_hw_amd.h"
79 #include "dev/ntb/ntb.h"
/* Driver-private malloc(9) type for all amd_ntb_hw allocations. */
81 MALLOC_DEFINE(M_AMD_NTB, "amd_ntb_hw", "amd_ntb_hw driver memory allocations");
/* PCI ID table: the single supported vendor/device pair (probe + PNP info). */
83 struct pci_device_table amd_ntb_devs[] = {
84 { PCI_DEV(NTB_HW_AMD_VENDOR_ID, NTB_HW_AMD_DEVICE_ID),
85 PCI_DESCR("AMD Non-Transparent Bridge") }
/* Run-time/boot-time tunable verbosity; higher values enable more logging. */
88 static unsigned g_amd_ntb_hw_debug_level;
89 SYSCTL_UINT(_hw_ntb, OID_AUTO, debug_level, CTLFLAG_RWTUN,
90 &g_amd_ntb_hw_debug_level, 0, "amd_ntb_hw log level -- higher is verbose");
/*
 * Logging helper.  NOTE: expands to ntb->device, so every call site must
 * have a local variable named 'ntb' in scope.
 */
92 #define amd_ntb_printf(lvl, ...) do { \
93 if (lvl <= g_amd_ntb_hw_debug_level) \
94 device_printf(ntb->device, __VA_ARGS__); \
/*
 * Emulated 64-bit bus-space read built from two 32-bit reads (low dword
 * first).  NOTE(review): the two halves are not read atomically; callers
 * must tolerate tearing if the register can change between the reads.
 */
98 static __inline uint64_t
99 bus_space_read_8(bus_space_tag_t tag, bus_space_handle_t handle,
103 return (bus_space_read_4(tag, handle, offset) |
104 ((uint64_t)bus_space_read_4(tag, handle, offset + 4)) << 32);
/* Emulated 64-bit bus-space write: low dword first, then high dword. */
108 bus_space_write_8(bus_space_tag_t tag, bus_space_handle_t handle,
109 bus_size_t offset, uint64_t val)
112 bus_space_write_4(tag, handle, offset, val);
113 bus_space_write_4(tag, handle, offset + 4, val >> 32);
118 * AMD NTB INTERFACE ROUTINES
/* ntb(4) method: report this side's port number from its PRI/SEC role. */
121 amd_ntb_port_number(device_t dev)
123 struct amd_ntb_softc *ntb = device_get_softc(dev);
125 amd_ntb_printf(1, "%s: conn_type %d\n", __func__, ntb->conn_type);
127 switch (ntb->conn_type) {
129 return (NTB_PORT_PRI_USD);
131 return (NTB_PORT_SEC_DSD);
/* ntb(4) method: AMD NTB is point-to-point, so the peer count is constant. */
140 amd_ntb_peer_port_count(device_t dev)
142 struct amd_ntb_softc *ntb = device_get_softc(dev);
144 amd_ntb_printf(1, "%s: peer cnt %d\n", __func__, NTB_DEF_PEER_CNT);
145 return (NTB_DEF_PEER_CNT);
/*
 * ntb(4) method: the peer's port number is the opposite of our own role.
 * Only the single default peer index is valid.
 */
149 amd_ntb_peer_port_number(device_t dev, int pidx)
151 struct amd_ntb_softc *ntb = device_get_softc(dev);
153 amd_ntb_printf(1, "%s: pidx %d conn type %d\n",
154 __func__, pidx, ntb->conn_type);
156 if (pidx != NTB_DEF_PEER_IDX)
159 switch (ntb->conn_type) {
161 return (NTB_PORT_SEC_DSD);
163 return (NTB_PORT_PRI_USD);
/* ntb(4) method: map a peer port number back to the (only) peer index. */
172 amd_ntb_peer_port_idx(device_t dev, int port)
174 struct amd_ntb_softc *ntb = device_get_softc(dev);
177 peer_port = amd_ntb_peer_port_number(dev, NTB_DEF_PEER_IDX);
179 amd_ntb_printf(1, "%s: port %d peer_port %d\n",
180 __func__, port, peer_port);
182 if (peer_port == -EINVAL || port != peer_port)
189 * AMD NTB INTERFACE - LINK ROUTINES
/* True when the cached control status word reports an active link. */
192 amd_link_is_up(struct amd_ntb_softc *ntb)
195 amd_ntb_printf(2, "%s: peer_sta 0x%x cntl_sta 0x%x\n",
196 __func__, ntb->peer_sta, ntb->cntl_sta);
199 return (NTB_LNK_STA_ACTIVE(ntb->cntl_sta));
/* PCIe speed from the cached link status; NTB_SPEED_NONE when link down. */
204 static inline enum ntb_speed
205 amd_ntb_link_sta_speed(struct amd_ntb_softc *ntb)
208 if (!amd_link_is_up(ntb))
209 return (NTB_SPEED_NONE);
211 return (NTB_LNK_STA_SPEED(ntb->lnk_sta));
/* PCIe width from the cached link status; NTB_WIDTH_NONE when link down. */
214 static inline enum ntb_width
215 amd_ntb_link_sta_width(struct amd_ntb_softc *ntb)
218 if (!amd_link_is_up(ntb))
219 return (NTB_WIDTH_NONE);
221 return (NTB_LNK_STA_WIDTH(ntb->lnk_sta));
/* ntb(4) method: report link state, optionally filling speed and width. */
225 amd_ntb_link_is_up(device_t dev, enum ntb_speed *speed, enum ntb_width *width)
227 struct amd_ntb_softc *ntb = device_get_softc(dev);
230 *speed = amd_ntb_link_sta_speed(ntb);
232 *width = amd_ntb_link_sta_width(ntb);
234 return (amd_link_is_up(ntb));
/*
 * ntb(4) method: bring the link up.  Publishes our readiness via the side
 * info register, unmasks the event interrupt, and (role permitting) sets
 * the PMM/SMM control bits to enable the link.  max_speed/max_width are
 * accepted for interface compatibility but not used by this hardware.
 */
238 amd_ntb_link_enable(device_t dev, enum ntb_speed max_speed,
239 enum ntb_width max_width)
241 struct amd_ntb_softc *ntb = device_get_softc(dev);
244 amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
245 __func__, ntb->int_mask, ntb->conn_type);
247 amd_init_side_info(ntb);
249 /* Enable event interrupt by clearing its bit in the mask register. */
250 ntb->int_mask &= ~AMD_EVENT_INTMASK;
251 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);
/* The secondary side does not own the link-control bits. */
253 if (ntb->conn_type == NTB_CONN_SEC)
256 amd_ntb_printf(0, "%s: Enabling Link.\n", __func__);
258 ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
259 ntb_ctl |= (PMM_REG_CTL | SMM_REG_CTL);
260 amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
261 amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);
/*
 * ntb(4) method: mirror of link_enable — withdraw readiness, mask the
 * event interrupt, and clear the PMM/SMM control bits (primary side only).
 */
267 amd_ntb_link_disable(device_t dev)
269 struct amd_ntb_softc *ntb = device_get_softc(dev);
272 amd_ntb_printf(1, "%s: int_mask 0x%x conn_type %d\n",
273 __func__, ntb->int_mask, ntb->conn_type);
275 amd_deinit_side_info(ntb);
277 /* Disable event interrupt by setting its bit in the mask register. */
278 ntb->int_mask |= AMD_EVENT_INTMASK;
279 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);
281 if (ntb->conn_type == NTB_CONN_SEC)
284 amd_ntb_printf(0, "%s: Disabling Link.\n", __func__);
286 ntb_ctl = amd_ntb_reg_read(4, AMD_CNTL_OFFSET);
287 ntb_ctl &= ~(PMM_REG_CTL | SMM_REG_CTL);
288 amd_ntb_printf(1, "%s: ntb_ctl 0x%x\n", __func__, ntb_ctl);
289 amd_ntb_reg_write(4, AMD_CNTL_OFFSET, ntb_ctl);
295 * AMD NTB memory window routines
/* ntb(4) method: number of translatable memory windows on this device. */
298 amd_ntb_mw_count(device_t dev)
300 struct amd_ntb_softc *ntb = device_get_softc(dev);
302 return (ntb->mw_count);
/*
 * ntb(4) method: describe window mw_idx — physical/virtual base, size,
 * alignment and address limit.  All output pointers are optional.
 * NOTE(review): mw_idx is unsigned, so the 'mw_idx < 0' half of the range
 * check can never be true; the upper-bound check does the real work.
 */
306 amd_ntb_mw_get_range(device_t dev, unsigned mw_idx, vm_paddr_t *base,
307 caddr_t *vbase, size_t *size, size_t *align, size_t *align_size,
310 struct amd_ntb_softc *ntb = device_get_softc(dev);
311 struct amd_ntb_pci_bar_info *bar_info;
313 if (mw_idx < 0 || mw_idx >= ntb->mw_count)
/* +1 skips BAR 0, which holds the NTB config registers, not a window. */
316 bar_info = &ntb->bar_info[mw_idx+1];
319 *base = bar_info->pbase;
322 *vbase = bar_info->vbase;
325 *align = bar_info->size;
328 *size = bar_info->size;
330 if (align_size != NULL)
333 if (plimit != NULL) {
335 *plimit = BUS_SPACE_MAXADDR;
337 *plimit = BUS_SPACE_MAXADDR_32BIT;
/*
 * ntb(4) method: program the translation (XLAT) and limit registers so
 * that peer accesses through window mw_idx land at local address 'addr'.
 * Called with addr = 0, size = 0 to tear a translation down.
 */
344 amd_ntb_mw_set_trans(device_t dev, unsigned mw_idx, bus_addr_t addr, size_t size)
346 struct amd_ntb_softc *ntb = device_get_softc(dev);
347 struct amd_ntb_pci_bar_info *bar_info;
349 if (mw_idx < 0 || mw_idx >= ntb->mw_count)
352 bar_info = &ntb->bar_info[mw_idx+1]
/* make sure the range fits in the usable mw size */
355 if (size > bar_info->size) {
356 amd_ntb_printf(0, "%s: size 0x%x greater than mw_size 0x%x\n",
357 __func__, (uint32_t)size, (uint32_t)bar_info->size);
361 amd_ntb_printf(1, "%s: mw %d mw_size 0x%x size 0x%x base %p\n",
362 __func__, mw_idx, (uint32_t)bar_info->size,
363 (uint32_t)size, (void *)bar_info->pci_bus_handle);
366 * AMD NTB XLAT and Limit registers needs to be written only after
369 * set and verify setting the translation address
371 amd_ntb_peer_reg_write(8, bar_info->xlat_off, (uint64_t)addr);
372 amd_ntb_printf(0, "%s: mw %d xlat_off 0x%x cur_val 0x%jx addr %p\n",
373 __func__, mw_idx, bar_info->xlat_off,
374 amd_ntb_peer_reg_read(8, bar_info->xlat_off), (void *)addr);
376 /* set and verify setting the limit */
/*
 * NOTE(review): the limit is written through amd_ntb_reg_write() but read
 * back through amd_ntb_peer_reg_read() — verify against the register map
 * that these really address the same limit register.
 */
378 amd_ntb_reg_write(8, bar_info->limit_off, (uint64_t)size);
379 amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%jx limit 0x%x\n",
380 __func__, bar_info->limit_off,
381 amd_ntb_peer_reg_read(8, bar_info->limit_off), (uint32_t)size);
/* 32-bit path — presumably for the BAR1 (32-bit) limit register. */
383 amd_ntb_reg_write(4, bar_info->limit_off, (uint64_t)size);
384 amd_ntb_printf(1, "%s: limit_off 0x%x cur_val 0x%x limit 0x%x\n",
385 __func__, bar_info->limit_off,
386 amd_ntb_peer_reg_read(4, bar_info->limit_off), (uint32_t)size);
/* ntb(4) method: clear a translation by programming address/size of 0. */
393 amd_ntb_mw_clear_trans(device_t dev, unsigned mw_idx)
395 struct amd_ntb_softc *ntb = device_get_softc(dev);
397 amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
399 if (mw_idx < 0 || mw_idx >= ntb->mw_count)
402 return (amd_ntb_mw_set_trans(dev, mw_idx, 0, 0));
/*
 * ntb(4) method: change the caching attribute (e.g. write-combining) of a
 * window's KVA mapping; no-op when the requested mode is already set.
 */
406 amd_ntb_mw_set_wc(device_t dev, unsigned int mw_idx, vm_memattr_t mode)
408 struct amd_ntb_softc *ntb = device_get_softc(dev);
409 struct amd_ntb_pci_bar_info *bar_info;
412 if (mw_idx < 0 || mw_idx >= ntb->mw_count)
415 bar_info = &ntb->bar_info[mw_idx+1];
416 if (mode == bar_info->map_mode)
419 rc = pmap_change_attr((vm_offset_t)bar_info->vbase, bar_info->size, mode);
/* Cache the new attribute only after pmap_change_attr() succeeded. */
421 bar_info->map_mode = mode;
/* ntb(4) method: report the window's current cached mapping attribute. */
427 amd_ntb_mw_get_wc(device_t dev, unsigned mw_idx, vm_memattr_t *mode)
429 struct amd_ntb_softc *ntb = device_get_softc(dev);
430 struct amd_ntb_pci_bar_info *bar_info;
432 amd_ntb_printf(1, "%s: mw_idx %d\n", __func__, mw_idx);
434 if (mw_idx < 0 || mw_idx >= ntb->mw_count)
437 bar_info = &ntb->bar_info[mw_idx+1];
438 *mode = bar_info->map_mode;
444 * AMD NTB doorbell routines
/* ntb(4) method: one interrupt vector per doorbell bit on this hardware. */
447 amd_ntb_db_vector_count(device_t dev)
449 struct amd_ntb_softc *ntb = device_get_softc(dev);
451 amd_ntb_printf(1, "%s: db_count 0x%x\n", __func__, ntb->db_count);
453 return (ntb->db_count);
/* ntb(4) method: bitmask of all doorbell bits this device implements. */
457 amd_ntb_db_valid_mask(device_t dev)
459 struct amd_ntb_softc *ntb = device_get_softc(dev);
461 amd_ntb_printf(1, "%s: db_valid_mask 0x%x\n",
462 __func__, ntb->db_valid_mask);
464 return (ntb->db_valid_mask);
/*
 * ntb(4) method: doorbell bit(s) serviced by interrupt vector 'vector'.
 * NOTE(review): 'vector' is uint32_t, so the 'vector < 0' half of the
 * range check is always false; only the upper-bound check matters.
 */
468 amd_ntb_db_vector_mask(device_t dev, uint32_t vector)
470 struct amd_ntb_softc *ntb = device_get_softc(dev);
472 amd_ntb_printf(1, "%s: vector %d db_count 0x%x db_valid_mask 0x%x\n",
473 __func__, vector, ntb->db_count, ntb->db_valid_mask);
475 if (vector < 0 || vector >= ntb->db_count)
478 return (ntb->db_valid_mask & (1 << vector));
/* ntb(4) method: read the pending-doorbell status register (16-bit). */
482 amd_ntb_db_read(device_t dev)
484 struct amd_ntb_softc *ntb = device_get_softc(dev);
487 dbstat_off = (uint64_t)amd_ntb_reg_read(2, AMD_DBSTAT_OFFSET);
489 amd_ntb_printf(1, "%s: dbstat_off 0x%jx\n", __func__, dbstat_off);
/* ntb(4) method: acknowledge doorbells by writing their bits back (W1C). */
495 amd_ntb_db_clear(device_t dev, uint64_t db_bits)
497 struct amd_ntb_softc *ntb = device_get_softc(dev);
499 amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
500 amd_ntb_reg_write(2, AMD_DBSTAT_OFFSET, (uint16_t)db_bits);
/* ntb(4) method: mask (disable) the given doorbell interrupt bits. */
504 amd_ntb_db_set_mask(device_t dev, uint64_t db_bits)
506 struct amd_ntb_softc *ntb = device_get_softc(dev);
509 amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
510 __func__, ntb->db_mask, db_bits);
512 ntb->db_mask |= db_bits;
513 amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
/* ntb(4) method: unmask (re-enable) the given doorbell interrupt bits. */
518 amd_ntb_db_clear_mask(device_t dev, uint64_t db_bits)
520 struct amd_ntb_softc *ntb = device_get_softc(dev);
523 amd_ntb_printf(1, "%s: db_mask 0x%x db_bits 0x%jx\n",
524 __func__, ntb->db_mask, db_bits);
526 ntb->db_mask &= ~db_bits;
527 amd_ntb_reg_write(2, AMD_DBMASK_OFFSET, ntb->db_mask);
/* ntb(4) method: ring the peer's doorbell(s) via the request register. */
532 amd_ntb_peer_db_set(device_t dev, uint64_t db_bits)
534 struct amd_ntb_softc *ntb = device_get_softc(dev);
536 amd_ntb_printf(1, "%s: db_bits 0x%jx\n", __func__, db_bits);
537 amd_ntb_reg_write(2, AMD_DBREQ_OFFSET, (uint16_t)db_bits);
541 * AMD NTB scratchpad routines
/* ntb(4) method: number of scratchpad registers owned by this side. */
544 amd_ntb_spad_count(device_t dev)
546 struct amd_ntb_softc *ntb = device_get_softc(dev);
548 amd_ntb_printf(1, "%s: spad_count 0x%x\n", __func__, ntb->spad_count);
550 return (ntb->spad_count);
/*
 * ntb(4) method: read local scratchpad 'idx' into *val.  Scratchpads are
 * 32-bit registers laid out contiguously, hence the 'idx << 2' offset.
 */
554 amd_ntb_spad_read(device_t dev, unsigned int idx, uint32_t *val)
556 struct amd_ntb_softc *ntb = device_get_softc(dev);
559 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
561 if (idx < 0 || idx >= ntb->spad_count)
564 offset = ntb->self_spad + (idx << 2);
565 *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
566 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
/* ntb(4) method: write local scratchpad 'idx'. */
572 amd_ntb_spad_write(device_t dev, unsigned int idx, uint32_t val)
574 struct amd_ntb_softc *ntb = device_get_softc(dev);
577 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
579 if (idx < 0 || idx >= ntb->spad_count)
582 offset = ntb->self_spad + (idx << 2);
583 amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
584 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
/* Zero all of this side's scratchpads (used once at attach time). */
590 amd_ntb_spad_clear(struct amd_ntb_softc *ntb)
594 for (i = 0; i < ntb->spad_count; i++)
595 amd_ntb_spad_write(ntb->device, i, 0);
/* ntb(4) method: read peer scratchpad 'idx' (peer_spad base offset). */
599 amd_ntb_peer_spad_read(device_t dev, unsigned int idx, uint32_t *val)
601 struct amd_ntb_softc *ntb = device_get_softc(dev);
604 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
606 if (idx < 0 || idx >= ntb->spad_count)
609 offset = ntb->peer_spad + (idx << 2);
610 *val = amd_ntb_reg_read(4, AMD_SPAD_OFFSET + offset);
611 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, *val);
/* ntb(4) method: write peer scratchpad 'idx'. */
617 amd_ntb_peer_spad_write(device_t dev, unsigned int idx, uint32_t val)
619 struct amd_ntb_softc *ntb = device_get_softc(dev);
622 amd_ntb_printf(2, "%s: idx %d\n", __func__, idx);
624 if (idx < 0 || idx >= ntb->spad_count)
627 offset = ntb->peer_spad + (idx << 2);
628 amd_ntb_reg_write(4, AMD_SPAD_OFFSET + offset, val);
629 amd_ntb_printf(2, "%s: offset 0x%x val 0x%x\n", __func__, offset, val);
/*
 * sysctl handler: render a human-readable hardware summary (role, link
 * state, window/spad/doorbell counts, raw XLAT/LMT register contents)
 * into an sbuf and return it to userland.
 */
639 amd_ntb_hw_info_handler(SYSCTL_HANDLER_ARGS)
641 struct amd_ntb_softc* ntb = arg1;
645 sb = sbuf_new_for_sysctl(NULL, NULL, 4096, req);
647 return (sb->s_error);
649 sbuf_printf(sb, "NTB AMD Hardware info:\n\n");
650 sbuf_printf(sb, "AMD NTB side: %s\n",
651 (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
/*
 * NOTE(review): "0x%#06x" prints the 0x prefix twice ("0x0x...") since
 * the '#' flag already adds it — likely meant "0x%06x" or "%#06x".
 */
652 sbuf_printf(sb, "AMD LNK STA: 0x%#06x\n", ntb->lnk_sta);
654 if (!amd_link_is_up(ntb))
655 sbuf_printf(sb, "AMD Link Status: Down\n");
657 sbuf_printf(sb, "AMD Link Status: Up\n");
658 sbuf_printf(sb, "AMD Link Speed: PCI-E Gen %u\n",
659 NTB_LNK_STA_SPEED(ntb->lnk_sta));
660 sbuf_printf(sb, "AMD Link Width: PCI-E Width %u\n",
661 NTB_LNK_STA_WIDTH(ntb->lnk_sta));
664 sbuf_printf(sb, "AMD Memory window count: %d\n",
666 sbuf_printf(sb, "AMD Spad count: %d\n",
668 sbuf_printf(sb, "AMD Doorbell count: %d\n",
670 sbuf_printf(sb, "AMD MSI-X vec count: %d\n\n",
671 ntb->msix_vec_count);
672 sbuf_printf(sb, "AMD Doorbell valid mask: 0x%x\n",
674 sbuf_printf(sb, "AMD Doorbell Mask: 0x%x\n",
675 amd_ntb_reg_read(4, AMD_DBMASK_OFFSET));
676 sbuf_printf(sb, "AMD Doorbell: 0x%x\n",
677 amd_ntb_reg_read(4, AMD_DBSTAT_OFFSET));
678 sbuf_printf(sb, "AMD NTB Incoming XLAT: \n");
679 sbuf_printf(sb, "AMD XLAT1: 0x%jx\n",
680 amd_ntb_peer_reg_read(8, AMD_BAR1XLAT_OFFSET));
681 sbuf_printf(sb, "AMD XLAT23: 0x%jx\n",
682 amd_ntb_peer_reg_read(8, AMD_BAR23XLAT_OFFSET));
683 sbuf_printf(sb, "AMD XLAT45: 0x%jx\n",
684 amd_ntb_peer_reg_read(8, AMD_BAR45XLAT_OFFSET));
685 sbuf_printf(sb, "AMD LMT1: 0x%x\n",
686 amd_ntb_reg_read(4, AMD_BAR1LMT_OFFSET));
687 sbuf_printf(sb, "AMD LMT23: 0x%jx\n",
688 amd_ntb_reg_read(8, AMD_BAR23LMT_OFFSET));
689 sbuf_printf(sb, "AMD LMT45: 0x%jx\n",
690 amd_ntb_reg_read(8, AMD_BAR45LMT_OFFSET));
692 rc = sbuf_finish(sb);
/* Register the read-only "info" sysctl node under this device's tree. */
698 amd_ntb_sysctl_init(struct amd_ntb_softc *ntb)
700 struct sysctl_oid_list *globals;
701 struct sysctl_ctx_list *ctx;
703 ctx = device_get_sysctl_ctx(ntb->device);
704 globals = SYSCTL_CHILDREN(device_get_sysctl_tree(ntb->device));
706 SYSCTL_ADD_PROC(ctx, globals, OID_AUTO, "info",
707 CTLTYPE_STRING | CTLFLAG_RD | CTLFLAG_MPSAFE, ntb, 0,
708 amd_ntb_hw_info_handler, "A", "AMD NTB HW Information");
712 * Polls the HW link status register(s); returns true if something has changed.
715 amd_ntb_poll_link(struct amd_ntb_softc *ntb)
717 uint32_t fullreg, reg, stat;
/* Sample the peer's side-info register and isolate the link-active bit. */
719 fullreg = amd_ntb_peer_reg_read(4, AMD_SIDEINFO_OFFSET);
720 reg = fullreg & NTB_LIN_STA_ACTIVE_BIT;
/* Nothing changed since the last poll — no link event to report. */
722 if (reg == ntb->cntl_sta)
725 amd_ntb_printf(0, "%s: SIDEINFO reg_val = 0x%x cntl_sta 0x%x\n",
726 __func__, fullreg, ntb->cntl_sta);
/* Refresh the cached PCIe link status from config space. */
730 stat = pci_read_config(ntb->device, AMD_LINK_STATUS_OFFSET, 4);
732 amd_ntb_printf(0, "%s: LINK_STATUS stat = 0x%x lnk_sta 0x%x.\n",
733 __func__, stat, ntb->lnk_sta);
/*
 * Heartbeat callout: poll the link, fire a link event on change, and
 * re-arm — polling fast while the link is down, 10x slower once it is up.
 */
741 amd_link_hb(void *arg)
743 struct amd_ntb_softc *ntb = arg;
745 if (amd_ntb_poll_link(ntb))
746 ntb_link_event(ntb->device);
748 if (!amd_link_is_up(ntb)) {
749 callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
752 callout_reset(&ntb->hb_timer, (AMD_LINK_HB_TIMEOUT * 10),
/* Dispatch one interrupt: in-range vectors are doorbell events. */
758 amd_ntb_interrupt(struct amd_ntb_softc *ntb, uint16_t vec)
760 if (vec < AMD_DB_CNT)
761 ntb_db_event(ntb->device, vec);
763 amd_ntb_printf(0, "Invalid vector %d\n", vec);
/* MSI-X per-vector ISR: recover the softc/vector pair and dispatch. */
767 amd_ntb_vec_isr(void *arg)
769 struct amd_ntb_vec *nvec = arg;
771 amd_ntb_interrupt(nvec->ntb, nvec->num);
/* Shared/legacy ISR used when MSI-X setup failed. */
775 amd_ntb_irq_isr(void *arg)
777 /* If we couldn't set up MSI-X, we only have the one vector. */
778 amd_ntb_interrupt(arg, 0);
/*
 * Advertise this side as ready by setting AMD_SIDE_READY in the side-info
 * register; the follow-up read presumably flushes the posted write.
 */
782 amd_init_side_info(struct amd_ntb_softc *ntb)
786 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
787 if (!(reg & AMD_SIDE_READY)) {
788 reg |= AMD_SIDE_READY;
789 amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
791 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
/* Withdraw the ready bit set by amd_init_side_info(). */
795 amd_deinit_side_info(struct amd_ntb_softc *ntb)
799 reg = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
800 if (reg & AMD_SIDE_READY) {
801 reg &= ~AMD_SIDE_READY;
802 amd_ntb_reg_write(4, AMD_SIDEINFO_OFFSET, reg);
803 amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
/*
 * Allocate and wire up num_vectors IRQ resources.  INTx uses RID 0 and a
 * shareable single vector routed to amd_ntb_irq_isr(); MSI/MSI-X use RIDs
 * starting at 1 with one amd_ntb_vec_isr() per vector.  allocated_interrupts
 * tracks progress so teardown can release exactly what was acquired.
 */
808 amd_ntb_setup_isr(struct amd_ntb_softc *ntb, uint16_t num_vectors, bool msi,
812 int flags = 0, rc = 0;
/* Legacy INTx lines may be shared with other devices. */
816 flags |= RF_SHAREABLE;
818 for (i = 0; i < num_vectors; i++) {
820 /* RID should be 0 for intx */
822 ntb->int_info[i].rid = i;
824 ntb->int_info[i].rid = i + 1;
826 ntb->int_info[i].res = bus_alloc_resource_any(ntb->device,
827 SYS_RES_IRQ, &ntb->int_info[i].rid, flags);
828 if (ntb->int_info[i].res == NULL) {
829 amd_ntb_printf(0, "bus_alloc_resource IRQ failed\n");
833 ntb->int_info[i].tag = NULL;
834 ntb->allocated_interrupts++;
837 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
838 INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_irq_isr,
839 ntb, &ntb->int_info[i].tag);
841 rc = bus_setup_intr(ntb->device, ntb->int_info[i].res,
842 INTR_MPSAFE | INTR_TYPE_MISC, NULL, amd_ntb_vec_isr,
843 &ntb->msix_vec[i], &ntb->int_info[i].tag);
847 amd_ntb_printf(0, "bus_setup_intr %d failed\n", i);
/* Allocate and pre-fill the per-vector context array for MSI-X ISRs. */
856 amd_ntb_create_msix_vec(struct amd_ntb_softc *ntb, uint32_t max_vectors)
860 ntb->msix_vec = malloc(max_vectors * sizeof(*ntb->msix_vec), M_AMD_NTB,
863 for (i = 0; i < max_vectors; i++) {
864 ntb->msix_vec[i].num = i;
865 ntb->msix_vec[i].ntb = ntb;
/* Release MSI messages (if any) and free the per-vector context array. */
872 amd_ntb_free_msix_vec(struct amd_ntb_softc *ntb)
874 if (ntb->msix_vec_count) {
875 pci_release_msi(ntb->device);
876 ntb->msix_vec_count = 0;
879 if (ntb->msix_vec != NULL) {
880 free(ntb->msix_vec, M_AMD_NTB);
881 ntb->msix_vec = NULL;
/*
 * Interrupt setup with graceful degradation: try one MSI-X vector per
 * doorbell; if the device or system cannot supply that many, fall back to
 * plain MSI, and finally to legacy INTx with a single vector.
 */
886 amd_ntb_init_isr(struct amd_ntb_softc *ntb)
888 uint32_t supported_vectors, num_vectors;
889 bool msi = false, intx = false;
/* Start with every doorbell masked until ISRs are in place. */
892 ntb->db_mask = ntb->db_valid_mask;
894 rc = amd_ntb_create_msix_vec(ntb, AMD_MSIX_VECTOR_CNT);
896 amd_ntb_printf(0, "Error creating msix vectors: %d\n", rc);
901 * Check the number of MSI-X message supported by the device.
902 * Minimum necessary MSI-X message count should be equal to db_count
904 supported_vectors = pci_msix_count(ntb->device);
905 num_vectors = MIN(supported_vectors, ntb->db_count);
906 if (num_vectors < ntb->db_count) {
907 amd_ntb_printf(0, "No minimum msix: supported %d db %d\n",
908 supported_vectors, ntb->db_count);
910 goto err_msix_enable;
913 /* Allocate the necessary number of MSI-x messages */
914 rc = pci_alloc_msix(ntb->device, &num_vectors);
916 amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
918 goto err_msix_enable;
921 if (num_vectors < ntb->db_count) {
922 amd_ntb_printf(0, "Allocated only %d MSI-X\n", num_vectors);
925 * Else set ntb->db_count = ntb->msix_vec_count = num_vectors,
926 * msi=false and dont release msi
/* MSI-X path failed: discard its state and retry with plain MSI. */
933 free(ntb->msix_vec, M_AMD_NTB);
934 ntb->msix_vec = NULL;
935 pci_release_msi(ntb->device);
937 rc = pci_alloc_msi(ntb->device, &num_vectors);
939 amd_ntb_printf(0, "Error allocating msix vectors: %d\n", rc);
/* Shrink the doorbell count to match what interrupts can service. */
945 ntb->db_count = ntb->msix_vec_count = num_vectors;
950 ntb->msix_vec_count = 0;
953 amd_ntb_printf(0, "%s: db %d msix %d msi %d intx %d\n",
954 __func__, ntb->db_count, ntb->msix_vec_count, (int)msi, (int)intx);
956 rc = amd_ntb_setup_isr(ntb, num_vectors, msi, intx);
958 amd_ntb_printf(0, "Error setting up isr: %d\n", rc);
959 amd_ntb_free_msix_vec(ntb);
/* Teardown mirror of init_isr: mask doorbells, then release each IRQ. */
966 amd_ntb_deinit_isr(struct amd_ntb_softc *ntb)
968 struct amd_ntb_int_info *current_int;
971 /* Mask all doorbell interrupts */
972 ntb->db_mask = ntb->db_valid_mask;
973 amd_ntb_reg_write(4, AMD_DBMASK_OFFSET, ntb->db_mask);
975 for (i = 0; i < ntb->allocated_interrupts; i++) {
976 current_int = &ntb->int_info[i];
977 if (current_int->tag != NULL)
978 bus_teardown_intr(ntb->device, current_int->res,
981 if (current_int->res != NULL)
982 bus_release_resource(ntb->device, SYS_RES_IRQ,
983 rman_get_rid(current_int->res), current_int->res);
986 amd_ntb_free_msix_vec(ntb);
/* Read the side-info register to learn whether we are PRI or SEC side. */
989 static enum amd_ntb_conn_type
990 amd_ntb_get_topo(struct amd_ntb_softc *ntb)
994 info = amd_ntb_reg_read(4, AMD_SIDEINFO_OFFSET);
996 if (info & AMD_SIDE_MASK)
997 return (NTB_CONN_SEC);
999 return (NTB_CONN_PRI);
/*
 * One-time device init: set window/spad/doorbell counts, carve the
 * scratchpad space in half (own half vs. peer half, chosen by role),
 * start the link heartbeat, and mask the event interrupt.
 */
1003 amd_ntb_init_dev(struct amd_ntb_softc *ntb)
1005 ntb->mw_count = AMD_MW_CNT;
1006 ntb->spad_count = AMD_SPADS_CNT;
1007 ntb->db_count = AMD_DB_CNT;
1008 ntb->db_valid_mask = (1ull << ntb->db_count) - 1;
1009 mtx_init(&ntb->db_mask_lock, "amd ntb db bits", NULL, MTX_SPIN);
1011 switch (ntb->conn_type) {
/* Each side owns half the scratchpads; the other half belongs to the peer. */
1014 ntb->spad_count >>= 1;
1016 if (ntb->conn_type == NTB_CONN_PRI) {
1018 ntb->peer_spad = 0x20;
1020 ntb->self_spad = 0x20;
/* Kick off the periodic link heartbeat (see amd_link_hb()). */
1024 callout_init(&ntb->hb_timer, 1);
1025 callout_reset(&ntb->hb_timer, AMD_LINK_HB_TIMEOUT,
1031 amd_ntb_printf(0, "Unsupported AMD NTB topology %d\n",
/* Event interrupt stays masked until link_enable() clears it. */
1036 ntb->int_mask = AMD_EVENT_INTMASK;
1037 amd_ntb_reg_write(4, AMD_INTMASK_OFFSET, ntb->int_mask);
/* Top-level init: discover topology, then device state, then interrupts. */
1043 amd_ntb_init(struct amd_ntb_softc *ntb)
1047 ntb->conn_type = amd_ntb_get_topo(ntb);
1048 amd_ntb_printf(0, "AMD NTB Side: %s\n",
1049 (ntb->conn_type == NTB_CONN_PRI)? "PRIMARY" : "SECONDARY");
1051 rc = amd_ntb_init_dev(ntb);
1055 rc = amd_ntb_init_isr(ntb);
/* Log a successfully mapped BAR: virtual/physical extents, size, and kind. */
1063 print_map_success(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar,
1066 amd_ntb_printf(0, "Mapped BAR%d v:[%p-%p] p:[%p-%p] (0x%jx bytes) (%s)\n",
1067 PCI_RID2BAR(bar->pci_resource_id), bar->vbase,
1068 (char *)bar->vbase + bar->size - 1, (void *)bar->pbase,
1069 (void *)(bar->pbase + bar->size - 1), (uintmax_t)bar->size, kind);
/* Cache bus tag/handle, base addresses, and size from the rman resource. */
1073 save_bar_parameters(struct amd_ntb_pci_bar_info *bar)
1075 bar->pci_bus_tag = rman_get_bustag(bar->pci_resource);
1076 bar->pci_bus_handle = rman_get_bushandle(bar->pci_resource);
1077 bar->pbase = rman_get_start(bar->pci_resource);
1078 bar->size = rman_get_size(bar->pci_resource);
1079 bar->vbase = rman_get_virtual(bar->pci_resource);
/* New mappings start uncacheable; mw_set_wc() may change this later. */
1080 bar->map_mode = VM_MEMATTR_UNCACHEABLE;
/* Allocate and activate one memory BAR, then record its parameters. */
1084 map_bar(struct amd_ntb_softc *ntb, struct amd_ntb_pci_bar_info *bar)
1086 bar->pci_resource = bus_alloc_resource_any(ntb->device, SYS_RES_MEMORY,
1087 &bar->pci_resource_id, RF_ACTIVE);
1088 if (bar->pci_resource == NULL)
1091 save_bar_parameters(bar);
1092 print_map_success(ntb, bar, "mmr");
/*
 * Map all BARs: BAR0 holds the NTB config/control registers; BAR1, BAR2/3
 * and BAR4/5 are the three memory windows, each paired with its XLAT and
 * limit register offsets.
 */
1098 amd_ntb_map_pci_bars(struct amd_ntb_softc *ntb)
1102 /* NTB Config/Control registers - BAR 0 */
1103 ntb->bar_info[NTB_CONFIG_BAR].pci_resource_id = PCIR_BAR(0);
1104 rc = map_bar(ntb, &ntb->bar_info[NTB_CONFIG_BAR]);
1108 /* Memory Window 0 BAR - BAR 1*/
1109 ntb->bar_info[NTB_BAR_1].pci_resource_id = PCIR_BAR(1);
1110 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_1]);
1113 ntb->bar_info[NTB_BAR_1].xlat_off = AMD_BAR1XLAT_OFFSET;
1114 ntb->bar_info[NTB_BAR_1].limit_off = AMD_BAR1LMT_OFFSET;
1116 /* Memory Window 1 BAR - BAR 2&3 */
1117 ntb->bar_info[NTB_BAR_2].pci_resource_id = PCIR_BAR(2);
1118 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_2]);
1121 ntb->bar_info[NTB_BAR_2].xlat_off = AMD_BAR23XLAT_OFFSET;
1122 ntb->bar_info[NTB_BAR_2].limit_off = AMD_BAR23LMT_OFFSET;
1124 /* Memory Window 2 BAR - BAR 4&5 */
1125 ntb->bar_info[NTB_BAR_3].pci_resource_id = PCIR_BAR(4);
1126 rc = map_bar(ntb, &ntb->bar_info[NTB_BAR_3]);
1129 ntb->bar_info[NTB_BAR_3].xlat_off = AMD_BAR45XLAT_OFFSET;
1130 ntb->bar_info[NTB_BAR_3].limit_off = AMD_BAR45LMT_OFFSET;
1134 amd_ntb_printf(0, "unable to allocate pci resource\n");
/* Release every BAR resource acquired by amd_ntb_map_pci_bars(). */
1140 amd_ntb_unmap_pci_bars(struct amd_ntb_softc *ntb)
1142 struct amd_ntb_pci_bar_info *bar_info;
1145 for (i = 0; i < NTB_MAX_BARS; i++) {
1146 bar_info = &ntb->bar_info[i];
1147 if (bar_info->pci_resource != NULL)
1148 bus_release_resource(ntb->device, SYS_RES_MEMORY,
1149 bar_info->pci_resource_id, bar_info->pci_resource);
/* Device probe: match against the amd_ntb_devs PCI ID table. */
1154 amd_ntb_probe(device_t device)
1156 const struct pci_device_table *tbl;
1158 tbl = PCI_MATCH(device, amd_ntb_devs);
1162 device_set_desc(device, tbl->descr);
1164 return (BUS_PROBE_GENERIC);
/*
 * Device attach: enable bus mastering, map BARs, initialize device state
 * and interrupts, publish readiness, clear scratchpads, register sysctls,
 * then attach the ntb(4) children.  On failure, detach unwinds everything.
 */
1168 amd_ntb_attach(device_t device)
1170 struct amd_ntb_softc *ntb = device_get_softc(device);
1173 ntb->device = device;
1175 /* Enable PCI bus mastering for "device" */
1176 pci_enable_busmaster(ntb->device);
1178 error = amd_ntb_map_pci_bars(ntb);
1182 error = amd_ntb_init(ntb);
1186 amd_init_side_info(ntb);
1188 amd_ntb_spad_clear(ntb);
1190 amd_ntb_sysctl_init(ntb);
1192 /* Attach children to this controller */
1193 error = ntb_register_device(device);
1197 amd_ntb_detach(device);
/* Device detach: undo attach in reverse order (also the attach error path). */
1203 amd_ntb_detach(device_t device)
1205 struct amd_ntb_softc *ntb = device_get_softc(device);
1207 ntb_unregister_device(device);
1208 amd_deinit_side_info(ntb);
/* Drain, not stop: waits for an in-flight heartbeat callout to finish. */
1209 callout_drain(&ntb->hb_timer);
1210 amd_ntb_deinit_isr(ntb);
1211 mtx_destroy(&ntb->db_mask_lock);
1212 pci_disable_busmaster(ntb->device);
1213 amd_ntb_unmap_pci_bars(ntb);
/* newbus method table: device lifecycle, bus glue, and the ntb(4) KPI. */
1218 static device_method_t ntb_amd_methods[] = {
1219 /* Device interface */
1220 DEVMETHOD(device_probe, amd_ntb_probe),
1221 DEVMETHOD(device_attach, amd_ntb_attach),
1222 DEVMETHOD(device_detach, amd_ntb_detach),
1225 DEVMETHOD(bus_child_location_str, ntb_child_location_str),
1226 DEVMETHOD(bus_print_child, ntb_print_child),
1229 DEVMETHOD(ntb_port_number, amd_ntb_port_number),
1230 DEVMETHOD(ntb_peer_port_count, amd_ntb_peer_port_count),
1231 DEVMETHOD(ntb_peer_port_number, amd_ntb_peer_port_number),
1232 DEVMETHOD(ntb_peer_port_idx, amd_ntb_peer_port_idx),
1233 DEVMETHOD(ntb_link_is_up, amd_ntb_link_is_up),
1234 DEVMETHOD(ntb_link_enable, amd_ntb_link_enable),
1235 DEVMETHOD(ntb_link_disable, amd_ntb_link_disable),
1236 DEVMETHOD(ntb_mw_count, amd_ntb_mw_count),
1237 DEVMETHOD(ntb_mw_get_range, amd_ntb_mw_get_range),
1238 DEVMETHOD(ntb_mw_set_trans, amd_ntb_mw_set_trans),
1239 DEVMETHOD(ntb_mw_clear_trans, amd_ntb_mw_clear_trans),
1240 DEVMETHOD(ntb_mw_set_wc, amd_ntb_mw_set_wc),
1241 DEVMETHOD(ntb_mw_get_wc, amd_ntb_mw_get_wc),
1242 DEVMETHOD(ntb_db_valid_mask, amd_ntb_db_valid_mask),
1243 DEVMETHOD(ntb_db_vector_count, amd_ntb_db_vector_count),
1244 DEVMETHOD(ntb_db_vector_mask, amd_ntb_db_vector_mask),
1245 DEVMETHOD(ntb_db_read, amd_ntb_db_read),
1246 DEVMETHOD(ntb_db_clear, amd_ntb_db_clear),
1247 DEVMETHOD(ntb_db_set_mask, amd_ntb_db_set_mask),
1248 DEVMETHOD(ntb_db_clear_mask, amd_ntb_db_clear_mask),
1249 DEVMETHOD(ntb_peer_db_set, amd_ntb_peer_db_set),
1250 DEVMETHOD(ntb_spad_count, amd_ntb_spad_count),
1251 DEVMETHOD(ntb_spad_read, amd_ntb_spad_read),
1252 DEVMETHOD(ntb_spad_write, amd_ntb_spad_write),
1253 DEVMETHOD(ntb_peer_spad_read, amd_ntb_peer_spad_read),
1254 DEVMETHOD(ntb_peer_spad_write, amd_ntb_peer_spad_write),
/* Driver/module registration: class, PCI attachment, dependencies, PNP. */
1258 static DEFINE_CLASS_0(ntb_hw, ntb_amd_driver, ntb_amd_methods,
1259 sizeof(struct amd_ntb_softc));
1260 DRIVER_MODULE(ntb_hw_amd, pci, ntb_amd_driver, ntb_hw_devclass, NULL, NULL);
1261 MODULE_DEPEND(ntb_hw_amd, ntb, 1, 1, 1);
1262 MODULE_VERSION(ntb_hw_amd, 1);
1263 PCI_PNP_INFO(amd_ntb_devs);