/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/fail.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"
#ifndef BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif

#define	IOAT_REFLK	(&ioat->submit_lock)
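/*
 * 0xFFFFFFFFFFULL is (1ULL << 40) - 1: ioat3_attach() below uses it as the
 * 'lowaddr' of the hardware descriptor ring's DMA tag, so the descriptor
 * ring itself must sit below 40 bits of bus address space.
 */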
static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor *);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *,
    uint32_t index);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *,
    uint32_t index);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void _ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind, boolean_t);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain_locked(struct ioat_softc *);
#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");
static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0,
    "Set to non-zero to force legacy (INTx) interrupts and disable MSI-X");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

unsigned g_ioat_ring_order = 13;
SYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order,
    0, "Set IOAT ring order. (1 << this) == ring size.");
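/*
 * Example: with the default ring_order of 13, each channel uses
 * (1 << 13) == 8192 ring slots.  Each slot is backed by one 64-byte
 * hardware descriptor (sizeof(struct ioat_dma_hw_descriptor)), so the
 * DMA-visible ring works out to roughly 512 KB per channel.
 */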
/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe, ioat_probe),
	DEVMETHOD(device_attach, ioat_attach),
	DEVMETHOD(device_detach, ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);
/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");
static struct _pcsid
{
	uint32_t   type;
	const char *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },
};

MODULE_PNP_INFO("W32:vendor/device;D:human", pci, ioat, pci_ids,
    sizeof(pci_ids[0]), nitems(pci_ids));
/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	uint32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}
static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}
static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);
	wakeup(&ioat->resetting);

	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->poll_timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	if (ioat->hw_desc_ring != NULL) {
		bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map);
		bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring,
		    ioat->hw_desc_map);
		bus_dma_tag_destroy(ioat->hw_desc_tag);
	}

	return (0);
}
static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}
static int
ioat_start_channel(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);

	/* Submit 'NULL' operation manually to avoid quiescing flag */
	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma;
	dmadesc = &desc->bus_dmadesc;

	dmadesc->callback_fn = NULL;
	dmadesc->callback_arg = NULL;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = IOAT_OP_COPY;
	hw_desc->u.control_generic.completion_update = 1;

	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;
	hw_desc->u.control.null = 1;

	ioat_submit_single(ioat);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}
/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor *ring;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	void *hw_desc;
	size_t ringsz;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->poll_timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_submitter_processing = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = g_ioat_ring_order;
	num_descriptors = 1 << ioat->ring_size_order;
	ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors;

	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    2 * 1024 * 1024, 0x0, (bus_addr_t)BUS_SPACE_MAXADDR_40BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1, ringsz, 0, NULL, NULL,
	    &ioat->hw_desc_tag);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc,
	    BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_WAITOK);
	if (error != 0)
		return (error);

	ioat->hw_desc_ring = hw_desc;

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,
	    M_ZERO | M_WAITOK);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc));
		ring[i].id = i;
	}

	for (i = 0; i < num_descriptors; i++) {
		dma_hw_desc = &ioat->hw_desc_ring[i].dma;
		dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1);
	}

	ioat->head = ioat->hw_head = 0;
	ioat->tail = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;
	return (0);
}
static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}
static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}
/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}
static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
	/* BWD and BDXDE device IDs (see pci_ids[] above) */
	case 0x0c508086: case 0x0c518086: case 0x0c528086: case 0x0c538086:
	case 0x6f508086: case 0x6f518086: case 0x6f528086: case 0x6f538086:
		return (TRUE);
	}
	return (FALSE);
}
static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat);
}

static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}
static void
ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	int error, pending;

	mtx_lock(&ioat->cleanup_lock);

	/*
	 * Don't run while the hardware is being reset.  Reset is responsible
	 * for blocking new work and draining & completing existing work, so
	 * there is nothing to do until new work is queued after reset anyway.
	 */
	if (ioat->resetting_cleanup) {
		mtx_unlock(&ioat->cleanup_lock);
		return;
	}

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (status < ioat->hw_desc_bus_addr ||
	    status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) *
	    sizeof(struct ioat_generic_hw_descriptor))
		panic("Bogus completion address %jx (channel %u)",
		    (uintmax_t)status, ioat->chan_idx);

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		goto out;
	}
	CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx",
	    __func__, ioat->chan_idx, comp_update, ioat->last_seen);

	while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	if (completed != 0) {
		ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1);
		ioat->stats.descriptors_processed += completed;
	}

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);

	/* Perform a racy check first; only take the locks if it passes. */
	pending = (ioat_get_active(ioat) != 0);
	if (!pending && ioat->is_completion_pending) {
		mtx_unlock(&ioat->cleanup_lock);
		mtx_lock(&ioat->submit_lock);
		mtx_lock(&ioat->cleanup_lock);

		pending = (ioat_get_active(ioat) != 0);
		if (!pending && ioat->is_completion_pending) {
			ioat->is_completion_pending = FALSE;
			callout_stop(&ioat->poll_timer);
		}
		mtx_unlock(&ioat->submit_lock);
	}
	mtx_unlock(&ioat->cleanup_lock);

	if (pending)
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);

	if (completed != 0) {
		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);
		wakeup(&ioat->tail);
	}

	/*
	 * The device doesn't seem to reliably push suspend/halt statuses to
	 * the channel completion memory address, so poll the device register
	 * here.
	 */
	comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;
	/*
	 * This is safe to do here because we have both locks and the submit
	 * queue is quiesced.  We know that we will drain all outstanding
	 * events, so ioat_reset_hw can't deadlock.  It is necessary to
	 * protect other ioat_process_event threads from racing ioat_reset_hw,
	 * reading an indeterminate hw state, and attempting to continue
	 * issuing completions.
	 */
	ioat->resetting_cleanup = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (g_ioat_debug_level >= 1)
		ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	if (ioat->is_completion_pending) {
		ioat->is_completion_pending = FALSE;
		callout_stop(&ioat->poll_timer);
	}

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}
static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}

bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	if (index >= ioat_channel_index)
		return (NULL);

	ioat = ioat_channel[index];
	if (ioat == NULL || ioat->destroying)
		return (NULL);

	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0)
			return (NULL);

		mtx_lock(IOAT_REFLK);
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
		mtx_unlock(IOAT_REFLK);

		if (ioat->destroying)
			return (NULL);
	}

	/*
	 * There's a race here between the quiescing check and HW reset or
	 * module destroy, but it is tolerably small.
	 */
	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
}

void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);
}
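/*
 * Example (sketch, not part of the driver): a consumer typically looks up a
 * channel once and holds the reference while it submits work.  'eng' is a
 * hypothetical consumer-side variable:
 *
 *	bus_dmaengine_t eng;
 *
 *	eng = ioat_get_dmaengine(0, M_WAITOK);
 *	if (eng == NULL)
 *		return (ENXIO);
 *	... submit operations (see ioat_acquire()/ioat_release() below) ...
 *	ioat_put_dmaengine(eng);
 *
 * M_WAITOK sleeps through a concurrent quiesce/reset; M_NOWAIT returns NULL
 * instead of sleeping.
 */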
int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

uint32_t
ioat_get_capabilities(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->capabilities);
}
int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (EINVAL);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}
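/*
 * Example (sketch): a throughput-oriented consumer can trade completion
 * latency for fewer interrupts ('eng' as obtained above):
 *
 *	if (ioat_set_interrupt_coalesce(eng, 100) == 0)
 *		... completion interrupts now delayed up to ~100 us ...
 *
 * The request is rejected when it exceeds intrdelay_max or when the
 * hardware lacks INTRDELAY support.
 */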
uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}
void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
	ioat->acq_head = ioat->head;
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}

void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR4(KTR_IOAT, "%s channel=%u dispatch1 hw_head=%u head=%u", __func__,
	    ioat->chan_idx, ioat->hw_head & UINT16_MAX, ioat->head);
	KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */);
	CTR4(KTR_IOAT, "%s channel=%u dispatch2 hw_head=%u head=%u", __func__,
	    ioat->chan_idx, ioat->hw_head & UINT16_MAX, ioat->head);

	if (ioat->acq_head != ioat->head) {
		ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
		    (uint16_t)ioat->hw_head);

		if (!ioat->is_completion_pending) {
			ioat->is_completion_pending = TRUE;
			callout_reset(&ioat->poll_timer, 1,
			    ioat_poll_timer_callback, ioat);
		}
	}
	mtx_unlock(&ioat->submit_lock);
}
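/*
 * Example (sketch, not part of the driver): the expected submission pattern
 * under the batching API above.  'my_done', 'sc', and the bus addresses are
 * hypothetical consumer-side names:
 *
 *	static void
 *	my_done(void *arg, int error)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		sc->dma_error = error;	(0 on success, errno on channel error)
 *		wakeup(sc);
 *	}
 *
 *	ioat_acquire(eng);
 *	(void)ioat_copy(eng, dst_busaddr, src_busaddr, len, my_done, sc,
 *	    DMA_INT_EN);
 *	ioat_release(eng);
 *
 * Everything queued between acquire and release is handed to the hardware
 * with a single DMACOUNT doorbell write in ioat_release().  Callers that
 * need several slots atomically can use ioat_acquire_reserve() instead (see
 * the sketch after ioat_reserve_space() below).
 */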
static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int mflags;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (size > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
		    __func__, ioat->max_xfer_size, (unsigned)size);
		return (NULL);
	}

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}
struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx",
	    __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",
		    __func__);
		return (NULL);
	}
	if (((src | dst) & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
		ioat_log_message(0, "%s: Device lacks CRC capability\n",
		    __func__);
		return (NULL);
	}
	if ((src & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src invalid\n",
		    __func__);
		return (NULL);
	}
	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);
		return (NULL);
	}
	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",
		    __func__);
		return (NULL);
	}

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",
		    __func__);
		return (NULL);
	}

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",
		    __func__);
		return (NULL);
	}

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
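/*
 * Example: with ring_size_order == 13 (8192 slots), head == 8200 and
 * tail == 8195 give (8200 - 8195) & 8191 == 5 active descriptors, and
 * 8192 - 5 - 1 == 8186 free slots.  One slot is always sacrificed so that a
 * completely full ring cannot be confused with an empty one.
 */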
/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for 'num_descs'.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	boolean_t dug;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;
	dug = FALSE;

	if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) {
		error = EINVAL;
		goto out;
	}

	for (;;) {
		if (ioat->quiescing) {
			error = ENXIO;
			goto out;
		}

		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__,
		    ioat->chan_idx, num_descs);

		if (!dug && !ioat->is_submitter_processing) {
			ioat->is_submitter_processing = TRUE;
			mtx_unlock(&ioat->submit_lock);

			CTR2(KTR_IOAT, "%s channel=%u attempting to process events",
			    __func__, ioat->chan_idx);
			ioat_process_events(ioat);

			mtx_lock(&ioat->submit_lock);
			dug = TRUE;
			KASSERT(ioat->is_submitter_processing == TRUE,
			    ("is_submitter_processing"));
			ioat->is_submitter_processing = FALSE;
			wakeup(&ioat->tail);
			continue;
		}

		if ((mflags & M_WAITOK) == 0) {
			error = EAGAIN;
			break;
		}
		CTR2(KTR_IOAT, "%s channel=%u blocking on completions",
		    __func__, ioat->chan_idx);
		msleep(&ioat->tail, &ioat->submit_lock, 0,
		    "ioat_full", 0);
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(!ioat->quiescing || error == ENXIO,
	    ("reserved during quiesce"));
	return (error);
}
static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor *ring)
{

	free(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static union ioat_hw_descriptor *
ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
}
static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	union ioat_hw_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_descriptor(ioat, ioat->tail + 0);
	dump_descriptor(desc);

	desc = ioat_get_descriptor(ioat, ioat->tail + 1);
	dump_descriptor(desc);
}
static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	ioat_log_message(3, "%s\n", __func__);

	ioat_process_events(ioat);
}
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);
	CTR5(KTR_IOAT, "%s channel=%u head=%u hw_head=%u tail=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->hw_head & UINT16_MAX,
	    ioat->tail);

	ioat->stats.descriptors_submitted++;
}
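/*
 * Note: head indexes the software ring (masked by the ring size on use),
 * while hw_head is a running count whose low 16 bits are written to the
 * DMACOUNT doorbell in ioat_release(); the hardware only ever sees it
 * truncated to uint16_t.
 */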
static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	mtx_lock(IOAT_REFLK);
	while (ioat->resetting && !ioat->destroying)
		msleep(&ioat->resetting, IOAT_REFLK, 0, "IRH_drain", 0);
	if (ioat->destroying) {
		mtx_unlock(IOAT_REFLK);
		return (ENXIO);
	}
	ioat->resetting = TRUE;

	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	/*
	 * Suspend ioat_process_events while the hardware and softc are in an
	 * indeterminate state.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = TRUE;
	mtx_unlock(&ioat->cleanup_lock);

	CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__,
	    ioat->chan_idx);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__,
	    ioat->chan_idx);

	/*
	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);
	CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__,
	    ioat->chan_idx);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; restoring\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;
	KASSERT(!ioat->is_completion_pending, ("bogus completion_pending"));

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
	error = 0;

	CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__,
	    ioat->chan_idx);

out:
	/* Enqueues a null operation and ensures it completes. */
	if (error == 0) {
		error = ioat_start_channel(ioat);
		CTR2(KTR_IOAT, "%s channel=%u started channel", __func__,
		    ioat->chan_idx);
	}

	/*
	 * Resume completions now that ring state is consistent.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = FALSE;
	mtx_unlock(&ioat->cleanup_lock);

	/* Unblock submission of new work */
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);

	ioat->resetting = FALSE;
	wakeup(&ioat->resetting);

	if (ioat->is_completion_pending)
		callout_reset(&ioat->poll_timer, 1, ioat_poll_timer_callback,
		    ioat);
	CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
	mtx_unlock(IOAT_REFLK);

	return (error);
}
static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}

	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}
static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef PRECISION
out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}
static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}
static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}
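/*
 * Example: dump_descriptor() prints a 64-byte hardware descriptor as two
 * rows of eight 32-bit words.  This is useful when correlating a halted
 * channel's CHAINADDR/completion address with the on-ring descriptor
 * contents (see ioat_halted_debug() above).
 */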
static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
	    &ioat->hw_head, 0, "HW DMACOUNT");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing",
	    CTLFLAG_RD, &ioat->is_submitter_processing, 0,
	    "submitter processing");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
	    CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
	    &ioat->is_reset_pending, 0, "reset pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
	    &ioat->is_channel_running, 0, "channel running");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}
static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));
#endif

	return (ioat);
}

static inline void
ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, FALSE);
}

static inline void
ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, TRUE);
}

static inline void
_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	uint32_t old;

	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	if (n == 0)
		return;

#ifdef INVARIANTS
	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));
#endif

	/* Skip acquiring the lock if resulting refcnt > 0. */
	for (;;) {
		old = ioat->refcnt;
		if (old <= n)
			break;
		if (atomic_cmpset_32(&ioat->refcnt, old, old - n))
			return;
	}

	if (locked)
		mtx_assert(IOAT_REFLK, MA_OWNED);
	else
		mtx_lock(IOAT_REFLK);

	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	if (old == n)
		wakeup(IOAT_REFLK);
	if (!locked)
		mtx_unlock(IOAT_REFLK);
}

static inline void
ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}
static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(IOAT_REFLK, MA_OWNED);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);
}
#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
DB_SHOW_COMMAND(ioat, db_show_ioat)
{
	struct ioat_softc *sc;
	unsigned idx;

	if (!have_addr)
		goto usage;
	idx = (unsigned)addr;
	if (idx >= ioat_channel_index)
		goto usage;

	sc = ioat_channel[idx];
	db_printf("ioat softc at %p\n", sc);
	if (sc == NULL)
		return;

	db_printf(" version: %d\n", sc->version);
	db_printf(" chan_idx: %u\n", sc->chan_idx);
	db_printf(" submit_lock: ");
	db_show_lock(&sc->submit_lock);

	db_printf(" capabilities: %b\n", (int)sc->capabilities,
	    IOAT_DMACAP_STR);
	db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
	db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);

	db_printf(" poll_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
	db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
	db_printf("  c_func: %p\n", sc->poll_timer.c_func);
	db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);

	db_printf(" quiescing: %d\n", (int)sc->quiescing);
	db_printf(" destroying: %d\n", (int)sc->destroying);
	db_printf(" is_submitter_processing: %d\n",
	    (int)sc->is_submitter_processing);
	db_printf(" is_completion_pending: %d\n", (int)sc->is_completion_pending);
	db_printf(" is_reset_pending: %d\n", (int)sc->is_reset_pending);
	db_printf(" is_channel_running: %d\n", (int)sc->is_channel_running);
	db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);
	db_printf(" resetting: %d\n", (int)sc->resetting);

	db_printf(" head: %u\n", sc->head);
	db_printf(" tail: %u\n", sc->tail);
	db_printf(" hw_head: %u\n", sc->hw_head);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);
	db_printf(" last_seen: 0x%lx\n", sc->last_seen);
	db_printf(" ring: %p\n", sc->ring);
	db_printf(" descriptors: %p\n", sc->hw_desc_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)sc->hw_desc_bus_addr);

	db_printf("  ring[%u] (tail):\n", sc->tail %
	    (1 << sc->ring_size_order));
	db_printf("   id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id);
	db_printf("   addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->tail));
	db_printf("   next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->tail)->generic.next);

	db_printf("  ring[%u] (head - 1):\n", (sc->head - 1) %
	    (1 << sc->ring_size_order));
	db_printf("   id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id);
	db_printf("   addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head - 1));
	db_printf("   next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head - 1)->generic.next);

	db_printf("  ring[%u] (head):\n", (sc->head) %
	    (1 << sc->ring_size_order));
	db_printf("   id: %u\n", ioat_get_ring_entry(sc, sc->head)->id);
	db_printf("   addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head));
	db_printf("   next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head)->generic.next);

	for (idx = 0; idx < (1 << sc->ring_size_order); idx++)
		if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK)
		    == RING_PHYS_ADDR(sc, idx))
			db_printf("  ring[%u] == hardware tail\n", idx);

	db_printf(" cleanup_lock: ");
	db_show_lock(&sc->cleanup_lock);

	db_printf(" refcnt: %u\n", sc->refcnt);
#ifdef INVARIANTS
	CTASSERT(IOAT_NUM_REF_KINDS == 2);
	db_printf(" refkinds: [ENG=%u, DESCR=%u]\n", sc->refkinds[0],
	    sc->refkinds[1]);
#endif

	db_printf(" stats:\n");
	db_printf("  interrupts: %lu\n", sc->stats.interrupts);
	db_printf("  descriptors_processed: %lu\n", sc->stats.descriptors_processed);
	db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
	db_printf("  descriptors_submitted: %lu\n", sc->stats.descriptors_submitted);

	db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
	db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);

	db_printf(" hw status:\n");
	db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
	db_printf("  chanctrl: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
	db_printf("  chancmd: 0x%x\n",
	    (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
	db_printf("  dmacount: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
	db_printf("  chainaddr: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
	db_printf("  chancmp: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
	db_printf("  chanerr: %b\n",
	    (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
	return;
usage:
	db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);
	return;
}
#endif /* DDB */