/*-
 * Copyright (C) 2012 Intel Corporation
 * All rights reserved.
 * Copyright (C) 2018 Alexander Motin <mav@FreeBSD.org>
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");
#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/domainset.h>
#include <sys/fail.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/ktr.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/rman.h>
#include <sys/sbuf.h>
#include <sys/smp.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>
#include <sys/time.h>
#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#ifdef DDB
#include <ddb/ddb.h>
#endif

#include "ioat.h"
#include "ioat_hw.h"
#include "ioat_internal.h"
#ifndef BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif
static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat, boolean_t intr);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor *);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static union ioat_hw_descriptor *ioat_get_descriptor(struct ioat_softc *,
    uint32_t index);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *,
    uint32_t index);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_poll_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static void ioat_get(struct ioat_softc *);
static void ioat_put(struct ioat_softc *);
static void ioat_drain_locked(struct ioat_softc *);
#define	ioat_log_message(v, ...) do {					\
	if ((v) <= g_ioat_debug_level) {				\
		device_printf(ioat->device, __VA_ARGS__);		\
	}								\
} while (0)
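/*
 * Note (added for clarity): the macro above deliberately refers to a local
 * variable named "ioat" at each call site instead of taking the softc as a
 * parameter, so it only compiles inside functions that declare one.
 */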
MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");

static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");

unsigned g_ioat_ring_order = 13;
SYSCTL_UINT(_hw_ioat, OID_AUTO, ring_order, CTLFLAG_RDTUN, &g_ioat_ring_order,
    0, "Set IOAT ring order. (1 << this) == ring size.");
/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ioat_probe),
	DEVMETHOD(device_attach,    ioat_attach),
	DEVMETHOD(device_detach,    ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);
/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static unsigned ioat_channel_index = 0;
SYSCTL_UINT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");
static struct mtx ioat_list_mtx;
MTX_SYSINIT(ioat_list_mtx, &ioat_list_mtx, "ioat list mtx", MTX_DEF);
static struct _pcsid
{
	uint32_t   type;
	const char *desc;
} pci_ids[] = {
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },

	{ 0x20218086, "SKX IOAT" },
};

MODULE_PNP_INFO("W32:vendor/device;D:#", pci, ioat, pci_ids,
    nitems(pci_ids));
/*
 * OS <-> Driver linkage functions
 */
static int
ioat_probe(device_t device)
{
	struct _pcsid *ep;
	uint32_t type;

	type = pci_get_devid(device);
	for (ep = pci_ids; ep < &pci_ids[nitems(pci_ids)]; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);
			return (0);
		}
	}
	return (ENXIO);
}
static int
ioat_attach(device_t device)
{
	struct ioat_softc *ioat;
	int error, i;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;
	if (bus_get_domain(device, &ioat->domain) != 0)
		ioat->domain = 0;
	ioat->cpu = CPU_FFS(&cpuset_domain[ioat->domain]) - 1;
	if (ioat->cpu < 0)
		ioat->cpu = CPU_FIRST();

	error = ioat_map_pci_bar(ioat);
	if (error != 0)
		goto err;

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {
		error = ENODEV;
		goto err;
	}

	error = ioat3_attach(device);
	if (error != 0)
		goto err;

	error = pci_enable_busmaster(device);
	if (error != 0)
		goto err;

	error = ioat_setup_intr(ioat);
	if (error != 0)
		goto err;

	error = ioat_reset_hw(ioat);
	if (error != 0)
		goto err;

	ioat_process_events(ioat, FALSE);
	ioat_setup_sysctl(device);

	mtx_lock(&ioat_list_mtx);
	for (i = 0; i < IOAT_MAX_CHANNELS; i++) {
		if (ioat_channel[i] == NULL)
			break;
	}
	if (i >= IOAT_MAX_CHANNELS) {
		mtx_unlock(&ioat_list_mtx);
		device_printf(device, "Too many I/OAT devices in system\n");
		error = ENXIO;
		goto err;
	}
	ioat->chan_idx = i;
	ioat_channel[i] = ioat;
	if (i >= ioat_channel_index)
		ioat_channel_index = i + 1;
	mtx_unlock(&ioat_list_mtx);

	ioat_test_attach();

err:
	if (error != 0)
		ioat_detach(device);
	return (error);
}
static inline int
ioat_bus_dmamap_destroy(struct ioat_softc *ioat, const char *func,
    bus_dma_tag_t dmat, bus_dmamap_t map)
{
	int error;

	error = bus_dmamap_destroy(dmat, map);
	if (error != 0) {
		ioat_log_message(0,
		    "%s: bus_dmamap_destroy failed %d\n", func, error);
	}

	return (error);
}
static int
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;
	int i, error;

	ioat = DEVICE2SOFTC(device);

	mtx_lock(&ioat_list_mtx);
	ioat_channel[ioat->chan_idx] = NULL;
	while (ioat_channel_index > 0 &&
	    ioat_channel[ioat_channel_index - 1] == NULL)
		ioat_channel_index--;
	mtx_unlock(&ioat_list_mtx);

	ioat_test_detach();
	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(&ioat->submit_lock);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);
	wakeup(&ioat->resetting);

	ioat_drain_locked(ioat);
	mtx_unlock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	while (ioat_get_active(ioat) > 0)
		msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);
	mtx_unlock(&ioat->cleanup_lock);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->poll_timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->data_tag != NULL) {
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].src_dmamap);
			if (error != 0)
				return (error);
		}
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].dst_dmamap);
			if (error != 0)
				return (error);
		}
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].src2_dmamap);
			if (error != 0)
				return (error);
		}
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_tag, ioat->ring[i].dst2_dmamap);
			if (error != 0)
				return (error);
		}
		bus_dma_tag_destroy(ioat->data_tag);
	}

	if (ioat->data_crc_tag != NULL) {
		for (i = 0; i < 1 << ioat->ring_size_order; i++) {
			error = ioat_bus_dmamap_destroy(ioat, __func__,
			    ioat->data_crc_tag, ioat->ring[i].crc_dmamap);
			if (error != 0)
				return (error);
		}
		bus_dma_tag_destroy(ioat->data_crc_tag);
	}

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);
	}

	if (ioat->hw_desc_ring != NULL) {
		bus_dmamap_unload(ioat->hw_desc_tag, ioat->hw_desc_map);
		bus_dmamem_free(ioat->hw_desc_tag, ioat->hw_desc_ring,
		    ioat->hw_desc_map);
		bus_dma_tag_destroy(ioat->hw_desc_tag);
	}

	return (0);
}
static int
ioat_teardown_intr(struct ioat_softc *ioat)
{

	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
	return (0);
}
static int
ioat_start_channel(struct ioat_softc *ioat)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t status;
	uint32_t chanerr;
	int i;

	ioat_acquire(&ioat->dmaengine);

	/* Submit 'NULL' operation manually to avoid quiescing flag */
	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->dma;
	dmadesc = &desc->bus_dmadesc;

	dmadesc->callback_fn = NULL;
	dmadesc->callback_arg = NULL;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = IOAT_OP_COPY;
	hw_desc->u.control_generic.completion_update = 1;

	hw_desc->size = 8;
	hw_desc->src_addr = 0;
	hw_desc->dest_addr = 0;
	hw_desc->u.control.null = 1;

	ioat_submit_single(ioat);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		DELAY(1);
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))
			return (0);
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
	    IOAT_CHANERR_STR);
	return (ENXIO);
}
/*
 * Initialize Hardware
 */
static int
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor *ring;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	void *hw_desc;
	size_t ringsz;
	int i, num_descriptors;
	int error;
	uint8_t xfercap;

	error = 0;
	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(0, "Capabilities: %b\n", (int)ioat->capabilities,
	    IOAT_DMACAP_STR);

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->poll_timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->cleanup_lock);
	mtx_lock(&ioat->submit_lock);
	mtx_unlock(&ioat->submit_lock);
	mtx_unlock(&ioat->cleanup_lock);

	ioat->is_submitter_processing = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)
		return (ENOMEM);

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,
	    0);
	if (error != 0)
		return (error);

	ioat->ring_size_order = g_ioat_ring_order;
	num_descriptors = 1 << ioat->ring_size_order;
	ringsz = sizeof(struct ioat_dma_hw_descriptor) * num_descriptors;

	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    2 * 1024 * 1024, 0x0, (bus_addr_t)BUS_SPACE_MAXADDR_40BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, ringsz, 1, ringsz, 0, NULL, NULL,
	    &ioat->hw_desc_tag);
	if (error != 0)
		return (error);

	error = bus_dmamem_alloc(ioat->hw_desc_tag, &hw_desc,
	    BUS_DMA_ZERO | BUS_DMA_WAITOK, &ioat->hw_desc_map);
	if (error != 0)
		return (error);

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    ringsz, ioat_dmamap_cb, &ioat->hw_desc_bus_addr, BUS_DMA_WAITOK);
	if (error != 0)
		return (error);

	ioat->hw_desc_ring = hw_desc;

	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    1, 0, BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ioat->max_xfer_size, 1, ioat->max_xfer_size, 0, NULL, NULL,
	    &ioat->data_crc_tag);
	if (error != 0) {
		ioat_log_message(0, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		return (error);
	}

	error = bus_dma_tag_create(bus_get_dma_tag(ioat->device),
	    1, 0, BUS_SPACE_MAXADDR_48BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    ioat->max_xfer_size, 1, ioat->max_xfer_size, 0, NULL, NULL,
	    &ioat->data_tag);
	if (error != 0) {
		ioat_log_message(0, "%s: bus_dma_tag_create failed %d\n",
		    __func__, error);
		return (error);
	}

	ioat->ring = malloc_domainset(num_descriptors * sizeof(*ring), M_IOAT,
	    DOMAINSET_PREF(ioat->domain), M_ZERO | M_WAITOK);

	ring = ioat->ring;
	for (i = 0; i < num_descriptors; i++) {
		memset(&ring[i].bus_dmadesc, 0, sizeof(ring[i].bus_dmadesc));
		ring[i].id = i;
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].src_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].dst_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].src2_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
		error = bus_dmamap_create(ioat->data_tag, 0,
		    &ring[i].dst2_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
		error = bus_dmamap_create(ioat->data_crc_tag, 0,
		    &ring[i].crc_dmamap);
		if (error != 0) {
			ioat_log_message(0,
			    "%s: bus_dmamap_create failed %d\n", __func__,
			    error);
			return (error);
		}
	}

	for (i = 0; i < num_descriptors; i++) {
		dma_hw_desc = &ioat->hw_desc_ring[i].dma;
		dma_hw_desc->next = RING_PHYS_ADDR(ioat, i + 1);
	}

	ioat->tail = ioat->head = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;
	return (0);
}
static int
ioat_map_pci_bar(struct ioat_softc *ioat)
{

	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");
		return (ENODEV);
	}

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);
	return (0);
}
static void
ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;
}

static void
ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	bus_addr_t *baddr;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	baddr = arg;
	*baddr = segs->ds_addr;
}
/*
 * Interrupt setup and handlers
 */
static int
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	int error;
	boolean_t use_msix;
	boolean_t force_legacy_interrupts;

	use_msix = FALSE;
	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		num_vectors = 1;
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)
			use_msix = TRUE;
	}

	if (use_msix) {
		ioat->rid = 1;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_ACTIVE);
	} else {
		ioat->rid = 0;
		ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
		    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);
	}
	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");
		return (ENOMEM);
	}

	ioat->tag = NULL;
	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");
		return (error);
	}

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);
	return (0);
}
static boolean_t
ioat_model_resets_msix(struct ioat_softc *ioat)
{
	u_int32_t pciid;

	pciid = pci_get_devid(ioat->device);
	switch (pciid) {
		/* BWD: */
	case 0x0c508086:
	case 0x0c518086:
	case 0x0c528086:
	case 0x0c538086:
		/* BDXDE: */
	case 0x6f508086:
	case 0x6f518086:
	case 0x6f528086:
	case 0x6f538086:
		return (TRUE);
	}

	return (FALSE);
}
static void
ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat, TRUE);
}
static int
chanerr_to_errno(uint32_t chanerr)
{

	if (chanerr == 0)
		return (0);
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
		return (EFAULT);
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
		return (EIO);
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)
		return (EIO);
	return (EIO);
}
static void
ioat_process_events(struct ioat_softc *ioat, boolean_t intr)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;
	int error;

	mtx_lock(&ioat->cleanup_lock);

	/*
	 * Don't run while the hardware is being reset.  Reset is responsible
	 * for blocking new work and draining & completing existing work, so
	 * there is nothing to do until new work is queued after reset anyway.
	 */
	if (ioat->resetting_cleanup) {
		mtx_unlock(&ioat->cleanup_lock);
		return;
	}

	completed = 0;
	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	if (status < ioat->hw_desc_bus_addr ||
	    status >= ioat->hw_desc_bus_addr + (1 << ioat->ring_size_order) *
	    sizeof(struct ioat_generic_hw_descriptor))
		panic("Bogus completion address %jx (channel %u)",
		    (uintmax_t)status, ioat->chan_idx);

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		goto out;
	}
	CTR4(KTR_IOAT, "%s channel=%u hw_status=0x%lx last_seen=0x%lx",
	    __func__, ioat->chan_idx, comp_update, ioat->last_seen);

	while (RING_PHYS_ADDR(ioat, ioat->tail - 1) != status) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) ok cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		bus_dmamap_unload(ioat->data_tag, desc->src_dmamap);
		bus_dmamap_unload(ioat->data_tag, desc->dst_dmamap);
		bus_dmamap_unload(ioat->data_tag, desc->src2_dmamap);
		bus_dmamap_unload(ioat->data_tag, desc->dst2_dmamap);
		bus_dmamap_unload(ioat->data_crc_tag, desc->crc_dmamap);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg, 0);

		completed++;
		ioat->tail++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	if (completed != 0) {
		ioat->last_seen = RING_PHYS_ADDR(ioat, ioat->tail - 1);
		ioat->stats.descriptors_processed += completed;
		wakeup(&ioat->tail);
	}

out:
	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	/*
	 * The device doesn't seem to reliably push suspend/halt statuses to
	 * the channel completion memory address, so poll the device register
	 * here.  For performance reasons skip it on interrupts, do it only
	 * on much more rare polling events.
	 */
	if (!intr)
		comp_update = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;
	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))
		return;

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel.  Flush any outstanding
	 * work with error status and restart the engine.
	 */
	mtx_lock(&ioat->submit_lock);
	ioat->quiescing = TRUE;
	mtx_unlock(&ioat->submit_lock);

	/*
	 * This is safe to do here because the submit queue is quiesced.  We
	 * know that we will drain all outstanding events, so ioat_reset_hw
	 * can't deadlock.  It is necessary to protect other ioat_process_event
	 * threads from racing ioat_reset_hw, reading an indeterminate hw
	 * state, and attempting to continue issuing completions.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (1 <= g_ioat_debug_level)
		ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR5(KTR_IOAT, "channel=%u completing desc idx %u (%p) err cb %p(%p)",
		    ioat->chan_idx, ioat->tail, dmadesc, dmadesc->callback_fn,
		    dmadesc->callback_arg);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat->tail++;
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;
	}
	CTR5(KTR_IOAT, "%s channel=%u head=%u tail=%u active=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail, ioat_get_active(ioat));

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));
}
static void
ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;
	int error;

	ioat = ctx;
	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
	(void)error;
}
/*
 * User API functions
 */
unsigned
ioat_get_nchannels(void)
{

	return (ioat_channel_index);
}
bus_dmaengine_t
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	mtx_lock(&ioat_list_mtx);
	if (index >= ioat_channel_index ||
	    (ioat = ioat_channel[index]) == NULL) {
		mtx_unlock(&ioat_list_mtx);
		return (NULL);
	}
	mtx_lock(&ioat->submit_lock);
	mtx_unlock(&ioat_list_mtx);

	if (ioat->destroying) {
		mtx_unlock(&ioat->submit_lock);
		return (NULL);
	}

	ioat_get(ioat);
	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0) {
			ioat_put(ioat);
			mtx_unlock(&ioat->submit_lock);
			return (NULL);
		}

		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, &ioat->submit_lock, 0, "getdma", 0);

		if (ioat->destroying) {
			ioat_put(ioat);
			mtx_unlock(&ioat->submit_lock);
			return (NULL);
		}
	}
	mtx_unlock(&ioat->submit_lock);
	return (&ioat->dmaengine);
}
void
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	ioat_put(ioat);
	mtx_unlock(&ioat->submit_lock);
}
int
ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);
}

size_t
ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);
}

uint32_t
ioat_get_capabilities(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->capabilities);
}

int
ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
		return (ENODEV);
	if (delay > ioat->intrdelay_max)
		return (ERANGE);

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;
	return (0);
}

uint16_t
ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);
}
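/*
 * Usage sketch (illustrative, not from the driver): a consumer holding a
 * bus_dmaengine_t "eng" could cap interrupt coalescing at whatever the
 * channel supports:
 *
 *	uint16_t max = ioat_get_max_coalesce_period(eng);
 *
 *	if (ioat_set_interrupt_coalesce(eng, min(max, 100)) != 0)
 *		; (channel lacks INTRDELAY, or the value is out of range)
 */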
void
ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);
	ioat->acq_head = ioat->head;
}

int
ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;
	int error;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);
	return (error);
}
void
ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR3(KTR_IOAT, "%s channel=%u dispatch1 head=%u", __func__,
	    ioat->chan_idx, ioat->head);
	KFAIL_POINT_CODE(DEBUG_FP, ioat_release, /* do nothing */);
	CTR3(KTR_IOAT, "%s channel=%u dispatch2 head=%u", __func__,
	    ioat->chan_idx, ioat->head);

	if (ioat->acq_head != ioat->head) {
		ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET,
		    (uint16_t)ioat->head);

		if (!callout_pending(&ioat->poll_timer)) {
			callout_reset_on(&ioat->poll_timer, 1,
			    ioat_poll_timer_callback, ioat, ioat->cpu);
		}
	}
	mtx_unlock(&ioat->submit_lock);
}
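/*
 * Submission protocol sketch (hypothetical consumer; "handle_done", "ctx"
 * and the physical addresses are illustrative only).  Descriptor
 * constructors must be bracketed by acquire/release: ioat_acquire() takes
 * the submit_lock, and ioat_release() writes the new head to DMACOUNT,
 * arms the poll timer, and drops the lock:
 *
 *	bus_dmaengine_t eng = ioat_get_dmaengine(0, M_WAITOK);
 *
 *	ioat_acquire(eng);
 *	(void)ioat_copy(eng, dst_phys, src_phys, len, handle_done, ctx,
 *	    DMA_INT_EN);
 *	ioat_release(eng);
 *	...
 *	ioat_put_dmaengine(eng);
 */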
static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	bus_dma_segment_t seg;
	int mflags, nseg, error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	KASSERT(size <= ioat->max_xfer_size, ("%s: size too big (%u > %u)",
	    __func__, (unsigned)size, ioat->max_xfer_size));

	if ((flags & DMA_NO_WAIT) != 0)
		mflags = M_NOWAIT;
	else
		mflags = M_WAITOK;

	if (ioat_reserve_space(ioat, 1, mflags) != 0)
		return (NULL);

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = &ioat_get_descriptor(ioat, ioat->head)->generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;

	if (src != 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag, desc->src_dmamap,
		    src, size, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->src_addr = seg.ds_addr;
	}

	if (dst != 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag, desc->dst_dmamap,
		    dst, size, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->dest_addr = seg.ds_addr;
	}

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
	return (desc);
}
struct bus_dmadesc *
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	hw_desc->u.control.null = 1;
	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);

	KASSERT(((src | dst) & (0xffffull << 48)) == 0,
	    ("%s: high 16 bits of src/dst are not zero", __func__));

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	CTR6(KTR_IOAT, "%s channel=%u desc=%p dest=%lx src=%lx len=%lx",
	    __func__, ioat->chan_idx, &desc->bus_dmadesc, dst, src, len);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	bus_size_t src1_len, dst1_len;
	bus_dma_segment_t seg;
	int nseg, error;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT(((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) == 0,
	    ("%s: high 16 bits of src/dst are not zero", __func__));
	KASSERT(((src1 | src2 | dst1 | dst2) & PAGE_MASK) == 0,
	    ("%s: addresses are not page-aligned", __func__));

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, 0, 0,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->dma;

	src1_len = (src2 != src1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
	nseg = -1;
	error = _bus_dmamap_load_phys(ioat->data_tag,
	    desc->src_dmamap, src1, src1_len, 0, &seg, &nseg);
	if (error != 0) {
		ioat_log_message(0, "%s: _bus_dmamap_load_phys"
		    " failed %d\n", __func__, error);
		return (NULL);
	}
	hw_desc->src_addr = seg.ds_addr;
	if (src1_len != 2 * PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag,
		    desc->src2_dmamap, src2, PAGE_SIZE, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->next_src_addr = seg.ds_addr;
	}

	dst1_len = (dst2 != dst1 + PAGE_SIZE) ? PAGE_SIZE : 2 * PAGE_SIZE;
	nseg = -1;
	error = _bus_dmamap_load_phys(ioat->data_tag,
	    desc->dst_dmamap, dst1, dst1_len, 0, &seg, &nseg);
	if (error != 0) {
		ioat_log_message(0, "%s: _bus_dmamap_load_phys"
		    " failed %d\n", __func__, error);
		return (NULL);
	}
	hw_desc->dest_addr = seg.ds_addr;
	if (dst1_len != 2 * PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_tag,
		    desc->dst2_dmamap, dst2, PAGE_SIZE, 0, &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->next_dest_addr = seg.ds_addr;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
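/*
 * Worked example for the page-break logic above (illustrative addresses,
 * 4 KB pages): with src1 = 0x10000 and src2 = 0x11000 the source pages are
 * adjacent, so the whole 8 KB is loaded as one segment; with dst1 = 0x20000
 * and dst2 = 0x23000 the destination pages are not adjacent, so dst2 gets
 * its own map and u.control.dest_page_break is set.
 */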
struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;
	bus_dma_segment_t seg;
	int nseg, error;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_MOVECRC) != 0,
	    ("%s: device lacks MOVECRC capability", __func__));
	KASSERT(((src | dst) & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of src/dst are not zero", __func__));
	teststore = (flags & _DMA_CRC_TESTSTORE);
	KASSERT(teststore != _DMA_CRC_TESTSTORE,
	    ("%s: TEST and STORE invalid", __func__));
	KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
	    ("%s: INLINE invalid without TEST or STORE", __func__));

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_MOVECRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_MOVECRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;
		break;
	}

	KASSERT((flags & DMA_CRC_INLINE) != 0 ||
	    (crcptr & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of crcptr are not zero", __func__));

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_crc_tag,
		    desc->crc_dmamap, crcptr, sizeof(uint32_t), 0,
		    &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->crc_address = seg.ds_addr;
	} else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;
	uint32_t teststore;
	uint8_t op;
	bus_dma_segment_t seg;
	int nseg, error;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_CRC) != 0,
	    ("%s: device lacks CRC capability", __func__));
	KASSERT((src & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of src are not zero", __func__));
	teststore = (flags & _DMA_CRC_TESTSTORE);
	KASSERT(teststore != _DMA_CRC_TESTSTORE,
	    ("%s: TEST and STORE invalid", __func__));
	KASSERT(teststore != 0 || (flags & DMA_CRC_INLINE) == 0,
	    ("%s: INLINE invalid without TEST or STORE", __func__));

	switch (teststore) {
	case DMA_CRC_STORE:
		op = IOAT_OP_CRC_STORE;
		break;
	case DMA_CRC_TEST:
		op = IOAT_OP_CRC_TEST;
		break;
	default:
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_CRC;
		break;
	}

	KASSERT((flags & DMA_CRC_INLINE) != 0 ||
	    (crcptr & (0xffffffull << 40)) == 0,
	    ("%s: high 24 bits of crcptr are not zero", __func__));

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->crc32;

	if ((flags & DMA_CRC_INLINE) == 0) {
		nseg = -1;
		error = _bus_dmamap_load_phys(ioat->data_crc_tag,
		    desc->crc_dmamap, crcptr, sizeof(uint32_t), 0,
		    &seg, &nseg);
		if (error != 0) {
			ioat_log_message(0, "%s: _bus_dmamap_load_phys"
			    " failed %d\n", __func__, error);
			return (NULL);
		}
		hw_desc->crc_address = seg.ds_addr;
	} else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
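/*
 * Flag-usage sketch (hypothetical caller; "eng", "handle_done", "ctx" are
 * illustrative): computing a CRC32 over a buffer and appending the result
 * directly after the data uses STORE + INLINE, in which case crcptr is
 * unused and may be zero:
 *
 *	d = ioat_crc(eng, buf_phys, len, NULL, 0, handle_done, ctx,
 *	    DMA_CRC_STORE | DMA_CRC_INLINE);
 */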
struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	KASSERT((ioat->capabilities & IOAT_DMACAP_BFILL) != 0,
	    ("%s: device lacks BFILL capability", __func__));
	KASSERT((dst & (0xffffull << 48)) == 0,
	    ("%s: high 16 bits of dst are not zero", __func__));

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, 0, dst,
	    callback_fn, callback_arg, flags);
	if (desc == NULL)
		return (NULL);

	hw_desc = &ioat_get_descriptor(ioat, desc->id)->fill;
	hw_desc->src_data = fillpattern;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
}
/*
 * Ring Management
 */
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
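/*
 * Worked example of the index arithmetic above (illustrative): with
 * ring_size_order == 2 (a 4-entry ring), head == 5 and tail == 3,
 * ioat_get_active() is (5 - 3) & 3 == 2 and ioat_get_ring_space() is
 * 4 - 2 - 1 == 1.  One slot is always held back so that a completely full
 * ring is distinguishable from an empty one.
 */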
/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * to put num_descs entries onto the ring.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
static int
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	boolean_t dug;
	int error;

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	error = 0;
	dug = FALSE;

	if (num_descs < 1 || num_descs >= (1 << ioat->ring_size_order)) {
		error = EINVAL;
		goto out;
	}

	for (;;) {
		if (ioat->quiescing) {
			error = ENXIO;
			goto out;
		}

		if (ioat_get_ring_space(ioat) >= num_descs)
			goto out;

		CTR3(KTR_IOAT, "%s channel=%u starved (%u)", __func__,
		    ioat->chan_idx, num_descs);

		if (!dug && !ioat->is_submitter_processing) {
			ioat->is_submitter_processing = TRUE;
			mtx_unlock(&ioat->submit_lock);

			CTR2(KTR_IOAT, "%s channel=%u attempting to process events",
			    __func__, ioat->chan_idx);
			ioat_process_events(ioat, FALSE);

			mtx_lock(&ioat->submit_lock);
			dug = TRUE;
			KASSERT(ioat->is_submitter_processing == TRUE,
			    ("is_submitter_processing"));
			ioat->is_submitter_processing = FALSE;
			wakeup(&ioat->tail);
			continue;
		}

		if ((mflags & M_WAITOK) == 0) {
			error = EAGAIN;
			break;
		}
		CTR2(KTR_IOAT, "%s channel=%u blocking on completions",
		    __func__, ioat->chan_idx);
		msleep(&ioat->tail, &ioat->submit_lock, 0,
		    "ioat_full", 0);
	}

out:
	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(!ioat->quiescing || error == ENXIO,
	    ("reserved during quiesce"));
	return (error);
}
static void
ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor *ring)
{

	free_domain(ring, M_IOAT);
}

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->ring[index % (1 << ioat->ring_size_order)]);
}

static union ioat_hw_descriptor *
ioat_get_descriptor(struct ioat_softc *ioat, uint32_t index)
{

	return (&ioat->hw_desc_ring[index % (1 << ioat->ring_size_order)]);
}
static void
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	union ioat_hw_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,
	    IOAT_CHANERR_STR);
	if (chanerr == 0)
		return;

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_descriptor(ioat, ioat->tail + 0);
	dump_descriptor(desc);

	desc = ioat_get_descriptor(ioat, ioat->tail + 1);
	dump_descriptor(desc);
}
static void
ioat_poll_timer_callback(void *arg)
{
	struct ioat_softc *ioat;

	ioat = arg;
	CTR1(KTR_IOAT, "%s", __func__);

	ioat_process_events(ioat, FALSE);

	mtx_lock(&ioat->submit_lock);
	if (ioat_get_active(ioat) > 0)
		callout_schedule(&ioat->poll_timer, 1);
	mtx_unlock(&ioat->submit_lock);
}
/*
 * Support Functions
 */
static void
ioat_submit_single(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	ioat->head++;
	CTR4(KTR_IOAT, "%s channel=%u head=%u tail=%u", __func__,
	    ioat->chan_idx, ioat->head, ioat->tail);

	ioat->stats.descriptors_submitted++;
}
static int
ioat_reset_hw(struct ioat_softc *ioat)
{
	uint64_t status;
	uint32_t chanerr;
	unsigned timeout;
	int error;

	CTR2(KTR_IOAT, "%s channel=%u", __func__, ioat->chan_idx);

	mtx_lock(&ioat->submit_lock);
	while (ioat->resetting && !ioat->destroying)
		msleep(&ioat->resetting, &ioat->submit_lock, 0, "IRH_drain", 0);
	if (ioat->destroying) {
		mtx_unlock(&ioat->submit_lock);
		return (ENXIO);
	}
	ioat->resetting = TRUE;
	ioat->quiescing = TRUE;
	mtx_unlock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	while (ioat_get_active(ioat) > 0)
		msleep(&ioat->tail, &ioat->cleanup_lock, 0, "ioat_drain", 1);

	/*
	 * Suspend ioat_process_events while the hardware and softc are in an
	 * indeterminate state.
	 */
	ioat->resetting_cleanup = TRUE;
	mtx_unlock(&ioat->cleanup_lock);

	CTR2(KTR_IOAT, "%s channel=%u quiesced and drained", __func__,
	    ioat->chan_idx);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))
		ioat_suspend(ioat);

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		DELAY(1000);
		status = ioat_get_chansts(ioat);
	}
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	CTR2(KTR_IOAT, "%s channel=%u hardware suspended", __func__,
	    ioat->chan_idx);

	/*
	 * IOAT v3 workaround - write CHANERRMSK_INT with 3E07h to mask out
	 * errors that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,
	    4);
	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	ioat_reset(ioat);
	CTR2(KTR_IOAT, "%s channel=%u hardware reset", __func__,
	    ioat->chan_idx);

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)
		DELAY(1000);
	if (timeout == 20) {
		error = ETIMEDOUT;
		goto out;
	}

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
		error = 0;
		goto out;
	}

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	if (chanerr != 0) {
		mtx_lock(&ioat->cleanup_lock);
		ioat_halted_debug(ioat, chanerr);
		mtx_unlock(&ioat->cleanup_lock);
		error = EIO;
		goto out;
	}

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = 0;
	ioat->last_seen = 0;
	*ioat->comp_update = 0;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, RING_PHYS_ADDR(ioat, 0));
	error = 0;
	CTR2(KTR_IOAT, "%s channel=%u configured channel", __func__,
	    ioat->chan_idx);

out:
	/* Enqueues a null operation and ensures it completes. */
	if (error == 0) {
		error = ioat_start_channel(ioat);
		CTR2(KTR_IOAT, "%s channel=%u started channel", __func__,
		    ioat->chan_idx);
	}

	/*
	 * Resume completions now that ring state is consistent.
	 */
	mtx_lock(&ioat->cleanup_lock);
	ioat->resetting_cleanup = FALSE;
	mtx_unlock(&ioat->cleanup_lock);

	/* Unblock submission of new work */
	mtx_lock(&ioat->submit_lock);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);

	ioat->resetting = FALSE;
	wakeup(&ioat->resetting);

	CTR2(KTR_IOAT, "%s channel=%u reset done", __func__, ioat->chan_idx);
	mtx_unlock(&ioat->submit_lock);

	return (error);
}
static int
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
	uint64_t status;
	int error;

	ioat = arg1;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);

	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}
static int
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	struct sbuf sb;
#define	PRECISION	"1"
	const uintmax_t factor = 10;
	uintmax_t rate;
	int error;

	ioat = arg1;
	sbuf_new_for_sysctl(&sb, NULL, 16, req);

	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");
		goto out;
	}
	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);
#undef PRECISION
out:
	error = sbuf_finish(&sb);
	sbuf_delete(&sb);
	if (error != 0 || req->newptr == NULL)
		return (error);
	return (EINVAL);
}
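/*
 * Worked example of the fixed-point output above (illustrative): with 15
 * descriptors processed over 4 interrupts, rate = 15 * 10 / 4 = 37 in
 * integer arithmetic, which prints as "3.7" -- one digit of precision
 * without using floating point in the kernel.
 */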
static int
sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
	int error, arg;

	ioat = arg1;

	arg = 0;
	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)
		return (error);

	error = SYSCTL_IN(req, &arg, sizeof(arg));
	if (error != 0)
		return (error);

	if (arg != 0)
		error = ioat_reset_hw(ioat);

	return (error);
}
static void
dump_descriptor(void *hw_desc)
{
	int i, j;

	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
		printf("\n");
	}
}
static void
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");
	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_submitter_processing",
	    CTLFLAG_RD, &ioat->is_submitter_processing, 0,
	    "submitter processing");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts",
	    CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors",
	    CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted",
	    CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored",
	    CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts",
	    CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr",
	    CTLFLAG_RW | CTLFLAG_STATS, &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
}
static void
ioat_get(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(ioat->refcnt < UINT32_MAX, ("refcnt overflow"));

	ioat->refcnt++;
}

static void
ioat_put(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);
	KASSERT(ioat->refcnt >= 1, ("refcnt error"));

	if (--ioat->refcnt == 0)
		wakeup(&ioat->refcnt);
}

static void
ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	while (ioat->refcnt > 0)
		msleep(&ioat->refcnt, &ioat->submit_lock, 0, "ioat_drain", 0);
}
#ifdef DDB
#define	_db_show_lock(lo)	LOCK_CLASS(lo)->lc_ddb_show(lo)
#define	db_show_lock(lk)	_db_show_lock(&(lk)->lock_object)
DB_SHOW_COMMAND(ioat, db_show_ioat)
{
	struct ioat_softc *sc;
	unsigned idx;

	if (!have_addr)
		goto usage;
	idx = (unsigned)addr;
	if (idx >= ioat_channel_index)
		goto usage;

	sc = ioat_channel[idx];
	db_printf("ioat softc at %p\n", sc);
	if (sc == NULL)
		return;

	db_printf(" version: %d\n", sc->version);
	db_printf(" chan_idx: %u\n", sc->chan_idx);
	db_printf(" submit_lock: ");
	db_show_lock(&sc->submit_lock);

	db_printf(" capabilities: %b\n", (int)sc->capabilities,
	    IOAT_DMACAP_STR);
	db_printf(" cached_intrdelay: %u\n", sc->cached_intrdelay);
	db_printf(" *comp_update: 0x%jx\n", (uintmax_t)*sc->comp_update);

	db_printf(" poll_timer:\n");
	db_printf("  c_time: %ju\n", (uintmax_t)sc->poll_timer.c_time);
	db_printf("  c_arg: %p\n", sc->poll_timer.c_arg);
	db_printf("  c_func: %p\n", sc->poll_timer.c_func);
	db_printf("  c_lock: %p\n", sc->poll_timer.c_lock);
	db_printf("  c_flags: 0x%x\n", (unsigned)sc->poll_timer.c_flags);

	db_printf(" quiescing: %d\n", (int)sc->quiescing);
	db_printf(" destroying: %d\n", (int)sc->destroying);
	db_printf(" is_submitter_processing: %d\n",
	    (int)sc->is_submitter_processing);
	db_printf(" intrdelay_supported: %d\n", (int)sc->intrdelay_supported);
	db_printf(" resetting: %d\n", (int)sc->resetting);

	db_printf(" head: %u\n", sc->head);
	db_printf(" tail: %u\n", sc->tail);
	db_printf(" ring_size_order: %u\n", sc->ring_size_order);
	db_printf(" last_seen: 0x%lx\n", sc->last_seen);
	db_printf(" ring: %p\n", sc->ring);
	db_printf(" descriptors: %p\n", sc->hw_desc_ring);
	db_printf(" descriptors (phys): 0x%jx\n",
	    (uintmax_t)sc->hw_desc_bus_addr);

	db_printf(" ring[%u] (tail):\n", sc->tail %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->tail)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->tail));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->tail)->generic.next);

	db_printf(" ring[%u] (head - 1):\n", (sc->head - 1) %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head - 1)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head - 1));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head - 1)->generic.next);

	db_printf(" ring[%u] (head):\n", (sc->head) %
	    (1 << sc->ring_size_order));
	db_printf("  id: %u\n", ioat_get_ring_entry(sc, sc->head)->id);
	db_printf("  addr: 0x%lx\n",
	    RING_PHYS_ADDR(sc, sc->head));
	db_printf("  next: 0x%lx\n",
	    ioat_get_descriptor(sc, sc->head)->generic.next);

	for (idx = 0; idx < (1 << sc->ring_size_order); idx++)
		if ((*sc->comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK)
		    == RING_PHYS_ADDR(sc, idx))
			db_printf("  ring[%u] == hardware tail\n", idx);

	db_printf(" cleanup_lock: ");
	db_show_lock(&sc->cleanup_lock);

	db_printf(" refcnt: %u\n", sc->refcnt);
	db_printf(" stats:\n");
	db_printf("  interrupts: %lu\n", sc->stats.interrupts);
	db_printf("  descriptors_processed: %lu\n", sc->stats.descriptors_processed);
	db_printf("  descriptors_error: %lu\n", sc->stats.descriptors_error);
	db_printf("  descriptors_submitted: %lu\n", sc->stats.descriptors_submitted);

	db_printf("  channel_halts: %u\n", sc->stats.channel_halts);
	db_printf("  last_halt_chanerr: %u\n", sc->stats.last_halt_chanerr);

	db_printf(" hw status:\n");
	db_printf("  status: 0x%lx\n", ioat_get_chansts(sc));
	db_printf("  chanctrl: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_CHANCTRL_OFFSET));
	db_printf("  chancmd: 0x%x\n",
	    (unsigned)ioat_read_1(sc, IOAT_CHANCMD_OFFSET));
	db_printf("  dmacount: 0x%x\n",
	    (unsigned)ioat_read_2(sc, IOAT_DMACOUNT_OFFSET));
	db_printf("  chainaddr: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHAINADDR_OFFSET_LOW));
	db_printf("  chancmp: 0x%lx\n",
	    ioat_read_double_4(sc, IOAT_CHANCMP_OFFSET_LOW));
	db_printf("  chanerr: %b\n",
	    (int)ioat_read_4(sc, IOAT_CHANERR_OFFSET), IOAT_CHANERR_STR);
	return;
usage:
	db_printf("usage: show ioat <0-%u>\n", ioat_channel_index);
}
#endif /* DDB */