/*-
 * Copyright (C) 2012 Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */
#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/ioccom.h>
#include <sys/kernel.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/mutex.h>
#include <sys/sysctl.h>
#include <sys/taskqueue.h>

#include <dev/pci/pcireg.h>
#include <dev/pci/pcivar.h>
#include <machine/bus.h>
#include <machine/resource.h>
#include <machine/stdarg.h>

#include "ioat_internal.h"
#ifndef BUS_SPACE_MAXADDR_40BIT
#define	BUS_SPACE_MAXADDR_40BIT	0xFFFFFFFFFFULL
#endif

#define	IOAT_INTR_TIMO	(hz / 10)
#define	IOAT_REFLK	(&ioat->submit_lock)
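/*
 * Note: IOAT_INTR_TIMO (hz / 10, i.e. roughly 100 ms) is the callout period
 * used to poll for completions that may have been missed while work is
 * outstanding, and IOAT_REFLK simply aliases the per-channel submit_lock,
 * which also protects the reference counts manipulated near the end of this
 * file.
 */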
static int ioat_probe(device_t device);
static int ioat_attach(device_t device);
static int ioat_detach(device_t device);
static int ioat_setup_intr(struct ioat_softc *ioat);
static int ioat_teardown_intr(struct ioat_softc *ioat);
static int ioat3_attach(device_t device);
static int ioat_start_channel(struct ioat_softc *ioat);
static int ioat_map_pci_bar(struct ioat_softc *ioat);
static void ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg,
    int error);
static void ioat_interrupt_handler(void *arg);
static boolean_t ioat_model_resets_msix(struct ioat_softc *ioat);
static int chanerr_to_errno(uint32_t);
static void ioat_process_events(struct ioat_softc *ioat);
static inline uint32_t ioat_get_active(struct ioat_softc *ioat);
static inline uint32_t ioat_get_ring_space(struct ioat_softc *ioat);
static void ioat_free_ring(struct ioat_softc *, uint32_t size,
    struct ioat_descriptor **);
static void ioat_free_ring_entry(struct ioat_softc *ioat,
    struct ioat_descriptor *desc);
static struct ioat_descriptor *ioat_alloc_ring_entry(struct ioat_softc *,
    int mflags);
static int ioat_reserve_space(struct ioat_softc *, uint32_t, int mflags);
static struct ioat_descriptor *ioat_get_ring_entry(struct ioat_softc *ioat,
    uint32_t index);
static struct ioat_descriptor **ioat_prealloc_ring(struct ioat_softc *,
    uint32_t size, boolean_t need_dscr, int mflags);
static int ring_grow(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static int ring_shrink(struct ioat_softc *, uint32_t oldorder,
    struct ioat_descriptor **);
static void ioat_halted_debug(struct ioat_softc *, uint32_t);
static void ioat_timer_callback(void *arg);
static void dump_descriptor(void *hw_desc);
static void ioat_submit_single(struct ioat_softc *ioat);
static void ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg,
    int error);
static int ioat_reset_hw(struct ioat_softc *ioat);
static void ioat_reset_hw_task(void *, int);
static void ioat_setup_sysctl(device_t device);
static int sysctl_handle_reset(SYSCTL_HANDLER_ARGS);
static inline struct ioat_softc *ioat_get(struct ioat_softc *,
    enum ioat_ref_kind);
static inline void ioat_put(struct ioat_softc *, enum ioat_ref_kind);
static inline void _ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind, boolean_t);
static inline void ioat_putn(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static inline void ioat_putn_locked(struct ioat_softc *, uint32_t,
    enum ioat_ref_kind);
static void ioat_drain_locked(struct ioat_softc *);
#define	ioat_log_message(v, ...) do {				\
	if ((v) <= g_ioat_debug_level) {			\
		device_printf(ioat->device, __VA_ARGS__);	\
	}							\
} while (0)

MALLOC_DEFINE(M_IOAT, "ioat", "ioat driver memory allocations");
SYSCTL_NODE(_hw, OID_AUTO, ioat, CTLFLAG_RD, 0, "ioat node");
static int g_force_legacy_interrupts;
SYSCTL_INT(_hw_ioat, OID_AUTO, force_legacy_interrupts, CTLFLAG_RDTUN,
    &g_force_legacy_interrupts, 0, "Set to non-zero to force MSI-X disabled");

int g_ioat_debug_level = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, debug_level, CTLFLAG_RWTUN, &g_ioat_debug_level,
    0, "Set log level (0-3) for ioat(4). Higher is more verbose.");
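/*
 * Informational example: both knobs live under the hw.ioat sysctl tree.
 * force_legacy_interrupts is a read-only tunable (CTLFLAG_RDTUN), so it is
 * normally set from /boot/loader.conf, while debug_level (CTLFLAG_RWTUN)
 * can also be changed at runtime:
 *
 *	hw.ioat.force_legacy_interrupts=1	(loader.conf)
 *	# sysctl hw.ioat.debug_level=3		(runtime, very verbose)
 */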
/*
 * OS <-> Driver interface structures
 */
static device_method_t ioat_pci_methods[] = {
	/* Device interface */
	DEVMETHOD(device_probe,     ioat_probe),
	DEVMETHOD(device_attach,    ioat_attach),
	DEVMETHOD(device_detach,    ioat_detach),
	DEVMETHOD_END
};

static driver_t ioat_pci_driver = {
	"ioat",
	ioat_pci_methods,
	sizeof(struct ioat_softc),
};

static devclass_t ioat_devclass;
DRIVER_MODULE(ioat, pci, ioat_pci_driver, ioat_devclass, 0, 0);
MODULE_VERSION(ioat, 1);

/*
 * Private data structures
 */
static struct ioat_softc *ioat_channel[IOAT_MAX_CHANNELS];
static int ioat_channel_index = 0;
SYSCTL_INT(_hw_ioat, OID_AUTO, channels, CTLFLAG_RD, &ioat_channel_index, 0,
    "Number of IOAT channels attached");
	{ 0x34308086, "TBG IOAT Ch0" },
	{ 0x34318086, "TBG IOAT Ch1" },
	{ 0x34328086, "TBG IOAT Ch2" },
	{ 0x34338086, "TBG IOAT Ch3" },
	{ 0x34298086, "TBG IOAT Ch4" },
	{ 0x342a8086, "TBG IOAT Ch5" },
	{ 0x342b8086, "TBG IOAT Ch6" },
	{ 0x342c8086, "TBG IOAT Ch7" },

	{ 0x37108086, "JSF IOAT Ch0" },
	{ 0x37118086, "JSF IOAT Ch1" },
	{ 0x37128086, "JSF IOAT Ch2" },
	{ 0x37138086, "JSF IOAT Ch3" },
	{ 0x37148086, "JSF IOAT Ch4" },
	{ 0x37158086, "JSF IOAT Ch5" },
	{ 0x37168086, "JSF IOAT Ch6" },
	{ 0x37178086, "JSF IOAT Ch7" },
	{ 0x37188086, "JSF IOAT Ch0 (RAID)" },
	{ 0x37198086, "JSF IOAT Ch1 (RAID)" },

	{ 0x3c208086, "SNB IOAT Ch0" },
	{ 0x3c218086, "SNB IOAT Ch1" },
	{ 0x3c228086, "SNB IOAT Ch2" },
	{ 0x3c238086, "SNB IOAT Ch3" },
	{ 0x3c248086, "SNB IOAT Ch4" },
	{ 0x3c258086, "SNB IOAT Ch5" },
	{ 0x3c268086, "SNB IOAT Ch6" },
	{ 0x3c278086, "SNB IOAT Ch7" },
	{ 0x3c2e8086, "SNB IOAT Ch0 (RAID)" },
	{ 0x3c2f8086, "SNB IOAT Ch1 (RAID)" },

	{ 0x0e208086, "IVB IOAT Ch0" },
	{ 0x0e218086, "IVB IOAT Ch1" },
	{ 0x0e228086, "IVB IOAT Ch2" },
	{ 0x0e238086, "IVB IOAT Ch3" },
	{ 0x0e248086, "IVB IOAT Ch4" },
	{ 0x0e258086, "IVB IOAT Ch5" },
	{ 0x0e268086, "IVB IOAT Ch6" },
	{ 0x0e278086, "IVB IOAT Ch7" },
	{ 0x0e2e8086, "IVB IOAT Ch0 (RAID)" },
	{ 0x0e2f8086, "IVB IOAT Ch1 (RAID)" },

	{ 0x2f208086, "HSW IOAT Ch0" },
	{ 0x2f218086, "HSW IOAT Ch1" },
	{ 0x2f228086, "HSW IOAT Ch2" },
	{ 0x2f238086, "HSW IOAT Ch3" },
	{ 0x2f248086, "HSW IOAT Ch4" },
	{ 0x2f258086, "HSW IOAT Ch5" },
	{ 0x2f268086, "HSW IOAT Ch6" },
	{ 0x2f278086, "HSW IOAT Ch7" },
	{ 0x2f2e8086, "HSW IOAT Ch0 (RAID)" },
	{ 0x2f2f8086, "HSW IOAT Ch1 (RAID)" },

	{ 0x0c508086, "BWD IOAT Ch0" },
	{ 0x0c518086, "BWD IOAT Ch1" },
	{ 0x0c528086, "BWD IOAT Ch2" },
	{ 0x0c538086, "BWD IOAT Ch3" },

	{ 0x6f508086, "BDXDE IOAT Ch0" },
	{ 0x6f518086, "BDXDE IOAT Ch1" },
	{ 0x6f528086, "BDXDE IOAT Ch2" },
	{ 0x6f538086, "BDXDE IOAT Ch3" },

	{ 0x6f208086, "BDX IOAT Ch0" },
	{ 0x6f218086, "BDX IOAT Ch1" },
	{ 0x6f228086, "BDX IOAT Ch2" },
	{ 0x6f238086, "BDX IOAT Ch3" },
	{ 0x6f248086, "BDX IOAT Ch4" },
	{ 0x6f258086, "BDX IOAT Ch5" },
	{ 0x6f268086, "BDX IOAT Ch6" },
	{ 0x6f278086, "BDX IOAT Ch7" },
	{ 0x6f2e8086, "BDX IOAT Ch0 (RAID)" },
	{ 0x6f2f8086, "BDX IOAT Ch1 (RAID)" },
/*
 * OS <-> Driver linkage functions
 */
ioat_probe(device_t device)
{
	type = pci_get_devid(device);
	for (ep = pci_ids; ep->type; ep++) {
		if (ep->type == type) {
			device_set_desc(device, ep->desc);

ioat_attach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ioat->device = device;

	error = ioat_map_pci_bar(ioat);

	ioat->version = ioat_read_cbver(ioat);
	if (ioat->version < IOAT_VER_3_0) {

	error = ioat3_attach(device);

	error = pci_enable_busmaster(device);

	error = ioat_setup_intr(ioat);

	error = ioat_reset_hw(ioat);

	ioat_process_events(ioat);
	ioat_setup_sysctl(device);

	ioat->chan_idx = ioat_channel_index;
	ioat_channel[ioat_channel_index++] = ioat;
ioat_detach(device_t device)
{
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);

	taskqueue_drain(taskqueue_thread, &ioat->reset_task);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat->destroying = TRUE;
	wakeup(&ioat->quiescing);

	ioat_channel[ioat->chan_idx] = NULL;

	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	ioat_teardown_intr(ioat);
	callout_drain(&ioat->timer);

	pci_disable_busmaster(device);

	if (ioat->pci_resource != NULL)
		bus_release_resource(device, SYS_RES_MEMORY,
		    ioat->pci_resource_id, ioat->pci_resource);

	if (ioat->ring != NULL)
		ioat_free_ring(ioat, 1 << ioat->ring_size_order, ioat->ring);

	if (ioat->comp_update != NULL) {
		bus_dmamap_unload(ioat->comp_update_tag, ioat->comp_update_map);
		bus_dmamem_free(ioat->comp_update_tag, ioat->comp_update,
		    ioat->comp_update_map);
		bus_dma_tag_destroy(ioat->comp_update_tag);

	bus_dma_tag_destroy(ioat->hw_desc_tag);

ioat_teardown_intr(struct ioat_softc *ioat)
{
	if (ioat->tag != NULL)
		bus_teardown_intr(ioat->device, ioat->res, ioat->tag);

	if (ioat->res != NULL)
		bus_release_resource(ioat->device, SYS_RES_IRQ,
		    rman_get_rid(ioat->res), ioat->res);

	pci_release_msi(ioat->device);
ioat_start_channel(struct ioat_softc *ioat)
{
	ioat_acquire(&ioat->dmaengine);
	ioat_null(&ioat->dmaengine, NULL, NULL, 0);
	ioat_release(&ioat->dmaengine);

	for (i = 0; i < 100; i++) {
		status = ioat_get_chansts(ioat);
		if (is_ioat_idle(status))

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_log_message(0, "could not start channel: "
	    "status = %#jx error = %b\n", (uintmax_t)status, (int)chanerr,
/*
 * Initialize Hardware
 */
ioat3_attach(device_t device)
{
	struct ioat_softc *ioat;
	struct ioat_descriptor **ring;
	struct ioat_descriptor *next;
	struct ioat_dma_hw_descriptor *dma_hw_desc;
	int i, num_descriptors;

	ioat = DEVICE2SOFTC(device);
	ioat->capabilities = ioat_read_dmacapability(ioat);

	ioat_log_message(1, "Capabilities: %b\n", (int)ioat->capabilities,

	xfercap = ioat_read_xfercap(ioat);
	ioat->max_xfer_size = 1 << xfercap;

	ioat->intrdelay_supported = (ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) &
	    IOAT_INTRDELAY_SUPPORTED) != 0;
	if (ioat->intrdelay_supported)
		ioat->intrdelay_max = IOAT_INTRDELAY_US_MASK;

	/* TODO: need to check DCA here if we ever do XOR/PQ */

	mtx_init(&ioat->submit_lock, "ioat_submit", NULL, MTX_DEF);
	mtx_init(&ioat->cleanup_lock, "ioat_cleanup", NULL, MTX_DEF);
	callout_init(&ioat->timer, 1);
	TASK_INIT(&ioat->reset_task, 0, ioat_reset_hw_task, ioat);

	/* Establish lock order for Witness */
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat->is_resize_pending = FALSE;
	ioat->is_completion_pending = FALSE;
	ioat->is_reset_pending = FALSE;
	ioat->is_channel_running = FALSE;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), sizeof(uint64_t), 0x0,
	    BUS_SPACE_MAXADDR, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(uint64_t), 1, sizeof(uint64_t), 0, NULL, NULL,
	    &ioat->comp_update_tag);

	error = bus_dmamem_alloc(ioat->comp_update_tag,
	    (void **)&ioat->comp_update, BUS_DMA_ZERO, &ioat->comp_update_map);
	if (ioat->comp_update == NULL)

	error = bus_dmamap_load(ioat->comp_update_tag, ioat->comp_update_map,
	    ioat->comp_update, sizeof(uint64_t), ioat_comp_update_map, ioat,

	ioat->ring_size_order = IOAT_MIN_ORDER;

	num_descriptors = 1 << ioat->ring_size_order;

	bus_dma_tag_create(bus_get_dma_tag(ioat->device), 0x40, 0x0,
	    BUS_SPACE_MAXADDR_40BIT, BUS_SPACE_MAXADDR, NULL, NULL,
	    sizeof(struct ioat_dma_hw_descriptor), 1,
	    sizeof(struct ioat_dma_hw_descriptor), 0, NULL, NULL,
	    &ioat->hw_desc_tag);

	ioat->ring = malloc(num_descriptors * sizeof(*ring), M_IOAT,

	for (i = 0; i < num_descriptors; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, M_WAITOK);

	for (i = 0; i < num_descriptors - 1; i++) {
		dma_hw_desc = ring[i]->u.dma;
		dma_hw_desc->next = next->hw_desc_bus_addr;

	ring[i]->u.dma->next = ring[0]->hw_desc_bus_addr;

	ioat->head = ioat->hw_head = 0;
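	/*
	 * Taking and dropping both mutexes in this (otherwise pointless)
	 * order teaches WITNESS that submit_lock is always acquired before
	 * cleanup_lock, so any later acquisition in the reverse order is
	 * reported as a lock-order violation.
	 */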
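	/*
	 * At this point each hardware descriptor's 'next' field holds the bus
	 * address of its successor and the last entry points back at the
	 * first, so the hardware sees a single circular chain it can walk as
	 * DMACOUNT advances.
	 */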
ioat_map_pci_bar(struct ioat_softc *ioat)
{
	ioat->pci_resource_id = PCIR_BAR(0);
	ioat->pci_resource = bus_alloc_resource_any(ioat->device,
	    SYS_RES_MEMORY, &ioat->pci_resource_id, RF_ACTIVE);

	if (ioat->pci_resource == NULL) {
		ioat_log_message(0, "unable to allocate pci resource\n");

	ioat->pci_bus_tag = rman_get_bustag(ioat->pci_resource);
	ioat->pci_bus_handle = rman_get_bushandle(ioat->pci_resource);

ioat_comp_update_map(void *arg, bus_dma_segment_t *seg, int nseg, int error)
{
	struct ioat_softc *ioat = arg;

	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	ioat->comp_update_bus_addr = seg[0].ds_addr;

ioat_dmamap_cb(void *arg, bus_dma_segment_t *segs, int nseg, int error)
{
	KASSERT(error == 0, ("%s: error:%d", __func__, error));
	*baddr = segs->ds_addr;
/*
 * Interrupt setup and handlers
 */
ioat_setup_intr(struct ioat_softc *ioat)
{
	uint32_t num_vectors;
	boolean_t force_legacy_interrupts;

	force_legacy_interrupts = FALSE;

	if (!g_force_legacy_interrupts && pci_msix_count(ioat->device) >= 1) {
		pci_alloc_msix(ioat->device, &num_vectors);
		if (num_vectors == 1)

	ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
	    &ioat->rid, RF_ACTIVE);

	ioat->res = bus_alloc_resource_any(ioat->device, SYS_RES_IRQ,
	    &ioat->rid, RF_SHAREABLE | RF_ACTIVE);

	if (ioat->res == NULL) {
		ioat_log_message(0, "bus_alloc_resource failed\n");

	error = bus_setup_intr(ioat->device, ioat->res, INTR_MPSAFE |
	    INTR_TYPE_MISC, NULL, ioat_interrupt_handler, ioat, &ioat->tag);
	if (error != 0) {
		ioat_log_message(0, "bus_setup_intr failed\n");

	ioat_write_intrctrl(ioat, IOAT_INTRCTRL_MASTER_INT_EN);

ioat_model_resets_msix(struct ioat_softc *ioat)
{
	pciid = pci_get_devid(ioat->device);

ioat_interrupt_handler(void *arg)
{
	struct ioat_softc *ioat = arg;

	ioat->stats.interrupts++;
	ioat_process_events(ioat);
chanerr_to_errno(uint32_t chanerr)
{
	if ((chanerr & (IOAT_CHANERR_XSADDERR | IOAT_CHANERR_XDADDERR)) != 0)
	if ((chanerr & (IOAT_CHANERR_RDERR | IOAT_CHANERR_WDERR)) != 0)
	/* This one is probably our fault: */
	if ((chanerr & IOAT_CHANERR_NDADDERR) != 0)

ioat_process_events(struct ioat_softc *ioat)
{
	struct ioat_descriptor *desc;
	struct bus_dmadesc *dmadesc;
	uint64_t comp_update, status;
	uint32_t completed, chanerr;

	mtx_lock(&ioat->cleanup_lock);

	comp_update = *ioat->comp_update;
	status = comp_update & IOAT_CHANSTS_COMPLETED_DESCRIPTOR_MASK;

	CTR0(KTR_IOAT, __func__);

	if (status == ioat->last_seen) {
		/*
		 * If we landed in process_events and nothing has been
		 * completed, check for a timeout due to channel halt.
		 */
		comp_update = ioat_get_chansts(ioat);

	desc = ioat_get_ring_entry(ioat, ioat->tail);
	dmadesc = &desc->bus_dmadesc;
	CTR1(KTR_IOAT, "completing desc %d", ioat->tail);

	if (dmadesc->callback_fn != NULL)
		dmadesc->callback_fn(dmadesc->callback_arg, 0);

	if (desc->hw_desc_bus_addr == status)

	ioat->last_seen = desc->hw_desc_bus_addr;

	if (ioat->head == ioat->tail) {
		ioat->is_completion_pending = FALSE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);

	ioat->stats.descriptors_processed += completed;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	mtx_unlock(&ioat->cleanup_lock);

	if (completed != 0) {
		ioat_putn(ioat, completed, IOAT_ACTIVE_DESCR_REF);

	if (!is_ioat_halted(comp_update) && !is_ioat_suspended(comp_update))

	ioat->stats.channel_halts++;

	/*
	 * Fatal programming error on this DMA channel. Flush any outstanding
	 * work with error status and restart the engine.
	 */
	ioat_log_message(0, "Channel halted due to fatal programming error\n");
	mtx_lock(&ioat->submit_lock);
	mtx_lock(&ioat->cleanup_lock);
	ioat->quiescing = TRUE;

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_halted_debug(ioat, chanerr);
	ioat->stats.last_halt_chanerr = chanerr;

	while (ioat_get_active(ioat) > 0) {
		desc = ioat_get_ring_entry(ioat, ioat->tail);
		dmadesc = &desc->bus_dmadesc;
		CTR1(KTR_IOAT, "completing err desc %d", ioat->tail);

		if (dmadesc->callback_fn != NULL)
			dmadesc->callback_fn(dmadesc->callback_arg,
			    chanerr_to_errno(chanerr));

		ioat_putn_locked(ioat, 1, IOAT_ACTIVE_DESCR_REF);
		ioat->stats.descriptors_processed++;
		ioat->stats.descriptors_error++;

	/* Clear error status */
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	mtx_unlock(&ioat->cleanup_lock);
	mtx_unlock(&ioat->submit_lock);

	ioat_log_message(0, "Resetting channel to recover from error\n");
	error = taskqueue_enqueue(taskqueue_thread, &ioat->reset_task);
	KASSERT(error == 0,
	    ("%s: taskqueue_enqueue failed: %d", __func__, error));

ioat_reset_hw_task(void *ctx, int pending __unused)
{
	struct ioat_softc *ioat;

	ioat_log_message(1, "%s: Resetting channel\n", __func__);

	error = ioat_reset_hw(ioat);
	KASSERT(error == 0, ("%s: reset failed: %d", __func__, error));
ioat_get_dmaengine(uint32_t index, int flags)
{
	struct ioat_softc *ioat;

	KASSERT((flags & ~(M_NOWAIT | M_WAITOK)) == 0,
	    ("invalid flags: 0x%08x", flags));
	KASSERT((flags & (M_NOWAIT | M_WAITOK)) != (M_NOWAIT | M_WAITOK),
	    ("invalid wait | nowait"));

	if (index >= ioat_channel_index)

	ioat = ioat_channel[index];
	if (ioat == NULL || ioat->destroying)

	if (ioat->quiescing) {
		if ((flags & M_NOWAIT) != 0)

		mtx_lock(IOAT_REFLK);
		while (ioat->quiescing && !ioat->destroying)
			msleep(&ioat->quiescing, IOAT_REFLK, 0, "getdma", 0);
		mtx_unlock(IOAT_REFLK);

		if (ioat->destroying)

	/*
	 * There's a race here between the quiescing check and HW reset or
	 */
	return (&ioat_get(ioat, IOAT_DMAENGINE_REF)->dmaengine);
ioat_put_dmaengine(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_put(ioat, IOAT_DMAENGINE_REF);

ioat_get_hwversion(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->version);

ioat_get_max_io_size(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->max_xfer_size);

ioat_set_interrupt_coalesce(bus_dmaengine_t dmaengine, uint16_t delay)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	if (!ioat->intrdelay_supported)
	if (delay > ioat->intrdelay_max)

	ioat_write_2(ioat, IOAT_INTRDELAY_OFFSET, delay);
	ioat->cached_intrdelay =
	    ioat_read_2(ioat, IOAT_INTRDELAY_OFFSET) & IOAT_INTRDELAY_US_MASK;

ioat_get_max_coalesce_period(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	return (ioat->intrdelay_max);

ioat_acquire(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	mtx_lock(&ioat->submit_lock);
	CTR0(KTR_IOAT, __func__);

ioat_acquire_reserve(bus_dmaengine_t dmaengine, unsigned n, int mflags)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	ioat_acquire(dmaengine);

	error = ioat_reserve_space(ioat, n, mflags);
	if (error != 0)
		ioat_release(dmaengine);

ioat_release(bus_dmaengine_t dmaengine)
{
	struct ioat_softc *ioat;

	ioat = to_ioat_softc(dmaengine);
	CTR0(KTR_IOAT, __func__);
	ioat_write_2(ioat, IOAT_DMACOUNT_OFFSET, (uint16_t)ioat->hw_head);
	mtx_unlock(&ioat->submit_lock);
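/*
 * Informal usage sketch (not part of the driver): a consumer grabs a channel
 * with ioat_get_dmaengine(), brackets one or more submissions with
 * ioat_acquire()/ioat_release(), and drops its reference with
 * ioat_put_dmaengine() when it no longer needs the channel.  Assuming
 * channel 0 exists and "done_cb" is a bus_dmaengine_callback_t supplied by
 * the caller:
 *
 *	bus_dmaengine_t dma;
 *
 *	dma = ioat_get_dmaengine(0, M_WAITOK);
 *	if (dma == NULL)
 *		return (ENXIO);
 *	ioat_acquire(dma);
 *	(void)ioat_copy(dma, dst_busaddr, src_busaddr, len, done_cb, arg,
 *	    DMA_INT_EN);
 *	ioat_release(dma);
 *	ioat_put_dmaengine(dma);
 *
 * done_cb fires from interrupt/cleanup context once the descriptor completes.
 */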
static struct ioat_descriptor *
ioat_op_generic(struct ioat_softc *ioat, uint8_t op,
    uint32_t size, uint64_t src, uint64_t dst,
    bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	KASSERT((flags & ~_DMA_GENERIC_FLAGS) == 0,
	    ("Unrecognized flag(s): %#x", flags & ~_DMA_GENERIC_FLAGS));
	if ((flags & DMA_NO_WAIT) != 0)

	if (size > ioat->max_xfer_size) {
		ioat_log_message(0, "%s: max_xfer_size = %d, requested = %u\n",
		    __func__, ioat->max_xfer_size, (unsigned)size);

	if (ioat_reserve_space(ioat, 1, mflags) != 0)

	desc = ioat_get_ring_entry(ioat, ioat->head);
	hw_desc = desc->u.generic;

	hw_desc->u.control_raw = 0;
	hw_desc->u.control_generic.op = op;
	hw_desc->u.control_generic.completion_update = 1;

	if ((flags & DMA_INT_EN) != 0)
		hw_desc->u.control_generic.int_enable = 1;
	if ((flags & DMA_FENCE) != 0)
		hw_desc->u.control_generic.fence = 1;

	hw_desc->size = size;
	hw_desc->src_addr = src;
	hw_desc->dest_addr = dst;

	desc->bus_dmadesc.callback_fn = callback_fn;
	desc->bus_dmadesc.callback_arg = callback_arg;
ioat_null(bus_dmaengine_t dmaengine, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 8, 0, 0, callback_fn,
	    callback_arg, flags);

	hw_desc = desc->u.dma;
	hw_desc->u.control.null = 1;

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);

ioat_copy(bus_dmaengine_t dmaengine, bus_addr_t dst,
    bus_addr_t src, bus_size_t len, bus_dmaengine_callback_t callback_fn,
    void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src | dst) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, len, src, dst, callback_fn,
	    callback_arg, flags);

	hw_desc = desc->u.dma;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
ioat_copy_8k_aligned(bus_dmaengine_t dmaengine, bus_addr_t dst1,
    bus_addr_t dst2, bus_addr_t src1, bus_addr_t src2,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_dma_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if (((src1 | src2 | dst1 | dst2) & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of src/dst invalid\n",

	if (((src1 | src2 | dst1 | dst2) & PAGE_MASK) != 0) {
		ioat_log_message(0, "%s: Addresses must be page-aligned\n",

	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 2 * PAGE_SIZE, src1, dst1,
	    callback_fn, callback_arg, flags);

	hw_desc = desc->u.dma;
	if (src2 != src1 + PAGE_SIZE) {
		hw_desc->u.control.src_page_break = 1;
		hw_desc->next_src_addr = src2;
	}
	if (dst2 != dst1 + PAGE_SIZE) {
		hw_desc->u.control.dest_page_break = 1;
		hw_desc->next_dest_addr = dst2;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
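/*
 * Informal example (not part of the driver): the routine above copies 8 KB
 * that is logically contiguous but may be split across two physical pages
 * with a single descriptor.  Assuming page-aligned bus addresses p0/p1 for
 * the two source pages and q0/q1 for the two destination pages:
 *
 *	(void)ioat_copy_8k_aligned(dma, q0, q1, p0, p1, done_cb, arg,
 *	    DMA_INT_EN);
 *
 * When the second page happens to follow the first physically, no page-break
 * bit is set and the hardware treats it as one plain 8 KB copy.
 */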
struct bus_dmadesc *
ioat_copy_crc(bus_dmaengine_t dmaengine, bus_addr_t dst, bus_addr_t src,
    bus_size_t len, uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_MOVECRC) == 0) {
		ioat_log_message(0, "%s: Device lacks MOVECRC capability\n",

	if (((src | dst) & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src/dst invalid\n",

	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);

	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",

	switch (teststore) {
		op = IOAT_OP_MOVECRC_STORE;
		op = IOAT_OP_MOVECRC_TEST;
		KASSERT(teststore == 0, ("bogus"));
		op = IOAT_OP_MOVECRC;

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);

	desc = ioat_op_generic(ioat, op, len, src, dst, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
struct bus_dmadesc *
ioat_crc(bus_dmaengine_t dmaengine, bus_addr_t src, bus_size_t len,
    uint32_t *initialseed, bus_addr_t crcptr,
    bus_dmaengine_callback_t callback_fn, void *callback_arg, uint32_t flags)
{
	struct ioat_crc32_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_CRC) == 0) {
		ioat_log_message(0, "%s: Device lacks CRC capability\n",

	if ((src & (0xffffffull << 40)) != 0) {
		ioat_log_message(0, "%s: High 24 bits of src invalid\n",

	teststore = (flags & _DMA_CRC_TESTSTORE);
	if (teststore == _DMA_CRC_TESTSTORE) {
		ioat_log_message(0, "%s: TEST and STORE invalid\n", __func__);

	if (teststore == 0 && (flags & DMA_CRC_INLINE) != 0) {
		ioat_log_message(0, "%s: INLINE invalid without TEST or STORE\n",

	switch (teststore) {
		op = IOAT_OP_CRC_STORE;
		op = IOAT_OP_CRC_TEST;
		KASSERT(teststore == 0, ("bogus"));

	if ((flags & DMA_CRC_INLINE) == 0 &&
	    (crcptr & (0xffffffull << 40)) != 0) {
		ioat_log_message(0,
		    "%s: High 24 bits of crcptr invalid\n", __func__);

	desc = ioat_op_generic(ioat, op, len, src, 0, callback_fn,
	    callback_arg, flags & ~_DMA_CRC_FLAGS);

	hw_desc = desc->u.crc32;

	if ((flags & DMA_CRC_INLINE) == 0)
		hw_desc->crc_address = crcptr;
	else
		hw_desc->u.control.crc_location = 1;

	if (initialseed != NULL) {
		hw_desc->u.control.use_seed = 1;
		hw_desc->seed = *initialseed;
	}

	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
struct bus_dmadesc *
ioat_blockfill(bus_dmaengine_t dmaengine, bus_addr_t dst, uint64_t fillpattern,
    bus_size_t len, bus_dmaengine_callback_t callback_fn, void *callback_arg,
    uint32_t flags)
{
	struct ioat_fill_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	CTR0(KTR_IOAT, __func__);
	ioat = to_ioat_softc(dmaengine);

	if ((ioat->capabilities & IOAT_DMACAP_BFILL) == 0) {
		ioat_log_message(0, "%s: Device lacks BFILL capability\n",

	if ((dst & (0xffffull << 48)) != 0) {
		ioat_log_message(0, "%s: High 16 bits of dst invalid\n",

	desc = ioat_op_generic(ioat, IOAT_OP_FILL, len, fillpattern, dst,
	    callback_fn, callback_arg, flags);

	hw_desc = desc->u.fill;
	if (g_ioat_debug_level >= 3)
		dump_descriptor(hw_desc);

	ioat_submit_single(ioat);
	return (&desc->bus_dmadesc);
static inline uint32_t
ioat_get_active(struct ioat_softc *ioat)
{

	return ((ioat->head - ioat->tail) & ((1 << ioat->ring_size_order) - 1));
}

static inline uint32_t
ioat_get_ring_space(struct ioat_softc *ioat)
{

	return ((1 << ioat->ring_size_order) - ioat_get_active(ioat) - 1);
}
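/*
 * Worked example of the modular arithmetic above: with ring_size_order == 7
 * the ring holds 128 descriptors.  If the free-running counters are
 * head == 130 and tail == 125, then active == (130 - 125) & 127 == 5 and the
 * remaining space is 128 - 5 - 1 == 122; the extra "- 1" keeps head from
 * ever catching all the way up to tail.
 */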
static struct ioat_descriptor *
ioat_alloc_ring_entry(struct ioat_softc *ioat, int mflags)
{
	struct ioat_generic_hw_descriptor *hw_desc;
	struct ioat_descriptor *desc;
	int error, busdmaflag;

	if ((mflags & M_WAITOK) != 0)
		busdmaflag = BUS_DMA_WAITOK;
	else
		busdmaflag = BUS_DMA_NOWAIT;

	desc = malloc(sizeof(*desc), M_IOAT, mflags);

	bus_dmamem_alloc(ioat->hw_desc_tag, (void **)&hw_desc,
	    BUS_DMA_ZERO | busdmaflag, &ioat->hw_desc_map);
	if (hw_desc == NULL)

	memset(&desc->bus_dmadesc, 0, sizeof(desc->bus_dmadesc));
	desc->u.generic = hw_desc;

	error = bus_dmamap_load(ioat->hw_desc_tag, ioat->hw_desc_map, hw_desc,
	    sizeof(*hw_desc), ioat_dmamap_cb, &desc->hw_desc_bus_addr,

	ioat_free_ring_entry(ioat, desc);

ioat_free_ring_entry(struct ioat_softc *ioat, struct ioat_descriptor *desc)
{
	if (desc->u.generic)
		bus_dmamem_free(ioat->hw_desc_tag, desc->u.generic,
		    ioat->hw_desc_map);
/*
 * Reserves space in this IOAT descriptor ring by ensuring enough slots remain
 * for the requested number of descriptors.
 *
 * If mflags contains M_WAITOK, blocks until enough space is available.
 *
 * Returns zero on success, or an errno on error.  If num_descs is beyond the
 * maximum ring size, returns EINVAL; if allocation would block and mflags
 * contains M_NOWAIT, returns EAGAIN.
 *
 * Must be called with the submit_lock held; returns with the lock held.  The
 * lock may be dropped to allocate the ring.
 *
 * (The submit_lock is needed to add any entries to the ring, so callers are
 * assured enough room is available.)
 */
ioat_reserve_space(struct ioat_softc *ioat, uint32_t num_descs, int mflags)
{
	struct ioat_descriptor **new_ring;

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (num_descs < 1 || num_descs > (1 << IOAT_MAX_ORDER)) {

	if (ioat->quiescing) {

	if (ioat_get_ring_space(ioat) >= num_descs)

	order = ioat->ring_size_order;
	if (ioat->is_resize_pending || order == IOAT_MAX_ORDER) {
		if ((mflags & M_WAITOK) != 0) {
			msleep(&ioat->tail, &ioat->submit_lock, 0,

	ioat->is_resize_pending = TRUE;

	mtx_unlock(&ioat->submit_lock);

	new_ring = ioat_prealloc_ring(ioat, 1 << (order + 1),

	mtx_lock(&ioat->submit_lock);
	KASSERT(ioat->ring_size_order == order,
	    ("is_resize_pending should protect order"));

	if (new_ring == NULL) {
		KASSERT((mflags & M_WAITOK) == 0,
		    ("allocation failed"));

	error = ring_grow(ioat, order, new_ring);

	ioat->is_resize_pending = FALSE;
	wakeup(&ioat->tail);

	mtx_assert(&ioat->submit_lock, MA_OWNED);
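/*
 * Informal example (not part of the driver): a caller that wants to queue a
 * batch of operations without risking a mid-batch failure can reserve all of
 * the slots up front and then submit them under the same acquire:
 *
 *	if (ioat_acquire_reserve(dma, n, M_NOWAIT) != 0)
 *		return (EAGAIN);
 *	for (i = 0; i < n; i++)
 *		(void)ioat_copy(dma, dst[i], src[i], len, NULL, NULL, 0);
 *	ioat_release(dma);
 *
 * ioat_acquire_reserve() returns non-zero (e.g. EAGAIN) when the ring cannot
 * be grown without sleeping; in that case the engine has already been
 * released on the caller's behalf.
 */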
static struct ioat_descriptor **
ioat_prealloc_ring(struct ioat_softc *ioat, uint32_t size, boolean_t need_dscr,
    int mflags)
{
	struct ioat_descriptor **ring;

	KASSERT(size > 0 && powerof2(size), ("bogus size"));

	ring = malloc(size * sizeof(*ring), M_IOAT, M_ZERO | mflags);

	for (i = size / 2; i < size; i++) {
		ring[i] = ioat_alloc_ring_entry(ioat, mflags);
		if (ring[i] == NULL)

	if (error != 0 && ring != NULL) {
		ioat_free_ring(ioat, size, ring);

ioat_free_ring(struct ioat_softc *ioat, uint32_t size,
    struct ioat_descriptor **ring)
{
	for (i = 0; i < size; i++) {
		if (ring[i] != NULL)
			ioat_free_ring_entry(ioat, ring[i]);

static struct ioat_descriptor *
ioat_get_ring_entry(struct ioat_softc *ioat, uint32_t index)
{

	return (ioat->ring[index % (1 << ioat->ring_size_order)]);
}
ring_grow(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_descriptor *tmp, *next;
	struct ioat_dma_hw_descriptor *hw;
	uint32_t oldsize, newsize, head, tail, i, end;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder >= IOAT_MAX_ORDER) {

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder + 1));

	mtx_lock(&ioat->cleanup_lock);

	head = ioat->head & (oldsize - 1);
	tail = ioat->tail & (oldsize - 1);

	/* Copy old descriptors to new ring */
	for (i = 0; i < oldsize; i++)
		newring[i] = ioat->ring[i];

	/*
	 * If head has wrapped but tail hasn't, we must swap some descriptors
	 * around so that tail can increment directly to head.
	 */
	for (i = 0; i <= head; i++) {
		tmp = newring[oldsize + i];

		newring[oldsize + i] = newring[i];
		newring[oldsize + i]->id = oldsize + i;

	KASSERT(head >= tail, ("invariants"));

	/* Head didn't wrap; we only need to link in oldsize..newsize */
	if (head < oldsize) {

	/* Head did wrap; link newhead..newsize and 0..oldhead */
	end = newsize + (head - oldsize) + 1;

	/*
	 * Fix up hardware ring, being careful not to trample the active
	 * section (tail -> head).
	 */
	for (; i < end; i++) {
		KASSERT((i & (newsize - 1)) < tail ||
		    (i & (newsize - 1)) >= head, ("trampling snake"));

		next = newring[(i + 1) & (newsize - 1)];
		hw = newring[i & (newsize - 1)]->u.dma;
		hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder + 1;

	mtx_unlock(&ioat->cleanup_lock);

	ioat_free_ring(ioat, (1 << (oldorder + 1)), newring);
ring_shrink(struct ioat_softc *ioat, uint32_t oldorder,
    struct ioat_descriptor **newring)
{
	struct ioat_dma_hw_descriptor *hw;
	struct ioat_descriptor *ent, *next;
	uint32_t oldsize, newsize, current_idx, new_idx, i;

	CTR0(KTR_IOAT, __func__);

	mtx_assert(&ioat->submit_lock, MA_OWNED);

	if (oldorder != ioat->ring_size_order || oldorder <= IOAT_MIN_ORDER) {

	oldsize = (1 << oldorder);
	newsize = (1 << (oldorder - 1));

	mtx_lock(&ioat->cleanup_lock);

	/* Can't shrink below current active set! */
	if (ioat_get_active(ioat) >= newsize) {

	/*
	 * Copy current descriptors to the new ring, dropping the removed
	 * descriptors.
	 */
	for (i = 0; i < newsize; i++) {
		current_idx = (ioat->tail + i) & (oldsize - 1);
		new_idx = (ioat->tail + i) & (newsize - 1);

		newring[new_idx] = ioat->ring[current_idx];
		newring[new_idx]->id = new_idx;

	/* Free deleted descriptors */
	for (i = newsize; i < oldsize; i++) {
		ent = ioat_get_ring_entry(ioat, ioat->tail + i);
		ioat_free_ring_entry(ioat, ent);

	/* Fix up hardware ring. */
	hw = newring[(ioat->tail + newsize - 1) & (newsize - 1)]->u.dma;
	next = newring[(ioat->tail + newsize) & (newsize - 1)];
	hw->next = next->hw_desc_bus_addr;

	free(ioat->ring, M_IOAT);
	ioat->ring = newring;
	ioat->ring_size_order = oldorder - 1;

	mtx_unlock(&ioat->cleanup_lock);

	ioat_free_ring(ioat, (1 << (oldorder - 1)), newring);
ioat_halted_debug(struct ioat_softc *ioat, uint32_t chanerr)
{
	struct ioat_descriptor *desc;

	ioat_log_message(0, "Channel halted (%b)\n", (int)chanerr,

	mtx_assert(&ioat->cleanup_lock, MA_OWNED);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 0);
	dump_descriptor(desc->u.raw);

	desc = ioat_get_ring_entry(ioat, ioat->tail + 1);
	dump_descriptor(desc->u.raw);
ioat_timer_callback(void *arg)
{
	struct ioat_descriptor **newring;
	struct ioat_softc *ioat;

	ioat_log_message(1, "%s\n", __func__);

	if (ioat->is_completion_pending) {
		ioat_process_events(ioat);

	/* Slowly scale the ring down if idle. */
	mtx_lock(&ioat->submit_lock);
	order = ioat->ring_size_order;
	if (ioat->is_resize_pending || order == IOAT_MIN_ORDER) {
		mtx_unlock(&ioat->submit_lock);

	ioat->is_resize_pending = TRUE;
	mtx_unlock(&ioat->submit_lock);

	newring = ioat_prealloc_ring(ioat, 1 << (order - 1), FALSE,

	mtx_lock(&ioat->submit_lock);
	KASSERT(ioat->ring_size_order == order,
	    ("resize_pending protects order"));

	if (newring != NULL)
		ring_shrink(ioat, order, newring);

	ioat->is_resize_pending = FALSE;
	mtx_unlock(&ioat->submit_lock);

	if (ioat->ring_size_order > IOAT_MIN_ORDER)
		callout_reset(&ioat->timer, 10 * hz,
		    ioat_timer_callback, ioat);
ioat_submit_single(struct ioat_softc *ioat)
{

	ioat_get(ioat, IOAT_ACTIVE_DESCR_REF);
	atomic_add_rel_int(&ioat->head, 1);
	atomic_add_rel_int(&ioat->hw_head, 1);

	if (!ioat->is_completion_pending) {
		ioat->is_completion_pending = TRUE;
		callout_reset(&ioat->timer, IOAT_INTR_TIMO,
		    ioat_timer_callback, ioat);
	}

	ioat->stats.descriptors_submitted++;
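	/*
	 * Note: ioat_submit_single() only advances the software indices; the
	 * hardware does not see the new descriptor until ioat_release()
	 * writes hw_head to the DMACOUNT register, which is why every
	 * submission must be bracketed by ioat_acquire()/ioat_release().
	 */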
ioat_reset_hw(struct ioat_softc *ioat)
{
	mtx_lock(IOAT_REFLK);
	ioat->quiescing = TRUE;
	ioat_drain_locked(ioat);
	mtx_unlock(IOAT_REFLK);

	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status))

	/* Wait at most 20 ms */
	for (timeout = 0; (is_ioat_active(status) || is_ioat_idle(status)) &&
	    timeout < 20; timeout++) {
		status = ioat_get_chansts(ioat);

	if (timeout == 20) {

	KASSERT(ioat_get_active(ioat) == 0, ("active after quiesce"));

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);
	ioat_write_4(ioat, IOAT_CHANERR_OFFSET, chanerr);

	/*
	 * IOAT v3 workaround - CHANERRMSK_INT with 3E07h to mask out errors
	 * that can cause stability issues for IOAT v3.
	 */
	pci_write_config(ioat->device, IOAT_CFG_CHANERRMASK_INT_OFFSET, 0x3e07,

	chanerr = pci_read_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, 4);
	pci_write_config(ioat->device, IOAT_CFG_CHANERR_INT_OFFSET, chanerr, 4);

	/*
	 * BDXDE and BWD models reset MSI-X registers on device reset.
	 * Save/restore their contents manually.
	 */
	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets MSI-X registers; saving\n");
		pci_save_state(ioat->device);
	}

	/* Wait at most 20 ms */
	for (timeout = 0; ioat_reset_pending(ioat) && timeout < 20; timeout++)

	if (timeout == 20) {

	if (ioat_model_resets_msix(ioat)) {
		ioat_log_message(1, "device resets registers; restored\n");
		pci_restore_state(ioat->device);
	}

	/* Reset attempts to return the hardware to "halted." */
	status = ioat_get_chansts(ioat);
	if (is_ioat_active(status) || is_ioat_idle(status)) {
		/* So this really shouldn't happen... */
		ioat_log_message(0, "Device is active after a reset?\n");
		ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);

	chanerr = ioat_read_4(ioat, IOAT_CHANERR_OFFSET);

	mtx_lock(&ioat->cleanup_lock);
	ioat_halted_debug(ioat, chanerr);
	mtx_unlock(&ioat->cleanup_lock);

	/*
	 * Bring device back online after reset.  Writing CHAINADDR brings the
	 * device back to active.
	 *
	 * The internal ring counter resets to zero, so we have to start over
	 * at zero as well.
	 */
	ioat->tail = ioat->head = ioat->hw_head = 0;
	ioat->last_seen = 0;

	ioat_write_chanctrl(ioat, IOAT_CHANCTRL_RUN);
	ioat_write_chancmp(ioat, ioat->comp_update_bus_addr);
	ioat_write_chainaddr(ioat, ioat->ring[0]->hw_desc_bus_addr);

	mtx_lock(IOAT_REFLK);
	ioat->quiescing = FALSE;
	wakeup(&ioat->quiescing);
	mtx_unlock(IOAT_REFLK);

	error = ioat_start_channel(ioat);
sysctl_handle_chansts(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;

	status = ioat_get_chansts(ioat) & IOAT_CHANSTS_STATUS;

	sbuf_new_for_sysctl(&sb, NULL, 256, req);
	switch (status) {
	case IOAT_CHANSTS_ACTIVE:
		sbuf_printf(&sb, "ACTIVE");
		break;
	case IOAT_CHANSTS_IDLE:
		sbuf_printf(&sb, "IDLE");
		break;
	case IOAT_CHANSTS_SUSPENDED:
		sbuf_printf(&sb, "SUSPENDED");
		break;
	case IOAT_CHANSTS_HALTED:
		sbuf_printf(&sb, "HALTED");
		break;
	case IOAT_CHANSTS_ARMED:
		sbuf_printf(&sb, "ARMED");
		break;
	default:
		sbuf_printf(&sb, "UNKNOWN");
		break;
	}

	error = sbuf_finish(&sb);

	if (error != 0 || req->newptr == NULL)
sysctl_handle_dpi(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;
#define	PRECISION	"1"
	const uintmax_t factor = 10;

	sbuf_new_for_sysctl(&sb, NULL, 16, req);
	if (ioat->stats.interrupts == 0) {
		sbuf_printf(&sb, "NaN");

	rate = ioat->stats.descriptors_processed * factor /
	    ioat->stats.interrupts;
	sbuf_printf(&sb, "%ju.%." PRECISION "ju", rate / factor,
	    rate % factor);

	error = sbuf_finish(&sb);

	if (error != 0 || req->newptr == NULL)
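	/*
	 * Worked example of the fixed-point output above: with 1000
	 * descriptors processed over 400 interrupts, rate = 1000 * 10 / 400
	 * = 25, and the sysctl reports "2.5" (rate / 10 before the decimal
	 * point, rate % 10 after it).
	 */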
sysctl_handle_error(SYSCTL_HANDLER_ARGS)
{
	struct ioat_descriptor *desc;
	struct ioat_softc *ioat;

	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)

	error = SYSCTL_IN(req, &arg, sizeof(arg));

	ioat_acquire(&ioat->dmaengine);
	desc = ioat_op_generic(ioat, IOAT_OP_COPY, 1,
	    0xffff000000000000ull, 0xffff000000000000ull, NULL, NULL,
	    0);

	ioat_submit_single(ioat);
	ioat_release(&ioat->dmaengine);

sysctl_handle_reset(SYSCTL_HANDLER_ARGS)
{
	struct ioat_softc *ioat;

	error = SYSCTL_OUT(req, &arg, sizeof(arg));
	if (error != 0 || req->newptr == NULL)

	error = SYSCTL_IN(req, &arg, sizeof(arg));

	error = ioat_reset_hw(ioat);
dump_descriptor(void *hw_desc)
{
	for (i = 0; i < 2; i++) {
		for (j = 0; j < 8; j++)
			printf("%08x ", ((uint32_t *)hw_desc)[i * 8 + j]);
ioat_setup_sysctl(device_t device)
{
	struct sysctl_oid_list *par, *statpar, *state, *hammer;
	struct sysctl_ctx_list *ctx;
	struct sysctl_oid *tree, *tmp;
	struct ioat_softc *ioat;

	ioat = DEVICE2SOFTC(device);
	ctx = device_get_sysctl_ctx(device);
	tree = device_get_sysctl_tree(device);
	par = SYSCTL_CHILDREN(tree);

	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "version", CTLFLAG_RD,
	    &ioat->version, 0, "HW version (0xMM form)");
	SYSCTL_ADD_UINT(ctx, par, OID_AUTO, "max_xfer_size", CTLFLAG_RD,
	    &ioat->max_xfer_size, 0, "HW maximum transfer size");
	SYSCTL_ADD_INT(ctx, par, OID_AUTO, "intrdelay_supported", CTLFLAG_RD,
	    &ioat->intrdelay_supported, 0, "Is INTRDELAY supported");

	SYSCTL_ADD_U16(ctx, par, OID_AUTO, "intrdelay_max", CTLFLAG_RD,
	    &ioat->intrdelay_max, 0,
	    "Maximum configurable INTRDELAY on this channel (microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "state", CTLFLAG_RD, NULL,
	    "IOAT channel internal state");
	state = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "ring_size_order", CTLFLAG_RD,
	    &ioat->ring_size_order, 0, "SW descriptor ring size order");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "head", CTLFLAG_RD, &ioat->head,
	    0, "SW descriptor head pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "tail", CTLFLAG_RD, &ioat->tail,
	    0, "SW descriptor tail pointer index");
	SYSCTL_ADD_UINT(ctx, state, OID_AUTO, "hw_head", CTLFLAG_RD,
	    &ioat->hw_head, 0, "HW DMACOUNT");

	SYSCTL_ADD_UQUAD(ctx, state, OID_AUTO, "last_completion", CTLFLAG_RD,
	    ioat->comp_update, "HW addr of last completion");

	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_resize_pending", CTLFLAG_RD,
	    &ioat->is_resize_pending, 0, "resize pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_completion_pending",
	    CTLFLAG_RD, &ioat->is_completion_pending, 0, "completion pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_reset_pending", CTLFLAG_RD,
	    &ioat->is_reset_pending, 0, "reset pending");
	SYSCTL_ADD_INT(ctx, state, OID_AUTO, "is_channel_running", CTLFLAG_RD,
	    &ioat->is_channel_running, 0, "channel running");

	SYSCTL_ADD_PROC(ctx, state, OID_AUTO, "chansts",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_chansts, "A",
	    "String of the channel status");

	SYSCTL_ADD_U16(ctx, state, OID_AUTO, "intrdelay", CTLFLAG_RD,
	    &ioat->cached_intrdelay, 0,
	    "Current INTRDELAY on this channel (cached, microseconds)");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "hammer", CTLFLAG_RD, NULL,
	    "Big hammers (mostly for testing)");
	hammer = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_reset",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_reset, "I",
	    "Set to non-zero to reset the hardware");
	SYSCTL_ADD_PROC(ctx, hammer, OID_AUTO, "force_hw_error",
	    CTLTYPE_INT | CTLFLAG_RW, ioat, 0, sysctl_handle_error, "I",
	    "Set to non-zero to inject a recoverable hardware error");

	tmp = SYSCTL_ADD_NODE(ctx, par, OID_AUTO, "stats", CTLFLAG_RD, NULL,
	    "IOAT channel statistics");
	statpar = SYSCTL_CHILDREN(tmp);

	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "interrupts", CTLFLAG_RW,
	    &ioat->stats.interrupts,
	    "Number of interrupts processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "descriptors", CTLFLAG_RW,
	    &ioat->stats.descriptors_processed,
	    "Number of descriptors processed on this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "submitted", CTLFLAG_RW,
	    &ioat->stats.descriptors_submitted,
	    "Number of descriptors submitted to this channel");
	SYSCTL_ADD_UQUAD(ctx, statpar, OID_AUTO, "errored", CTLFLAG_RW,
	    &ioat->stats.descriptors_error,
	    "Number of descriptors failed by channel errors");

	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "halts", CTLFLAG_RW,
	    &ioat->stats.channel_halts, 0,
	    "Number of times the channel has halted");
	SYSCTL_ADD_U32(ctx, statpar, OID_AUTO, "last_halt_chanerr", CTLFLAG_RW,
	    &ioat->stats.last_halt_chanerr, 0,
	    "The raw CHANERR when the channel was last halted");

	SYSCTL_ADD_PROC(ctx, statpar, OID_AUTO, "desc_per_interrupt",
	    CTLTYPE_STRING | CTLFLAG_RD, ioat, 0, sysctl_handle_dpi, "A",
	    "Descriptors per interrupt");
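	/*
	 * Informational example: everything registered above hangs off the
	 * per-device sysctl tree, so for the first channel it can be
	 * inspected (or poked) from userland with, e.g.:
	 *
	 *	# sysctl dev.ioat.0.state.chansts
	 *	# sysctl dev.ioat.0.stats
	 *	# sysctl dev.ioat.0.hammer.force_hw_reset=1
	 */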
static inline struct ioat_softc *
ioat_get(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{
	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refcnt, 1);
	KASSERT(old < UINT32_MAX, ("refcnt overflow"));

	old = atomic_fetchadd_32(&ioat->refkinds[kind], 1);
	KASSERT(old < UINT32_MAX, ("refcnt kind overflow"));

ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, FALSE);
}

ioat_putn_locked(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind)
{

	_ioat_putn(ioat, n, kind, TRUE);
}

_ioat_putn(struct ioat_softc *ioat, uint32_t n, enum ioat_ref_kind kind,
    boolean_t locked)
{
	KASSERT(kind < IOAT_NUM_REF_KINDS, ("bogus"));

	old = atomic_fetchadd_32(&ioat->refkinds[kind], -n);
	KASSERT(old >= n, ("refcnt kind underflow"));

	/* Skip acquiring the lock if resulting refcnt > 0. */
	if (atomic_cmpset_32(&ioat->refcnt, old, old - n))

	mtx_assert(IOAT_REFLK, MA_OWNED);
	mtx_lock(IOAT_REFLK);

	old = atomic_fetchadd_32(&ioat->refcnt, -n);
	KASSERT(old >= n, ("refcnt error"));

	mtx_unlock(IOAT_REFLK);

ioat_put(struct ioat_softc *ioat, enum ioat_ref_kind kind)
{

	ioat_putn(ioat, 1, kind);
}

ioat_drain_locked(struct ioat_softc *ioat)
{

	mtx_assert(IOAT_REFLK, MA_OWNED);
	while (ioat->refcnt > 0)
		msleep(IOAT_REFLK, IOAT_REFLK, 0, "ioat_drain", 0);