2 * SPDX-License-Identifier: BSD-2-Clause-FreeBSD
4 * Copyright (c) 2006 Stephane E. Potvin <sepotvin@videotron.ca>
5 * Copyright (c) 2006 Ariff Abdullah <ariff@FreeBSD.org>
6 * Copyright (c) 2008-2012 Alexander Motin <mav@FreeBSD.org>
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
32 * Intel High Definition Audio (Controller) driver for FreeBSD.
35 #ifdef HAVE_KERNEL_OPTION_HEADERS
39 #include <dev/sound/pcm/sound.h>
40 #include <dev/pci/pcireg.h>
41 #include <dev/pci/pcivar.h>
43 #include <sys/ctype.h>
44 #include <sys/endian.h>
45 #include <sys/taskqueue.h>
47 #include <dev/sound/pci/hda/hdac_private.h>
48 #include <dev/sound/pci/hda/hdac_reg.h>
49 #include <dev/sound/pci/hda/hda_reg.h>
50 #include <dev/sound/pci/hda/hdac.h>
52 #define HDA_DRV_TEST_REV "20120126_0002"
54 SND_DECLARE_FILE("$FreeBSD$");
56 #define hdac_lock(sc) snd_mtxlock((sc)->lock)
57 #define hdac_unlock(sc) snd_mtxunlock((sc)->lock)
58 #define hdac_lockassert(sc) snd_mtxassert((sc)->lock)
60 #define HDAC_QUIRK_64BIT (1 << 0)
61 #define HDAC_QUIRK_DMAPOS (1 << 1)
62 #define HDAC_QUIRK_MSI (1 << 2)
67 } hdac_quirks_tab[] = {
68 { "64bit", HDAC_QUIRK_64BIT },
69 { "dmapos", HDAC_QUIRK_DMAPOS },
70 { "msi", HDAC_QUIRK_MSI },
73 MALLOC_DEFINE(M_HDAC, "hdac", "HDA Controller");
81 { HDA_INTEL_OAK, "Intel Oaktrail", 0, 0 },
82 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 },
83 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 },
84 { HDA_INTEL_BAY, "Intel BayTrail", 0, 0 },
85 { HDA_INTEL_HSW1, "Intel Haswell", 0, 0 },
86 { HDA_INTEL_HSW2, "Intel Haswell", 0, 0 },
87 { HDA_INTEL_HSW3, "Intel Haswell", 0, 0 },
88 { HDA_INTEL_BDW1, "Intel Broadwell", 0, 0 },
89 { HDA_INTEL_BDW2, "Intel Broadwell", 0, 0 },
90 { HDA_INTEL_BXTNT, "Intel Broxton-T", 0, 0 },
91 { HDA_INTEL_CPT, "Intel Cougar Point", 0, 0 },
92 { HDA_INTEL_PATSBURG,"Intel Patsburg", 0, 0 },
93 { HDA_INTEL_PPT1, "Intel Panther Point", 0, 0 },
94 { HDA_INTEL_BR, "Intel Braswell", 0, 0 },
95 { HDA_INTEL_LPT1, "Intel Lynx Point", 0, 0 },
96 { HDA_INTEL_LPT2, "Intel Lynx Point", 0, 0 },
97 { HDA_INTEL_WCPT, "Intel Wildcat Point", 0, 0 },
98 { HDA_INTEL_WELLS1, "Intel Wellsburg", 0, 0 },
99 { HDA_INTEL_WELLS2, "Intel Wellsburg", 0, 0 },
100 { HDA_INTEL_LPTLP1, "Intel Lynx Point-LP", 0, 0 },
101 { HDA_INTEL_LPTLP2, "Intel Lynx Point-LP", 0, 0 },
102 { HDA_INTEL_SRPTLP, "Intel Sunrise Point-LP", 0, 0 },
103 { HDA_INTEL_KBLKLP, "Intel Kaby Lake-LP", 0, 0 },
104 { HDA_INTEL_SRPT, "Intel Sunrise Point", 0, 0 },
105 { HDA_INTEL_KBLK, "Intel Kaby Lake", 0, 0 },
106 { HDA_INTEL_KBLKH, "Intel Kaby Lake-H", 0, 0 },
107 { HDA_INTEL_CFLK, "Intel Coffee Lake", 0, 0 },
108 { HDA_INTEL_CMLKS, "Intel Comet Lake-S", 0, 0 },
109 { HDA_INTEL_CNLK, "Intel Cannon Lake", 0, 0 },
110 { HDA_INTEL_ICLK, "Intel Ice Lake", 0, 0 },
111 { HDA_INTEL_CMLKLP, "Intel Comet Lake-LP", 0, 0 },
112 { HDA_INTEL_CMLKH, "Intel Comet Lake-H", 0, 0 },
113 { HDA_INTEL_TGLK, "Intel Tiger Lake", 0, 0 },
114 { HDA_INTEL_GMLK, "Intel Gemini Lake", 0, 0 },
115 { HDA_INTEL_82801F, "Intel 82801F", 0, 0 },
116 { HDA_INTEL_63XXESB, "Intel 631x/632xESB", 0, 0 },
117 { HDA_INTEL_82801G, "Intel 82801G", 0, 0 },
118 { HDA_INTEL_82801H, "Intel 82801H", 0, 0 },
119 { HDA_INTEL_82801I, "Intel 82801I", 0, 0 },
120 { HDA_INTEL_JLK, "Intel Jasper Lake", 0, 0 },
121 { HDA_INTEL_82801JI, "Intel 82801JI", 0, 0 },
122 { HDA_INTEL_82801JD, "Intel 82801JD", 0, 0 },
123 { HDA_INTEL_PCH, "Intel Ibex Peak", 0, 0 },
124 { HDA_INTEL_PCH2, "Intel Ibex Peak", 0, 0 },
125 { HDA_INTEL_ELLK, "Intel Elkhart Lake", 0, 0 },
126 { HDA_INTEL_JLK2, "Intel Jasper Lake", 0, 0 },
127 { HDA_INTEL_BXTNP, "Intel Broxton-P", 0, 0 },
128 { HDA_INTEL_SCH, "Intel SCH", 0, 0 },
129 { HDA_NVIDIA_MCP51, "NVIDIA MCP51", 0, HDAC_QUIRK_MSI },
130 { HDA_NVIDIA_MCP55, "NVIDIA MCP55", 0, HDAC_QUIRK_MSI },
131 { HDA_NVIDIA_MCP61_1, "NVIDIA MCP61", 0, 0 },
132 { HDA_NVIDIA_MCP61_2, "NVIDIA MCP61", 0, 0 },
133 { HDA_NVIDIA_MCP65_1, "NVIDIA MCP65", 0, 0 },
134 { HDA_NVIDIA_MCP65_2, "NVIDIA MCP65", 0, 0 },
135 { HDA_NVIDIA_MCP67_1, "NVIDIA MCP67", 0, 0 },
136 { HDA_NVIDIA_MCP67_2, "NVIDIA MCP67", 0, 0 },
137 { HDA_NVIDIA_MCP73_1, "NVIDIA MCP73", 0, 0 },
138 { HDA_NVIDIA_MCP73_2, "NVIDIA MCP73", 0, 0 },
139 { HDA_NVIDIA_MCP78_1, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
140 { HDA_NVIDIA_MCP78_2, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
141 { HDA_NVIDIA_MCP78_3, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
142 { HDA_NVIDIA_MCP78_4, "NVIDIA MCP78", 0, HDAC_QUIRK_64BIT },
143 { HDA_NVIDIA_MCP79_1, "NVIDIA MCP79", 0, 0 },
144 { HDA_NVIDIA_MCP79_2, "NVIDIA MCP79", 0, 0 },
145 { HDA_NVIDIA_MCP79_3, "NVIDIA MCP79", 0, 0 },
146 { HDA_NVIDIA_MCP79_4, "NVIDIA MCP79", 0, 0 },
147 { HDA_NVIDIA_MCP89_1, "NVIDIA MCP89", 0, 0 },
148 { HDA_NVIDIA_MCP89_2, "NVIDIA MCP89", 0, 0 },
149 { HDA_NVIDIA_MCP89_3, "NVIDIA MCP89", 0, 0 },
150 { HDA_NVIDIA_MCP89_4, "NVIDIA MCP89", 0, 0 },
151 { HDA_NVIDIA_0BE2, "NVIDIA (0x0be2)", 0, HDAC_QUIRK_MSI },
152 { HDA_NVIDIA_0BE3, "NVIDIA (0x0be3)", 0, HDAC_QUIRK_MSI },
153 { HDA_NVIDIA_0BE4, "NVIDIA (0x0be4)", 0, HDAC_QUIRK_MSI },
154 { HDA_NVIDIA_GT100, "NVIDIA GT100", 0, HDAC_QUIRK_MSI },
155 { HDA_NVIDIA_GT104, "NVIDIA GT104", 0, HDAC_QUIRK_MSI },
156 { HDA_NVIDIA_GT106, "NVIDIA GT106", 0, HDAC_QUIRK_MSI },
157 { HDA_NVIDIA_GT108, "NVIDIA GT108", 0, HDAC_QUIRK_MSI },
158 { HDA_NVIDIA_GT116, "NVIDIA GT116", 0, HDAC_QUIRK_MSI },
159 { HDA_NVIDIA_GF119, "NVIDIA GF119", 0, 0 },
160 { HDA_NVIDIA_GF110_1, "NVIDIA GF110", 0, HDAC_QUIRK_MSI },
161 { HDA_NVIDIA_GF110_2, "NVIDIA GF110", 0, HDAC_QUIRK_MSI },
162 { HDA_ATI_SB450, "ATI SB450", 0, 0 },
163 { HDA_ATI_SB600, "ATI SB600", 0, 0 },
164 { HDA_ATI_RS600, "ATI RS600", 0, 0 },
165 { HDA_ATI_RS690, "ATI RS690", 0, 0 },
166 { HDA_ATI_RS780, "ATI RS780", 0, 0 },
167 { HDA_ATI_R600, "ATI R600", 0, 0 },
168 { HDA_ATI_RV610, "ATI RV610", 0, 0 },
169 { HDA_ATI_RV620, "ATI RV620", 0, 0 },
170 { HDA_ATI_RV630, "ATI RV630", 0, 0 },
171 { HDA_ATI_RV635, "ATI RV635", 0, 0 },
172 { HDA_ATI_RV710, "ATI RV710", 0, 0 },
173 { HDA_ATI_RV730, "ATI RV730", 0, 0 },
174 { HDA_ATI_RV740, "ATI RV740", 0, 0 },
175 { HDA_ATI_RV770, "ATI RV770", 0, 0 },
176 { HDA_ATI_RV810, "ATI RV810", 0, 0 },
177 { HDA_ATI_RV830, "ATI RV830", 0, 0 },
178 { HDA_ATI_RV840, "ATI RV840", 0, 0 },
179 { HDA_ATI_RV870, "ATI RV870", 0, 0 },
180 { HDA_ATI_RV910, "ATI RV910", 0, 0 },
181 { HDA_ATI_RV930, "ATI RV930", 0, 0 },
182 { HDA_ATI_RV940, "ATI RV940", 0, 0 },
183 { HDA_ATI_RV970, "ATI RV970", 0, 0 },
184 { HDA_ATI_R1000, "ATI R1000", 0, 0 },
185 { HDA_AMD_X370, "AMD X370", 0, 0 },
186 { HDA_AMD_X570, "AMD X570", 0, 0 },
187 { HDA_AMD_STONEY, "AMD Stoney", 0, 0 },
188 { HDA_AMD_RAVEN, "AMD Raven", 0, 0 },
189 { HDA_AMD_HUDSON2, "AMD Hudson-2", 0, 0 },
190 { HDA_RDC_M3010, "RDC M3010", 0, 0 },
191 { HDA_VIA_VT82XX, "VIA VT8251/8237A",0, 0 },
192 { HDA_SIS_966, "SiS 966/968", 0, 0 },
193 { HDA_ULI_M5461, "ULI M5461", 0, 0 },
195 { HDA_INTEL_ALL, "Intel", 0, 0 },
196 { HDA_NVIDIA_ALL, "NVIDIA", 0, 0 },
197 { HDA_ATI_ALL, "ATI", 0, 0 },
198 { HDA_AMD_ALL, "AMD", 0, 0 },
199 { HDA_CREATIVE_ALL, "Creative", 0, 0 },
200 { HDA_VIA_ALL, "VIA", 0, 0 },
201 { HDA_SIS_ALL, "SiS", 0, 0 },
202 { HDA_ULI_ALL, "ULI", 0, 0 },
205 static const struct {
210 } hdac_pcie_snoop[] = {
211 { INTEL_VENDORID, 0x00, 0x00, 0x00 },
212 { ATI_VENDORID, 0x42, 0xf8, 0x02 },
213 { AMD_VENDORID, 0x42, 0xf8, 0x02 },
214 { NVIDIA_VENDORID, 0x4e, 0xf0, 0x0f },
217 /****************************************************************************
218 * Function prototypes
219 ****************************************************************************/
220 static void hdac_intr_handler(void *);
221 static int hdac_reset(struct hdac_softc *, bool);
222 static int hdac_get_capabilities(struct hdac_softc *);
223 static void hdac_dma_cb(void *, bus_dma_segment_t *, int, int);
224 static int hdac_dma_alloc(struct hdac_softc *,
225 struct hdac_dma *, bus_size_t);
226 static void hdac_dma_free(struct hdac_softc *, struct hdac_dma *);
227 static int hdac_mem_alloc(struct hdac_softc *);
228 static void hdac_mem_free(struct hdac_softc *);
229 static int hdac_irq_alloc(struct hdac_softc *);
230 static void hdac_irq_free(struct hdac_softc *);
231 static void hdac_corb_init(struct hdac_softc *);
232 static void hdac_rirb_init(struct hdac_softc *);
233 static void hdac_corb_start(struct hdac_softc *);
234 static void hdac_rirb_start(struct hdac_softc *);
236 static void hdac_attach2(void *);
238 static uint32_t hdac_send_command(struct hdac_softc *, nid_t, uint32_t);
240 static int hdac_probe(device_t);
241 static int hdac_attach(device_t);
242 static int hdac_detach(device_t);
243 static int hdac_suspend(device_t);
244 static int hdac_resume(device_t);
246 static int hdac_rirb_flush(struct hdac_softc *sc);
247 static int hdac_unsolq_flush(struct hdac_softc *sc);
249 /* This function surely going to make its way into upper level someday. */
/*
 * Parse the per-device "config" hint string (comma/whitespace separated
 * tokens, each optionally prefixed by "no" to invert) and fold each
 * recognized token from hdac_quirks_tab into the *on / *off quirk masks.
 * NOTE(review): this is a sampled fragment — intervening lines are elided.
 */
251 hdac_config_fetch(struct hdac_softc *sc, uint32_t *on, uint32_t *off)
253 const char *res = NULL;
254 int i = 0, j, k, len, inv;
/* Fetch the device.hint "config" string; bail out if absent or empty. */
256 if (resource_string_value(device_get_name(sc->dev),
257 device_get_unit(sc->dev), "config", &res) != 0)
259 if (!(res != NULL && strlen(res) > 0))
262 device_printf(sc->dev, "Config options:");
/* Skip leading separators (commas and whitespace) before each token. */
265 while (res[i] != '\0' &&
266 (res[i] == ',' || isspace(res[i]) != 0))
268 if (res[i] == '\0') {
/* Scan forward to the end of the current token. */
275 while (res[j] != '\0' &&
276 !(res[j] == ',' || isspace(res[j]) != 0))
/* A "no" prefix inverts the token: it turns the quirk off instead of on. */
279 if (len > 2 && strncmp(res + i, "no", 2) == 0)
/* Match the (possibly "no"-stripped) token against the quirk table. */
283 for (k = 0; len > inv && k < nitems(hdac_quirks_tab); k++) {
284 if (strncmp(res + i + inv,
285 hdac_quirks_tab[k].key, len - inv) != 0)
/* Require an exact-length match, not just a prefix match. */
287 if (len - inv != strlen(hdac_quirks_tab[k].key))
290 printf(" %s%s", (inv != 0) ? "no" : "",
291 hdac_quirks_tab[k].key);
/* Non-inverted token: force the quirk on and clear it from the off mask. */
294 *on |= hdac_quirks_tab[k].value;
295 *off &= ~hdac_quirks_tab[k].value;
296 } else if (inv != 0) {
/* Inverted ("no"-prefixed) token: force the quirk off. */
297 *off |= hdac_quirks_tab[k].value;
298 *on &= ~hdac_quirks_tab[k].value;
/*
 * Service one pass of pending interrupt causes given a snapshot of INTSTS:
 * drain RIRB responses on a controller interrupt, then acknowledge and
 * dispatch per-stream interrupts to the owning child devices.
 * NOTE(review): sampled fragment — some lines are elided.
 */
307 hdac_one_intr(struct hdac_softc *sc, uint32_t intsts)
313 /* Was this a controller interrupt? */
314 if (intsts & HDAC_INTSTS_CIS) {
315 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
316 /* Get as many responses that we can */
317 while (rirbsts & HDAC_RIRBSTS_RINTFL) {
/* Write-1-to-clear the response-interrupt flag, then re-poll. */
318 HDAC_WRITE_1(&sc->mem,
319 HDAC_RIRBSTS, HDAC_RIRBSTS_RINTFL);
321 rirbsts = HDAC_READ_1(&sc->mem, HDAC_RIRBSTS);
/* Defer unsolicited-response delivery to the taskqueue if any queued. */
323 if (sc->unsolq_rp != sc->unsolq_wp)
324 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
/* Stream interrupts: one INTSTS bit per stream descriptor. */
327 if (intsts & HDAC_INTSTS_SIS_MASK) {
328 for (i = 0; i < sc->num_ss; i++) {
329 if ((intsts & (1 << i)) == 0)
/* Ack all stream status bits (descriptor/FIFO error, buffer complete). */
331 HDAC_WRITE_1(&sc->mem, (i << 5) + HDAC_SDSTS,
332 HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
333 if ((dev = sc->streams[i].dev) != NULL) {
334 HDAC_STREAM_INTR(dev,
335 sc->streams[i].dir, sc->streams[i].stream);
341 /****************************************************************************
342 * void hdac_intr_handler(void *)
344 * Interrupt handler. Processes interrupts received from the hdac.
345 ****************************************************************************/
347 hdac_intr_handler(void *context)
349 struct hdac_softc *sc;
352 sc = (struct hdac_softc *)context;
355 * Loop until HDAC_INTSTS_GIS gets clear.
356 * It is plausible that hardware interrupts a host only when GIS goes
357 * from zero to one. GIS is formed by OR-ing multiple hardware
358 * statuses, so it's possible that a previously cleared status gets set
359 * again while another status has not been cleared yet. Thus, there
360 * will be no new interrupt as GIS always stayed set. If we don't
361 * re-examine GIS then we can leave it set and never get an interrupt
/* Re-read INTSTS after each service pass until GIS deasserts. */
364 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
365 while ((intsts & HDAC_INTSTS_GIS) != 0) {
367 hdac_one_intr(sc, intsts);
369 intsts = HDAC_READ_4(&sc->mem, HDAC_INTSTS);
/*
 * Periodic callout used in polling mode: stop if polling was disabled,
 * otherwise re-arm the callout and run the interrupt handler manually.
 */
374 hdac_poll_callback(void *arg)
376 struct hdac_softc *sc = arg;
382 if (sc->polling == 0) {
/* Re-arm before servicing so the poll cadence stays steady. */
386 callout_reset(&sc->poll_callout, sc->poll_ival, hdac_poll_callback, sc);
389 hdac_intr_handler(sc);
392 /****************************************************************************
393 * int hdac_reset(hdac_softc *, bool)
395 * Reset the hdac to a quiescent and known state.
396 ****************************************************************************/
398 hdac_reset(struct hdac_softc *sc, bool wakeup)
404 * Stop all Streams DMA engine
406 for (i = 0; i < sc->num_iss; i++)
407 HDAC_WRITE_4(&sc->mem, HDAC_ISDCTL(sc, i), 0x0);
408 for (i = 0; i < sc->num_oss; i++)
409 HDAC_WRITE_4(&sc->mem, HDAC_OSDCTL(sc, i), 0x0);
410 for (i = 0; i < sc->num_bss; i++)
411 HDAC_WRITE_4(&sc->mem, HDAC_BSDCTL(sc, i), 0x0);
414 * Stop Control DMA engines.
416 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, 0x0);
417 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, 0x0);
420 * Reset DMA position buffer.
422 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE, 0x0);
423 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, 0x0);
426 * Reset the controller. The reset must remain asserted for
427 * a minimum of 100us.
/* Assert reset by clearing CRST, then poll for the bit to read back 0. */
429 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
430 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl & ~HDAC_GCTL_CRST);
433 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
434 if (!(gctl & HDAC_GCTL_CRST))
438 if (gctl & HDAC_GCTL_CRST) {
439 device_printf(sc->dev, "Unable to put hdac in reset\n");
443 /* If wakeup is not requested - leave the controller in reset state. */
/* Deassert reset (set CRST) and poll for the controller to come back. */
448 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
449 HDAC_WRITE_4(&sc->mem, HDAC_GCTL, gctl | HDAC_GCTL_CRST);
452 gctl = HDAC_READ_4(&sc->mem, HDAC_GCTL);
453 if (gctl & HDAC_GCTL_CRST)
457 if (!(gctl & HDAC_GCTL_CRST)) {
458 device_printf(sc->dev, "Device stuck in reset\n");
463 * Wait for codecs to finish their own reset sequence. The delay here
464 * must be at least 521us (HDA 1.0a section 4.3 Codec Discovery).
471 /****************************************************************************
472 * int hdac_get_capabilities(struct hdac_softc *);
474 * Retreive the general capabilities of the hdac;
475 * Number of Input Streams
476 * Number of Output Streams
477 * Number of bidirectional Streams
479 * CORB and RIRB sizes
480 ****************************************************************************/
482 hdac_get_capabilities(struct hdac_softc *sc)
485 uint8_t corbsize, rirbsize;
/* Decode the GCAP register into stream/SDO counts and 64-bit capability. */
487 gcap = HDAC_READ_2(&sc->mem, HDAC_GCAP);
488 sc->num_iss = HDAC_GCAP_ISS(gcap);
489 sc->num_oss = HDAC_GCAP_OSS(gcap);
490 sc->num_bss = HDAC_GCAP_BSS(gcap);
491 sc->num_ss = sc->num_iss + sc->num_oss + sc->num_bss;
492 sc->num_sdo = HDAC_GCAP_NSDO(gcap);
493 sc->support_64bit = (gcap & HDAC_GCAP_64OK) != 0;
/* Quirk masks may force 64-bit DMA support on or off. */
494 if (sc->quirks_on & HDAC_QUIRK_64BIT)
495 sc->support_64bit = 1;
496 else if (sc->quirks_off & HDAC_QUIRK_64BIT)
497 sc->support_64bit = 0;
/* Pick the largest CORB size the hardware advertises (256/16/2 entries). */
499 corbsize = HDAC_READ_1(&sc->mem, HDAC_CORBSIZE);
500 if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_256) ==
501 HDAC_CORBSIZE_CORBSZCAP_256)
503 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_16) ==
504 HDAC_CORBSIZE_CORBSZCAP_16)
506 else if ((corbsize & HDAC_CORBSIZE_CORBSZCAP_2) ==
507 HDAC_CORBSIZE_CORBSZCAP_2)
510 device_printf(sc->dev, "%s: Invalid corb size (%x)\n",
/* Likewise for the RIRB size capability. */
515 rirbsize = HDAC_READ_1(&sc->mem, HDAC_RIRBSIZE);
516 if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_256) ==
517 HDAC_RIRBSIZE_RIRBSZCAP_256)
519 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_16) ==
520 HDAC_RIRBSIZE_RIRBSZCAP_16)
522 else if ((rirbsize & HDAC_RIRBSIZE_RIRBSZCAP_2) ==
523 HDAC_RIRBSIZE_RIRBSZCAP_2)
526 device_printf(sc->dev, "%s: Invalid rirb size (%x)\n",
532 device_printf(sc->dev, "Caps: OSS %d, ISS %d, BSS %d, "
533 "NSDO %d%s, CORB %d, RIRB %d\n",
534 sc->num_oss, sc->num_iss, sc->num_bss, 1 << sc->num_sdo,
535 sc->support_64bit ? ", 64bit" : "",
536 sc->corb_size, sc->rirb_size);
543 /****************************************************************************
546 * This function is called by bus_dmamap_load when the mapping has been
547 * established. We just record the physical address of the mapping into
548 * the struct hdac_dma passed in.
549 ****************************************************************************/
551 hdac_dma_cb(void *callback_arg, bus_dma_segment_t *segs, int nseg, int error)
553 struct hdac_dma *dma;
/* Single-segment mapping: stash the bus address of segment 0. */
556 dma = (struct hdac_dma *)callback_arg;
557 dma->dma_paddr = segs[0].ds_addr;
562 /****************************************************************************
565 * This function allocate and setup a dma region (struct hdac_dma).
566 * It must be freed by a corresponding hdac_dma_free.
567 ****************************************************************************/
569 hdac_dma_alloc(struct hdac_softc *sc, struct hdac_dma *dma, bus_size_t size)
/* Round the request up to the HDA DMA alignment before allocating. */
574 roundsz = roundup2(size, HDA_DMA_ALIGNMENT);
575 bzero(dma, sizeof(*dma));
/* Create a tag restricted to 32-bit addresses unless 64-bit DMA is OK. */
580 result = bus_dma_tag_create(
581 bus_get_dma_tag(sc->dev), /* parent */
582 HDA_DMA_ALIGNMENT, /* alignment */
584 (sc->support_64bit) ? BUS_SPACE_MAXADDR :
585 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
586 BUS_SPACE_MAXADDR, /* highaddr */
588 NULL, /* fistfuncarg */
589 roundsz, /* maxsize */
591 roundsz, /* maxsegsz */
594 NULL, /* lockfuncarg */
595 &dma->dma_tag); /* dmat */
597 device_printf(sc->dev, "%s: bus_dma_tag_create failed (%d)\n",
599 goto hdac_dma_alloc_fail;
603 * Allocate DMA memory
/* Zeroed, non-blocking; uncacheable when snoop could not be enabled. */
605 result = bus_dmamem_alloc(dma->dma_tag, (void **)&dma->dma_vaddr,
606 BUS_DMA_NOWAIT | BUS_DMA_ZERO |
607 ((sc->flags & HDAC_F_DMA_NOCACHE) ? BUS_DMA_NOCACHE :
611 device_printf(sc->dev, "%s: bus_dmamem_alloc failed (%d)\n",
613 goto hdac_dma_alloc_fail;
616 dma->dma_size = roundsz;
/* Load the map; hdac_dma_cb records dma_paddr on success. */
621 result = bus_dmamap_load(dma->dma_tag, dma->dma_map,
622 (void *)dma->dma_vaddr, roundsz, hdac_dma_cb, (void *)dma, 0);
623 if (result != 0 || dma->dma_paddr == 0) {
626 device_printf(sc->dev, "%s: bus_dmamem_load failed (%d)\n",
628 goto hdac_dma_alloc_fail;
632 device_printf(sc->dev, "%s: size=%ju -> roundsz=%ju\n",
633 __func__, (uintmax_t)size, (uintmax_t)roundsz);
/* Failure path: hdac_dma_free tears down whatever was set up so far. */
639 hdac_dma_free(sc, dma);
644 /****************************************************************************
645 * void hdac_dma_free(struct hdac_softc *, struct hdac_dma *)
647 * Free a struct hdac_dma that has been previously allocated via the
648 * hdac_dma_alloc function.
649 ****************************************************************************/
651 hdac_dma_free(struct hdac_softc *sc, struct hdac_dma *dma)
/* Tear down in reverse order of hdac_dma_alloc; each step is guarded so
 * this is safe to call on a partially-constructed hdac_dma. */
653 if (dma->dma_paddr != 0) {
655 bus_dmamap_sync(dma->dma_tag, dma->dma_map,
656 BUS_DMASYNC_POSTREAD | BUS_DMASYNC_POSTWRITE);
657 bus_dmamap_unload(dma->dma_tag, dma->dma_map);
660 if (dma->dma_vaddr != NULL) {
661 bus_dmamem_free(dma->dma_tag, dma->dma_vaddr, dma->dma_map);
662 dma->dma_vaddr = NULL;
664 if (dma->dma_tag != NULL) {
665 bus_dma_tag_destroy(dma->dma_tag);
671 /****************************************************************************
672 * int hdac_mem_alloc(struct hdac_softc *)
674 * Allocate all the bus resources necessary to speak with the physical
676 ****************************************************************************/
678 hdac_mem_alloc(struct hdac_softc *sc)
680 struct hdac_mem *mem;
/* Map BAR0, the HDA controller's memory-mapped register space. */
683 mem->mem_rid = PCIR_BAR(0);
684 mem->mem_res = bus_alloc_resource_any(sc->dev, SYS_RES_MEMORY,
685 &mem->mem_rid, RF_ACTIVE);
686 if (mem->mem_res == NULL) {
687 device_printf(sc->dev,
688 "%s: Unable to allocate memory resource\n", __func__);
/* Cache tag/handle for the HDAC_READ/WRITE register accessors. */
691 mem->mem_tag = rman_get_bustag(mem->mem_res);
692 mem->mem_handle = rman_get_bushandle(mem->mem_res);
697 /****************************************************************************
698 * void hdac_mem_free(struct hdac_softc *)
700 * Free up resources previously allocated by hdac_mem_alloc.
701 ****************************************************************************/
703 hdac_mem_free(struct hdac_softc *sc)
705 struct hdac_mem *mem;
/* Guarded release: safe if hdac_mem_alloc never succeeded. */
708 if (mem->mem_res != NULL)
709 bus_release_resource(sc->dev, SYS_RES_MEMORY, mem->mem_rid,
714 /****************************************************************************
715 * int hdac_irq_alloc(struct hdac_softc *)
717 * Allocate and setup the resources necessary for interrupt handling.
718 ****************************************************************************/
720 hdac_irq_alloc(struct hdac_softc *sc)
722 struct hdac_irq *irq;
/* Prefer a single MSI vector unless the MSI quirk disables it. */
728 if ((sc->quirks_off & HDAC_QUIRK_MSI) == 0 &&
729 (result = pci_msi_count(sc->dev)) == 1 &&
730 pci_alloc_msi(sc->dev, &result) == 0)
733 irq->irq_res = bus_alloc_resource_any(sc->dev, SYS_RES_IRQ,
734 &irq->irq_rid, RF_SHAREABLE | RF_ACTIVE);
735 if (irq->irq_res == NULL) {
736 device_printf(sc->dev, "%s: Unable to allocate irq\n",
738 goto hdac_irq_alloc_fail;
/* Filter-less (ithread) handler; audio/video interrupt priority. */
740 result = bus_setup_intr(sc->dev, irq->irq_res, INTR_MPSAFE | INTR_TYPE_AV,
741 NULL, hdac_intr_handler, sc, &irq->irq_handle);
743 device_printf(sc->dev,
744 "%s: Unable to setup interrupt handler (%d)\n",
746 goto hdac_irq_alloc_fail;
757 /****************************************************************************
758 * void hdac_irq_free(struct hdac_softc *)
760 * Free up resources previously allocated by hdac_irq_alloc.
761 ****************************************************************************/
763 hdac_irq_free(struct hdac_softc *sc)
765 struct hdac_irq *irq;
/* Tear down handler before releasing the IRQ resource. */
768 if (irq->irq_res != NULL && irq->irq_handle != NULL)
769 bus_teardown_intr(sc->dev, irq->irq_res, irq->irq_handle);
770 if (irq->irq_res != NULL)
771 bus_release_resource(sc->dev, SYS_RES_IRQ, irq->irq_rid,
/* rid 1 means MSI was allocated (rid 0 is the legacy INTx line). */
773 if (irq->irq_rid == 0x1)
774 pci_release_msi(sc->dev);
775 irq->irq_handle = NULL;
780 /****************************************************************************
781 * void hdac_corb_init(struct hdac_softc *)
783 * Initialize the corb registers for operations but do not start it up yet.
784 * The CORB engine must not be running when this function is called.
785 ****************************************************************************/
787 hdac_corb_init(struct hdac_softc *sc)
792 /* Setup the CORB size. */
793 switch (sc->corb_size) {
795 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_256);
798 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_16);
801 corbsize = HDAC_CORBSIZE_CORBSIZE(HDAC_CORBSIZE_CORBSIZE_2);
/* corb_size was validated in hdac_get_capabilities; anything else is fatal. */
804 panic("%s: Invalid CORB size (%x)\n", __func__, sc->corb_size);
806 HDAC_WRITE_1(&sc->mem, HDAC_CORBSIZE, corbsize);
808 /* Setup the CORB Address in the hdac */
809 corbpaddr = (uint64_t)sc->corb_dma.dma_paddr;
810 HDAC_WRITE_4(&sc->mem, HDAC_CORBLBASE, (uint32_t)corbpaddr);
811 HDAC_WRITE_4(&sc->mem, HDAC_CORBUBASE, (uint32_t)(corbpaddr >> 32));
813 /* Set the WP and RP */
815 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
816 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, HDAC_CORBRP_CORBRPRST);
818 * The HDA specification indicates that the CORBRPRST bit will always
819 * read as zero. Unfortunately, it seems that at least the 82801G
820 * doesn't reset the bit to zero, which stalls the corb engine.
821 * manually reset the bit to zero before continuing.
823 HDAC_WRITE_2(&sc->mem, HDAC_CORBRP, 0x0);
825 /* Enable CORB error reporting */
827 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, HDAC_CORBCTL_CMEIE);
831 /****************************************************************************
832 * void hdac_rirb_init(struct hdac_softc *)
834 * Initialize the rirb registers for operations but do not start it up yet.
835 * The RIRB engine must not be running when this function is called.
836 ****************************************************************************/
838 hdac_rirb_init(struct hdac_softc *sc)
843 /* Setup the RIRB size. */
844 switch (sc->rirb_size) {
846 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_256);
849 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_16);
852 rirbsize = HDAC_RIRBSIZE_RIRBSIZE(HDAC_RIRBSIZE_RIRBSIZE_2);
/* rirb_size was validated earlier; anything else is a driver bug. */
855 panic("%s: Invalid RIRB size (%x)\n", __func__, sc->rirb_size);
857 HDAC_WRITE_1(&sc->mem, HDAC_RIRBSIZE, rirbsize);
859 /* Setup the RIRB Address in the hdac */
860 rirbpaddr = (uint64_t)sc->rirb_dma.dma_paddr;
861 HDAC_WRITE_4(&sc->mem, HDAC_RIRBLBASE, (uint32_t)rirbpaddr);
862 HDAC_WRITE_4(&sc->mem, HDAC_RIRBUBASE, (uint32_t)(rirbpaddr >> 32));
864 /* Setup the WP and RP */
866 HDAC_WRITE_2(&sc->mem, HDAC_RIRBWP, HDAC_RIRBWP_RIRBWPRST);
868 /* Setup the interrupt threshold */
869 HDAC_WRITE_2(&sc->mem, HDAC_RINTCNT, sc->rirb_size / 2);
871 /* Enable Overrun and response received reporting */
873 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL,
874 HDAC_RIRBCTL_RIRBOIC | HDAC_RIRBCTL_RINTCTL);
876 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, HDAC_RIRBCTL_RINTCTL);
880 * Make sure that the Host CPU cache doesn't contain any dirty
881 * cache lines that falls in the rirb. If I understood correctly, it
882 * should be sufficient to do this only once as the rirb is purely
883 * read-only from now on.
885 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
886 BUS_DMASYNC_PREREAD);
889 /****************************************************************************
890 * void hdac_corb_start(hdac_softc *)
892 * Startup the corb DMA engine
893 ****************************************************************************/
895 hdac_corb_start(struct hdac_softc *sc)
/* Read-modify-write CORBCTL to set the RUN bit. */
899 corbctl = HDAC_READ_1(&sc->mem, HDAC_CORBCTL);
900 corbctl |= HDAC_CORBCTL_CORBRUN;
901 HDAC_WRITE_1(&sc->mem, HDAC_CORBCTL, corbctl);
904 /****************************************************************************
905 * void hdac_rirb_start(hdac_softc *)
907 * Startup the rirb DMA engine
908 ****************************************************************************/
910 hdac_rirb_start(struct hdac_softc *sc)
/* Read-modify-write RIRBCTL to enable the RIRB DMA engine. */
914 rirbctl = HDAC_READ_1(&sc->mem, HDAC_RIRBCTL);
915 rirbctl |= HDAC_RIRBCTL_RIRBDMAEN;
916 HDAC_WRITE_1(&sc->mem, HDAC_RIRBCTL, rirbctl);
/*
 * Drain all pending entries from the RIRB ring. Unsolicited responses are
 * queued for the unsolq task; solicited ones complete the matching codec's
 * pending command. Returns via elided tail (presumably the number of
 * responses processed — TODO confirm against upstream).
 */
920 hdac_rirb_flush(struct hdac_softc *sc)
922 struct hdac_rirb *rirb_base, *rirb;
924 uint32_t resp, resp_ex;
928 rirb_base = (struct hdac_rirb *)sc->rirb_dma.dma_vaddr;
929 rirbwp = HDAC_READ_1(&sc->mem, HDAC_RIRBWP);
/* Make device writes to the ring visible to the CPU before reading. */
930 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
931 BUS_DMASYNC_POSTREAD);
934 while (sc->rirb_rp != rirbwp) {
936 sc->rirb_rp %= sc->rirb_size;
937 rirb = &rirb_base[sc->rirb_rp];
938 resp = le32toh(rirb->response);
939 resp_ex = le32toh(rirb->response_ex);
940 cad = HDAC_RIRB_RESPONSE_EX_SDATA_IN(resp_ex);
941 if (resp_ex & HDAC_RIRB_RESPONSE_EX_UNSOLICITED) {
/* Unsolicited: push (response, codec address) pairs onto the ring queue. */
942 sc->unsolq[sc->unsolq_wp++] = resp;
943 sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
944 sc->unsolq[sc->unsolq_wp++] = cad;
945 sc->unsolq_wp %= HDAC_UNSOLQ_MAX;
946 } else if (sc->codecs[cad].pending <= 0) {
/* Solicited response with no command outstanding: log and drop. */
947 device_printf(sc->dev, "Unexpected unsolicited "
948 "response from address %d: %08x\n", cad, resp);
950 sc->codecs[cad].response = resp;
951 sc->codecs[cad].pending--;
/* Re-arm the mapping for the device's next writes. */
956 bus_dmamap_sync(sc->rirb_dma.dma_tag, sc->rirb_dma.dma_map,
957 BUS_DMASYNC_PREREAD);
/*
 * Deliver queued unsolicited responses to the owning codec child devices.
 * The READY/BUSY state flag prevents re-entrant flushing.
 */
962 hdac_unsolq_flush(struct hdac_softc *sc)
969 if (sc->unsolq_st == HDAC_UNSOLQ_READY) {
970 sc->unsolq_st = HDAC_UNSOLQ_BUSY;
971 while (sc->unsolq_rp != sc->unsolq_wp) {
/* Entries are stored as (response, codec address) pairs. */
972 resp = sc->unsolq[sc->unsolq_rp++];
973 sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
974 cad = sc->unsolq[sc->unsolq_rp++];
975 sc->unsolq_rp %= HDAC_UNSOLQ_MAX;
976 if ((child = sc->codecs[cad].dev) != NULL)
977 HDAC_UNSOL_INTR(child, resp);
980 sc->unsolq_st = HDAC_UNSOLQ_READY;
986 /****************************************************************************
987 * uint32_t hdac_send_command
989 * Wrapper function that sends only one command to a given codec
990 ****************************************************************************/
992 hdac_send_command(struct hdac_softc *sc, nid_t cad, uint32_t verb)
/* Stamp the codec address into the verb's CAD field. */
998 verb &= ~HDA_CMD_CAD_MASK;
999 verb |= ((uint32_t)cad) << HDA_CMD_CAD_SHIFT;
1000 sc->codecs[cad].response = HDA_INVALID;
1002 sc->codecs[cad].pending++;
1004 sc->corb_wp %= sc->corb_size;
/* Write the verb into the CORB ring and hand it to the hardware. */
1005 corb = (uint32_t *)sc->corb_dma.dma_vaddr;
1006 bus_dmamap_sync(sc->corb_dma.dma_tag,
1007 sc->corb_dma.dma_map, BUS_DMASYNC_PREWRITE);
1008 corb[sc->corb_wp] = htole32(verb);
1009 bus_dmamap_sync(sc->corb_dma.dma_tag,
1010 sc->corb_dma.dma_map, BUS_DMASYNC_POSTWRITE);
1011 HDAC_WRITE_2(&sc->mem, HDAC_CORBWP, sc->corb_wp);
/* Busy-wait for the response, flushing the RIRB as we poll. */
1015 if (hdac_rirb_flush(sc) == 0)
1017 } while (sc->codecs[cad].pending != 0 && --timeout);
1019 if (sc->codecs[cad].pending != 0) {
1020 device_printf(sc->dev, "Command 0x%08x timeout on address %d\n",
1022 sc->codecs[cad].pending = 0;
/* Any unsolicited responses picked up while polling get deferred. */
1025 if (sc->unsolq_rp != sc->unsolq_wp)
1026 taskqueue_enqueue(taskqueue_thread, &sc->unsolq_task);
1027 return (sc->codecs[cad].response);
1030 /****************************************************************************
1032 ****************************************************************************/
1034 /****************************************************************************
1035 * int hdac_probe(device_t)
1037 * Probe for the presence of an hdac. If none is found, check for a generic
1038 * match using the subclass of the device.
1039 ****************************************************************************/
1041 hdac_probe(device_t dev)
1045 uint16_t class, subclass;
/* Build the 32-bit devid as (device << 16) | vendor, matching the table. */
1048 model = (uint32_t)pci_get_device(dev) << 16;
1049 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1050 class = pci_get_class(dev);
1051 subclass = pci_get_subclass(dev);
1053 bzero(desc, sizeof(desc));
/* Exact-model match beats a masked vendor match, which beats generic. */
1055 for (i = 0; i < nitems(hdac_devices); i++) {
1056 if (hdac_devices[i].model == model) {
1057 strlcpy(desc, hdac_devices[i].desc, sizeof(desc));
1058 result = BUS_PROBE_DEFAULT;
1061 if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1062 class == PCIC_MULTIMEDIA &&
1063 subclass == PCIS_MULTIMEDIA_HDA) {
1064 snprintf(desc, sizeof(desc), "%s (0x%04x)",
1065 hdac_devices[i].desc, pci_get_device(dev));
1066 result = BUS_PROBE_GENERIC;
/* Unknown device, but HDA class/subclass: attach generically. */
1070 if (result == ENXIO && class == PCIC_MULTIMEDIA &&
1071 subclass == PCIS_MULTIMEDIA_HDA) {
1072 snprintf(desc, sizeof(desc), "Generic (0x%08x)", model);
1073 result = BUS_PROBE_GENERIC;
1075 if (result != ENXIO) {
1076 strlcat(desc, " HDA Controller", sizeof(desc));
1077 device_set_desc_copy(dev, desc);
/* Taskqueue handler: flush the unsolicited-response queue in task context. */
1084 hdac_unsolq_task(void *context, int pending)
1086 struct hdac_softc *sc;
1088 sc = (struct hdac_softc *)context;
1091 hdac_unsolq_flush(sc);
1095 /****************************************************************************
1096 * int hdac_attach(device_t)
1098 * Attach the device into the kernel. Interrupts usually won't be enabled
1099 * when this function is called. Setup everything that doesn't require
1100 * interrupts and defer probing of codecs until interrupts are enabled.
1101 ****************************************************************************/
/*
 * hdac_attach: attach the HDA controller.  Interrupts are usually not yet
 * enabled; set up locks, quirks, PCI config (TCSEL, PCIe snoop), memory/IRQ
 * resources and the CORB/RIRB/POS/BDL DMA areas, then defer codec probing
 * (hdac_attach2) until interrupts are enabled via config_intrhook.
 *
 * Fixes vs. previous revision:
 *  - "msi" hint branch: quirks_off now uses '&= ~HDAC_QUIRK_MSI'.  The old
 *    '|= ~HDAC_QUIRK_MSI' ORed in the complement mask, setting every quirk
 *    bit except MSI in quirks_off and thereby force-disabling all quirks.
 *  - TCSEL debug printf: values are printed with a "0x" prefix, so the
 *    conversion must be %02x, not %02d.
 */
1103 hdac_attach(device_t dev)
1105 struct hdac_softc *sc;
1109 uint16_t class, subclass;
1113 sc = device_get_softc(dev);
1115 device_printf(dev, "PCI card vendor: 0x%04x, device: 0x%04x\n",
1116 pci_get_subvendor(dev), pci_get_subdevice(dev));
1117 device_printf(dev, "HDA Driver Revision: %s\n",
/* Re-derive the device/vendor model key exactly as hdac_probe() does. */
1121 model = (uint32_t)pci_get_device(dev) << 16;
1122 model |= (uint32_t)pci_get_vendor(dev) & 0x0000ffff;
1123 class = pci_get_class(dev);
1124 subclass = pci_get_subclass(dev);
1126 for (i = 0; i < nitems(hdac_devices); i++) {
1127 if (hdac_devices[i].model == model) {
1131 if (HDA_DEV_MATCH(hdac_devices[i].model, model) &&
1132 class == PCIC_MULTIMEDIA &&
1133 subclass == PCIS_MULTIMEDIA_HDA) {
1139 sc->lock = snd_mtxcreate(device_get_nameunit(dev), "HDA driver mutex");
1141 TASK_INIT(&sc->unsolq_task, 0, hdac_unsolq_task, sc);
1142 callout_init(&sc->poll_callout, 1);
1143 for (i = 0; i < HDAC_CODEC_MAX; i++)
1144 sc->codecs[i].dev = NULL;
/* Start from the quirk table entry matched above. */
1146 sc->quirks_on = hdac_devices[devid].quirks_on;
1147 sc->quirks_off = hdac_devices[devid].quirks_off;
/* Per-device "msi" tunable overrides the table defaults. */
1152 if (resource_int_value(device_get_name(dev),
1153 device_get_unit(dev), "msi", &i) == 0) {
1155 sc->quirks_off |= HDAC_QUIRK_MSI;
1157 sc->quirks_on |= HDAC_QUIRK_MSI;
1158 sc->quirks_off &= ~HDAC_QUIRK_MSI;
1161 hdac_config_fetch(sc, &sc->quirks_on, &sc->quirks_off);
1163 device_printf(sc->dev,
1164 "Config options: on=0x%08x off=0x%08x\n",
1165 sc->quirks_on, sc->quirks_off);
1168 if (resource_int_value(device_get_name(dev),
1169 device_get_unit(dev), "polling", &i) == 0 && i != 0)
1174 pci_enable_busmaster(dev);
1176 vendor = pci_get_vendor(dev);
1177 if (vendor == INTEL_VENDORID) {
/* Intel TCSEL: clear traffic-class select bits (keep only 0xf8 mask). */
1179 v = pci_read_config(dev, 0x44, 1);
1180 pci_write_config(dev, 0x44, v & 0xf8, 1);
1182 device_printf(dev, "TCSEL: 0x%02x -> 0x%02x\n", v,
1183 pci_read_config(dev, 0x44, 1));
1187 #if defined(__i386__) || defined(__amd64__)
1188 sc->flags |= HDAC_F_DMA_NOCACHE;
1190 if (resource_int_value(device_get_name(dev),
1191 device_get_unit(dev), "snoop", &i) == 0 && i != 0) {
1193 sc->flags &= ~HDAC_F_DMA_NOCACHE;
1196 * Try to enable PCIe snoop to avoid messing around with
1197 * uncacheable DMA attribute. Since PCIe snoop register
1198 * config is pretty much vendor specific, there are no
1199 * general solutions on how to enable it, forcing us (even
1200 * Microsoft) to enable uncacheable or write combined DMA
1203 * http://msdn2.microsoft.com/en-us/library/ms790324.aspx
1205 for (i = 0; i < nitems(hdac_pcie_snoop); i++) {
1206 if (hdac_pcie_snoop[i].vendor != vendor)
1208 sc->flags &= ~HDAC_F_DMA_NOCACHE;
1209 if (hdac_pcie_snoop[i].reg == 0x00)
1211 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1212 if ((v & hdac_pcie_snoop[i].enable) ==
1213 hdac_pcie_snoop[i].enable)
1215 v &= hdac_pcie_snoop[i].mask;
1216 v |= hdac_pcie_snoop[i].enable;
1217 pci_write_config(dev, hdac_pcie_snoop[i].reg, v, 1);
/* Read back to verify the snoop enable actually stuck. */
1218 v = pci_read_config(dev, hdac_pcie_snoop[i].reg, 1);
1219 if ((v & hdac_pcie_snoop[i].enable) !=
1220 hdac_pcie_snoop[i].enable) {
1223 "WARNING: Failed to enable PCIe "
1226 #if defined(__i386__) || defined(__amd64__)
1227 sc->flags |= HDAC_F_DMA_NOCACHE;
1232 #if defined(__i386__) || defined(__amd64__)
1237 device_printf(dev, "DMA Coherency: %s / vendor=0x%04x\n",
1238 (sc->flags & HDAC_F_DMA_NOCACHE) ?
1239 "Uncacheable" : "PCIe snoop", vendor);
1242 /* Allocate resources */
1243 result = hdac_mem_alloc(sc);
1245 goto hdac_attach_fail;
1246 result = hdac_irq_alloc(sc);
1248 goto hdac_attach_fail;
1250 /* Get Capabilities */
1251 result = hdac_get_capabilities(sc);
1253 goto hdac_attach_fail;
1255 /* Allocate CORB, RIRB, POS and BDLs dma memory */
1256 result = hdac_dma_alloc(sc, &sc->corb_dma,
1257 sc->corb_size * sizeof(uint32_t));
1259 goto hdac_attach_fail;
1260 result = hdac_dma_alloc(sc, &sc->rirb_dma,
1261 sc->rirb_size * sizeof(struct hdac_rirb));
1263 goto hdac_attach_fail;
1264 sc->streams = malloc(sizeof(struct hdac_stream) * sc->num_ss,
1265 M_HDAC, M_ZERO | M_WAITOK);
1266 for (i = 0; i < sc->num_ss; i++) {
1267 result = hdac_dma_alloc(sc, &sc->streams[i].bdl,
1268 sizeof(struct hdac_bdle) * HDA_BDL_MAX);
1270 goto hdac_attach_fail;
/* Optional DMA position buffer: 8 bytes per stream descriptor. */
1272 if (sc->quirks_on & HDAC_QUIRK_DMAPOS) {
1273 if (hdac_dma_alloc(sc, &sc->pos_dma, (sc->num_ss) * 8) != 0) {
1275 device_printf(dev, "Failed to "
1276 "allocate DMA pos buffer "
1280 uint64_t addr = sc->pos_dma.dma_paddr;
1282 HDAC_WRITE_4(&sc->mem, HDAC_DPIBUBASE, addr >> 32);
1283 HDAC_WRITE_4(&sc->mem, HDAC_DPIBLBASE,
1284 (addr & HDAC_DPLBASE_DPLBASE_MASK) |
1285 HDAC_DPLBASE_DPLBASE_DMAPBE);
/* DMA tag used by child PCM channels for their audio buffers. */
1289 result = bus_dma_tag_create(
1290 bus_get_dma_tag(sc->dev), /* parent */
1291 HDA_DMA_ALIGNMENT, /* alignment */
1293 (sc->support_64bit) ? BUS_SPACE_MAXADDR :
1294 BUS_SPACE_MAXADDR_32BIT, /* lowaddr */
1295 BUS_SPACE_MAXADDR, /* highaddr */
1296 NULL, /* filtfunc */
1297 NULL, /* fistfuncarg */
1298 HDA_BUFSZ_MAX, /* maxsize */
1300 HDA_BUFSZ_MAX, /* maxsegsz */
1302 NULL, /* lockfunc */
1303 NULL, /* lockfuncarg */
1304 &sc->chan_dmat); /* dmat */
1306 device_printf(dev, "%s: bus_dma_tag_create failed (%d)\n",
1308 goto hdac_attach_fail;
1311 /* Quiesce everything */
1313 device_printf(dev, "Reset controller...\n");
1315 hdac_reset(sc, true);
1317 /* Initialize the CORB and RIRB */
1321 /* Defer remaining of initialization until interrupts are enabled */
1322 sc->intrhook.ich_func = hdac_attach2;
1323 sc->intrhook.ich_arg = (void *)sc;
1324 if (cold == 0 || config_intrhook_establish(&sc->intrhook) != 0) {
1325 sc->intrhook.ich_func = NULL;
1326 hdac_attach2((void *)sc);
/* Failure path: release everything acquired above, in reverse order. */
1333 if (sc->streams != NULL)
1334 for (i = 0; i < sc->num_ss; i++)
1335 hdac_dma_free(sc, &sc->streams[i].bdl);
1336 free(sc->streams, M_HDAC);
1337 hdac_dma_free(sc, &sc->rirb_dma);
1338 hdac_dma_free(sc, &sc->corb_dma);
1340 snd_mtxfree(sc->lock);
/*
 * Sysctl handler ("pindump"): writing a non-zero value makes every child
 * codec dump its pin state/configuration via the HDAC_PINDUMP() method.
 * Reading returns 0; nothing happens unless a new value is supplied.
 */
1346 sysctl_hdac_pindump(SYSCTL_HANDLER_ARGS)
1348 	struct hdac_softc *sc;
1351 	int devcount, i, err, val;
1353 	dev = oidp->oid_arg1;
1354 	sc = device_get_softc(dev);
1358 	err = sysctl_handle_int(oidp, &val, 0, req);
1359 	if (err != 0 || req->newptr == NULL || val == 0)
1362 	/* XXX: Temporary. For debugging. */
1366 	} else if (val == 101) {
1371 	if ((err = device_get_children(dev, &devlist, &devcount)) != 0)
1374 	for (i = 0; i < devcount; i++)
1375 		HDAC_PINDUMP(devlist[i]);
/* device_get_children() allocates devlist with M_TEMP; release it here. */
1377 	free(devlist, M_TEMP);
/*
 * Compute the nominal data rate (rate * bits) for a stream from its
 * SDFMT-style format word: bit 14 selects the 44.1 kHz base family,
 * bits 11-8 are the rate multiplier/divisor, bits 5-4 index the container
 * size table 'mbits', bits 3-0 encode channels-1.  Used by
 * hdac_poll_reinit() to size polling intervals.
 */
1382 hdac_mdata_rate(uint16_t fmt)
1384 	static const int mbits[8] = { 8, 16, 32, 32, 32, 32, 32, 32 };
1387 	if (fmt & (1 << 14))
1391 	rate *= ((fmt >> 11) & 0x07) + 1;
1392 	rate /= ((fmt >> 8) & 0x07) + 1;
1393 	bits = mbits[(fmt >> 4) & 0x03];
1394 	bits *= (fmt & 0x0f) + 1;
1395 	return (rate * bits);
/*
 * Compute link (bus) bandwidth for a stream format.  Like
 * hdac_mdata_rate() but uses real sample widths ('bbits') and, for the
 * non-output direction, rounds bits up to a byte boundary and adds 10
 * bits of per-frame overhead.  Used for SDO/SDI bandwidth accounting in
 * hdac_stream_alloc().
 */
1399 hdac_bdata_rate(uint16_t fmt, int output)
1401 	static const int bbits[8] = { 8, 16, 20, 24, 32, 32, 32, 32 };
1405 	rate *= ((fmt >> 11) & 0x07) + 1;
1406 	bits = bbits[(fmt >> 4) & 0x03];
1407 	bits *= (fmt & 0x0f) + 1;
1409 	bits = ((bits + 7) & ~0x07) + 10;
1410 	return (rate * bits);
/*
 * Recompute the polling interval from all currently running streams and
 * (re)arm or stop the polling callout.  'min' starts at the sentinel
 * 1000000; each running stream contributes a tick count derived from its
 * block size and data rate.  No-op when polling mode is disabled.
 */
1414 hdac_poll_reinit(struct hdac_softc *sc)
1416 	int i, pollticks, min = 1000000;
1417 	struct hdac_stream *s;
1419 	if (sc->polling == 0)
/* Unsolicited responses need frequent service too (see unsol_registered). */
1421 	if (sc->unsol_registered > 0)
1423 	for (i = 0; i < sc->num_ss; i++) {
1424 		s = &sc->streams[i];
1425 		if (s->running == 0)
/* Ticks per block: hz * blksz / (bytes per second of stream data). */
1427 		pollticks = ((uint64_t)hz * s->blksz) /
1428 		    (hdac_mdata_rate(s->format) / 8);
1434 		if (min > pollticks)
1437 	sc->poll_ival = min;
1439 		callout_stop(&sc->poll_callout);
1441 		callout_reset(&sc->poll_callout, 1, hdac_poll_callback, sc);
/*
 * Sysctl handler ("polling"): toggle between interrupt-driven (0) and
 * polled (1) operation.  Switching to interrupts sets GIE in INTCTL;
 * switching to polling clears GIE and lets hdac_poll_reinit() arm the
 * callout.  Values outside 0..1 are rejected.
 */
1445 sysctl_hdac_polling(SYSCTL_HANDLER_ARGS)
1447 	struct hdac_softc *sc;
1452 	dev = oidp->oid_arg1;
1453 	sc = device_get_softc(dev);
1459 	err = sysctl_handle_int(oidp, &val, 0, req);
1461 	if (err != 0 || req->newptr == NULL)
1463 	if (val < 0 || val > 1)
1467 	if (val != sc->polling) {
1469 			callout_stop(&sc->poll_callout);
/* Drain outside the lock so a running callback can finish. */
1471 			callout_drain(&sc->poll_callout);
1474 			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1475 			ctl |= HDAC_INTCTL_GIE;
1476 			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1478 			ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1479 			ctl &= ~HDAC_INTCTL_GIE;
1480 			HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
1482 		hdac_poll_reinit(sc);
/*
 * Second attach phase, run once interrupts are enabled (directly or via
 * the config_intrhook set up in hdac_attach): start the CORB/RIRB
 * engines, enable controller interrupts, scan the STATESTS wake bits for
 * present codecs, create an "hdacc" child per responding codec, then
 * attach children and register the debug sysctls.
 */
1491 hdac_attach2(void *arg)
1493 	struct hdac_softc *sc;
1495 	uint32_t vendorid, revisionid;
1499 	sc = (struct hdac_softc *)arg;
1503 	/* Remove ourselves from the config hooks */
1504 	if (sc->intrhook.ich_func != NULL) {
1505 		config_intrhook_disestablish(&sc->intrhook);
1506 		sc->intrhook.ich_func = NULL;
1510 		device_printf(sc->dev, "Starting CORB Engine...\n");
1512 	hdac_corb_start(sc);
1514 		device_printf(sc->dev, "Starting RIRB Engine...\n");
1516 	hdac_rirb_start(sc);
1518 		device_printf(sc->dev,
1519 		    "Enabling controller interrupt...\n");
1521 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
/* Controller interrupts only when not in polling mode. */
1523 	if (sc->polling == 0) {
1524 		HDAC_WRITE_4(&sc->mem, HDAC_INTCTL,
1525 		    HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
1530 		device_printf(sc->dev, "Scanning HDA codecs ...\n");
1532 	statests = HDAC_READ_2(&sc->mem, HDAC_STATESTS);
1534 	for (i = 0; i < HDAC_CODEC_MAX; i++) {
1535 		if (HDAC_STATESTS_SDIWAKE(statests, i)) {
1537 				device_printf(sc->dev,
1538 				    "Found CODEC at address %d\n", i);
/* Identify the codec; HDA_INVALID on both replies means no response. */
1541 			vendorid = hdac_send_command(sc, i,
1542 			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_VENDOR_ID));
1543 			revisionid = hdac_send_command(sc, i,
1544 			    HDA_CMD_GET_PARAMETER(0, 0x0, HDA_PARAM_REVISION_ID));
1546 			if (vendorid == HDA_INVALID &&
1547 			    revisionid == HDA_INVALID) {
1548 				device_printf(sc->dev,
1549 				    "CODEC at address %d not responding!\n", i);
1552 			sc->codecs[i].vendor_id =
1553 			    HDA_PARAM_VENDOR_ID_VENDOR_ID(vendorid);
1554 			sc->codecs[i].device_id =
1555 			    HDA_PARAM_VENDOR_ID_DEVICE_ID(vendorid);
1556 			sc->codecs[i].revision_id =
1557 			    HDA_PARAM_REVISION_ID_REVISION_ID(revisionid);
1558 			sc->codecs[i].stepping_id =
1559 			    HDA_PARAM_REVISION_ID_STEPPING_ID(revisionid);
1560 			child = device_add_child(sc->dev, "hdacc", -1);
1561 			if (child == NULL) {
1562 				device_printf(sc->dev,
1563 				    "Failed to add CODEC device\n");
/* The codec address (cad) travels to the child as its ivars. */
1566 			device_set_ivars(child, (void *)(intptr_t)i);
1567 			sc->codecs[i].dev = child;
1570 	bus_generic_attach(sc->dev);
1572 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1573 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1574 	    "pindump", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev,
1575 	    sizeof(sc->dev), sysctl_hdac_pindump, "I", "Dump pin states/data");
1576 	SYSCTL_ADD_PROC(device_get_sysctl_ctx(sc->dev),
1577 	    SYSCTL_CHILDREN(device_get_sysctl_tree(sc->dev)), OID_AUTO,
1578 	    "polling", CTLTYPE_INT | CTLFLAG_RW | CTLFLAG_NEEDGIANT, sc->dev,
1579 	    sizeof(sc->dev), sysctl_hdac_polling, "I", "Enable polling mode");
1582 /****************************************************************************
1583 * int hdac_suspend(device_t)
1585 * Suspend and power down HDA bus and codecs.
1586 ****************************************************************************/
/*
 * Suspend: propagate suspend to children, then quiesce the controller
 * (stop the polling callout, reset without re-enabling, drain the
 * unsolicited-response task).
 */
1588 hdac_suspend(device_t dev)
1590 	struct hdac_softc *sc = device_get_softc(dev);
1593 		device_printf(dev, "Suspend...\n");
1595 	bus_generic_suspend(dev);
1599 		device_printf(dev, "Reset controller...\n");
1601 	callout_stop(&sc->poll_callout);
/* 'false' wake argument: leave the link down for suspend. */
1602 	hdac_reset(sc, false);
1604 	callout_drain(&sc->poll_callout);
1605 	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
1607 		device_printf(dev, "Suspend done\n");
1612 /****************************************************************************
1613 * int hdac_resume(device_t)
1615 * Powerup and restore HDA bus and codecs state.
1616 ****************************************************************************/
/*
 * Resume: bring the controller back up mirroring the attach sequence —
 * reset with wake, restart CORB/RIRB, re-enable interrupts, rebuild the
 * polling state — then resume children.
 */
1618 hdac_resume(device_t dev)
1620 	struct hdac_softc *sc = device_get_softc(dev);
1624 		device_printf(dev, "Resume...\n");
1628 	/* Quiesce everything */
1630 		device_printf(dev, "Reset controller...\n");
/* 'true': wake the link so codecs re-announce themselves. */
1632 	hdac_reset(sc, true);
1634 	/* Initialize the CORB and RIRB */
1639 		device_printf(dev, "Starting CORB Engine...\n");
1641 	hdac_corb_start(sc);
1643 		device_printf(dev, "Starting RIRB Engine...\n");
1645 	hdac_rirb_start(sc);
1647 		device_printf(dev, "Enabling controller interrupt...\n");
1649 	HDAC_WRITE_4(&sc->mem, HDAC_GCTL, HDAC_READ_4(&sc->mem, HDAC_GCTL) |
1651 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, HDAC_INTCTL_CIE | HDAC_INTCTL_GIE);
1653 	hdac_poll_reinit(sc);
1656 	error = bus_generic_resume(dev);
1658 		device_printf(dev, "Resume done\n");
1663 /****************************************************************************
1664 * int hdac_detach(device_t)
1666 * Detach and free up resources utilized by the hdac device.
1667 ****************************************************************************/
/*
 * Detach: delete all codec children (bailing out with the error if one
 * refuses), reset the hardware, drain the unsolicited task, then free
 * stream BDLs, POS/RIRB/CORB DMA memory, the channel DMA tag and the
 * driver mutex — the reverse of hdac_attach.
 */
1669 hdac_detach(device_t dev)
1671 	struct hdac_softc *sc = device_get_softc(dev);
1673 	int cad, i, devcount, error;
1675 	if ((error = device_get_children(dev, &devlist, &devcount)) != 0)
1677 	for (i = 0; i < devcount; i++) {
1678 		cad = (intptr_t)device_get_ivars(devlist[i]);
1679 		if ((error = device_delete_child(dev, devlist[i])) != 0) {
/* Free the child list before returning the error to avoid a leak. */
1680 			free(devlist, M_TEMP);
1683 		sc->codecs[cad].dev = NULL;
1685 	free(devlist, M_TEMP);
1688 	hdac_reset(sc, false);
1690 	taskqueue_drain(taskqueue_thread, &sc->unsolq_task);
1693 		for (i = 0; i < sc->num_ss; i++)
1694 			hdac_dma_free(sc, &sc->streams[i].bdl);
1695 	free(sc->streams, M_HDAC);
1696 	hdac_dma_free(sc, &sc->pos_dma);
1697 	hdac_dma_free(sc, &sc->rirb_dma);
1698 	hdac_dma_free(sc, &sc->corb_dma);
1699 	if (sc->chan_dmat != NULL) {
1700 		bus_dma_tag_destroy(sc->chan_dmat);
1701 		sc->chan_dmat = NULL;
1704 	snd_mtxfree(sc->lock);
/* Bus method: hand children the channel DMA tag created in hdac_attach. */
static bus_dma_tag_t
1709 hdac_get_dma_tag(device_t dev, device_t child)
1711 	struct hdac_softc *sc = device_get_softc(dev);
1713 	return (sc->chan_dmat);
/* Bus method: print a child line including its codec address (cad). */
1717 hdac_print_child(device_t dev, device_t child)
1721 	retval = bus_print_child_header(dev, child);
1722 	retval += printf(" at cad %d", (int)(intptr_t)device_get_ivars(child));
1723 	retval += bus_print_child_footer(dev, child);
/* Bus method: format the child's location string as "cad=N". */
1729 hdac_child_location_str(device_t dev, device_t child, char *buf, size_t buflen)
1732 	snprintf(buf, buflen, "cad=%d", (int)(intptr_t)device_get_ivars(child));
/*
 * Bus method: format the child's PNP info from the codec identification
 * captured in hdac_attach2 (vendor/device/revision/stepping).
 */
1737 hdac_child_pnpinfo_str_method(device_t dev, device_t child, char *buf,
1740 	struct hdac_softc *sc = device_get_softc(dev);
1741 	nid_t cad = (uintptr_t)device_get_ivars(child);
1743 	snprintf(buf, buflen,
1744 	    "vendor=0x%04x device=0x%04x revision=0x%02x stepping=0x%02x",
1745 	    sc->codecs[cad].vendor_id, sc->codecs[cad].device_id,
1746 	    sc->codecs[cad].revision_id, sc->codecs[cad].stepping_id);
/*
 * Bus method: export per-codec and controller-wide instance variables to
 * child codec drivers (IDs, subvendor/subdevice, DMA cacheability flag,
 * and the SDO stripe mask derived from num_sdo).
 */
1751 hdac_read_ivar(device_t dev, device_t child, int which, uintptr_t *result)
1753 	struct hdac_softc *sc = device_get_softc(dev);
1754 	nid_t cad = (uintptr_t)device_get_ivars(child);
1757 	case HDA_IVAR_CODEC_ID:
1760 	case HDA_IVAR_VENDOR_ID:
1761 		*result = sc->codecs[cad].vendor_id;
1763 	case HDA_IVAR_DEVICE_ID:
1764 		*result = sc->codecs[cad].device_id;
1766 	case HDA_IVAR_REVISION_ID:
1767 		*result = sc->codecs[cad].revision_id;
1769 	case HDA_IVAR_STEPPING_ID:
1770 		*result = sc->codecs[cad].stepping_id;
1772 	case HDA_IVAR_SUBVENDOR_ID:
1773 		*result = pci_get_subvendor(dev);
1775 	case HDA_IVAR_SUBDEVICE_ID:
1776 		*result = pci_get_subdevice(dev);
1778 	case HDA_IVAR_DMA_NOCACHE:
1779 		*result = (sc->flags & HDAC_F_DMA_NOCACHE) != 0;
1781 	case HDA_IVAR_STRIPES_MASK:
/* 2^(2^num_sdo) - 1: mask of usable stripe counts for this link. */
1782 		*result = (1 << (1 << sc->num_sdo)) - 1;
/* HDAC method: expose the controller mutex to child codec drivers. */
1791 hdac_get_mtx(device_t dev, device_t child)
1793 	struct hdac_softc *sc = device_get_softc(dev);
/*
 * HDAC method: send a verb to the child's codec address over the CORB
 * and return the RIRB response.
 */
1799 hdac_codec_command(device_t dev, device_t child, uint32_t verb)
1802 	return (hdac_send_command(device_get_softc(dev),
1803 	    (intptr_t)device_get_ivars(child), verb));
/*
 * Find the stream-descriptor index whose 'stream' tag matches, honoring
 * direction: input searches ISS [0, num_iss), output searches OSS
 * [num_iss, num_iss+num_oss), and both fall back to the bidirectional
 * BSS range.  Searching for stream == 0 finds a free descriptor.
 */
1807 hdac_find_stream(struct hdac_softc *sc, int dir, int stream)
1812 	/* Allocate ISS/OSS first. */
1814 		for (i = 0; i < sc->num_iss; i++) {
1815 			if (sc->streams[i].stream == stream) {
1821 		for (i = 0; i < sc->num_oss; i++) {
1822 			if (sc->streams[i + sc->num_iss].stream == stream) {
1823 				ss = i + sc->num_iss;
1828 	/* Fallback to BSS. */
1830 		for (i = 0; i < sc->num_bss; i++) {
1831 			if (sc->streams[i + sc->num_iss + sc->num_oss].stream
1833 				ss = i + sc->num_iss + sc->num_oss;
/*
 * HDAC method: allocate a stream descriptor for a child channel.  Finds a
 * free descriptor, accounts link bandwidth (per-link SDO budget for
 * output — scaled down by striping — or per-codec SDI budget for input),
 * assigns a stream tag (1..15 depending on descriptor range), records the
 * stream parameters, and optionally returns a pointer into the DMA
 * position buffer for this descriptor.
 */
1842 hdac_stream_alloc(device_t dev, device_t child, int dir, int format, int stripe,
1845 	struct hdac_softc *sc = device_get_softc(dev);
1846 	nid_t cad = (uintptr_t)device_get_ivars(child);
1847 	int stream, ss, bw, maxbw, prevbw;
1849 	/* Look for empty stream. */
1850 	ss = hdac_find_stream(sc, dir, 0);
1852 	/* Return if found nothing. */
1856 	/* Check bus bandwidth. */
1857 	bw = hdac_bdata_rate(format, dir);
/* Striping spreads output across SDO lines; scale bandwidth accordingly. */
1859 		bw *= 1 << (sc->num_sdo - stripe);
1860 		prevbw = sc->sdo_bw_used;
1861 		maxbw = 48000 * 960 * (1 << sc->num_sdo);
1863 		prevbw = sc->codecs[cad].sdi_bw_used;
1864 		maxbw = 48000 * 464;
1867 		device_printf(dev, "%dKbps of %dKbps bandwidth used%s\n",
1868 		    (bw + prevbw) / 1000, maxbw / 1000,
1869 		    bw + prevbw > maxbw ? " -- OVERFLOW!" : "");
1871 	if (bw + prevbw > maxbw)
1874 		sc->sdo_bw_used += bw;
1876 		sc->codecs[cad].sdi_bw_used += bw;
1878 	/* Allocate stream number */
/* BSS descriptors get tags counting down from 15; OSS/ISS count up from 1. */
1879 	if (ss >= sc->num_iss + sc->num_oss)
1880 		stream = 15 - (ss - sc->num_iss - sc->num_oss);
1881 	else if (ss >= sc->num_iss)
1882 		stream = ss - sc->num_iss + 1;
1886 	sc->streams[ss].dev = child;
1887 	sc->streams[ss].dir = dir;
1888 	sc->streams[ss].stream = stream;
1889 	sc->streams[ss].bw = bw;
1890 	sc->streams[ss].format = format;
1891 	sc->streams[ss].stripe = stripe;
1892 	if (dmapos != NULL) {
1893 		if (sc->pos_dma.dma_vaddr != NULL)
/* Each descriptor owns an 8-byte slot in the DMA position buffer. */
1894 			*dmapos = (uint32_t *)(sc->pos_dma.dma_vaddr + ss * 8);
/*
 * HDAC method: release a stream descriptor — return its bandwidth to the
 * SDO (output) or per-codec SDI (input) budget and mark it free
 * (stream tag 0).
 */
1902 hdac_stream_free(device_t dev, device_t child, int dir, int stream)
1904 	struct hdac_softc *sc = device_get_softc(dev);
1905 	nid_t cad = (uintptr_t)device_get_ivars(child);
1908 	ss = hdac_find_stream(sc, dir, stream);
1910 	    ("Free for not allocated stream (%d/%d)\n", dir, stream));
1912 		sc->sdo_bw_used -= sc->streams[ss].bw;
1914 		sc->codecs[cad].sdi_bw_used -= sc->streams[ss].bw;
1915 	sc->streams[ss].stream = 0;
1916 	sc->streams[ss].dev = NULL;
/*
 * HDAC method: program and start a stream.  Builds the buffer descriptor
 * list (one BDL entry per block, all with IOC set), writes the cyclic
 * buffer length, LVI and BDL base registers, sets direction/stream
 * tag/stripe in SDCTL2 and the format in SDFMT, enables the stream's
 * interrupt bit, clears stale status, and finally sets the run/IRQ
 * enable bits in SDCTL0.
 */
1920 hdac_stream_start(device_t dev, device_t child, int dir, int stream,
1921     bus_addr_t buf, int blksz, int blkcnt)
1923 	struct hdac_softc *sc = device_get_softc(dev);
1924 	struct hdac_bdle *bdle;
1929 	ss = hdac_find_stream(sc, dir, stream);
1931 	    ("Start for not allocated stream (%d/%d)\n", dir, stream));
1933 	addr = (uint64_t)buf;
1934 	bdle = (struct hdac_bdle *)sc->streams[ss].bdl.dma_vaddr;
1935 	for (i = 0; i < blkcnt; i++, bdle++) {
1936 		bdle->addrl = htole32((uint32_t)addr);
1937 		bdle->addrh = htole32((uint32_t)(addr >> 32));
1938 		bdle->len = htole32(blksz);
/* ioc=1: interrupt on completion of every block. */
1939 		bdle->ioc = htole32(1);
1943 	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
1944 	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_PREWRITE);
1947 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDCBL, blksz * blkcnt);
1948 	HDAC_WRITE_2(&sc->mem, off + HDAC_SDLVI, blkcnt - 1);
1949 	addr = sc->streams[ss].bdl.dma_paddr;
1950 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPL, (uint32_t)addr);
1951 	HDAC_WRITE_4(&sc->mem, off + HDAC_SDBDPU, (uint32_t)(addr >> 32));
1953 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL2);
1955 		ctl |= HDAC_SDCTL2_DIR;
1957 		ctl &= ~HDAC_SDCTL2_DIR;
1958 	ctl &= ~HDAC_SDCTL2_STRM_MASK;
1959 	ctl |= stream << HDAC_SDCTL2_STRM_SHIFT;
1960 	ctl &= ~HDAC_SDCTL2_STRIPE_MASK;
1961 	ctl |= sc->streams[ss].stripe << HDAC_SDCTL2_STRIPE_SHIFT;
1962 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL2, ctl);
1964 	HDAC_WRITE_2(&sc->mem, off + HDAC_SDFMT, sc->streams[ss].format);
1966 	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
1968 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
/* Clear any latched error/completion status before running. */
1970 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDSTS,
1971 	    HDAC_SDSTS_DESE | HDAC_SDSTS_FIFOE | HDAC_SDSTS_BCIS);
1972 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
1973 	ctl |= HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
1975 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
1977 	sc->streams[ss].blksz = blksz;
1978 	sc->streams[ss].running = 1;
1979 	hdac_poll_reinit(sc);
/*
 * HDAC method: stop a running stream — clear its run/IRQ-enable bits in
 * SDCTL0, mask its bit in INTCTL, mark it not running and let
 * hdac_poll_reinit() recompute the polling interval.
 */
1984 hdac_stream_stop(device_t dev, device_t child, int dir, int stream)
1986 	struct hdac_softc *sc = device_get_softc(dev);
1990 	ss = hdac_find_stream(sc, dir, stream);
1992 	    ("Stop for not allocated stream (%d/%d)\n", dir, stream));
1994 	bus_dmamap_sync(sc->streams[ss].bdl.dma_tag,
1995 	    sc->streams[ss].bdl.dma_map, BUS_DMASYNC_POSTWRITE);
1998 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
1999 	ctl &= ~(HDAC_SDCTL_IOCE | HDAC_SDCTL_FEIE | HDAC_SDCTL_DEIE |
2001 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2003 	ctl = HDAC_READ_4(&sc->mem, HDAC_INTCTL);
2005 	HDAC_WRITE_4(&sc->mem, HDAC_INTCTL, ctl);
2007 	sc->streams[ss].running = 0;
2008 	hdac_poll_reinit(sc);
/*
 * HDAC method: reset a stream descriptor per the HDA spec two-phase
 * protocol — set SRST and poll until the hardware reflects it, then
 * clear SRST and poll until it reads back clear.  Each phase reports a
 * timeout via device_printf rather than failing hard.
 */
2012 hdac_stream_reset(device_t dev, device_t child, int dir, int stream)
2014 	struct hdac_softc *sc = device_get_softc(dev);
2020 	ss = hdac_find_stream(sc, dir, stream);
2022 	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));
2025 	ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2026 	ctl |= HDAC_SDCTL_SRST;
2027 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2029 		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2030 		if (ctl & HDAC_SDCTL_SRST)
2034 	if (!(ctl & HDAC_SDCTL_SRST))
2035 		device_printf(dev, "Reset setting timeout\n");
2036 	ctl &= ~HDAC_SDCTL_SRST;
2037 	HDAC_WRITE_1(&sc->mem, off + HDAC_SDCTL0, ctl);
2040 		ctl = HDAC_READ_1(&sc->mem, off + HDAC_SDCTL0);
2041 		if (!(ctl & HDAC_SDCTL_SRST))
2045 	if (ctl & HDAC_SDCTL_SRST)
2046 		device_printf(dev, "Reset timeout!\n");
/*
 * HDAC method: read the stream's current link position (SDLPIB register).
 * NOTE(review): the KASSERT message says "Reset" — copy/paste from
 * hdac_stream_reset; "Getptr" would be more accurate.
 */
2050 hdac_stream_getptr(device_t dev, device_t child, int dir, int stream)
2052 	struct hdac_softc *sc = device_get_softc(dev);
2055 	ss = hdac_find_stream(sc, dir, stream);
2057 	    ("Reset for not allocated stream (%d/%d)\n", dir, stream));
2060 	return (HDAC_READ_4(&sc->mem, off + HDAC_SDLPIB));
/*
 * HDAC method: register interest in unsolicited responses.  The count
 * feeds hdac_poll_reinit(), which polls faster while any are registered.
 */
2064 hdac_unsol_alloc(device_t dev, device_t child, int tag)
2066 	struct hdac_softc *sc = device_get_softc(dev);
2068 	sc->unsol_registered++;
2069 	hdac_poll_reinit(sc);
/* HDAC method: drop an unsolicited-response registration (see alloc). */
2074 hdac_unsol_free(device_t dev, device_t child, int tag)
2076 	struct hdac_softc *sc = device_get_softc(dev);
2078 	sc->unsol_registered--;
2079 	hdac_poll_reinit(sc);
/*
 * newbus method table: device lifecycle, bus glue exposing children (the
 * hdacc codecs), and the HDAC kobj interface used by codec/channel code.
 */
2082 static device_method_t hdac_methods[] = {
2083 	/* device interface */
2084 	DEVMETHOD(device_probe, hdac_probe),
2085 	DEVMETHOD(device_attach, hdac_attach),
2086 	DEVMETHOD(device_detach, hdac_detach),
2087 	DEVMETHOD(device_suspend, hdac_suspend),
2088 	DEVMETHOD(device_resume, hdac_resume),
2090 	DEVMETHOD(bus_get_dma_tag, hdac_get_dma_tag),
2091 	DEVMETHOD(bus_print_child, hdac_print_child),
2092 	DEVMETHOD(bus_child_location_str, hdac_child_location_str),
2093 	DEVMETHOD(bus_child_pnpinfo_str, hdac_child_pnpinfo_str_method),
2094 	DEVMETHOD(bus_read_ivar, hdac_read_ivar),
2095 	DEVMETHOD(hdac_get_mtx, hdac_get_mtx),
2096 	DEVMETHOD(hdac_codec_command, hdac_codec_command),
2097 	DEVMETHOD(hdac_stream_alloc, hdac_stream_alloc),
2098 	DEVMETHOD(hdac_stream_free, hdac_stream_free),
2099 	DEVMETHOD(hdac_stream_start, hdac_stream_start),
2100 	DEVMETHOD(hdac_stream_stop, hdac_stream_stop),
2101 	DEVMETHOD(hdac_stream_reset, hdac_stream_reset),
2102 	DEVMETHOD(hdac_stream_getptr, hdac_stream_getptr),
2103 	DEVMETHOD(hdac_unsol_alloc, hdac_unsol_alloc),
2104 	DEVMETHOD(hdac_unsol_free, hdac_unsol_free),
/* Driver declaration and registration on the PCI bus as snd_hda. */
2108 static driver_t hdac_driver = {
2111 	sizeof(struct hdac_softc),
2114 static devclass_t hdac_devclass;
2116 DRIVER_MODULE(snd_hda, pci, hdac_driver, hdac_devclass, NULL, NULL);