/*-
 * SPDX-License-Identifier: BSD-2-Clause
 *
 * Copyright (c) 2016-2019 Ruslan Bukin <br@bsdpad.com>
 *
 * This software was developed by SRI International and the University of
 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
 * ("CTSRD"), as part of the DARPA CRASH research programme.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#ifndef _DEV_XDMA_XDMA_H_
#define _DEV_XDMA_XDMA_H_

/*
 * Prerequisite headers for the types used below (queue(3), mutex(9),
 * malloc(9), vmem(9), bus_dma(9)).  This include set is an assumption;
 * the in-tree header may rely on the includer providing some of them.
 */
#include <sys/param.h>
#include <sys/queue.h>
#include <sys/bus.h>
#include <sys/lock.h>
#include <sys/mutex.h>
#include <sys/malloc.h>
#include <sys/vmem.h>
#include <machine/bus.h>

enum xdma_direction {
	XDMA_MEM_TO_MEM,
	XDMA_MEM_TO_DEV,
	XDMA_DEV_TO_MEM,
	XDMA_DEV_TO_DEV,
};

enum xdma_operation_type {
	XDMA_MEMCPY,
	XDMA_CYCLIC,
	XDMA_FIFO,
	XDMA_SG,
};

enum xdma_request_type {
	XR_TYPE_PHYS,
	XR_TYPE_VIRT,
	XR_TYPE_MBUF,
	XR_TYPE_BIO,
};

enum xdma_command {
	XDMA_CMD_BEGIN,
	XDMA_CMD_PAUSE,
	XDMA_CMD_TERMINATE,
};

struct xdma_transfer_status {
	uint32_t transferred;
	int error;
};

typedef struct xdma_transfer_status xdma_transfer_status_t;

struct xdma_controller {
	device_t dev;		/* DMA consumer device_t. */
	device_t dma_dev;	/* A real DMA device_t. */
	void *data;		/* OFW MD part. */
	vmem_t *vmem;		/* Bounce memory. */

	/* List of virtual channels allocated. */
	TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
};

typedef struct xdma_controller xdma_controller_t;

/* Busdma segment bookkeeping for a single request. */
struct xchan_buf {
	bus_dmamap_t map;
	uint32_t nsegs;
	uint32_t nsegs_left;
};

struct xdma_request {
	struct mbuf *m;
	struct bio *bp;
	enum xdma_operation_type operation;
	enum xdma_request_type req_type;
	enum xdma_direction direction;
	bus_addr_t src_addr;
	bus_addr_t dst_addr;
	uint8_t src_width;
	uint8_t dst_width;
	bus_size_t block_num;
	bus_size_t block_len;
	xdma_transfer_status_t status;
	void *user;
	TAILQ_ENTRY(xdma_request) xr_next;
	struct xchan_buf buf;
};

struct xdma_sglist {
	bus_addr_t src_addr;
	bus_addr_t dst_addr;
	size_t len;
	uint8_t src_width;
	uint8_t dst_width;
	enum xdma_direction direction;
	bool first;
	bool last;
};

struct xdma_channel {
	xdma_controller_t *xdma;
	vmem_t *vmem;

	uint32_t flags;
#define XCHAN_BUFS_ALLOCATED (1 << 0)
#define XCHAN_SGLIST_ALLOCATED (1 << 1)
#define XCHAN_CONFIGURED (1 << 2)
#define XCHAN_TYPE_CYCLIC (1 << 3)
#define XCHAN_TYPE_MEMCPY (1 << 4)
#define XCHAN_TYPE_FIFO (1 << 5)
#define XCHAN_TYPE_SG (1 << 6)

	uint32_t caps;
#define XCHAN_CAP_BUSDMA (1 << 0)
#define XCHAN_CAP_NOSEG (1 << 1)
#define XCHAN_CAP_NOBUFS (1 << 2)

	/* A real hardware driver channel. */
	void *chan;

	/* Interrupt handlers. */
	TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
	TAILQ_ENTRY(xdma_channel) xchan_next;

	struct mtx mtx_lock;
	struct mtx mtx_qin_lock;
	struct mtx mtx_qout_lock;
	struct mtx mtx_bank_lock;
	struct mtx mtx_proc_lock;

	bus_dma_tag_t dma_tag_bufs;
	struct xdma_request *xr_mem;
	uint32_t xr_num;

	/* Bus dma tag options. */
	bus_size_t maxsegsize;
	bus_size_t maxnsegs;
	bus_size_t alignment;
	bus_addr_t boundary;
	bus_addr_t lowaddr;
	bus_addr_t highaddr;

	struct xdma_sglist *sg;

	TAILQ_HEAD(, xdma_request) bank;
	TAILQ_HEAD(, xdma_request) queue_in;
	TAILQ_HEAD(, xdma_request) queue_out;
	TAILQ_HEAD(, xdma_request) processing;
};

typedef struct xdma_channel xdma_channel_t;

struct xdma_intr_handler {
	int (*cb)(void *cb_user, xdma_transfer_status_t *status);
	void *cb_user;
	TAILQ_ENTRY(xdma_intr_handler) ih_next;
};

static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");

#define XCHAN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_lock)
#define XCHAN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_lock)
#define XCHAN_ASSERT_LOCKED(xchan) \
	mtx_assert(&(xchan)->mtx_lock, MA_OWNED)

#define QUEUE_IN_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qin_lock)
#define QUEUE_IN_ASSERT_LOCKED(xchan) \
	mtx_assert(&(xchan)->mtx_qin_lock, MA_OWNED)

#define QUEUE_OUT_LOCK(xchan)		mtx_lock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_UNLOCK(xchan)		mtx_unlock(&(xchan)->mtx_qout_lock)
#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
	mtx_assert(&(xchan)->mtx_qout_lock, MA_OWNED)

#define QUEUE_BANK_LOCK(xchan)		mtx_lock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_bank_lock)
#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
	mtx_assert(&(xchan)->mtx_bank_lock, MA_OWNED)

#define QUEUE_PROC_LOCK(xchan)		mtx_lock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_UNLOCK(xchan)	mtx_unlock(&(xchan)->mtx_proc_lock)
#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
	mtx_assert(&(xchan)->mtx_proc_lock, MA_OWNED)

#define XDMA_SGLIST_MAXLEN	2048
#define XDMA_MAX_SEG		128

/* xDMA controller ops */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
int xdma_put(xdma_controller_t *xdma);
vmem_t * xdma_get_memory(device_t dev);
void xdma_put_memory(vmem_t *vmem);
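
/*
 * Example (sketch): an FDT consumer typically looks up its controller by
 * xdma property name in attach() and releases it in detach().  The "tx"
 * name and the softc fields here are illustrative assumptions.
 *
 *	sc->xdma_tx = xdma_ofw_get(sc->dev, "tx");
 *	if (sc->xdma_tx == NULL)
 *		return (ENXIO);
 *	...
 *	xdma_put(sc->xdma_tx);
 */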

/* xDMA channel ops */
xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
int xdma_channel_free(xdma_channel_t *);
int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
void xchan_set_memory(xdma_channel_t *xchan, vmem_t *vmem);

int xdma_prep_sg(xdma_channel_t *, uint32_t,
    bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
void xdma_channel_free_sg(xdma_channel_t *xchan);
int xdma_queue_submit_sg(xdma_channel_t *xchan);
void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
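
/*
 * Example (sketch): allocate a busdma-backed scatter-gather channel and
 * size its request ring.  The xdma_prep_sg() arguments below are, in
 * order: xr_num, maxsegsize, maxnsegs, alignment, boundary, lowaddr and
 * highaddr; TX_QUEUE_SIZE, the values and the softc fields are
 * illustrative assumptions, not framework defaults.
 *
 *	sc->xchan_tx = xdma_channel_alloc(sc->xdma_tx, XCHAN_CAP_BUSDMA);
 *	if (sc->xchan_tx == NULL)
 *		return (ENXIO);
 *	error = xdma_prep_sg(sc->xchan_tx, TX_QUEUE_SIZE, MCLBYTES, 8, 16,
 *	    0, BUS_SPACE_MAXADDR_32BIT, BUS_SPACE_MAXADDR);
 */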

/* Queue operations */
int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
    xdma_transfer_status_t *);
int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
    xdma_transfer_status_t *status);
int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
    uint8_t, uint8_t, enum xdma_direction dir);
int xdma_dequeue(xdma_channel_t *xchan, void **user,
    xdma_transfer_status_t *status);
int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
    uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
int xdma_queue_submit(xdma_channel_t *xchan);
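
/*
 * Example (sketch): hand an mbuf chain to a TX channel and reap completed
 * requests later (e.g. from the completion handler).  The device address 0
 * and the width arguments (4, 4) are illustrative only.
 *
 *	xdma_transfer_status_t st;
 *
 *	if (xdma_enqueue_mbuf(sc->xchan_tx, &m, 0, 4, 4,
 *	    XDMA_MEM_TO_DEV) != 0)
 *		return (ENOBUFS);
 *	xdma_queue_submit(sc->xchan_tx);
 *	...
 *	while (xdma_dequeue_mbuf(sc->xchan_tx, &m, &st) == 0)
 *		m_freem(m);
 */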

/* Mbuf operations */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);

/* Channel Control */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);

/* Interrupt callback */
int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
    xdma_transfer_status_t *), void *arg, void **);
int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
int xdma_teardown_all_intr(xdma_channel_t *xchan);
void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
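
/*
 * Example (sketch): register a per-channel completion handler.  When the
 * DMA engine driver calls xdma_callback(), the handlers on the channel's
 * ie_handlers list are invoked with the transfer status.  The handler body
 * and the softc fields are illustrative assumptions.
 *
 *	static int
 *	my_tx_done(void *arg, xdma_transfer_status_t *status)
 *	{
 *		struct my_softc *sc = arg;
 *
 *		wakeup(&sc->tx_done);
 *		return (0);
 *	}
 *	...
 *	error = xdma_setup_intr(sc->xchan_tx, my_tx_done, sc, &sc->ih_tx);
 */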

/* Sglist */
int xchan_sglist_alloc(xdma_channel_t *xchan);
void xchan_sglist_free(xdma_channel_t *xchan);
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
    uint32_t nsegs, struct xdma_request *xr);

/* Requests bank */
void xchan_bank_init(xdma_channel_t *xchan);
int xchan_bank_free(xdma_channel_t *xchan);
struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
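
/*
 * Note (assumed semantics, inferred from the queue API above): the bank is
 * the channel's preallocated pool of struct xdma_request entries backed by
 * xr_mem.  xchan_bank_get() borrows a free request for an enqueue operation
 * and xchan_bank_put() returns it once the consumer has dequeued the result.
 */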

#endif /* !_DEV_XDMA_XDMA_H_ */