2 * Copyright (c) 2016-2018 Ruslan Bukin <br@bsdpad.com>
5 * This software was developed by SRI International and the University of
6 * Cambridge Computer Laboratory under DARPA/AFRL contract FA8750-10-C-0237
7 * ("CTSRD"), as part of the DARPA CRASH research programme.
9 * Redistribution and use in source and binary forms, with or without
10 * modification, are permitted provided that the following conditions
12 * 1. Redistributions of source code must retain the above copyright
13 * notice, this list of conditions and the following disclaimer.
14 * 2. Redistributions in binary form must reproduce the above copyright
15 * notice, this list of conditions and the following disclaimer in the
16 * documentation and/or other materials provided with the distribution.
18 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
19 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
20 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
21 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
22 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
23 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
24 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
25 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
26 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
27 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
33 #ifndef _DEV_XDMA_XDMA_H_
34 #define _DEV_XDMA_XDMA_H_
45 enum xdma_operation_type {
52 enum xdma_request_type {
65 struct xdma_transfer_status {
70 typedef struct xdma_transfer_status xdma_transfer_status_t;
72 struct xdma_controller {
73 device_t dev; /* DMA consumer device_t. */
74 device_t dma_dev; /* A real DMA device_t. */
75 void *data; /* OFW MD part. */
77 /* List of virtual channels allocated. */
78 TAILQ_HEAD(xdma_channel_list, xdma_channel) channels;
81 typedef struct xdma_controller xdma_controller_t;
93 enum xdma_operation_type operation;
94 enum xdma_request_type req_type;
95 enum xdma_direction direction;
100 bus_size_t block_num;
101 bus_size_t block_len;
102 xdma_transfer_status_t status;
104 TAILQ_ENTRY(xdma_request) xr_next;
105 struct xchan_buf buf;
114 enum xdma_direction direction;
119 struct xdma_channel {
120 xdma_controller_t *xdma;
/*
 * Channel state / type flags.
 * NOTE(review): presumably stored in a per-channel flags word declared in
 * struct xdma_channel (field not visible in this chunk) — confirm.
 */
#define XCHAN_BUFS_ALLOCATED (1 << 0)	/* Per-request buffers allocated. */
#define XCHAN_SGLIST_ALLOCATED (1 << 1)	/* xchan->sg array allocated. */
#define XCHAN_CONFigURED (1 << 2)
#define XCHAN_TYPE_CYCLIC (1 << 3)	/* Cyclic (ring) transfer channel. */
#define XCHAN_TYPE_MEMCPY (1 << 4)	/* Memory-to-memory channel. */
#define XCHAN_TYPE_FIFO (1 << 5)	/* FIFO (peripheral) channel. */
#define XCHAN_TYPE_SG (1 << 6)		/* Scatter-gather channel. */

/*
 * Channel capability flags (the "caps" argument of xdma_channel_alloc()).
 */
#define XCHAN_CAP_BUSDMA (1 << 0)	/* Use bus_dma(9) for mappings. */
#define XCHAN_CAP_BUSDMA_NOSEG (1 << 1)	/* Restrict bus_dma to one segment. */
135 /* A real hardware driver channel. */
138 /* Interrupt handlers. */
139 TAILQ_HEAD(, xdma_intr_handler) ie_handlers;
140 TAILQ_ENTRY(xdma_channel) xchan_next;
143 struct sx sx_qin_lock;
144 struct sx sx_qout_lock;
145 struct sx sx_bank_lock;
146 struct sx sx_proc_lock;
149 bus_dma_tag_t dma_tag_bufs;
150 struct xdma_request *xr_mem;
153 /* Bus dma tag options. */
154 bus_size_t maxsegsize;
156 bus_size_t alignment;
161 struct xdma_sglist *sg;
163 TAILQ_HEAD(, xdma_request) bank;
164 TAILQ_HEAD(, xdma_request) queue_in;
165 TAILQ_HEAD(, xdma_request) queue_out;
166 TAILQ_HEAD(, xdma_request) processing;
169 typedef struct xdma_channel xdma_channel_t;
171 struct xdma_intr_handler {
172 int (*cb)(void *cb_user, xdma_transfer_status_t *status);
174 TAILQ_ENTRY(xdma_intr_handler) ih_next;
/* malloc(9) type used for all xDMA framework allocations. */
static MALLOC_DEFINE(M_XDMA, "xdma", "xDMA framework");
/*
 * Locking helpers. Each wraps an sx(9) shared/exclusive lock embedded in
 * struct xdma_channel; all take the lock exclusively.
 */

/* Channel lock: protects overall channel state (xchan->sx_lock). */
#define XCHAN_LOCK(xchan) sx_xlock(&(xchan)->sx_lock)
#define XCHAN_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_lock)
#define XCHAN_ASSERT_LOCKED(xchan) \
 sx_assert(&(xchan)->sx_lock, SX_XLOCKED)

/* Protects the queue_in request queue (xchan->sx_qin_lock). */
#define QUEUE_IN_LOCK(xchan) sx_xlock(&(xchan)->sx_qin_lock)
#define QUEUE_IN_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_qin_lock)
#define QUEUE_IN_ASSERT_LOCKED(xchan) \
 sx_assert(&(xchan)->sx_qin_lock, SX_XLOCKED)

/* Protects the queue_out request queue (xchan->sx_qout_lock). */
#define QUEUE_OUT_LOCK(xchan) sx_xlock(&(xchan)->sx_qout_lock)
#define QUEUE_OUT_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_qout_lock)
#define QUEUE_OUT_ASSERT_LOCKED(xchan) \
 sx_assert(&(xchan)->sx_qout_lock, SX_XLOCKED)

/* Protects the bank (free-request pool) queue (xchan->sx_bank_lock). */
#define QUEUE_BANK_LOCK(xchan) sx_xlock(&(xchan)->sx_bank_lock)
#define QUEUE_BANK_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_bank_lock)
#define QUEUE_BANK_ASSERT_LOCKED(xchan) \
 sx_assert(&(xchan)->sx_bank_lock, SX_XLOCKED)

/* Protects the processing queue (xchan->sx_proc_lock). */
#define QUEUE_PROC_LOCK(xchan) sx_xlock(&(xchan)->sx_proc_lock)
#define QUEUE_PROC_UNLOCK(xchan) sx_xunlock(&(xchan)->sx_proc_lock)
#define QUEUE_PROC_ASSERT_LOCKED(xchan) \
 sx_assert(&(xchan)->sx_proc_lock, SX_XLOCKED)
/* Capacity of a channel's scatter-gather list (xchan->sg) — TODO confirm
 * against xchan_sglist_alloc(). */
#define XDMA_SGLIST_MAXLEN 2048
/* Maximum bus_dma segments per mapped request — TODO confirm against the
 * channel's bus_dma tag setup. */
#define XDMA_MAX_SEG 128
/* xDMA controller ops */

/*
 * Obtain the xDMA controller referenced by OFW property "prop" of
 * consumer device "dev"; NULL semantics on failure — TODO confirm.
 */
xdma_controller_t *xdma_ofw_get(device_t dev, const char *prop);
/* Release a controller obtained with xdma_ofw_get(). */
int xdma_put(xdma_controller_t *xdma);
211 /* xDMA channel ops */
212 xdma_channel_t * xdma_channel_alloc(xdma_controller_t *, uint32_t caps);
213 int xdma_channel_free(xdma_channel_t *);
214 int xdma_request(xdma_channel_t *xchan, struct xdma_request *r);
217 int xdma_prep_sg(xdma_channel_t *, uint32_t,
218 bus_size_t, bus_size_t, bus_size_t, bus_addr_t, bus_addr_t, bus_addr_t);
219 void xdma_channel_free_sg(xdma_channel_t *xchan);
220 int xdma_queue_submit_sg(xdma_channel_t *xchan);
221 void xchan_seg_done(xdma_channel_t *xchan, xdma_transfer_status_t *);
223 /* Queue operations */
224 int xdma_dequeue_mbuf(xdma_channel_t *xchan, struct mbuf **m,
225 xdma_transfer_status_t *);
226 int xdma_enqueue_mbuf(xdma_channel_t *xchan, struct mbuf **m, uintptr_t addr,
227 uint8_t, uint8_t, enum xdma_direction dir);
228 int xdma_dequeue_bio(xdma_channel_t *xchan, struct bio **bp,
229 xdma_transfer_status_t *status);
230 int xdma_enqueue_bio(xdma_channel_t *xchan, struct bio **bp, bus_addr_t addr,
231 uint8_t, uint8_t, enum xdma_direction dir);
232 int xdma_dequeue(xdma_channel_t *xchan, void **user,
233 xdma_transfer_status_t *status);
234 int xdma_enqueue(xdma_channel_t *xchan, uintptr_t src, uintptr_t dst,
235 uint8_t, uint8_t, bus_size_t, enum xdma_direction dir, void *);
236 int xdma_queue_submit(xdma_channel_t *xchan);
/* Mbuf operations */

/* Collapse the mbuf chain of "xr" so it fits the channel's segment limit —
 * TODO confirm exact defrag policy against the implementation. */
uint32_t xdma_mbuf_defrag(xdma_channel_t *xchan, struct xdma_request *xr);
/* Return the number of mbufs in chain "m0". */
uint32_t xdma_mbuf_chain_count(struct mbuf *m0);
/* Channel Control */

/* Issue command "cmd" (begin/pause/terminate, per enum xdma_command) to
 * the hardware driver for this channel. */
int xdma_control(xdma_channel_t *xchan, enum xdma_command cmd);
245 /* Interrupt callback */
246 int xdma_setup_intr(xdma_channel_t *xchan, int (*cb)(void *,
247 xdma_transfer_status_t *), void *arg, void **);
248 int xdma_teardown_intr(xdma_channel_t *xchan, struct xdma_intr_handler *ih);
249 int xdma_teardown_all_intr(xdma_channel_t *xchan);
250 void xdma_callback(struct xdma_channel *xchan, xdma_transfer_status_t *status);
/* Allocate the channel's scatter-gather list (xchan->sg). */
int xchan_sglist_alloc(xdma_channel_t *xchan);
/* Free the channel's scatter-gather list. */
void xchan_sglist_free(xdma_channel_t *xchan);
/* Append "nsegs" bus_dma segments describing request "xr" to list "sg". */
int xdma_sglist_add(struct xdma_sglist *sg, struct bus_dma_segment *seg,
 uint32_t nsegs, struct xdma_request *xr);
259 void xchan_bank_init(xdma_channel_t *xchan);
260 int xchan_bank_free(xdma_channel_t *xchan);
261 struct xdma_request * xchan_bank_get(xdma_channel_t *xchan);
262 int xchan_bank_put(xdma_channel_t *xchan, struct xdma_request *xr);
264 #endif /* !_DEV_XDMA_XDMA_H_ */