/*-
 * Copyright (c) 2015 Brian Fundakowski Feldman. All rights reserved.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
7 * 1. Redistributions of source code must retain the above copyright
8 * notice, this list of conditions and the following disclaimer.
9 * 2. Redistributions in binary form must reproduce the above copyright
10 * notice, this list of conditions and the following disclaimer in the
11 * documentation and/or other materials provided with the distribution.
13 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR ``AS IS'' AND ANY EXPRESS OR
14 * IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE IMPLIED WARRANTIES
15 * OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE ARE DISCLAIMED.
16 * IN NO EVENT SHALL THE AUTHOR BE LIABLE FOR ANY DIRECT, INDIRECT,
17 * INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT
18 * NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
19 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
20 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
21 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE OF
 * THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */
25 #include <sys/cdefs.h>
26 __FBSDID("$FreeBSD$");
28 #include "opt_platform.h"
30 #include <sys/param.h>
31 #include <sys/systm.h>
34 #include <sys/kernel.h>
36 #include <sys/malloc.h>
38 #include <sys/mutex.h>
39 #include <sys/module.h>
41 #include <sys/rwlock.h>
42 #include <sys/spigenio.h>
43 #include <sys/sysctl.h>
44 #include <sys/types.h>
47 #include <vm/vm_extern.h>
48 #include <vm/vm_object.h>
49 #include <vm/vm_page.h>
50 #include <vm/vm_pager.h>
52 #include <dev/spibus/spi.h>
54 #include "spibus_if.h"
56 #define SPIGEN_OPEN (1 << 0)
57 #define SPIGEN_MMAP_BUSY (1 << 1)
63 uint32_t sc_clock_speed;
64 uint32_t sc_command_length_max; /* cannot change while mmapped */
65 uint32_t sc_data_length_max; /* cannot change while mmapped */
66 vm_object_t sc_mmap_buffer; /* command, then data */
67 vm_offset_t sc_mmap_kvaddr;
68 size_t sc_mmap_buffer_size;
75 spigen_identify(driver_t *driver, device_t parent)
77 if (device_find_child(parent, "spigen", -1) != NULL)
79 if (BUS_ADD_CHILD(parent, 0, "spigen", -1) == NULL)
80 device_printf(parent, "add child failed\n");
85 spigen_probe(device_t dev)
88 device_set_desc(dev, "SPI Generic IO");
90 return (BUS_PROBE_NOWILDCARD);
93 static int spigen_open(struct cdev *, int, int, struct thread *);
94 static int spigen_ioctl(struct cdev *, u_long, caddr_t, int, struct thread *);
95 static int spigen_close(struct cdev *, int, int, struct thread *);
96 static d_mmap_single_t spigen_mmap_single;
98 static struct cdevsw spigen_cdevsw = {
99 .d_version = D_VERSION,
101 .d_open = spigen_open,
102 .d_ioctl = spigen_ioctl,
103 .d_mmap_single = spigen_mmap_single,
104 .d_close = spigen_close
108 spigen_command_length_max_proc(SYSCTL_HANDLER_ARGS)
110 struct spigen_softc *sc = (struct spigen_softc *)arg1;
111 uint32_t command_length_max;
114 mtx_lock(&sc->sc_mtx);
115 command_length_max = sc->sc_command_length_max;
116 mtx_unlock(&sc->sc_mtx);
117 error = sysctl_handle_int(oidp, &command_length_max,
118 sizeof(command_length_max), req);
119 if (error == 0 && req->newptr != NULL) {
120 mtx_lock(&sc->sc_mtx);
121 if (sc->sc_mmap_buffer != NULL)
124 sc->sc_command_length_max = command_length_max;
125 mtx_unlock(&sc->sc_mtx);
131 spigen_data_length_max_proc(SYSCTL_HANDLER_ARGS)
133 struct spigen_softc *sc = (struct spigen_softc *)arg1;
134 uint32_t data_length_max;
137 mtx_lock(&sc->sc_mtx);
138 data_length_max = sc->sc_data_length_max;
139 mtx_unlock(&sc->sc_mtx);
140 error = sysctl_handle_int(oidp, &data_length_max,
141 sizeof(data_length_max), req);
142 if (error == 0 && req->newptr != NULL) {
143 mtx_lock(&sc->sc_mtx);
144 if (sc->sc_mmap_buffer != NULL)
147 sc->sc_data_length_max = data_length_max;
148 mtx_unlock(&sc->sc_mtx);
154 spigen_sysctl_init(struct spigen_softc *sc)
156 struct sysctl_ctx_list *ctx;
157 struct sysctl_oid *tree_node;
158 struct sysctl_oid_list *tree;
161 * Add system sysctl tree/handlers.
163 ctx = device_get_sysctl_ctx(sc->sc_dev);
164 tree_node = device_get_sysctl_tree(sc->sc_dev);
165 tree = SYSCTL_CHILDREN(tree_node);
166 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "command_length_max",
167 CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
168 spigen_command_length_max_proc, "IU", "SPI command header portion (octets)");
169 SYSCTL_ADD_PROC(ctx, tree, OID_AUTO, "data_length_max",
170 CTLFLAG_MPSAFE | CTLFLAG_RW | CTLTYPE_UINT, sc, sizeof(*sc),
171 spigen_data_length_max_proc, "IU", "SPI data trailer portion (octets)");
172 SYSCTL_ADD_INT(ctx, tree, OID_AUTO, "data", CTLFLAG_RW,
173 &sc->sc_debug, 0, "debug flags");
178 spigen_attach(device_t dev)
180 struct spigen_softc *sc;
181 const int unit = device_get_unit(dev);
183 sc = device_get_softc(dev);
185 sc->sc_cdev = make_dev(&spigen_cdevsw, unit,
186 UID_ROOT, GID_OPERATOR, 0660, "spigen%d", unit);
187 sc->sc_cdev->si_drv1 = dev;
188 sc->sc_command_length_max = PAGE_SIZE;
189 sc->sc_data_length_max = PAGE_SIZE;
190 mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);
191 spigen_sysctl_init(sc);
197 spigen_open(struct cdev *cdev, int oflags, int devtype, struct thread *td)
201 struct spigen_softc *sc;
205 sc = device_get_softc(dev);
207 mtx_lock(&sc->sc_mtx);
208 if (sc->sc_flags & SPIGEN_OPEN)
211 sc->sc_flags |= SPIGEN_OPEN;
212 mtx_unlock(&sc->sc_mtx);
218 spigen_transfer(struct cdev *cdev, struct spigen_transfer *st)
220 struct spi_command transfer = SPI_COMMAND_INITIALIZER;
221 device_t dev = cdev->si_drv1;
222 struct spigen_softc *sc = device_get_softc(dev);
225 mtx_lock(&sc->sc_mtx);
226 if (st->st_command.iov_len == 0)
228 else if (st->st_command.iov_len > sc->sc_command_length_max ||
229 st->st_data.iov_len > sc->sc_data_length_max)
231 mtx_unlock(&sc->sc_mtx);
236 device_printf(dev, "cmd %p %u data %p %u\n", st->st_command.iov_base,
237 st->st_command.iov_len, st->st_data.iov_base, st->st_data.iov_len);
239 transfer.tx_cmd = transfer.rx_cmd = malloc(st->st_command.iov_len,
241 if (transfer.tx_cmd == NULL)
243 if (st->st_data.iov_len > 0) {
244 transfer.tx_data = transfer.rx_data = malloc(st->st_data.iov_len,
246 if (transfer.tx_data == NULL) {
247 free(transfer.tx_cmd, M_DEVBUF);
252 transfer.tx_data = transfer.rx_data = NULL;
254 error = copyin(st->st_command.iov_base, transfer.tx_cmd,
255 transfer.tx_cmd_sz = transfer.rx_cmd_sz = st->st_command.iov_len);
256 if ((error == 0) && (st->st_data.iov_len > 0))
257 error = copyin(st->st_data.iov_base, transfer.tx_data,
258 transfer.tx_data_sz = transfer.rx_data_sz =
259 st->st_data.iov_len);
261 error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
263 error = copyout(transfer.rx_cmd, st->st_command.iov_base,
265 if ((error == 0) && (st->st_data.iov_len > 0))
266 error = copyout(transfer.rx_data, st->st_data.iov_base,
267 transfer.rx_data_sz);
270 free(transfer.tx_cmd, M_DEVBUF);
271 free(transfer.tx_data, M_DEVBUF);
276 spigen_transfer_mmapped(struct cdev *cdev, struct spigen_transfer_mmapped *stm)
278 struct spi_command transfer = SPI_COMMAND_INITIALIZER;
279 device_t dev = cdev->si_drv1;
280 struct spigen_softc *sc = device_get_softc(dev);
283 mtx_lock(&sc->sc_mtx);
284 if (sc->sc_flags & SPIGEN_MMAP_BUSY)
286 else if (stm->stm_command_length > sc->sc_command_length_max ||
287 stm->stm_data_length > sc->sc_data_length_max)
289 else if (sc->sc_mmap_buffer == NULL)
291 else if (sc->sc_mmap_buffer_size <
292 stm->stm_command_length + stm->stm_data_length)
295 sc->sc_flags |= SPIGEN_MMAP_BUSY;
296 mtx_unlock(&sc->sc_mtx);
300 transfer.tx_cmd = transfer.rx_cmd = (void *)sc->sc_mmap_kvaddr;
301 transfer.tx_cmd_sz = transfer.rx_cmd_sz = stm->stm_command_length;
302 transfer.tx_data = transfer.rx_data =
303 (void *)(sc->sc_mmap_kvaddr + stm->stm_command_length);
304 transfer.tx_data_sz = transfer.rx_data_sz = stm->stm_data_length;
305 error = SPIBUS_TRANSFER(device_get_parent(dev), dev, &transfer);
307 mtx_lock(&sc->sc_mtx);
308 KASSERT((sc->sc_flags & SPIGEN_MMAP_BUSY), ("mmap no longer marked busy"));
309 sc->sc_flags &= ~(SPIGEN_MMAP_BUSY);
310 mtx_unlock(&sc->sc_mtx);
315 spigen_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
318 device_t dev = cdev->si_drv1;
319 struct spigen_softc *sc = device_get_softc(dev);
323 case SPIGENIOC_TRANSFER:
324 error = spigen_transfer(cdev, (struct spigen_transfer *)data);
326 case SPIGENIOC_TRANSFER_MMAPPED:
327 error = spigen_transfer_mmapped(cdev, (struct spigen_transfer_mmapped *)data);
329 case SPIGENIOC_GET_CLOCK_SPEED:
330 mtx_lock(&sc->sc_mtx);
331 *(uint32_t *)data = sc->sc_clock_speed;
332 /* XXX TODO: implement spibus ivar call */
333 mtx_unlock(&sc->sc_mtx);
336 case SPIGENIOC_SET_CLOCK_SPEED:
337 mtx_lock(&sc->sc_mtx);
338 sc->sc_clock_speed = *(uint32_t *)data;
339 mtx_unlock(&sc->sc_mtx);
349 spigen_mmap_single(struct cdev *cdev, vm_ooffset_t *offset,
350 vm_size_t size, struct vm_object **object, int nprot)
352 device_t dev = cdev->si_drv1;
353 struct spigen_softc *sc = device_get_softc(dev);
358 (nprot & (PROT_EXEC | PROT_READ | PROT_WRITE))
359 != (PROT_READ | PROT_WRITE))
361 size = roundup2(size, PAGE_SIZE);
362 pages = size / PAGE_SIZE;
364 mtx_lock(&sc->sc_mtx);
365 if (sc->sc_mmap_buffer != NULL) {
366 mtx_unlock(&sc->sc_mtx);
368 } else if (size > sc->sc_command_length_max + sc->sc_data_length_max) {
369 mtx_unlock(&sc->sc_mtx);
372 sc->sc_mmap_buffer_size = size;
374 sc->sc_mmap_buffer = *object = vm_pager_allocate(OBJT_PHYS, 0, size,
375 nprot, *offset, curthread->td_ucred);
376 m = malloc(sizeof(*m) * pages, M_TEMP, M_WAITOK);
377 VM_OBJECT_WLOCK(*object);
378 vm_object_reference_locked(*object); // kernel and userland both
379 for (n = 0; n < pages; n++) {
380 m[n] = vm_page_grab(*object, n,
381 VM_ALLOC_NOBUSY | VM_ALLOC_ZERO | VM_ALLOC_WIRED);
382 m[n]->valid = VM_PAGE_BITS_ALL;
384 VM_OBJECT_WUNLOCK(*object);
385 sc->sc_mmap_kvaddr = kva_alloc(size);
386 pmap_qenter(sc->sc_mmap_kvaddr, m, pages);
388 mtx_unlock(&sc->sc_mtx);
396 spigen_close(struct cdev *cdev, int fflag, int devtype, struct thread *td)
398 device_t dev = cdev->si_drv1;
399 struct spigen_softc *sc = device_get_softc(dev);
401 mtx_lock(&sc->sc_mtx);
402 if (sc->sc_mmap_buffer != NULL) {
403 pmap_qremove(sc->sc_mmap_kvaddr,
404 sc->sc_mmap_buffer_size / PAGE_SIZE);
405 kva_free(sc->sc_mmap_kvaddr, sc->sc_mmap_buffer_size);
406 sc->sc_mmap_kvaddr = 0;
407 vm_object_deallocate(sc->sc_mmap_buffer);
408 sc->sc_mmap_buffer = NULL;
409 sc->sc_mmap_buffer_size = 0;
411 sc->sc_flags &= ~(SPIGEN_OPEN);
412 mtx_unlock(&sc->sc_mtx);
417 spigen_detach(device_t dev)
419 struct spigen_softc *sc;
421 sc = device_get_softc(dev);
423 mtx_lock(&sc->sc_mtx);
424 if (sc->sc_flags & SPIGEN_OPEN) {
425 mtx_unlock(&sc->sc_mtx);
428 mtx_unlock(&sc->sc_mtx);
430 mtx_destroy(&sc->sc_mtx);
433 destroy_dev(sc->sc_cdev);
438 static devclass_t spigen_devclass;
440 static device_method_t spigen_methods[] = {
441 /* Device interface */
443 DEVMETHOD(device_identify, spigen_identify),
445 DEVMETHOD(device_probe, spigen_probe),
446 DEVMETHOD(device_attach, spigen_attach),
447 DEVMETHOD(device_detach, spigen_detach),
452 static driver_t spigen_driver = {
455 sizeof(struct spigen_softc),
458 DRIVER_MODULE(spigen, spibus, spigen_driver, spigen_devclass, 0, 0);