/*-
 * Copyright (c) 2017, 2018 The FreeBSD Foundation
 * All rights reserved.
 * Copyright (c) 2018, 2019 Intel Corporation
 *
 * This software was developed by Konstantin Belousov <kib@FreeBSD.org>
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include "opt_acpi.h"
#include "opt_ddb.h"

#include <sys/param.h>
#include <sys/systm.h>
#include <sys/bio.h>
#include <sys/bus.h>
#include <sys/conf.h>
#include <sys/devicestat.h>
#include <sys/disk.h>
#include <sys/efi.h>
#include <sys/kernel.h>
#include <sys/kthread.h>
#include <sys/limits.h>
#include <sys/lock.h>
#include <sys/malloc.h>
#include <sys/module.h>
#include <sys/rwlock.h>
#include <sys/sglist.h>
#include <sys/uio.h>
#include <sys/uuid.h>
#include <geom/geom.h>
#include <geom/geom_int.h>
#include <machine/vmparam.h>
#include <vm/vm.h>
#include <vm/vm_object.h>
#include <vm/vm_page.h>
#include <vm/vm_pager.h>
#include <contrib/dev/acpica/include/acpi.h>
#include <contrib/dev/acpica/include/accommon.h>
#include <contrib/dev/acpica/include/acuuid.h>
#include <dev/acpica/acpivar.h>
#include <dev/nvdimm/nvdimm_var.h>

#define UUID_INITIALIZER_VOLATILE_MEMORY \
    {0x7305944f,0xfdda,0x44e3,0xb1,0x6c,{0x3f,0x22,0xd2,0x52,0xe5,0xd0}}
#define UUID_INITIALIZER_PERSISTENT_MEMORY \
    {0x66f0d379,0xb4f3,0x4074,0xac,0x43,{0x0d,0x33,0x18,0xb7,0x8c,0xdb}}
#define UUID_INITIALIZER_CONTROL_REGION \
    {0x92f701f6,0x13b4,0x405d,0x91,0x0b,{0x29,0x93,0x67,0xe8,0x23,0x4c}}
#define UUID_INITIALIZER_DATA_REGION \
    {0x91af0530,0x5d86,0x470e,0xa6,0xb0,{0x0a,0x2d,0xb9,0x40,0x82,0x49}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK \
    {0x77ab535a,0x45fc,0x624b,0x55,0x60,{0xf7,0xb2,0x81,0xd1,0xf9,0x6e}}
#define UUID_INITIALIZER_VOLATILE_VIRTUAL_CD \
    {0x3d5abd30,0x4175,0x87ce,0x6d,0x64,{0xd2,0xad,0xe5,0x23,0xc4,0xbb}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK \
    {0x5cea02c9,0x4d07,0x69d3,0x26,0x9f,{0x44,0x96,0xfb,0xe0,0x96,0xf9}}
#define UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD \
    {0x08018188,0x42cd,0xbb48,0x10,0x0f,{0x53,0x87,0xd5,0x3d,0xed,0x3d}}

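/*
 * GUIDs for the SPA range types defined by ACPI/NFIT, indexed by
 * enum SPA_mapping_type.  u_usr_acc marks the types that are exposed
 * to consumers through a devfs node and a GEOM provider; control
 * regions stay kernel-only.
 */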
static struct nvdimm_SPA_uuid_list_elm {
        const char              *u_name;
        struct uuid             u_id;
        const bool              u_usr_acc;
} nvdimm_SPA_uuid_list[] = {
        [SPA_TYPE_VOLATILE_MEMORY] = {
                .u_name =       "VOLA MEM ",
                .u_id =         UUID_INITIALIZER_VOLATILE_MEMORY,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_PERSISTENT_MEMORY] = {
                .u_name =       "PERS MEM",
                .u_id =         UUID_INITIALIZER_PERSISTENT_MEMORY,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_CONTROL_REGION] = {
                .u_name =       "CTRL RG ",
                .u_id =         UUID_INITIALIZER_CONTROL_REGION,
                .u_usr_acc =    false,
        },
        [SPA_TYPE_DATA_REGION] = {
                .u_name =       "DATA RG ",
                .u_id =         UUID_INITIALIZER_DATA_REGION,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_VOLATILE_VIRTUAL_DISK] = {
                .u_name =       "VIRT DSK",
                .u_id =         UUID_INITIALIZER_VOLATILE_VIRTUAL_DISK,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_VOLATILE_VIRTUAL_CD] = {
                .u_name =       "VIRT CD ",
                .u_id =         UUID_INITIALIZER_VOLATILE_VIRTUAL_CD,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_PERSISTENT_VIRTUAL_DISK] = {
                .u_name =       "PV DSK  ",
                .u_id =         UUID_INITIALIZER_PERSISTENT_VIRTUAL_DISK,
                .u_usr_acc =    true,
        },
        [SPA_TYPE_PERSISTENT_VIRTUAL_CD] = {
                .u_name =       "PV CD   ",
                .u_id =         UUID_INITIALIZER_PERSISTENT_VIRTUAL_CD,
                .u_usr_acc =    true,
        },
};

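/*
 * Translate a GUID from an NFIT SPA structure into our mapping type by
 * a linear search of the table above.
 */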
enum SPA_mapping_type
nvdimm_spa_type_from_uuid(struct uuid *uuid)
{
        int j;

        for (j = 0; j < nitems(nvdimm_SPA_uuid_list); j++) {
                if (uuidcmp(uuid, &nvdimm_SPA_uuid_list[j].u_id) != 0)
                        continue;
                return (j);
        }
        return (SPA_TYPE_UNKNOWN);
}

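/*
 * Select the VM cache mode for the SPA from the EFI memory attributes
 * reported by the NFIT, preferring the most efficient mode that the
 * range advertises (write-back first), with uncacheable as the
 * fallback.
 */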
static vm_memattr_t
nvdimm_spa_memattr(struct nvdimm_spa_dev *dev)
{
        vm_memattr_t mode;

        if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WB) != 0)
                mode = VM_MEMATTR_WRITE_BACK;
        else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WT) != 0)
                mode = VM_MEMATTR_WRITE_THROUGH;
        else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WC) != 0)
                mode = VM_MEMATTR_WRITE_COMBINING;
        else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_WP) != 0)
                mode = VM_MEMATTR_WRITE_PROTECTED;
        else if ((dev->spa_efi_mem_flags & EFI_MD_ATTR_UC) != 0)
                mode = VM_MEMATTR_UNCACHEABLE;
        else {
                if (bootverbose)
                        printf("SPA mapping attr %#lx unsupported\n",
                            dev->spa_efi_mem_flags);
                mode = VM_MEMATTR_UNCACHEABLE;
        }
        return (mode);
}

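/*
 * Copy between the SPA and a struct uio.  When the region has no
 * permanent KVA mapping, go page by page through a fake vm_page that
 * is re-pointed at each physical page; otherwise uiomove() straight
 * from the mapping.
 */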
static int
nvdimm_spa_uio(struct nvdimm_spa_dev *dev, struct uio *uio)
{
        struct vm_page m, *ma;
        off_t off;
        vm_memattr_t mattr;
        int error, n;

        error = 0;
        if (dev->spa_kva == NULL) {
                mattr = nvdimm_spa_memattr(dev);
                vm_page_initfake(&m, 0, mattr);
                ma = &m;
                while (uio->uio_resid > 0) {
                        if (uio->uio_offset >= dev->spa_len)
                                break;
                        off = dev->spa_phys_base + uio->uio_offset;
                        vm_page_updatefake(&m, trunc_page(off), mattr);
                        n = PAGE_SIZE;
                        if (n > uio->uio_resid)
                                n = uio->uio_resid;
                        error = uiomove_fromphys(&ma, off & PAGE_MASK, n, uio);
                        if (error != 0)
                                break;
                }
        } else {
                while (uio->uio_resid > 0) {
                        if (uio->uio_offset >= dev->spa_len)
                                break;
                        n = INT_MAX;
                        if (n > uio->uio_resid)
                                n = uio->uio_resid;
                        if (uio->uio_offset + n > dev->spa_len)
                                n = dev->spa_len - uio->uio_offset;
                        error = uiomove((char *)dev->spa_kva + uio->uio_offset,
                            n, uio);
                        if (error != 0)
                                break;
                }
        }
        return (error);
}

static int
nvdimm_spa_rw(struct cdev *dev, struct uio *uio, int ioflag)
{

        return (nvdimm_spa_uio(dev->si_drv1, uio));
}

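/*
 * Provide just enough disk ioctls (sector and media size) for
 * disk-oriented consumers such as newfs(8) to work on the cdev.
 */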
static int
nvdimm_spa_ioctl(struct cdev *cdev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
        struct nvdimm_spa_dev *dev;
        int error;

        dev = cdev->si_drv1;
        error = 0;
        switch (cmd) {
        case DIOCGSECTORSIZE:
                *(u_int *)data = DEV_BSIZE;
                break;
        case DIOCGMEDIASIZE:
                *(off_t *)data = dev->spa_len;
                break;
        default:
                error = ENOTTY;
                break;
        }
        return (error);
}

static int
nvdimm_spa_mmap_single(struct cdev *cdev, vm_ooffset_t *offset, vm_size_t size,
    vm_object_t *objp, int nprot)
{
        struct nvdimm_spa_dev *dev;

        dev = cdev->si_drv1;
        if (dev->spa_obj == NULL)
                return (ENXIO);
        if (*offset >= dev->spa_len || *offset + size < *offset ||
            *offset + size > dev->spa_len)
                return (EINVAL);
        vm_object_reference(dev->spa_obj);
        *objp = dev->spa_obj;
        return (0);
}

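/*
 * Userland consumes the SPA through the devfs node.  A minimal sketch
 * (device name and length are illustrative):
 *
 *	int fd = open("/dev/nvdimm_spa0", O_RDWR);
 *	void *p = mmap(NULL, len, PROT_READ | PROT_WRITE, MAP_SHARED,
 *	    fd, 0);
 *
 * The mapping is served by nvdimm_spa_mmap_single() above, which hands
 * out a reference to the SG-backed spa_obj, so loads and stores reach
 * the NVDIMM pages directly.
 */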
static struct cdevsw spa_cdevsw = {
        .d_version =    D_VERSION,
        .d_flags =      D_DISK,
        .d_name =       "nvdimm_spa",
        .d_read =       nvdimm_spa_rw,
        .d_write =      nvdimm_spa_rw,
        .d_ioctl =      nvdimm_spa_ioctl,
        .d_mmap_single = nvdimm_spa_mmap_single,
};

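/*
 * Service an unmapped bio against a SPA that has no KVA mapping: build
 * an array of fictitious pages covering the request and let
 * pmap_copy_pages() move the data from physical page to physical page.
 */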
static void
nvdimm_spa_g_all_unmapped(struct nvdimm_spa_dev *dev, struct bio *bp, int rw)
{
        struct vm_page maa[bp->bio_ma_n];
        vm_page_t ma[bp->bio_ma_n];
        vm_memattr_t mattr;
        int i;

        mattr = nvdimm_spa_memattr(dev);
        for (i = 0; i < nitems(ma); i++) {
                maa[i].flags = 0;
                vm_page_initfake(&maa[i], dev->spa_phys_base +
                    trunc_page(bp->bio_offset) + PAGE_SIZE * i, mattr);
                ma[i] = &maa[i];
        }
        if (rw == BIO_READ)
                pmap_copy_pages(ma, bp->bio_offset & PAGE_MASK, bp->bio_ma,
                    bp->bio_ma_offset, bp->bio_length);
        else
                pmap_copy_pages(bp->bio_ma, bp->bio_ma_offset, ma,
                    bp->bio_offset & PAGE_MASK, bp->bio_length);
}

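/*
 * GEOM worker thread: dequeue bios from spa_g_queue, service BIO_READ,
 * BIO_WRITE and BIO_FLUSH, and deliver completions.  The thread exits
 * once the geom is destroyed and spa_g_proc_run is cleared.
 */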
static void
nvdimm_spa_g_thread(void *arg)
{
        struct g_spa *sc;
        struct bio *bp;
        struct uio auio;
        struct iovec aiovec;
        int error;

        sc = arg;
        for (;;) {
                mtx_lock(&sc->spa_g_mtx);
                for (;;) {
                        bp = bioq_takefirst(&sc->spa_g_queue);
                        if (bp != NULL)
                                break;
                        msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO,
                            "spa_g", 0);
                        if (!sc->spa_g_proc_run) {
                                sc->spa_g_proc_exiting = true;
                                wakeup(&sc->spa_g_queue);
                                mtx_unlock(&sc->spa_g_mtx);
                                kproc_exit(0);
                        }
                        continue;
                }
                mtx_unlock(&sc->spa_g_mtx);
                if (bp->bio_cmd != BIO_READ && bp->bio_cmd != BIO_WRITE &&
                    bp->bio_cmd != BIO_FLUSH) {
                        error = EOPNOTSUPP;
                        goto completed;
                }

                error = 0;
                if (bp->bio_cmd == BIO_FLUSH) {
                        if (sc->dev->spa_kva != NULL) {
                                pmap_large_map_wb(sc->dev->spa_kva,
                                    sc->dev->spa_len);
                        } else {
                                pmap_flush_cache_phys_range(
                                    (vm_paddr_t)sc->dev->spa_phys_base,
                                    (vm_paddr_t)sc->dev->spa_phys_base +
                                    sc->dev->spa_len,
                                    nvdimm_spa_memattr(sc->dev));
                        }
                        /*
                         * XXX flush IMC
                         */
                        goto completed;
                }

                if ((bp->bio_flags & BIO_UNMAPPED) != 0) {
                        if (sc->dev->spa_kva != NULL) {
                                aiovec.iov_base = (char *)sc->dev->spa_kva +
                                    bp->bio_offset;
                                aiovec.iov_len = bp->bio_length;
                                auio.uio_iov = &aiovec;
                                auio.uio_iovcnt = 1;
                                auio.uio_resid = bp->bio_length;
                                auio.uio_offset = bp->bio_offset;
                                auio.uio_segflg = UIO_SYSSPACE;
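                                /*
                                 * uiomove_fromphys() transfers between
                                 * bp->bio_ma and the KVA iovec, so the uio
                                 * direction is inverted: a BIO_READ writes
                                 * into the bio pages.
                                 */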
                                auio.uio_rw = bp->bio_cmd == BIO_READ ?
                                    UIO_WRITE : UIO_READ;
                                auio.uio_td = curthread;
                                error = uiomove_fromphys(bp->bio_ma,
                                    bp->bio_ma_offset, bp->bio_length, &auio);
                                bp->bio_resid = auio.uio_resid;
                        } else {
                                nvdimm_spa_g_all_unmapped(sc->dev, bp,
                                    bp->bio_cmd);
                                bp->bio_resid = bp->bio_length;
                                error = 0;
                        }
                } else {
                        aiovec.iov_base = bp->bio_data;
                        aiovec.iov_len = bp->bio_length;
                        auio.uio_iov = &aiovec;
                        auio.uio_iovcnt = 1;
                        auio.uio_resid = bp->bio_length;
                        auio.uio_offset = bp->bio_offset;
                        auio.uio_segflg = UIO_SYSSPACE;
                        auio.uio_rw = bp->bio_cmd == BIO_READ ? UIO_READ :
                            UIO_WRITE;
                        auio.uio_td = curthread;
                        error = nvdimm_spa_uio(sc->dev, &auio);
                        bp->bio_resid = auio.uio_resid;
                }
                bp->bio_bcount = bp->bio_length;
                devstat_end_transaction_bio(sc->spa_g_devstat, bp);
completed:
                bp->bio_completed = bp->bio_length;
                g_io_deliver(bp, error);
        }
}

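/*
 * GEOM start method: account the bio and hand it off to the worker
 * thread, sorted into the queue by offset.
 */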
static void
nvdimm_spa_g_start(struct bio *bp)
{
        struct g_spa *sc;

        sc = bp->bio_to->geom->softc;
        if (bp->bio_cmd == BIO_READ || bp->bio_cmd == BIO_WRITE) {
                mtx_lock(&sc->spa_g_stat_mtx);
                devstat_start_transaction_bio(sc->spa_g_devstat, bp);
                mtx_unlock(&sc->spa_g_stat_mtx);
        }
        mtx_lock(&sc->spa_g_mtx);
        bioq_disksort(&sc->spa_g_queue, bp);
        wakeup(&sc->spa_g_queue);
        mtx_unlock(&sc->spa_g_mtx);
}

static int
nvdimm_spa_g_access(struct g_provider *pp, int r, int w, int e)
{

        return (0);
}

static struct g_geom * nvdimm_spa_g_create(struct nvdimm_spa_dev *dev,
    const char *name);
static g_ctl_destroy_geom_t nvdimm_spa_g_destroy_geom;

struct g_class nvdimm_spa_g_class = {
        .name =         "SPA",
        .version =      G_VERSION,
        .start =        nvdimm_spa_g_start,
        .access =       nvdimm_spa_g_access,
        .destroy_geom = nvdimm_spa_g_destroy_geom,
};
DECLARE_GEOM_CLASS(nvdimm_spa_g_class, g_spa);

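/*
 * Initialize a SPA from its NFIT system address structure and, for the
 * user-accessible range types, create the spa%d device interfaces.
 */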
int
nvdimm_spa_init(struct SPA_mapping *spa, ACPI_NFIT_SYSTEM_ADDRESS *nfitaddr,
    enum SPA_mapping_type spa_type)
{
        char *name;
        int error;

        spa->spa_type = spa_type;
        spa->spa_nfit_idx = nfitaddr->RangeIndex;
        spa->dev.spa_domain =
            ((nfitaddr->Flags & ACPI_NFIT_PROXIMITY_VALID) != 0) ?
            nfitaddr->ProximityDomain : -1;
        spa->dev.spa_phys_base = nfitaddr->Address;
        spa->dev.spa_len = nfitaddr->Length;
        spa->dev.spa_efi_mem_flags = nfitaddr->MemoryMapping;
        if (bootverbose) {
                printf("NVDIMM SPA%d base %#016jx len %#016jx %s fl %#jx\n",
                    spa->spa_nfit_idx,
                    (uintmax_t)spa->dev.spa_phys_base,
                    (uintmax_t)spa->dev.spa_len,
                    nvdimm_SPA_uuid_list[spa_type].u_name,
                    spa->dev.spa_efi_mem_flags);
        }
        if (!nvdimm_SPA_uuid_list[spa_type].u_usr_acc)
                return (0);

        asprintf(&name, M_NVDIMM, "spa%d", spa->spa_nfit_idx);
        error = nvdimm_spa_dev_init(&spa->dev, name);
        free(name, M_NVDIMM);
        return (error);
}

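/*
 * Create the consumer-visible interfaces for one SPA: a permanent KVA
 * mapping (best effort), an SG-backed VM object for mmap, the devfs
 * node, and the GEOM provider.  Failure to map KVA is not fatal; the
 * slower fake-page path is used instead.
 */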
int
nvdimm_spa_dev_init(struct nvdimm_spa_dev *dev, const char *name)
{
        struct make_dev_args mda;
        struct sglist *spa_sg;
        char *devname;
        int error, error1;

        error1 = pmap_large_map(dev->spa_phys_base, dev->spa_len,
            &dev->spa_kva, nvdimm_spa_memattr(dev));
        if (error1 != 0) {
                printf("NVDIMM %s cannot map into KVA, error %d\n", name,
                    error1);
                dev->spa_kva = NULL;
        }

        spa_sg = sglist_alloc(1, M_WAITOK);
        error = sglist_append_phys(spa_sg, dev->spa_phys_base,
            dev->spa_len);
        if (error == 0) {
                dev->spa_obj = vm_pager_allocate(OBJT_SG, spa_sg, dev->spa_len,
                    VM_PROT_ALL, 0, NULL);
                if (dev->spa_obj == NULL) {
                        printf("NVDIMM %s failed to alloc vm object\n", name);
                        sglist_free(spa_sg);
                }
        } else {
                printf("NVDIMM %s failed to init sglist, error %d\n", name,
                    error);
                sglist_free(spa_sg);
        }

        make_dev_args_init(&mda);
        mda.mda_flags = MAKEDEV_WAITOK | MAKEDEV_CHECKNAME;
        mda.mda_devsw = &spa_cdevsw;
        mda.mda_cr = NULL;
        mda.mda_uid = UID_ROOT;
        mda.mda_gid = GID_OPERATOR;
        mda.mda_mode = 0660;
        mda.mda_si_drv1 = dev;
        asprintf(&devname, M_NVDIMM, "nvdimm_%s", name);
        error = make_dev_s(&mda, &dev->spa_dev, "%s", devname);
        free(devname, M_NVDIMM);
        if (error != 0) {
                printf("NVDIMM %s cannot create devfs node, error %d\n", name,
                    error);
                if (error1 == 0)
                        error1 = error;
        }
        dev->spa_g = nvdimm_spa_g_create(dev, name);
        if (dev->spa_g == NULL && error1 == 0)
                error1 = ENXIO;
        return (error1);
}

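/*
 * Create the GEOM geom/provider pair for the SPA and start its worker
 * thread.  The provider accepts unmapped and directly dispatched bios.
 */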
static struct g_geom *
nvdimm_spa_g_create(struct nvdimm_spa_dev *dev, const char *name)
{
        struct g_geom *gp;
        struct g_spa *sc;
        int error;

        gp = NULL;
        sc = malloc(sizeof(struct g_spa), M_NVDIMM, M_WAITOK | M_ZERO);
        sc->dev = dev;
        bioq_init(&sc->spa_g_queue);
        mtx_init(&sc->spa_g_mtx, "spag", NULL, MTX_DEF);
        mtx_init(&sc->spa_g_stat_mtx, "spagst", NULL, MTX_DEF);
        sc->spa_g_proc_run = true;
        sc->spa_g_proc_exiting = false;
        error = kproc_create(nvdimm_spa_g_thread, sc, &sc->spa_g_proc, 0, 0,
            "g_spa");
        if (error != 0) {
                mtx_destroy(&sc->spa_g_mtx);
                mtx_destroy(&sc->spa_g_stat_mtx);
                free(sc, M_NVDIMM);
                printf("NVDIMM %s cannot create geom worker, error %d\n", name,
                    error);
        } else {
                g_topology_lock();
                gp = g_new_geomf(&nvdimm_spa_g_class, "%s", name);
                gp->softc = sc;
                sc->spa_p = g_new_providerf(gp, "%s", name);
                sc->spa_p->mediasize = dev->spa_len;
                sc->spa_p->sectorsize = DEV_BSIZE;
                sc->spa_p->flags |= G_PF_DIRECT_SEND | G_PF_DIRECT_RECEIVE |
                    G_PF_ACCEPT_UNMAPPED;
                g_error_provider(sc->spa_p, 0);
                sc->spa_g_devstat = devstat_new_entry("spa", -1, DEV_BSIZE,
                    DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
                    DEVSTAT_PRIORITY_MAX);
                g_topology_unlock();
        }
        return (gp);
}

void
nvdimm_spa_fini(struct SPA_mapping *spa)
{

        nvdimm_spa_dev_fini(&spa->dev);
}

void
nvdimm_spa_dev_fini(struct nvdimm_spa_dev *dev)
{

        if (dev->spa_g != NULL) {
                g_topology_lock();
                nvdimm_spa_g_destroy_geom(NULL, dev->spa_g->class, dev->spa_g);
                g_topology_unlock();
        }
        if (dev->spa_dev != NULL) {
                destroy_dev(dev->spa_dev);
                dev->spa_dev = NULL;
        }
        vm_object_deallocate(dev->spa_obj);
        if (dev->spa_kva != NULL) {
                pmap_large_unmap(dev->spa_kva, dev->spa_len);
                dev->spa_kva = NULL;
        }
}

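/*
 * Tear down the geom: stop the worker thread and wait for it to
 * acknowledge exit before withering the provider and freeing the
 * softc.
 */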
static int
nvdimm_spa_g_destroy_geom(struct gctl_req *req, struct g_class *cp,
    struct g_geom *gp)
{
        struct g_spa *sc;

        sc = gp->softc;
        mtx_lock(&sc->spa_g_mtx);
        sc->spa_g_proc_run = false;
        wakeup(&sc->spa_g_queue);
        while (!sc->spa_g_proc_exiting)
                msleep(&sc->spa_g_queue, &sc->spa_g_mtx, PRIBIO, "spa_e", 0);
        mtx_unlock(&sc->spa_g_mtx);
        g_topology_assert();
        g_wither_geom(gp, ENXIO);
        sc->spa_p = NULL;
        if (sc->spa_g_devstat != NULL) {
                devstat_remove_entry(sc->spa_g_devstat);
                sc->spa_g_devstat = NULL;
        }
        mtx_destroy(&sc->spa_g_mtx);
        mtx_destroy(&sc->spa_g_stat_mtx);
        free(sc, M_NVDIMM);
        return (0);
}