/*-
 * Copyright (c) 2015 The FreeBSD Foundation
 *
 * This software was developed by Konstantin Belousov
 * under sponsorship from the FreeBSD Foundation.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions
 * are met:
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
 * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
 * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
 * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
 * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
 * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
 * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
 * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
 * SUCH DAMAGE.
 */

#include <sys/cdefs.h>
__FBSDID("$FreeBSD$");

#include <sys/types.h>
#include <sys/mman.h>
#include <sys/queue.h>
#include "namespace.h"
#include <stdlib.h>
#include "un-namespace.h"

#include "thr_private.h"

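/*
 * Overview (editorial note): process-shared pthread objects keep their
 * real state in a page of shared memory, the "off-page", which the
 * kernel hands out via _umtx_op(UMTX_OP_SHM).  The structure visible
 * to the application only acts as a key.  The table below caches, per
 * process, the mapping from such a key (the address of the
 * application-visible object) to the local mapping of its off-page.
 */
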
struct psh {
        LIST_ENTRY(psh) link;
        void *key;
        void *val;
};

LIST_HEAD(pshared_hash_head, psh);
#define HASH_SIZE       128
static struct pshared_hash_head pshared_hash[HASH_SIZE];
#define PSHARED_KEY_HASH(key)   (((unsigned long)(key) >> 8) % HASH_SIZE)
/* XXXKIB: the lock could be split per hash chain if it appears contested. */
static struct urwlock pshared_lock = DEFAULT_URWLOCK;

void
__thr_pshared_init(void)
{
        int i;

        _thr_urwlock_init(&pshared_lock);
        for (i = 0; i < HASH_SIZE; i++)
                LIST_INIT(&pshared_hash[i]);
}

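/*
 * Lock wrappers for the process-private hash lock.  Raising
 * curthread->locklevel while the urwlock is held defers asynchronous
 * events for the thread; pshared_unlock() runs any deferred work via
 * _thr_ast() once the lock has been dropped.
 */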
static void
pshared_rlock(struct pthread *curthread)
{

        curthread->locklevel++;
        _thr_rwl_rdlock(&pshared_lock);
}

static void
pshared_wlock(struct pthread *curthread)
{

        curthread->locklevel++;
        _thr_rwl_wrlock(&pshared_lock);
}

static void
pshared_unlock(struct pthread *curthread)
{

        _thr_rwl_unlock(&pshared_lock);
        curthread->locklevel--;
        _thr_ast(curthread);
}

/*
 * Among all processes sharing a lock, only one executes the
 * pthread_*_destroy() function for it.  The other processes still hold
 * the hash entry and the mapped off-page.
 *
 * Mitigate the problem by periodically checking the liveness of all
 * hashed keys.  Right now this is done on every pthread_*_destroy(),
 * but it may be done less often if that proves too time-consuming.
 */
static void
pshared_gc(struct pthread *curthread)
{
        struct pshared_hash_head *hd;
        struct psh *h, *h1;
        int error, i;

        pshared_wlock(curthread);
        for (i = 0; i < HASH_SIZE; i++) {
                hd = &pshared_hash[i];
                LIST_FOREACH_SAFE(h, hd, link, h1) {
                        error = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_ALIVE,
                            h->val, NULL);
                        if (error == 0)
                                continue;
                        LIST_REMOVE(h, link);
                        munmap(h->val, PAGE_SIZE);
                        free(h);
                }
        }
        pshared_unlock(curthread);
}

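/*
 * Return the off-page mapping recorded for key in this process, or
 * NULL if there is none.  The caller must hold pshared_lock.
 */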
static void *
pshared_lookup(void *key)
{
        struct pshared_hash_head *hd;
        struct psh *h;

        hd = &pshared_hash[PSHARED_KEY_HASH(key)];
        LIST_FOREACH(h, hd, link) {
                if (h->key == key)
                        return (h->val);
        }
        return (NULL);
}

static int
pshared_insert(void *key, void **val)
{
        struct pshared_hash_head *hd;
        struct psh *h;

        hd = &pshared_hash[PSHARED_KEY_HASH(key)];
        LIST_FOREACH(h, hd, link) {
                /*
                 * When the key already exists in the hash, we should
                 * return either the new (just mapped) or the old
                 * (hashed) val, and the other val should be unmapped
                 * to avoid an address space leak.
                 *
                 * If two threads lock the same object which is not
                 * yet stored in the pshared_hash, then the val already
                 * inserted by the first thread should be returned, and
                 * the second val unmapped (the order is established by
                 * pshared_lock()).  Otherwise, if we unmapped the
                 * value obtained from the hash, the first thread might
                 * operate on an unmapped off-page object.
                 *
                 * There is still an issue: if the hashed key was
                 * unmapped and another page was then mapped at the
                 * same address, the hash would return the stale old
                 * val.  Only the race of simultaneous hash insertion
                 * is handled here; the unlikely remap problem is left
                 * unaddressed.
                 */
                if (h->key == key) {
                        if (h->val != *val) {
                                munmap(*val, PAGE_SIZE);
                                *val = h->val;
                        }
                        return (1);
                }
        }

        h = malloc(sizeof(*h));
        if (h == NULL)
                return (0);
        h->key = key;
        h->val = *val;
        LIST_INSERT_HEAD(hd, h, link);
        return (1);
}

static void *
pshared_remove(void *key)
{
        struct pshared_hash_head *hd;
        struct psh *h;
        void *val;

        hd = &pshared_hash[PSHARED_KEY_HASH(key)];
        LIST_FOREACH(h, hd, link) {
                if (h->key == key) {
                        LIST_REMOVE(h, link);
                        val = h->val;
                        free(h);
                        return (val);
                }
        }
        return (NULL);
}

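/*
 * Release the local use of an off-page: unmap it, if mapped, and ask
 * the kernel to destroy the shared memory object it keeps for key
 * (UMTX_SHM_DESTROY).
 */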
static void
pshared_clean(void *key, void *val)
{

        if (val != NULL)
                munmap(val, PAGE_SIZE);
        _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_DESTROY, key, NULL);
}

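/*
 * Return the off-page for the process-shared object identified by key,
 * mapping it into this process if it is not hashed yet.  With doalloc
 * set, the backing shared memory object is created on first use;
 * otherwise only an existing object is looked up, and NULL is returned
 * if there is none.
 */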
void *
__thr_pshared_offpage(void *key, int doalloc)
{
        struct pthread *curthread;
        void *res;
        int fd, ins_done;

        curthread = _get_curthread();
        pshared_rlock(curthread);
        res = pshared_lookup(key);
        pshared_unlock(curthread);
        if (res != NULL)
                return (res);
        fd = _umtx_op(NULL, UMTX_OP_SHM, doalloc ? UMTX_SHM_CREAT :
            UMTX_SHM_LOOKUP, key, NULL);
        if (fd == -1)
                return (NULL);
        res = mmap(NULL, PAGE_SIZE, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        close(fd);
        if (res == MAP_FAILED)
                return (NULL);
        pshared_wlock(curthread);
        ins_done = pshared_insert(key, &res);
        pshared_unlock(curthread);
        if (!ins_done) {
                pshared_clean(key, res);
                res = NULL;
        }
        return (res);
}
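
/*
 * Usage sketch (hypothetical caller, not part of this file): an
 * implementation of a process-shared lock would resolve the
 * application-visible object to its off-page before touching any
 * state, along the lines of:
 *
 *      struct real_lock *rl;
 *
 *      rl = __thr_pshared_offpage(user_lock, 1);
 *      if (rl == NULL)
 *              return (ENOMEM);
 *
 * rl then points into memory shared by every process that maps the
 * same object, so all lock state lives in *rl.  Passing 0 for doalloc
 * only looks up an already created off-page.  The names user_lock and
 * struct real_lock are illustrative only.
 */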

void
__thr_pshared_destroy(void *key)
{
        struct pthread *curthread;
        void *val;

        curthread = _get_curthread();
        pshared_wlock(curthread);
        val = pshared_remove(key);
        pshared_unlock(curthread);
        pshared_clean(key, val);
        pshared_gc(curthread);
}

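/*
 * Fork interlock: the hash lock is read-locked across fork(2) so that
 * no writer can leave the hash half-updated while the address space is
 * copied, and the new process starts with a consistent table of the
 * already mapped off-pages.
 */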
void
__thr_pshared_atfork_pre(void)
{

        _thr_rwl_rdlock(&pshared_lock);
}

void
__thr_pshared_atfork_post(void)
{

        _thr_rwl_unlock(&pshared_lock);
}