]> CyberLeo.Net >> Repos - FreeBSD/FreeBSD.git/blob - lib/libthr/thread/thr_pshared.c
ntp: import ntp-4.2.8p16
[FreeBSD/FreeBSD.git] / lib / libthr / thread / thr_pshared.c
1 /*-
2  * Copyright (c) 2015 The FreeBSD Foundation
3  *
4  * This software was developed by Konstantin Belousov
5  * under sponsorship from the FreeBSD Foundation.
6  *
7  * Redistribution and use in source and binary forms, with or without
8  * modification, are permitted provided that the following conditions
9  * are met:
10  * 1. Redistributions of source code must retain the above copyright
11  *    notice, this list of conditions and the following disclaimer.
12  * 2. Redistributions in binary form must reproduce the above copyright
13  *    notice, this list of conditions and the following disclaimer in the
14  *    documentation and/or other materials provided with the distribution.
15  *
16  * THIS SOFTWARE IS PROVIDED BY THE AUTHOR AND CONTRIBUTORS ``AS IS'' AND
17  * ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18  * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19  * ARE DISCLAIMED.  IN NO EVENT SHALL THE AUTHOR OR CONTRIBUTORS BE LIABLE
20  * FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR CONSEQUENTIAL
21  * DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS
22  * OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS INTERRUPTION)
23  * HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT
24  * LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY
25  * OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF
26  * SUCH DAMAGE.
27  */
28
29 #include <sys/cdefs.h>
30 __FBSDID("$FreeBSD$");
31
32 #include <sys/types.h>
33 #include <sys/mman.h>
34 #include <sys/queue.h>
35 #include "namespace.h"
36 #include <stdlib.h>
37 #include "un-namespace.h"
38
39 #include "thr_private.h"
40
/*
 * One entry of the process-private cache mapping a pshared lock
 * object address (key) to the local mapping of its shared off-page
 * (val).
 */
struct psh {
        LIST_ENTRY(psh) link;
        void *key;
        void *val;
};

LIST_HEAD(pshared_hash_head, psh);
#define HASH_SIZE       128
static struct pshared_hash_head pshared_hash[HASH_SIZE];
/* Keys are page-aligned-ish object addresses; drop low bits before hashing. */
#define PSHARED_KEY_HASH(key)   (((unsigned long)(key) >> 8) % HASH_SIZE)
/* XXXKIB: lock could be split to per-hash chain, if appears contested */
static struct urwlock pshared_lock = DEFAULT_URWLOCK;
/* Size of one off-page mapping; set once in __thr_pshared_init(). */
static int page_size;
54
55 void
56 __thr_pshared_init(void)
57 {
58         int i;
59
60         page_size = getpagesize();
61         THR_ASSERT(page_size >= THR_PAGE_SIZE_MIN,
62             "THR_PAGE_SIZE_MIN is too large");
63
64         _thr_urwlock_init(&pshared_lock);
65         for (i = 0; i < HASH_SIZE; i++)
66                 LIST_INIT(&pshared_hash[i]);
67 }
68
/*
 * Take the pshared hash lock in read (shared) mode.  The locklevel
 * bump marks the thread as holding a libthr-internal lock, which
 * defers async cancellation/signal processing until pshared_unlock().
 */
static void
pshared_rlock(struct pthread *curthread)
{

        curthread->locklevel++;
        _thr_rwl_rdlock(&pshared_lock);
}
76
/*
 * Take the pshared hash lock in write (exclusive) mode.  Mirrors
 * pshared_rlock(); see the locklevel note there.
 */
static void
pshared_wlock(struct pthread *curthread)
{

        curthread->locklevel++;
        _thr_rwl_wrlock(&pshared_lock);
}
84
/*
 * Release the pshared hash lock (either mode) and, once locklevel
 * drops, run _thr_ast() to process any signal/suspension work that
 * was deferred while the internal lock was held.
 */
static void
pshared_unlock(struct pthread *curthread)
{

        _thr_rwl_unlock(&pshared_lock);
        curthread->locklevel--;
        _thr_ast(curthread);
}
93
/*
 * Among all processes sharing a lock only one executes
 * pthread_lock_destroy().  Other processes still have the hash and
 * mapped off-page.
 *
 * Mitigate the problem by checking the liveness of all hashed keys
 * periodically.  Right now this is executed on each
 * pthread_lock_destroy(), but may be done less often if found to be
 * too time-consuming.
 */
static void
pshared_gc(struct pthread *curthread)
{
        struct pshared_hash_head *hd;
        struct psh *h, *h1;
        int error, i;

        /* Exclusive lock: we may unlink entries from the chains. */
        pshared_wlock(curthread);
        for (i = 0; i < HASH_SIZE; i++) {
                hd = &pshared_hash[i];
                /* _SAFE variant: the current entry may be freed below. */
                LIST_FOREACH_SAFE(h, hd, link, h1) {
                        /*
                         * Ask the kernel whether the shared object
                         * backing this off-page still exists; 0 means
                         * alive, keep the cached mapping.
                         */
                        error = _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_ALIVE,
                            h->val, NULL);
                        if (error == 0)
                                continue;
                        /* Stale: drop the hash entry and its mapping. */
                        LIST_REMOVE(h, link);
                        munmap(h->val, page_size);
                        free(h);
                }
        }
        pshared_unlock(curthread);
}
126
127 static void *
128 pshared_lookup(void *key)
129 {
130         struct pshared_hash_head *hd;
131         struct psh *h;
132
133         hd = &pshared_hash[PSHARED_KEY_HASH(key)];
134         LIST_FOREACH(h, hd, link) {
135                 if (h->key == key)
136                         return (h->val);
137         }
138         return (NULL);
139 }
140
141 static int
142 pshared_insert(void *key, void **val)
143 {
144         struct pshared_hash_head *hd;
145         struct psh *h;
146
147         hd = &pshared_hash[PSHARED_KEY_HASH(key)];
148         LIST_FOREACH(h, hd, link) {
149                 /*
150                  * When the key already exists in the hash, we should
151                  * return either the new (just mapped) or old (hashed)
152                  * val, and the other val should be unmapped to avoid
153                  * address space leak.
154                  *
155                  * If two threads perform lock of the same object
156                  * which is not yet stored in the pshared_hash, then
157                  * the val already inserted by the first thread should
158                  * be returned, and the second val freed (order is by
159                  * the pshared_lock()).  Otherwise, if we unmap the
160                  * value obtained from the hash, the first thread
161                  * might operate on an unmapped off-page object.
162                  *
163                  * There is still an issue: if hashed key was unmapped
164                  * and then other page is mapped at the same key
165                  * address, the hash would return the old val.  I
166                  * decided to handle the race of simultaneous hash
167                  * insertion, leaving the unlikely remap problem
168                  * unaddressed.
169                  */
170                 if (h->key == key) {
171                         if (h->val != *val) {
172                                 munmap(*val, page_size);
173                                 *val = h->val;
174                         }
175                         return (1);
176                 }
177         }
178
179         h = malloc(sizeof(*h));
180         if (h == NULL)
181                 return (0);
182         h->key = key;
183         h->val = *val;
184         LIST_INSERT_HEAD(hd, h, link);
185         return (1);
186 }
187
188 static void *
189 pshared_remove(void *key)
190 {
191         struct pshared_hash_head *hd;
192         struct psh *h;
193         void *val;
194
195         hd = &pshared_hash[PSHARED_KEY_HASH(key)];
196         LIST_FOREACH(h, hd, link) {
197                 if (h->key == key) {
198                         LIST_REMOVE(h, link);
199                         val = h->val;
200                         free(h);
201                         return (val);
202                 }
203         }
204         return (NULL);
205 }
206
/*
 * Release the local resources for a pshared object: unmap the cached
 * off-page (if any) and tell the kernel to drop the shared-memory
 * object keyed by the lock address.  Called without pshared_lock
 * held, since munmap/_umtx_op need not be serialized with the hash.
 */
static void
pshared_clean(void *key, void *val)
{

        if (val != NULL)
                munmap(val, page_size);
        _umtx_op(NULL, UMTX_OP_SHM, UMTX_SHM_DESTROY, key, NULL);
}
215
/*
 * Remove the hash entry for key (under the write lock) and then
 * destroy the backing shared object and local mapping outside the
 * lock.
 */
static void
pshared_destroy(struct pthread *curthread, void *key)
{
        void *val;

        pshared_wlock(curthread);
        val = pshared_remove(key);
        pshared_unlock(curthread);
        pshared_clean(key, val);
}
226
/*
 * Return the local mapping of the shared off-page for the pshared
 * lock object at address key, or NULL on failure.
 *
 * With doalloc != 0 (lock initialization) any stale association for
 * the key is destroyed first and a new shared object is created
 * (UMTX_SHM_CREAT); otherwise the per-process hash is consulted
 * first and, on a miss, the existing shared object is looked up
 * (UMTX_SHM_LOOKUP).
 */
void *
__thr_pshared_offpage(void *key, int doalloc)
{
        struct pthread *curthread;
        void *res;
        int fd, ins_done;

        curthread = _get_curthread();
        if (doalloc) {
                /* Re-initialization: forget any previous off-page. */
                pshared_destroy(curthread, key);
                res = NULL;
        } else {
                /* Fast path: the mapping may already be cached. */
                pshared_rlock(curthread);
                res = pshared_lookup(key);
                pshared_unlock(curthread);
                if (res != NULL)
                        return (res);
        }
        /* Obtain an fd for the kernel-side shared object. */
        fd = _umtx_op(NULL, UMTX_OP_SHM, doalloc ? UMTX_SHM_CREAT :
            UMTX_SHM_LOOKUP, key, NULL);
        if (fd == -1)
                return (NULL);
        res = mmap(NULL, page_size, PROT_READ | PROT_WRITE, MAP_SHARED, fd, 0);
        /* The mapping keeps the object alive; the fd is no longer needed. */
        close(fd);
        if (res == MAP_FAILED)
                return (NULL);
        pshared_wlock(curthread);
        /*
         * On a racing insert pshared_insert() replaces res with the
         * winner's mapping and unmaps ours; ins_done == 0 only on
         * malloc failure, in which case undo everything.
         */
        ins_done = pshared_insert(key, &res);
        pshared_unlock(curthread);
        if (!ins_done) {
                pshared_clean(key, res);
                res = NULL;
        }
        return (res);
}
262
/*
 * Destroy the off-page association for a pshared lock being
 * destroyed, then garbage-collect entries whose shared objects were
 * destroyed by other processes (see the comment above pshared_gc()).
 */
void
__thr_pshared_destroy(void *key)
{
        struct pthread *curthread;

        curthread = _get_curthread();
        pshared_destroy(curthread, key);
        pshared_gc(curthread);
}
272
/*
 * Fork preparation: hold the hash lock in read mode across fork() so
 * the child does not inherit it midway through a hash update.  Raw
 * _thr_rwl_rdlock is used (no locklevel bookkeeping) because the
 * fork code manages the thread state itself.
 */
void
__thr_pshared_atfork_pre(void)
{

        _thr_rwl_rdlock(&pshared_lock);
}
279
/*
 * Fork completion: drop the lock taken by __thr_pshared_atfork_pre()
 * in both the parent and the child.
 */
void
__thr_pshared_atfork_post(void)
{

        _thr_rwl_unlock(&pshared_lock);
}