2 * Copyright (c) 2013-2018, Intel Corporation
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright notice,
10 * this list of conditions and the following disclaimer in the documentation
11 * and/or other materials provided with the distribution.
12 * * Neither the name of Intel Corporation nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
29 #include "pt_section.h"
30 #include "pt_block_cache.h"
31 #include "pt_image_section_cache.h"
/* Duplicate @str into freshly malloc'd storage and return it.
 *
 * NOTE(review): lines are elided in this view (embedded numbers jump
 * 40 -> 49 -> 53); presumably @len is strlen(@str) and NULL checks on
 * @str/@dup live in the omitted lines — confirm against the full file.
 */
40 static char *dupstr(const char *str)
/* Allocate @len characters plus one byte for the NUL terminator. */
49 dup = malloc(len + 1);
/* Copies the string including its NUL; returns @dup per strcpy(). */
53 return strcpy(dup, str);
/* Create a new section for @offset/@size of @filename.
 *
 * Queries the file's status and size, validates the requested range
 * against the file, then allocates and zero-initializes the section,
 * duplicating @filename and storing @status/@offset.
 *
 * NOTE(review): this view elides lines (error checks, the size/offset
 * fields, and the return) — confirm against the full file.  Also,
 * "§ion" below is mojibake for "&section" (an HTML-entity artifact).
 */
56 struct pt_section *pt_mk_section(const char *filename, uint64_t offset,
59 struct pt_section *section;
64 errcode = pt_section_mk_status(&status, &fsize, filename);
68 /* Fail if the requested @offset lies beyond the end of @file. */
72 /* Truncate @size so the entire range lies within @file. */
77 section = malloc(sizeof(*section));
81 memset(section, 0, sizeof(*section));
83 section->filename = dupstr(filename);
84 section->status = status;
85 section->offset = offset;
89 #if defined(FEATURE_THREADS)
/* Initialize the section lock; on failure, undo what was set up. */
91 errcode = mtx_init(§ion->lock, mtx_plain);
92 if (errcode != thrd_success) {
93 free(section->filename);
/* Initialize the attach lock; on failure, also destroy @lock. */
98 errcode = mtx_init(§ion->alock, mtx_plain);
99 if (errcode != thrd_success) {
100 mtx_destroy(§ion->lock);
101 free(section->filename);
106 #endif /* defined(FEATURE_THREADS) */
/* Acquire the section lock.
 *
 * Returns zero on success (the non-threaded build is a no-op),
 * -pte_internal if @section is NULL (presumably — the check is in an
 * elided line), and -pte_bad_lock if mtx_lock() fails.
 * "§ion" is mojibake for "&section".
 */
115 int pt_section_lock(struct pt_section *section)
118 return -pte_internal;
120 #if defined(FEATURE_THREADS)
124 errcode = mtx_lock(§ion->lock);
125 if (errcode != thrd_success)
126 return -pte_bad_lock;
128 #endif /* defined(FEATURE_THREADS) */
/* Release the section lock; mirror image of pt_section_lock().
 *
 * Returns zero on success, -pte_internal for a NULL @section
 * (presumably — check elided), -pte_bad_lock if mtx_unlock() fails.
 * "§ion" is mojibake for "&section".
 */
133 int pt_section_unlock(struct pt_section *section)
136 return -pte_internal;
138 #if defined(FEATURE_THREADS)
142 errcode = mtx_unlock(§ion->lock);
143 if (errcode != thrd_success)
144 return -pte_bad_lock;
146 #endif /* defined(FEATURE_THREADS) */
/* Destroy and deallocate @section.
 *
 * Destroys both mutexes (threaded builds only) and frees the owned
 * filename copy and status buffer.  Presumably the elided lines free
 * @section itself — confirm against the full file.
 * "§ion" is mojibake for "&section".
 */
151 static void pt_section_free(struct pt_section *section)
156 #if defined(FEATURE_THREADS)
158 mtx_destroy(§ion->alock);
159 mtx_destroy(§ion->lock);
161 #endif /* defined(FEATURE_THREADS) */
163 free(section->filename);
164 free(section->status);
/* Take a use-count reference on @section.
 *
 * Increments @ucount under the section lock; the elided branch
 * presumably detects uint16_t wrap-around and bails out with
 * -pte_overflow after dropping the lock.
 */
168 int pt_section_get(struct pt_section *section)
174 return -pte_internal;
176 errcode = pt_section_lock(section);
180 ucount = section->ucount + 1;
/* Overflow path: release the lock (error ignored) and report it. */
182 (void) pt_section_unlock(section);
183 return -pte_overflow;
186 section->ucount = ucount;
188 return pt_section_unlock(section);
/* Drop a use-count reference on @section.
 *
 * Snapshots @mcount/@ucount under the section lock.  The early branch
 * (condition elided) decrements @ucount and returns while still
 * referenced; otherwise the lock is dropped and, once this was the
 * last use and the section is not mapped, the section is freed.
 * A zero @ucount or non-zero @mcount at that point is an internal
 * error (dropping a reference that was never taken, or freeing while
 * mapped).
 */
191 int pt_section_put(struct pt_section *section)
193 uint16_t ucount, mcount;
197 return -pte_internal;
199 errcode = pt_section_lock(section);
203 mcount = section->mcount;
204 ucount = section->ucount;
/* Not the last reference: just store the decrement and return. */
206 section->ucount = ucount - 1;
207 return pt_section_unlock(section);
210 errcode = pt_section_unlock(section);
214 if (!ucount || mcount)
215 return -pte_internal;
217 pt_section_free(section);
/* Acquire the attach lock guarding @iscache/@acount.
 *
 * Same shape as pt_section_lock() but for the @alock mutex.
 * "§ion" is mojibake for "&section".
 */
221 static int pt_section_lock_attach(struct pt_section *section)
224 return -pte_internal;
226 #if defined(FEATURE_THREADS)
230 errcode = mtx_lock(§ion->alock);
231 if (errcode != thrd_success)
232 return -pte_bad_lock;
234 #endif /* defined(FEATURE_THREADS) */
/* Release the attach lock; mirror image of pt_section_lock_attach().
 * "§ion" is mojibake for "&section".
 */
239 static int pt_section_unlock_attach(struct pt_section *section)
242 return -pte_internal;
244 #if defined(FEATURE_THREADS)
248 errcode = mtx_unlock(§ion->alock);
249 if (errcode != thrd_success)
250 return -pte_bad_lock;
252 #endif /* defined(FEATURE_THREADS) */
/* Attach @iscache to @section, all under the attach lock.
 *
 * First-attach path: requires no cache installed yet and at least one
 * user (the "section->iscache || !ucount" guard presumably falls
 * through to the error exit — branch target elided); installs
 * @iscache.  Re-attach path: requires the same @iscache and bumps
 * @acount; the -pte_overflow exit presumably guards the uint16_t
 * @acount increment.  Confirm elided control flow against the full
 * file.
 */
257 int pt_section_attach(struct pt_section *section,
258 struct pt_image_section_cache *iscache)
260 uint16_t acount, ucount;
263 if (!section || !iscache)
264 return -pte_internal;
266 errcode = pt_section_lock_attach(section);
270 ucount = section->ucount;
271 acount = section->acount;
273 if (section->iscache || !ucount)
276 section->iscache = iscache;
279 return pt_section_unlock_attach(section);
/* Overflow exit: drop the attach lock (error ignored) and report. */
284 (void) pt_section_unlock_attach(section);
285 return -pte_overflow;
/* Re-attach must name the cache that is already installed. */
291 if (section->iscache != iscache)
294 section->acount = acount;
296 return pt_section_unlock_attach(section);
/* Common error exit: mismatched cache or inconsistent counts. */
299 (void) pt_section_unlock_attach(section);
300 return -pte_internal;
/* Detach @iscache from @section, all under the attach lock.
 *
 * Rejects a detach naming a different cache than the one installed.
 * Decrements @acount (arithmetic elided) and clears @iscache on the
 * last detach; the elided conditions presumably validate
 * @acount/@ucount consistency — confirm against the full file.
 */
303 int pt_section_detach(struct pt_section *section,
304 struct pt_image_section_cache *iscache)
306 uint16_t acount, ucount;
309 if (!section || !iscache)
310 return -pte_internal;
312 errcode = pt_section_lock_attach(section);
316 if (section->iscache != iscache)
319 acount = section->acount;
324 ucount = section->ucount;
328 section->acount = acount;
/* Last detach: forget the cache pointer. */
330 section->iscache = NULL;
332 return pt_section_unlock_attach(section);
/* Error exit: drop the attach lock (error ignored) and report. */
335 (void) pt_section_unlock_attach(section);
336 return -pte_internal;
/* Return @section's file name (NULL check presumably elided). */
339 const char *pt_section_filename(const struct pt_section *section)
344 return section->filename;
/* Return @section's size in bytes (NULL check presumably elided). */
347 uint64_t pt_section_size(const struct pt_section *section)
352 return section->size;
/* Compute the memory used by @section's block cache into @psize.
 *
 * The size is the cache header plus one pt_bcache_entry per entry.
 * An elided branch presumably reports zero when no cache is
 * allocated — confirm against the full file.
 */
355 static int pt_section_bcache_memsize(const struct pt_section *section,
358 struct pt_block_cache *bcache;
360 if (!section || !psize)
361 return -pte_internal;
363 bcache = section->bcache;
369 *psize = sizeof(*bcache) +
370 (bcache->nentries * sizeof(struct pt_bcache_entry));
/* Compute @section's total memory use into @psize; caller holds the
 * section lock.
 *
 * Sums the mapping's memory (via the @memsize callback installed by
 * the mapper) and the block cache's memory.  A NULL callback is an
 * internal error; an elided branch presumably reports zero when the
 * section is not mapped — confirm against the full file.
 */
375 static int pt_section_memsize_locked(const struct pt_section *section,
378 uint64_t msize, bcsize;
379 int (*memsize)(const struct pt_section *section, uint64_t *size);
382 if (!section || !psize)
383 return -pte_internal;
385 memsize = section->memsize;
388 return -pte_internal;
394 errcode = memsize(section, &msize);
398 errcode = pt_section_bcache_memsize(section, &bcsize);
402 *psize = msize + bcsize;
/* Compute @section's memory use into @size.
 *
 * Locking wrapper around pt_section_memsize_locked(); the elided
 * lines presumably combine @status and the unlock @errcode into the
 * return value.
 */
407 int pt_section_memsize(struct pt_section *section, uint64_t *size)
411 errcode = pt_section_lock(section);
415 status = pt_section_memsize_locked(section, size);
417 errcode = pt_section_unlock(section);
/* Return @section's file offset (NULL check presumably elided). */
424 uint64_t pt_section_offset(const struct pt_section *section)
429 return section->offset;
/* Allocate @section's block cache; requires the section to be mapped.
 *
 * The section size must fit uint32_t (cast checked by the elided
 * comparison) or block caching is -pte_not_supported.  Takes the
 * attach lock, then the section lock, allocates and installs the
 * cache, recomputes the memory footprint, and notifies the attached
 * @iscache (if any) of the resize after dropping the section lock
 * but still under the attach lock.  On failure past installation the
 * cache is deliberately left in place (see comment at the original
 * line 485).  Error-exit labels are elided in this view.
 */
432 int pt_section_alloc_bcache(struct pt_section *section)
434 struct pt_image_section_cache *iscache;
435 struct pt_block_cache *bcache;
436 uint64_t ssize, memsize;
441 return -pte_internal;
443 if (!section->mcount)
444 return -pte_internal;
446 ssize = pt_section_size(section);
447 csize = (uint32_t) ssize;
450 return -pte_not_supported;
454 /* We need to take both the attach and the section lock in order to pair
455 * the block cache allocation and the resize notification.
457 * This allows map notifications in between but they only change the
458 * order of sections in the cache.
460 * The attach lock needs to be taken first.
462 errcode = pt_section_lock_attach(section);
466 errcode = pt_section_lock(section);
/* Presumably an early exit here if a cache already exists. */
470 bcache = pt_section_bcache(section);
476 bcache = pt_bcache_alloc(csize);
478 errcode = -pte_nomem;
482 /* Install the block cache. It will become visible and may be used
485 * If we fail later on, we leave the block cache and report the error to
486 * the allocating decoder thread.
488 section->bcache = bcache;
490 errcode = pt_section_memsize_locked(section, &memsize);
494 errcode = pt_section_unlock(section);
499 iscache = section->iscache;
501 errcode = pt_iscache_notify_resize(iscache, section,
508 return pt_section_unlock_attach(section);
/* Error exits: drop whichever locks are still held. */
512 (void) pt_section_unlock(section);
515 (void) pt_section_unlock_attach(section);
/* Notify an attached @iscache that @section has been mapped.
 *
 * Reads @iscache under the attach lock; without an attached cache
 * there is nothing to do.  The elided lines presumably combine
 * @status and the unlock @errcode into the return value.
 */
519 int pt_section_on_map_lock(struct pt_section *section)
521 struct pt_image_section_cache *iscache;
525 return -pte_internal;
527 errcode = pt_section_lock_attach(section);
531 iscache = section->iscache;
/* No cache attached: nothing to notify. */
533 return pt_section_unlock_attach(section);
535 /* There is a potential deadlock when @section was unmapped again and
536 * @iscache tries to map it. This would cause this function to be
537 * re-entered while we're still holding the attach lock.
539 * This scenario is very unlikely, though, since our caller does not yet
540 * know whether pt_section_map() succeeded.
542 status = pt_iscache_notify_map(iscache, section);
544 errcode = pt_section_unlock_attach(section);
/* Share the existing mapping of @section by bumping @mcount.
 *
 * Under the section lock: a zero @mcount is an internal error (the
 * section is not mapped); the second error branch presumably guards
 * the uint16_t increment against wrap-around (-pte_overflow).  The
 * increment itself is in an elided line.
 */
551 int pt_section_map_share(struct pt_section *section)
557 return -pte_internal;
559 errcode = pt_section_lock(section);
563 mcount = section->mcount;
/* Not mapped: sharing a mapping that does not exist. */
565 (void) pt_section_unlock(section);
566 return -pte_internal;
/* Overflow exit for the map count. */
571 (void) pt_section_unlock(section);
572 return -pte_overflow;
575 section->mcount = mcount;
577 return pt_section_unlock(section);
/* Unmap @section, dropping one map count.
 *
 * Under the section lock: a zero @mcount means the section is not
 * mapped (-pte_nomap).  While other mappings remain, only the
 * decrement is stored.  On the last unmap, a NULL @unmap callback is
 * an internal error; otherwise the callback is invoked and the block
 * cache freed.  The elided lines presumably combine @status and the
 * unlock @errcode into the return value.
 */
580 int pt_section_unmap(struct pt_section *section)
586 return -pte_internal;
588 errcode = pt_section_lock(section);
592 mcount = section->mcount;
594 errcode = -pte_nomap;
/* Other mappings remain: just store the decrement. */
598 section->mcount = mcount -= 1;
600 return pt_section_unlock(section);
602 errcode = -pte_internal;
606 status = section->unmap(section);
/* The block cache only makes sense while mapped; free it now. */
608 pt_bcache_free(section->bcache);
609 section->bcache = NULL;
611 errcode = pt_section_unlock(section);
/* Error exit: drop the lock (error ignored). */
618 (void) pt_section_unlock(section);
/* Read @size bytes at @offset within @section into @buffer.
 *
 * Bounds-checks against the section size (the offset-beyond-limit
 * check is elided), truncates @size to the remaining @space, and
 * delegates to the mapper's @read callback.  Presumably returns the
 * number of bytes read or a negative pt_error_code — confirm against
 * the callback's contract.
 */
622 int pt_section_read(const struct pt_section *section, uint8_t *buffer,
623 uint16_t size, uint64_t offset)
625 uint64_t limit, space;
628 return -pte_internal;
633 limit = section->size;
637 /* Truncate if we try to read past the end of the section. */
638 space = limit - offset;
640 size = (uint16_t) space;
642 return section->read(section, buffer, size, offset);