2 * Copyright (c) 2013-2019, Intel Corporation
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright notice,
10 * this list of conditions and the following disclaimer in the documentation
11 * and/or other materials provided with the distribution.
12 * * Neither the name of Intel Corporation nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
29 #include "pt_section.h"
30 #include "pt_block_cache.h"
31 #include "pt_image_section_cache.h"
40 int pt_mk_section(struct pt_section **psection, const char *filename,
41 uint64_t offset, uint64_t size)
43 struct pt_section *section;
53 flen = strnlen(filename, FILENAME_MAX);
54 if (FILENAME_MAX <= flen)
63 memcpy(fname, filename, flen);
65 errcode = pt_section_mk_status(&status, &fsize, fname);
69 /* Fail if the requested @offset lies beyond the end of @file. */
70 if (fsize <= offset) {
71 errcode = -pte_invalid;
75 /* Truncate @size so the entire range lies within @file. */
80 section = malloc(sizeof(*section));
86 memset(section, 0, sizeof(*section));
88 section->filename = fname;
89 section->status = status;
90 section->offset = offset;
94 #if defined(FEATURE_THREADS)
96 errcode = mtx_init(§ion->lock, mtx_plain);
97 if (errcode != thrd_success) {
100 errcode = -pte_bad_lock;
104 errcode = mtx_init(§ion->alock, mtx_plain);
105 if (errcode != thrd_success) {
106 mtx_destroy(§ion->lock);
109 errcode = -pte_bad_lock;
113 #endif /* defined(FEATURE_THREADS) */
126 int pt_section_lock(struct pt_section *section)
129 return -pte_internal;
131 #if defined(FEATURE_THREADS)
135 errcode = mtx_lock(§ion->lock);
136 if (errcode != thrd_success)
137 return -pte_bad_lock;
139 #endif /* defined(FEATURE_THREADS) */
144 int pt_section_unlock(struct pt_section *section)
147 return -pte_internal;
149 #if defined(FEATURE_THREADS)
153 errcode = mtx_unlock(§ion->lock);
154 if (errcode != thrd_success)
155 return -pte_bad_lock;
157 #endif /* defined(FEATURE_THREADS) */
162 static void pt_section_free(struct pt_section *section)
167 #if defined(FEATURE_THREADS)
169 mtx_destroy(§ion->alock);
170 mtx_destroy(§ion->lock);
172 #endif /* defined(FEATURE_THREADS) */
174 free(section->filename);
175 free(section->status);
179 int pt_section_get(struct pt_section *section)
185 return -pte_internal;
187 errcode = pt_section_lock(section);
191 ucount = section->ucount + 1;
193 (void) pt_section_unlock(section);
194 return -pte_overflow;
197 section->ucount = ucount;
199 return pt_section_unlock(section);
202 int pt_section_put(struct pt_section *section)
204 uint16_t ucount, mcount;
208 return -pte_internal;
210 errcode = pt_section_lock(section);
214 mcount = section->mcount;
215 ucount = section->ucount;
217 section->ucount = ucount - 1;
218 return pt_section_unlock(section);
221 errcode = pt_section_unlock(section);
225 if (!ucount || mcount)
226 return -pte_internal;
228 pt_section_free(section);
232 static int pt_section_lock_attach(struct pt_section *section)
235 return -pte_internal;
237 #if defined(FEATURE_THREADS)
241 errcode = mtx_lock(§ion->alock);
242 if (errcode != thrd_success)
243 return -pte_bad_lock;
245 #endif /* defined(FEATURE_THREADS) */
250 static int pt_section_unlock_attach(struct pt_section *section)
253 return -pte_internal;
255 #if defined(FEATURE_THREADS)
259 errcode = mtx_unlock(§ion->alock);
260 if (errcode != thrd_success)
261 return -pte_bad_lock;
263 #endif /* defined(FEATURE_THREADS) */
268 int pt_section_attach(struct pt_section *section,
269 struct pt_image_section_cache *iscache)
271 uint16_t acount, ucount;
274 if (!section || !iscache)
275 return -pte_internal;
277 errcode = pt_section_lock_attach(section);
281 ucount = section->ucount;
282 acount = section->acount;
284 if (section->iscache || !ucount)
287 section->iscache = iscache;
290 return pt_section_unlock_attach(section);
295 (void) pt_section_unlock_attach(section);
296 return -pte_overflow;
302 if (section->iscache != iscache)
305 section->acount = acount;
307 return pt_section_unlock_attach(section);
310 (void) pt_section_unlock_attach(section);
311 return -pte_internal;
314 int pt_section_detach(struct pt_section *section,
315 struct pt_image_section_cache *iscache)
317 uint16_t acount, ucount;
320 if (!section || !iscache)
321 return -pte_internal;
323 errcode = pt_section_lock_attach(section);
327 if (section->iscache != iscache)
330 acount = section->acount;
335 ucount = section->ucount;
339 section->acount = acount;
341 section->iscache = NULL;
343 return pt_section_unlock_attach(section);
346 (void) pt_section_unlock_attach(section);
347 return -pte_internal;
350 const char *pt_section_filename(const struct pt_section *section)
355 return section->filename;
358 uint64_t pt_section_size(const struct pt_section *section)
363 return section->size;
366 static int pt_section_bcache_memsize(const struct pt_section *section,
369 struct pt_block_cache *bcache;
371 if (!section || !psize)
372 return -pte_internal;
374 bcache = section->bcache;
380 *psize = sizeof(*bcache) +
381 (bcache->nentries * sizeof(struct pt_bcache_entry));
386 static int pt_section_memsize_locked(const struct pt_section *section,
389 uint64_t msize, bcsize;
390 int (*memsize)(const struct pt_section *section, uint64_t *size);
393 if (!section || !psize)
394 return -pte_internal;
396 memsize = section->memsize;
399 return -pte_internal;
405 errcode = memsize(section, &msize);
409 errcode = pt_section_bcache_memsize(section, &bcsize);
413 *psize = msize + bcsize;
/* Compute @section's total memory footprint into @size, taking the
 * section lock around the computation.
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
int pt_section_memsize(struct pt_section *section, uint64_t *size)
{
	int errcode, status;

	errcode = pt_section_lock(section);
	if (errcode < 0)
		return errcode;

	status = pt_section_memsize_locked(section, size);

	errcode = pt_section_unlock(section);
	if (errcode < 0)
		return errcode;

	return status;
}
435 uint64_t pt_section_offset(const struct pt_section *section)
440 return section->offset;
443 int pt_section_alloc_bcache(struct pt_section *section)
445 struct pt_image_section_cache *iscache;
446 struct pt_block_cache *bcache;
447 uint64_t ssize, memsize;
452 return -pte_internal;
454 if (!section->mcount)
455 return -pte_internal;
457 ssize = pt_section_size(section);
458 csize = (uint32_t) ssize;
461 return -pte_not_supported;
465 /* We need to take both the attach and the section lock in order to pair
466 * the block cache allocation and the resize notification.
468 * This allows map notifications in between but they only change the
469 * order of sections in the cache.
471 * The attach lock needs to be taken first.
473 errcode = pt_section_lock_attach(section);
477 errcode = pt_section_lock(section);
481 bcache = pt_section_bcache(section);
487 bcache = pt_bcache_alloc(csize);
489 errcode = -pte_nomem;
493 /* Install the block cache. It will become visible and may be used
496 * If we fail later on, we leave the block cache and report the error to
497 * the allocating decoder thread.
499 section->bcache = bcache;
501 errcode = pt_section_memsize_locked(section, &memsize);
505 errcode = pt_section_unlock(section);
510 iscache = section->iscache;
512 errcode = pt_iscache_notify_resize(iscache, section,
519 return pt_section_unlock_attach(section);
523 (void) pt_section_unlock(section);
526 (void) pt_section_unlock_attach(section);
530 int pt_section_on_map_lock(struct pt_section *section)
532 struct pt_image_section_cache *iscache;
536 return -pte_internal;
538 errcode = pt_section_lock_attach(section);
542 iscache = section->iscache;
544 return pt_section_unlock_attach(section);
546 /* There is a potential deadlock when @section was unmapped again and
547 * @iscache tries to map it. This would cause this function to be
548 * re-entered while we're still holding the attach lock.
550 * This scenario is very unlikely, though, since our caller does not yet
551 * know whether pt_section_map() succeeded.
553 status = pt_iscache_notify_map(iscache, section);
555 errcode = pt_section_unlock_attach(section);
562 int pt_section_map_share(struct pt_section *section)
568 return -pte_internal;
570 errcode = pt_section_lock(section);
574 mcount = section->mcount;
576 (void) pt_section_unlock(section);
577 return -pte_internal;
582 (void) pt_section_unlock(section);
583 return -pte_overflow;
586 section->mcount = mcount;
588 return pt_section_unlock(section);
591 int pt_section_unmap(struct pt_section *section)
597 return -pte_internal;
599 errcode = pt_section_lock(section);
603 mcount = section->mcount;
605 errcode = -pte_nomap;
609 section->mcount = mcount -= 1;
611 return pt_section_unlock(section);
613 errcode = -pte_internal;
617 status = section->unmap(section);
619 pt_bcache_free(section->bcache);
620 section->bcache = NULL;
622 errcode = pt_section_unlock(section);
629 (void) pt_section_unlock(section);
633 int pt_section_read(const struct pt_section *section, uint8_t *buffer,
634 uint16_t size, uint64_t offset)
636 uint64_t limit, space;
639 return -pte_internal;
644 limit = section->size;
648 /* Truncate if we try to read past the end of the section. */
649 space = limit - offset;
651 size = (uint16_t) space;
653 return section->read(section, buffer, size, offset);