2 * Copyright (c) 2016-2019, Intel Corporation
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright notice,
10 * this list of conditions and the following disclaimer in the documentation
11 * and/or other materials provided with the distribution.
12 * * Neither the name of Intel Corporation nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
29 #include "pt_image_section_cache.h"
30 #include "pt_section.h"
/* Duplicate @str into a freshly malloc'ed buffer, silently truncating it to
 * at most 4096 characters.
 *
 * NOTE(review): several interior lines are elided in this view (local
 * declarations, NULL checks, and the terminating NUL store) - confirm
 * against the full source before relying on this fragment.
 */
37 static char *dupstr(const char *str)
45 /* Silently truncate the name if it gets too big. */
46 len = strnlen(str, 4096ul);
48 dup = malloc(len + 1);
/* memcpy copies only @len bytes; the trailing NUL at dup[len] must be set
 * on an elided line.
 */
54 return memcpy(dup, str, len);
/* Initialize @iscache: zero all fields, disable the memory limit
 * (UINT64_MAX means "no limit"), duplicate @name, and - when built with
 * FEATURE_THREADS - create the cache mutex.
 *
 * NOTE(review): argument checks and return statements are elided in this
 * view; dupstr() may return NULL, presumably tolerated (name is optional).
 */
57 int pt_iscache_init(struct pt_image_section_cache *iscache, const char *name)
62 memset(iscache, 0, sizeof(*iscache));
63 iscache->limit = UINT64_MAX;
65 iscache->name = dupstr(name);
70 #if defined(FEATURE_THREADS)
74 errcode = mtx_init(&iscache->lock, mtx_plain);
/* mtx_init failure is mapped to an error return on an elided line. */
75 if (errcode != thrd_success)
78 #endif /* defined(FEATURE_THREADS) */
/* Finalize @iscache: drop all cached sections via pt_iscache_clear()
 * (best-effort, error ignored) and, with FEATURE_THREADS, destroy the
 * cache mutex.  The NULL check is elided in this view.
 */
83 void pt_iscache_fini(struct pt_image_section_cache *iscache)
88 (void) pt_iscache_clear(iscache);
91 #if defined(FEATURE_THREADS)
93 mtx_destroy(&iscache->lock);
95 #endif /* defined(FEATURE_THREADS) */
/* Acquire @iscache->lock; a no-op when built without FEATURE_THREADS.
 *
 * Returns -pte_internal for a NULL @iscache and -pte_bad_lock if taking
 * the mutex fails; the success return is elided in this view.
 */
98 static inline int pt_iscache_lock(struct pt_image_section_cache *iscache)
101 return -pte_internal;
103 #if defined(FEATURE_THREADS)
107 errcode = mtx_lock(&iscache->lock);
108 if (errcode != thrd_success)
109 return -pte_bad_lock;
111 #endif /* defined(FEATURE_THREADS) */
/* Release @iscache->lock; mirror of pt_iscache_lock(), no-op without
 * FEATURE_THREADS.
 *
 * Returns -pte_internal for a NULL @iscache and -pte_bad_lock if releasing
 * the mutex fails; the success return is elided in this view.
 */
116 static inline int pt_iscache_unlock(struct pt_image_section_cache *iscache)
119 return -pte_internal;
121 #if defined(FEATURE_THREADS)
125 errcode = mtx_unlock(&iscache->lock);
126 if (errcode != thrd_success)
127 return -pte_bad_lock;
129 #endif /* defined(FEATURE_THREADS) */
/* Translate a cache-array index into a public image section identifier
 * (isid).  Callers treat isids as strictly positive (see the "non-zero
 * index" contract above pt_iscache_find_section_locked), so this
 * presumably returns index + 1 - the body is elided in this view.
 */
134 static inline int isid_from_index(uint16_t index)
/* Grow @iscache->entries by a fixed increment of 8 slots.
 *
 * Returns -pte_internal for a NULL @iscache; the overflow and allocation
 * failure returns are elided in this view.
 */
139 static int pt_iscache_expand(struct pt_image_section_cache *iscache)
141 struct pt_iscache_entry *entries;
142 uint16_t capacity, target;
145 return -pte_internal;
147 capacity = iscache->capacity;
148 target = capacity + 8;
150 /* Check for overflows. */
151 if (target < capacity)
/* realloc result goes to a temporary so iscache->entries stays valid if
 * the allocation fails.
 */
154 entries = realloc(iscache->entries, target * sizeof(*entries));
158 iscache->capacity = target;
159 iscache->entries = entries;
/* Search @iscache for an entry that matches @filename, @offset, @size and
 * @laddr exactly; the caller must hold the iscache lock.
 *
 * Returns the positive isid of the matching entry via isid_from_index(),
 * -pte_internal for bad arguments or a filename-less section.  The
 * not-found return and the loop's @end initialization are elided in this
 * view.
 */
163 static int pt_iscache_find_locked(struct pt_image_section_cache *iscache,
164 const char *filename, uint64_t offset,
165 uint64_t size, uint64_t laddr)
169 if (!iscache || !filename)
170 return -pte_internal;
173 for (idx = 0; idx < end; ++idx) {
174 const struct pt_iscache_entry *entry;
175 const struct pt_section *section;
176 const char *sec_filename;
177 uint64_t sec_offset, sec_size;
179 entry = &iscache->entries[idx];
181 /* We do not zero-initialize the array - a NULL check is
184 section = entry->section;
185 sec_filename = pt_section_filename(section);
186 sec_offset = pt_section_offset(section);
187 sec_size = pt_section_size(section);
/* Cheap integer comparisons first; strcmp only as the last check. */
189 if (entry->laddr != laddr)
192 if (sec_offset != offset)
195 if (sec_size != size)
198 /* We should not have a section without a filename. */
200 return -pte_internal;
202 if (strcmp(sec_filename, filename) != 0)
205 return isid_from_index(idx);
/* Free the lru list starting at @lru, unmapping each entry's section as it
 * goes.  The list-walk loop and error handling are elided in this view.
 */
211 static int pt_iscache_lru_free(struct pt_iscache_lru_entry *lru)
214 struct pt_iscache_lru_entry *trash;
220 errcode = pt_section_unmap(trash->section);
/* Prune @iscache->lru back below @iscache->limit; the caller holds the
 * iscache lock.  The pruned tail is detached from the list and handed back
 * through @tail so the caller can free it after unlocking (freeing unmaps
 * sections, which must not happen under the lock).
 *
 * NOTE(review): the accumulation of @used, the list truncation, and the
 * success return are elided in this view.
 */
230 static int pt_iscache_lru_prune(struct pt_image_section_cache *iscache,
231 struct pt_iscache_lru_entry **tail)
233 struct pt_iscache_lru_entry *lru, **pnext;
234 uint64_t limit, used;
236 if (!iscache || !tail)
237 return -pte_internal;
239 limit = iscache->limit;
242 pnext = &iscache->lru;
243 for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
249 /* The cache got too big; prune it starting from @lru. */
250 iscache->used = used - lru->size;
257 /* We shouldn't prune the cache unnecessarily. */
258 return -pte_internal;
261 /* Add @section to the front of @iscache->lru.
263 * Returns a positive integer if we need to prune the cache.
264 * Returns zero if we don't need to prune the cache.
265 * Returns a negative pt_error_code otherwise.
/* NOTE(review): "isache" (missing 's') in the name is in the original;
 * keep it consistent with the caller in pt_iscache_lru_add().  Several
 * error-path lines are elided in this view.
 */
267 static int pt_isache_lru_new(struct pt_image_section_cache *iscache,
268 struct pt_section *section)
270 struct pt_iscache_lru_entry *lru;
271 uint64_t memsize, used, total, limit;
275 return -pte_internal;
277 errcode = pt_section_memsize(section, &memsize);
281 /* Don't try to add the section if it is too big. We'd prune it again
282 * together with all other sections in our cache.
284 limit = iscache->limit;
/* Keep the section mapped while it sits on the lru list; undone when the
 * entry is freed (see pt_iscache_lru_free()).
 */
288 errcode = pt_section_map_share(section);
292 lru = malloc(sizeof(*lru));
/* On allocation failure, drop the extra mapping before bailing out. */
294 (void) pt_section_unmap(section);
298 lru->section = section;
301 lru->next = iscache->lru;
304 used = iscache->used;
305 total = used + memsize;
306 if (total < used || total < memsize)
307 return -pte_overflow;
309 iscache->used = total;
/* Positive return asks the caller to prune; see contract above. */
311 return (limit < total) ? 1 : 0;
314 /* Add or move @section to the front of @iscache->lru.
316 * Returns a positive integer if we need to prune the cache.
317 * Returns zero if we don't need to prune the cache.
318 * Returns a negative pt_error_code otherwise.
/* NOTE(review): the in-list unlink and the early return after moving the
 * entry to the front are elided in this view.
 */
320 static int pt_iscache_lru_add(struct pt_image_section_cache *iscache,
321 struct pt_section *section)
323 struct pt_iscache_lru_entry *lru, **pnext;
326 return -pte_internal;
328 pnext = &iscache->lru;
329 for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
331 if (lru->section != section)
334 /* We found it in the cache. Move it to the front. */
336 lru->next = iscache->lru;
342 /* We didn't find it in the cache. Add it. */
343 return pt_isache_lru_new(iscache, section);
347 /* Remove @section from @iscache->lru.
349 * Returns zero on success, a negative pt_error_code otherwise.
/* NOTE(review): the unlink (*pnext = lru->next) and lru->next = NULL
 * before freeing the single entry are elided in this view; freeing via
 * pt_iscache_lru_free() also unmaps the section.
 */
351 static int pt_iscache_lru_remove(struct pt_image_section_cache *iscache,
352 const struct pt_section *section)
354 struct pt_iscache_lru_entry *lru, **pnext;
357 return -pte_internal;
359 pnext = &iscache->lru;
360 for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
362 if (lru->section != section)
365 /* We found it in the cache. Remove it. */
371 return pt_iscache_lru_free(lru);
375 /* Add or move @section to the front of @iscache->lru and update its size.
377 * Returns a positive integer if we need to prune the cache.
378 * Returns zero if we don't need to prune the cache.
379 * Returns a negative pt_error_code otherwise.
/* NOTE(review): the lines that read @oldsize from the front entry, store
 * the new size, and adjust @used by the delta are elided in this view.
 */
381 static int pt_iscache_lru_resize(struct pt_image_section_cache *iscache,
382 struct pt_section *section, uint64_t memsize)
384 struct pt_iscache_lru_entry *lru;
385 uint64_t oldsize, used;
389 return -pte_internal;
/* Re-use lru_add to move (or insert) @section to the front. */
391 status = pt_iscache_lru_add(iscache, section);
398 return -pte_internal;
402 /* If @section is cached, it must be first.
404 * We may choose not to cache it, though, e.g. if it is too big.
406 if (lru->section != section) {
407 if (iscache->limit < memsize)
410 return -pte_internal;
416 /* If we need to prune anyway, we're done. */
420 used = iscache->used;
424 iscache->used = used;
426 return (iscache->limit < used) ? 1 : 0;
429 /* Clear @iscache->lru.
431 * Unlike other iscache_lru functions, the caller does not lock @iscache.
433 * Return zero on success, a negative pt_error_code otherwise.
/* NOTE(review): the lines detaching the list head into @lru under the lock
 * are elided in this view; the list is freed after unlocking, since
 * freeing unmaps sections.
 */
435 static int pt_iscache_lru_clear(struct pt_image_section_cache *iscache)
437 struct pt_iscache_lru_entry *lru;
440 errcode = pt_iscache_lock(iscache)
446 iscache->used = 0ull;
448 errcode = pt_iscache_unlock(iscache);
452 return pt_iscache_lru_free(lru);
455 /* Search @iscache for a partial or exact match of @section loaded at @laddr and
456 * return the corresponding index or @iscache->size if no match is found.
458 * The caller must lock @iscache.
460 * Returns a non-zero index on success, a negative pt_error_code otherwise.
/* NOTE(review): the return type line, the @match/@idx/@end declarations,
 * the continue statements, and the final "return match" are elided in this
 * view.  A "partial" match is same file/offset/size at a different load
 * address; an exact match additionally matches @laddr.
 */
463 pt_iscache_find_section_locked(const struct pt_image_section_cache *iscache,
464 const char *filename, uint64_t offset,
465 uint64_t size, uint64_t laddr)
467 const struct pt_section *section;
471 if (!iscache || !filename)
472 return -pte_internal;
475 match = end = iscache->size;
476 for (idx = 0; idx < end; ++idx) {
477 const struct pt_iscache_entry *entry;
478 const struct pt_section *sec;
480 entry = &iscache->entries[idx];
482 /* We do not zero-initialize the array - a NULL check is
485 sec = entry->section;
487 /* Avoid redundant match checks. */
488 if (sec != section) {
489 const char *sec_filename;
491 /* We don't have duplicates. Skip the check. */
495 if (offset != pt_section_offset(sec))
498 if (size != pt_section_size(sec))
501 sec_filename = pt_section_filename(sec);
503 return -pte_internal;
505 if (strcmp(filename, sec_filename) != 0)
508 /* Use the cached section instead. */
513 /* If we didn't continue, @section == @sec and we have a match.
515 * If we also find a matching load address, we're done.
517 if (laddr == entry->laddr)
/* Add @section, loaded at @laddr, to @iscache and return its isid.
 *
 * Takes a reference to @section and attaches @iscache to it before taking
 * the iscache lock (see the deadlock discussion below); on success, both
 * the attach and the reference are handed over to @iscache.  If a matching
 * section already exists, that one is shared instead and @section's
 * reference and attach are dropped.
 *
 * NOTE(review): this function is intricate and many of its lines (loop
 * header, error-check branches, the @idx declaration, cleanup labels
 * out_unlock_detach etc.) are elided in this view - the code below is kept
 * byte-identical rather than reconstructed.
 */
524 int pt_iscache_add(struct pt_image_section_cache *iscache,
525 struct pt_section *section, uint64_t laddr)
527 const char *filename;
528 uint64_t offset, size;
532 if (!iscache || !section)
533 return -pte_internal;
535 /* We must have a filename for @section. */
536 filename = pt_section_filename(section);
538 return -pte_internal;
540 offset = pt_section_offset(section);
541 size = pt_section_size(section);
543 /* Adding a section is slightly complicated by a potential deadlock
546 * - in order to add a section, we need to attach to it, which
547 * requires taking the section's attach lock.
549 * - if we are already attached to it, we may receive on-map
550 * notifications, which will be sent while holding the attach lock
551 * and require taking the iscache lock.
553 * Hence we can't attach to a section while holding the iscache lock.
556 * We therefore attach to @section first and then lock @iscache.
558 * This opens a small window where an existing @section may be removed
559 * from @iscache and replaced by a new matching section. We would want
560 * to share that new section rather than adding a duplicate @section.
562 * After locking @iscache, we therefore check for existing matching
563 * sections and, if one is found, update @section. This involves
564 * detaching from @section and attaching to the existing section.
566 * And for this, we will have to temporarily unlock @iscache again.
568 errcode = pt_section_get(section);
572 errcode = pt_section_attach(section, iscache);
576 errcode = pt_iscache_lock(iscache);
580 /* We may need to repeat this step.
582 * Typically we don't and this takes only a single iteration. One
583 * scenario where we do repeat this is when adding a section with an
584 * out-of-bounds size.
586 * We will not find a matching section in pt_iscache_add_file() so we
587 * create a new section. This will have its size reduced to match the
590 * For this reduced size, we may now find an existing section, and we
591 * will take another trip in the below loop.
594 const struct pt_iscache_entry *entry;
595 struct pt_section *sec;
598 /* Find an existing section matching @section that we'd share
599 * rather than adding @section.
601 match = pt_iscache_find_section_locked(iscache, filename,
602 offset, size, laddr);
605 goto out_unlock_detach;
608 /* We're done if we have not found a matching section. */
609 if (iscache->size <= match)
612 entry = &iscache->entries[match];
614 /* We're also done if we found the same section again.
616 * We further check for a perfect match. In that case, we don't
617 * need to insert anything, at all.
619 sec = entry->section;
620 if (sec == section) {
621 if (entry->laddr == laddr) {
622 errcode = pt_iscache_unlock(iscache);
626 errcode = pt_section_detach(section, iscache);
630 errcode = pt_section_put(section);
634 return isid_from_index((uint16_t) match);
640 /* We update @section to share the existing @sec.
642 * This requires detaching from @section, which, in turn,
643 * requires temporarily unlocking @iscache.
645 * We further need to remove @section from @iscache->lru.
647 errcode = pt_section_get(sec);
649 goto out_unlock_detach;
651 errcode = pt_iscache_unlock(iscache);
653 (void) pt_section_put(sec);
657 errcode = pt_section_detach(section, iscache);
659 (void) pt_section_put(sec);
663 errcode = pt_section_attach(sec, iscache);
665 (void) pt_section_put(sec);
669 errcode = pt_iscache_lock(iscache);
671 (void) pt_section_put(section);
672 /* Complete the swap for cleanup. */
677 /* We may have received on-map notifications for @section and we
678 * may have added @section to @iscache->lru.
680 * Since we're still holding a reference to it, no harm has been
681 * done. But we need to remove it before we drop our reference.
683 errcode = pt_iscache_lru_remove(iscache, section);
685 (void) pt_section_put(section);
686 /* Complete the swap for cleanup. */
688 goto out_unlock_detach;
691 /* Drop the reference to @section. */
692 errcode = pt_section_put(section);
694 /* Complete the swap for cleanup. */
696 goto out_unlock_detach;
701 * We will try again in the next iteration.
706 /* Expand the cache, if necessary. */
707 if (iscache->capacity <= iscache->size) {
708 /* We must never exceed the capacity. */
709 if (iscache->capacity < iscache->size) {
710 errcode = -pte_internal;
711 goto out_unlock_detach;
714 errcode = pt_iscache_expand(iscache);
716 goto out_unlock_detach;
718 /* Make sure it is big enough, now. */
719 if (iscache->capacity <= iscache->size) {
720 errcode = -pte_internal;
721 goto out_unlock_detach;
725 /* Insert a new entry for @section at @laddr.
727 * This hands both attach and reference over to @iscache. We will
728 * detach and drop the reference again when the entry is removed.
730 idx = iscache->size++;
732 iscache->entries[idx].section = section;
733 iscache->entries[idx].laddr = laddr;
735 errcode = pt_iscache_unlock(iscache);
739 return isid_from_index(idx);
/* Error-path cleanup (labels elided): unlock, detach, clear the lru, and
 * drop the reference before returning @errcode.
 */
742 (void) pt_iscache_unlock(iscache);
745 (void) pt_section_detach(section, iscache);
748 (void) pt_iscache_lru_clear(iscache);
751 (void) pt_section_put(section);
/* Public lookup: take the lock, delegate to pt_iscache_find_locked(),
 * unlock, and return the isid (or error).  The @laddr parameter, error
 * checks, and final return are elided in this view.
 */
756 int pt_iscache_find(struct pt_image_section_cache *iscache,
757 const char *filename, uint64_t offset, uint64_t size,
762 errcode = pt_iscache_lock(iscache);
766 isid = pt_iscache_find_locked(iscache, filename, offset, size, laddr);
768 errcode = pt_iscache_unlock(iscache);
/* Look up @isid in @iscache and return its section and load address
 * through @section and @laddr.  On success, the caller receives a
 * reference to the section (via pt_section_get()) and must put it.
 *
 * Returns -pte_internal for bad arguments or an out-of-range isid,
 * -pte_bad_image for a non-positive or unknown isid.
 *
 * NOTE(review): the isid <= 0 check, the @index/@errcode/@status
 * declarations, and the final returns are elided in this view.
 */
775 int pt_iscache_lookup(struct pt_image_section_cache *iscache,
776 struct pt_section **section, uint64_t *laddr, int isid)
781 if (!iscache || !section || !laddr)
782 return -pte_internal;
785 return -pte_bad_image;
788 if (isid > UINT16_MAX)
789 return -pte_internal;
/* isids are index + 1 biased, so the cast back presumably follows an
 * elided decrement - confirm against the full source.
 */
791 index = (uint16_t) isid;
793 errcode = pt_iscache_lock(iscache);
797 if (iscache->size <= index)
798 status = -pte_bad_image;
800 const struct pt_iscache_entry *entry;
802 entry = &iscache->entries[index];
803 *section = entry->section;
804 *laddr = entry->laddr;
806 status = pt_section_get(*section);
809 errcode = pt_iscache_unlock(iscache);
/* Remove all entries from @iscache: detach the entries array and lru list
 * under the lock, reset the counters, then free the lru list and detach
 * and put each section outside the lock.
 *
 * NOTE(review): the @end/@idx declarations, the lru detach, the size
 * reset, several error checks, and free(entries) are elided in this view.
 */
816 int pt_iscache_clear(struct pt_image_section_cache *iscache)
818 struct pt_iscache_lru_entry *lru;
819 struct pt_iscache_entry *entries;
824 return -pte_internal;
826 errcode = pt_iscache_lock(iscache);
830 entries = iscache->entries;
834 iscache->entries = NULL;
835 iscache->capacity = 0;
838 iscache->used = 0ull;
840 errcode = pt_iscache_unlock(iscache);
/* Free the detached lru list after unlocking - freeing unmaps sections. */
844 errcode = pt_iscache_lru_free(lru);
848 for (idx = 0; idx < end; ++idx) {
849 struct pt_section *section;
851 section = entries[idx].section;
853 /* We do not zero-initialize the array - a NULL check is
856 errcode = pt_section_detach(section, iscache);
860 errcode = pt_section_put(section);
/* Allocate and initialize a new image section cache named @name (name is
 * optional and copied).  Returns NULL on allocation failure - the NULL
 * check after malloc is elided in this view.
 */
869 struct pt_image_section_cache *pt_iscache_alloc(const char *name)
871 struct pt_image_section_cache *iscache;
873 iscache = malloc(sizeof(*iscache));
875 pt_iscache_init(iscache, name);
/* Finalize and free @iscache; counterpart of pt_iscache_alloc().  The
 * NULL check and the free() call are elided in this view.
 */
880 void pt_iscache_free(struct pt_image_section_cache *iscache)
885 pt_iscache_fini(iscache);
/* Set the cache memory limit to @limit bytes and, if the cache already
 * uses more than that, prune the lru list down to it.  The pruned tail is
 * freed after unlocking.
 *
 * NOTE(review): the NULL check, @status/@tail initialization, and the
 * post-lock error check are elided in this view.
 */
889 int pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit)
891 struct pt_iscache_lru_entry *tail;
900 errcode = pt_iscache_lock(iscache);
904 iscache->limit = limit;
905 if (limit < iscache->used)
906 status = pt_iscache_lru_prune(iscache, &tail);
908 errcode = pt_iscache_unlock(iscache);
910 if (errcode < 0 || status < 0)
911 return (status < 0) ? status : errcode;
913 return pt_iscache_lru_free(tail);
/* Return @iscache's name (may be NULL); the NULL-@iscache guard is elided
 * in this view.  The returned pointer is owned by @iscache.
 */
916 const char *pt_iscache_name(const struct pt_image_section_cache *iscache)
921 return iscache->name;
924 int pt_iscache_add_file(struct pt_image_section_cache *iscache,
925 const char *filename, uint64_t offset, uint64_t size,
928 struct pt_section *section;
929 int errcode, match, isid;
931 if (!iscache || !filename)
934 errcode = pt_iscache_lock(iscache);
938 match = pt_iscache_find_section_locked(iscache, filename, offset,
941 (void) pt_iscache_unlock(iscache);
945 /* If we found a perfect match, we will share the existing entry.
947 * If we found a section, we need to grab a reference before we unlock.
949 * If we didn't find a matching section, we create a new section, which
950 * implicitly gives us a reference to it.
952 if (match < iscache->size) {
953 const struct pt_iscache_entry *entry;
955 entry = &iscache->entries[match];
956 if (entry->laddr == vaddr) {
957 errcode = pt_iscache_unlock(iscache);
961 return isid_from_index((uint16_t) match);
964 section = entry->section;
966 errcode = pt_section_get(section);
968 (void) pt_iscache_unlock(iscache);
972 errcode = pt_iscache_unlock(iscache);
974 (void) pt_section_put(section);
978 errcode = pt_iscache_unlock(iscache);
983 errcode = pt_mk_section(§ion, filename, offset, size);
988 /* We unlocked @iscache and hold a reference to @section. */
989 isid = pt_iscache_add(iscache, section, vaddr);
991 /* We grab a reference when we add the section. Drop the one we
994 errcode = pt_section_put(section);
1002 int pt_iscache_read(struct pt_image_section_cache *iscache, uint8_t *buffer,
1003 uint64_t size, int isid, uint64_t vaddr)
1005 struct pt_section *section;
1007 int errcode, status;
1009 if (!iscache || !buffer || !size)
1010 return -pte_invalid;
1012 errcode = pt_iscache_lookup(iscache, §ion, &laddr, isid);
1016 if (vaddr < laddr) {
1017 (void) pt_section_put(section);
1023 errcode = pt_section_map(section);
1025 (void) pt_section_put(section);
1029 /* We truncate the read if it gets too big. The user is expected to
1030 * issue further reads for the remaining part.
1032 if (UINT16_MAX < size)
1035 status = pt_section_read(section, buffer, (uint16_t) size, vaddr);
1037 errcode = pt_section_unmap(section);
1039 (void) pt_section_put(section);
1043 errcode = pt_section_put(section);
/* On-map notification callback: @section was mapped, so add (or move) it
 * to the front of @iscache->lru and prune if the cache grew past its
 * limit.  The pruned tail is freed after unlocking.
 *
 * NOTE(review): argument checks, @tail initialization, and the post-lock
 * error check are elided in this view.  Sent while holding @section's
 * attach lock - see the deadlock discussion in pt_iscache_add().
 */
1050 int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
1051 struct pt_section *section)
1053 struct pt_iscache_lru_entry *tail;
1054 int errcode, status;
1058 errcode = pt_iscache_lock(iscache);
/* A positive lru_add status requests pruning. */
1062 status = pt_iscache_lru_add(iscache, section);
1064 status = pt_iscache_lru_prune(iscache, &tail);
1066 errcode = pt_iscache_unlock(iscache);
1068 if (errcode < 0 || status < 0)
1069 return (status < 0) ? status : errcode;
1071 return pt_iscache_lru_free(tail);
/* On-resize notification callback: @section's mapped memory size changed
 * to @memsize, so update its lru accounting and prune if the cache grew
 * past its limit.  The pruned tail is freed after unlocking.
 *
 * NOTE(review): argument checks, @tail initialization, and the post-lock
 * error check are elided in this view; structure mirrors
 * pt_iscache_notify_map() above.
 */
1074 int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
1075 struct pt_section *section, uint64_t memsize)
1077 struct pt_iscache_lru_entry *tail;
1078 int errcode, status;
1082 errcode = pt_iscache_lock(iscache);
1086 status = pt_iscache_lru_resize(iscache, section, memsize);
1088 status = pt_iscache_lru_prune(iscache, &tail);
1090 errcode = pt_iscache_unlock(iscache);
1092 if (errcode < 0 || status < 0)
1093 return (status < 0) ? status : errcode;
1095 return pt_iscache_lru_free(tail);