2 * Copyright (c) 2016-2018, Intel Corporation
4 * Redistribution and use in source and binary forms, with or without
5 * modification, are permitted provided that the following conditions are met:
7 * * Redistributions of source code must retain the above copyright notice,
8 * this list of conditions and the following disclaimer.
9 * * Redistributions in binary form must reproduce the above copyright notice,
10 * this list of conditions and the following disclaimer in the documentation
11 * and/or other materials provided with the distribution.
12 * * Neither the name of Intel Corporation nor the names of its contributors
13 * may be used to endorse or promote products derived from this software
14 * without specific prior written permission.
16 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
17 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
18 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
19 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
20 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
21 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
22 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
23 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
24 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
25 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
26 * POSSIBILITY OF SUCH DAMAGE.
29 #include "pt_image_section_cache.h"
30 #include "pt_section.h"
/* Return a heap-allocated copy of the NUL-terminated string @str.
 *
 * NOTE(review): interior lines are elided in this view - presumably the
 * NULL check on @str, the strlen() computing @len, and the malloc-failure
 * check; confirm against the full file.
 */
37 static char *dupstr(const char *str)
46 dup = malloc(len + 1);
50 return strcpy(dup, str);
/* Initialize @iscache, giving it the (optional) name @name.
 *
 * Zero-initializes the cache, sets the memory limit to "unlimited"
 * (UINT64_MAX), stores a private copy of @name via dupstr(), and, when
 * built with FEATURE_THREADS, creates the cache mutex.
 *
 * NOTE(review): argument checks and return statements are elided in this
 * view; the function presumably returns a negative pt_error_code on
 * failure (e.g. -pte_bad_lock when mtx_init() fails).
 */
53 int pt_iscache_init(struct pt_image_section_cache *iscache, const char *name)
58 memset(iscache, 0, sizeof(*iscache));
59 iscache->limit = UINT64_MAX;
61 iscache->name = dupstr(name);
66 #if defined(FEATURE_THREADS)
70 errcode = mtx_init(&iscache->lock, mtx_plain);
71 if (errcode != thrd_success)
74 #endif /* defined(FEATURE_THREADS) */
/* Finalize @iscache.
 *
 * Drops all cached sections via pt_iscache_clear() (best effort - the
 * return value is deliberately ignored) and destroys the cache mutex when
 * built with FEATURE_THREADS.
 *
 * NOTE(review): the NULL check on @iscache and the free() of owned storage
 * (e.g. @iscache->name) are elided in this view.
 */
79 void pt_iscache_fini(struct pt_image_section_cache *iscache)
84 (void) pt_iscache_clear(iscache);
87 #if defined(FEATURE_THREADS)
89 mtx_destroy(&iscache->lock);
91 #endif /* defined(FEATURE_THREADS) */
/* Acquire the @iscache lock.
 *
 * A no-op (beyond elided argument checking) in single-threaded builds.
 *
 * Returns zero on success, -pte_bad_lock if mtx_lock() fails.
 */
94 static inline int pt_iscache_lock(struct pt_image_section_cache *iscache)
99 #if defined(FEATURE_THREADS)
103 errcode = mtx_lock(&iscache->lock);
104 if (errcode != thrd_success)
105 return -pte_bad_lock;
107 #endif /* defined(FEATURE_THREADS) */
/* Release the @iscache lock.
 *
 * A no-op (beyond argument checking) in single-threaded builds.
 *
 * Returns zero on success, -pte_internal for NULL @iscache (check elided
 * in this view), -pte_bad_lock if mtx_unlock() fails.
 */
112 static inline int pt_iscache_unlock(struct pt_image_section_cache *iscache)
115 return -pte_internal;
117 #if defined(FEATURE_THREADS)
121 errcode = mtx_unlock(&iscache->lock);
122 if (errcode != thrd_success)
123 return -pte_bad_lock;
125 #endif /* defined(FEATURE_THREADS) */
130 static inline int isid_from_index(uint16_t index)
/* Grow the @iscache entries array by a fixed increment of 8 slots.
 *
 * Returns zero on success, -pte_internal for NULL @iscache.
 *
 * NOTE(review): the capacity is uint16_t; the @target < @capacity test
 * detects unsigned wrap-around. The realloc-failure branch and the error
 * return for overflow are elided in this view - the original pointer is
 * only installed after the checks, so the realloc anti-pattern (losing the
 * old buffer on failure) is presumably avoided; confirm.
 */
135 static int pt_iscache_expand(struct pt_image_section_cache *iscache)
137 struct pt_iscache_entry *entries;
138 uint16_t capacity, target;
141 return -pte_internal;
143 capacity = iscache->capacity;
144 target = capacity + 8;
146 /* Check for overflows. */
147 if (target < capacity)
150 entries = realloc(iscache->entries, target * sizeof(*entries));
154 iscache->capacity = target;
155 iscache->entries = entries;
/* Linearly search @iscache for an entry that matches @filename, @offset,
 * @size, AND @laddr exactly.
 *
 * The caller must hold the @iscache lock.
 *
 * Returns the matching entry's isid (via isid_from_index()) on success,
 * -pte_internal on bad arguments or a filename-less section.
 *
 * NOTE(review): the loop bound @end and the not-found return value are
 * elided in this view.
 */
159 static int pt_iscache_find_locked(struct pt_image_section_cache *iscache,
160 const char *filename, uint64_t offset,
161 uint64_t size, uint64_t laddr)
165 if (!iscache || !filename)
166 return -pte_internal;
169 for (idx = 0; idx < end; ++idx) {
170 const struct pt_iscache_entry *entry;
171 const struct pt_section *section;
172 const char *sec_filename;
173 uint64_t sec_offset, sec_size;
175 entry = &iscache->entries[idx];
177 /* We do not zero-initialize the array - a NULL check is
180 section = entry->section;
181 sec_filename = pt_section_filename(section);
182 sec_offset = pt_section_offset(section);
183 sec_size = pt_section_size(section);
/* Cheapest comparisons first; strcmp() only as the last resort. */
185 if (entry->laddr != laddr)
188 if (sec_offset != offset)
191 if (sec_size != size)
194 /* We should not have a section without a filename. */
196 return -pte_internal;
198 if (strcmp(sec_filename, filename) != 0)
201 return isid_from_index(idx);
/* Free the LRU list starting at @lru, unmapping each entry's section.
 *
 * NOTE(review): the loop advancing through the list, the free() of each
 * node, and the error-return plumbing are elided in this view; @trash
 * presumably holds the node being released.
 */
207 static int pt_iscache_lru_free(struct pt_iscache_lru_entry *lru)
210 struct pt_iscache_lru_entry *trash;
216 errcode = pt_section_unmap(trash->section);
/* Prune @iscache->lru down to @iscache->limit, detaching the pruned tail
 * into *@tail for the caller to free (outside the lock).
 *
 * The caller must hold the @iscache lock.
 *
 * Walks the list front-to-back accumulating sizes; once the running total
 * exceeds the limit, everything from @lru onward is cut off and @used is
 * reduced accordingly.
 *
 * Returns zero on success, -pte_internal on bad arguments or if called
 * when no pruning is actually needed (the fall-through at the end).
 */
226 static int pt_iscache_lru_prune(struct pt_image_section_cache *iscache,
227 struct pt_iscache_lru_entry **tail)
229 struct pt_iscache_lru_entry *lru, **pnext;
230 uint64_t limit, used;
232 if (!iscache || !tail)
233 return -pte_internal;
235 limit = iscache->limit;
238 pnext = &iscache->lru;
239 for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
245 /* The cache got too big; prune it starting from @lru. */
246 iscache->used = used - lru->size;
253 /* We shouldn't prune the cache unnecessarily. */
254 return -pte_internal;
257 /* Add @section to the front of @iscache->lru.
259 * Returns a positive integer if we need to prune the cache.
260 * Returns zero if we don't need to prune the cache.
261 * Returns a negative pt_error_code otherwise.
/* NOTE(review): the missing 'c' in "isache" matches the call site in
 * pt_iscache_lru_add() below - do not "fix" one without the other.
 * The section is map-shared (pt_section_map_share()) so it stays mapped
 * while cached; the malloc-failure path unmaps it again.
 */
263 static int pt_isache_lru_new(struct pt_image_section_cache *iscache,
264 struct pt_section *section)
266 struct pt_iscache_lru_entry *lru;
267 uint64_t memsize, used, total, limit;
271 return -pte_internal;
273 errcode = pt_section_memsize(section, &memsize);
277 /* Don't try to add the section if it is too big. We'd prune it again
278 * together with all other sections in our cache.
280 limit = iscache->limit;
284 errcode = pt_section_map_share(section);
288 lru = malloc(sizeof(*lru));
290 (void) pt_section_unmap(section);
294 lru->section = section;
297 lru->next = iscache->lru;
/* Guard the running total against uint64_t wrap-around. */
300 used = iscache->used;
301 total = used + memsize;
302 if (total < used || total < memsize)
303 return -pte_overflow;
305 iscache->used = total;
307 return (limit < total) ? 1 : 0;
310 /* Add or move @section to the front of @iscache->lru.
312 * Returns a positive integer if we need to prune the cache.
313 * Returns zero if we don't need to prune the cache.
314 * Returns a negative pt_error_code otherwise.
/* The caller must hold the @iscache lock (caller-locked like the other
 * lru helpers; see pt_iscache_lru_clear()'s contrasting comment).
 */
316 static int pt_iscache_lru_add(struct pt_image_section_cache *iscache,
317 struct pt_section *section)
319 struct pt_iscache_lru_entry *lru, **pnext;
322 return -pte_internal;
324 pnext = &iscache->lru;
325 for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
327 if (lru->section != section)
330 /* We found it in the cache. Move it to the front. */
332 lru->next = iscache->lru;
338 /* We didn't find it in the cache. Add it. */
339 return pt_isache_lru_new(iscache, section);
343 /* Remove @section from @iscache->lru.
345 * Returns zero on success, a negative pt_error_code otherwise.
/* The removed node is released via pt_iscache_lru_free() (which also
 * unmaps the section). Not finding @section in the list is not an error -
 * the section may simply not be cached.
 */
347 static int pt_iscache_lru_remove(struct pt_image_section_cache *iscache,
348 const struct pt_section *section)
350 struct pt_iscache_lru_entry *lru, **pnext;
353 return -pte_internal;
355 pnext = &iscache->lru;
356 for (lru = *pnext; lru; pnext = &lru->next, lru = *pnext) {
358 if (lru->section != section)
361 /* We found it in the cache. Remove it. */
367 return pt_iscache_lru_free(lru);
371 /* Add or move @section to the front of @iscache->lru and update its size.
373 * Returns a positive integer if we need to prune the cache.
374 * Returns zero if we don't need to prune the cache.
375 * Returns a negative pt_error_code otherwise.
/* Delegates the add/move to pt_iscache_lru_add(); then adjusts
 * @iscache->used by the difference between the entry's old size and the
 * new @memsize (the arithmetic lines are elided in this view).
 */
377 static int pt_iscache_lru_resize(struct pt_image_section_cache *iscache,
378 struct pt_section *section, uint64_t memsize)
380 struct pt_iscache_lru_entry *lru;
381 uint64_t oldsize, used;
385 return -pte_internal;
387 status = pt_iscache_lru_add(iscache, section);
394 return -pte_internal;
398 /* If @section is cached, it must be first.
400 * We may choose not to cache it, though, e.g. if it is too big.
402 if (lru->section != section) {
403 if (iscache->limit < memsize)
406 return -pte_internal;
412 /* If we need to prune anyway, we're done. */
416 used = iscache->used;
420 iscache->used = used;
422 return (iscache->limit < used) ? 1 : 0;
425 /* Clear @iscache->lru.
427 * Unlike other iscache_lru functions, the caller does not lock @iscache.
429 * Return zero on success, a negative pt_error_code otherwise.
/* Takes the lock itself, detaches the whole list, resets @used to zero,
 * unlocks, then frees the detached list outside the lock.
 */
431 static int pt_iscache_lru_clear(struct pt_image_section_cache *iscache)
433 struct pt_iscache_lru_entry *lru;
436 errcode = pt_iscache_lock(iscache);
442 iscache->used = 0ull;
444 errcode = pt_iscache_unlock(iscache);
448 return pt_iscache_lru_free(lru);
451 /* Search @iscache for a partial or exact match of @section loaded at @laddr and
452 * return the corresponding index or @iscache->size if no match is found.
454 * The caller must lock @iscache.
456 * Returns a non-zero index on success, a negative pt_error_code otherwise.
/* A "partial" match is a section with the same filename/offset/size at a
 * different load address; @match remembers it while the scan continues to
 * look for a perfect (same @laddr) match.
 */
459 pt_iscache_find_section_locked(const struct pt_image_section_cache *iscache,
460 const char *filename, uint64_t offset,
461 uint64_t size, uint64_t laddr)
463 const struct pt_section *section;
467 if (!iscache || !filename)
468 return -pte_internal;
471 match = end = iscache->size;
472 for (idx = 0; idx < end; ++idx) {
473 const struct pt_iscache_entry *entry;
474 const struct pt_section *sec;
476 entry = &iscache->entries[idx];
478 /* We do not zero-initialize the array - a NULL check is
481 sec = entry->section;
483 /* Avoid redundant match checks. */
484 if (sec != section) {
485 const char *sec_filename;
487 /* We don't have duplicates. Skip the check. */
491 if (offset != pt_section_offset(sec))
494 if (size != pt_section_size(sec))
497 sec_filename = pt_section_filename(sec);
499 return -pte_internal;
501 if (strcmp(filename, sec_filename) != 0)
504 /* Use the cached section instead. */
509 /* If we didn't continue, @section == @sec and we have a match.
511 * If we also find a matching load address, we're done.
513 if (laddr == entry->laddr)
/* Add @section at load address @laddr to @iscache, sharing an existing
 * matching section if one is found.
 *
 * On success, hands the caller-provided reference and the attach over to
 * @iscache and returns the entry's isid. Returns a negative pt_error_code
 * otherwise.
 *
 * The lock-ordering rationale (attach before lock) and the retry loop are
 * explained by the original comments below; read them before touching the
 * ordering of pt_section_get/attach/lock calls - it is deadlock-avoidance,
 * not style.
 *
 * NOTE(review): loop framing, several if-error checks, and the
 * out_unlock_detach / cleanup labels are elided in this view; the final
 * four (void)-statements are the error-cleanup ladder.
 */
520 int pt_iscache_add(struct pt_image_section_cache *iscache,
521 struct pt_section *section, uint64_t laddr)
523 const char *filename;
524 uint64_t offset, size;
528 if (!iscache || !section)
529 return -pte_internal;
531 /* We must have a filename for @section. */
532 filename = pt_section_filename(section);
534 return -pte_internal;
536 offset = pt_section_offset(section);
537 size = pt_section_size(section);
539 /* Adding a section is slightly complicated by a potential deadlock
542 * - in order to add a section, we need to attach to it, which
543 * requires taking the section's attach lock.
545 * - if we are already attached to it, we may receive on-map
546 * notifications, which will be sent while holding the attach lock
547 * and require taking the iscache lock.
549 * Hence we can't attach to a section while holding the iscache lock.
552 * We therefore attach to @section first and then lock @iscache.
554 * This opens a small window where an existing @section may be removed
555 * from @iscache and replaced by a new matching section. We would want
556 * to share that new section rather than adding a duplicate @section.
558 * After locking @iscache, we therefore check for existing matching
559 * sections and, if one is found, update @section. This involves
560 * detaching from @section and attaching to the existing section.
562 * And for this, we will have to temporarily unlock @iscache again.
564 errcode = pt_section_get(section);
568 errcode = pt_section_attach(section, iscache);
572 errcode = pt_iscache_lock(iscache);
576 /* We may need to repeat this step.
578 * Typically we don't and this takes only a single iteration. One
579 * scenario where we do repeat this is when adding a section with an
580 * out-of-bounds size.
582 * We will not find a matching section in pt_iscache_add_file() so we
583 * create a new section. This will have its size reduced to match the
586 * For this reduced size, we may now find an existing section, and we
587 * will take another trip in the below loop.
590 const struct pt_iscache_entry *entry;
591 struct pt_section *sec;
594 /* Find an existing section matching @section that we'd share
595 * rather than adding @section.
597 match = pt_iscache_find_section_locked(iscache, filename,
598 offset, size, laddr);
601 goto out_unlock_detach;
604 /* We're done if we have not found a matching section. */
605 if (iscache->size <= match)
608 entry = &iscache->entries[match];
610 /* We're also done if we found the same section again.
612 * We further check for a perfect match. In that case, we don't
613 * need to insert anything, at all.
615 sec = entry->section;
616 if (sec == section) {
617 if (entry->laddr == laddr) {
/* Perfect duplicate: drop our extra reference/attach and
 * return the existing entry's isid.
 */
618 errcode = pt_iscache_unlock(iscache);
622 errcode = pt_section_detach(section, iscache);
626 errcode = pt_section_put(section);
630 return isid_from_index((uint16_t) match);
636 /* We update @section to share the existing @sec.
638 * This requires detaching from @section, which, in turn,
639 * requires temporarily unlocking @iscache.
641 * We further need to remove @section from @iscache->lru.
643 errcode = pt_section_get(sec);
645 goto out_unlock_detach;
647 errcode = pt_iscache_unlock(iscache);
649 (void) pt_section_put(sec);
653 errcode = pt_section_detach(section, iscache);
655 (void) pt_section_put(sec);
659 errcode = pt_section_attach(sec, iscache);
661 (void) pt_section_put(sec);
665 errcode = pt_iscache_lock(iscache);
667 (void) pt_section_put(section);
668 /* Complete the swap for cleanup. */
673 /* We may have received on-map notifications for @section and we
674 * may have added @section to @iscache->lru.
676 * Since we're still holding a reference to it, no harm has been
677 * done. But we need to remove it before we drop our reference.
679 errcode = pt_iscache_lru_remove(iscache, section);
681 (void) pt_section_put(section);
682 /* Complete the swap for cleanup. */
684 goto out_unlock_detach;
687 /* Drop the reference to @section. */
688 errcode = pt_section_put(section);
690 /* Complete the swap for cleanup. */
692 goto out_unlock_detach;
697 * We will try again in the next iteration.
702 /* Expand the cache, if necessary. */
703 if (iscache->capacity <= iscache->size) {
704 /* We must never exceed the capacity. */
705 if (iscache->capacity < iscache->size) {
706 errcode = -pte_internal;
707 goto out_unlock_detach;
710 errcode = pt_iscache_expand(iscache);
712 goto out_unlock_detach;
714 /* Make sure it is big enough, now. */
715 if (iscache->capacity <= iscache->size) {
716 errcode = -pte_internal;
717 goto out_unlock_detach;
721 /* Insert a new entry for @section at @laddr.
723 * This hands both attach and reference over to @iscache. We will
724 * detach and drop the reference again when the entry is removed.
726 idx = iscache->size++;
728 iscache->entries[idx].section = section;
729 iscache->entries[idx].laddr = laddr;
731 errcode = pt_iscache_unlock(iscache);
735 return isid_from_index(idx);
/* Error-cleanup ladder (labels elided in this view): unlock, detach,
 * clear the lru, drop our reference, and return @errcode.
 */
738 (void) pt_iscache_unlock(iscache);
741 (void) pt_section_detach(section, iscache);
744 (void) pt_iscache_lru_clear(iscache);
747 (void) pt_section_put(section);
/* Find the isid of the section matching @filename/@offset/@size at @laddr.
 *
 * Thin locking wrapper around pt_iscache_find_locked(); the lock/unlock
 * error handling and final return are elided in this view.
 */
752 int pt_iscache_find(struct pt_image_section_cache *iscache,
753 const char *filename, uint64_t offset, uint64_t size,
758 errcode = pt_iscache_lock(iscache);
762 isid = pt_iscache_find_locked(iscache, filename, offset, size, laddr);
764 errcode = pt_iscache_unlock(iscache);
/* Look up the section identified by @isid.
 *
 * On success, stores the section in *@section and its load address in
 * *@laddr, and hands the caller a reference via pt_section_get() - the
 * caller must pt_section_put() it when done.
 *
 * Returns zero on success, -pte_internal on bad arguments or out-of-range
 * @isid, -pte_bad_image for an unknown isid.
 */
771 int pt_iscache_lookup(struct pt_image_section_cache *iscache,
772 struct pt_section **section, uint64_t *laddr, int isid)
777 if (!iscache || !section || !laddr)
778 return -pte_internal;
781 return -pte_bad_image;
/* isids fit uint16_t by construction (see isid_from_index()). */
784 if (isid > UINT16_MAX)
785 return -pte_internal;
787 index = (uint16_t) isid;
789 errcode = pt_iscache_lock(iscache);
793 if (iscache->size <= index)
794 status = -pte_bad_image;
796 const struct pt_iscache_entry *entry;
798 entry = &iscache->entries[index];
799 *section = entry->section;
800 *laddr = entry->laddr;
802 status = pt_section_get(*section);
805 errcode = pt_iscache_unlock(iscache);
/* Remove all entries from @iscache.
 *
 * Detaches the entries array and the lru list under the lock, resets the
 * counters, then frees the lru list and detaches/puts every section
 * outside the lock (to respect the attach-before-lock ordering documented
 * in pt_iscache_add()).
 *
 * Returns zero on success, a negative pt_error_code otherwise.
 */
812 int pt_iscache_clear(struct pt_image_section_cache *iscache)
814 struct pt_iscache_lru_entry *lru;
815 struct pt_iscache_entry *entries;
820 return -pte_internal;
822 errcode = pt_iscache_lock(iscache);
826 entries = iscache->entries;
830 iscache->entries = NULL;
831 iscache->capacity = 0;
834 iscache->used = 0ull;
836 errcode = pt_iscache_unlock(iscache);
840 errcode = pt_iscache_lru_free(lru);
844 for (idx = 0; idx < end; ++idx) {
845 struct pt_section *section;
847 section = entries[idx].section;
849 /* We do not zero-initialize the array - a NULL check is
852 errcode = pt_section_detach(section, iscache);
856 errcode = pt_section_put(section);
/* Allocate and initialize a new image section cache named @name.
 *
 * Returns the new cache or, presumably, NULL on allocation failure (the
 * malloc check and return are elided in this view).
 */
865 struct pt_image_section_cache *pt_iscache_alloc(const char *name)
867 struct pt_image_section_cache *iscache;
869 iscache = malloc(sizeof(*iscache));
871 pt_iscache_init(iscache, name);
/* Finalize and free @iscache (counterpart of pt_iscache_alloc()).
 *
 * NOTE(review): the NULL check and the free(iscache) call are elided in
 * this view.
 */
876 void pt_iscache_free(struct pt_image_section_cache *iscache)
881 pt_iscache_fini(iscache);
/* Set the memory limit of @iscache to @limit bytes and, if the cache is
 * already over the new limit, prune it.
 *
 * The pruned tail is freed outside the lock via pt_iscache_lru_free().
 *
 * Returns zero on success, a negative pt_error_code otherwise; a prune
 * error takes precedence over an unlock error.
 */
885 int pt_iscache_set_limit(struct pt_image_section_cache *iscache, uint64_t limit)
887 struct pt_iscache_lru_entry *tail;
896 errcode = pt_iscache_lock(iscache);
900 iscache->limit = limit;
901 if (limit < iscache->used)
902 status = pt_iscache_lru_prune(iscache, &tail);
904 errcode = pt_iscache_unlock(iscache);
906 if (errcode < 0 || status < 0)
907 return (status < 0) ? status : errcode;
909 return pt_iscache_lru_free(tail);
/* Return the name @iscache was created with (may be NULL).
 *
 * NOTE(review): the NULL check on @iscache is elided in this view.
 */
912 const char *pt_iscache_name(const struct pt_image_section_cache *iscache)
917 return iscache->name;
/* Add the [@offset, @offset + @size) range of @filename at @vaddr.
 *
 * Reuses a cached section when filename/offset/size match (a perfect
 * match, including @vaddr, returns the existing isid directly); otherwise
 * creates a new section via pt_mk_section() and inserts it with
 * pt_iscache_add().
 *
 * Returns the entry's isid on success, a negative pt_error_code otherwise.
 *
 * NOTE(review): several error checks and returns are elided in this view;
 * the reference obtained before unlocking (or implicitly from
 * pt_mk_section()) is dropped again after pt_iscache_add(), which takes
 * its own reference.
 */
920 int pt_iscache_add_file(struct pt_image_section_cache *iscache,
921 const char *filename, uint64_t offset, uint64_t size,
924 struct pt_section *section;
925 int errcode, match, isid;
927 if (!iscache || !filename)
930 errcode = pt_iscache_lock(iscache);
934 match = pt_iscache_find_section_locked(iscache, filename, offset,
937 (void) pt_iscache_unlock(iscache);
941 /* If we found a perfect match, we will share the existing entry.
943 * If we found a section, we need to grab a reference before we unlock.
945 * If we didn't find a matching section, we create a new section, which
946 * implicitly gives us a reference to it.
948 if (match < iscache->size) {
949 const struct pt_iscache_entry *entry;
951 entry = &iscache->entries[match];
952 if (entry->laddr == vaddr) {
953 errcode = pt_iscache_unlock(iscache);
957 return isid_from_index((uint16_t) match);
960 section = entry->section;
962 errcode = pt_section_get(section);
964 (void) pt_iscache_unlock(iscache);
968 errcode = pt_iscache_unlock(iscache);
970 (void) pt_section_put(section);
974 errcode = pt_iscache_unlock(iscache);
978 section = pt_mk_section(filename, offset, size);
983 /* We unlocked @iscache and hold a reference to @section. */
984 isid = pt_iscache_add(iscache, section, vaddr);
986 /* We grab a reference when we add the section. Drop the one we
989 errcode = pt_section_put(section);
997 int pt_iscache_read(struct pt_image_section_cache *iscache, uint8_t *buffer,
998 uint64_t size, int isid, uint64_t vaddr)
1000 struct pt_section *section;
1002 int errcode, status;
1004 if (!iscache || !buffer || !size)
1005 return -pte_invalid;
1007 errcode = pt_iscache_lookup(iscache, §ion, &laddr, isid);
1011 if (vaddr < laddr) {
1012 (void) pt_section_put(section);
1018 errcode = pt_section_map(section);
1020 (void) pt_section_put(section);
1024 /* We truncate the read if it gets too big. The user is expected to
1025 * issue further reads for the remaining part.
1027 if (UINT16_MAX < size)
1030 status = pt_section_read(section, buffer, (uint16_t) size, vaddr);
1032 errcode = pt_section_unmap(section);
1034 (void) pt_section_put(section);
1038 errcode = pt_section_put(section);
/* On-map notification callback: @section, attached to @iscache, has been
 * mapped.
 *
 * Adds (or moves) @section to the front of the lru list and, if
 * pt_iscache_lru_add() signals that the cache grew past its limit
 * (positive return), prunes it. The pruned tail is freed outside the
 * lock.
 *
 * Called while the section's attach lock is held - see the deadlock
 * discussion in pt_iscache_add().
 *
 * Returns zero on success, a negative pt_error_code otherwise; an lru
 * error takes precedence over an unlock error.
 */
1045 int pt_iscache_notify_map(struct pt_image_section_cache *iscache,
1046 struct pt_section *section)
1048 struct pt_iscache_lru_entry *tail;
1049 int errcode, status;
1053 errcode = pt_iscache_lock(iscache);
1057 status = pt_iscache_lru_add(iscache, section);
1059 status = pt_iscache_lru_prune(iscache, &tail);
1061 errcode = pt_iscache_unlock(iscache);
1063 if (errcode < 0 || status < 0)
1064 return (status < 0) ? status : errcode;
1066 return pt_iscache_lru_free(tail);
1069 int pt_iscache_notify_resize(struct pt_image_section_cache *iscache,
1070 struct pt_section *section, uint64_t memsize)
1072 struct pt_iscache_lru_entry *tail;
1073 int errcode, status;
1077 errcode = pt_iscache_lock(iscache);
1081 status = pt_iscache_lru_resize(iscache, section, memsize);
1083 status = pt_iscache_lru_prune(iscache, &tail);
1085 errcode = pt_iscache_unlock(iscache);
1087 if (errcode < 0 || status < 0)
1088 return (status < 0) ? status : errcode;
1090 return pt_iscache_lru_free(tail);