/*
 * Copyright (c) 2014-2019, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "pt_query_decoder.h"

#include "pt_sync.h"
#include "pt_decoder_function.h"
#include "pt_packet.h"
#include "pt_packet_decoder.h"
#include "pt_config.h"
#include "pt_opcodes.h"
#include "pt_compiler.h"

#include <string.h>
#include <stddef.h>
#include <stdlib.h>
46 /* Find a FUP in a PSB+ header.
48 * The packet @decoder must be synchronized onto the trace stream at the
49 * beginning or somewhere inside a PSB+ header.
51 * It uses @packet to hold trace packets during its search. If the search is
52 * successful, @packet will contain the first (and hopefully only) FUP packet in
53 * this PSB+. Otherwise, @packet may contain anything.
55 * Returns one if a FUP packet is found (@packet will contain it).
56 * Returns zero if no FUP packet is found (@packet is undefined).
57 * Returns a negative error code otherwise.
59 static int pt_qry_find_header_fup(struct pt_packet *packet,
60 struct pt_packet_decoder *decoder)
62 if (!packet || !decoder)
68 errcode = pt_pkt_next(decoder, packet, sizeof(*packet));
72 switch (packet->type) {
74 /* Ignore the packet. */
78 /* There's no FUP in here. */
88 int pt_qry_decoder_init(struct pt_query_decoder *decoder,
89 const struct pt_config *config)
96 memset(decoder, 0, sizeof(*decoder));
98 errcode = pt_config_from_user(&decoder->config, config);
102 pt_last_ip_init(&decoder->ip);
103 pt_tnt_cache_init(&decoder->tnt);
104 pt_time_init(&decoder->time);
105 pt_time_init(&decoder->last_time);
106 pt_tcal_init(&decoder->tcal);
107 pt_evq_init(&decoder->evq);
112 struct pt_query_decoder *pt_qry_alloc_decoder(const struct pt_config *config)
114 struct pt_query_decoder *decoder;
117 decoder = malloc(sizeof(*decoder));
121 errcode = pt_qry_decoder_init(decoder, config);
130 void pt_qry_decoder_fini(struct pt_query_decoder *decoder)
/* Finalize and free a decoder allocated with pt_qry_alloc_decoder().
 *
 * Passing NULL is safe; free(NULL) is a no-op.
 */
void pt_qry_free_decoder(struct pt_query_decoder *decoder)
{
	pt_qry_decoder_fini(decoder);
	free(decoder);
}
143 static void pt_qry_reset(struct pt_query_decoder *decoder)
148 decoder->enabled = 0;
149 decoder->consume_packet = 0;
150 decoder->event = NULL;
152 pt_last_ip_init(&decoder->ip);
153 pt_tnt_cache_init(&decoder->tnt);
154 pt_time_init(&decoder->time);
155 pt_time_init(&decoder->last_time);
156 pt_tcal_init(&decoder->tcal);
157 pt_evq_init(&decoder->evq);
160 static int pt_qry_will_event(const struct pt_query_decoder *decoder)
162 const struct pt_decoder_function *dfun;
165 return -pte_internal;
167 dfun = decoder->next;
171 if (dfun->flags & pdff_event)
174 if (dfun->flags & pdff_psbend)
175 return pt_evq_pending(&decoder->evq, evb_psbend);
177 if (dfun->flags & pdff_tip)
178 return pt_evq_pending(&decoder->evq, evb_tip);
180 if (dfun->flags & pdff_fup)
181 return pt_evq_pending(&decoder->evq, evb_fup);
186 static int pt_qry_will_eos(const struct pt_query_decoder *decoder)
188 const struct pt_decoder_function *dfun;
192 return -pte_internal;
194 dfun = decoder->next;
198 /* The decoding function may be NULL for two reasons:
200 * - we ran out of trace
201 * - we ran into a fetch error such as -pte_bad_opc
205 errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
206 return errcode == -pte_eos;
209 static int pt_qry_status_flags(const struct pt_query_decoder *decoder)
214 return -pte_internal;
216 /* Some packets force out TNT and any deferred TIPs in order to
217 * establish the correct context for the subsequent packet.
219 * Users are expected to first navigate to the correct code region
220 * by using up the cached TNT bits before interpreting any subsequent
223 * We do need to read ahead in order to signal upcoming events. We may
224 * have already decoded those packets while our user has not navigated
225 * to the correct code region, yet.
227 * In order to have our user use up the cached TNT bits first, we do
228 * not indicate the next event until the TNT cache is empty.
230 if (pt_tnt_cache_is_empty(&decoder->tnt)) {
231 if (pt_qry_will_event(decoder))
232 flags |= pts_event_pending;
234 if (pt_qry_will_eos(decoder))
241 static int pt_qry_provoke_fetch_error(const struct pt_query_decoder *decoder)
243 const struct pt_decoder_function *dfun;
247 return -pte_internal;
249 /* Repeat the decoder fetch to reproduce the error. */
250 errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
254 /* We must get some error or something's wrong. */
255 return -pte_internal;
258 static int pt_qry_read_ahead(struct pt_query_decoder *decoder)
261 return -pte_internal;
264 const struct pt_decoder_function *dfun;
267 errcode = pt_df_fetch(&decoder->next, decoder->pos,
272 dfun = decoder->next;
274 return -pte_internal;
277 return -pte_internal;
279 /* We're done once we reach
281 * - a branching related packet. */
282 if (dfun->flags & (pdff_tip | pdff_tnt))
285 /* - an event related packet. */
286 if (pt_qry_will_event(decoder))
289 /* Decode status update packets. */
290 errcode = dfun->decode(decoder);
292 /* Ignore truncated status packets at the end.
294 * Move beyond the packet and clear @decoder->next to
295 * indicate that we were not able to fetch the next
298 if (errcode == -pte_eos) {
299 decoder->pos = decoder->config.end;
300 decoder->next = NULL;
308 static int pt_qry_start(struct pt_query_decoder *decoder, const uint8_t *pos,
311 const struct pt_decoder_function *dfun;
314 if (!decoder || !pos)
317 pt_qry_reset(decoder);
322 errcode = pt_df_fetch(&decoder->next, pos, &decoder->config);
326 dfun = decoder->next;
328 /* We do need to start at a PSB in order to initialize the state. */
329 if (dfun != &pt_decode_psb)
332 /* Decode the PSB+ header to initialize the state. */
333 errcode = dfun->decode(decoder);
337 /* Fill in the start address.
338 * We do this before reading ahead since the latter may read an
339 * adjacent PSB+ that might change the decoder's IP, causing us
343 status = pt_last_ip_query(addr, &decoder->ip);
345 /* Make sure we don't clobber it later on. */
350 /* Read ahead until the first query-relevant packet. */
351 errcode = pt_qry_read_ahead(decoder);
355 /* We return the current decoder status. */
356 status = pt_qry_status_flags(decoder);
360 errcode = pt_last_ip_query(addr, &decoder->ip);
362 /* Indicate the missing IP in the status. */
364 status |= pts_ip_suppressed;
370 static int pt_qry_apply_tsc(struct pt_time *time, struct pt_time_cal *tcal,
371 const struct pt_packet_tsc *packet,
372 const struct pt_config *config)
376 /* We ignore configuration errors. They will result in imprecise
377 * calibration which will result in imprecise cycle-accurate timing.
379 * We currently do not track them.
381 errcode = pt_tcal_update_tsc(tcal, packet, config);
382 if (errcode < 0 && (errcode != -pte_bad_config))
385 /* We ignore configuration errors. They will result in imprecise
386 * timing and are tracked as packet losses in struct pt_time.
388 errcode = pt_time_update_tsc(time, packet, config);
389 if (errcode < 0 && (errcode != -pte_bad_config))
395 static int pt_qry_apply_header_tsc(struct pt_time *time,
396 struct pt_time_cal *tcal,
397 const struct pt_packet_tsc *packet,
398 const struct pt_config *config)
402 /* We ignore configuration errors. They will result in imprecise
403 * calibration which will result in imprecise cycle-accurate timing.
405 * We currently do not track them.
407 errcode = pt_tcal_header_tsc(tcal, packet, config);
408 if (errcode < 0 && (errcode != -pte_bad_config))
411 /* We ignore configuration errors. They will result in imprecise
412 * timing and are tracked as packet losses in struct pt_time.
414 errcode = pt_time_update_tsc(time, packet, config);
415 if (errcode < 0 && (errcode != -pte_bad_config))
421 static int pt_qry_apply_cbr(struct pt_time *time, struct pt_time_cal *tcal,
422 const struct pt_packet_cbr *packet,
423 const struct pt_config *config)
427 /* We ignore configuration errors. They will result in imprecise
428 * calibration which will result in imprecise cycle-accurate timing.
430 * We currently do not track them.
432 errcode = pt_tcal_update_cbr(tcal, packet, config);
433 if (errcode < 0 && (errcode != -pte_bad_config))
436 /* We ignore configuration errors. They will result in imprecise
437 * timing and are tracked as packet losses in struct pt_time.
439 errcode = pt_time_update_cbr(time, packet, config);
440 if (errcode < 0 && (errcode != -pte_bad_config))
446 static int pt_qry_apply_header_cbr(struct pt_time *time,
447 struct pt_time_cal *tcal,
448 const struct pt_packet_cbr *packet,
449 const struct pt_config *config)
453 /* We ignore configuration errors. They will result in imprecise
454 * calibration which will result in imprecise cycle-accurate timing.
456 * We currently do not track them.
458 errcode = pt_tcal_header_cbr(tcal, packet, config);
459 if (errcode < 0 && (errcode != -pte_bad_config))
462 /* We ignore configuration errors. They will result in imprecise
463 * timing and are tracked as packet losses in struct pt_time.
465 errcode = pt_time_update_cbr(time, packet, config);
466 if (errcode < 0 && (errcode != -pte_bad_config))
472 static int pt_qry_apply_tma(struct pt_time *time, struct pt_time_cal *tcal,
473 const struct pt_packet_tma *packet,
474 const struct pt_config *config)
478 /* We ignore configuration errors. They will result in imprecise
479 * calibration which will result in imprecise cycle-accurate timing.
481 * We currently do not track them.
483 errcode = pt_tcal_update_tma(tcal, packet, config);
484 if (errcode < 0 && (errcode != -pte_bad_config))
487 /* We ignore configuration errors. They will result in imprecise
488 * timing and are tracked as packet losses in struct pt_time.
490 errcode = pt_time_update_tma(time, packet, config);
491 if (errcode < 0 && (errcode != -pte_bad_config))
497 static int pt_qry_apply_mtc(struct pt_time *time, struct pt_time_cal *tcal,
498 const struct pt_packet_mtc *packet,
499 const struct pt_config *config)
503 /* We ignore configuration errors. They will result in imprecise
504 * calibration which will result in imprecise cycle-accurate timing.
506 * We currently do not track them.
508 errcode = pt_tcal_update_mtc(tcal, packet, config);
509 if (errcode < 0 && (errcode != -pte_bad_config))
512 /* We ignore configuration errors. They will result in imprecise
513 * timing and are tracked as packet losses in struct pt_time.
515 errcode = pt_time_update_mtc(time, packet, config);
516 if (errcode < 0 && (errcode != -pte_bad_config))
522 static int pt_qry_apply_cyc(struct pt_time *time, struct pt_time_cal *tcal,
523 const struct pt_packet_cyc *packet,
524 const struct pt_config *config)
529 /* We ignore configuration errors. They will result in imprecise
530 * calibration which will result in imprecise cycle-accurate timing.
532 * We currently do not track them.
534 errcode = pt_tcal_update_cyc(tcal, packet, config);
535 if (errcode < 0 && (errcode != -pte_bad_config))
538 /* We need the FastCounter to Cycles ratio below. Fall back to
539 * an invalid ratio of 0 if calibration has not kicked in, yet.
541 * This will be tracked as packet loss in struct pt_time.
543 errcode = pt_tcal_fcr(&fcr, tcal);
545 if (errcode == -pte_no_time)
551 /* We ignore configuration errors. They will result in imprecise
552 * timing and are tracked as packet losses in struct pt_time.
554 errcode = pt_time_update_cyc(time, packet, config, fcr);
555 if (errcode < 0 && (errcode != -pte_bad_config))
561 int pt_qry_sync_forward(struct pt_query_decoder *decoder, uint64_t *ip)
563 const uint8_t *pos, *sync, *begin;
570 begin = decoder->config.begin;
571 sync = decoder->sync;
580 return -pte_internal;
582 /* Start a bit earlier so we find PSB that have been partially consumed
583 * by a preceding packet.
586 if (ptps_psb <= space)
587 space = ptps_psb - 1;
591 errcode = pt_sync_forward(&sync, pos, &decoder->config);
595 return pt_qry_start(decoder, sync, ip);
598 int pt_qry_sync_backward(struct pt_query_decoder *decoder, uint64_t *ip)
600 const uint8_t *start, *sync;
606 start = decoder->pos;
608 start = decoder->config.end;
612 errcode = pt_sync_backward(&sync, sync, &decoder->config);
616 errcode = pt_qry_start(decoder, sync, ip);
618 /* Ignore incomplete trace segments at the end. We need
619 * a full PSB+ to start decoding.
621 if (errcode == -pte_eos)
627 /* An empty trace segment in the middle of the trace might bring
628 * us back to where we started.
630 * We're done when we reached a new position.
632 if (decoder->pos != start)
639 int pt_qry_sync_set(struct pt_query_decoder *decoder, uint64_t *ip,
642 const uint8_t *sync, *pos;
648 pos = decoder->config.begin + offset;
650 errcode = pt_sync_set(&sync, pos, &decoder->config);
654 return pt_qry_start(decoder, sync, ip);
657 int pt_qry_get_offset(const struct pt_query_decoder *decoder, uint64_t *offset)
659 const uint8_t *begin, *pos;
661 if (!decoder || !offset)
664 begin = decoder->config.begin;
670 *offset = (uint64_t) (int64_t) (pos - begin);
674 int pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
677 const uint8_t *begin, *sync;
679 if (!decoder || !offset)
682 begin = decoder->config.begin;
683 sync = decoder->sync;
688 *offset = (uint64_t) (int64_t) (sync - begin);
692 const struct pt_config *
693 pt_qry_get_config(const struct pt_query_decoder *decoder)
698 return &decoder->config;
701 static int pt_qry_cache_tnt(struct pt_query_decoder *decoder)
706 return -pte_internal;
709 const struct pt_decoder_function *dfun;
711 dfun = decoder->next;
713 return pt_qry_provoke_fetch_error(decoder);
716 return -pte_internal;
718 /* There's an event ahead of us. */
719 if (pt_qry_will_event(decoder))
720 return -pte_bad_query;
722 /* Diagnose a TIP that has not been part of an event. */
723 if (dfun->flags & pdff_tip)
724 return -pte_bad_query;
726 /* Clear the decoder's current event so we know when we
727 * accidentally skipped an event.
729 decoder->event = NULL;
731 /* Apply the decoder function. */
732 errcode = dfun->decode(decoder);
736 /* If we skipped an event, we're in trouble. */
738 return -pte_event_ignored;
740 /* We're done when we decoded a TNT packet. */
741 if (dfun->flags & pdff_tnt)
744 /* Read ahead until the next query-relevant packet. */
745 errcode = pt_qry_read_ahead(decoder);
750 /* Preserve the time at the TNT packet. */
751 decoder->last_time = decoder->time;
753 /* Read ahead until the next query-relevant packet. */
754 errcode = pt_qry_read_ahead(decoder);
755 if ((errcode < 0) && (errcode != -pte_eos))
761 int pt_qry_cond_branch(struct pt_query_decoder *decoder, int *taken)
765 if (!decoder || !taken)
768 /* We cache the latest tnt packet in the decoder. Let's re-fill the
769 * cache in case it is empty.
771 if (pt_tnt_cache_is_empty(&decoder->tnt)) {
772 errcode = pt_qry_cache_tnt(decoder);
777 query = pt_tnt_cache_query(&decoder->tnt);
783 return pt_qry_status_flags(decoder);
786 int pt_qry_indirect_branch(struct pt_query_decoder *decoder, uint64_t *addr)
790 if (!decoder || !addr)
795 const struct pt_decoder_function *dfun;
797 dfun = decoder->next;
799 return pt_qry_provoke_fetch_error(decoder);
802 return -pte_internal;
804 /* There's an event ahead of us. */
805 if (pt_qry_will_event(decoder))
806 return -pte_bad_query;
808 /* Clear the decoder's current event so we know when we
809 * accidentally skipped an event.
811 decoder->event = NULL;
813 /* We may see a single TNT packet if the current tnt is empty.
815 * If we see a TNT while the current tnt is not empty, it means
816 * that our user got out of sync. Let's report no data and hope
817 * that our user is able to re-sync.
819 if ((dfun->flags & pdff_tnt) &&
820 !pt_tnt_cache_is_empty(&decoder->tnt))
821 return -pte_bad_query;
823 /* Apply the decoder function. */
824 errcode = dfun->decode(decoder);
828 /* If we skipped an event, we're in trouble. */
830 return -pte_event_ignored;
832 /* We're done when we found a TIP packet that isn't part of an
835 if (dfun->flags & pdff_tip) {
838 /* We already decoded it, so the branch destination
839 * is stored in the decoder's last ip.
841 errcode = pt_last_ip_query(&ip, &decoder->ip);
843 flags |= pts_ip_suppressed;
850 /* Read ahead until the next query-relevant packet. */
851 errcode = pt_qry_read_ahead(decoder);
856 /* Preserve the time at the TIP packet. */
857 decoder->last_time = decoder->time;
859 /* Read ahead until the next query-relevant packet. */
860 errcode = pt_qry_read_ahead(decoder);
861 if ((errcode < 0) && (errcode != -pte_eos))
864 flags |= pt_qry_status_flags(decoder);
869 int pt_qry_event(struct pt_query_decoder *decoder, struct pt_event *event,
874 if (!decoder || !event)
877 if (size < offsetof(struct pt_event, variant))
880 /* We do not allow querying for events while there are still TNT
883 if (!pt_tnt_cache_is_empty(&decoder->tnt))
884 return -pte_bad_query;
886 /* Do not provide more than we actually have. */
887 if (sizeof(*event) < size)
888 size = sizeof(*event);
892 const struct pt_decoder_function *dfun;
894 dfun = decoder->next;
896 return pt_qry_provoke_fetch_error(decoder);
899 return -pte_internal;
901 /* We must not see a TIP or TNT packet unless it belongs
904 * If we see one, it means that our user got out of sync.
905 * Let's report no data and hope that our user is able
908 if ((dfun->flags & (pdff_tip | pdff_tnt)) &&
909 !pt_qry_will_event(decoder))
910 return -pte_bad_query;
912 /* Clear the decoder's current event so we know when decoding
913 * produces a new event.
915 decoder->event = NULL;
917 /* Apply any other decoder function. */
918 errcode = dfun->decode(decoder);
922 /* Check if there has been an event.
924 * Some packets may result in events in some but not in all
927 if (decoder->event) {
928 (void) memcpy(event, decoder->event, size);
932 /* Read ahead until the next query-relevant packet. */
933 errcode = pt_qry_read_ahead(decoder);
938 /* Preserve the time at the event. */
939 decoder->last_time = decoder->time;
941 /* Read ahead until the next query-relevant packet. */
942 errcode = pt_qry_read_ahead(decoder);
943 if ((errcode < 0) && (errcode != -pte_eos))
946 flags |= pt_qry_status_flags(decoder);
951 int pt_qry_time(struct pt_query_decoder *decoder, uint64_t *time,
952 uint32_t *lost_mtc, uint32_t *lost_cyc)
954 if (!decoder || !time)
957 return pt_time_query_tsc(time, lost_mtc, lost_cyc, &decoder->last_time);
960 int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, uint32_t *cbr)
962 if (!decoder || !cbr)
965 return pt_time_query_cbr(cbr, &decoder->last_time);
968 static int pt_qry_event_time(struct pt_event *event,
969 const struct pt_query_decoder *decoder)
973 if (!event || !decoder)
974 return -pte_internal;
976 errcode = pt_time_query_tsc(&event->tsc, &event->lost_mtc,
977 &event->lost_cyc, &decoder->time);
979 if (errcode != -pte_no_time)
987 int pt_qry_decode_unknown(struct pt_query_decoder *decoder)
989 struct pt_packet packet;
993 return -pte_internal;
995 size = pt_pkt_read_unknown(&packet, decoder->pos, &decoder->config);
999 decoder->pos += size;
1003 int pt_qry_decode_pad(struct pt_query_decoder *decoder)
1006 return -pte_internal;
1008 decoder->pos += ptps_pad;
1013 static int pt_qry_read_psb_header(struct pt_query_decoder *decoder)
1016 return -pte_internal;
1018 pt_last_ip_init(&decoder->ip);
1021 const struct pt_decoder_function *dfun;
1024 errcode = pt_df_fetch(&decoder->next, decoder->pos,
1029 dfun = decoder->next;
1031 return -pte_internal;
1033 /* We're done once we reach an psbend packet. */
1034 if (dfun->flags & pdff_psbend)
1038 return -pte_bad_context;
1040 errcode = dfun->header(decoder);
1046 int pt_qry_decode_psb(struct pt_query_decoder *decoder)
1052 return -pte_internal;
1056 size = pt_pkt_read_psb(pos, &decoder->config);
1060 errcode = pt_tcal_update_psb(&decoder->tcal, &decoder->config);
1064 decoder->pos += size;
1066 errcode = pt_qry_read_psb_header(decoder);
1068 /* Move back to the PSB so we have a chance to recover and
1069 * continue decoding.
1073 /* Clear any PSB+ events that have already been queued. */
1074 (void) pt_evq_clear(&decoder->evq, evb_psbend);
1076 /* Reset the decoder's decode function. */
1077 decoder->next = &pt_decode_psb;
1082 /* The next packet following the PSB header will be of type PSBEND.
1084 * Decoding this packet will publish the PSB events what have been
1085 * accumulated while reading the PSB header.
1090 static int pt_qry_event_ip(uint64_t *ip, struct pt_event *event,
1091 const struct pt_query_decoder *decoder)
1096 return -pte_internal;
1098 errcode = pt_last_ip_query(ip, &decoder->ip);
1100 switch (pt_errcode(errcode)) {
1102 case pte_ip_suppressed:
1103 event->ip_suppressed = 1;
1114 /* Decode a generic IP packet.
1116 * Returns the number of bytes read, on success.
1117 * Returns -pte_eos if the ip does not fit into the buffer.
1118 * Returns -pte_bad_packet if the ip compression is not known.
1120 static int pt_qry_decode_ip(struct pt_query_decoder *decoder)
1122 struct pt_packet_ip packet;
1126 return -pte_internal;
1128 size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
1132 errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
1136 /* We do not update the decoder's position, yet. */
1141 static int pt_qry_consume_tip(struct pt_query_decoder *decoder, int size)
1144 return -pte_internal;
1146 decoder->pos += size;
1150 static int pt_qry_event_tip(struct pt_event *ev,
1151 struct pt_query_decoder *decoder)
1153 if (!ev || !decoder)
1154 return -pte_internal;
1157 case ptev_async_branch:
1158 decoder->consume_packet = 1;
1160 return pt_qry_event_ip(&ev->variant.async_branch.to, ev,
1163 case ptev_async_paging:
1164 return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
1167 case ptev_async_vmcs:
1168 return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
1171 case ptev_exec_mode:
1172 return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev,
1179 return -pte_bad_context;
1182 int pt_qry_decode_tip(struct pt_query_decoder *decoder)
1184 struct pt_event *ev;
1188 return -pte_internal;
1190 size = pt_qry_decode_ip(decoder);
1194 /* Process any pending events binding to TIP. */
1195 ev = pt_evq_dequeue(&decoder->evq, evb_tip);
1197 errcode = pt_qry_event_tip(ev, decoder);
1201 /* Publish the event. */
1202 decoder->event = ev;
1204 /* Process further pending events. */
1205 if (pt_evq_pending(&decoder->evq, evb_tip))
1208 /* No further events.
1210 * If none of the events consumed the packet, we're done.
1212 if (!decoder->consume_packet)
1215 /* We're done with this packet. Clear the flag we set previously
1218 decoder->consume_packet = 0;
1221 return pt_qry_consume_tip(decoder, size);
1224 int pt_qry_decode_tnt_8(struct pt_query_decoder *decoder)
1226 struct pt_packet_tnt packet;
1230 return -pte_internal;
1232 size = pt_pkt_read_tnt_8(&packet, decoder->pos, &decoder->config);
1236 errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
1241 decoder->pos += size;
1245 int pt_qry_decode_tnt_64(struct pt_query_decoder *decoder)
1247 struct pt_packet_tnt packet;
1251 return -pte_internal;
1253 size = pt_pkt_read_tnt_64(&packet, decoder->pos, &decoder->config);
1257 errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
1262 decoder->pos += size;
1266 static int pt_qry_consume_tip_pge(struct pt_query_decoder *decoder, int size)
1269 return -pte_internal;
1271 decoder->pos += size;
1275 static int pt_qry_event_tip_pge(struct pt_event *ev,
1276 const struct pt_query_decoder *decoder)
1279 return -pte_internal;
1282 case ptev_exec_mode:
1283 return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
1289 return -pte_bad_context;
1292 int pt_qry_decode_tip_pge(struct pt_query_decoder *decoder)
1294 struct pt_event *ev;
1298 return -pte_internal;
1300 size = pt_qry_decode_ip(decoder);
1304 /* We send the enable event first. This is more convenient for our users
1305 * and does not require them to either store or blindly apply other
1306 * events that might be pending.
1308 * We use the consume packet decoder flag to indicate this.
1310 if (!decoder->consume_packet) {
1311 /* This packet signals a standalone enabled event. */
1312 ev = pt_evq_standalone(&decoder->evq);
1314 return -pte_internal;
1316 ev->type = ptev_enabled;
1318 /* We can't afford having a suppressed IP here. */
1319 errcode = pt_last_ip_query(&ev->variant.enabled.ip,
1322 return -pte_bad_packet;
1324 errcode = pt_qry_event_time(ev, decoder);
1328 /* Discard any cached TNT bits.
1330 * They should have been consumed at the corresponding disable
1331 * event. If they have not, for whatever reason, discard them
1332 * now so our user does not get out of sync.
1334 pt_tnt_cache_init(&decoder->tnt);
1336 /* Process pending events next. */
1337 decoder->consume_packet = 1;
1338 decoder->enabled = 1;
1340 /* Process any pending events binding to TIP. */
1341 ev = pt_evq_dequeue(&decoder->evq, evb_tip);
1343 errcode = pt_qry_event_tip_pge(ev, decoder);
1349 /* We must have an event. Either the initial enable event or one of the
1353 return -pte_internal;
1355 /* Publish the event. */
1356 decoder->event = ev;
1358 /* Process further pending events. */
1359 if (pt_evq_pending(&decoder->evq, evb_tip))
1362 /* We must consume the packet. */
1363 if (!decoder->consume_packet)
1364 return -pte_internal;
1366 decoder->consume_packet = 0;
1368 return pt_qry_consume_tip_pge(decoder, size);
1371 static int pt_qry_consume_tip_pgd(struct pt_query_decoder *decoder, int size)
1374 return -pte_internal;
1376 decoder->enabled = 0;
1377 decoder->pos += size;
1381 static int pt_qry_event_tip_pgd(struct pt_event *ev,
1382 const struct pt_query_decoder *decoder)
1385 return -pte_internal;
1388 case ptev_async_branch: {
1391 /* Turn the async branch into an async disable. */
1392 at = ev->variant.async_branch.from;
1394 ev->type = ptev_async_disabled;
1395 ev->variant.async_disabled.at = at;
1397 return pt_qry_event_ip(&ev->variant.async_disabled.ip, ev,
1401 case ptev_async_paging:
1402 case ptev_async_vmcs:
1403 case ptev_exec_mode:
1404 /* These events are ordered after the async disable event. It
1405 * is not quite clear what IP to give them.
1407 * If we give them the async disable's source IP, we'd make an
1408 * error if the IP is updated when applying the async disable
1411 * If we give them the async disable's destination IP, we'd make
1412 * an error if the IP is not updated when applying the async
1413 * disable event. That's what our decoders do since tracing is
1414 * likely to resume from there.
1416 * In all cases, tracing will be disabled when those events are
1417 * applied, so we may as well suppress the IP.
1419 ev->ip_suppressed = 1;
1427 return -pte_bad_context;
1430 int pt_qry_decode_tip_pgd(struct pt_query_decoder *decoder)
1432 struct pt_event *ev;
1436 return -pte_internal;
1438 size = pt_qry_decode_ip(decoder);
1442 /* Process any pending events binding to TIP. */
1443 ev = pt_evq_dequeue(&decoder->evq, evb_tip);
1445 errcode = pt_qry_event_tip_pgd(ev, decoder);
1449 /* This packet signals a standalone disabled event. */
1450 ev = pt_evq_standalone(&decoder->evq);
1452 return -pte_internal;
1453 ev->type = ptev_disabled;
1455 errcode = pt_qry_event_ip(&ev->variant.disabled.ip, ev,
1460 errcode = pt_qry_event_time(ev, decoder);
1465 /* We must have an event. Either the initial enable event or one of the
1469 return -pte_internal;
1471 /* Publish the event. */
1472 decoder->event = ev;
1474 /* Process further pending events. */
1475 if (pt_evq_pending(&decoder->evq, evb_tip))
1478 return pt_qry_consume_tip_pgd(decoder, size);
1481 static int pt_qry_consume_fup(struct pt_query_decoder *decoder, int size)
1484 return -pte_internal;
1486 decoder->pos += size;
1490 static int scan_for_erratum_bdm70(struct pt_packet_decoder *decoder)
1493 struct pt_packet packet;
1496 errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
1498 /* Running out of packets is not an error. */
1499 if (errcode == -pte_eos)
1505 switch (packet.type) {
1507 /* All other packets cancel our search.
1509 * We do not enumerate those packets since we also
1510 * want to include new packets.
1515 /* We found it - the erratum applies. */
1529 /* Intentionally skip a few packets. */
1535 static int check_erratum_bdm70(const uint8_t *pos,
1536 const struct pt_config *config)
1538 struct pt_packet_decoder decoder;
1541 if (!pos || !config)
1542 return -pte_internal;
1544 errcode = pt_pkt_decoder_init(&decoder, config);
1548 errcode = pt_pkt_sync_set(&decoder, (uint64_t) (pos - config->begin));
1550 errcode = scan_for_erratum_bdm70(&decoder);
1552 pt_pkt_decoder_fini(&decoder);
1556 int pt_qry_header_fup(struct pt_query_decoder *decoder)
1558 struct pt_packet_ip packet;
1562 return -pte_internal;
1564 size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
1568 if (decoder->config.errata.bdm70 && !decoder->enabled) {
1569 errcode = check_erratum_bdm70(decoder->pos + size,
1575 return pt_qry_consume_fup(decoder, size);
1578 errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
1582 /* Tracing is enabled if we have an IP in the header. */
1583 if (packet.ipc != pt_ipc_suppressed)
1584 decoder->enabled = 1;
1586 return pt_qry_consume_fup(decoder, size);
1589 static int pt_qry_event_fup(struct pt_event *ev,
1590 struct pt_query_decoder *decoder)
1592 if (!ev || !decoder)
1593 return -pte_internal;
1597 decoder->consume_packet = 1;
1599 /* We can't afford having a suppressed IP here. */
1600 return pt_last_ip_query(&ev->variant.overflow.ip,
1604 if (!(ev->variant.tsx.aborted))
1605 decoder->consume_packet = 1;
1607 return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
1610 decoder->consume_packet = 1;
1612 return pt_qry_event_ip(&ev->variant.exstop.ip, ev, decoder);
1615 decoder->consume_packet = 1;
1617 return pt_qry_event_ip(&ev->variant.mwait.ip, ev, decoder);
1620 decoder->consume_packet = 1;
1622 return pt_qry_event_ip(&ev->variant.ptwrite.ip, ev, decoder);
1628 return -pte_internal;
1631 int pt_qry_decode_fup(struct pt_query_decoder *decoder)
1633 struct pt_event *ev;
1637 return -pte_internal;
1639 size = pt_qry_decode_ip(decoder);
1643 /* Process any pending events binding to FUP. */
1644 ev = pt_evq_dequeue(&decoder->evq, evb_fup);
1646 errcode = pt_qry_event_fup(ev, decoder);
1650 /* Publish the event. */
1651 decoder->event = ev;
1653 /* Process further pending events. */
1654 if (pt_evq_pending(&decoder->evq, evb_fup))
1657 /* No further events.
1659 * If none of the events consumed the packet, we're done.
1661 if (!decoder->consume_packet)
1664 /* We're done with this packet. Clear the flag we set previously
1667 decoder->consume_packet = 0;
1669 /* FUP indicates an async branch event; it binds to TIP.
1671 * We do need an IP in this case.
1675 errcode = pt_last_ip_query(&ip, &decoder->ip);
1679 ev = pt_evq_enqueue(&decoder->evq, evb_tip);
1683 ev->type = ptev_async_branch;
1684 ev->variant.async_branch.from = ip;
1686 errcode = pt_qry_event_time(ev, decoder);
1691 return pt_qry_consume_fup(decoder, size);
1694 int pt_qry_decode_pip(struct pt_query_decoder *decoder)
1696 struct pt_packet_pip packet;
1697 struct pt_event *event;
1701 return -pte_internal;
1703 size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
1707 /* Paging events are either standalone or bind to the same TIP packet
1708 * as an in-flight async branch event.
1710 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
1712 event = pt_evq_standalone(&decoder->evq);
1714 return -pte_internal;
1715 event->type = ptev_paging;
1716 event->variant.paging.cr3 = packet.cr3;
1717 event->variant.paging.non_root = packet.nr;
1719 decoder->event = event;
1721 event = pt_evq_enqueue(&decoder->evq, evb_tip);
1725 event->type = ptev_async_paging;
1726 event->variant.async_paging.cr3 = packet.cr3;
1727 event->variant.async_paging.non_root = packet.nr;
1730 errcode = pt_qry_event_time(event, decoder);
1734 decoder->pos += size;
1738 int pt_qry_header_pip(struct pt_query_decoder *decoder)
1740 struct pt_packet_pip packet;
1741 struct pt_event *event;
1745 return -pte_internal;
1747 size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
1751 /* Paging events are reported at the end of the PSB. */
1752 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
1756 event->type = ptev_async_paging;
1757 event->variant.async_paging.cr3 = packet.cr3;
1758 event->variant.async_paging.non_root = packet.nr;
1760 decoder->pos += size;
1764 static int pt_qry_event_psbend(struct pt_event *ev,
1765 struct pt_query_decoder *decoder)
1769 if (!ev || !decoder)
1770 return -pte_internal;
1772 /* PSB+ events are status updates. */
1773 ev->status_update = 1;
1775 errcode = pt_qry_event_time(ev, decoder);
1780 case ptev_async_paging:
1781 return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
1784 case ptev_exec_mode:
1785 return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
1788 return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
1790 case ptev_async_vmcs:
1791 return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
1798 /* Maintenance packets may appear anywhere. Do not mark them as
1799 * status updates even if they appear in PSB+.
1801 ev->status_update = 0;
1808 return -pte_internal;
1811 static int pt_qry_process_pending_psb_events(struct pt_query_decoder *decoder)
1813 struct pt_event *ev;
1817 return -pte_internal;
1819 ev = pt_evq_dequeue(&decoder->evq, evb_psbend);
1823 errcode = pt_qry_event_psbend(ev, decoder);
1827 /* Publish the event. */
1828 decoder->event = ev;
1830 /* Signal a pending event. */
1834 /* Create a standalone overflow event with tracing disabled.
1836 * Creates and published the event and disables tracing in @decoder.
1838 * Returns zero on success, a negative pt_error_code otherwise.
1840 static int pt_qry_event_ovf_disabled(struct pt_query_decoder *decoder)
1842 struct pt_event *ev;
1845 return -pte_internal;
1847 ev = pt_evq_standalone(&decoder->evq);
1849 return -pte_internal;
1851 ev->type = ptev_overflow;
1853 /* We suppress the IP to indicate that tracing has been disabled before
1854 * the overflow resolved. There can be several events before tracing is
1857 ev->ip_suppressed = 1;
1859 decoder->enabled = 0;
1860 decoder->event = ev;
1862 return pt_qry_event_time(ev, decoder);
1865 /* Queues an overflow event with tracing enabled.
1867 * Creates and enqueues the event and enables tracing in @decoder.
1869 * Returns zero on success, a negative pt_error_code otherwise.
1871 static int pt_qry_event_ovf_enabled(struct pt_query_decoder *decoder)
1873 struct pt_event *ev;
1876 return -pte_internal;
1878 ev = pt_evq_enqueue(&decoder->evq, evb_fup);
1880 return -pte_internal;
1882 ev->type = ptev_overflow;
1884 decoder->enabled = 1;
1886 return pt_qry_event_time(ev, decoder);
1889 /* Recover from SKD010.
1891 * Creates and publishes an overflow event at @packet's IP payload.
1893 * Further updates @decoder as follows:
1895 * - set time tracking to @time and @tcal
1896 * - set the position to @offset
1897 * - set ip to @packet's IP payload
1898 * - set tracing to be enabled
1900 * Returns zero on success, a negative error code otherwise.
1902 static int skd010_recover(struct pt_query_decoder *decoder,
1903 const struct pt_packet_ip *packet,
1904 const struct pt_time_cal *tcal,
1905 const struct pt_time *time, uint64_t offset)
1907 struct pt_last_ip ip;
1908 struct pt_event *ev;
1911 if (!decoder || !packet || !tcal || !time)
1912 return -pte_internal;
1914 /* We use the decoder's IP. It should be newly initialized. */
1917 /* Extract the IP payload from the packet. */
1918 errcode = pt_last_ip_update_ip(&ip, packet, &decoder->config);
1922 /* Synthesize the overflow event. */
1923 ev = pt_evq_standalone(&decoder->evq);
1925 return -pte_internal;
1927 ev->type = ptev_overflow;
1929 /* We do need a full IP. */
1930 errcode = pt_last_ip_query(&ev->variant.overflow.ip, &ip);
1932 return -pte_bad_context;
1934 /* We continue decoding at the given offset. */
1935 decoder->pos = decoder->config.begin + offset;
1937 /* Tracing is enabled. */
1938 decoder->enabled = 1;
1941 decoder->time = *time;
1942 decoder->tcal = *tcal;
1944 /* Publish the event. */
1945 decoder->event = ev;
1947 return pt_qry_event_time(ev, decoder);
1950 /* Recover from SKD010 with tracing disabled.
1952 * Creates and publishes a standalone overflow event.
1954 * Further updates @decoder as follows:
1956 * - set time tracking to @time and @tcal
1957 * - set the position to @offset
1958 * - set tracing to be disabled
1960 * Returns zero on success, a negative error code otherwise.
1962 static int skd010_recover_disabled(struct pt_query_decoder *decoder,
1963 const struct pt_time_cal *tcal,
1964 const struct pt_time *time, uint64_t offset)
1966 if (!decoder || !tcal || !time)
1967 return -pte_internal;
1969 decoder->time = *time;
1970 decoder->tcal = *tcal;
1972 /* We continue decoding at the given offset. */
1973 decoder->pos = decoder->config.begin + offset;
1975 return pt_qry_event_ovf_disabled(decoder);
1978 /* Scan ahead for a packet at which to resume after an overflow.
1980 * This function is called after an OVF without a corresponding FUP. This
1981 * normally means that the overflow resolved while tracing was disabled.
1983 * With erratum SKD010 it might also mean that the FUP (or TIP.PGE) was dropped.
1984 * The overflow thus resolved while tracing was enabled (or tracing was enabled
1985 * after the overflow resolved). Search for an indication whether tracing is
1986 * enabled or disabled by scanning upcoming packets.
1988 * If we can confirm that tracing is disabled, the erratum does not apply and we
1989 * can continue normally.
1991 * If we can confirm that tracing is enabled, the erratum applies and we try to
1992 * recover by synchronizing at a later packet and a different IP. If we can't
1993 * recover, pretend the erratum didn't apply so we run into the error later.
1994 * Since this assumes that tracing is disabled, no harm should be done, i.e. no
1995 * bad trace should be generated.
1997 * Returns zero if the overflow is handled.
1998 * Returns a positive value if the overflow is not yet handled.
1999 * Returns a negative error code otherwise.
2001 static int skd010_scan_for_ovf_resume(struct pt_packet_decoder *pkt,
2002 struct pt_query_decoder *decoder)
2004 struct pt_time_cal tcal;
2005 struct pt_time time;
2007 struct pt_time_cal tcal;
2008 struct pt_time time;
2014 return -pte_internal;
2016 /* Keep track of time as we skip packets. */
2017 time = decoder->time;
2018 tcal = decoder->tcal;
2020 /* Keep track of a potential recovery point at MODE.TSX. */
2021 memset(&mode_tsx, 0, sizeof(mode_tsx));
2024 struct pt_packet packet;
2027 errcode = pt_pkt_get_offset(pkt, &offset);
2031 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2033 /* Let's assume the trace is correct if we run out
2036 if (errcode == -pte_eos)
2042 switch (packet.type) {
2044 /* Everything is fine. There is nothing to do. */
2048 /* This is a clear indication that the erratum
2051 * We synchronize after the disable.
2053 return skd010_recover_disabled(decoder, &tcal, &time,
2054 offset + packet.size);
2058 /* This is a clear indication that the erratum
2061 * Yet, we can't recover from it as we wouldn't know how
2062 * many TNT bits will have been used when we eventually
2063 * find an IP packet at which to resume tracing.
2069 /* We could track those changes and synthesize extra
2070 * events after the overflow event when recovering from
2071 * the erratum. This requires infrastructure that we
2072 * don't currently have, though, so we're not going to
2075 * Instead, we ignore those changes. We already don't
2076 * know how many other changes were lost in the
2082 switch (packet.payload.mode.leaf) {
2084 /* A MODE.EXEC packet binds to TIP, i.e.
2086 * TIP.PGE: everything is fine
2087 * TIP: the erratum applies
2089 * In the TIP.PGE case, we may just follow the
2092 * In the TIP case, we'd be able to re-sync at
2093 * the TIP IP but have to skip packets up to and
2094 * including the TIP.
2096 * We'd need to synthesize the MODE.EXEC event
2097 * after the overflow event when recovering at
2098 * the TIP. We lack the infrastructure for this
2099 * - it's getting too complicated.
2101 * Instead, we ignore the execution mode change;
2102 * we already don't know how many more such
2103 * changes were lost in the overflow.
2108 /* A MODE.TSX packet may be standalone or bind
2111 * If this is the second MODE.TSX, we're sure
2112 * that tracing is disabled and everything is
2115 if (mode_tsx.offset)
2118 /* If we find the FUP this packet binds to, we
2119 * may recover at the FUP IP and restart
2120 * processing packets from here. Remember the
2123 mode_tsx.offset = offset;
2124 mode_tsx.time = time;
2125 mode_tsx.tcal = tcal;
2133 /* This is a pretty good indication that tracing
2134 * is indeed enabled and the erratum applies.
2137 /* If we got a MODE.TSX packet before, we synchronize at
2138 * the FUP IP but continue decoding packets starting
2139 * from the MODE.TSX.
2141 if (mode_tsx.offset)
2142 return skd010_recover(decoder,
2148 /* Without a preceding MODE.TSX, this FUP is the start
2149 * of an async branch or disable. We synchronize at the
2150 * FUP IP and continue decoding packets from here.
2152 return skd010_recover(decoder, &packet.payload.ip,
2153 &tcal, &time, offset);
2156 /* We syhchronize at the TIP IP and continue decoding
2157 * packets after the TIP packet.
2159 return skd010_recover(decoder, &packet.payload.ip,
2161 offset + packet.size);
2164 /* We reached a synchronization point. Tracing is
2165 * enabled if and only if the PSB+ contains a FUP.
2167 errcode = pt_qry_find_header_fup(&packet, pkt);
2169 /* If we ran out of packets, we can't tell.
2170 * Let's assume the trace is correct.
2172 if (errcode == -pte_eos)
2178 /* If there is no FUP, tracing is disabled and
2179 * everything is fine.
2184 /* We should have a FUP. */
2185 if (packet.type != ppt_fup)
2186 return -pte_internal;
2188 /* Otherwise, we may synchronize at the FUP IP and
2189 * continue decoding packets at the PSB.
2191 return skd010_recover(decoder, &packet.payload.ip,
2192 &tcal, &time, offset);
2195 /* We shouldn't see this. */
2196 return -pte_bad_context;
2200 /* It doesn't matter if it had been enabled or disabled
2201 * before. We may resume normally.
2207 /* We can't skip this packet. */
2214 /* Ignore this packet. */
2218 /* We may skip a stand-alone EXSTOP. */
2219 if (!packet.payload.exstop.ip)
2224 /* To skip this packet, we'd need to take care of the
2225 * FUP it binds to. This is getting complicated.
2230 /* We may skip a stand-alone PTW. */
2231 if (!packet.payload.ptw.ip)
2234 /* To skip this packet, we'd need to take care of the
2235 * FUP it binds to. This is getting complicated.
2240 /* Keep track of time. */
2241 errcode = pt_qry_apply_tsc(&time, &tcal,
2242 &packet.payload.tsc,
2250 /* Keep track of time. */
2251 errcode = pt_qry_apply_cbr(&time, &tcal,
2252 &packet.payload.cbr,
2260 /* Keep track of time. */
2261 errcode = pt_qry_apply_tma(&time, &tcal,
2262 &packet.payload.tma,
2270 /* Keep track of time. */
2271 errcode = pt_qry_apply_mtc(&time, &tcal,
2272 &packet.payload.mtc,
2280 /* Keep track of time. */
2281 errcode = pt_qry_apply_cyc(&time, &tcal,
2282 &packet.payload.cyc,
2292 static int pt_qry_handle_skd010(struct pt_query_decoder *decoder)
2294 struct pt_packet_decoder pkt;
2299 return -pte_internal;
2301 errcode = pt_qry_get_offset(decoder, &offset);
2305 errcode = pt_pkt_decoder_init(&pkt, &decoder->config);
2309 errcode = pt_pkt_sync_set(&pkt, offset);
2311 errcode = skd010_scan_for_ovf_resume(&pkt, decoder);
2313 pt_pkt_decoder_fini(&pkt);
2317 /* Scan ahead for an indication whether tracing is enabled or disabled.
2319 * Returns zero if tracing is clearly disabled.
2320 * Returns a positive integer if tracing is enabled or if we can't tell.
2321 * Returns a negative error code otherwise.
2323 static int apl12_tracing_is_disabled(struct pt_packet_decoder *decoder)
2326 return -pte_internal;
2329 struct pt_packet packet;
2332 status = pt_pkt_next(decoder, &packet, sizeof(packet));
2334 /* Running out of packets is not an error. */
2335 if (status == -pte_eos)
2341 switch (packet.type) {
2343 /* Skip other packets. */
2347 /* Tracing is disabled before a stop. */
2351 /* Tracing gets enabled - it must have been disabled. */
2358 /* Those packets are only generated when tracing is
2359 * enabled. We're done.
2364 /* We reached a synchronization point. Tracing is
2365 * enabled if and only if the PSB+ contains a FUP.
2367 status = pt_qry_find_header_fup(&packet, decoder);
2369 /* If we ran out of packets, we can't tell. */
2370 if (status == -pte_eos)
2376 /* We shouldn't see this. */
2377 return -pte_bad_context;
2380 /* It doesn't matter - we run into the next overflow. */
2385 /* We can't skip this packet. */
2391 /* Apply workaround for erratum APL12.
2393 * We resume from @offset (relative to @decoder->pos) with tracing disabled. On
2394 * our way to the resume location we process packets to update our state.
2396 * Any event will be dropped.
2398 * Returns zero on success, a negative pt_error_code otherwise.
2400 static int apl12_resume_disabled(struct pt_query_decoder *decoder,
2401 struct pt_packet_decoder *pkt,
2402 unsigned int offset)
2404 uint64_t begin, end;
2408 return -pte_internal;
2410 errcode = pt_qry_get_offset(decoder, &begin);
2414 errcode = pt_pkt_sync_set(pkt, begin);
2418 end = begin + offset;
2420 struct pt_packet packet;
2423 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2425 /* Running out of packets is not an error. */
2426 if (errcode == -pte_eos)
2432 /* The offset is the start of the next packet. */
2433 errcode = pt_pkt_get_offset(pkt, &next);
2437 /* We're done when we reach @offset.
2439 * The current @packet will be the FUP after which we started
2440 * our search. We skip it.
2442 * Check that we're not accidentally proceeding past @offset.
2446 return -pte_internal;
2451 switch (packet.type) {
2453 /* Skip other packets. */
2459 /* We should not encounter those.
2461 * We should not encounter a lot of packets but those
2462 * are state-relevant; let's check them explicitly.
2464 return -pte_internal;
2467 /* Keep track of time. */
2468 errcode = pt_qry_apply_tsc(&decoder->time,
2470 &packet.payload.tsc,
2478 /* Keep track of time. */
2479 errcode = pt_qry_apply_cbr(&decoder->time,
2481 &packet.payload.cbr,
2489 /* Keep track of time. */
2490 errcode = pt_qry_apply_tma(&decoder->time,
2492 &packet.payload.tma,
2500 /* Keep track of time. */
2501 errcode = pt_qry_apply_mtc(&decoder->time,
2503 &packet.payload.mtc,
2511 /* Keep track of time. */
2512 errcode = pt_qry_apply_cyc(&decoder->time,
2514 &packet.payload.cyc,
2523 decoder->pos += offset;
2525 return pt_qry_event_ovf_disabled(decoder);
2528 /* Handle erratum APL12.
2530 * This function is called when a FUP is found after an OVF. The @offset
2531 * argument gives the relative offset from @decoder->pos to after the FUP.
2533 * A FUP after OVF normally indicates that the overflow resolved while tracing
2534 * is enabled. Due to erratum APL12, however, the overflow may have resolved
2535 * while tracing is disabled and still generate a FUP.
2537 * We scan ahead for an indication whether tracing is actually disabled. If we
2538 * find one, the erratum applies and we proceed from after the FUP packet.
2540 * This will drop any CBR or MTC events. We will update @decoder's timing state
2541 * on CBR but drop the event.
2543 * Returns zero if the erratum was handled.
2544 * Returns a positive integer if the erratum was not handled.
2545 * Returns a negative pt_error_code otherwise.
2547 static int pt_qry_handle_apl12(struct pt_query_decoder *decoder,
2548 unsigned int offset)
2550 struct pt_packet_decoder pkt;
2555 return -pte_internal;
2557 status = pt_qry_get_offset(decoder, &here);
2561 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2565 status = pt_pkt_sync_set(&pkt, here + offset);
2567 status = apl12_tracing_is_disabled(&pkt);
2569 status = apl12_resume_disabled(decoder, &pkt, offset);
2572 pt_pkt_decoder_fini(&pkt);
2576 /* Apply workaround for erratum APL11.
2578 * We search for a TIP.PGD and, if we found one, resume from after that packet
2579 * with tracing disabled. On our way to the resume location we process packets
2580 * to update our state.
2582 * If we don't find a TIP.PGD but instead some other packet that indicates that
2583 * tracing is disabled, indicate that the erratum does not apply.
2585 * Any event will be dropped.
2587 * Returns zero if the erratum was handled.
2588 * Returns a positive integer if the erratum was not handled.
2589 * Returns a negative pt_error_code otherwise.
2591 static int apl11_apply(struct pt_query_decoder *decoder,
2592 struct pt_packet_decoder *pkt)
2594 struct pt_time_cal tcal;
2595 struct pt_time time;
2598 return -pte_internal;
2600 time = decoder->time;
2601 tcal = decoder->tcal;
2603 struct pt_packet packet;
2606 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2610 switch (packet.type) {
2614 /* We found a TIP.PGD. The erratum applies.
2616 * Resume from here with tracing disabled.
2618 errcode = pt_pkt_get_offset(pkt, &offset);
2622 decoder->time = time;
2623 decoder->tcal = tcal;
2624 decoder->pos = decoder->config.begin + offset;
2626 return pt_qry_event_ovf_disabled(decoder);
2630 return -pte_bad_opc;
2645 /* The erratum does not apply. */
2651 /* Skip those packets. */
2658 return -pte_bad_context;
2662 /* Keep track of time. */
2663 errcode = pt_qry_apply_tsc(&time, &tcal,
2664 &packet.payload.tsc,
2672 /* Keep track of time. */
2673 errcode = pt_qry_apply_cbr(&time, &tcal,
2674 &packet.payload.cbr,
2682 /* Keep track of time. */
2683 errcode = pt_qry_apply_tma(&time, &tcal,
2684 &packet.payload.tma,
2692 /* Keep track of time. */
2693 errcode = pt_qry_apply_mtc(&time, &tcal,
2694 &packet.payload.mtc,
2702 /* Keep track of time. */
2703 errcode = pt_qry_apply_cyc(&time, &tcal,
2704 &packet.payload.cyc,
2714 /* Handle erratum APL11.
2716 * This function is called when we diagnose a bad packet while searching for a
2719 * Due to erratum APL11 we may get an extra TIP.PGD after the OVF. Find that
2720 * TIP.PGD and resume from there with tracing disabled.
2722 * This will drop any CBR or MTC events. We will update @decoder's timing state
2723 * on CBR but drop the event.
2725 * Returns zero if the erratum was handled.
2726 * Returns a positive integer if the erratum was not handled.
2727 * Returns a negative pt_error_code otherwise.
2729 static int pt_qry_handle_apl11(struct pt_query_decoder *decoder)
2731 struct pt_packet_decoder pkt;
2736 return -pte_internal;
2738 status = pt_qry_get_offset(decoder, &offset);
2742 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2746 status = pt_pkt_sync_set(&pkt, offset);
2748 status = apl11_apply(decoder, &pkt);
2750 pt_pkt_decoder_fini(&pkt);
2754 static int pt_pkt_find_ovf_fup(struct pt_packet_decoder *decoder)
2757 struct pt_packet packet;
2760 errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
2764 switch (packet.type) {
2769 return -pte_bad_opc;
2800 return -pte_bad_context;
2805 /* Find a FUP to which the current OVF may bind.
2807 * Scans the trace for a FUP or for a packet that indicates that tracing is
2810 * Return the relative offset of the packet following the found FUP on success.
2811 * Returns zero if no FUP is found and tracing is assumed to be disabled.
2812 * Returns a negative pt_error_code otherwise.
2814 static int pt_qry_find_ovf_fup(const struct pt_query_decoder *decoder)
2816 struct pt_packet_decoder pkt;
2817 uint64_t begin, end, offset;
2821 return -pte_internal;
2823 status = pt_qry_get_offset(decoder, &begin);
2827 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2831 status = pt_pkt_sync_set(&pkt, begin);
2833 status = pt_pkt_find_ovf_fup(&pkt);
2835 status = pt_pkt_get_offset(&pkt, &end);
2840 return -pte_overflow;
2842 offset = end - begin;
2843 if (INT_MAX < offset)
2844 return -pte_overflow;
2846 status = (int) offset;
2850 pt_pkt_decoder_fini(&pkt);
2854 int pt_qry_decode_ovf(struct pt_query_decoder *decoder)
2856 struct pt_time_cal tcal;
2857 struct pt_time time;
2861 return -pte_internal;
2863 status = pt_qry_process_pending_psb_events(decoder);
2867 /* If we have any pending psbend events, we're done for now. */
2871 /* Reset the decoder state but preserve timing. */
2872 time = decoder->time;
2873 tcal = decoder->tcal;
2875 pt_qry_reset(decoder);
2877 decoder->time = time;
2878 if (decoder->config.flags.variant.query.keep_tcal_on_ovf) {
2879 status = pt_tcal_update_ovf(&tcal, &decoder->config);
2883 decoder->tcal = tcal;
2886 /* We must consume the OVF before we search for the binding packet. */
2887 decoder->pos += ptps_ovf;
2889 /* Overflow binds to either FUP or TIP.PGE.
2891 * If the overflow can be resolved while PacketEn=1 it binds to FUP. We
2892 * can see timing packets between OVF anf FUP but that's it.
2894 * Otherwise, PacketEn will be zero when the overflow resolves and OVF
2895 * binds to TIP.PGE. There can be packets between OVF and TIP.PGE that
2896 * do not depend on PacketEn.
2898 * We don't need to decode everything until TIP.PGE, however. As soon
2899 * as we see a non-timing non-FUP packet, we know that tracing has been
2900 * disabled before the overflow resolves.
2902 offset = pt_qry_find_ovf_fup(decoder);
2904 /* Check for erratum SKD010.
2906 * The FUP may have been dropped. If we can figure out that
2907 * tracing is enabled and hence the FUP is missing, we resume
2908 * at a later packet and a different IP.
2910 if (decoder->config.errata.skd010) {
2911 status = pt_qry_handle_skd010(decoder);
2916 /* Check for erratum APL11.
2918 * We may have gotten an extra TIP.PGD, which should be
2919 * diagnosed by our search for a subsequent FUP.
2921 if (decoder->config.errata.apl11 &&
2922 (offset == -pte_bad_context)) {
2923 status = pt_qry_handle_apl11(decoder);
2928 /* Report the original error from searching for the FUP packet
2929 * if we were not able to fix the trace.
2931 * We treat an overflow at the end of the trace as standalone.
2933 if (offset < 0 && offset != -pte_eos)
2936 return pt_qry_event_ovf_disabled(decoder);
2938 /* Check for erratum APL12.
2940 * We may get an extra FUP even though the overflow resolved
2941 * with tracing disabled.
2943 if (decoder->config.errata.apl12) {
2944 status = pt_qry_handle_apl12(decoder,
2945 (unsigned int) offset);
2950 return pt_qry_event_ovf_enabled(decoder);
2954 static int pt_qry_decode_mode_exec(struct pt_query_decoder *decoder,
2955 const struct pt_packet_mode_exec *packet)
2957 struct pt_event *event;
2959 if (!decoder || !packet)
2960 return -pte_internal;
2962 /* MODE.EXEC binds to TIP. */
2963 event = pt_evq_enqueue(&decoder->evq, evb_tip);
2967 event->type = ptev_exec_mode;
2968 event->variant.exec_mode.mode = pt_get_exec_mode(packet);
2970 return pt_qry_event_time(event, decoder);
2973 static int pt_qry_decode_mode_tsx(struct pt_query_decoder *decoder,
2974 const struct pt_packet_mode_tsx *packet)
2976 struct pt_event *event;
2978 if (!decoder || !packet)
2979 return -pte_internal;
2981 /* MODE.TSX is standalone if tracing is disabled. */
2982 if (!decoder->enabled) {
2983 event = pt_evq_standalone(&decoder->evq);
2985 return -pte_internal;
2987 /* We don't have an IP in this case. */
2988 event->variant.tsx.ip = 0;
2989 event->ip_suppressed = 1;
2991 /* Publish the event. */
2992 decoder->event = event;
2994 /* MODE.TSX binds to FUP. */
2995 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3000 event->type = ptev_tsx;
3001 event->variant.tsx.speculative = packet->intx;
3002 event->variant.tsx.aborted = packet->abrt;
3004 return pt_qry_event_time(event, decoder);
3007 int pt_qry_decode_mode(struct pt_query_decoder *decoder)
3009 struct pt_packet_mode packet;
3013 return -pte_internal;
3015 size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
3020 switch (packet.leaf) {
3022 errcode = pt_qry_decode_mode_exec(decoder, &packet.bits.exec);
3026 errcode = pt_qry_decode_mode_tsx(decoder, &packet.bits.tsx);
3033 decoder->pos += size;
3037 int pt_qry_header_mode(struct pt_query_decoder *decoder)
3039 struct pt_packet_mode packet;
3040 struct pt_event *event;
3044 return -pte_internal;
3046 size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
3050 /* Inside the header, events are reported at the end. */
3051 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3055 switch (packet.leaf) {
3057 event->type = ptev_exec_mode;
3058 event->variant.exec_mode.mode =
3059 pt_get_exec_mode(&packet.bits.exec);
3063 event->type = ptev_tsx;
3064 event->variant.tsx.speculative = packet.bits.tsx.intx;
3065 event->variant.tsx.aborted = packet.bits.tsx.abrt;
3069 decoder->pos += size;
3073 int pt_qry_decode_psbend(struct pt_query_decoder *decoder)
3078 return -pte_internal;
3080 status = pt_qry_process_pending_psb_events(decoder);
3084 /* If we had any psb events, we're done for now. */
3088 /* Skip the psbend extended opcode that we fetched before if no more
3089 * psbend events are pending.
3091 decoder->pos += ptps_psbend;
3095 int pt_qry_decode_tsc(struct pt_query_decoder *decoder)
3097 struct pt_packet_tsc packet;
3101 return -pte_internal;
3103 size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
3107 errcode = pt_qry_apply_tsc(&decoder->time, &decoder->tcal,
3108 &packet, &decoder->config);
3112 decoder->pos += size;
3116 int pt_qry_header_tsc(struct pt_query_decoder *decoder)
3118 struct pt_packet_tsc packet;
3122 return -pte_internal;
3124 size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
3128 errcode = pt_qry_apply_header_tsc(&decoder->time, &decoder->tcal,
3129 &packet, &decoder->config);
3133 decoder->pos += size;
3137 int pt_qry_decode_cbr(struct pt_query_decoder *decoder)
3139 struct pt_packet_cbr packet;
3140 struct pt_event *event;
3144 return -pte_internal;
3146 size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
3150 errcode = pt_qry_apply_cbr(&decoder->time, &decoder->tcal,
3151 &packet, &decoder->config);
3155 event = pt_evq_standalone(&decoder->evq);
3157 return -pte_internal;
3159 event->type = ptev_cbr;
3160 event->variant.cbr.ratio = packet.ratio;
3162 decoder->event = event;
3164 errcode = pt_qry_event_time(event, decoder);
3168 decoder->pos += size;
3172 int pt_qry_header_cbr(struct pt_query_decoder *decoder)
3174 struct pt_packet_cbr packet;
3175 struct pt_event *event;
3179 return -pte_internal;
3181 size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
3185 errcode = pt_qry_apply_header_cbr(&decoder->time, &decoder->tcal,
3186 &packet, &decoder->config);
3190 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3194 event->type = ptev_cbr;
3195 event->variant.cbr.ratio = packet.ratio;
3197 decoder->pos += size;
3201 int pt_qry_decode_tma(struct pt_query_decoder *decoder)
3203 struct pt_packet_tma packet;
3207 return -pte_internal;
3209 size = pt_pkt_read_tma(&packet, decoder->pos, &decoder->config);
3213 errcode = pt_qry_apply_tma(&decoder->time, &decoder->tcal,
3214 &packet, &decoder->config);
3218 decoder->pos += size;
3222 int pt_qry_decode_mtc(struct pt_query_decoder *decoder)
3224 struct pt_packet_mtc packet;
3228 return -pte_internal;
3230 size = pt_pkt_read_mtc(&packet, decoder->pos, &decoder->config);
3234 errcode = pt_qry_apply_mtc(&decoder->time, &decoder->tcal,
3235 &packet, &decoder->config);
3239 decoder->pos += size;
3243 static int check_erratum_skd007(struct pt_query_decoder *decoder,
3244 const struct pt_packet_cyc *packet, int size)
3249 if (!decoder || !packet || size < 0)
3250 return -pte_internal;
3252 /* It must be a 2-byte CYC. */
3256 payload = (uint16_t) packet->value;
3258 /* The 2nd byte of the CYC payload must look like an ext opcode. */
3259 if ((payload & ~0x1f) != 0x20)
3262 /* Skip this CYC packet. */
3263 pos = decoder->pos + size;
3264 if (decoder->config.end <= pos)
3267 /* See if we got a second CYC that looks like an OVF ext opcode. */
3268 if (*pos != pt_ext_ovf)
3271 /* We shouldn't get back-to-back CYCs unless they are sent when the
3272 * counter wraps around. In this case, we'd expect a full payload.
3274 * Since we got two non-full CYC packets, we assume the erratum hit.
3280 int pt_qry_decode_cyc(struct pt_query_decoder *decoder)
3282 struct pt_packet_cyc packet;
3283 struct pt_config *config;
3287 return -pte_internal;
3289 config = &decoder->config;
3291 size = pt_pkt_read_cyc(&packet, decoder->pos, config);
3295 if (config->errata.skd007) {
3296 errcode = check_erratum_skd007(decoder, &packet, size);
3300 /* If the erratum hits, we ignore the partial CYC and instead
3301 * process the OVF following/overlapping it.
3304 /* We skip the first byte of the CYC, which brings us
3305 * to the beginning of the OVF packet.
3312 errcode = pt_qry_apply_cyc(&decoder->time, &decoder->tcal,
3317 decoder->pos += size;
3321 int pt_qry_decode_stop(struct pt_query_decoder *decoder)
3323 struct pt_event *event;
3327 return -pte_internal;
3329 /* Stop events are reported immediately. */
3330 event = pt_evq_standalone(&decoder->evq);
3332 return -pte_internal;
3334 event->type = ptev_stop;
3336 decoder->event = event;
3338 errcode = pt_qry_event_time(event, decoder);
3342 decoder->pos += ptps_stop;
3346 int pt_qry_header_vmcs(struct pt_query_decoder *decoder)
3348 struct pt_packet_vmcs packet;
3349 struct pt_event *event;
3353 return -pte_internal;
3355 size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
3359 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3363 event->type = ptev_async_vmcs;
3364 event->variant.async_vmcs.base = packet.base;
3366 decoder->pos += size;
3370 int pt_qry_decode_vmcs(struct pt_query_decoder *decoder)
3372 struct pt_packet_vmcs packet;
3373 struct pt_event *event;
3377 return -pte_internal;
3379 size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
3383 /* VMCS events bind to the same IP as an in-flight async paging event.
3385 * In that case, the VMCS event should be applied first. We reorder
3386 * events here to simplify the life of higher layers.
3388 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_paging);
3390 struct pt_event *paging;
3392 paging = pt_evq_enqueue(&decoder->evq, evb_tip);
3398 event->type = ptev_async_vmcs;
3399 event->variant.async_vmcs.base = packet.base;
3401 decoder->pos += size;
3405 /* VMCS events bind to the same TIP packet as an in-flight async
3408 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
3410 event = pt_evq_enqueue(&decoder->evq, evb_tip);
3414 event->type = ptev_async_vmcs;
3415 event->variant.async_vmcs.base = packet.base;
3417 decoder->pos += size;
3421 /* VMCS events that do not bind to an in-flight async event are
3424 event = pt_evq_standalone(&decoder->evq);
3426 return -pte_internal;
3428 event->type = ptev_vmcs;
3429 event->variant.vmcs.base = packet.base;
3431 decoder->event = event;
3433 errcode = pt_qry_event_time(event, decoder);
3437 decoder->pos += size;
3441 int pt_qry_decode_mnt(struct pt_query_decoder *decoder)
3443 struct pt_packet_mnt packet;
3444 struct pt_event *event;
3448 return -pte_internal;
3450 size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
3454 event = pt_evq_standalone(&decoder->evq);
3456 return -pte_internal;
3458 event->type = ptev_mnt;
3459 event->variant.mnt.payload = packet.payload;
3461 decoder->event = event;
3463 errcode = pt_qry_event_time(event, decoder);
3467 decoder->pos += size;
3472 int pt_qry_header_mnt(struct pt_query_decoder *decoder)
3474 struct pt_packet_mnt packet;
3475 struct pt_event *event;
3479 return -pte_internal;
3481 size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
3485 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3489 event->type = ptev_mnt;
3490 event->variant.mnt.payload = packet.payload;
3492 decoder->pos += size;
3497 int pt_qry_decode_exstop(struct pt_query_decoder *decoder)
3499 struct pt_packet_exstop packet;
3500 struct pt_event *event;
3504 return -pte_internal;
3506 size = pt_pkt_read_exstop(&packet, decoder->pos, &decoder->config);
3511 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3513 return -pte_internal;
3515 event->type = ptev_exstop;
3517 event = pt_evq_standalone(&decoder->evq);
3519 return -pte_internal;
3521 event->type = ptev_exstop;
3523 event->ip_suppressed = 1;
3524 event->variant.exstop.ip = 0ull;
3526 decoder->event = event;
3529 decoder->pos += size;
3533 int pt_qry_decode_mwait(struct pt_query_decoder *decoder)
3535 struct pt_packet_mwait packet;
3536 struct pt_event *event;
3540 return -pte_internal;
3542 size = pt_pkt_read_mwait(&packet, decoder->pos, &decoder->config);
3546 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3548 return -pte_internal;
3550 event->type = ptev_mwait;
3551 event->variant.mwait.hints = packet.hints;
3552 event->variant.mwait.ext = packet.ext;
3554 decoder->pos += size;
3558 int pt_qry_decode_pwre(struct pt_query_decoder *decoder)
3560 struct pt_packet_pwre packet;
3561 struct pt_event *event;
3565 return -pte_internal;
3567 size = pt_pkt_read_pwre(&packet, decoder->pos, &decoder->config);
3571 event = pt_evq_standalone(&decoder->evq);
3573 return -pte_internal;
3575 event->type = ptev_pwre;
3576 event->variant.pwre.state = packet.state;
3577 event->variant.pwre.sub_state = packet.sub_state;
3580 event->variant.pwre.hw = 1;
3582 decoder->event = event;
3584 decoder->pos += size;
3588 int pt_qry_decode_pwrx(struct pt_query_decoder *decoder)
3590 struct pt_packet_pwrx packet;
3591 struct pt_event *event;
3595 return -pte_internal;
3597 size = pt_pkt_read_pwrx(&packet, decoder->pos, &decoder->config);
3601 event = pt_evq_standalone(&decoder->evq);
3603 return -pte_internal;
3605 event->type = ptev_pwrx;
3606 event->variant.pwrx.last = packet.last;
3607 event->variant.pwrx.deepest = packet.deepest;
3609 if (packet.interrupt)
3610 event->variant.pwrx.interrupt = 1;
3612 event->variant.pwrx.store = 1;
3613 if (packet.autonomous)
3614 event->variant.pwrx.autonomous = 1;
3616 decoder->event = event;
3618 decoder->pos += size;
3622 int pt_qry_decode_ptw(struct pt_query_decoder *decoder)
3624 struct pt_packet_ptw packet;
3625 struct pt_event *event;
3629 return -pte_internal;
3631 size = pt_pkt_read_ptw(&packet, decoder->pos, &decoder->config);
3635 pls = pt_ptw_size(packet.plc);
3640 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3642 return -pte_internal;
3644 event = pt_evq_standalone(&decoder->evq);
3646 return -pte_internal;
3648 event->ip_suppressed = 1;
3650 decoder->event = event;
3653 event->type = ptev_ptwrite;
3654 event->variant.ptwrite.size = (uint8_t) pls;
3655 event->variant.ptwrite.payload = packet.payload;
3657 decoder->pos += size;