/*
 * Copyright (c) 2014-2018, Intel Corporation
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are met:
 *
 *  * Redistributions of source code must retain the above copyright notice,
 *    this list of conditions and the following disclaimer.
 *  * Redistributions in binary form must reproduce the above copyright notice,
 *    this list of conditions and the following disclaimer in the documentation
 *    and/or other materials provided with the distribution.
 *  * Neither the name of Intel Corporation nor the names of its contributors
 *    may be used to endorse or promote products derived from this software
 *    without specific prior written permission.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS IS"
 * AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED TO, THE
 * IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR PURPOSE
 * ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT OWNER OR CONTRIBUTORS BE
 * LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL, SPECIAL, EXEMPLARY, OR
 * CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT LIMITED TO, PROCUREMENT OF
 * SUBSTITUTE GOODS OR SERVICES; LOSS OF USE, DATA, OR PROFITS; OR BUSINESS
 * INTERRUPTION) HOWEVER CAUSED AND ON ANY THEORY OF LIABILITY, WHETHER IN
 * CONTRACT, STRICT LIABILITY, OR TORT (INCLUDING NEGLIGENCE OR OTHERWISE)
 * ARISING IN ANY WAY OUT OF THE USE OF THIS SOFTWARE, EVEN IF ADVISED OF THE
 * POSSIBILITY OF SUCH DAMAGE.
 */
#include "pt_query_decoder.h"
#include "pt_sync.h"
#include "pt_decoder_function.h"
#include "pt_packet.h"
#include "pt_packet_decoder.h"
#include "pt_config.h"
#include "pt_opcodes.h"
#include "pt_compiler.h"

#include "intel-pt.h"

#include <string.h>
#include <stddef.h>
#include <stdlib.h>
46 /* Find a FUP in a PSB+ header.
48 * The packet @decoder must be synchronized onto the trace stream at the
49 * beginning or somewhere inside a PSB+ header.
51 * It uses @packet to hold trace packets during its search. If the search is
52 * successful, @packet will contain the first (and hopefully only) FUP packet in
53 * this PSB+. Otherwise, @packet may contain anything.
55 * Returns one if a FUP packet is found (@packet will contain it).
56 * Returns zero if no FUP packet is found (@packet is undefined).
57 * Returns a negative error code otherwise.
59 static int pt_qry_find_header_fup(struct pt_packet *packet,
60 struct pt_packet_decoder *decoder)
62 if (!packet || !decoder)
68 errcode = pt_pkt_next(decoder, packet, sizeof(*packet));
72 switch (packet->type) {
74 /* Ignore the packet. */
78 /* There's no FUP in here. */
88 int pt_qry_decoder_init(struct pt_query_decoder *decoder,
89 const struct pt_config *config)
96 memset(decoder, 0, sizeof(*decoder));
98 errcode = pt_config_from_user(&decoder->config, config);
102 pt_last_ip_init(&decoder->ip);
103 pt_tnt_cache_init(&decoder->tnt);
104 pt_time_init(&decoder->time);
105 pt_time_init(&decoder->last_time);
106 pt_tcal_init(&decoder->tcal);
107 pt_evq_init(&decoder->evq);
112 struct pt_query_decoder *pt_qry_alloc_decoder(const struct pt_config *config)
114 struct pt_query_decoder *decoder;
117 decoder = malloc(sizeof(*decoder));
121 errcode = pt_qry_decoder_init(decoder, config);
130 void pt_qry_decoder_fini(struct pt_query_decoder *decoder)
/* Finalize and free a decoder allocated with pt_qry_alloc_decoder().
 *
 * Passing NULL is a no-op (free(NULL) is defined).
 */
void pt_qry_free_decoder(struct pt_query_decoder *decoder)
{
	pt_qry_decoder_fini(decoder);
	free(decoder);
}
143 static void pt_qry_reset(struct pt_query_decoder *decoder)
148 decoder->enabled = 0;
149 decoder->consume_packet = 0;
150 decoder->event = NULL;
152 pt_last_ip_init(&decoder->ip);
153 pt_tnt_cache_init(&decoder->tnt);
154 pt_time_init(&decoder->time);
155 pt_time_init(&decoder->last_time);
156 pt_tcal_init(&decoder->tcal);
157 pt_evq_init(&decoder->evq);
160 static int pt_qry_will_event(const struct pt_query_decoder *decoder)
162 const struct pt_decoder_function *dfun;
165 return -pte_internal;
167 dfun = decoder->next;
171 if (dfun->flags & pdff_event)
174 if (dfun->flags & pdff_psbend)
175 return pt_evq_pending(&decoder->evq, evb_psbend);
177 if (dfun->flags & pdff_tip)
178 return pt_evq_pending(&decoder->evq, evb_tip);
180 if (dfun->flags & pdff_fup)
181 return pt_evq_pending(&decoder->evq, evb_fup);
186 static int pt_qry_will_eos(const struct pt_query_decoder *decoder)
188 const struct pt_decoder_function *dfun;
192 return -pte_internal;
194 dfun = decoder->next;
198 /* The decoding function may be NULL for two reasons:
200 * - we ran out of trace
201 * - we ran into a fetch error such as -pte_bad_opc
205 errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
206 return errcode == -pte_eos;
209 static int pt_qry_status_flags(const struct pt_query_decoder *decoder)
214 return -pte_internal;
216 /* Some packets force out TNT and any deferred TIPs in order to
217 * establish the correct context for the subsequent packet.
219 * Users are expected to first navigate to the correct code region
220 * by using up the cached TNT bits before interpreting any subsequent
223 * We do need to read ahead in order to signal upcoming events. We may
224 * have already decoded those packets while our user has not navigated
225 * to the correct code region, yet.
227 * In order to have our user use up the cached TNT bits first, we do
228 * not indicate the next event until the TNT cache is empty.
230 if (pt_tnt_cache_is_empty(&decoder->tnt)) {
231 if (pt_qry_will_event(decoder))
232 flags |= pts_event_pending;
234 if (pt_qry_will_eos(decoder))
241 static int pt_qry_provoke_fetch_error(const struct pt_query_decoder *decoder)
243 const struct pt_decoder_function *dfun;
247 return -pte_internal;
249 /* Repeat the decoder fetch to reproduce the error. */
250 errcode = pt_df_fetch(&dfun, decoder->pos, &decoder->config);
254 /* We must get some error or something's wrong. */
255 return -pte_internal;
258 static int pt_qry_read_ahead(struct pt_query_decoder *decoder)
261 return -pte_internal;
264 const struct pt_decoder_function *dfun;
267 errcode = pt_df_fetch(&decoder->next, decoder->pos,
272 dfun = decoder->next;
274 return -pte_internal;
277 return -pte_internal;
279 /* We're done once we reach
281 * - a branching related packet. */
282 if (dfun->flags & (pdff_tip | pdff_tnt))
285 /* - an event related packet. */
286 if (pt_qry_will_event(decoder))
289 /* Decode status update packets. */
290 errcode = dfun->decode(decoder);
292 /* Ignore truncated status packets at the end.
294 * Move beyond the packet and clear @decoder->next to
295 * indicate that we were not able to fetch the next
298 if (errcode == -pte_eos) {
299 decoder->pos = decoder->config.end;
300 decoder->next = NULL;
308 static int pt_qry_start(struct pt_query_decoder *decoder, const uint8_t *pos,
311 const struct pt_decoder_function *dfun;
314 if (!decoder || !pos)
317 pt_qry_reset(decoder);
322 errcode = pt_df_fetch(&decoder->next, pos, &decoder->config);
326 dfun = decoder->next;
328 /* We do need to start at a PSB in order to initialize the state. */
329 if (dfun != &pt_decode_psb)
332 /* Decode the PSB+ header to initialize the state. */
333 errcode = dfun->decode(decoder);
337 /* Fill in the start address.
338 * We do this before reading ahead since the latter may read an
339 * adjacent PSB+ that might change the decoder's IP, causing us
343 status = pt_last_ip_query(addr, &decoder->ip);
345 /* Make sure we don't clobber it later on. */
350 /* Read ahead until the first query-relevant packet. */
351 errcode = pt_qry_read_ahead(decoder);
355 /* We return the current decoder status. */
356 status = pt_qry_status_flags(decoder);
360 errcode = pt_last_ip_query(addr, &decoder->ip);
362 /* Indicate the missing IP in the status. */
364 status |= pts_ip_suppressed;
370 static int pt_qry_apply_tsc(struct pt_time *time, struct pt_time_cal *tcal,
371 const struct pt_packet_tsc *packet,
372 const struct pt_config *config)
376 /* We ignore configuration errors. They will result in imprecise
377 * calibration which will result in imprecise cycle-accurate timing.
379 * We currently do not track them.
381 errcode = pt_tcal_update_tsc(tcal, packet, config);
382 if (errcode < 0 && (errcode != -pte_bad_config))
385 /* We ignore configuration errors. They will result in imprecise
386 * timing and are tracked as packet losses in struct pt_time.
388 errcode = pt_time_update_tsc(time, packet, config);
389 if (errcode < 0 && (errcode != -pte_bad_config))
395 static int pt_qry_apply_header_tsc(struct pt_time *time,
396 struct pt_time_cal *tcal,
397 const struct pt_packet_tsc *packet,
398 const struct pt_config *config)
402 /* We ignore configuration errors. They will result in imprecise
403 * calibration which will result in imprecise cycle-accurate timing.
405 * We currently do not track them.
407 errcode = pt_tcal_header_tsc(tcal, packet, config);
408 if (errcode < 0 && (errcode != -pte_bad_config))
411 /* We ignore configuration errors. They will result in imprecise
412 * timing and are tracked as packet losses in struct pt_time.
414 errcode = pt_time_update_tsc(time, packet, config);
415 if (errcode < 0 && (errcode != -pte_bad_config))
421 static int pt_qry_apply_cbr(struct pt_time *time, struct pt_time_cal *tcal,
422 const struct pt_packet_cbr *packet,
423 const struct pt_config *config)
427 /* We ignore configuration errors. They will result in imprecise
428 * calibration which will result in imprecise cycle-accurate timing.
430 * We currently do not track them.
432 errcode = pt_tcal_update_cbr(tcal, packet, config);
433 if (errcode < 0 && (errcode != -pte_bad_config))
436 /* We ignore configuration errors. They will result in imprecise
437 * timing and are tracked as packet losses in struct pt_time.
439 errcode = pt_time_update_cbr(time, packet, config);
440 if (errcode < 0 && (errcode != -pte_bad_config))
446 static int pt_qry_apply_header_cbr(struct pt_time *time,
447 struct pt_time_cal *tcal,
448 const struct pt_packet_cbr *packet,
449 const struct pt_config *config)
453 /* We ignore configuration errors. They will result in imprecise
454 * calibration which will result in imprecise cycle-accurate timing.
456 * We currently do not track them.
458 errcode = pt_tcal_header_cbr(tcal, packet, config);
459 if (errcode < 0 && (errcode != -pte_bad_config))
462 /* We ignore configuration errors. They will result in imprecise
463 * timing and are tracked as packet losses in struct pt_time.
465 errcode = pt_time_update_cbr(time, packet, config);
466 if (errcode < 0 && (errcode != -pte_bad_config))
472 static int pt_qry_apply_tma(struct pt_time *time, struct pt_time_cal *tcal,
473 const struct pt_packet_tma *packet,
474 const struct pt_config *config)
478 /* We ignore configuration errors. They will result in imprecise
479 * calibration which will result in imprecise cycle-accurate timing.
481 * We currently do not track them.
483 errcode = pt_tcal_update_tma(tcal, packet, config);
484 if (errcode < 0 && (errcode != -pte_bad_config))
487 /* We ignore configuration errors. They will result in imprecise
488 * timing and are tracked as packet losses in struct pt_time.
490 errcode = pt_time_update_tma(time, packet, config);
491 if (errcode < 0 && (errcode != -pte_bad_config))
497 static int pt_qry_apply_mtc(struct pt_time *time, struct pt_time_cal *tcal,
498 const struct pt_packet_mtc *packet,
499 const struct pt_config *config)
503 /* We ignore configuration errors. They will result in imprecise
504 * calibration which will result in imprecise cycle-accurate timing.
506 * We currently do not track them.
508 errcode = pt_tcal_update_mtc(tcal, packet, config);
509 if (errcode < 0 && (errcode != -pte_bad_config))
512 /* We ignore configuration errors. They will result in imprecise
513 * timing and are tracked as packet losses in struct pt_time.
515 errcode = pt_time_update_mtc(time, packet, config);
516 if (errcode < 0 && (errcode != -pte_bad_config))
522 static int pt_qry_apply_cyc(struct pt_time *time, struct pt_time_cal *tcal,
523 const struct pt_packet_cyc *packet,
524 const struct pt_config *config)
529 /* We ignore configuration errors. They will result in imprecise
530 * calibration which will result in imprecise cycle-accurate timing.
532 * We currently do not track them.
534 errcode = pt_tcal_update_cyc(tcal, packet, config);
535 if (errcode < 0 && (errcode != -pte_bad_config))
538 /* We need the FastCounter to Cycles ratio below. Fall back to
539 * an invalid ratio of 0 if calibration has not kicked in, yet.
541 * This will be tracked as packet loss in struct pt_time.
543 errcode = pt_tcal_fcr(&fcr, tcal);
545 if (errcode == -pte_no_time)
551 /* We ignore configuration errors. They will result in imprecise
552 * timing and are tracked as packet losses in struct pt_time.
554 errcode = pt_time_update_cyc(time, packet, config, fcr);
555 if (errcode < 0 && (errcode != -pte_bad_config))
561 int pt_qry_sync_forward(struct pt_query_decoder *decoder, uint64_t *ip)
563 const uint8_t *pos, *sync;
569 sync = decoder->sync;
572 pos = decoder->config.begin;
577 errcode = pt_sync_forward(&sync, pos, &decoder->config);
581 return pt_qry_start(decoder, sync, ip);
584 int pt_qry_sync_backward(struct pt_query_decoder *decoder, uint64_t *ip)
586 const uint8_t *start, *sync;
592 start = decoder->pos;
594 start = decoder->config.end;
598 errcode = pt_sync_backward(&sync, sync, &decoder->config);
602 errcode = pt_qry_start(decoder, sync, ip);
604 /* Ignore incomplete trace segments at the end. We need
605 * a full PSB+ to start decoding.
607 if (errcode == -pte_eos)
613 /* An empty trace segment in the middle of the trace might bring
614 * us back to where we started.
616 * We're done when we reached a new position.
618 if (decoder->pos != start)
625 int pt_qry_sync_set(struct pt_query_decoder *decoder, uint64_t *ip,
628 const uint8_t *sync, *pos;
634 pos = decoder->config.begin + offset;
636 errcode = pt_sync_set(&sync, pos, &decoder->config);
640 return pt_qry_start(decoder, sync, ip);
643 int pt_qry_get_offset(const struct pt_query_decoder *decoder, uint64_t *offset)
645 const uint8_t *begin, *pos;
647 if (!decoder || !offset)
650 begin = decoder->config.begin;
656 *offset = pos - begin;
660 int pt_qry_get_sync_offset(const struct pt_query_decoder *decoder,
663 const uint8_t *begin, *sync;
665 if (!decoder || !offset)
668 begin = decoder->config.begin;
669 sync = decoder->sync;
674 *offset = sync - begin;
678 const struct pt_config *
679 pt_qry_get_config(const struct pt_query_decoder *decoder)
684 return &decoder->config;
687 static int pt_qry_cache_tnt(struct pt_query_decoder *decoder)
692 return -pte_internal;
695 const struct pt_decoder_function *dfun;
697 dfun = decoder->next;
699 return pt_qry_provoke_fetch_error(decoder);
702 return -pte_internal;
704 /* There's an event ahead of us. */
705 if (pt_qry_will_event(decoder))
706 return -pte_bad_query;
708 /* Diagnose a TIP that has not been part of an event. */
709 if (dfun->flags & pdff_tip)
710 return -pte_bad_query;
712 /* Clear the decoder's current event so we know when we
713 * accidentally skipped an event.
715 decoder->event = NULL;
717 /* Apply the decoder function. */
718 errcode = dfun->decode(decoder);
722 /* If we skipped an event, we're in trouble. */
724 return -pte_event_ignored;
726 /* We're done when we decoded a TNT packet. */
727 if (dfun->flags & pdff_tnt)
730 /* Read ahead until the next query-relevant packet. */
731 errcode = pt_qry_read_ahead(decoder);
736 /* Preserve the time at the TNT packet. */
737 decoder->last_time = decoder->time;
739 /* Read ahead until the next query-relevant packet. */
740 errcode = pt_qry_read_ahead(decoder);
741 if ((errcode < 0) && (errcode != -pte_eos))
747 int pt_qry_cond_branch(struct pt_query_decoder *decoder, int *taken)
751 if (!decoder || !taken)
754 /* We cache the latest tnt packet in the decoder. Let's re-fill the
755 * cache in case it is empty.
757 if (pt_tnt_cache_is_empty(&decoder->tnt)) {
758 errcode = pt_qry_cache_tnt(decoder);
763 query = pt_tnt_cache_query(&decoder->tnt);
769 return pt_qry_status_flags(decoder);
772 int pt_qry_indirect_branch(struct pt_query_decoder *decoder, uint64_t *addr)
776 if (!decoder || !addr)
781 const struct pt_decoder_function *dfun;
783 dfun = decoder->next;
785 return pt_qry_provoke_fetch_error(decoder);
788 return -pte_internal;
790 /* There's an event ahead of us. */
791 if (pt_qry_will_event(decoder))
792 return -pte_bad_query;
794 /* Clear the decoder's current event so we know when we
795 * accidentally skipped an event.
797 decoder->event = NULL;
799 /* We may see a single TNT packet if the current tnt is empty.
801 * If we see a TNT while the current tnt is not empty, it means
802 * that our user got out of sync. Let's report no data and hope
803 * that our user is able to re-sync.
805 if ((dfun->flags & pdff_tnt) &&
806 !pt_tnt_cache_is_empty(&decoder->tnt))
807 return -pte_bad_query;
809 /* Apply the decoder function. */
810 errcode = dfun->decode(decoder);
814 /* If we skipped an event, we're in trouble. */
816 return -pte_event_ignored;
818 /* We're done when we found a TIP packet that isn't part of an
821 if (dfun->flags & pdff_tip) {
824 /* We already decoded it, so the branch destination
825 * is stored in the decoder's last ip.
827 errcode = pt_last_ip_query(&ip, &decoder->ip);
829 flags |= pts_ip_suppressed;
836 /* Read ahead until the next query-relevant packet. */
837 errcode = pt_qry_read_ahead(decoder);
842 /* Preserve the time at the TIP packet. */
843 decoder->last_time = decoder->time;
845 /* Read ahead until the next query-relevant packet. */
846 errcode = pt_qry_read_ahead(decoder);
847 if ((errcode < 0) && (errcode != -pte_eos))
850 flags |= pt_qry_status_flags(decoder);
855 int pt_qry_event(struct pt_query_decoder *decoder, struct pt_event *event,
860 if (!decoder || !event)
863 if (size < offsetof(struct pt_event, variant))
866 /* We do not allow querying for events while there are still TNT
869 if (!pt_tnt_cache_is_empty(&decoder->tnt))
870 return -pte_bad_query;
872 /* Do not provide more than we actually have. */
873 if (sizeof(*event) < size)
874 size = sizeof(*event);
878 const struct pt_decoder_function *dfun;
880 dfun = decoder->next;
882 return pt_qry_provoke_fetch_error(decoder);
885 return -pte_internal;
887 /* We must not see a TIP or TNT packet unless it belongs
890 * If we see one, it means that our user got out of sync.
891 * Let's report no data and hope that our user is able
894 if ((dfun->flags & (pdff_tip | pdff_tnt)) &&
895 !pt_qry_will_event(decoder))
896 return -pte_bad_query;
898 /* Clear the decoder's current event so we know when decoding
899 * produces a new event.
901 decoder->event = NULL;
903 /* Apply any other decoder function. */
904 errcode = dfun->decode(decoder);
908 /* Check if there has been an event.
910 * Some packets may result in events in some but not in all
913 if (decoder->event) {
914 (void) memcpy(event, decoder->event, size);
918 /* Read ahead until the next query-relevant packet. */
919 errcode = pt_qry_read_ahead(decoder);
924 /* Preserve the time at the event. */
925 decoder->last_time = decoder->time;
927 /* Read ahead until the next query-relevant packet. */
928 errcode = pt_qry_read_ahead(decoder);
929 if ((errcode < 0) && (errcode != -pte_eos))
932 flags |= pt_qry_status_flags(decoder);
937 int pt_qry_time(struct pt_query_decoder *decoder, uint64_t *time,
938 uint32_t *lost_mtc, uint32_t *lost_cyc)
940 if (!decoder || !time)
943 return pt_time_query_tsc(time, lost_mtc, lost_cyc, &decoder->last_time);
946 int pt_qry_core_bus_ratio(struct pt_query_decoder *decoder, uint32_t *cbr)
948 if (!decoder || !cbr)
951 return pt_time_query_cbr(cbr, &decoder->last_time);
954 static int pt_qry_event_time(struct pt_event *event,
955 const struct pt_query_decoder *decoder)
959 if (!event || !decoder)
960 return -pte_internal;
962 errcode = pt_time_query_tsc(&event->tsc, &event->lost_mtc,
963 &event->lost_cyc, &decoder->time);
965 if (errcode != -pte_no_time)
973 int pt_qry_decode_unknown(struct pt_query_decoder *decoder)
975 struct pt_packet packet;
979 return -pte_internal;
981 size = pt_pkt_read_unknown(&packet, decoder->pos, &decoder->config);
985 decoder->pos += size;
989 int pt_qry_decode_pad(struct pt_query_decoder *decoder)
992 return -pte_internal;
994 decoder->pos += ptps_pad;
999 static int pt_qry_read_psb_header(struct pt_query_decoder *decoder)
1002 return -pte_internal;
1004 pt_last_ip_init(&decoder->ip);
1007 const struct pt_decoder_function *dfun;
1010 errcode = pt_df_fetch(&decoder->next, decoder->pos,
1015 dfun = decoder->next;
1017 return -pte_internal;
1019 /* We're done once we reach an psbend packet. */
1020 if (dfun->flags & pdff_psbend)
1024 return -pte_bad_context;
1026 errcode = dfun->header(decoder);
1032 int pt_qry_decode_psb(struct pt_query_decoder *decoder)
1038 return -pte_internal;
1042 size = pt_pkt_read_psb(pos, &decoder->config);
1046 decoder->pos += size;
1048 errcode = pt_qry_read_psb_header(decoder);
1050 /* Move back to the PSB so we have a chance to recover and
1051 * continue decoding.
1055 /* Clear any PSB+ events that have already been queued. */
1056 (void) pt_evq_clear(&decoder->evq, evb_psbend);
1058 /* Reset the decoder's decode function. */
1059 decoder->next = &pt_decode_psb;
1064 /* The next packet following the PSB header will be of type PSBEND.
1066 * Decoding this packet will publish the PSB events what have been
1067 * accumulated while reading the PSB header.
1072 static int pt_qry_event_ip(uint64_t *ip, struct pt_event *event,
1073 const struct pt_query_decoder *decoder)
1078 return -pte_internal;
1080 errcode = pt_last_ip_query(ip, &decoder->ip);
1082 switch (pt_errcode(errcode)) {
1084 case pte_ip_suppressed:
1085 event->ip_suppressed = 1;
1096 /* Decode a generic IP packet.
1098 * Returns the number of bytes read, on success.
1099 * Returns -pte_eos if the ip does not fit into the buffer.
1100 * Returns -pte_bad_packet if the ip compression is not known.
1102 static int pt_qry_decode_ip(struct pt_query_decoder *decoder)
1104 struct pt_packet_ip packet;
1108 return -pte_internal;
1110 size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
1114 errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
1118 /* We do not update the decoder's position, yet. */
1123 static int pt_qry_consume_tip(struct pt_query_decoder *decoder, int size)
1126 return -pte_internal;
1128 decoder->pos += size;
1132 static int pt_qry_event_tip(struct pt_event *ev,
1133 struct pt_query_decoder *decoder)
1135 if (!ev || !decoder)
1136 return -pte_internal;
1139 case ptev_async_branch:
1140 decoder->consume_packet = 1;
1142 return pt_qry_event_ip(&ev->variant.async_branch.to, ev,
1145 case ptev_async_paging:
1146 return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
1149 case ptev_async_vmcs:
1150 return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
1153 case ptev_exec_mode:
1154 return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev,
1161 return -pte_bad_context;
1164 int pt_qry_decode_tip(struct pt_query_decoder *decoder)
1166 struct pt_event *ev;
1170 return -pte_internal;
1172 size = pt_qry_decode_ip(decoder);
1176 /* Process any pending events binding to TIP. */
1177 ev = pt_evq_dequeue(&decoder->evq, evb_tip);
1179 errcode = pt_qry_event_tip(ev, decoder);
1183 /* Publish the event. */
1184 decoder->event = ev;
1186 /* Process further pending events. */
1187 if (pt_evq_pending(&decoder->evq, evb_tip))
1190 /* No further events.
1192 * If none of the events consumed the packet, we're done.
1194 if (!decoder->consume_packet)
1197 /* We're done with this packet. Clear the flag we set previously
1200 decoder->consume_packet = 0;
1203 return pt_qry_consume_tip(decoder, size);
1206 int pt_qry_decode_tnt_8(struct pt_query_decoder *decoder)
1208 struct pt_packet_tnt packet;
1212 return -pte_internal;
1214 size = pt_pkt_read_tnt_8(&packet, decoder->pos, &decoder->config);
1218 errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
1223 decoder->pos += size;
1227 int pt_qry_decode_tnt_64(struct pt_query_decoder *decoder)
1229 struct pt_packet_tnt packet;
1233 return -pte_internal;
1235 size = pt_pkt_read_tnt_64(&packet, decoder->pos, &decoder->config);
1239 errcode = pt_tnt_cache_update_tnt(&decoder->tnt, &packet,
1244 decoder->pos += size;
1248 static int pt_qry_consume_tip_pge(struct pt_query_decoder *decoder, int size)
1251 return -pte_internal;
1253 decoder->pos += size;
1257 static int pt_qry_event_tip_pge(struct pt_event *ev,
1258 const struct pt_query_decoder *decoder)
1261 return -pte_internal;
1264 case ptev_exec_mode:
1265 return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
1271 return -pte_bad_context;
1274 int pt_qry_decode_tip_pge(struct pt_query_decoder *decoder)
1276 struct pt_event *ev;
1280 return -pte_internal;
1282 size = pt_qry_decode_ip(decoder);
1286 /* We send the enable event first. This is more convenient for our users
1287 * and does not require them to either store or blindly apply other
1288 * events that might be pending.
1290 * We use the consume packet decoder flag to indicate this.
1292 if (!decoder->consume_packet) {
1293 /* This packet signals a standalone enabled event. */
1294 ev = pt_evq_standalone(&decoder->evq);
1296 return -pte_internal;
1298 ev->type = ptev_enabled;
1300 /* We can't afford having a suppressed IP here. */
1301 errcode = pt_last_ip_query(&ev->variant.enabled.ip,
1304 return -pte_bad_packet;
1306 errcode = pt_qry_event_time(ev, decoder);
1310 /* Discard any cached TNT bits.
1312 * They should have been consumed at the corresponding disable
1313 * event. If they have not, for whatever reason, discard them
1314 * now so our user does not get out of sync.
1316 pt_tnt_cache_init(&decoder->tnt);
1318 /* Process pending events next. */
1319 decoder->consume_packet = 1;
1320 decoder->enabled = 1;
1322 /* Process any pending events binding to TIP. */
1323 ev = pt_evq_dequeue(&decoder->evq, evb_tip);
1325 errcode = pt_qry_event_tip_pge(ev, decoder);
1331 /* We must have an event. Either the initial enable event or one of the
1335 return -pte_internal;
1337 /* Publish the event. */
1338 decoder->event = ev;
1340 /* Process further pending events. */
1341 if (pt_evq_pending(&decoder->evq, evb_tip))
1344 /* We must consume the packet. */
1345 if (!decoder->consume_packet)
1346 return -pte_internal;
1348 decoder->consume_packet = 0;
1350 return pt_qry_consume_tip_pge(decoder, size);
1353 static int pt_qry_consume_tip_pgd(struct pt_query_decoder *decoder, int size)
1356 return -pte_internal;
1358 decoder->enabled = 0;
1359 decoder->pos += size;
1363 static int pt_qry_event_tip_pgd(struct pt_event *ev,
1364 const struct pt_query_decoder *decoder)
1367 return -pte_internal;
1370 case ptev_async_branch: {
1373 /* Turn the async branch into an async disable. */
1374 at = ev->variant.async_branch.from;
1376 ev->type = ptev_async_disabled;
1377 ev->variant.async_disabled.at = at;
1379 return pt_qry_event_ip(&ev->variant.async_disabled.ip, ev,
1383 case ptev_async_paging:
1384 case ptev_async_vmcs:
1385 case ptev_exec_mode:
1386 /* These events are ordered after the async disable event. It
1387 * is not quite clear what IP to give them.
1389 * If we give them the async disable's source IP, we'd make an
1390 * error if the IP is updated when applying the async disable
1393 * If we give them the async disable's destination IP, we'd make
1394 * an error if the IP is not updated when applying the async
1395 * disable event. That's what our decoders do since tracing is
1396 * likely to resume from there.
1398 * In all cases, tracing will be disabled when those events are
1399 * applied, so we may as well suppress the IP.
1401 ev->ip_suppressed = 1;
1409 return -pte_bad_context;
1412 int pt_qry_decode_tip_pgd(struct pt_query_decoder *decoder)
1414 struct pt_event *ev;
1418 return -pte_internal;
1420 size = pt_qry_decode_ip(decoder);
1424 /* Process any pending events binding to TIP. */
1425 ev = pt_evq_dequeue(&decoder->evq, evb_tip);
1427 errcode = pt_qry_event_tip_pgd(ev, decoder);
1431 /* This packet signals a standalone disabled event. */
1432 ev = pt_evq_standalone(&decoder->evq);
1434 return -pte_internal;
1435 ev->type = ptev_disabled;
1437 errcode = pt_qry_event_ip(&ev->variant.disabled.ip, ev,
1442 errcode = pt_qry_event_time(ev, decoder);
1447 /* We must have an event. Either the initial enable event or one of the
1451 return -pte_internal;
1453 /* Publish the event. */
1454 decoder->event = ev;
1456 /* Process further pending events. */
1457 if (pt_evq_pending(&decoder->evq, evb_tip))
1460 return pt_qry_consume_tip_pgd(decoder, size);
1463 static int pt_qry_consume_fup(struct pt_query_decoder *decoder, int size)
1466 return -pte_internal;
1468 decoder->pos += size;
1472 static int scan_for_erratum_bdm70(struct pt_packet_decoder *decoder)
1475 struct pt_packet packet;
1478 errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
1480 /* Running out of packets is not an error. */
1481 if (errcode == -pte_eos)
1487 switch (packet.type) {
1489 /* All other packets cancel our search.
1491 * We do not enumerate those packets since we also
1492 * want to include new packets.
1497 /* We found it - the erratum applies. */
1511 /* Intentionally skip a few packets. */
1517 static int check_erratum_bdm70(const uint8_t *pos,
1518 const struct pt_config *config)
1520 struct pt_packet_decoder decoder;
1523 if (!pos || !config)
1524 return -pte_internal;
1526 errcode = pt_pkt_decoder_init(&decoder, config);
1530 errcode = pt_pkt_sync_set(&decoder, (uint64_t) (pos - config->begin));
1532 errcode = scan_for_erratum_bdm70(&decoder);
1534 pt_pkt_decoder_fini(&decoder);
1538 int pt_qry_header_fup(struct pt_query_decoder *decoder)
1540 struct pt_packet_ip packet;
1544 return -pte_internal;
1546 size = pt_pkt_read_ip(&packet, decoder->pos, &decoder->config);
1550 if (decoder->config.errata.bdm70 && !decoder->enabled) {
1551 errcode = check_erratum_bdm70(decoder->pos + size,
1557 return pt_qry_consume_fup(decoder, size);
1560 errcode = pt_last_ip_update_ip(&decoder->ip, &packet, &decoder->config);
1564 /* Tracing is enabled if we have an IP in the header. */
1565 if (packet.ipc != pt_ipc_suppressed)
1566 decoder->enabled = 1;
1568 return pt_qry_consume_fup(decoder, size);
1571 static int pt_qry_event_fup(struct pt_event *ev,
1572 struct pt_query_decoder *decoder)
1574 if (!ev || !decoder)
1575 return -pte_internal;
1579 decoder->consume_packet = 1;
1581 /* We can't afford having a suppressed IP here. */
1582 return pt_last_ip_query(&ev->variant.overflow.ip,
1586 if (!(ev->variant.tsx.aborted))
1587 decoder->consume_packet = 1;
1589 return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
1592 decoder->consume_packet = 1;
1594 return pt_qry_event_ip(&ev->variant.exstop.ip, ev, decoder);
1597 decoder->consume_packet = 1;
1599 return pt_qry_event_ip(&ev->variant.mwait.ip, ev, decoder);
1602 decoder->consume_packet = 1;
1604 return pt_qry_event_ip(&ev->variant.ptwrite.ip, ev, decoder);
1610 return -pte_internal;
1613 int pt_qry_decode_fup(struct pt_query_decoder *decoder)
1615 struct pt_event *ev;
1619 return -pte_internal;
1621 size = pt_qry_decode_ip(decoder);
1625 /* Process any pending events binding to FUP. */
1626 ev = pt_evq_dequeue(&decoder->evq, evb_fup);
1628 errcode = pt_qry_event_fup(ev, decoder);
1632 /* Publish the event. */
1633 decoder->event = ev;
1635 /* Process further pending events. */
1636 if (pt_evq_pending(&decoder->evq, evb_fup))
1639 /* No further events.
1641 * If none of the events consumed the packet, we're done.
1643 if (!decoder->consume_packet)
1646 /* We're done with this packet. Clear the flag we set previously
1649 decoder->consume_packet = 0;
1651 /* FUP indicates an async branch event; it binds to TIP.
1653 * We do need an IP in this case.
1657 errcode = pt_last_ip_query(&ip, &decoder->ip);
1661 ev = pt_evq_enqueue(&decoder->evq, evb_tip);
1665 ev->type = ptev_async_branch;
1666 ev->variant.async_branch.from = ip;
1668 errcode = pt_qry_event_time(ev, decoder);
1673 return pt_qry_consume_fup(decoder, size);
1676 int pt_qry_decode_pip(struct pt_query_decoder *decoder)
1678 struct pt_packet_pip packet;
1679 struct pt_event *event;
1683 return -pte_internal;
1685 size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
1689 /* Paging events are either standalone or bind to the same TIP packet
1690 * as an in-flight async branch event.
1692 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
1694 event = pt_evq_standalone(&decoder->evq);
1696 return -pte_internal;
1697 event->type = ptev_paging;
1698 event->variant.paging.cr3 = packet.cr3;
1699 event->variant.paging.non_root = packet.nr;
1701 decoder->event = event;
1703 event = pt_evq_enqueue(&decoder->evq, evb_tip);
1707 event->type = ptev_async_paging;
1708 event->variant.async_paging.cr3 = packet.cr3;
1709 event->variant.async_paging.non_root = packet.nr;
1712 errcode = pt_qry_event_time(event, decoder);
1716 decoder->pos += size;
1720 int pt_qry_header_pip(struct pt_query_decoder *decoder)
1722 struct pt_packet_pip packet;
1723 struct pt_event *event;
1727 return -pte_internal;
1729 size = pt_pkt_read_pip(&packet, decoder->pos, &decoder->config);
1733 /* Paging events are reported at the end of the PSB. */
1734 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
1738 event->type = ptev_async_paging;
1739 event->variant.async_paging.cr3 = packet.cr3;
1740 event->variant.async_paging.non_root = packet.nr;
1742 decoder->pos += size;
1746 static int pt_qry_event_psbend(struct pt_event *ev,
1747 struct pt_query_decoder *decoder)
1751 if (!ev || !decoder)
1752 return -pte_internal;
1754 /* PSB+ events are status updates. */
1755 ev->status_update = 1;
1757 errcode = pt_qry_event_time(ev, decoder);
1762 case ptev_async_paging:
1763 return pt_qry_event_ip(&ev->variant.async_paging.ip, ev,
1766 case ptev_exec_mode:
1767 return pt_qry_event_ip(&ev->variant.exec_mode.ip, ev, decoder);
1770 return pt_qry_event_ip(&ev->variant.tsx.ip, ev, decoder);
1772 case ptev_async_vmcs:
1773 return pt_qry_event_ip(&ev->variant.async_vmcs.ip, ev,
1780 /* Maintenance packets may appear anywhere. Do not mark them as
1781 * status updates even if they appear in PSB+.
1783 ev->status_update = 0;
1790 return -pte_internal;
1793 static int pt_qry_process_pending_psb_events(struct pt_query_decoder *decoder)
1795 struct pt_event *ev;
1799 return -pte_internal;
1801 ev = pt_evq_dequeue(&decoder->evq, evb_psbend);
1805 errcode = pt_qry_event_psbend(ev, decoder);
1809 /* Publish the event. */
1810 decoder->event = ev;
1812 /* Signal a pending event. */
1816 /* Create a standalone overflow event with tracing disabled.
1818 * Creates and published the event and disables tracing in @decoder.
1820 * Returns zero on success, a negative pt_error_code otherwise.
1822 static int pt_qry_event_ovf_disabled(struct pt_query_decoder *decoder)
1824 struct pt_event *ev;
1827 return -pte_internal;
1829 ev = pt_evq_standalone(&decoder->evq);
1831 return -pte_internal;
1833 ev->type = ptev_overflow;
1835 /* We suppress the IP to indicate that tracing has been disabled before
1836 * the overflow resolved. There can be several events before tracing is
1839 ev->ip_suppressed = 1;
1841 decoder->enabled = 0;
1842 decoder->event = ev;
1844 return pt_qry_event_time(ev, decoder);
1847 /* Queues an overflow event with tracing enabled.
1849 * Creates and enqueues the event and enables tracing in @decoder.
1851 * Returns zero on success, a negative pt_error_code otherwise.
1853 static int pt_qry_event_ovf_enabled(struct pt_query_decoder *decoder)
1855 struct pt_event *ev;
1858 return -pte_internal;
1860 ev = pt_evq_enqueue(&decoder->evq, evb_fup);
1862 return -pte_internal;
1864 ev->type = ptev_overflow;
1866 decoder->enabled = 1;
1868 return pt_qry_event_time(ev, decoder);
1871 /* Recover from SKD010.
1873 * Creates and publishes an overflow event at @packet's IP payload.
1875 * Further updates @decoder as follows:
1877 * - set time tracking to @time and @tcal
1878 * - set the position to @offset
1879 * - set ip to @packet's IP payload
1880 * - set tracing to be enabled
1882 * Returns zero on success, a negative error code otherwise.
1884 static int skd010_recover(struct pt_query_decoder *decoder,
1885 const struct pt_packet_ip *packet,
1886 const struct pt_time_cal *tcal,
1887 const struct pt_time *time, uint64_t offset)
1889 struct pt_last_ip ip;
1890 struct pt_event *ev;
1893 if (!decoder || !packet || !tcal || !time)
1894 return -pte_internal;
1896 /* We use the decoder's IP. It should be newly initialized. */
1899 /* Extract the IP payload from the packet. */
1900 errcode = pt_last_ip_update_ip(&ip, packet, &decoder->config);
1904 /* Synthesize the overflow event. */
1905 ev = pt_evq_standalone(&decoder->evq);
1907 return -pte_internal;
1909 ev->type = ptev_overflow;
1911 /* We do need a full IP. */
1912 errcode = pt_last_ip_query(&ev->variant.overflow.ip, &ip);
1914 return -pte_bad_context;
1916 /* We continue decoding at the given offset. */
1917 decoder->pos = decoder->config.begin + offset;
1919 /* Tracing is enabled. */
1920 decoder->enabled = 1;
1923 decoder->time = *time;
1924 decoder->tcal = *tcal;
1926 /* Publish the event. */
1927 decoder->event = ev;
1929 return pt_qry_event_time(ev, decoder);
1932 /* Recover from SKD010 with tracing disabled.
1934 * Creates and publishes a standalone overflow event.
1936 * Further updates @decoder as follows:
1938 * - set time tracking to @time and @tcal
1939 * - set the position to @offset
1940 * - set tracing to be disabled
1942 * Returns zero on success, a negative error code otherwise.
1944 static int skd010_recover_disabled(struct pt_query_decoder *decoder,
1945 const struct pt_time_cal *tcal,
1946 const struct pt_time *time, uint64_t offset)
1948 if (!decoder || !tcal || !time)
1949 return -pte_internal;
1951 decoder->time = *time;
1952 decoder->tcal = *tcal;
1954 /* We continue decoding at the given offset. */
1955 decoder->pos = decoder->config.begin + offset;
1957 return pt_qry_event_ovf_disabled(decoder);
1960 /* Scan ahead for a packet at which to resume after an overflow.
1962 * This function is called after an OVF without a corresponding FUP. This
1963 * normally means that the overflow resolved while tracing was disabled.
1965 * With erratum SKD010 it might also mean that the FUP (or TIP.PGE) was dropped.
1966 * The overflow thus resolved while tracing was enabled (or tracing was enabled
1967 * after the overflow resolved). Search for an indication whether tracing is
1968 * enabled or disabled by scanning upcoming packets.
1970 * If we can confirm that tracing is disabled, the erratum does not apply and we
1971 * can continue normally.
1973 * If we can confirm that tracing is enabled, the erratum applies and we try to
1974 * recover by synchronizing at a later packet and a different IP. If we can't
1975 * recover, pretend the erratum didn't apply so we run into the error later.
1976 * Since this assumes that tracing is disabled, no harm should be done, i.e. no
1977 * bad trace should be generated.
1979 * Returns zero if the overflow is handled.
1980 * Returns a positive value if the overflow is not yet handled.
1981 * Returns a negative error code otherwise.
1983 static int skd010_scan_for_ovf_resume(struct pt_packet_decoder *pkt,
1984 struct pt_query_decoder *decoder)
1986 struct pt_time_cal tcal;
1987 struct pt_time time;
1989 struct pt_time_cal tcal;
1990 struct pt_time time;
1996 return -pte_internal;
1998 /* Keep track of time as we skip packets. */
1999 time = decoder->time;
2000 tcal = decoder->tcal;
2002 /* Keep track of a potential recovery point at MODE.TSX. */
2003 memset(&mode_tsx, 0, sizeof(mode_tsx));
2006 struct pt_packet packet;
2009 errcode = pt_pkt_get_offset(pkt, &offset);
2013 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2015 /* Let's assume the trace is correct if we run out
2018 if (errcode == -pte_eos)
2024 switch (packet.type) {
2026 /* Everything is fine. There is nothing to do. */
2030 /* This is a clear indication that the erratum
2033 * We synchronize after the disable.
2035 return skd010_recover_disabled(decoder, &tcal, &time,
2036 offset + packet.size);
2040 /* This is a clear indication that the erratum
2043 * Yet, we can't recover from it as we wouldn't know how
2044 * many TNT bits will have been used when we eventually
2045 * find an IP packet at which to resume tracing.
2051 /* We could track those changes and synthesize extra
2052 * events after the overflow event when recovering from
2053 * the erratum. This requires infrastructure that we
2054 * don't currently have, though, so we're not going to
2057 * Instead, we ignore those changes. We already don't
2058 * know how many other changes were lost in the
2064 switch (packet.payload.mode.leaf) {
2066 /* A MODE.EXEC packet binds to TIP, i.e.
2068 * TIP.PGE: everything is fine
2069 * TIP: the erratum applies
2071 * In the TIP.PGE case, we may just follow the
2074 * In the TIP case, we'd be able to re-sync at
2075 * the TIP IP but have to skip packets up to and
2076 * including the TIP.
2078 * We'd need to synthesize the MODE.EXEC event
2079 * after the overflow event when recovering at
2080 * the TIP. We lack the infrastructure for this
2081 * - it's getting too complicated.
2083 * Instead, we ignore the execution mode change;
2084 * we already don't know how many more such
2085 * changes were lost in the overflow.
2090 /* A MODE.TSX packet may be standalone or bind
2093 * If this is the second MODE.TSX, we're sure
2094 * that tracing is disabled and everything is
2097 if (mode_tsx.offset)
2100 /* If we find the FUP this packet binds to, we
2101 * may recover at the FUP IP and restart
2102 * processing packets from here. Remember the
2105 mode_tsx.offset = offset;
2106 mode_tsx.time = time;
2107 mode_tsx.tcal = tcal;
2115 /* This is a pretty good indication that tracing
2116 * is indeed enabled and the erratum applies.
2119 /* If we got a MODE.TSX packet before, we synchronize at
2120 * the FUP IP but continue decoding packets starting
2121 * from the MODE.TSX.
2123 if (mode_tsx.offset)
2124 return skd010_recover(decoder,
2130 /* Without a preceding MODE.TSX, this FUP is the start
2131 * of an async branch or disable. We synchronize at the
2132 * FUP IP and continue decoding packets from here.
2134 return skd010_recover(decoder, &packet.payload.ip,
2135 &tcal, &time, offset);
2138 /* We syhchronize at the TIP IP and continue decoding
2139 * packets after the TIP packet.
2141 return skd010_recover(decoder, &packet.payload.ip,
2143 offset + packet.size);
2146 /* We reached a synchronization point. Tracing is
2147 * enabled if and only if the PSB+ contains a FUP.
2149 errcode = pt_qry_find_header_fup(&packet, pkt);
2151 /* If we ran out of packets, we can't tell.
2152 * Let's assume the trace is correct.
2154 if (errcode == -pte_eos)
2160 /* If there is no FUP, tracing is disabled and
2161 * everything is fine.
2166 /* We should have a FUP. */
2167 if (packet.type != ppt_fup)
2168 return -pte_internal;
2170 /* Otherwise, we may synchronize at the FUP IP and
2171 * continue decoding packets at the PSB.
2173 return skd010_recover(decoder, &packet.payload.ip,
2174 &tcal, &time, offset);
2177 /* We shouldn't see this. */
2178 return -pte_bad_context;
2182 /* It doesn't matter if it had been enabled or disabled
2183 * before. We may resume normally.
2189 /* We can't skip this packet. */
2196 /* Ignore this packet. */
2200 /* We may skip a stand-alone EXSTOP. */
2201 if (!packet.payload.exstop.ip)
2206 /* To skip this packet, we'd need to take care of the
2207 * FUP it binds to. This is getting complicated.
2212 /* We may skip a stand-alone PTW. */
2213 if (!packet.payload.ptw.ip)
2216 /* To skip this packet, we'd need to take care of the
2217 * FUP it binds to. This is getting complicated.
2222 /* Keep track of time. */
2223 errcode = pt_qry_apply_tsc(&time, &tcal,
2224 &packet.payload.tsc,
2232 /* Keep track of time. */
2233 errcode = pt_qry_apply_cbr(&time, &tcal,
2234 &packet.payload.cbr,
2242 /* Keep track of time. */
2243 errcode = pt_qry_apply_tma(&time, &tcal,
2244 &packet.payload.tma,
2252 /* Keep track of time. */
2253 errcode = pt_qry_apply_mtc(&time, &tcal,
2254 &packet.payload.mtc,
2262 /* Keep track of time. */
2263 errcode = pt_qry_apply_cyc(&time, &tcal,
2264 &packet.payload.cyc,
2274 static int pt_qry_handle_skd010(struct pt_query_decoder *decoder)
2276 struct pt_packet_decoder pkt;
2281 return -pte_internal;
2283 errcode = pt_qry_get_offset(decoder, &offset);
2287 errcode = pt_pkt_decoder_init(&pkt, &decoder->config);
2291 errcode = pt_pkt_sync_set(&pkt, offset);
2293 errcode = skd010_scan_for_ovf_resume(&pkt, decoder);
2295 pt_pkt_decoder_fini(&pkt);
2299 /* Scan ahead for an indication whether tracing is enabled or disabled.
2301 * Returns zero if tracing is clearly disabled.
2302 * Returns a positive integer if tracing is enabled or if we can't tell.
2303 * Returns a negative error code otherwise.
2305 static int apl12_tracing_is_disabled(struct pt_packet_decoder *decoder)
2308 return -pte_internal;
2311 struct pt_packet packet;
2314 status = pt_pkt_next(decoder, &packet, sizeof(packet));
2316 /* Running out of packets is not an error. */
2317 if (status == -pte_eos)
2323 switch (packet.type) {
2325 /* Skip other packets. */
2329 /* Tracing is disabled before a stop. */
2333 /* Tracing gets enabled - it must have been disabled. */
2340 /* Those packets are only generated when tracing is
2341 * enabled. We're done.
2346 /* We reached a synchronization point. Tracing is
2347 * enabled if and only if the PSB+ contains a FUP.
2349 status = pt_qry_find_header_fup(&packet, decoder);
2351 /* If we ran out of packets, we can't tell. */
2352 if (status == -pte_eos)
2358 /* We shouldn't see this. */
2359 return -pte_bad_context;
2362 /* It doesn't matter - we run into the next overflow. */
2367 /* We can't skip this packet. */
2373 /* Apply workaround for erratum APL12.
2375 * We resume from @offset (relative to @decoder->pos) with tracing disabled. On
2376 * our way to the resume location we process packets to update our state.
2378 * Any event will be dropped.
2380 * Returns zero on success, a negative pt_error_code otherwise.
2382 static int apl12_resume_disabled(struct pt_query_decoder *decoder,
2383 struct pt_packet_decoder *pkt,
2384 unsigned int offset)
2386 uint64_t begin, end;
2390 return -pte_internal;
2392 errcode = pt_qry_get_offset(decoder, &begin);
2396 errcode = pt_pkt_sync_set(pkt, begin);
2400 end = begin + offset;
2402 struct pt_packet packet;
2405 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2407 /* Running out of packets is not an error. */
2408 if (errcode == -pte_eos)
2414 /* The offset is the start of the next packet. */
2415 errcode = pt_pkt_get_offset(pkt, &next);
2419 /* We're done when we reach @offset.
2421 * The current @packet will be the FUP after which we started
2422 * our search. We skip it.
2424 * Check that we're not accidentally proceeding past @offset.
2428 return -pte_internal;
2433 switch (packet.type) {
2435 /* Skip other packets. */
2441 /* We should not encounter those.
2443 * We should not encounter a lot of packets but those
2444 * are state-relevant; let's check them explicitly.
2446 return -pte_internal;
2449 /* Keep track of time. */
2450 errcode = pt_qry_apply_tsc(&decoder->time,
2452 &packet.payload.tsc,
2460 /* Keep track of time. */
2461 errcode = pt_qry_apply_cbr(&decoder->time,
2463 &packet.payload.cbr,
2471 /* Keep track of time. */
2472 errcode = pt_qry_apply_tma(&decoder->time,
2474 &packet.payload.tma,
2482 /* Keep track of time. */
2483 errcode = pt_qry_apply_mtc(&decoder->time,
2485 &packet.payload.mtc,
2493 /* Keep track of time. */
2494 errcode = pt_qry_apply_cyc(&decoder->time,
2496 &packet.payload.cyc,
2505 decoder->pos += offset;
2507 return pt_qry_event_ovf_disabled(decoder);
2510 /* Handle erratum APL12.
2512 * This function is called when a FUP is found after an OVF. The @offset
2513 * argument gives the relative offset from @decoder->pos to after the FUP.
2515 * A FUP after OVF normally indicates that the overflow resolved while tracing
2516 * is enabled. Due to erratum APL12, however, the overflow may have resolved
2517 * while tracing is disabled and still generate a FUP.
2519 * We scan ahead for an indication whether tracing is actually disabled. If we
2520 * find one, the erratum applies and we proceed from after the FUP packet.
2522 * This will drop any CBR or MTC events. We will update @decoder's timing state
2523 * on CBR but drop the event.
2525 * Returns zero if the erratum was handled.
2526 * Returns a positive integer if the erratum was not handled.
2527 * Returns a negative pt_error_code otherwise.
2529 static int pt_qry_handle_apl12(struct pt_query_decoder *decoder,
2530 unsigned int offset)
2532 struct pt_packet_decoder pkt;
2537 return -pte_internal;
2539 status = pt_qry_get_offset(decoder, &here);
2543 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2547 status = pt_pkt_sync_set(&pkt, here + offset);
2549 status = apl12_tracing_is_disabled(&pkt);
2551 status = apl12_resume_disabled(decoder, &pkt, offset);
2554 pt_pkt_decoder_fini(&pkt);
2558 /* Apply workaround for erratum APL11.
2560 * We search for a TIP.PGD and, if we found one, resume from after that packet
2561 * with tracing disabled. On our way to the resume location we process packets
2562 * to update our state.
2564 * If we don't find a TIP.PGD but instead some other packet that indicates that
2565 * tracing is disabled, indicate that the erratum does not apply.
2567 * Any event will be dropped.
2569 * Returns zero if the erratum was handled.
2570 * Returns a positive integer if the erratum was not handled.
2571 * Returns a negative pt_error_code otherwise.
2573 static int apl11_apply(struct pt_query_decoder *decoder,
2574 struct pt_packet_decoder *pkt)
2576 struct pt_time_cal tcal;
2577 struct pt_time time;
2580 return -pte_internal;
2582 time = decoder->time;
2583 tcal = decoder->tcal;
2585 struct pt_packet packet;
2588 errcode = pt_pkt_next(pkt, &packet, sizeof(packet));
2592 switch (packet.type) {
2596 /* We found a TIP.PGD. The erratum applies.
2598 * Resume from here with tracing disabled.
2600 errcode = pt_pkt_get_offset(pkt, &offset);
2604 decoder->time = time;
2605 decoder->tcal = tcal;
2606 decoder->pos = decoder->config.begin + offset;
2608 return pt_qry_event_ovf_disabled(decoder);
2612 return -pte_bad_opc;
2627 /* The erratum does not apply. */
2633 /* Skip those packets. */
2640 return -pte_bad_context;
2644 /* Keep track of time. */
2645 errcode = pt_qry_apply_tsc(&time, &tcal,
2646 &packet.payload.tsc,
2654 /* Keep track of time. */
2655 errcode = pt_qry_apply_cbr(&time, &tcal,
2656 &packet.payload.cbr,
2664 /* Keep track of time. */
2665 errcode = pt_qry_apply_tma(&time, &tcal,
2666 &packet.payload.tma,
2674 /* Keep track of time. */
2675 errcode = pt_qry_apply_mtc(&time, &tcal,
2676 &packet.payload.mtc,
2684 /* Keep track of time. */
2685 errcode = pt_qry_apply_cyc(&time, &tcal,
2686 &packet.payload.cyc,
2696 /* Handle erratum APL11.
2698 * This function is called when we diagnose a bad packet while searching for a
2701 * Due to erratum APL11 we may get an extra TIP.PGD after the OVF. Find that
2702 * TIP.PGD and resume from there with tracing disabled.
2704 * This will drop any CBR or MTC events. We will update @decoder's timing state
2705 * on CBR but drop the event.
2707 * Returns zero if the erratum was handled.
2708 * Returns a positive integer if the erratum was not handled.
2709 * Returns a negative pt_error_code otherwise.
2711 static int pt_qry_handle_apl11(struct pt_query_decoder *decoder)
2713 struct pt_packet_decoder pkt;
2718 return -pte_internal;
2720 status = pt_qry_get_offset(decoder, &offset);
2724 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2728 status = pt_pkt_sync_set(&pkt, offset);
2730 status = apl11_apply(decoder, &pkt);
2732 pt_pkt_decoder_fini(&pkt);
/* Scan forward for a FUP to which the preceding OVF may bind.
 *
 * NOTE(review): this block appears to have lost lines in extraction (the scan
 * loop body, most case labels, and closing braces are missing - the embedded
 * original line numbers 2736..2782 are non-contiguous).  The surviving lines
 * are kept unchanged below; reconstruct against the upstream source before
 * building.
 */
2736 static int pt_pkt_find_ovf_fup(struct pt_packet_decoder *decoder)
/* One packet per iteration of the scan loop. */
2739 struct pt_packet packet;
2742 errcode = pt_pkt_next(decoder, &packet, sizeof(packet));
2746 switch (packet.type) {
/* An invalid packet aborts the search. */
2751 return -pte_bad_opc;
/* Packets that must not occur between OVF and its binding FUP. */
2782 return -pte_bad_context;
2787 /* Find a FUP to which the current OVF may bind.
2789 * Scans the trace for a FUP or for a packet that indicates that tracing is
2792 * Return the relative offset of the packet following the found FUP on success.
2793 * Returns zero if no FUP is found and tracing is assumed to be disabled.
2794 * Returns a negative pt_error_code otherwise.
2796 static int pt_qry_find_ovf_fup(const struct pt_query_decoder *decoder)
2798 struct pt_packet_decoder pkt;
2799 uint64_t begin, end, offset;
2803 return -pte_internal;
2805 status = pt_qry_get_offset(decoder, &begin);
2809 status = pt_pkt_decoder_init(&pkt, &decoder->config);
2813 status = pt_pkt_sync_set(&pkt, begin);
2815 status = pt_pkt_find_ovf_fup(&pkt);
2817 status = pt_pkt_get_offset(&pkt, &end);
2822 return -pte_overflow;
2824 offset = end - begin;
2825 if (INT_MAX < offset)
2826 return -pte_overflow;
2828 status = (int) offset;
2832 pt_pkt_decoder_fini(&pkt);
2836 int pt_qry_decode_ovf(struct pt_query_decoder *decoder)
2838 struct pt_time time;
2842 return -pte_internal;
2844 status = pt_qry_process_pending_psb_events(decoder);
2848 /* If we have any pending psbend events, we're done for now. */
2852 /* Reset the decoder state but preserve timing. */
2853 time = decoder->time;
2854 pt_qry_reset(decoder);
2855 decoder->time = time;
2857 /* We must consume the OVF before we search for the binding packet. */
2858 decoder->pos += ptps_ovf;
2860 /* Overflow binds to either FUP or TIP.PGE.
2862 * If the overflow can be resolved while PacketEn=1 it binds to FUP. We
2863 * can see timing packets between OVF anf FUP but that's it.
2865 * Otherwise, PacketEn will be zero when the overflow resolves and OVF
2866 * binds to TIP.PGE. There can be packets between OVF and TIP.PGE that
2867 * do not depend on PacketEn.
2869 * We don't need to decode everything until TIP.PGE, however. As soon
2870 * as we see a non-timing non-FUP packet, we know that tracing has been
2871 * disabled before the overflow resolves.
2873 offset = pt_qry_find_ovf_fup(decoder);
2875 /* Check for erratum SKD010.
2877 * The FUP may have been dropped. If we can figure out that
2878 * tracing is enabled and hence the FUP is missing, we resume
2879 * at a later packet and a different IP.
2881 if (decoder->config.errata.skd010) {
2882 status = pt_qry_handle_skd010(decoder);
2887 /* Check for erratum APL11.
2889 * We may have gotten an extra TIP.PGD, which should be
2890 * diagnosed by our search for a subsequent FUP.
2892 if (decoder->config.errata.apl11 &&
2893 (offset == -pte_bad_context)) {
2894 status = pt_qry_handle_apl11(decoder);
2899 /* Report the original error from searching for the FUP packet
2900 * if we were not able to fix the trace.
2902 * We treat an overflow at the end of the trace as standalone.
2904 if (offset < 0 && offset != -pte_eos)
2907 return pt_qry_event_ovf_disabled(decoder);
2909 /* Check for erratum APL12.
2911 * We may get an extra FUP even though the overflow resolved
2912 * with tracing disabled.
2914 if (decoder->config.errata.apl12) {
2915 status = pt_qry_handle_apl12(decoder,
2916 (unsigned int) offset);
2921 return pt_qry_event_ovf_enabled(decoder);
2925 static int pt_qry_decode_mode_exec(struct pt_query_decoder *decoder,
2926 const struct pt_packet_mode_exec *packet)
2928 struct pt_event *event;
2930 if (!decoder || !packet)
2931 return -pte_internal;
2933 /* MODE.EXEC binds to TIP. */
2934 event = pt_evq_enqueue(&decoder->evq, evb_tip);
2938 event->type = ptev_exec_mode;
2939 event->variant.exec_mode.mode = pt_get_exec_mode(packet);
2941 return pt_qry_event_time(event, decoder);
2944 static int pt_qry_decode_mode_tsx(struct pt_query_decoder *decoder,
2945 const struct pt_packet_mode_tsx *packet)
2947 struct pt_event *event;
2949 if (!decoder || !packet)
2950 return -pte_internal;
2952 /* MODE.TSX is standalone if tracing is disabled. */
2953 if (!decoder->enabled) {
2954 event = pt_evq_standalone(&decoder->evq);
2956 return -pte_internal;
2958 /* We don't have an IP in this case. */
2959 event->variant.tsx.ip = 0;
2960 event->ip_suppressed = 1;
2962 /* Publish the event. */
2963 decoder->event = event;
2965 /* MODE.TSX binds to FUP. */
2966 event = pt_evq_enqueue(&decoder->evq, evb_fup);
2971 event->type = ptev_tsx;
2972 event->variant.tsx.speculative = packet->intx;
2973 event->variant.tsx.aborted = packet->abrt;
2975 return pt_qry_event_time(event, decoder);
2978 int pt_qry_decode_mode(struct pt_query_decoder *decoder)
2980 struct pt_packet_mode packet;
2984 return -pte_internal;
2986 size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
2991 switch (packet.leaf) {
2993 errcode = pt_qry_decode_mode_exec(decoder, &packet.bits.exec);
2997 errcode = pt_qry_decode_mode_tsx(decoder, &packet.bits.tsx);
3004 decoder->pos += size;
3008 int pt_qry_header_mode(struct pt_query_decoder *decoder)
3010 struct pt_packet_mode packet;
3011 struct pt_event *event;
3015 return -pte_internal;
3017 size = pt_pkt_read_mode(&packet, decoder->pos, &decoder->config);
3021 /* Inside the header, events are reported at the end. */
3022 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3026 switch (packet.leaf) {
3028 event->type = ptev_exec_mode;
3029 event->variant.exec_mode.mode =
3030 pt_get_exec_mode(&packet.bits.exec);
3034 event->type = ptev_tsx;
3035 event->variant.tsx.speculative = packet.bits.tsx.intx;
3036 event->variant.tsx.aborted = packet.bits.tsx.abrt;
3040 decoder->pos += size;
3044 int pt_qry_decode_psbend(struct pt_query_decoder *decoder)
3049 return -pte_internal;
3051 status = pt_qry_process_pending_psb_events(decoder);
3055 /* If we had any psb events, we're done for now. */
3059 /* Skip the psbend extended opcode that we fetched before if no more
3060 * psbend events are pending.
3062 decoder->pos += ptps_psbend;
3066 int pt_qry_decode_tsc(struct pt_query_decoder *decoder)
3068 struct pt_packet_tsc packet;
3072 return -pte_internal;
3074 size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
3078 errcode = pt_qry_apply_tsc(&decoder->time, &decoder->tcal,
3079 &packet, &decoder->config);
3083 decoder->pos += size;
3087 int pt_qry_header_tsc(struct pt_query_decoder *decoder)
3089 struct pt_packet_tsc packet;
3093 return -pte_internal;
3095 size = pt_pkt_read_tsc(&packet, decoder->pos, &decoder->config);
3099 errcode = pt_qry_apply_header_tsc(&decoder->time, &decoder->tcal,
3100 &packet, &decoder->config);
3104 decoder->pos += size;
3108 int pt_qry_decode_cbr(struct pt_query_decoder *decoder)
3110 struct pt_packet_cbr packet;
3111 struct pt_event *event;
3115 return -pte_internal;
3117 size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
3121 errcode = pt_qry_apply_cbr(&decoder->time, &decoder->tcal,
3122 &packet, &decoder->config);
3126 event = pt_evq_standalone(&decoder->evq);
3128 return -pte_internal;
3130 event->type = ptev_cbr;
3131 event->variant.cbr.ratio = packet.ratio;
3133 decoder->event = event;
3135 errcode = pt_qry_event_time(event, decoder);
3139 decoder->pos += size;
3143 int pt_qry_header_cbr(struct pt_query_decoder *decoder)
3145 struct pt_packet_cbr packet;
3146 struct pt_event *event;
3150 return -pte_internal;
3152 size = pt_pkt_read_cbr(&packet, decoder->pos, &decoder->config);
3156 errcode = pt_qry_apply_header_cbr(&decoder->time, &decoder->tcal,
3157 &packet, &decoder->config);
3161 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3165 event->type = ptev_cbr;
3166 event->variant.cbr.ratio = packet.ratio;
3168 decoder->pos += size;
3172 int pt_qry_decode_tma(struct pt_query_decoder *decoder)
3174 struct pt_packet_tma packet;
3178 return -pte_internal;
3180 size = pt_pkt_read_tma(&packet, decoder->pos, &decoder->config);
3184 errcode = pt_qry_apply_tma(&decoder->time, &decoder->tcal,
3185 &packet, &decoder->config);
3189 decoder->pos += size;
3193 int pt_qry_decode_mtc(struct pt_query_decoder *decoder)
3195 struct pt_packet_mtc packet;
3199 return -pte_internal;
3201 size = pt_pkt_read_mtc(&packet, decoder->pos, &decoder->config);
3205 errcode = pt_qry_apply_mtc(&decoder->time, &decoder->tcal,
3206 &packet, &decoder->config);
3210 decoder->pos += size;
3214 static int check_erratum_skd007(struct pt_query_decoder *decoder,
3215 const struct pt_packet_cyc *packet, int size)
3220 if (!decoder || !packet || size < 0)
3221 return -pte_internal;
3223 /* It must be a 2-byte CYC. */
3227 payload = (uint16_t) packet->value;
3229 /* The 2nd byte of the CYC payload must look like an ext opcode. */
3230 if ((payload & ~0x1f) != 0x20)
3233 /* Skip this CYC packet. */
3234 pos = decoder->pos + size;
3235 if (decoder->config.end <= pos)
3238 /* See if we got a second CYC that looks like an OVF ext opcode. */
3239 if (*pos != pt_ext_ovf)
3242 /* We shouldn't get back-to-back CYCs unless they are sent when the
3243 * counter wraps around. In this case, we'd expect a full payload.
3245 * Since we got two non-full CYC packets, we assume the erratum hit.
3251 int pt_qry_decode_cyc(struct pt_query_decoder *decoder)
3253 struct pt_packet_cyc packet;
3254 struct pt_config *config;
3258 return -pte_internal;
3260 config = &decoder->config;
3262 size = pt_pkt_read_cyc(&packet, decoder->pos, config);
3266 if (config->errata.skd007) {
3267 errcode = check_erratum_skd007(decoder, &packet, size);
3271 /* If the erratum hits, we ignore the partial CYC and instead
3272 * process the OVF following/overlapping it.
3275 /* We skip the first byte of the CYC, which brings us
3276 * to the beginning of the OVF packet.
3283 errcode = pt_qry_apply_cyc(&decoder->time, &decoder->tcal,
3288 decoder->pos += size;
3292 int pt_qry_decode_stop(struct pt_query_decoder *decoder)
3294 struct pt_event *event;
3298 return -pte_internal;
3300 /* Stop events are reported immediately. */
3301 event = pt_evq_standalone(&decoder->evq);
3303 return -pte_internal;
3305 event->type = ptev_stop;
3307 decoder->event = event;
3309 errcode = pt_qry_event_time(event, decoder);
3313 decoder->pos += ptps_stop;
3317 int pt_qry_header_vmcs(struct pt_query_decoder *decoder)
3319 struct pt_packet_vmcs packet;
3320 struct pt_event *event;
3324 return -pte_internal;
3326 size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
3330 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3334 event->type = ptev_async_vmcs;
3335 event->variant.async_vmcs.base = packet.base;
3337 decoder->pos += size;
3341 int pt_qry_decode_vmcs(struct pt_query_decoder *decoder)
3343 struct pt_packet_vmcs packet;
3344 struct pt_event *event;
3348 return -pte_internal;
3350 size = pt_pkt_read_vmcs(&packet, decoder->pos, &decoder->config);
3354 /* VMCS events bind to the same IP as an in-flight async paging event.
3356 * In that case, the VMCS event should be applied first. We reorder
3357 * events here to simplify the life of higher layers.
3359 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_paging);
3361 struct pt_event *paging;
3363 paging = pt_evq_enqueue(&decoder->evq, evb_tip);
3369 event->type = ptev_async_vmcs;
3370 event->variant.async_vmcs.base = packet.base;
3372 decoder->pos += size;
3376 /* VMCS events bind to the same TIP packet as an in-flight async
3379 event = pt_evq_find(&decoder->evq, evb_tip, ptev_async_branch);
3381 event = pt_evq_enqueue(&decoder->evq, evb_tip);
3385 event->type = ptev_async_vmcs;
3386 event->variant.async_vmcs.base = packet.base;
3388 decoder->pos += size;
3392 /* VMCS events that do not bind to an in-flight async event are
3395 event = pt_evq_standalone(&decoder->evq);
3397 return -pte_internal;
3399 event->type = ptev_vmcs;
3400 event->variant.vmcs.base = packet.base;
3402 decoder->event = event;
3404 errcode = pt_qry_event_time(event, decoder);
3408 decoder->pos += size;
3412 int pt_qry_decode_mnt(struct pt_query_decoder *decoder)
3414 struct pt_packet_mnt packet;
3415 struct pt_event *event;
3419 return -pte_internal;
3421 size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
3425 event = pt_evq_standalone(&decoder->evq);
3427 return -pte_internal;
3429 event->type = ptev_mnt;
3430 event->variant.mnt.payload = packet.payload;
3432 decoder->event = event;
3434 errcode = pt_qry_event_time(event, decoder);
3438 decoder->pos += size;
3443 int pt_qry_header_mnt(struct pt_query_decoder *decoder)
3445 struct pt_packet_mnt packet;
3446 struct pt_event *event;
3450 return -pte_internal;
3452 size = pt_pkt_read_mnt(&packet, decoder->pos, &decoder->config);
3456 event = pt_evq_enqueue(&decoder->evq, evb_psbend);
3460 event->type = ptev_mnt;
3461 event->variant.mnt.payload = packet.payload;
3463 decoder->pos += size;
3468 int pt_qry_decode_exstop(struct pt_query_decoder *decoder)
3470 struct pt_packet_exstop packet;
3471 struct pt_event *event;
3475 return -pte_internal;
3477 size = pt_pkt_read_exstop(&packet, decoder->pos, &decoder->config);
3482 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3484 return -pte_internal;
3486 event->type = ptev_exstop;
3488 event = pt_evq_standalone(&decoder->evq);
3490 return -pte_internal;
3492 event->type = ptev_exstop;
3494 event->ip_suppressed = 1;
3495 event->variant.exstop.ip = 0ull;
3497 decoder->event = event;
3500 decoder->pos += size;
3504 int pt_qry_decode_mwait(struct pt_query_decoder *decoder)
3506 struct pt_packet_mwait packet;
3507 struct pt_event *event;
3511 return -pte_internal;
3513 size = pt_pkt_read_mwait(&packet, decoder->pos, &decoder->config);
3517 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3519 return -pte_internal;
3521 event->type = ptev_mwait;
3522 event->variant.mwait.hints = packet.hints;
3523 event->variant.mwait.ext = packet.ext;
3525 decoder->pos += size;
3529 int pt_qry_decode_pwre(struct pt_query_decoder *decoder)
3531 struct pt_packet_pwre packet;
3532 struct pt_event *event;
3536 return -pte_internal;
3538 size = pt_pkt_read_pwre(&packet, decoder->pos, &decoder->config);
3542 event = pt_evq_standalone(&decoder->evq);
3544 return -pte_internal;
3546 event->type = ptev_pwre;
3547 event->variant.pwre.state = packet.state;
3548 event->variant.pwre.sub_state = packet.sub_state;
3551 event->variant.pwre.hw = 1;
3553 decoder->event = event;
3555 decoder->pos += size;
3559 int pt_qry_decode_pwrx(struct pt_query_decoder *decoder)
3561 struct pt_packet_pwrx packet;
3562 struct pt_event *event;
3566 return -pte_internal;
3568 size = pt_pkt_read_pwrx(&packet, decoder->pos, &decoder->config);
3572 event = pt_evq_standalone(&decoder->evq);
3574 return -pte_internal;
3576 event->type = ptev_pwrx;
3577 event->variant.pwrx.last = packet.last;
3578 event->variant.pwrx.deepest = packet.deepest;
3580 if (packet.interrupt)
3581 event->variant.pwrx.interrupt = 1;
3583 event->variant.pwrx.store = 1;
3584 if (packet.autonomous)
3585 event->variant.pwrx.autonomous = 1;
3587 decoder->event = event;
3589 decoder->pos += size;
3593 int pt_qry_decode_ptw(struct pt_query_decoder *decoder)
3595 struct pt_packet_ptw packet;
3596 struct pt_event *event;
3600 return -pte_internal;
3602 size = pt_pkt_read_ptw(&packet, decoder->pos, &decoder->config);
3606 pls = pt_ptw_size(packet.plc);
3611 event = pt_evq_enqueue(&decoder->evq, evb_fup);
3613 return -pte_internal;
3615 event = pt_evq_standalone(&decoder->evq);
3617 return -pte_internal;
3619 event->ip_suppressed = 1;
3621 decoder->event = event;
3624 event->type = ptev_ptwrite;
3625 event->variant.ptwrite.size = (uint8_t) pls;
3626 event->variant.ptwrite.payload = packet.payload;
3628 decoder->pos += size;