//===----- ompt-specific.cpp -- OMPT internal functions -------------------===//
5 //===----------------------------------------------------------------------===//
7 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
8 // See https://llvm.org/LICENSE.txt for license information.
9 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
11 //===----------------------------------------------------------------------===//
13 //******************************************************************************
15 //******************************************************************************
18 #include "ompt-specific.h"
25 #define THREAD_LOCAL __declspec(thread)
27 #define THREAD_LOCAL __thread
30 #define OMPT_WEAK_ATTRIBUTE KMP_WEAK_ATTRIBUTE
32 //******************************************************************************
34 //******************************************************************************
36 #define LWT_FROM_TEAM(team) (team)->t.ompt_serialized_team_info
38 #define OMPT_THREAD_ID_BITS 16
40 //******************************************************************************
42 //******************************************************************************
44 //----------------------------------------------------------
45 // traverse the team and task hierarchy
46 // note: __ompt_get_teaminfo and __ompt_get_task_info_object
47 // traverse the hierarchy similarly and need to be
49 //----------------------------------------------------------
51 ompt_team_info_t *__ompt_get_teaminfo(int depth, int *size) {
52 kmp_info_t *thr = ompt_get_thread();
55 kmp_team *team = thr->th.th_team;
59 ompt_lw_taskteam_t *next_lwt = LWT_FROM_TEAM(team), *lwt = NULL;
62 // next lightweight team (if any)
66 // next heavyweight team (if any) after
67 // lightweight teams are exhausted
73 team = team->t.t_parent;
75 next_lwt = LWT_FROM_TEAM(team);
84 // lightweight teams have one task
88 // return team info for lightweight team
89 return &lwt->ompt_team_info;
91 // extract size from heavyweight team
93 *size = team->t.t_nproc;
95 // return team info for heavyweight team
96 return &team->t.ompt_team_info;
103 ompt_task_info_t *__ompt_get_task_info_object(int depth) {
104 ompt_task_info_t *info = NULL;
105 kmp_info_t *thr = ompt_get_thread();
108 kmp_taskdata_t *taskdata = thr->th.th_current_task;
109 ompt_lw_taskteam_t *lwt = NULL,
110 *next_lwt = LWT_FROM_TEAM(taskdata->td_team);
113 // next lightweight team (if any)
117 // next heavyweight team (if any) after
118 // lightweight teams are exhausted
119 if (!lwt && taskdata) {
124 taskdata = taskdata->td_parent;
126 next_lwt = LWT_FROM_TEAM(taskdata->td_team);
134 info = &lwt->ompt_task_info;
135 } else if (taskdata) {
136 info = &taskdata->ompt_task_info;
143 ompt_task_info_t *__ompt_get_scheduling_taskinfo(int depth) {
144 ompt_task_info_t *info = NULL;
145 kmp_info_t *thr = ompt_get_thread();
148 kmp_taskdata_t *taskdata = thr->th.th_current_task;
150 ompt_lw_taskteam_t *lwt = NULL,
151 *next_lwt = LWT_FROM_TEAM(taskdata->td_team);
154 // next lightweight team (if any)
158 // next heavyweight team (if any) after
159 // lightweight teams are exhausted
160 if (!lwt && taskdata) {
161 // first try scheduling parent (for explicit task scheduling)
162 if (taskdata->ompt_task_info.scheduling_parent) {
163 taskdata = taskdata->ompt_task_info.scheduling_parent;
164 } else if (next_lwt) {
168 // then go for implicit tasks
169 taskdata = taskdata->td_parent;
171 next_lwt = LWT_FROM_TEAM(taskdata->td_team);
179 info = &lwt->ompt_task_info;
180 } else if (taskdata) {
181 info = &taskdata->ompt_task_info;
188 //******************************************************************************
189 // interface operations
190 //******************************************************************************
192 //----------------------------------------------------------
194 //----------------------------------------------------------
196 ompt_data_t *__ompt_get_thread_data_internal() {
197 if (__kmp_get_gtid() >= 0) {
198 kmp_info_t *thread = ompt_get_thread();
201 return &(thread->th.ompt_thread_info.thread_data);
206 //----------------------------------------------------------
208 //----------------------------------------------------------
210 void __ompt_thread_assign_wait_id(void *variable) {
211 kmp_info_t *ti = ompt_get_thread();
214 ti->th.ompt_thread_info.wait_id = (ompt_wait_id_t)(uintptr_t)variable;
217 int __ompt_get_state_internal(ompt_wait_id_t *omp_wait_id) {
218 kmp_info_t *ti = ompt_get_thread();
222 *omp_wait_id = ti->th.ompt_thread_info.wait_id;
223 return ti->th.ompt_thread_info.state;
225 return ompt_state_undefined;
228 //----------------------------------------------------------
229 // parallel region support
230 //----------------------------------------------------------
232 int __ompt_get_parallel_info_internal(int ancestor_level,
233 ompt_data_t **parallel_data,
235 if (__kmp_get_gtid() >= 0) {
236 ompt_team_info_t *info;
238 info = __ompt_get_teaminfo(ancestor_level, team_size);
240 info = __ompt_get_teaminfo(ancestor_level, NULL);
243 *parallel_data = info ? &(info->parallel_data) : NULL;
251 //----------------------------------------------------------
252 // lightweight task team support
253 //----------------------------------------------------------
255 void __ompt_lw_taskteam_init(ompt_lw_taskteam_t *lwt, kmp_info_t *thr, int gtid,
256 ompt_data_t *ompt_pid, void *codeptr) {
257 // initialize parallel_data with input, return address to parallel_data on
259 lwt->ompt_team_info.parallel_data = *ompt_pid;
260 lwt->ompt_team_info.master_return_address = codeptr;
261 lwt->ompt_task_info.task_data.value = 0;
262 lwt->ompt_task_info.frame.enter_frame = ompt_data_none;
263 lwt->ompt_task_info.frame.exit_frame = ompt_data_none;
264 lwt->ompt_task_info.scheduling_parent = NULL;
265 lwt->ompt_task_info.deps = NULL;
266 lwt->ompt_task_info.ndeps = 0;
271 void __ompt_lw_taskteam_link(ompt_lw_taskteam_t *lwt, kmp_info_t *thr,
273 ompt_lw_taskteam_t *link_lwt = lwt;
274 if (thr->th.th_team->t.t_serialized >
275 1) { // we already have a team, so link the new team and swap values
276 if (on_heap) { // the lw_taskteam cannot stay on stack, allocate it on heap
278 (ompt_lw_taskteam_t *)__kmp_allocate(sizeof(ompt_lw_taskteam_t));
280 link_lwt->heap = on_heap;
282 // would be swap in the (on_stack) case.
283 ompt_team_info_t tmp_team = lwt->ompt_team_info;
284 link_lwt->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
285 *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
287 ompt_task_info_t tmp_task = lwt->ompt_task_info;
288 link_lwt->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
289 *OMPT_CUR_TASK_INFO(thr) = tmp_task;
291 // link the taskteam into the list of taskteams:
292 ompt_lw_taskteam_t *my_parent =
293 thr->th.th_team->t.ompt_serialized_team_info;
294 link_lwt->parent = my_parent;
295 thr->th.th_team->t.ompt_serialized_team_info = link_lwt;
297 // this is the first serialized team, so we just store the values in the
298 // team and drop the taskteam-object
299 *OMPT_CUR_TEAM_INFO(thr) = lwt->ompt_team_info;
300 *OMPT_CUR_TASK_INFO(thr) = lwt->ompt_task_info;
304 void __ompt_lw_taskteam_unlink(kmp_info_t *thr) {
305 ompt_lw_taskteam_t *lwtask = thr->th.th_team->t.ompt_serialized_team_info;
307 thr->th.th_team->t.ompt_serialized_team_info = lwtask->parent;
309 ompt_team_info_t tmp_team = lwtask->ompt_team_info;
310 lwtask->ompt_team_info = *OMPT_CUR_TEAM_INFO(thr);
311 *OMPT_CUR_TEAM_INFO(thr) = tmp_team;
313 ompt_task_info_t tmp_task = lwtask->ompt_task_info;
314 lwtask->ompt_task_info = *OMPT_CUR_TASK_INFO(thr);
315 *OMPT_CUR_TASK_INFO(thr) = tmp_task;
325 //----------------------------------------------------------
327 //----------------------------------------------------------
329 int __ompt_get_task_info_internal(int ancestor_level, int *type,
330 ompt_data_t **task_data,
331 ompt_frame_t **task_frame,
332 ompt_data_t **parallel_data,
334 if (__kmp_get_gtid() < 0)
337 if (ancestor_level < 0)
340 // copied from __ompt_get_scheduling_taskinfo
341 ompt_task_info_t *info = NULL;
342 ompt_team_info_t *team_info = NULL;
343 kmp_info_t *thr = ompt_get_thread();
344 int level = ancestor_level;
347 kmp_taskdata_t *taskdata = thr->th.th_current_task;
348 if (taskdata == NULL)
350 kmp_team *team = thr->th.th_team, *prev_team = NULL;
353 ompt_lw_taskteam_t *lwt = NULL,
354 *next_lwt = LWT_FROM_TEAM(taskdata->td_team),
357 while (ancestor_level > 0) {
358 // needed for thread_num
361 // next lightweight team (if any)
365 // next heavyweight team (if any) after
366 // lightweight teams are exhausted
367 if (!lwt && taskdata) {
368 // first try scheduling parent (for explicit task scheduling)
369 if (taskdata->ompt_task_info.scheduling_parent) {
370 taskdata = taskdata->ompt_task_info.scheduling_parent;
371 } else if (next_lwt) {
375 // then go for implicit tasks
376 taskdata = taskdata->td_parent;
379 team = team->t.t_parent;
381 next_lwt = LWT_FROM_TEAM(taskdata->td_team);
389 info = &lwt->ompt_task_info;
390 team_info = &lwt->ompt_team_info;
392 *type = ompt_task_implicit;
394 } else if (taskdata) {
395 info = &taskdata->ompt_task_info;
396 team_info = &team->t.ompt_team_info;
398 if (taskdata->td_parent) {
399 *type = (taskdata->td_flags.tasktype ? ompt_task_explicit
400 : ompt_task_implicit) |
401 TASK_TYPE_DETAILS_FORMAT(taskdata);
403 *type = ompt_task_initial;
408 *task_data = info ? &info->task_data : NULL;
411 // OpenMP spec asks for the scheduling task to be returned.
412 *task_frame = info ? &info->frame : NULL;
415 *parallel_data = team_info ? &(team_info->parallel_data) : NULL;
419 *thread_num = __kmp_get_tid();
423 *thread_num = prev_team->t.t_master_tid;
424 // *thread_num = team->t.t_master_tid;
431 int __ompt_get_task_memory_internal(void **addr, size_t *size, int blocknum) {
433 return 0; // support only a single block
435 kmp_info_t *thr = ompt_get_thread();
439 kmp_taskdata_t *taskdata = thr->th.th_current_task;
440 kmp_task_t *task = KMP_TASKDATA_TO_TASK(taskdata);
442 if (taskdata->td_flags.tasktype != TASK_EXPLICIT)
443 return 0; // support only explicit task
446 int64_t ret_size = taskdata->td_size_alloc - sizeof(kmp_taskdata_t);
448 // kmp_task_t->data1 is an optional member
449 if (taskdata->td_flags.destructors_thunk)
450 ret_addr = &task->data1 + 1;
452 ret_addr = &task->part_id + 1;
454 ret_size -= (char *)(ret_addr) - (char *)(task);
463 //----------------------------------------------------------
465 //----------------------------------------------------------
467 void __ompt_team_assign_id(kmp_team_t *team, ompt_data_t ompt_pid) {
468 team->t.ompt_team_info.parallel_data = ompt_pid;
471 //----------------------------------------------------------
473 //----------------------------------------------------------
475 static uint64_t __ompt_get_unique_id_internal() {
476 static uint64_t thread = 1;
477 static THREAD_LOCAL uint64_t ID = 0;
479 uint64_t new_thread = KMP_TEST_THEN_INC64((kmp_int64 *)&thread);
480 ID = new_thread << (sizeof(uint64_t) * 8 - OMPT_THREAD_ID_BITS);
485 ompt_sync_region_t __ompt_get_barrier_kind(enum barrier_type bt,
487 if (bt == bs_forkjoin_barrier)
488 return ompt_sync_region_barrier_implicit;
490 if (bt != bs_plain_barrier)
491 return ompt_sync_region_barrier_implementation;
493 if (!thr->th.th_ident)
494 return ompt_sync_region_barrier;
496 kmp_int32 flags = thr->th.th_ident->flags;
498 if ((flags & KMP_IDENT_BARRIER_EXPL) != 0)
499 return ompt_sync_region_barrier_explicit;
501 if ((flags & KMP_IDENT_BARRIER_IMPL) != 0)
502 return ompt_sync_region_barrier_implicit;
504 return ompt_sync_region_barrier_implementation;