//===----------------------------------------------------------------------===//
//
//                     The LLVM Compiler Infrastructure
//
// This file is dual licensed under the MIT and the University of Illinois Open
// Source Licenses. See LICENSE.txt for details.
//
//===----------------------------------------------------------------------===//
16 #ifndef KMP_TASKDEPS_H
17 #define KMP_TASKDEPS_H
// Acquire/release the per-depnode lock (dn.lock) on behalf of the thread
// identified by global thread id 'gtid'; used to serialize updates to a
// dependence node's successor list against concurrent dependence creation.
#define KMP_ACQUIRE_DEPNODE(gtid, n) __kmp_acquire_lock(&(n)->dn.lock, (gtid))
#define KMP_RELEASE_DEPNODE(gtid, n) __kmp_release_lock(&(n)->dn.lock, (gtid))
26 static inline void __kmp_node_deref(kmp_info_t *thread, kmp_depnode_t *node) {
30 kmp_int32 n = KMP_ATOMIC_DEC(&node->dn.nrefs) - 1;
32 KMP_ASSERT(node->dn.nrefs == 0);
34 __kmp_fast_free(thread, node);
36 __kmp_thread_free(thread, node);
41 static inline void __kmp_depnode_list_free(kmp_info_t *thread,
42 kmp_depnode_list *list) {
43 kmp_depnode_list *next;
45 for (; list; list = next) {
48 __kmp_node_deref(thread, list->node);
50 __kmp_fast_free(thread, list);
52 __kmp_thread_free(thread, list);
57 static inline void __kmp_dephash_free_entries(kmp_info_t *thread,
59 for (size_t i = 0; i < h->size; i++) {
61 kmp_dephash_entry_t *next;
62 for (kmp_dephash_entry_t *entry = h->buckets[i]; entry; entry = next) {
63 next = entry->next_in_bucket;
64 __kmp_depnode_list_free(thread, entry->last_ins);
65 __kmp_depnode_list_free(thread, entry->last_mtxs);
66 __kmp_node_deref(thread, entry->last_out);
67 if (entry->mtx_lock) {
68 __kmp_destroy_lock(entry->mtx_lock);
69 __kmp_free(entry->mtx_lock);
72 __kmp_fast_free(thread, entry);
74 __kmp_thread_free(thread, entry);
82 static inline void __kmp_dephash_free(kmp_info_t *thread, kmp_dephash_t *h) {
83 __kmp_dephash_free_entries(thread, h);
85 __kmp_fast_free(thread, h);
87 __kmp_thread_free(thread, h);
91 static inline void __kmp_release_deps(kmp_int32 gtid, kmp_taskdata_t *task) {
92 kmp_info_t *thread = __kmp_threads[gtid];
93 kmp_depnode_t *node = task->td_depnode;
95 if (task->td_dephash) {
97 40, ("__kmp_release_deps: T#%d freeing dependencies hash of task %p.\n",
99 __kmp_dephash_free(thread, task->td_dephash);
100 task->td_dephash = NULL;
106 KA_TRACE(20, ("__kmp_release_deps: T#%d notifying successors of task %p.\n",
109 KMP_ACQUIRE_DEPNODE(gtid, node);
111 NULL; // mark this task as finished, so no new dependencies are generated
112 KMP_RELEASE_DEPNODE(gtid, node);
114 kmp_depnode_list_t *next;
115 for (kmp_depnode_list_t *p = node->dn.successors; p; p = next) {
116 kmp_depnode_t *successor = p->node;
117 kmp_int32 npredecessors = KMP_ATOMIC_DEC(&successor->dn.npredecessors) - 1;
119 // successor task can be NULL for wait_depends or because deps are still
121 if (npredecessors == 0) {
123 if (successor->dn.task) {
124 KA_TRACE(20, ("__kmp_release_deps: T#%d successor %p of %p scheduled "
126 gtid, successor->dn.task, task));
127 __kmp_omp_task(gtid, successor->dn.task, false);
132 __kmp_node_deref(thread, p->node);
134 __kmp_fast_free(thread, p);
136 __kmp_thread_free(thread, p);
140 __kmp_node_deref(thread, node);
144 ("__kmp_release_deps: T#%d all successors of %p notified of completion\n",
148 #endif // OMP_40_ENABLED
150 #endif // KMP_TASKDEPS_H