#include "kmp_wait_release.h"
#include "kmp_taskdeps.h"
#if OMPT_SUPPORT
#include "ompt-specific.h"
#endif
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
static std::atomic<kmp_int32> kmp_node_id_seed = ATOMIC_VAR_INIT(0);
#endif
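
// Initialize a dependence node: no successors or mutexinoutset locks yet, and
// a single initial reference owned by the creating task.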
static void __kmp_init_node(kmp_depnode_t *node) {
  node->dn.successors = NULL;
  node->dn.task = NULL; // will point to the right task
                        // once dependences have been processed
  for (int i = 0; i < MAX_MTX_DEPS; ++i)
    node->dn.mtx_locks[i] = NULL;
  node->dn.mtx_num_locks = 0;
  __kmp_init_lock(&node->dn.lock);
  KMP_ATOMIC_ST_RLX(&node->dn.nrefs, 1); // init creates the first reference
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  node->dn.id = KMP_ATOMIC_INC(&kmp_node_id_seed);
#endif
}

static inline kmp_depnode_t *__kmp_node_ref(kmp_depnode_t *node) {
  KMP_ATOMIC_INC(&node->dn.nrefs);
  return node;
}
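
// Sizes used for the dependence hash tables: implicit (master) tasks get the
// larger initial table, and `sizes` lists the successive generations a table
// can grow through when it is extended.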
enum { KMP_DEPHASH_OTHER_SIZE = 97, KMP_DEPHASH_MASTER_SIZE = 997 };

size_t sizes[] = {997, 2003, 4001, 8191, 16001, 32003, 64007, 131071, 270029};
const size_t MAX_GEN = 8;

static inline kmp_int32 __kmp_dephash_hash(kmp_intptr_t addr, size_t hsize) {
  return ((addr >> 6) ^ (addr >> 2)) % hsize;
}
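
// Extend the hash table to the next generation size and rehash all existing
// entries into the new buckets; beyond MAX_GEN the table stops growing.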
static kmp_dephash_t *__kmp_dephash_extend(kmp_info_t *thread,
                                           kmp_dephash_t *current_dephash) {
  kmp_dephash_t *h;

  size_t gen = current_dephash->generation + 1;
  if (gen >= MAX_GEN)
    return current_dephash;
  size_t new_size = sizes[gen];

  kmp_int32 size_to_allocate =
      new_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size_to_allocate);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size_to_allocate);
#endif

  h->size = new_size;
  h->nelements = current_dephash->nelements;
  h->buckets = (kmp_dephash_entry **)(h + 1);
  h->generation = gen;
  h->nconflicts = 0;
  // make sure the new buckets start out empty
  for (size_t i = 0; i < new_size; i++)
    h->buckets[i] = NULL;

  // rehash every existing entry into the new table
  for (size_t i = 0; i < current_dephash->size; i++) {
    kmp_dephash_entry_t *next, *entry;
    for (entry = current_dephash->buckets[i]; entry; entry = next) {
      next = entry->next_in_bucket;
      // compute the hash using the new size and insert the entry at the
      // head of its new bucket
      kmp_int32 new_bucket = __kmp_dephash_hash(entry->addr, h->size);
      entry->next_in_bucket = h->buckets[new_bucket];
      if (entry->next_in_bucket)
        h->nconflicts++;
      h->buckets[new_bucket] = entry;
    }
  }

#if USE_FAST_MEMORY
  __kmp_fast_free(thread, current_dephash);
#else
  __kmp_thread_free(thread, current_dephash);
#endif

  return h;
}
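
// Create the initial dependence hash table for a task.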
static kmp_dephash_t *__kmp_dephash_create(kmp_info_t *thread,
                                           kmp_taskdata_t *current_task) {
  kmp_dephash_t *h;

  size_t h_size;

  if (current_task->td_flags.tasktype == TASK_IMPLICIT)
    h_size = KMP_DEPHASH_MASTER_SIZE;
  else
    h_size = KMP_DEPHASH_OTHER_SIZE;

  kmp_int32 size =
      h_size * sizeof(kmp_dephash_entry_t *) + sizeof(kmp_dephash_t);

#if USE_FAST_MEMORY
  h = (kmp_dephash_t *)__kmp_fast_allocate(thread, size);
#else
  h = (kmp_dephash_t *)__kmp_thread_malloc(thread, size);
#endif
  h->size = h_size;

  h->generation = 0;
  h->nelements = 0;
  h->nconflicts = 0;
  h->buckets = (kmp_dephash_entry **)(h + 1);

  for (size_t i = 0; i < h_size; i++)
    h->buckets[i] = 0;

  return h;
}

#define ENTRY_LAST_INS 0
#define ENTRY_LAST_MTXS 1
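
// Find the dependence entry for an address, creating it on first use; the
// table is extended first if it has accumulated too many bucket conflicts.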
static kmp_dephash_entry *
__kmp_dephash_find(kmp_info_t *thread, kmp_dephash_t **hash,
                   kmp_intptr_t addr) {
  kmp_dephash_t *h = *hash;
  if (h->nelements != 0 && h->nconflicts / h->size >= 1) {
    *hash = __kmp_dephash_extend(thread, h);
    h = *hash;
  }
  kmp_int32 bucket = __kmp_dephash_hash(addr, h->size);

  kmp_dephash_entry_t *entry;
  for (entry = h->buckets[bucket]; entry; entry = entry->next_in_bucket)
    if (entry->addr == addr)
      break;

  if (entry == NULL) {
    // create a new entry; only one thread creates entries for a given
    // dephash, so no locking is required
#if USE_FAST_MEMORY
    entry = (kmp_dephash_entry_t *)__kmp_fast_allocate(
        thread, sizeof(kmp_dephash_entry_t));
#else
    entry = (kmp_dephash_entry_t *)__kmp_thread_malloc(
        thread, sizeof(kmp_dephash_entry_t));
#endif
    entry->addr = addr;
    entry->last_out = NULL;
    entry->last_ins = NULL;
    entry->last_mtxs = NULL;
    entry->last_flag = ENTRY_LAST_INS;
    entry->mtx_lock = NULL;
    entry->next_in_bucket = h->buckets[bucket];
    h->buckets[bucket] = entry;
    h->nelements++;
    if (entry->next_in_bucket)
      h->nconflicts++;
  }
  return entry;
}
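
// Prepend a node to a dependence list, taking a new reference on the node.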
static kmp_depnode_list_t *__kmp_add_node(kmp_info_t *thread,
                                          kmp_depnode_list_t *list,
                                          kmp_depnode_t *node) {
  kmp_depnode_list_t *new_head;

#if USE_FAST_MEMORY
  new_head = (kmp_depnode_list_t *)__kmp_fast_allocate(
      thread, sizeof(kmp_depnode_list_t));
#else
  new_head = (kmp_depnode_list_t *)__kmp_thread_malloc(
      thread, sizeof(kmp_depnode_list_t));
#endif

  new_head->node = __kmp_node_ref(node);
  new_head->next = list;

  return new_head;
}
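
// Report a source -> sink dependence edge to the optional graph output and,
// if a tool registered one, to the OMPT task-dependence callback.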
static inline void __kmp_track_dependence(kmp_depnode_t *source,
                                          kmp_depnode_t *sink,
                                          kmp_task_t *sink_task) {
#ifdef KMP_SUPPORT_GRAPH_OUTPUT
  kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
  // do not use sink->dn.task here: it is only filled in after the
  // dependences have been processed
  kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

  __kmp_printf("%d(%s) -> %d(%s)\n", source->dn.id,
               task_source->td_ident->psource, sink->dn.id,
               task_sink->td_ident->psource);
#endif
#if OMPT_SUPPORT && OMPT_OPTIONAL
  /* OMPT tracks dependences between tasks (a=source, b=sink) in which task a
     blocks the execution of task b */
  if (ompt_enabled.ompt_callback_task_dependence) {
    kmp_taskdata_t *task_source = KMP_TASK_TO_TASKDATA(source->dn.task);
    kmp_taskdata_t *task_sink = KMP_TASK_TO_TASKDATA(sink_task);

    ompt_callbacks.ompt_callback(ompt_callback_task_dependence)(
        &(task_source->ompt_task_info.task_data),
        &(task_sink->ompt_task_info.task_data));
  }
#endif /* OMPT_SUPPORT && OMPT_OPTIONAL */
}
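
// Link `node` as a successor of every node in `plist`; returns the number of
// predecessor tasks that had not yet completed.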
static inline kmp_int32
__kmp_depnode_link_successor(kmp_int32 gtid, kmp_info_t *thread,
                             kmp_task_t *task, kmp_depnode_t *node,
                             kmp_depnode_list_t *plist) {
  if (!plist)
    return 0;
  kmp_int32 npredecessors = 0;
  // link node as a successor of each element in the list
  for (kmp_depnode_list_t *p = plist; p; p = p->next) {
    kmp_depnode_t *dep = p->node;
    if (dep->dn.task) {
      KMP_ACQUIRE_DEPNODE(gtid, dep);
      if (dep->dn.task) { // recheck under the lock: the task may have finished
        __kmp_track_dependence(dep, node, task);
        dep->dn.successors = __kmp_add_node(thread, dep->dn.successors, node);
        KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                      "%p\n",
                      gtid, KMP_TASK_TO_TASKDATA(dep->dn.task),
                      KMP_TASK_TO_TASKDATA(task)));
        npredecessors++;
      }
      KMP_RELEASE_DEPNODE(gtid, dep);
    }
  }
  return npredecessors;
}
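
// Same as above, but for a single sink node instead of a list.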
static inline kmp_int32 __kmp_depnode_link_successor(kmp_int32 gtid,
                                                     kmp_info_t *thread,
                                                     kmp_task_t *task,
                                                     kmp_depnode_t *source,
                                                     kmp_depnode_t *sink) {
  if (!sink)
    return 0;
  kmp_int32 npredecessors = 0;
  if (sink->dn.task) {
    // synchronously add source to sink's list of successors
    KMP_ACQUIRE_DEPNODE(gtid, sink);
    if (sink->dn.task) { // recheck under the lock: the task may have finished
      __kmp_track_dependence(sink, source, task);
      sink->dn.successors = __kmp_add_node(thread, sink->dn.successors, source);
      KA_TRACE(40, ("__kmp_process_deps: T#%d adding dependence from %p to "
                    "%p\n",
                    gtid, KMP_TASK_TO_TASKDATA(sink->dn.task),
                    KMP_TASK_TO_TASKDATA(task)));
      npredecessors++;
    }
    KMP_RELEASE_DEPNODE(gtid, sink);
  }
  return npredecessors;
}
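
// Core dependence processing: for each dependence, link `node` behind the
// producers recorded in the hash (last_out / last_ins / last_mtxs) and update
// those records so later tasks order themselves behind this one.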
template <bool filter>
static inline kmp_int32
__kmp_process_deps(kmp_int32 gtid, kmp_depnode_t *node, kmp_dephash_t **hash,
                   bool dep_barrier, kmp_int32 ndeps,
                   kmp_depend_info_t *dep_list, kmp_task_t *task) {
  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d processing %d dependencies : "
                "dep_barrier = %d\n",
                filter, gtid, ndeps, dep_barrier));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_int32 npredecessors = 0;
  for (kmp_int32 i = 0; i < ndeps; i++) {
    const kmp_depend_info_t *dep = &dep_list[i];

    if (filter && dep->base_addr == 0)
      continue; // deps with the same base address are collapsed into the first

    kmp_dephash_entry_t *info =
        __kmp_dephash_find(thread, hash, dep->base_addr);
    kmp_depnode_t *last_out = info->last_out;
    kmp_depnode_list_t *last_ins = info->last_ins;
    kmp_depnode_list_t *last_mtxs = info->last_mtxs;

    if (dep->flags.out) { // out --> clean lists of mtx and in deps
      if (last_ins || last_mtxs) {
        if (info->last_flag == ENTRY_LAST_INS) { // INS were last
          npredecessors +=
              __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        } else { // MTXS were last
          npredecessors += __kmp_depnode_link_successor(gtid, thread, task,
                                                        node, last_mtxs);
        }
        __kmp_depnode_list_free(thread, last_ins);
        __kmp_depnode_list_free(thread, last_mtxs);
        info->last_ins = NULL;
        info->last_mtxs = NULL;
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      __kmp_node_deref(thread, last_out);
      if (dep_barrier) {
        // at a sync point in the serial sequence the previous outputs are
        // guaranteed to be completed after this task executes, so the
        // previous output nodes can be cleared
        info->last_out = NULL;
      } else {
        info->last_out = __kmp_node_ref(node);
      }
    } else if (dep->flags.in) {
      // in --> link node to either last_out or last_mtxs, clean earlier deps
      if (last_mtxs) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_mtxs);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_MTXS && last_ins) { // MTXS were last
          // clean old INS before replacing them with the outdated list
          __kmp_depnode_list_free(thread, last_ins);
          info->last_ins = NULL;
        }
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_INS;
      info->last_ins = __kmp_add_node(thread, info->last_ins, node);
    } else {
      KMP_DEBUG_ASSERT(dep->flags.mtx == 1);
      // mtx --> link node to either last_out or last_ins, clean earlier deps
      if (last_ins) {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_ins);
        __kmp_node_deref(thread, last_out);
        info->last_out = NULL;
        if (info->last_flag == ENTRY_LAST_INS && last_mtxs) { // INS were last
          // clean old MTXS before replacing them with the outdated list
          __kmp_depnode_list_free(thread, last_mtxs);
          info->last_mtxs = NULL;
        }
      } else {
        npredecessors +=
            __kmp_depnode_link_successor(gtid, thread, task, node, last_out);
      }
      info->last_flag = ENTRY_LAST_MTXS;
      info->last_mtxs = __kmp_add_node(thread, info->last_mtxs, node);
      if (info->mtx_lock == NULL) {
        info->mtx_lock = (kmp_lock_t *)__kmp_allocate(sizeof(kmp_lock_t));
        __kmp_init_lock(info->mtx_lock);
      }
      KMP_DEBUG_ASSERT(node->dn.mtx_num_locks < MAX_MTX_DEPS);
      kmp_int32 m;
      // save the lock in the node's array, keeping the pointers sorted in
      // decreasing order to avoid potential livelock when locks are acquired
      for (m = 0; m < MAX_MTX_DEPS; ++m) {
        if (node->dn.mtx_locks[m] < info->mtx_lock) {
          KMP_DEBUG_ASSERT(node->dn.mtx_locks[node->dn.mtx_num_locks] == NULL);
          for (int n = node->dn.mtx_num_locks; n > m; --n) {
            // shift right all lesser non-NULL pointers
            KMP_DEBUG_ASSERT(node->dn.mtx_locks[n - 1] != NULL);
            node->dn.mtx_locks[n] = node->dn.mtx_locks[n - 1];
          }
          node->dn.mtx_locks[m] = info->mtx_lock;
          break;
        }
      }
      KMP_DEBUG_ASSERT(m < MAX_MTX_DEPS); // must break from the loop above
      node->dn.mtx_num_locks++;
    }
  }

  KA_TRACE(30, ("__kmp_process_deps<%d>: T#%d found %d predecessors\n", filter,
                gtid, npredecessors));
  return npredecessors;
}

#define NO_DEP_BARRIER (false)
#define DEP_BARRIER (true)
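
// returns true if the task has any outstanding dependence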
static bool __kmp_check_deps(kmp_int32 gtid, kmp_depnode_t *node,
                             kmp_task_t *task, kmp_dephash_t **hash,
                             bool dep_barrier, kmp_int32 ndeps,
                             kmp_depend_info_t *dep_list,
                             kmp_int32 ndeps_noalias,
                             kmp_depend_info_t *noalias_dep_list) {
  int i, n_mtxs = 0;
  kmp_taskdata_t *taskdata = KMP_TASK_TO_TASKDATA(task);

  KA_TRACE(20, ("__kmp_check_deps: T#%d checking dependencies for task %p : %d "
                "possibly aliased dependencies, %d non-aliased dependencies : "
                "dep_barrier=%d .\n",
                gtid, taskdata, ndeps, ndeps_noalias, dep_barrier));

  // Filter deps in dep_list: collapse entries that share a base address into
  // the first occurrence, merging their flags
  for (i = 0; i < ndeps; i++) {
    if (dep_list[i].base_addr != 0) {
      for (int j = i + 1; j < ndeps; j++) {
        if (dep_list[i].base_addr == dep_list[j].base_addr) {
          dep_list[i].flags.in |= dep_list[j].flags.in;
          dep_list[i].flags.out |=
              (dep_list[j].flags.out ||
               (dep_list[i].flags.in && dep_list[j].flags.mtx) ||
               (dep_list[i].flags.mtx && dep_list[j].flags.in));
          dep_list[i].flags.mtx =
              (dep_list[i].flags.mtx | dep_list[j].flags.mtx) &&
              !dep_list[i].flags.out;
          dep_list[j].base_addr = 0; // mark element j as void
        }
      }
      if (dep_list[i].flags.mtx) {
        // limit the number of mtx deps to MAX_MTX_DEPS per node
        if (n_mtxs < MAX_MTX_DEPS && task != NULL) {
          n_mtxs++;
        } else {
          // downgrade mutexinoutset to inout
          dep_list[i].flags.in = 1;
          dep_list[i].flags.out = 1;
          dep_list[i].flags.mtx = 0;
        }
      }
    }
  }

  // npredecessors is set to -1 to ensure that none of the releasing tasks
  // queues this task before we have finished processing all the dependencies
  node->dn.npredecessors = -1;

  // used to pack all npredecessors additions into a single atomic operation
  // at the end
  int npredecessors;

  npredecessors = __kmp_process_deps<true>(gtid, node, hash, dep_barrier, ndeps,
                                           dep_list, task);
  npredecessors += __kmp_process_deps<false>(
      gtid, node, hash, dep_barrier, ndeps_noalias, noalias_dep_list, task);

  node->dn.task = task;
  KMP_MB();

  // account for our initial fake value
  npredecessors++;

  // update predecessors and obtain the current value to check if there are
  // still any outstanding dependences (some tasks may have finished while we
  // were processing them)
  npredecessors =
      node->dn.npredecessors.fetch_add(npredecessors) + npredecessors;

  KA_TRACE(20, ("__kmp_check_deps: T#%d found %d predecessors for task %p \n",
                gtid, npredecessors, taskdata));

  // beyond this point the task could be queued (and executed) by a releasing
  // task...
  return npredecessors > 0 ? true : false;
}
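
/*!
@ingroup TASKING
Schedule a non-thread-switchable task with dependences for execution.
*/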
kmp_int32 __kmpc_omp_task_with_deps(ident_t *loc_ref, kmp_int32 gtid,
                                    kmp_task_t *new_task, kmp_int32 ndeps,
                                    kmp_depend_info_t *dep_list,
                                    kmp_int32 ndeps_noalias,
                                    kmp_depend_info_t *noalias_dep_list) {

  kmp_taskdata_t *new_taskdata = KMP_TASK_TO_TASKDATA(new_task);
  KA_TRACE(10, ("__kmpc_omp_task_with_deps(enter): T#%d loc=%p task=%p\n", gtid,
                loc_ref, new_taskdata));

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;

#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    OMPT_STORE_RETURN_ADDRESS(gtid);
    if (!current_task->ompt_task_info.frame.enter_frame.ptr)
      current_task->ompt_task_info.frame.enter_frame.ptr =
          OMPT_GET_FRAME_ADDRESS(0);
    if (ompt_enabled.ompt_callback_task_create) {
      ompt_data_t task_data = ompt_data_none;
      ompt_callbacks.ompt_callback(ompt_callback_task_create)(
          current_task ? &(current_task->ompt_task_info.task_data) : &task_data,
          current_task ? &(current_task->ompt_task_info.frame) : NULL,
          &(new_taskdata->ompt_task_info.task_data),
          ompt_task_explicit | TASK_TYPE_DETAILS_FORMAT(new_taskdata), 1,
          OMPT_LOAD_RETURN_ADDRESS(gtid));
    }

    new_taskdata->ompt_task_info.frame.enter_frame.ptr =
        OMPT_GET_FRAME_ADDRESS(0);
  }
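
  // OMPT: if a tool registered ompt_callback_dependences, hand it the full
  // dependence list for this task before the task is queued.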
#if OMPT_OPTIONAL
  if (ndeps + ndeps_noalias > 0 && ompt_enabled.ompt_callback_dependences) {
    kmp_int32 i;

    new_taskdata->ompt_task_info.ndeps = ndeps + ndeps_noalias;
    new_taskdata->ompt_task_info.deps =
        (ompt_dependence_t *)KMP_OMPT_DEPS_ALLOC(
            thread, (ndeps + ndeps_noalias) * sizeof(ompt_dependence_t));

    KMP_ASSERT(new_taskdata->ompt_task_info.deps != NULL);

    for (i = 0; i < ndeps; i++) {
      new_taskdata->ompt_task_info.deps[i].variable.ptr =
          (void *)dep_list[i].base_addr;
      if (dep_list[i].flags.in && dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_inout;
      else if (dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_out;
      else if (dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[i].dependence_type =
            ompt_dependence_type_in;
    }
    for (i = 0; i < ndeps_noalias; i++) {
      new_taskdata->ompt_task_info.deps[ndeps + i].variable.ptr =
          (void *)noalias_dep_list[i].base_addr;
      if (noalias_dep_list[i].flags.in && noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_inout;
      else if (noalias_dep_list[i].flags.out)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_out;
      else if (noalias_dep_list[i].flags.in)
        new_taskdata->ompt_task_info.deps[ndeps + i].dependence_type =
            ompt_dependence_type_in;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dependences)(
        &(new_taskdata->ompt_task_info.task_data),
        new_taskdata->ompt_task_info.deps, new_taskdata->ompt_task_info.ndeps);
    // the dependence memory can be freed now that the tool has seen it
    KMP_OMPT_DEPS_FREE(thread, new_taskdata->ompt_task_info.deps);
    new_taskdata->ompt_task_info.deps = NULL;
    new_taskdata->ompt_task_info.ndeps = 0;
  }
#endif /* OMPT_OPTIONAL */
#endif /* OMPT_SUPPORT */

  bool serial = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  kmp_task_team_t *task_team = thread->th.th_task_team;
  serial = serial && !(task_team && task_team->tt.tt_found_proxy_tasks);
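
  // No need to process dependences in a serialized team unless proxy tasks
  // may be present: in strict serial execution they are trivially satisfied.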
  if (!serial && (ndeps > 0 || ndeps_noalias > 0)) {
    // if no dephash has been created yet, create it
    if (current_task->td_dephash == NULL)
      current_task->td_dephash = __kmp_dephash_create(thread, current_task);

#if USE_FAST_MEMORY
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_fast_allocate(thread, sizeof(kmp_depnode_t));
#else
    kmp_depnode_t *node =
        (kmp_depnode_t *)__kmp_thread_malloc(thread, sizeof(kmp_depnode_t));
#endif

    __kmp_init_node(node);
    new_taskdata->td_depnode = node;

    if (__kmp_check_deps(gtid, node, new_task, &current_task->td_dephash,
                         NO_DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                         noalias_dep_list)) {
      KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had blocking "
                    "dependencies: "
                    "loc=%p task=%p, return: TASK_CURRENT_NOT_QUEUED\n",
                    gtid, loc_ref, new_taskdata));
#if OMPT_SUPPORT
      if (ompt_enabled.enabled) {
        current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
      }
#endif
      return TASK_CURRENT_NOT_QUEUED;
    }
  } else {
    KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d ignored dependencies "
                  "for task (serialized) loc=%p task=%p\n",
                  gtid, loc_ref, new_taskdata));
  }

  KA_TRACE(10, ("__kmpc_omp_task_with_deps(exit): T#%d task had no blocking "
                "dependencies : "
                "loc=%p task=%p, transferring to __kmp_omp_task\n",
                gtid, loc_ref, new_taskdata));

  kmp_int32 ret = __kmp_omp_task(gtid, new_task, true);
#if OMPT_SUPPORT
  if (ompt_enabled.enabled) {
    current_task->ompt_task_info.frame.enter_frame = ompt_data_none;
  }
#endif
  return ret;
}
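
/*!
@ingroup TASKING
Blocks the current task until all dependences listed in dep_list and
noalias_dep_list have been fulfilled.
*/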
void __kmpc_omp_wait_deps(ident_t *loc_ref, kmp_int32 gtid, kmp_int32 ndeps,
                          kmp_depend_info_t *dep_list, kmp_int32 ndeps_noalias,
                          kmp_depend_info_t *noalias_dep_list) {
  KA_TRACE(10, ("__kmpc_omp_wait_deps(enter): T#%d loc=%p\n", gtid, loc_ref));

  if (ndeps == 0 && ndeps_noalias == 0) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no dependencies to "
                  "wait upon : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_info_t *thread = __kmp_threads[gtid];
  kmp_taskdata_t *current_task = thread->th.th_current_task;
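
  // We can return immediately as:
  //  - dependences are not computed in serial teams (except with proxy tasks)
  //  - if the dephash is not yet created it means we have nothing to wait for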
  bool ignore = current_task->td_flags.team_serial ||
                current_task->td_flags.tasking_ser ||
                current_task->td_flags.final;
  ignore = ignore && thread->th.th_task_team != NULL &&
           thread->th.th_task_team->tt.tt_found_proxy_tasks == FALSE;
  ignore = ignore || current_task->td_dephash == NULL;

  if (ignore) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }

  kmp_depnode_t node = {0};
  __kmp_init_node(&node);

  if (!__kmp_check_deps(gtid, &node, NULL, &current_task->td_dephash,
                        DEP_BARRIER, ndeps, dep_list, ndeps_noalias,
                        noalias_dep_list)) {
    KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d has no blocking "
                  "dependencies : loc=%p\n",
                  gtid, loc_ref));
    return;
  }
  int thread_finished = FALSE;
  kmp_flag_32 flag((std::atomic<kmp_uint32> *)&node.dn.npredecessors, 0U);
  while (node.dn.npredecessors > 0) {
    flag.execute_tasks(thread, gtid, FALSE,
                       &thread_finished USE_ITT_BUILD_ARG(NULL),
                       __kmp_task_stealing_constraint);
  }

  KA_TRACE(10, ("__kmpc_omp_wait_deps(exit): T#%d finished waiting : loc=%p\n",
                gtid, loc_ref));
}