14 #include "kmp_affinity.h"
15 #include "kmp_atomic.h"
16 #include "kmp_environment.h"
17 #include "kmp_error.h"
21 #include "kmp_settings.h"
22 #include "kmp_stats.h"
24 #include "kmp_wait_release.h"
25 #include "kmp_wrapper_getpid.h"
26 #include "kmp_dispatch.h"
27 #if KMP_USE_HIER_SCHED
28 #include "kmp_dispatch_hier.h"
32 #include "ompt-specific.h"
36 #define KMP_USE_PRCTL 0
42 #include "tsan_annotations.h"
44 #if defined(KMP_GOMP_COMPAT)
45 char const __kmp_version_alt_comp[] =
46 KMP_VERSION_PREFIX
"alternative compiler support: yes";
49 char const __kmp_version_omp_api[] =
50 KMP_VERSION_PREFIX
"API version: 5.0 (201611)";
53 char const __kmp_version_lock[] =
54 KMP_VERSION_PREFIX
"lock type: run time selectable";
57 #define KMP_MIN(x, y) ((x) < (y) ? (x) : (y))
62 kmp_info_t __kmp_monitor;
67 void __kmp_cleanup(
void);
69 static void __kmp_initialize_info(kmp_info_t *, kmp_team_t *,
int tid,
71 static void __kmp_initialize_team(kmp_team_t *team,
int new_nproc,
72 kmp_internal_control_t *new_icvs,
74 #if KMP_AFFINITY_SUPPORTED
75 static void __kmp_partition_places(kmp_team_t *team,
76 int update_master_only = 0);
78 static void __kmp_do_serial_initialize(
void);
79 void __kmp_fork_barrier(
int gtid,
int tid);
80 void __kmp_join_barrier(
int gtid);
81 void __kmp_setup_icv_copy(kmp_team_t *team,
int new_nproc,
82 kmp_internal_control_t *new_icvs,
ident_t *loc);
84 #ifdef USE_LOAD_BALANCE
85 static int __kmp_load_balance_nproc(kmp_root_t *root,
int set_nproc);
88 static int __kmp_expand_threads(
int nNeed);
90 static int __kmp_unregister_root_other_thread(
int gtid);
92 static void __kmp_unregister_library(
void);
93 static void __kmp_reap_thread(kmp_info_t *thread,
int is_root);
94 kmp_info_t *__kmp_thread_pool_insert_pt = NULL;
99 int __kmp_get_global_thread_id() {
101 kmp_info_t **other_threads;
109 (
"*** __kmp_get_global_thread_id: entering, nproc=%d all_nproc=%d\n",
110 __kmp_nth, __kmp_all_nth));
117 if (!TCR_4(__kmp_init_gtid))
120 #ifdef KMP_TDATA_GTID
121 if (TCR_4(__kmp_gtid_mode) >= 3) {
122 KA_TRACE(1000, (
"*** __kmp_get_global_thread_id: using TDATA\n"));
126 if (TCR_4(__kmp_gtid_mode) >= 2) {
127 KA_TRACE(1000, (
"*** __kmp_get_global_thread_id: using keyed TLS\n"));
128 return __kmp_gtid_get_specific();
130 KA_TRACE(1000, (
"*** __kmp_get_global_thread_id: using internal alg.\n"));
132 stack_addr = (
char *)&stack_data;
133 other_threads = __kmp_threads;
146 for (i = 0; i < __kmp_threads_capacity; i++) {
148 kmp_info_t *thr = (kmp_info_t *)TCR_SYNC_PTR(other_threads[i]);
152 stack_size = (size_t)TCR_PTR(thr->th.th_info.ds.ds_stacksize);
153 stack_base = (
char *)TCR_PTR(thr->th.th_info.ds.ds_stackbase);
157 if (stack_addr <= stack_base) {
158 size_t stack_diff = stack_base - stack_addr;
160 if (stack_diff <= stack_size) {
163 KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == i);
171 (
"*** __kmp_get_global_thread_id: internal alg. failed to find "
172 "thread, using TLS\n"));
173 i = __kmp_gtid_get_specific();
183 if (!TCR_4(other_threads[i]->th.th_info.ds.ds_stackgrow)) {
184 KMP_FATAL(StackOverflow, i);
187 stack_base = (
char *)other_threads[i]->th.th_info.ds.ds_stackbase;
188 if (stack_addr > stack_base) {
189 TCW_PTR(other_threads[i]->th.th_info.ds.ds_stackbase, stack_addr);
190 TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
191 other_threads[i]->th.th_info.ds.ds_stacksize + stack_addr -
194 TCW_PTR(other_threads[i]->th.th_info.ds.ds_stacksize,
195 stack_base - stack_addr);
199 if (__kmp_storage_map) {
200 char *stack_end = (
char *)other_threads[i]->th.th_info.ds.ds_stackbase;
201 char *stack_beg = stack_end - other_threads[i]->th.th_info.ds.ds_stacksize;
202 __kmp_print_storage_map_gtid(i, stack_beg, stack_end,
203 other_threads[i]->th.th_info.ds.ds_stacksize,
204 "th_%d stack (refinement)", i);
209 int __kmp_get_global_thread_id_reg() {
212 if (!__kmp_init_serial) {
215 #ifdef KMP_TDATA_GTID
216 if (TCR_4(__kmp_gtid_mode) >= 3) {
217 KA_TRACE(1000, (
"*** __kmp_get_global_thread_id_reg: using TDATA\n"));
221 if (TCR_4(__kmp_gtid_mode) >= 2) {
222 KA_TRACE(1000, (
"*** __kmp_get_global_thread_id_reg: using keyed TLS\n"));
223 gtid = __kmp_gtid_get_specific();
226 (
"*** __kmp_get_global_thread_id_reg: using internal alg.\n"));
227 gtid = __kmp_get_global_thread_id();
231 if (gtid == KMP_GTID_DNE) {
233 (
"__kmp_get_global_thread_id_reg: Encountered new root thread. "
234 "Registering a new gtid.\n"));
235 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
236 if (!__kmp_init_serial) {
237 __kmp_do_serial_initialize();
238 gtid = __kmp_gtid_get_specific();
240 gtid = __kmp_register_root(FALSE);
242 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
246 KMP_DEBUG_ASSERT(gtid >= 0);
252 void __kmp_check_stack_overlap(kmp_info_t *th) {
254 char *stack_beg = NULL;
255 char *stack_end = NULL;
258 KA_TRACE(10, (
"__kmp_check_stack_overlap: called\n"));
259 if (__kmp_storage_map) {
260 stack_end = (
char *)th->th.th_info.ds.ds_stackbase;
261 stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
263 gtid = __kmp_gtid_from_thread(th);
265 if (gtid == KMP_GTID_MONITOR) {
266 __kmp_print_storage_map_gtid(
267 gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
268 "th_%s stack (%s)",
"mon",
269 (th->th.th_info.ds.ds_stackgrow) ?
"initial" :
"actual");
271 __kmp_print_storage_map_gtid(
272 gtid, stack_beg, stack_end, th->th.th_info.ds.ds_stacksize,
273 "th_%d stack (%s)", gtid,
274 (th->th.th_info.ds.ds_stackgrow) ?
"initial" :
"actual");
280 gtid = __kmp_gtid_from_thread(th);
281 if (__kmp_env_checks == TRUE && !KMP_UBER_GTID(gtid)) {
283 (
"__kmp_check_stack_overlap: performing extensive checking\n"));
284 if (stack_beg == NULL) {
285 stack_end = (
char *)th->th.th_info.ds.ds_stackbase;
286 stack_beg = stack_end - th->th.th_info.ds.ds_stacksize;
289 for (f = 0; f < __kmp_threads_capacity; f++) {
290 kmp_info_t *f_th = (kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[f]);
292 if (f_th && f_th != th) {
293 char *other_stack_end =
294 (
char *)TCR_PTR(f_th->th.th_info.ds.ds_stackbase);
295 char *other_stack_beg =
296 other_stack_end - (size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize);
297 if ((stack_beg > other_stack_beg && stack_beg < other_stack_end) ||
298 (stack_end > other_stack_beg && stack_end < other_stack_end)) {
301 if (__kmp_storage_map)
302 __kmp_print_storage_map_gtid(
303 -1, other_stack_beg, other_stack_end,
304 (
size_t)TCR_PTR(f_th->th.th_info.ds.ds_stacksize),
305 "th_%d stack (overlapped)", __kmp_gtid_from_thread(f_th));
307 __kmp_fatal(KMP_MSG(StackOverlap), KMP_HNT(ChangeStackLimit),
313 KA_TRACE(10, (
"__kmp_check_stack_overlap: returning\n"));
318 void __kmp_infinite_loop(
void) {
319 static int done = FALSE;
326 #define MAX_MESSAGE 512
328 void __kmp_print_storage_map_gtid(
int gtid,
void *p1,
void *p2,
size_t size,
329 char const *format, ...) {
330 char buffer[MAX_MESSAGE];
333 va_start(ap, format);
334 KMP_SNPRINTF(buffer,
sizeof(buffer),
"OMP storage map: %p %p%8lu %s\n", p1,
335 p2, (
unsigned long)size, format);
336 __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
337 __kmp_vprintf(kmp_err, buffer, ap);
338 #if KMP_PRINT_DATA_PLACEMENT
341 if (p1 <= p2 && (
char *)p2 - (
char *)p1 == size) {
342 if (__kmp_storage_map_verbose) {
343 node = __kmp_get_host_node(p1);
345 __kmp_storage_map_verbose = FALSE;
349 int localProc = __kmp_get_cpu_from_gtid(gtid);
351 const int page_size = KMP_GET_PAGE_SIZE();
353 p1 = (
void *)((
size_t)p1 & ~((size_t)page_size - 1));
354 p2 = (
void *)(((
size_t)p2 - 1) & ~((
size_t)page_size - 1));
356 __kmp_printf_no_lock(
" GTID %d localNode %d\n", gtid,
359 __kmp_printf_no_lock(
" GTID %d\n", gtid);
368 (
char *)p1 += page_size;
369 }
while (p1 <= p2 && (node = __kmp_get_host_node(p1)) == lastNode);
370 __kmp_printf_no_lock(
" %p-%p memNode %d\n", last, (
char *)p1 - 1,
374 __kmp_printf_no_lock(
" %p-%p memNode %d\n", p1,
375 (
char *)p1 + (page_size - 1),
376 __kmp_get_host_node(p1));
378 __kmp_printf_no_lock(
" %p-%p memNode %d\n", p2,
379 (
char *)p2 + (page_size - 1),
380 __kmp_get_host_node(p2));
386 __kmp_printf_no_lock(
" %s\n", KMP_I18N_STR(StorageMapWarning));
389 __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
392 void __kmp_warn(
char const *format, ...) {
393 char buffer[MAX_MESSAGE];
396 if (__kmp_generate_warnings == kmp_warnings_off) {
400 va_start(ap, format);
402 KMP_SNPRINTF(buffer,
sizeof(buffer),
"OMP warning: %s\n", format);
403 __kmp_acquire_bootstrap_lock(&__kmp_stdio_lock);
404 __kmp_vprintf(kmp_err, buffer, ap);
405 __kmp_release_bootstrap_lock(&__kmp_stdio_lock);
410 void __kmp_abort_process() {
412 __kmp_acquire_bootstrap_lock(&__kmp_exit_lock);
414 if (__kmp_debug_buf) {
415 __kmp_dump_debug_buffer();
418 if (KMP_OS_WINDOWS) {
421 __kmp_global.g.g_abort = SIGABRT;
438 __kmp_infinite_loop();
439 __kmp_release_bootstrap_lock(&__kmp_exit_lock);
443 void __kmp_abort_thread(
void) {
446 __kmp_infinite_loop();
452 static void __kmp_print_thread_storage_map(kmp_info_t *thr,
int gtid) {
453 __kmp_print_storage_map_gtid(gtid, thr, thr + 1,
sizeof(kmp_info_t),
"th_%d",
456 __kmp_print_storage_map_gtid(gtid, &thr->th.th_info, &thr->th.th_team,
457 sizeof(kmp_desc_t),
"th_%d.th_info", gtid);
459 __kmp_print_storage_map_gtid(gtid, &thr->th.th_local, &thr->th.th_pri_head,
460 sizeof(kmp_local_t),
"th_%d.th_local", gtid);
462 __kmp_print_storage_map_gtid(
463 gtid, &thr->th.th_bar[0], &thr->th.th_bar[bs_last_barrier],
464 sizeof(kmp_balign_t) * bs_last_barrier,
"th_%d.th_bar", gtid);
466 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_plain_barrier],
467 &thr->th.th_bar[bs_plain_barrier + 1],
468 sizeof(kmp_balign_t),
"th_%d.th_bar[plain]",
471 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_forkjoin_barrier],
472 &thr->th.th_bar[bs_forkjoin_barrier + 1],
473 sizeof(kmp_balign_t),
"th_%d.th_bar[forkjoin]",
476 #if KMP_FAST_REDUCTION_BARRIER
477 __kmp_print_storage_map_gtid(gtid, &thr->th.th_bar[bs_reduction_barrier],
478 &thr->th.th_bar[bs_reduction_barrier + 1],
479 sizeof(kmp_balign_t),
"th_%d.th_bar[reduction]",
481 #endif // KMP_FAST_REDUCTION_BARRIER
487 static void __kmp_print_team_storage_map(
const char *header, kmp_team_t *team,
488 int team_id,
int num_thr) {
489 int num_disp_buff = team->t.t_max_nproc > 1 ? __kmp_dispatch_num_buffers : 2;
490 __kmp_print_storage_map_gtid(-1, team, team + 1,
sizeof(kmp_team_t),
"%s_%d",
493 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[0],
494 &team->t.t_bar[bs_last_barrier],
495 sizeof(kmp_balign_team_t) * bs_last_barrier,
496 "%s_%d.t_bar", header, team_id);
498 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_plain_barrier],
499 &team->t.t_bar[bs_plain_barrier + 1],
500 sizeof(kmp_balign_team_t),
"%s_%d.t_bar[plain]",
503 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_forkjoin_barrier],
504 &team->t.t_bar[bs_forkjoin_barrier + 1],
505 sizeof(kmp_balign_team_t),
506 "%s_%d.t_bar[forkjoin]", header, team_id);
508 #if KMP_FAST_REDUCTION_BARRIER
509 __kmp_print_storage_map_gtid(-1, &team->t.t_bar[bs_reduction_barrier],
510 &team->t.t_bar[bs_reduction_barrier + 1],
511 sizeof(kmp_balign_team_t),
512 "%s_%d.t_bar[reduction]", header, team_id);
513 #endif // KMP_FAST_REDUCTION_BARRIER
515 __kmp_print_storage_map_gtid(
516 -1, &team->t.t_dispatch[0], &team->t.t_dispatch[num_thr],
517 sizeof(kmp_disp_t) * num_thr,
"%s_%d.t_dispatch", header, team_id);
519 __kmp_print_storage_map_gtid(
520 -1, &team->t.t_threads[0], &team->t.t_threads[num_thr],
521 sizeof(kmp_info_t *) * num_thr,
"%s_%d.t_threads", header, team_id);
523 __kmp_print_storage_map_gtid(-1, &team->t.t_disp_buffer[0],
524 &team->t.t_disp_buffer[num_disp_buff],
525 sizeof(dispatch_shared_info_t) * num_disp_buff,
526 "%s_%d.t_disp_buffer", header, team_id);
529 static void __kmp_init_allocator() { __kmp_init_memkind(); }
530 static void __kmp_fini_allocator() { __kmp_fini_memkind(); }
537 static void __kmp_reset_lock(kmp_bootstrap_lock_t *lck) {
539 __kmp_init_bootstrap_lock(lck);
542 static void __kmp_reset_locks_on_process_detach(
int gtid_req) {
560 for (i = 0; i < __kmp_threads_capacity; ++i) {
563 kmp_info_t *th = __kmp_threads[i];
566 int gtid = th->th.th_info.ds.ds_gtid;
567 if (gtid == gtid_req)
572 int alive = __kmp_is_thread_alive(th, &exit_val);
577 if (thread_count == 0)
583 __kmp_reset_lock(&__kmp_forkjoin_lock);
585 __kmp_reset_lock(&__kmp_stdio_lock);
589 BOOL WINAPI DllMain(HINSTANCE hInstDLL, DWORD fdwReason, LPVOID lpReserved) {
594 case DLL_PROCESS_ATTACH:
595 KA_TRACE(10, (
"DllMain: PROCESS_ATTACH\n"));
599 case DLL_PROCESS_DETACH:
600 KA_TRACE(10, (
"DllMain: PROCESS_DETACH T#%d\n", __kmp_gtid_get_specific()));
602 if (lpReserved != NULL) {
628 __kmp_reset_locks_on_process_detach(__kmp_gtid_get_specific());
631 __kmp_internal_end_library(__kmp_gtid_get_specific());
635 case DLL_THREAD_ATTACH:
636 KA_TRACE(10, (
"DllMain: THREAD_ATTACH\n"));
642 case DLL_THREAD_DETACH:
643 KA_TRACE(10, (
"DllMain: THREAD_DETACH T#%d\n", __kmp_gtid_get_specific()));
645 __kmp_internal_end_thread(__kmp_gtid_get_specific());
656 void __kmp_parallel_deo(
int *gtid_ref,
int *cid_ref,
ident_t *loc_ref) {
657 int gtid = *gtid_ref;
658 #ifdef BUILD_PARALLEL_ORDERED
659 kmp_team_t *team = __kmp_team_from_gtid(gtid);
662 if (__kmp_env_consistency_check) {
663 if (__kmp_threads[gtid]->th.th_root->r.r_active)
664 #if KMP_USE_DYNAMIC_LOCK
665 __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL, 0);
667 __kmp_push_sync(gtid, ct_ordered_in_parallel, loc_ref, NULL);
670 #ifdef BUILD_PARALLEL_ORDERED
671 if (!team->t.t_serialized) {
673 KMP_WAIT(&team->t.t_ordered.dt.t_value, __kmp_tid_from_gtid(gtid), KMP_EQ,
681 void __kmp_parallel_dxo(
int *gtid_ref,
int *cid_ref,
ident_t *loc_ref) {
682 int gtid = *gtid_ref;
683 #ifdef BUILD_PARALLEL_ORDERED
684 int tid = __kmp_tid_from_gtid(gtid);
685 kmp_team_t *team = __kmp_team_from_gtid(gtid);
688 if (__kmp_env_consistency_check) {
689 if (__kmp_threads[gtid]->th.th_root->r.r_active)
690 __kmp_pop_sync(gtid, ct_ordered_in_parallel, loc_ref);
692 #ifdef BUILD_PARALLEL_ORDERED
693 if (!team->t.t_serialized) {
698 team->t.t_ordered.dt.t_value = ((tid + 1) % team->t.t_nproc);
708 int __kmp_enter_single(
int gtid,
ident_t *id_ref,
int push_ws) {
713 if (!TCR_4(__kmp_init_parallel))
714 __kmp_parallel_initialize();
715 __kmp_resume_if_soft_paused();
717 th = __kmp_threads[gtid];
718 team = th->th.th_team;
721 th->th.th_ident = id_ref;
723 if (team->t.t_serialized) {
726 kmp_int32 old_this = th->th.th_local.this_construct;
728 ++th->th.th_local.this_construct;
732 if (team->t.t_construct == old_this) {
733 status = __kmp_atomic_compare_store_acq(&team->t.t_construct, old_this,
734 th->th.th_local.this_construct);
737 if (__itt_metadata_add_ptr && __kmp_forkjoin_frames_mode == 3 &&
738 KMP_MASTER_GTID(gtid) && th->th.th_teams_microtask == NULL &&
739 team->t.t_active_level ==
741 __kmp_itt_metadata_single(id_ref);
746 if (__kmp_env_consistency_check) {
747 if (status && push_ws) {
748 __kmp_push_workshare(gtid, ct_psingle, id_ref);
750 __kmp_check_workshare(gtid, ct_psingle, id_ref);
755 __kmp_itt_single_start(gtid);
761 void __kmp_exit_single(
int gtid) {
763 __kmp_itt_single_end(gtid);
765 if (__kmp_env_consistency_check)
766 __kmp_pop_workshare(gtid, ct_psingle, NULL);
775 static int __kmp_reserve_threads(kmp_root_t *root, kmp_team_t *parent_team,
776 int master_tid,
int set_nthreads,
780 KMP_DEBUG_ASSERT(__kmp_init_serial);
781 KMP_DEBUG_ASSERT(root && parent_team);
782 kmp_info_t *this_thr = parent_team->t.t_threads[master_tid];
786 new_nthreads = set_nthreads;
787 if (!get__dynamic_2(parent_team, master_tid)) {
790 #ifdef USE_LOAD_BALANCE
791 else if (__kmp_global.g.g_dynamic_mode == dynamic_load_balance) {
792 new_nthreads = __kmp_load_balance_nproc(root, set_nthreads);
793 if (new_nthreads == 1) {
794 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d load balance reduced "
795 "reservation to 1 thread\n",
799 if (new_nthreads < set_nthreads) {
800 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d load balance reduced "
801 "reservation to %d threads\n",
802 master_tid, new_nthreads));
806 else if (__kmp_global.g.g_dynamic_mode == dynamic_thread_limit) {
807 new_nthreads = __kmp_avail_proc - __kmp_nth +
808 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
809 if (new_nthreads <= 1) {
810 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d thread limit reduced "
811 "reservation to 1 thread\n",
815 if (new_nthreads < set_nthreads) {
816 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d thread limit reduced "
817 "reservation to %d threads\n",
818 master_tid, new_nthreads));
820 new_nthreads = set_nthreads;
822 }
else if (__kmp_global.g.g_dynamic_mode == dynamic_random) {
823 if (set_nthreads > 2) {
824 new_nthreads = __kmp_get_random(parent_team->t.t_threads[master_tid]);
825 new_nthreads = (new_nthreads % set_nthreads) + 1;
826 if (new_nthreads == 1) {
827 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d dynamic random reduced "
828 "reservation to 1 thread\n",
832 if (new_nthreads < set_nthreads) {
833 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d dynamic random reduced "
834 "reservation to %d threads\n",
835 master_tid, new_nthreads));
843 if (__kmp_nth + new_nthreads -
844 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
846 int tl_nthreads = __kmp_max_nth - __kmp_nth +
847 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
848 if (tl_nthreads <= 0) {
853 if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
854 __kmp_reserve_warn = 1;
855 __kmp_msg(kmp_ms_warning,
856 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
857 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
859 if (tl_nthreads == 1) {
860 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT "
861 "reduced reservation to 1 thread\n",
865 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d KMP_DEVICE_THREAD_LIMIT reduced "
866 "reservation to %d threads\n",
867 master_tid, tl_nthreads));
868 new_nthreads = tl_nthreads;
872 int cg_nthreads = this_thr->th.th_cg_roots->cg_nthreads;
873 int max_cg_threads = this_thr->th.th_cg_roots->cg_thread_limit;
874 if (cg_nthreads + new_nthreads -
875 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
877 int tl_nthreads = max_cg_threads - cg_nthreads +
878 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
879 if (tl_nthreads <= 0) {
884 if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
885 __kmp_reserve_warn = 1;
886 __kmp_msg(kmp_ms_warning,
887 KMP_MSG(CantFormThrTeam, set_nthreads, tl_nthreads),
888 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
890 if (tl_nthreads == 1) {
891 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT "
892 "reduced reservation to 1 thread\n",
896 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d OMP_THREAD_LIMIT reduced "
897 "reservation to %d threads\n",
898 master_tid, tl_nthreads));
899 new_nthreads = tl_nthreads;
905 capacity = __kmp_threads_capacity;
906 if (TCR_PTR(__kmp_threads[0]) == NULL) {
909 if (__kmp_nth + new_nthreads -
910 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) >
913 int slotsRequired = __kmp_nth + new_nthreads -
914 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc) -
916 int slotsAdded = __kmp_expand_threads(slotsRequired);
917 if (slotsAdded < slotsRequired) {
919 new_nthreads -= (slotsRequired - slotsAdded);
920 KMP_ASSERT(new_nthreads >= 1);
923 if (!get__dynamic_2(parent_team, master_tid) && (!__kmp_reserve_warn)) {
924 __kmp_reserve_warn = 1;
925 if (__kmp_tp_cached) {
926 __kmp_msg(kmp_ms_warning,
927 KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
928 KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
929 KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
931 __kmp_msg(kmp_ms_warning,
932 KMP_MSG(CantFormThrTeam, set_nthreads, new_nthreads),
933 KMP_HNT(SystemLimitOnThreads), __kmp_msg_null);
940 if (new_nthreads == 1) {
942 (
"__kmp_reserve_threads: T#%d serializing team after reclaiming "
943 "dead roots and rechecking; requested %d threads\n",
944 __kmp_get_gtid(), set_nthreads));
946 KC_TRACE(10, (
"__kmp_reserve_threads: T#%d allocating %d threads; requested"
948 __kmp_get_gtid(), new_nthreads, set_nthreads));
957 static void __kmp_fork_team_threads(kmp_root_t *root, kmp_team_t *team,
958 kmp_info_t *master_th,
int master_gtid) {
962 KA_TRACE(10, (
"__kmp_fork_team_threads: new_nprocs = %d\n", team->t.t_nproc));
963 KMP_DEBUG_ASSERT(master_gtid == __kmp_get_gtid());
967 master_th->th.th_info.ds.ds_tid = 0;
968 master_th->th.th_team = team;
969 master_th->th.th_team_nproc = team->t.t_nproc;
970 master_th->th.th_team_master = master_th;
971 master_th->th.th_team_serialized = FALSE;
972 master_th->th.th_dispatch = &team->t.t_dispatch[0];
975 #if KMP_NESTED_HOT_TEAMS
977 kmp_hot_team_ptr_t *hot_teams = master_th->th.th_hot_teams;
980 int level = team->t.t_active_level - 1;
981 if (master_th->th.th_teams_microtask) {
982 if (master_th->th.th_teams_size.nteams > 1) {
986 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
987 master_th->th.th_teams_level == team->t.t_level) {
992 if (level < __kmp_hot_teams_max_level) {
993 if (hot_teams[level].hot_team) {
995 KMP_DEBUG_ASSERT(hot_teams[level].hot_team == team);
999 hot_teams[level].hot_team = team;
1000 hot_teams[level].hot_team_nth = team->t.t_nproc;
1007 use_hot_team = team == root->r.r_hot_team;
1009 if (!use_hot_team) {
1012 team->t.t_threads[0] = master_th;
1013 __kmp_initialize_info(master_th, team, 0, master_gtid);
1016 for (i = 1; i < team->t.t_nproc; i++) {
1019 kmp_info_t *thr = __kmp_allocate_thread(root, team, i);
1020 team->t.t_threads[i] = thr;
1021 KMP_DEBUG_ASSERT(thr);
1022 KMP_DEBUG_ASSERT(thr->th.th_team == team);
1024 KA_TRACE(20, (
"__kmp_fork_team_threads: T#%d(%d:%d) init arrived "
1025 "T#%d(%d:%d) join =%llu, plain=%llu\n",
1026 __kmp_gtid_from_tid(0, team), team->t.t_id, 0,
1027 __kmp_gtid_from_tid(i, team), team->t.t_id, i,
1028 team->t.t_bar[bs_forkjoin_barrier].b_arrived,
1029 team->t.t_bar[bs_plain_barrier].b_arrived));
1030 thr->th.th_teams_microtask = master_th->th.th_teams_microtask;
1031 thr->th.th_teams_level = master_th->th.th_teams_level;
1032 thr->th.th_teams_size = master_th->th.th_teams_size;
1035 kmp_balign_t *balign = team->t.t_threads[i]->th.th_bar;
1036 for (b = 0; b < bs_last_barrier; ++b) {
1037 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
1038 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
1040 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
1046 #if KMP_AFFINITY_SUPPORTED
1047 __kmp_partition_places(team);
1051 if (__kmp_display_affinity && team->t.t_display_affinity != 1) {
1052 for (i = 0; i < team->t.t_nproc; i++) {
1053 kmp_info_t *thr = team->t.t_threads[i];
1054 if (thr->th.th_prev_num_threads != team->t.t_nproc ||
1055 thr->th.th_prev_level != team->t.t_level) {
1056 team->t.t_display_affinity = 1;
1065 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
1069 inline static void propagateFPControl(kmp_team_t *team) {
1070 if (__kmp_inherit_fp_control) {
1071 kmp_int16 x87_fpu_control_word;
1075 __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1076 __kmp_store_mxcsr(&mxcsr);
1077 mxcsr &= KMP_X86_MXCSR_MASK;
1088 KMP_CHECK_UPDATE(team->t.t_x87_fpu_control_word, x87_fpu_control_word);
1089 KMP_CHECK_UPDATE(team->t.t_mxcsr, mxcsr);
1092 KMP_CHECK_UPDATE(team->t.t_fp_control_saved, TRUE);
1096 KMP_CHECK_UPDATE(team->t.t_fp_control_saved, FALSE);
1102 inline static void updateHWFPControl(kmp_team_t *team) {
1103 if (__kmp_inherit_fp_control && team->t.t_fp_control_saved) {
1106 kmp_int16 x87_fpu_control_word;
1108 __kmp_store_x87_fpu_control_word(&x87_fpu_control_word);
1109 __kmp_store_mxcsr(&mxcsr);
1110 mxcsr &= KMP_X86_MXCSR_MASK;
1112 if (team->t.t_x87_fpu_control_word != x87_fpu_control_word) {
1113 __kmp_clear_x87_fpu_status_word();
1114 __kmp_load_x87_fpu_control_word(&team->t.t_x87_fpu_control_word);
1117 if (team->t.t_mxcsr != mxcsr) {
1118 __kmp_load_mxcsr(&team->t.t_mxcsr);
1123 #define propagateFPControl(x) ((void)0)
1124 #define updateHWFPControl(x) ((void)0)
1127 static void __kmp_alloc_argv_entries(
int argc, kmp_team_t *team,
1132 void __kmp_serialized_parallel(
ident_t *loc, kmp_int32 global_tid) {
1133 kmp_info_t *this_thr;
1134 kmp_team_t *serial_team;
1136 KC_TRACE(10, (
"__kmpc_serialized_parallel: called by T#%d\n", global_tid));
1143 if (!TCR_4(__kmp_init_parallel))
1144 __kmp_parallel_initialize();
1145 __kmp_resume_if_soft_paused();
1147 this_thr = __kmp_threads[global_tid];
1148 serial_team = this_thr->th.th_serial_team;
1151 KMP_DEBUG_ASSERT(serial_team);
1154 if (__kmp_tasking_mode != tskm_immediate_exec) {
1156 this_thr->th.th_task_team ==
1157 this_thr->th.th_team->t.t_task_team[this_thr->th.th_task_state]);
1158 KMP_DEBUG_ASSERT(serial_team->t.t_task_team[this_thr->th.th_task_state] ==
1160 KA_TRACE(20, (
"__kmpc_serialized_parallel: T#%d pushing task_team %p / "
1161 "team %p, new task_team = NULL\n",
1162 global_tid, this_thr->th.th_task_team, this_thr->th.th_team));
1163 this_thr->th.th_task_team = NULL;
1166 kmp_proc_bind_t proc_bind = this_thr->th.th_set_proc_bind;
1167 if (this_thr->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1168 proc_bind = proc_bind_false;
1169 }
else if (proc_bind == proc_bind_default) {
1172 proc_bind = this_thr->th.th_current_task->td_icvs.proc_bind;
1175 this_thr->th.th_set_proc_bind = proc_bind_default;
1178 ompt_data_t ompt_parallel_data = ompt_data_none;
1179 ompt_data_t *implicit_task_data;
1180 void *codeptr = OMPT_LOAD_RETURN_ADDRESS(global_tid);
1181 if (ompt_enabled.enabled &&
1182 this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
1184 ompt_task_info_t *parent_task_info;
1185 parent_task_info = OMPT_CUR_TASK_INFO(this_thr);
1187 parent_task_info->frame.enter_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1188 if (ompt_enabled.ompt_callback_parallel_begin) {
1191 ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1192 &(parent_task_info->task_data), &(parent_task_info->frame),
1193 &ompt_parallel_data, team_size,
1194 ompt_parallel_invoker_program | ompt_parallel_team, codeptr);
1197 #endif // OMPT_SUPPORT
1199 if (this_thr->th.th_team != serial_team) {
1201 int level = this_thr->th.th_team->t.t_level;
1203 if (serial_team->t.t_serialized) {
1206 kmp_team_t *new_team;
1208 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1211 __kmp_allocate_team(this_thr->th.th_root, 1, 1,
1215 proc_bind, &this_thr->th.th_current_task->td_icvs,
1216 0 USE_NESTED_HOT_ARG(NULL));
1217 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1218 KMP_ASSERT(new_team);
1221 new_team->t.t_threads[0] = this_thr;
1222 new_team->t.t_parent = this_thr->th.th_team;
1223 serial_team = new_team;
1224 this_thr->th.th_serial_team = serial_team;
1228 (
"__kmpc_serialized_parallel: T#%d allocated new serial team %p\n",
1229 global_tid, serial_team));
1237 (
"__kmpc_serialized_parallel: T#%d reusing cached serial team %p\n",
1238 global_tid, serial_team));
1242 KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1243 KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1244 KMP_DEBUG_ASSERT(this_thr->th.th_team != serial_team);
1245 serial_team->t.t_ident = loc;
1246 serial_team->t.t_serialized = 1;
1247 serial_team->t.t_nproc = 1;
1248 serial_team->t.t_parent = this_thr->th.th_team;
1249 serial_team->t.t_sched.sched = this_thr->th.th_team->t.t_sched.sched;
1250 this_thr->th.th_team = serial_team;
1251 serial_team->t.t_master_tid = this_thr->th.th_info.ds.ds_tid;
1253 KF_TRACE(10, (
"__kmpc_serialized_parallel: T#d curtask=%p\n", global_tid,
1254 this_thr->th.th_current_task));
1255 KMP_ASSERT(this_thr->th.th_current_task->td_flags.executing == 1);
1256 this_thr->th.th_current_task->td_flags.executing = 0;
1258 __kmp_push_current_task_to_thread(this_thr, serial_team, 0);
1263 copy_icvs(&this_thr->th.th_current_task->td_icvs,
1264 &this_thr->th.th_current_task->td_parent->td_icvs);
1268 if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1269 this_thr->th.th_current_task->td_icvs.nproc =
1270 __kmp_nested_nth.nth[level + 1];
1273 if (__kmp_nested_proc_bind.used &&
1274 (level + 1 < __kmp_nested_proc_bind.used)) {
1275 this_thr->th.th_current_task->td_icvs.proc_bind =
1276 __kmp_nested_proc_bind.bind_types[level + 1];
1280 serial_team->t.t_pkfn = (microtask_t)(~0);
1282 this_thr->th.th_info.ds.ds_tid = 0;
1285 this_thr->th.th_team_nproc = 1;
1286 this_thr->th.th_team_master = this_thr;
1287 this_thr->th.th_team_serialized = 1;
1289 serial_team->t.t_level = serial_team->t.t_parent->t.t_level + 1;
1290 serial_team->t.t_active_level = serial_team->t.t_parent->t.t_active_level;
1291 serial_team->t.t_def_allocator = this_thr->th.th_def_allocator;
1293 propagateFPControl(serial_team);
1296 KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1297 if (!serial_team->t.t_dispatch->th_disp_buffer) {
1298 serial_team->t.t_dispatch->th_disp_buffer =
1299 (dispatch_private_info_t *)__kmp_allocate(
1300 sizeof(dispatch_private_info_t));
1302 this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1309 KMP_DEBUG_ASSERT(this_thr->th.th_team == serial_team);
1310 KMP_DEBUG_ASSERT(serial_team->t.t_threads);
1311 KMP_DEBUG_ASSERT(serial_team->t.t_threads[0] == this_thr);
1312 ++serial_team->t.t_serialized;
1313 this_thr->th.th_team_serialized = serial_team->t.t_serialized;
1316 int level = this_thr->th.th_team->t.t_level;
1319 if (__kmp_nested_nth.used && (level + 1 < __kmp_nested_nth.used)) {
1320 this_thr->th.th_current_task->td_icvs.nproc =
1321 __kmp_nested_nth.nth[level + 1];
1323 serial_team->t.t_level++;
1324 KF_TRACE(10, (
"__kmpc_serialized_parallel: T#%d increasing nesting level "
1325 "of serial team %p to %d\n",
1326 global_tid, serial_team, serial_team->t.t_level));
1329 KMP_DEBUG_ASSERT(serial_team->t.t_dispatch);
1331 dispatch_private_info_t *disp_buffer =
1332 (dispatch_private_info_t *)__kmp_allocate(
1333 sizeof(dispatch_private_info_t));
1334 disp_buffer->next = serial_team->t.t_dispatch->th_disp_buffer;
1335 serial_team->t.t_dispatch->th_disp_buffer = disp_buffer;
1337 this_thr->th.th_dispatch = serial_team->t.t_dispatch;
1341 KMP_CHECK_UPDATE(serial_team->t.t_cancel_request, cancel_noreq);
1345 if (__kmp_display_affinity) {
1346 if (this_thr->th.th_prev_level != serial_team->t.t_level ||
1347 this_thr->th.th_prev_num_threads != 1) {
1349 __kmp_aux_display_affinity(global_tid, NULL);
1350 this_thr->th.th_prev_level = serial_team->t.t_level;
1351 this_thr->th.th_prev_num_threads = 1;
1355 if (__kmp_env_consistency_check)
1356 __kmp_push_parallel(global_tid, NULL);
1358 serial_team->t.ompt_team_info.master_return_address = codeptr;
1359 if (ompt_enabled.enabled &&
1360 this_thr->th.ompt_thread_info.state != ompt_state_overhead) {
1361 OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1363 ompt_lw_taskteam_t lw_taskteam;
1364 __ompt_lw_taskteam_init(&lw_taskteam, this_thr, global_tid,
1365 &ompt_parallel_data, codeptr);
1367 __ompt_lw_taskteam_link(&lw_taskteam, this_thr, 1);
1371 implicit_task_data = OMPT_CUR_TASK_DATA(this_thr);
1372 if (ompt_enabled.ompt_callback_implicit_task) {
1373 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1374 ompt_scope_begin, OMPT_CUR_TEAM_DATA(this_thr),
1375 OMPT_CUR_TASK_DATA(this_thr), 1, __kmp_tid_from_gtid(global_tid), ompt_task_implicit);
1376 OMPT_CUR_TASK_INFO(this_thr)
1377 ->thread_num = __kmp_tid_from_gtid(global_tid);
1381 this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
1382 OMPT_CUR_TASK_INFO(this_thr)->frame.exit_frame.ptr = OMPT_GET_FRAME_ADDRESS(0);
1389 int __kmp_fork_call(
ident_t *loc,
int gtid,
1390 enum fork_context_e call_context,
1391 kmp_int32 argc, microtask_t microtask, launch_t invoker,
1393 #
if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1402 int master_this_cons;
1404 kmp_team_t *parent_team;
1405 kmp_info_t *master_th;
1409 int master_set_numthreads;
1413 #if KMP_NESTED_HOT_TEAMS
1414 kmp_hot_team_ptr_t **p_hot_teams;
1417 KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_fork_call);
1420 KA_TRACE(20, (
"__kmp_fork_call: enter T#%d\n", gtid));
1421 if (__kmp_stkpadding > 0 && __kmp_root[gtid] != NULL) {
1424 void *dummy = KMP_ALLOCA(__kmp_stkpadding);
1426 if (__kmp_stkpadding > KMP_MAX_STKPADDING)
1427 __kmp_stkpadding += (short)((kmp_int64)dummy);
1433 if (!TCR_4(__kmp_init_parallel))
1434 __kmp_parallel_initialize();
1435 __kmp_resume_if_soft_paused();
1438 master_th = __kmp_threads[gtid];
1440 parent_team = master_th->th.th_team;
1441 master_tid = master_th->th.th_info.ds.ds_tid;
1442 master_this_cons = master_th->th.th_local.this_construct;
1443 root = master_th->th.th_root;
1444 master_active = root->r.r_active;
1445 master_set_numthreads = master_th->th.th_set_nproc;
1448 ompt_data_t ompt_parallel_data = ompt_data_none;
1449 ompt_data_t *parent_task_data;
1450 ompt_frame_t *ompt_frame;
1451 ompt_data_t *implicit_task_data;
1452 void *return_address = NULL;
1454 if (ompt_enabled.enabled) {
1455 __ompt_get_task_info_internal(0, NULL, &parent_task_data, &ompt_frame,
1457 return_address = OMPT_LOAD_RETURN_ADDRESS(gtid);
1462 level = parent_team->t.t_level;
1464 active_level = parent_team->t.t_active_level;
1466 teams_level = master_th->th.th_teams_level;
1467 #if KMP_NESTED_HOT_TEAMS
1468 p_hot_teams = &master_th->th.th_hot_teams;
1469 if (*p_hot_teams == NULL && __kmp_hot_teams_max_level > 0) {
1470 *p_hot_teams = (kmp_hot_team_ptr_t *)__kmp_allocate(
1471 sizeof(kmp_hot_team_ptr_t) * __kmp_hot_teams_max_level);
1472 (*p_hot_teams)[0].hot_team = root->r.r_hot_team;
1474 (*p_hot_teams)[0].hot_team_nth = 1;
1479 if (ompt_enabled.enabled) {
1480 if (ompt_enabled.ompt_callback_parallel_begin) {
1481 int team_size = master_set_numthreads
1482 ? master_set_numthreads
1483 : get__nproc_2(parent_team, master_tid);
1484 int flags = OMPT_INVOKER(call_context) |
1485 ((microtask == (microtask_t)__kmp_teams_master)
1486 ? ompt_parallel_league
1487 : ompt_parallel_team);
1488 ompt_callbacks.ompt_callback(ompt_callback_parallel_begin)(
1489 parent_task_data, ompt_frame, &ompt_parallel_data, team_size, flags,
1492 master_th->th.ompt_thread_info.state = ompt_state_overhead;
1496 master_th->th.th_ident = loc;
1498 if (master_th->th.th_teams_microtask && ap &&
1499 microtask != (microtask_t)__kmp_teams_master && level == teams_level) {
1503 parent_team->t.t_ident = loc;
1504 __kmp_alloc_argv_entries(argc, parent_team, TRUE);
1505 parent_team->t.t_argc = argc;
1506 argv = (
void **)parent_team->t.t_argv;
1507 for (i = argc - 1; i >= 0; --i)
1509 #
if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1510 *argv++ = va_arg(*ap,
void *);
1512 *argv++ = va_arg(ap,
void *);
1515 if (parent_team == master_th->th.th_serial_team) {
1518 KMP_DEBUG_ASSERT(parent_team->t.t_serialized > 1);
1522 void **exit_frame_p;
1524 ompt_lw_taskteam_t lw_taskteam;
1526 if (ompt_enabled.enabled) {
1527 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1528 &ompt_parallel_data, return_address);
1529 exit_frame_p = &(lw_taskteam.ompt_task_info.frame.exit_frame.ptr);
1531 __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1535 implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1536 if (ompt_enabled.ompt_callback_implicit_task) {
1537 OMPT_CUR_TASK_INFO(master_th)
1538 ->thread_num = __kmp_tid_from_gtid(gtid);
1539 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1540 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1541 implicit_task_data, 1,
1542 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
1546 master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1548 exit_frame_p = &dummy;
1553 parent_team->t.t_serialized--;
1556 KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1557 KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1558 __kmp_invoke_microtask(microtask, gtid, 0, argc, parent_team->t.t_argv
1567 if (ompt_enabled.enabled) {
1568 *exit_frame_p = NULL;
1569 OMPT_CUR_TASK_INFO(master_th)->frame.exit_frame = ompt_data_none;
1570 if (ompt_enabled.ompt_callback_implicit_task) {
1571 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1572 ompt_scope_end, NULL, implicit_task_data, 1,
1573 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
1575 ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1576 __ompt_lw_taskteam_unlink(master_th);
1577 if (ompt_enabled.ompt_callback_parallel_end) {
1578 ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1579 &ompt_parallel_data, OMPT_CUR_TASK_DATA(master_th),
1580 OMPT_INVOKER(call_context) | ompt_parallel_team,
1583 master_th->th.ompt_thread_info.state = ompt_state_overhead;
1589 parent_team->t.t_pkfn = microtask;
1590 parent_team->t.t_invoke = invoker;
1591 KMP_ATOMIC_INC(&root->r.r_in_parallel);
1592 parent_team->t.t_active_level++;
1593 parent_team->t.t_level++;
1594 parent_team->t.t_def_allocator = master_th->th.th_def_allocator;
1597 if (ompt_enabled.enabled) {
1598 ompt_lw_taskteam_t lw_taskteam;
1599 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1600 &ompt_parallel_data, return_address);
1601 __ompt_lw_taskteam_link(&lw_taskteam, master_th, 1,
true);
1606 if (master_set_numthreads) {
1607 if (master_set_numthreads < master_th->th.th_teams_size.nth) {
1609 kmp_info_t **other_threads = parent_team->t.t_threads;
1610 parent_team->t.t_nproc = master_set_numthreads;
1611 for (i = 0; i < master_set_numthreads; ++i) {
1612 other_threads[i]->th.th_team_nproc = master_set_numthreads;
1616 master_th->th.th_set_nproc = 0;
1620 if (__kmp_debugging) {
1621 int nth = __kmp_omp_num_threads(loc);
1623 master_set_numthreads = nth;
1628 KF_TRACE(10, (
"__kmp_fork_call: before internal fork: root=%p, team=%p, "
1629 "master_th=%p, gtid=%d\n",
1630 root, parent_team, master_th, gtid));
1631 __kmp_internal_fork(loc, gtid, parent_team);
1632 KF_TRACE(10, (
"__kmp_fork_call: after internal fork: root=%p, team=%p, "
1633 "master_th=%p, gtid=%d\n",
1634 root, parent_team, master_th, gtid));
1637 KA_TRACE(20, (
"__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
1638 parent_team->t.t_id, parent_team->t.t_pkfn));
1640 if (!parent_team->t.t_invoke(gtid)) {
1641 KMP_ASSERT2(0,
"cannot invoke microtask for MASTER thread");
1643 KA_TRACE(20, (
"__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
1644 parent_team->t.t_id, parent_team->t.t_pkfn));
1647 KA_TRACE(20, (
"__kmp_fork_call: parallel exit T#%d\n", gtid));
1653 if (__kmp_tasking_mode != tskm_immediate_exec) {
1654 KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
1655 parent_team->t.t_task_team[master_th->th.th_task_state]);
1659 if (parent_team->t.t_active_level >=
1660 master_th->th.th_current_task->td_icvs.max_active_levels) {
1663 int enter_teams = ((ap == NULL && active_level == 0) ||
1664 (ap && teams_level > 0 && teams_level == level));
1666 master_set_numthreads
1667 ? master_set_numthreads
1676 if ((get__max_active_levels(master_th) == 1 &&
1677 (root->r.r_in_parallel && !enter_teams)) ||
1678 (__kmp_library == library_serial)) {
1679 KC_TRACE(10, (
"__kmp_fork_call: T#%d serializing team; requested %d"
1687 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
1692 nthreads = __kmp_reserve_threads(root, parent_team, master_tid,
1693 nthreads, enter_teams);
1694 if (nthreads == 1) {
1698 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
1702 KMP_DEBUG_ASSERT(nthreads > 0);
1705 master_th->th.th_set_nproc = 0;
1708 if (nthreads == 1) {
1710 #if KMP_OS_LINUX && \
1711 (KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64)
1714 void **args = (
void **)KMP_ALLOCA(argc *
sizeof(
void *));
1719 (
"__kmp_fork_call: T#%d serializing parallel region\n", gtid));
1723 if (call_context == fork_context_intel) {
1725 master_th->th.th_serial_team->t.t_ident = loc;
1728 master_th->th.th_serial_team->t.t_level--;
1733 void **exit_frame_p;
1734 ompt_task_info_t *task_info;
1736 ompt_lw_taskteam_t lw_taskteam;
1738 if (ompt_enabled.enabled) {
1739 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1740 &ompt_parallel_data, return_address);
1742 __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1745 task_info = OMPT_CUR_TASK_INFO(master_th);
1746 exit_frame_p = &(task_info->frame.exit_frame.ptr);
1747 if (ompt_enabled.ompt_callback_implicit_task) {
1748 OMPT_CUR_TASK_INFO(master_th)
1749 ->thread_num = __kmp_tid_from_gtid(gtid);
1750 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1751 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1752 &(task_info->task_data), 1,
1753 OMPT_CUR_TASK_INFO(master_th)->thread_num,
1754 ompt_task_implicit);
1758 master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1760 exit_frame_p = &dummy;
1765 KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1766 KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1767 __kmp_invoke_microtask(microtask, gtid, 0, argc,
1768 parent_team->t.t_argv
1777 if (ompt_enabled.enabled) {
1778 *exit_frame_p = NULL;
1779 if (ompt_enabled.ompt_callback_implicit_task) {
1780 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1781 ompt_scope_end, NULL, &(task_info->task_data), 1,
1782 OMPT_CUR_TASK_INFO(master_th)->thread_num,
1783 ompt_task_implicit);
1785 ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1786 __ompt_lw_taskteam_unlink(master_th);
1787 if (ompt_enabled.ompt_callback_parallel_end) {
1788 ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1789 &ompt_parallel_data, parent_task_data,
1790 OMPT_INVOKER(call_context) | ompt_parallel_team,
1793 master_th->th.ompt_thread_info.state = ompt_state_overhead;
1796 }
else if (microtask == (microtask_t)__kmp_teams_master) {
1797 KMP_DEBUG_ASSERT(master_th->th.th_team ==
1798 master_th->th.th_serial_team);
1799 team = master_th->th.th_team;
1801 team->t.t_invoke = invoker;
1802 __kmp_alloc_argv_entries(argc, team, TRUE);
1803 team->t.t_argc = argc;
1804 argv = (
void **)team->t.t_argv;
1806 for (i = argc - 1; i >= 0; --i)
1808 #
if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1809 *argv++ = va_arg(*ap,
void *);
1811 *argv++ = va_arg(ap,
void *);
1814 for (i = 0; i < argc; ++i)
1816 argv[i] = parent_team->t.t_argv[i];
1824 if (ompt_enabled.enabled) {
1825 ompt_task_info_t *task_info = OMPT_CUR_TASK_INFO(master_th);
1826 if (ompt_enabled.ompt_callback_implicit_task) {
1827 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1828 ompt_scope_end, NULL, &(task_info->task_data), 0,
1829 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_initial);
1831 if (ompt_enabled.ompt_callback_parallel_end) {
1832 ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1833 &ompt_parallel_data, parent_task_data,
1834 OMPT_INVOKER(call_context) | ompt_parallel_league,
1837 master_th->th.ompt_thread_info.state = ompt_state_overhead;
1842 for (i = argc - 1; i >= 0; --i)
1844 #
if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
1845 *argv++ = va_arg(*ap,
void *);
1847 *argv++ = va_arg(ap,
void *);
1853 void **exit_frame_p;
1854 ompt_task_info_t *task_info;
1856 ompt_lw_taskteam_t lw_taskteam;
1858 if (ompt_enabled.enabled) {
1859 __ompt_lw_taskteam_init(&lw_taskteam, master_th, gtid,
1860 &ompt_parallel_data, return_address);
1861 __ompt_lw_taskteam_link(&lw_taskteam, master_th, 0);
1863 task_info = OMPT_CUR_TASK_INFO(master_th);
1864 exit_frame_p = &(task_info->frame.exit_frame.ptr);
1867 implicit_task_data = OMPT_CUR_TASK_DATA(master_th);
1868 if (ompt_enabled.ompt_callback_implicit_task) {
1869 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1870 ompt_scope_begin, OMPT_CUR_TEAM_DATA(master_th),
1871 implicit_task_data, 1, __kmp_tid_from_gtid(gtid),
1872 ompt_task_implicit);
1873 OMPT_CUR_TASK_INFO(master_th)
1874 ->thread_num = __kmp_tid_from_gtid(gtid);
1878 master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
1880 exit_frame_p = &dummy;
1885 KMP_TIME_PARTITIONED_BLOCK(OMP_parallel);
1886 KMP_SET_THREAD_STATE_BLOCK(IMPLICIT_TASK);
1887 __kmp_invoke_microtask(microtask, gtid, 0, argc, args
1896 if (ompt_enabled.enabled) {
1897 *exit_frame_p = NULL;
1898 if (ompt_enabled.ompt_callback_implicit_task) {
1899 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
1900 ompt_scope_end, NULL, &(task_info->task_data), 1,
1901 OMPT_CUR_TASK_INFO(master_th)->thread_num,
1902 ompt_task_implicit);
1905 ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
1906 __ompt_lw_taskteam_unlink(master_th);
1907 if (ompt_enabled.ompt_callback_parallel_end) {
1908 ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
1909 &ompt_parallel_data, parent_task_data,
1910 OMPT_INVOKER(call_context) | ompt_parallel_team,
1913 master_th->th.ompt_thread_info.state = ompt_state_overhead;
1917 }
else if (call_context == fork_context_gnu) {
1919 ompt_lw_taskteam_t lwt;
1920 __ompt_lw_taskteam_init(&lwt, master_th, gtid, &ompt_parallel_data,
1923 lwt.ompt_task_info.frame.exit_frame = ompt_data_none;
1924 __ompt_lw_taskteam_link(&lwt, master_th, 1);
1929 KA_TRACE(20, (
"__kmp_fork_call: T#%d serial exit\n", gtid));
1932 KMP_ASSERT2(call_context < fork_context_last,
1933 "__kmp_fork_call: unknown fork_context parameter");
1936 KA_TRACE(20, (
"__kmp_fork_call: T#%d serial exit\n", gtid));
1943 KF_TRACE(10, (
"__kmp_fork_call: parent_team_aclevel=%d, master_th=%p, "
1944 "curtask=%p, curtask_max_aclevel=%d\n",
1945 parent_team->t.t_active_level, master_th,
1946 master_th->th.th_current_task,
1947 master_th->th.th_current_task->td_icvs.max_active_levels));
1951 master_th->th.th_current_task->td_flags.executing = 0;
1953 if (!master_th->th.th_teams_microtask || level > teams_level) {
1955 KMP_ATOMIC_INC(&root->r.r_in_parallel);
1959 int nthreads_icv = master_th->th.th_current_task->td_icvs.nproc;
1960 if ((level + 1 < __kmp_nested_nth.used) &&
1961 (__kmp_nested_nth.nth[level + 1] != nthreads_icv)) {
1962 nthreads_icv = __kmp_nested_nth.nth[level + 1];
1968 kmp_proc_bind_t proc_bind = master_th->th.th_set_proc_bind;
1969 kmp_proc_bind_t proc_bind_icv =
1971 if (master_th->th.th_current_task->td_icvs.proc_bind == proc_bind_false) {
1972 proc_bind = proc_bind_false;
1974 if (proc_bind == proc_bind_default) {
1977 proc_bind = master_th->th.th_current_task->td_icvs.proc_bind;
1983 if ((level + 1 < __kmp_nested_proc_bind.used) &&
1984 (__kmp_nested_proc_bind.bind_types[level + 1] !=
1985 master_th->th.th_current_task->td_icvs.proc_bind)) {
1986 proc_bind_icv = __kmp_nested_proc_bind.bind_types[level + 1];
1991 master_th->th.th_set_proc_bind = proc_bind_default;
1993 if ((nthreads_icv > 0) || (proc_bind_icv != proc_bind_default)) {
1994 kmp_internal_control_t new_icvs;
1995 copy_icvs(&new_icvs, &master_th->th.th_current_task->td_icvs);
1996 new_icvs.next = NULL;
1997 if (nthreads_icv > 0) {
1998 new_icvs.nproc = nthreads_icv;
2000 if (proc_bind_icv != proc_bind_default) {
2001 new_icvs.proc_bind = proc_bind_icv;
2005 KF_TRACE(10, (
"__kmp_fork_call: before __kmp_allocate_team\n"));
2006 team = __kmp_allocate_team(root, nthreads, nthreads,
2010 proc_bind, &new_icvs,
2011 argc USE_NESTED_HOT_ARG(master_th));
2014 KF_TRACE(10, (
"__kmp_fork_call: before __kmp_allocate_team\n"));
2015 team = __kmp_allocate_team(root, nthreads, nthreads,
2020 &master_th->th.th_current_task->td_icvs,
2021 argc USE_NESTED_HOT_ARG(master_th));
2024 10, (
"__kmp_fork_call: after __kmp_allocate_team - team = %p\n", team));
2027 KMP_CHECK_UPDATE(team->t.t_master_tid, master_tid);
2028 KMP_CHECK_UPDATE(team->t.t_master_this_cons, master_this_cons);
2029 KMP_CHECK_UPDATE(team->t.t_ident, loc);
2030 KMP_CHECK_UPDATE(team->t.t_parent, parent_team);
2031 KMP_CHECK_UPDATE_SYNC(team->t.t_pkfn, microtask);
2033 KMP_CHECK_UPDATE_SYNC(team->t.ompt_team_info.master_return_address,
2036 KMP_CHECK_UPDATE(team->t.t_invoke, invoker);
2038 if (!master_th->th.th_teams_microtask || level > teams_level) {
2039 int new_level = parent_team->t.t_level + 1;
2040 KMP_CHECK_UPDATE(team->t.t_level, new_level);
2041 new_level = parent_team->t.t_active_level + 1;
2042 KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2045 int new_level = parent_team->t.t_level;
2046 KMP_CHECK_UPDATE(team->t.t_level, new_level);
2047 new_level = parent_team->t.t_active_level;
2048 KMP_CHECK_UPDATE(team->t.t_active_level, new_level);
2050 kmp_r_sched_t new_sched = get__sched_2(parent_team, master_tid);
2052 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
2054 KMP_CHECK_UPDATE(team->t.t_cancel_request, cancel_noreq);
2055 KMP_CHECK_UPDATE(team->t.t_def_allocator, master_th->th.th_def_allocator);
2058 propagateFPControl(team);
2060 if (__kmp_tasking_mode != tskm_immediate_exec) {
2063 KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2064 parent_team->t.t_task_team[master_th->th.th_task_state]);
2065 KA_TRACE(20, (
"__kmp_fork_call: Master T#%d pushing task_team %p / team "
2066 "%p, new task_team %p / team %p\n",
2067 __kmp_gtid_from_thread(master_th),
2068 master_th->th.th_task_team, parent_team,
2069 team->t.t_task_team[master_th->th.th_task_state], team));
2071 if (active_level || master_th->th.th_task_team) {
2073 KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2074 if (master_th->th.th_task_state_top >=
2075 master_th->th.th_task_state_stack_sz) {
2076 kmp_uint32 new_size = 2 * master_th->th.th_task_state_stack_sz;
2077 kmp_uint8 *old_stack, *new_stack;
2079 new_stack = (kmp_uint8 *)__kmp_allocate(new_size);
2080 for (i = 0; i < master_th->th.th_task_state_stack_sz; ++i) {
2081 new_stack[i] = master_th->th.th_task_state_memo_stack[i];
2083 for (i = master_th->th.th_task_state_stack_sz; i < new_size;
2087 old_stack = master_th->th.th_task_state_memo_stack;
2088 master_th->th.th_task_state_memo_stack = new_stack;
2089 master_th->th.th_task_state_stack_sz = new_size;
2090 __kmp_free(old_stack);
2094 .th_task_state_memo_stack[master_th->th.th_task_state_top] =
2095 master_th->th.th_task_state;
2096 master_th->th.th_task_state_top++;
2097 #if KMP_NESTED_HOT_TEAMS
2098 if (master_th->th.th_hot_teams &&
2099 active_level < __kmp_hot_teams_max_level &&
2100 team == master_th->th.th_hot_teams[active_level].hot_team) {
2102 master_th->th.th_task_state =
2104 .th_task_state_memo_stack[master_th->th.th_task_state_top];
2107 master_th->th.th_task_state = 0;
2108 #if KMP_NESTED_HOT_TEAMS
2112 #if !KMP_NESTED_HOT_TEAMS
2113 KMP_DEBUG_ASSERT((master_th->th.th_task_team == NULL) ||
2114 (team == root->r.r_hot_team));
2120 (
"__kmp_fork_call: T#%d(%d:%d)->(%d:0) created a team of %d threads\n",
2121 gtid, parent_team->t.t_id, team->t.t_master_tid, team->t.t_id,
2123 KMP_DEBUG_ASSERT(team != root->r.r_hot_team ||
2124 (team->t.t_master_tid == 0 &&
2125 (team->t.t_parent == root->r.r_root_team ||
2126 team->t.t_parent->t.t_serialized)));
2130 argv = (
void **)team->t.t_argv;
2132 for (i = argc - 1; i >= 0; --i) {
2134 #if (KMP_ARCH_X86_64 || KMP_ARCH_ARM || KMP_ARCH_AARCH64) && KMP_OS_LINUX
2135 void *new_argv = va_arg(*ap,
void *);
2137 void *new_argv = va_arg(ap,
void *);
2139 KMP_CHECK_UPDATE(*argv, new_argv);
2143 for (i = 0; i < argc; ++i) {
2145 KMP_CHECK_UPDATE(argv[i], team->t.t_parent->t.t_argv[i]);
2150 KMP_CHECK_UPDATE(team->t.t_master_active, master_active);
2151 if (!root->r.r_active)
2152 root->r.r_active = TRUE;
2154 __kmp_fork_team_threads(root, team, master_th, gtid);
2155 __kmp_setup_icv_copy(team, nthreads,
2156 &master_th->th.th_current_task->td_icvs, loc);
2159 master_th->th.ompt_thread_info.state = ompt_state_work_parallel;
2162 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2165 if (team->t.t_active_level == 1
2166 && !master_th->th.th_teams_microtask) {
2168 if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2169 (__kmp_forkjoin_frames_mode == 3 ||
2170 __kmp_forkjoin_frames_mode == 1)) {
2171 kmp_uint64 tmp_time = 0;
2172 if (__itt_get_timestamp_ptr)
2173 tmp_time = __itt_get_timestamp();
2175 master_th->th.th_frame_time = tmp_time;
2176 if (__kmp_forkjoin_frames_mode == 3)
2177 team->t.t_region_time = tmp_time;
2181 if ((__itt_frame_begin_v3_ptr || KMP_ITT_DEBUG) &&
2182 __kmp_forkjoin_frames && !__kmp_forkjoin_frames_mode) {
2184 __kmp_itt_region_forking(gtid, team->t.t_nproc, 0);
2190 KMP_DEBUG_ASSERT(team == __kmp_threads[gtid]->th.th_team);
2193 (
"__kmp_internal_fork : root=%p, team=%p, master_th=%p, gtid=%d\n",
2194 root, team, master_th, gtid));
2197 if (__itt_stack_caller_create_ptr) {
2198 team->t.t_stack_id =
2199 __kmp_itt_stack_caller_create();
2207 __kmp_internal_fork(loc, gtid, team);
2208 KF_TRACE(10, (
"__kmp_internal_fork : after : root=%p, team=%p, "
2209 "master_th=%p, gtid=%d\n",
2210 root, team, master_th, gtid));
2213 if (call_context == fork_context_gnu) {
2214 KA_TRACE(20, (
"__kmp_fork_call: parallel exit T#%d\n", gtid));
2219 KA_TRACE(20, (
"__kmp_fork_call: T#%d(%d:0) invoke microtask = %p\n", gtid,
2220 team->t.t_id, team->t.t_pkfn));
2223 #if KMP_STATS_ENABLED
2227 KMP_SET_THREAD_STATE(stats_state_e::TEAMS_REGION);
2231 if (!team->t.t_invoke(gtid)) {
2232 KMP_ASSERT2(0,
"cannot invoke microtask for MASTER thread");
2235 #if KMP_STATS_ENABLED
2238 KMP_SET_THREAD_STATE(previous_state);
2242 KA_TRACE(20, (
"__kmp_fork_call: T#%d(%d:0) done microtask = %p\n", gtid,
2243 team->t.t_id, team->t.t_pkfn));
2246 KA_TRACE(20, (
"__kmp_fork_call: parallel exit T#%d\n", gtid));
2249 if (ompt_enabled.enabled) {
2250 master_th->th.ompt_thread_info.state = ompt_state_overhead;
2258 static inline void __kmp_join_restore_state(kmp_info_t *thread,
2261 thread->th.ompt_thread_info.state =
2262 ((team->t.t_serialized) ? ompt_state_work_serial
2263 : ompt_state_work_parallel);
2266 static inline void __kmp_join_ompt(
int gtid, kmp_info_t *thread,
2267 kmp_team_t *team, ompt_data_t *parallel_data,
2268 int flags,
void *codeptr) {
2269 ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2270 if (ompt_enabled.ompt_callback_parallel_end) {
2271 ompt_callbacks.ompt_callback(ompt_callback_parallel_end)(
2272 parallel_data, &(task_info->task_data), flags, codeptr);
2275 task_info->frame.enter_frame = ompt_data_none;
2276 __kmp_join_restore_state(thread, team);
2280 void __kmp_join_call(
ident_t *loc,
int gtid
2283 enum fork_context_e fork_context
2287 KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_join_call);
2289 kmp_team_t *parent_team;
2290 kmp_info_t *master_th;
2294 KA_TRACE(20, (
"__kmp_join_call: enter T#%d\n", gtid));
2297 master_th = __kmp_threads[gtid];
2298 root = master_th->th.th_root;
2299 team = master_th->th.th_team;
2300 parent_team = team->t.t_parent;
2302 master_th->th.th_ident = loc;
2305 void *team_microtask = (
void *)team->t.t_pkfn;
2306 if (ompt_enabled.enabled) {
2307 master_th->th.ompt_thread_info.state = ompt_state_overhead;
2312 if (__kmp_tasking_mode != tskm_immediate_exec && !exit_teams) {
2313 KA_TRACE(20, (
"__kmp_join_call: T#%d, old team = %p old task_team = %p, "
2314 "th_task_team = %p\n",
2315 __kmp_gtid_from_thread(master_th), team,
2316 team->t.t_task_team[master_th->th.th_task_state],
2317 master_th->th.th_task_team));
2318 KMP_DEBUG_ASSERT(master_th->th.th_task_team ==
2319 team->t.t_task_team[master_th->th.th_task_state]);
2323 if (team->t.t_serialized) {
2324 if (master_th->th.th_teams_microtask) {
2326 int level = team->t.t_level;
2327 int tlevel = master_th->th.th_teams_level;
2328 if (level == tlevel) {
2332 }
else if (level == tlevel + 1) {
2336 team->t.t_serialized++;
2342 if (ompt_enabled.enabled) {
2343 __kmp_join_restore_state(master_th, parent_team);
2350 master_active = team->t.t_master_active;
2355 __kmp_internal_join(loc, gtid, team);
2357 master_th->th.th_task_state =
2364 ompt_data_t *parallel_data = &(team->t.ompt_team_info.parallel_data);
2365 void *codeptr = team->t.ompt_team_info.master_return_address;
2369 if (__itt_stack_caller_create_ptr) {
2370 __kmp_itt_stack_caller_destroy(
2371 (__itt_caller)team->t
2376 if (team->t.t_active_level == 1 &&
2377 !master_th->th.th_teams_microtask) {
2378 master_th->th.th_ident = loc;
2381 if ((__itt_frame_submit_v3_ptr || KMP_ITT_DEBUG) &&
2382 __kmp_forkjoin_frames_mode == 3)
2383 __kmp_itt_frame_submit(gtid, team->t.t_region_time,
2384 master_th->th.th_frame_time, 0, loc,
2385 master_th->th.th_team_nproc, 1);
2386 else if ((__itt_frame_end_v3_ptr || KMP_ITT_DEBUG) &&
2387 !__kmp_forkjoin_frames_mode && __kmp_forkjoin_frames)
2388 __kmp_itt_region_joined(gtid);
2392 if (master_th->th.th_teams_microtask && !exit_teams &&
2393 team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
2394 team->t.t_level == master_th->th.th_teams_level + 1) {
2399 ompt_data_t ompt_parallel_data = ompt_data_none;
2400 if (ompt_enabled.enabled) {
2401 ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2402 if (ompt_enabled.ompt_callback_implicit_task) {
2403 int ompt_team_size = team->t.t_nproc;
2404 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
2405 ompt_scope_end, NULL, &(task_info->task_data), ompt_team_size,
2406 OMPT_CUR_TASK_INFO(master_th)->thread_num, ompt_task_implicit);
2408 task_info->frame.exit_frame = ompt_data_none;
2409 task_info->task_data = ompt_data_none;
2410 ompt_parallel_data = *OMPT_CUR_TEAM_DATA(master_th);
2411 __ompt_lw_taskteam_unlink(master_th);
2416 team->t.t_active_level--;
2417 KMP_ATOMIC_DEC(&root->r.r_in_parallel);
    if (master_th->th.th_team_nproc < master_th->th.th_teams_size.nth) {
      int old_num = master_th->th.th_team_nproc;
      int new_num = master_th->th.th_teams_size.nth;
      kmp_info_t **other_threads = team->t.t_threads;
      team->t.t_nproc = new_num;
      for (int i = 0; i < old_num; ++i) {
        other_threads[i]->th.th_team_nproc = new_num;
      }
      // Adjust states of the threads that were not used in the previous region
      for (int i = old_num; i < new_num; ++i) {
        // Re-initialize the thread's barrier data
        KMP_DEBUG_ASSERT(other_threads[i]);
        kmp_balign_t *balign = other_threads[i]->th.th_bar;
        for (int b = 0; b < bs_last_barrier; ++b) {
          balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
          KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
#if USE_DEBUGGER
          balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
#endif
        }
        if (__kmp_tasking_mode != tskm_immediate_exec) {
          // Synchronize the thread's task state with the master's
          other_threads[i]->th.th_task_state = master_th->th.th_task_state;
        }
      }
    }
2451 if (ompt_enabled.enabled) {
2452 __kmp_join_ompt(gtid, master_th, parent_team, &ompt_parallel_data,
2453 OMPT_INVOKER(fork_context) | ompt_parallel_team, codeptr);
2461 master_th->th.th_info.ds.ds_tid = team->t.t_master_tid;
2462 master_th->th.th_local.this_construct = team->t.t_master_this_cons;
2464 master_th->th.th_dispatch = &parent_team->t.t_dispatch[team->t.t_master_tid];
2469 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
2471 if (!master_th->th.th_teams_microtask ||
2472 team->t.t_level > master_th->th.th_teams_level) {
2474 KMP_ATOMIC_DEC(&root->r.r_in_parallel);
2476 KMP_DEBUG_ASSERT(root->r.r_in_parallel >= 0);
2479 if (ompt_enabled.enabled) {
2480 ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
2481 if (ompt_enabled.ompt_callback_implicit_task) {
2482 int flags = (team_microtask == (
void *)__kmp_teams_master)
2484 : ompt_task_implicit;
2485 int ompt_team_size = (flags == ompt_task_initial) ? 0 : team->t.t_nproc;
2486 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
2487 ompt_scope_end, NULL, &(task_info->task_data), ompt_team_size,
2488 OMPT_CUR_TASK_INFO(master_th)->thread_num, flags);
2490 task_info->frame.exit_frame = ompt_data_none;
2491 task_info->task_data = ompt_data_none;
2495 KF_TRACE(10, (
"__kmp_join_call1: T#%d, this_thread=%p team=%p\n", 0,
2497 __kmp_pop_current_task_from_thread(master_th);
2499 #if KMP_AFFINITY_SUPPORTED
2501 master_th->th.th_first_place = team->t.t_first_place;
2502 master_th->th.th_last_place = team->t.t_last_place;
2503 #endif // KMP_AFFINITY_SUPPORTED
2504 master_th->th.th_def_allocator = team->t.t_def_allocator;
2506 updateHWFPControl(team);
2508 if (root->r.r_active != master_active)
2509 root->r.r_active = master_active;
2511 __kmp_free_team(root, team USE_NESTED_HOT_ARG(
2519 master_th->th.th_team = parent_team;
2520 master_th->th.th_team_nproc = parent_team->t.t_nproc;
2521 master_th->th.th_team_master = parent_team->t.t_threads[0];
2522 master_th->th.th_team_serialized = parent_team->t.t_serialized;
2525 if (parent_team->t.t_serialized &&
2526 parent_team != master_th->th.th_serial_team &&
2527 parent_team != root->r.r_root_team) {
2528 __kmp_free_team(root,
2529 master_th->th.th_serial_team USE_NESTED_HOT_ARG(NULL));
2530 master_th->th.th_serial_team = parent_team;
2533 if (__kmp_tasking_mode != tskm_immediate_exec) {
2534 if (master_th->th.th_task_state_top >
2536 KMP_DEBUG_ASSERT(master_th->th.th_task_state_memo_stack);
2538 master_th->th.th_task_state_memo_stack[master_th->th.th_task_state_top] =
2539 master_th->th.th_task_state;
2540 --master_th->th.th_task_state_top;
2542 master_th->th.th_task_state =
2544 .th_task_state_memo_stack[master_th->th.th_task_state_top];
2547 master_th->th.th_task_team =
2548 parent_team->t.t_task_team[master_th->th.th_task_state];
2550 (
"__kmp_join_call: Master T#%d restoring task_team %p / team %p\n",
2551 __kmp_gtid_from_thread(master_th), master_th->th.th_task_team,
2558 master_th->th.th_current_task->td_flags.executing = 1;
2560 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
2564 OMPT_INVOKER(fork_context) |
2565 ((team_microtask == (
void *)__kmp_teams_master) ? ompt_parallel_league
2566 : ompt_parallel_team);
2567 if (ompt_enabled.enabled) {
2568 __kmp_join_ompt(gtid, master_th, parent_team, parallel_data, flags,
2574 KA_TRACE(20, (
"__kmp_join_call: exit T#%d\n", gtid));
void __kmp_save_internal_controls(kmp_info_t *thread) {
  if (thread->th.th_team != thread->th.th_serial_team) {
    return;
  }
  if (thread->th.th_team->t.t_serialized > 1) {
    int push = 0;
    if (thread->th.th_team->t.t_control_stack_top == NULL) {
      push = 1;
    } else if (thread->th.th_team->t.t_control_stack_top->serial_nesting_level !=
               thread->th.th_team->t.t_serialized) {
      push = 1;
    }
    if (push) { /* push a record onto the serial team's control stack */
      kmp_internal_control_t *control =
          (kmp_internal_control_t *)__kmp_allocate(
              sizeof(kmp_internal_control_t));

      copy_icvs(control, &thread->th.th_current_task->td_icvs);

      control->serial_nesting_level = thread->th.th_team->t.t_serialized;

      control->next = thread->th.th_team->t.t_control_stack_top;
      thread->th.th_team->t.t_control_stack_top = control;
    }
  }
}
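// omp_set_num_threads() support: update the nproc ICV for the calling thread
// and, if the hot team now has more threads than needed, release the extras.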
void __kmp_set_num_threads(int new_nth, int gtid) {
  kmp_info_t *thread;
  kmp_root_t *root;

  KF_TRACE(10, ("__kmp_set_num_threads: new __kmp_nth = %d\n", new_nth));
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  if (new_nth < 1)
    new_nth = 1;
  else if (new_nth > __kmp_max_nth)
    new_nth = __kmp_max_nth;

  thread = __kmp_threads[gtid];
  if (thread->th.th_current_task->td_icvs.nproc == new_nth)
    return; // nothing to do

  __kmp_save_internal_controls(thread);

  set__nproc(thread, new_nth);

  // If this omp_set_num_threads() call will cause the hot team size to be
  // reduced (in the absence of a num_threads clause), then reduce it now,
  // rather than waiting for the next parallel region.
  root = thread->th.th_root;
  if (__kmp_init_parallel && (!root->r.r_active) &&
      (root->r.r_hot_team->t.t_nproc > new_nth)
#if KMP_NESTED_HOT_TEAMS
      && __kmp_hot_teams_max_level && !__kmp_hot_teams_mode
#endif
  ) {
    kmp_team_t *hot_team = root->r.r_hot_team;
    int f;

    __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);

    // Release the extra threads we don't need any more.
    for (f = new_nth; f < hot_team->t.t_nproc; f++) {
      KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
      if (__kmp_tasking_mode != tskm_immediate_exec) {
        // When decreasing team size, threads no longer in the team should
        // unref the task team.
        hot_team->t.t_threads[f]->th.th_task_team = NULL;
      }
      __kmp_free_thread(hot_team->t.t_threads[f]);
      hot_team->t.t_threads[f] = NULL;
    }
    hot_team->t.t_nproc = new_nth;
#if KMP_NESTED_HOT_TEAMS
    if (thread->th.th_hot_teams) {
      KMP_DEBUG_ASSERT(hot_team == thread->th.th_hot_teams[0].hot_team);
      thread->th.th_hot_teams[0].hot_team_nth = new_nth;
    }
#endif

    __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);

    // Update the t_nproc field in the threads that are still active.
    for (f = 0; f < new_nth; f++) {
      KMP_DEBUG_ASSERT(hot_team->t.t_threads[f] != NULL);
      hot_team->t.t_threads[f]->th.th_team_nproc = new_nth;
    }
    // Special flag in case omp_set_num_threads() call
    hot_team->t.t_size_changed = -1;
  }
}
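// omp_set_max_active_levels() support: validate the requested value and update
// the max-active-levels ICV for the calling thread.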
void __kmp_set_max_active_levels(int gtid, int max_active_levels) {
  kmp_info_t *thread;

  KF_TRACE(10, ("__kmp_set_max_active_levels: new max_active_levels for thread "
                "%d = (%d)\n",
                gtid, max_active_levels));
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  // validate max_active_levels
  if (max_active_levels < 0) {
    KMP_WARNING(ActiveLevelsNegative, max_active_levels);
    // The call is ignored: the current setting is kept and a warning is issued
    // (if warnings are allowed by KMP_WARNINGS).
    KF_TRACE(10, ("__kmp_set_max_active_levels: the call is ignored: new "
                  "max_active_levels for thread %d = (%d)\n",
                  gtid, max_active_levels));
    return;
  }
  if (max_active_levels <= KMP_MAX_ACTIVE_LEVELS_LIMIT) {
    // OK, the value is within the valid range [0; KMP_MAX_ACTIVE_LEVELS_LIMIT]
  } else {
    KMP_WARNING(ActiveLevelsExceedLimit, max_active_levels,
                KMP_MAX_ACTIVE_LEVELS_LIMIT);
    max_active_levels = KMP_MAX_ACTIVE_LEVELS_LIMIT;
  }
  KF_TRACE(10, ("__kmp_set_max_active_levels: after validation: new "
                "max_active_levels for thread %d = (%d)\n",
                gtid, max_active_levels));

  thread = __kmp_threads[gtid];

  __kmp_save_internal_controls(thread);

  set__max_active_levels(thread, max_active_levels);
}
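// omp_get_max_active_levels() support: read the max-active-levels ICV from the
// calling thread's current task.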
int __kmp_get_max_active_levels(int gtid) {
  kmp_info_t *thread;

  KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d\n", gtid));
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  thread = __kmp_threads[gtid];
  KMP_DEBUG_ASSERT(thread->th.th_current_task);
  KF_TRACE(10, ("__kmp_get_max_active_levels: thread %d, curtask=%p, "
                "curtask_maxaclevel=%d\n",
                gtid, thread->th.th_current_task,
                thread->th.th_current_task->td_icvs.max_active_levels));
  return thread->th.th_current_task->td_icvs.max_active_levels;
}
KMP_BUILD_ASSERT(sizeof(kmp_sched_t) == sizeof(int));
KMP_BUILD_ASSERT(sizeof(enum sched_type) == sizeof(int));
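// omp_set_schedule() support: translate the user-visible kmp_sched_t kind into
// the internal sched_type and store it (with the chunk) in the run-sched ICV.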
void __kmp_set_schedule(int gtid, kmp_sched_t kind, int chunk) {
  kmp_info_t *thread;
  kmp_sched_t orig_kind;

  KF_TRACE(10, ("__kmp_set_schedule: new schedule for thread %d = (%d, %d)\n",
                gtid, (int)kind, chunk));
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  // Check if the kind parameter is valid, correct if needed.
  orig_kind = kind;
  kind = __kmp_sched_without_mods(kind);

  if (kind <= kmp_sched_lower || kind >= kmp_sched_upper ||
      (kind <= kmp_sched_lower_ext && kind >= kmp_sched_upper_std)) {
    // TODO: Hint needs attention in case we change the default schedule.
    __kmp_msg(kmp_ms_warning, KMP_MSG(ScheduleKindOutOfRange, kind),
              KMP_HNT(DefaultScheduleKindUsed, "static, no chunk"),
              __kmp_msg_null);
    kind = kmp_sched_default;
    chunk = 0; // ignore chunk value in case of bad kind
  }

  thread = __kmp_threads[gtid];

  __kmp_save_internal_controls(thread);

  if (kind < kmp_sched_upper_std) {
    if (kind == kmp_sched_static && chunk < KMP_DEFAULT_CHUNK) {
      // differ static chunked vs. unchunked: an invalid chunk indicates the
      // unchunked schedule (which is the default)
      thread->th.th_current_task->td_icvs.sched.r_sched_type = kmp_sch_static;
    } else {
      thread->th.th_current_task->td_icvs.sched.r_sched_type =
          __kmp_sch_map[kind - kmp_sched_lower - 1];
    }
  } else {
    thread->th.th_current_task->td_icvs.sched.r_sched_type =
        __kmp_sch_map[kind - kmp_sched_lower_ext + kmp_sched_upper_std -
                      kmp_sched_lower - 2];
  }
  __kmp_sched_apply_mods_intkind(
      orig_kind, &(thread->th.th_current_task->td_icvs.sched.r_sched_type));
  if (kind == kmp_sched_auto || chunk < 1) {
    // ignore parameter chunk for schedule auto
    thread->th.th_current_task->td_icvs.sched.chunk = KMP_DEFAULT_CHUNK;
  } else {
    thread->th.th_current_task->td_icvs.sched.chunk = chunk;
  }
}
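// omp_get_schedule() support: map the internal sched_type stored in the
// run-sched ICV back to the user-visible kmp_sched_t kind and chunk.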
void __kmp_get_schedule(int gtid, kmp_sched_t *kind, int *chunk) {
  kmp_info_t *thread;
  enum sched_type th_type;

  KF_TRACE(10, ("__kmp_get_schedule: thread %d\n", gtid));
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  thread = __kmp_threads[gtid];

  th_type = thread->th.th_current_task->td_icvs.sched.r_sched_type;
  switch (SCHEDULE_WITHOUT_MODIFIERS(th_type)) {
  case kmp_sch_static:
  case kmp_sch_static_greedy:
  case kmp_sch_static_balanced:
    *kind = kmp_sched_static;
    __kmp_sched_apply_mods_stdkind(kind, th_type);
    *chunk = 0; // chunk was not set, try to show this fact via zero value
    return;
  case kmp_sch_static_chunked:
    *kind = kmp_sched_static;
    break;
  case kmp_sch_dynamic_chunked:
    *kind = kmp_sched_dynamic;
    break;
  case kmp_sch_guided_chunked:
  case kmp_sch_guided_iterative_chunked:
  case kmp_sch_guided_analytical_chunked:
    *kind = kmp_sched_guided;
    break;
  case kmp_sch_auto:
    *kind = kmp_sched_auto;
    break;
  case kmp_sch_trapezoidal:
    *kind = kmp_sched_trapezoidal;
    break;
#if KMP_STATIC_STEAL_ENABLED
  case kmp_sch_static_steal:
    *kind = kmp_sched_static_steal;
    break;
#endif
  default:
    KMP_FATAL(UnknownSchedulingType, th_type);
  }

  __kmp_sched_apply_mods_stdkind(kind, th_type);
  *chunk = thread->th.th_current_task->td_icvs.sched.chunk;
}
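// omp_get_ancestor_thread_num() support: walk up the team hierarchy (including
// serialized levels) to find the thread number at the requested level.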
int __kmp_get_ancestor_thread_num(int gtid, int level) {
  int ii, dd;
  kmp_team_t *team;
  kmp_info_t *thr;

  KF_TRACE(10, ("__kmp_get_ancestor_thread_num: thread %d %d\n", gtid, level));
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  // validate level
  if (level == 0)
    return 0;
  if (level < 0)
    return -1;
  thr = __kmp_threads[gtid];
  team = thr->th.th_team;
  ii = team->t.t_level;
  if (level > ii)
    return -1;

  if (thr->th.th_teams_microtask) {
    // in a teams region several nested teams share the same level;
    // artificially increase ii to step over the teams league
    int tlevel = thr->th.th_teams_level;
    if (level <= tlevel) {
      KMP_DEBUG_ASSERT(ii >= tlevel);
      ii += (ii == tlevel) ? 2 : 1;
    }
  }

  if (ii == level)
    return __kmp_tid_from_gtid(gtid);

  dd = team->t.t_serialized;
  level++;
  while (ii > level) {
    for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
    }
    if ((team->t.t_serialized) && (!dd)) {
      team = team->t.t_parent;
      continue;
    }
    if (ii > level) {
      team = team->t.t_parent;
      dd = team->t.t_serialized;
      ii--;
    }
  }

  return (dd > 1) ? (0) : (team->t.t_master_tid);
}
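// omp_get_team_size() support: walk up the team hierarchy to find the team
// size at the requested level.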
int __kmp_get_team_size(int gtid, int level) {
  int ii, dd;
  kmp_team_t *team;
  kmp_info_t *thr;

  KF_TRACE(10, ("__kmp_get_team_size: thread %d %d\n", gtid, level));
  KMP_DEBUG_ASSERT(__kmp_init_serial);

  // validate level
  if (level == 0)
    return 1;
  if (level < 0)
    return -1;
  thr = __kmp_threads[gtid];
  team = thr->th.th_team;
  ii = team->t.t_level;
  if (level > ii)
    return -1;

  if (thr->th.th_teams_microtask) {
    // in a teams region several nested teams share the same level;
    // artificially increase ii to step over the teams league
    int tlevel = thr->th.th_teams_level;
    if (level <= tlevel) {
      KMP_DEBUG_ASSERT(ii >= tlevel);
      ii += (ii == tlevel) ? 2 : 1;
    }
  }

  while (ii > level) {
    for (dd = team->t.t_serialized; (dd > 0) && (ii > level); dd--, ii--) {
    }
    if (team->t.t_serialized && (!dd)) {
      team = team->t.t_parent;
      continue;
    }
    if (ii > level) {
      team = team->t.t_parent;
      ii--;
    }
  }

  return team->t.t_nproc;
}
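// Build a run-time schedule from the current scheduling globals;
// __kmp_sched/__kmp_chunk and __kmp_static/__kmp_guided may each have been
// changed independently, so the pair is recomputed here.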
kmp_r_sched_t __kmp_get_schedule_global() {
  kmp_r_sched_t r_sched;

  enum sched_type s = SCHEDULE_WITHOUT_MODIFIERS(__kmp_sched);
  enum sched_type sched_modifiers = SCHEDULE_GET_MODIFIERS(__kmp_sched);
  if (s == kmp_sch_static) {
    r_sched.r_sched_type = __kmp_static; // balanced or greedy
  } else if (s == kmp_sch_guided_chunked) {
    r_sched.r_sched_type = __kmp_guided; // iterative or analytical
  } else { // (STATIC_CHUNKED), (DYNAMIC_CHUNKED), or other
    r_sched.r_sched_type = __kmp_sched;
  }
  SCHEDULE_SET_MODIFIERS(r_sched.r_sched_type, sched_modifiers);

  if (__kmp_chunk < KMP_DEFAULT_CHUNK) {
    r_sched.chunk = KMP_DEFAULT_CHUNK; // __kmp_chunk may never have been set
  } else {
    r_sched.chunk = __kmp_chunk;
  }

  return r_sched;
}
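// Ensure the team's argv array can hold argc entries, using the inline
// cache-line storage for small counts and page-allocated heap storage
// otherwise.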
static void __kmp_alloc_argv_entries(int argc, kmp_team_t *team, int realloc) {

  KMP_DEBUG_ASSERT(team);
  if (!realloc || argc > team->t.t_max_argc) {

    KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: needed entries=%d, "
                   "current entries=%d\n",
                   team->t.t_id, argc, (realloc) ? team->t.t_max_argc : 0));
    // if previously allocated heap space for args, free them
    if (realloc && team->t.t_argv != &team->t.t_inline_argv[0])
      __kmp_free((void *)team->t.t_argv);

    if (argc <= KMP_INLINE_ARGV_ENTRIES) {
      // use unused space in the cache line for arguments
      team->t.t_max_argc = KMP_INLINE_ARGV_ENTRIES;
      KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: inline allocate %d "
                     "argv entries\n",
                     team->t.t_id, team->t.t_max_argc));
      team->t.t_argv = &team->t.t_inline_argv[0];
      if (__kmp_storage_map) {
        __kmp_print_storage_map_gtid(
            -1, &team->t.t_inline_argv[0],
            &team->t.t_inline_argv[KMP_INLINE_ARGV_ENTRIES],
            (sizeof(void *) * KMP_INLINE_ARGV_ENTRIES), "team_%d.t_inline_argv",
            team->t.t_id);
      }
    } else {
      // allocate space for arguments in the heap
      team->t.t_max_argc = (argc <= (KMP_MIN_MALLOC_ARGV_ENTRIES >> 1))
                               ? KMP_MIN_MALLOC_ARGV_ENTRIES
                               : 2 * argc;
      KA_TRACE(100, ("__kmp_alloc_argv_entries: team %d: dynamic allocate %d "
                     "argv entries\n",
                     team->t.t_id, team->t.t_max_argc));
      team->t.t_argv =
          (void **)__kmp_page_allocate(sizeof(void *) * team->t.t_max_argc);
      if (__kmp_storage_map) {
        __kmp_print_storage_map_gtid(-1, &team->t.t_argv[0],
                                     &team->t.t_argv[team->t.t_max_argc],
                                     sizeof(void *) * team->t.t_max_argc,
                                     "team_%d.t_argv", team->t.t_id);
      }
    }
  }
}
static void __kmp_allocate_team_arrays(kmp_team_t *team, int max_nth) {
  int i;
  int num_disp_buff = max_nth > 1 ? __kmp_dispatch_num_buffers : 2;
  team->t.t_threads =
      (kmp_info_t **)__kmp_allocate(sizeof(kmp_info_t *) * max_nth);
  team->t.t_disp_buffer = (dispatch_shared_info_t *)__kmp_allocate(
      sizeof(dispatch_shared_info_t) * num_disp_buff);
  team->t.t_dispatch =
      (kmp_disp_t *)__kmp_allocate(sizeof(kmp_disp_t) * max_nth);
  team->t.t_implicit_task_taskdata =
      (kmp_taskdata_t *)__kmp_allocate(sizeof(kmp_taskdata_t) * max_nth);
  team->t.t_max_nproc = max_nth;

  /* setup dispatch buffers */
  for (i = 0; i < num_disp_buff; ++i) {
    team->t.t_disp_buffer[i].buffer_index = i;
    team->t.t_disp_buffer[i].doacross_buf_idx = i;
  }
}
static void __kmp_free_team_arrays(kmp_team_t *team) {
  int i;
  for (i = 0; i < team->t.t_max_nproc; ++i) {
    if (team->t.t_dispatch[i].th_disp_buffer != NULL) {
      __kmp_free(team->t.t_dispatch[i].th_disp_buffer);
      team->t.t_dispatch[i].th_disp_buffer = NULL;
    }
  }
#if KMP_USE_HIER_SCHED
  __kmp_dispatch_free_hierarchies(team);
#endif
  __kmp_free(team->t.t_threads);
  __kmp_free(team->t.t_disp_buffer);
  __kmp_free(team->t.t_dispatch);
  __kmp_free(team->t.t_implicit_task_taskdata);
  team->t.t_threads = NULL;
  team->t.t_disp_buffer = NULL;
  team->t.t_dispatch = NULL;
  team->t.t_implicit_task_taskdata = 0;
}
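// Grow the team arrays to max_nth entries, preserving the existing thread
// pointers.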
static void __kmp_reallocate_team_arrays(kmp_team_t *team, int max_nth) {
  kmp_info_t **oldThreads = team->t.t_threads;

  __kmp_free(team->t.t_disp_buffer);
  __kmp_free(team->t.t_dispatch);
  __kmp_free(team->t.t_implicit_task_taskdata);
  __kmp_allocate_team_arrays(team, max_nth);

  KMP_MEMCPY(team->t.t_threads, oldThreads,
             team->t.t_nproc * sizeof(kmp_info_t *));

  __kmp_free(oldThreads);
}
static kmp_internal_control_t __kmp_get_global_icvs(void) {

  kmp_r_sched_t r_sched =
      __kmp_get_schedule_global(); // get current state of scheduling globals

  KMP_DEBUG_ASSERT(__kmp_nested_proc_bind.used > 0);

  kmp_internal_control_t g_icvs = {
      0, // int serial_nesting_level; corresponds to th_team_serialized
      (kmp_int8)__kmp_global.g.g_dynamic, // dynamic adjustment of threads
      (kmp_int8)__kmp_env_blocktime, // int bt_set; blocktime explicitly set?
      __kmp_dflt_blocktime, // int blocktime;
#if KMP_USE_MONITOR
      __kmp_bt_intervals, // int bt_intervals;
#endif
      __kmp_dflt_team_nth, // int nproc; threads for next parallel region
      __kmp_cg_max_nth, // int thread_limit;
      __kmp_dflt_max_active_levels, // int max_active_levels;
      r_sched, // kmp_r_sched_t sched; runtime schedule {sched,chunk} pair
      __kmp_nested_proc_bind.bind_types[0], // kmp_proc_bind_t proc_bind;
      __kmp_default_device, // int default_device;
      NULL // struct kmp_internal_control *next;
  };

  return g_icvs;
}

static kmp_internal_control_t __kmp_get_x_global_icvs(const kmp_team_t *team) {

  kmp_internal_control_t gx_icvs;
  gx_icvs.serial_nesting_level =
      0; // probably =team->t.t_serialized for the team's master
  copy_icvs(&gx_icvs, &team->t.t_threads[0]->th.th_current_task->td_icvs);
  gx_icvs.next = NULL;

  return gx_icvs;
}
3141 static void __kmp_initialize_root(kmp_root_t *root) {
3143 kmp_team_t *root_team;
3144 kmp_team_t *hot_team;
3145 int hot_team_max_nth;
3146 kmp_r_sched_t r_sched =
3147 __kmp_get_schedule_global();
3148 kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3149 KMP_DEBUG_ASSERT(root);
3150 KMP_ASSERT(!root->r.r_begin);
3153 __kmp_init_lock(&root->r.r_begin_lock);
3154 root->r.r_begin = FALSE;
3155 root->r.r_active = FALSE;
3156 root->r.r_in_parallel = 0;
3157 root->r.r_blocktime = __kmp_dflt_blocktime;
3161 KF_TRACE(10, (
"__kmp_initialize_root: before root_team\n"));
3164 __kmp_allocate_team(root,
3170 __kmp_nested_proc_bind.bind_types[0], &r_icvs,
3172 USE_NESTED_HOT_ARG(NULL)
3177 TCW_SYNC_PTR(root_team->t.t_pkfn, (microtask_t)(~0));
3180 KF_TRACE(10, (
"__kmp_initialize_root: after root_team = %p\n", root_team));
3182 root->r.r_root_team = root_team;
3183 root_team->t.t_control_stack_top = NULL;
3186 root_team->t.t_threads[0] = NULL;
3187 root_team->t.t_nproc = 1;
3188 root_team->t.t_serialized = 1;
3190 root_team->t.t_sched.sched = r_sched.sched;
3193 (
"__kmp_initialize_root: init root team %d arrived: join=%u, plain=%u\n",
3194 root_team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
3198 KF_TRACE(10, (
"__kmp_initialize_root: before hot_team\n"));
3201 __kmp_allocate_team(root,
3203 __kmp_dflt_team_nth_ub * 2,
3207 __kmp_nested_proc_bind.bind_types[0], &r_icvs,
3209 USE_NESTED_HOT_ARG(NULL)
3211 KF_TRACE(10, (
"__kmp_initialize_root: after hot_team = %p\n", hot_team));
3213 root->r.r_hot_team = hot_team;
3214 root_team->t.t_control_stack_top = NULL;
3217 hot_team->t.t_parent = root_team;
3220 hot_team_max_nth = hot_team->t.t_max_nproc;
3221 for (f = 0; f < hot_team_max_nth; ++f) {
3222 hot_team->t.t_threads[f] = NULL;
3224 hot_team->t.t_nproc = 1;
3226 hot_team->t.t_sched.sched = r_sched.sched;
3227 hot_team->t.t_size_changed = 0;
typedef struct kmp_team_list_item {
  kmp_team_p const *entry;
  struct kmp_team_list_item *next;
} kmp_team_list_item_t;
typedef kmp_team_list_item_t *kmp_team_list_t;
3238 static void __kmp_print_structure_team_accum(
3239 kmp_team_list_t list,
3240 kmp_team_p
const *team
3250 KMP_DEBUG_ASSERT(list != NULL);
3255 __kmp_print_structure_team_accum(list, team->t.t_parent);
3256 __kmp_print_structure_team_accum(list, team->t.t_next_pool);
3260 while (l->next != NULL && l->entry != team) {
3263 if (l->next != NULL) {
3269 while (l->next != NULL && l->entry->t.t_id <= team->t.t_id) {
3275 kmp_team_list_item_t *item = (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
3276 sizeof(kmp_team_list_item_t));
static void __kmp_print_structure_team(char const *title,
                                       kmp_team_p const *team) {
  __kmp_printf("%s", title);
  if (team != NULL) {
    __kmp_printf("%2x %p\n", team->t.t_id, team);
  } else {
    __kmp_printf(" - (nil)\n");
  }
}

static void __kmp_print_structure_thread(char const *title,
                                         kmp_info_p const *thread) {
  __kmp_printf("%s", title);
  if (thread != NULL) {
    __kmp_printf("%2d %p\n", thread->th.th_info.ds.ds_gtid, thread);
  } else {
    __kmp_printf(" - (nil)\n");
  }
}
3304 void __kmp_print_structure(
void) {
3306 kmp_team_list_t list;
3310 (kmp_team_list_item_t *)KMP_INTERNAL_MALLOC(
sizeof(kmp_team_list_item_t));
3314 __kmp_printf(
"\n------------------------------\nGlobal Thread "
3315 "Table\n------------------------------\n");
3318 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3319 __kmp_printf(
"%2d", gtid);
3320 if (__kmp_threads != NULL) {
3321 __kmp_printf(
" %p", __kmp_threads[gtid]);
3323 if (__kmp_root != NULL) {
3324 __kmp_printf(
" %p", __kmp_root[gtid]);
3331 __kmp_printf(
"\n------------------------------\nThreads\n--------------------"
3333 if (__kmp_threads != NULL) {
3335 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3336 kmp_info_t
const *thread = __kmp_threads[gtid];
3337 if (thread != NULL) {
3338 __kmp_printf(
"GTID %2d %p:\n", gtid, thread);
3339 __kmp_printf(
" Our Root: %p\n", thread->th.th_root);
3340 __kmp_print_structure_team(
" Our Team: ", thread->th.th_team);
3341 __kmp_print_structure_team(
" Serial Team: ",
3342 thread->th.th_serial_team);
3343 __kmp_printf(
" Threads: %2d\n", thread->th.th_team_nproc);
3344 __kmp_print_structure_thread(
" Master: ",
3345 thread->th.th_team_master);
3346 __kmp_printf(
" Serialized?: %2d\n", thread->th.th_team_serialized);
3347 __kmp_printf(
" Set NProc: %2d\n", thread->th.th_set_nproc);
3348 __kmp_printf(
" Set Proc Bind: %2d\n", thread->th.th_set_proc_bind);
3349 __kmp_print_structure_thread(
" Next in pool: ",
3350 thread->th.th_next_pool);
3352 __kmp_print_structure_team_accum(list, thread->th.th_team);
3353 __kmp_print_structure_team_accum(list, thread->th.th_serial_team);
3357 __kmp_printf(
"Threads array is not allocated.\n");
3361 __kmp_printf(
"\n------------------------------\nUbers\n----------------------"
3363 if (__kmp_root != NULL) {
3365 for (gtid = 0; gtid < __kmp_threads_capacity; ++gtid) {
3366 kmp_root_t
const *root = __kmp_root[gtid];
3368 __kmp_printf(
"GTID %2d %p:\n", gtid, root);
3369 __kmp_print_structure_team(
" Root Team: ", root->r.r_root_team);
3370 __kmp_print_structure_team(
" Hot Team: ", root->r.r_hot_team);
3371 __kmp_print_structure_thread(
" Uber Thread: ",
3372 root->r.r_uber_thread);
3373 __kmp_printf(
" Active?: %2d\n", root->r.r_active);
3374 __kmp_printf(
" In Parallel: %2d\n",
3375 KMP_ATOMIC_LD_RLX(&root->r.r_in_parallel));
3377 __kmp_print_structure_team_accum(list, root->r.r_root_team);
3378 __kmp_print_structure_team_accum(list, root->r.r_hot_team);
3382 __kmp_printf(
"Ubers array is not allocated.\n");
3385 __kmp_printf(
"\n------------------------------\nTeams\n----------------------"
3387 while (list->next != NULL) {
3388 kmp_team_p
const *team = list->entry;
3390 __kmp_printf(
"Team %2x %p:\n", team->t.t_id, team);
3391 __kmp_print_structure_team(
" Parent Team: ", team->t.t_parent);
3392 __kmp_printf(
" Master TID: %2d\n", team->t.t_master_tid);
3393 __kmp_printf(
" Max threads: %2d\n", team->t.t_max_nproc);
3394 __kmp_printf(
" Levels of serial: %2d\n", team->t.t_serialized);
3395 __kmp_printf(
" Number threads: %2d\n", team->t.t_nproc);
3396 for (i = 0; i < team->t.t_nproc; ++i) {
3397 __kmp_printf(
" Thread %2d: ", i);
3398 __kmp_print_structure_thread(
"", team->t.t_threads[i]);
3400 __kmp_print_structure_team(
" Next in pool: ", team->t.t_next_pool);
3406 __kmp_printf(
"\n------------------------------\nPools\n----------------------"
3408 __kmp_print_structure_thread(
"Thread pool: ",
3409 CCAST(kmp_info_t *, __kmp_thread_pool));
3410 __kmp_print_structure_team(
"Team pool: ",
3411 CCAST(kmp_team_t *, __kmp_team_pool));
3415 while (list != NULL) {
3416 kmp_team_list_item_t *item = list;
3418 KMP_INTERNAL_FREE(item);
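// Table of primes used to give each thread a distinct multiplier for its
// private linear congruential random number generator.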
static const unsigned __kmp_primes[] = {
    0x9e3779b1, 0xffe6cc59, 0x2109f6dd, 0x43977ab5, 0xba5703f5, 0xb495a877,
    0xe1626741, 0x79695e6b, 0xbc98c09f, 0xd5bee2b3, 0x287488f9, 0x3af18231,
    0x9677cd4d, 0xbe3a6929, 0xadc6a877, 0xdcf0674b, 0xbe4d6fe9, 0x5f15e201,
    0x99afc3fd, 0xf3f16801, 0xe222cfff, 0x24ba5fdb, 0x0620452d, 0x79f149e3,
    0xc8b93f49, 0x972702cd, 0xb07dd827, 0x6c97d5ed, 0x085a3d61, 0x46eb5ea7,
    0x3d9910ed, 0x2e687b5b, 0x29609227, 0x6eb081f1, 0x0954c4e1, 0x9d114db9,
    0x542acfa9, 0xb3e6bd7b, 0x0742d917, 0xe9f3ffa7, 0x54581edb, 0xf2480f45,
    0x0bb9288f, 0xef1affc7, 0x85fa0ca7, 0x3ccc14db, 0xe6baf34b, 0x343377f7,
    0x5ca19031, 0xe6d9293b, 0xf0a9f391, 0x5d2e980b, 0xfc411073, 0xc3749363,
    0xb892d829, 0x3549366b, 0x629750ad, 0xb98294e5, 0x892d9483, 0xc235baf3,
    0x3d2402a3, 0x6bdef3c9, 0xbec333cd, 0x40c9520f};
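// Per-thread linear congruential generator: returns the high 16 bits of the
// state and advances it as x = x * a + 1.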
unsigned short __kmp_get_random(kmp_info_t *thread) {
  unsigned x = thread->th.th_x;
  unsigned short r = (unsigned short)(x >> 16);

  thread->th.th_x = x * thread->th.th_a + 1;

  KA_TRACE(30, ("__kmp_get_random: THREAD: %d, RETURN: %u\n",
                thread->th.th_info.ds.ds_tid, r));

  return r;
}
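// Seed a thread's private random number generator from its tid, picking the
// multiplier from the prime table.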
void __kmp_init_random(kmp_info_t *thread) {
  unsigned seed = thread->th.th_info.ds.ds_tid;

  thread->th.th_a =
      __kmp_primes[seed % (sizeof(__kmp_primes) / sizeof(__kmp_primes[0]))];
  thread->th.th_x = (seed + 1) * thread->th.th_a + 1;
  KA_TRACE(30,
           ("__kmp_init_random: THREAD: %u; A: %u\n", seed, thread->th.th_a));
}
3468 static int __kmp_reclaim_dead_roots(
void) {
3471 for (i = 0; i < __kmp_threads_capacity; ++i) {
3472 if (KMP_UBER_GTID(i) &&
3473 !__kmp_still_running((kmp_info_t *)TCR_SYNC_PTR(__kmp_threads[i])) &&
3476 r += __kmp_unregister_root_other_thread(i);
3501 static int __kmp_expand_threads(
int nNeed) {
3503 int minimumRequiredCapacity;
3505 kmp_info_t **newThreads;
3506 kmp_root_t **newRoot;
3512 #if KMP_OS_WINDOWS && !KMP_DYNAMIC_LIB
3515 added = __kmp_reclaim_dead_roots();
3544 KMP_DEBUG_ASSERT(__kmp_sys_max_nth >= __kmp_threads_capacity);
3547 if (__kmp_sys_max_nth - __kmp_threads_capacity < nNeed) {
3551 minimumRequiredCapacity = __kmp_threads_capacity + nNeed;
3553 newCapacity = __kmp_threads_capacity;
3555 newCapacity = newCapacity <= (__kmp_sys_max_nth >> 1) ? (newCapacity << 1)
3556 : __kmp_sys_max_nth;
3557 }
while (newCapacity < minimumRequiredCapacity);
3558 newThreads = (kmp_info_t **)__kmp_allocate(
3559 (
sizeof(kmp_info_t *) +
sizeof(kmp_root_t *)) * newCapacity + CACHE_LINE);
3561 (kmp_root_t **)((
char *)newThreads +
sizeof(kmp_info_t *) * newCapacity);
3562 KMP_MEMCPY(newThreads, __kmp_threads,
3563 __kmp_threads_capacity *
sizeof(kmp_info_t *));
3564 KMP_MEMCPY(newRoot, __kmp_root,
3565 __kmp_threads_capacity *
sizeof(kmp_root_t *));
3567 kmp_info_t **temp_threads = __kmp_threads;
3568 *(kmp_info_t * *
volatile *)&__kmp_threads = newThreads;
3569 *(kmp_root_t * *
volatile *)&__kmp_root = newRoot;
3570 __kmp_free(temp_threads);
3571 added += newCapacity - __kmp_threads_capacity;
3572 *(
volatile int *)&__kmp_threads_capacity = newCapacity;
3574 if (newCapacity > __kmp_tp_capacity) {
3575 __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
3576 if (__kmp_tp_cached && newCapacity > __kmp_tp_capacity) {
3577 __kmp_threadprivate_resize_cache(newCapacity);
3579 *(
volatile int *)&__kmp_tp_capacity = newCapacity;
3581 __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);
3590 int __kmp_register_root(
int initial_thread) {
3591 kmp_info_t *root_thread;
3595 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3596 KA_TRACE(20, (
"__kmp_register_root: entered\n"));
3613 capacity = __kmp_threads_capacity;
3614 if (!initial_thread && TCR_PTR(__kmp_threads[0]) == NULL) {
3619 if (__kmp_all_nth >= capacity && !__kmp_expand_threads(1)) {
3620 if (__kmp_tp_cached) {
3621 __kmp_fatal(KMP_MSG(CantRegisterNewThread),
3622 KMP_HNT(Set_ALL_THREADPRIVATE, __kmp_tp_capacity),
3623 KMP_HNT(PossibleSystemLimitOnThreads), __kmp_msg_null);
3625 __kmp_fatal(KMP_MSG(CantRegisterNewThread), KMP_HNT(SystemLimitOnThreads),
3633 for (gtid = (initial_thread ? 0 : 1); TCR_PTR(__kmp_threads[gtid]) != NULL;
3637 (
"__kmp_register_root: found slot in threads array: T#%d\n", gtid));
3638 KMP_ASSERT(gtid < __kmp_threads_capacity);
3642 TCW_4(__kmp_nth, __kmp_nth + 1);
3646 if (__kmp_adjust_gtid_mode) {
3647 if (__kmp_all_nth >= __kmp_tls_gtid_min) {
3648 if (TCR_4(__kmp_gtid_mode) != 2) {
3649 TCW_4(__kmp_gtid_mode, 2);
3652 if (TCR_4(__kmp_gtid_mode) != 1) {
3653 TCW_4(__kmp_gtid_mode, 1);
3658 #ifdef KMP_ADJUST_BLOCKTIME
3661 if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
3662 if (__kmp_nth > __kmp_avail_proc) {
3663 __kmp_zero_bt = TRUE;
3669 if (!(root = __kmp_root[gtid])) {
3670 root = __kmp_root[gtid] = (kmp_root_t *)__kmp_allocate(
sizeof(kmp_root_t));
3671 KMP_DEBUG_ASSERT(!root->r.r_root_team);
3674 #if KMP_STATS_ENABLED
3676 __kmp_stats_thread_ptr = __kmp_stats_list->push_back(gtid);
3677 __kmp_stats_thread_ptr->startLife();
3678 KMP_SET_THREAD_STATE(SERIAL_REGION);
3681 __kmp_initialize_root(root);
3684 if (root->r.r_uber_thread) {
3685 root_thread = root->r.r_uber_thread;
3687 root_thread = (kmp_info_t *)__kmp_allocate(
sizeof(kmp_info_t));
3688 if (__kmp_storage_map) {
3689 __kmp_print_thread_storage_map(root_thread, gtid);
3691 root_thread->th.th_info.ds.ds_gtid = gtid;
3693 root_thread->th.ompt_thread_info.thread_data = ompt_data_none;
3695 root_thread->th.th_root = root;
3696 if (__kmp_env_consistency_check) {
3697 root_thread->th.th_cons = __kmp_allocate_cons_stack(gtid);
3700 __kmp_initialize_fast_memory(root_thread);
3704 KMP_DEBUG_ASSERT(root_thread->th.th_local.bget_data == NULL);
3705 __kmp_initialize_bget(root_thread);
3707 __kmp_init_random(root_thread);
3711 if (!root_thread->th.th_serial_team) {
3712 kmp_internal_control_t r_icvs = __kmp_get_global_icvs();
3713 KF_TRACE(10, (
"__kmp_register_root: before serial_team\n"));
3714 root_thread->th.th_serial_team = __kmp_allocate_team(
3719 proc_bind_default, &r_icvs, 0 USE_NESTED_HOT_ARG(NULL));
3721 KMP_ASSERT(root_thread->th.th_serial_team);
3722 KF_TRACE(10, (
"__kmp_register_root: after serial_team = %p\n",
3723 root_thread->th.th_serial_team));
3726 TCW_SYNC_PTR(__kmp_threads[gtid], root_thread);
3728 root->r.r_root_team->t.t_threads[0] = root_thread;
3729 root->r.r_hot_team->t.t_threads[0] = root_thread;
3730 root_thread->th.th_serial_team->t.t_threads[0] = root_thread;
3732 root_thread->th.th_serial_team->t.t_serialized = 0;
3733 root->r.r_uber_thread = root_thread;
3736 __kmp_initialize_info(root_thread, root->r.r_root_team, 0, gtid);
3737 TCW_4(__kmp_init_gtid, TRUE);
3740 __kmp_gtid_set_specific(gtid);
3743 __kmp_itt_thread_name(gtid);
3746 #ifdef KMP_TDATA_GTID
3749 __kmp_create_worker(gtid, root_thread, __kmp_stksize);
3750 KMP_DEBUG_ASSERT(__kmp_gtid_get_specific() == gtid);
3752 KA_TRACE(20, (
"__kmp_register_root: T#%d init T#%d(%d:%d) arrived: join=%u, "
3754 gtid, __kmp_gtid_from_tid(0, root->r.r_hot_team),
3755 root->r.r_hot_team->t.t_id, 0, KMP_INIT_BARRIER_STATE,
3756 KMP_INIT_BARRIER_STATE));
3759 for (b = 0; b < bs_last_barrier; ++b) {
3760 root_thread->th.th_bar[b].bb.b_arrived = KMP_INIT_BARRIER_STATE;
3762 root_thread->th.th_bar[b].bb.b_worker_arrived = 0;
3766 KMP_DEBUG_ASSERT(root->r.r_hot_team->t.t_bar[bs_forkjoin_barrier].b_arrived ==
3767 KMP_INIT_BARRIER_STATE);
3769 #if KMP_AFFINITY_SUPPORTED
3770 root_thread->th.th_current_place = KMP_PLACE_UNDEFINED;
3771 root_thread->th.th_new_place = KMP_PLACE_UNDEFINED;
3772 root_thread->th.th_first_place = KMP_PLACE_UNDEFINED;
3773 root_thread->th.th_last_place = KMP_PLACE_UNDEFINED;
3774 if (TCR_4(__kmp_init_middle)) {
3775 __kmp_affinity_set_init_mask(gtid, TRUE);
3778 root_thread->th.th_def_allocator = __kmp_def_allocator;
3779 root_thread->th.th_prev_level = 0;
3780 root_thread->th.th_prev_num_threads = 1;
3782 kmp_cg_root_t *tmp = (kmp_cg_root_t *)__kmp_allocate(
sizeof(kmp_cg_root_t));
3783 tmp->cg_root = root_thread;
3784 tmp->cg_thread_limit = __kmp_cg_max_nth;
3785 tmp->cg_nthreads = 1;
3786 KA_TRACE(100, (
"__kmp_register_root: Thread %p created node %p with"
3787 " cg_nthreads init to 1\n",
3790 root_thread->th.th_cg_roots = tmp;
3792 __kmp_root_counter++;
3795 if (!initial_thread && ompt_enabled.enabled) {
3797 kmp_info_t *root_thread = ompt_get_thread();
3799 ompt_set_thread_state(root_thread, ompt_state_overhead);
3801 if (ompt_enabled.ompt_callback_thread_begin) {
3802 ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
3803 ompt_thread_initial, __ompt_get_thread_data_internal());
3805 ompt_data_t *task_data;
3806 ompt_data_t *parallel_data;
3807 __ompt_get_task_info_internal(0, NULL, &task_data, NULL, ¶llel_data, NULL);
3808 if (ompt_enabled.ompt_callback_implicit_task) {
3809 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
3810 ompt_scope_begin, parallel_data, task_data, 1, 1, ompt_task_initial);
3813 ompt_set_thread_state(root_thread, ompt_state_work_serial);
3818 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3823 #if KMP_NESTED_HOT_TEAMS
3824 static int __kmp_free_hot_teams(kmp_root_t *root, kmp_info_t *thr,
int level,
3825 const int max_level) {
3827 kmp_hot_team_ptr_t *hot_teams = thr->th.th_hot_teams;
3828 if (!hot_teams || !hot_teams[level].hot_team) {
3831 KMP_DEBUG_ASSERT(level < max_level);
3832 kmp_team_t *team = hot_teams[level].hot_team;
3833 nth = hot_teams[level].hot_team_nth;
3835 if (level < max_level - 1) {
3836 for (i = 0; i < nth; ++i) {
3837 kmp_info_t *th = team->t.t_threads[i];
3838 n += __kmp_free_hot_teams(root, th, level + 1, max_level);
3839 if (i > 0 && th->th.th_hot_teams) {
3840 __kmp_free(th->th.th_hot_teams);
3841 th->th.th_hot_teams = NULL;
3845 __kmp_free_team(root, team, NULL);
3852 static int __kmp_reset_root(
int gtid, kmp_root_t *root) {
3853 kmp_team_t *root_team = root->r.r_root_team;
3854 kmp_team_t *hot_team = root->r.r_hot_team;
3855 int n = hot_team->t.t_nproc;
3858 KMP_DEBUG_ASSERT(!root->r.r_active);
3860 root->r.r_root_team = NULL;
3861 root->r.r_hot_team = NULL;
3864 __kmp_free_team(root, root_team USE_NESTED_HOT_ARG(NULL));
3865 #if KMP_NESTED_HOT_TEAMS
3866 if (__kmp_hot_teams_max_level >
3868 for (i = 0; i < hot_team->t.t_nproc; ++i) {
3869 kmp_info_t *th = hot_team->t.t_threads[i];
3870 if (__kmp_hot_teams_max_level > 1) {
3871 n += __kmp_free_hot_teams(root, th, 1, __kmp_hot_teams_max_level);
3873 if (th->th.th_hot_teams) {
3874 __kmp_free(th->th.th_hot_teams);
3875 th->th.th_hot_teams = NULL;
3880 __kmp_free_team(root, hot_team USE_NESTED_HOT_ARG(NULL));
3885 if (__kmp_tasking_mode != tskm_immediate_exec) {
3886 __kmp_wait_to_unref_task_teams();
3892 10, (
"__kmp_reset_root: free handle, th = %p, handle = %" KMP_UINTPTR_SPEC
3894 (LPVOID) & (root->r.r_uber_thread->th),
3895 root->r.r_uber_thread->th.th_info.ds.ds_thread));
3896 __kmp_free_handle(root->r.r_uber_thread->th.th_info.ds.ds_thread);
3900 ompt_data_t *task_data;
3901 ompt_data_t *parallel_data;
3902 __ompt_get_task_info_internal(0, NULL, &task_data, NULL, ¶llel_data, NULL);
3903 if (ompt_enabled.ompt_callback_implicit_task) {
3904 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
3905 ompt_scope_end, parallel_data, task_data, 0, 1, ompt_task_initial);
3907 if (ompt_enabled.ompt_callback_thread_end) {
3908 ompt_callbacks.ompt_callback(ompt_callback_thread_end)(
3909 &(root->r.r_uber_thread->th.ompt_thread_info.thread_data));
3915 i = root->r.r_uber_thread->th.th_cg_roots->cg_nthreads--;
3916 KA_TRACE(100, (
"__kmp_reset_root: Thread %p decrement cg_nthreads on node %p"
3918 root->r.r_uber_thread, root->r.r_uber_thread->th.th_cg_roots,
3919 root->r.r_uber_thread->th.th_cg_roots->cg_nthreads));
3922 KMP_DEBUG_ASSERT(root->r.r_uber_thread ==
3923 root->r.r_uber_thread->th.th_cg_roots->cg_root);
3924 KMP_DEBUG_ASSERT(root->r.r_uber_thread->th.th_cg_roots->up == NULL);
3925 __kmp_free(root->r.r_uber_thread->th.th_cg_roots);
3926 root->r.r_uber_thread->th.th_cg_roots = NULL;
3928 __kmp_reap_thread(root->r.r_uber_thread, 1);
3932 root->r.r_uber_thread = NULL;
3934 root->r.r_begin = FALSE;
3939 void __kmp_unregister_root_current_thread(
int gtid) {
3940 KA_TRACE(1, (
"__kmp_unregister_root_current_thread: enter T#%d\n", gtid));
3944 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
3945 if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
3946 KC_TRACE(10, (
"__kmp_unregister_root_current_thread: already finished, "
3949 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3952 kmp_root_t *root = __kmp_root[gtid];
3954 KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
3955 KMP_ASSERT(KMP_UBER_GTID(gtid));
3956 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
3957 KMP_ASSERT(root->r.r_active == FALSE);
3961 kmp_info_t *thread = __kmp_threads[gtid];
3962 kmp_team_t *team = thread->th.th_team;
3963 kmp_task_team_t *task_team = thread->th.th_task_team;
3966 if (task_team != NULL && task_team->tt.tt_found_proxy_tasks) {
3969 thread->th.ompt_thread_info.state = ompt_state_undefined;
3971 __kmp_task_team_wait(thread, team USE_ITT_BUILD_ARG(NULL));
3974 __kmp_reset_root(gtid, root);
3977 __kmp_gtid_set_specific(KMP_GTID_DNE);
3978 #ifdef KMP_TDATA_GTID
3979 __kmp_gtid = KMP_GTID_DNE;
3984 (
"__kmp_unregister_root_current_thread: T#%d unregistered\n", gtid));
3986 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
3993 static int __kmp_unregister_root_other_thread(
int gtid) {
3994 kmp_root_t *root = __kmp_root[gtid];
3997 KA_TRACE(1, (
"__kmp_unregister_root_other_thread: enter T#%d\n", gtid));
3998 KMP_DEBUG_ASSERT(__kmp_threads && __kmp_threads[gtid]);
3999 KMP_ASSERT(KMP_UBER_GTID(gtid));
4000 KMP_ASSERT(root == __kmp_threads[gtid]->th.th_root);
4001 KMP_ASSERT(root->r.r_active == FALSE);
4003 r = __kmp_reset_root(gtid, root);
4005 (
"__kmp_unregister_root_other_thread: T#%d unregistered\n", gtid));
4011 void __kmp_task_info() {
4013 kmp_int32 gtid = __kmp_entry_gtid();
4014 kmp_int32 tid = __kmp_tid_from_gtid(gtid);
4015 kmp_info_t *this_thr = __kmp_threads[gtid];
4016 kmp_team_t *steam = this_thr->th.th_serial_team;
4017 kmp_team_t *team = this_thr->th.th_team;
4020 "__kmp_task_info: gtid=%d tid=%d t_thread=%p team=%p steam=%p curtask=%p "
4022 gtid, tid, this_thr, team, steam, this_thr->th.th_current_task,
4023 team->t.t_implicit_task_taskdata[tid].td_parent);
4030 static void __kmp_initialize_info(kmp_info_t *this_thr, kmp_team_t *team,
4031 int tid,
int gtid) {
4035 kmp_info_t *master = team->t.t_threads[0];
4036 KMP_DEBUG_ASSERT(this_thr != NULL);
4037 KMP_DEBUG_ASSERT(this_thr->th.th_serial_team);
4038 KMP_DEBUG_ASSERT(team);
4039 KMP_DEBUG_ASSERT(team->t.t_threads);
4040 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4041 KMP_DEBUG_ASSERT(master);
4042 KMP_DEBUG_ASSERT(master->th.th_root);
4046 TCW_SYNC_PTR(this_thr->th.th_team, team);
4048 this_thr->th.th_info.ds.ds_tid = tid;
4049 this_thr->th.th_set_nproc = 0;
4050 if (__kmp_tasking_mode != tskm_immediate_exec)
4053 this_thr->th.th_reap_state = KMP_NOT_SAFE_TO_REAP;
4055 this_thr->th.th_reap_state = KMP_SAFE_TO_REAP;
4056 this_thr->th.th_set_proc_bind = proc_bind_default;
4057 #if KMP_AFFINITY_SUPPORTED
4058 this_thr->th.th_new_place = this_thr->th.th_current_place;
4060 this_thr->th.th_root = master->th.th_root;
4063 this_thr->th.th_team_nproc = team->t.t_nproc;
4064 this_thr->th.th_team_master = master;
4065 this_thr->th.th_team_serialized = team->t.t_serialized;
4066 TCW_PTR(this_thr->th.th_sleep_loc, NULL);
4068 KMP_DEBUG_ASSERT(team->t.t_implicit_task_taskdata);
4070 KF_TRACE(10, (
"__kmp_initialize_info1: T#%d:%d this_thread=%p curtask=%p\n",
4071 tid, gtid, this_thr, this_thr->th.th_current_task));
4073 __kmp_init_implicit_task(this_thr->th.th_team_master->th.th_ident, this_thr,
4076 KF_TRACE(10, (
"__kmp_initialize_info2: T#%d:%d this_thread=%p curtask=%p\n",
4077 tid, gtid, this_thr, this_thr->th.th_current_task));
4082 this_thr->th.th_dispatch = &team->t.t_dispatch[tid];
4084 this_thr->th.th_local.this_construct = 0;
4086 if (!this_thr->th.th_pri_common) {
4087 this_thr->th.th_pri_common =
4088 (
struct common_table *)__kmp_allocate(
sizeof(
struct common_table));
4089 if (__kmp_storage_map) {
4090 __kmp_print_storage_map_gtid(
4091 gtid, this_thr->th.th_pri_common, this_thr->th.th_pri_common + 1,
4092 sizeof(
struct common_table),
"th_%d.th_pri_common\n", gtid);
4094 this_thr->th.th_pri_head = NULL;
4097 if (this_thr != master &&
4098 this_thr->th.th_cg_roots != master->th.th_cg_roots) {
4100 KMP_DEBUG_ASSERT(master->th.th_cg_roots);
4101 kmp_cg_root_t *tmp = this_thr->th.th_cg_roots;
4104 int i = tmp->cg_nthreads--;
4105 KA_TRACE(100, (
"__kmp_initialize_info: Thread %p decrement cg_nthreads"
4106 " on node %p of thread %p to %d\n",
4107 this_thr, tmp, tmp->cg_root, tmp->cg_nthreads));
4112 this_thr->th.th_cg_roots = master->th.th_cg_roots;
4114 this_thr->th.th_cg_roots->cg_nthreads++;
4115 KA_TRACE(100, (
"__kmp_initialize_info: Thread %p increment cg_nthreads on"
4116 " node %p of thread %p to %d\n",
4117 this_thr, this_thr->th.th_cg_roots,
4118 this_thr->th.th_cg_roots->cg_root,
4119 this_thr->th.th_cg_roots->cg_nthreads));
4120 this_thr->th.th_current_task->td_icvs.thread_limit =
4121 this_thr->th.th_cg_roots->cg_thread_limit;
4126 volatile kmp_disp_t *dispatch = this_thr->th.th_dispatch;
4129 sizeof(dispatch_private_info_t) *
4130 (team->t.t_max_nproc == 1 ? 1 : __kmp_dispatch_num_buffers);
4131 KD_TRACE(10, (
"__kmp_initialize_info: T#%d max_nproc: %d\n", gtid,
4132 team->t.t_max_nproc));
4133 KMP_ASSERT(dispatch);
4134 KMP_DEBUG_ASSERT(team->t.t_dispatch);
4135 KMP_DEBUG_ASSERT(dispatch == &team->t.t_dispatch[tid]);
4137 dispatch->th_disp_index = 0;
4138 dispatch->th_doacross_buf_idx = 0;
4139 if (!dispatch->th_disp_buffer) {
4140 dispatch->th_disp_buffer =
4141 (dispatch_private_info_t *)__kmp_allocate(disp_size);
4143 if (__kmp_storage_map) {
4144 __kmp_print_storage_map_gtid(
4145 gtid, &dispatch->th_disp_buffer[0],
4146 &dispatch->th_disp_buffer[team->t.t_max_nproc == 1
4148 : __kmp_dispatch_num_buffers],
4149 disp_size,
"th_%d.th_dispatch.th_disp_buffer "
4150 "(team_%d.t_dispatch[%d].th_disp_buffer)",
4151 gtid, team->t.t_id, gtid);
4154 memset(&dispatch->th_disp_buffer[0],
'\0', disp_size);
4157 dispatch->th_dispatch_pr_current = 0;
4158 dispatch->th_dispatch_sh_current = 0;
4160 dispatch->th_deo_fcn = 0;
4161 dispatch->th_dxo_fcn = 0;
4164 this_thr->th.th_next_pool = NULL;
4166 if (!this_thr->th.th_task_state_memo_stack) {
4168 this_thr->th.th_task_state_memo_stack =
4169 (kmp_uint8 *)__kmp_allocate(4 *
sizeof(kmp_uint8));
4170 this_thr->th.th_task_state_top = 0;
4171 this_thr->th.th_task_state_stack_sz = 4;
4172 for (i = 0; i < this_thr->th.th_task_state_stack_sz;
4174 this_thr->th.th_task_state_memo_stack[i] = 0;
4177 KMP_DEBUG_ASSERT(!this_thr->th.th_spin_here);
4178 KMP_DEBUG_ASSERT(this_thr->th.th_next_waiting == 0);
4188 kmp_info_t *__kmp_allocate_thread(kmp_root_t *root, kmp_team_t *team,
4190 kmp_team_t *serial_team;
4191 kmp_info_t *new_thr;
4194 KA_TRACE(20, (
"__kmp_allocate_thread: T#%d\n", __kmp_get_gtid()));
4195 KMP_DEBUG_ASSERT(root && team);
4196 #if !KMP_NESTED_HOT_TEAMS
4197 KMP_DEBUG_ASSERT(KMP_MASTER_GTID(__kmp_get_gtid()));
4202 if (__kmp_thread_pool) {
4203 new_thr = CCAST(kmp_info_t *, __kmp_thread_pool);
4204 __kmp_thread_pool = (
volatile kmp_info_t *)new_thr->th.th_next_pool;
4205 if (new_thr == __kmp_thread_pool_insert_pt) {
4206 __kmp_thread_pool_insert_pt = NULL;
4208 TCW_4(new_thr->th.th_in_pool, FALSE);
4209 __kmp_suspend_initialize_thread(new_thr);
4210 __kmp_lock_suspend_mx(new_thr);
4211 if (new_thr->th.th_active_in_pool == TRUE) {
4212 KMP_DEBUG_ASSERT(new_thr->th.th_active == TRUE);
4213 KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
4214 new_thr->th.th_active_in_pool = FALSE;
4216 __kmp_unlock_suspend_mx(new_thr);
4218 KA_TRACE(20, (
"__kmp_allocate_thread: T#%d using thread T#%d\n",
4219 __kmp_get_gtid(), new_thr->th.th_info.ds.ds_gtid));
4220 KMP_ASSERT(!new_thr->th.th_team);
4221 KMP_DEBUG_ASSERT(__kmp_nth < __kmp_threads_capacity);
4224 __kmp_initialize_info(new_thr, team, new_tid,
4225 new_thr->th.th_info.ds.ds_gtid);
4226 KMP_DEBUG_ASSERT(new_thr->th.th_serial_team);
4228 TCW_4(__kmp_nth, __kmp_nth + 1);
4230 new_thr->th.th_task_state = 0;
4231 new_thr->th.th_task_state_top = 0;
4232 new_thr->th.th_task_state_stack_sz = 4;
4234 #ifdef KMP_ADJUST_BLOCKTIME
4237 if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4238 if (__kmp_nth > __kmp_avail_proc) {
4239 __kmp_zero_bt = TRUE;
4248 kmp_balign_t *balign = new_thr->th.th_bar;
4249 for (b = 0; b < bs_last_barrier; ++b)
4250 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
4253 KF_TRACE(10, (
"__kmp_allocate_thread: T#%d using thread %p T#%d\n",
4254 __kmp_get_gtid(), new_thr, new_thr->th.th_info.ds.ds_gtid));
4261 KMP_ASSERT(__kmp_nth == __kmp_all_nth);
4262 KMP_ASSERT(__kmp_all_nth < __kmp_threads_capacity);
4267 if (!TCR_4(__kmp_init_monitor)) {
4268 __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
4269 if (!TCR_4(__kmp_init_monitor)) {
4270 KF_TRACE(10, (
"before __kmp_create_monitor\n"));
4271 TCW_4(__kmp_init_monitor, 1);
4272 __kmp_create_monitor(&__kmp_monitor);
4273 KF_TRACE(10, (
"after __kmp_create_monitor\n"));
4284 while (TCR_4(__kmp_init_monitor) < 2) {
4287 KF_TRACE(10, (
"after monitor thread has started\n"));
4290 __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
4295 for (new_gtid = 1; TCR_PTR(__kmp_threads[new_gtid]) != NULL; ++new_gtid) {
4296 KMP_DEBUG_ASSERT(new_gtid < __kmp_threads_capacity);
4300 new_thr = (kmp_info_t *)__kmp_allocate(
sizeof(kmp_info_t));
4302 TCW_SYNC_PTR(__kmp_threads[new_gtid], new_thr);
4304 if (__kmp_storage_map) {
4305 __kmp_print_thread_storage_map(new_thr, new_gtid);
4310 kmp_internal_control_t r_icvs = __kmp_get_x_global_icvs(team);
4311 KF_TRACE(10, (
"__kmp_allocate_thread: before th_serial/serial_team\n"));
4312 new_thr->th.th_serial_team = serial_team =
4313 (kmp_team_t *)__kmp_allocate_team(root, 1, 1,
4317 proc_bind_default, &r_icvs,
4318 0 USE_NESTED_HOT_ARG(NULL));
4320 KMP_ASSERT(serial_team);
4321 serial_team->t.t_serialized = 0;
4323 serial_team->t.t_threads[0] = new_thr;
4325 (
"__kmp_allocate_thread: after th_serial/serial_team : new_thr=%p\n",
4329 __kmp_initialize_info(new_thr, team, new_tid, new_gtid);
4332 __kmp_initialize_fast_memory(new_thr);
4336 KMP_DEBUG_ASSERT(new_thr->th.th_local.bget_data == NULL);
4337 __kmp_initialize_bget(new_thr);
4340 __kmp_init_random(new_thr);
4344 (
"__kmp_allocate_thread: T#%d init go fork=%u, plain=%u\n",
4345 __kmp_get_gtid(), KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
4348 kmp_balign_t *balign = new_thr->th.th_bar;
4349 for (b = 0; b < bs_last_barrier; ++b) {
4350 balign[b].bb.b_go = KMP_INIT_BARRIER_STATE;
4351 balign[b].bb.team = NULL;
4352 balign[b].bb.wait_flag = KMP_BARRIER_NOT_WAITING;
4353 balign[b].bb.use_oncore_barrier = 0;
4356 new_thr->th.th_spin_here = FALSE;
4357 new_thr->th.th_next_waiting = 0;
4359 new_thr->th.th_blocking =
false;
4362 #if KMP_AFFINITY_SUPPORTED
4363 new_thr->th.th_current_place = KMP_PLACE_UNDEFINED;
4364 new_thr->th.th_new_place = KMP_PLACE_UNDEFINED;
4365 new_thr->th.th_first_place = KMP_PLACE_UNDEFINED;
4366 new_thr->th.th_last_place = KMP_PLACE_UNDEFINED;
4368 new_thr->th.th_def_allocator = __kmp_def_allocator;
4369 new_thr->th.th_prev_level = 0;
4370 new_thr->th.th_prev_num_threads = 1;
4372 TCW_4(new_thr->th.th_in_pool, FALSE);
4373 new_thr->th.th_active_in_pool = FALSE;
4374 TCW_4(new_thr->th.th_active, TRUE);
4382 if (__kmp_adjust_gtid_mode) {
4383 if (__kmp_all_nth >= __kmp_tls_gtid_min) {
4384 if (TCR_4(__kmp_gtid_mode) != 2) {
4385 TCW_4(__kmp_gtid_mode, 2);
4388 if (TCR_4(__kmp_gtid_mode) != 1) {
4389 TCW_4(__kmp_gtid_mode, 1);
4394 #ifdef KMP_ADJUST_BLOCKTIME
4397 if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
4398 if (__kmp_nth > __kmp_avail_proc) {
4399 __kmp_zero_bt = TRUE;
4406 10, (
"__kmp_allocate_thread: before __kmp_create_worker: %p\n", new_thr));
4407 __kmp_create_worker(new_gtid, new_thr, __kmp_stksize);
4409 (
"__kmp_allocate_thread: after __kmp_create_worker: %p\n", new_thr));
4411 KA_TRACE(20, (
"__kmp_allocate_thread: T#%d forked T#%d\n", __kmp_get_gtid(),
static void __kmp_reinitialize_team(kmp_team_t *team,
                                    kmp_internal_control_t *new_icvs,
                                    ident_t *loc) {
  KF_TRACE(10, ("__kmp_reinitialize_team: enter this_thread=%p team=%p\n",
                team->t.t_threads[0], team));
  KMP_DEBUG_ASSERT(team && new_icvs);
  KMP_DEBUG_ASSERT((!TCR_4(__kmp_init_parallel)) || new_icvs->nproc);
  KMP_CHECK_UPDATE(team->t.t_ident, loc);

  KMP_CHECK_UPDATE(team->t.t_id, KMP_GEN_TEAM_ID());

  // Copy ICVs to the master thread's implicit taskdata
  __kmp_init_implicit_task(loc, team->t.t_threads[0], team, 0, FALSE);
  copy_icvs(&team->t.t_implicit_task_taskdata[0].td_icvs, new_icvs);

  KF_TRACE(10, ("__kmp_reinitialize_team: exit this_thread=%p team=%p\n",
                team->t.t_threads[0], team));
}
/* Initialize the team data structure.
   This assumes the t_threads and t_max_nproc are already set. */
static void __kmp_initialize_team(kmp_team_t *team, int new_nproc,
                                  kmp_internal_control_t *new_icvs,
                                  ident_t *loc) {
  KF_TRACE(10, ("__kmp_initialize_team: enter: team=%p\n", team));

  /* verify */
  KMP_DEBUG_ASSERT(team);
  KMP_DEBUG_ASSERT(new_nproc <= team->t.t_max_nproc);
  KMP_DEBUG_ASSERT(team->t.t_threads);
  KMP_MB();

  team->t.t_master_tid = 0; /* not needed */
  team->t.t_serialized = new_nproc > 1 ? 0 : 1;
  team->t.t_nproc = new_nproc;

  /* team->t.t_parent = NULL; would mess up the hot team */
  team->t.t_next_pool = NULL;

  TCW_SYNC_PTR(team->t.t_pkfn, NULL); /* not needed */
  team->t.t_invoke = NULL; /* not needed */

  team->t.t_sched.sched = new_icvs->sched.sched;

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  team->t.t_fp_control_saved = FALSE; /* not needed */
  team->t.t_x87_fpu_control_word = 0; /* not needed */
  team->t.t_mxcsr = 0; /* not needed */
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  team->t.t_construct = 0;

  team->t.t_ordered.dt.t_value = 0;
  team->t.t_master_active = FALSE;

#ifdef KMP_DEBUG
  team->t.t_copypriv_data = NULL; /* not necessary, but nice for debugging */
#endif
#if KMP_OS_WINDOWS
  team->t.t_copyin_counter = 0; /* for barrier-free copyin implementation */
#endif

  team->t.t_control_stack_top = NULL;

  __kmp_reinitialize_team(team, new_icvs, loc);

  KMP_MB();
  KF_TRACE(10, ("__kmp_initialize_team: exit: team=%p\n", team));
}
4496 #if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
4499 __kmp_set_thread_affinity_mask_full_tmp(kmp_affin_mask_t *old_mask) {
4500 if (KMP_AFFINITY_CAPABLE()) {
4502 if (old_mask != NULL) {
4503 status = __kmp_get_system_affinity(old_mask, TRUE);
4506 __kmp_fatal(KMP_MSG(ChangeThreadAffMaskError), KMP_ERR(error),
4510 __kmp_set_system_affinity(__kmp_affin_fullMask, TRUE);
4515 #if KMP_AFFINITY_SUPPORTED
4521 static void __kmp_partition_places(kmp_team_t *team,
int update_master_only) {
4523 kmp_info_t *master_th = team->t.t_threads[0];
4524 KMP_DEBUG_ASSERT(master_th != NULL);
4525 kmp_proc_bind_t proc_bind = team->t.t_proc_bind;
4526 int first_place = master_th->th.th_first_place;
4527 int last_place = master_th->th.th_last_place;
4528 int masters_place = master_th->th.th_current_place;
4529 team->t.t_first_place = first_place;
4530 team->t.t_last_place = last_place;
4532 KA_TRACE(20, (
"__kmp_partition_places: enter: proc_bind = %d T#%d(%d:0) "
4533 "bound to place %d partition = [%d,%d]\n",
4534 proc_bind, __kmp_gtid_from_thread(team->t.t_threads[0]),
4535 team->t.t_id, masters_place, first_place, last_place));
4537 switch (proc_bind) {
4539 case proc_bind_default:
4542 KMP_DEBUG_ASSERT(team->t.t_nproc == 1);
4545 case proc_bind_master: {
4547 int n_th = team->t.t_nproc;
4548 for (f = 1; f < n_th; f++) {
4549 kmp_info_t *th = team->t.t_threads[f];
4550 KMP_DEBUG_ASSERT(th != NULL);
4551 th->th.th_first_place = first_place;
4552 th->th.th_last_place = last_place;
4553 th->th.th_new_place = masters_place;
4554 if (__kmp_display_affinity && masters_place != th->th.th_current_place &&
4555 team->t.t_display_affinity != 1) {
4556 team->t.t_display_affinity = 1;
4559 KA_TRACE(100, (
"__kmp_partition_places: master: T#%d(%d:%d) place %d "
4560 "partition = [%d,%d]\n",
4561 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4562 f, masters_place, first_place, last_place));
4566 case proc_bind_close: {
4568 int n_th = team->t.t_nproc;
4570 if (first_place <= last_place) {
4571 n_places = last_place - first_place + 1;
4573 n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4575 if (n_th <= n_places) {
4576 int place = masters_place;
4577 for (f = 1; f < n_th; f++) {
4578 kmp_info_t *th = team->t.t_threads[f];
4579 KMP_DEBUG_ASSERT(th != NULL);
4581 if (place == last_place) {
4582 place = first_place;
4583 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4588 th->th.th_first_place = first_place;
4589 th->th.th_last_place = last_place;
4590 th->th.th_new_place = place;
4591 if (__kmp_display_affinity && place != th->th.th_current_place &&
4592 team->t.t_display_affinity != 1) {
4593 team->t.t_display_affinity = 1;
4596 KA_TRACE(100, (
"__kmp_partition_places: close: T#%d(%d:%d) place %d "
4597 "partition = [%d,%d]\n",
4598 __kmp_gtid_from_thread(team->t.t_threads[f]),
4599 team->t.t_id, f, place, first_place, last_place));
4602 int S, rem, gap, s_count;
4603 S = n_th / n_places;
4605 rem = n_th - (S * n_places);
4606 gap = rem > 0 ? n_places / rem : n_places;
4607 int place = masters_place;
4609 for (f = 0; f < n_th; f++) {
4610 kmp_info_t *th = team->t.t_threads[f];
4611 KMP_DEBUG_ASSERT(th != NULL);
4613 th->th.th_first_place = first_place;
4614 th->th.th_last_place = last_place;
4615 th->th.th_new_place = place;
4616 if (__kmp_display_affinity && place != th->th.th_current_place &&
4617 team->t.t_display_affinity != 1) {
4618 team->t.t_display_affinity = 1;
4622 if ((s_count == S) && rem && (gap_ct == gap)) {
4624 }
else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4626 if (place == last_place) {
4627 place = first_place;
4628 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4636 }
else if (s_count == S) {
4637 if (place == last_place) {
4638 place = first_place;
4639 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4649 (
"__kmp_partition_places: close: T#%d(%d:%d) place %d "
4650 "partition = [%d,%d]\n",
4651 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id, f,
4652 th->th.th_new_place, first_place, last_place));
4654 KMP_DEBUG_ASSERT(place == masters_place);
4658 case proc_bind_spread: {
4660 int n_th = team->t.t_nproc;
4663 if (first_place <= last_place) {
4664 n_places = last_place - first_place + 1;
4666 n_places = __kmp_affinity_num_masks - first_place + last_place + 1;
4668 if (n_th <= n_places) {
4671 if (n_places !=
static_cast<int>(__kmp_affinity_num_masks)) {
4672 int S = n_places / n_th;
4673 int s_count, rem, gap, gap_ct;
4675 place = masters_place;
4676 rem = n_places - n_th * S;
4677 gap = rem ? n_th / rem : 1;
4680 if (update_master_only == 1)
4682 for (f = 0; f < thidx; f++) {
4683 kmp_info_t *th = team->t.t_threads[f];
4684 KMP_DEBUG_ASSERT(th != NULL);
4686 th->th.th_first_place = place;
4687 th->th.th_new_place = place;
4688 if (__kmp_display_affinity && place != th->th.th_current_place &&
4689 team->t.t_display_affinity != 1) {
4690 team->t.t_display_affinity = 1;
4693 while (s_count < S) {
4694 if (place == last_place) {
4695 place = first_place;
4696 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4703 if (rem && (gap_ct == gap)) {
4704 if (place == last_place) {
4705 place = first_place;
4706 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4714 th->th.th_last_place = place;
4717 if (place == last_place) {
4718 place = first_place;
4719 }
else if (place == (
int)(__kmp_affinity_num_masks - 1)) {
4726 (
"__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4727 "partition = [%d,%d], __kmp_affinity_num_masks: %u\n",
4728 __kmp_gtid_from_thread(team->t.t_threads[f]), team->t.t_id,
4729 f, th->th.th_new_place, th->th.th_first_place,
4730 th->th.th_last_place, __kmp_affinity_num_masks));
4736 double current =
static_cast<double>(masters_place);
4738 (
static_cast<double>(n_places + 1) /
static_cast<double>(n_th));
4743 if (update_master_only == 1)
4745 for (f = 0; f < thidx; f++) {
4746 first =
static_cast<int>(current);
4747 last =
static_cast<int>(current + spacing) - 1;
4748 KMP_DEBUG_ASSERT(last >= first);
4749 if (first >= n_places) {
4750 if (masters_place) {
4753 if (first == (masters_place + 1)) {
4754 KMP_DEBUG_ASSERT(f == n_th);
4757 if (last == masters_place) {
4758 KMP_DEBUG_ASSERT(f == (n_th - 1));
4762 KMP_DEBUG_ASSERT(f == n_th);
4767 if (last >= n_places) {
4768 last = (n_places - 1);
4773 KMP_DEBUG_ASSERT(0 <= first);
4774 KMP_DEBUG_ASSERT(n_places > first);
4775 KMP_DEBUG_ASSERT(0 <= last);
4776 KMP_DEBUG_ASSERT(n_places > last);
4777 KMP_DEBUG_ASSERT(last_place >= first_place);
4778 th = team->t.t_threads[f];
4779 KMP_DEBUG_ASSERT(th);
4780 th->th.th_first_place = first;
4781 th->th.th_new_place = place;
4782 th->th.th_last_place = last;
4783 if (__kmp_display_affinity && place != th->th.th_current_place &&
4784 team->t.t_display_affinity != 1) {
4785 team->t.t_display_affinity = 1;
4788 (
"__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4789 "partition = [%d,%d], spacing = %.4f\n",
4790 __kmp_gtid_from_thread(team->t.t_threads[f]),
4791 team->t.t_id, f, th->th.th_new_place,
4792 th->th.th_first_place, th->th.th_last_place, spacing));
4796 KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4798 int S, rem, gap, s_count;
4799 S = n_th / n_places;
4801 rem = n_th - (S * n_places);
4802 gap = rem > 0 ? n_places / rem : n_places;
4803 int place = masters_place;
4806 if (update_master_only == 1)
4808 for (f = 0; f < thidx; f++) {
4809 kmp_info_t *th = team->t.t_threads[f];
4810 KMP_DEBUG_ASSERT(th != NULL);
4812 th->th.th_first_place = place;
4813 th->th.th_last_place = place;
4814 th->th.th_new_place = place;
4815 if (__kmp_display_affinity && place != th->th.th_current_place &&
4816 team->t.t_display_affinity != 1) {
4817 team->t.t_display_affinity = 1;
4821 if ((s_count == S) && rem && (gap_ct == gap)) {
4823 } else if ((s_count == S + 1) && rem && (gap_ct == gap)) {
4825 if (place == last_place) {
4826 place = first_place;
4827 } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4835 } else if (s_count == S) {
4836 if (place == last_place) {
4837 place = first_place;
4838 } else if (place == (int)(__kmp_affinity_num_masks - 1)) {
4847 KA_TRACE(100, (
"__kmp_partition_places: spread: T#%d(%d:%d) place %d "
4848 "partition = [%d,%d]\n",
4849 __kmp_gtid_from_thread(team->t.t_threads[f]),
4850 team->t.t_id, f, th->th.th_new_place,
4851 th->th.th_first_place, th->th.th_last_place));
4853 KMP_DEBUG_ASSERT(update_master_only || place == masters_place);
4861 KA_TRACE(20, (
"__kmp_partition_places: exit T#%d\n", team->t.t_id));
4864 #endif // KMP_AFFINITY_SUPPORTED
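// Allocate a team for a (possibly nested) parallel region. When the root is
// not already active, the hot team is reused and resized to new_nproc;
// otherwise a team with a large enough t_max_nproc is taken from the team
// pool, or a brand new team is allocated. All paths (re)initialize the ICVs,
// proc_bind/places, and the argv entries.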
4869 kmp_team_t *__kmp_allocate_team(kmp_root_t *root, int new_nproc, int max_nproc,
4871 ompt_data_t ompt_parallel_data,
4873 kmp_proc_bind_t new_proc_bind,
4874 kmp_internal_control_t *new_icvs,
4875 int argc USE_NESTED_HOT_ARG(kmp_info_t *master)) {
4876 KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(KMP_allocate_team);
4879 int use_hot_team = !root->r.r_active;
4882 KA_TRACE(20, (
"__kmp_allocate_team: called\n"));
4883 KMP_DEBUG_ASSERT(new_nproc >= 1 && argc >= 0);
4884 KMP_DEBUG_ASSERT(max_nproc >= new_nproc);
4887 #if KMP_NESTED_HOT_TEAMS
4888 kmp_hot_team_ptr_t *hot_teams;
4890 team = master->th.th_team;
4891 level = team->t.t_active_level;
4892 if (master->th.th_teams_microtask) {
4893 if (master->th.th_teams_size.nteams > 1 &&
4896 (microtask_t)__kmp_teams_master ||
4897 master->th.th_teams_level <
4903 hot_teams = master->th.th_hot_teams;
4904 if (level < __kmp_hot_teams_max_level && hot_teams &&
4914 if (use_hot_team && new_nproc > 1) {
4915 KMP_DEBUG_ASSERT(new_nproc <= max_nproc);
4916 #if KMP_NESTED_HOT_TEAMS
4917 team = hot_teams[level].hot_team;
4919 team = root->r.r_hot_team;
4922 if (__kmp_tasking_mode != tskm_immediate_exec) {
4923 KA_TRACE(20, (
"__kmp_allocate_team: hot team task_team[0] = %p "
4924 "task_team[1] = %p before reinit\n",
4925 team->t.t_task_team[0], team->t.t_task_team[1]));
4932 if (team->t.t_nproc == new_nproc) {
4933 KA_TRACE(20, (
"__kmp_allocate_team: reusing hot team\n"));
4936 if (team->t.t_size_changed == -1) {
4937 team->t.t_size_changed = 1;
4939 KMP_CHECK_UPDATE(team->t.t_size_changed, 0);
4943 kmp_r_sched_t new_sched = new_icvs->sched;
4945 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_sched.sched);
4947 __kmp_reinitialize_team(team, new_icvs,
4948 root->r.r_uber_thread->th.th_ident);
4950 KF_TRACE(10, (
"__kmp_allocate_team2: T#%d, this_thread=%p team=%p\n", 0,
4951 team->t.t_threads[0], team));
4952 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
4954 #if KMP_AFFINITY_SUPPORTED
4955 if ((team->t.t_size_changed == 0) &&
4956 (team->t.t_proc_bind == new_proc_bind)) {
4957 if (new_proc_bind == proc_bind_spread) {
4958 __kmp_partition_places(team, 1); // only update the master thread's place
4961 KA_TRACE(200, (
"__kmp_allocate_team: reusing hot team #%d bindings: "
4962 "proc_bind = %d, partition = [%d,%d]\n",
4963 team->t.t_id, new_proc_bind, team->t.t_first_place,
4964 team->t.t_last_place));
4966 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4967 __kmp_partition_places(team);
4970 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
4972 } else if (team->t.t_nproc > new_nproc) {
4974 ("__kmp_allocate_team: decreasing hot team thread count to %d\n",
4977 team->t.t_size_changed = 1;
4978 #if KMP_NESTED_HOT_TEAMS
4979 if (__kmp_hot_teams_mode == 0) {
4982 KMP_DEBUG_ASSERT(hot_teams[level].hot_team_nth == team->t.t_nproc);
4983 hot_teams[level].hot_team_nth = new_nproc;
4984 #endif // KMP_NESTED_HOT_TEAMS
4986 for (f = new_nproc; f < team->t.t_nproc; f++) {
4987 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
4988 if (__kmp_tasking_mode != tskm_immediate_exec) {
4991 team->t.t_threads[f]->th.th_task_team = NULL;
4993 __kmp_free_thread(team->t.t_threads[f]);
4994 team->t.t_threads[f] = NULL;
4996 #if KMP_NESTED_HOT_TEAMS
5001 for (f = new_nproc; f < team->t.t_nproc; ++f) {
5002 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5003 kmp_balign_t *balign = team->t.t_threads[f]->th.th_bar;
5004 for (int b = 0; b < bs_last_barrier; ++b) {
5005 if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG) {
5006 balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5008 KMP_CHECK_UPDATE(balign[b].bb.leaf_kids, 0);
5012 #endif // KMP_NESTED_HOT_TEAMS
5013 team->t.t_nproc = new_nproc;
5015 KMP_CHECK_UPDATE(team->t.t_sched.sched, new_icvs->sched.sched);
5016 __kmp_reinitialize_team(team, new_icvs,
5017 root->r.r_uber_thread->th.th_ident);
5020 for (f = 0; f < new_nproc; ++f) {
5021 team->t.t_threads[f]->th.th_team_nproc = new_nproc;
5026 KF_TRACE(10, (
"__kmp_allocate_team: T#%d, this_thread=%p team=%p\n", 0,
5027 team->t.t_threads[0], team));
5029 __kmp_push_current_task_to_thread(team->t.t_threads[0], team, 0);
5032 for (f = 0; f < team->t.t_nproc; f++) {
5033 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5034 team->t.t_threads[f]->th.th_team_nproc ==
5039 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5040 #if KMP_AFFINITY_SUPPORTED
5041 __kmp_partition_places(team);
5044 #if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
5045 kmp_affin_mask_t *old_mask;
5046 if (KMP_AFFINITY_CAPABLE()) {
5047 KMP_CPU_ALLOC(old_mask);
5052 ("__kmp_allocate_team: increasing hot team thread count to %d\n",
5055 team->t.t_size_changed = 1;
5057 #if KMP_NESTED_HOT_TEAMS
5058 int avail_threads = hot_teams[level].hot_team_nth;
5059 if (new_nproc < avail_threads)
5060 avail_threads = new_nproc;
5061 kmp_info_t **other_threads = team->t.t_threads;
5062 for (f = team->t.t_nproc; f < avail_threads; ++f) {
5066 kmp_balign_t *balign = other_threads[f]->th.th_bar;
5067 for (b = 0; b < bs_last_barrier; ++b) {
5068 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5069 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5071 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5075 if (hot_teams[level].hot_team_nth >= new_nproc) {
5078 KMP_DEBUG_ASSERT(__kmp_hot_teams_mode == 1);
5079 team->t.t_nproc = new_nproc;
5085 hot_teams[level].hot_team_nth = new_nproc;
5086 #endif // KMP_NESTED_HOT_TEAMS
5087 if (team->t.t_max_nproc < new_nproc) {
5089 __kmp_reallocate_team_arrays(team, new_nproc);
5090 __kmp_reinitialize_team(team, new_icvs, NULL);
5093 #if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
5098 __kmp_set_thread_affinity_mask_full_tmp(old_mask);
5102 for (f = team->t.t_nproc; f < new_nproc; f++) {
5103 kmp_info_t *new_worker = __kmp_allocate_thread(root, team, f);
5104 KMP_DEBUG_ASSERT(new_worker);
5105 team->t.t_threads[f] = new_worker;
5108 ("__kmp_allocate_team: team %d init T#%d arrived: "
5109 "join=%llu, plain=%llu\n",
5110 team->t.t_id, __kmp_gtid_from_tid(f, team), team->t.t_id, f,
5111 team->t.t_bar[bs_forkjoin_barrier].b_arrived,
5112 team->t.t_bar[bs_plain_barrier].b_arrived));
5116 kmp_balign_t *balign = new_worker->th.th_bar;
5117 for (b = 0; b < bs_last_barrier; ++b) {
5118 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5119 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag !=
5120 KMP_BARRIER_PARENT_FLAG);
5122 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5128 #if (KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED
5129 if (KMP_AFFINITY_CAPABLE()) {
5131 __kmp_set_system_affinity(old_mask, TRUE);
5132 KMP_CPU_FREE(old_mask);
5135 #if KMP_NESTED_HOT_TEAMS
5137 #endif // KMP_NESTED_HOT_TEAMS
5139 int old_nproc = team->t.t_nproc;
5141 __kmp_initialize_team(team, new_nproc, new_icvs,
5142 root->r.r_uber_thread->th.th_ident);
5145 KMP_DEBUG_ASSERT(team->t.t_nproc == new_nproc);
5146 for (f = 0; f < team->t.t_nproc; ++f)
5147 __kmp_initialize_info(team->t.t_threads[f], team, f,
5148 __kmp_gtid_from_tid(f, team));
5156 for (f = old_nproc; f < team->t.t_nproc; ++f)
5157 team->t.t_threads[f]->th.th_task_state =
5158 team->t.t_threads[0]->th.th_task_state_memo_stack[level];
5161 team->t.t_threads[0]->th.th_task_state;
5162 for (f = old_nproc; f < team->t.t_nproc; ++f)
5163 team->t.t_threads[f]->th.th_task_state = old_state;
5167 for (f = 0; f < team->t.t_nproc; ++f) {
5168 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
5169 team->t.t_threads[f]->th.th_team_nproc ==
5174 KMP_CHECK_UPDATE(team->t.t_proc_bind, new_proc_bind);
5175 #if KMP_AFFINITY_SUPPORTED
5176 __kmp_partition_places(team);
5180 kmp_info_t *master = team->t.t_threads[0];
5181 if (master->th.th_teams_microtask) {
5182 for (f = 1; f < new_nproc; ++f) {
5184 kmp_info_t *thr = team->t.t_threads[f];
5185 thr->th.th_teams_microtask = master->th.th_teams_microtask;
5186 thr->th.th_teams_level = master->th.th_teams_level;
5187 thr->th.th_teams_size = master->th.th_teams_size;
5190 #if KMP_NESTED_HOT_TEAMS
5194 for (f = 1; f < new_nproc; ++f) {
5195 kmp_info_t *thr = team->t.t_threads[f];
5197 kmp_balign_t *balign = thr->th.th_bar;
5198 for (b = 0; b < bs_last_barrier; ++b) {
5199 balign[b].bb.b_arrived = team->t.t_bar[b].b_arrived;
5200 KMP_DEBUG_ASSERT(balign[b].bb.wait_flag != KMP_BARRIER_PARENT_FLAG);
5202 balign[b].bb.b_worker_arrived = team->t.t_bar[b].b_team_arrived;
5207 #endif // KMP_NESTED_HOT_TEAMS
5210 __kmp_alloc_argv_entries(argc, team, TRUE);
5211 KMP_CHECK_UPDATE(team->t.t_argc, argc);
5215 KF_TRACE(10, (
" hot_team = %p\n", team));
5218 if (__kmp_tasking_mode != tskm_immediate_exec) {
5219 KA_TRACE(20, (
"__kmp_allocate_team: hot team task_team[0] = %p "
5220 "task_team[1] = %p after reinit\n",
5221 team->t.t_task_team[0], team->t.t_task_team[1]));
5226 __ompt_team_assign_id(team, ompt_parallel_data);
5236 for (team = CCAST(kmp_team_t *, __kmp_team_pool); (team);) {
5239 if (team->t.t_max_nproc >= max_nproc) {
5241 __kmp_team_pool = team->t.t_next_pool;
5244 __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5246 KA_TRACE(20, (
"__kmp_allocate_team: setting task_team[0] %p and "
5247 "task_team[1] %p to NULL\n",
5248 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5249 team->t.t_task_team[0] = NULL;
5250 team->t.t_task_team[1] = NULL;
5253 __kmp_alloc_argv_entries(argc, team, TRUE);
5254 KMP_CHECK_UPDATE(team->t.t_argc, argc);
5257 KA_TRACE(20, ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5258 team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5261 for (b = 0; b < bs_last_barrier; ++b) {
5262 team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5264 team->t.t_bar[b].b_master_arrived = 0;
5265 team->t.t_bar[b].b_team_arrived = 0;
5270 team->t.t_proc_bind = new_proc_bind;
5272 KA_TRACE(20, (
"__kmp_allocate_team: using team from pool %d.\n",
5276 __ompt_team_assign_id(team, ompt_parallel_data);
5288 team = __kmp_reap_team(team);
5289 __kmp_team_pool = team;
5294 team = (kmp_team_t *)__kmp_allocate(sizeof(kmp_team_t));
5297 team->t.t_max_nproc = max_nproc;
5300 __kmp_allocate_team_arrays(team, max_nproc);
5302 KA_TRACE(20, (
"__kmp_allocate_team: making a new team\n"));
5303 __kmp_initialize_team(team, new_nproc, new_icvs, NULL);
5305 KA_TRACE(20, (
"__kmp_allocate_team: setting task_team[0] %p and task_team[1] "
5307 &team->t.t_task_team[0], &team->t.t_task_team[1]));
5308 team->t.t_task_team[0] = NULL;
5310 team->t.t_task_team[1] = NULL;
5313 if (__kmp_storage_map) {
5314 __kmp_print_team_storage_map("team", team, team->t.t_id, new_nproc);
5318 __kmp_alloc_argv_entries(argc, team, FALSE);
5319 team->t.t_argc = argc;
5322 ("__kmp_allocate_team: team %d init arrived: join=%u, plain=%u\n",
5323 team->t.t_id, KMP_INIT_BARRIER_STATE, KMP_INIT_BARRIER_STATE));
5326 for (b = 0; b < bs_last_barrier; ++b) {
5327 team->t.t_bar[b].b_arrived = KMP_INIT_BARRIER_STATE;
5329 team->t.t_bar[b].b_master_arrived = 0;
5330 team->t.t_bar[b].b_team_arrived = 0;
5335 team->t.t_proc_bind = new_proc_bind;
5338 __ompt_team_assign_id(team, ompt_parallel_data);
5339 team->t.ompt_serialized_team_info = NULL;
5344 KA_TRACE(20, (
"__kmp_allocate_team: done creating a new team %d.\n",
5355 void __kmp_free_team(kmp_root_t *root,
5356 kmp_team_t *team USE_NESTED_HOT_ARG(kmp_info_t *master)) {
5358 KA_TRACE(20, (
"__kmp_free_team: T#%d freeing team %d\n", __kmp_get_gtid(),
5362 KMP_DEBUG_ASSERT(root);
5363 KMP_DEBUG_ASSERT(team);
5364 KMP_DEBUG_ASSERT(team->t.t_nproc <= team->t.t_max_nproc);
5365 KMP_DEBUG_ASSERT(team->t.t_threads);
5367 int use_hot_team = team == root->r.r_hot_team;
5368 #if KMP_NESTED_HOT_TEAMS
5370 kmp_hot_team_ptr_t *hot_teams;
5372 level = team->t.t_active_level - 1;
5373 if (master->th.th_teams_microtask) {
5374 if (master->th.th_teams_size.nteams > 1) {
5378 if (team->t.t_pkfn != (microtask_t)__kmp_teams_master &&
5379 master->th.th_teams_level == team->t.t_level) {
5384 hot_teams = master->th.th_hot_teams;
5385 if (level < __kmp_hot_teams_max_level) {
5386 KMP_DEBUG_ASSERT(team == hot_teams[level].hot_team);
5390 #endif // KMP_NESTED_HOT_TEAMS
5393 TCW_SYNC_PTR(team->t.t_pkfn, NULL);
5396 team->t.t_copyin_counter = 0;
5401 if (!use_hot_team) {
5402 if (__kmp_tasking_mode != tskm_immediate_exec) {
5404 for (f = 1; f < team->t.t_nproc; ++f) {
5405 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5406 kmp_info_t *th = team->t.t_threads[f];
5407 volatile kmp_uint32 *state = &th->th.th_reap_state;
5408 while (*state != KMP_SAFE_TO_REAP) {
5412 if (!__kmp_is_thread_alive(th, &ecode)) {
5413 *state = KMP_SAFE_TO_REAP;
5418 kmp_flag_64 fl(&th->th.th_bar[bs_forkjoin_barrier].bb.b_go, th);
5419 if (fl.is_sleeping())
5420 fl.resume(__kmp_gtid_from_thread(th));
5427 for (tt_idx = 0; tt_idx < 2; ++tt_idx) {
5428 kmp_task_team_t *task_team = team->t.t_task_team[tt_idx];
5429 if (task_team != NULL) {
5430 for (f = 0; f < team->t.t_nproc; ++f) {
5431 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5432 team->t.t_threads[f]->th.th_task_team = NULL;
5436 ("__kmp_free_team: T#%d deactivating task_team %p on team %d\n",
5437 __kmp_get_gtid(), task_team, team->t.t_id));
5438 #if KMP_NESTED_HOT_TEAMS
5439 __kmp_free_task_team(master, task_team);
5441 team->t.t_task_team[tt_idx] = NULL;
5447 team->t.t_parent = NULL;
5448 team->t.t_level = 0;
5449 team->t.t_active_level = 0;
5452 for (f = 1; f < team->t.t_nproc; ++f) {
5453 KMP_DEBUG_ASSERT(team->t.t_threads[f]);
5454 __kmp_free_thread(team->t.t_threads[f]);
5455 team->t.t_threads[f] = NULL;
5460 team->t.t_next_pool = CCAST(kmp_team_t *, __kmp_team_pool);
5461 __kmp_team_pool = (volatile kmp_team_t *)team;
5464 KMP_DEBUG_ASSERT(team->t.t_threads[1] &&
5465 team->t.t_threads[1]->th.th_cg_roots);
5466 if (team->t.t_threads[1]->th.th_cg_roots->cg_root == team->t.t_threads[1]) {
5468 for (f = 1; f < team->t.t_nproc; ++f) {
5469 kmp_info_t *thr = team->t.t_threads[f];
5470 KMP_DEBUG_ASSERT(thr && thr->th.th_cg_roots &&
5471 thr->th.th_cg_roots->cg_root == thr);
5473 kmp_cg_root_t *tmp = thr->th.th_cg_roots;
5474 thr->th.th_cg_roots = tmp->up;
5475 KA_TRACE(100, (
"__kmp_free_team: Thread %p popping node %p and moving"
5476 " up to node %p. cg_nthreads was %d\n",
5477 thr, tmp, thr->th.th_cg_roots, tmp->cg_nthreads));
5478 int i = tmp->cg_nthreads--;
5483 if (thr->th.th_cg_roots)
5484 thr->th.th_current_task->td_icvs.thread_limit =
5485 thr->th.th_cg_roots->cg_thread_limit;
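// Reap a team that has been removed from the team pool: free its arrays and
// argv storage and return the next team in the pool chain.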
5494 kmp_team_t *__kmp_reap_team(kmp_team_t *team) {
5495 kmp_team_t *next_pool = team->t.t_next_pool;
5497 KMP_DEBUG_ASSERT(team);
5498 KMP_DEBUG_ASSERT(team->t.t_dispatch);
5499 KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
5500 KMP_DEBUG_ASSERT(team->t.t_threads);
5501 KMP_DEBUG_ASSERT(team->t.t_argv);
5506 __kmp_free_team_arrays(team);
5507 if (team->t.t_argv != &team->t.t_inline_argv[0])
5508 __kmp_free((void *)team->t.t_argv);
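// Put the thread back on the free thread pool, keeping the pool sorted by
// gtid (smaller gtids are reused first); __kmp_thread_pool_insert_pt caches
// the last insertion point so the scan below usually starts mid-list.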
5540 void __kmp_free_thread(kmp_info_t *this_th) {
5544 KA_TRACE(20, (
"__kmp_free_thread: T#%d putting T#%d back on free pool.\n",
5545 __kmp_get_gtid(), this_th->th.th_info.ds.ds_gtid));
5547 KMP_DEBUG_ASSERT(this_th);
5552 kmp_balign_t *balign = this_th->th.th_bar;
5553 for (b = 0; b < bs_last_barrier; ++b) {
5554 if (balign[b].bb.wait_flag == KMP_BARRIER_PARENT_FLAG)
5555 balign[b].bb.wait_flag = KMP_BARRIER_SWITCH_TO_OWN_FLAG;
5556 balign[b].bb.team = NULL;
5557 balign[b].bb.leaf_kids = 0;
5559 this_th->th.th_task_state = 0;
5560 this_th->th.th_reap_state = KMP_SAFE_TO_REAP;
5563 TCW_PTR(this_th->th.th_team, NULL);
5564 TCW_PTR(this_th->th.th_root, NULL);
5565 TCW_PTR(this_th->th.th_dispatch, NULL);
5567 while (this_th->th.th_cg_roots) {
5568 this_th->th.th_cg_roots->cg_nthreads--;
5569 KA_TRACE(100, (
"__kmp_free_thread: Thread %p decrement cg_nthreads on node"
5570 " %p of thread %p to %d\n",
5571 this_th, this_th->th.th_cg_roots,
5572 this_th->th.th_cg_roots->cg_root,
5573 this_th->th.th_cg_roots->cg_nthreads));
5574 kmp_cg_root_t *tmp = this_th->th.th_cg_roots;
5575 if (tmp->cg_root == this_th) {
5576 KMP_DEBUG_ASSERT(tmp->cg_nthreads == 0);
5578 KA_TRACE(5, ("__kmp_free_thread: Thread %p freeing node %p\n", this_th, tmp));
5579 this_th->th.th_cg_roots = tmp->up;
5582 if (tmp->cg_nthreads == 0) {
5585 this_th->th.th_cg_roots = NULL;
5595 __kmp_free_implicit_task(this_th);
5596 this_th->th.th_current_task = NULL;
5600 gtid = this_th->th.th_info.ds.ds_gtid;
5601 if (__kmp_thread_pool_insert_pt != NULL) {
5602 KMP_DEBUG_ASSERT(__kmp_thread_pool != NULL);
5603 if (__kmp_thread_pool_insert_pt->th.th_info.ds.ds_gtid > gtid) {
5604 __kmp_thread_pool_insert_pt = NULL;
5613 if (__kmp_thread_pool_insert_pt != NULL) {
5614 scan = &(__kmp_thread_pool_insert_pt->th.th_next_pool);
5616 scan = CCAST(kmp_info_t **, &__kmp_thread_pool);
5618 for (; (*scan != NULL) && ((*scan)->th.th_info.ds.ds_gtid < gtid);
5619 scan = &((*scan)->th.th_next_pool))
5624 TCW_PTR(this_th->th.th_next_pool, *scan);
5625 __kmp_thread_pool_insert_pt = *scan = this_th;
5626 KMP_DEBUG_ASSERT((this_th->th.th_next_pool == NULL) ||
5627 (this_th->th.th_info.ds.ds_gtid <
5628 this_th->th.th_next_pool->th.th_info.ds.ds_gtid));
5629 TCW_4(this_th->th.th_in_pool, TRUE);
5630 __kmp_suspend_initialize_thread(this_th);
5631 __kmp_lock_suspend_mx(this_th);
5632 if (this_th->th.th_active == TRUE) {
5633 KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
5634 this_th->th.th_active_in_pool = TRUE;
5638 KMP_DEBUG_ASSERT(this_th->th.th_active_in_pool == FALSE);
5641 __kmp_unlock_suspend_mx(this_th);
5643 TCW_4(__kmp_nth, __kmp_nth - 1);
5645 #ifdef KMP_ADJUST_BLOCKTIME
5648 if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5649 KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5650 if (__kmp_nth <= __kmp_avail_proc) {
5651 __kmp_zero_bt = FALSE;
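// Main loop of a worker thread: wait at the fork barrier for work, invoke the
// team's microtask through t_invoke, pass the join barrier, and repeat until
// global shutdown (g_done). OMPT thread begin/end callbacks are driven here.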
5661 void *__kmp_launch_thread(kmp_info_t *this_thr) {
5662 int gtid = this_thr->th.th_info.ds.ds_gtid;
5664 kmp_team_t **volatile pteam;
5667 KA_TRACE(10, (
"__kmp_launch_thread: T#%d start\n", gtid));
5669 if (__kmp_env_consistency_check) {
5670 this_thr->th.th_cons = __kmp_allocate_cons_stack(gtid);
5674 ompt_data_t *thread_data;
5675 if (ompt_enabled.enabled) {
5676 thread_data = &(this_thr->th.ompt_thread_info.thread_data);
5677 *thread_data = ompt_data_none;
5679 this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5680 this_thr->th.ompt_thread_info.wait_id = 0;
5681 this_thr->th.ompt_thread_info.idle_frame = OMPT_GET_FRAME_ADDRESS(0);
5682 this_thr->th.ompt_thread_info.parallel_flags = 0;
5683 if (ompt_enabled.ompt_callback_thread_begin) {
5684 ompt_callbacks.ompt_callback(ompt_callback_thread_begin)(
5685 ompt_thread_worker, thread_data);
5687 this_thr->th.ompt_thread_info.state = ompt_state_idle;
5692 while (!TCR_4(__kmp_global.g.g_done)) {
5693 KMP_DEBUG_ASSERT(this_thr == __kmp_threads[gtid]);
5697 KA_TRACE(20, (
"__kmp_launch_thread: T#%d waiting for work\n", gtid));
5700 __kmp_fork_barrier(gtid, KMP_GTID_DNE);
5703 if (ompt_enabled.enabled) {
5704 this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5708 pteam = &this_thr->th.th_team;
5711 if (TCR_SYNC_PTR(*pteam) && !TCR_4(__kmp_global.g.g_done)) {
5713 if (TCR_SYNC_PTR((*pteam)->t.t_pkfn) != NULL) {
5716 ("__kmp_launch_thread: T#%d(%d:%d) invoke microtask = %p\n",
5717 gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5718 (*pteam)->t.t_pkfn));
5720 updateHWFPControl(*pteam);
5723 if (ompt_enabled.enabled) {
5724 this_thr->th.ompt_thread_info.state = ompt_state_work_parallel;
5728 rc = (*pteam)->t.t_invoke(gtid);
5732 KA_TRACE(20, (
"__kmp_launch_thread: T#%d(%d:%d) done microtask = %p\n",
5733 gtid, (*pteam)->t.t_id, __kmp_tid_from_gtid(gtid),
5734 (*pteam)->t.t_pkfn));
5737 if (ompt_enabled.enabled) {
5739 __ompt_get_task_info_object(0)->frame.exit_frame = ompt_data_none;
5741 this_thr->th.ompt_thread_info.state = ompt_state_overhead;
5745 __kmp_join_barrier(gtid);
5748 TCR_SYNC_PTR((intptr_t)__kmp_global.g.g_done);
5751 if (ompt_enabled.ompt_callback_thread_end) {
5752 ompt_callbacks.ompt_callback(ompt_callback_thread_end)(thread_data);
5756 this_thr->th.th_task_team = NULL;
5758 __kmp_common_destroy_gtid(gtid);
5760 KA_TRACE(10, (
"__kmp_launch_thread: T#%d done\n", gtid));
5767 void __kmp_internal_end_dest(void *specific_gtid) {
5768 #if KMP_COMPILER_ICC
5769 #pragma warning(push)
5770 #pragma warning(disable : 810) // conversion from "void *" to "int" may lose
5774 int gtid = (kmp_intptr_t)specific_gtid - 1;
5775 #if KMP_COMPILER_ICC
5776 #pragma warning(pop)
5779 KA_TRACE(30, (
"__kmp_internal_end_dest: T#%d\n", gtid));
5792 if (gtid >= 0 && KMP_UBER_GTID(gtid))
5793 __kmp_gtid_set_specific(gtid);
5794 #ifdef KMP_TDATA_GTID
5797 __kmp_internal_end_thread(gtid);
5800 #if KMP_OS_UNIX && KMP_DYNAMIC_LIB
5802 __attribute__((destructor))
void __kmp_internal_end_dtor(void) {
5803 __kmp_internal_end_atexit();
5810 void __kmp_internal_end_atexit(void) {
5811 KA_TRACE(30, (
"__kmp_internal_end_atexit\n"));
5835 __kmp_internal_end_library(-1);
5837 __kmp_close_console();
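// Reap a single thread: release it from the fork barrier if it is sleeping,
// join the underlying OS thread, and free all of its per-thread resources
// (implicit task, cons stack, affinity mask, task-state stack, etc.).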
5841 static void __kmp_reap_thread(kmp_info_t *thread, int is_root) {
5846 KMP_DEBUG_ASSERT(thread != NULL);
5848 gtid = thread->th.th_info.ds.ds_gtid;
5851 if (__kmp_dflt_blocktime != KMP_MAX_BLOCKTIME) {
5854 KA_TRACE(20, ("__kmp_reap_thread: releasing T#%d from fork barrier for reap\n",
5858 ANNOTATE_HAPPENS_BEFORE(thread);
5859 kmp_flag_64 flag(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
5860 __kmp_release_64(&flag);
5864 __kmp_reap_worker(thread);
5876 if (thread->th.th_active_in_pool) {
5877 thread->th.th_active_in_pool = FALSE;
5878 KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
5879 KMP_DEBUG_ASSERT(__kmp_thread_pool_active_nth >= 0);
5883 __kmp_free_implicit_task(thread);
5887 __kmp_free_fast_memory(thread);
5890 __kmp_suspend_uninitialize_thread(thread);
5892 KMP_DEBUG_ASSERT(__kmp_threads[gtid] == thread);
5893 TCW_SYNC_PTR(__kmp_threads[gtid], NULL);
5898 #ifdef KMP_ADJUST_BLOCKTIME
5901 if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
5902 KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
5903 if (__kmp_nth <= __kmp_avail_proc) {
5904 __kmp_zero_bt = FALSE;
5910 if (__kmp_env_consistency_check) {
5911 if (thread->th.th_cons) {
5912 __kmp_free_cons_stack(thread->th.th_cons);
5913 thread->th.th_cons = NULL;
5917 if (thread->th.th_pri_common != NULL) {
5918 __kmp_free(thread->th.th_pri_common);
5919 thread->th.th_pri_common = NULL;
5922 if (thread->th.th_task_state_memo_stack != NULL) {
5923 __kmp_free(thread->th.th_task_state_memo_stack);
5924 thread->th.th_task_state_memo_stack = NULL;
5928 if (thread->th.th_local.bget_data != NULL) {
5929 __kmp_finalize_bget(thread);
5933 #if KMP_AFFINITY_SUPPORTED
5934 if (thread->th.th_affin_mask != NULL) {
5935 KMP_CPU_FREE(thread->th.th_affin_mask);
5936 thread->th.th_affin_mask = NULL;
5940 #if KMP_USE_HIER_SCHED
5941 if (thread->th.th_hier_bar_data != NULL) {
5942 __kmp_free(thread->th.th_hier_bar_data);
5943 thread->th.th_hier_bar_data = NULL;
5947 __kmp_reap_team(thread->th.th_serial_team);
5948 thread->th.th_serial_team = NULL;
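// Common shutdown path: unregister the library and, if no root is still
// active, reap the monitor (when used), every pooled thread and team, and the
// task teams, then mark the runtime as uninitialized.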
5955 static void __kmp_internal_end(void) {
5959 __kmp_unregister_library();
5966 __kmp_reclaim_dead_roots();
5970 for (i = 0; i < __kmp_threads_capacity; i++)
5972 if (__kmp_root[i]->r.r_active)
5975 TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
5977 if (i < __kmp_threads_capacity) {
5989 __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
5990 if (TCR_4(__kmp_init_monitor)) {
5991 __kmp_reap_monitor(&__kmp_monitor);
5992 TCW_4(__kmp_init_monitor, 0);
5994 __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
5995 KA_TRACE(10, (
"__kmp_internal_end: monitor reaped\n"));
5996 #endif // KMP_USE_MONITOR
6001 for (i = 0; i < __kmp_threads_capacity; i++) {
6002 if (__kmp_root[i]) {
6005 KMP_ASSERT(!__kmp_root[i]->r.r_active);
6014 while (__kmp_thread_pool != NULL) {
6016 kmp_info_t *thread = CCAST(kmp_info_t *, __kmp_thread_pool);
6017 __kmp_thread_pool = thread->th.th_next_pool;
6019 KMP_DEBUG_ASSERT(thread->th.th_reap_state == KMP_SAFE_TO_REAP);
6020 thread->th.th_next_pool = NULL;
6021 thread->th.th_in_pool = FALSE;
6022 __kmp_reap_thread(thread, 0);
6024 __kmp_thread_pool_insert_pt = NULL;
6027 while (__kmp_team_pool != NULL) {
6029 kmp_team_t *team = CCAST(kmp_team_t *, __kmp_team_pool);
6030 __kmp_team_pool = team->t.t_next_pool;
6032 team->t.t_next_pool = NULL;
6033 __kmp_reap_team(team);
6036 __kmp_reap_task_teams();
6043 for (i = 0; i < __kmp_threads_capacity; i++) {
6044 kmp_info_t *thr = __kmp_threads[i];
6045 while (thr && KMP_ATOMIC_LD_ACQ(&thr->th.th_blocking))
6050 for (i = 0; i < __kmp_threads_capacity; ++i) {
6057 TCW_SYNC_4(__kmp_init_common, FALSE);
6059 KA_TRACE(10, (
"__kmp_internal_end: all workers reaped\n"));
6067 __kmp_acquire_bootstrap_lock(&__kmp_monitor_lock);
6068 if (TCR_4(__kmp_init_monitor)) {
6069 __kmp_reap_monitor(&__kmp_monitor);
6070 TCW_4(__kmp_init_monitor, 0);
6072 __kmp_release_bootstrap_lock(&__kmp_monitor_lock);
6073 KA_TRACE(10, (
"__kmp_internal_end: monitor reaped\n"));
6076 TCW_4(__kmp_init_gtid, FALSE);
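// Shutdown entry used when the library itself is being unloaded (or at
// process exit): unregister this root if possible, then run the full
// __kmp_internal_end under the initialization and fork/join bootstrap locks.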
6085 void __kmp_internal_end_library(int gtid_req) {
6092 if (__kmp_global.g.g_abort) {
6093 KA_TRACE(11, (
"__kmp_internal_end_library: abort, exiting\n"));
6097 if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6098 KA_TRACE(10, (
"__kmp_internal_end_library: already finished\n"));
6106 int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6108 KA_TRACE(10, ("__kmp_internal_end_library: enter T#%d (%d)\n", gtid, gtid_req));
6109 if (gtid == KMP_GTID_SHUTDOWN) {
6110 KA_TRACE(10, (
"__kmp_internal_end_library: !__kmp_init_runtime, system "
6111 "already shutdown\n"));
6113 } else if (gtid == KMP_GTID_MONITOR) {
6114 KA_TRACE(10, (
"__kmp_internal_end_library: monitor thread, gtid not "
6115 "registered, or system shutdown\n"));
6117 } else if (gtid == KMP_GTID_DNE) {
6118 KA_TRACE(10, (
"__kmp_internal_end_library: gtid not registered or system "
6121 } else if (KMP_UBER_GTID(gtid)) {
6123 if (__kmp_root[gtid]->r.r_active) {
6124 __kmp_global.g.g_abort = -1;
6125 TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6127 ("__kmp_internal_end_library: root still active, abort T#%d\n",
6133 ("__kmp_internal_end_library: unregistering sibling T#%d\n", gtid));
6134 __kmp_unregister_root_current_thread(gtid);
6141 #ifdef DUMP_DEBUG_ON_EXIT
6142 if (__kmp_debug_buf)
6143 __kmp_dump_debug_buffer();
6149 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6152 if (__kmp_global.g.g_abort) {
6153 KA_TRACE(10, (
"__kmp_internal_end_library: abort, exiting\n"));
6155 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6158 if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6159 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6168 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6171 __kmp_internal_end();
6173 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6174 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6176 KA_TRACE(10, (
"__kmp_internal_end_library: exit\n"));
6178 #ifdef DUMP_DEBUG_ON_EXIT
6179 if (__kmp_debug_buf)
6180 __kmp_dump_debug_buffer();
6184 __kmp_close_console();
6187 __kmp_fini_allocator();
6191 void __kmp_internal_end_thread(int gtid_req) {
6200 if (__kmp_global.g.g_abort) {
6201 KA_TRACE(11, (
"__kmp_internal_end_thread: abort, exiting\n"));
6205 if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6206 KA_TRACE(10, (
"__kmp_internal_end_thread: already finished\n"));
6214 int gtid = (gtid_req >= 0) ? gtid_req : __kmp_gtid_get_specific();
6216 ("__kmp_internal_end_thread: enter T#%d (%d)\n", gtid, gtid_req));
6217 if (gtid == KMP_GTID_SHUTDOWN) {
6218 KA_TRACE(10, (
"__kmp_internal_end_thread: !__kmp_init_runtime, system "
6219 "already shutdown\n"));
6221 } else if (gtid == KMP_GTID_MONITOR) {
6222 KA_TRACE(10, (
"__kmp_internal_end_thread: monitor thread, gtid not "
6223 "registered, or system shutdown\n"));
6225 } else if (gtid == KMP_GTID_DNE) {
6226 KA_TRACE(10, (
"__kmp_internal_end_thread: gtid not registered or system "
6230 } else if (KMP_UBER_GTID(gtid)) {
6232 if (__kmp_root[gtid]->r.r_active) {
6233 __kmp_global.g.g_abort = -1;
6234 TCW_SYNC_4(__kmp_global.g.g_done, TRUE);
6236 ("__kmp_internal_end_thread: root still active, abort T#%d\n",
6240 KA_TRACE(10, (
"__kmp_internal_end_thread: unregistering sibling T#%d\n",
6242 __kmp_unregister_root_current_thread(gtid);
6246 KA_TRACE(10, (
"__kmp_internal_end_thread: worker thread T#%d\n", gtid));
6249 __kmp_threads[gtid]->th.th_task_team = NULL;
6253 ("__kmp_internal_end_thread: worker thread done, exiting T#%d\n",
6259 if (__kmp_pause_status != kmp_hard_paused)
6263 KA_TRACE(10, (
"__kmp_internal_end_thread: exiting T#%d\n", gtid_req));
6268 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6271 if (__kmp_global.g.g_abort) {
6272 KA_TRACE(10, (
"__kmp_internal_end_thread: abort, exiting\n"));
6274 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6277 if (TCR_4(__kmp_global.g.g_done) || !__kmp_init_serial) {
6278 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6289 __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
6291 for (i = 0; i < __kmp_threads_capacity; ++i) {
6292 if (KMP_UBER_GTID(i)) {
6295 ("__kmp_internal_end_thread: remaining sibling task: gtid==%d\n", i));
6296 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6297 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6304 __kmp_internal_end();
6306 __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
6307 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6309 KA_TRACE(10, (
"__kmp_internal_end_thread: exit T#%d\n", gtid_req));
6311 #ifdef DUMP_DEBUG_ON_EXIT
6312 if (__kmp_debug_buf)
6313 __kmp_dump_debug_buffer();
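// Library registration: the runtime publishes itself in an environment
// variable keyed by the process id so that a second OpenMP runtime loaded
// into the same process can be detected. On a collision the process aborts
// with a DuplicateLibrary error unless KMP_DUPLICATE_LIB_OK is set.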
6320 static long __kmp_registration_flag = 0;
6322 static char *__kmp_registration_str = NULL;
6325 static inline char *__kmp_reg_status_name() {
6330 return __kmp_str_format("__KMP_REGISTERED_LIB_%d", (int)getpid());
6333 void __kmp_register_library_startup(void) {
6335 char *name = __kmp_reg_status_name();
6341 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6342 __kmp_initialize_system_tick();
6344 __kmp_read_system_time(&time.dtime);
6345 __kmp_registration_flag = 0xCAFE0000L | (time.ltime & 0x0000FFFFL);
6346 __kmp_registration_str =
6347 __kmp_str_format("%p-%lx-%s", &__kmp_registration_flag,
6348 __kmp_registration_flag, KMP_LIBRARY_FILE);
6350 KA_TRACE(50, (
"__kmp_register_library_startup: %s=\"%s\"\n", name,
6351 __kmp_registration_str));
6358 __kmp_env_set(name, __kmp_registration_str, 0);
6360 value = __kmp_env_get(name);
6361 if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6371 char *flag_addr_str = NULL;
6372 char *flag_val_str = NULL;
6373 char const *file_name = NULL;
6374 __kmp_str_split(tail, '-', &flag_addr_str, &tail);
6375 __kmp_str_split(tail, '-', &flag_val_str, &tail);
6378 long *flag_addr = 0;
6380 KMP_SSCANF(flag_addr_str, "%p", RCAST(void**, &flag_addr));
6381 KMP_SSCANF(flag_val_str, "%lx", &flag_val);
6382 if (flag_addr != 0 && flag_val != 0 && strcmp(file_name, "") != 0) {
6386 if (__kmp_is_address_mapped(flag_addr) && *flag_addr == flag_val) {
6400 file_name = "unknown library";
6405 char *duplicate_ok = __kmp_env_get("KMP_DUPLICATE_LIB_OK");
6406 if (!__kmp_str_match_true(duplicate_ok)) {
6408 __kmp_fatal(KMP_MSG(DuplicateLibrary, KMP_LIBRARY_FILE, file_name),
6409 KMP_HNT(DuplicateLibrary), __kmp_msg_null);
6411 KMP_INTERNAL_FREE(duplicate_ok);
6412 __kmp_duplicate_library_ok = 1;
6417 __kmp_env_unset(name);
6419 default: { KMP_DEBUG_ASSERT(0); } break;
6422 KMP_INTERNAL_FREE((void *)value);
6424 KMP_INTERNAL_FREE((void *)name);
6428 void __kmp_unregister_library(void) {
6430 char *name = __kmp_reg_status_name();
6431 char *value = __kmp_env_get(name);
6433 KMP_DEBUG_ASSERT(__kmp_registration_flag != 0);
6434 KMP_DEBUG_ASSERT(__kmp_registration_str != NULL);
6435 if (value != NULL && strcmp(value, __kmp_registration_str) == 0) {
6437 __kmp_env_unset(name);
6440 KMP_INTERNAL_FREE(__kmp_registration_str);
6441 KMP_INTERNAL_FREE(value);
6442 KMP_INTERNAL_FREE(name);
6444 __kmp_registration_flag = 0;
6445 __kmp_registration_str = NULL;
6452 #if KMP_MIC_SUPPORTED
6454 static void __kmp_check_mic_type() {
6455 kmp_cpuid_t cpuid_state = {0};
6456 kmp_cpuid_t *cs_p = &cpuid_state;
6457 __kmp_x86_cpuid(1, 0, cs_p);
6459 if ((cs_p->eax & 0xff0) == 0xB10) {
6460 __kmp_mic_type = mic2;
6461 } else if ((cs_p->eax & 0xf0ff0) == 0x50670) {
6462 __kmp_mic_type = mic3;
6464 __kmp_mic_type = non_mic;
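// One-time serial initialization: validate locks, register the library,
// initialize the global/atomic/bootstrap locks, size __kmp_threads and
// __kmp_root, read the environment, and register the initial (uber) root.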
6470 static void __kmp_do_serial_initialize(void) {
6474 KA_TRACE(10, (
"__kmp_do_serial_initialize: enter\n"));
6476 KMP_DEBUG_ASSERT(sizeof(kmp_int32) == 4);
6477 KMP_DEBUG_ASSERT(sizeof(kmp_uint32) == 4);
6478 KMP_DEBUG_ASSERT(sizeof(kmp_int64) == 8);
6479 KMP_DEBUG_ASSERT(sizeof(kmp_uint64) == 8);
6480 KMP_DEBUG_ASSERT(sizeof(kmp_intptr_t) == sizeof(void *));
6486 __kmp_validate_locks();
6489 __kmp_init_allocator();
6494 __kmp_register_library_startup();
6497 if (TCR_4(__kmp_global.g.g_done)) {
6498 KA_TRACE(10, (
"__kmp_do_serial_initialize: reinitialization of library\n"));
6501 __kmp_global.g.g_abort = 0;
6502 TCW_SYNC_4(__kmp_global.g.g_done, FALSE);
6505 #if KMP_USE_ADAPTIVE_LOCKS
6506 #if KMP_DEBUG_ADAPTIVE_LOCKS
6507 __kmp_init_speculative_stats();
6510 #if KMP_STATS_ENABLED
6513 __kmp_init_lock(&__kmp_global_lock);
6514 __kmp_init_queuing_lock(&__kmp_dispatch_lock);
6515 __kmp_init_lock(&__kmp_debug_lock);
6516 __kmp_init_atomic_lock(&__kmp_atomic_lock);
6517 __kmp_init_atomic_lock(&__kmp_atomic_lock_1i);
6518 __kmp_init_atomic_lock(&__kmp_atomic_lock_2i);
6519 __kmp_init_atomic_lock(&__kmp_atomic_lock_4i);
6520 __kmp_init_atomic_lock(&__kmp_atomic_lock_4r);
6521 __kmp_init_atomic_lock(&__kmp_atomic_lock_8i);
6522 __kmp_init_atomic_lock(&__kmp_atomic_lock_8r);
6523 __kmp_init_atomic_lock(&__kmp_atomic_lock_8c);
6524 __kmp_init_atomic_lock(&__kmp_atomic_lock_10r);
6525 __kmp_init_atomic_lock(&__kmp_atomic_lock_16r);
6526 __kmp_init_atomic_lock(&__kmp_atomic_lock_16c);
6527 __kmp_init_atomic_lock(&__kmp_atomic_lock_20c);
6528 __kmp_init_atomic_lock(&__kmp_atomic_lock_32c);
6529 __kmp_init_bootstrap_lock(&__kmp_forkjoin_lock);
6530 __kmp_init_bootstrap_lock(&__kmp_exit_lock);
6532 __kmp_init_bootstrap_lock(&__kmp_monitor_lock);
6534 __kmp_init_bootstrap_lock(&__kmp_tp_cached_lock);
6538 __kmp_runtime_initialize();
6540 #if KMP_MIC_SUPPORTED
6541 __kmp_check_mic_type();
6548 __kmp_abort_delay = 0;
6552 __kmp_dflt_team_nth_ub = __kmp_xproc;
6553 if (__kmp_dflt_team_nth_ub < KMP_MIN_NTH) {
6554 __kmp_dflt_team_nth_ub = KMP_MIN_NTH;
6556 if (__kmp_dflt_team_nth_ub > __kmp_sys_max_nth) {
6557 __kmp_dflt_team_nth_ub = __kmp_sys_max_nth;
6559 __kmp_max_nth = __kmp_sys_max_nth;
6560 __kmp_cg_max_nth = __kmp_sys_max_nth;
6561 __kmp_teams_max_nth = __kmp_xproc;
6562 if (__kmp_teams_max_nth > __kmp_sys_max_nth) {
6563 __kmp_teams_max_nth = __kmp_sys_max_nth;
6568 __kmp_dflt_blocktime = KMP_DEFAULT_BLOCKTIME;
6570 __kmp_monitor_wakeups =
6571 KMP_WAKEUPS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6572 __kmp_bt_intervals =
6573 KMP_INTERVALS_FROM_BLOCKTIME(__kmp_dflt_blocktime, __kmp_monitor_wakeups);
6576 __kmp_library = library_throughput;
6578 __kmp_static = kmp_sch_static_balanced;
6585 #if KMP_FAST_REDUCTION_BARRIER
6586 #define kmp_reduction_barrier_gather_bb ((int)1)
6587 #define kmp_reduction_barrier_release_bb ((int)1)
6588 #define kmp_reduction_barrier_gather_pat bp_hyper_bar
6589 #define kmp_reduction_barrier_release_pat bp_hyper_bar
6590 #endif // KMP_FAST_REDUCTION_BARRIER
6591 for (i = bs_plain_barrier; i < bs_last_barrier; i++) {
6592 __kmp_barrier_gather_branch_bits[i] = __kmp_barrier_gather_bb_dflt;
6593 __kmp_barrier_release_branch_bits[i] = __kmp_barrier_release_bb_dflt;
6594 __kmp_barrier_gather_pattern[i] = __kmp_barrier_gather_pat_dflt;
6595 __kmp_barrier_release_pattern[i] = __kmp_barrier_release_pat_dflt;
6596 #if KMP_FAST_REDUCTION_BARRIER
6597 if (i == bs_reduction_barrier) {
6599 __kmp_barrier_gather_branch_bits[i] = kmp_reduction_barrier_gather_bb;
6600 __kmp_barrier_release_branch_bits[i] = kmp_reduction_barrier_release_bb;
6601 __kmp_barrier_gather_pattern[i] = kmp_reduction_barrier_gather_pat;
6602 __kmp_barrier_release_pattern[i] = kmp_reduction_barrier_release_pat;
6604 #endif // KMP_FAST_REDUCTION_BARRIER
6606 #if KMP_FAST_REDUCTION_BARRIER
6607 #undef kmp_reduction_barrier_release_pat
6608 #undef kmp_reduction_barrier_gather_pat
6609 #undef kmp_reduction_barrier_release_bb
6610 #undef kmp_reduction_barrier_gather_bb
6611 #endif // KMP_FAST_REDUCTION_BARRIER
6612 #if KMP_MIC_SUPPORTED
6613 if (__kmp_mic_type == mic2) {
6615 __kmp_barrier_gather_branch_bits[bs_plain_barrier] = 3;
6616 __kmp_barrier_release_branch_bits[bs_forkjoin_barrier] =
6618 __kmp_barrier_gather_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6619 __kmp_barrier_release_pattern[bs_forkjoin_barrier] = bp_hierarchical_bar;
6621 #if KMP_FAST_REDUCTION_BARRIER
6622 if (__kmp_mic_type == mic2) {
6623 __kmp_barrier_gather_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6624 __kmp_barrier_release_pattern[bs_reduction_barrier] = bp_hierarchical_bar;
6626 #endif // KMP_FAST_REDUCTION_BARRIER
6627 #endif // KMP_MIC_SUPPORTED
6631 __kmp_env_checks = TRUE;
6633 __kmp_env_checks = FALSE;
6637 __kmp_foreign_tp = TRUE;
6639 __kmp_global.g.g_dynamic = FALSE;
6640 __kmp_global.g.g_dynamic_mode = dynamic_default;
6642 __kmp_env_initialize(NULL);
6646 char const *val = __kmp_env_get("KMP_DUMP_CATALOG");
6647 if (__kmp_str_match_true(val)) {
6648 kmp_str_buf_t buffer;
6649 __kmp_str_buf_init(&buffer);
6650 __kmp_i18n_dump_catalog(&buffer);
6651 __kmp_printf("%s", buffer.str);
6652 __kmp_str_buf_free(&buffer);
6654 __kmp_env_free(&val);
6657 __kmp_threads_capacity =
6658 __kmp_initial_threads_capacity(__kmp_dflt_team_nth_ub);
6660 __kmp_tp_capacity = __kmp_default_tp_capacity(
6661 __kmp_dflt_team_nth_ub, __kmp_max_nth, __kmp_allThreadsSpecified);
6666 KMP_DEBUG_ASSERT(__kmp_thread_pool == NULL);
6667 KMP_DEBUG_ASSERT(__kmp_thread_pool_insert_pt == NULL);
6668 KMP_DEBUG_ASSERT(__kmp_team_pool == NULL);
6669 __kmp_thread_pool = NULL;
6670 __kmp_thread_pool_insert_pt = NULL;
6671 __kmp_team_pool = NULL;
6678 (sizeof(kmp_info_t *) + sizeof(kmp_root_t *)) * __kmp_threads_capacity +
6680 __kmp_threads = (kmp_info_t **)__kmp_allocate(size);
6681 __kmp_root = (kmp_root_t **)((char *)__kmp_threads +
6682 sizeof(kmp_info_t *) * __kmp_threads_capacity);
6685 KMP_DEBUG_ASSERT(__kmp_all_nth ==
6687 KMP_DEBUG_ASSERT(__kmp_nth == 0);
6692 gtid = __kmp_register_root(TRUE);
6693 KA_TRACE(10, (
"__kmp_do_serial_initialize T#%d\n", gtid));
6694 KMP_ASSERT(KMP_UBER_GTID(gtid));
6695 KMP_ASSERT(KMP_INITIAL_GTID(gtid));
6699 __kmp_common_initialize();
6703 __kmp_register_atfork();
6706 #if !KMP_DYNAMIC_LIB
6710 int rc = atexit(__kmp_internal_end_atexit);
6712 __kmp_fatal(KMP_MSG(FunctionError, "atexit()"), KMP_ERR(rc),
6718 #if KMP_HANDLE_SIGNALS
6724 __kmp_install_signals(FALSE);
6727 __kmp_install_signals(TRUE);
6732 __kmp_init_counter++;
6734 __kmp_init_serial = TRUE;
6736 if (__kmp_settings) {
6740 if (__kmp_display_env || __kmp_display_env_verbose) {
6741 __kmp_env_print_2();
6750 KA_TRACE(10, (
"__kmp_do_serial_initialize: exit\n"));
6753 void __kmp_serial_initialize(void) {
6754 if (__kmp_init_serial) {
6757 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6758 if (__kmp_init_serial) {
6759 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6762 __kmp_do_serial_initialize();
6763 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
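// Middle initialization: set up affinity, derive the default team size from
// the available processors (or __kmp_ncores when built with KMP_DFLT_NTH_CORES),
// and propagate a changed default nproc ICV to already-registered threads.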
6766 static void __kmp_do_middle_initialize(void) {
6768 int prev_dflt_team_nth;
6770 if (!__kmp_init_serial) {
6771 __kmp_do_serial_initialize();
6774 KA_TRACE(10, (
"__kmp_middle_initialize: enter\n"));
6778 prev_dflt_team_nth = __kmp_dflt_team_nth;
6780 #if KMP_AFFINITY_SUPPORTED
6783 __kmp_affinity_initialize();
6787 for (i = 0; i < __kmp_threads_capacity; i++) {
6788 if (TCR_PTR(__kmp_threads[i]) != NULL) {
6789 __kmp_affinity_set_init_mask(i, TRUE);
6794 KMP_ASSERT(__kmp_xproc > 0);
6795 if (__kmp_avail_proc == 0) {
6796 __kmp_avail_proc = __kmp_xproc;
6802 while ((j < __kmp_nested_nth.used) && !__kmp_nested_nth.nth[j]) {
6803 __kmp_nested_nth.nth[j] = __kmp_dflt_team_nth = __kmp_dflt_team_nth_ub =
6808 if (__kmp_dflt_team_nth == 0) {
6809 #ifdef KMP_DFLT_NTH_CORES
6811 __kmp_dflt_team_nth = __kmp_ncores;
6812 KA_TRACE(20, (
"__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6813 "__kmp_ncores (%d)\n",
6814 __kmp_dflt_team_nth));
6817 __kmp_dflt_team_nth = __kmp_avail_proc;
6818 KA_TRACE(20, (
"__kmp_middle_initialize: setting __kmp_dflt_team_nth = "
6819 "__kmp_avail_proc(%d)\n",
6820 __kmp_dflt_team_nth));
6824 if (__kmp_dflt_team_nth < KMP_MIN_NTH) {
6825 __kmp_dflt_team_nth = KMP_MIN_NTH;
6827 if (__kmp_dflt_team_nth > __kmp_sys_max_nth) {
6828 __kmp_dflt_team_nth = __kmp_sys_max_nth;
6833 KMP_DEBUG_ASSERT(__kmp_dflt_team_nth <= __kmp_dflt_team_nth_ub);
6835 if (__kmp_dflt_team_nth != prev_dflt_team_nth) {
6840 for (i = 0; i < __kmp_threads_capacity; i++) {
6841 kmp_info_t *thread = __kmp_threads[i];
6844 if (thread->th.th_current_task->td_icvs.nproc != 0)
6847 set__nproc(__kmp_threads[i], __kmp_dflt_team_nth);
6852 ("__kmp_middle_initialize: final value for __kmp_dflt_team_nth = %d\n",
6853 __kmp_dflt_team_nth));
6855 #ifdef KMP_ADJUST_BLOCKTIME
6857 if (!__kmp_env_blocktime && (__kmp_avail_proc > 0)) {
6858 KMP_DEBUG_ASSERT(__kmp_avail_proc > 0);
6859 if (__kmp_nth > __kmp_avail_proc) {
6860 __kmp_zero_bt = TRUE;
6866 TCW_SYNC_4(__kmp_init_middle, TRUE);
6868 KA_TRACE(10, (
"__kmp_do_middle_initialize: exit\n"));
6871 void __kmp_middle_initialize(void) {
6872 if (__kmp_init_middle) {
6875 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6876 if (__kmp_init_middle) {
6877 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6880 __kmp_do_middle_initialize();
6881 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
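// Parallel initialization is done lazily before the first parallel region:
// finish middle init, save the x87/MXCSR state on x86, install signal
// handlers, and pick the default dynamic adjustment mode.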
6884 void __kmp_parallel_initialize(void) {
6885 int gtid = __kmp_entry_gtid();
6888 if (TCR_4(__kmp_init_parallel))
6890 __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
6891 if (TCR_4(__kmp_init_parallel)) {
6892 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6897 if (TCR_4(__kmp_global.g.g_done)) {
6900 ("__kmp_parallel_initialize: attempt to init while shutting down\n"));
6901 __kmp_infinite_loop();
6907 if (!__kmp_init_middle) {
6908 __kmp_do_middle_initialize();
6910 __kmp_resume_if_hard_paused();
6913 KA_TRACE(10, (
"__kmp_parallel_initialize: enter\n"));
6914 KMP_ASSERT(KMP_UBER_GTID(gtid));
6916 #if KMP_ARCH_X86 || KMP_ARCH_X86_64
6919 __kmp_store_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
6920 __kmp_store_mxcsr(&__kmp_init_mxcsr);
6921 __kmp_init_mxcsr &= KMP_X86_MXCSR_MASK;
6925 #if KMP_HANDLE_SIGNALS
6927 __kmp_install_signals(TRUE);
6931 __kmp_suspend_initialize();
6933 #if defined(USE_LOAD_BALANCE)
6934 if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6935 __kmp_global.g.g_dynamic_mode = dynamic_load_balance;
6938 if (__kmp_global.g.g_dynamic_mode == dynamic_default) {
6939 __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
6943 if (__kmp_version) {
6944 __kmp_print_version_2();
6948 TCW_SYNC_4(__kmp_init_parallel, TRUE);
6951 KA_TRACE(10, (
"__kmp_parallel_initialize: exit\n"));
6953 __kmp_release_bootstrap_lock(&__kmp_initz_lock);
6958 void __kmp_run_before_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
6960 kmp_disp_t *dispatch;
6965 this_thr->th.th_local.this_construct = 0;
6966 #if KMP_CACHE_MANAGE
6967 KMP_CACHE_PREFETCH(&this_thr->th.th_bar[bs_forkjoin_barrier].bb.b_arrived);
6969 dispatch = (kmp_disp_t *)TCR_PTR(this_thr->th.th_dispatch);
6970 KMP_DEBUG_ASSERT(dispatch);
6971 KMP_DEBUG_ASSERT(team->t.t_dispatch);
6975 dispatch->th_disp_index = 0;
6976 dispatch->th_doacross_buf_idx = 0;
6977 if (__kmp_env_consistency_check)
6978 __kmp_push_parallel(gtid, team->t.t_ident);
6983 void __kmp_run_after_invoked_task(int gtid, int tid, kmp_info_t *this_thr,
6985 if (__kmp_env_consistency_check)
6986 __kmp_pop_parallel(gtid, team->t.t_ident);
6988 __kmp_finish_implicit_task(this_thr);
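// Invoked by every team member: wraps the microtask call with consistency
// checks, ITT stack markers, OMPT implicit-task callbacks, and stats timers.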
6991 int __kmp_invoke_task_func(int gtid) {
6993 int tid = __kmp_tid_from_gtid(gtid);
6994 kmp_info_t *this_thr = __kmp_threads[gtid];
6995 kmp_team_t *team = this_thr->th.th_team;
6997 __kmp_run_before_invoked_task(gtid, tid, this_thr, team);
6999 if (__itt_stack_caller_create_ptr) {
7000 __kmp_itt_stack_callee_enter(
7002 team->t.t_stack_id);
7005 #if INCLUDE_SSC_MARKS
7006 SSC_MARK_INVOKING();
7011 void **exit_frame_p;
7012 ompt_data_t *my_task_data;
7013 ompt_data_t *my_parallel_data;
7016 if (ompt_enabled.enabled) {
7018 team->t.t_implicit_task_taskdata[tid].ompt_task_info.frame.exit_frame.ptr);
7020 exit_frame_p = &dummy;
7024 &(team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data);
7025 my_parallel_data = &(team->t.ompt_team_info.parallel_data);
7026 if (ompt_enabled.ompt_callback_implicit_task) {
7027 ompt_team_size = team->t.t_nproc;
7028 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7029 ompt_scope_begin, my_parallel_data, my_task_data, ompt_team_size,
7030 __kmp_tid_from_gtid(gtid), ompt_task_implicit);
7031 OMPT_CUR_TASK_INFO(this_thr)->thread_num = __kmp_tid_from_gtid(gtid);
7035 #if KMP_STATS_ENABLED
7037 if (previous_state == stats_state_e::TEAMS_REGION) {
7038 KMP_PUSH_PARTITIONED_TIMER(OMP_teams);
7040 KMP_PUSH_PARTITIONED_TIMER(OMP_parallel);
7042 KMP_SET_THREAD_STATE(IMPLICIT_TASK);
7045 rc = __kmp_invoke_microtask((microtask_t)TCR_SYNC_PTR(team->t.t_pkfn), gtid,
7046 tid, (int)team->t.t_argc, (void **)team->t.t_argv
7053 *exit_frame_p = NULL;
7054 this_thr->th.ompt_thread_info.parallel_flags |= ompt_parallel_team;
7057 #if KMP_STATS_ENABLED
7058 if (previous_state == stats_state_e::TEAMS_REGION) {
7059 KMP_SET_THREAD_STATE(previous_state);
7061 KMP_POP_PARTITIONED_TIMER();
7065 if (__itt_stack_caller_create_ptr) {
7066 __kmp_itt_stack_callee_leave(
7068 team->t.t_stack_id);
7071 __kmp_run_after_invoked_task(gtid, tid, this_thr, team);
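// Runs on the master thread of each team of a teams construct: push a new
// contention-group root carrying the thread_limit ICV, then fork and join the
// inner parallel region that executes th_teams_microtask.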
7076 void __kmp_teams_master(int gtid) {
7078 kmp_info_t *thr = __kmp_threads[gtid];
7079 kmp_team_t *team = thr->th.th_team;
7080 ident_t *loc = team->t.t_ident;
7081 thr->th.th_set_nproc = thr->th.th_teams_size.nth;
7082 KMP_DEBUG_ASSERT(thr->th.th_teams_microtask);
7083 KMP_DEBUG_ASSERT(thr->th.th_set_nproc);
7084 KA_TRACE(20, (
"__kmp_teams_master: T#%d, Tid %d, microtask %p\n", gtid,
7085 __kmp_tid_from_gtid(gtid), thr->th.th_teams_microtask));
7088 kmp_cg_root_t *tmp = (kmp_cg_root_t *)__kmp_allocate(sizeof(kmp_cg_root_t));
7091 tmp->cg_thread_limit = thr->th.th_current_task->td_icvs.thread_limit;
7092 tmp->cg_nthreads = 1;
7093 KA_TRACE(100, (
"__kmp_teams_master: Thread %p created node %p and init"
7094 " cg_nthreads to 1\n",
7096 tmp->up = thr->th.th_cg_roots;
7097 thr->th.th_cg_roots = tmp;
7101 #if INCLUDE_SSC_MARKS
7104 __kmp_fork_call(loc, gtid, fork_context_intel, team->t.t_argc,
7105 (microtask_t)thr->th.th_teams_microtask,
7106 VOLATILE_CAST(launch_t) __kmp_invoke_task_func, NULL);
7107 #if INCLUDE_SSC_MARKS
7111 if (thr->th.th_team_nproc < thr->th.th_teams_size.nth)
7112 thr->th.th_teams_size.nth = thr->th.th_team_nproc;
7115 __kmp_join_call(loc, gtid
7124 int __kmp_invoke_teams_master(int gtid) {
7125 kmp_info_t *this_thr = __kmp_threads[gtid];
7126 kmp_team_t *team = this_thr->th.th_team;
7128 if (!__kmp_threads[gtid]->th.th_team->t.t_serialized)
7129 KMP_DEBUG_ASSERT((void *)__kmp_threads[gtid]->th.th_team->t.t_pkfn ==
7130 (void *)__kmp_teams_master);
7132 __kmp_run_before_invoked_task(gtid, 0, this_thr, team);
7134 int tid = __kmp_tid_from_gtid(gtid);
7135 ompt_data_t *task_data =
7136 &team->t.t_implicit_task_taskdata[tid].ompt_task_info.task_data;
7137 ompt_data_t *parallel_data = &team->t.ompt_team_info.parallel_data;
7138 if (ompt_enabled.ompt_callback_implicit_task) {
7139 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7140 ompt_scope_begin, parallel_data, task_data, team->t.t_nproc, tid,
7142 OMPT_CUR_TASK_INFO(this_thr)->thread_num = tid;
7145 __kmp_teams_master(gtid);
7147 this_thr->th.ompt_thread_info.parallel_flags |= ompt_parallel_league;
7149 __kmp_run_after_invoked_task(gtid, 0, this_thr, team);
7158 void __kmp_push_num_threads(ident_t *id, int gtid, int num_threads) {
7159 kmp_info_t *thr = __kmp_threads[gtid];
7161 if (num_threads > 0)
7162 thr->th.th_set_nproc = num_threads;
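// Record the num_teams/thread_limit clauses of a teams construct, clamping to
// __kmp_teams_max_nth and deriving the per-team thread count from the number
// of available processors when the clause is absent.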
7167 void __kmp_push_num_teams(ident_t *id, int gtid, int num_teams,
7169 kmp_info_t *thr = __kmp_threads[gtid];
7170 KMP_DEBUG_ASSERT(num_teams >= 0);
7171 KMP_DEBUG_ASSERT(num_threads >= 0);
7175 if (num_teams > __kmp_teams_max_nth) {
7176 if (!__kmp_reserve_warn) {
7177 __kmp_reserve_warn = 1;
7178 __kmp_msg(kmp_ms_warning,
7179 KMP_MSG(CantFormThrTeam, num_teams, __kmp_teams_max_nth),
7180 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7182 num_teams = __kmp_teams_max_nth;
7186 thr->th.th_set_nproc = thr->th.th_teams_size.nteams = num_teams;
7189 if (!TCR_4(__kmp_init_middle))
7190 __kmp_middle_initialize();
7191 KMP_DEBUG_ASSERT(__kmp_avail_proc);
7192 KMP_DEBUG_ASSERT(__kmp_dflt_team_nth);
7193 if (num_threads == 0) {
7194 num_threads = __kmp_avail_proc / num_teams;
7198 if (num_threads > __kmp_dflt_team_nth) {
7199 num_threads = __kmp_dflt_team_nth;
7201 if (num_threads > thr->th.th_current_task->td_icvs.thread_limit) {
7202 num_threads = thr->th.th_current_task->td_icvs.thread_limit;
7204 if (num_teams * num_threads > __kmp_teams_max_nth) {
7205 num_threads = __kmp_teams_max_nth / num_teams;
7210 thr->th.th_current_task->td_icvs.thread_limit = num_threads;
7212 if (num_threads > __kmp_dflt_team_nth) {
7213 num_threads = __kmp_dflt_team_nth;
7215 if (num_teams * num_threads > __kmp_teams_max_nth) {
7216 int new_threads = __kmp_teams_max_nth / num_teams;
7217 if (!__kmp_reserve_warn) {
7218 __kmp_reserve_warn = 1;
7219 __kmp_msg(kmp_ms_warning,
7220 KMP_MSG(CantFormThrTeam, num_threads, new_threads),
7221 KMP_HNT(Unset_ALL_THREADS), __kmp_msg_null);
7223 num_threads = new_threads;
7226 thr->th.th_teams_size.nth = num_threads;
7230 void __kmp_push_proc_bind(ident_t *id, int gtid, kmp_proc_bind_t proc_bind) {
7231 kmp_info_t *thr = __kmp_threads[gtid];
7232 thr->th.th_set_proc_bind = proc_bind;
7237 void __kmp_internal_fork(ident_t *id, int gtid, kmp_team_t *team) {
7238 kmp_info_t *this_thr = __kmp_threads[gtid];
7244 KMP_DEBUG_ASSERT(team);
7245 KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7246 KMP_ASSERT(KMP_MASTER_GTID(gtid));
7249 team->t.t_construct = 0;
7250 team->t.t_ordered.dt.t_value =
7254 KMP_DEBUG_ASSERT(team->t.t_disp_buffer);
7255 if (team->t.t_max_nproc > 1) {
7257 for (i = 0; i < __kmp_dispatch_num_buffers; ++i) {
7258 team->t.t_disp_buffer[i].buffer_index = i;
7259 team->t.t_disp_buffer[i].doacross_buf_idx = i;
7262 team->t.t_disp_buffer[0].buffer_index = 0;
7263 team->t.t_disp_buffer[0].doacross_buf_idx = 0;
7267 KMP_ASSERT(this_thr->th.th_team == team);
7270 for (f = 0; f < team->t.t_nproc; f++) {
7271 KMP_DEBUG_ASSERT(team->t.t_threads[f] &&
7272 team->t.t_threads[f]->th.th_team_nproc == team->t.t_nproc);
7277 __kmp_fork_barrier(gtid, 0);
7280 void __kmp_internal_join(ident_t *id, int gtid, kmp_team_t *team) {
7281 kmp_info_t *this_thr = __kmp_threads[gtid];
7283 KMP_DEBUG_ASSERT(team);
7284 KMP_DEBUG_ASSERT(this_thr->th.th_team == team);
7285 KMP_ASSERT(KMP_MASTER_GTID(gtid));
7291 if (__kmp_threads[gtid] &&
7292 __kmp_threads[gtid]->th.th_team_nproc != team->t.t_nproc) {
7293 __kmp_printf("GTID: %d, __kmp_threads[%d]=%p\n", gtid, gtid,
7294 __kmp_threads[gtid]);
7295 __kmp_printf("__kmp_threads[%d]->th.th_team_nproc=%d, TEAM: %p, "
7296 "team->t.t_nproc=%d\n",
7297 gtid, __kmp_threads[gtid]->th.th_team_nproc, team,
7299 __kmp_print_structure();
7301 KMP_DEBUG_ASSERT(__kmp_threads[gtid] &&
7302 __kmp_threads[gtid]->th.th_team_nproc == team->t.t_nproc);
7305 __kmp_join_barrier(gtid);
7307 if (ompt_enabled.enabled &&
7308 this_thr->th.ompt_thread_info.state == ompt_state_wait_barrier_implicit) {
7309 int ds_tid = this_thr->th.th_info.ds.ds_tid;
7310 ompt_data_t *task_data = OMPT_CUR_TASK_DATA(this_thr);
7311 this_thr->th.ompt_thread_info.state = ompt_state_overhead;
7313 void *codeptr = NULL;
7314 if (KMP_MASTER_TID(ds_tid) &&
7315 (ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait) ||
7316 ompt_callbacks.ompt_callback(ompt_callback_sync_region)))
7317 codeptr = OMPT_CUR_TEAM_INFO(this_thr)->master_return_address;
7319 if (ompt_enabled.ompt_callback_sync_region_wait) {
7320 ompt_callbacks.ompt_callback(ompt_callback_sync_region_wait)(
7321 ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
7324 if (ompt_enabled.ompt_callback_sync_region) {
7325 ompt_callbacks.ompt_callback(ompt_callback_sync_region)(
7326 ompt_sync_region_barrier_implicit, ompt_scope_end, NULL, task_data,
7330 if (!KMP_MASTER_TID(ds_tid) && ompt_enabled.ompt_callback_implicit_task) {
7331 ompt_callbacks.ompt_callback(ompt_callback_implicit_task)(
7332 ompt_scope_end, NULL, task_data, 0, ds_tid, ompt_task_implicit);
7338 KMP_ASSERT(this_thr->th.th_team == team);
7343 #ifdef USE_LOAD_BALANCE
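// Dynamic mode "load balance": trim the requested team size using the current
// system load so the machine is not oversubscribed. The helpers below count
// the active threads of the hot team and compute the adjusted thread count.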
7347 static int __kmp_active_hot_team_nproc(kmp_root_t *root) {
7350 kmp_team_t *hot_team;
7352 if (root->r.r_active) {
7355 hot_team = root->r.r_hot_team;
7356 if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
7357 return hot_team->t.t_nproc - 1;
7362 for (i = 1; i < hot_team->t.t_nproc; i++) {
7363 if (hot_team->t.t_threads[i]->th.th_active) {
7372 static int __kmp_load_balance_nproc(kmp_root_t *root, int set_nproc) {
7375 int hot_team_active;
7376 int team_curr_active;
7379 KB_TRACE(20, (
"__kmp_load_balance_nproc: called root:%p set_nproc:%d\n", root,
7381 KMP_DEBUG_ASSERT(root);
7382 KMP_DEBUG_ASSERT(root->r.r_root_team->t.t_threads[0]
7383 ->th.th_current_task->td_icvs.dynamic == TRUE);
7384 KMP_DEBUG_ASSERT(set_nproc > 1);
7386 if (set_nproc == 1) {
7387 KB_TRACE(20, (
"__kmp_load_balance_nproc: serial execution.\n"));
7396 pool_active = __kmp_thread_pool_active_nth;
7397 hot_team_active = __kmp_active_hot_team_nproc(root);
7398 team_curr_active = pool_active + hot_team_active + 1;
7401 system_active = __kmp_get_load_balance(__kmp_avail_proc + team_curr_active);
7402 KB_TRACE(30, (
"__kmp_load_balance_nproc: system active = %d pool active = %d "
7403 "hot team active = %d\n",
7404 system_active, pool_active, hot_team_active));
7406 if (system_active < 0) {
7410 __kmp_global.g.g_dynamic_mode = dynamic_thread_limit;
7411 KMP_WARNING(CantLoadBalUsing, "KMP_DYNAMIC_MODE=thread limit");
7414 retval = __kmp_avail_proc - __kmp_nth +
7415 (root->r.r_active ? 1 : root->r.r_hot_team->t.t_nproc);
7416 if (retval > set_nproc) {
7419 if (retval < KMP_MIN_NTH) {
7420 retval = KMP_MIN_NTH;
7423 KB_TRACE(20, (
"__kmp_load_balance_nproc: thread limit exit. retval:%d\n",
7431 if (system_active < team_curr_active) {
7432 system_active = team_curr_active;
7434 retval = __kmp_avail_proc - system_active + team_curr_active;
7435 if (retval > set_nproc) {
7438 if (retval < KMP_MIN_NTH) {
7439 retval = KMP_MIN_NTH;
7442 KB_TRACE(20, (
"__kmp_load_balance_nproc: exit. retval:%d\n", retval));
7451 void __kmp_cleanup(void) {
7454 KA_TRACE(10, (
"__kmp_cleanup: enter\n"));
7456 if (TCR_4(__kmp_init_parallel)) {
7457 #if KMP_HANDLE_SIGNALS
7458 __kmp_remove_signals();
7460 TCW_4(__kmp_init_parallel, FALSE);
7463 if (TCR_4(__kmp_init_middle)) {
7464 #if KMP_AFFINITY_SUPPORTED
7465 __kmp_affinity_uninitialize();
7467 __kmp_cleanup_hierarchy();
7468 TCW_4(__kmp_init_middle, FALSE);
7471 KA_TRACE(10, (
"__kmp_cleanup: go serial cleanup\n"));
7473 if (__kmp_init_serial) {
7474 __kmp_runtime_destroy();
7475 __kmp_init_serial = FALSE;
7478 __kmp_cleanup_threadprivate_caches();
7480 for (f = 0; f < __kmp_threads_capacity; f++) {
7481 if (__kmp_root[f] != NULL) {
7482 __kmp_free(__kmp_root[f]);
7483 __kmp_root[f] = NULL;
7486 __kmp_free(__kmp_threads);
7489 __kmp_threads = NULL;
7491 __kmp_threads_capacity = 0;
7493 #if KMP_USE_DYNAMIC_LOCK
7494 __kmp_cleanup_indirect_user_locks();
7496 __kmp_cleanup_user_locks();
7499 #if KMP_AFFINITY_SUPPORTED
7500 KMP_INTERNAL_FREE(CCAST(char *, __kmp_cpuinfo_file));
7501 __kmp_cpuinfo_file = NULL;
7504 #if KMP_USE_ADAPTIVE_LOCKS
7505 #if KMP_DEBUG_ADAPTIVE_LOCKS
7506 __kmp_print_speculative_stats();
7509 KMP_INTERNAL_FREE(__kmp_nested_nth.nth);
7510 __kmp_nested_nth.nth = NULL;
7511 __kmp_nested_nth.size = 0;
7512 __kmp_nested_nth.used = 0;
7513 KMP_INTERNAL_FREE(__kmp_nested_proc_bind.bind_types);
7514 __kmp_nested_proc_bind.bind_types = NULL;
7515 __kmp_nested_proc_bind.size = 0;
7516 __kmp_nested_proc_bind.used = 0;
7517 if (__kmp_affinity_format) {
7518 KMP_INTERNAL_FREE(__kmp_affinity_format);
7519 __kmp_affinity_format = NULL;
7522 __kmp_i18n_catclose();
7524 #if KMP_USE_HIER_SCHED
7525 __kmp_hier_scheds.deallocate();
7528 #if KMP_STATS_ENABLED
7532 KA_TRACE(10, (
"__kmp_cleanup: exit\n"));
int __kmp_ignore_mppbeg(void) {
  char *env;
  if ((env = getenv("KMP_IGNORE_MPPBEG")) != NULL) {
    if (__kmp_str_match_false(env))
      return FALSE;
  }
  // By default __kmpc_begin() is no-op.
  return TRUE;
}

int __kmp_ignore_mppend(void) {
  char *env;
  if ((env = getenv("KMP_IGNORE_MPPEND")) != NULL) {
    if (__kmp_str_match_false(env))
      return FALSE;
  }
  // By default __kmpc_end() is no-op.
  return TRUE;
}
void __kmp_internal_begin(void) {
  int gtid;
  kmp_root_t *root;

  /* This is a very important step as it will register new sibling threads
     and assign these new uber threads a new gtid. */
  gtid = __kmp_entry_gtid();
  root = __kmp_threads[gtid]->th.th_root;
  KMP_ASSERT(KMP_UBER_GTID(gtid));

  if (root->r.r_begin)
    return;
  __kmp_acquire_lock(&root->r.r_begin_lock, gtid);
  if (root->r.r_begin) {
    __kmp_release_lock(&root->r.r_begin_lock, gtid);
    return;
  }

  root->r.r_begin = TRUE;

  __kmp_release_lock(&root->r.r_begin_lock, gtid);
}
/* ------------------------------------------------------------------------ */

void __kmp_user_set_library(enum library_type arg) {
  int gtid;
  kmp_root_t *root;
  kmp_info_t *thread;

  /* First, make sure we are initialized so we can get our gtid. */
  gtid = __kmp_entry_gtid();
  thread = __kmp_threads[gtid];

  root = thread->th.th_root;

  KA_TRACE(20, ("__kmp_user_set_library: enter T#%d, arg: %d, %d\n", gtid, arg,
                library_serial));
  if (root->r.r_in_parallel) { /* Must be called in the serial section of the
                                  top-level thread */
    KMP_WARNING(SetLibraryIncorrectCall);
    return;
  }

  switch (arg) {
  case library_serial:
    thread->th.th_set_nproc = 0;
    set__nproc(thread, 1);
    break;
  case library_turnaround:
    thread->th.th_set_nproc = 0;
    set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
                                           : __kmp_dflt_team_nth_ub);
    break;
  case library_throughput:
    thread->th.th_set_nproc = 0;
    set__nproc(thread, __kmp_dflt_team_nth ? __kmp_dflt_team_nth
                                           : __kmp_dflt_team_nth_ub);
    break;
  default:
    KMP_FATAL(UnknownLibraryType, arg);
  }

  __kmp_aux_set_library(arg);
}
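// Illustrative call chain (assumption; the public entry points live elsewhere
// in the library): a user call such as
//   kmp_set_library_throughput();   // or setting KMP_LIBRARY=throughput
// ends up here, clears th_set_nproc, picks the default team size, and then
// forwards the mode to __kmp_aux_set_library() below.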
void __kmp_aux_set_stacksize(size_t arg) {
  if (!__kmp_init_serial)
    __kmp_serial_initialize();

  if (arg & (0x1000 - 1)) { // round the size up to a 4K page boundary
    arg &= ~(0x1000 - 1);
    if (arg + 0x1000) // check for overflow if we round up
      arg += 0x1000;
  }

  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);

  /* Only change the default stacksize before the first parallel region. */
  if (!TCR_4(__kmp_init_parallel)) {
    size_t value = arg; /* argument is in bytes */

    if (value < __kmp_sys_min_stksize)
      value = __kmp_sys_min_stksize;
    else if (value > KMP_MAX_STKSIZE)
      value = KMP_MAX_STKSIZE;

    __kmp_stksize = value;

    __kmp_env_stksize = TRUE; /* remember the stack size was set explicitly */
  }

  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
}
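// Illustrative usage (assumption; the public wrappers are defined elsewhere):
//   kmp_set_stacksize_s((size_t)4 * 1024 * 1024); // request 4 MiB worker stacks
// funnels into __kmp_aux_set_stacksize() and only takes effect when it runs
// before the first parallel region; later calls leave __kmp_stksize unchanged.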
void __kmp_aux_set_library(enum library_type arg) {
  __kmp_library = arg;

  switch (__kmp_library) {
  case library_serial: {
    KMP_INFORM(LibraryIsSerial);
  } break;
  case library_turnaround:
    if (__kmp_use_yield == 1 && !__kmp_use_yield_exp_set)
      __kmp_use_yield = 2; // only yield when oversubscribed
    break;
  case library_throughput:
    if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME)
      __kmp_dflt_blocktime = 200;
    break;
  default:
    KMP_FATAL(UnknownLibraryType, arg);
  }
}
/* Getting team information common for all teams API */
// Returns NULL if not in a teams construct.
static kmp_team_t *__kmp_aux_get_team_info(int &teams_serialized) {
  kmp_info_t *thr = __kmp_entry_thread();
  teams_serialized = 0;
  if (thr->th.th_teams_microtask) {
    kmp_team_t *team = thr->th.th_team;
    int tlevel = thr->th.th_teams_level; // the level of the teams construct
    int ii = team->t.t_level;
    teams_serialized = team->t.t_serialized;
    int level = tlevel + 1;
    KMP_DEBUG_ASSERT(ii >= tlevel);
    while (ii > level) {
      for (teams_serialized = team->t.t_serialized;
           (teams_serialized > 0) && (ii > level); teams_serialized--, ii--) {
      }
      if (team->t.t_serialized && (!teams_serialized)) {
        team = team->t.t_parent;
        continue;
      }
      if (ii > level) {
        team = team->t.t_parent;
        ii--;
      }
    }
    return team;
  }
  return NULL;
}

int __kmp_aux_get_team_num() {
  int serialized;
  kmp_team_t *team = __kmp_aux_get_team_info(serialized);
  if (team) {
    if (serialized > 1)
      return 0; // teams region is serialized (one team of one thread)
    return team->t.t_master_tid;
  }
  return 0;
}

int __kmp_aux_get_num_teams() {
  int serialized;
  kmp_team_t *team = __kmp_aux_get_team_info(serialized);
  if (team) {
    if (serialized > 1)
      return 1;
    return team->t.t_parent->t.t_nproc;
  }
  return 1;
}
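// Illustrative mapping (assumption about the standard entry points): inside
//   #pragma omp teams num_teams(4)
// omp_get_num_teams() resolves through __kmp_aux_get_num_teams() to 4 and
// omp_get_team_num() through __kmp_aux_get_team_num() to 0..3 on each team's
// initial thread; outside a teams construct the two helpers return 1 and 0.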
// Table of the fields accepted in an OMP_AFFINITY_FORMAT / display-affinity
// format string.
typedef struct kmp_affinity_format_field_t {
  char short_name; // from spec, e.g. 'n' -> thread_num
  const char *long_name; // from spec, e.g. "thread_num"
  char field_format; // data type for snprintf: 'd' for integer, 's' for string
} kmp_affinity_format_field_t;

static const kmp_affinity_format_field_t __kmp_affinity_format_table[] = {
#if KMP_AFFINITY_SUPPORTED
    {'A', "thread_affinity", 's'},
#endif
    {'t', "team_num", 'd'},
    {'T', "num_teams", 'd'},
    {'L', "nesting_level", 'd'},
    {'n', "thread_num", 'd'},
    {'N', "num_threads", 'd'},
    {'a', "ancestor_tnum", 'd'},
    {'H', "host", 's'},
    {'P', "process_id", 'd'},
    {'i', "native_thread_id", 'd'}};
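// Example (illustrative): in an affinity format string the short and long
// field names are interchangeable, so "%n" and "%{thread_num}" both expand to
// the OpenMP thread number, and "%0.3L" prints nesting_level right-justified
// and zero-padded in a field of width 3.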
// Parse one %-field at *ptr and print its expansion into field_buffer.
// Returns the number of characters the expansion required.
static int __kmp_aux_capture_affinity_field(int gtid, const kmp_info_t *th,
                                            const char **ptr,
                                            kmp_str_buf_t *field_buffer) {
  int rc, format_index, field_value;
  const char *width_left, *width_right;
  bool pad_zeros, right_justify, parse_long_name, found_valid_name;
  static const int FORMAT_SIZE = 20;
  char format[FORMAT_SIZE] = {0};
  char absolute_short_name = 0;

  KMP_DEBUG_ASSERT(gtid >= 0);
  KMP_DEBUG_ASSERT(th);
  KMP_DEBUG_ASSERT(**ptr == '%');
  KMP_DEBUG_ASSERT(field_buffer);

  __kmp_str_buf_clear(field_buffer);

  // Skip the initial %
  (*ptr)++;

  // Check for %% first
  if (**ptr == '%') {
    __kmp_str_buf_cat(field_buffer, "%", 1);
    (*ptr)++; // skip over the second %
    return 1;
  }

  // Parse field modifiers if they are present
  pad_zeros = false;
  if (**ptr == '0') {
    pad_zeros = true;
    (*ptr)++; // skip over 0
  }
  right_justify = false;
  if (**ptr == '.') {
    right_justify = true;
    (*ptr)++; // skip over .
  }
  // Parse width of field: [width_left, width_right)
  width_left = width_right = NULL;
  if (**ptr >= '0' && **ptr <= '9') {
    width_left = *ptr;
    SKIP_DIGITS(*ptr);
    width_right = *ptr;
  }

  // Create the format for KMP_SNPRINTF based on the flags parsed above
  format_index = 0;
  format[format_index++] = '%';
  if (!right_justify)
    format[format_index++] = '-';
  if (pad_zeros)
    format[format_index++] = '0';
  if (width_left && width_right) {
    int i = 0;
    // Only allow 8-digit widths; this also keeps format[] from overflowing
    while (i < 8 && width_left < width_right) {
      format[format_index++] = *width_left;
      width_left++;
      i++;
    }
  }

  // Parse a name (long or short) and canonicalize it into absolute_short_name
  found_valid_name = false;
  parse_long_name = (**ptr == '{');
  if (parse_long_name)
    (*ptr)++; // skip initial left brace
  for (size_t i = 0; i < sizeof(__kmp_affinity_format_table) /
                             sizeof(__kmp_affinity_format_table[0]);
       ++i) {
    char short_name = __kmp_affinity_format_table[i].short_name;
    const char *long_name = __kmp_affinity_format_table[i].long_name;
    char field_format = __kmp_affinity_format_table[i].field_format;
    if (parse_long_name) {
      int length = KMP_STRLEN(long_name);
      if (strncmp(*ptr, long_name, length) == 0) {
        found_valid_name = true;
        (*ptr) += length; // skip the long name
      }
    } else if (**ptr == short_name) {
      found_valid_name = true;
      (*ptr)++; // skip the short name
    }
    if (found_valid_name) {
      format[format_index++] = field_format;
      format[format_index++] = '\0';
      absolute_short_name = short_name;
      break;
    }
  }
  if (parse_long_name) {
    if (**ptr != '}') {
      absolute_short_name = 0;
    } else {
      (*ptr)++; // skip over the right brace
    }
  }

  // Attempt to fill the buffer with the requested value
  switch (absolute_short_name) {
  case 't':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_team_num());
    break;
  case 'T':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_aux_get_num_teams());
    break;
  case 'L':
    rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_level);
    break;
  case 'n':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_tid_from_gtid(gtid));
    break;
  case 'H': {
    static const int BUFFER_SIZE = 256;
    char buf[BUFFER_SIZE];
    __kmp_expand_host_name(buf, BUFFER_SIZE);
    rc = __kmp_str_buf_print(field_buffer, format, buf);
  } break;
  case 'P':
    rc = __kmp_str_buf_print(field_buffer, format, getpid());
    break;
  case 'i':
    rc = __kmp_str_buf_print(field_buffer, format, __kmp_gettid());
    break;
  case 'N':
    rc = __kmp_str_buf_print(field_buffer, format, th->th.th_team->t.t_nproc);
    break;
  case 'a':
    field_value =
        __kmp_get_ancestor_thread_num(gtid, th->th.th_team->t.t_level - 1);
    rc = __kmp_str_buf_print(field_buffer, format, field_value);
    break;
#if KMP_AFFINITY_SUPPORTED
  case 'A': {
    kmp_str_buf_t buf;
    __kmp_str_buf_init(&buf);
    __kmp_affinity_str_buf_mask(&buf, th->th.th_affin_mask);
    rc = __kmp_str_buf_print(field_buffer, format, buf.str);
    __kmp_str_buf_free(&buf);
  } break;
#endif
  default:
    // If the implementation has no info for a field type, print "undefined"
    rc = __kmp_str_buf_print(field_buffer, "%s", "undefined");
    // Skip the field
    if (parse_long_name) {
      SKIP_TOKEN(*ptr);
      if (**ptr == '}')
        (*ptr)++;
    } else {
      (*ptr)++;
    }
  }

  KMP_ASSERT(format_index <= FORMAT_SIZE);
  return rc;
}
/* Capture the affinity string described by format (or by the
   affinity-format-var ICV if format is NULL or empty) into buffer and return
   the number of characters produced, not counting the null byte. */
size_t __kmp_aux_capture_affinity(int gtid, const char *format,
                                  kmp_str_buf_t *buffer) {
  const char *parse_ptr;
  size_t retval;
  const kmp_info_t *th;
  kmp_str_buf_t field;

  KMP_DEBUG_ASSERT(buffer);
  KMP_DEBUG_ASSERT(gtid >= 0);

  __kmp_str_buf_init(&field);
  __kmp_str_buf_clear(buffer);

  th = __kmp_threads[gtid];
  retval = 0;

  // If format is NULL or a zero-length string, use the affinity-format-var ICV
  parse_ptr = format;
  if (parse_ptr == NULL || *parse_ptr == '\0') {
    parse_ptr = __kmp_affinity_format;
  }
  KMP_DEBUG_ASSERT(parse_ptr);

  while (*parse_ptr != '\0') {
    // Parse a field
    if (*parse_ptr == '%') {
      // Put the expanded field in the buffer
      int rc = __kmp_aux_capture_affinity_field(gtid, th, &parse_ptr, &field);
      __kmp_str_buf_catbuf(buffer, &field);
      retval += rc;
    } else {
      // Put the literal character in the buffer
      __kmp_str_buf_cat(buffer, parse_ptr, 1);
      retval++;
      parse_ptr++;
    }
  }
  __kmp_str_buf_free(&field);
  return retval;
}
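// Illustrative expansion (values are hypothetical): with the format
//   "OMP: host %H pid %P thread %n of %N"
// __kmp_aux_capture_affinity() leaves something like
//   "OMP: host node17 pid 4242 thread 2 of 8"
// in *buffer and returns the number of characters produced; a NULL or empty
// format falls back to the affinity-format-var ICV (__kmp_affinity_format).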
// Displays the affinity string to stdout.
void __kmp_aux_display_affinity(int gtid, const char *format) {
  kmp_str_buf_t buf;
  __kmp_str_buf_init(&buf);
  __kmp_aux_capture_affinity(gtid, format, &buf);
  __kmp_fprintf(kmp_out, "%s" KMP_END_OF_LINE, buf.str);
  __kmp_str_buf_free(&buf);
}
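// Illustrative usage (assumption about the OpenMP 5.0 entry point): a call to
//   omp_display_affinity(NULL);             // use affinity-format-var
//   omp_display_affinity("host=%H tid=%i"); // or an explicit format
// reaches __kmp_aux_display_affinity(), which captures the string as above and
// writes it to kmp_out followed by a newline.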
/* ------------------------------------------------------------------------ */

void __kmp_aux_set_blocktime(int arg, kmp_info_t *thread, int tid) {
  int blocktime = arg;
#if KMP_USE_MONITOR
  int bt_intervals;
#endif
  int bt_set;

  __kmp_save_internal_controls(thread);

  /* Normalize and set blocktime for the teams. */
  if (blocktime < KMP_MIN_BLOCKTIME)
    blocktime = KMP_MIN_BLOCKTIME;
  else if (blocktime > KMP_MAX_BLOCKTIME)
    blocktime = KMP_MAX_BLOCKTIME;

  set__blocktime_team(thread->th.th_team, tid, blocktime);
  set__blocktime_team(thread->th.th_serial_team, 0, blocktime);

#if KMP_USE_MONITOR
  /* Calculate and set blocktime intervals for the teams. */
  bt_intervals = KMP_INTERVALS_FROM_BLOCKTIME(blocktime, __kmp_monitor_wakeups);

  set__bt_intervals_team(thread->th.th_team, tid, bt_intervals);
  set__bt_intervals_team(thread->th.th_serial_team, 0, bt_intervals);
#endif

  /* Record that blocktime was set explicitly. */
  bt_set = TRUE;

  set__bt_set_team(thread->th.th_team, tid, bt_set);
  set__bt_set_team(thread->th.th_serial_team, 0, bt_set);
#if KMP_USE_MONITOR
  KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d, "
                "bt_intervals=%d, monitor_updates=%d\n",
                __kmp_gtid_from_tid(tid, thread->th.th_team),
                thread->th.th_team->t.t_id, tid, blocktime, bt_intervals,
                __kmp_monitor_wakeups));
#else
  KF_TRACE(10, ("kmp_set_blocktime: T#%d(%d:%d), blocktime=%d\n",
                __kmp_gtid_from_tid(tid, thread->th.th_team),
                thread->th.th_team->t.t_id, tid, blocktime));
#endif
}
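// Illustrative usage (assumption; the public wrapper lives elsewhere):
//   kmp_set_blocktime(0); // let idle workers sleep immediately after a region
// reaches this routine for the calling thread; the value is clamped to
// [KMP_MIN_BLOCKTIME, KMP_MAX_BLOCKTIME] and mirrors what the KMP_BLOCKTIME
// environment variable sets globally at startup.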
void __kmp_aux_set_defaults(char const *str, int len) {
  if (!__kmp_init_serial) {
    __kmp_serial_initialize();
  }
  __kmp_env_initialize(str);

  if (__kmp_settings || __kmp_display_env || __kmp_display_env_verbose) {
    __kmp_env_print();
  }
} // __kmp_aux_set_defaults
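// Illustrative usage (assumption; the public wrapper lives elsewhere):
//   kmp_set_defaults("KMP_BLOCKTIME=0");
// re-runs __kmp_env_initialize() on the given string so the setting behaves as
// if it had been in the environment, and the settings are printed when
// KMP_SETTINGS or OMP_DISPLAY_ENV requests it.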
/* ------------------------------------------------------------------------ */

PACKED_REDUCTION_METHOD_T
__kmp_determine_reduction_method(
    ident_t *loc, kmp_int32 global_tid, kmp_int32 num_vars, size_t reduce_size,
    void *reduce_data, void (*reduce_func)(void *lhs_data, void *rhs_data),
    kmp_critical_name *lck) {

  // Default: critical construct ( lck != NULL ).
  // If ( reduce_data != NULL && reduce_func != NULL ) the tree-reduction
  //   method was generated by the compiler and may be selected by the RTL.
  // If loc->flags contains KMP_IDENT_ATOMIC_REDUCE, the atomic-reduce method
  //   was generated and may be selected by the RTL.
  // The RTL makes the final choice among the methods generated by the compiler.

  PACKED_REDUCTION_METHOD_T retval;

  int team_size;

  KMP_DEBUG_ASSERT(loc);
  KMP_DEBUG_ASSERT(lck);

#define FAST_REDUCTION_ATOMIC_METHOD_GENERATED                                 \
  ((loc->flags & (KMP_IDENT_ATOMIC_REDUCE)) == (KMP_IDENT_ATOMIC_REDUCE))
#define FAST_REDUCTION_TREE_METHOD_GENERATED ((reduce_data) && (reduce_func))

  retval = critical_reduce_block;

  // Another choice of getting a team size (with 1 dynamic deref) is slower.
  team_size = __kmp_get_team_num_threads(global_tid);
  if (team_size == 1) {

    retval = empty_reduce_block;

  } else {

    int atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;

#if KMP_ARCH_X86_64 || KMP_ARCH_PPC64 || KMP_ARCH_AARCH64 ||                   \
    KMP_ARCH_MIPS64 || KMP_ARCH_RISCV64

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||     \
    KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD

    int teamsize_cutoff = 4;

#if KMP_MIC_SUPPORTED
    if (__kmp_mic_type != non_mic) {
      teamsize_cutoff = 8;
    }
#endif
    int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
    if (tree_available) {
      if (team_size <= teamsize_cutoff) {
        if (atomic_available) {
          retval = atomic_reduce_block;
        }
      } else {
        retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
      }
    } else if (atomic_available) {
      retval = atomic_reduce_block;
    }
#else
#error "Unknown or unsupported OS"
#endif // KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||
       // KMP_OS_OPENBSD || KMP_OS_WINDOWS || KMP_OS_DARWIN || KMP_OS_HURD

#elif KMP_ARCH_X86 || KMP_ARCH_ARM || KMP_ARCH_AARCH || KMP_ARCH_MIPS

#if KMP_OS_LINUX || KMP_OS_FREEBSD || KMP_OS_WINDOWS || KMP_OS_HURD

    // basic tuning
    if (atomic_available) {
      if (num_vars <= 2) { // && ( team_size <= 8 ) due to false-sharing ???
        retval = atomic_reduce_block;
      }
    } // otherwise: use critical section

#elif KMP_OS_DARWIN

    int tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
    if (atomic_available && (num_vars <= 3)) {
      retval = atomic_reduce_block;
    } else if (tree_available) {
      if ((reduce_size > (9 * sizeof(kmp_real64))) &&
          (reduce_size < (2000 * sizeof(kmp_real64)))) {
        retval = TREE_REDUCE_BLOCK_WITH_PLAIN_BARRIER;
      }
    } // otherwise: use critical section

#else
#error "Unknown or unsupported OS"
#endif

#else
#error "Unknown or unsupported architecture"
#endif
  }

  // KMP_FORCE_REDUCTION
  // If the team is serialized (team_size == 1), ignore the forced reduction
  // method and stay with the unsynchronized method (empty_reduce_block).
  if (__kmp_force_reduction_method != reduction_method_not_defined &&
      team_size != 1) {

    PACKED_REDUCTION_METHOD_T forced_retval = critical_reduce_block;

    int atomic_available, tree_available;

    switch ((forced_retval = __kmp_force_reduction_method)) {
    case critical_reduce_block:
      KMP_ASSERT(lck); // lck should be != 0
      break;

    case atomic_reduce_block:
      atomic_available = FAST_REDUCTION_ATOMIC_METHOD_GENERATED;
      if (!atomic_available) {
        KMP_WARNING(RedMethodNotSupported, "atomic");
        forced_retval = critical_reduce_block;
      }
      break;

    case tree_reduce_block:
      tree_available = FAST_REDUCTION_TREE_METHOD_GENERATED;
      if (!tree_available) {
        KMP_WARNING(RedMethodNotSupported, "tree");
        forced_retval = critical_reduce_block;
      } else {
#if KMP_FAST_REDUCTION_BARRIER
        forced_retval = TREE_REDUCE_BLOCK_WITH_REDUCTION_BARRIER;
#endif
      }
      break;

    default:
      KMP_ASSERT(0); // unsupported method specified
    }

    retval = forced_retval;
  }

  KA_TRACE(10, ("reduction method selected=%08x\n", retval));

#undef FAST_REDUCTION_TREE_METHOD_GENERATED
#undef FAST_REDUCTION_ATOMIC_METHOD_GENERATED

  return (retval);
}
// This function is for testing the set/get/determine reduce method machinery.
kmp_int32 __kmp_get_reduce_method(void) {
  return ((__kmp_entry_thread()->th.th_local.packed_reduction_method) >> 8);
}
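// Note (assumption based on the packing scheme in kmp.h): the packed value
// keeps the barrier kind in its low byte and the reduction method in the
// higher bits, so the >> 8 above reports only the method; e.g. the value
// stored for an atomic reduction unpacks back to atomic_reduce_block.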
// Soft pause sets up threads to ignore blocktime and just go to sleep.
// Spin-wait code checks __kmp_pause_status and reacts accordingly.
void __kmp_soft_pause() { __kmp_pause_status = kmp_soft_paused; }

// Hard pause shuts down the runtime completely. Resume happens naturally when
// OpenMP is used subsequently.
void __kmp_hard_pause() {
  __kmp_pause_status = kmp_hard_paused;
  __kmp_internal_end_thread(-1);
}

// Soft resume clears __kmp_pause_status and wakes up all sleeping threads.
void __kmp_resume_if_soft_paused() {
  if (__kmp_pause_status == kmp_soft_paused) {
    __kmp_pause_status = kmp_not_paused;

    for (int gtid = 1; gtid < __kmp_threads_capacity; ++gtid) {
      kmp_info_t *thread = __kmp_threads[gtid];
      if (thread) { // Wake it if sleeping
        kmp_flag_64 fl(&thread->th.th_bar[bs_forkjoin_barrier].bb.b_go, thread);
        if (fl.is_sleeping())
          fl.resume(gtid);
        else if (__kmp_try_suspend_mx(thread)) { // got suspend lock
          __kmp_unlock_suspend_mx(thread); // unlock it; it was not sleeping
        } else { // thread holds the lock and may sleep soon
          do { // until either the thread sleeps, or we can get the lock
            if (fl.is_sleeping()) {
              fl.resume(gtid);
              break;
            } else if (__kmp_try_suspend_mx(thread)) {
              __kmp_unlock_suspend_mx(thread);
              break;
            }
          } while (1);
        }
      }
    }
  }
}
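// Illustrative usage (assumption about the OpenMP 5.0 entry point):
//   omp_pause_resource(omp_pause_soft, omp_get_initial_device());
//   /* quiet phase: workers sleep regardless of blocktime */
//   #pragma omp parallel  // the next parallel use resumes soft-paused threads
// The soft path toggles __kmp_pause_status via __kmp_soft_pause() and is
// undone by __kmp_resume_if_soft_paused(); omp_pause_hard tears the runtime
// down through __kmp_hard_pause().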
// This function is called via __kmpc_pause_resource. Returns 0 if successful.
int __kmp_pause_resource(kmp_pause_status_t level) {
  if (level == kmp_not_paused) { // requesting resume
    if (__kmp_pause_status == kmp_not_paused) {
      // error message about runtime not being paused, so can't resume
      return 1;
    } else {
      KMP_DEBUG_ASSERT(__kmp_pause_status == kmp_soft_paused ||
                       __kmp_pause_status == kmp_hard_paused);
      __kmp_pause_status = kmp_not_paused;
      return 0;
    }
  } else if (level == kmp_soft_paused) { // requesting soft pause
    if (__kmp_pause_status != kmp_not_paused) {
      // error message about already being paused
      return 1;
    } else {
      __kmp_soft_pause();
      return 0;
    }
  } else if (level == kmp_hard_paused) { // requesting hard pause
    if (__kmp_pause_status != kmp_not_paused) {