#include "kmp_affinity.h"
#include "kmp_stats.h"
#include "kmp_wait_release.h"
#include "kmp_wrapper_getpid.h"

#if !KMP_OS_DRAGONFLY && !KMP_OS_FREEBSD && !KMP_OS_NETBSD && !KMP_OS_OPENBSD
#include <alloca.h>
#endif
#include <sys/resource.h>
#include <sys/syscall.h>
#include <sys/times.h>

#if KMP_OS_LINUX && !KMP_OS_CNK
#include <sys/sysinfo.h>
#elif KMP_OS_DARWIN
#include <mach/mach.h>
#include <sys/sysctl.h>
#elif KMP_OS_DRAGONFLY || KMP_OS_FREEBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#include <sys/user.h>
#include <pthread_np.h>
#elif KMP_OS_NETBSD || KMP_OS_OPENBSD
#include <sys/types.h>
#include <sys/sysctl.h>
#endif

#include "tsan_annotations.h"
struct kmp_sys_timer {
  struct timespec start;
};

// Convert timespec to nanoseconds.
#define TS2NS(timespec) (((timespec).tv_sec * 1e9) + (timespec).tv_nsec)

static struct kmp_sys_timer __kmp_sys_timer_data;
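/* Note: __kmp_sys_timer_data.start holds the wall-clock origin captured by
   __kmp_clear_system_time(); __kmp_read_system_time() later reports elapsed
   seconds relative to it via the TS2NS conversion above. */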
#if KMP_HANDLE_SIGNALS
typedef void (*sig_func_t)(int);
STATIC_EFI2_WORKAROUND struct sigaction __kmp_sighldrs[NSIG];
static sigset_t __kmp_sigset;
#endif

static int __kmp_init_runtime = FALSE;

static int __kmp_fork_count = 0;

static pthread_condattr_t __kmp_suspend_cond_attr;
static pthread_mutexattr_t __kmp_suspend_mutex_attr;

static kmp_cond_align_t __kmp_wait_cv;
static kmp_mutex_align_t __kmp_wait_mx;

kmp_uint64 __kmp_ticks_per_msec = 1000000;
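/* Note: 1,000,000 ticks per msec corresponds to a 1 ns tick; on x86 this
   default is recalibrated against the hardware timestamp counter by
   __kmp_initialize_system_tick() further below. */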
#ifdef DEBUG_SUSPEND
static void __kmp_print_cond(char *buffer, kmp_cond_align_t *cond) {
  KMP_SNPRINTF(buffer, 128, "(cond (lock (%ld, %d)), (descr (%p)))",
               cond->c_cond.__c_lock.__status, cond->c_cond.__c_lock.__spinlock,
               cond->c_cond.__c_waiting);
}
#endif
#if ((KMP_OS_LINUX || KMP_OS_FREEBSD) && KMP_AFFINITY_SUPPORTED)

/* Affinity support */

void __kmp_affinity_bind_thread(int which) {
  KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
              "Illegal set affinity operation when not capable");

  kmp_affin_mask_t *mask;
  KMP_CPU_ALLOC_ON_STACK(mask);
  KMP_CPU_ZERO(mask);
  KMP_CPU_SET(which, mask);
  __kmp_set_system_affinity(mask, TRUE);
  KMP_CPU_FREE_FROM_STACK(mask);
}
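/* Capability-probing strategy used below: issue sched_getaffinity with a
   generously sized buffer. Depending on the kernel, the call either succeeds
   and reports the real mask size, or fails in a way (EFAULT on a NULL
   setaffinity, or a size hint) that tells us which size to use; only when
   both probes are inconclusive does the doubling search run. */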
void __kmp_affinity_determine_capable(const char *env_var) {
// Check and see if the OS supports thread affinity.

#if KMP_OS_LINUX
#define KMP_CPU_SET_SIZE_LIMIT (1024 * 1024)
#elif KMP_OS_FREEBSD
#define KMP_CPU_SET_SIZE_LIMIT (sizeof(cpuset_t))
#endif

#if KMP_OS_LINUX
  // If the syscall fails or returns a suggestion for the size, then we don't
  // have to search for an appropriate size.
  int gCode;
  int sCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = syscall(__NR_sched_getaffinity, 0, KMP_CPU_SET_SIZE_LIMIT, buf);
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));

  if (gCode < 0) {
    // System call not supported.
    if (__kmp_affinity_verbose ||
        (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
         (__kmp_affinity_type != affinity_default) &&
         (__kmp_affinity_type != affinity_disabled))) {
      int error = errno;
      kmp_msg_t err_code = KMP_ERR(error);
      __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                err_code, __kmp_msg_null);
      if (__kmp_generate_warnings == kmp_warnings_off) {
        __kmp_str_free(&err_code.str);
      }
    }
    KMP_AFFINITY_DISABLE();
    KMP_INTERNAL_FREE(buf);
    return;
  }
  if (gCode > 0) {
    // The optimal situation: the OS returns the size of the buffer it expects.
    // Verify it by calling setaffinity on a NULL buffer with that size; a
    // correct kernel fails with errno == EFAULT.
    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }

  // Call the getaffinity system call repeatedly with increasing set sizes
  // until we succeed, or reach an upper bound on the search.
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "searching for proper set size\n"));
  int size;
  for (size = 1; size <= KMP_CPU_SET_SIZE_LIMIT; size *= 2) {
    gCode = syscall(__NR_sched_getaffinity, 0, size, buf);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "getaffinity for mask size %d returned %d errno = %d\n",
                  size, gCode, errno));

    if (gCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(GetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      continue;
    }

    sCode = syscall(__NR_sched_setaffinity, 0, gCode, NULL);
    KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                  "setaffinity for mask size %d returned %d errno = %d\n",
                  gCode, sCode, errno));
    if (sCode < 0) {
      if (errno == ENOSYS) {
        // We shouldn't get here.
        KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                      "inconsistent OS call behavior: errno == ENOSYS for mask "
                      "size %d\n",
                      size));
        if (__kmp_affinity_verbose ||
            (__kmp_affinity_warnings &&
             (__kmp_affinity_type != affinity_none) &&
             (__kmp_affinity_type != affinity_default) &&
             (__kmp_affinity_type != affinity_disabled))) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(SetAffSysCallNotSupported, env_var),
                    err_code, __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
        KMP_AFFINITY_DISABLE();
        KMP_INTERNAL_FREE(buf);
        return;
      }
      if (errno == EFAULT) {
        KMP_AFFINITY_ENABLE(gCode);
        KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                      "affinity supported (mask size %d)\n",
                      (int)__kmp_affin_mask_size));
        KMP_INTERNAL_FREE(buf);
        return;
      }
    }
  }
#elif KMP_OS_FREEBSD
  int gCode;
  unsigned char *buf;
  buf = (unsigned char *)KMP_INTERNAL_MALLOC(KMP_CPU_SET_SIZE_LIMIT);
  gCode = pthread_getaffinity_np(pthread_self(), KMP_CPU_SET_SIZE_LIMIT,
                                 reinterpret_cast<cpuset_t *>(buf));
  KA_TRACE(30, ("__kmp_affinity_determine_capable: "
                "initial getaffinity call returned %d errno = %d\n",
                gCode, errno));
  if (gCode == 0) {
    KMP_AFFINITY_ENABLE(KMP_CPU_SET_SIZE_LIMIT);
    KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                  "affinity supported (mask size %d)\n",
                  (int)__kmp_affin_mask_size));
    KMP_INTERNAL_FREE(buf);
    return;
  }
#endif
  KMP_INTERNAL_FREE(buf);

  // Affinity is not supported.
  KMP_AFFINITY_DISABLE();
  KA_TRACE(10, ("__kmp_affinity_determine_capable: "
                "cannot determine mask size - affinity not supported\n"));
  if (__kmp_affinity_verbose ||
      (__kmp_affinity_warnings && (__kmp_affinity_type != affinity_none) &&
       (__kmp_affinity_type != affinity_default) &&
       (__kmp_affinity_type != affinity_disabled))) {
    KMP_WARNING(AffCantGetMaskSize, env_var);
  }
}

#endif // KMP_OS_LINUX && KMP_AFFINITY_SUPPORTED
#if KMP_USE_FUTEX

int __kmp_futex_determine_capable() {
  int loc = 0;
  int rc = syscall(__NR_futex, &loc, FUTEX_WAKE, 1, NULL, NULL, 0);
  int retval = (rc == 0) || (errno != ENOSYS);

  KA_TRACE(10,
           ("__kmp_futex_determine_capable: rc = %d errno = %d\n", rc, errno));
  KA_TRACE(10, ("__kmp_futex_determine_capable: futex syscall%s supported\n",
                retval ? "" : " not"));

  return retval;
}

#endif // KMP_USE_FUTEX
#if (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS)
/* Only 32-bit "add-exchange" instruction on IA-32 architecture causes us to
   use compare_and_store for these routines. */
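/* Each emulation below follows the same lock-free retry pattern: read the
   current value, compute old OP d, and attempt a compare-and-store; if
   another thread raced in between, re-read and retry. The return value is
   the value observed before the update (fetch-and-op semantics). */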
kmp_int8 __kmp_test_then_or8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_and8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_or32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value | d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint32 __kmp_test_then_and32(volatile kmp_uint32 *p, kmp_uint32 d) {
  kmp_uint32 old_value, new_value;

  old_value = TCR_4(*p);
  new_value = old_value & d;

  while (!KMP_COMPARE_AND_STORE_REL32(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_4(*p);
    new_value = old_value & d;
  }
  return old_value;
}

kmp_int8 __kmp_test_then_add8(volatile kmp_int8 *p, kmp_int8 d) {
  kmp_int8 old_value, new_value;

  old_value = TCR_1(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL8(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_1(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_int64 __kmp_test_then_add64(volatile kmp_int64 *p, kmp_int64 d) {
  kmp_int64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value + d;

  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value + d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_or64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value | d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value | d;
  }
  return old_value;
}

kmp_uint64 __kmp_test_then_and64(volatile kmp_uint64 *p, kmp_uint64 d) {
  kmp_uint64 old_value, new_value;

  old_value = TCR_8(*p);
  new_value = old_value & d;
  while (!KMP_COMPARE_AND_STORE_REL64(p, old_value, new_value)) {
    KMP_CPU_PAUSE();
    old_value = TCR_8(*p);
    new_value = old_value & d;
  }
  return old_value;
}

#endif /* (KMP_ARCH_X86 || KMP_ARCH_X86_64) && (!KMP_ASM_INTRINS) */
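/* Usage sketch (hypothetical caller): atomically set a flag bit and test
   whether it was already set:
     kmp_uint32 prev = __kmp_test_then_or32(&flags, 0x1);
     if (prev & 0x1) { ... bit was already set ... }                         */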
void __kmp_terminate_thread(int gtid) {
  int status;
  kmp_info_t *th = __kmp_threads[gtid];

  if (!th)
    return;

#ifdef KMP_CANCEL_THREADS
  KA_TRACE(10, ("__kmp_terminate_thread: kill (%d)\n", gtid));
  status = pthread_cancel(th->th.th_info.ds.ds_thread);
  if (status != 0 && status != ESRCH) {
    __kmp_fatal(KMP_MSG(CantTerminateWorkerThread), KMP_ERR(status),
                __kmp_msg_null);
  }
#endif
}
/* Set thread stack info according to values returned by pthread_getattr_np().
   If values are unreasonable, assume a call failed and use incremental stack
   refinement instead. Returns TRUE if the stack parameters could be
   determined exactly, FALSE if incremental refinement is necessary. */
static kmp_int32 __kmp_set_stack_info(int gtid, kmp_info_t *th) {
  int stack_data;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_HURD
  pthread_attr_t attr;
  int status;
  size_t size = 0;
  void *addr = 0;

  /* Always do incremental stack refinement for ubermaster threads since the
     initial thread stack range can be reduced by sibling thread creation so
     pthread_attr_getstack may cause thread gtid aliasing. */
  if (!KMP_UBER_GTID(gtid)) {

    /* Fetch the real thread attributes. */
    status = pthread_attr_init(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_init", status);
#if KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD
    status = pthread_attr_get_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_attr_get_np", status);
#else
    status = pthread_getattr_np(pthread_self(), &attr);
    KMP_CHECK_SYSFAIL("pthread_getattr_np", status);
#endif
    status = pthread_attr_getstack(&attr, &addr, &size);
    KMP_CHECK_SYSFAIL("pthread_attr_getstack", status);
    KA_TRACE(60,
             ("__kmp_set_stack_info: T#%d pthread_attr_getstack returned size:"
              " %lu, low addr: %p\n",
              gtid, size, addr));
    status = pthread_attr_destroy(&attr);
    KMP_CHECK_SYSFAIL("pthread_attr_destroy", status);
  }

  if (size != 0 && addr != 0) { // was stack parameter determination successful?
    /* Store the correct base and size. */
    TCW_PTR(th->th.th_info.ds.ds_stackbase, (((char *)addr) + size));
    TCW_PTR(th->th.th_info.ds.ds_stacksize, size);
    TCW_4(th->th.th_info.ds.ds_stackgrow, FALSE);
    return TRUE;
  }
#endif
  /* Use incremental refinement starting from an initial conservative
     estimate. */
  TCW_PTR(th->th.th_info.ds.ds_stacksize, 0);
  TCW_PTR(th->th.th_info.ds.ds_stackbase, &stack_data);
  TCW_4(th->th.th_info.ds.ds_stackgrow, TRUE);
  return FALSE;
}
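/* Note: when the exact stack range is unknown (uber threads, or OSes without
   pthread_attr_getstack), ds_stackgrow == TRUE tells the rest of the runtime
   to refine the recorded stack base incrementally as deeper addresses are
   observed; &stack_data is just a conservative in-stack starting point. */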
static void *__kmp_launch_worker(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set, old_set;
#endif /* KMP_BLOCK_SIGNALS */
  void *exit_val;
#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD
  void *volatile padding = 0;
#endif
  int gtid;

  gtid = ((kmp_info_t *)thr)->th.th_info.ds.ds_gtid;
  __kmp_gtid_set_specific(gtid);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = gtid;
#endif
#if KMP_STATS_ENABLED
  // set thread local index to point to thread-specific stats
  __kmp_stats_thread_ptr = ((kmp_info_t *)thr)->th.th_stats;
  __kmp_stats_thread_ptr->startLife();
  KMP_SET_THREAD_STATE(IDLE);
#endif

#if USE_ITT_BUILD
  __kmp_itt_thread_name(gtid);
#endif /* USE_ITT_BUILD */

#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_set_init_mask(gtid, FALSE);
#endif

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_ARCH_X86 || KMP_ARCH_X86_64
  // Set FP control regs to be a copy of the parallel initialization thread's.
  __kmp_clear_x87_fpu_status_word();
  __kmp_load_x87_fpu_control_word(&__kmp_init_x87_fpu_control_word);
  __kmp_load_mxcsr(&__kmp_init_mxcsr);
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_BLOCK, &new_set, &old_set);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD
  if (__kmp_stkoffset > 0 && gtid > 0) {
    padding = KMP_ALLOCA(gtid * __kmp_stkoffset);
  }
#endif

  KMP_MB();
  __kmp_set_stack_info(gtid, (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

  exit_val = __kmp_launch_thread((kmp_info_t *)thr);

#ifdef KMP_BLOCK_SIGNALS
  status = pthread_sigmask(SIG_SETMASK, &old_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  return exit_val;
}
#if KMP_USE_MONITOR
/* The monitor thread controls all of the threads in the complex by sleeping
   and waking threads as needed. */
static void *__kmp_launch_monitor(void *thr) {
  int status, old_type, old_state;
#ifdef KMP_BLOCK_SIGNALS
  sigset_t new_set;
#endif /* KMP_BLOCK_SIGNALS */
  struct timespec interval;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_launch_monitor: #1 launched\n"));

  /* Register us as the monitor thread. */
  __kmp_gtid_set_specific(KMP_GTID_MONITOR);
#ifdef KMP_TDATA_GTID
  __kmp_gtid = KMP_GTID_MONITOR;
#endif

  KMP_MB();

#if USE_ITT_BUILD
  // Instruct Intel(R) Threading Tools to ignore the monitor thread.
  __kmp_itt_thread_ignore();
#endif /* USE_ITT_BUILD */

  __kmp_set_stack_info(((kmp_info_t *)thr)->th.th_info.ds.ds_gtid,
                       (kmp_info_t *)thr);

  __kmp_check_stack_overlap((kmp_info_t *)thr);

#ifdef KMP_CANCEL_THREADS
  status = pthread_setcanceltype(PTHREAD_CANCEL_ASYNCHRONOUS, &old_type);
  KMP_CHECK_SYSFAIL("pthread_setcanceltype", status);
  status = pthread_setcancelstate(PTHREAD_CANCEL_ENABLE, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif

#if KMP_REAL_TIME_FIX
  // This is a potential fix which allows applications with real-time
  // scheduling policy to work.
  { // Was the program started with a real-time scheduling policy?
    int sched = sched_getscheduler(0);
    if (sched == SCHED_FIFO || sched == SCHED_RR) {
      // Yes, we are a part of a real-time application. Try to increase the
      // priority of the monitor.
      struct sched_param param;
      int max_priority = sched_get_priority_max(sched);
      int rc;
      KMP_WARNING(RealTimeSchedNotSupported);
      sched_getparam(0, &param);
      if (param.sched_priority < max_priority) {
        param.sched_priority += 1;
        rc = sched_setscheduler(0, sched, &param);
        if (rc != 0) {
          int error = errno;
          kmp_msg_t err_code = KMP_ERR(error);
          __kmp_msg(kmp_ms_warning, KMP_MSG(CantChangeMonitorPriority),
                    err_code, KMP_MSG(MonitorWillStarve), __kmp_msg_null);
          if (__kmp_generate_warnings == kmp_warnings_off) {
            __kmp_str_free(&err_code.str);
          }
        }
      } else {
        // We cannot abort here, because the number of CPUs may be enough for
        // all the threads, including the monitor, so the application could
        // potentially still work.
        __kmp_msg(kmp_ms_warning, KMP_MSG(RunningAtMaxPriority),
                  KMP_MSG(MonitorWillStarve), KMP_HNT(RunningAtMaxPriority),
                  __kmp_msg_null);
      }
    }
    // AC: free the thread that waits for the monitor to start.
    TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
  }
#endif // KMP_REAL_TIME_FIX

  KMP_MB(); /* Flush all pending memory write invalidates. */

  if (__kmp_monitor_wakeups == 1) {
    interval.tv_sec = 1;
    interval.tv_nsec = 0;
  } else {
    interval.tv_sec = 0;
    interval.tv_nsec = (KMP_NSEC_PER_SEC / __kmp_monitor_wakeups);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #2 monitor\n"));

  while (!TCR_4(__kmp_global.g.g_done)) {
    struct timespec now;
    struct timeval tval;

    /* This thread monitors the state of the system. */

    KA_TRACE(15, ("__kmp_launch_monitor: update\n"));

    status = gettimeofday(&tval, NULL);
    KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
    TIMEVAL_TO_TIMESPEC(&tval, &now);

    now.tv_sec += interval.tv_sec;
    now.tv_nsec += interval.tv_nsec;

    if (now.tv_nsec >= KMP_NSEC_PER_SEC) {
      now.tv_sec += 1;
      now.tv_nsec -= KMP_NSEC_PER_SEC;
    }

    status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
    // AC: the monitor should not fall asleep if g_done has been set.
    if (!TCR_4(__kmp_global.g.g_done)) { // check once more under mutex
      status = pthread_cond_timedwait(&__kmp_wait_cv.c_cond,
                                      &__kmp_wait_mx.m_mutex, &now);
      if (status != 0) {
        if (status != ETIMEDOUT && status != EINTR) {
          KMP_SYSFAIL("pthread_cond_timedwait", status);
        }
      }
    }
    status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);

    TCW_4(__kmp_global.g.g_time.dt.t_value,
          TCR_4(__kmp_global.g.g_time.dt.t_value) + 1);

    KMP_MB(); /* Flush all pending memory write invalidates. */
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #3 cleanup\n"));

#ifdef KMP_BLOCK_SIGNALS
  status = sigfillset(&new_set);
  KMP_CHECK_SYSFAIL_ERRNO("sigfillset", status);
  status = pthread_sigmask(SIG_UNBLOCK, &new_set, NULL);
  KMP_CHECK_SYSFAIL("pthread_sigmask", status);
#endif /* KMP_BLOCK_SIGNALS */

  KA_TRACE(10, ("__kmp_launch_monitor: #4 finished\n"));

  if (__kmp_global.g.g_abort != 0) {
    /* Now we need to terminate the worker threads; the value of g_abort is
       the signal number. */
    int gtid;

    KA_TRACE(10, ("__kmp_launch_monitor: #5 terminate sig=%d\n",
                  __kmp_global.g.g_abort));

    /* Terminate the OpenMP worker threads. */
    for (gtid = 1; gtid < __kmp_threads_capacity; ++gtid)
      __kmp_terminate_thread(gtid);

    KA_TRACE(10, ("__kmp_launch_monitor: #6 raise sig=%d\n",
                  __kmp_global.g.g_abort));

    if (__kmp_global.g.g_abort > 0)
      raise(__kmp_global.g.g_abort);
  }

  KA_TRACE(10, ("__kmp_launch_monitor: #7 exit\n"));

  return thr;
}
#endif // KMP_USE_MONITOR
void __kmp_create_worker(int gtid, kmp_info_t *th, size_t stack_size) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  int status;

  th->th.th_info.ds.ds_gtid = gtid;

#if KMP_STATS_ENABLED
  // Set up worker thread stats before launching it.
  __kmp_acquire_tas_lock(&__kmp_stats_lock, gtid);

  // th->th.th_stats transfers the thread-specific stats pointer to
  // __kmp_launch_worker, which installs it as the thread-local pointer.
  if (!KMP_UBER_GTID(gtid)) {
    th->th.th_stats = __kmp_stats_list->push_back(gtid);
  } else {
    // For root threads, __kmp_stats_thread_ptr was set in
    // __kmp_register_root(), so reuse it here.
    th->th.th_stats = __kmp_stats_thread_ptr;
  }
  __kmp_release_tas_lock(&__kmp_stats_lock, gtid);

#endif // KMP_STATS_ENABLED

  if (KMP_UBER_GTID(gtid)) {
    KA_TRACE(10, ("__kmp_create_worker: uber thread (%d)\n", gtid));
    th->th.th_info.ds.ds_thread = pthread_self();
    __kmp_set_stack_info(gtid, th);
    __kmp_check_stack_overlap(th);
    return;
  }

  KA_TRACE(10, ("__kmp_create_worker: try to create thread (%d)\n", gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerState), KMP_ERR(status), __kmp_msg_null);
  }

  /* Allocate twice the per-gtid stack offset, so the dummy alloca() in
     __kmp_launch_worker preserves both the premade offset and the stack
     space the user requested for every thread. */
  stack_size += gtid * __kmp_stkoffset * 2;

#if defined(__ANDROID__) && __ANDROID_API__ < 19
  // Round the stack size to a multiple of the page size. Older versions of
  // Android (until KitKat) would fail pthread_attr_setstacksize with EINVAL
  // if the stack size was not a multiple of the page size.
  stack_size = (stack_size + PAGE_SIZE - 1) & ~(PAGE_SIZE - 1);
#endif

  KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                "__kmp_stksize = %lu bytes, final stacksize = %lu bytes\n",
                gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_setstacksize(&thread_attr, stack_size);
#ifdef KMP_BACKUP_STKSIZE
  if (status != 0) {
    if (!__kmp_env_stksize) {
      stack_size = KMP_BACKUP_STKSIZE + gtid * __kmp_stkoffset;
      __kmp_stksize = KMP_BACKUP_STKSIZE;
      KA_TRACE(10, ("__kmp_create_worker: T#%d, default stacksize = %lu bytes, "
                    "__kmp_stksize = %lu bytes, (backup) final stacksize = %lu "
                    "bytes\n",
                    gtid, KMP_DEFAULT_STKSIZE, __kmp_stksize, stack_size));
      status = pthread_attr_setstacksize(&thread_attr, stack_size);
    }
  }
#endif /* KMP_BACKUP_STKSIZE */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                KMP_HNT(ChangeWorkerStackSize), __kmp_msg_null);
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

#endif /* KMP_THREAD_ATTR */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_worker, (void *)th);
  if (status != 0 || !handle) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(IncreaseWorkerStackSize), __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetWorkerStackSize, stack_size), KMP_ERR(status),
                  KMP_HNT(DecreaseWorkerStackSize), __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForWorkerThread), KMP_ERR(status),
                  KMP_HNT(Decrease_NUM_THREADS), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* KMP_THREAD_ATTR */

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_worker: done creating thread (%d)\n", gtid));

} // __kmp_create_worker
#if KMP_USE_MONITOR
void __kmp_create_monitor(kmp_info_t *th) {
  pthread_t handle;
  pthread_attr_t thread_attr;
  size_t size;
  int status;
  int auto_adj_size = FALSE;

  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME) {
    // We don't need the monitor thread in the case of MAX_BLOCKTIME.
    KA_TRACE(10, ("__kmp_create_monitor: skipping monitor thread because of "
                  "MAX blocktime\n"));
    th->th.th_info.ds.ds_tid = 0; // this makes reap_monitor a no-op
    th->th.th_info.ds.ds_gtid = 0;
    return;
  }
  KA_TRACE(10, ("__kmp_create_monitor: try to create monitor\n"));

  KMP_MB(); /* Flush all pending memory write invalidates. */

  th->th.th_info.ds.ds_tid = KMP_GTID_MONITOR;
  th->th.th_info.ds.ds_gtid = KMP_GTID_MONITOR;
#if KMP_REAL_TIME_FIX
  TCW_4(__kmp_global.g.g_time.dt.t_value,
        -1); // Will use it for synchronization a bit later.
#else
  TCW_4(__kmp_global.g.g_time.dt.t_value, 0);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
    auto_adj_size = TRUE;
  }
  status = pthread_attr_init(&thread_attr);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantInitThreadAttrs), KMP_ERR(status), __kmp_msg_null);
  }
  status = pthread_attr_setdetachstate(&thread_attr, PTHREAD_CREATE_JOINABLE);
  if (status != 0) {
    __kmp_fatal(KMP_MSG(CantSetMonitorState), KMP_ERR(status), __kmp_msg_null);
  }

#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  status = pthread_attr_getstacksize(&thread_attr, &size);
  KMP_CHECK_SYSFAIL("pthread_attr_getstacksize", status);
#else
  size = __kmp_sys_min_stksize;
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
#endif /* KMP_THREAD_ATTR */

  if (__kmp_monitor_stksize == 0) {
    __kmp_monitor_stksize = KMP_DEFAULT_MONITOR_STKSIZE;
  }
  if (__kmp_monitor_stksize < __kmp_sys_min_stksize) {
    __kmp_monitor_stksize = __kmp_sys_min_stksize;
  }

  KA_TRACE(10, ("__kmp_create_monitor: default stacksize = %lu bytes, "
                "requested stacksize = %lu bytes\n",
                size, __kmp_monitor_stksize));

retry:

/* Set stack size for this thread now. */
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
  KA_TRACE(10, ("__kmp_create_monitor: setting stacksize = %lu bytes,",
                __kmp_monitor_stksize));
  status = pthread_attr_setstacksize(&thread_attr, __kmp_monitor_stksize);
  if (status != 0) {
    if (auto_adj_size) {
      __kmp_monitor_stksize *= 2;
      goto retry;
    }
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, // should this be fatal?
              KMP_MSG(CantSetMonitorStackSize, (long int)__kmp_monitor_stksize),
              err_code, KMP_HNT(ChangeMonitorStackSize), __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */

  status =
      pthread_create(&handle, &thread_attr, __kmp_launch_monitor, (void *)th);

  if (status != 0) {
#ifdef _POSIX_THREAD_ATTR_STACKSIZE
    if (status == EINVAL) {
      if (auto_adj_size && (__kmp_monitor_stksize < (size_t)0x40000000)) {
        __kmp_monitor_stksize *= 2;
        goto retry;
      }
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(IncreaseMonitorStackSize),
                  __kmp_msg_null);
    }
    if (status == ENOMEM) {
      __kmp_fatal(KMP_MSG(CantSetMonitorStackSize, __kmp_monitor_stksize),
                  KMP_ERR(status), KMP_HNT(DecreaseMonitorStackSize),
                  __kmp_msg_null);
    }
#endif /* _POSIX_THREAD_ATTR_STACKSIZE */
    if (status == EAGAIN) {
      __kmp_fatal(KMP_MSG(NoResourcesForMonitorThread), KMP_ERR(status),
                  KMP_HNT(DecreaseNumberOfThreadsInUse), __kmp_msg_null);
    }
    KMP_SYSFAIL("pthread_create", status);
  }

  th->th.th_info.ds.ds_thread = handle;

#if KMP_REAL_TIME_FIX
  // Wait until the monitor has really started and set its *priority*.
  KMP_DEBUG_ASSERT(sizeof(kmp_uint32) ==
                   sizeof(__kmp_global.g.g_time.dt.t_value));
  __kmp_wait_4((kmp_uint32 volatile *)&__kmp_global.g.g_time.dt.t_value, -1,
               &__kmp_neq_4, NULL);
#endif // KMP_REAL_TIME_FIX

#ifdef KMP_THREAD_ATTR
  status = pthread_attr_destroy(&thread_attr);
  if (status != 0) {
    kmp_msg_t err_code = KMP_ERR(status);
    __kmp_msg(kmp_ms_warning, KMP_MSG(CantDestroyThreadAttrs), err_code,
              __kmp_msg_null);
    if (__kmp_generate_warnings == kmp_warnings_off) {
      __kmp_str_free(&err_code.str);
    }
  }
#endif

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(10, ("__kmp_create_monitor: monitor created %#.8lx\n",
                th->th.th_info.ds.ds_thread));

} // __kmp_create_monitor
#endif // KMP_USE_MONITOR
void __kmp_exit_thread(int exit_status) {
  pthread_exit((void *)(intptr_t)exit_status);
} // __kmp_exit_thread
#if KMP_USE_MONITOR
void __kmp_resume_monitor();

void __kmp_reap_monitor(kmp_info_t *th) {
  int status;
  void *exit_val;

  KA_TRACE(10, ("__kmp_reap_monitor: try to reap monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  // If monitor has been created, its tid and gtid should be KMP_GTID_MONITOR.
  // If both tid and gtid are 0, it means the monitor did not ever start.
  // If both tid and gtid are KMP_GTID_DNE, the monitor has been shut down.
  KMP_DEBUG_ASSERT(th->th.th_info.ds.ds_tid == th->th.th_info.ds.ds_gtid);
  if (th->th.th_info.ds.ds_gtid != KMP_GTID_MONITOR) {
    KA_TRACE(10, ("__kmp_reap_monitor: monitor did not start, returning\n"));
    return;
  }

  KMP_MB(); /* Flush all pending memory write invalidates. */

  /* First check whether the monitor thread exists, to avoid a performance
     problem when it sleeps during a blocktime-sized interval. */
  status = pthread_kill(th->th.th_info.ds.ds_thread, 0);
  if (status != ESRCH) {
    __kmp_resume_monitor(); // Wake up the monitor thread.
  }
  KA_TRACE(10, ("__kmp_reap_monitor: try to join with monitor\n"));
  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
  if (exit_val != th) {
    __kmp_fatal(KMP_MSG(ReapMonitorError), KMP_ERR(status), __kmp_msg_null);
  }

  th->th.th_info.ds.ds_tid = KMP_GTID_DNE;
  th->th.th_info.ds.ds_gtid = KMP_GTID_DNE;

  KA_TRACE(10, ("__kmp_reap_monitor: done reaping monitor thread with handle"
                " %#.8lx\n",
                th->th.th_info.ds.ds_thread));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#endif // KMP_USE_MONITOR
void __kmp_reap_worker(kmp_info_t *th) {
  int status;
  void *exit_val;

  KMP_MB(); /* Flush all pending memory write invalidates. */

  KA_TRACE(
      10, ("__kmp_reap_worker: try to reap T#%d\n", th->th.th_info.ds.ds_gtid));

  status = pthread_join(th->th.th_info.ds.ds_thread, &exit_val);
#ifdef KMP_DEBUG
  /* Don't expose these to the user until we understand when they trigger. */
  if (status != 0) {
    __kmp_fatal(KMP_MSG(ReapWorkerError), KMP_ERR(status), __kmp_msg_null);
  }
  if (exit_val != th) {
    KA_TRACE(10, ("__kmp_reap_worker: worker T#%d did not reap properly, "
                  "exit_val = %p\n",
                  th->th.th_info.ds.ds_gtid, exit_val));
  }
#endif /* KMP_DEBUG */

  KA_TRACE(10, ("__kmp_reap_worker: done reaping T#%d\n",
                th->th.th_info.ds.ds_gtid));

  KMP_MB(); /* Flush all pending memory write invalidates. */
}
#if KMP_HANDLE_SIGNALS

static void __kmp_null_handler(int signo) {
  // Do nothing, for doing SIG_IGN-type actions.
} // __kmp_null_handler
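/* Note on the two-stage shutdown implemented by the team handler below: the
   handler itself only records the signal in g_abort and sets g_done so the
   worker/monitor loops shut themselves down; the re-raise of the recorded
   signal happens later, outside the handler (see __kmp_launch_monitor steps
   #5/#6 above). */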
static void __kmp_team_handler(int signo) {
  if (__kmp_global.g.g_abort == 0) {
/* Stage 1 signal handler, let's shut down all of the threads. */
#ifdef KMP_DEBUG
    __kmp_debug_printf("__kmp_team_handler: caught signal = %d\n", signo);
#endif /* KMP_DEBUG */
    switch (signo) {
    case SIGHUP:
    case SIGINT:
    case SIGQUIT:
    case SIGILL:
    case SIGABRT:
    case SIGFPE:
    case SIGBUS:
    case SIGSEGV:
#ifdef SIGSYS
    case SIGSYS:
#endif
    case SIGTERM:
      if (__kmp_debug_buf) {
        __kmp_dump_debug_buffer();
      }
      TCW_4(__kmp_global.g.g_abort, signo);
      /* Set g_done so the monitor and workers shut themselves down. */
      TCW_4(__kmp_global.g.g_done, TRUE);
      KMP_MB(); // Flush all pending memory write invalidates.
      break;
    default:
#ifdef KMP_DEBUG
      __kmp_debug_printf("__kmp_team_handler: unknown signal type");
#endif /* KMP_DEBUG */
      break;
    }
  }
} // __kmp_team_handler

static void __kmp_sigaction(int signum, const struct sigaction *act,
                            struct sigaction *oldact) {
  int rc = sigaction(signum, act, oldact);
  KMP_CHECK_SYSFAIL_ERRNO("sigaction", rc);
}

static void __kmp_install_one_handler(int sig, sig_func_t handler_func,
                                      int parallel_init) {
  KMP_MB(); // Flush all pending memory write invalidates.
  KB_TRACE(60,
           ("__kmp_install_one_handler( %d, ..., %d )\n", sig, parallel_init));
  if (parallel_init) {
    struct sigaction new_action;
    struct sigaction old_action;
    new_action.sa_handler = handler_func;
    new_action.sa_flags = 0;
    sigfillset(&new_action.sa_mask);
    __kmp_sigaction(sig, &new_action, &old_action);
    if (old_action.sa_handler == __kmp_sighldrs[sig].sa_handler) {
      sigaddset(&__kmp_sigset, sig);
    } else {
      // Restore/keep user's handler if one was previously installed.
      __kmp_sigaction(sig, &old_action, NULL);
    }
  } else {
    // Save initial/system signal handlers to see if user handlers are
    // installed later.
    __kmp_sigaction(sig, NULL, &__kmp_sighldrs[sig]);
  }
  KMP_MB(); // Flush all pending memory write invalidates.
} // __kmp_install_one_handler

static void __kmp_remove_one_handler(int sig) {
  KB_TRACE(60, ("__kmp_remove_one_handler( %d )\n", sig));
  if (sigismember(&__kmp_sigset, sig)) {
    struct sigaction old;
    KMP_MB(); // Flush all pending memory write invalidates.
    __kmp_sigaction(sig, &__kmp_sighldrs[sig], &old);
    if ((old.sa_handler != __kmp_team_handler) &&
        (old.sa_handler != __kmp_null_handler)) {
      // Restore the user's signal handler.
      KB_TRACE(10, ("__kmp_remove_one_handler: oops, not our handler, "
                    "restoring: sig=%d\n",
                    sig));
      __kmp_sigaction(sig, &old, NULL);
    }
    sigdelset(&__kmp_sigset, sig);
    KMP_MB(); // Flush all pending memory write invalidates.
  }
} // __kmp_remove_one_handler

void __kmp_install_signals(int parallel_init) {
  KB_TRACE(10, ("__kmp_install_signals( %d )\n", parallel_init));
  if (__kmp_handle_signals || !parallel_init) {
    /* If !parallel_init, we do not install handlers, just save the original
       handlers. */
    sigemptyset(&__kmp_sigset);
    __kmp_install_one_handler(SIGHUP, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGINT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGQUIT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGILL, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGABRT, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGFPE, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGBUS, __kmp_team_handler, parallel_init);
    __kmp_install_one_handler(SIGSEGV, __kmp_team_handler, parallel_init);
#ifdef SIGSYS
    __kmp_install_one_handler(SIGSYS, __kmp_team_handler, parallel_init);
#endif // SIGSYS
    __kmp_install_one_handler(SIGTERM, __kmp_team_handler, parallel_init);
#ifdef SIGPIPE
    __kmp_install_one_handler(SIGPIPE, __kmp_team_handler, parallel_init);
#endif // SIGPIPE
  }
} // __kmp_install_signals

void __kmp_remove_signals(void) {
  int sig;
  KB_TRACE(10, ("__kmp_remove_signals()\n"));
  for (sig = 1; sig < NSIG; ++sig) {
    __kmp_remove_one_handler(sig);
  }
} // __kmp_remove_signals

#endif // KMP_HANDLE_SIGNALS
void __kmp_enable(int new_state) {
#ifdef KMP_CANCEL_THREADS
  int status, old_state;
  status = pthread_setcancelstate(new_state, &old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
  KMP_DEBUG_ASSERT(old_state == PTHREAD_CANCEL_DISABLE);
#endif
}

void __kmp_disable(int *old_state) {
#ifdef KMP_CANCEL_THREADS
  int status;
  status = pthread_setcancelstate(PTHREAD_CANCEL_DISABLE, old_state);
  KMP_CHECK_SYSFAIL("pthread_setcancelstate", status);
#endif
}
static void __kmp_atfork_prepare(void) {
  __kmp_acquire_bootstrap_lock(&__kmp_initz_lock);
  __kmp_acquire_bootstrap_lock(&__kmp_forkjoin_lock);
}

static void __kmp_atfork_parent(void) {
  __kmp_release_bootstrap_lock(&__kmp_initz_lock);
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
}

/* Reset the library so execution in the child starts "all over again" with
   clean data structures in initial states. Don't worry about freeing memory
   allocated by the parent; this will all be freed when the child exits and
   is cleaned up by the OS. */
static void __kmp_atfork_child(void) {
  __kmp_release_bootstrap_lock(&__kmp_forkjoin_lock);
  /* TODO make sure this is done right for nested/sibling */
  // ATT: Memory leaks are sure to appear, because we are not cleaning up
  // after fork(). Mutexes in the child should be initialized (unlocked).

  ++__kmp_fork_count;

#if KMP_AFFINITY_SUPPORTED
#if KMP_OS_LINUX || KMP_OS_FREEBSD
  // Reset the affinity in the child to the initial thread affinity in the
  // parent.
  kmp_set_thread_affinity_mask_initial();
#endif
  // Set default not to bind threads tightly in the child (we're expecting
  // over-subscription after the fork and this can improve things for
  // scripting languages that use OpenMP inside process-parallel code).
  __kmp_affinity_type = affinity_none;
  if (__kmp_nested_proc_bind.bind_types != NULL) {
    __kmp_nested_proc_bind.bind_types[0] = proc_bind_false;
  }
#endif // KMP_AFFINITY_SUPPORTED

  __kmp_init_runtime = FALSE;
#if KMP_USE_MONITOR
  __kmp_init_monitor = 0;
#endif
  __kmp_init_parallel = FALSE;
  __kmp_init_middle = FALSE;
  __kmp_init_serial = FALSE;
  TCW_4(__kmp_init_gtid, FALSE);
  __kmp_init_common = FALSE;

  TCW_4(__kmp_init_user_locks, FALSE);
#if !KMP_USE_DYNAMIC_LOCK
  __kmp_user_lock_table.used = 1;
  __kmp_user_lock_table.allocated = 0;
  __kmp_user_lock_table.table = NULL;
  __kmp_lock_blocks = NULL;
#endif

  __kmp_all_nth = 0;
  TCW_4(__kmp_nth, 0);

  __kmp_thread_pool = NULL;
  __kmp_thread_pool_insert_pt = NULL;
  __kmp_team_pool = NULL;

  /* Must actually zero all the *cache arguments passed to __kmpc_threadprivate
     here so threadprivate doesn't use stale data. */
  KA_TRACE(10, ("__kmp_atfork_child: checking cache address list %p\n",
                __kmp_threadpriv_cache_list));

  while (__kmp_threadpriv_cache_list != NULL) {

    if (*__kmp_threadpriv_cache_list->addr != NULL) {
      KC_TRACE(50, ("__kmp_atfork_child: zeroing cache at address %p\n",
                    &(*__kmp_threadpriv_cache_list->addr)));

      *__kmp_threadpriv_cache_list->addr = NULL;
    }
    __kmp_threadpriv_cache_list = __kmp_threadpriv_cache_list->next;
  }

  __kmp_init_runtime = FALSE;

  /* Reset statically initialized locks. */
  __kmp_init_bootstrap_lock(&__kmp_initz_lock);
  __kmp_init_bootstrap_lock(&__kmp_stdio_lock);
  __kmp_init_bootstrap_lock(&__kmp_console_lock);
  __kmp_init_bootstrap_lock(&__kmp_task_team_lock);
}
void __kmp_register_atfork(void) {
  if (__kmp_need_register_atfork) {
    int status = pthread_atfork(__kmp_atfork_prepare, __kmp_atfork_parent,
                                __kmp_atfork_child);
    KMP_CHECK_SYSFAIL("pthread_atfork", status);
    __kmp_need_register_atfork = FALSE;
  }
}
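/* The prepare/parent/child trio above serializes fork() against the runtime:
   prepare takes the bootstrap locks before the fork, the parent simply
   releases them afterwards, and the child re-initializes the whole library
   so the forked process starts from a clean state. */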
void __kmp_suspend_initialize(void) {
  int status;
  status = pthread_mutexattr_init(&__kmp_suspend_mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_condattr_init(&__kmp_suspend_cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
}
void __kmp_suspend_initialize_thread(kmp_info_t *th) {
  ANNOTATE_HAPPENS_AFTER(&th->th.th_suspend_init_count);
  int old_value = KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count);
  int new_value = __kmp_fork_count + 1;
  // Return if already initialized.
  if (old_value == new_value)
    return;
  // Wait, then return if being initialized.
  if (old_value == -1 ||
      !__kmp_atomic_compare_store(&th->th.th_suspend_init_count, old_value,
                                  -1)) {
    while (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) != new_value) {
      KMP_CPU_PAUSE();
    }
  } else {
    // Claim to be the initializer and do the initializations.
    int status;
    status = pthread_cond_init(&th->th.th_suspend_cv.c_cond,
                               &__kmp_suspend_cond_attr);
    KMP_CHECK_SYSFAIL("pthread_cond_init", status);
    status = pthread_mutex_init(&th->th.th_suspend_mx.m_mutex,
                                &__kmp_suspend_mutex_attr);
    KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
    KMP_ATOMIC_ST_REL(&th->th.th_suspend_init_count, new_value);
    ANNOTATE_HAPPENS_BEFORE(&th->th.th_suspend_init_count);
  }
}
void __kmp_suspend_uninitialize_thread(kmp_info_t *th) {
  if (KMP_ATOMIC_LD_ACQ(&th->th.th_suspend_init_count) > __kmp_fork_count) {
    /* This means we have initialized the suspension pthread objects for this
       thread in this instance of the process. */
    int status;

    status = pthread_cond_destroy(&th->th.th_suspend_cv.c_cond);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_cond_destroy", status);
    }
    status = pthread_mutex_destroy(&th->th.th_suspend_mx.m_mutex);
    if (status != 0 && status != EBUSY) {
      KMP_SYSFAIL("pthread_mutex_destroy", status);
    }
    --th->th.th_suspend_init_count;
    KMP_DEBUG_ASSERT(KMP_ATOMIC_LD_RLX(&th->th.th_suspend_init_count) ==
                     __kmp_fork_count);
  }
}
// Returns true if the lock was obtained, false otherwise.
int __kmp_try_suspend_mx(kmp_info_t *th) {
  return (pthread_mutex_trylock(&th->th.th_suspend_mx.m_mutex) == 0);
}

void __kmp_lock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
}

void __kmp_unlock_suspend_mx(kmp_info_t *th) {
  int status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
}
/* This routine puts the calling thread to sleep after setting the sleep bit
   for the indicated flag variable to true. */
template <class C>
static inline void __kmp_suspend_template(int th_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_suspend);
  kmp_info_t *th = __kmp_threads[th_gtid];
  int status;
  typename C::flag_t old_spin;

  KF_TRACE(30, ("__kmp_suspend_template: T#%d enter for flag = %p\n", th_gtid,
                flag->get()));

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  KF_TRACE(10, ("__kmp_suspend_template: T#%d setting sleep bit for spin(%p)\n",
                th_gtid, flag->get()));

  /* TODO: shouldn't this use release semantics to ensure that
     __kmp_suspend_initialize_thread gets called first? */
  old_spin = flag->set_sleeping();
  if (__kmp_dflt_blocktime == KMP_MAX_BLOCKTIME &&
      __kmp_pause_status != kmp_soft_paused) {
    flag->unset_sleeping();
    status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
    return;
  }
  KF_TRACE(5, ("__kmp_suspend_template: T#%d set sleep bit for spin(%p)==%x,"
               " was %x\n",
               th_gtid, flag->get(), flag->load(), old_spin));

  if (flag->done_check_val(old_spin)) {
    old_spin = flag->unset_sleeping();
    KF_TRACE(5, ("__kmp_suspend_template: T#%d false alarm, reset sleep bit "
                 "for spin(%p)\n",
                 th_gtid, flag->get()));
  } else {
    /* Encapsulate in a loop as the documentation states that this may "with
       low probability" return when the condition variable has not been
       signaled or broadcast. */
    int deactivated = FALSE;
    TCW_PTR(th->th.th_sleep_loc, (void *)flag);

    while (flag->is_sleeping()) {
#ifdef DEBUG_SUSPEND
      char buffer[128];
      __kmp_suspend_count++;
      __kmp_print_cond(buffer, &th->th.th_suspend_cv);
      __kmp_printf("__kmp_suspend_template: suspending T#%d: %s\n", th_gtid,
                   buffer);
#endif
      // Mark the thread as no longer active (only in the first iteration of
      // the loop).
      if (!deactivated) {
        th->th.th_active = FALSE;
        if (th->th.th_active_in_pool) {
          th->th.th_active_in_pool = FALSE;
          KMP_ATOMIC_DEC(&__kmp_thread_pool_active_nth);
          KMP_DEBUG_ASSERT(TCR_4(__kmp_thread_pool_active_nth) >= 0);
        }
        deactivated = TRUE;
      }

#if USE_SUSPEND_TIMEOUT
      struct timespec now;
      struct timeval tval;
      int msecs;

      status = gettimeofday(&tval, NULL);
      KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
      TIMEVAL_TO_TIMESPEC(&tval, &now);

      msecs = (4 * __kmp_dflt_blocktime) + 200;
      now.tv_sec += msecs / 1000;
      now.tv_nsec += (msecs % 1000) * 1000;

      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform "
                    "pthread_cond_timedwait\n",
                    th_gtid));
      status = pthread_cond_timedwait(&th->th.th_suspend_cv.c_cond,
                                      &th->th.th_suspend_mx.m_mutex, &now);
#else
      KF_TRACE(15, ("__kmp_suspend_template: T#%d about to perform"
                    " pthread_cond_wait\n",
                    th_gtid));
      status = pthread_cond_wait(&th->th.th_suspend_cv.c_cond,
                                 &th->th.th_suspend_mx.m_mutex);
#endif // USE_SUSPEND_TIMEOUT

      if ((status != 0) && (status != EINTR) && (status != ETIMEDOUT)) {
        KMP_SYSFAIL("pthread_cond_wait", status);
      }
#ifdef KMP_DEBUG
      if (status == ETIMEDOUT) {
        if (flag->is_sleeping()) {
          KF_TRACE(100,
                   ("__kmp_suspend_template: T#%d timeout wakeup\n", th_gtid));
        } else {
          KF_TRACE(2, ("__kmp_suspend_template: T#%d timeout wakeup, sleep bit "
                       "not set!\n",
                       th_gtid));
        }
      } else if (flag->is_sleeping()) {
        KF_TRACE(100,
                 ("__kmp_suspend_template: T#%d spurious wakeup\n", th_gtid));
      }
#endif
    } // while

    // Mark the thread as active again (if it was previously marked as
    // inactive).
    if (deactivated) {
      th->th.th_active = TRUE;
      if (TCR_4(th->th.th_in_pool)) {
        KMP_ATOMIC_INC(&__kmp_thread_pool_active_nth);
        th->th.th_active_in_pool = TRUE;
      }
    }
  }
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_suspend_template: T#%d has awakened: %s\n", th_gtid,
                 buffer);
  }
#endif

  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_suspend_template: T#%d exit\n", th_gtid));
}

void __kmp_suspend_32(int th_gtid, kmp_flag_32 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_64(int th_gtid, kmp_flag_64 *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
void __kmp_suspend_oncore(int th_gtid, kmp_flag_oncore *flag) {
  __kmp_suspend_template(th_gtid, flag);
}
/* This routine signals the thread specified by target_gtid to wake up after
   setting the sleep bit indicated by the flag argument to FALSE. The target
   thread must already have called __kmp_suspend_template(). */
template <class C>
static inline void __kmp_resume_template(int target_gtid, C *flag) {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  kmp_info_t *th = __kmp_threads[target_gtid];
  int status;

#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
#endif

  KF_TRACE(30, ("__kmp_resume_template: T#%d wants to wakeup T#%d enter\n",
                gtid, target_gtid));
  KMP_DEBUG_ASSERT(gtid != target_gtid);

  __kmp_suspend_initialize_thread(th);

  status = pthread_mutex_lock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);

  if (!flag) { // coming from __kmp_null_resume_wrapper
    flag = (C *)CCAST(void *, th->th.th_sleep_loc);
  }

  // First, check if the flag is null or its type has changed. If so, someone
  // else woke it up.
  if (!flag || flag->get_type() != flag->get_ptr_type()) {
    // get_ptr_type simply shows what the flag was cast to.
    KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                 "awake: flag(%p)\n",
                 gtid, target_gtid, NULL));
    status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
    KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
    return;
  } else { // if multiple threads are sleeping, the flag should be internally
    // referring to a specific thread here
    typename C::flag_t old_spin = flag->unset_sleeping();
    if (!flag->is_sleeping_val(old_spin)) {
      KF_TRACE(5, ("__kmp_resume_template: T#%d exiting, thread T#%d already "
                   "awake: flag(%p): %u => %u\n",
                   gtid, target_gtid, flag->get(), old_spin, flag->load()));
      status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
      KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
      return;
    }
    KF_TRACE(5, ("__kmp_resume_template: T#%d about to wakeup T#%d, reset "
                 "sleep bit for flag's loc(%p): %u => %u\n",
                 gtid, target_gtid, flag->get(), old_spin, flag->load()));
  }
  TCW_PTR(th->th.th_sleep_loc, NULL);

#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &th->th.th_suspend_cv);
    __kmp_printf("__kmp_resume_template: T#%d resuming T#%d: %s\n", gtid,
                 target_gtid, buffer);
  }
#endif

  status = pthread_cond_signal(&th->th.th_suspend_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&th->th.th_suspend_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_template: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, target_gtid));
}

void __kmp_resume_32(int target_gtid, kmp_flag_32 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_64(int target_gtid, kmp_flag_64 *flag) {
  __kmp_resume_template(target_gtid, flag);
}
void __kmp_resume_oncore(int target_gtid, kmp_flag_oncore *flag) {
  __kmp_resume_template(target_gtid, flag);
}
#if KMP_USE_MONITOR
void __kmp_resume_monitor() {
  KMP_TIME_DEVELOPER_PARTITIONED_BLOCK(USER_resume);
  int status;
#ifdef KMP_DEBUG
  int gtid = TCR_4(__kmp_init_gtid) ? __kmp_get_gtid() : -1;
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d wants to wakeup T#%d enter\n", gtid,
                KMP_GTID_MONITOR));
  KMP_DEBUG_ASSERT(gtid != KMP_GTID_MONITOR);
#endif // KMP_DEBUG
  status = pthread_mutex_lock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_lock", status);
#ifdef DEBUG_SUSPEND
  {
    char buffer[128];
    __kmp_print_cond(buffer, &__kmp_wait_cv.c_cond);
    __kmp_printf("__kmp_resume_monitor: T#%d resuming T#%d: %s\n", gtid,
                 KMP_GTID_MONITOR, buffer);
  }
#endif
  status = pthread_cond_signal(&__kmp_wait_cv.c_cond);
  KMP_CHECK_SYSFAIL("pthread_cond_signal", status);
  status = pthread_mutex_unlock(&__kmp_wait_mx.m_mutex);
  KMP_CHECK_SYSFAIL("pthread_mutex_unlock", status);
  KF_TRACE(30, ("__kmp_resume_monitor: T#%d exiting after signaling wake up"
                " for T#%d\n",
                gtid, KMP_GTID_MONITOR));
}
#endif // KMP_USE_MONITOR
void __kmp_yield() { sched_yield(); }

void __kmp_gtid_set_specific(int gtid) {
  if (__kmp_init_gtid) {
    int status;
    status = pthread_setspecific(__kmp_gtid_threadprivate_key,
                                 (void *)(intptr_t)(gtid + 1));
    KMP_CHECK_SYSFAIL("pthread_setspecific", status);
  } else {
    KA_TRACE(50, ("__kmp_gtid_set_specific: runtime shutdown, returning\n"));
  }
}

int __kmp_gtid_get_specific() {
  int gtid;
  if (!__kmp_init_gtid) {
    KA_TRACE(50, ("__kmp_gtid_get_specific: runtime shutdown, returning "
                  "KMP_GTID_SHUTDOWN\n"));
    return KMP_GTID_SHUTDOWN;
  }
  gtid = (int)(size_t)pthread_getspecific(__kmp_gtid_threadprivate_key);
  if (gtid == 0) {
    gtid = KMP_GTID_DNE;
  } else {
    gtid--; // stored value is gtid + 1, so 0 can mean "not set"
  }
  KA_TRACE(50, ("__kmp_gtid_get_specific: key:%d gtid:%d\n",
                __kmp_gtid_threadprivate_key, gtid));
  return gtid;
}
double __kmp_read_cpu_time(void) {
  struct tms buffer;

  times(&buffer);

  return (buffer.tms_utime + buffer.tms_cutime) / (double)CLOCKS_PER_SEC;
}
int __kmp_read_system_info(struct kmp_sys_info *info) {
  int status;
  struct rusage r_usage;

  memset(info, 0, sizeof(*info));

  status = getrusage(RUSAGE_SELF, &r_usage);
  KMP_CHECK_SYSFAIL_ERRNO("getrusage", status);

  // The maximum resident set size utilized (in kilobytes)
  info->maxrss = r_usage.ru_maxrss;
  // The number of page faults serviced without any I/O
  info->minflt = r_usage.ru_minflt;
  // The number of page faults serviced that required I/O
  info->majflt = r_usage.ru_majflt;
  // The number of times a process was "swapped" out of memory
  info->nswap = r_usage.ru_nswap;
  // The number of times the file system had to perform input
  info->inblock = r_usage.ru_inblock;
  // The number of times the file system had to perform output
  info->oublock = r_usage.ru_oublock;
  // The number of times a context switch was voluntarily made
  info->nvcsw = r_usage.ru_nvcsw;
  // The number of times a context switch was forced
  info->nivcsw = r_usage.ru_nivcsw;

  return (status != 0);
}
void __kmp_read_system_time(double *delta) {
  double t_ns;
  struct timeval tval;
  struct timespec stop;
  int status;

  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &stop);
  t_ns = TS2NS(stop) - TS2NS(__kmp_sys_timer_data.start);
  *delta = (t_ns * 1e-9);
}

void __kmp_clear_system_time(void) {
  struct timeval tval;
  int status;
  status = gettimeofday(&tval, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  TIMEVAL_TO_TIMESPEC(&tval, &__kmp_sys_timer_data.start);
}
static int __kmp_get_xproc(void) {
  int r = 0;

#if KMP_OS_LINUX || KMP_OS_DRAGONFLY || KMP_OS_FREEBSD || KMP_OS_NETBSD ||    \
    KMP_OS_OPENBSD || KMP_OS_HURD

  r = sysconf(_SC_NPROCESSORS_ONLN);

#elif KMP_OS_DARWIN

  // Find the number of available CPUs.
  kern_return_t rc;
  host_basic_info_data_t info;
  mach_msg_type_number_t num = HOST_BASIC_INFO_COUNT;
  rc = host_info(mach_host_self(), HOST_BASIC_INFO, (host_info_t)&info, &num);
  if (rc == 0 && num == HOST_BASIC_INFO_COUNT) {
    // Cannot use KA_TRACE() here because this code works before trace support
    // is initialized.
    r = info.avail_cpus;
  } else {
    KMP_WARNING(CantGetNumAvailCPU);
    KMP_INFORM(AssumedNumCPU);
  }

#else

#error "Unknown or unsupported OS."

#endif

  return r > 0 ? r : 2; /* guess value of 2 if OS told us 0 */
}
int __kmp_read_from_file(char const *path, char const *format, ...) {
  int result;
  va_list args;

  va_start(args, format);
  FILE *f = fopen(path, "rb");
  if (f == NULL)
    return 0;
  result = vfscanf(f, format, args);
  fclose(f);

  return result;
}
void __kmp_runtime_initialize(void) {
  int status;
  pthread_mutexattr_t mutex_attr;
  pthread_condattr_t cond_attr;

  if (__kmp_init_runtime) {
    return;
  }

#if (KMP_ARCH_X86 || KMP_ARCH_X86_64)
  if (!__kmp_cpuinfo.initialized) {
    __kmp_query_cpuid(&__kmp_cpuinfo);
  }
#endif /* KMP_ARCH_X86 || KMP_ARCH_X86_64 */

  __kmp_xproc = __kmp_get_xproc();

#if !KMP_32_BIT_ARCH
  struct rlimit rlim;
  // Read the stack size of the calling thread and save it as the default for
  // worker threads; this should be done before reading environment variables.
  status = getrlimit(RLIMIT_STACK, &rlim);
  if (status == 0) { // success?
    __kmp_stksize = rlim.rlim_cur;
    __kmp_check_stksize(&__kmp_stksize); // check value and adjust if needed
  }
#endif /* KMP_32_BIT_ARCH */

  if (sysconf(_SC_THREADS)) {

    /* Query the maximum number of threads. */
    __kmp_sys_max_nth = sysconf(_SC_THREAD_THREADS_MAX);
    if (__kmp_sys_max_nth == -1) {
      /* Unlimited threads for NPTL. */
      __kmp_sys_max_nth = INT_MAX;
    } else if (__kmp_sys_max_nth <= 1) {
      /* Can't tell, just use PTHREAD_THREADS_MAX. */
      __kmp_sys_max_nth = KMP_MAX_NTH;
    }

    /* Query the minimum stack size. */
    __kmp_sys_min_stksize = sysconf(_SC_THREAD_STACK_MIN);
    if (__kmp_sys_min_stksize <= 1) {
      __kmp_sys_min_stksize = KMP_MIN_STKSIZE;
    }
  }

  /* Set up minimum number of threads to switch to TLS gtid. */
  __kmp_tls_gtid_min = KMP_TLS_GTID_MIN;

  status = pthread_key_create(&__kmp_gtid_threadprivate_key,
                              __kmp_internal_end_dest);
  KMP_CHECK_SYSFAIL("pthread_key_create", status);
  status = pthread_mutexattr_init(&mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutexattr_init", status);
  status = pthread_mutex_init(&__kmp_wait_mx.m_mutex, &mutex_attr);
  KMP_CHECK_SYSFAIL("pthread_mutex_init", status);
  status = pthread_condattr_init(&cond_attr);
  KMP_CHECK_SYSFAIL("pthread_condattr_init", status);
  status = pthread_cond_init(&__kmp_wait_cv.c_cond, &cond_attr);
  KMP_CHECK_SYSFAIL("pthread_cond_init", status);
#if USE_ITT_BUILD
  __kmp_itt_initialize();
#endif /* USE_ITT_BUILD */

  __kmp_init_runtime = TRUE;
}
void __kmp_runtime_destroy(void) {
  int status;

  if (!__kmp_init_runtime) {
    return; // Nothing to do.
  }

#if USE_ITT_BUILD
  __kmp_itt_destroy();
#endif /* USE_ITT_BUILD */

  status = pthread_key_delete(__kmp_gtid_threadprivate_key);
  KMP_CHECK_SYSFAIL("pthread_key_delete", status);

  status = pthread_mutex_destroy(&__kmp_wait_mx.m_mutex);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_mutex_destroy", status);
  }
  status = pthread_cond_destroy(&__kmp_wait_cv.c_cond);
  if (status != 0 && status != EBUSY) {
    KMP_SYSFAIL("pthread_cond_destroy", status);
  }
#if KMP_AFFINITY_SUPPORTED
  __kmp_affinity_uninitialize();
#endif

  __kmp_init_runtime = FALSE;
}
/* Put the thread to sleep for a time period. */
void __kmp_thread_sleep(int millis) { sleep((millis + 500) / 1000); }

/* Calculate the elapsed wall clock time for the user. */
void __kmp_elapsed(double *t) {
  int status;
#ifdef FIX_SGI_CLOCK
  struct timespec ts;

  status = clock_gettime(CLOCK_PROCESS_CPUTIME_ID, &ts);
  KMP_CHECK_SYSFAIL_ERRNO("clock_gettime", status);
  *t =
      (double)ts.tv_nsec * (1.0 / (double)KMP_NSEC_PER_SEC) + (double)ts.tv_sec;
#else
  struct timeval tv;

  status = gettimeofday(&tv, NULL);
  KMP_CHECK_SYSFAIL_ERRNO("gettimeofday", status);
  *t =
      (double)tv.tv_usec * (1.0 / (double)KMP_USEC_PER_SEC) + (double)tv.tv_sec;
#endif
}

/* Calculate the elapsed wall clock tick for the user. */
void __kmp_elapsed_tick(double *t) { *t = 1 / (double)CLOCKS_PER_SEC; }
/* Return the current time stamp in nsec. */
kmp_uint64 __kmp_now_nsec() {
  struct timeval t;
  gettimeofday(&t, NULL);
  kmp_uint64 nsec = (kmp_uint64)KMP_NSEC_PER_SEC * (kmp_uint64)t.tv_sec +
                    (kmp_uint64)1000 * (kmp_uint64)t.tv_usec;
  return nsec;
}
#if KMP_ARCH_X86 || KMP_ARCH_X86_64
/* Measure clock ticks per millisecond. */
void __kmp_initialize_system_tick() {
  kmp_uint64 now, nsec2, diff;
  kmp_uint64 delay = 100000; // ~50-100 usec on most machines
  kmp_uint64 nsec = __kmp_now_nsec();
  kmp_uint64 goal = __kmp_hardware_timestamp() + delay;
  while ((now = __kmp_hardware_timestamp()) < goal)
    ;
  nsec2 = __kmp_now_nsec();
  diff = nsec2 - nsec;
  if (diff > 0) {
    kmp_uint64 tpms = (kmp_uint64)(1e6 * (delay + (now - goal)) / diff);
    if (tpms > 0)
      __kmp_ticks_per_msec = tpms;
  }
}
#endif
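/* Calibration note: (delay + (now - goal)) is the number of timestamp-counter
   ticks that elapsed while diff nanoseconds of wall time passed, so
   ticks/msec = ticks / (diff ns) * 1e6 ns/msec, which is exactly the tpms
   expression above. */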
/* Determine whether the given address is mapped into the current address
   space. */

int __kmp_is_address_mapped(void *addr) {

  int found = 0;
  int rc;

#if KMP_OS_LINUX || KMP_OS_HURD

  /* On GNUish OSes, read the /proc/<pid>/maps pseudo-file to get all the
     address ranges mapped into the address space. */

  char *name = __kmp_str_format("/proc/%d/maps", getpid());
  FILE *file = NULL;

  file = fopen(name, "r");
  KMP_ASSERT(file != NULL);

  for (;;) {

    void *beginning = NULL;
    void *ending = NULL;
    char perms[5];

    rc = fscanf(file, "%p-%p %4s %*[^\n]\n", &beginning, &ending, perms);
    if (rc == EOF) {
      break;
    }
    KMP_ASSERT(rc == 3 &&
               KMP_STRLEN(perms) == 4); // Make sure all fields are read.

    // Ending address is not included in the region, but beginning is.
    if ((addr >= beginning) && (addr < ending)) {
      perms[2] = 0; // 3rd and 4th character does not matter.
      if (strcmp(perms, "rw") == 0) {
        // Memory we are looking for should be readable and writable.
        found = 1;
      }
      break;
    }
  }

  // Free resources.
  fclose(file);
  KMP_INTERNAL_FREE(name);
#elif KMP_OS_FREEBSD
  char *buf;
  size_t lstsz;
  int mib[] = {CTL_KERN, KERN_PROC, KERN_PROC_VMMAP, getpid()};
  rc = sysctl(mib, 4, NULL, &lstsz, NULL, 0);
  if (rc < 0)
    return 0;
  // We pass the size information of the data in buf to the sysctl.
  lstsz = lstsz * 4 / 3;
  buf = reinterpret_cast<char *>(kmpc_malloc(lstsz));
  rc = sysctl(mib, 4, buf, &lstsz, NULL, 0);
  if (rc < 0) {
    kmpc_free(buf);
    return 0;
  }

  char *lw = buf;
  char *up = buf + lstsz;

  while (lw < up) {
    struct kinfo_vmentry *cur = reinterpret_cast<struct kinfo_vmentry *>(lw);
    size_t cursz = cur->kve_structsize;
    if (cursz == 0)
      break;
    void *start = reinterpret_cast<void *>(cur->kve_start);
    void *end = reinterpret_cast<void *>(cur->kve_end);
    // Readable/writable addresses within the current map entry.
    if ((addr >= start) && (addr < end)) {
      if ((cur->kve_protection & KVME_PROT_READ) != 0 &&
          (cur->kve_protection & KVME_PROT_WRITE) != 0) {
        found = 1;
        break;
      }
    }
    lw += cursz;
  }
  kmpc_free(buf);

#elif KMP_OS_DARWIN

  /* On OS X*, the /proc pseudo-filesystem is not available. Try to read
     memory using the vm interface instead. */

  int buffer;
  vm_size_t count;
  rc = vm_read_overwrite(
      mach_task_self(), // Task to read memory of.
      (vm_address_t)(addr), // Address to read from.
      1, // Number of bytes to be read.
      (vm_address_t)(&buffer), // Address of buffer to save read bytes in.
      &count // Address of var to save number of read bytes in.
      );
  if (rc == 0) {
    // Memory successfully read.
    found = 1;
  }

#elif KMP_OS_NETBSD

  int mib[5];
  mib[0] = CTL_VM;
  mib[1] = VM_PROC;
  mib[2] = VM_PROC_MAP;
  mib[3] = getpid();
  mib[4] = sizeof(struct kinfo_vmentry);

  size_t size;
  rc = sysctl(mib, __arraycount(mib), NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  size = size * 4 / 3;
  struct kinfo_vmentry *kiv =
      (struct kinfo_vmentry *)KMP_INTERNAL_MALLOC(size);
  KMP_ASSERT(kiv);

  rc = sysctl(mib, __arraycount(mib), kiv, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);

  for (size_t i = 0; i < size; i++) {
    if (kiv[i].kve_start >= (uint64_t)addr &&
        kiv[i].kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
  }
  KMP_INTERNAL_FREE(kiv);
#elif KMP_OS_OPENBSD

  int mib[3];
  mib[0] = CTL_KERN;
  mib[1] = KERN_PROC_VMMAP;
  mib[2] = getpid();

  size_t size;
  uint64_t end;
  rc = sysctl(mib, 3, NULL, &size, NULL, 0);
  KMP_ASSERT(!rc);
  KMP_ASSERT(size);
  end = size;

  struct kinfo_vmentry kiv = {.kve_start = 0};

  while ((rc = sysctl(mib, 3, &kiv, &size, NULL, 0)) == 0) {
    KMP_ASSERT(size);
    if (kiv.kve_end == end)
      break;

    if (kiv.kve_start >= (uint64_t)addr && kiv.kve_end <= (uint64_t)addr) {
      found = 1;
      break;
    }
    kiv.kve_start += 1;
  }
#elif KMP_OS_DRAGONFLY

  // FIXME(DragonFly): Implement this
  found = 1;

#else

#error "Unknown or unsupported OS"

#endif

  return found;
}
#ifdef USE_LOAD_BALANCE

#if KMP_OS_DARWIN || KMP_OS_NETBSD

// The function returns the rounded value of the system load average during a
// given time interval which depends on the value of the
// __kmp_load_balance_interval variable (default is 60 sec, other values may
// be 300 sec or 900 sec). It returns -1 in case of error.
int __kmp_get_load_balance(int max) {
  double averages[3];
  int ret_avg = 0;

  int res = getloadavg(averages, 3);

  // Check __kmp_load_balance_interval to determine which of the averages to
  // use. getloadavg() may return fewer samples than requested, depending on
  // the system load.
  if (__kmp_load_balance_interval < 180 && (res >= 1)) {
    ret_avg = averages[0]; // 1 min
  } else if ((__kmp_load_balance_interval >= 180 &&
              __kmp_load_balance_interval < 600) &&
             (res >= 2)) {
    ret_avg = averages[1]; // 5 min
  } else if ((__kmp_load_balance_interval >= 600) && (res == 3)) {
    ret_avg = averages[2]; // 15 min
  } else { // Error occurred
    return -1;
  }

  return ret_avg;
}

#else // Linux* OS
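// Note on the Linux* OS route below: there is no getloadavg()-style shortcut
// with per-thread granularity, so it literally walks /proc. For every process
// directory and every task directory under it, the per-thread "stat" file is
// read and threads in state 'R' (running) are counted; the result is cached
// in glb_running_threads for __kmp_load_balance_interval seconds.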
int __kmp_get_load_balance(int max) {
  static int permanent_error = 0;
  static int glb_running_threads = 0; // Saved count of the running threads
  static double glb_call_time = 0; // Thread balance algorithm call time

  int running_threads = 0; // Number of running threads in the system.

  DIR *proc_dir = NULL; // Handle of "/proc/" directory.
  struct dirent *proc_entry = NULL;

  kmp_str_buf_t task_path; // "/proc/<pid>/task/<tid>/" path.
  DIR *task_dir = NULL; // Handle of "/proc/<pid>/task/<tid>/" directory.
  struct dirent *task_entry = NULL;
  int task_path_fixed_len;

  kmp_str_buf_t stat_path; // "/proc/<pid>/task/<tid>/stat" path.
  int stat_file = -1;
  int stat_path_fixed_len;

  int total_processes = 0; // Total number of processes in system.
  int total_threads = 0; // Total number of threads in system.

  double call_time = 0.0;

  __kmp_str_buf_init(&task_path);
  __kmp_str_buf_init(&stat_path);

  __kmp_elapsed(&call_time);

  if (glb_call_time &&
      (call_time - glb_call_time < __kmp_load_balance_interval)) {
    running_threads = glb_running_threads;
    goto finish;
  }

  glb_call_time = call_time;

  // Do not spend time on scanning "/proc/" if we have a permanent error.
  if (permanent_error) {
    running_threads = -1;
    goto finish;
  }

  if (max <= 0) {
    max = INT_MAX;
  }

  // Open "/proc/" directory.
  proc_dir = opendir("/proc");
  if (proc_dir == NULL) {
    // Cannot open "/proc/". Probably the kernel does not support it. Return
    // an error now and in subsequent calls.
    running_threads = -1;
    permanent_error = 1;
    goto finish;
  }

  // Initialize fixed part of task_path. This part will not change.
  __kmp_str_buf_cat(&task_path, "/proc/", 6);
  task_path_fixed_len = task_path.used; // Remember number of used characters.

  proc_entry = readdir(proc_dir);
  while (proc_entry != NULL) {
    // The proc entry is a directory whose name starts with a digit; assume it
    // is a process' directory.
    if (proc_entry->d_type == DT_DIR && isdigit(proc_entry->d_name[0])) {

      ++total_processes;
      // Make sure the init process is the very first entry in "/proc", so we
      // can rely on total_processes == 1 => d_name == "1".
      KMP_DEBUG_ASSERT(total_processes != 1 ||
                       strcmp(proc_entry->d_name, "1") == 0);

      // Construct task_path.
      task_path.used = task_path_fixed_len; // Reset task_path to "/proc/".
      __kmp_str_buf_cat(&task_path, proc_entry->d_name,
                        KMP_STRLEN(proc_entry->d_name));
      __kmp_str_buf_cat(&task_path, "/task", 5);

      task_dir = opendir(task_path.str);
      if (task_dir == NULL) {
        // A process can finish between reading the "/proc/" entry and opening
        // its "task/" directory, so in general we should just skip it. But
        // the "init" process (pid 1) should always exist, so if we cannot
        // open "/proc/1/task/", the kernel does not support "task/" at all:
        // report an error now and in the future.
        if (strcmp(proc_entry->d_name, "1") == 0) {
          running_threads = -1;
          permanent_error = 1;
          goto finish;
        }
      } else {
        // Construct fixed part of stat file path.
        __kmp_str_buf_clear(&stat_path);
        __kmp_str_buf_cat(&stat_path, task_path.str, task_path.used);
        __kmp_str_buf_cat(&stat_path, "/", 1);
        stat_path_fixed_len = stat_path.used;

        task_entry = readdir(task_dir);
        while (task_entry != NULL) {
          // It is a directory and its name starts with a digit.
          if (proc_entry->d_type == DT_DIR && isdigit(task_entry->d_name[0])) {

            ++total_threads;

            // Construct complete stat file path.
            stat_path.used =
                stat_path_fixed_len; // Reset stat path to its fixed part.
            __kmp_str_buf_cat(&stat_path, task_entry->d_name,
                              KMP_STRLEN(task_entry->d_name));
            __kmp_str_buf_cat(&stat_path, "/stat", 5);

            // Note: the low-level API (open/read/close) is used; the
            // high-level one (fopen/fclose) works noticeably slower.
            stat_file = open(stat_path.str, O_RDONLY);
            if (stat_file == -1) {
              // We cannot report an error because the task (thread) can
              // terminate just before reading this file.
            } else {
              /* Content of "stat" looks like "24285 (program) S ...": thread
                 id, executable name in parentheses, then the thread state,
                 which is all we need. A short buffer suffices, but program
                 names containing ") " can confuse this parse. */
              char buffer[65];
              int len;
              len = read(stat_file, buffer, sizeof(buffer) - 1);
              if (len >= 0) {
                buffer[len] = 0;
                // Searching for the closing parenthesis works a bit faster
                // than sscanf.
                char *close_parent = strstr(buffer, ") ");
                if (close_parent != NULL) {
                  char state = *(close_parent + 2);
                  if (state == 'R') {
                    ++running_threads;
                    if (running_threads >= max) {
                      goto finish;
                    }
                  }
                }
              }
              close(stat_file);
              stat_file = -1;
            }
          }
          task_entry = readdir(task_dir);
        }
        closedir(task_dir);
        task_dir = NULL;
      }
    }
    proc_entry = readdir(proc_dir);
  }

  // There _might_ be a timing hole where the thread executing this code gets
  // skipped in the load balance, and running_threads is 0. Assert in debug
  // builds only!
  KMP_DEBUG_ASSERT(running_threads > 0);
  if (running_threads <= 0) {
    running_threads = 1;
  }

finish: // Clean up and exit.
  if (proc_dir != NULL) {
    closedir(proc_dir);
  }
  __kmp_str_buf_free(&task_path);
  if (task_dir != NULL) {
    closedir(task_dir);
  }
  __kmp_str_buf_free(&stat_path);
  if (stat_file != -1) {
    close(stat_file);
  }

  glb_running_threads = running_threads;

  return running_threads;

} // __kmp_get_load_balance

#endif // KMP_OS_DARWIN

#endif // USE_LOAD_BALANCE
#if !(KMP_ARCH_X86 || KMP_ARCH_X86_64 || KMP_MIC ||                            \
      ((KMP_OS_LINUX || KMP_OS_DARWIN) && KMP_ARCH_AARCH64) ||                 \
      KMP_ARCH_PPC64 || KMP_ARCH_RISCV64)

int __kmp_invoke_microtask(microtask_t pkfn, int gtid, int tid, int argc,
                           void **p_argv
#if OMPT_SUPPORT
                           ,
                           void **exit_frame_ptr
#endif
                           ) {
#if OMPT_SUPPORT
  *exit_frame_ptr = OMPT_GET_FRAME_ADDRESS(0);
#endif

  switch (argc) {
  default:
    fprintf(stderr, "Too many args to microtask: %d!\n", argc);
    fflush(stderr);
    exit(-1);
  case 0:
    (*pkfn)(&gtid, &tid);
    break;
  case 1:
    (*pkfn)(&gtid, &tid, p_argv[0]);
    break;
  case 2:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1]);
    break;
  case 3:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2]);
    break;
  case 4:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3]);
    break;
  case 5:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4]);
    break;
  case 6:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5]);
    break;
  case 7:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6]);
    break;
  case 8:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7]);
    break;
  case 9:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8]);
    break;
  case 10:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9]);
    break;
  case 11:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10]);
    break;
  case 12:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11]);
    break;
  case 13:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12]);
    break;
  case 14:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13]);
    break;
  case 15:
    (*pkfn)(&gtid, &tid, p_argv[0], p_argv[1], p_argv[2], p_argv[3], p_argv[4],
            p_argv[5], p_argv[6], p_argv[7], p_argv[8], p_argv[9], p_argv[10],
            p_argv[11], p_argv[12], p_argv[13], p_argv[14]);
    break;
  }

  return 1;
}