#ifndef KMP_AFFINITY_H
#define KMP_AFFINITY_H

#include "kmp.h"
#include "kmp_os.h"

#if KMP_AFFINITY_SUPPORTED
#if KMP_USE_HWLOC
class KMPHwlocAffinity : public KMPAffinity {
public:
  class Mask : public KMPAffinity::Mask {
    hwloc_cpuset_t mask;

  public:
    Mask() { mask = hwloc_bitmap_alloc(); }
    ~Mask() { hwloc_bitmap_free(mask); }
    void set(int i) override { hwloc_bitmap_set(mask, i); }
    bool is_set(int i) const override { return hwloc_bitmap_isset(mask, i); }
    void clear(int i) override { hwloc_bitmap_clr(mask, i); }
    void zero() override { hwloc_bitmap_zero(mask); }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      hwloc_bitmap_copy(mask, convert->mask);
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_and(mask, mask, convert->mask);
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      hwloc_bitmap_or(mask, mask, convert->mask);
    }
    void bitwise_not() override { hwloc_bitmap_not(mask, mask); }
    int begin() const override { return hwloc_bitmap_first(mask); }
    int end() const override { return -1; }
    int next(int previous) const override {
      return hwloc_bitmap_next(mask, previous);
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
      int retval =
          hwloc_get_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
      int retval =
          hwloc_set_cpubind(__kmp_hwloc_topology, mask, HWLOC_CPUBIND_THREAD);
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int get_proc_group() const override {
      int group = -1;
#if KMP_OS_WINDOWS
      if (__kmp_num_proc_groups == 1)
        return 1;
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        // On Windows, unsigned long is always 32 bits.
        unsigned long first_32_bits = hwloc_bitmap_to_ith_ulong(mask, i * 2);
        unsigned long second_32_bits =
            hwloc_bitmap_to_ith_ulong(mask, i * 2 + 1);
        if (first_32_bits == 0 && second_32_bits == 0)
          continue;
        if (group >= 0)
          return -1;
        group = i;
      }
#endif /* KMP_OS_WINDOWS */
      return group;
    }
  };
  void determine_capable(const char *var) override {
    const hwloc_topology_support *topology_support;
    if (__kmp_hwloc_topology == NULL) {
      if (hwloc_topology_init(&__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_init()");
      }
      if (hwloc_topology_load(__kmp_hwloc_topology) < 0) {
        __kmp_hwloc_error = TRUE;
        if (__kmp_affinity_verbose)
          KMP_WARNING(AffHwlocErrorOccurred, var, "hwloc_topology_load()");
      }
    }
    topology_support = hwloc_topology_get_support(__kmp_hwloc_topology);
    // Is the system capable of setting/getting this thread's affinity?
    // Is topology discovery of processing units possible, and did all
    // hwloc calls succeed?
    if (topology_support && topology_support->cpubind->set_thisthread_cpubind &&
        topology_support->cpubind->get_thisthread_cpubind &&
        topology_support->discovery->pu && !__kmp_hwloc_error) {
      // Enables affinity according to the KMP_AFFINITY_CAPABLE() macro.
      KMP_AFFINITY_ENABLE(TRUE);
    } else {
      // Indicate that hwloc didn't work and disable affinity.
      __kmp_hwloc_error = TRUE;
      KMP_AFFINITY_DISABLE();
    }
  }
  void bind_thread(int which) override {
    KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                "Illegal set affinity operation when not capable");
    KMPAffinity::Mask *mask;
    KMP_CPU_ALLOC_ON_STACK(mask);
    KMP_CPU_ZERO(mask);
    KMP_CPU_SET(which, mask);
    __kmp_set_system_affinity(mask, TRUE);
    KMP_CPU_FREE_FROM_STACK(mask);
  }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    delete[] hwloc_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *hwloc_array = static_cast<Mask *>(array);
    return &(hwloc_array[index]);
  }
  api_type get_api_type() const override { return HWLOC; }
};
#endif /* KMP_USE_HWLOC */
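// Illustrative sketch (added, not part of the upstream header): whichever
// backend is active, a caller walks the set bits of a mask with the
// begin()/next()/end() methods defined on KMPAffinity::Mask. Given some
// KMPAffinity *affinity (the name is assumed here for illustration):
//
//   KMPAffinity::Mask *m = affinity->allocate_mask();
//   m->get_system_affinity(/*abort_on_error=*/false);
//   for (int cpu = m->begin(); cpu != m->end(); cpu = m->next(cpu)) {
//     // process logical CPU 'cpu'
//   }
//   affinity->deallocate_mask(m);
//
// end() is a sentinel: -1 for the hwloc mask above, and the total bit count
// for the native masks below, so the loop compares with != rather than <.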
#if KMP_OS_LINUX || KMP_OS_FREEBSD
/* On some of the older OS's that we build on, these constants aren't present
   in <asm/unistd.h> #included from <sys/syscall.h>. They must be the same on
   all systems of the same arch where they are defined, and they cannot
   change. */
#if KMP_OS_LINUX
#include <sys/syscall.h>
#if KMP_ARCH_X86 || KMP_ARCH_ARM
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 241
#elif __NR_sched_setaffinity != 241
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 242
#elif __NR_sched_getaffinity != 242
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_AARCH64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 122
#elif __NR_sched_setaffinity != 122
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 123
#elif __NR_sched_getaffinity != 123
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_X86_64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 203
#elif __NR_sched_setaffinity != 203
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 204
#elif __NR_sched_getaffinity != 204
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_PPC64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 222
#elif __NR_sched_setaffinity != 222
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 223
#elif __NR_sched_getaffinity != 223
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 4239
#elif __NR_sched_setaffinity != 4239
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 4240
#elif __NR_sched_getaffinity != 4240
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#elif KMP_ARCH_MIPS64
#ifndef __NR_sched_setaffinity
#define __NR_sched_setaffinity 5195
#elif __NR_sched_setaffinity != 5195
#error Wrong code for setaffinity system call.
#endif /* __NR_sched_setaffinity */
#ifndef __NR_sched_getaffinity
#define __NR_sched_getaffinity 5196
#elif __NR_sched_getaffinity != 5196
#error Wrong code for getaffinity system call.
#endif /* __NR_sched_getaffinity */
#else
#error Unknown or unsupported architecture
#endif /* KMP_ARCH_* */
#elif KMP_OS_FREEBSD
#include <pthread.h>
#include <pthread_np.h>
#endif
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef unsigned char mask_t;
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;

  public:
    mask_t *mask;
    Mask() { mask = (mask_t *)__kmp_allocate(__kmp_affin_mask_size); }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
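    // Explanatory note (added, not from the upstream source): with mask_t
    // being a single byte, a logical CPU id i maps to byte i / BITS_PER_MASK_T
    // and bit i % BITS_PER_MASK_T within that byte. For example, CPU 11 lives
    // in byte 1, bit 3, so set(11) ORs 0x08 into mask[1] and is_set(11) tests
    // that same bit.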
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (size_t i = 0; i < __kmp_affin_mask_size; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override {
      return __kmp_affin_mask_size * BITS_PER_MASK_T;
    }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int get_system_affinity(bool abort_on_error) override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal get affinity operation when not capable");
#if KMP_OS_LINUX
      int retval =
          syscall(__NR_sched_getaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_getaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
    int set_system_affinity(bool abort_on_error) const override {
      KMP_ASSERT2(KMP_AFFINITY_CAPABLE(),
                  "Illegal set affinity operation when not capable");
#if KMP_OS_LINUX
      int retval =
          syscall(__NR_sched_setaffinity, 0, __kmp_affin_mask_size, mask);
#elif KMP_OS_FREEBSD
      int r = pthread_setaffinity_np(pthread_self(), __kmp_affin_mask_size,
                                     reinterpret_cast<cpuset_t *>(mask));
      int retval = (r == 0 ? 0 : -1);
#endif
      if (retval >= 0)
        return 0;
      int error = errno;
      if (abort_on_error)
        __kmp_fatal(KMP_MSG(FatalSysError), KMP_ERR(error), __kmp_msg_null);
      return error;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override {
    KMPNativeAffinity::Mask *retval = new Mask();
    return retval;
  }
  void deallocate_mask(KMPAffinity::Mask *m) override {
    KMPNativeAffinity::Mask *native_mask =
        static_cast<KMPNativeAffinity::Mask *>(m);
    delete native_mask;
  }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *linux_array = static_cast<Mask *>(array);
    delete[] linux_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *linux_array = static_cast<Mask *>(array);
    return &(linux_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_LINUX || KMP_OS_FREEBSD */
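// Illustrative sketch (added, not part of the upstream header): the
// *_mask_array methods above exist because mask arrays are handed out as
// base-class KMPAffinity::Mask pointers while the concrete Mask objects are
// larger, so elements must be reached through the virtual index_mask_array()
// rather than pointer arithmetic. With an assumed KMPAffinity *affinity and
// count n:
//
//   KMPAffinity::Mask *arr = affinity->allocate_mask_array(n);
//   KMPAffinity::Mask *third = affinity->index_mask_array(arr, 2);
//   third->zero();
//   affinity->deallocate_mask_array(arr);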
#if KMP_OS_WINDOWS
class KMPNativeAffinity : public KMPAffinity {
  class Mask : public KMPAffinity::Mask {
    typedef ULONG_PTR mask_t;
    static const int BITS_PER_MASK_T = sizeof(mask_t) * CHAR_BIT;
    mask_t *mask;

  public:
    Mask() {
      mask = (mask_t *)__kmp_allocate(sizeof(mask_t) * __kmp_num_proc_groups);
    }
    ~Mask() {
      if (mask)
        __kmp_free(mask);
    }
    void set(int i) override {
      mask[i / BITS_PER_MASK_T] |= ((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    bool is_set(int i) const override {
      return (mask[i / BITS_PER_MASK_T] & ((mask_t)1 << (i % BITS_PER_MASK_T)));
    }
    void clear(int i) override {
      mask[i / BITS_PER_MASK_T] &= ~((mask_t)1 << (i % BITS_PER_MASK_T));
    }
    void zero() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = 0;
    }
    void copy(const KMPAffinity::Mask *src) override {
      const Mask *convert = static_cast<const Mask *>(src);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = convert->mask[i];
    }
    void bitwise_and(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] &= convert->mask[i];
    }
    void bitwise_or(const KMPAffinity::Mask *rhs) override {
      const Mask *convert = static_cast<const Mask *>(rhs);
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] |= convert->mask[i];
    }
    void bitwise_not() override {
      for (int i = 0; i < __kmp_num_proc_groups; ++i)
        mask[i] = ~(mask[i]);
    }
    int begin() const override {
      int retval = 0;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int end() const override { return __kmp_num_proc_groups * BITS_PER_MASK_T; }
    int next(int previous) const override {
      int retval = previous + 1;
      while (retval < end() && !is_set(retval))
        ++retval;
      return retval;
    }
    int set_system_affinity(bool abort_on_error) const override {
      if (__kmp_num_proc_groups > 1) {
        GROUP_AFFINITY ga;
        int group = get_proc_group();
        if (group < 0) {
          if (abort_on_error)
            KMP_FATAL(AffinityInvalidMask, "kmp_set_affinity");
          return -1;
        }
        // Transform the bit vector into a GROUP_AFFINITY struct
        // and make the system call to set affinity.
        ga.Group = group;
        ga.Mask = mask[group];
        ga.Reserved[0] = ga.Reserved[1] = ga.Reserved[2] = 0;

        KMP_DEBUG_ASSERT(__kmp_SetThreadGroupAffinity != NULL);
        if (__kmp_SetThreadGroupAffinity(GetCurrentThread(), &ga, NULL) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      } else {
        if (!SetThreadAffinityMask(GetCurrentThread(), *mask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(CantSetThreadAffMask), KMP_ERR(error),
                        __kmp_msg_null);
          }
          return error;
        }
      }
      return 0;
    }
    int get_system_affinity(bool abort_on_error) override {
      if (__kmp_num_proc_groups > 1) {
        this->zero();
        GROUP_AFFINITY ga;
        KMP_DEBUG_ASSERT(__kmp_GetThreadGroupAffinity != NULL);
        if (__kmp_GetThreadGroupAffinity(GetCurrentThread(), &ga) == 0) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetThreadGroupAffinity()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        if ((ga.Group < 0) || (ga.Group > __kmp_num_proc_groups) ||
            (ga.Mask == 0))
          return -1;
        mask[ga.Group] = ga.Mask;
      } else {
        mask_t newMask, sysMask, retval;
        if (!GetProcessAffinityMask(GetCurrentProcess(), &newMask, &sysMask)) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "GetProcessAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        retval = SetThreadAffinityMask(GetCurrentThread(), newMask);
        if (!retval) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
          return error;
        }
        newMask = SetThreadAffinityMask(GetCurrentThread(), retval);
        if (!newMask) {
          DWORD error = GetLastError();
          if (abort_on_error) {
            __kmp_fatal(KMP_MSG(FunctionError, "SetThreadAffinityMask()"),
                        KMP_ERR(error), __kmp_msg_null);
          }
        }
        *mask = retval;
      }
      return 0;
    }
    int get_proc_group() const override {
      int group = -1;
      if (__kmp_num_proc_groups == 1) {
        return 1;
      }
      for (int i = 0; i < __kmp_num_proc_groups; i++) {
        if (mask[i] == 0)
          continue;
        if (group >= 0)
          return -1;
        group = i;
      }
      return group;
    }
  };
  void determine_capable(const char *env_var) override {
    __kmp_affinity_determine_capable(env_var);
  }
  void bind_thread(int which) override { __kmp_affinity_bind_thread(which); }
  KMPAffinity::Mask *allocate_mask() override { return new Mask(); }
  void deallocate_mask(KMPAffinity::Mask *m) override { delete m; }
  KMPAffinity::Mask *allocate_mask_array(int num) override {
    return new Mask[num];
  }
  void deallocate_mask_array(KMPAffinity::Mask *array) override {
    Mask *windows_array = static_cast<Mask *>(array);
    delete[] windows_array;
  }
  KMPAffinity::Mask *index_mask_array(KMPAffinity::Mask *array,
                                      int index) override {
    Mask *windows_array = static_cast<Mask *>(array);
    return &(windows_array[index]);
  }
  api_type get_api_type() const override { return NATIVE_OS; }
};
#endif /* KMP_OS_WINDOWS */
#endif /* KMP_AFFINITY_SUPPORTED */
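// Explanatory note (added, not from the upstream source): in the Windows Mask
// above, mask_t is ULONG_PTR (64 bits on x64) and one mask_t is kept per
// processor group, so a logical CPU id i selects group i / BITS_PER_MASK_T and
// bit i % BITS_PER_MASK_T; with two groups, CPU 70 is group 1, bit 6.
// get_proc_group() reports the single group the mask covers, or -1 when bits
// span more than one group, since a thread's affinity can only be set within
// one processor group at a time.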
class Address {
public:
  static const unsigned maxDepth = 32;
  unsigned labels[maxDepth];
  unsigned childNums[maxDepth];
  unsigned depth;
  unsigned leader;
  Address(unsigned _depth) : depth(_depth), leader(FALSE) {}
  Address &operator=(const Address &b) {
    depth = b.depth;
    for (unsigned i = 0; i < depth; i++) {
      labels[i] = b.labels[i];
      childNums[i] = b.childNums[i];
    }
    leader = FALSE;
    return *this;
  }
  bool operator==(const Address &b) const {
    if (depth != b.depth)
      return false;
    for (unsigned i = 0; i < depth; i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool isClose(const Address &b, int level) const {
    if (depth != b.depth)
      return false;
    if ((unsigned)level >= depth)
      return true;
    for (unsigned i = 0; i < (depth - level); i++)
      if (labels[i] != b.labels[i])
        return false;
    return true;
  }
  bool operator!=(const Address &b) const { return !operator==(b); }
  void print() const {
    unsigned i;
    printf("Depth: %u --- ", depth);
    for (i = 0; i < depth; i++) {
      printf("%u ", labels[i]);
    }
  }
};
class AddrUnsPair {
public:
  Address first;
  unsigned second;
  AddrUnsPair(Address _first, unsigned _second)
      : first(_first), second(_second) {}
  AddrUnsPair &operator=(const AddrUnsPair &b) {
    first = b.first;
    second = b.second;
    return *this;
  }
  void print() const {
    printf("first = ");
    first.print();
    printf(" --- second = %u", second);
  }
  bool operator==(const AddrUnsPair &b) const {
    if (first != b.first)
      return false;
    if (second != b.second)
      return false;
    return true;
  }
  bool operator!=(const AddrUnsPair &b) const { return !operator==(b); }
};
static int __kmp_affinity_cmp_Address_labels(const void *a, const void *b) {
  const Address *aa = &(((const AddrUnsPair *)a)->first);
  const Address *bb = &(((const AddrUnsPair *)b)->first);
  unsigned depth = aa->depth;
  unsigned i;
  KMP_DEBUG_ASSERT(depth == bb->depth);
  for (i = 0; i < depth; i++) {
    if (aa->labels[i] < bb->labels[i])
      return -1;
    if (aa->labels[i] > bb->labels[i])
      return 1;
  }
  return 0;
}
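// Usage sketch (added): this comparator is intended for qsort() over an array
// of AddrUnsPair elements keyed by their Address labels, exactly as
// hierarchy_info::init() does below:
//
//   qsort(adr2os, num_addrs, sizeof(*adr2os),
//         __kmp_affinity_cmp_Address_labels);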
// Machine-specific hierarchy info, computed once at init; maps threads onto
// the machine hierarchy (or our best guess at it) for efficient barriers.
class hierarchy_info {
public:
  // Default leaf count and branching factor when no affinity info is known.
  static const kmp_uint32 maxLeaves = 4;
  static const kmp_uint32 minBranch = 4;
  // Number of levels in the hierarchy; grows when the machine is
  // oversubscribed, doubling thread capacity with each added level.
  kmp_uint32 maxLevels;
  // Depth of the machine configuration hierarchy (longest root-to-leaf path).
  kmp_uint32 depth;
  kmp_uint32 base_num_threads;
  enum init_status { initialized = 0, not_initialized = 1, initializing = 2 };
  volatile kmp_int8 uninitialized; // initialized/not_initialized/initializing
  volatile kmp_int8 resizing; // 1 = resize in progress, 0 = not resizing
  // numPerLevel[i]: children per parent of a level-i node (level 0 = leaves).
  // skipPerLevel[i]: number of leaves spanned by a subtree rooted at level i.
  kmp_uint32 *numPerLevel;
  kmp_uint32 *skipPerLevel;

  void deriveLevels(AddrUnsPair *adr2os, int num_addrs) {
    int hier_depth = adr2os[0].first.depth;
    int level = 0;
    for (int i = hier_depth - 1; i >= 0; --i) {
      int max = -1;
      for (int j = 0; j < num_addrs; ++j) {
        int next = adr2os[j].first.childNums[i];
        if (next > max)
          max = next;
      }
      numPerLevel[level] = max + 1;
      ++level;
    }
  }
  hierarchy_info()
      : maxLevels(7), depth(1), uninitialized(not_initialized), resizing(0) {}

  void fini() {
    if (!uninitialized && numPerLevel) {
      __kmp_free(numPerLevel);
      numPerLevel = NULL;
      uninitialized = not_initialized;
    }
  }

  void init(AddrUnsPair *adr2os, int num_addrs) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(
        &uninitialized, not_initialized, initializing);
    if (bool_result == 0) { // Wait for initialization
      while (TCR_1(uninitialized) != initialized)
        KMP_CPU_PAUSE();
      return;
    }
    KMP_DEBUG_ASSERT(bool_result == 1);

    // Re-initialize fields in case the library is re-initialized.
    depth = 1;
    resizing = 0;
    maxLevels = 7;
    numPerLevel =
        (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
    skipPerLevel = &(numPerLevel[maxLevels]);
    for (kmp_uint32 i = 0; i < maxLevels; ++i) { // init levels to 1 item each
      numPerLevel[i] = 1;
      skipPerLevel[i] = 1;
    }

    // Sort table by physical ID
    if (adr2os) {
      qsort(adr2os, num_addrs, sizeof(*adr2os),
            __kmp_affinity_cmp_Address_labels);
      deriveLevels(adr2os, num_addrs);
    } else {
      numPerLevel[0] = maxLeaves;
      numPerLevel[1] = num_addrs / maxLeaves;
      if (num_addrs % maxLeaves)
        numPerLevel[1]++;
    }

    base_num_threads = num_addrs;
    for (int i = maxLevels - 1; i >= 0;
         --i) // count non-empty levels to get depth
      if (numPerLevel[i] != 1 || depth > 1) // only count one top-level '1'
        depth++;

    kmp_uint32 branch = minBranch;
    if (numPerLevel[0] == 1)
      branch = num_addrs / maxLeaves;
    if (branch < minBranch)
      branch = minBranch;
    for (kmp_uint32 d = 0; d < depth - 1; ++d) { // optimize hierarchy width
      while (numPerLevel[d] > branch ||
             (d == 0 && numPerLevel[d] > maxLeaves)) { // max 4 on level 0!
        if (numPerLevel[d] & 1)
          numPerLevel[d]++;
        numPerLevel[d] = numPerLevel[d] >> 1;
        if (numPerLevel[d + 1] == 1) // new level!
          depth++;
        numPerLevel[d + 1] = numPerLevel[d + 1] << 1;
      }
      if (numPerLevel[0] == 1) {
        branch = branch >> 1;
        if (branch < 4)
          branch = minBranch;
      }
    }

    for (kmp_uint32 i = 1; i < depth; ++i)
      skipPerLevel[i] = numPerLevel[i - 1] * skipPerLevel[i - 1];
    // Fill in hierarchy in the case of oversubscription
    for (kmp_uint32 i = depth; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    uninitialized = initialized; // One writer
  }
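  // Worked example (added, not from the upstream source): for 16 threads laid
  // out as 2 threads/core, 4 cores/package, 2 packages, deriveLevels() yields
  // numPerLevel = {2, 4, 2, 1, 1, 1, 1} and depth becomes 4; the loops above
  // then give skipPerLevel = {1, 2, 8, 16, 32, 64, 128}, i.e. skipPerLevel[i]
  // is the leaf count under a level-i subtree, with levels past depth doubling
  // to cover oversubscription.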
  void resize(kmp_uint32 nproc) {
    kmp_int8 bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    while (bool_result == 0) { // someone else is trying to resize
      KMP_CPU_PAUSE();
      if (nproc <= base_num_threads) // happy with other thread's resize
        return;
      else // try to resize
        bool_result = KMP_COMPARE_AND_STORE_ACQ8(&resizing, 0, 1);
    }
    KMP_DEBUG_ASSERT(bool_result != 0);
    if (nproc <= base_num_threads)
      return; // happy with other thread's resize

    // First see if the old maxLevels is enough to contain the new size
    kmp_uint32 old_sz = skipPerLevel[depth - 1];
    kmp_uint32 incs = 0, old_maxLevels = maxLevels;
    for (kmp_uint32 i = depth; i < maxLevels && nproc > old_sz; ++i) {
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];
      numPerLevel[i - 1] *= 2;
      old_sz *= 2;
      depth++;
    }
    if (nproc > old_sz) { // Not enough space, need to expand hierarchy
      while (nproc > old_sz) {
        old_sz *= 2;
        incs++;
        depth++;
      }
      maxLevels += incs;

      // Resize arrays, copy old levels, and initialize new levels to 1
      kmp_uint32 *old_numPerLevel = numPerLevel;
      kmp_uint32 *old_skipPerLevel = skipPerLevel;
      numPerLevel = skipPerLevel = NULL;
      numPerLevel =
          (kmp_uint32 *)__kmp_allocate(maxLevels * 2 * sizeof(kmp_uint32));
      skipPerLevel = &(numPerLevel[maxLevels]);
      for (kmp_uint32 i = 0; i < old_maxLevels; ++i) {
        numPerLevel[i] = old_numPerLevel[i];
        skipPerLevel[i] = old_skipPerLevel[i];
      }
      for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i) {
        numPerLevel[i] = 1;
        skipPerLevel[i] = 1;
      }
      __kmp_free(old_numPerLevel);
    }

    // Fill in oversubscription levels of hierarchy
    for (kmp_uint32 i = old_maxLevels; i < maxLevels; ++i)
      skipPerLevel[i] = 2 * skipPerLevel[i - 1];

    base_num_threads = nproc;
    resizing = 0; // One writer
  }
};
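// Worked example (added, not from the upstream source): continuing the 16-leaf
// hierarchy sketched above (depth 4, skipPerLevel[depth - 1] == 16),
// resize(40) first doubles within the existing maxLevels: old_sz grows
// 16 -> 32 -> 64 using two of the spare levels, which already covers
// nproc == 40, so no reallocation happens; the spare levels' numPerLevel and
// skipPerLevel entries and depth are updated in place and base_num_threads
// becomes 40.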
#endif // KMP_AFFINITY_H