#include "ompt-specific.h"
template <> char const *traits_t<int>::spec = "d";
template <> char const *traits_t<unsigned int>::spec = "u";
template <> char const *traits_t<long long>::spec = "lld";
template <> char const *traits_t<unsigned long long>::spec = "llu";
template <> char const *traits_t<long>::spec = "ld";
#if KMP_STATS_ENABLED
#define KMP_STATS_LOOP_END(stat)                                               \
  {                                                                            \
    kmp_int64 t;                                                               \
    kmp_int64 u = (kmp_int64)(*pupper);                                        \
    kmp_int64 l = (kmp_int64)(*plower);                                        \
    kmp_int64 i = (kmp_int64)incr;                                             \
    if (i == 1) {                                                              \
      t = u - l + 1;                                                           \
    } else if (i == -1) {                                                      \
      t = l - u + 1;                                                           \
    } else if (i > 0) {                                                        \
      t = (u - l) / i + 1;                                                     \
    } else {                                                                   \
      KMP_DEBUG_ASSERT(i != 0);                                                \
      t = (l - u) / (-i) + 1;                                                  \
    }                                                                          \
    KMP_COUNT_VALUE(stat, t);                                                  \
    KMP_POP_PARTITIONED_TIMER();                                               \
  }
#else
#define KMP_STATS_LOOP_END(stat) /* Nothing */
#endif
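// Worked example for the trip-count arithmetic in the macro above
// (illustration only, not part of the runtime): with *plower = 0,
// *pupper = 9 and incr = 2 it computes t = (9 - 0) / 2 + 1 = 5, i.e.
// iterations 0, 2, 4, 6, 8. The explicit i == 1 and i == -1 branches simply
// avoid the division on the most common strides.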
#if USE_ITT_BUILD || defined KMP_DEBUG
static ident_t loc_stub = {0, KMP_IDENT_KMPC, 0, 0, ";unknown;unknown;0;0;;"};
static inline void check_loc(ident_t *&loc) {
  if (loc == NULL)
    loc = &loc_stub; // may need to report location info to ittnotify
}
#endif
template <typename T>
static void __kmp_for_static_init(ident_t *loc, kmp_int32 global_tid,
                                  kmp_int32 schedtype, kmp_int32 *plastiter,
                                  T *plower, T *pupper,
                                  typename traits_t<T>::signed_t *pstride,
                                  typename traits_t<T>::signed_t incr,
                                  typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                  ,
                                  void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static);
  KMP_PUSH_PARTITIONED_TIMER(OMP_loop_static_scheduling);

  // Clear monotonic/nonmonotonic bits (the static schedule ignores them)
  schedtype = SCHEDULE_WITHOUT_MODIFIERS(schedtype);

  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_int32 gtid = global_tid;
  kmp_uint32 tid;
  kmp_uint32 nth;
  UT trip_count;
  kmp_team_t *team;
  __kmp_assert_valid_gtid(gtid);
  kmp_info_t *th = __kmp_threads[gtid];
#if OMPT_SUPPORT && OMPT_OPTIONAL
  ompt_team_info_t *team_info = NULL;
  ompt_task_info_t *task_info = NULL;
  ompt_work_t ompt_work_type = ompt_work_loop_static;

  static kmp_int8 warn = 0;

  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    // Only fully initialize variables needed by OMPT if OMPT is enabled.
    team_info = __ompt_get_teaminfo(0, NULL);
    task_info = __ompt_get_task_info_object(0);
    // Determine the workshare type from the ident flags.
    if (loc != NULL) {
      if ((loc->flags & KMP_IDENT_WORK_LOOP) != 0) {
        ompt_work_type = ompt_work_loop_static;
      } else if ((loc->flags & KMP_IDENT_WORK_SECTIONS) != 0) {
        ompt_work_type = ompt_work_sections;
      } else if ((loc->flags & KMP_IDENT_WORK_DISTRIBUTE) != 0) {
        ompt_work_type = ompt_work_distribute;
      } else {
        kmp_int8 bool_res =
            KMP_COMPARE_AND_STORE_ACQ8(&warn, (kmp_int8)0, (kmp_int8)1);
        if (bool_res)
          KMP_WARNING(OmptOutdatedWorkshare);
      }
      KMP_DEBUG_ASSERT(ompt_work_type);
    }
  }
#endif
  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pstride);
  KE_TRACE(10, ("__kmpc_for_static_init called (%d)\n", global_tid));
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_for_static_init: T#%%d sched=%%d liter=%%d iter=(%%%s,"
        " %%%s, %%%s) incr=%%%s chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, global_tid, schedtype, *plastiter, *plower, *pupper,
                   *pstride, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
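// Note on the format strings above: __kmp_str_format is printf-style, so
// "%%" yields a literal '%' and each "%s" consumes a traits_t spec. A
// fragment such as "%%%s" therefore becomes "%d" (or "%lld", ...) in the
// buffer handed to KD_TRACE, letting one trace format serve every template
// instantiation of T.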
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(global_tid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
  }
  /* special handling for zero-trip loops */
  if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
    if (plastiter != NULL)
      *plastiter = FALSE;
    /* leave pupper and plower set to entire iteration space */
    *pstride = incr; /* value should never be used */
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init:(ZERO TRIP) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s "
                              "signed?<%s>, loc = %%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec, traits_t<T>::spec);
      check_loc(loc);
      KD_TRACE(100,
               (buff, *plastiter, *plower, *pupper, *pstride, loc->psource));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  if (schedtype > kmp_ord_upper) {
    // we are in DISTRIBUTE construct
    schedtype += kmp_sch_static - kmp_distribute_static;
    if (th->th.th_team->t.t_serialized > 1) {
      tid = 0;
      team = th->th.th_team;
    } else {
      tid = th->th.th_team->t.t_master_tid;
      team = th->th.th_team->t.t_parent;
    }
  } else {
    tid = __kmp_tid_from_gtid(global_tid);
    team = th->th.th_team;
  }
  /* determine if "for" loop is an active worksharing construct */
  if (team->t.t_serialized) {
    /* serialized parallel: each thread executes the whole iteration space */
    if (plastiter != NULL)
      *plastiter = TRUE;
    /* leave pupper and plower set to entire iteration space */
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  nth = team->t.t_nproc;
  if (nth == 1) {
    if (plastiter != NULL)
      *plastiter = TRUE;
    *pstride =
        (incr > 0) ? (*pupper - *plower + 1) : (-(*plower - *pupper + 1));
#ifdef KMP_DEBUG
    {
      char *buff;
      // create format specifiers before the debug output
      buff = __kmp_str_format("__kmpc_for_static_init: (serial) liter=%%d "
                              "lower=%%%s upper=%%%s stride = %%%s\n",
                              traits_t<T>::spec, traits_t<T>::spec,
                              traits_t<ST>::spec);
      KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
      __kmp_str_free(&buff);
    }
#endif
    KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), *pstride, codeptr);
    }
#endif
    KMP_STATS_LOOP_END(OMP_loop_static_iterations);
    return;
  }
  /* compute trip count */
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    KMP_DEBUG_ASSERT(incr != 0);
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
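  // Why the generic branches go through the unsigned type UT (illustration
  // only): for a signed T the distance *pupper - *plower can exceed the
  // signed maximum; reinterpreting the (wrapped) difference as UT recovers
  // the exact unsigned distance, so even a range spanning most of the type
  // still produces the correct trip count after the division.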
#if KMP_STATS_ENABLED
  if (KMP_MASTER_GTID(gtid)) {
    KMP_COUNT_VALUE(FOR_static_iterations, trip_count);
  }
#endif

  if (__kmp_env_consistency_check) {
    /* tripcount overflow? */
    if (trip_count == 0 && *pupper != *plower) {
      __kmp_error_construct(kmp_i18n_msg_CnsIterationRangeTooLarge, ct_pdo,
                            loc);
    }
  }
  /* compute remaining parameters */
  switch (schedtype) {
  case kmp_sch_static: {
    if (trip_count < nth) {
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                       __kmp_static == kmp_sch_static_balanced);
      if (tid < trip_count) {
        *pupper = *plower = *plower + tid * incr;
      } else {
        // set bounds so non-active threads execute no iterations
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
      if (plastiter != NULL)
        *plastiter = (tid == trip_count - 1);
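      // Illustration (not part of the runtime): with trip_count = 3, nth = 8
      // and incr = 1, threads 0..2 each receive exactly one iteration
      // (*plower == *pupper), threads 3..7 get an empty range, and only
      // tid == 2 reports *plastiter.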
    } else {
      KMP_DEBUG_ASSERT(nth != 0);
      if (__kmp_static == kmp_sch_static_balanced) {
        UT small_chunk = trip_count / nth;
        UT extras = trip_count % nth;
        *plower += incr * (tid * small_chunk + (tid < extras ? tid : extras));
        *pupper = *plower + small_chunk * incr - (tid < extras ? 0 : incr);
        if (plastiter != NULL)
          *plastiter = (tid == nth - 1);
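        // Balanced example (illustration only): trip_count = 10, nth = 4
        // gives small_chunk = 2 and extras = 2, so threads receive 3, 3, 2, 2
        // iterations; the first 'extras' threads absorb the remainder one
        // iteration each.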
      } else {
        T big_chunk_inc_count =
            (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
        T old_upper = *pupper;

        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);

        *plower += tid * big_chunk_inc_count;
        *pupper = *plower + big_chunk_inc_count - incr;
        if (incr > 0) {
          // clamp if the computed bound overflowed past the type's maximum
          if (*pupper < *plower)
            *pupper = traits_t<T>::max_value;
          if (plastiter != NULL)
            *plastiter = *plower <= old_upper && *pupper > old_upper - incr;
          if (*pupper > old_upper)
            *pupper = old_upper; // tracker C73258
        } else {
          if (*pupper > *plower)
            *pupper = traits_t<T>::min_value;
          if (plastiter != NULL)
            *plastiter = *plower >= old_upper && *pupper < old_upper - incr;
          if (*pupper < old_upper)
            *pupper = old_upper; // tracker C73258
        }
      }
    }
    *pstride = trip_count;
    break;
  }
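  // For the greedy variant above, every thread takes
  // ceil(trip_count / nth) iterations: with trip_count = 10 and nth = 4 the
  // blocks are 3, 3, 3, 1, so the last thread runs short and its upper bound
  // is clamped back to the original loop bound by the old_upper checks
  // (illustration only).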
  case kmp_sch_static_chunked: {
    ST span;
    UT nchunks;
    KMP_DEBUG_ASSERT(chunk != 0);
    if (chunk < 1)
      chunk = 1;
    else if ((UT)chunk > trip_count)
      chunk = trip_count;
    nchunks = (trip_count) / (UT)chunk + (trip_count % (UT)chunk ? 1 : 0);
    span = chunk * incr;
    if (nchunks < nth) {
      *pstride = span * nchunks;
      if (tid < nchunks) {
        *plower = *plower + (span * tid);
        *pupper = *plower + span - incr;
      } else {
        *plower = *pupper + (incr > 0 ? 1 : -1);
      }
    } else {
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
    }
    if (plastiter != NULL)
      *plastiter = (tid == (nchunks - 1) % nth);
    break;
  }
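  // Round-robin example for kmp_sch_static_chunked (illustration only):
  // trip_count = 10, chunk = 2, nth = 3 yields nchunks = 5 and *pstride = 6.
  // Thread 0 starts on iterations {0,1}, thread 1 on {2,3}, thread 2 on
  // {4,5}; each thread then advances by *pstride, so thread 0 also runs
  // {6,7} and thread 1 runs {8,9} and reports *plastiter, since
  // (nchunks - 1) % nth == 1.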
  case kmp_sch_static_balanced_chunked: {
    T old_upper = *pupper;
    KMP_DEBUG_ASSERT(nth != 0);
    // round up to make sure the chunk is enough to cover all iterations
    UT span = (trip_count + nth - 1) / nth;

    // perform chunk adjustment
    chunk = (span + chunk - 1) & ~(chunk - 1);

    span = chunk * incr;
    *plower = *plower + (span * tid);
    *pupper = *plower + span - incr;
    if (incr > 0) {
      if (*pupper > old_upper)
        *pupper = old_upper;
    } else if (*pupper < old_upper)
      *pupper = old_upper;

    if (plastiter != NULL) {
      KMP_DEBUG_ASSERT(chunk != 0);
      *plastiter = (tid == ((trip_count - 1) / (UT)chunk));
    }
    break;
  }
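  // The bit-mask rounding above assumes chunk is a power of two: with
  // span = 10 and chunk = 4, (10 + 3) & ~3 = 12, i.e. span rounded up to the
  // next multiple of 4. The intent is that every thread's block is a whole
  // number of chunk-sized (e.g. simd-width) pieces (illustration only).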
  default:
    KMP_ASSERT2(0, "__kmpc_for_static_init: unknown scheduling type");
    break;
  }
#if USE_ITT_BUILD
  // Report loop metadata
  if (KMP_MASTER_TID(tid) && __itt_metadata_add_ptr &&
      __kmp_forkjoin_frames_mode == 3 && th->th.th_teams_microtask == NULL &&
      team->t.t_active_level == 1) {
    kmp_uint64 cur_chunk = chunk;
    check_loc(loc);
    // Calculate the chunk in case it was not specified; it is specified for
    // kmp_sch_static_chunked.
    if (schedtype == kmp_sch_static) {
      KMP_DEBUG_ASSERT(nth != 0);
      cur_chunk = trip_count / nth + ((trip_count % nth) ? 1 : 0);
    }
    // 0 signifies a static schedule in the metadata
    __kmp_itt_metadata_loop(loc, 0, trip_count, cur_chunk);
  }
#endif
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmpc_for_static_init: liter=%%d lower=%%%s "
                            "upper=%%%s stride = %%%s signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_for_static_init: T#%d return\n", global_tid));

#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work) {
    ompt_callbacks.ompt_callback(ompt_callback_work)(
        ompt_work_type, ompt_scope_begin, &(team_info->parallel_data),
        &(task_info->task_data), trip_count, codeptr);
  }
  if (ompt_enabled.ompt_callback_dispatch) {
    ompt_dispatch_t dispatch_type;
    ompt_data_t instance = ompt_data_none;
    ompt_dispatch_chunk_t dispatch_chunk;
    if (ompt_work_type == ompt_work_sections) {
      dispatch_type = ompt_dispatch_section;
      instance.ptr = codeptr;
    } else {
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupper, incr);
      dispatch_type = (ompt_work_type == ompt_work_distribute)
                          ? ompt_dispatch_distribute_chunk
                          : ompt_dispatch_ws_loop_chunk;
      instance.ptr = &dispatch_chunk;
    }
    ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
        &(team_info->parallel_data), &(task_info->task_data), dispatch_type,
        instance);
  }
#endif

  KMP_STATS_LOOP_END(OMP_loop_static_iterations);
  return;
}
template <typename T>
static void __kmp_dist_for_static_init(ident_t *loc, kmp_int32 gtid,
                                       kmp_int32 schedule, kmp_int32 *plastiter,
                                       T *plower, T *pupper, T *pupperDist,
                                       typename traits_t<T>::signed_t *pstride,
                                       typename traits_t<T>::signed_t incr,
                                       typename traits_t<T>::signed_t chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                       ,
                                       void *codeptr
#endif
) {
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute);
  KMP_PUSH_PARTITIONED_TIMER(OMP_distribute_scheduling);
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 tid;
  kmp_uint32 nth;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  kmp_team_t *team;
  kmp_info_t *th;

  KMP_DEBUG_ASSERT(plastiter && plower && pupper && pupperDist && pstride);
  KE_TRACE(10, ("__kmpc_dist_for_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: T#%%d schedLoop=%%d liter=%%d "
        "iter=(%%%s, %%%s, %%%s) chunk=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<ST>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100,
             (buff, gtid, schedule, *plastiter, *plower, *pupper, incr, chunk));
    __kmp_str_free(&buff);
  }
#endif
  if (__kmp_env_consistency_check) {
    __kmp_push_workshare(gtid, ct_pdo, loc);
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (*pupper < *plower) : (*plower < *pupper)) {
      // the loop is illegal: the sign of incr contradicts the bounds
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  tid = __kmp_tid_from_gtid(gtid);
  th = __kmp_threads[gtid];
  nth = th->th.th_team_nproc;
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute global trip count
  if (incr == 1) {
    trip_count = *pupper - *plower + 1;
  } else if (incr == -1) {
    trip_count = *plower - *pupper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(*pupper - *plower) / incr + 1;
  } else {
    KMP_DEBUG_ASSERT(incr != 0);
    trip_count = (UT)(*plower - *pupper) / (-incr) + 1;
  }
  *pstride = *pupper - *plower; // just in case (can be unused)
  if (trip_count <= nteams) {
    KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                     __kmp_static == kmp_sch_static_balanced);
    // only primary threads of some teams get single iteration, other threads
    // get nothing
    if (team_id < trip_count && tid == 0) {
      *pupper = *pupperDist = *plower = *plower + team_id * incr;
    } else {
      *pupperDist = *pupper;
      *plower = *pupper + incr; // compiler should skip the loop body
    }
    if (plastiter != NULL)
      *plastiter = (tid == 0 && team_id == trip_count - 1);
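    // Illustration (not part of the runtime): with nteams = 4 but
    // trip_count = 2, only the primary threads of teams 0 and 1 get one
    // iteration each; every other thread sees an empty range
    // (*plower = *pupper + incr).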
  } else {
    KMP_DEBUG_ASSERT(nteams != 0);
    if (__kmp_static == kmp_sch_static_balanced) {
      UT chunkD = trip_count / nteams;
      UT extras = trip_count % nteams;
      *plower +=
          incr * (team_id * chunkD + (team_id < extras ? team_id : extras));
      *pupperDist = *plower + chunkD * incr - (team_id < extras ? 0 : incr);
      if (plastiter != NULL)
        *plastiter = (team_id == nteams - 1);
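      // Same balanced split as the loop case, but across teams (illustration
      // only): with trip_count = 10 and nteams = 4 the per-team blocks are
      // 3, 3, 2, 2, and *pupperDist bounds the chunk that each team later
      // subdivides among its own threads.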
    } else {
      ST chunk_inc_count =
          (trip_count / nteams + ((trip_count % nteams) ? 1 : 0)) * incr;
      T upper = *pupper;
      KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
      *plower += team_id * chunk_inc_count;
      *pupperDist = *plower + chunk_inc_count - incr;
      // check/correct bounds if needed
      if (incr > 0) {
        if (*pupperDist < *plower)
          *pupperDist = traits_t<T>::max_value;
        if (plastiter != NULL)
          *plastiter = *plower <= upper && *pupperDist > upper - incr;
        if (*pupperDist > upper)
          *pupperDist = upper; // tracker C73258
        if (*plower > *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      } else {
        if (*pupperDist > *plower)
          *pupperDist = traits_t<T>::min_value;
        if (plastiter != NULL)
          *plastiter = *plower >= upper && *pupperDist < upper - incr;
        if (*pupperDist < upper)
          *pupperDist = upper; // tracker C73258
        if (*plower < *pupperDist) {
          *pupper = *pupperDist; // no iterations available for the team
          goto end;
        }
      }
    }
    // compute trip count for the team's chunk
    if (incr == 1) {
      trip_count = *pupperDist - *plower + 1;
    } else if (incr == -1) {
      trip_count = *plower - *pupperDist + 1;
    } else if (incr > 1) {
      // upper-lower can exceed the limit of signed type
      trip_count = (UT)(*pupperDist - *plower) / incr + 1;
    } else {
      KMP_DEBUG_ASSERT(incr != 0);
      trip_count = (UT)(*plower - *pupperDist) / (-incr) + 1;
    }
    KMP_DEBUG_ASSERT(trip_count);
    // split the team's chunk among its threads
    switch (schedule) {
    case kmp_sch_static: {
      if (trip_count <= nth) {
        KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy ||
                         __kmp_static == kmp_sch_static_balanced);
        if (tid < trip_count)
          *pupper = *plower = *plower + tid * incr;
        else
          *plower = *pupper + incr; // no iterations for this thread
        if (plastiter != NULL)
          if (*plastiter != 0 && !(tid == trip_count - 1))
            *plastiter = 0;
      } else {
        KMP_DEBUG_ASSERT(nth != 0);
        if (__kmp_static == kmp_sch_static_balanced) {
          UT chunkL = trip_count / nth;
          UT extras = trip_count % nth;
          *plower += incr * (tid * chunkL + (tid < extras ? tid : extras));
          *pupper = *plower + chunkL * incr - (tid < extras ? 0 : incr);
          if (plastiter != NULL)
            if (*plastiter != 0 && !(tid == nth - 1))
              *plastiter = 0;
        } else {
          ST chunk_inc_count =
              (trip_count / nth + ((trip_count % nth) ? 1 : 0)) * incr;
          T upper = *pupperDist;
          KMP_DEBUG_ASSERT(__kmp_static == kmp_sch_static_greedy);
          *plower += tid * chunk_inc_count;
          *pupper = *plower + chunk_inc_count - incr;
          if (incr > 0) {
            if (*pupper < *plower)
              *pupper = traits_t<T>::max_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower <= upper && *pupper > upper - incr))
                *plastiter = 0;
            if (*pupper > upper)
              *pupper = upper; // tracker C73258
          } else {
            if (*pupper > *plower)
              *pupper = traits_t<T>::min_value;
            if (plastiter != NULL)
              if (*plastiter != 0 &&
                  !(*plower >= upper && *pupper < upper - incr))
                *plastiter = 0;
            if (*pupper < upper)
              *pupper = upper; // tracker C73258
          }
        }
      }
      break;
    }
    case kmp_sch_static_chunked: {
      ST span;
      if (chunk < 1)
        chunk = 1;
      span = chunk * incr;
      *pstride = span * nth;
      *plower = *plower + (span * tid);
      *pupper = *plower + span - incr;
      if (plastiter != NULL) {
        KMP_DEBUG_ASSERT(chunk != 0);
        if (*plastiter != 0 && !(tid == ((trip_count - 1) / (UT)chunk) % nth))
          *plastiter = 0;
      }
      break;
    }
708 "__kmpc_dist_for_static_init: unknown loop scheduling type");
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format(
        "__kmpc_dist_for_static_init: last=%%d lo=%%%s up=%%%s upDist=%%%s "
        "stride=%%%s signed?<%s>\n",
        traits_t<T>::spec, traits_t<T>::spec, traits_t<T>::spec,
        traits_t<ST>::spec, traits_t<T>::spec);
    KD_TRACE(100, (buff, *plastiter, *plower, *pupper, *pupperDist, *pstride));
    __kmp_str_free(&buff);
  }
#endif
  KE_TRACE(10, ("__kmpc_dist_for_static_init: T#%d return\n", gtid));
#if OMPT_SUPPORT && OMPT_OPTIONAL
  if (ompt_enabled.ompt_callback_work || ompt_enabled.ompt_callback_dispatch) {
    ompt_team_info_t *team_info = __ompt_get_teaminfo(0, NULL);
    ompt_task_info_t *task_info = __ompt_get_task_info_object(0);
    if (ompt_enabled.ompt_callback_work) {
      ompt_callbacks.ompt_callback(ompt_callback_work)(
          ompt_work_distribute, ompt_scope_begin, &(team_info->parallel_data),
          &(task_info->task_data), 0, codeptr);
    }
    if (ompt_enabled.ompt_callback_dispatch) {
      ompt_data_t instance = ompt_data_none;
      ompt_dispatch_chunk_t dispatch_chunk;
      OMPT_GET_DISPATCH_CHUNK(dispatch_chunk, *plower, *pupperDist, incr);
      instance.ptr = &dispatch_chunk;
      ompt_callbacks.ompt_callback(ompt_callback_dispatch)(
          &(team_info->parallel_data), &(task_info->task_data),
          ompt_dispatch_distribute_chunk, instance);
    }
  }
#endif // OMPT_SUPPORT && OMPT_OPTIONAL
  KMP_STATS_LOOP_END(OMP_distribute_iterations);
  return;
}
template <typename T>
static void __kmp_team_static_init(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 *p_last, T *p_lb, T *p_ub,
                                   typename traits_t<T>::signed_t *p_st,
                                   typename traits_t<T>::signed_t incr,
                                   typename traits_t<T>::signed_t chunk) {
  // The routine returns the first chunk distributed to the team; the
  // remaining chunks are scheduled by the team's threads via dispatch calls.
  typedef typename traits_t<T>::unsigned_t UT;
  typedef typename traits_t<T>::signed_t ST;
  kmp_uint32 team_id;
  kmp_uint32 nteams;
  UT trip_count;
  T lower;
  T upper;
  ST span;
  kmp_team_t *team;
  kmp_info_t *th;

  KMP_DEBUG_ASSERT(p_last && p_lb && p_ub && p_st);
  KE_TRACE(10, ("__kmp_team_static_init called (%d)\n", gtid));
  __kmp_assert_valid_gtid(gtid);
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init enter: T#%%d liter=%%d "
                            "iter=(%%%s, %%%s, %%%s) chunk %%%s; signed?<%s>\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec,
                            traits_t<T>::spec);
    KD_TRACE(100, (buff, gtid, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
  lower = *p_lb;
  upper = *p_ub;
  if (__kmp_env_consistency_check) {
    if (incr == 0) {
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrZeroProhibited, ct_pdo,
                            loc);
    }
    if (incr > 0 ? (upper < lower) : (lower < upper)) {
      // the loop is illegal: the sign of incr contradicts the bounds
      __kmp_error_construct(kmp_i18n_msg_CnsLoopIncrIllegal, ct_pdo, loc);
    }
  }
  th = __kmp_threads[gtid];
  team = th->th.th_team;
  KMP_DEBUG_ASSERT(th->th.th_teams_microtask); // we are in the teams construct
  nteams = th->th.th_teams_size.nteams;
  team_id = team->t.t_master_tid;
  KMP_DEBUG_ASSERT(nteams == (kmp_uint32)team->t.t_parent->t.t_nproc);
  // compute trip count
  if (incr == 1) {
    trip_count = upper - lower + 1;
  } else if (incr == -1) {
    trip_count = lower - upper + 1;
  } else if (incr > 0) {
    // upper-lower can exceed the limit of signed type
    trip_count = (UT)(upper - lower) / incr + 1;
  } else {
    KMP_DEBUG_ASSERT(incr != 0);
    trip_count = (UT)(lower - upper) / (-incr) + 1;
  }
  if (chunk < 1)
    chunk = 1;
  span = chunk * incr;
  *p_st = span * nteams;
  *p_lb = lower + (span * team_id);
  *p_ub = *p_lb + span - incr;
  if (p_last != NULL) {
    KMP_DEBUG_ASSERT(chunk != 0);
    *p_last = (team_id == ((trip_count - 1) / (UT)chunk) % nteams);
  }
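  // Cyclic example (illustration only, not part of the runtime): lower = 0,
  // upper = 99, incr = 1, chunk = 10, nteams = 3. Team 0 starts on
  // iterations 0..9, team 1 on 10..19, team 2 on 20..29, and every team
  // advances by *p_st = 30 to its next chunk; team 0 also owns the last
  // chunk 90..99, so it reports *p_last ((100 - 1) / 10 % 3 == 0).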
  // correct the upper bound on overflow, then clamp it to the loop bound
  if (incr > 0) {
    if (*p_ub < *p_lb)
      *p_ub = traits_t<T>::max_value;
    if (*p_ub > upper)
      *p_ub = upper;
  } else {
    if (*p_ub > *p_lb)
      *p_ub = traits_t<T>::min_value;
    if (*p_ub < upper)
      *p_ub = upper;
  }
#ifdef KMP_DEBUG
  {
    char *buff;
    // create format specifiers before the debug output
    buff = __kmp_str_format("__kmp_team_static_init exit: T#%%d team%%u "
                            "liter=%%d iter=(%%%s, %%%s, %%%s) chunk %%%s\n",
                            traits_t<T>::spec, traits_t<T>::spec,
                            traits_t<ST>::spec, traits_t<ST>::spec);
    KD_TRACE(100, (buff, gtid, team_id, *p_last, *p_lb, *p_ub, *p_st, chunk));
    __kmp_str_free(&buff);
  }
#endif
}
void __kmpc_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int32 *plower, kmp_int32 *pupper,
                              kmp_int32 *pstride, kmp_int32 incr,
                              kmp_int32 chunk) {
  __kmp_for_static_init<kmp_int32>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint32 *plower, kmp_uint32 *pupper,
                               kmp_int32 *pstride, kmp_int32 incr,
                               kmp_int32 chunk) {
  __kmp_for_static_init<kmp_uint32>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                              kmp_int32 schedtype, kmp_int32 *plastiter,
                              kmp_int64 *plower, kmp_int64 *pupper,
                              kmp_int64 *pstride, kmp_int64 incr,
                              kmp_int64 chunk) {
  __kmp_for_static_init<kmp_int64>(loc, gtid, schedtype, plastiter, plower,
                                   pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                   ,
                                   OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
void __kmpc_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                               kmp_int32 schedtype, kmp_int32 *plastiter,
                               kmp_uint64 *plower, kmp_uint64 *pupper,
                               kmp_int64 *pstride, kmp_int64 incr,
                               kmp_int64 chunk) {
  __kmp_for_static_init<kmp_uint64>(loc, gtid, schedtype, plastiter, plower,
                                    pupper, pstride, incr, chunk
#if OMPT_SUPPORT && OMPT_OPTIONAL
                                    ,
                                    OMPT_GET_RETURN_ADDRESS(0)
#endif
  );
}
#if OMPT_SUPPORT && OMPT_OPTIONAL
#define OMPT_CODEPTR_ARG , OMPT_GET_RETURN_ADDRESS(0)
#else
#define OMPT_CODEPTR_ARG
#endif
void __kmpc_dist_for_static_init_4(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int32 *plower, kmp_int32 *pupper,
                                   kmp_int32 *pupperD, kmp_int32 *pstride,
                                   kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_int32>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint32 *plower, kmp_uint32 *pupper,
                                    kmp_uint32 *pupperD, kmp_int32 *pstride,
                                    kmp_int32 incr, kmp_int32 chunk) {
  __kmp_dist_for_static_init<kmp_uint32>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_8(ident_t *loc, kmp_int32 gtid,
                                   kmp_int32 schedule, kmp_int32 *plastiter,
                                   kmp_int64 *plower, kmp_int64 *pupper,
                                   kmp_int64 *pupperD, kmp_int64 *pstride,
                                   kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_int64>(loc, gtid, schedule, plastiter, plower,
                                        pupper, pupperD, pstride, incr,
                                        chunk OMPT_CODEPTR_ARG);
}
void __kmpc_dist_for_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                    kmp_int32 schedule, kmp_int32 *plastiter,
                                    kmp_uint64 *plower, kmp_uint64 *pupper,
                                    kmp_uint64 *pupperD, kmp_int64 *pstride,
                                    kmp_int64 incr, kmp_int64 chunk) {
  __kmp_dist_for_static_init<kmp_uint64>(loc, gtid, schedule, plastiter,
                                         plower, pupper, pupperD, pstride,
                                         incr, chunk OMPT_CODEPTR_ARG);
}
void __kmpc_team_static_init_4(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int32 *p_lb, kmp_int32 *p_ub,
                               kmp_int32 *p_st, kmp_int32 incr,
                               kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_4u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint32 *p_lb,
                                kmp_uint32 *p_ub, kmp_int32 *p_st,
                                kmp_int32 incr, kmp_int32 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint32>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}
void __kmpc_team_static_init_8(ident_t *loc, kmp_int32 gtid, kmp_int32 *p_last,
                               kmp_int64 *p_lb, kmp_int64 *p_ub,
                               kmp_int64 *p_st, kmp_int64 incr,
                               kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_int64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                    chunk);
}
void __kmpc_team_static_init_8u(ident_t *loc, kmp_int32 gtid,
                                kmp_int32 *p_last, kmp_uint64 *p_lb,
                                kmp_uint64 *p_ub, kmp_int64 *p_st,
                                kmp_int64 incr, kmp_int64 chunk) {
  KMP_DEBUG_ASSERT(__kmp_init_serial);
  __kmp_team_static_init<kmp_uint64>(loc, gtid, p_last, p_lb, p_ub, p_st, incr,
                                     chunk);
}