Intel(R) Threading Building Blocks Doxygen Documentation
version 4.2.3
A scheduler with a customized evaluation loop.

#include <custom_scheduler.h>
custom_scheduler (market &m, bool genuine)

void local_wait_for_all (task &parent, task *child) __TBB_override
    Scheduler loop that dispatches tasks.

void wait_for_all (task &parent, task *child) __TBB_override
    Entry point from client code to the scheduler loop that dispatches tasks.

void tally_completion_of_predecessor (task &s, __TBB_ISOLATION_ARG(task *&bypass_slot, isolation_tag isolation))
    Decrements ref_count of a predecessor.

bool process_bypass_loop (context_guard_helper< SchedulerTraits::itt_possible > &context_guard, __TBB_ISOLATION_ARG(task *t, isolation_tag isolation))
    Implements the bypass loop of the dispatch loop (local_wait_for_all).

bool is_task_pool_published () const

bool is_local_task_pool_quiescent () const

bool is_quiescent_local_task_pool_empty () const

bool is_quiescent_local_task_pool_reset () const

void attach_mailbox (affinity_id id)

void init_stack_info ()
    Sets up the data necessary for the stealing limiting heuristics.

bool can_steal ()
    Returns true if stealing is allowed.

void publish_task_pool ()
    Used by workers to enter the task pool.

void leave_task_pool ()
    Leave the task pool.

void reset_task_pool_and_leave ()
    Resets head and tail indices to 0, and leaves the task pool.

task ** lock_task_pool (arena_slot *victim_arena_slot) const
    Locks the victim's task pool, and returns a pointer to it. The pointer can be NULL.

void unlock_task_pool (arena_slot *victim_arena_slot, task **victim_task_pool) const
    Unlocks the victim's task pool.

void acquire_task_pool () const
    Locks the local task pool.

void release_task_pool () const
    Unlocks the local task pool.

task * prepare_for_spawning (task *t)
    Checks if t is affinitized to another thread, and if so, bundles it as a proxy.

void commit_spawned_tasks (size_t new_tail)
    Makes newly spawned tasks visible to thieves.

void commit_relocated_tasks (size_t new_tail)
    Makes relocated tasks visible to thieves and releases the local task pool.

task * get_task (__TBB_ISOLATION_EXPR(isolation_tag isolation))
    Get a task from the local pool.

task * get_task (size_t T)
    Get a task from the local pool at specified location T.

task * get_mailbox_task (__TBB_ISOLATION_EXPR(isolation_tag isolation))
    Attempt to get a task from the mailbox.

task * steal_task (__TBB_ISOLATION_EXPR(isolation_tag isolation))
    Attempts to steal a task from a randomly chosen thread/scheduler.

task * steal_task_from (__TBB_ISOLATION_ARG(arena_slot &victim_arena_slot, isolation_tag isolation))
    Steal a task from another scheduler's ready pool.

size_t prepare_task_pool (size_t n)
    Makes sure that the task pool can accommodate at least n more elements.

bool cleanup_master (bool blocking_terminate)
    Perform necessary cleanup when a master thread stops using TBB.

void assert_task_pool_valid () const

void attach_arena (arena *, size_t index, bool is_master)

void nested_arena_entry (arena *, size_t)

void nested_arena_exit ()

void wait_until_empty ()

void spawn (task &first, task *&next) __TBB_override
    For internal use only.

void spawn_root_and_wait (task &first, task *&next) __TBB_override
    For internal use only.

void enqueue (task &, void *reserved) __TBB_override
    For internal use only.

void local_spawn (task *first, task *&next)

void local_spawn_root_and_wait (task *first, task *&next)

void destroy ()
    Destroy and deallocate this scheduler object.

void cleanup_scheduler ()
    Cleans up this scheduler (the scheduler might be destroyed).

task & allocate_task (size_t number_of_bytes, __TBB_CONTEXT_ARG(task *parent, task_group_context *context))
    Allocate a task object, either from the heap or a free list.

template<free_task_hint h>
void free_task (task &t)
    Put a task on the free list.

void deallocate_task (task &t)
    Return a task object to the memory allocator.

bool is_worker () const
    True if running on a worker thread, false otherwise.

bool outermost_level () const
    True if the scheduler is on the outermost dispatch level.

bool master_outermost_level () const
    True if the scheduler is on the outermost dispatch level in a master thread.

bool worker_outermost_level () const
    True if the scheduler is on the outermost dispatch level in a worker thread.

unsigned max_threads_in_arena ()
    Returns the concurrency limit of the current arena.

void free_nonlocal_small_task (task &t)
    Free a small task t that was allocated by a different scheduler.

generic_scheduler (market &, bool)

virtual ~scheduler ()=0
    Pure virtual destructor.
template<typename SchedulerTraits>
class tbb::internal::custom_scheduler< SchedulerTraits >
A scheduler with a customized evaluation loop.
The customization can use SchedulerTraits to make decisions without needing a run-time check.
Definition at line 52 of file custom_scheduler.h.
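A minimal sketch of the traits mechanism: the trait fields are compile-time constants (both itt_possible and has_slow_atomic appear in the excerpts below), so branches that test them are resolved by the compiler and cost nothing at run time. The surrounding scaffolding here is hypothetical, not taken from this file.

struct default_scheduler_traits {
    static const bool itt_possible = false;    // emit ITT sync annotations?
    static const bool has_slow_atomic = false; // is fetch-and-decrement expensive?
};

template<typename SchedulerTraits>
class scheduler_sketch {
public:
    void note_acquired( void* obj ) {
        // The condition is a compile-time constant, so the dead branch is
        // eliminated entirely; release builds contain no run-time check.
        if( SchedulerTraits::itt_possible )
            /* ITT_NOTIFY( sync_acquired, obj ) */ (void)obj;
    }
};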
◆ scheduler_type
template<typename SchedulerTraits >
typedef custom_scheduler< SchedulerTraits > scheduler_type
◆ custom_scheduler()
template<typename SchedulerTraits >
custom_scheduler (market &m, bool genuine)
◆ allocate_scheduler()
template<typename SchedulerTraits >
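The excerpt does not show the body of allocate_scheduler(). A hedged guess at the usual shape of such a factory, allocating on a cache-line boundary with NFS_Allocate and placement-constructing the scheduler; the actual TBB implementation may differ:

template<typename SchedulerTraits>
static generic_scheduler* allocate_scheduler_sketch( market& m, bool genuine ) {
    typedef custom_scheduler<SchedulerTraits> scheduler_type;
    // NFS_Allocate returns memory on a cache/sector line boundary,
    // which avoids false sharing on the scheduler's hot fields.
    void* p = NFS_Allocate( 1, sizeof(scheduler_type), NULL );
    return new( p ) scheduler_type( m, genuine );
}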
◆ local_wait_for_all()
template<typename SchedulerTraits >
Scheduler loop that dispatches tasks.
If child is non-NULL, it is dispatched first. Then, until parent has a reference count of 1, other tasks are dispatched or stolen.
Implements tbb::internal::generic_scheduler.
Definition at line 552 of file custom_scheduler.h.
556 #if __TBB_TASK_GROUP_CONTEXT
562 if( SchedulerTraits::itt_possible )
567 context_guard_helper<SchedulerTraits::itt_possible> context_guard;
576 #if __TBB_PREVIEW_CRITICAL_TASKS
579 #if __TBB_TASK_PRIORITY
581 volatile intptr_t *old_ref_top_priority = my_ref_top_priority;
584 volatile uintptr_t *old_ref_reload_epoch = my_ref_reload_epoch;
589 my_ref_top_priority = &parent.prefix().context->my_priority;
590 my_ref_reload_epoch = &my_arena->my_reload_epoch;
591 if (my_ref_reload_epoch != old_ref_reload_epoch)
592 my_local_reload_epoch = *my_ref_reload_epoch - 1;
595 #if __TBB_TASK_ISOLATION
600 t->prefix().isolation = isolation;
603 #if __TBB_PREVIEW_RESUMABLE_TASKS
606 tbb::atomic<bool> recall_flag;
612 my_current_is_recalled = &recall_flag;
615 task* old_wait_task = my_wait_task;
618 #if TBB_USE_EXCEPTIONS
632 #if __TBB_PREVIEW_RESUMABLE_TASKS
636 my_wait_task = old_wait_task;
642 if( parent.prefix().ref_count == 1 ) {
648 #if __TBB_PREVIEW_RESUMABLE_TASKS
650 if ( &recall_flag != my_arena_slot->my_scheduler_is_recalled ) {
653 if ( !resume_original_scheduler() ) {
656 "Only a coroutine on outermost level can be left." );
660 my_wait_task = old_wait_task;
675 #if __TBB_HOARD_NONLOCAL_TASKS
677 for (; my_nonlocal_free_list; my_nonlocal_free_list = t ) {
678 t = my_nonlocal_free_list->prefix().next;
687 #if __TBB_TASK_PRIORITY
688 my_ref_top_priority = old_ref_top_priority;
689 if(my_ref_reload_epoch != old_ref_reload_epoch)
690 my_local_reload_epoch = *old_ref_reload_epoch-1;
691 my_ref_reload_epoch = old_ref_reload_epoch;
693 #if __TBB_PREVIEW_RESUMABLE_TASKS
694 if (&recall_flag != my_arena_slot->my_scheduler_is_recalled) {
697 tbb::task::suspend(recall_functor(&recall_flag));
703 __TBB_ASSERT(!(my_wait_task->prefix().ref_count & internal::abandon_flag), NULL);
704 my_wait_task = old_wait_task;
710 #if __TBB_PREVIEW_RESUMABLE_TASKS
714 if ( &recall_flag == my_arena_slot->my_scheduler_is_recalled || old_wait_task != NULL )
718 tbb::task::suspend( recall_functor(&recall_flag) );
727 #if TBB_USE_EXCEPTIONS
734 #if __TBB_RECYCLE_TO_ENQUEUE
736 || t->state() == task::to_enqueue
741 if( SchedulerTraits::itt_possible )
744 if( SchedulerTraits::itt_possible )
745 ITT_NOTIFY(sync_acquired, &t->prefix().ref_count);
754 #if __TBB_PREVIEW_RESUMABLE_TASKS
756 my_wait_task = old_wait_task;
757 if (my_wait_task == NULL) {
759 if (&recall_flag != my_arena_slot->my_scheduler_is_recalled) {
763 tbb::task::suspend(recall_functor(&recall_flag));
769 my_current_is_recalled = NULL;
775 #if __TBB_TASK_PRIORITY
776 my_ref_top_priority = old_ref_top_priority;
777 if(my_ref_reload_epoch != old_ref_reload_epoch)
778 my_local_reload_epoch = *old_ref_reload_epoch-1;
779 my_ref_reload_epoch = old_ref_reload_epoch;
782 if ( parent.prefix().ref_count != 1) {
785 "Worker thread exits nested dispatch loop prematurely" );
788 parent.prefix().ref_count = 0;
793 #if __TBB_TASK_GROUP_CONTEXT
795 task_group_context* parent_ctx = parent.prefix().context;
796 if ( parent_ctx->my_cancellation_requested ) {
801 parent_ctx->my_cancellation_requested = 0;
809 context_guard.restore_default();
810 TbbRethrowException( pe );
814 "Worker's dummy task context modified");
816 "Unexpected exception or cancellation data in the master's dummy task");
References __TBB_ASSERT, __TBB_control_consistency_helper, __TBB_FetchAndDecrementWrelease, __TBB_ISOLATION_ARG, __TBB_ISOLATION_EXPR, tbb::task::allocated, tbb::internal::ConcurrentWaitsEnabled(), tbb::internal::es_ref_count_active, tbb::internal::is_critical(), tbb::internal::governor::is_set(), tbb::internal::task_prefix::isolation, ITT_NOTIFY, ITT_SYNC_CREATE, tbb::task_group_context::may_have_children, tbb::internal::task_prefix::next, tbb::internal::no_isolation, tbb::internal::num_priority_levels, parent, tbb::task::parent(), tbb::task::prefix(), tbb::task::recycle, tbb::internal::task_prefix::state, and sync_releasing.
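This loop is what runs underneath the blocking-style task API. A minimal client-side sketch of the protocol it waits on, adapted from the classic Fibonacci example in the TBB Reference (FibTask, SerialFib, and the cutoff value are illustrative, not part of this file): the parent sets its reference count to the number of children plus one, spawns them, and the dispatch loop runs until the count drops back to 1.

#include <tbb/task.h>

long SerialFib( long n ); // assumed serial helper

struct FibTask : public tbb::task {
    const long n;
    long* const sum;
    FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
    tbb::task* execute() {
        if( n < 16 ) { *sum = SerialFib(n); return NULL; }
        long x, y;
        FibTask& a = *new( allocate_child() ) FibTask( n-1, &x );
        FibTask& b = *new( allocate_child() ) FibTask( n-2, &y );
        set_ref_count( 3 );          // two children + one for the wait itself
        spawn( b );
        spawn_and_wait_for_all( a ); // dispatches/steals until ref_count == 1
        *sum = x + y;
        return NULL;
    }
};

spawn_and_wait_for_all() enters wait_for_all(), i.e. this dispatch loop, on the calling thread.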
◆ process_bypass_loop()
template<typename SchedulerTraits >
Implements the bypass loop of the dispatch loop (local_wait_for_all).
Definition at line 384 of file custom_scheduler.h.
392 #if __TBB_TASK_ISOLATION
394 "A task from another isolated region is going to be executed" );
397 #if __TBB_TASK_GROUP_CONTEXT && TBB_USE_ASSERT
398 assert_context_valid(t->prefix().context);
399 if ( !t->prefix().context->my_cancellation_requested )
404 #if __TBB_PREVIEW_CRITICAL_TASKS
410 "Received task must be critical one" );
418 #if __TBB_TASK_PRIORITY
419 intptr_t p = priority(*t);
420 if ( p != *my_ref_top_priority
421 && !t->is_enqueued_task() ) {
422 assert_priority_valid(p);
426 if ( p < effective_reference_priority() ) {
427 if ( !my_offloaded_tasks ) {
428 my_offloaded_task_list_tail_link = &t->prefix().next_offloaded;
431 *my_offloaded_task_list_tail_link = NULL;
433 offload_task( *t, p );
448 #if __TBB_PREVIEW_CRITICAL_TASKS
456 #if __TBB_TASK_GROUP_CONTEXT
457 context_guard.set_ctx( t->prefix().context );
458 if ( !t->prefix().context->my_cancellation_requested )
464 #if __TBB_TASK_PRIORITY
468 ITT_STACK(SchedulerTraits::itt_possible, callee_enter, t->prefix().context->itt_caller);
469 t_next = t->execute();
470 ITT_STACK(SchedulerTraits::itt_possible, callee_leave, t->prefix().context->itt_caller);
474 "if task::execute() returns task, it must be marked as allocated" );
478 affinity_id next_affinity=t_next->prefix().affinity;
485 switch( t->state() ) {
487 task* s = t->parent();
489 __TBB_ASSERT( t->prefix().ref_count==0, "Task still has children after it has been executed" );
493 free_task<no_hint>( *t );
501 #if __TBB_RECYCLE_TO_ENQUEUE
503 case task::to_enqueue:
505 __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
513 __TBB_ASSERT( t_next, "reexecution requires that method execute() return another task" );
514 __TBB_ASSERT( t_next != t, "a task returned from method execute() can not be recycled in another way" );
523 #if __TBB_PREVIEW_RESUMABLE_TASKS
524 case task::to_resume:
526 __TBB_ASSERT(t->prefix().ref_count == 0, "Task still has children after it has been executed");
528 free_task<no_hint>(*t);
530 "Only a coroutine on outermost level can be left.");
536 __TBB_ASSERT( false, "task is in READY state upon return from method execute()" );
References __TBB_ASSERT, __TBB_ASSERT_EX, __TBB_fallthrough, __TBB_ISOLATION_ARG, __TBB_ISOLATION_EXPR, tbb::task::allocated, tbb::internal::assert_task_valid(), tbb::internal::task_prefix::context, tbb::task::execute(), tbb::task::executing, GATHER_STATISTIC, tbb::internal::is_critical(), tbb::task::is_enqueued_task(), tbb::internal::task_prefix::isolation, tbb::task_group_context::itt_caller, ITT_NOTIFY, ITT_STACK, tbb::task_group_context::my_cancellation_requested, tbb::internal::task_prefix::next, tbb::internal::task_prefix::next_offloaded, tbb::internal::no_isolation, tbb::internal::task_prefix::owner, p, tbb::task::parent(), tbb::internal::poison_pointer(), tbb::task::prefix(), tbb::task::ready, tbb::task::recycle, tbb::task::reexecute, tbb::internal::task_prefix::ref_count, tbb::internal::reset_extra_state(), s, tbb::internal::context_guard_helper< T >::set_ctx(), tbb::internal::task_prefix::state, tbb::task::state(), tbb::internal::arena::wakeup, and tbb::task::~task().
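The bypass loop exists because task::execute() may return a pointer to the next task to run; the returned task is executed immediately instead of going through spawn and a possible steal. A sketch of the canonical scheduler-bypass pattern from the TBB Reference, a continuation-passing rework of the FibTask sketch above (names remain illustrative):

struct FibContinuation : public tbb::task {
    long* const sum;
    long x, y;
    explicit FibContinuation( long* sum_ ) : sum(sum_) {}
    tbb::task* execute() { *sum = x + y; return NULL; }
};

struct FibTask : public tbb::task {
    long n;
    long* sum;
    FibTask( long n_, long* sum_ ) : n(n_), sum(sum_) {}
    tbb::task* execute() {
        if( n < 16 ) { *sum = SerialFib(n); return NULL; } // assumed helper
        FibContinuation& c = *new( allocate_continuation() ) FibContinuation(sum);
        FibTask& a = *new( c.allocate_child() ) FibTask( n-2, &c.x );
        FibTask& b = *new( c.allocate_child() ) FibTask( n-1, &c.y );
        c.set_ref_count( 2 );
        spawn( a );
        return &b; // bypass: b runs next on this thread, no pool round trip
    }
};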
◆ receive_or_steal_task()
template<typename SchedulerTraits >
Try getting a task from the mailbox or stealing from another scheduler.
Returns the stolen task or NULL if all attempts fail.
Implements tbb::internal::generic_scheduler.
Definition at line 156 of file custom_scheduler.h.
161 bool outermost_current_worker_level = outermost_worker_level;
162 #if __TBB_PREVIEW_RESUMABLE_TASKS
166 #if __TBB_HOARD_NONLOCAL_TASKS
169 #if __TBB_TASK_PRIORITY
170 if ( outermost_dispatch_level ) {
171 if ( intptr_t skipped_priority = my_arena->my_skipped_fifo_priority ) {
175 if ( my_arena->my_skipped_fifo_priority.compare_and_swap(0, skipped_priority) == skipped_priority
176 && skipped_priority > my_arena->my_top_priority )
189 for( int failure_count = -static_cast<int>(SchedulerTraits::itt_possible);; ++failure_count) {
192 if( completion_ref_count == 1 ) {
193 if( SchedulerTraits::itt_possible ) {
194 if( failure_count!=-1 ) {
195 ITT_NOTIFY(sync_prepare, &completion_ref_count);
199 ITT_NOTIFY(sync_acquired, &completion_ref_count);
209 if ( outermost_current_worker_level ) {
211 if ( SchedulerTraits::itt_possible && failure_count != -1 )
216 #if __TBB_PREVIEW_RESUMABLE_TASKS
224 #if __TBB_TASK_PRIORITY
227 static const int p = 0;
233 #if __TBB_TASK_ISOLATION
252 #if __TBB_PREVIEW_CRITICAL_TASKS && __TBB_CPF_BUILD
261 #if __TBB_TASK_PRIORITY
271 #if __TBB_PREVIEW_CRITICAL_TASKS
277 #endif // __TBB_PREVIEW_CRITICAL_TASKS
282 #if __TBB_ARENA_OBSERVER
283 my_arena->my_observers.notify_entry_observers( my_last_local_observer, is_worker() );
285 #if __TBB_SCHEDULER_OBSERVER
286 the_global_observer_list.notify_entry_observers( my_last_global_observer, is_worker() );
288 if ( SchedulerTraits::itt_possible && failure_count != -1 ) {
296 if( SchedulerTraits::itt_possible && failure_count==-1 ) {
306 const int failure_threshold = 2*int(n+1);
307 if( failure_count>=failure_threshold ) {
311 failure_count = failure_threshold;
314 #if __TBB_TASK_PRIORITY
316 if ( my_arena->my_orphaned_tasks ) {
319 task* orphans = (task*)__TBB_FetchAndStoreW( &my_arena->my_orphaned_tasks, 0 );
323 my_local_reload_epoch--;
324 t = reload_tasks( orphans, link, __TBB_ISOLATION_ARG( effective_reference_priority(), isolation ) );
326 *link = my_offloaded_tasks;
327 if ( !my_offloaded_tasks )
328 my_offloaded_task_list_tail_link = link;
329 my_offloaded_tasks = orphans;
331 __TBB_ASSERT( !my_offloaded_tasks == !my_offloaded_task_list_tail_link, NULL );
333 if( SchedulerTraits::itt_possible )
341 const int yield_threshold = 100;
342 if( yield_count++ >= yield_threshold ) {
345 #if __TBB_TASK_PRIORITY
346 if( outermost_current_worker_level || my_arena->my_top_priority > my_arena->my_bottom_priority ) {
351 if( SchedulerTraits::itt_possible )
355 #if __TBB_TASK_PRIORITY
357 if ( my_offloaded_tasks ) {
360 my_local_reload_epoch--;
365 if ( !outermost_worker_level && *my_ref_top_priority > my_arena->my_top_priority ) {
367 my_ref_top_priority = &my_arena->my_top_priority;
References __TBB_ASSERT, __TBB_control_consistency_helper, __TBB_ISOLATION_ARG, __TBB_ISOLATION_EXPR, __TBB_Yield, GATHER_STATISTIC, int, tbb::internal::is_critical(), ITT_NOTIFY, tbb::internal::no_isolation, p, tbb::internal::prolonged_pause(), and sync_cancel.
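The loop above spins through a bounded number of failed attempts (failure_threshold, proportional to the number of busy slots) before it starts yielding, and backs off for good after yield_threshold yields. A hedged stand-alone sketch of that backoff shape; try_steal, the worker count, and the parking decision are illustrative stand-ins, not TBB's internals:

#include <atomic>
#include <thread>

template<typename TrySteal>
void steal_with_backoff( TrySteal try_steal, std::atomic<long>& completion_ref_count,
                         int n_workers ) {
    const int failure_threshold = 2 * (n_workers + 1);
    const int yield_threshold = 100; // same constant as in the excerpt
    int failure_count = 0, yield_count = 0;
    while( completion_ref_count.load(std::memory_order_acquire) != 1 ) {
        if( try_steal() ) { failure_count = 0; yield_count = 0; continue; }
        if( ++failure_count < failure_threshold ) continue; // keep spinning
        failure_count = failure_threshold;                  // clamp, as above
        if( yield_count++ < yield_threshold )
            std::this_thread::yield();
        else
            break; // a real worker would now consider leaving the arena
    }
}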
◆ tally_completion_of_predecessor()
template<typename SchedulerTraits >
Decrements ref_count of a predecessor.
If the count reaches 0, the predecessor is scheduled for execution. When changing this function, remember that it is on a hot path.
Definition at line 72 of file custom_scheduler.h.
73 task_prefix& p = s.prefix();
75 if( SchedulerTraits::itt_possible )
77 if( SchedulerTraits::has_slow_atomic && p.ref_count==1 )
81 #if __TBB_PREVIEW_RESUMABLE_TASKS
82 if (old_ref_count == internal::abandon_flag + 2) {
86 tbb::task::resume(p.abandoned_scheduler);
90 if (old_ref_count > 1) {
99 __TBB_ASSERT( p.ref_count==0, "completion of task caused predecessor's reference count to underflow");
100 if( SchedulerTraits::itt_possible )
105 #if __TBB_TASK_ISOLATION
109 p.isolation = isolation;
113 #if __TBB_RECYCLE_TO_ENQUEUE
114 if (p.state==task::to_enqueue) {
120 if( bypass_slot==NULL )
122 #if __TBB_PREVIEW_CRITICAL_TASKS
124 local_spawn( bypass_slot, bypass_slot->prefix().next );
References __TBB_ASSERT, __TBB_control_consistency_helper, __TBB_FetchAndDecrementWrelease, tbb::internal::arena::enqueue_task(), tbb::internal::es_ref_count_active, tbb::internal::is_critical(), ITT_NOTIFY, tbb::internal::generic_scheduler::local_spawn(), tbb::internal::scheduler_state::my_arena, tbb::internal::generic_scheduler::my_random, tbb::internal::task_prefix::next, tbb::internal::no_isolation, p, tbb::task::prefix(), s, and sync_releasing.
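On the hot path the decrement is a single atomic operation with release semantics (__TBB_FetchAndDecrementWrelease); only the thread that drops the count to zero schedules the predecessor, preferring the free bypass slot over a spawn. A hedged sketch of that idea using std::atomic; Task, bypass_slot, and spawn here are stand-ins for the real TBB types:

#include <atomic>

struct Task { std::atomic<long> ref_count; };

void tally_completion_sketch( Task& predecessor, Task*& bypass_slot,
                              void (*spawn)(Task&) ) {
    // Release ordering: writes made by the finished child must be visible
    // to whichever thread runs the predecessor next.
    if( predecessor.ref_count.fetch_sub(1, std::memory_order_release) == 1 ) {
        std::atomic_thread_fence(std::memory_order_acquire);
        if( bypass_slot == nullptr )
            bypass_slot = &predecessor; // run it next, skipping the task pool
        else
            spawn(predecessor);         // otherwise make it stealable
    }
}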
◆ wait_for_all()
template<typename SchedulerTraits >
Entry point from client code to the scheduler loop that dispatches tasks.
The documentation for this class was generated from the following file:
custom_scheduler.h
Copyright © 2005-2020 Intel Corporation. All Rights Reserved.
Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are
registered trademarks or trademarks of Intel Corporation or its
subsidiaries in the United States and other countries.
* Other names and brands may be claimed as the property of others.