Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
task_group_context.cpp
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17#include "scheduler.h"
18
19#include "itt_notify.h"
20
21namespace tbb {
22
23#if __TBB_TASK_GROUP_CONTEXT
24
25using namespace internal;
26
27//------------------------------------------------------------------------
28// captured_exception
29//------------------------------------------------------------------------
30
31inline char* duplicate_string ( const char* src ) {
32 char* dst = NULL;
33 if ( src ) {
34 size_t len = strlen(src) + 1;
35 dst = (char*)allocate_via_handler_v3(len);
36 strncpy (dst, src, len);
37 }
38 return dst;
39}
40
41captured_exception::~captured_exception () throw() {
42 clear();
43}
44
45void captured_exception::set ( const char* a_name, const char* info ) throw() {
46 my_exception_name = duplicate_string( a_name );
47 my_exception_info = duplicate_string( info );
48}
49
50void captured_exception::clear () throw() {
51 deallocate_via_handler_v3 (const_cast<char*>(my_exception_name));
52 deallocate_via_handler_v3 (const_cast<char*>(my_exception_info));
53}
54
55captured_exception* captured_exception::move () throw() {
56 captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) );
57 if ( e ) {
58 ::new (e) captured_exception();
59 e->my_exception_name = my_exception_name;
60 e->my_exception_info = my_exception_info;
61 e->my_dynamic = true;
62 my_exception_name = my_exception_info = NULL;
63 }
64 return e;
65}
66
67void captured_exception::destroy () throw() {
68 __TBB_ASSERT ( my_dynamic, "Method destroy can be used only on objects created by clone or allocate" );
69 if ( my_dynamic ) {
70 this->captured_exception::~captured_exception();
71 deallocate_via_handler_v3 (this);
72 }
73}
74
75captured_exception* captured_exception::allocate ( const char* a_name, const char* info ) {
76 captured_exception *e = (captured_exception*)allocate_via_handler_v3( sizeof(captured_exception) );
77 if ( e ) {
78 ::new (e) captured_exception(a_name, info);
79 e->my_dynamic = true;
80 }
81 return e;
82}
83
84const char* captured_exception::name() const throw() {
85 return my_exception_name;
86}
87
88const char* captured_exception::what() const throw() {
89 return my_exception_info;
90}
91
92
93//------------------------------------------------------------------------
94// tbb_exception_ptr
95//------------------------------------------------------------------------
96
97#if !TBB_USE_CAPTURED_EXCEPTION
98
99namespace internal {
100
101template<typename T>
102tbb_exception_ptr* AllocateExceptionContainer( const T& src ) {
103 tbb_exception_ptr *eptr = (tbb_exception_ptr*)allocate_via_handler_v3( sizeof(tbb_exception_ptr) );
104 if ( eptr )
105 new (eptr) tbb_exception_ptr(src);
106 return eptr;
107}
108
109tbb_exception_ptr* tbb_exception_ptr::allocate () {
110 return AllocateExceptionContainer( std::current_exception() );
111}
112
113tbb_exception_ptr* tbb_exception_ptr::allocate ( const tbb_exception& ) {
114 return AllocateExceptionContainer( std::current_exception() );
115}
116
117tbb_exception_ptr* tbb_exception_ptr::allocate ( captured_exception& src ) {
118 tbb_exception_ptr *res = AllocateExceptionContainer( src );
119 src.destroy();
120 return res;
121}
122
123void tbb_exception_ptr::destroy () throw() {
124 this->tbb_exception_ptr::~tbb_exception_ptr();
125 deallocate_via_handler_v3 (this);
126}
127
128} // namespace internal
129#endif /* !TBB_USE_CAPTURED_EXCEPTION */
130
131
132//------------------------------------------------------------------------
133// task_group_context
134//------------------------------------------------------------------------
135
136task_group_context::~task_group_context () {
137 if ( __TBB_load_relaxed(my_kind) == binding_completed ) {
138 if ( governor::is_set(my_owner) ) {
139 // Local update of the context list
140 uintptr_t local_count_snapshot = my_owner->my_context_state_propagation_epoch;
141 my_owner->my_local_ctx_list_update.store<relaxed>(1);
142 // Prevent load of nonlocal update flag from being hoisted before the
143 // store to local update flag.
144 atomic_fence();
145 if ( my_owner->my_nonlocal_ctx_list_update.load<relaxed>() ) {
146 spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
147 my_node.my_prev->my_next = my_node.my_next;
148 my_node.my_next->my_prev = my_node.my_prev;
149 my_owner->my_local_ctx_list_update.store<relaxed>(0);
150 }
151 else {
152 my_node.my_prev->my_next = my_node.my_next;
153 my_node.my_next->my_prev = my_node.my_prev;
154 // Release fence is necessary so that update of our neighbors in
155 // the context list was committed when possible concurrent destroyer
156 // proceeds after local update flag is reset by the following store.
157 my_owner->my_local_ctx_list_update.store<release>(0);
158 if ( local_count_snapshot != the_context_state_propagation_epoch ) {
159 // Another thread was propagating cancellation request when we removed
160 // ourselves from the list. We must ensure that it is not accessing us
161 // when this destructor finishes. We'll be able to acquire the lock
162 // below only after the other thread finishes with us.
163 spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
164 }
165 }
166 }
167 else {
168 // Nonlocal update of the context list
169 // Synchronizes with generic_scheduler::cleanup_local_context_list()
170 // TODO: evaluate and perhaps relax, or add some lock instead
171 if ( internal::as_atomic(my_kind).fetch_and_store(dying) == detached ) {
172 my_node.my_prev->my_next = my_node.my_next;
173 my_node.my_next->my_prev = my_node.my_prev;
174 }
175 else {
176 //TODO: evaluate and perhaps relax
177 my_owner->my_nonlocal_ctx_list_update.fetch_and_increment<full_fence>();
178 //TODO: evaluate and perhaps remove
179 spin_wait_until_eq( my_owner->my_local_ctx_list_update, 0u );
180 my_owner->my_context_list_mutex.lock();
181 my_node.my_prev->my_next = my_node.my_next;
182 my_node.my_next->my_prev = my_node.my_prev;
183 my_owner->my_context_list_mutex.unlock();
184 //TODO: evaluate and perhaps relax
185 my_owner->my_nonlocal_ctx_list_update.fetch_and_decrement<full_fence>();
186 }
187 }
188 }
189#if __TBB_FP_CONTEXT
190 internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env)->~cpu_ctl_env();
191#endif
192 poison_value(my_version_and_traits);
193 if ( my_exception )
194 my_exception->destroy();
195 ITT_STACK(itt_caller != ITT_CALLER_NULL, caller_destroy, itt_caller);
196}
197
198void task_group_context::init () {
199#if DO_ITT_NOTIFY
200 // Check version of task group context to avoid reporting misleading identifier.
201 if( ( my_version_and_traits & version_mask ) < 3 )
202 my_name = internal::CUSTOM_CTX;
203#endif
204 ITT_TASK_GROUP(this, my_name, NULL);
205 __TBB_STATIC_ASSERT ( sizeof(my_version_and_traits) >= 4, "Layout of my_version_and_traits must be reconsidered on this platform" );
206 __TBB_STATIC_ASSERT ( sizeof(task_group_context) == 2 * NFS_MaxLineSize, "Context class has wrong size - check padding and members alignment" );
207 __TBB_ASSERT ( (uintptr_t(this) & (sizeof(my_cancellation_requested) - 1)) == 0, "Context is improperly aligned" );
208 __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == isolated || __TBB_load_relaxed(my_kind) == bound, "Context can be created only as isolated or bound" );
209 my_parent = NULL;
210 my_node.my_next = NULL;
211 my_node.my_prev = NULL;
212 my_cancellation_requested = 0;
213 my_exception = NULL;
214 my_owner = NULL;
215 my_state = 0;
216 itt_caller = ITT_CALLER_NULL;
217#if __TBB_TASK_PRIORITY
218 my_priority = normalized_normal_priority;
219#endif /* __TBB_TASK_PRIORITY */
220#if __TBB_FP_CONTEXT
221 __TBB_STATIC_ASSERT( sizeof(my_cpu_ctl_env) == sizeof(internal::uint64_t), "The reserved space for FPU settings are not equal sizeof(uint64_t)" );
222 __TBB_STATIC_ASSERT( sizeof(cpu_ctl_env) <= sizeof(my_cpu_ctl_env), "FPU settings storage does not fit to uint64_t" );
223 suppress_unused_warning( my_cpu_ctl_env.space );
224
225 cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
226 new ( &ctl ) cpu_ctl_env;
227 if ( my_version_and_traits & fp_settings )
228 ctl.get_env();
229#endif
230}
231
232void task_group_context::register_with ( generic_scheduler *local_sched ) {
233 __TBB_ASSERT( local_sched, NULL );
234 my_owner = local_sched;
235 // state propagation logic assumes new contexts are bound to head of the list
236 my_node.my_prev = &local_sched->my_context_list_head;
237 // Notify threads that may be concurrently destroying contexts registered
238 // in this scheduler's list that local list update is underway.
239 local_sched->my_local_ctx_list_update.store<relaxed>(1);
240 // Prevent load of global propagation epoch counter from being hoisted before
241 // speculative stores above, as well as load of nonlocal update flag from
242 // being hoisted before the store to local update flag.
243 atomic_fence();
244 // Finalize local context list update
245 if ( local_sched->my_nonlocal_ctx_list_update.load<relaxed>() ) {
246 spin_mutex::scoped_lock lock(my_owner->my_context_list_mutex);
247 local_sched->my_context_list_head.my_next->my_prev = &my_node;
248 my_node.my_next = local_sched->my_context_list_head.my_next;
249 my_owner->my_local_ctx_list_update.store<relaxed>(0);
250 local_sched->my_context_list_head.my_next = &my_node;
251 }
252 else {
253 local_sched->my_context_list_head.my_next->my_prev = &my_node;
254 my_node.my_next = local_sched->my_context_list_head.my_next;
255 my_owner->my_local_ctx_list_update.store<release>(0);
256 // Thread-local list of contexts allows concurrent traversal by another thread
257 // while propagating state change. To ensure visibility of my_node's members
258 // to the concurrently traversing thread, the list's head is updated by means
259 // of store-with-release.
260 __TBB_store_with_release(local_sched->my_context_list_head.my_next, &my_node);
261 }
262}
263
264void task_group_context::bind_to ( generic_scheduler *local_sched ) {
265 __TBB_ASSERT ( __TBB_load_relaxed(my_kind) == binding_required, "Already bound or isolated?" );
266 __TBB_ASSERT ( !my_parent, "Parent is set before initial binding" );
267 my_parent = local_sched->my_innermost_running_task->prefix().context;
268#if __TBB_FP_CONTEXT
269 // Inherit FPU settings only if the context has not captured FPU settings yet.
270 if ( !(my_version_and_traits & fp_settings) )
271 copy_fp_settings(*my_parent);
272#endif
273
274 // Condition below prevents unnecessary thrashing parent context's cache line
275 if ( !(my_parent->my_state & may_have_children) )
276 my_parent->my_state |= may_have_children; // full fence is below
277 if ( my_parent->my_parent ) {
278 // Even if this context were made accessible for state change propagation
279 // (by placing __TBB_store_with_release(s->my_context_list_head.my_next, &my_node)
280 // above), it still could be missed if state propagation from a grand-ancestor
281 // was underway concurrently with binding.
282 // Speculative propagation from the parent together with epoch counters
283 // detecting possibility of such a race allow to avoid taking locks when
284 // there is no contention.
285
286 // Acquire fence is necessary to prevent reordering subsequent speculative
287 // loads of parent state data out of the scope where epoch counters comparison
288 // can reliably validate it.
289 uintptr_t local_count_snapshot = __TBB_load_with_acquire( my_parent->my_owner->my_context_state_propagation_epoch );
290 // Speculative propagation of parent's state. The speculation will be
291 // validated by the epoch counters check further on.
292 my_cancellation_requested = my_parent->my_cancellation_requested;
293#if __TBB_TASK_PRIORITY
294 my_priority = my_parent->my_priority;
295#endif /* __TBB_TASK_PRIORITY */
296 register_with( local_sched ); // Issues full fence
297
298 // If no state propagation was detected by the following condition, the above
299 // full fence guarantees that the parent had correct state during speculative
300 // propagation before the fence. Otherwise the propagation from parent is
301 // repeated under the lock.
302 if ( local_count_snapshot != the_context_state_propagation_epoch ) {
303 // Another thread may be propagating state change right now. So resort to lock.
304 context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
305 my_cancellation_requested = my_parent->my_cancellation_requested;
306#if __TBB_TASK_PRIORITY
307 my_priority = my_parent->my_priority;
308#endif /* __TBB_TASK_PRIORITY */
309 }
310 }
311 else {
312 register_with( local_sched ); // Issues full fence
313 // As we do not have grand-ancestors, concurrent state propagation (if any)
314 // may originate only from the parent context, and thus it is safe to directly
315 // copy the state from it.
316 my_cancellation_requested = my_parent->my_cancellation_requested;
317#if __TBB_TASK_PRIORITY
318 my_priority = my_parent->my_priority;
319#endif /* __TBB_TASK_PRIORITY */
320 }
321 __TBB_store_relaxed(my_kind, binding_completed);
322}
323
324template <typename T>
325void task_group_context::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
326 if (this->*mptr_state == new_state) {
327 // Nothing to do, whether descending from "src" or not, so no need to scan.
328 // Hopefully this happens often thanks to earlier invocations.
329 // This optimization is enabled by LIFO order in the context lists:
330 // - new contexts are bound to the beginning of lists;
331 // - descendants are newer than ancestors;
332 // - earlier invocations are therefore likely to "paint" long chains.
333 }
334 else if (this == &src) {
335 // This clause is disjunct from the traversal below, which skips src entirely.
336 // Note that src.*mptr_state is not necessarily still equal to new_state (another thread may have changed it again).
337 // Such interference is probably not frequent enough to aim for optimisation by writing new_state again (to make the other thread back down).
338 // Letting the other thread prevail may also be fairer.
339 }
340 else {
341 for ( task_group_context *ancestor = my_parent; ancestor != NULL; ancestor = ancestor->my_parent ) {
342 __TBB_ASSERT(internal::is_alive(ancestor->my_version_and_traits), "context tree was corrupted");
343 if ( ancestor == &src ) {
344 for ( task_group_context *ctx = this; ctx != ancestor; ctx = ctx->my_parent )
345 ctx->*mptr_state = new_state;
346 break;
347 }
348 }
349 }
350}
351
352template <typename T>
353void generic_scheduler::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
354 spin_mutex::scoped_lock lock(my_context_list_mutex);
355 // Acquire fence is necessary to ensure that the subsequent node->my_next load
356 // returned the correct value in case it was just inserted in another thread.
357 // The fence also ensures visibility of the correct my_parent value.
358 context_list_node_t *node = __TBB_load_with_acquire(my_context_list_head.my_next);
359 while ( node != &my_context_list_head ) {
360 task_group_context &ctx = __TBB_get_object_ref(task_group_context, my_node, node);
361 if ( ctx.*mptr_state != new_state )
362 ctx.propagate_task_group_state( mptr_state, src, new_state );
363 node = node->my_next;
364 __TBB_ASSERT( is_alive(ctx.my_version_and_traits), "Local context list contains destroyed object" );
365 }
366 // Sync up local propagation epoch with the global one. Release fence prevents
367 // reordering of possible store to *mptr_state after the sync point.
368 __TBB_store_with_release(my_context_state_propagation_epoch, the_context_state_propagation_epoch);
369}
370
371template <typename T>
372bool market::propagate_task_group_state ( T task_group_context::*mptr_state, task_group_context& src, T new_state ) {
373 if ( !(src.my_state & task_group_context::may_have_children) )
374 return true;
375 // The whole propagation algorithm is under the lock in order to ensure correctness
376 // in case of concurrent state changes at the different levels of the context tree.
377 // See comment at the bottom of scheduler.cpp
378 context_state_propagation_mutex_type::scoped_lock lock(the_context_state_propagation_mutex);
379 if ( src.*mptr_state != new_state )
380 // Another thread has concurrently changed the state. Back down.
381 return false;
382 // Advance global state propagation epoch
383 __TBB_FetchAndAddWrelease(&the_context_state_propagation_epoch, 1);
384 // Propagate to all workers and masters and sync up their local epochs with the global one
385 unsigned num_workers = my_first_unused_worker_idx;
386 for ( unsigned i = 0; i < num_workers; ++i ) {
387 generic_scheduler *s = my_workers[i];
388 // If the worker is only about to be registered, skip it.
389 if ( s )
390 s->propagate_task_group_state( mptr_state, src, new_state );
391 }
392 // Propagate to all master threads
393 // The whole propagation sequence is locked, thus no contention is expected
394 for( scheduler_list_type::iterator it = my_masters.begin(); it != my_masters.end(); it++ )
395 it->propagate_task_group_state( mptr_state, src, new_state );
396 return true;
397}
398
399bool task_group_context::cancel_group_execution () {
400 __TBB_ASSERT ( my_cancellation_requested == 0 || my_cancellation_requested == 1, "Invalid cancellation state");
401 if ( my_cancellation_requested || as_atomic(my_cancellation_requested).compare_and_swap(1, 0) ) {
402 // This task group and any descendants have already been canceled.
403 // (A newly added descendant would inherit its parent's my_cancellation_requested,
404 // not missing out on any cancellation still being propagated, and a context cannot be uncanceled.)
405 return false;
406 }
407 governor::local_scheduler_weak()->my_market->propagate_task_group_state( &task_group_context::my_cancellation_requested, *this, (uintptr_t)1 );
408 return true;
409}
410
411bool task_group_context::is_group_execution_cancelled () const {
412 return my_cancellation_requested != 0;
413}
414
415// IMPORTANT: It is assumed that this method is not used concurrently!
416void task_group_context::reset () {
417 //! TODO: Add assertion that this context does not have children
418 // No fences are necessary since this context can be accessed from another thread
419 // only after stealing happened (which means necessary fences were used).
420 if ( my_exception ) {
421 my_exception->destroy();
422 my_exception = NULL;
423 }
424 my_cancellation_requested = 0;
425}
426
427#if __TBB_FP_CONTEXT
428// IMPORTANT: It is assumed that this method is not used concurrently!
429void task_group_context::capture_fp_settings () {
430 //! TODO: Add assertion that this context does not have children
431 // No fences are necessary since this context can be accessed from another thread
432 // only after stealing happened (which means necessary fences were used).
433 cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
434 if ( !(my_version_and_traits & fp_settings) ) {
435 new ( &ctl ) cpu_ctl_env;
436 my_version_and_traits |= fp_settings;
437 }
438 ctl.get_env();
439}
440
441void task_group_context::copy_fp_settings( const task_group_context &src ) {
442 __TBB_ASSERT( !(my_version_and_traits & fp_settings), "The context already has FPU settings." );
443 __TBB_ASSERT( src.my_version_and_traits & fp_settings, "The source context does not have FPU settings." );
444
445 cpu_ctl_env &ctl = *internal::punned_cast<cpu_ctl_env*>(&my_cpu_ctl_env);
446 cpu_ctl_env &src_ctl = *internal::punned_cast<cpu_ctl_env*>(&src.my_cpu_ctl_env);
447 new (&ctl) cpu_ctl_env( src_ctl );
448 my_version_and_traits |= fp_settings;
449}
450#endif /* __TBB_FP_CONTEXT */
451
452void task_group_context::register_pending_exception () {
453 if ( my_cancellation_requested )
454 return;
455#if TBB_USE_EXCEPTIONS
456 try {
457 throw;
458 } TbbCatchAll( this );
459#endif /* TBB_USE_EXCEPTIONS */
460}
461
462#if __TBB_TASK_PRIORITY
463void task_group_context::set_priority ( priority_t prio ) {
464 __TBB_ASSERT( prio == priority_low || prio == priority_normal || prio == priority_high, "Invalid priority level value" );
465 intptr_t p = normalize_priority(prio);
466 if ( my_priority == p && !(my_state & task_group_context::may_have_children))
467 return;
468 my_priority = p;
469 internal::generic_scheduler* s = governor::local_scheduler_if_initialized();
470 if ( !s || !s->my_arena || !s->my_market->propagate_task_group_state(&task_group_context::my_priority, *this, p) )
471 return;
472
474 // need to find out the right arena for priority update.
475 // The executing status check only guarantees being inside some working arena.
476 if ( s->my_innermost_running_task->state() == task::executing )
477 // Updating arena priority here does not eliminate necessity of checking each
478 // task priority and updating arena priority if necessary before the task execution.
479 // These checks will be necessary because:
480 // a) set_priority() may be invoked before any tasks from this task group are spawned;
481 // b) all spawned tasks from this task group are retrieved from the task pools.
482 // These cases create a time window when arena priority may be lowered.
483 s->my_market->update_arena_priority( *s->my_arena, p );
484}
485
486priority_t task_group_context::priority () const {
487 return static_cast<priority_t>(priority_from_normalized_rep[my_priority]);
488}
489#endif /* __TBB_TASK_PRIORITY */
490
491#endif /* __TBB_TASK_GROUP_CONTEXT */
492
493} // namespace tbb
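
A minimal usage sketch (not part of this file; it assumes the public tbb/parallel_for.h and tbb/blocked_range.h headers and a hypothetical scale_until_negative() function) showing how the cancellation machinery above is typically driven: an explicit task_group_context is passed to a parallel algorithm, cancel_group_execution() requests cancellation of the whole group, and is_group_execution_cancelled() reports the outcome afterwards.

    // Hypothetical usage sketch -- not part of task_group_context.cpp.
    #include <cstddef>
    #include "tbb/task.h"            // tbb::task_group_context
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    void scale_until_negative( int* data, std::size_t n ) {
        tbb::task_group_context ctx;                      // bound context by default
        tbb::parallel_for( tbb::blocked_range<std::size_t>(0, n),
            [&]( const tbb::blocked_range<std::size_t>& r ) {
                for ( std::size_t i = r.begin(); i != r.end(); ++i ) {
                    if ( data[i] < 0 ) {
                        ctx.cancel_group_execution();     // cancel this group and its descendants
                        return;
                    }
                    data[i] *= 2;
                }
            },
            tbb::auto_partitioner(), ctx );
        if ( ctx.is_group_execution_cancelled() ) {
            // Tasks that had not started were skipped; bodies already running completed normally.
        }
    }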
#define __TBB_FetchAndAddWrelease(P, V)
Definition: tbb_machine.h:309
#define __TBB_ASSERT(predicate, comment)
No-op version of __TBB_ASSERT.
Definition: tbb_stddef.h:165
#define __TBB_STATIC_ASSERT(condition, msg)
Definition: tbb_stddef.h:553
#define __TBB_get_object_ref(class_name, member_name, member_addr)
Returns address of the object containing a member with the given name and address.
Definition: tbb_stddef.h:270
#define poison_value(g)
#define ITT_TASK_GROUP(type, name, parent)
Definition: itt_notify.h:120
#define ITT_STACK(precond, name, obj)
Definition: itt_notify.h:118
#define ITT_CALLER_NULL
Definition: itt_notify.h:45
const size_t NFS_MaxLineSize
Compile-time constant that is upper bound on cache line/sector size.
Definition: tbb_stddef.h:216
The graph class.
void atomic_fence()
Sequentially consistent full memory fence.
Definition: tbb_machine.h:339
priority_t
Definition: task.h:317
@ priority_normal
Definition: task.h:318
@ priority_high
Definition: task.h:320
@ priority_low
Definition: task.h:319
@ release
Release.
Definition: atomic.h:59
@ full_fence
Sequential consistency.
Definition: atomic.h:55
@ relaxed
No ordering.
Definition: atomic.h:61
void __TBB_EXPORTED_FUNC deallocate_via_handler_v3(void *p)
Deallocates memory using FreeHandler.
T __TBB_load_with_acquire(const volatile T &location)
Definition: tbb_machine.h:709
void __TBB_store_relaxed(volatile T &location, V value)
Definition: tbb_machine.h:739
void suppress_unused_warning(const T1 &)
Utility template function to prevent "unused" warnings by various compilers.
Definition: tbb_stddef.h:398
void spin_wait_until_eq(const volatile T &location, const U value)
Spin UNTIL the value of the variable is equal to a given value.
Definition: tbb_machine.h:399
atomic< T > & as_atomic(T &t)
Definition: atomic.h:572
T __TBB_load_relaxed(const volatile T &location)
Definition: tbb_machine.h:735
void *__TBB_EXPORTED_FUNC allocate_via_handler_v3(size_t n)
Allocates memory using MallocHandler.
void __TBB_store_with_release(volatile T &location, V value)
Definition: tbb_machine.h:713
friend class scoped_lock
Definition: spin_mutex.h:179
int space[sizeof(internal::uint64_t)/sizeof(int)]
Definition: task.h:69
context_list_node_t * my_next
Definition: task.h:152
context_list_node_t * my_prev
Definition: task.h:151
__TBB_atomic kind_type my_kind
Flavor of this context: bound or isolated.
Definition: task.h:405
exception_container_type * my_exception
Pointer to the container storing exception being propagated across this task group.
Definition: task.h:449
bool __TBB_EXPORTED_METHOD cancel_group_execution()
Initiates cancellation of all tasks in this cancellation group and its subordinate groups.
bool __TBB_EXPORTED_METHOD is_group_execution_cancelled() const
Returns true if the context received cancellation request.
task_group_context * my_parent
Pointer to the context of the parent cancellation group. NULL for isolated contexts.
Definition: task.h:410
internal::string_index my_name
Description of algorithm for scheduler based instrumentation.
Definition: task.h:463
void propagate_task_group_state(T task_group_context::*mptr_state, task_group_context &src, T new_state)
Propagates any state change detected to *this, and as an optimisation possibly also upward along the ...
static const kind_type binding_completed
Definition: task.h:590
intptr_t my_priority
Priority level of the task group (in normalized representation)
Definition: task.h:459
void __TBB_EXPORTED_METHOD register_pending_exception()
Records the pending exception, and cancels the task group.
void register_with(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler.
uintptr_t my_cancellation_requested
Specifies whether cancellation was requested for this task group.
Definition: task.h:440
static const kind_type detached
Definition: task.h:591
__TBB_DEPRECATED_IN_VERBOSE_MODE void set_priority(priority_t)
Changes priority of the task group.
void copy_fp_settings(const task_group_context &src)
Copies FPU control setting from another context.
task_group_context(kind_type relation_with_parent=bound, uintptr_t t=default_traits)
Default & binding constructor.
Definition: task.h:504
__itt_caller itt_caller
Used to set and maintain stack stitching point for Intel Performance Tools.
Definition: task.h:418
__TBB_DEPRECATED_IN_VERBOSE_MODE priority_t priority() const
Retrieves current priority of the current task group.
void __TBB_EXPORTED_METHOD init()
Out-of-line part of the constructor.
void bind_to(internal::generic_scheduler *local_sched)
Registers this context with the local scheduler and binds it to its parent context.
uintptr_t my_state
Internal state (combination of state flags, currently only may_have_children).
Definition: task.h:455
internal::context_list_node_t my_node
Used to form the thread specific list of contexts without additional memory allocation.
Definition: task.h:415
void __TBB_EXPORTED_METHOD capture_fp_settings()
Captures the current FPU control settings to the context.
__TBB_EXPORTED_METHOD ~task_group_context()
internal::cpu_ctl_env_space my_cpu_ctl_env
Space for platform-specific FPU settings.
Definition: task.h:436
static const kind_type dying
Definition: task.h:592
internal::generic_scheduler * my_owner
Scheduler instance that registered this context in its thread specific list.
Definition: task.h:452
void __TBB_EXPORTED_METHOD reset()
Forcefully reinitializes the context after the task tree it was associated with is completed.
uintptr_t my_version_and_traits
Version for run-time checks and behavioral traits of the context.
Definition: task.h:446
static const kind_type binding_required
Definition: task.h:589
@ executing
task is running, and will be destroyed after method execute() completes.
Definition: task.h:637
const char * my_exception_name
void __TBB_EXPORTED_METHOD clear()
captured_exception()
Used only by method move().
__TBB_EXPORTED_METHOD ~captured_exception()
const char *__TBB_EXPORTED_METHOD name() const __TBB_override
Returns RTTI name of the originally intercepted exception.
const char *__TBB_EXPORTED_METHOD what() const __TBB_override
Returns the result of originally intercepted exception's what() method.
captured_exception *__TBB_EXPORTED_METHOD move() __TBB_override
Creates and returns pointer to the deep copy of this exception object.
void __TBB_EXPORTED_METHOD set(const char *name, const char *info)
void __TBB_EXPORTED_METHOD destroy() __TBB_override
Destroys objects created by the move() method.
static captured_exception * allocate(const char *name, const char *info)
Functionally equivalent to {captured_exception e(name,info); return e.move();}.
const char * my_exception_info
void destroy()
Destroys this object.
tbb_exception_ptr(const std::exception_ptr &src)
static tbb_exception_ptr * allocate()
static bool is_set(generic_scheduler *s)
Used to check validity of the local scheduler TLS contents.
Definition: governor.cpp:120
static generic_scheduler * local_scheduler_weak()
Definition: governor.h:134
static generic_scheduler * local_scheduler_if_initialized()
Definition: governor.h:139
atomic< unsigned > my_first_unused_worker_idx
First unused index of worker.
Definition: market.h:86
friend class generic_scheduler
Definition: market.h:46
market * my_market
The market I am in.
Definition: scheduler.h:172
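
As a closing illustration (a sketch only, not generated from the sources; it assumes TBB was built with __TBB_FP_CONTEXT enabled and uses a hypothetical divide_rounded_down() function), capture_fp_settings() lets the tasks of a group run with the FPU control settings of the thread that captured them:

    // Hypothetical sketch of capture_fp_settings() usage.
    #include <cfenv>                 // std::fesetround
    #include <cstddef>
    #include "tbb/task.h"
    #include "tbb/parallel_for.h"
    #include "tbb/blocked_range.h"

    void divide_rounded_down( double* v, std::size_t n ) {
        std::fesetround( FE_DOWNWARD );          // change this thread's FP environment first
        tbb::task_group_context ctx;
        ctx.capture_fp_settings();               // snapshot the settings into the context
        tbb::parallel_for( tbb::blocked_range<std::size_t>(0, n),
            [&]( const tbb::blocked_range<std::size_t>& r ) {
                for ( std::size_t i = r.begin(); i != r.end(); ++i )
                    v[i] /= 3.0;                 // workers execute with the captured rounding mode
            },
            tbb::auto_partitioner(), ctx );
        std::fesetround( FE_TONEAREST );         // restore the default rounding mode
    }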

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.