#ifndef __TBB_task_H
#define __TBB_task_H

#include "tbb_stddef.h"
#include "tbb_machine.h"

typedef struct ___itt_caller *__itt_caller;

namespace tbb {

class task;
class task_list;

#if __TBB_TASK_GROUP_CONTEXT
class task_group_context;
#endif

#if _MSC_VER || (__GNUC__==3 && __GNUC_MINOR__<3)
#define __TBB_TASK_BASE_ACCESS public
#else
#define __TBB_TASK_BASE_ACCESS private
#endif

namespace interface5 {
namespace internal {

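// Base class providing the spawn() and destroy() entry points that tbb::task
// inherits below; which of them is visible here depends on
// TBB_DEPRECATED_TASK_INTERFACE.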
class task_base: tbb::internal::no_copy {
__TBB_TASK_BASE_ACCESS:
    friend class tbb::task;
#if !TBB_DEPRECATED_TASK_INTERFACE
    static void spawn( task& t );

    static void spawn( task_list& list );

#endif
#if !TBB_DEPRECATED_TASK_INTERFACE || __TBB_BUILD

    static void __TBB_EXPORTED_FUNC destroy( task& victim );
#endif
};
} // namespace internal
} // namespace interface5

namespace internal {

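// Pure interface to the scheduler as seen from this header; the concrete
// scheduler lives inside the TBB library and is reached through
// task_prefix::owner.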
class scheduler: no_copy {
public:
    virtual void spawn( task& first, task*& next ) = 0;

    virtual void wait_for_all( task& parent, task* child ) = 0;

    virtual void spawn_root_and_wait( task& first, task*& next ) = 0;

    virtual ~scheduler() = 0;
#if __TBB_ARENA_PER_MASTER

    virtual void enqueue( task& t, void* reserved ) = 0;
#endif
};

typedef intptr_t reference_count;

typedef unsigned short affinity_id;

#if __TBB_TASK_GROUP_CONTEXT
struct context_list_node_t {
    context_list_node_t *my_prev,
                        *my_next;
};

class allocate_root_with_context_proxy: no_assign {
    task_group_context& my_context;
public:
    allocate_root_with_context_proxy ( task_group_context& ctx ) : my_context(ctx) {}
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};
#endif

class allocate_root_proxy: no_assign {
public:
    static task& __TBB_EXPORTED_FUNC allocate( size_t size );
    static void __TBB_EXPORTED_FUNC free( task& );
};

class allocate_continuation_proxy: no_assign {
public:
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};

class allocate_child_proxy: no_assign {
public:
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};

class allocate_additional_child_of_proxy: no_assign {
    task& self;
    task& parent;
public:
    allocate_additional_child_of_proxy( task& self_, task& parent_ ) : self(self_), parent(parent_) {}
    task& __TBB_EXPORTED_METHOD allocate( size_t size ) const;
    void __TBB_EXPORTED_METHOD free( task& ) const;
};

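// Bookkeeping block that the scheduler keeps immediately before every task
// object in memory; task::prefix() below reaches it with a [-1] cast, and
// task_prefix::task() goes the other way with (this+1).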
class task_prefix {
private:
    friend class tbb::task;
    friend class tbb::interface5::internal::task_base;
    friend class tbb::task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_additional_child_of_proxy;

#if __TBB_TASK_GROUP_CONTEXT
    task_group_context *context;
#endif

    scheduler* origin;

    scheduler* owner;

    tbb::task* parent;

    reference_count ref_count;

    int depth;

    unsigned char state;

    unsigned char extra_state;

    affinity_id affinity;

    tbb::task* next;

    tbb::task& task() {return *reinterpret_cast<tbb::task*>(this+1);}
};

} // namespace internal

#if __TBB_TASK_GROUP_CONTEXT

#if TBB_USE_CAPTURED_EXCEPTION
    class tbb_exception;
#else
namespace internal {
    class tbb_exception_ptr;
}
#endif

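// Groups tasks for cancellation and exception handling.  A context is either
// bound to the context of its parent task or isolated from it; cancellation
// requests propagate from ancestors to bound descendants.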
class task_group_context : internal::no_copy {
private:
#if TBB_USE_CAPTURED_EXCEPTION
    typedef tbb_exception exception_container_type;
#else
    typedef internal::tbb_exception_ptr exception_container_type;
#endif

    enum version_traits_word_layout {
        traits_offset = 16,
        version_mask = 0xFFFF,
        traits_mask = 0xFFFFul << traits_offset
    };

public:
    enum kind_type {
        isolated,
        bound
    };

    enum traits_type {
        exact_exception = 0x0001ul << traits_offset,
        no_cancellation = 0x0002ul << traits_offset,
        concurrent_wait = 0x0004ul << traits_offset,
#if TBB_USE_CAPTURED_EXCEPTION
        default_traits = 0
#else
        default_traits = exact_exception
#endif
    };

private:
    union {
        kind_type my_kind;
        uintptr_t _my_kind_aligner;
    };

    task_group_context *my_parent;

    internal::context_list_node_t my_node;

    __itt_caller itt_caller;

    char _leading_padding[internal::NFS_MaxLineSize -
                          2 * sizeof(uintptr_t) - sizeof(void*) - sizeof(internal::context_list_node_t)
                          - sizeof(__itt_caller)];

    uintptr_t my_cancellation_requested;

    uintptr_t my_version_and_traits;

    exception_container_type *my_exception;

    void *my_owner;

    char _trailing_padding[internal::NFS_MaxLineSize - sizeof(intptr_t) - 2 * sizeof(void*)];

public:
    task_group_context ( kind_type relation_with_parent = bound,
                         uintptr_t traits = default_traits )
        : my_kind(relation_with_parent)
        , my_version_and_traits(1 | traits)
    {
        init();
    }

    __TBB_EXPORTED_METHOD ~task_group_context ();

    void __TBB_EXPORTED_METHOD reset ();

    bool __TBB_EXPORTED_METHOD cancel_group_execution ();

    bool __TBB_EXPORTED_METHOD is_group_execution_cancelled () const;

    void __TBB_EXPORTED_METHOD register_pending_exception ();

protected:
    void __TBB_EXPORTED_METHOD init ();

private:
    friend class task;
    friend class internal::allocate_root_with_context_proxy;

    static const kind_type binding_required = bound;
    static const kind_type binding_completed = kind_type(bound+1);
    static const kind_type detached = kind_type(binding_completed+1);
    static const kind_type dying = kind_type(detached+1);

    void propagate_cancellation_from_ancestors ();

    bool is_alive () {
#if TBB_USE_DEBUG
        return my_version_and_traits != 0xDeadBeef;
#else
        return true;
#endif
    }
};
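
// Usage sketch (illustrative only; MyRootTask is a hypothetical user type
// derived from tbb::task):
//
//     tbb::task_group_context ctx( tbb::task_group_context::isolated );
//     tbb::task& root = *new( tbb::task::allocate_root(ctx) ) MyRootTask;
//     tbb::task::spawn_root_and_wait( root );      // runs the tree under ctx
//
//     // Meanwhile, another thread may abort the whole group:
//     //     ctx.cancel_group_execution();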

#endif

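// Base class for user-defined tasks.  Derive from it, implement execute(), and
// create instances only through the allocate_* methods together with the
// placement forms of operator new defined at the end of this header.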
class task: __TBB_TASK_BASE_ACCESS interface5::internal::task_base {

    void __TBB_EXPORTED_METHOD internal_set_ref_count( int count );

    internal::reference_count __TBB_EXPORTED_METHOD internal_decrement_ref_count();

protected:
    task() {prefix().extra_state=1;}

public:
    virtual ~task() {}

    virtual task* execute() = 0;

    enum state_type {
        executing,
        reexecute,
        ready,
        allocated,
        freed,
        recycle,
        to_enqueue
    };

    static internal::allocate_root_proxy allocate_root() {
        return internal::allocate_root_proxy();
    }

#if __TBB_TASK_GROUP_CONTEXT
    static internal::allocate_root_with_context_proxy allocate_root( task_group_context& ctx ) {
        return internal::allocate_root_with_context_proxy(ctx);
    }
#endif

    internal::allocate_continuation_proxy& allocate_continuation() {
        return *reinterpret_cast<internal::allocate_continuation_proxy*>(this);
    }

    internal::allocate_child_proxy& allocate_child() {
        return *reinterpret_cast<internal::allocate_child_proxy*>(this);
    }

    internal::allocate_additional_child_of_proxy allocate_additional_child_of( task& t ) {
        return internal::allocate_additional_child_of_proxy(*this,t);
    }

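    // Allocation sketch (illustrative; MyRootTask is a hypothetical type derived
    // from tbb::task).  Tasks are created with one of the allocate_* methods
    // above combined with the matching placement operator new declared at the
    // bottom of this header:
    //
    //     tbb::task& root = *new( tbb::task::allocate_root() ) MyRootTask;
    //     tbb::task::spawn_root_and_wait( root );
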
#if TBB_DEPRECATED_TASK_INTERFACE

    void __TBB_EXPORTED_METHOD destroy( task& t );
#else
    using task_base::destroy;
#endif

    void recycle_as_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = allocated;
    }

    void recycle_as_safe_continuation() {
        __TBB_ASSERT( prefix().state==executing, "execute not running?" );
        prefix().state = recycle;
    }

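    // Recycling sketch (illustrative; inside execute() of a hypothetical FibTask
    // that wants to be re-run as the continuation of its children):
    //
    //     set_ref_count( 2 + 1 );                         // children + 1 for the recycled task itself
    //     recycle_as_safe_continuation();                 // keep *this alive as their continuation
    //     spawn( *new( allocate_child() ) FibTask() );
    //     spawn( *new( allocate_child() ) FibTask() );
    //     return NULL;                                    // execute() runs again once both finish
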
    void recycle_as_child_of( task& new_parent ) {
        internal::task_prefix& p = prefix();
        __TBB_ASSERT( prefix().state==executing||prefix().state==allocated, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled as a child" );
        __TBB_ASSERT( p.parent==NULL, "parent must be null" );
        __TBB_ASSERT( new_parent.prefix().state<=recycle, "corrupt parent's state" );
        __TBB_ASSERT( new_parent.prefix().state!=freed, "parent already freed" );
        p.state = allocated;
        p.parent = &new_parent;
#if __TBB_TASK_GROUP_CONTEXT
        p.context = new_parent.prefix().context;
#endif
    }

    void recycle_to_reexecute() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        __TBB_ASSERT( prefix().ref_count==0, "no child tasks allowed when recycled for reexecution" );
        prefix().state = reexecute;
    }

#if INTEL_PRIVATE && __TBB_RECYCLE_TO_ENQUEUE

    void recycle_to_enqueue() {
        __TBB_ASSERT( prefix().state==executing, "execute not running, or already recycled" );
        prefix().state = to_enqueue;
    }

#endif

    intptr_t depth() const {return 0;}
    void set_depth( intptr_t ) {}
    void add_to_depth( int ) {}

    void set_ref_count( int count ) {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        internal_set_ref_count(count);
#else
        prefix().ref_count = count;
#endif
    }

    void increment_ref_count() {
        __TBB_FetchAndIncrementWacquire( &prefix().ref_count );
    }

    int decrement_ref_count() {
#if TBB_USE_THREADING_TOOLS||TBB_USE_ASSERT
        return int(internal_decrement_ref_count());
#else
        return int(__TBB_FetchAndDecrementWrelease( &prefix().ref_count ))-1;
#endif
    }

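    // Blocking-style sketch (illustrative; inside execute() of a hypothetical
    // task with two children):
    //
    //     set_ref_count( 2 + 1 );                         // children + 1 for the wait
    //     task& a = *new( allocate_child() ) ChildTask();
    //     task& b = *new( allocate_child() ) ChildTask();
    //     spawn( a );
    //     spawn_and_wait_for_all( b );                    // returns when both children are done
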
#if TBB_DEPRECATED_TASK_INTERFACE

    void spawn( task& t );

    void spawn( task_list& list );
#else
    using task_base::spawn;
#endif

    void spawn_and_wait_for_all( task& child ) {
        prefix().owner->wait_for_all( *this, &child );
    }

    void __TBB_EXPORTED_METHOD spawn_and_wait_for_all( task_list& list );

    static void spawn_root_and_wait( task& root ) {
        root.prefix().owner->spawn_root_and_wait( root, root.prefix().next );
    }

    static void spawn_root_and_wait( task_list& root_list );

    void wait_for_all() {
        prefix().owner->wait_for_all( *this, NULL );
    }

#if __TBB_ARENA_PER_MASTER
    static void enqueue( task& t ) {
        t.prefix().owner->enqueue( t, NULL );
    }

#endif
    static task& __TBB_EXPORTED_FUNC self();

    task* parent() const {return prefix().parent;}

#if __TBB_TASK_GROUP_CONTEXT
    task_group_context* context() {return prefix().context;}
#endif

    bool is_stolen_task() const {
        return (prefix().extra_state & 0x80)!=0;
    }

    state_type state() const {return state_type(prefix().state);}

    int ref_count() const {
#if TBB_USE_ASSERT
        internal::reference_count ref_count_ = prefix().ref_count;
        __TBB_ASSERT( ref_count_==int(ref_count_), "integer overflow error");
#endif
        return int(prefix().ref_count);
    }

    bool __TBB_EXPORTED_METHOD is_owned_by_current_thread() const;

    typedef internal::affinity_id affinity_id;

    void set_affinity( affinity_id id ) {prefix().affinity = id;}

    affinity_id affinity() const {return prefix().affinity;}

    virtual void __TBB_EXPORTED_METHOD note_affinity( affinity_id id );

#if __TBB_TASK_GROUP_CONTEXT
    bool cancel_group_execution () { return prefix().context->cancel_group_execution(); }

    bool is_cancelled () const { return prefix().context->is_group_execution_cancelled(); }
#endif

private:
    friend class interface5::internal::task_base;
    friend class task_list;
    friend class internal::scheduler;
    friend class internal::allocate_root_proxy;
#if __TBB_TASK_GROUP_CONTEXT
    friend class internal::allocate_root_with_context_proxy;
#endif
    friend class internal::allocate_continuation_proxy;
    friend class internal::allocate_child_proxy;
    friend class internal::allocate_additional_child_of_proxy;

    internal::task_prefix& prefix( internal::version_tag* = NULL ) const {
        return reinterpret_cast<internal::task_prefix*>(const_cast<task*>(this))[-1];
    }
};

class empty_task: public task {
    task* execute() {
        return NULL;
    }
};

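// empty_task does no work of its own; a common use is as a dummy root whose
// only job is to wait on other tasks.  Sketch (illustrative; MyTask is a
// hypothetical user type derived from tbb::task):
//
//     tbb::empty_task& root = *new( tbb::task::allocate_root() ) tbb::empty_task;
//     root.set_ref_count( 2 );                            // one child + 1 for the wait
//     tbb::task& child = *new( root.allocate_child() ) MyTask();
//     root.spawn_and_wait_for_all( child );
//     root.destroy( root );                               // dummy roots are not freed automatically
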
class task_list: internal::no_copy {
private:
    task* first;
    task** next_ptr;
    friend class task;
    friend class interface5::internal::task_base;
public:
    task_list() : first(NULL), next_ptr(&first) {}

    ~task_list() {}

    bool empty() const {return !first;}

    void push_back( task& task ) {
        task.prefix().next = NULL;
        *next_ptr = &task;
        next_ptr = &task.prefix().next;
    }

    task& pop_front() {
        __TBB_ASSERT( !empty(), "attempt to pop item from empty task_list" );
        task* result = first;
        first = result->prefix().next;
        if( !first ) next_ptr = &first;
        return *result;
    }

    void clear() {
        first=NULL;
        next_ptr=&first;
    }
};

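// task_list sketch (illustrative; inside execute() of a hypothetical task,
// ChildTask being a hypothetical type derived from tbb::task):
//
//     tbb::task_list list;
//     set_ref_count( 3 + 1 );                             // three children + 1 for the wait
//     for( int i = 0; i < 3; ++i )
//         list.push_back( *new( allocate_child() ) ChildTask() );
//     spawn_and_wait_for_all( list );                     // spawns the whole list, then waits
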
#if TBB_DEPRECATED_TASK_INTERFACE
inline void task::spawn( task& t )
#else
inline void interface5::internal::task_base::spawn( task& t )
#endif
{
    t.prefix().owner->spawn( t, t.prefix().next );
}

#if TBB_DEPRECATED_TASK_INTERFACE
inline void task::spawn( task_list& list )
#else
inline void interface5::internal::task_base::spawn( task_list& list )
#endif
{
    if( task* t = list.first ) {
        t->prefix().owner->spawn( *t, *list.next_ptr );
        list.clear();
    }
}

inline void task::spawn_root_and_wait( task_list& root_list ) {
    if( task* t = root_list.first ) {
        t->prefix().owner->spawn_root_and_wait( *t, *root_list.next_ptr );
        root_list.clear();
    }
}

} // namespace tbb

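// Placement forms of operator new/delete that pair with the allocate_* proxies
// above.  The matching operator delete is invoked by the compiler only when the
// task's constructor throws inside a "new( allocate_... ) T" expression, and it
// hands the memory back to the scheduler through the proxy's free().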
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_proxy& ) {
    return &tbb::internal::allocate_root_proxy::allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_proxy& ) {
    tbb::internal::allocate_root_proxy::free( *static_cast<tbb::task*>(task) );
}

#if __TBB_TASK_GROUP_CONTEXT
inline void *operator new( size_t bytes, const tbb::internal::allocate_root_with_context_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_root_with_context_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}
#endif

inline void *operator new( size_t bytes, const tbb::internal::allocate_continuation_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_continuation_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_child_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_child_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

inline void *operator new( size_t bytes, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    return &p.allocate(bytes);
}

inline void operator delete( void* task, const tbb::internal::allocate_additional_child_of_proxy& p ) {
    p.free( *static_cast<tbb::task*>(task) );
}

#endif /* __TBB_task_H */