enumerable_thread_specific.h

00001 /*
00002     Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_enumerable_thread_specific_H
00022 #define __TBB_enumerable_thread_specific_H
00023 
00024 #include "concurrent_vector.h"
00025 #include "tbb_thread.h"
00026 #include "concurrent_hash_map.h"
00027 #include "cache_aligned_allocator.h"
00028 #if __SUNPRO_CC
00029 #include <string.h>  // for memcpy
00030 #endif
00031 
00032 #if _WIN32||_WIN64
00033 #include <windows.h>
00034 #else
00035 #include <pthread.h>
00036 #endif
00037 
00038 namespace tbb {
00039 
00041     enum ets_key_usage_type { ets_key_per_instance, ets_no_key };
00042 
00044     namespace internal {
00045         
00047         template< typename Container, typename Value >
00048         class enumerable_thread_specific_iterator 
00049 #if defined(_WIN64) && defined(_MSC_VER) 
00050             // Ensure that Microsoft's internal template function _Val_type works correctly.
00051             : public std::iterator<std::random_access_iterator_tag,Value>
00052 #endif /* defined(_WIN64) && defined(_MSC_VER) */
00053         {
00055         
00056             Container *my_container;
00057             typename Container::size_type my_index;
00058             mutable Value *my_value;
00059         
00060             template<typename C, typename T>
00061             friend enumerable_thread_specific_iterator<C,T> operator+( ptrdiff_t offset, 
00062                                                                        const enumerable_thread_specific_iterator<C,T>& v );
00063         
00064             template<typename C, typename T, typename U>
00065             friend bool operator==( const enumerable_thread_specific_iterator<C,T>& i, 
00066                                     const enumerable_thread_specific_iterator<C,U>& j );
00067         
00068             template<typename C, typename T, typename U>
00069             friend bool operator<( const enumerable_thread_specific_iterator<C,T>& i, 
00070                                    const enumerable_thread_specific_iterator<C,U>& j );
00071         
00072             template<typename C, typename T, typename U>
00073             friend ptrdiff_t operator-( const enumerable_thread_specific_iterator<C,T>& i, const enumerable_thread_specific_iterator<C,U>& j );
00074             
00075             template<typename C, typename U> 
00076             friend class enumerable_thread_specific_iterator;
00077         
00078             public:
00079         
00080             enumerable_thread_specific_iterator( const Container &container, typename Container::size_type index ) : 
00081                 my_container(&const_cast<Container &>(container)), my_index(index), my_value(NULL) {}
00082         
00084             enumerable_thread_specific_iterator() : my_container(NULL), my_index(0), my_value(NULL) {}
00085         
00086             template<typename U>
00087             enumerable_thread_specific_iterator( const enumerable_thread_specific_iterator<Container, U>& other ) :
00088                     my_container( other.my_container ), my_index( other.my_index), my_value( const_cast<Value *>(other.my_value) ) {}
00089         
00090             enumerable_thread_specific_iterator operator+( ptrdiff_t offset ) const {
00091                 return enumerable_thread_specific_iterator(*my_container, my_index + offset);
00092             }
00093         
00094             enumerable_thread_specific_iterator &operator+=( ptrdiff_t offset ) {
00095                 my_index += offset;
00096                 my_value = NULL;
00097                 return *this;
00098             }
00099         
00100             enumerable_thread_specific_iterator operator-( ptrdiff_t offset ) const {
00101                 return enumerable_thread_specific_iterator( *my_container, my_index-offset );
00102             }
00103         
00104             enumerable_thread_specific_iterator &operator-=( ptrdiff_t offset ) {
00105                 my_index -= offset;
00106                 my_value = NULL;
00107                 return *this;
00108             }
00109         
00110             Value& operator*() const {
00111                 Value* value = my_value;
00112                 if( !value ) {
00113                     value = my_value = reinterpret_cast<Value *>(&(*my_container)[my_index].value);
00114                 }
00115                 __TBB_ASSERT( value==reinterpret_cast<Value *>(&(*my_container)[my_index].value), "corrupt cache" );
00116                 return *value;
00117             }
00118         
00119             Value& operator[]( ptrdiff_t k ) const {
00120                return (*my_container)[my_index + k].value;
00121             }
00122         
00123             Value* operator->() const {return &operator*();}
00124         
00125             enumerable_thread_specific_iterator& operator++() {
00126                 ++my_index;
00127                 my_value = NULL;
00128                 return *this;
00129             }
00130         
00131             enumerable_thread_specific_iterator& operator--() {
00132                 --my_index;
00133                 my_value = NULL;
00134                 return *this;
00135             }
00136         
00138             enumerable_thread_specific_iterator operator++(int) {
00139                 enumerable_thread_specific_iterator result = *this;
00140                 ++my_index;
00141                 my_value = NULL;
00142                 return result;
00143             }
00144         
00146             enumerable_thread_specific_iterator operator--(int) {
00147                 enumerable_thread_specific_iterator result = *this;
00148                 --my_index;
00149                 my_value = NULL;
00150                 return result;
00151             }
00152         
00153             // STL support
00154             typedef ptrdiff_t difference_type;
00155             typedef Value value_type;
00156             typedef Value* pointer;
00157             typedef Value& reference;
00158             typedef std::random_access_iterator_tag iterator_category;
00159         };
00160         
00161         template<typename Container, typename T>
00162         enumerable_thread_specific_iterator<Container,T> operator+( ptrdiff_t offset, 
00163                                                                     const enumerable_thread_specific_iterator<Container,T>& v ) {
00164             return enumerable_thread_specific_iterator<Container,T>( v.my_container, v.my_index + offset );
00165         }
00166         
00167         template<typename Container, typename T, typename U>
00168         bool operator==( const enumerable_thread_specific_iterator<Container,T>& i, 
00169                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00170             return i.my_index==j.my_index && i.my_container == j.my_container;
00171         }
00172         
00173         template<typename Container, typename T, typename U>
00174         bool operator!=( const enumerable_thread_specific_iterator<Container,T>& i, 
00175                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00176             return !(i==j);
00177         }
00178         
00179         template<typename Container, typename T, typename U>
00180         bool operator<( const enumerable_thread_specific_iterator<Container,T>& i, 
00181                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00182             return i.my_index<j.my_index;
00183         }
00184         
00185         template<typename Container, typename T, typename U>
00186         bool operator>( const enumerable_thread_specific_iterator<Container,T>& i, 
00187                         const enumerable_thread_specific_iterator<Container,U>& j ) {
00188             return j<i;
00189         }
00190         
00191         template<typename Container, typename T, typename U>
00192         bool operator>=( const enumerable_thread_specific_iterator<Container,T>& i, 
00193                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00194             return !(i<j);
00195         }
00196         
00197         template<typename Container, typename T, typename U>
00198         bool operator<=( const enumerable_thread_specific_iterator<Container,T>& i, 
00199                          const enumerable_thread_specific_iterator<Container,U>& j ) {
00200             return !(j<i);
00201         }
00202         
00203         template<typename Container, typename T, typename U>
00204         ptrdiff_t operator-( const enumerable_thread_specific_iterator<Container,T>& i, 
00205                              const enumerable_thread_specific_iterator<Container,U>& j ) {
00206             return i.my_index-j.my_index;
00207         }
00208 
    //! Input iterator that walks a container of containers as one flat sequence.
    /** SegmentedContainer is the outer container; each of its elements
        (InnerContainer) is traversed in turn, skipping empty inner containers.
        Invariant: whenever outer_iter != my_segcont->end(), inner_iter refers
        to a valid element of *outer_iter. */
    template<typename SegmentedContainer, typename Value >
        class segmented_iterator
#if defined(_WIN64) && defined(_MSC_VER)
        : public std::iterator<std::input_iterator_tag, Value>
#endif
        {
            template<typename C, typename T, typename U>
            friend bool operator==(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);

            template<typename C, typename T, typename U>
            friend bool operator!=(const segmented_iterator<C,T>& i, const segmented_iterator<C,U>& j);
            
            // Allow const/non-const variants to access each other's state.
            template<typename C, typename U> 
            friend class segmented_iterator;

            public:

                //! Unassociated iterator; only my_segcont is initialized.
                segmented_iterator() {my_segcont = NULL;}

                //! Iterator over _segmented_container, initially positioned at its end.
                segmented_iterator( const SegmentedContainer& _segmented_container ) : 
                    my_segcont(const_cast<SegmentedContainer*>(&_segmented_container)),
                    outer_iter(my_segcont->end()) { }

                ~segmented_iterator() {}

                typedef typename SegmentedContainer::iterator outer_iterator;
                typedef typename SegmentedContainer::value_type InnerContainer;
                typedef typename InnerContainer::iterator inner_iterator;

                // STL support
                typedef ptrdiff_t difference_type;
                typedef Value value_type;
                typedef typename SegmentedContainer::size_type size_type;
                typedef Value* pointer;
                typedef Value& reference;
                typedef std::input_iterator_tag iterator_category;

                // Copy Constructor
                template<typename U>
                segmented_iterator(const segmented_iterator<SegmentedContainer, U>& other) :
                    my_segcont(other.my_segcont),
                    outer_iter(other.outer_iter),
                    // can we assign a default-constructed iterator to inner if we're at the end?
                    inner_iter(other.inner_iter)
                {}

                // assignment
                // NOTE(review): when U != Value, 'this != &other' compares pointers to
                // unrelated types; this template only appears safe for U == Value -- verify.
                template<typename U>
                segmented_iterator& operator=( const segmented_iterator<SegmentedContainer, U>& other) {
                    if(this != &other) {
                        my_segcont = other.my_segcont;
                        outer_iter = other.outer_iter;
                        // inner_iter is meaningless at end(); copy it only when positioned inside.
                        if(outer_iter != my_segcont->end()) inner_iter = other.inner_iter;
                    }
                    return *this;
                }

                // allow assignment of outer iterator to segmented iterator.  Once it is
                // assigned, move forward until a non-empty inner container is found or
                // the end of the outer container is reached.
                segmented_iterator& operator=(const outer_iterator& new_outer_iter) {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    // check that this iterator points to something inside the segmented container
                    for(outer_iter = new_outer_iter ;outer_iter!=my_segcont->end(); ++outer_iter) {
                        if( !outer_iter->empty() ) {
                            inner_iter = outer_iter->begin();
                            break;
                        }
                    }
                    return *this;
                }

                // pre-increment
                segmented_iterator& operator++() {
                    advance_me();
                    return *this;
                }

                // post-increment: returns the position before the advance.
                segmented_iterator operator++(int) {
                    segmented_iterator tmp = *this;
                    operator++();
                    return tmp;
                }

                //! Compare against a raw outer iterator.
                /** Matches only when positioned at the very beginning of the inner
                    container (or when both are at the outer end). */
                bool operator==(const outer_iterator& other_outer) const {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    return (outer_iter == other_outer &&
                            (outer_iter == my_segcont->end() || inner_iter == outer_iter->begin()));
                }

                bool operator!=(const outer_iterator& other_outer) const {
                    return !operator==(other_outer);

                }

                // (i)* RHS
                reference operator*() const {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    __TBB_ASSERT(outer_iter != my_segcont->end(), "Dereferencing a pointer at end of container");
                    __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // should never happen
                    return *inner_iter;
                }

                // i->
                pointer operator->() const { return &operator*();}

            private:
                SegmentedContainer*             my_segcont;  // outer container; never owned
                outer_iterator outer_iter;                   // current inner container
                inner_iterator inner_iter;                   // current element within *outer_iter
 
                //! Step to the next element, skipping over empty inner containers.
                void advance_me() {
                    __TBB_ASSERT(my_segcont != NULL, NULL);
                    __TBB_ASSERT(outer_iter != my_segcont->end(), NULL); // not true if there are no inner containers
                    __TBB_ASSERT(inner_iter != outer_iter->end(), NULL); // not true if the inner containers are all empty.
                    ++inner_iter;
                    while(inner_iter == outer_iter->end() && ++outer_iter != my_segcont->end()) {
                        inner_iter = outer_iter->begin();
                    }
                }
        };    // segmented_iterator
00331 
00332         template<typename SegmentedContainer, typename T, typename U>
00333         bool operator==( const segmented_iterator<SegmentedContainer,T>& i, 
00334                          const segmented_iterator<SegmentedContainer,U>& j ) {
00335             if(i.my_segcont != j.my_segcont) return false;
00336             if(i.my_segcont == NULL) return true;
00337             if(i.outer_iter != j.outer_iter) return false;
00338             if(i.outer_iter == i.my_segcont->end()) return true;
00339             return i.inner_iter == j.inner_iter;
00340         }
00341 
00342         // !=
00343         template<typename SegmentedContainer, typename T, typename U>
00344         bool operator!=( const segmented_iterator<SegmentedContainer,T>& i, 
00345                          const segmented_iterator<SegmentedContainer,U>& j ) {
00346             return !(i==j);
00347         }
00348 
        //! Primary template; deliberately empty.
        /** Only the specializations that follow (ets_no_key and
            ets_key_per_instance) provide usable TLS operations. */
        template<ets_key_usage_type et>
        struct tls_manager {};
00352         
00354         template <>
00355         struct tls_manager<ets_no_key> {
00356             typedef size_t tls_key_t;
00357             static inline void create_key( tls_key_t &) { }
00358             static inline void destroy_key( tls_key_t & ) { }
00359             static inline void set_tls( tls_key_t &, void *  ) { }
00360             static inline void * get_tls( tls_key_t & ) { return (size_t)0; }
00361         };
00362 
        //! tls_manager that allocates one OS TLS key per container instance.
        /** Thin wrappers over the native TLS API (Win32 Tls* or pthreads).
            NOTE(review): return codes of the OS calls are ignored, so key
            allocation failure goes undetected -- confirm this is acceptable. */
        template <>
        struct tls_manager <ets_key_per_instance> {
#if _WIN32||_WIN64
            typedef DWORD tls_key_t;
            static inline void create_key( tls_key_t &k) { k = TlsAlloc(); }
            static inline void destroy_key( tls_key_t &k) { TlsFree(k); }
            static inline void set_tls( tls_key_t &k, void * value) { TlsSetValue(k, (LPVOID)value); }
            static inline void * get_tls( tls_key_t &k ) { return (void *)TlsGetValue(k); }
#else
            typedef pthread_key_t tls_key_t;
            static inline void create_key( tls_key_t &k) { pthread_key_create(&k, NULL); }
            static inline void destroy_key( tls_key_t &k) { pthread_key_delete(k); }
            static inline void set_tls( tls_key_t &k, void * value) { pthread_setspecific(k, value); }
            static inline void * get_tls( tls_key_t &k ) { return pthread_getspecific(k); }
#endif
        };
00380 
        //! HashCompare implementation keyed on the OS-level thread id.
        /** Used by the thread-id -> element-index hash table below. */
        class thread_hash_compare {
        public:
            // using hack suggested by Arch to get value for thread id for hashing...
#if _WIN32||_WIN64
            typedef DWORD thread_key;
#else
            typedef pthread_t thread_key;
#endif
            //! Extract the raw OS thread id from a tbb_thread::id by bitwise copy.
            // NOTE(review): assumes tbb_thread::id stores exactly a thread_key --
            // memcpy reads sizeof(thread_key) bytes from it; verify the layout.
            static thread_key my_thread_key(const tbb_thread::id j) {
                thread_key key_val;
                memcpy(&key_val, &j, sizeof(thread_key));
                return key_val;
            }

            bool equal( const thread_key j, const thread_key k) const {
                return j == k;
            }
            //! Hash is the id itself truncated to unsigned long.
            // NOTE(review): pthread_t is formally opaque; casting it to an
            // integer is non-portable, though common platforms permit it.
            unsigned long hash(const thread_key k) const {
                return (unsigned long)k;
            }
        };
00402 
        // storage for initialization function pointer
        //! Type-erased interface to the user's per-thread initialization functor.
        template<typename T>
        struct callback_base {
            //! Invoke the stored functor; the result initializes a new element.
            virtual T apply( ) = 0;
            //! Destroy *this and release its storage (allocator-aware "delete this").
            virtual void destroy( ) = 0;
            // need to be able to create copies of callback_base for copy constructor
            virtual callback_base* make_copy() = 0;
            // need virtual destructor to satisfy GCC compiler warning
            virtual ~callback_base() { }
        };
00413 
        //! Concrete callback holding a copy of a user functor of type Functor.
        /** Instances are allocated and freed through tbb::tbb_allocator so their
            lifetime is independent of the container's Allocator parameter. */
        template <typename T, typename Functor>
        struct callback_leaf : public callback_base<T> {
            typedef Functor my_callback_type;
            typedef callback_leaf<T,Functor> my_type;
            typedef my_type* callback_pointer;
            typedef typename tbb::tbb_allocator<my_type> my_allocator_type;
            Functor f;  // user functor, copied at construction
            callback_leaf( const Functor& f_) : f(f_) {
            }

            //! Allocate raw storage and placement-construct a new callback_leaf.
            static callback_pointer new_callback(const Functor& f_ ) {
                void* new_void = my_allocator_type().allocate(1);
                callback_pointer new_cb = new (new_void) callback_leaf<T,Functor>(f_); // placement new
                return new_cb;
            }

            //! Clone; covariant return relative to callback_base::make_copy().
            /* override */ callback_pointer make_copy() {
                return new_callback( f );
            }

            //! Destroy this object and return its memory to the allocator.
             /* override */ void destroy( ) {
                 callback_pointer my_ptr = this;
                 my_allocator_type().destroy(my_ptr);
                 my_allocator_type().deallocate(my_ptr,1);
             }
            /* override */ T apply() { return f(); }  // does copy construction of returned value.
        };
00441 
        //! concurrent_hash_map subclass exposing the fast unprotected lookup.
        /** internal_fast_find is protected in the base class; this wrapper makes
            it available so the common lookup path avoids taking an accessor. */
        template<typename Key, typename T, typename HC, typename A>
        class ets_concurrent_hash_map : public tbb::concurrent_hash_map<Key, T, HC, A> {
        public:
            typedef tbb::concurrent_hash_map<Key, T, HC, A> base_type;
            typedef typename base_type::const_pointer const_pointer;
            typedef typename base_type::key_type key_type;
            //! Lock-free find; returns NULL when the key is absent.
            const_pointer find( const key_type &k ) {
                return this->internal_fast_find( k );
            } // make public
        };
00452 
00454 
        //! Raw storage for a U, padded so consecutive elements avoid false sharing.
        /** ModularSize must be sizeof(U) % NFS_MaxLineSize; this primary template
            handles the nonzero remainder, the specialization below the zero case.
            The U is placement-constructed into 'value' by external code;
            unconstruct() runs its destructor without freeing the storage. */
        template<typename U, size_t ModularSize>
        struct ets_element {
            char value[sizeof(U) + NFS_MaxLineSize-ModularSize];
            void unconstruct() {
                // "reinterpret_cast<U*>(&value)->~U();" causes type-punning warning with gcc 4.4,
                // "U* u = reinterpret_cast<U*>(&value); u->~U();" causes unused variable warning with VS2010.
                // Thus another "casting via union" hack.
                union { void* space; U* val; } helper;
                helper.space = &value;
                helper.val->~U();
            }
        };
00471 
        //! Specialization for sizeof(U) an exact multiple of the cache-line size: no padding needed.
        template<typename U>
        struct ets_element<U,0> {
            char value[sizeof(U)];
            void unconstruct() { // Same implementation as in general case
                union { void* space; U* val; } helper;
                helper.space = &value;
                helper.val->~U();
            }
        };
00482 
00483     } // namespace internal
00485 
00487 
00506     template <typename T, 
00507               typename Allocator=cache_aligned_allocator<T>, 
00508               ets_key_usage_type ETS_key_type=ets_no_key > 
00509     class enumerable_thread_specific { 
00510 
00511         template<typename U, typename A, ets_key_usage_type C> friend class enumerable_thread_specific;
00512     
00513         typedef internal::tls_manager< ETS_key_type > my_tls_manager;
00514 
00515         typedef internal::ets_element<T,sizeof(T)%internal::NFS_MaxLineSize> padded_element;
00516 
        //! blocked_range over iterator type I, with value typedefs for parallel algorithms.
        template<typename I>
        class generic_range_type: public blocked_range<I> {
        public:
            typedef T value_type;
            typedef T& reference;
            typedef const T& const_reference;
            typedef I iterator;
            typedef ptrdiff_t difference_type;
            generic_range_type( I begin_, I end_, size_t grainsize_ = 1) : blocked_range<I>(begin_,end_,grainsize_) {} 
            //! Converting constructor, e.g. range_type -> const_range_type.
            template<typename U>
            generic_range_type( const generic_range_type<U>& r) : blocked_range<I>(r.begin(),r.end(),r.grainsize()) {} 
            //! Splitting constructor used by parallel algorithms.
            generic_range_type( generic_range_type& r, split ) : blocked_range<I>(r,split()) {}
        };
00531     
00532         typedef typename Allocator::template rebind< padded_element >::other padded_allocator_type;
00533         typedef tbb::concurrent_vector< padded_element, padded_allocator_type > internal_collection_type;
00534         typedef typename internal_collection_type::size_type hash_table_index_type; // storing array indices rather than iterators to simplify
00535         // copying the hash table that correlates thread IDs with concurrent vector elements.
00536         
00537         typedef typename Allocator::template rebind< std::pair< typename internal::thread_hash_compare::thread_key, hash_table_index_type > >::other hash_element_allocator;
00538         typedef internal::ets_concurrent_hash_map< typename internal::thread_hash_compare::thread_key, hash_table_index_type, internal::thread_hash_compare, hash_element_allocator > thread_to_index_type;
00539 
00540         typename my_tls_manager::tls_key_t my_key;
00541 
        //! Discard the TLS key and allocate a fresh one.
        /** Invalidates every thread's cached element pointer; used by clear(). */
        void reset_key() {
            my_tls_manager::destroy_key(my_key);
            my_tls_manager::create_key(my_key); 
        }
00546 
00547         internal::callback_base<T> *my_finit_callback;
00548 
00549         // need to use a pointed-to exemplar because T may not be assignable.
00550         // using tbb_allocator instead of padded_element_allocator because we may be
00551         // copying an exemplar from one instantiation of ETS to another with a different
00552         // allocator.
00553         typedef typename tbb::tbb_allocator<padded_element > exemplar_allocator_type;
        //! Allocate an exemplar and copy-construct a T from my_value into it.
        static padded_element * create_exemplar(const T& my_value) {
            padded_element *new_exemplar = reinterpret_cast<padded_element *>(exemplar_allocator_type().allocate(1));
            new(new_exemplar->value) T(my_value);  // placement copy-construction into raw storage
            return new_exemplar;
        }
00559 
        //! Allocate an exemplar holding a default-constructed T.
        static padded_element *create_exemplar( ) {
            padded_element *new_exemplar = reinterpret_cast<padded_element *>(exemplar_allocator_type().allocate(1));
            new(new_exemplar->value) T( );  // placement default-construction into raw storage
            return new_exemplar;
        }
00565 
        //! Destroy the T inside the exemplar, then release the exemplar's storage.
        static void free_exemplar(padded_element *my_ptr) {
            my_ptr->unconstruct();  // run ~T on the placement-constructed value
            exemplar_allocator_type().destroy(my_ptr);
            exemplar_allocator_type().deallocate(my_ptr,1);
        }
00571 
00572         padded_element* my_exemplar_ptr;
00573 
00574         internal_collection_type my_locals;
00575         thread_to_index_type my_hash_tbl;
00576     
00577     public:
00578     
00580         typedef Allocator allocator_type;
00581         typedef T value_type;
00582         typedef T& reference;
00583         typedef const T& const_reference;
00584         typedef T* pointer;
00585         typedef const T* const_pointer;
00586         typedef typename internal_collection_type::size_type size_type;
00587         typedef typename internal_collection_type::difference_type difference_type;
00588     
00589         // Iterator types
00590         typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, value_type > iterator;
00591         typedef typename internal::enumerable_thread_specific_iterator< internal_collection_type, const value_type > const_iterator;
00592 
00593         // Parallel range types
00594         typedef generic_range_type< iterator > range_type;
00595         typedef generic_range_type< const_iterator > const_range_type;
00596     
00598         enumerable_thread_specific() : my_finit_callback(0) { 
00599             my_exemplar_ptr = 0;
00600             my_tls_manager::create_key(my_key); 
00601         }
00602 
        //! Constructor taking an initialization functor.
        /** Finit must be callable with no arguments and return a T; it is
            invoked once per thread to produce that thread's initial value. */
        template <typename Finit>
        enumerable_thread_specific( Finit _finit )
        {
            my_finit_callback = internal::callback_leaf<T,Finit>::new_callback( _finit );
            my_exemplar_ptr = 0; // don't need exemplar if function is provided
            my_tls_manager::create_key(my_key);
        }
00612     
        //! Constructor taking an exemplar: each thread's value is copy-constructed from _exemplar.
        enumerable_thread_specific(const T &_exemplar) : my_finit_callback(0) {
            my_exemplar_ptr = create_exemplar(_exemplar);
            my_tls_manager::create_key(my_key); 
        }
00618     
        //! Destructor: destroys all thread-local elements, the TLS key, and any callback/exemplar.
        ~enumerable_thread_specific() { 
            unconstruct_locals();  // run ~T on every element before the vector's storage goes away
            my_tls_manager::destroy_key(my_key); 
            if(my_finit_callback) {
                my_finit_callback->destroy();  // allocator-aware "delete this"
            }
            if(my_exemplar_ptr)
            {
                free_exemplar(my_exemplar_ptr);
            }
        }
00631       
        //! Returns a reference to the calling thread's local copy, creating it if needed.
        reference local() {
            bool exists;  // discarded; see local(bool&) for the flag's meaning
            return local(exists);
        }
00637 
        //! Returns a reference to the calling thread's local copy.
        /** Sets 'exists' to true if the element had already been constructed for
            this thread, false if this call created it.  Fast path: a pointer to
            the element is cached in TLS.  Slow path: look up (or insert) the
            thread's slot index in the id->index hash table. */
        reference local(bool& exists)  {
            // Fast path: TLS already caches a direct pointer to this thread's element.
            if ( pointer local_ptr = static_cast<pointer>(my_tls_manager::get_tls(my_key)) ) {
                exists = true;
               return *local_ptr;
            }
            hash_table_index_type local_index;
            typename internal::thread_hash_compare::thread_key my_t_key = internal::thread_hash_compare::my_thread_key(tbb::this_tbb_thread::get_id());
            {
                // First try the lock-free lookup.
                typename thread_to_index_type::const_pointer my_existing_entry;
                my_existing_entry = my_hash_tbl.find(my_t_key);
                if(my_existing_entry) {
                    exists = true;
                    local_index = my_existing_entry->second;
                }
                else {

                    // see if the table entry can be found by accessor
                    typename thread_to_index_type::accessor a;
                    if(!my_hash_tbl.insert(a, my_t_key)) {
                        exists = true;
                        local_index = a->second;
                    }
                    else {
                        // create new entry
                        exists = false;
#if TBB_DEPRECATED
                        local_index = my_locals.push_back(padded_element());
#else
                        // Newer push_back returns an iterator; convert it to an index.
                        local_index = my_locals.push_back(padded_element()) - my_locals.begin();
#endif
                        pointer lref =  reinterpret_cast<T*>((my_locals[local_index].value));
                        // Construct the value: finit callback takes precedence,
                        // then the exemplar, then default construction.
                        if(my_finit_callback) {
                            new(lref) T(my_finit_callback->apply());
                        }
                        else if(my_exemplar_ptr) {
                            pointer t_exemp = reinterpret_cast<T *>(&(my_exemplar_ptr->value));
                            new(lref) T(*t_exemp);
                        }
                        else {
                            new(lref) T();
                        }
                        // insert into hash table
                        a->second = local_index;
                    }
                }
            }

            // Cache the element pointer in TLS so later calls take the fast path.
            pointer local_ref = reinterpret_cast<T*>((my_locals[local_index].value));
            my_tls_manager::set_tls( my_key, static_cast<void *>(local_ref) );
            return *local_ref;
        } // local
00690 
        //! Number of thread-local elements created so far.
        size_type size() const { return my_locals.size(); }
00693     
        //! True if no thread has created an element yet.
        bool empty() const { return my_locals.empty(); }
00696     
00698         iterator begin() { return iterator( my_locals, 0 ); }
00700         iterator end() { return iterator(my_locals, my_locals.size() ); }
00701     
00703         const_iterator begin() const { return const_iterator(my_locals, 0); }
00704     
00706         const_iterator end() const { return const_iterator(my_locals, my_locals.size()); }
00707 
00709         range_type range( size_t grainsize=1 ) { return range_type( begin(), end(), grainsize ); } 
00710         
00712         const_range_type range( size_t grainsize=1 ) const { return const_range_type( begin(), end(), grainsize ); }
00713 
00714         void unconstruct_locals() {
00715             for(typename internal_collection_type::iterator cvi = my_locals.begin(); cvi != my_locals.end(); ++cvi) {
00716                 cvi->unconstruct();
00717             }
00718         }
00719     
00721         void clear() {
00722             unconstruct_locals();
00723             my_locals.clear();
00724             my_hash_tbl.clear();
00725             reset_key();
00726             // callback is not destroyed
00727             // exemplar is not destroyed
00728         }
00729 
00730         // STL container methods
00731         // copy constructor
00732 
00733     private:
00734 
00735         template<typename U, typename A2, ets_key_usage_type C2>
00736         void
00737         internal_copy_construct( const enumerable_thread_specific<U, A2, C2>& other) {
00738             typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
00739             for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
00740                 hash_table_index_type local_index;
00741 #if TBB_DEPRECATED
00742                 local_index = my_locals.push_back(padded_element());
00743 #else
00744                 local_index = my_locals.push_back(padded_element()) - my_locals.begin();
00745 #endif
00746                 (void) new(my_locals[local_index].value) T(*ci);
00747             }
00748             if(other.my_finit_callback) {
00749                 my_finit_callback = other.my_finit_callback->make_copy();
00750             }
00751             else {
00752                 my_finit_callback = 0;
00753             }
00754             if(other.my_exemplar_ptr) {
00755                 pointer local_ref = reinterpret_cast<T*>(other.my_exemplar_ptr->value);
00756                 my_exemplar_ptr = create_exemplar(*local_ref);
00757             }
00758             else {
00759                 my_exemplar_ptr = 0;
00760             }
00761             my_tls_manager::create_key(my_key);
00762         }
00763 
00764     public:
00765 
00766         template<typename U, typename Alloc, ets_key_usage_type Cachetype>
00767         enumerable_thread_specific( const enumerable_thread_specific<U, Alloc, Cachetype>& other ) : my_hash_tbl(other.my_hash_tbl) 
00768         {
00769             internal_copy_construct(other);
00770         }
00771 
00772         enumerable_thread_specific( const enumerable_thread_specific& other ) : my_hash_tbl(other.my_hash_tbl) 
00773         {
00774             internal_copy_construct(other);
00775         }
00776 
00777     private:
00778 
00779         template<typename U, typename A2, ets_key_usage_type C2>
00780         enumerable_thread_specific &
00781         internal_assign(const enumerable_thread_specific<U, A2, C2>& other) {
00782             typedef typename tbb::enumerable_thread_specific<U, A2, C2> other_type;
00783             if(static_cast<void *>( this ) != static_cast<const void *>( &other )) {
00784                 this->clear(); // resets TLS key
00785                 my_hash_tbl = other.my_hash_tbl;
00786                 for(typename other_type::const_iterator ci = other.begin(); ci != other.end(); ++ci) {
00787                     hash_table_index_type local_index;
00788 #if TBB_DEPRECATED
00789                         local_index = my_locals.push_back(padded_element());
00790 #else
00791                         local_index = my_locals.push_back(padded_element()) - my_locals.begin();
00792 #endif
00793                     (void) new(my_locals[local_index].value) T(*ci);
00794                 }
00795 
00796                 if(my_finit_callback) {
00797                     my_finit_callback->destroy();
00798                     my_finit_callback = 0;
00799                 }
00800                 if(my_exemplar_ptr) {
00801                     free_exemplar(my_exemplar_ptr);
00802                     my_exemplar_ptr = 0;
00803                 }
00804                 if(other.my_finit_callback) {
00805                     my_finit_callback = other.my_finit_callback->make_copy();
00806                 }
00807 
00808                 if(other.my_exemplar_ptr) {
00809                     pointer local_ref = reinterpret_cast<T*>(other.my_exemplar_ptr->value);
00810                     my_exemplar_ptr = create_exemplar(*local_ref);
00811                 }
00812             }
00813             return *this;
00814         }
00815 
00816     public:
00817 
00818         // assignment
00819         enumerable_thread_specific& operator=(const enumerable_thread_specific& other) {
00820             return internal_assign(other);
00821         }
00822 
00823         template<typename U, typename Alloc, ets_key_usage_type Cachetype>
00824         enumerable_thread_specific& operator=(const enumerable_thread_specific<U, Alloc, Cachetype>& other)
00825         {
00826             return internal_assign(other);
00827         }
00828 
00829         // combine_func_t has signature T(T,T) or T(const T&, const T&)
00830         template <typename combine_func_t>
00831         T combine(combine_func_t f_combine) {
00832             if(begin() == end()) {
00833                 if(my_finit_callback) {
00834                     return my_finit_callback->apply();
00835                 }
00836                 pointer local_ref = reinterpret_cast<T*>((my_exemplar_ptr->value));
00837                 return T(*local_ref);
00838             }
00839             const_iterator ci = begin();
00840             T my_result = *ci;
00841             while(++ci != end()) 
00842                 my_result = f_combine( my_result, *ci );
00843             return my_result;
00844         }
00845 
00846         // combine_func_t has signature void(T) or void(const T&)
00847         template <typename combine_func_t>
00848         void combine_each(combine_func_t f_combine) {
00849             for(const_iterator ci = begin(); ci != end(); ++ci) {
00850                 f_combine( *ci );
00851             }
00852         }
00853 
00854     }; // enumerable_thread_specific
00855 
00856     template< typename Container >
00857     class flattened2d {
00858 
00859         // This intermediate typedef is to address issues with VC7.1 compilers
00860         typedef typename Container::value_type conval_type;
00861 
00862     public:
00863 
00865         typedef typename conval_type::size_type size_type;
00866         typedef typename conval_type::difference_type difference_type;
00867         typedef typename conval_type::allocator_type allocator_type;
00868         typedef typename conval_type::value_type value_type;
00869         typedef typename conval_type::reference reference;
00870         typedef typename conval_type::const_reference const_reference;
00871         typedef typename conval_type::pointer pointer;
00872         typedef typename conval_type::const_pointer const_pointer;
00873 
00874         typedef typename internal::segmented_iterator<Container, value_type> iterator;
00875         typedef typename internal::segmented_iterator<Container, const value_type> const_iterator;
00876 
00877         flattened2d( const Container &c, typename Container::const_iterator b, typename Container::const_iterator e ) : 
00878             my_container(const_cast<Container*>(&c)), my_begin(b), my_end(e) { }
00879 
00880         flattened2d( const Container &c ) : 
00881             my_container(const_cast<Container*>(&c)), my_begin(c.begin()), my_end(c.end()) { }
00882 
00883         iterator begin() { return iterator(*my_container) = my_begin; }
00884         iterator end() { return iterator(*my_container) = my_end; }
00885         const_iterator begin() const { return const_iterator(*my_container) = my_begin; }
00886         const_iterator end() const { return const_iterator(*my_container) = my_end; }
00887 
00888         size_type size() const {
00889             size_type tot_size = 0;
00890             for(typename Container::const_iterator i = my_begin; i != my_end; ++i) {
00891                 tot_size += i->size();
00892             }
00893             return tot_size;
00894         }
00895 
00896     private:
00897 
00898         Container *my_container;
00899         typename Container::const_iterator my_begin;
00900         typename Container::const_iterator my_end;
00901 
00902     };
00903 
00904     template <typename Container>
00905     flattened2d<Container> flatten2d(const Container &c, const typename Container::const_iterator b, const typename Container::const_iterator e) {
00906         return flattened2d<Container>(c, b, e);
00907     }
00908 
00909     template <typename Container>
00910     flattened2d<Container> flatten2d(const Container &c) {
00911         return flattened2d<Container>(c);
00912     }
00913 
00914 } // namespace tbb
00915 
00916 #endif

Copyright © 2005-2010 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.