#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
// Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
#pragma warning (push)
#pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
#pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "aligned_space.h"
#include "tbb_exception.h"
#include "_concurrent_unordered_internal.h" // Need tbb_hasher
#if TBB_USE_PERFORMANCE_WARNINGS
#include <typeinfo>
#endif

namespace tbb {

// Forward declarations of instrumentation hooks used by thread-checking tools (ITT).
namespace internal {
//! Load of a pointer with acquire semantics, visible to thread-checking tools.
void* __TBB_EXPORTED_FUNC itt_load_pointer_with_acquire_v3( const void* src );
//! Store of a pointer with release semantics, visible to thread-checking tools.
void __TBB_EXPORTED_FUNC itt_store_pointer_with_release_v3( void* dst, void* src );
//! Plain load of a pointer, visible to thread-checking tools.
void* __TBB_EXPORTED_FUNC itt_load_pointer_v3( const void* src );
} // namespace internal

//! hash_compare that is default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};
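
// Illustrative sketch (not part of the original header): concurrent_hash_map accepts a
// user-defined HashCompare in place of tbb_hash_compare. The struct below shows the
// required interface for std::string keys compared case-insensitively; the name
// string_icase_compare and its hash function are hypothetical, not part of TBB, and
// assume <string> and <cctype> are available.
//
//     struct string_icase_compare {
//         static size_t hash( const std::string& s ) {
//             size_t h = 0;
//             for( std::string::const_iterator i = s.begin(); i != s.end(); ++i )
//                 h = h*31 + size_t( std::tolower(*i) );
//             return h;
//         }
//         static bool equal( const std::string& a, const std::string& b ) {
//             if( a.size() != b.size() ) return false;
//             for( size_t i = 0; i < a.size(); ++i )
//                 if( std::tolower(a[i]) != std::tolower(b[i]) ) return false;
//             return true;
//         }
//     };
//
//     // usage: tbb::concurrent_hash_map<std::string, int, string_icase_compare> table;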

namespace interface4 {

template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
class concurrent_hash_map;

//! Implementation details for concurrent_hash_map; not part of the public interface.
namespace internal {

//! Type of a hash code.
typedef size_t hashcode_t;
//! Node base type
struct hash_map_node_base : tbb::internal::no_copy {
    //! Mutex type
    typedef spin_rw_mutex mutex_t;
    //! Scoped lock type for mutex
    typedef mutex_t::scoped_lock scoped_t;
    //! Next node in chain
    hash_map_node_base *next;
    mutex_t mutex;
};
//! Incompleteness flag value: the bucket still awaits rehashing from its parent
static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
//! Rehashed empty bucket flag
static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
//! Base class of concurrent_hash_map
class hash_map_base {
public:
    //! Size type
    typedef size_t size_type;
    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Segment index type
    typedef size_t segment_index_t;
    //! Node base type
    typedef hash_map_node_base node_base;
    //! Bucket type
    struct bucket : tbb::internal::no_copy {
        //! Mutex type for the bucket
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for the mutex
        typedef mutex_t::scoped_lock scoped_t;
        mutex_t mutex;
        node_base *node_list;
    };
    //! Count of segments embedded in the table object itself
    static size_type const embedded_block = 1;
    //! Count of buckets in the embedded segments
    static size_type const embedded_buckets = 1<<embedded_block;
    //! The first heap allocation covers segments [embedded_block, first_block)
    static size_type const first_block = 8;
    //! Size of the segment pointer table (one segment per bit of the hash code)
    static size_type const pointers_per_table = sizeof(segment_index_t) * 8;
    //! Segment pointer
    typedef bucket *segment_ptr_t;
    //! Segment pointers table type
    typedef segment_ptr_t segments_table_t[pointers_per_table];
    //! Hash mask = sum of allocated segment sizes - 1
    atomic<hashcode_t> my_mask;
    //! Segment pointers table. Also prevents false sharing between my_mask and my_size
    segments_table_t my_table;
    //! Size of container in stored items
    atomic<size_type> my_size; // Keep in a separate cache line from my_mask for performance
    //! Zero segment (embedded buckets)
    bucket my_embedded_segment[embedded_buckets];

    //! Constructor: zero the table and set up the embedded buckets
    hash_map_base() {
        std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t)
            + sizeof(my_size) + sizeof(my_mask)
            + embedded_buckets*sizeof(bucket) );
        for( size_type i = 0; i < embedded_block; i++ ) // fill the table
            my_table[i] = my_embedded_segment + segment_base(i);
        my_mask = embedded_buckets - 1;
        __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks");
    }

    //! @return segment index of given index in the array
    static segment_index_t segment_index_of( size_type index ) {
        return segment_index_t( __TBB_Log2( index|1 ) );
    }

    //! @return the first array index of given segment
    static segment_index_t segment_base( segment_index_t k ) {
        return (segment_index_t(1)<<k & ~segment_index_t(1));
    }

    //! @return segment size, except for @arg k == 0, which returns a placeholder value
    static size_type segment_size( segment_index_t k ) {
        return size_type(1)<<k; // fake value for k == 0
    }
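
    // Illustrative note (not part of the original source): the three helpers above map a
    // flat bucket index onto the lazily allocated segment table. Segment 0 is embedded in
    // the map object and covers bucket indices 0 and 1; every later segment k covers
    // segment_size(k) == 2^k indices starting at segment_base(k) == 2^k. For example,
    // bucket index 5 gives segment_index_of(5) == __TBB_Log2(5|1) == 2 and
    // segment_base(2) == 4, so bucket 5 is stored at my_table[2][5-4].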

    //! @return true if @arg ptr is a valid bucket-list pointer (not a flag value)
    static bool is_valid( void *ptr ) {
        return reinterpret_cast<size_t>(ptr) > size_t(63);
    }

    //! Initialize buckets
    static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) {
        if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) );
        else for(size_type i = 0; i < sz; i++, ptr++) {
            *reinterpret_cast<intptr_t*>(&ptr->mutex) = 0;
            ptr->node_list = rehash_req; // mark the new bucket as awaiting rehash
        }
    }

    //! Add node @arg n to bucket @arg b
    static void add_to_bucket( bucket *b, node_base *n ) {
        __TBB_ASSERT(b->node_list != rehash_req, NULL);
        n->next = b->node_list;
        b->node_list = n; // the bucket is locked and already rehashed
    }

    //! Exception safety helper: resets the segment pointer if enable_segment() throws
    struct enable_segment_failsafe {
        segment_ptr_t *my_segment_ptr;
        enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {}
        ~enable_segment_failsafe() {
            if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress
        }
    };

    //! Allocate and publish segment @arg k
    void enable_segment( segment_index_t k, bool is_initial = false ) {
        __TBB_ASSERT( k, "Zero segment must be embedded" );
        enable_segment_failsafe watchdog( my_table, k );
        cache_aligned_allocator<bucket> alloc;
        size_type sz;
        __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment");
        if( k >= first_block ) {
            sz = segment_size( k );
            segment_ptr_t ptr = alloc.allocate( sz );
            init_buckets( ptr, sz, is_initial );
#if TBB_USE_THREADING_TOOLS
            itt_store_pointer_with_release_v3( my_table + k, ptr );
#else
            my_table[k] = ptr;
#endif
            sz <<= 1; // double it to get the entire capacity of the container
        } else { // the first allocated block covers segments [embedded_block, first_block)
            __TBB_ASSERT( k == embedded_block, "Wrong segment index" );
            sz = segment_size( first_block );
            segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets );
            init_buckets( ptr, sz - embedded_buckets, is_initial );
            ptr -= segment_base(embedded_block);
            for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets
#if TBB_USE_THREADING_TOOLS
                itt_store_pointer_with_release_v3( my_table + i, ptr + segment_base(i) );
#else
                my_table[i] = ptr + segment_base(i);
#endif
        }
        // publish the new mask with release semantics
#if TBB_USE_THREADING_TOOLS
        itt_store_pointer_with_release_v3( &my_mask, (void*)(sz-1) );
#else
        my_mask = sz - 1;
#endif
        watchdog.my_segment_ptr = 0; // allocation succeeded, disarm the failsafe
    }

    //! Get bucket by (masked) hashcode
    bucket *get_bucket( hashcode_t h ) const throw() {
        segment_index_t s = segment_index_of( h );
        h -= segment_base(s);
        segment_ptr_t seg = my_table[s];
        __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" );
        return &seg[h];
    }

    //! Internal serial rehashing helper: mark unrehashed children of bucket @arg h as rehashed-empty
    void mark_rehashed_levels( hashcode_t h ) throw () {
        segment_index_t s = segment_index_of( h );
        while( segment_ptr_t seg = my_table[++s] )
            if( seg[h].node_list == rehash_req ) {
                seg[h].node_list = empty_rehashed;
                mark_rehashed_levels( h + segment_base(s) );
            }
    }

    //! Check for a mask race, i.e. the mask has grown since it was read
    // Splitting into two functions should help inlining
    inline bool check_mask_race( const hashcode_t h, hashcode_t &m ) const {
        hashcode_t m_now, m_old = m;
#if TBB_USE_THREADING_TOOLS
        m_now = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        m_now = my_mask;
#endif
        if( m_old != m_now )
            return check_rehashing_collision( h, m_old, m = m_now );
        return false;
    }

    //! Process a mask race: check whether the rehashing collision affects hashcode @arg h
    bool check_rehashing_collision( const hashcode_t h, hashcode_t m_old, hashcode_t m ) const {
        __TBB_ASSERT(m_old != m, NULL);
        if( (h & m_old) != (h & m) ) { // mask changed for this hashcode, rare event
            // condition above proves that 'h' has some other bits set beside 'm_old';
            // find next applicable mask after m_old
            for( ++m_old; !(h & m_old); m_old <<= 1 ) // at most a few rounds depending on the first block size
                ;
            m_old = (m_old<<1) - 1; // get full mask from a bit
            __TBB_ASSERT((m_old&(m_old+1))==0 && m_old <= m, NULL);
            // check whether the bucket is rehashing or already rehashed
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&( get_bucket(h & m_old)->node_list )) != rehash_req )
#else
            if( __TBB_load_with_acquire(get_bucket( h & m_old )->node_list) != rehash_req )
#endif
                return true; // the item may have been moved: the caller must restart the operation
        }
        return false;
    }

    //! Insert a node and check the load factor. @return segment index to enable, or 0 if no growth is needed
    segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) {
        size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted
        add_to_bucket( b, n );
        // check load factor
        if( sz >= mask ) {
            segment_index_t new_seg = segment_index_of( mask+1 );
            __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated");
#if TBB_USE_THREADING_TOOLS
            if( !itt_load_pointer_v3(my_table+new_seg)
#else
            if( !my_table[new_seg]
#endif
                && __TBB_CompareAndSwapW(&my_table[new_seg], 2, 0) == 0 )
                return new_seg; // the caller is responsible for calling enable_segment(new_seg)
        }
        return 0;
    }

    //! Prepare enough segments for the requested number of buckets
    void reserve(size_type buckets) {
        if( !buckets-- ) return;
        bool is_initial = !my_size;
        for( size_type m = my_mask; buckets > m; m = my_mask )
            enable_segment( segment_index_of( m+1 ), is_initial );
    }
    //! Swap hash_map_bases
    void internal_swap(hash_map_base &table) {
        std::swap(this->my_mask, table.my_mask);
        std::swap(this->my_size, table.my_size);
        for(size_type i = 0; i < embedded_buckets; i++)
            std::swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list);
        for(size_type i = embedded_block; i < pointers_per_table; i++)
            std::swap(this->my_table[i], table.my_table[i]);
    }
};

template<typename Iterator>
class hash_map_range;

//! Meets requirements of a forward iterator for STL
/** Value is either the T or const T type of the container. */
template<typename Container, typename Value>
class hash_map_iterator
    : public std::iterator<std::forward_iterator_tag,Value>
{
    typedef Container map_type;
    typedef typename Container::node node;
    typedef hash_map_base::node_base node_base;
    typedef hash_map_base::bucket bucket;

    template<typename C, typename T, typename U>
    friend bool operator==( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend bool operator!=( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename T, typename U>
    friend ptrdiff_t operator-( const hash_map_iterator<C,T>& i, const hash_map_iterator<C,U>& j );

    template<typename C, typename U>
    friend class hash_map_iterator;

    template<typename I>
    friend class hash_map_range;

    void advance_to_next_bucket() {
        size_t k = my_index+1;
        while( my_bucket && k <= my_map->my_mask ) {
            // Advance within the current segment, or fetch the pointer to the next segment's first bucket.
            if( k & (k-2) ) // not the beginning of a segment
                ++my_bucket;
            else my_bucket = my_map->get_bucket( k );
            my_node = static_cast<node*>( my_bucket->node_list );
            if( hash_map_base::is_valid(my_node) ) {
                my_index = k; return;
            }
            ++k;
        }
        my_bucket = 0; my_node = 0; my_index = k; // the end
    }
#if !defined(_MSC_VER) || defined(__INTEL_COMPILER)
    template<typename Key, typename T, typename HashCompare, typename A>
    friend class interface4::concurrent_hash_map;
#else
public: // workaround
#endif
    //! concurrent_hash_map over which we are iterating.
    const Container *my_map;

    //! Index in hash table for current item
    size_t my_index;

    //! Pointer to bucket
    const bucket *my_bucket;

    //! Pointer to node that has current item
    node *my_node;

    hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n );

public:
    //! Construct undefined iterator
    hash_map_iterator() {}
    hash_map_iterator( const hash_map_iterator<Container,typename Container::value_type> &other ) :
        my_map(other.my_map),
        my_index(other.my_index),
        my_bucket(other.my_bucket),
        my_node(other.my_node)
    {}
    Value& operator*() const {
        __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" );
        return my_node->item;
    }
    Value* operator->() const {return &operator*();}
    hash_map_iterator& operator++();

    //! Post increment; returns a pointer to the item the iterator referenced before the increment
    Value* operator++(int) {
        Value* result = &operator*();
        operator++();
        return result;
    }
};

template<typename Container, typename Value>
hash_map_iterator<Container,Value>::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) :
    my_map(&map),
    my_index(index),
    my_bucket(b),
    my_node( static_cast<node*>(n) )
{
    if( b && !hash_map_base::is_valid(n) )
        advance_to_next_bucket();
}

template<typename Container, typename Value>
hash_map_iterator<Container,Value>& hash_map_iterator<Container,Value>::operator++() {
    my_node = static_cast<node*>( my_node->next );
    if( !my_node ) advance_to_next_bucket();
    return *this;
}

template<typename Container, typename T, typename U>
bool operator==( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node == j.my_node && i.my_map == j.my_map;
}

template<typename Container, typename T, typename U>
bool operator!=( const hash_map_iterator<Container,T>& i, const hash_map_iterator<Container,U>& j ) {
    return i.my_node != j.my_node || i.my_map != j.my_map;
}

//! Range class used with concurrent_hash_map
/** Models a splittable range for use with parallel algorithms. */
template<typename Iterator>
class hash_map_range {
    typedef typename Iterator::map_type map_type;
    Iterator my_begin;
    Iterator my_end;
    mutable Iterator my_midpoint;
    size_t my_grainsize;
    //! Set my_midpoint to point approximately half way between my_begin and my_end.
    void set_midpoint() const;
    template<typename U> friend class hash_map_range;
public:
    //! Type for size of a range
    typedef std::size_t size_type;
    typedef typename Iterator::value_type value_type;
    typedef typename Iterator::reference reference;
    typedef typename Iterator::difference_type difference_type;
    typedef Iterator iterator;

    //! True if range is empty.
    bool empty() const {return my_begin==my_end;}

    //! True if range can be partitioned into two subranges.
    bool is_divisible() const {
        return my_midpoint!=my_end;
    }
    //! Split range.
    hash_map_range( hash_map_range& r, split ) :
        my_end(r.my_end),
        my_grainsize(r.my_grainsize)
    {
        r.my_end = my_begin = r.my_midpoint;
        __TBB_ASSERT( !empty(), "Splitting a range that is not divisible" );
        __TBB_ASSERT( !r.empty(), "Splitting a range that is not divisible" );
        set_midpoint();
        r.set_midpoint();
    }
    //! Type conversion from a compatible range (e.g. non-const to const iterator)
    template<typename U>
    hash_map_range( hash_map_range<U>& r) :
        my_begin(r.my_begin),
        my_end(r.my_end),
        my_midpoint(r.my_midpoint),
        my_grainsize(r.my_grainsize)
    {}
#if TBB_DEPRECATED
    //! Init range with iterators and grainsize specified
    hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) :
        my_begin(begin_),
        my_end(end_),
        my_grainsize(grainsize_)
    {
        if(!my_end.my_index && !my_end.my_bucket) // end of the container
            my_end.my_index = my_end.my_map->my_mask + 1;
        set_midpoint();
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
    }
#endif
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};

template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index,
        "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index,
        "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end,
        "[my_begin, my_midpoint) range should not be empty" );
}

} // namespace internal

//! Unordered map from Key to T with concurrent access.
/** The table grows on demand. find(), insert() and erase() may be called concurrently;
    elements are reached through accessor (write lock) or const_accessor (read lock)
    objects, which keep the corresponding item locked for as long as they stay attached.
    HashCompare must provide hash() and equal() methods for Key, as tbb_hash_compare does.
    Iteration and whole-table operations such as clear(), rehash() and operator=() are
    not thread-safe. */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation
        void *operator new( size_t, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr)
                tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // matching placement delete, called if an exception is thrown in the constructor
        void operator delete( void *ptr, node_allocator_type &a ) { return a.deallocate(static_cast<node*>(ptr),1); }
    };

    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }

    //! Bucket accessor: finds a bucket by masked hashcode, rehashes it if needed, and keeps it locked
    class bucket_accessor : public bucket::scoped_t {
        bool my_is_writer;
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash it, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // if the bucket still awaits rehashing, try to grab it for write and rehash it here
#if TBB_USE_THREADING_TOOLS
            if( itt_load_pointer_with_acquire_v3(&my_b->node_list) == internal::rehash_req
#else
            if( __TBB_load_with_acquire(my_b->node_list) == internal::rehash_req
#endif
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); // recursive rehashing
                my_is_writer = true;
            }
            else bucket::scoped_t::acquire( my_b->mutex, my_is_writer = writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return my_is_writer; }
        //! get bucket pointer
        bucket *operator() () { return my_b; }

        bool upgrade_to_writer() { my_is_writer = true; return bucket::scoped_t::upgrade_to_writer(); }
    };

    //! Rehash bucket @arg b_new (which must be write-locked) by pulling matching items from its parent bucket
    void rehash_bucket( bucket *b_new, const hashcode_t h ) {
        __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)");
        __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
        __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed
        hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit

        bucket_accessor b_old( this, h & mask );

        mask = (mask<<1) | 1; // get full mask for new bucket
        __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL );
    restart:
        for( node_base **p = &b_old()->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) {
            hashcode_t c = my_hash_compare.hash( static_cast<node*>(n)->item.first );
#if TBB_USE_ASSERT
            hashcode_t bmask = h & (mask>>1);
            bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket
            __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" );
#endif
            if( (c & mask) == h ) { // the item belongs to the new bucket
                if( !b_old.is_writer() )
                    if( !b_old.upgrade_to_writer() ) {
                        goto restart; // node pointer can be invalid due to concurrent erase
                    }
                *p = n->next; // exclude from b_old
                add_to_bucket( b_new, n );
            } else p = &n->next; // iterate to next item
        }
    }

public:

    class accessor;
    //! Combines data access, locking, and garbage collection (read lock on an item)
    class const_accessor {
        friend class concurrent_hash_map<Key,T,HashCompare,Allocator>;
        friend class accessor;
        void operator=( const accessor & ) const; // Deny access
        const_accessor( const accessor & );       // Deny access
    public:
        //! Type of value
        typedef const typename concurrent_hash_map::value_type value_type;

        //! True if result is empty, i.e. key was not found
        bool empty() const {return !my_node;}

        //! Set to null
        void release() {
            if( my_node ) {
                my_lock.release();
                my_node = 0;
            }
        }

        //! Return reference to associated value in hash table.
        const_reference operator*() const {
            __TBB_ASSERT( my_node, "attempt to dereference empty accessor" );
            return my_node->item;
        }

        //! Return pointer to associated value in hash table.
        const_pointer operator->() const {
            return &operator*();
        }

        //! Create empty result
        const_accessor() : my_node(NULL) {}

        //! Destroy result after releasing the underlying reference.
        ~const_accessor() {
            my_node = NULL; // the lock is released by my_lock's destructor
        }
    private:
        node *my_node;
        typename node::scoped_t my_lock;
        hashcode_t my_hash;
    };

    //! Allows write access to elements; combines data access, locking, and garbage collection
    class accessor: public const_accessor {
    public:
        //! Type of value
        typedef typename concurrent_hash_map::value_type value_type;

        //! Return reference to associated value in hash table.
        reference operator*() const {
            __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" );
            return this->my_node->item;
        }

        //! Return pointer to associated value in hash table.
        pointer operator->() const {
            return &operator*();
        }
    };

    //! Construct empty table.
    concurrent_hash_map(const allocator_type &a = allocator_type())
        : my_allocator(a)
    {}

    //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level.
    concurrent_hash_map(size_type n, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( n );
    }

    //! Copy constructor
    concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        internal_copy(table);
    }

    //! Construction from an iteration range [first, last) with a given allocator instance
    template<typename I>
    concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type())
        : my_allocator(a)
    {
        reserve( std::distance(first, last) );
        internal_copy(first, last);
    }

    //! Assignment
    concurrent_hash_map& operator=( const concurrent_hash_map& table ) {
        if( this!=&table ) {
            clear();
            internal_copy(table);
        }
        return *this;
    }

    //! Rehashes and optionally resizes the whole table.
    /** Useful to optimize performance before or after concurrent operations.
        It also enables the concurrent methods find() and count() to be used from a serial context. */
    void rehash(size_type n = 0);

    //! Clear table
    void clear();

    //! Clear table and destroy it.
    ~concurrent_hash_map() { clear(); }

    //------------------------------------------------------------------------
    // Parallel algorithm support
    //------------------------------------------------------------------------
    range_type range( size_type grainsize=1 ) {
        return range_type( *this, grainsize );
    }
    const_range_type range( size_type grainsize=1 ) const {
        return const_range_type( *this, grainsize );
    }

    //------------------------------------------------------------------------
    // STL support - not thread-safe methods
    //------------------------------------------------------------------------
    iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    iterator end() {return iterator(*this,0,0,0);}
    const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);}
    const_iterator end() const {return const_iterator(*this,0,0,0);}
    std::pair<iterator, iterator> equal_range( const Key& key ) { return internal_equal_range(key, end()); }
    std::pair<const_iterator, const_iterator> equal_range( const Key& key ) const { return internal_equal_range(key, end()); }

    //! Number of items in table.
    size_type size() const { return my_size; }

    //! True if size()==0.
    bool empty() const { return my_size == 0; }

    //! Upper bound on size.
    size_type max_size() const {return (~size_type(0))/sizeof(node);}

    //! Returns the current number of buckets
    size_type bucket_count() const { return my_mask+1; }

    //! return allocator object
    allocator_type get_allocator() const { return this->my_allocator; }

    //! swap two instances. Iterators are invalidated
    void swap(concurrent_hash_map &table);

    //------------------------------------------------------------------------
    // concurrent map operations
    //------------------------------------------------------------------------

    //! Return count of items (0 or 1)
    size_type count( const Key &key ) const {
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, NULL, /*write=*/false );
    }

    //! Find item and acquire a read lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( const_accessor &result, const Key &key ) const {
        result.release();
        return const_cast<concurrent_hash_map*>(this)->lookup(/*insert*/false, key, NULL, &result, /*write=*/false );
    }

    //! Find item and acquire a write lock on the item.
    /** Return true if item is found, false otherwise. */
    bool find( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/false, key, NULL, &result, /*write=*/true );
    }

    //! Insert item (if not already present) and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/false );
    }

    //! Insert item (if not already present) and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const Key &key ) {
        result.release();
        return lookup(/*insert*/true, key, NULL, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already, and acquire a read lock on the item.
    /** Returns true if item is new. */
    bool insert( const_accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/false );
    }

    //! Insert item by copying if there is no such key present already, and acquire a write lock on the item.
    /** Returns true if item is new. */
    bool insert( accessor &result, const value_type &value ) {
        result.release();
        return lookup(/*insert*/true, value.first, &value.second, &result, /*write=*/true );
    }

    //! Insert item by copying if there is no such key present already.
    /** Returns true if item is inserted. */
    bool insert( const value_type &value ) {
        return lookup(/*insert*/true, value.first, &value.second, NULL, /*write=*/false );
    }

    //! Insert range [first, last)
    template<typename I>
    void insert(I first, I last) {
        for(; first != last; ++first)
            insert( *first );
    }

    //! Erase item.
    /** Return true if the item was erased by this call. */
    bool erase( const Key& key );

    //! Erase item by const_accessor.
    /** Return true if the item was erased by this call. */
    bool erase( const_accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/true );
    }

    //! Erase item by accessor.
    /** Return true if the item was erased by this call. */
    bool erase( accessor& item_accessor ) {
        return exclude( item_accessor, /*readonly=*/false );
    }

protected:
    //! Insert or find item and optionally acquire a lock on it
    bool lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write );

    //! delete item by accessor
    bool exclude( const_accessor &item_accessor, bool readonly );

    //! Returns an equal_range pair of iterators for the given key (at most one matching item)
    template<typename I>
    std::pair<I, I> internal_equal_range( const Key& key, I end ) const;

    //! Copy "source" to *this, where *this must start out empty
    void internal_copy( const concurrent_hash_map& source );

    template<typename I>
    void internal_copy(I first, I last);

    //! Fast find when no concurrent erasure is used. For internal use inside TBB only!
    /** Returns a pointer to the item with the given key, or NULL if no such item exists.
        Must not be called concurrently with erasure operations. */
    const_pointer internal_fast_find( const Key& key ) const {
        hashcode_t h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
        hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
        hashcode_t m = my_mask;
#endif
        node *n;
    restart:
        __TBB_ASSERT((m&(m+1))==0, NULL);
        bucket *b = get_bucket( h & m );
        // if the bucket is not rehashed yet, rehash it here or wait for the rehashing thread
#if TBB_USE_THREADING_TOOLS
        if( itt_load_pointer_with_acquire_v3(&b->node_list) == internal::rehash_req )
#else
        if( __TBB_load_with_acquire(b->node_list) == internal::rehash_req )
#endif
        {
            bucket::scoped_t lock;
            if( lock.try_acquire( b->mutex, /*write=*/true ) ) {
                if( b->node_list == internal::rehash_req)
                    const_cast<concurrent_hash_map*>(this)->rehash_bucket( b, h & m ); // recursive rehashing
            }
            else lock.acquire( b->mutex, /*write=*/false );
            __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL);
        }
        n = search_bucket( key, b );
        if( n )
            return &n->item;
        else if( check_mask_race( h, m ) )
            goto restart;
        return 0;
    }
};
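
// Illustrative usage sketch (not part of the original header). The table name, key type
// and helper function below are hypothetical; error handling is omitted.
//
//     typedef tbb::concurrent_hash_map<std::string,int> counter_table_t;
//
//     void increment( counter_table_t &table, const std::string &key ) {
//         counter_table_t::accessor a;      // holds a write lock while attached
//         table.insert( a, key );           // inserts {key, int()} if the key is absent
//         ++a->second;                      // operator-> yields pointer to pair<const Key,T>
//     }                                     // lock is released when 'a' is destroyed
//
//     // Read-only lookups use const_accessor with find(); serial traversal uses the
//     // (not thread-safe) begin()/end() iterators, and parallel traversal can use
//     // tbb::parallel_for over table.range().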

#if _MSC_VER && !defined(__INTEL_COMPILER)
    // Suppress "conditional expression is constant" warning.
#pragma warning( push )
#pragma warning( disable: 4127 )
#endif

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::lookup( bool op_insert, const Key &key, const T *t, const_accessor *result, bool write ) {
    __TBB_ASSERT( !result || !result->my_node, NULL );
    segment_index_t grow_segment;
    bool return_value;
    node *n, *tmp_n = 0;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
restart:
    {//lock scope
        __TBB_ASSERT((m&(m+1))==0, NULL);
        return_value = false;
        // get bucket
        bucket_accessor b( this, h & m );

        // find a node
        n = search_bucket( key, b() );
        if( op_insert ) {
            // [opt] insert a key
            if( !n ) {
                if( !tmp_n ) {
                    if(t) tmp_n = new( my_allocator ) node(key, *t);
                    else tmp_n = new( my_allocator ) node(key);
                }
                if( !b.is_writer() && !b.upgrade_to_writer() ) { // the lock was temporarily released
                    // Rerun search_bucket, in case another thread inserted the item during the upgrade.
                    n = search_bucket( key, b() );
                    if( is_valid(n) ) { // unfortunately, it did
                        b.downgrade_to_reader();
                        goto exists;
                    }
                }
                if( check_mask_race(h, m) )
                    goto restart; // b.release() is done in ~b
                // insert and set flag to grow the container
                grow_segment = insert_new_node( b(), n = tmp_n, m );
                tmp_n = 0;
                return_value = true;
            } else {
exists:         grow_segment = 0;
            }
        } else { // find or count
            if( !n ) {
                if( check_mask_race( h, m ) )
                    goto restart; // b.release() is done in ~b
                return false;
            }
            return_value = true;
            grow_segment = 0;
        }
        if( !result ) goto check_growth;
        // acquire the item
        if( !result->my_lock.try_acquire( n->mutex, write ) ) {
            // we are unlucky, prepare for longer wait
            tbb::internal::atomic_backoff trials;
            do {
                if( !trials.bounded_pause() ) {
                    // the wait takes really long, restart the operation
                    b.release();
                    __TBB_ASSERT( !op_insert || !return_value, "Can't acquire new item in locked bucket?" );
                    __TBB_Yield();
#if TBB_USE_THREADING_TOOLS
                    m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
                    m = my_mask;
#endif
                    goto restart;
                }
            } while( !result->my_lock.try_acquire( n->mutex, write ) );
        }
    }//lock scope
    result->my_node = n;
    result->my_hash = h;
check_growth:
    // [opt] grow the container
    if( grow_segment )
        enable_segment( grow_segment );
    if( tmp_n ) // if op_insert only
        delete_node( tmp_n );
    return return_value;
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
std::pair<I, I> concurrent_hash_map<Key,T,HashCompare,A>::internal_equal_range( const Key& key, I end_ ) const {
    hashcode_t h = my_hash_compare.hash( key );
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
    h &= m;
    bucket *b = get_bucket( h );
    while( b->node_list == internal::rehash_req ) {
        m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
        b = get_bucket( h &= m );
    }
    node *n = search_bucket( key, b );
    if( !n )
        return std::make_pair(end_, end_);
    iterator lower(*this, h, b, n), upper(lower);
    return std::make_pair(lower, ++upper);
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::exclude( const_accessor &item_accessor, bool readonly ) {
    __TBB_ASSERT( item_accessor.my_node, NULL );
    node_base *const n = item_accessor.my_node;
    item_accessor.my_node = NULL; // the accessor is released regardless of the outcome
    hashcode_t const h = item_accessor.my_hash;
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
    do {
        // get bucket
        bucket_accessor b( this, h & m, /*writer=*/true );
        node_base **p = &b()->node_list;
        while( *p && *p != n )
            p = &(*p)->next;
        if( !*p ) { // someone else was the first to erase it
            if( check_mask_race( h, m ) )
                continue;
            item_accessor.my_lock.release();
            return false;
        }
        __TBB_ASSERT( *p == n, NULL );
        *p = n->next; // remove from container
        my_size--;
        break;
    } while(true);
    if( readonly ) // need to get exclusive lock
        item_accessor.my_lock.upgrade_to_writer(); // return value means nothing here
    item_accessor.my_lock.release();
    delete_node( n ); // Only one thread can delete it due to the write lock on the chain
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
bool concurrent_hash_map<Key,T,HashCompare,A>::erase( const Key &key ) {
    node_base *n;
    hashcode_t const h = my_hash_compare.hash( key );
#if TBB_USE_THREADING_TOOLS
    hashcode_t m = (hashcode_t) itt_load_pointer_with_acquire_v3( &my_mask );
#else
    hashcode_t m = my_mask;
#endif
restart:
    {//lock scope
        // get bucket
        bucket_accessor b( this, h & m );
    search:
        node_base **p = &b()->node_list;
        n = *p;
        while( is_valid(n) && !my_hash_compare.equal(key, static_cast<node*>(n)->item.first ) ) {
            p = &n->next;
            n = *p;
        }
        if( !n ) { // not found, but the mask could have changed
            if( check_mask_race( h, m ) )
                goto restart;
            return false;
        }
        else if( !b.is_writer() && !b.upgrade_to_writer() ) {
            if( check_mask_race( h, m ) ) // contended upgrade, check mask
                goto restart;
            goto search;
        }
        *p = n->next;
        my_size--;
    }
    {
        typename node::scoped_t item_locker( n->mutex, /*write=*/true );
    }
    // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor!
    delete_node( n ); // Only one thread can delete it due to the write lock on the bucket
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::swap(concurrent_hash_map<Key,T,HashCompare,A> &table) {
    std::swap(this->my_allocator, table.my_allocator);
    std::swap(this->my_hash_compare, table.my_hash_compare);
    internal_swap(table);
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::rehash(size_type sz) {
    reserve( sz );
    hashcode_t mask = my_mask;
    hashcode_t b = (mask+1)>>1; // size or first index of the last segment
    __TBB_ASSERT((b&(b-1))==0, NULL);
    bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing
    for(; b <= mask; b++, bp++ ) {
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        if( n == internal::rehash_req ) { // rehash this bucket; conditional because rehashing of a previous bucket may affect this one
            hashcode_t h = b; bucket *b_old = bp;
            do {
                __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" );
                hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit
                b_old = get_bucket( h &= m );
            } while( b_old->node_list == internal::rehash_req );
            // now h is the index of the root rehashed bucket b_old
            mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments
            for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) {
                hashcode_t c = my_hash_compare.hash( static_cast<node*>(q)->item.first );
                if( (c & mask) != h ) { // should be moved
                    *p = q->next; // exclude from b_old
                    bucket *b_new = get_bucket( c & mask );
                    __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" );
                    add_to_bucket( b_new, q );
                } else p = &q->next; // iterate to next item
            }
        }
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    for( b = 0; b <= mask; b++ ) { // check the whole table
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" );
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n->next ) overpopulated_buckets++;
#endif
#if TBB_USE_ASSERT
        for( ; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first ) & mask;
            __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::clear() {
    hashcode_t m = my_mask;
    __TBB_ASSERT((m&(m+1))==0, NULL);
#if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
#if TBB_USE_PERFORMANCE_WARNINGS
    int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0;
    static bool reported = false;
#endif
    bucket *bp = 0;
    // check consistency
    for( segment_index_t b = 0; b <= m; b++ ) {
        if( b & (b-2) ) ++bp; // not the beginning of a segment
        else bp = get_bucket( b );
        node_base *n = bp->node_list;
        __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" );
        __TBB_ASSERT( *reinterpret_cast<intptr_t*>(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" );
#if TBB_USE_PERFORMANCE_WARNINGS
        if( n == internal::empty_rehashed ) empty_buckets++;
        else if( n == internal::rehash_req ) buckets--;
        else if( n->next ) overpopulated_buckets++;
#endif
#if __TBB_EXTRA_DEBUG
        for(; is_valid(n); n = n->next ) {
            hashcode_t h = my_hash_compare.hash( static_cast<node*>(n)->item.first );
            h &= m;
            __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" );
        }
#endif
    }
#if TBB_USE_PERFORMANCE_WARNINGS
    if( buckets > current_size) empty_buckets -= buckets - current_size;
    else overpopulated_buckets -= current_size - buckets;
    if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) {
        tbb::internal::runtime_warning(
            "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d",
            typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets );
        reported = true;
    }
#endif
#endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS
    my_size = 0;
    segment_index_t s = segment_index_of( m );
    __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" );
    cache_aligned_allocator<bucket> alloc;
    do { // retire the segments from the last to the first
        __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" );
        segment_ptr_t buckets_ptr = my_table[s];
        size_type sz = segment_size( s ? s : 1 );
        for( segment_index_t i = 0; i < sz; i++ )
            for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) {
                buckets_ptr[i].node_list = n->next;
                delete_node( n );
            }
        if( s >= first_block) // the first block and beyond are regular segments
            alloc.deallocate( buckets_ptr, sz );
        else if( s == embedded_block && embedded_block != first_block )
            alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets );
        if( s >= embedded_block ) my_table[s] = 0;
    } while(s-- > 0);
    my_mask = embedded_buckets - 1;
}

template<typename Key, typename T, typename HashCompare, typename A>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy( const concurrent_hash_map& source ) {
    reserve( source.my_size );
    hashcode_t mask = source.my_mask;
    if( my_mask == mask ) { // optimized bucket-by-bucket copy
        bucket *dst = 0, *src = 0;
        bool rehash_required = false;
        for( hashcode_t k = 0; k <= mask; k++ ) {
            if( k & (k-2) ) ++dst,src++; // not the beginning of a segment
            else { dst = get_bucket( k ); src = source.get_bucket( k ); }
            __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table");
            node *n = static_cast<node*>( src->node_list );
            if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets
                rehash_required = true;
                dst->node_list = internal::rehash_req;
            } else for(; n; n = static_cast<node*>( n->next ) ) {
                add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) );
                ++my_size;
            }
        }
        if( rehash_required ) rehash();
    } else internal_copy( source.begin(), source.end() );
}

template<typename Key, typename T, typename HashCompare, typename A>
template<typename I>
void concurrent_hash_map<Key,T,HashCompare,A>::internal_copy(I first, I last) {
    hashcode_t m = my_mask;
    for(; first != last; ++first) {
        hashcode_t h = my_hash_compare.hash( first->first );
        bucket *b = get_bucket( h & m );
        __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table");
        node *n = new( my_allocator ) node(first->first, first->second);
        add_to_bucket( b, n );
        ++my_size;
    }
}

} // namespace interface4

using interface4::concurrent_hash_map;


template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator==(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b) {
    if(a.size() != b.size()) return false;
    typename concurrent_hash_map<Key, T, HashCompare, A1>::const_iterator i(a.begin()), i_end(a.end());
    typename concurrent_hash_map<Key, T, HashCompare, A2>::const_iterator j, j_end(b.end());
    for(; i != i_end; ++i) {
        j = b.equal_range(i->first).first;
        if( j == j_end || !(i->second == j->second) ) return false;
    }
    return true;
}

template<typename Key, typename T, typename HashCompare, typename A1, typename A2>
inline bool operator!=(const concurrent_hash_map<Key, T, HashCompare, A1> &a, const concurrent_hash_map<Key, T, HashCompare, A2> &b)
{    return !(a == b); }

template<typename Key, typename T, typename HashCompare, typename A>
inline void swap(concurrent_hash_map<Key, T, HashCompare, A> &a, concurrent_hash_map<Key, T, HashCompare, A> &b)
{    a.swap( b ); }

#if _MSC_VER && !defined(__INTEL_COMPILER)
#pragma warning( pop )
#endif // warning 4127 is back

} // namespace tbb

#endif /* __TBB_concurrent_hash_map_H */