_concurrent_unordered_internal.h

00001 /*
00002     Copyright 2005-2010 Intel Corporation.  All Rights Reserved.
00003 
00004     The source code contained or described herein and all documents related
00005     to the source code ("Material") are owned by Intel Corporation or its
00006     suppliers or licensors.  Title to the Material remains with Intel
00007     Corporation or its suppliers and licensors.  The Material is protected
00008     by worldwide copyright laws and treaty provisions.  No part of the
00009     Material may be used, copied, reproduced, modified, published, uploaded,
00010     posted, transmitted, distributed, or disclosed in any way without
00011     Intel's prior express written permission.
00012 
00013     No license under any patent, copyright, trade secret or other
00014     intellectual property right is granted to or conferred upon you by
00015     disclosure or delivery of the Materials, either expressly, by
00016     implication, inducement, estoppel or otherwise.  Any license under such
00017     intellectual property rights must be express and approved by Intel in
00018     writing.
00019 */
00020 
00021 #ifndef __TBB_concurrent_unordered_internal_H
00022 #define __TBB_concurrent_unordered_internal_H
00023 
00024 #include "tbb_stddef.h"
00025 
00026 #if !TBB_USE_EXCEPTIONS && _MSC_VER
00027     // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
00028     #pragma warning (push)
00029     #pragma warning (disable: 4530)
00030 #endif
00031 
00032 #include <iterator>
00033 #include <utility>      // Need std::pair
00034 #include <functional>
00035 #include <string>       // For tbb_hasher
00036 #include <cstring>      // Need std::memset
00037 
00038 #if !TBB_USE_EXCEPTIONS && _MSC_VER
00039     #pragma warning (pop)
00040 #endif
00041 
00042 #include "tbb_machine.h"
00043 #include "tbb_exception.h"
00044 #include "tbb_allocator.h"
00045 
00046 namespace tbb {
00047 namespace interface5 {
00049 namespace internal {
00050 
00051 template <typename T, typename Allocator>
00052 class split_ordered_list;
00053 template <typename Traits>
00054 class concurrent_unordered_base;
00055 
00056 // Forward list iterators (without skipping dummy elements)
template<class Solist, typename Value>
class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
{
    template <typename T, typename Allocator>
    friend class split_ordered_list;
    template <typename Traits>
    friend class concurrent_unordered_base;
    template<class M, typename V>
    friend class flist_iterator;

    typedef typename Solist::nodeptr_t nodeptr_t;
public:
    typedef typename Solist::value_type value_type;
    typedef typename Solist::difference_type difference_type;
    typedef typename Solist::pointer pointer;
    typedef typename Solist::reference reference;

    // A default-constructed iterator points to no node
    flist_iterator() : my_node_ptr(0) {}

    // Converting constructor: allows iterator -> const_iterator conversion
    flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
        : my_node_ptr(other.my_node_ptr) {}

    // Access the element stored in the current node
    reference operator*() const { return my_node_ptr->my_element; }
    pointer operator->() const { return &**this; }

    // Pre-increment: step to the next node in the list
    flist_iterator& operator++() {
        my_node_ptr = my_node_ptr->my_next;
        return *this;
    }

    // Post-increment: step forward, returning the previous position
    flist_iterator operator++(int) {
        flist_iterator previous = *this;
        ++(*this);
        return previous;
    }

protected:
    // Friends construct raw iterators directly from a node pointer
    flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
    nodeptr_t get_node_ptr() const { return my_node_ptr; }

    nodeptr_t my_node_ptr; // Current node; NULL means end of list

    template<typename M, typename T, typename U>
    friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
    template<typename M, typename T, typename U>
    friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
};

// Raw iterators compare equal exactly when they reference the same node
template<typename Solist, typename T, typename U>
bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
    return i.my_node_ptr == j.my_node_ptr;
}
template<typename Solist, typename T, typename U>
bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
    return !(i == j);
}
00112 
00113 // Split-order list iterators, needed to skip dummy elements
00114 template<class Solist, typename Value>
00115 class solist_iterator : public flist_iterator<Solist, Value>
00116 {
00117     typedef flist_iterator<Solist, Value> base_type;
00118     typedef typename Solist::nodeptr_t nodeptr_t;
00119     using base_type::get_node_ptr;
00120     template <typename T, typename Allocator>
00121     friend class split_ordered_list;
00122     template<class M, typename V>
00123     friend class solist_iterator;
00124     template<typename M, typename T, typename U>
00125     friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
00126     template<typename M, typename T, typename U>
00127     friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
00128 
00129     const Solist *my_list_ptr;
00130     solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
00131 
00132 public:
00133     typedef typename Solist::value_type value_type;
00134     typedef typename Solist::difference_type difference_type;
00135     typedef typename Solist::pointer pointer;
00136     typedef typename Solist::reference reference;
00137 
00138     solist_iterator() {}
00139     solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other )
00140         : base_type(other), my_list_ptr(other.my_list_ptr) {}
00141 
00142     reference operator*() const {
00143         return this->base_type::operator*();
00144     }
00145 
00146     pointer operator->() const {
00147         return (&**this);
00148     }
00149 
00150     solist_iterator& operator++() {
00151         do ++(*(base_type *)this);
00152         while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
00153 
00154         return (*this);
00155     }
00156 
00157     solist_iterator operator++(int) {
00158         solist_iterator tmp = *this;
00159         do ++*this;
00160         while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
00161 
00162         return (tmp);
00163     }
00164 };
00165 
00166 template<typename Solist, typename T, typename U>
00167 bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
00168     return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
00169 }
00170 template<typename Solist, typename T, typename U>
00171 bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
00172     return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
00173 }
00174 
// Forward type and class definitions

// Order key type for nodes in the split-ordered list. The low-order bit
// distinguishes real elements (odd keys) from dummy bucket nodes (even keys);
// see node::is_dummy() below.
typedef size_t sokey_t;
00177 
00178 // Forward list in which elements are sorted in a split-order
// Forward list in which elements are sorted by their split-order key.
// Supports lock-free concurrent insertion (via CAS on next pointers);
// erasure is NOT safe to run concurrently with other operations.
template <typename T, typename Allocator>
class split_ordered_list
{
public:
    typedef split_ordered_list<T, Allocator> self_type;
    typedef typename Allocator::template rebind<T>::other allocator_type;
    struct node;
    typedef node *nodeptr_t;

    // Standard container typedefs, taken from the rebound allocator
    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::value_type value_type;

    // "Public" iterators skip dummy nodes; "raw" iterators visit every node
    typedef solist_iterator<self_type, const value_type> const_iterator;
    typedef solist_iterator<self_type, value_type> iterator;
    typedef flist_iterator<self_type, const value_type> raw_const_iterator;
    typedef flist_iterator<self_type, value_type> raw_iterator;

    // Node that holds the element in a split-ordered list
    struct node : tbb::internal::no_assign
    {
        // Initialize the node with the given order key.
        // Note: does not construct my_element; callers placement-construct it.
        void init(sokey_t order_key) {
            my_order_key = order_key;
            my_next = NULL;
        }

        // Return the order key (needed for hashing)
        sokey_t get_order_key() const { // TODO: remove
            return my_order_key;
        }

        // Inserts the new element in the list in an atomic fashion.
        // Returns new_node on success; on failure returns the node some other
        // thread linked in concurrently (the "interfering" node).
        nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
        {
            // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
            nodeptr_t exchange_node = (nodeptr_t) __TBB_CompareAndSwapW((void *) &my_next, (uintptr_t)new_node, (uintptr_t)current_node);

            if (exchange_node == current_node) // CAS observed the expected next pointer, i.e. it succeeded
            {
                // Operation succeeded, return the new node
                return new_node;
            }
            else
            {
                // Operation failed, return the "interfering" node
                return exchange_node;
            }
        }

        // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
        // in the hash table to quickly index into the right subsection of the split-ordered list.
        // Dummy nodes have even order keys; real elements have odd ones.
        bool is_dummy() const {
            return (my_order_key & 0x1) == 0;
        }


        nodeptr_t  my_next;      // Next element in the list
        value_type my_element;   // Element storage
        sokey_t    my_order_key; // Order key for this element
    };

    // Allocate a new node with the given order key and value.
    // If copy construction of the value throws, the raw node memory is
    // deallocated before the exception is rethrown (no leak).
    nodeptr_t create_node(sokey_t order_key, const T &value) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T(value);
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    // Allocate a new node with the given order key; used to allocate dummy nodes.
    // The element is default-constructed (same exception safety as above).
    nodeptr_t create_node(sokey_t order_key) {
        nodeptr_t pnode = my_node_allocator.allocate(1);

        __TBB_TRY {
            new(static_cast<void*>(&pnode->my_element)) T();
            pnode->init(order_key);
        } __TBB_CATCH(...) {
            my_node_allocator.deallocate(pnode, 1);
            __TBB_RETHROW();
        }

        return (pnode);
    }

    split_ordered_list(allocator_type a = allocator_type())
        : my_node_allocator(a), my_element_count(0)
    {
        // Immediately allocate a dummy node with order key of 0. This node
        // will always be the head of the list.
        my_head = create_node(0);
    }

    ~split_ordered_list()
    {
        // Clear the list
        clear();

        // Remove the head element which is not cleared by clear()
        nodeptr_t pnode = my_head;
        my_head = NULL;

        __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");

        destroy_node(pnode);
    }

    // Common forward list functions

    allocator_type get_allocator() const {
        return (my_node_allocator);
    }

    // Destroys every node (real and dummy) except the head dummy and resets
    // the element count. Not safe to call concurrently with other operations.
    void clear() {
        nodeptr_t pnext;
        nodeptr_t pnode = my_head;

        __TBB_ASSERT(my_head != NULL, "Invalid head list node");
        // Detach the chain from the head first, then delete it node by node
        pnext = pnode->my_next;
        pnode->my_next = NULL;
        pnode = pnext;

        while (pnode != NULL)
        {
            pnext = pnode->my_next;
            destroy_node(pnode);
            pnode = pnext;
        }

        my_element_count = 0;
    }

    // Returns a first non-dummy element in the SOL
    iterator begin() {
        return first_real_iterator(raw_begin());
    }

    // Returns a first non-dummy element in the SOL
    const_iterator begin() const {
        return first_real_iterator(raw_begin());
    }

    iterator end() {
        return (iterator(0, this));
    }

    const_iterator end() const {
        return (const_iterator(0, this));
    }

    const_iterator cbegin() const {
        return (((const self_type *)this)->begin());
    }

    const_iterator cend() const {
        return (((const self_type *)this)->end());
    }

    // Checks if the number of elements (non-dummy) is 0
    bool empty() const {
        return (my_element_count == 0);
    }

    // Returns the number of non-dummy elements in the list
    size_type size() const {
        return my_element_count;
    }

    // Returns the maximum size of the list, determined by the allocator
    size_type max_size() const {
        return my_node_allocator.max_size();
    }

    // Swaps 'this' list with the passed in one.
    // NOTE(review): swaps only the head and count, not the allocators --
    // assumes both lists use equivalent allocators; not thread-safe.
    void swap(self_type& other)
    {
        if (this == &other)
        {
            // Nothing to do
            return;
        }

        std::swap(my_element_count, other.my_element_count);
        std::swap(my_head, other.my_head);
    }

    // Split-order list functions

    // Returns a first element in the SOL, which is always a dummy
    raw_iterator raw_begin() {
        return raw_iterator(my_head);
    }

    // Returns a first element in the SOL, which is always a dummy
    raw_const_iterator raw_begin() const {
        return raw_const_iterator(my_head);
    }

    raw_iterator raw_end() {
        return raw_iterator(0);
    }

    raw_const_iterator raw_end() const {
        return raw_const_iterator(0);
    }

    // Order key of the node referenced by 'it'; 'it' must not be the end
    static sokey_t get_order_key(const raw_const_iterator& it) {
        return it.get_node_ptr()->get_order_key();
    }

    // Like get_order_key(), but maps the end iterator to the maximum key
    // so that it orders after every real node
    static sokey_t get_safe_order_key(const raw_const_iterator& it) {
        if( !it.get_node_ptr() ) return sokey_t(~0U);
        return it.get_node_ptr()->get_order_key();
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    iterator get_iterator(raw_iterator it) {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of the internal iterator. Public iterator must not
    // be a dummy private iterator.
    const_iterator get_iterator(raw_const_iterator it) const {
        __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
        return const_iterator(it.get_node_ptr(), this);
    }

    // Returns a non-const version of the raw_iterator
    raw_iterator get_iterator(raw_const_iterator it) {
        return raw_iterator(it.get_node_ptr());
    }

    // Returns a non-const version of the iterator
    static iterator get_iterator(const_iterator it) {
        return iterator(it.my_node_ptr, it.my_list_ptr);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    iterator first_real_iterator(raw_iterator it)
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            it++;

        return iterator(it.get_node_ptr(), this);
    }

    // Returns a public iterator version of a first non-dummy internal iterator at or after
    // the passed in internal iterator.
    const_iterator first_real_iterator(raw_const_iterator it) const
    {
        // Skip all dummy, internal only iterators
        while (it != raw_end() && it.get_node_ptr()->is_dummy())
            it++;

        return const_iterator(it.get_node_ptr(), this);
    }

    // Erase an element using the allocator: destroy the node (and its
    // element) and release its memory
    void destroy_node(nodeptr_t pnode) {
        my_node_allocator.destroy(pnode);
        my_node_allocator.deallocate(pnode, 1);
    }

    // Try to insert a new element in the list between 'previous' and
    // 'current_node'. If insert fails, return the node that was inserted
    // instead (by a concurrent thread).
    nodeptr_t try_insert(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
        // Link the new node before publishing it with a CAS on previous->my_next
        new_node->my_next = current_node;
        return previous->atomic_set_next(new_node, current_node);
    }

    // Insert a new element between passed in iterators.
    // On success, *new_count receives the value returned by the atomic
    // fetch-and-add, i.e. the element count before this insertion.
    std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, const value_type &value, sokey_t order_key, size_type *new_count)
    {
        nodeptr_t pnode = create_node(order_key, value);
        nodeptr_t inserted_node = try_insert(it.get_node_ptr(), pnode, next.get_node_ptr());

        if (inserted_node == pnode)
        {
            // If the insert succeeded, check that the order is correct and increment the element count
            check_range();
            *new_count = __TBB_FetchAndAddW((uintptr_t*)&my_element_count, uintptr_t(1));
            return std::pair<iterator, bool>(iterator(pnode, this), true);
        }
        else
        {
            // If the insert failed (element already there), then delete the new one
            destroy_node(pnode);
            return std::pair<iterator, bool>(end(), false);
        }
    }

    // Insert a new dummy element, starting search at a parent dummy element.
    // Returns an iterator to the dummy with the requested order key, whether
    // it was inserted here or found already in place (inserted concurrently).
    raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
    {
        raw_iterator last = raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // 'it' trails one node behind 'where' throughout the search
        where++;

        // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
        nodeptr_t dummy_node = create_node(order_key);

        for (;;)
        {
            __TBB_ASSERT(it != last, "Invalid head list node");

            // If the head iterator is at the end of the list, or past the point where this dummy
            // node needs to be inserted, then try to insert it.
            if (where == last || get_order_key(where) > order_key)
            {
                __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");

                // Try to insert it in the right place
                nodeptr_t inserted_node = try_insert(it.get_node_ptr(), dummy_node, where.get_node_ptr());

                if (inserted_node == dummy_node)
                {
                    // Insertion succeeded, check the list for order violations
                    check_range();
                    return raw_iterator(dummy_node);
                }
                else
                {
                    // Insertion failed: either dummy node was inserted by another thread, or
                    // a real element was inserted at exactly the same place as dummy node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    where++;
                    continue;
                }
            }
            else if (get_order_key(where) == order_key)
            {
                // Another dummy node with the same value found, discard the new one.
                destroy_node(dummy_node);
                return where;
            }

            // Move the iterator forward
            it = where;
            where++;
        }

    }

    // This erase function can handle both real and dummy nodes.
    // 'where' is advanced past the erased node. The unlink is a plain store,
    // so this must not run concurrently with other list operations.
    void erase_node(raw_iterator previous, raw_const_iterator& where)
    {
        nodeptr_t pnode = (where++).get_node_ptr();
        nodeptr_t prevnode = previous.get_node_ptr();
        __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
        prevnode->my_next = pnode->my_next;

        destroy_node(pnode);
    }

    // Erase the element (previous node needs to be passed because this is a forward only list).
    // Returns an iterator to the first real element at or after the erased position.
    iterator erase_node(raw_iterator previous, const_iterator where)
    {
        raw_const_iterator it = where;
        erase_node(previous, it);
        my_element_count--;

        return get_iterator(first_real_iterator(it));
    }

    // Move all elements from the passed in split-ordered list to this one.
    // NOTE(review): each insertion CAS expects my_next == NULL on the tail,
    // so this assumes the destination list is empty (head only) -- confirm
    // against callers.
    void move_all(self_type& source)
    {
        raw_const_iterator first = source.raw_begin();
        raw_const_iterator last = source.raw_end();

        if (first == last)
            return;

        nodeptr_t previous_node = my_head;
        // Keep the source head as the fixed "previous" for erasure below;
        // nodes are always removed right after it, in order.
        raw_const_iterator begin_iterator = first++;

        // Move all elements one by one, including dummy ones
        for (raw_const_iterator it = first; it != last;)
        {
            nodeptr_t pnode = it.get_node_ptr();

            // Clone the node (dummy or real), append it, then erase the original
            nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
            previous_node = try_insert(previous_node, dummy_node, NULL);
            __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
            raw_const_iterator where = it++;
            source.erase_node(get_iterator(begin_iterator), where);
        }
        check_range();
    }


private:

    // Check the list for order violations (debug builds only)
    void check_range()
    {
#if TBB_USE_ASSERT
        for (raw_iterator it = raw_begin(); it != raw_end(); it++)
        {
            raw_iterator next_iterator = it;
            next_iterator++;

            // Note: end() resolves through the raw (base-class) comparison,
            // which compares node pointers only
            __TBB_ASSERT(next_iterator == end() || next_iterator.get_node_ptr()->get_order_key() >= it.get_node_ptr()->get_order_key(), "!!! List order inconsistency !!!");
        }
#endif
    }

    typename allocator_type::template rebind<node>::other my_node_allocator;  // allocator object for nodes
    size_type                                             my_element_count;   // Total item count, not counting dummy nodes
    nodeptr_t                                             my_head;            // pointer to head node
};
00611 
00612 // Template class for hash compare
00613 template<typename Key, typename Hasher, typename Key_equality>
00614 class hash_compare
00615 {
00616 public:
00617     hash_compare() {}
00618 
00619     hash_compare(Hasher a_hasher) : my_hash_object(a_hasher) {}
00620 
00621     hash_compare(Hasher a_hasher, Key_equality a_keyeq) : my_hash_object(a_hasher), my_key_compare_object(a_keyeq) {}
00622 
00623     size_t operator()(const Key& key) const {
00624         return ((size_t)my_hash_object(key));
00625     }
00626 
00627     bool operator()(const Key& key1, const Key& key2) const {
00628         return (!my_key_compare_object(key1, key2));
00629     }
00630 
00631     Hasher       my_hash_object;        // The hash object
00632     Key_equality my_key_compare_object; // The equality comparator object
00633 };
00634 
00635 #if _MSC_VER
00636 #pragma warning(push)
00637 #pragma warning(disable: 4127) // warning 4127 -- while (true) has a constant expression in it (for allow_multimapping)
00638 #endif
00639 
00640 template <typename Traits>
00641 class concurrent_unordered_base : public Traits
00642 {
protected:
    // Type definitions
    typedef concurrent_unordered_base<Traits> self_type;
    typedef typename Traits::value_type value_type;
    typedef typename Traits::key_type key_type;
    typedef typename Traits::hash_compare hash_compare;
    typedef typename Traits::value_compare value_compare;
    typedef typename Traits::allocator_type allocator_type;
    typedef typename allocator_type::pointer pointer;
    typedef typename allocator_type::const_pointer const_pointer;
    typedef typename allocator_type::reference reference;
    typedef typename allocator_type::const_reference const_reference;
    typedef typename allocator_type::size_type size_type;
    typedef typename allocator_type::difference_type difference_type;
    // The underlying storage: a split-ordered list of elements and dummy bucket nodes
    typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
    typedef typename solist_t::nodeptr_t nodeptr_t;
    // Iterators that walk the entire split-order list, including dummy nodes
    typedef typename solist_t::raw_iterator raw_iterator;
    typedef typename solist_t::raw_const_iterator raw_const_iterator;
    typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
    typedef typename solist_t::const_iterator const_iterator;
    typedef iterator local_iterator;
    typedef const_iterator const_local_iterator;
    using Traits::my_hash_compare;
    using Traits::get_key;
    using Traits::allow_multimapping;

private:
    // Iterator-pair types returned by equal_range-style lookups
    typedef std::pair<iterator, iterator> pairii_t;
    typedef std::pair<const_iterator, const_iterator> paircc_t;

    static size_type const pointers_per_table = sizeof(size_type) * 8;              // One bucket segment per bit
    static const size_type initial_bucket_number = 8;                               // Initial number of buckets
    static const size_type initial_bucket_load = 4;                                // Initial maximum number of elements per bucket
00677 
protected:
    // Constructors/Destructors

    // Construct an empty table with the requested bucket count, hash-compare
    // object and allocator; internal_init() (defined elsewhere in this class)
    // sets up the bucket structures.
    concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
        const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
        : Traits(hc), my_number_of_buckets(n_of_buckets), my_solist(a),
          my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
    {
        internal_init();
    }

    // Copy-construct with a separate allocator.
    // NOTE(review): unlike the plain copy constructor below, this overload
    // does not call internal_init() before internal_copy(), and neither
    // overload initializes my_number_of_buckets here -- confirm that
    // internal_copy() performs the equivalent initialization.
    concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
        : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
    {
        internal_copy(right);
    }

    // Copy constructor: reuses the source's allocator
    concurrent_unordered_base(const concurrent_unordered_base& right)
        : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
    {
        internal_init();
        internal_copy(right);
    }

    // Copy assignment (self-assignment safe)
    concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
        if (this != &right)
            internal_copy(right);
        return (*this);
    }

    ~concurrent_unordered_base() {
        // Delete all node segments
        internal_clear();
    }
00711 
public:
    // Returns a copy of the element allocator used by the underlying list
    allocator_type get_allocator() const {
        return my_solist.get_allocator();
    }

    // Size and capacity function

    // True if the table holds no elements (dummy nodes are not counted)
    bool empty() const {
        return my_solist.empty();
    }

    // Number of elements currently stored (dummy nodes excluded)
    size_type size() const {
        return my_solist.size();
    }

    // Upper bound on size(), as reported by the allocator
    size_type max_size() const {
        return my_solist.max_size();
    }

    // Iterators 
    // All of these delegate to the split-ordered list; begin()/end() skip
    // the internal dummy bucket nodes.
    iterator begin() {
        return my_solist.begin();
    }

    const_iterator begin() const {
        return my_solist.begin();
    }

    iterator end() {
        return my_solist.end();
    }

    const_iterator end() const {
        return my_solist.end();
    }

    const_iterator cbegin() const {
        return my_solist.cbegin();
    }

    const_iterator cend() const {
        return my_solist.cend();
    }
00754 
    // Parallel traversal support
    // Range over the table's elements, splittable for use with TBB parallel
    // algorithms. The split point is chosen at a bucket boundary derived from
    // the split-order keys of the range's endpoints.
    class const_range_type : tbb::internal::no_assign {
        const concurrent_unordered_base &my_table;
        raw_const_iterator my_begin_node;             // first node of this range
        raw_const_iterator my_end_node;               // one past the last node
        mutable raw_const_iterator my_midpoint_node;  // cached split point, kept by set_midpoint()
    public:
        // Type definitions
        typedef typename concurrent_unordered_base::size_type size_type;
        typedef typename concurrent_unordered_base::value_type value_type;
        typedef typename concurrent_unordered_base::reference reference;
        typedef typename concurrent_unordered_base::difference_type difference_type;
        typedef typename concurrent_unordered_base::const_iterator iterator;

        // True if the range contains no nodes
        bool empty() const {return my_begin_node == my_end_node;}

        // True if the range can be split into two non-empty halves
        bool is_divisible() const {
            return my_midpoint_node != my_end_node;
        }
        // Splitting constructor: takes the upper half of r, leaving the
        // lower half in r; both halves recompute their midpoints
        const_range_type( const_range_type &r, split ) : 
            my_table(r.my_table), my_end_node(r.my_end_node)
        {
            r.my_end_node = my_begin_node = r.my_midpoint_node;
            __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
            __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
            set_midpoint();
            r.set_midpoint();
        }
        // Construct a range covering the whole table
        const_range_type( const concurrent_unordered_base &a_table ) : 
            my_table(a_table), my_begin_node(a_table.my_solist.begin()),
            my_end_node(a_table.my_solist.end())
        {
            set_midpoint();
        }
        iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
        iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
        // The grain size for parallel_for-style algorithms
        size_type grainsize() const { return 1; }

        // Compute and cache the split point for this range. The midpoint
        // order key is mapped back to a bucket index by reversing its bits,
        // then walked up to the nearest initialized bucket (is_initialized()
        // and get_parent() are defined elsewhere in this class).
        void set_midpoint() const {
            if( my_begin_node == my_end_node ) // not divisible
                my_midpoint_node = my_end_node;
            else {
                sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
                sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
                size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
                while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
                my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket( mid_bucket ));
                if( my_midpoint_node == my_begin_node )
                    // Midpoint did not land strictly inside the range; mark indivisible
                    my_midpoint_node = my_end_node;
#if TBB_USE_ASSERT
                else {
                    sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
                    __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
                    __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
                }
#endif // TBB_USE_ASSERT
            }
        }
    };
00820 
00821     class range_type : public const_range_type {
00822     public:
00823         typedef typename concurrent_unordered_base::iterator iterator;
00825         range_type( range_type &r, split ) : const_range_type( r, split() ) {}
00827         range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
00828 
00829         iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
00830         iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
00831     };
00832 
    // Create a splittable range over the mutable table, suitable for
    // tbb::parallel_for and friends.
    range_type range() {
        return range_type( *this );
    }
00836 
    // Create a splittable const range over the table.
    const_range_type range() const {
        return const_range_type( *this );
    }
00840 
00841     // Modifiers
    // Insert a single element; safe to call concurrently with other inserts
    // and lookups. Returns an iterator to the (possibly pre-existing) element
    // and a bool that is true iff this call performed the insertion.
    std::pair<iterator, bool> insert(const value_type& value) {
        return internal_insert(value);
    }
00845 
00846     iterator insert(const_iterator, const value_type& value) {
00847         // Ignore hint
00848         return insert(value).first;
00849     }
00850 
00851     template<class Iterator>
00852     void insert(Iterator first, Iterator last) {
00853         for (Iterator it = first; it != last; it++)
00854             insert(*it);
00855     }
00856 
    // Erase the element at 'where'. NOT safe to run concurrently with any
    // other operation. Returns an iterator to the next element.
    iterator unsafe_erase(const_iterator where) {
        return internal_erase(where);
    }
00860 
    // Erase every element in [first, last); not concurrency-safe.
    // Returns an iterator to the element that followed the erased range.
    iterator unsafe_erase(const_iterator first, const_iterator last) {
        while (first != last)
            unsafe_erase(first++); // post-increment: advance before the node is unlinked
        return my_solist.get_iterator(first);
    }
00866 
00867     size_type unsafe_erase(const key_type& key) {
00868         pairii_t where = equal_range(key);
00869         size_type item_count = internal_distance(where.first, where.second);
00870         unsafe_erase(where.first, where.second);
00871         return item_count;
00872     }
00873 
    // Exchange contents with 'right'. Not safe to call concurrently with any
    // other operation on either container.
    void swap(concurrent_unordered_base& right) {
        if (this != &right) {
            std::swap(my_hash_compare, right.my_hash_compare); // TODO: check what ADL meant here
            my_solist.swap(right.my_solist);
            internal_swap_buckets(right);
            std::swap(my_number_of_buckets, right.my_number_of_buckets);
            std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
        }
    }
00883 
    // Modifiers (continued)
00885     void clear() {
00886         // Clear list
00887         my_solist.clear();
00888 
00889         // Clear buckets
00890         internal_clear();
00891     }
00892 
00893     // Lookup
    // Find an element by key; returns end() when not found. Safe to call
    // concurrently with insertions.
    iterator find(const key_type& key) {
        return internal_find(key);
    }
00897 
    // Const lookup. Delegates to the non-const implementation, which may
    // lazily initialize buckets -- hence the const_cast.
    const_iterator find(const key_type& key) const {
        return const_cast<self_type*>(this)->internal_find(key);
    }
00901 
00902     size_type count(const key_type& key) const {
00903         paircc_t answer = equal_range(key);
00904         size_type item_count = internal_distance(answer.first, answer.second);
00905         return item_count;
00906     }
00907 
    // Return the half-open range [first, last) of elements equal to 'key'.
    std::pair<iterator, iterator> equal_range(const key_type& key) {
        return internal_equal_range(key);
    }
00911 
    // Const overload: [first, last) range of elements equal to 'key'.
    std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
        return internal_equal_range(key);
    }
00915 
00916     // Bucket interface - for debugging 
    // Current number of buckets.
    size_type unsafe_bucket_count() const {
        return my_number_of_buckets;
    }
00920 
    // Upper bound on the number of buckets the table can ever grow to
    // (capacity of the last segment in the segment table).
    size_type unsafe_max_bucket_count() const {
        return segment_size(pointers_per_table-1);
    }
00924 
00925     size_type unsafe_bucket_size(size_type bucket) {
00926         size_type item_count = 0;
00927         if (is_initialized(bucket)) {
00928             raw_iterator it = get_bucket(bucket);
00929             it++;
00930             for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); it++)
00931                 item_count++;
00932         }
00933         return item_count;
00934     }
00935 
00936     size_type unsafe_bucket(const key_type& key) const {
00937         sokey_t order_key = (sokey_t) my_hash_compare(key);
00938         size_type bucket = order_key % my_number_of_buckets;
00939         return bucket;
00940     }
00941 
00942     // If the bucket is initialized, return a first non-dummy element in it
00943     local_iterator unsafe_begin(size_type bucket) {
00944         if (!is_initialized(bucket))
00945             return end();
00946 
00947         raw_iterator it = get_bucket(bucket);
00948         return my_solist.first_real_iterator(it);
00949     }
00950 
00951     // If the bucket is initialized, return a first non-dummy element in it
00952     const_local_iterator unsafe_begin(size_type bucket) const
00953     {
00954         if (!is_initialized(bucket))
00955             return end();
00956 
00957         raw_const_iterator it = get_bucket(bucket);
00958         return my_solist.first_real_iterator(it);
00959     }
00960 
00961     // @REVIEW: Takes O(n)
00962     // Returns the iterator after the last non-dummy element in the bucket
00963     local_iterator unsafe_end(size_type bucket)
00964     {
00965         if (!is_initialized(bucket))
00966             return end();
00967 
00968         raw_iterator it = get_bucket(bucket);
00969     
00970         // Find the end of the bucket, denoted by the dummy element
00971         do it++;
00972         while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
00973 
00974         // Return the first real element past the end of the bucket
00975         return my_solist.first_real_iterator(it);
00976     }
00977 
00978     // @REVIEW: Takes O(n)
00979     // Returns the iterator after the last non-dummy element in the bucket
00980     const_local_iterator unsafe_end(size_type bucket) const
00981     {
00982         if (!is_initialized(bucket))
00983             return end();
00984 
00985         raw_const_iterator it = get_bucket(bucket);
00986     
00987         // Find the end of the bucket, denoted by the dummy element
00988         do it++;
00989         while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
00990 
00991         // Return the first real element past the end of the bucket
00992         return my_solist.first_real_iterator(it);
00993     }
00994 
00995     const_local_iterator unsafe_cbegin(size_type bucket) const {
00996         return ((const self_type *) this)->begin();
00997     }
00998 
00999     const_local_iterator unsafe_cend(size_type bucket) const {
01000         return ((const self_type *) this)->end();
01001     }
01002 
01003     // Hash policy
    // Average number of elements per bucket.
    float load_factor() const {
        return (float) size() / (float) unsafe_bucket_count();
    }
01007 
    // Load-factor threshold that triggers table growth on insertion.
    float max_load_factor() const {
        return my_maximum_bucket_size;
    }
01011 
01012     void max_load_factor(float newmax) {
01013         if (newmax != newmax || newmax < 0)
01014             tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
01015         my_maximum_bucket_size = newmax;
01016     }
01017 
    // No elements are rehashed here: the underlying split-ordered list is
    // already sorted, so growing the bucket count only updates
    // my_number_of_buckets; new buckets are populated lazily when first touched.
01021     void rehash(size_type buckets) {
01022         size_type current_buckets = my_number_of_buckets;
01023 
01024         if (current_buckets > buckets)
01025             return;
01026         else if ( (buckets & (buckets-1)) != 0 )
01027             tbb::internal::throw_exception(tbb::internal::eid_invalid_buckets_number);
01028         my_number_of_buckets = buckets;
01029     }
01030 
01031 private:
01032 
    // Initialize the hash and keep the first bucket open:
    // zero the segment-pointer table, then make bucket 0 refer to the
    // split-ordered list's initial dummy node.
    void internal_init() {
        // Allocate an array of segment pointers
        memset(my_buckets, 0, pointers_per_table * sizeof(void *));

        // Insert the first element in the split-ordered list
        raw_iterator dummy_node = my_solist.raw_begin();
        set_bucket(0, dummy_node);
    }
01042 
01043     void internal_clear() {
01044         for (size_type index = 0; index < pointers_per_table; index++) {
01045             if (my_buckets[index] != NULL) {
01046                 size_type sz = segment_size(index);
01047                 for (size_type index2 = 0; index2 < sz; index2++)
01048                     my_allocator.destroy(&my_buckets[index][index2]);
01049                 my_allocator.deallocate(my_buckets[index], sz);
01050                 my_buckets[index] = 0;
01051             }
01052         }
01053     }
01054 
    // Replace this table's contents with a copy of 'right'.
    // If an insertion throws, the element list is emptied and the exception
    // is rethrown (the adopted bucket-count/load-factor settings remain).
    void internal_copy(const self_type& right) {
        clear();

        my_maximum_bucket_size = right.my_maximum_bucket_size;
        my_number_of_buckets = right.my_number_of_buckets;

        __TBB_TRY {
            insert(right.begin(), right.end());
            my_hash_compare = right.my_hash_compare;
        } __TBB_CATCH(...) {
            my_solist.clear();
            __TBB_RETHROW();
        }
    }
01069 
01070     void internal_swap_buckets(concurrent_unordered_base& right)
01071     {
01072         // Swap all node segments
01073         for (size_type index = 0; index < pointers_per_table; index++)
01074         {
01075             raw_iterator * iterator_pointer = my_buckets[index];
01076             my_buckets[index] = right.my_buckets[index];
01077             right.my_buckets[index] = iterator_pointer;
01078         }
01079     }
01080 
01081     // Hash APIs
01082     size_type internal_distance(const_iterator first, const_iterator last) const
01083     {
01084         size_type num = 0;
01085 
01086         for (const_iterator it = first; it != last; it++)
01087             num++;
01088 
01089         return num;
01090     }
01091 
    // Insert an element in the hash given its value.
    // Returns an iterator to the element with the given key plus a bool that
    // is true iff this call performed the insertion. Safe against concurrent
    // inserts: conflicts are resolved by retrying from the last known-good
    // position via CAS inside try_insert.
    std::pair<iterator, bool> internal_insert(const value_type& value)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(get_key(value));
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        size_type new_count;
        order_key = split_order_key_regular(order_key);
        raw_iterator it = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = it;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        where++;

        // Scan the split-ordered (key-sorted) list for the insertion point
        for (;;)
        {
            if (where == last || solist_t::get_order_key(where) > order_key)
            {
                // Try to insert it in the right place
                std::pair<iterator, bool> result = my_solist.try_insert(it, where, value, order_key, &new_count);
                
                if (result.second)
                {
                    // Insertion succeeded, adjust the table size, if needed
                    adjust_table_size(new_count, my_number_of_buckets);
                    return result;
                }
                else
                {
                    // Insertion failed: either the same node was inserted by another thread, or
                    // another element was inserted at exactly the same place as this node.
                    // Proceed with the search from the previous location where order key was
                    // known to be larger (note: this is legal only because there is no safe
                    // concurrent erase operation supported).
                    where = it;
                    where++;
                    continue;
                }
            }
            else if (!allow_multimapping && solist_t::get_order_key(where) == order_key && my_hash_compare(get_key(*where), get_key(value)) == 0)
            {
                // Element already in the list, return it
                return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
            }

            // Move the iterator forward
            it = where;
            where++;
        }
    }
01149 
    // Find the element in the split-ordered list.
    // Returns an iterator to the first element matching 'key', or end().
    // Note: a zero/false result from my_hash_compare(a, b) means the keys
    // compare equal (see the equality test below).
    iterator internal_find(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator last = my_solist.raw_end();

        // The list is sorted by split-order key, so the scan can stop as soon
        // as a larger key is seen.
        for (raw_iterator it = get_bucket(bucket); it != last; it++)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // If the order key is smaller than the current order key, the element
                // is not in the hash.
                return end();
            }
            else if (solist_t::get_order_key(it) == order_key)
            {
                // The fact that order keys match does not mean that the element is found.
                // Key function comparison has to be performed to check whether this is the
                // right element. If not, keep searching while order key is the same.
                if (!my_hash_compare(get_key(*it), key))
                    return my_solist.get_iterator(it);
            }
        }

        return end();
    }
01183 
    // Erase an element from the list. This is not a concurrency safe function.
    // Walks the element's bucket to locate the predecessor node, then unlinks
    // the target through the list. Returns an iterator to the element after
    // the erased one, or end() if 'it' is no longer present.
    iterator internal_erase(const_iterator it)
    {
        key_type key = get_key(*it);
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);

        raw_iterator previous = get_bucket(bucket);
        raw_iterator last = my_solist.raw_end();
        raw_iterator where = previous;

        __TBB_ASSERT(where != last, "Invalid head node");

        // First node is a dummy node
        where++;

        for (;;) {
            if (where == last)
                return end();
            else if (my_solist.get_iterator(where) == it)
                return my_solist.erase_node(previous, it);

            // Move the iterator forward
            previous = where;
            where++;
        }
    }
01217 
    // Return the [begin, end) pair of iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    // Scans the key's bucket; since the list is sorted by split-order key,
    // the scan stops at the first larger key.
    pairii_t internal_equal_range(const key_type& key)
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            init_bucket(bucket);

        order_key = split_order_key_regular(order_key);
        raw_iterator end_it = my_solist.raw_end();

        for (raw_iterator it = get_bucket(bucket); it != end_it; it++)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return pairii_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                iterator first = my_solist.get_iterator(it);
                iterator last = first;

                // Advance 'last' past every consecutive element with an equal key
                for (;last != end() && !my_hash_compare(get_key(*last), key); last++);

                return pairii_t(first, last);
            }
        }

        return pairii_t(end(), end());
    }
01252 
    // Return the [begin, end) pair of const iterators with the same key values.
    // This operation makes sense only if mapping is many-to-one.
    // Unlike the non-const overload, an uninitialized bucket is NOT lazily
    // created here; an empty range is returned instead.
    paircc_t internal_equal_range(const key_type& key) const
    {
        sokey_t order_key = (sokey_t) my_hash_compare(key);
        size_type bucket = order_key % my_number_of_buckets;

        // If bucket is empty, initialize it first
        if (!is_initialized(bucket))
            return paircc_t(end(), end());

        order_key = split_order_key_regular(order_key);
        raw_const_iterator end_it = my_solist.raw_end();

        for (raw_const_iterator it = get_bucket(bucket); it != end_it; it++)
        {
            if (solist_t::get_order_key(it) > order_key)
            {
                // There is no element with the given key
                return paircc_t(end(), end());
            }
            else if (solist_t::get_order_key(it) == order_key && !my_hash_compare(get_key(*it), key))
            {
                const_iterator first = my_solist.get_iterator(it);
                const_iterator last = first;

                // Advance 'last' past every consecutive element with an equal key
                for (; last != end() && !my_hash_compare(get_key(*last), key); last++);

                return paircc_t(first, last);
            }
        }

        return paircc_t(end(), end());
    }
01287 
01288     // Bucket APIs
    // Lazily initialize 'bucket': recursively ensure the parent bucket chain
    // exists, then splice this bucket's dummy node into the list right after
    // the parent's dummy node and publish it in the segment table.
    void init_bucket(size_type bucket)
    {
        // Bucket 0 has no parent. Initialize it and return.
        if (bucket == 0) {
            internal_init();
            return;
        }

        size_type parent_bucket = get_parent(bucket);

        // All parent_bucket buckets have to be initialized before this bucket is
        if (!is_initialized(parent_bucket))
            init_bucket(parent_bucket);

        raw_iterator parent = get_bucket(parent_bucket);

        // Create a dummy first node in this bucket
        raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
        set_bucket(bucket, dummy_node);
    }
01309 
    // Double the bucket count when the observed load factor exceeds the
    // threshold. total_elements is the element count after the triggering
    // insert; current_size is the bucket count the caller observed.
    void adjust_table_size(size_type total_elements, size_type current_size)
    {
        // Grow the table by a factor of 2 if possible and needed
        if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
        {
             // Double the size of the hash only if size has not changed inbetween loads
            __TBB_CompareAndSwapW((uintptr_t*)&my_number_of_buckets, 2 * current_size, current_size );
        }
    }
01319 
01320     size_type get_parent(size_type bucket) const
01321     {
01322         // Unsets bucket's most significant turned-on bit
01323         size_type msb = __TBB_Log2((uintptr_t)bucket);
01324         return bucket & ~(size_type(1) << msb);
01325     }
01326 
01327 
01328     // Dynamic sized array (segments)
    // Index of the segment that holds the given bucket.
    // The |1 maps both bucket 0 and bucket 1 into segment 0.
    static size_type segment_index_of( size_type index ) {
        return size_type( __TBB_Log2( index|1 ) );
    }
01333 
    // First bucket index served by segment k (0 for segment 0).
    static size_type segment_base( size_type k ) {
        return (size_type(1)<<k & ~size_type(1));
    }
01338 
    // Number of buckets in segment k: 2 for segment 0, otherwise 2^k.
    static size_type segment_size( size_type k ) {
        return k? size_type(1)<<k : 2;
    }
01343 
01344     raw_iterator get_bucket(size_type bucket) const {
01345         size_type segment = segment_index_of(bucket);
01346         bucket -= segment_base(segment);
01347         __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
01348         return my_buckets[segment][bucket];
01349     }
01350 
    // Publish 'dummy_head' as the dummy node of 'bucket', allocating the
    // bucket's segment on first use. Concurrent allocators race through the
    // CAS below; the loser frees its segment.
    void set_bucket(size_type bucket, raw_iterator dummy_head) {
        size_type segment = segment_index_of(bucket);
        bucket -= segment_base(segment);

        if (my_buckets[segment] == NULL) {
            size_type sz = segment_size(segment);
            raw_iterator * new_segment = my_allocator.allocate(sz);
            // Zero-fill so is_initialized() sees null node pointers
            // (NOTE(review): assumes all-zero bytes form a valid raw_iterator)
            std::memset(new_segment, 0, sz*sizeof(raw_iterator));

            // Only one thread installs the segment; losers deallocate theirs
            if (__TBB_CompareAndSwapW((void *) &my_buckets[segment], (uintptr_t)new_segment, 0) != 0)
                my_allocator.deallocate(new_segment, sz);
        }

        my_buckets[segment][bucket] = dummy_head;
    }
01366 
01367     bool is_initialized(size_type bucket) const {
01368         size_type segment = segment_index_of(bucket);
01369         bucket -= segment_base(segment);
01370 
01371         if (my_buckets[segment] == NULL)
01372             return false;
01373 
01374         raw_iterator it = my_buckets[segment][bucket];
01375         return (it.get_node_ptr() != NULL);
01376     }
01377 
01378     // Utilities for keys
01379 
    // A regular order key has its original hash value reversed and the last bit set;
    // the set low bit distinguishes real elements from bucket dummy nodes.
    sokey_t split_order_key_regular(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) | 0x1;
    }
01384 
    // A dummy order key has its original hash value reversed and the last bit unset,
    // so each bucket's dummy node sorts just before the bucket's real elements.
    sokey_t split_order_key_dummy(sokey_t order_key) const {
        return __TBB_ReverseBits(order_key) & ~(0x1);
    }
01389 
    // Shared variables
    size_type                                                     my_number_of_buckets;       // Current table size (rehash() requires powers of two)
    solist_t                                                      my_solist;                  // Split-ordered list where all the elements are kept
    typename allocator_type::template rebind<raw_iterator>::other my_allocator;               // Allocator object for bucket segments
    float                                                         my_maximum_bucket_size;     // Load-factor threshold that triggers growth
    raw_iterator                                                 *my_buckets[pointers_per_table]; // The segment table (lazily allocated segments)
01396 };
01397 #if _MSC_VER
01398 #pragma warning(pop) // warning 4127 -- while (true) has a constant expression in it
01399 #endif
01400 
01402 static const size_t hash_multiplier = sizeof(size_t)==4? 2654435769U : 11400714819323198485ULL;
01403 } // namespace internal
01406 template<typename T>
01407 inline size_t tbb_hasher( const T& t ) {
01408     return static_cast<size_t>( t ) * internal::hash_multiplier;
01409 }
// Pointer hasher: fold the low alignment bits back in by xor-ing the address
// with a right-shifted copy of itself.
template<typename P>
inline size_t tbb_hasher( P* ptr ) {
    const size_t bits = reinterpret_cast<size_t>( ptr );
    return bits ^ (bits >> 3);
}
01415 template<typename E, typename S, typename A>
01416 inline size_t tbb_hasher( const std::basic_string<E,S,A>& s ) {
01417     size_t h = 0;
01418     for( const E* c = s.c_str(); *c; c++ )
01419         h = static_cast<size_t>(*c) ^ (h * internal::hash_multiplier);
01420     return h;
01421 }
// Pair hasher: xor of the component hashes.
template<typename F, typename S>
inline size_t tbb_hasher( const std::pair<F,S>& p ) {
    return tbb_hasher(p.first) ^ tbb_hasher(p.second);
}
01426 } // namespace interface5
01427 using interface5::tbb_hasher;
01428 } // namespace tbb
01429 #endif// __TBB_concurrent_unordered_internal_H

Copyright © 2005-2010 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.