/*
    Copyright 2005-2014 Intel Corporation.  All Rights Reserved.

    This file is part of Threading Building Blocks. Threading Building Blocks is free software;
    you can redistribute it and/or modify it under the terms of the GNU General Public License
    version 2 as published by the Free Software Foundation.  Threading Building Blocks is
    distributed in the hope that it will be useful, but WITHOUT ANY WARRANTY; without even
    the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.
    See the GNU General Public License for more details.  You should have received a copy of
    the GNU General Public License along with Threading Building Blocks; if not, write to the
    Free Software Foundation, Inc., 51 Franklin St, Fifth Floor, Boston, MA 02110-1301 USA

    As a special exception, you may use this file as part of a free software library without
    restriction.  Specifically, if other files instantiate templates or use macros or inline
    functions from this file, or you compile this file and link it with other files to produce
    an executable, this file does not by itself cause the resulting executable to be covered
    by the GNU General Public License.  This exception does not however invalidate any other
    reasons why the executable file might be covered by the GNU General Public License.
*/

#ifndef __TBB_concurrent_hash_map_H
#define __TBB_concurrent_hash_map_H

#include "tbb_stddef.h"

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    // Suppress "C++ exception handler used, but unwind semantics are not enabled" warning in STL headers
    #pragma warning (push)
    #pragma warning (disable: 4530)
#endif

#include <iterator>
#include <utility>      // Need std::pair
#include <cstring>      // Need std::memset

#if !TBB_USE_EXCEPTIONS && _MSC_VER
    #pragma warning (pop)
#endif

#include "cache_aligned_allocator.h"
#include "tbb_allocator.h"
#include "spin_rw_mutex.h"
#include "atomic.h"
#include "tbb_exception.h"
#include "tbb_profiling.h"
#include "internal/_concurrent_unordered_impl.h" // Need tbb_hasher
#if __TBB_INITIALIZER_LISTS_PRESENT
#include <initializer_list>
#endif
#if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS
#include <typeinfo>
#endif
#if __TBB_STATISTICS
#include <stdio.h>
#endif

namespace tbb {

//! hash_compare that is default argument for concurrent_hash_map
template<typename Key>
struct tbb_hash_compare {
    static size_t hash( const Key& a ) { return tbb_hasher(a); }
    static bool equal( const Key& a, const Key& b ) { return a == b; }
};

namespace interface5 {

    template<typename Key, typename T, typename HashCompare = tbb_hash_compare<Key>, typename A = tbb_allocator<std::pair<Key, T> > >
    class concurrent_hash_map;

    //! @cond INTERNAL
    namespace internal {
    using namespace tbb::internal;


    //! Type of a hash code.
    typedef size_t hashcode_t;
    //! Node base type
    struct hash_map_node_base : tbb::internal::no_copy {
        //! Mutex type
        typedef spin_rw_mutex mutex_t;
        //! Scoped lock type for mutex
        typedef mutex_t::scoped_lock scoped_t;
        //! Next node in chain
        hash_map_node_base *next;
        mutex_t mutex;
    };
    //! Incompleteness flag value
    static hash_map_node_base *const rehash_req = reinterpret_cast<hash_map_node_base*>(size_t(3));
    //! Rehashed empty bucket flag
    static hash_map_node_base *const empty_rehashed = reinterpret_cast<hash_map_node_base*>(size_t(0));
    //! base class of concurrent_hash_map
    class hash_map_base {
    public:
        //! Size type
        typedef size_t size_type;
        //! Type of a hash code.
        typedef size_t hashcode_t;
        //! Segment index type
        typedef size_t segment_index_t;
        //! Node base type
        typedef hash_map_node_base node_base;
        //! Bucket type
        struct bucket : tbb::internal::no_copy {
            //! Mutex type for buckets
            typedef spin_rw_mutex mutex_t;
            //! Scoped lock type for mutex
            typedef mutex_t::scoped_lock scoped_t;
            mutex_t mutex;
            node_base *node_list;
        };
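        // ------------------------------------------------------------------
        // Illustrative note (not part of the original header): the bucket
        // array managed by this class is segmented.  Segment k (k>0) starts at
        // global bucket index 2^k and holds 2^k buckets, so a masked hash code
        // h lives in segment floor(log2(h|1)) at offset h - segment_base.
        // A minimal stand-alone sketch of that mapping (hypothetical helper
        // names, mirroring segment_index_of() and segment_base() below):
        //
        //   size_t segment_of( size_t index ) { size_t k = 0; for( size_t i = index|1; i >>= 1; ) ++k; return k; }
        //   size_t base_of( size_t k )        { return (size_t(1)<<k) & ~size_t(1); }
        //   // buckets 0-1 -> segment 0 (embedded); 2-3 -> segment 1; 4-7 -> segment 2; 8-15 -> segment 3; ...
        // ------------------------------------------------------------------
        //!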
Count of segments in the first block static size_type const embedded_block = 1; //! Count of segments in the first block static size_type const embedded_buckets = 1< my_mask; //! Segment pointers table. Also prevents false sharing between my_mask and my_size segments_table_t my_table; //! Size of container in stored items atomic my_size; // It must be in separate cache line from my_mask due to performance effects //! Zero segment bucket my_embedded_segment[embedded_buckets]; #if __TBB_STATISTICS atomic my_info_resizes; // concurrent ones mutable atomic my_info_restarts; // race collisions atomic my_info_rehashes; // invocations of rehash_bucket #endif //! Constructor hash_map_base() { std::memset( this, 0, pointers_per_table*sizeof(segment_ptr_t) // 32*4=128 or 64*8=512 + sizeof(my_size) + sizeof(my_mask) // 4+4 or 8+8 + embedded_buckets*sizeof(bucket) ); // n*8 or n*16 for( size_type i = 0; i < embedded_block; i++ ) // fill the table my_table[i] = my_embedded_segment + segment_base(i); my_mask = embedded_buckets - 1; __TBB_ASSERT( embedded_block <= first_block, "The first block number must include embedded blocks"); #if __TBB_STATISTICS my_info_resizes = 0; // concurrent ones my_info_restarts = 0; // race collisions my_info_rehashes = 0; // invocations of rehash_bucket #endif } //! @return segment index of given index in the array static segment_index_t segment_index_of( size_type index ) { return segment_index_t( __TBB_Log2( index|1 ) ); } //! @return the first array index of given segment static segment_index_t segment_base( segment_index_t k ) { return (segment_index_t(1)<(ptr) > uintptr_t(63); } //! Initialize buckets static void init_buckets( segment_ptr_t ptr, size_type sz, bool is_initial ) { if( is_initial ) std::memset(ptr, 0, sz*sizeof(bucket) ); else for(size_type i = 0; i < sz; i++, ptr++) { *reinterpret_cast(&ptr->mutex) = 0; ptr->node_list = rehash_req; } } //! Add node @arg n to bucket @arg b static void add_to_bucket( bucket *b, node_base *n ) { __TBB_ASSERT(b->node_list != rehash_req, NULL); n->next = b->node_list; b->node_list = n; // its under lock and flag is set } //! Exception safety helper struct enable_segment_failsafe : tbb::internal::no_copy { segment_ptr_t *my_segment_ptr; enable_segment_failsafe(segments_table_t &table, segment_index_t k) : my_segment_ptr(&table[k]) {} ~enable_segment_failsafe() { if( my_segment_ptr ) *my_segment_ptr = 0; // indicate no allocation in progress } }; //! Enable segment void enable_segment( segment_index_t k, bool is_initial = false ) { __TBB_ASSERT( k, "Zero segment must be embedded" ); enable_segment_failsafe watchdog( my_table, k ); cache_aligned_allocator alloc; size_type sz; __TBB_ASSERT( !is_valid(my_table[k]), "Wrong concurrent assignment"); if( k >= first_block ) { sz = segment_size( k ); segment_ptr_t ptr = alloc.allocate( sz ); init_buckets( ptr, sz, is_initial ); itt_hide_store_word( my_table[k], ptr ); sz <<= 1;// double it to get entire capacity of the container } else { // the first block __TBB_ASSERT( k == embedded_block, "Wrong segment index" ); sz = segment_size( first_block ); segment_ptr_t ptr = alloc.allocate( sz - embedded_buckets ); init_buckets( ptr, sz - embedded_buckets, is_initial ); ptr -= segment_base(embedded_block); for(segment_index_t i = embedded_block; i < first_block; i++) // calc the offsets itt_hide_store_word( my_table[i], ptr + segment_base(i) ); } itt_store_word_with_release( my_mask, sz-1 ); watchdog.my_segment_ptr = 0; } //! 
Get bucket by (masked) hashcode bucket *get_bucket( hashcode_t h ) const throw() { // TODO: add throw() everywhere? segment_index_t s = segment_index_of( h ); h -= segment_base(s); segment_ptr_t seg = my_table[s]; __TBB_ASSERT( is_valid(seg), "hashcode must be cut by valid mask for allocated segments" ); return &seg[h]; } // internal serial rehashing helper void mark_rehashed_levels( hashcode_t h ) throw () { segment_index_t s = segment_index_of( h ); while( segment_ptr_t seg = my_table[++s] ) if( seg[h].node_list == rehash_req ) { seg[h].node_list = empty_rehashed; mark_rehashed_levels( h + ((hashcode_t)1<node_list) != rehash_req ) { #if __TBB_STATISTICS my_info_restarts++; // race collisions #endif return true; } } return false; } //! Insert a node and check for load factor. @return segment index to enable. segment_index_t insert_new_node( bucket *b, node_base *n, hashcode_t mask ) { size_type sz = ++my_size; // prefix form is to enforce allocation after the first item inserted add_to_bucket( b, n ); // check load factor if( sz >= mask ) { // TODO: add custom load_factor segment_index_t new_seg = __TBB_Log2( mask+1 ); //optimized segment_index_of __TBB_ASSERT( is_valid(my_table[new_seg-1]), "new allocations must not publish new mask until segment has allocated"); static const segment_ptr_t is_allocating = (segment_ptr_t)2; if( !itt_hide_load_word(my_table[new_seg]) && as_atomic(my_table[new_seg]).compare_and_swap(is_allocating, NULL) == NULL ) return new_seg; // The value must be processed } return 0; } //! Prepare enough segments for number of buckets void reserve(size_type buckets) { if( !buckets-- ) return; bool is_initial = !my_size; for( size_type m = my_mask; buckets > m; m = my_mask ) enable_segment( segment_index_of( m+1 ), is_initial ); } //! Swap hash_map_bases void internal_swap(hash_map_base &table) { using std::swap; swap(this->my_mask, table.my_mask); swap(this->my_size, table.my_size); for(size_type i = 0; i < embedded_buckets; i++) swap(this->my_embedded_segment[i].node_list, table.my_embedded_segment[i].node_list); for(size_type i = embedded_block; i < pointers_per_table; i++) swap(this->my_table[i], table.my_table[i]); } }; template class hash_map_range; //! Meets requirements of a forward iterator for STL */ /** Value is either the T or const T type of the container. @ingroup containers */ template class hash_map_iterator : public std::iterator { typedef Container map_type; typedef typename Container::node node; typedef hash_map_base::node_base node_base; typedef hash_map_base::bucket bucket; template friend bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ); template friend bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ); template friend ptrdiff_t operator-( const hash_map_iterator& i, const hash_map_iterator& j ); template friend class hash_map_iterator; template friend class hash_map_range; void advance_to_next_bucket() { // TODO?: refactor to iterator_base class size_t k = my_index+1; while( my_bucket && k <= my_map->my_mask ) { // Following test uses 2's-complement wizardry if( k& (k-2) ) // not the beginning of a segment ++my_bucket; else my_bucket = my_map->get_bucket( k ); my_node = static_cast( my_bucket->node_list ); if( hash_map_base::is_valid(my_node) ) { my_index = k; return; } ++k; } my_bucket = 0; my_node = 0; my_index = k; // the end } #if !defined(_MSC_VER) || defined(__INTEL_COMPILER) template friend class interface5::concurrent_hash_map; #else public: // workaround #endif //! 
concurrent_hash_map over which we are iterating. const Container *my_map; //! Index in hash table for current item size_t my_index; //! Pointer to bucket const bucket *my_bucket; //! Pointer to node that has current item node *my_node; hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ); public: //! Construct undefined iterator hash_map_iterator() {} hash_map_iterator( const hash_map_iterator &other ) : my_map(other.my_map), my_index(other.my_index), my_bucket(other.my_bucket), my_node(other.my_node) {} Value& operator*() const { __TBB_ASSERT( hash_map_base::is_valid(my_node), "iterator uninitialized or at end of container?" ); return my_node->item; } Value* operator->() const {return &operator*();} hash_map_iterator& operator++(); //! Post increment hash_map_iterator operator++(int) { hash_map_iterator old(*this); operator++(); return old; } }; template hash_map_iterator::hash_map_iterator( const Container &map, size_t index, const bucket *b, node_base *n ) : my_map(&map), my_index(index), my_bucket(b), my_node( static_cast(n) ) { if( b && !hash_map_base::is_valid(n) ) advance_to_next_bucket(); } template hash_map_iterator& hash_map_iterator::operator++() { my_node = static_cast( my_node->next ); if( !my_node ) advance_to_next_bucket(); return *this; } template bool operator==( const hash_map_iterator& i, const hash_map_iterator& j ) { return i.my_node == j.my_node && i.my_map == j.my_map; } template bool operator!=( const hash_map_iterator& i, const hash_map_iterator& j ) { return i.my_node != j.my_node || i.my_map != j.my_map; } //! Range class used with concurrent_hash_map /** @ingroup containers */ template class hash_map_range { typedef typename Iterator::map_type map_type; Iterator my_begin; Iterator my_end; mutable Iterator my_midpoint; size_t my_grainsize; //! Set my_midpoint to point approximately half way between my_begin and my_end. void set_midpoint() const; template friend class hash_map_range; public: //! Type for size of a range typedef std::size_t size_type; typedef typename Iterator::value_type value_type; typedef typename Iterator::reference reference; typedef typename Iterator::difference_type difference_type; typedef Iterator iterator; //! True if range is empty. bool empty() const {return my_begin==my_end;} //! True if range can be partitioned into two subranges. bool is_divisible() const { return my_midpoint!=my_end; } //! Split range. hash_map_range( hash_map_range& r, split ) : my_end(r.my_end), my_grainsize(r.my_grainsize) { r.my_end = my_begin = r.my_midpoint; __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" ); __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" ); set_midpoint(); r.set_midpoint(); } //! type conversion template hash_map_range( hash_map_range& r) : my_begin(r.my_begin), my_end(r.my_end), my_midpoint(r.my_midpoint), my_grainsize(r.my_grainsize) {} #if TBB_DEPRECATED //! Init range with iterators and grainsize specified hash_map_range( const Iterator& begin_, const Iterator& end_, size_type grainsize_ = 1 ) : my_begin(begin_), my_end(end_), my_grainsize(grainsize_) { if(!my_end.my_index && !my_end.my_bucket) // end my_end.my_index = my_end.my_map->my_mask + 1; set_midpoint(); __TBB_ASSERT( grainsize_>0, "grainsize must be positive" ); } #endif //! 
    //! Init range with container and grainsize specified
    hash_map_range( const map_type &map, size_type grainsize_ = 1 ) :
        my_begin( Iterator( map, 0, map.my_embedded_segment, map.my_embedded_segment->node_list ) ),
        my_end( Iterator( map, map.my_mask + 1, 0, 0 ) ),
        my_grainsize( grainsize_ )
    {
        __TBB_ASSERT( grainsize_>0, "grainsize must be positive" );
        set_midpoint();
    }
    const Iterator& begin() const {return my_begin;}
    const Iterator& end() const {return my_end;}
    //! The grain size for this range.
    size_type grainsize() const {return my_grainsize;}
};

template<typename Iterator>
void hash_map_range<Iterator>::set_midpoint() const {
    // Split by groups of nodes
    size_t m = my_end.my_index-my_begin.my_index;
    if( m > my_grainsize ) {
        m = my_begin.my_index + m/2u;
        hash_map_base::bucket *b = my_begin.my_map->get_bucket(m);
        my_midpoint = Iterator(*my_begin.my_map,m,b,b->node_list);
    } else {
        my_midpoint = my_end;
    }
    __TBB_ASSERT( my_begin.my_index <= my_midpoint.my_index, "my_begin is after my_midpoint" );
    __TBB_ASSERT( my_midpoint.my_index <= my_end.my_index, "my_midpoint is after my_end" );
    __TBB_ASSERT( my_begin != my_midpoint || my_begin == my_end, "[my_begin, my_midpoint) range should not be empty" );
}

    } // internal
//! @endcond

//! Unordered map from Key to T.
/** concurrent_hash_map is associative container with concurrent access.

@par Compatibility
    The class meets all Container Requirements from C++ Standard (See ISO/IEC 14882:2003(E), clause 23.1).

@par Exception Safety
    - Hash function is not permitted to throw an exception. User-defined types Key and T are forbidden from throwing an exception in destructors.
    - If exception happens during insert() operations, it has no effect (unless exception raised by HashCompare::hash() function during grow_segment).
    - If exception happens during operator=() operation, the container can have a part of source items, and methods size() and empty() can return wrong results.

@par Changes since TBB 2.1
    - Replaced internal algorithm and data structure. Patent is pending.
    - Added buckets number argument for constructor

@par Changes since TBB 2.0
    - Fixed exception-safety
    - Added template argument for allocator
    - Added allocator argument in constructors
    - Added constructor from a range of iterators
    - Added several new overloaded insert() methods
    - Added get_allocator()
    - Added swap()
    - Added count()
    - Added overloaded erase(accessor &) and erase(const_accessor&)
    - Added equal_range() [const]
    - Added [const_]pointer, [const_]reference, and allocator_type types
    - Added global functions: operator==(), operator!=(), and swap()

@ingroup containers */
template<typename Key, typename T, typename HashCompare, typename Allocator>
class concurrent_hash_map : protected internal::hash_map_base {
    template<typename Container, typename Value>
    friend class internal::hash_map_iterator;

    template<typename I>
    friend class internal::hash_map_range;

public:
    typedef Key key_type;
    typedef T mapped_type;
    typedef std::pair<const Key,T> value_type;
    typedef hash_map_base::size_type size_type;
    typedef ptrdiff_t difference_type;
    typedef value_type *pointer;
    typedef const value_type *const_pointer;
    typedef value_type &reference;
    typedef const value_type &const_reference;
    typedef internal::hash_map_iterator<concurrent_hash_map,value_type> iterator;
    typedef internal::hash_map_iterator<concurrent_hash_map,const value_type> const_iterator;
    typedef internal::hash_map_range<iterator> range_type;
    typedef internal::hash_map_range<const_iterator> const_range_type;
    typedef Allocator allocator_type;

protected:
    friend class const_accessor;
    struct node;
    typedef typename Allocator::template rebind<node>::other node_allocator_type;
    node_allocator_type my_allocator;
    HashCompare my_hash_compare;

    struct node : public node_base {
        value_type item;
        node( const Key &key ) : item(key, T()) {}
        node( const Key &key, const T &t ) : item(key, t) {}
        // exception-safe allocation, see C++ Standard 2003, clause 5.3.4p17
        void *operator new( size_t /*size*/, node_allocator_type &a ) {
            void *ptr = a.allocate(1);
            if(!ptr) tbb::internal::throw_exception(tbb::internal::eid_bad_alloc);
            return ptr;
        }
        // match placement-new form above to be called if exception thrown in constructor
        void operator delete( void *ptr, node_allocator_type &a ) { a.deallocate(static_cast<node*>(ptr),1); }
    };

    void delete_node( node_base *n ) {
        my_allocator.destroy( static_cast<node*>(n) );
        my_allocator.deallocate( static_cast<node*>(n), 1);
    }

    node *search_bucket( const key_type &key, bucket *b ) const {
        node *n = static_cast<node*>( b->node_list );
        while( is_valid(n) && !my_hash_compare.equal(key, n->item.first) )
            n = static_cast<node*>( n->next );
        __TBB_ASSERT(n != internal::rehash_req, "Search can be executed only for rehashed bucket");
        return n;
    }

    //! bucket accessor is to find, rehash, acquire a lock, and access a bucket
    class bucket_accessor : public bucket::scoped_t {
        bucket *my_b;
    public:
        bucket_accessor( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) { acquire( base, h, writer ); }
        //! find a bucket by masked hashcode, optionally rehash, and acquire the lock
        inline void acquire( concurrent_hash_map *base, const hashcode_t h, bool writer = false ) {
            my_b = base->get_bucket( h );
            // TODO: actually, notification is unnecessary here, just hiding double-check
            if( itt_load_word_with_acquire(my_b->node_list) == internal::rehash_req
                && try_acquire( my_b->mutex, /*write=*/true ) )
            {
                if( my_b->node_list == internal::rehash_req ) base->rehash_bucket( my_b, h ); //recursive rehashing
            }
            else bucket::scoped_t::acquire( my_b->mutex, writer );
            __TBB_ASSERT( my_b->node_list != internal::rehash_req, NULL);
        }
        //! check whether bucket is locked for write
        bool is_writer() { return bucket::scoped_t::is_writer; }
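        // ------------------------------------------------------------------
        // Illustrative sketch (not part of the original header): the
        // reader-to-writer upgrade pattern this container applies to its
        // spin_rw_mutex based locks, both here and in lookup()/rehash_bucket().
        // The names my_lock, needs_update and do_update are hypothetical.
        //
        //   tbb::spin_rw_mutex my_lock;
        //   void update_if_needed() {
        //       tbb::spin_rw_mutex::scoped_lock l( my_lock, /*write=*/false ); // reader
        //       if( needs_update() ) {
        //           if( !l.upgrade_to_writer() ) {
        //               // the lock was released during the upgrade, so another
        //               // writer may have run: re-check the precondition
        //               if( !needs_update() ) return;
        //           }
        //           do_update(); // now holding the write lock
        //       }
        //   }
        // ------------------------------------------------------------------
        //!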
get bucket pointer bucket *get() { return my_b; } bucket *operator->() { return get(); } }; // TODO refactor to hash_base void rehash_bucket( bucket *b_new, const hashcode_t h ) { __TBB_ASSERT( *(intptr_t*)(&b_new->mutex), "b_new must be locked (for write)"); __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); __TBB_store_with_release(b_new->node_list, internal::empty_rehashed); // mark rehashed hashcode_t mask = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit #if __TBB_STATISTICS my_info_rehashes++; // invocations of rehash_bucket #endif bucket_accessor b_old( this, h & mask ); mask = (mask<<1) | 1; // get full mask for new bucket __TBB_ASSERT( (mask&(mask+1))==0 && (h & mask) == h, NULL ); restart: for( node_base **p = &b_old->node_list, *n = __TBB_load_with_acquire(*p); is_valid(n); n = *p ) { hashcode_t c = my_hash_compare.hash( static_cast(n)->item.first ); #if TBB_USE_ASSERT hashcode_t bmask = h & (mask>>1); bmask = bmask==0? 1 : ( 1u<<(__TBB_Log2( bmask )+1 ) ) - 1; // minimal mask of parent bucket __TBB_ASSERT( (c & bmask) == (h & bmask), "hash() function changed for key in table" ); #endif if( (c & mask) == h ) { if( !b_old.is_writer() ) if( !b_old.upgrade_to_writer() ) { goto restart; // node ptr can be invalid due to concurrent erase } *p = n->next; // exclude from b_old add_to_bucket( b_new, n ); } else p = &n->next; // iterate to next item } } public: class accessor; //! Combines data access, locking, and garbage collection. class const_accessor : private node::scoped_t /*which derived from no_copy*/ { friend class concurrent_hash_map; friend class accessor; public: //! Type of value typedef const typename concurrent_hash_map::value_type value_type; //! True if result is empty. bool empty() const {return !my_node;} //! Set to null void release() { if( my_node ) { node::scoped_t::release(); my_node = 0; } } //! Return reference to associated value in hash table. const_reference operator*() const { __TBB_ASSERT( my_node, "attempt to dereference empty accessor" ); return my_node->item; } //! Return pointer to associated value in hash table. const_pointer operator->() const { return &operator*(); } //! Create empty result const_accessor() : my_node(NULL) {} //! Destroy result after releasing the underlying reference. ~const_accessor() { my_node = NULL; // scoped lock's release() is called in its destructor } protected: bool is_writer() { return node::scoped_t::is_writer; } node *my_node; hashcode_t my_hash; }; //! Allows write access to elements and combines data access, locking, and garbage collection. class accessor: public const_accessor { public: //! Type of value typedef typename concurrent_hash_map::value_type value_type; //! Return reference to associated value in hash table. reference operator*() const { __TBB_ASSERT( this->my_node, "attempt to dereference empty accessor" ); return this->my_node->item; } //! Return pointer to associated value in hash table. pointer operator->() const { return &operator*(); } }; //! Construct empty table. concurrent_hash_map(const allocator_type &a = allocator_type()) : internal::hash_map_base(), my_allocator(a) {} //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. concurrent_hash_map(size_type n, const allocator_type &a = allocator_type()) : my_allocator(a) { reserve( n ); } //! 
Copy constructor concurrent_hash_map( const concurrent_hash_map& table, const allocator_type &a = allocator_type()) : internal::hash_map_base(), my_allocator(a) { internal_copy(table); } //! Construction with copying iteration range and given allocator instance template concurrent_hash_map(I first, I last, const allocator_type &a = allocator_type()) : my_allocator(a) { reserve( std::distance(first, last) ); // TODO: load_factor? internal_copy(first, last); } #if __TBB_INITIALIZER_LISTS_PRESENT //! Construct empty table with n preallocated buckets. This number serves also as initial concurrency level. concurrent_hash_map(const std::initializer_list &il, const allocator_type &a = allocator_type()) : my_allocator(a) { reserve(il.size()); internal_copy(il.begin(), il.end()); } #endif //__TBB_INITIALIZER_LISTS_PRESENT //! Assignment concurrent_hash_map& operator=( const concurrent_hash_map& table ) { if( this!=&table ) { clear(); internal_copy(table); } return *this; } #if __TBB_INITIALIZER_LISTS_PRESENT //! Assignment concurrent_hash_map& operator=( const std::initializer_list &il ) { clear(); reserve(il.size()); internal_copy(il.begin(), il.end()); return *this; } #endif //__TBB_INITIALIZER_LISTS_PRESENT //! Rehashes and optionally resizes the whole table. /** Useful to optimize performance before or after concurrent operations. Also enables using of find() and count() concurrent methods in serial context. */ void rehash(size_type n = 0); //! Clear table void clear(); //! Clear table and destroy it. ~concurrent_hash_map() { clear(); } //------------------------------------------------------------------------ // Parallel algorithm support //------------------------------------------------------------------------ range_type range( size_type grainsize=1 ) { return range_type( *this, grainsize ); } const_range_type range( size_type grainsize=1 ) const { return const_range_type( *this, grainsize ); } //------------------------------------------------------------------------ // STL support - not thread-safe methods //------------------------------------------------------------------------ iterator begin() {return iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);} iterator end() {return iterator(*this,0,0,0);} const_iterator begin() const {return const_iterator(*this,0,my_embedded_segment,my_embedded_segment->node_list);} const_iterator end() const {return const_iterator(*this,0,0,0);} std::pair equal_range( const Key& key ) { return internal_equal_range(key, end()); } std::pair equal_range( const Key& key ) const { return internal_equal_range(key, end()); } //! Number of items in table. size_type size() const { return my_size; } //! True if size()==0. bool empty() const { return my_size == 0; } //! Upper bound on size. size_type max_size() const {return (~size_type(0))/sizeof(node);} //! Returns the current number of buckets size_type bucket_count() const { return my_mask+1; } //! return allocator object allocator_type get_allocator() const { return this->my_allocator; } //! swap two instances. Iterators are invalidated void swap(concurrent_hash_map &table); //------------------------------------------------------------------------ // concurrent map operations //------------------------------------------------------------------------ //! Return count of items (0 or 1) size_type count( const Key &key ) const { return const_cast(this)->lookup(/*op_insert=*/false, NULL, NULL, /*write=*/false, key, NULL ); } //! Find item and acquire a read lock on the item. 
/** Return true if item is found, false otherwise. */ bool find( const_accessor &result, const Key &key ) const { return const_cast(this)->lookup(/*op_insert=*/false, NULL, &result, /*write=*/false, key, NULL ); } //! Find item and acquire a write lock on the item. /** Return true if item is found, false otherwise. */ bool find( accessor &result, const Key &key ) { return lookup(/*op_insert=*/false, NULL, &result, /*write=*/true, key, NULL ); } //! Insert item (if not already present) and acquire a write lock on the item, otherwise (if already present) acquire a read lock on the item. /** Returns true if item is new. */ bool insert( accessor &result_inserted, const_accessor &result_found, const Key &key ) { return lookup(/*op_insert=*/true, &result_inserted, &result_found, /*write=*/false /*ignored*/, key, NULL ); } //! Insert item (if not already present) and acquire a read lock on the item. /** Returns true if item is new. */ bool insert( const_accessor &result, const Key &key ) { return lookup(/*op_insert=*/true, &result, NULL, /*write=*/false, key, NULL ); } //! Insert item (if not already present) and acquire a write lock on the item. /** Returns true if item is new. */ bool insert( accessor &result, const Key &key ) { return lookup(/*op_insert=*/true, &result, NULL, /*write=*/true, key, NULL ); } //! Insert item by copying if there is no such key present already and acquire a read lock on the item. /** Returns true if item is new. */ bool insert( const_accessor &result, const value_type &value ) { return lookup(/*op_insert=*/true, &result, NULL, /*write=*/false, value.first, &value.second ); } //! Insert item by copying if there is no such key present already and acquire a write lock on the item. /** Returns true if item is new. */ bool insert( accessor &result, const value_type &value ) { return lookup(/*op_insert=*/true, &result, NULL, /*write=*/true, value.first, &value.second ); } //! Insert item by copying if there is no such key present already /** Returns true if item is inserted. */ bool insert( const value_type &value ) { return lookup(/*op_insert=*/true, NULL, NULL, /*write=*/false, value.first, &value.second ); } //! Insert range [first, last) template void insert(I first, I last) { for(; first != last; ++first) insert( *first ); } //! Erase item. /** Return true if item was erased by particularly this call. */ bool erase( const Key& key ); //! Erase item by const_accessor. /** Return true if item was erased by particularly this call. */ bool erase( const_accessor& item_accessor ) { return exclude( item_accessor ); } //! Erase item by accessor. /** Return true if item was erased by particularly this call. */ bool erase( accessor& item_accessor ) { return exclude( item_accessor ); } protected: //! Find or insert item and optionally acquire a lock on the item. bool lookup( bool op_insert, const_accessor *result_inserted, const_accessor *result_found, bool write, const Key &key, const T *value ); //! delete item by accessor bool exclude( const_accessor &item_accessor ); //! Returns an iterator for an item defined by the key, or for the next item after it (if upper==true) template std::pair internal_equal_range( const Key& key, I end ) const; //! Copy "source" to *this, where *this must start out empty. void internal_copy( const concurrent_hash_map& source ); template void internal_copy(I first, I last); //! Fast find when no concurrent erasure is used. For internal use inside TBB only! /** Return pointer to item with given key, or NULL if no such item exists. 
Must not be called concurrently with erasure operations. */ const_pointer internal_fast_find( const Key& key ) const { hashcode_t h = my_hash_compare.hash( key ); hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); node *n; restart: __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); bucket *b = get_bucket( h & m ); // TODO: actually, notification is unnecessary here, just hiding double-check if( itt_load_word_with_acquire(b->node_list) == internal::rehash_req ) { bucket::scoped_t lock; if( lock.try_acquire( b->mutex, /*write=*/true ) ) { if( b->node_list == internal::rehash_req) const_cast(this)->rehash_bucket( b, h & m ); //recursive rehashing } else lock.acquire( b->mutex, /*write=*/false ); __TBB_ASSERT(b->node_list!=internal::rehash_req,NULL); } n = search_bucket( key, b ); if( n ) return &n->item; else if( check_mask_race( h, m ) ) goto restart; return 0; } private: // const_accessor+const_accessor: not allowed by any overload, use insert(const_accessor) instead // const_accessor+ accessor: not allowed by any overload, of no obvious use // accessor+const_accessor: public overload above // accessor+ accessor: deleted here (or private and undefined before C++11), use insert(accessor) instead bool insert( accessor &result_inserted, accessor &result_found, const Key &key ); }; #if _MSC_VER && !defined(__INTEL_COMPILER) // Suppress "conditional expression is constant" warning. #pragma warning( push ) #pragma warning( disable: 4127 ) #endif // TODO: move to tbb_stddef.h or so, or just eliminate? #define __TBB_IMPLIES(x,y) (!( x )||/*short-circuit*/( y )) // guarantees lazy evaluation of y (for performance and/or correctness) template bool concurrent_hash_map::lookup( const bool op_insert, const_accessor *const result_inserted, const_accessor *const result_found, const bool write, // only used if exactly one of the above result arguments is non-NULL const Key &key, const T *const value ) { __TBB_ASSERT( __TBB_IMPLIES( !op_insert , !result_inserted && !value ), NULL ); // count()/find() __TBB_ASSERT( __TBB_IMPLIES( op_insert && !result_found, result_inserted || value ), NULL ); // insert() with one result __TBB_ASSERT( __TBB_IMPLIES( op_insert && result_found, result_inserted && !value ), NULL ); // insert() with two results __TBB_ASSERT( __TBB_IMPLIES( !result_inserted == !result_found, !write ), NULL ); // let's require value "false" when "write" is ignored // TODO: shouldn't accessors have been released before use? implicitly releasing them seems error-prone... 
if( result_inserted ) result_inserted->release(); if( result_found ) result_found ->release(); const hashcode_t h = my_hash_compare.hash( key ); /*var*/ hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); segment_index_t grow_segment = 0; node *new_node = NULL; // for insertion, kept across restarts, abandoned if another thread inserts a node first; std::unique_ptr not available before C++11 /*var*/ bool did_insert = false; restart: __TBB_ASSERT(!did_insert, NULL); // insertion must succeed without restart {//lock scope __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); bucket_accessor b( this, h & m ); node * /*var*/ n = search_bucket( key, b.get() ); if( !n ) { if( op_insert ) { // insert() if( !new_node ) { if(value) new_node = new( my_allocator ) node(key, *value); else new_node = new( my_allocator ) node(key); } if( b.is_writer() // might already be exclusive because of rehash_req || // short-circuit b.upgrade_to_writer() // expects a reader state || // short-circuit (only) for order of evaluation !(n =/*sic*/ search_bucket(key, b.get())) ) { // Got exclusive access to bucket, node (still) not there, go ahead with insertion. __TBB_ASSERT(!n, NULL); if( check_mask_race(h, m) ) goto restart; n = new_node; new_node = NULL; // emulate n = new_node.release() grow_segment = insert_new_node( b.get(), n, m ); did_insert = true; // Keep exclusive lock on bucket until the new node has been acquired! } else { // Another thread inserted the item during a non-atomic upgrade. __TBB_ASSERT(is_valid(n), NULL); b.downgrade_to_reader(); } __TBB_ASSERT(is_valid(n), NULL); } else { // count()/find() if( check_mask_race( h, m ) ) goto restart; __TBB_ASSERT(!new_node, NULL); // no need to delete_node() return false; } } __TBB_ASSERT(is_valid(n), NULL); if( const_accessor * const result = ( op_insert && (!result_found || did_insert)) ? result_inserted : result_found ) { // TODO: the following seems as generic/regular operation // acquire the item const bool l_write = (result_inserted && result_found) ? did_insert : write; if( !result->try_acquire( n->mutex, l_write ) ) { __TBB_ASSERT( !did_insert, "Can't acquire new item in locked bucket?" ); for( tbb::internal::atomic_backoff backoff(true);; ) { if( result->try_acquire( n->mutex, l_write ) ) break; if( !backoff.bounded_pause() ) { // the wait takes really long, restart the operation // TODO: describe why? what might/would happen otherwise? b.release(); // implied by restart, but we want to release before the yield __TBB_Yield(); m = (hashcode_t) itt_load_word_with_acquire( my_mask ); goto restart; } } } b.release(); // optional, negligible benefit result->my_node = n; result->my_hash = h; } }//lock scope // [opt] grow the container (TODO: what is "[opt]"? "optional"? no other uses (left) in TBB...) 
if( grow_segment ) { #if __TBB_STATISTICS my_info_resizes++; // concurrent ones #endif enable_segment( grow_segment ); } if (new_node) delete_node(new_node); return !op_insert || did_insert; // a failed count()/find() would already have returned false above } template template std::pair concurrent_hash_map::internal_equal_range( const Key& key, I end_ ) const { hashcode_t h = my_hash_compare.hash( key ); hashcode_t m = my_mask; __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); h &= m; bucket *b = get_bucket( h ); while( b->node_list == internal::rehash_req ) { m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit b = get_bucket( h &= m ); } node *n = search_bucket( key, b ); if( !n ) return std::make_pair(end_, end_); iterator lower(*this, h, b, n), upper(lower); return std::make_pair(lower, ++upper); } template bool concurrent_hash_map::exclude( const_accessor &item_accessor ) { __TBB_ASSERT( item_accessor.my_node, NULL ); node_base *const n = item_accessor.my_node; hashcode_t const h = item_accessor.my_hash; hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); do { // get bucket bucket_accessor b( this, h & m, /*writer=*/true ); node_base **p = &b->node_list; while( *p && *p != n ) p = &(*p)->next; if( !*p ) { // someone else was first if( check_mask_race( h, m ) ) continue; item_accessor.release(); return false; } __TBB_ASSERT( *p == n, NULL ); *p = n->next; // remove from container my_size--; break; } while(true); if( !item_accessor.is_writer() ) // need to get exclusive lock item_accessor.upgrade_to_writer(); // return value means nothing here item_accessor.release(); delete_node( n ); // Only one thread can delete it return true; } template bool concurrent_hash_map::erase( const Key &key ) { node_base *n; hashcode_t const h = my_hash_compare.hash( key ); hashcode_t m = (hashcode_t) itt_load_word_with_acquire( my_mask ); restart: {//lock scope // get bucket bucket_accessor b( this, h & m ); search: node_base **p = &b->node_list; n = *p; while( is_valid(n) && !my_hash_compare.equal(key, static_cast(n)->item.first ) ) { p = &n->next; n = *p; } if( !n ) { // not found, but mask could be changed if( check_mask_race( h, m ) ) goto restart; return false; } else if( !b.is_writer() && !b.upgrade_to_writer() ) { if( check_mask_race( h, m ) ) // contended upgrade, check mask goto restart; goto search; } *p = n->next; my_size--; } { typename node::scoped_t item_locker( n->mutex, /*write=*/true ); } // note: there should be no threads pretending to acquire this mutex again, do not try to upgrade const_accessor! 
delete_node( n ); // Only one thread can delete it due to write lock on the bucket return true; } template void concurrent_hash_map::swap(concurrent_hash_map &table) { using std::swap; swap(this->my_allocator, table.my_allocator); swap(this->my_hash_compare, table.my_hash_compare); internal_swap(table); } template void concurrent_hash_map::rehash(size_type sz) { reserve( sz ); // TODO: add reduction of number of buckets as well hashcode_t mask = my_mask; hashcode_t b = (mask+1)>>1; // size or first index of the last segment __TBB_ASSERT((b&(b-1))==0, NULL); // zero or power of 2 bucket *bp = get_bucket( b ); // only the last segment should be scanned for rehashing for(; b <= mask; b++, bp++ ) { node_base *n = bp->node_list; __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); if( n == internal::rehash_req ) { // rehash bucket, conditional because rehashing of a previous bucket may affect this one hashcode_t h = b; bucket *b_old = bp; do { __TBB_ASSERT( h > 1, "The lowermost buckets can't be rehashed" ); hashcode_t m = ( 1u<<__TBB_Log2( h ) ) - 1; // get parent mask from the topmost bit b_old = get_bucket( h &= m ); } while( b_old->node_list == internal::rehash_req ); // now h - is index of the root rehashed bucket b_old mark_rehashed_levels( h ); // mark all non-rehashed children recursively across all segments for( node_base **p = &b_old->node_list, *q = *p; is_valid(q); q = *p ) { hashcode_t c = my_hash_compare.hash( static_cast(q)->item.first ); if( (c & mask) != h ) { // should be rehashed *p = q->next; // exclude from b_old bucket *b_new = get_bucket( c & mask ); __TBB_ASSERT( b_new->node_list != internal::rehash_req, "hash() function changed for key in table or internal error" ); add_to_bucket( b_new, q ); } else p = &q->next; // iterate to next item } } } #if TBB_USE_PERFORMANCE_WARNINGS int current_size = int(my_size), buckets = int(mask)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics static bool reported = false; #endif #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS for( b = 0; b <= mask; b++ ) {// only last segment should be scanned for rehashing if( b & (b-2) ) ++bp; // not the beginning of a segment else bp = get_bucket( b ); node_base *n = bp->node_list; __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during rehash() execution" ); __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed, "Broken internal structure" ); #if TBB_USE_PERFORMANCE_WARNINGS if( n == internal::empty_rehashed ) empty_buckets++; else if( n->next ) overpopulated_buckets++; #endif #if TBB_USE_ASSERT for( ; is_valid(n); n = n->next ) { hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ) & mask; __TBB_ASSERT( h == b, "hash() function changed for key in table or internal error" ); } #endif } #endif // TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS #if TBB_USE_PERFORMANCE_WARNINGS if( buckets > current_size) empty_buckets -= buckets - current_size; else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? 
if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { tbb::internal::runtime_warning( "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); reported = true; } #endif } template void concurrent_hash_map::clear() { hashcode_t m = my_mask; __TBB_ASSERT((m&(m+1))==0, "data structure is invalid"); #if TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS int current_size = int(my_size), buckets = int(m)+1, empty_buckets = 0, overpopulated_buckets = 0; // usage statistics static bool reported = false; #endif bucket *bp = 0; // check consistency for( segment_index_t b = 0; b <= m; b++ ) { if( b & (b-2) ) ++bp; // not the beginning of a segment else bp = get_bucket( b ); node_base *n = bp->node_list; __TBB_ASSERT( is_valid(n) || n == internal::empty_rehashed || n == internal::rehash_req, "Broken internal structure" ); __TBB_ASSERT( *reinterpret_cast(&bp->mutex) == 0, "concurrent or unexpectedly terminated operation during clear() execution" ); #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS if( n == internal::empty_rehashed ) empty_buckets++; else if( n == internal::rehash_req ) buckets--; else if( n->next ) overpopulated_buckets++; #endif #if __TBB_EXTRA_DEBUG for(; is_valid(n); n = n->next ) { hashcode_t h = my_hash_compare.hash( static_cast(n)->item.first ); h &= m; __TBB_ASSERT( h == b || get_bucket(h)->node_list == internal::rehash_req, "hash() function changed for key in table or internal error" ); } #endif } #if TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS #if __TBB_STATISTICS printf( "items=%d buckets: capacity=%d rehashed=%d empty=%d overpopulated=%d" " concurrent: resizes=%u rehashes=%u restarts=%u\n", current_size, int(m+1), buckets, empty_buckets, overpopulated_buckets, unsigned(my_info_resizes), unsigned(my_info_rehashes), unsigned(my_info_restarts) ); my_info_resizes = 0; // concurrent ones my_info_restarts = 0; // race collisions my_info_rehashes = 0; // invocations of rehash_bucket #endif if( buckets > current_size) empty_buckets -= buckets - current_size; else overpopulated_buckets -= current_size - buckets; // TODO: load_factor? if( !reported && buckets >= 512 && ( 2*empty_buckets > current_size || 2*overpopulated_buckets > current_size ) ) { tbb::internal::runtime_warning( "Performance is not optimal because the hash function produces bad randomness in lower bits in %s.\nSize: %d Empties: %d Overlaps: %d", typeid(*this).name(), current_size, empty_buckets, overpopulated_buckets ); reported = true; } #endif #endif//TBB_USE_ASSERT || TBB_USE_PERFORMANCE_WARNINGS || __TBB_STATISTICS my_size = 0; segment_index_t s = segment_index_of( m ); __TBB_ASSERT( s+1 == pointers_per_table || !my_table[s+1], "wrong mask or concurrent grow" ); cache_aligned_allocator alloc; do { __TBB_ASSERT( is_valid( my_table[s] ), "wrong mask or concurrent grow" ); segment_ptr_t buckets_ptr = my_table[s]; size_type sz = segment_size( s ? 
s : 1 ); for( segment_index_t i = 0; i < sz; i++ ) for( node_base *n = buckets_ptr[i].node_list; is_valid(n); n = buckets_ptr[i].node_list ) { buckets_ptr[i].node_list = n->next; delete_node( n ); } if( s >= first_block) // the first segment or the next alloc.deallocate( buckets_ptr, sz ); else if( s == embedded_block && embedded_block != first_block ) alloc.deallocate( buckets_ptr, segment_size(first_block)-embedded_buckets ); if( s >= embedded_block ) my_table[s] = 0; } while(s-- > 0); my_mask = embedded_buckets - 1; } template void concurrent_hash_map::internal_copy( const concurrent_hash_map& source ) { reserve( source.my_size ); // TODO: load_factor? hashcode_t mask = source.my_mask; if( my_mask == mask ) { // optimized version bucket *dst = 0, *src = 0; bool rehash_required = false; for( hashcode_t k = 0; k <= mask; k++ ) { if( k & (k-2) ) ++dst,src++; // not the beginning of a segment else { dst = get_bucket( k ); src = source.get_bucket( k ); } __TBB_ASSERT( dst->node_list != internal::rehash_req, "Invalid bucket in destination table"); node *n = static_cast( src->node_list ); if( n == internal::rehash_req ) { // source is not rehashed, items are in previous buckets rehash_required = true; dst->node_list = internal::rehash_req; } else for(; n; n = static_cast( n->next ) ) { add_to_bucket( dst, new( my_allocator ) node(n->item.first, n->item.second) ); ++my_size; // TODO: replace by non-atomic op } } if( rehash_required ) rehash(); } else internal_copy( source.begin(), source.end() ); } template template void concurrent_hash_map::internal_copy(I first, I last) { hashcode_t m = my_mask; for(; first != last; ++first) { hashcode_t h = my_hash_compare.hash( first->first ); bucket *b = get_bucket( h & m ); __TBB_ASSERT( b->node_list != internal::rehash_req, "Invalid bucket in destination table"); node *n = new( my_allocator ) node(first->first, first->second); add_to_bucket( b, n ); ++my_size; // TODO: replace by non-atomic op } } } // namespace interface5 using interface5::concurrent_hash_map; template inline bool operator==(const concurrent_hash_map &a, const concurrent_hash_map &b) { if(a.size() != b.size()) return false; typename concurrent_hash_map::const_iterator i(a.begin()), i_end(a.end()); typename concurrent_hash_map::const_iterator j, j_end(b.end()); for(; i != i_end; ++i) { j = b.equal_range(i->first).first; if( j == j_end || !(i->second == j->second) ) return false; } return true; } template inline bool operator!=(const concurrent_hash_map &a, const concurrent_hash_map &b) { return !(a == b); } template inline void swap(concurrent_hash_map &a, concurrent_hash_map &b) { a.swap( b ); } #if _MSC_VER && !defined(__INTEL_COMPILER) #pragma warning( pop ) #endif // warning 4127 is back } // namespace tbb #endif /* __TBB_concurrent_hash_map_H */
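
// ---------------------------------------------------------------------------
// Usage sketch (illustrative only, not part of the original header).  It shows
// the accessor protocol documented above: insert()/find() return the element
// locked through an accessor (write lock) or const_accessor (read lock), and
// the lock is held until the accessor is destroyed or release()d.  The names
// WordTable, count_word and read_count are hypothetical.
// ---------------------------------------------------------------------------
#if 0
#include <string>
#include "tbb/concurrent_hash_map.h"

typedef tbb::concurrent_hash_map<std::string,int> WordTable; // default tbb_hash_compare<std::string>

// Safe to call concurrently: insert() either creates the element with a
// default-constructed value or finds the existing one; in both cases `a`
// holds a write lock on the element until it goes out of scope.
void count_word( WordTable &table, const std::string &key ) {
    WordTable::accessor a;
    table.insert( a, key );
    a->second += 1;
}

// Concurrent read access: const_accessor takes only a read lock.
int read_count( const WordTable &table, const std::string &key ) {
    WordTable::const_accessor a;
    return table.find( a, key ) ? a->second : 0;
}

// erase(key) returns true only for the call that actually removed the item.
// range() can be fed to tbb::parallel_for for parallel traversal, but such
// traversal must not run concurrently with insert()/erase().
#endif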