Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
_concurrent_unordered_impl.h
1/*
2 Copyright (c) 2005-2020 Intel Corporation
3
4 Licensed under the Apache License, Version 2.0 (the "License");
5 you may not use this file except in compliance with the License.
6 You may obtain a copy of the License at
7
8 http://www.apache.org/licenses/LICENSE-2.0
9
10 Unless required by applicable law or agreed to in writing, software
11 distributed under the License is distributed on an "AS IS" BASIS,
12 WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
13 See the License for the specific language governing permissions and
14 limitations under the License.
15*/
16
17/* Container implementations in this header are based on PPL implementations
18 provided by Microsoft. */
19
20#ifndef __TBB__concurrent_unordered_impl_H
21#define __TBB__concurrent_unordered_impl_H
22#if !defined(__TBB_concurrent_unordered_map_H) && !defined(__TBB_concurrent_unordered_set_H) && !defined(__TBB_concurrent_hash_map_H)
23#error Do not #include this internal file directly; use public TBB headers instead.
24#endif
25
26#include "../tbb_stddef.h"
27
28#include <iterator>
29#include <utility> // Need std::pair
30#include <functional> // Need std::equal_to (in ../concurrent_unordered_*.h)
31#include <string> // For tbb_hasher
32#include <cstring> // Need std::memset
33#include __TBB_STD_SWAP_HEADER
34
35#include "../atomic.h"
36#include "../tbb_exception.h"
37#include "../tbb_allocator.h"
38
39#if __TBB_INITIALIZER_LISTS_PRESENT
40 #include <initializer_list>
41#endif
42
43#if __TBB_CPP11_RVALUE_REF_PRESENT && !__TBB_IMPLICIT_COPY_DELETION_BROKEN
44 #define __TBB_UNORDERED_NODE_HANDLE_PRESENT 1
45#endif
46
47#include "_allocator_traits.h"
48#include "_tbb_hash_compare_impl.h"
49#include "_template_helpers.h"
50
51#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
52#include "_node_handle_impl.h"
53#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
54
55namespace tbb {
56namespace interface5 {
57//! @cond INTERNAL
58namespace internal {
59
60template <typename T, typename Allocator>
61class split_ordered_list;
62template <typename Traits>
63class concurrent_unordered_base;
64
65// Forward list iterators (without skipping dummy elements)
66template<class Solist, typename Value>
67class flist_iterator : public std::iterator<std::forward_iterator_tag, Value>
68{
69 template <typename T, typename Allocator>
70 friend class split_ordered_list;
71 template <typename Traits>
72 friend class concurrent_unordered_base;
73 template<class M, typename V>
74 friend class flist_iterator;
75
76 typedef typename Solist::nodeptr_t nodeptr_t;
77public:
78 typedef typename Solist::value_type value_type;
79 typedef typename Solist::difference_type difference_type;
80 typedef typename Solist::pointer pointer;
81 typedef typename Solist::reference reference;
82
83 flist_iterator() : my_node_ptr(0) {}
84 flist_iterator( const flist_iterator<Solist, typename Solist::value_type> &other )
85 : my_node_ptr(other.my_node_ptr) {}
86
87 flist_iterator& operator=( const flist_iterator<Solist, typename Solist::value_type> &other ) {
88 my_node_ptr = other.my_node_ptr;
89 return *this;
90 }
91
92 reference operator*() const { return my_node_ptr->my_element; }
93 pointer operator->() const { return &**this; }
94
95 flist_iterator& operator++() {
96 my_node_ptr = my_node_ptr->my_next;
97 return *this;
98 }
99
100 flist_iterator operator++(int) {
101 flist_iterator tmp = *this;
102 ++*this;
103 return tmp;
104 }
105
106protected:
107 flist_iterator(nodeptr_t pnode) : my_node_ptr(pnode) {}
108 nodeptr_t get_node_ptr() const { return my_node_ptr; }
109
110 nodeptr_t my_node_ptr;
111
112 template<typename M, typename T, typename U>
113 friend bool operator==( const flist_iterator<M,T> &i, const flist_iterator<M,U> &j );
114 template<typename M, typename T, typename U>
115 friend bool operator!=( const flist_iterator<M,T>& i, const flist_iterator<M,U>& j );
116};
117
118template<typename Solist, typename T, typename U>
119bool operator==( const flist_iterator<Solist,T> &i, const flist_iterator<Solist,U> &j ) {
120 return i.my_node_ptr == j.my_node_ptr;
121}
122template<typename Solist, typename T, typename U>
123bool operator!=( const flist_iterator<Solist,T>& i, const flist_iterator<Solist,U>& j ) {
124 return i.my_node_ptr != j.my_node_ptr;
125}
126
127// Split-order list iterators, needed to skip dummy elements
128template<class Solist, typename Value>
129class solist_iterator : public flist_iterator<Solist, Value>
130{
131 typedef flist_iterator<Solist, Value> base_type;
132 typedef typename Solist::nodeptr_t nodeptr_t;
133 using base_type::get_node_ptr;
134 template <typename T, typename Allocator>
135 friend class split_ordered_list;
136 template<class M, typename V>
137 friend class solist_iterator;
138 template <typename Traits>
139 friend class concurrent_unordered_base;
140 template<typename M, typename T, typename U>
141 friend bool operator==( const solist_iterator<M,T> &i, const solist_iterator<M,U> &j );
142 template<typename M, typename T, typename U>
143 friend bool operator!=( const solist_iterator<M,T>& i, const solist_iterator<M,U>& j );
144
145 const Solist *my_list_ptr;
146 solist_iterator(nodeptr_t pnode, const Solist *plist) : base_type(pnode), my_list_ptr(plist) {}
147
148public:
149 typedef typename Solist::value_type value_type;
150 typedef typename Solist::difference_type difference_type;
151 typedef typename Solist::pointer pointer;
152 typedef typename Solist::reference reference;
153
154 solist_iterator() {}
155 solist_iterator(const solist_iterator<Solist, typename Solist::value_type> &other)
156 : base_type(other), my_list_ptr(other.my_list_ptr) {}
157
158 solist_iterator& operator=( const solist_iterator<Solist, typename Solist::value_type> &other ) {
159 my_node_ptr = other.my_node_ptr;
160 my_list_ptr = other.my_list_ptr;
161 return *this;
162 }
163
164 reference operator*() const {
165 return this->base_type::operator*();
166 }
167
168 pointer operator->() const {
169 return (&**this);
170 }
171
172 solist_iterator& operator++() {
173 do ++(*(base_type *)this);
174 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
175
176 return (*this);
177 }
178
179 solist_iterator operator++(int) {
180 solist_iterator tmp = *this;
181 do ++*this;
182 while (get_node_ptr() != NULL && get_node_ptr()->is_dummy());
183
184 return (tmp);
185 }
186};
187
188template<typename Solist, typename T, typename U>
189bool operator==( const solist_iterator<Solist,T> &i, const solist_iterator<Solist,U> &j ) {
190 return i.my_node_ptr == j.my_node_ptr && i.my_list_ptr == j.my_list_ptr;
191}
192template<typename Solist, typename T, typename U>
193bool operator!=( const solist_iterator<Solist,T>& i, const solist_iterator<Solist,U>& j ) {
194 return i.my_node_ptr != j.my_node_ptr || i.my_list_ptr != j.my_list_ptr;
195}
196
197// Forward type and class definitions
198typedef size_t sokey_t;
199
200
201// Forward list in which elements are sorted in a split-order
202template <typename T, typename Allocator>
203class split_ordered_list
204{
205public:
206 typedef split_ordered_list<T, Allocator> self_type;
207
208 typedef typename tbb::internal::allocator_rebind<Allocator, T>::type allocator_type;
209
210 struct node;
211 typedef node *nodeptr_t;
212
213 typedef typename tbb::internal::allocator_traits<allocator_type>::value_type value_type;
214 typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
215 typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
216 typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
217 typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
218 // No support for reference/const_reference in allocator traits
219 typedef value_type& reference;
220 typedef const value_type& const_reference;
221
222 typedef solist_iterator<self_type, const value_type> const_iterator;
223 typedef solist_iterator<self_type, value_type> iterator;
224 typedef flist_iterator<self_type, const value_type> raw_const_iterator;
225 typedef flist_iterator<self_type, value_type> raw_iterator;
226
227 // Node that holds the element in a split-ordered list
228 struct node : tbb::internal::no_assign
229 {
230 private:
231 // for compilers that try to generate default constructors though they are not needed.
232 node(); // VS 2008, 2010, 2012
233 public:
234 // Initialize the node with the given order key
235 void init(sokey_t order_key) {
236 my_order_key = order_key;
237 my_next = NULL;
238 }
239
240 // Return the order key (needed for hashing)
241 sokey_t get_order_key() const { // TODO: remove
242 return my_order_key;
243 }
244
245 // storage() and value() provide a common interface for accessing the node's element (required by node_handle)
246 value_type* storage() {
247 return reinterpret_cast<value_type*>(&my_element);
248 }
249
250 value_type& value() {
251 return *storage();
252 }
253
254 // Inserts the new element in the list in an atomic fashion
255 nodeptr_t atomic_set_next(nodeptr_t new_node, nodeptr_t current_node)
256 {
257 // Try to change the next pointer on the current element to a new element, only if it still points to the cached next
258 nodeptr_t exchange_node = tbb::internal::as_atomic(my_next).compare_and_swap(new_node, current_node);
259
260 if (exchange_node == current_node) // TODO: why this branch?
261 {
262 // Operation succeeded, return the new node
263 return new_node;
264 }
265 else
266 {
267 // Operation failed, return the "interfering" node
268 return exchange_node;
269 }
270 }
271
272 // Checks if this element in the list is a dummy, order enforcing node. Dummy nodes are used by buckets
273 // in the hash table to quickly index into the right subsection of the split-ordered list.
274 bool is_dummy() const {
275 return (my_order_key & 0x1) == 0;
276 }
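 // [Editorial sketch, not in the original header] The low-order bit is the
 // whole trick: regular elements always carry it set, bucket dummies never
 // do. With 8-bit keys for readability (sokey_t is really size_t):
 //
 //   element hash 0b00000110 -> regular key = reverse(bits) | 1 = 0b01100001
 //   bucket 2 -> dummy key = reverse(bits) & ~1 = 0b01000000
 //
 // Because the hash bits are reversed, a bucket's dummy key sorts immediately
 // before the regular keys of every element that hashes into that bucket.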
277
278
279 nodeptr_t my_next; // Next element in the list
280 value_type my_element; // Element storage
281 sokey_t my_order_key; // Order key for this element
282 };
283
284 // Allocate a new node with the given order key; used to allocate dummy nodes
285 nodeptr_t create_node(sokey_t order_key) {
286 nodeptr_t pnode = my_node_allocator.allocate(1);
287 pnode->init(order_key);
288 return (pnode);
289 }
290
291 // Allocate a new node with the given order key and value
292 template<typename Arg>
293 nodeptr_t create_node(sokey_t order_key, __TBB_FORWARDING_REF(Arg) t,
294 /*AllowCreate=*/tbb::internal::true_type = tbb::internal::true_type()) {
295 nodeptr_t pnode = my_node_allocator.allocate(1);
296
297 //TODO: use RAII scoped guard instead of explicit catch
298 __TBB_TRY {
299 new(static_cast<void*>(&pnode->my_element)) T(tbb::internal::forward<Arg>(t));
300 pnode->init(order_key);
301 } __TBB_CATCH(...) {
302 my_node_allocator.deallocate(pnode, 1);
303 __TBB_RETHROW();
304 }
305
306 return (pnode);
307 }
308
309 // A helper to avoid excessive requirements in internal_insert
310 template<typename Arg>
311 nodeptr_t create_node(sokey_t, __TBB_FORWARDING_REF(Arg),
312 /*AllowCreate=*/tbb::internal::false_type){
313 __TBB_ASSERT(false, "This compile-time helper should never get called");
314 return nodeptr_t();
315 }
316
317 // Allocate a new node with the given parameters for constructing value
318 template<typename __TBB_PARAMETER_PACK Args>
319 nodeptr_t create_node_v(__TBB_FORWARDING_REF(Args) __TBB_PARAMETER_PACK args){
320 nodeptr_t pnode = my_node_allocator.allocate(1);
321
322 //TODO: use RAII scoped guard instead of explicit catch
323 __TBB_TRY {
324 new(static_cast<void*>(&pnode->my_element)) T(__TBB_PACK_EXPANSION(tbb::internal::forward<Args>(args)));
325 } __TBB_CATCH(...) {
326 my_node_allocator.deallocate(pnode, 1);
327 __TBB_RETHROW();
328 }
329
330 return (pnode);
331 }
332
333 split_ordered_list(allocator_type a = allocator_type())
334 : my_node_allocator(a), my_element_count(0)
335 {
336 // Immediately allocate a dummy node with order key of 0. This node
337 // will always be the head of the list.
338 my_head = create_node(sokey_t(0));
339 }
340
341 ~split_ordered_list()
342 {
343 // Clear the list
344 clear();
345
346 // Remove the head element which is not cleared by clear()
347 nodeptr_t pnode = my_head;
348 my_head = NULL;
349
350 __TBB_ASSERT(pnode != NULL && pnode->my_next == NULL, "Invalid head list node");
351
352 destroy_node(pnode);
353 }
354
355 // Common forward list functions
356
357 allocator_type get_allocator() const {
358 return (my_node_allocator);
359 }
360
361 void clear() {
362 nodeptr_t pnext;
363 nodeptr_t pnode = my_head;
364
365 __TBB_ASSERT(my_head != NULL, "Invalid head list node");
366 pnext = pnode->my_next;
367 pnode->my_next = NULL;
368 pnode = pnext;
369
370 while (pnode != NULL)
371 {
372 pnext = pnode->my_next;
373 destroy_node(pnode);
374 pnode = pnext;
375 }
376
377 my_element_count = 0;
378 }
379
380 // Returns a first non-dummy element in the SOL
381 iterator begin() {
382 return first_real_iterator(raw_begin());
383 }
384
385 // Returns a first non-dummy element in the SOL
386 const_iterator begin() const {
387 return first_real_iterator(raw_begin());
388 }
389
390 iterator end() {
391 return (iterator(0, this));
392 }
393
394 const_iterator end() const {
395 return (const_iterator(0, this));
396 }
397
398 const_iterator cbegin() const {
399 return (((const self_type *)this)->begin());
400 }
401
402 const_iterator cend() const {
403 return (((const self_type *)this)->end());
404 }
405
406 // Checks if the number of elements (non-dummy) is 0
407 bool empty() const {
408 return (my_element_count == 0);
409 }
410
411 // Returns the number of non-dummy elements in the list
412 size_type size() const {
413 return my_element_count;
414 }
415
416 // Returns the maximum size of the list, determined by the allocator
417 size_type max_size() const {
418 return my_node_allocator.max_size();
419 }
420
421 // Swaps 'this' list with the passed in one
422 void swap(self_type& other)
423 {
424 if (this == &other)
425 {
426 // Nothing to do
427 return;
428 }
429
430 std::swap(my_element_count, other.my_element_count);
431 std::swap(my_head, other.my_head);
432 }
433
434 // Split-order list functions
435
436 // Returns a first element in the SOL, which is always a dummy
437 raw_iterator raw_begin() {
438 return raw_iterator(my_head);
439 }
440
441 // Returns a first element in the SOL, which is always a dummy
442 raw_const_iterator raw_begin() const {
443 return raw_const_iterator(my_head);
444 }
445
446 raw_iterator raw_end() {
447 return raw_iterator(0);
448 }
449
450 raw_const_iterator raw_end() const {
451 return raw_const_iterator(0);
452 }
453
454 static sokey_t get_order_key(const raw_const_iterator& it) {
455 return it.get_node_ptr()->get_order_key();
456 }
457
458 static sokey_t get_safe_order_key(const raw_const_iterator& it) {
459 if( !it.get_node_ptr() ) return ~sokey_t(0);
460 return it.get_node_ptr()->get_order_key();
461 }
462
463 // Returns a public iterator version of the internal iterator. Public iterator must not
464 // be a dummy private iterator.
465 iterator get_iterator(raw_iterator it) {
466 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
467 return iterator(it.get_node_ptr(), this);
468 }
469
470 // Returns a public iterator version of the internal iterator. Public iterator must not
471 // be a dummy private iterator.
472 const_iterator get_iterator(raw_const_iterator it) const {
473 __TBB_ASSERT(it.get_node_ptr() == NULL || !it.get_node_ptr()->is_dummy(), "Invalid user node (dummy)");
474 return const_iterator(it.get_node_ptr(), this);
475 }
476
477 // Returns a non-const version of the raw_iterator
478 raw_iterator get_iterator(raw_const_iterator it) {
479 return raw_iterator(it.get_node_ptr());
480 }
481
482 // Returns a non-const version of the iterator
483 iterator get_iterator(const_iterator it) {
484 return iterator(it.my_node_ptr, it.my_list_ptr);
485 }
486
487 // Returns a public iterator version of a first non-dummy internal iterator at or after
488 // the passed in internal iterator.
489 iterator first_real_iterator(raw_iterator it)
490 {
491 // Skip all dummy, internal only iterators
492 while (it != raw_end() && it.get_node_ptr()->is_dummy())
493 ++it;
494
495 return iterator(it.get_node_ptr(), this);
496 }
497
498 // Returns a public iterator version of a first non-dummy internal iterator at or after
499 // the passed in internal iterator.
500 const_iterator first_real_iterator(raw_const_iterator it) const
501 {
502 // Skip all dummy, internal only iterators
503 while (it != raw_end() && it.get_node_ptr()->is_dummy())
504 ++it;
505
506 return const_iterator(it.get_node_ptr(), this);
507 }
508
509 // Erase an element using the allocator
510 void destroy_node(nodeptr_t pnode) {
511 if (!pnode->is_dummy()) my_node_allocator.destroy(pnode);
512 my_node_allocator.deallocate(pnode, 1);
513 }
514
515 // Try to insert a new element in the list.
516 // If insert fails, return the node that was inserted instead.
517 static nodeptr_t try_insert_atomic(nodeptr_t previous, nodeptr_t new_node, nodeptr_t current_node) {
518 new_node->my_next = current_node;
519 return previous->atomic_set_next(new_node, current_node);
520 }
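 // [Editorial sketch, not in the original header] Callers use this CAS-based
 // primitive as a publication step: pre-link the new node to its expected
 // successor, then swing the predecessor's next pointer with one
 // compare-and-swap, retrying from a known position on interference:
 //
 //   new_node->my_next = current; // pre-link before publication
 //   if (previous->atomic_set_next(new_node, current) != new_node) {
 //       // lost the race: another thread changed previous->my_next;
 //       // re-scan forward from 'previous' and try again
 //   }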
521
522 // Insert a new element between passed in iterators
523 std::pair<iterator, bool> try_insert(raw_iterator it, raw_iterator next, nodeptr_t pnode, size_type *new_count)
524 {
525 nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), pnode, next.get_node_ptr());
526
527 if (inserted_node == pnode)
528 {
529 // If the insert succeeded, check that the order is correct and increment the element count
530 check_range(it, next);
531 *new_count = tbb::internal::as_atomic(my_element_count).fetch_and_increment();
532 return std::pair<iterator, bool>(iterator(pnode, this), true);
533 }
534 else
535 {
536 return std::pair<iterator, bool>(end(), false);
537 }
538 }
539
540 // Insert a new dummy element, starting search at a parent dummy element
541 raw_iterator insert_dummy(raw_iterator it, sokey_t order_key)
542 {
543 raw_iterator last = raw_end();
544 raw_iterator where = it;
545
546 __TBB_ASSERT(where != last, "Invalid head node");
547
548 ++where;
549
550 // Create a dummy element up front, even though it may be discarded (due to concurrent insertion)
551 nodeptr_t dummy_node = create_node(order_key);
552
553 for (;;)
554 {
555 __TBB_ASSERT(it != last, "Invalid head list node");
556
557 // If the head iterator is at the end of the list, or past the point where this dummy
558 // node needs to be inserted, then try to insert it.
559 if (where == last || get_order_key(where) > order_key)
560 {
561 __TBB_ASSERT(get_order_key(it) < order_key, "Invalid node order in the list");
562
563 // Try to insert it in the right place
564 nodeptr_t inserted_node = try_insert_atomic(it.get_node_ptr(), dummy_node, where.get_node_ptr());
565
566 if (inserted_node == dummy_node)
567 {
568 // Insertion succeeded, check the list for order violations
569 check_range(it, where);
570 return raw_iterator(dummy_node);
571 }
572 else
573 {
574 // Insertion failed: either dummy node was inserted by another thread, or
575 // a real element was inserted at exactly the same place as dummy node.
576 // Proceed with the search from the previous location where order key was
577 // known to be larger (note: this is legal only because there is no safe
578 // concurrent erase operation supported).
579 where = it;
580 ++where;
581 continue;
582 }
583 }
584 else if (get_order_key(where) == order_key)
585 {
586 // Another dummy node with the same value found, discard the new one.
587 destroy_node(dummy_node);
588 return where;
589 }
590
591 // Move the iterator forward
592 it = where;
593 ++where;
594 }
595
596 }
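 // [Editorial sketch, not in the original header] A worked example of dummy
 // insertion, assuming an 8-bucket table: the first touch of bucket 6 makes
 // the owning hash table initialize the parent chain first (get_parent() in
 // concurrent_unordered_base clears the top set bit, so 6 -> 2 -> 0), then
 // calls insert_dummy() starting at the parent's dummy node. The new dummy
 // is published with the same try_insert_atomic() protocol as real elements,
 // and the speculatively created node is destroyed if another thread wins.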
597
598 nodeptr_t erase_node_impl(raw_iterator previous, raw_const_iterator& where) {
599 nodeptr_t pnode = (where++).get_node_ptr();
600 nodeptr_t prevnode = previous.get_node_ptr();
601 __TBB_ASSERT(prevnode->my_next == pnode, "Erase must take consecutive iterators");
602 prevnode->my_next = pnode->my_next;
603 return pnode;
604 }
605
606 // This erase function can handle both real and dummy nodes
607 void erase_node(raw_iterator previous, raw_const_iterator& where,
608 /*allow_destroy*/tbb::internal::true_type)
609 {
610 nodeptr_t pnode = erase_node_impl(previous, where);
611 destroy_node(pnode);
612 }
613
614 void erase_node(raw_iterator previous, raw_const_iterator& where,
615 /*allow_destroy*/tbb::internal::false_type)
616 {
617 erase_node_impl(previous, where);
618 }
619
620 void erase_node(raw_iterator previous, raw_const_iterator& where) {
621 erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
622 }
623
624 // Erase the element (previous node needs to be passed because this is a forward only list)
625 template<typename AllowDestroy>
626 iterator erase_node(raw_iterator previous, const_iterator where, AllowDestroy)
627 {
628 raw_const_iterator it = where;
629 erase_node(previous, it, AllowDestroy());
630
631 return get_iterator(first_real_iterator(it));
632 }
633
634 iterator erase_node(raw_iterator previous, const_iterator& where) {
635 return erase_node(previous, where, /*allow_destroy*/tbb::internal::true_type());
636 }
637
638
639
640
641 // Move all elements from the passed in split-ordered list to this one
642 void move_all(self_type& source)
643 {
644 raw_const_iterator first = source.raw_begin();
645 raw_const_iterator last = source.raw_end();
646
647 if (first == last)
648 return;
649
650 nodeptr_t previous_node = my_head;
651 raw_const_iterator begin_iterator = first++;
652
653 // Move all elements one by one, including dummy ones
654 for (raw_const_iterator it = first; it != last;)
655 {
656 nodeptr_t pnode = it.get_node_ptr();
657
658 nodeptr_t dummy_node = pnode->is_dummy() ? create_node(pnode->get_order_key()) : create_node(pnode->get_order_key(), pnode->my_element);
659 previous_node = try_insert_atomic(previous_node, dummy_node, NULL);
660 __TBB_ASSERT(previous_node != NULL, "Insertion must succeed");
661 raw_const_iterator where = it++;
662 source.erase_node(get_iterator(begin_iterator), where);
663 }
664 check_range();
665 }
666
667
668private:
669 //Need to setup private fields of split_ordered_list in move constructor and assignment of concurrent_unordered_base
670 template <typename Traits>
671 friend class concurrent_unordered_base;
672
673 // Check the list for order violations
674 void check_range( raw_iterator first, raw_iterator last )
675 {
676#if TBB_USE_ASSERT
677 for (raw_iterator it = first; it != last; ++it)
678 {
679 raw_iterator next = it;
680 ++next;
681
682 __TBB_ASSERT(next == raw_end() || get_order_key(next) >= get_order_key(it), "!!! List order inconsistency !!!");
683 }
684#else
685 tbb::internal::suppress_unused_warning(first, last);
686#endif
687 }
688 void check_range()
689 {
690#if TBB_USE_ASSERT
691 check_range( raw_begin(), raw_end() );
692#endif
693 }
694
695 typename tbb::internal::allocator_rebind<allocator_type, node>::type my_node_allocator;
696 size_type my_element_count; // Total item count, not counting dummy nodes
697 nodeptr_t my_head; // pointer to head node
698};
699
700#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
701#pragma warning(push)
702#pragma warning(disable: 4127) // warning C4127: conditional expression is constant
703#endif
704
705template <typename Traits>
706class concurrent_unordered_base : public Traits
707{
708protected:
709 // Type definitions
710 typedef concurrent_unordered_base<Traits> self_type;
711 typedef typename Traits::value_type value_type;
712 typedef typename Traits::key_type key_type;
713 typedef typename Traits::hash_compare hash_compare;
714 typedef typename Traits::allocator_type allocator_type;
715 typedef typename hash_compare::hasher hasher;
716 typedef typename hash_compare::key_equal key_equal;
717
718 typedef typename tbb::internal::allocator_traits<allocator_type>::size_type size_type;
719 typedef typename tbb::internal::allocator_traits<allocator_type>::difference_type difference_type;
720 typedef typename tbb::internal::allocator_traits<allocator_type>::pointer pointer;
721 typedef typename tbb::internal::allocator_traits<allocator_type>::const_pointer const_pointer;
722 // No support for reference/const_reference in allocator
723 typedef value_type& reference;
724 typedef const value_type& const_reference;
725
726 typedef split_ordered_list<value_type, typename Traits::allocator_type> solist_t;
727 typedef typename solist_t::nodeptr_t nodeptr_t;
728 // Iterators that walk the entire split-order list, including dummy nodes
729 typedef typename solist_t::raw_iterator raw_iterator;
730 typedef typename solist_t::raw_const_iterator raw_const_iterator;
731 typedef typename solist_t::iterator iterator; // TODO: restore const iterator for unordered_sets
732 typedef typename solist_t::const_iterator const_iterator;
733 typedef iterator local_iterator;
734 typedef const_iterator const_local_iterator;
735#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
736 typedef typename Traits::node_type node_type;
737#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
738 using Traits::my_hash_compare;
739 using Traits::get_key;
740 using Traits::allow_multimapping;
741
742 static const size_type initial_bucket_number = 8; // Initial number of buckets
743
744private:
745 template<typename OtherTraits>
746 friend class concurrent_unordered_base;
747
748 typedef std::pair<iterator, iterator> pairii_t;
749 typedef std::pair<const_iterator, const_iterator> paircc_t;
750
751 static size_type const pointers_per_table = sizeof(size_type) * 8; // One bucket segment per bit
752 static const size_type initial_bucket_load = 4; // Initial maximum number of elements per bucket
753
754 struct call_internal_clear_on_exit{
755 concurrent_unordered_base* my_instance;
756 call_internal_clear_on_exit(concurrent_unordered_base* instance) : my_instance(instance) {}
757 void dismiss(){ my_instance = NULL;}
758 ~call_internal_clear_on_exit(){
759 if (my_instance){
760 my_instance->internal_clear();
761 }
762 }
763 };
764protected:
765 // Constructors/Destructors
766 concurrent_unordered_base(size_type n_of_buckets = initial_bucket_number,
767 const hash_compare& hc = hash_compare(), const allocator_type& a = allocator_type())
768 : Traits(hc), my_solist(a),
769 my_allocator(a), my_maximum_bucket_size((float) initial_bucket_load)
770 {
771 if( n_of_buckets == 0) ++n_of_buckets;
772 my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)n_of_buckets*2-1); // round up to power of 2
773 internal_init();
774 }
775
776 concurrent_unordered_base(const concurrent_unordered_base& right, const allocator_type& a)
777 : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
778 {
779 internal_init();
780 internal_copy(right);
781 }
782
783 concurrent_unordered_base(const concurrent_unordered_base& right)
784 : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator())
785 {
786 //FIXME: exception safety seems to be broken here
787 internal_init();
788 internal_copy(right);
789 }
790
791#if __TBB_CPP11_RVALUE_REF_PRESENT
792 concurrent_unordered_base(concurrent_unordered_base&& right)
793 : Traits(right.my_hash_compare), my_solist(right.get_allocator()), my_allocator(right.get_allocator()),
794 my_maximum_bucket_size(float(initial_bucket_load))
795 {
796 my_number_of_buckets = initial_bucket_number;
797 internal_init();
798 swap(right);
799 }
800
801 concurrent_unordered_base(concurrent_unordered_base&& right, const allocator_type& a)
802 : Traits(right.my_hash_compare), my_solist(a), my_allocator(a)
803 {
804 call_internal_clear_on_exit clear_buckets_on_exception(this);
805
806 internal_init();
807 if (a == right.get_allocator()){
808 my_number_of_buckets = initial_bucket_number;
809 my_maximum_bucket_size = float(initial_bucket_load);
810 this->swap(right);
811 }else{
812 my_maximum_bucket_size = right.my_maximum_bucket_size;
813 my_number_of_buckets = right.my_number_of_buckets;
814 my_solist.my_element_count = right.my_solist.my_element_count;
815
816 if (! right.my_solist.empty()){
817 nodeptr_t previous_node = my_solist.my_head;
818
819 // Move all elements one by one, including dummy ones
820 for (raw_const_iterator it = ++(right.my_solist.raw_begin()), last = right.my_solist.raw_end(); it != last; ++it)
821 {
822 const nodeptr_t pnode = it.get_node_ptr();
823 nodeptr_t node;
824 if (pnode->is_dummy()) {
825 node = my_solist.create_node(pnode->get_order_key());
826 size_type bucket = __TBB_ReverseBits(pnode->get_order_key()) % my_number_of_buckets;
827 set_bucket(bucket, node);
828 }else{
829 node = my_solist.create_node(pnode->get_order_key(), std::move(pnode->my_element));
830 }
831
832 previous_node = my_solist.try_insert_atomic(previous_node, node, NULL);
833 __TBB_ASSERT(previous_node != NULL, "Insertion of node failed. Concurrent inserts in constructor ?");
834 }
835 my_solist.check_range();
836 }
837 }
838
839 clear_buckets_on_exception.dismiss();
840 }
841
842#endif // __TBB_CPP11_RVALUE_REF_PRESENT
843
844 concurrent_unordered_base& operator=(const concurrent_unordered_base& right) {
845 if (this != &right)
846 internal_copy(right);
847 return (*this);
848 }
849
850#if __TBB_CPP11_RVALUE_REF_PRESENT
851 concurrent_unordered_base& operator=(concurrent_unordered_base&& other)
852 {
853 if(this != &other){
854 typedef typename tbb::internal::allocator_traits<allocator_type>::propagate_on_container_move_assignment pocma_t;
855 if(pocma_t::value || this->my_allocator == other.my_allocator) {
856 concurrent_unordered_base trash (std::move(*this));
857 swap(other);
858 if (pocma_t::value) {
859 using std::swap;
860 //TODO: swapping allocators here may be a problem, replace with single direction moving
861 swap(this->my_solist.my_node_allocator, other.my_solist.my_node_allocator);
862 swap(this->my_allocator, other.my_allocator);
863 }
864 } else {
865 concurrent_unordered_base moved_copy(std::move(other),this->my_allocator);
866 this->swap(moved_copy);
867 }
868 }
869 return *this;
870 }
871
872#endif // __TBB_CPP11_RVALUE_REF_PRESENT
873
874#if __TBB_INITIALIZER_LISTS_PRESENT
875 //! assignment operator from initializer_list
876 concurrent_unordered_base& operator=(std::initializer_list<value_type> il)
877 {
878 this->clear();
879 this->insert(il.begin(),il.end());
880 return (*this);
881 }
882#endif // __TBB_INITIALIZER_LISTS_PRESENT
883
884
885 ~concurrent_unordered_base() {
886 // Delete all node segments
887 internal_clear();
888 }
889
890#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
891 template<typename SourceType>
892 void internal_merge(SourceType& source) {
893 typedef typename SourceType::iterator source_iterator;
894 __TBB_STATIC_ASSERT((tbb::internal::is_same_type<node_type,
895 typename SourceType::node_type>::value),
896 "Incompatible containers cannot be merged");
897
898 for(source_iterator it = source.begin(); it != source.end();) {
899 source_iterator where = it++;
900 if (allow_multimapping || find(get_key(*where)) == end()) {
901 std::pair<node_type, raw_iterator> extract_result = source.internal_extract(where);
902
903 // Remember the old order key
904 sokey_t old_order_key = extract_result.first.my_node->get_order_key();
905
906 // If the insertion fails, it returns ownership of the node to extract_result.first
907 // extract_result.first remains valid node handle
908 if (!insert(std::move(extract_result.first)).second) {
909 raw_iterator next = extract_result.second;
910 raw_iterator current = next++;
911
912 // Revert order key to old value
913 extract_result.first.my_node->init(old_order_key);
914
915 __TBB_ASSERT(extract_result.first.my_node->get_order_key() >= current.get_node_ptr()->get_order_key(),
916 "Wrong nodes order in source container");
917 __TBB_ASSERT(next==source.my_solist.raw_end() ||
918 extract_result.first.my_node->get_order_key() <= next.get_node_ptr()->get_order_key(),
919 "Wrong nodes order in source container");
920
921 size_t new_count = 0;// To use try_insert()
922 bool insert_result =
923 source.my_solist.try_insert(current, next, extract_result.first.my_node, &new_count).second;
924 __TBB_ASSERT_EX(insert_result, "Return to source must be successful. "
925 "Changing source container while merging is unsafe.");
926 }
927 extract_result.first.deactivate();
928 }
929 }
930 }
931#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
932
933public:
934 allocator_type get_allocator() const {
935 return my_solist.get_allocator();
936 }
937
938 // Size and capacity function
939 bool empty() const {
940 return my_solist.empty();
941 }
942
943 size_type size() const {
944 return my_solist.size();
945 }
946
947 size_type max_size() const {
948 return my_solist.max_size();
949 }
950
951 // Iterators
952 iterator begin() {
953 return my_solist.begin();
954 }
955
956 const_iterator begin() const {
957 return my_solist.begin();
958 }
959
960 iterator end() {
961 return my_solist.end();
962 }
963
964 const_iterator end() const {
965 return my_solist.end();
966 }
967
968 const_iterator cbegin() const {
969 return my_solist.cbegin();
970 }
971
972 const_iterator cend() const {
973 return my_solist.cend();
974 }
975
976 // Parallel traversal support
977 class const_range_type : tbb::internal::no_assign {
978 const concurrent_unordered_base &my_table;
979 raw_const_iterator my_begin_node;
980 raw_const_iterator my_end_node;
981 mutable raw_const_iterator my_midpoint_node;
982 public:
983 //! Type for size of a range
984 typedef typename concurrent_unordered_base::size_type size_type;
985 typedef typename concurrent_unordered_base::value_type value_type;
986 typedef typename concurrent_unordered_base::reference reference;
987 typedef typename concurrent_unordered_base::difference_type difference_type;
988 typedef typename concurrent_unordered_base::const_iterator iterator;
989
990 //! True if range is empty.
991 bool empty() const {return my_begin_node == my_end_node;}
992
993 //! True if range can be partitioned into two subranges.
994 bool is_divisible() const {
995 return my_midpoint_node != my_end_node;
996 }
997 //! Split range.
998 const_range_type( const_range_type &r, split ) : my_table(r.my_table),
999 my_midpoint_node(r.my_midpoint_node), my_end_node(r.my_end_node)
1000 {
1001 r.my_end_node = my_begin_node = r.my_midpoint_node;
1002 __TBB_ASSERT( !empty(), "Splitting despite the range is not divisible" );
1003 __TBB_ASSERT( !r.empty(), "Splitting despite the range is not divisible" );
1004 set_midpoint();
1005 r.set_midpoint();
1006 }
1007 //! Init range with container and grainsize specified
1008 const_range_type( const concurrent_unordered_base &a_table ) :
1009 my_table(a_table), my_begin_node(a_table.my_solist.begin()),
1010 my_end_node(a_table.my_solist.end())
1011 {
1012 set_midpoint();
1013 }
1014
1015 iterator begin() const { return my_table.my_solist.get_iterator(my_begin_node); }
1016 iterator end() const { return my_table.my_solist.get_iterator(my_end_node); }
1017 size_type grainsize() const { return 1; }
1018
1019 //! Set my_midpoint_node to point approximately half way between my_begin_node and my_end_node.
1020 void set_midpoint() const {
1021 if( my_begin_node == my_end_node ) // not divisible
1022 my_midpoint_node = my_end_node;
1023 else {
1024 sokey_t begin_key = solist_t::get_safe_order_key(my_begin_node);
1025 sokey_t end_key = solist_t::get_safe_order_key(my_end_node);
1026 size_t mid_bucket = __TBB_ReverseBits( begin_key + (end_key-begin_key)/2 ) % my_table.my_number_of_buckets;
1027 while ( !my_table.is_initialized(mid_bucket) ) mid_bucket = my_table.get_parent(mid_bucket);
1028 if(__TBB_ReverseBits(mid_bucket) > begin_key) {
1029 // found a dummy_node between begin and end
1030 my_midpoint_node = my_table.my_solist.first_real_iterator(my_table.get_bucket(mid_bucket));
1031 }
1032 else {
1033 // didn't find a dummy node between begin and end.
1034 my_midpoint_node = my_end_node;
1035 }
1036#if TBB_USE_ASSERT
1037 {
1038 sokey_t mid_key = solist_t::get_safe_order_key(my_midpoint_node);
1039 __TBB_ASSERT( begin_key < mid_key, "my_begin_node is after my_midpoint_node" );
1040 __TBB_ASSERT( mid_key <= end_key, "my_midpoint_node is after my_end_node" );
1041 }
1042#endif // TBB_USE_ASSERT
1043 }
1044 }
1045 };
1046
1047 class range_type : public const_range_type {
1048 public:
1049 typedef typename concurrent_unordered_base::iterator iterator;
1050 //! Split range.
1051 range_type( range_type &r, split ) : const_range_type( r, split() ) {}
1052 //! Init range with container and grainsize specified
1053 range_type( const concurrent_unordered_base &a_table ) : const_range_type(a_table) {}
1054
1055 iterator begin() const { return solist_t::get_iterator( const_range_type::begin() ); }
1056 iterator end() const { return solist_t::get_iterator( const_range_type::end() ); }
1057 };
1058
1059 range_type range() {
1060 return range_type( *this );
1061 }
1062
1063 const_range_type range() const {
1064 return const_range_type( *this );
1065 }
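 // [Editorial sketch, not in the original header] range() is what makes the
 // containers built on this base work with tbb::parallel_for. Assuming a
 // tbb::concurrent_unordered_map 'm' and a hypothetical process() function:
 //
 //   tbb::parallel_for(m.range(), [](decltype(m.range()) &r) {
 //       for (auto it = r.begin(); it != r.end(); ++it)
 //           process(*it);
 //   });
 //
 // Splits are approximate: set_midpoint() lands on the dummy node of a bucket
 // near the middle of the order-key interval, so subranges respect buckets.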
1066
1067 // Modifiers
1068 std::pair<iterator, bool> insert(const value_type& value) {
1069 return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1070 /*AllowDestroy=*/tbb::internal::true_type>(value);
1071 }
1072
1073 iterator insert(const_iterator, const value_type& value) {
1074 // Ignore hint
1075 return insert(value).first;
1076 }
1077
1078#if __TBB_CPP11_RVALUE_REF_PRESENT
1079 std::pair<iterator, bool> insert(value_type&& value) {
1080 return internal_insert</*AllowCreate=*/tbb::internal::true_type,
1081 /*AllowDestroy=*/tbb::internal::true_type>(std::move(value));
1082 }
1083
1084 iterator insert(const_iterator, value_type&& value) {
1085 // Ignore hint
1086 return insert(std::move(value)).first;
1087 }
1088#endif /*__TBB_CPP11_RVALUE_REF_PRESENT*/
1089
1090#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1091 std::pair<iterator, bool> insert(node_type&& nh) {
1092 if (!nh.empty()) {
1093 nodeptr_t handled_node = nh.my_node;
1094 std::pair<iterator, bool> insert_result =
1095 internal_insert</*AllowCreate=*/tbb::internal::false_type,
1096 /*AllowDestroy=*/tbb::internal::false_type>
1097 (handled_node->my_element, handled_node);
1098 if (insert_result.second)
1099 nh.deactivate();
1100 return insert_result;
1101 }
1102 return std::pair<iterator, bool>(end(), false);
1103 }
1104
1105 iterator insert(const_iterator, node_type&& nh) {
1106 return insert(std::move(nh)).first;
1107 }
1108#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1109
1110#if __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
1111 template<typename... Args>
1112 std::pair<iterator, bool> emplace(Args&&... args) {
1113 nodeptr_t pnode = my_solist.create_node_v(tbb::internal::forward<Args>(args)...);
1114
1115 return internal_insert</*AllowCreate=*/tbb::internal::false_type,
1116 /*AllowDestroy=*/tbb::internal::true_type>(pnode->my_element, pnode);
1117 }
1118
1119 template<typename... Args>
1120 iterator emplace_hint(const_iterator, Args&&... args) {
1121 // Ignore hint
1122 return emplace(tbb::internal::forward<Args>(args)...).first;
1123 }
1124#endif // __TBB_CPP11_VARIADIC_TEMPLATES_PRESENT && __TBB_CPP11_RVALUE_REF_PRESENT
1125
1126
1127 template<class Iterator>
1128 void insert(Iterator first, Iterator last) {
1129 for (Iterator it = first; it != last; ++it)
1130 insert(*it);
1131 }
1132
1133#if __TBB_INITIALIZER_LISTS_PRESENT
1134 //! Insert initializer list
1135 void insert(std::initializer_list<value_type> il) {
1136 insert(il.begin(), il.end());
1137 }
1138#endif
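 // [Editorial sketch, not in the original header] Typical use of the insert
 // family through a derived container, e.g. tbb::concurrent_unordered_map:
 //
 //   tbb::concurrent_unordered_map<int, int> m;
 //   auto r1 = m.insert(std::make_pair(1, 10)); // std::pair<iterator, bool>
 //   auto r2 = m.emplace(2, 20);                // constructs value in place
 //   m.insert({{3, 30}, {4, 40}});              // initializer-list overload
 //
 // All of these can run concurrently with other inserts and lookups; only
 // the unsafe_* members below require external serialization.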
1139
1140 iterator unsafe_erase(const_iterator where) {
1141 return internal_erase(where);
1142 }
1143
1144 iterator unsafe_erase(const_iterator first, const_iterator last) {
1145 while (first != last)
1146 unsafe_erase(first++);
1147 return my_solist.get_iterator(first);
1148 }
1149
1151 pairii_t where = equal_range(key);
1152 size_type item_count = internal_distance(where.first, where.second);
1153 unsafe_erase(where.first, where.second);
1154 return item_count;
1155 }
1156
1157#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1158 node_type unsafe_extract(const_iterator where) {
1159 return internal_extract(where).first;
1160 }
1161
1162 node_type unsafe_extract(const key_type& key) {
1163 pairii_t where = equal_range(key);
1164 if (where.first == end()) return node_type(); // element was not found
1165 return internal_extract(where.first).first;
1166 }
1167#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1168
1169 void swap(concurrent_unordered_base& right) {
1170 if (this != &right) {
1171 std::swap(my_hash_compare, right.my_hash_compare);
1172 my_solist.swap(right.my_solist);
1173 internal_swap_buckets(right);
1174 std::swap(my_number_of_buckets, right.my_number_of_buckets);
1175 std::swap(my_maximum_bucket_size, right.my_maximum_bucket_size);
1176 }
1177 }
1178
1179 // Observers
1180 hasher hash_function() const {
1181 return my_hash_compare.my_hash_object;
1182 }
1183
1184 key_equal key_eq() const {
1185 return my_hash_compare.my_key_compare_object;
1186 }
1187
1188 void clear() {
1189 // Clear list
1190 my_solist.clear();
1191
1192 // Clear buckets
1193 internal_clear();
1194
1195 // Initialize bucket 0
1196 __TBB_ASSERT(my_buckets[0] == NULL, NULL);
1197 raw_iterator dummy_node = my_solist.raw_begin();
1198 set_bucket(0, dummy_node);
1199 }
1200
1201 // Lookup
1202 iterator find(const key_type& key) {
1203 return internal_find(key);
1204 }
1205
1206 const_iterator find(const key_type& key) const {
1207 return const_cast<self_type*>(this)->internal_find(key);
1208 }
1209
1210 size_type count(const key_type& key) const {
1211 if(allow_multimapping) {
1212 paircc_t answer = equal_range(key);
1213 size_type item_count = internal_distance(answer.first, answer.second);
1214 return item_count;
1215 } else {
1216 return const_cast<self_type*>(this)->internal_find(key) == end()?0:1;
1217 }
1218 }
1219
1220 std::pair<iterator, iterator> equal_range(const key_type& key) {
1221 return internal_equal_range(key);
1222 }
1223
1224 std::pair<const_iterator, const_iterator> equal_range(const key_type& key) const {
1225 return const_cast<self_type*>(this)->internal_equal_range(key);
1226 }
1227
1228 // Bucket interface - for debugging
1229 size_type unsafe_bucket_count() const {
1230 return my_number_of_buckets;
1231 }
1232
1233 size_type unsafe_max_bucket_count() const {
1234 return segment_size(pointers_per_table-1);
1235 }
1236
1237 size_type unsafe_bucket_size(size_type bucket) {
1238 size_type item_count = 0;
1239 if (is_initialized(bucket)) {
1240 raw_iterator it = get_bucket(bucket);
1241 ++it;
1242 for (; it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy(); ++it)
1243 ++item_count;
1244 }
1245 return item_count;
1246 }
1247
1248 size_type unsafe_bucket(const key_type& key) const {
1249 sokey_t order_key = (sokey_t) my_hash_compare(key);
1250 size_type bucket = order_key % my_number_of_buckets;
1251 return bucket;
1252 }
1253
1254 // If the bucket is initialized, return a first non-dummy element in it
1255 local_iterator unsafe_begin(size_type bucket) {
1256 if (!is_initialized(bucket))
1257 return end();
1258
1259 raw_iterator it = get_bucket(bucket);
1260 return my_solist.first_real_iterator(it);
1261 }
1262
1263 // If the bucket is initialized, return a first non-dummy element in it
1264 const_local_iterator unsafe_begin(size_type bucket) const
1265 {
1266 if (!is_initialized(bucket))
1267 return end();
1268
1269 raw_const_iterator it = get_bucket(bucket);
1270 return my_solist.first_real_iterator(it);
1271 }
1272
1273 // @REVIEW: Takes O(n)
1274 // Returns the iterator after the last non-dummy element in the bucket
1275 local_iterator unsafe_end(size_type bucket)
1276 {
1277 if (!is_initialized(bucket))
1278 return end();
1279
1280 raw_iterator it = get_bucket(bucket);
1281
1282 // Find the end of the bucket, denoted by the dummy element
1283 do ++it;
1284 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1285
1286 // Return the first real element past the end of the bucket
1287 return my_solist.first_real_iterator(it);
1288 }
1289
1290 // @REVIEW: Takes O(n)
1291 // Returns the iterator after the last non-dummy element in the bucket
1292 const_local_iterator unsafe_end(size_type bucket) const
1293 {
1294 if (!is_initialized(bucket))
1295 return end();
1296
1297 raw_const_iterator it = get_bucket(bucket);
1298
1299 // Find the end of the bucket, denoted by the dummy element
1300 do ++it;
1301 while(it != my_solist.raw_end() && !it.get_node_ptr()->is_dummy());
1302
1303 // Return the first real element past the end of the bucket
1304 return my_solist.first_real_iterator(it);
1305 }
1306
1307 const_local_iterator unsafe_cbegin(size_type bucket) const {
1308 return ((const self_type *) this)->unsafe_begin(bucket);
1309 }
1310
1311 const_local_iterator unsafe_cend(size_type bucket) const {
1312 return ((const self_type *) this)->unsafe_end(bucket);
1313 }
1314
1315 // Hash policy
1316 float load_factor() const {
1317 return (float) size() / (float) unsafe_bucket_count();
1318 }
1319
1320 float max_load_factor() const {
1321 return my_maximum_bucket_size;
1322 }
1323
1324 void max_load_factor(float newmax) {
1325 if (newmax != newmax || newmax < 0)
1326 tbb::internal::throw_exception(tbb::internal::eid_invalid_load_factor);
1327 my_maximum_bucket_size = newmax;
1328 }
1329
1330 // This function is a noop, because the underlying split-ordered list
1331 // is already sorted, so an increase in the bucket number will be
1332 // reflected next time this bucket is touched.
1333 void rehash(size_type buckets) {
1334 size_type current_buckets = my_number_of_buckets;
1335 if (current_buckets >= buckets)
1336 return;
1337 my_number_of_buckets = size_type(1)<<__TBB_Log2((uintptr_t)buckets*2-1); // round up to power of 2
1338 }
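 // [Editorial sketch, not in the original header] Worked example of the
 // rounding above: rehash(6) computes 6*2-1 = 11, __TBB_Log2(11) = 3, and
 // size_type(1)<<3 = 8, so the bucket count becomes the next power of two
 // and never shrinks. No node is moved here; new buckets materialize lazily
 // via init_bucket() the first time they are touched.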
1339
1340private:
1341
1342 // Initialize the hash and keep the first bucket open
1343 void internal_init() {
1344 // Initialize the array of segment pointers
1345 memset(my_buckets, 0, sizeof(my_buckets));
1346
1347 // Initialize bucket 0
1348 raw_iterator dummy_node = my_solist.raw_begin();
1349 set_bucket(0, dummy_node);
1350 }
1351
1352 void internal_clear() {
1353 for (size_type index = 0; index < pointers_per_table; ++index) {
1354 if (my_buckets[index] != NULL) {
1355 size_type sz = segment_size(index);
1356 for (size_type index2 = 0; index2 < sz; ++index2)
1357 my_allocator.destroy(&my_buckets[index][index2]);
1358 my_allocator.deallocate(my_buckets[index], sz);
1359 my_buckets[index] = 0;
1360 }
1361 }
1362 }
1363
1364 void internal_copy(const self_type& right) {
1365 clear();
1366
1367 my_maximum_bucket_size = right.my_maximum_bucket_size;
1368 my_number_of_buckets = right.my_number_of_buckets;
1369
1370 __TBB_TRY {
1371 insert(right.begin(), right.end());
1372 my_hash_compare = right.my_hash_compare;
1373 } __TBB_CATCH(...) {
1374 my_solist.clear();
1375 __TBB_RETHROW();
1376 }
1377 }
1378
1379 void internal_swap_buckets(concurrent_unordered_base& right)
1380 {
1381 // Swap all node segments
1382 for (size_type index = 0; index < pointers_per_table; ++index)
1383 {
1384 raw_iterator * iterator_pointer = my_buckets[index];
1385 my_buckets[index] = right.my_buckets[index];
1386 right.my_buckets[index] = iterator_pointer;
1387 }
1388 }
1389
1390 //TODO: why not use std::distance?
1391 // Hash APIs
1392 static size_type internal_distance(const_iterator first, const_iterator last)
1393 {
1394 size_type num = 0;
1395
1396 for (const_iterator it = first; it != last; ++it)
1397 ++num;
1398
1399 return num;
1400 }
1401
1402 // Insert an element in the hash given its value
1403 template<typename AllowCreate, typename AllowDestroy, typename ValueType>
1404 std::pair<iterator, bool> internal_insert(__TBB_FORWARDING_REF(ValueType) value, nodeptr_t pnode = NULL)
1405 {
1406 const key_type *pkey = &get_key(value);
1407 sokey_t hash_key = (sokey_t) my_hash_compare(*pkey);
1408 size_type new_count = 0;
1409 sokey_t order_key = split_order_key_regular(hash_key);
1410 raw_iterator previous = prepare_bucket(hash_key);
1411 raw_iterator last = my_solist.raw_end();
1412 __TBB_ASSERT(previous != last, "Invalid head node");
1413
1414 if (pnode) {
1415 // Set new order_key to node
1416 pnode->init(order_key);
1417 }
1418
1419 // First node is a dummy node
1420 for (raw_iterator where = previous;;)
1421 {
1422 ++where;
1423 if (where == last || solist_t::get_order_key(where) > order_key ||
1424 // if multimapped, stop at the first item equal to us.
1425 (allow_multimapping && solist_t::get_order_key(where) == order_key &&
1426 !my_hash_compare(get_key(*where), *pkey))) // TODO: fix negation
1427 {
1428 if (!pnode) {
1429 pnode = my_solist.create_node(order_key, tbb::internal::forward<ValueType>(value), AllowCreate());
1430 // If the value was moved, the known reference to key might be invalid
1431 pkey = &get_key(pnode->my_element);
1432 }
1433
1434 // Try to insert 'pnode' between 'previous' and 'where'
1435 std::pair<iterator, bool> result = my_solist.try_insert(previous, where, pnode, &new_count);
1436
1437 if (result.second)
1438 {
1439 // Insertion succeeded, adjust the table size, if needed
1440 adjust_table_size(new_count, my_number_of_buckets);
1441 return result;
1442 }
1443 else
1444 {
1445 // Insertion failed: either the same node was inserted by another thread, or
1446 // another element was inserted at exactly the same place as this node.
1447 // Proceed with the search from the previous location where order key was
1448 // known to be larger (note: this is legal only because there is no safe
1449 // concurrent erase operation supported).
1450 where = previous;
1451 continue;
1452 }
1453 }
1454 else if (!allow_multimapping && solist_t::get_order_key(where) == order_key &&
1455 !my_hash_compare(get_key(*where), *pkey)) // TODO: fix negation
1456 { // Element already in the list, return it
1457 if (pnode && AllowDestroy::value)
1458 my_solist.destroy_node(pnode);
1459 return std::pair<iterator, bool>(my_solist.get_iterator(where), false);
1460 }
1461 // Move the iterator forward
1462 previous = where;
1463 }
1464 }
1465
1466 // Find the element in the split-ordered list
1467 iterator internal_find(const key_type& key)
1468 {
1469 sokey_t hash_key = (sokey_t) my_hash_compare(key);
1470 sokey_t order_key = split_order_key_regular(hash_key);
1471 raw_iterator last = my_solist.raw_end();
1472
1473 for (raw_iterator it = prepare_bucket(hash_key); it != last; ++it)
1474 {
1475 if (solist_t::get_order_key(it) > order_key)
1476 {
1477 // If the order key is smaller than the current order key, the element
1478 // is not in the hash.
1479 return end();
1480 }
1481 else if (solist_t::get_order_key(it) == order_key)
1482 {
1483 // The fact that order keys match does not mean that the element is found.
1484 // Key function comparison has to be performed to check whether this is the
1485 // right element. If not, keep searching while order key is the same.
1486 if (!my_hash_compare(get_key(*it), key)) // TODO: fix negation
1487 return my_solist.get_iterator(it);
1488 }
1489 }
1490
1491 return end();
1492 }
1493
1494 // Erase an element from the list. This is not a concurrency safe function.
1495 iterator internal_erase(const_iterator it)
1496 {
1497 sokey_t hash_key = (sokey_t) my_hash_compare(get_key(*it));
1498 raw_iterator previous = prepare_bucket(hash_key);
1499 raw_iterator last = my_solist.raw_end();
1500 __TBB_ASSERT(previous != last, "Invalid head node");
1501
1502 // First node is a dummy node
1503 for (raw_iterator where = previous; where != last; previous = where) {
1504 ++where;
1505 if (my_solist.get_iterator(where) == it)
1506 return my_solist.erase_node(previous, it);
1507 }
1508 return end();
1509 }
1510
1511#if __TBB_UNORDERED_NODE_HANDLE_PRESENT
1512 std::pair<node_type, raw_iterator> internal_extract(const_iterator it) {
1513 sokey_t hash_key = sokey_t(my_hash_compare(get_key(*it)));
1514 raw_iterator previous = prepare_bucket(hash_key);
1515 raw_iterator last = my_solist.raw_end();
1516 __TBB_ASSERT(previous != last, "Invalid head node");
1517
1518 for(raw_iterator where = previous; where != last; previous = where) {
1519 ++where;
1520 if (my_solist.get_iterator(where) == it) {
1521 const_iterator result = it;
1522 my_solist.erase_node(previous, it, /*allow_destroy*/tbb::internal::false_type());
1523 return std::pair<node_type, raw_iterator>( node_type(result.get_node_ptr()),
1524 previous);
1525 }
1526 }
1527 return std::pair<node_type, iterator>(node_type(), end());
1528 }
1529#endif // __TBB_UNORDERED_NODE_HANDLE_PRESENT
1530
1531 // Return the [begin, end) pair of iterators with the same key values.
1532 // This operation makes sense only if mapping is many-to-one.
1533 pairii_t internal_equal_range(const key_type& key)
1534 {
1535 sokey_t hash_key = (sokey_t) my_hash_compare(key);
1536 sokey_t order_key = split_order_key_regular(hash_key);
1537 raw_iterator end_it = my_solist.raw_end();
1538
1539 for (raw_iterator it = prepare_bucket(hash_key); it != end_it; ++it)
1540 {
1541 if (solist_t::get_order_key(it) > order_key)
1542 {
1543 // There is no element with the given key
1544 return pairii_t(end(), end());
1545 }
1546 else if (solist_t::get_order_key(it) == order_key &&
1547 !my_hash_compare(get_key(*it), key)) // TODO: fix negation; also below
1548 {
1549 iterator first = my_solist.get_iterator(it);
1550 iterator last = first;
1551 do ++last; while( allow_multimapping && last != end() && !my_hash_compare(get_key(*last), key) );
1552 return pairii_t(first, last);
1553 }
1554 }
1555
1556 return pairii_t(end(), end());
1557 }
1558
1559 // Bucket APIs
1560 void init_bucket(size_type bucket)
1561 {
1562 // Bucket 0 has no parent.
1563 __TBB_ASSERT( bucket != 0, "The first bucket must always be initialized");
1564
1565 size_type parent_bucket = get_parent(bucket);
1566
1567 // All parent_bucket buckets have to be initialized before this bucket is
1568 if (!is_initialized(parent_bucket))
1569 init_bucket(parent_bucket);
1570
1571 raw_iterator parent = get_bucket(parent_bucket);
1572
1573 // Create a dummy first node in this bucket
1574 raw_iterator dummy_node = my_solist.insert_dummy(parent, split_order_key_dummy(bucket));
1575 set_bucket(bucket, dummy_node);
1576 }
1577
1578 void adjust_table_size(size_type total_elements, size_type current_size)
1579 {
1580 // Grow the table by a factor of 2 if possible and needed
1581 if ( ((float) total_elements / (float) current_size) > my_maximum_bucket_size )
1582 {
1583 // Double the size of the hash only if size has not changed in between loads
1584 my_number_of_buckets.compare_and_swap(2u*current_size, current_size);
1585 //Simple "my_number_of_buckets.compare_and_swap( current_size<<1, current_size );" does not work for VC8
1586 //due to overzealous compiler warnings in /Wp64 mode
1587 }
1588 }
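 // [Editorial sketch, not in the original header] Numeric example: with the
 // default my_maximum_bucket_size of 4 (initial_bucket_load) and
 // current_size == 8, the table doubles once the element count exceeds 32,
 // since 33/8.0 > 4. The compare_and_swap keeps two racing threads from
 // doubling the bucket count twice for the same observation.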
1589
1590 size_type get_parent(size_type bucket) const
1591 {
1592 // Unsets bucket's most significant turned-on bit
1593 size_type msb = __TBB_Log2((uintptr_t)bucket);
1594 return bucket & ~(size_type(1) << msb);
1595 }
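 // [Editorial sketch, not in the original header] Examples: get_parent(6)
 // clears the top set bit of 0b110, giving 0b010 = 2; get_parent(5) = 1;
 // any power of two maps to 0. This mirrors split ordering: bucket b is
 // "split off" from bucket b - 2^msb(b) whenever the table doubles, so the
 // parent's dummy node is always a valid search start for the child.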
1596
1597
1598 // Dynamic sized array (segments)
1599 //! @return segment index of given index in the array
1600 static size_type segment_index_of( size_type index ) {
1601 return size_type( __TBB_Log2( uintptr_t(index|1) ) );
1602 }
1603
1604 //! @return the first array index of given segment
1605 static size_type segment_base( size_type k ) {
1606 return (size_type(1)<<k & ~size_type(1));
1607 }
1608
1609 //! @return segment size
1610 static size_type segment_size( size_type k ) {
1611 return k? size_type(1)<<k : 2;
1612 }
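 // [Editorial sketch, not in the original header] The three helpers above
 // form the usual lazily allocated two-level table:
 //
 //   buckets 0-1  -> segment 0 (base 0, size 2)
 //   buckets 2-3  -> segment 1 (base 2, size 2)
 //   buckets 4-7  -> segment 2 (base 4, size 4)
 //   buckets 8-15 -> segment 3 (base 8, size 8)
 //
 // so my_buckets[segment][bucket - segment_base(segment)] reaches any bucket
 // and previously allocated segments never have to be relocated.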
1613
1614 raw_iterator get_bucket(size_type bucket) const {
1615 size_type segment = segment_index_of(bucket);
1616 bucket -= segment_base(segment);
1617 __TBB_ASSERT( my_buckets[segment], "bucket must be in an allocated segment" );
1618 return my_buckets[segment][bucket];
1619 }
1620
1621 raw_iterator prepare_bucket(sokey_t hash_key) {
1622 size_type bucket = hash_key % my_number_of_buckets;
1623 size_type segment = segment_index_of(bucket);
1624 size_type index = bucket - segment_base(segment);
1625 if (my_buckets[segment] == NULL || my_buckets[segment][index].get_node_ptr() == NULL)
1626 init_bucket(bucket);
1627 return my_buckets[segment][index];
1628 }
1629
1630 void set_bucket(size_type bucket, raw_iterator dummy_head) {
1631 size_type segment = segment_index_of(bucket);
1632 bucket -= segment_base(segment);
1633
1634 if (my_buckets[segment] == NULL) {
1635 size_type sz = segment_size(segment);
1636 raw_iterator * new_segment = my_allocator.allocate(sz);
1637 std::memset(static_cast<void*>(new_segment), 0, sz*sizeof(raw_iterator));
1638
1639 if (my_buckets[segment].compare_and_swap( new_segment, NULL) != NULL)
1640 my_allocator.deallocate(new_segment, sz);
1641 }
1642
1643 my_buckets[segment][bucket] = dummy_head;
1644 }
1645
1646 bool is_initialized(size_type bucket) const {
1647 size_type segment = segment_index_of(bucket);
1648 bucket -= segment_base(segment);
1649
1650 if (my_buckets[segment] == NULL)
1651 return false;
1652
1653 raw_iterator it = my_buckets[segment][bucket];
1654 return (it.get_node_ptr() != NULL);
1655 }
1656
1657 // Utilities for keys
1658
1659 // A regular order key has its original hash value reversed and the last bit set
1660 static sokey_t split_order_key_regular(sokey_t order_key) {
1661 return __TBB_ReverseBits(order_key) | 0x1;
1662 }
1663
1664 // A dummy order key has its original hash value reversed and the last bit unset
1665 static sokey_t split_order_key_dummy(sokey_t order_key) {
1666 return __TBB_ReverseBits(order_key) & ~sokey_t(0x1);
1667 }
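 // [Editorial sketch, not in the original header] End-to-end example with
 // 8-bit keys: an element whose hash is 0b00000101 gets the regular key
 // reverse(0b00000101) | 1 = 0b10100001, while bucket 5's dummy key is
 // reverse(0b00000101) & ~1 = 0b10100000. The dummy thus sorts directly in
 // front of its bucket's elements, which is why prepare_bucket() can start
 // every search at the bucket's dummy node.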
1668
1669 // Shared variables
1670 atomic<size_type> my_number_of_buckets; // Current table size
1671 solist_t my_solist; // List where all the elements are kept
1672 typename tbb::internal::allocator_rebind<allocator_type, raw_iterator>::type my_allocator; // Allocator object for segments
1673 float my_maximum_bucket_size; // Maximum size of the bucket
1674 atomic<raw_iterator*> my_buckets[pointers_per_table]; // The segment table
1675};
1676#if defined(_MSC_VER) && !defined(__INTEL_COMPILER)
1677#pragma warning(pop) // warning 4127 is back
1678#endif
1679
1680} // namespace internal
1681//! @endcond
1682} // namespace interface5
1683} // namespace tbb
1684#endif // __TBB__concurrent_unordered_impl_H