Intel(R) Threading Building Blocks Doxygen Documentation version 4.2.3
tbb::strict_ppl::internal::micro_queue< T > Class Template Reference

A queue using simple locking. More...

#include <_concurrent_queue_impl.h>

Inheritance diagram for tbb::strict_ppl::internal::micro_queue< T >:
Collaboration diagram for tbb::strict_ppl::internal::micro_queue< T >:

Classes

class  destroyer
 Class used to ensure exception-safety of method "pop". More...
 
struct  padded_page
 

Public Types

typedef void(* item_constructor_t) (T *location, const void *src)
 

Public Member Functions

void push (const void *item, ticket k, concurrent_queue_base_v3< T > &base, item_constructor_t construct_item)
 
bool pop (void *dst, ticket k, concurrent_queue_base_v3< T > &base)
 
micro_queue & assign (const micro_queue &src, concurrent_queue_base_v3< T > &base, item_constructor_t construct_item)
 
page * make_copy (concurrent_queue_base_v3< T > &base, const page *src_page, size_t begin_in_page, size_t end_in_page, ticket &g_index, item_constructor_t construct_item)
 
void invalidate_page_and_rethrow (ticket k)
 

Static Public Member Functions

static T & get_ref (page &p, size_t index)
 

Public Attributes

atomic< page * > head_page
 
atomic< ticket > head_counter
 
atomic< page * > tail_page
 
atomic< ticket > tail_counter
 
spin_mutex page_mutex
 

Private Types

typedef concurrent_queue_rep_base::page page
 

Private Member Functions

void copy_item (page &dst, size_t dindex, const void *src, item_constructor_t construct_item)
 
void copy_item (page &dst, size_t dindex, const page &src, size_t sindex, item_constructor_t construct_item)
 
void assign_and_destroy_item (void *dst, page &src, size_t index)
 
void spin_wait_until_my_turn (atomic< ticket > &counter, ticket k, concurrent_queue_rep_base &rb) const
 
- Private Member Functions inherited from tbb::internal::no_copy
 no_copy (const no_copy &)=delete
 
 no_copy ()=default
 

Friends

class micro_queue_pop_finalizer< T >
 

Detailed Description

template<typename T>
class tbb::strict_ppl::internal::micro_queue< T >

A queue using simple locking.

For efficiency, this class has no constructor. The caller is expected to zero-initialize it.
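
For illustration, a minimal sketch of what the zero-initialization requirement means in practice, assuming the caller reserves raw storage and clears it before treating it as a micro_queue (the helper name zeroed_micro_queue_storage is hypothetical, not part of TBB):

// Minimal sketch, not TBB code: satisfying the "zero-initialize before use"
// requirement of a constructor-less type such as micro_queue<T>.
#include <cstring>

template<typename Q>
Q* zeroed_micro_queue_storage( void* raw ) {
    std::memset( raw, 0, sizeof(Q) );  // head_page/tail_page become NULL, counters become 0
    return static_cast<Q*>( raw );     // no constructor runs; the class deliberately has none
}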

Definition at line 131 of file _concurrent_queue_impl.h.

Member Typedef Documentation

◆ item_constructor_t

template<typename T >
typedef void(* tbb::strict_ppl::internal::micro_queue< T >::item_constructor_t) (T *location, const void *src)

Definition at line 133 of file _concurrent_queue_impl.h.
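
For illustration, a callback matching this typedef would typically placement-construct a T at location from the source item; a minimal sketch, assuming a plain copy construction (the name copy_construct_item is hypothetical, not the callback TBB itself supplies):

// Minimal sketch of a function usable as item_constructor_t: copy-construct a T
// into the queue's storage slot from the source item.
#include <new>

template<typename T>
void copy_construct_item( T* location, const void* src ) {
    new (location) T( *static_cast<const T*>(src) );
}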

◆ page

template<typename T >
typedef concurrent_queue_rep_base::page tbb::strict_ppl::internal::micro_queue< T >::page
private

Definition at line 135 of file _concurrent_queue_impl.h.

Member Function Documentation

◆ assign()

template<typename T >
micro_queue< T > & tbb::strict_ppl::internal::micro_queue< T >::assign ( const micro_queue< T > &  src,
concurrent_queue_base_v3< T > &  base,
item_constructor_t  construct_item 
)

Definition at line 286 of file _concurrent_queue_impl.h.

{
    head_counter = src.head_counter;
    tail_counter = src.tail_counter;

    const page* srcp = src.head_page;
    if( is_valid_page(srcp) ) {
        ticket g_index = head_counter;
        __TBB_TRY {
            // Number of items currently held by the source queue.
            size_t n_items = (tail_counter-head_counter)/concurrent_queue_rep_base::n_queue;
            size_t index = modulo_power_of_two( head_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page );
            size_t end_in_first_page = (index+n_items<base.my_rep->items_per_page)?(index+n_items):base.my_rep->items_per_page;

            head_page = make_copy( base, srcp, index, end_in_first_page, g_index, construct_item );
            page* cur_page = head_page;

            if( srcp != src.tail_page ) {
                for( srcp = srcp->next; srcp!=src.tail_page; srcp=srcp->next ) {
                    cur_page->next = make_copy( base, srcp, 0, base.my_rep->items_per_page, g_index, construct_item );
                    cur_page = cur_page->next;
                }

                __TBB_ASSERT( srcp==src.tail_page, NULL );
                size_t last_index = modulo_power_of_two( tail_counter/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page );
                if( last_index==0 ) last_index = base.my_rep->items_per_page;

                cur_page->next = make_copy( base, srcp, 0, last_index, g_index, construct_item );
                cur_page = cur_page->next;
            }
            tail_page = cur_page;
        } __TBB_CATCH (...) {
            invalidate_page_and_rethrow( g_index );
        }
    } else {
        head_page = tail_page = NULL;
    }
    return *this;
}

References __TBB_ASSERT, __TBB_CATCH, __TBB_TRY, tbb::strict_ppl::internal::micro_queue< T >::head_counter, tbb::strict_ppl::internal::micro_queue< T >::head_page, tbb::strict_ppl::internal::is_valid_page(), tbb::internal::modulo_power_of_two(), tbb::strict_ppl::internal::concurrent_queue_base_v3< T >::my_rep, tbb::strict_ppl::internal::concurrent_queue_rep_base::n_queue, tbb::strict_ppl::internal::concurrent_queue_rep_base::page::next, tbb::strict_ppl::internal::micro_queue< T >::tail_counter, and tbb::strict_ppl::internal::micro_queue< T >::tail_page.


◆ assign_and_destroy_item()

template<typename T >
void tbb::strict_ppl::internal::micro_queue< T >::assign_and_destroy_item ( void *  dst,
page &  src,
size_t  index 
)
inline private

Definition at line 156 of file _concurrent_queue_impl.h.

{
    T& from = get_ref(src,index);
    destroyer d(from);
    *static_cast<T*>(dst) = tbb::internal::move( from );
}

References d, tbb::strict_ppl::internal::micro_queue< T >::get_ref(), and tbb::move().


◆ copy_item() [1/2]

template<typename T >
void tbb::strict_ppl::internal::micro_queue< T >::copy_item ( page &  dst,
size_t  dindex,
const page &  src,
size_t  sindex,
item_constructor_t  construct_item 
)
inline private

Definition at line 149 of file _concurrent_queue_impl.h.

{
    T& src_item = get_ref( const_cast<page&>(src), sindex );
    construct_item( &get_ref(dst, dindex), static_cast<const void*>(&src_item) );
}

References tbb::strict_ppl::internal::micro_queue< T >::get_ref().


◆ copy_item() [2/2]

template<typename T >
void tbb::strict_ppl::internal::micro_queue< T >::copy_item ( page &  dst,
size_t  dindex,
const void *  src,
item_constructor_t  construct_item 
)
inline private

Definition at line 145 of file _concurrent_queue_impl.h.

{
    construct_item( &get_ref(dst, dindex), src );
}

References tbb::strict_ppl::internal::micro_queue< T >::get_ref().


◆ get_ref()

template<typename T >
static T & tbb::strict_ppl::internal::micro_queue< T >::get_ref ( page &  p,
size_t  index 
)
inline static

Definition at line 176 of file _concurrent_queue_impl.h.

{
    return (&static_cast<padded_page*>(static_cast<void*>(&p))->last)[index];
}

References tbb::internal::last(), and p.

Referenced by tbb::strict_ppl::internal::micro_queue< T >::assign_and_destroy_item(), tbb::strict_ppl::internal::micro_queue< T >::copy_item(), and tbb::strict_ppl::internal::concurrent_queue_iterator_rep< T >::get_item().
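
get_ref works because a page's items are laid out immediately after the page header: padded_page exposes that layout, and its member last occupies the first item slot, so (&last)[index] addresses slot index. A rough sketch of the layout, shown as an approximation of the padded_page declared above rather than a verbatim copy of the header:

// Rough sketch of the padded_page layout that get_ref relies on.
template<typename T>
struct padded_page_sketch : concurrent_queue_rep_base::page {
    padded_page_sketch();                         // never defined: pages are allocated as raw memory
    void operator=( const padded_page_sketch& );  // never defined: pages are never assigned
    T last;                                       // must be the last member: item slot 0 starts here
};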


◆ invalidate_page_and_rethrow()

template<typename T >
void tbb::strict_ppl::internal::micro_queue< T >::invalidate_page_and_rethrow ( ticket  k)

Definition at line 327 of file _concurrent_queue_impl.h.

{
    // Append an invalid page at address 1 so that no more pushes are allowed.
    page* invalid_page = (page*)uintptr_t(1);
    {
        spin_mutex::scoped_lock lock( page_mutex );
        // Store an odd value into tail_counter so that waiting pushers detect the failure.
        itt_store_word_with_release( tail_counter, k+concurrent_queue_rep_base::n_queue+1 );
        page* q = tail_page;
        if( is_valid_page(q) )
            q->next = invalid_page;
        else
            head_page = invalid_page;
        tail_page = invalid_page;
    }
    __TBB_RETHROW();
}

References __TBB_RETHROW, tbb::strict_ppl::internal::is_valid_page(), tbb::internal::itt_store_word_with_release(), lock, tbb::strict_ppl::internal::concurrent_queue_rep_base::n_queue, and tbb::strict_ppl::internal::concurrent_queue_rep_base::page::next.


◆ make_copy()

template<typename T >
concurrent_queue_rep_base::page * tbb::strict_ppl::internal::micro_queue< T >::make_copy ( concurrent_queue_base_v3< T > &  base,
const page *  src_page,
size_t  begin_in_page,
size_t  end_in_page,
ticket &  g_index,
item_constructor_t  construct_item 
)

Definition at line 344 of file _concurrent_queue_impl.h.

{
    concurrent_queue_page_allocator& pa = base;
    page* new_page = pa.allocate_page();
    new_page->next = NULL;
    new_page->mask = src_page->mask;
    for( ; begin_in_page!=end_in_page; ++begin_in_page, ++g_index )
        if( new_page->mask & uintptr_t(1)<<begin_in_page )
            copy_item( *new_page, begin_in_page, *src_page, begin_in_page, construct_item );
    return new_page;
}

References tbb::strict_ppl::internal::concurrent_queue_page_allocator::allocate_page(), tbb::strict_ppl::internal::concurrent_queue_rep_base::page::mask, and tbb::strict_ppl::internal::concurrent_queue_rep_base::page::next.


◆ pop()

template<typename T >
bool tbb::strict_ppl::internal::micro_queue< T >::pop ( void *  dst,
ticket  k,
concurrent_queue_base_v3< T > &  base 
)

Definition at line 263 of file _concurrent_queue_impl.h.

{
    k &= -concurrent_queue_rep_base::n_queue;
    // Wait until it is our turn to pop and until the matching push has completed.
    if( head_counter!=k ) spin_wait_until_eq( head_counter, k );
    call_itt_notify(acquired, &head_counter);
    if( tail_counter==k ) spin_wait_while_eq( tail_counter, k );
    call_itt_notify(acquired, &tail_counter);
    page *p = head_page;
    __TBB_ASSERT( p, NULL );
    size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page );
    bool success = false;
    {
        micro_queue_pop_finalizer<T> finalizer( *this, base, k+concurrent_queue_rep_base::n_queue, index==base.my_rep->items_per_page-1 ? p : NULL );
        if( p->mask & uintptr_t(1)<<index ) {
            success = true;
            assign_and_destroy_item( dst, *p, index );
        } else {
            --base.my_rep->n_invalid_entries;
        }
    }
    return success;
}

References __TBB_ASSERT, tbb::internal::acquired, tbb::internal::call_itt_notify(), tbb::internal::modulo_power_of_two(), tbb::strict_ppl::internal::concurrent_queue_base_v3< T >::my_rep, tbb::strict_ppl::internal::concurrent_queue_rep_base::n_queue, p, tbb::internal::spin_wait_until_eq(), and tbb::internal::spin_wait_while_eq().


◆ push()

template<typename T >
void tbb::strict_ppl::internal::micro_queue< T >::push ( const void *  item,
ticket  k,
concurrent_queue_base_v3< T > &  base,
item_constructor_t  construct_item 
)

Definition at line 215 of file _concurrent_queue_impl.h.

{
    k &= -concurrent_queue_rep_base::n_queue;
    page* p = NULL;
    size_t index = modulo_power_of_two( k/concurrent_queue_rep_base::n_queue, base.my_rep->items_per_page);
    if( !index ) {
        // This ticket starts a new page; allocate it up front.
        __TBB_TRY {
            concurrent_queue_page_allocator& pa = base;
            p = pa.allocate_page();
        } __TBB_CATCH (...) {
            ++base.my_rep->n_invalid_entries;
            invalidate_page_and_rethrow( k );
        }
        p->mask = 0;
        p->next = NULL;
    }

    // Wait until it is our turn to push.
    if( tail_counter != k ) spin_wait_until_my_turn( tail_counter, k, *base.my_rep );
    call_itt_notify(acquired, &tail_counter);

    if( p ) {
        spin_mutex::scoped_lock lock( page_mutex );
        page* q = tail_page;
        if( is_valid_page(q) )
            q->next = p;
        else
            head_page = p;
        tail_page = p;
    } else {
        p = tail_page;
    }

    __TBB_TRY {
        copy_item( *p, index, item, construct_item );
        // If no exception was thrown, mark item as present.
        itt_hide_store_word(p->mask, p->mask | uintptr_t(1)<<index);
        call_itt_notify(releasing, &tail_counter);
        tail_counter += concurrent_queue_rep_base::n_queue;
    } __TBB_CATCH (...) {
        ++base.my_rep->n_invalid_entries;
        call_itt_notify(releasing, &tail_counter);
        tail_counter += concurrent_queue_rep_base::n_queue;
        __TBB_RETHROW();
    }
}

References __TBB_CATCH, __TBB_RETHROW, __TBB_TRY, tbb::internal::acquired, tbb::strict_ppl::internal::concurrent_queue_page_allocator::allocate_page(), tbb::internal::call_itt_notify(), tbb::strict_ppl::internal::is_valid_page(), tbb::internal::itt_hide_store_word(), lock, tbb::internal::modulo_power_of_two(), tbb::strict_ppl::internal::concurrent_queue_base_v3< T >::my_rep, tbb::strict_ppl::internal::concurrent_queue_rep_base::n_queue, tbb::strict_ppl::internal::concurrent_queue_rep_base::page::next, p, and tbb::internal::releasing.
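
For orientation, the enclosing concurrent_queue_base_v3<T> claims a ticket by atomically incrementing a counter in its representation and forwards to the micro_queue selected for that ticket; a hedged sketch of that dispatch (the representation members tail_counter and choose() are assumptions for illustration and are not documented on this page):

// Hedged sketch of how a push reaches micro_queue<T>::push: claim a ticket,
// pick a micro_queue for it, and forward. Representation member names are assumed.
template<typename T>
void internal_push_sketch( concurrent_queue_base_v3<T>& base, const void* src,
                           typename micro_queue<T>::item_constructor_t construct_item ) {
    ticket k = base.my_rep->tail_counter++;                       // fetch-and-increment claims a slot
    base.my_rep->choose(k).push( src, k, base, construct_item );  // dispatch to one of n_queue micro_queues
}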


◆ spin_wait_until_my_turn()

template<typename T >
void tbb::strict_ppl::internal::micro_queue< T >::spin_wait_until_my_turn ( atomic< ticket > &  counter,
ticket  k,
concurrent_queue_rep_base &  rb 
) const
private

Definition at line 203 of file _concurrent_queue_impl.h.

{
    for( atomic_backoff b(true);;b.pause() ) {
        ticket c = counter;
        if( c==k ) return;
        else if( c&1 ) {
            // An odd counter value is stored by invalidate_page_and_rethrow and
            // signals that an earlier push failed to allocate its page.
            ++rb.n_invalid_entries;
            throw_exception( eid_bad_last_alloc );
        }
    }
}

References tbb::internal::eid_bad_last_alloc, tbb::strict_ppl::internal::concurrent_queue_rep_base::n_invalid_entries, tbb::internal::atomic_backoff::pause(), and tbb::internal::throw_exception().


Friends And Related Function Documentation

◆ micro_queue_pop_finalizer< T >

template<typename T >
friend class micro_queue_pop_finalizer< T >
friend

Definition at line 162 of file _concurrent_queue_impl.h.

Member Data Documentation

◆ head_counter

template<typename T >
atomic<ticket> tbb::strict_ppl::internal::micro_queue< T >::head_counter

◆ head_page

template<typename T >
atomic<page*> tbb::strict_ppl::internal::micro_queue< T >::head_page

◆ page_mutex

template<typename T >
spin_mutex tbb::strict_ppl::internal::micro_queue< T >::page_mutex

Definition at line 186 of file _concurrent_queue_impl.h.

◆ tail_counter

template<typename T >
atomic<ticket> tbb::strict_ppl::internal::micro_queue< T >::tail_counter

◆ tail_page

template<typename T >
atomic<page*> tbb::strict_ppl::internal::micro_queue< T >::tail_page

The documentation for this class was generated from the following file:

_concurrent_queue_impl.h

Copyright © 2005-2020 Intel Corporation. All Rights Reserved.

Intel, Pentium, Intel Xeon, Itanium, Intel XScale and VTune are registered trademarks or trademarks of Intel Corporation or its subsidiaries in the United States and other countries.

* Other names and brands may be claimed as the property of others.