#define USE_CHECKS_COMMON

#define KMP_INLINE_SUBR 1
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size );
struct shared_table __kmp_threadprivate_d_table;
/* Look up the per-thread node registered for pc_addr in the given hash table. */
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct private_common *
__kmp_threadprivate_find_task_common( struct common_table *tbl, int gtid, void *pc_addr )
{
    struct private_common *tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, called with address %p\n",
                    gtid, pc_addr ) );
#endif

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_threadprivate_find_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }

    return 0;
}
/* Look up the registration record for pc_addr in the global shared table. */
static
#ifdef KMP_INLINE_SUBR
__forceinline
#endif
struct shared_common *
__kmp_find_shared_task_common( struct shared_table *tbl, int gtid, void *pc_addr )
{
    struct shared_common *tn;

    for (tn = tbl->data[ KMP_HASH(pc_addr) ]; tn; tn = tn->next) {
        if (tn->gbl_addr == pc_addr) {
#ifdef KMP_TASK_COMMON_DEBUG
            KC_TRACE( 10, ( "__kmp_find_shared_task_common: thread#%d, found node %p on list\n",
                            gtid, pc_addr ) );
#endif
            return tn;
        }
    }

    return 0;
}
/* Create a template for the data-initialized storage: either d->data stays NULL,
   indicating zero fill, or it holds a copy of the original data. */
static struct private_data *
__kmp_init_common_data( void *pc_addr, size_t pc_size )
{
    struct private_data *d;
    size_t i;
    char  *p = (char *) pc_addr;

    d = (struct private_data *) __kmp_allocate( sizeof( struct private_data ) );
    d->size = pc_size;
    d->more = 1;

    for (i = pc_size; i > 0; --i) {
        if (*p++ != '\0') {
            /* Not all zero: keep a byte-for-byte copy as the initialization template. */
            d->data = __kmp_allocate( pc_size );
            KMP_MEMCPY( d->data, pc_addr, pc_size );
            break;
        }
    }

    return d;
}
/* Initialize the data area at pc_addr from the template built above. */
static void
__kmp_copy_common_data( void *pc_addr, struct private_data *d )
{
    char *addr = (char *) pc_addr;
    int   i, offset;

    for (offset = 0; d != 0; d = d->next) {
        for (i = d->more; i > 0; --i) {
            if (d->data == 0)
                memset( & addr[ offset ], '\0', d->size );
            else
                KMP_MEMCPY( & addr[ offset ], d->data, d->size );
            offset += d->size;
        }
    }
}
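/*
 * Minimal usage sketch of the two template helpers above (illustration only; "x", "tpl"
 * and "copy" are hypothetical names, not runtime symbols):
 *
 *   static double x[4] = { 1.0, 0.0, 0.0, 0.0 };            // serial image, not all zero
 *   struct private_data *tpl = __kmp_init_common_data( &x, sizeof( x ) );
 *   // tpl->data now holds a 32-byte snapshot of x; it would stay NULL for an all-zero image
 *
 *   double copy[4];
 *   __kmp_copy_common_data( copy, tpl );                     // copy[] becomes { 1.0, 0.0, 0.0, 0.0 }
 */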
/* Initialize the shared threadprivate table; runs once during library initialization. */
void
__kmp_common_initialize( void )
{
    if( ! TCR_4(__kmp_init_common) ) {
        int q, gtid;

        __kmp_threadpriv_cache_list = NULL;

        /* Verify that the uber masters were initialized. */
        for (gtid = 0; gtid < __kmp_threads_capacity; gtid++ )
            if( __kmp_root[gtid] ) {
                KMP_DEBUG_ASSERT( __kmp_root[gtid]->r.r_uber_thread );
                for ( q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
                    KMP_DEBUG_ASSERT( !__kmp_root[gtid]->r.r_uber_thread->th.th_pri_common->data[q] );
            }

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q)
            __kmp_threadprivate_d_table.data[ q ] = 0;

        TCW_4(__kmp_init_common, TRUE);
    }
}
/* Call all destructors for threadprivate data belonging to all threads. */
void
__kmp_common_destroy( void )
{
    if( TCR_4(__kmp_init_common) ) {
        int q;

        TCW_4(__kmp_init_common, FALSE);

        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            int gtid;
            struct private_common *tn;
            struct shared_common  *d_tn;

            /* C++ destructors need to be called once per thread before exiting;
               don't call destructors for the master thread unless a copy constructor was used. */
            for (d_tn = __kmp_threadprivate_d_table.data[ q ]; d_tn; d_tn = d_tn->next) {
                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn)
                                        (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                                }
                            }
                        }
                        if (d_tn->obj_init != 0)
                            (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        for (gtid = 0; gtid < __kmp_all_nth; ++gtid) {
                            if( __kmp_threads[gtid] ) {
                                if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                                                         (! KMP_UBER_GTID (gtid)) ) {
                                    tn = __kmp_threadprivate_find_task_common( __kmp_threads[ gtid ]->th.th_pri_common,
                                                                               gtid, d_tn->gbl_addr );
                                    if (tn)
                                        (*d_tn->dt.dtor) (tn->par_addr);
                                }
                            }
                        }
                        if (d_tn->obj_init != 0)
                            (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            __kmp_threadprivate_d_table.data[ q ] = 0;
        }
    }
}
/* Call all destructors for threadprivate data belonging to this thread. */
void
__kmp_common_destroy_gtid( int gtid )
{
    struct private_common *tn;
    struct shared_common *d_tn;

    KC_TRACE( 10, ( "__kmp_common_destroy_gtid: T#%d called\n", gtid ) );
    if( (__kmp_foreign_tp) ? (! KMP_INITIAL_GTID (gtid)) :
                             (! KMP_UBER_GTID (gtid)) ) {

        if( TCR_4(__kmp_init_common) ) {

            for (tn = __kmp_threads[ gtid ]->th.th_pri_head; tn; tn = tn->link) {

                d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                                      gtid, tn->gbl_addr );

                KMP_DEBUG_ASSERT( d_tn );

                if (d_tn->is_vec) {
                    if (d_tn->dt.dtorv != 0) {
                        (void) (*d_tn->dt.dtorv) (tn->par_addr, d_tn->vec_len);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtorv) (d_tn->obj_init, d_tn->vec_len);
                    }
                } else {
                    if (d_tn->dt.dtor != 0) {
                        (void) (*d_tn->dt.dtor) (tn->par_addr);
                    }
                    if (d_tn->obj_init != 0) {
                        (void) (*d_tn->dt.dtor) (d_tn->obj_init);
                    }
                }
            }
            KC_TRACE( 30, ( "__kmp_common_destroy_gtid: T#%d threadprivate destructors complete\n",
                            gtid ) );
        }
    }
}
#ifdef KMP_TASK_COMMON_DEBUG
/* Debug helper: dump every registered threadprivate mapping for every thread. */
static void
dump_list( void )
{
    int p, q;

    for (p = 0; p < __kmp_all_nth; ++p) {
        if( !__kmp_threads[p] )
            continue;
        for (q = 0; q < KMP_HASH_TABLE_SIZE; ++q) {
            if (__kmp_threads[ p ]->th.th_pri_common->data[ q ]) {
                struct private_common *tn;

                KC_TRACE( 10, ( "\tdump_list: gtid:%d addresses\n", p ) );

                for (tn = __kmp_threads[ p ]->th.th_pri_common->data[ q ]; tn; tn = tn->next) {
                    KC_TRACE( 10, ( "\tdump_list: THREADPRIVATE: Serial %p -> Parallel %p\n",
                                    tn->gbl_addr, tn->par_addr ) );
                }
            }
        }
    }
}
#endif /* KMP_TASK_COMMON_DEBUG */
/* NOTE: this routine is to be called only from the serial part of the program. */
void
kmp_threadprivate_insert_private_data( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct shared_common **lnk_tn, *d_tn;
    KMP_DEBUG_ASSERT( __kmp_threads[ gtid ] &&
            __kmp_threads[ gtid ] -> th.th_root -> r.r_active == 0 );

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );

        d_tn->gbl_addr = pc_addr;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );
        d_tn->cmn_size = pc_size;

        __kmp_acquire_lock( &__kmp_global_lock, gtid );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;

        __kmp_release_lock( &__kmp_global_lock, gtid );
    }
}
struct private_common *
kmp_threadprivate_insert( int gtid, void *pc_addr, void *data_addr, size_t pc_size )
{
    struct private_common *tn, **tt;
    struct shared_common  *d_tn;

    /* +++++++++ START OF CRITICAL SECTION +++++++++ */

    __kmp_acquire_lock( & __kmp_global_lock, gtid );

    tn = (struct private_common *) __kmp_allocate( sizeof (struct private_common) );

    tn->gbl_addr = pc_addr;

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          gtid, pc_addr );     /* Only the MASTER data table exists. */

    if (d_tn != 0) {
        /* This threadprivate variable has already been seen. */

        if ( d_tn->pod_init == 0 && d_tn->obj_init == 0 ) {
            d_tn->cmn_size = pc_size;

            if (d_tn->is_vec) {
                if (d_tn->ct.ctorv != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctorv != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctorv) (d_tn->obj_init, pc_addr, d_tn->vec_len);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            } else {
                if (d_tn->ct.ctor != 0) {
                    /* Construct from scratch so no prototype exists */
                    d_tn->obj_init = 0;
                }
                else if (d_tn->cct.cctor != 0) {
                    /* Now data initialize the prototype since it was previously registered */
                    d_tn->obj_init = (void *) __kmp_allocate( d_tn->cmn_size );
                    (void) (*d_tn->cct.cctor) (d_tn->obj_init, pc_addr);
                }
                else {
                    d_tn->pod_init = __kmp_init_common_data( data_addr, d_tn->cmn_size );
                }
            }
        }
    }
    else {
        struct shared_common **lnk_tn;

        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = pc_addr;
        d_tn->cmn_size = pc_size;
        d_tn->pod_init = __kmp_init_common_data( data_addr, pc_size );

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(pc_addr) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }

    tn->cmn_size = d_tn->cmn_size;

    if ( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) ) {
        tn->par_addr = (void *) pc_addr;
    }
    else {
        tn->par_addr = (void *) __kmp_allocate( tn->cmn_size );
    }

    __kmp_release_lock( & __kmp_global_lock, gtid );

    /* +++++++++ END OF CRITICAL SECTION +++++++++ */

#ifdef USE_CHECKS_COMMON
    if (pc_size > d_tn->cmn_size) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: THREADPRIVATE: %p (%"
                        KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                        pc_addr, pc_size, d_tn->cmn_size ) );
        KMP_FATAL( TPCommonBlocksInconsist );
    }
#endif /* USE_CHECKS_COMMON */

    tt = &(__kmp_threads[ gtid ]->th.th_pri_common->data[ KMP_HASH(pc_addr) ]);

#ifdef KMP_TASK_COMMON_DEBUG
    if (*tt != 0) {
        KC_TRACE( 10, ( "__kmp_threadprivate_insert: WARNING! thread#%d: collision on %p\n",
                        gtid, pc_addr ) );
    }
#endif
    tn->next = *tt;
    *tt = tn;

#ifdef KMP_TASK_COMMON_DEBUG
    KC_TRACE( 10, ( "__kmp_threadprivate_insert: thread#%d, inserted node %p on list\n",
                    gtid, pc_addr ) );
#endif

    /* Link the node into the simple per-thread list. */
    tn->link = __kmp_threads[ gtid ]->th.th_pri_head;
    __kmp_threads[ gtid ]->th.th_pri_head = tn;

#ifdef BUILD_TV
    __kmp_tv_threadprivate_store( __kmp_threads[ gtid ], tn->gbl_addr, tn->par_addr );
#endif

    /* The master thread keeps the original data in place; no construction or copy is needed. */
    if( (__kmp_foreign_tp) ? (KMP_INITIAL_GTID (gtid)) : (KMP_UBER_GTID (gtid)) )
        return tn;

    /*
     * If this is a C++ object with a copy constructor, use it;
     * else if a C++ object with a constructor, use it for the non-master copies only;
     * else use pod_init and memcpy.
     */
    if (d_tn->is_vec) {
        if ( d_tn->ct.ctorv != 0) {
            (void) (*d_tn->ct.ctorv) (tn->par_addr, d_tn->vec_len);
        } else if (d_tn->cct.cctorv != 0) {
            (void) (*d_tn->cct.cctorv) (tn->par_addr, d_tn->obj_init, d_tn->vec_len);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    } else {
        if ( d_tn->ct.ctor != 0 ) {
            (void) (*d_tn->ct.ctor) (tn->par_addr);
        } else if (d_tn->cct.cctor != 0) {
            (void) (*d_tn->cct.cctor) (tn->par_addr, d_tn->obj_init);
        } else if (tn->par_addr != tn->gbl_addr) {
            __kmp_copy_common_data( tn->par_addr, d_tn->pod_init );
        }
    }

    return tn;
}
/* Register constructors and destructors for threadprivate data. */
void
__kmpc_threadprivate_register( ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate_register: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current compilers */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    /* Only the global data table exists. */
    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table, -1, data );

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;
        d_tn->ct.ctor = ctor;
        d_tn->cct.cctor = cctor;
        d_tn->dt.dtor = dtor;

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
void *
__kmpc_threadprivate( ident_t *loc, kmp_int32 global_tid, void *data, size_t size )
{
    void *ret;
    struct private_common *tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate: T#%d called\n", global_tid ) );

#ifdef USE_CHECKS_COMMON
    if (! __kmp_init_serial)
        KMP_FATAL( RTLNotInitialized );
#endif /* USE_CHECKS_COMMON */

    if ( ! __kmp_threads[global_tid] -> th.th_root -> r.r_active && ! __kmp_foreign_tp ) {
        /* The parallel address will never overlap with the data address; use data_address = data. */
        KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d inserting private data\n", global_tid ) );
        kmp_threadprivate_insert_private_data( global_tid, data, data, size );

        ret = data;
    }
    else {
        KC_TRACE( 50, ( "__kmpc_threadprivate: T#%d try to find private data at address %p\n",
                        global_tid, data ) );
        tn = __kmp_threadprivate_find_task_common( __kmp_threads[ global_tid ]->th.th_pri_common, global_tid, data );

        if ( tn ) {
            KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d found data\n", global_tid ) );
#ifdef USE_CHECKS_COMMON
            if ((size_t) size > tn->cmn_size) {
                KC_TRACE( 10, ( "THREADPRIVATE: %p (%" KMP_UINTPTR_SPEC " ,%" KMP_UINTPTR_SPEC ")\n",
                                data, size, tn->cmn_size ) );
                KMP_FATAL( TPCommonBlocksInconsist );
            }
#endif /* USE_CHECKS_COMMON */
        }
        else {
            KC_TRACE( 20, ( "__kmpc_threadprivate: T#%d inserting data\n", global_tid ) );
            tn = kmp_threadprivate_insert( global_tid, data, data, size );
        }

        ret = tn->par_addr;
    }
    KC_TRACE( 10, ( "__kmpc_threadprivate: T#%d exiting; return value = %p\n",
                    global_tid, ret ) );

    return ret;
}
/* Allocate private storage for threadprivate data, going through the per-variable cache. */
void *
__kmpc_threadprivate_cached(
    ident_t *  loc,          // source location information
    kmp_int32  global_tid,   // global thread number
    void *     data,         // pointer to the original global variable
    size_t     size,         // size of the original global variable
    void ***   cache )       // pointer to the per-variable threadprivate cache
{
    void *ret;

    KC_TRACE( 10, ( "__kmpc_threadprivate_cached: T#%d called with cache: %p, address: %p, size: %"
                    KMP_SIZE_T_SPEC "\n",
                    global_tid, *cache, data, size ) );

    if ( TCR_PTR(*cache) == 0) {
        __kmp_acquire_lock( & __kmp_global_lock, global_tid );

        if ( TCR_PTR(*cache) == 0) {
            void **my_cache;

            __kmp_acquire_bootstrap_lock(&__kmp_tp_cached_lock);
            __kmp_tp_cached = 1;
            __kmp_release_bootstrap_lock(&__kmp_tp_cached_lock);

            my_cache = (void**)
                __kmp_allocate( sizeof( void * ) * __kmp_tp_capacity + sizeof ( kmp_cached_addr_t ) );

            KC_TRACE( 50, ( "__kmpc_threadprivate_cached: T#%d allocated cache at address %p\n",
                            global_tid, my_cache ) );

            /* Add the address of my_cache to the linked list for later cleanup. */
            kmp_cached_addr_t *tp_cache_addr;

            tp_cache_addr = (kmp_cached_addr_t *) & my_cache[__kmp_tp_capacity];
            tp_cache_addr -> addr = my_cache;
            tp_cache_addr -> next = __kmp_threadpriv_cache_list;
            __kmp_threadpriv_cache_list = tp_cache_addr;

            KMP_MB();
            TCW_PTR( *cache, my_cache);
            KMP_MB();
        }

        __kmp_release_lock( & __kmp_global_lock, global_tid );
    }

    if ((ret = TCR_PTR((*cache)[ global_tid ])) == 0) {
        ret = __kmpc_threadprivate( loc, global_tid, data, (size_t) size);

        TCW_PTR( (*cache)[ global_tid ], ret);
    }
    KC_TRACE( 10, ( "__kmpc_threadprivate_cached: T#%d exiting; return value = %p\n",
                    global_tid, ret ) );

    return ret;
}
/* Register vector constructors and destructors for threadprivate data. */
void
__kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor,
                                   kmpc_cctor_vec cctor, kmpc_dtor_vec dtor,
                                   size_t vector_length )
{
    struct shared_common *d_tn, **lnk_tn;

    KC_TRACE( 10, ( "__kmpc_threadprivate_register_vec: called\n" ) );

#ifdef USE_CHECKS_COMMON
    /* copy constructor must be zero for current compilers */
    KMP_ASSERT( cctor == 0);
#endif /* USE_CHECKS_COMMON */

    d_tn = __kmp_find_shared_task_common( &__kmp_threadprivate_d_table,
                                          -1, data );    /* Only the global data table exists. */

    if (d_tn == 0) {
        d_tn = (struct shared_common *) __kmp_allocate( sizeof( struct shared_common ) );
        d_tn->gbl_addr = data;
        d_tn->ct.ctorv = ctor;
        d_tn->cct.cctorv = cctor;
        d_tn->dt.dtorv = dtor;
        d_tn->is_vec = TRUE;
        d_tn->vec_len = (size_t) vector_length;

        lnk_tn = &(__kmp_threadprivate_d_table.data[ KMP_HASH(data) ]);

        d_tn->next = *lnk_tn;
        *lnk_tn = d_tn;
    }
}
/*
 * Callback types taken by the registration entry points (declared in the runtime headers):
 *   void *(* kmpc_ctor)     ( void * );              void *(* kmpc_ctor_vec)  ( void *, size_t );
 *   void  (* kmpc_dtor)     ( void * );              void  (* kmpc_dtor_vec)  ( void *, size_t );
 *   void *(* kmpc_cctor)    ( void *, void * );      void *(* kmpc_cctor_vec) ( void *, void *, size_t );
 *
 * Public entry points:
 *   void   __kmpc_threadprivate_register    ( ident_t *loc, void *data, kmpc_ctor ctor, kmpc_cctor cctor, kmpc_dtor dtor );
 *   void   __kmpc_threadprivate_register_vec( ident_t *loc, void *data, kmpc_ctor_vec ctor, kmpc_cctor_vec cctor, kmpc_dtor_vec dtor, size_t vector_length );
 *   void * __kmpc_threadprivate_cached      ( ident_t *loc, kmp_int32 global_tid, void *data, size_t size, void ***cache );
 */
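/*
 * Illustrative sketch (not part of the runtime): roughly the calling pattern a compiler
 * could emit for a POD threadprivate variable, e.g. "int counter;" with
 * "#pragma omp threadprivate(counter)". The names counter, counter_cache and loc are
 * hypothetical; only the __kmpc_* entry points summarized above are real.
 *
 *   static int    counter;            // original (master) copy
 *   static void **counter_cache;      // per-variable threadprivate cache, starts NULL
 *
 *   // Registration, emitted once (POD data, so no constructor/destructor callbacks):
 *   __kmpc_threadprivate_register( &loc, &counter, NULL, NULL, NULL );
 *
 *   // Each reference to "counter" inside a thread is routed through the cached lookup:
 *   int *p = (int *) __kmpc_threadprivate_cached( &loc, __kmpc_global_thread_num( &loc ),
 *                                                 &counter, sizeof( counter ),
 *                                                 &counter_cache );
 *   (*p)++;                           // increments this thread's private copy
 */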