root/gc/aix_irix_threads.c

/* [<][>][^][v][top][bottom][index][help] */

DEFINITIONS

This source file includes the following definitions.
  1. GC_print_sig_mask
  2. GC_thread
  3. GC_suspend_handler
  4. GC_PROTO
  5. GC_new_thread
  6. GC_delete_gc_thread
  7. GC_lookup_thread
  8. GC_stop_world
  9. GC_start_world
  10. GC_stop_world
  11. GC_start_world
  12. GC_push_all_stacks
  13. GC_thr_init
  14. GC_pthread_sigmask
  15. GC_thread_exit_proc
  16. GC_pthread_join
  17. GC_start_routine
  18. GC_pthread_create
  19. GC_lock

   1 /* 
   2  * Copyright (c) 1991-1995 by Xerox Corporation.  All rights reserved.
   3  * Copyright (c) 1996-1999 by Silicon Graphics.  All rights reserved.
   4  * Copyright (c) 1999-2003 by Hewlett-Packard Company. All rights reserved.
   5  *
   6  * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
   7  * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
   8  *
   9  * Permission is hereby granted to use or copy this program
  10  * for any purpose,  provided the above notices are retained on all copies.
  11  * Permission to modify the code and to distribute modified code is granted,
  12  * provided the above notices are retained, and a notice that the code was
  13  * modified is included with the above copyright notice.
  14  */
  15 /*
  16  * Support code for Irix (>=6.2) Pthreads and for AIX pthreads.
  17  * This relies on properties
  18  * not guaranteed by the Pthread standard.  It may or may not be portable
  19  * to other implementations.
  20  *
  21  * Note that there is a lot of code duplication between this file and
  22  * (pthread_support.c, pthread_stop_world.c).  They should be merged.
  23  * Pthread_support.c should be directly usable.
  24  *
  25  * Please avoid adding new ports here; use the generic pthread support
  26  * as a base instead.
  27  */
  28 
  29 # if defined(GC_IRIX_THREADS) || defined(GC_AIX_THREADS)
  30 
  31 # include "private/gc_priv.h"
  32 # include <pthread.h>
  33 # include <assert.h>
  34 # include <semaphore.h>
  35 # include <time.h>
  36 # include <errno.h>
  37 # include <unistd.h>
  38 # include <sys/mman.h>
  39 # include <sys/time.h>
  40 
  41 #undef pthread_create
  42 #undef pthread_sigmask
  43 #undef pthread_join
  44 
  45 #if defined(GC_IRIX_THREADS) && !defined(MUTEX_RECURSIVE_NP)
  46 #define MUTEX_RECURSIVE_NP PTHREAD_MUTEX_RECURSIVE
  47 #endif
  48 
  49 void GC_thr_init();
  50 
  51 #if 0
  52 void GC_print_sig_mask()
  53 {
  54     sigset_t blocked;
  55     int i;
  56 
  57     if (pthread_sigmask(SIG_BLOCK, NULL, &blocked) != 0)
  58         ABORT("pthread_sigmask");
  59     GC_printf0("Blocked: ");
  60     for (i = 1; i <= MAXSIG; i++) {
  61         if (sigismember(&blocked, i)) { GC_printf1("%ld ",(long) i); }
  62     }
  63     GC_printf0("\n");
  64 }
  65 #endif
  66 
  67 /* We use the allocation lock to protect thread-related data structures. */
  68 
  69 /* The set of all known threads.  We intercept thread creation and      */
  70 /* joins.  We never actually create detached threads.  We allocate all  */
  71 /* new thread stacks ourselves.  These allow us to maintain this        */
  72 /* data structure.                                                      */
  73 /* Protected by GC_thr_lock.                                            */
   74 /* Some of this should be declared volatile, but that's inconsistent   */
  75 /* with some library routine declarations.                              */
  76 typedef struct GC_Thread_Rep {
  77     struct GC_Thread_Rep * next;  /* More recently allocated threads    */
  78                                   /* with a given pthread id come       */
  79                                   /* first.  (All but the first are     */
  80                                   /* guaranteed to be dead, but we may  */
  81                                   /* not yet have registered the join.) */
  82     pthread_t id;
  83     word stop;
  84 #       define NOT_STOPPED 0
  85 #       define PLEASE_STOP 1
  86 #       define STOPPED 2
  87     word flags;
  88 #       define FINISHED 1       /* Thread has exited.   */
  89 #       define DETACHED 2       /* Thread is intended to be detached.   */
  90     ptr_t stack_cold;           /* cold end of the stack                */
  91     ptr_t stack_hot;            /* Valid only when stopped. */
  92                                 /* But must be within stack region at   */
  93                                 /* all times.                           */
  94     void * status;              /* Used only to avoid premature         */
  95                                 /* reclamation of any data it might     */
  96                                 /* reference.                           */
  97 } * GC_thread;
  98 
  99 GC_thread GC_lookup_thread(pthread_t id);
 100 
 101 /*
 102  * The only way to suspend threads given the pthread interface is to send
 103  * signals.  Unfortunately, this means we have to reserve
 104  * a signal, and intercept client calls to change the signal mask.
 105  */
 106 #if 0 /* DOB: 6.1 */
 107 # if defined(GC_AIX_THREADS)
 108 #   define SIG_SUSPEND SIGUSR1
 109 # else
 110 #   define SIG_SUSPEND (SIGRTMIN + 6)
 111 # endif
 112 #endif
 113 
 114 pthread_mutex_t GC_suspend_lock = PTHREAD_MUTEX_INITIALIZER;
 115                                 /* Number of threads stopped so far     */
 116 pthread_cond_t GC_suspend_ack_cv = PTHREAD_COND_INITIALIZER;
 117 pthread_cond_t GC_continue_cv = PTHREAD_COND_INITIALIZER;
 118 
 119 void GC_suspend_handler(int sig)
 120 {
 121     int dummy;
 122     GC_thread me;
 123     sigset_t all_sigs;
 124     sigset_t old_sigs;
 125     int i;
 126 
 127     if (sig != SIG_SUSPEND) ABORT("Bad signal in suspend_handler");
 128     me = GC_lookup_thread(pthread_self());
 129     /* The lookup here is safe, since I'm doing this on behalf  */
 130     /* of a thread which holds the allocation lock in order     */
 131     /* to stop the world.  Thus concurrent modification of the  */
 132     /* data structure is impossible.                            */
 133     if (PLEASE_STOP != me -> stop) {
 134         /* Misdirected signal.  */
 135         pthread_mutex_unlock(&GC_suspend_lock);
 136         return;
 137     }
 138     pthread_mutex_lock(&GC_suspend_lock);
 139     me -> stack_hot = (ptr_t)(&dummy);
 140     me -> stop = STOPPED;
 141     pthread_cond_signal(&GC_suspend_ack_cv);
 142     pthread_cond_wait(&GC_continue_cv, &GC_suspend_lock);
 143     pthread_mutex_unlock(&GC_suspend_lock);
 144     /* GC_printf1("Continuing 0x%x\n", pthread_self()); */
 145 }
 146 
 147 
 148 GC_bool GC_thr_initialized = FALSE;
 149 
 150 
 151 # define THREAD_TABLE_SZ 128    /* Must be power of 2   */
 152 volatile GC_thread GC_threads[THREAD_TABLE_SZ];
 153 
 154 void GC_push_thread_structures GC_PROTO((void))
 155 {
 156     GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
 157 }
 158 
 159 /* Add a thread to GC_threads.  We assume it wasn't already there.      */
 160 /* Caller holds allocation lock.                                        */
 161 GC_thread GC_new_thread(pthread_t id)
 162 {
 163     int hv = ((word)id) % THREAD_TABLE_SZ;
 164     GC_thread result;
 165     static struct GC_Thread_Rep first_thread;
 166     static GC_bool first_thread_used = FALSE;
 167     
 168     GC_ASSERT(I_HOLD_LOCK());
 169     if (!first_thread_used) {
 170         result = &first_thread;
 171         first_thread_used = TRUE;
 172         /* Dont acquire allocation lock, since we may already hold it. */
 173     } else {
 174         result = (struct GC_Thread_Rep *)
 175                  GC_generic_malloc_inner(sizeof(struct GC_Thread_Rep), NORMAL);
 176     }
 177     if (result == 0) return(0);
 178     result -> id = id;
 179     result -> next = GC_threads[hv];
 180     GC_threads[hv] = result;
 181     /* result -> flags = 0;     */
 182     /* result -> stop = 0;      */
 183     return(result);
 184 }
 185 
 186 /* Delete a thread from GC_threads.  We assume it is there.     */
 187 /* (The code intentionally traps if it wasn't.)                 */
 188 /* Caller holds allocation lock.                                */
 189 /* We explicitly pass in the GC_thread we're looking for, since */
 190 /* if a thread has been joined, but we have not yet             */
 191 /* been notified, then there may be more than one thread        */
 192 /* in the table with the same pthread id.                       */
 193 /* This is OK, but we need a way to delete a specific one.      */
 194 void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
 195 {
 196     int hv = ((word)id) % THREAD_TABLE_SZ;
 197     register GC_thread p = GC_threads[hv];
 198     register GC_thread prev = 0;
 199 
 200     GC_ASSERT(I_HOLD_LOCK());
 201     while (p != gc_id) {
 202         prev = p;
 203         p = p -> next;
 204     }
 205     if (prev == 0) {
 206         GC_threads[hv] = p -> next;
 207     } else {
 208         prev -> next = p -> next;
 209     }
 210 }
 211 
 212 /* Return a GC_thread corresponding to a given thread_t.        */
 213 /* Returns 0 if it's not there.                                 */
 214 /* Caller holds  allocation lock or otherwise inhibits          */
 215 /* updates.                                                     */
 216 /* If there is more than one thread with the given id we        */
 217 /* return the most recent one.                                  */
 218 GC_thread GC_lookup_thread(pthread_t id)
 219 {
 220     int hv = ((word)id) % THREAD_TABLE_SZ;
 221     register GC_thread p = GC_threads[hv];
 222     
 223     /* I either hold the lock, or i'm being called from the stop-the-world
 224      * handler. */
 225 #if defined(GC_AIX_THREADS)
 226     GC_ASSERT(I_HOLD_LOCK()); /* no stop-the-world handler needed on AIX */
 227 #endif
 228     while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
 229     return(p);
 230 }
 231 
 232 #if defined(GC_AIX_THREADS)
 233 void GC_stop_world()
 234 {
 235     pthread_t my_thread = pthread_self();
 236     register int i;
 237     register GC_thread p;
 238     register int result;
 239     struct timespec timeout;
 240 
 241     GC_ASSERT(I_HOLD_LOCK());
 242     for (i = 0; i < THREAD_TABLE_SZ; i++) {
 243       for (p = GC_threads[i]; p != 0; p = p -> next) {
 244         if (p -> id != my_thread) {
 245           pthread_suspend_np(p->id);
 246         }
 247       }
 248     }
 249     /* GC_printf1("World stopped 0x%x\n", pthread_self()); */
 250 }
 251 
 252 void GC_start_world()
 253 {
 254     GC_thread p;
 255     unsigned i;
 256     pthread_t my_thread = pthread_self();
 257 
 258     /* GC_printf0("World starting\n"); */
 259     GC_ASSERT(I_HOLD_LOCK());
 260     for (i = 0; i < THREAD_TABLE_SZ; i++) {
 261       for (p = GC_threads[i]; p != 0; p = p -> next) {
 262         if (p -> id != my_thread) {
 263           pthread_continue_np(p->id);
 264         }
 265       }
 266     }
 267 }
 268 
 269 #else /* GC_AIX_THREADS */
 270 
 271 /* Caller holds allocation lock.        */
 272 void GC_stop_world()
 273 {
 274     pthread_t my_thread = pthread_self();
 275     register int i;
 276     register GC_thread p;
 277     register int result;
 278     struct timespec timeout;
 279     
 280     GC_ASSERT(I_HOLD_LOCK());
 281     for (i = 0; i < THREAD_TABLE_SZ; i++) {
 282       for (p = GC_threads[i]; p != 0; p = p -> next) {
 283         if (p -> id != my_thread) {
 284             if (p -> flags & FINISHED) {
 285                 p -> stop = STOPPED;
 286                 continue;
 287             }
 288             p -> stop = PLEASE_STOP;
 289             result = pthread_kill(p -> id, SIG_SUSPEND);
 290             /* GC_printf1("Sent signal to 0x%x\n", p -> id); */
 291             switch(result) {
 292                 case ESRCH:
 293                     /* Not really there anymore.  Possible? */
 294                     p -> stop = STOPPED;
 295                     break;
 296                 case 0:
 297                     break;
 298                 default:
 299                     ABORT("pthread_kill failed");
 300             }
 301         }
 302       }
 303     }
 304     pthread_mutex_lock(&GC_suspend_lock);
 305     for (i = 0; i < THREAD_TABLE_SZ; i++) {
 306       for (p = GC_threads[i]; p != 0; p = p -> next) {
 307         while (p -> id != my_thread && p -> stop != STOPPED) {
 308             clock_gettime(CLOCK_REALTIME, &timeout);
 309             timeout.tv_nsec += 50000000; /* 50 msecs */
 310             if (timeout.tv_nsec >= 1000000000) {
 311                 timeout.tv_nsec -= 1000000000;
 312                 ++timeout.tv_sec;
 313             }
 314             result = pthread_cond_timedwait(&GC_suspend_ack_cv,
 315                                             &GC_suspend_lock,
 316                                             &timeout);
 317             if (result == ETIMEDOUT) {
 318                 /* Signal was lost or misdirected.  Try again.      */
 319                 /* Duplicate signals should be benign.              */
 320                 result = pthread_kill(p -> id, SIG_SUSPEND);
 321             }
 322         }
 323       }
 324     }
 325     pthread_mutex_unlock(&GC_suspend_lock);
 326     /* GC_printf1("World stopped 0x%x\n", pthread_self()); */
 327 }
 328 
 329 /* Caller holds allocation lock.        */
 330 void GC_start_world()
 331 {
 332     GC_thread p;
 333     unsigned i;
 334 
 335     /* GC_printf0("World starting\n"); */
 336     GC_ASSERT(I_HOLD_LOCK());
 337     for (i = 0; i < THREAD_TABLE_SZ; i++) {
 338       for (p = GC_threads[i]; p != 0; p = p -> next) {
 339         p -> stop = NOT_STOPPED;
 340       }
 341     }
 342     pthread_mutex_lock(&GC_suspend_lock);
 343     /* All other threads are at pthread_cond_wait in signal handler.    */
 344     /* Otherwise we couldn't have acquired the lock.                    */
 345     pthread_mutex_unlock(&GC_suspend_lock);
 346     pthread_cond_broadcast(&GC_continue_cv);
 347 }
 348 
 349 #endif /* GC_AIX_THREADS */
 350 
 351 
 352 /* We hold allocation lock.  Should do exactly the right thing if the   */
 353 /* world is stopped.  Should not fail if it isn't.                      */
 354 void GC_push_all_stacks()
 355 {
 356     register int i;
 357     register GC_thread p;
 358     register ptr_t hot, cold;
 359     pthread_t me = pthread_self();
 360     
 361     /* GC_init() should have been called before GC_push_all_stacks is
 362      * invoked, and GC_init calls GC_thr_init(), which sets
 363      * GC_thr_initialized. */
 364     GC_ASSERT(GC_thr_initialized);
 365 
 366     /* GC_printf1("Pushing stacks from thread 0x%x\n", me); */
 367     GC_ASSERT(I_HOLD_LOCK());
 368     for (i = 0; i < THREAD_TABLE_SZ; i++) {
 369       for (p = GC_threads[i]; p != 0; p = p -> next) {
 370         if (p -> flags & FINISHED) continue;
 371         cold = p->stack_cold;
 372         if (!cold) cold=GC_stackbottom; /* 0 indicates 'original stack' */
 373         if (pthread_equal(p -> id, me)) {
 374             hot = GC_approx_sp();
 375         } else {
 376 #        ifdef GC_AIX_THREADS
 377           /* AIX doesn't use signals to suspend, so we need to get an */
 378           /* accurate hot stack pointer.                              */
 379           /* See http://publib16.boulder.ibm.com/pseries/en_US/libs/basetrf1/pthread_getthrds_np.htm */
 380           pthread_t id = p -> id;
 381           struct __pthrdsinfo pinfo;
 382           int regbuf[64];
 383           int val = sizeof(regbuf);
 384           int retval = pthread_getthrds_np(&id, PTHRDSINFO_QUERY_ALL, &pinfo,
 385                                            sizeof(pinfo), regbuf, &val);
 386           if (retval != 0) {
 387             printf("ERROR: pthread_getthrds_np() failed in GC\n");
 388             abort();
 389           }
 390           /* according to the AIX ABI, 
 391              "the lowest possible valid stack address is 288 bytes (144 + 144)
 392              less than the current value of the stack pointer.  Functions may
 393              use this stack space as volatile storage which is not preserved
 394              across function calls."
 395              ftp://ftp.penguinppc64.org/pub/people/amodra/PPC-elf64abi.txt.gz
 396           */
 397           hot = (ptr_t)(unsigned long)pinfo.__pi_ustk-288;
 398           cold = (ptr_t)pinfo.__pi_stackend; /* more precise */
 399           /* push the registers too, because they won't be on stack */
 400           GC_push_all_eager((ptr_t)&pinfo.__pi_context,
 401                             (ptr_t)((&pinfo.__pi_context)+1));
 402           GC_push_all_eager((ptr_t)regbuf, ((ptr_t)regbuf)+val);
 403 #        else
 404               hot = p -> stack_hot;
 405 #        endif
 406         }
 407 #       ifdef STACK_GROWS_UP
 408           GC_push_all_stack(cold, hot);
 409 #       else
 410  /* printf("thread 0x%x: hot=0x%08x cold=0x%08x\n", p -> id, hot, cold); */
 411           GC_push_all_stack(hot, cold);
 412 #       endif
 413       }
 414     }
 415 }
 416 
 417 
 418 /* We hold the allocation lock. */
 419 void GC_thr_init()
 420 {
 421     GC_thread t;
 422     struct sigaction act;
 423 
 424     if (GC_thr_initialized) return;
 425     GC_ASSERT(I_HOLD_LOCK());
 426     GC_thr_initialized = TRUE;
 427 #ifndef GC_AIX_THREADS
 428     (void) sigaction(SIG_SUSPEND, 0, &act);
 429     if (act.sa_handler != SIG_DFL)
 430         ABORT("Previously installed SIG_SUSPEND handler");
 431     /* Install handler. */
 432         act.sa_handler = GC_suspend_handler;
 433         act.sa_flags = SA_RESTART;
 434         (void) sigemptyset(&act.sa_mask);
 435         if (0 != sigaction(SIG_SUSPEND, &act, 0))
 436             ABORT("Failed to install SIG_SUSPEND handler");
 437 #endif
 438     /* Add the initial thread, so we can stop it.       */
 439       t = GC_new_thread(pthread_self());
 440       /* use '0' to indicate GC_stackbottom, since GC_init() has not
 441        * completed by the time we are called (from GC_init_inner()) */
 442       t -> stack_cold = 0; /* the original stack. */
 443       t -> stack_hot = (ptr_t)(&t);
 444       t -> flags = DETACHED;
 445 }
 446 
 447 int GC_pthread_sigmask(int how, const sigset_t *set, sigset_t *oset)
 448 {
 449     sigset_t fudged_set;
 450     
 451 #ifdef GC_AIX_THREADS
 452     return(pthread_sigmask(how, set, oset));
 453 #endif
 454 
 455     if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
 456         fudged_set = *set;
 457         sigdelset(&fudged_set, SIG_SUSPEND);
 458         set = &fudged_set;
 459     }
 460     return(pthread_sigmask(how, set, oset));
 461 }
 462 
 463 struct start_info {
 464     void *(*start_routine)(void *);
 465     void *arg;
 466     word flags;
 467     pthread_mutex_t registeredlock;
 468     pthread_cond_t registered;     
 469     int volatile registereddone;
 470 };
 471 
 472 void GC_thread_exit_proc(void *arg)
 473 {
 474     GC_thread me;
 475 
 476     LOCK();
 477     me = GC_lookup_thread(pthread_self());
 478     me -> flags |= FINISHED;
 479     /* reclaim DETACHED thread right away; otherwise wait until join() */
 480     if (me -> flags & DETACHED) {
 481         GC_delete_gc_thread(pthread_self(), me);
 482     }
 483     UNLOCK();
 484 }
 485 
 486 int GC_pthread_join(pthread_t thread, void **retval)
 487 {
 488     int result;
 489     GC_thread thread_gc_id;
 490     
 491     LOCK();
 492     thread_gc_id = GC_lookup_thread(thread);
 493     /* This is guaranteed to be the intended one, since the thread id   */
 494     /* cant have been recycled by pthreads.                             */
 495     UNLOCK();
 496     GC_ASSERT(!(thread_gc_id->flags & DETACHED));
 497     result = pthread_join(thread, retval);
 498     /* Some versions of the Irix pthreads library can erroneously       */
 499     /* return EINTR when the call succeeds.                             */
 500         if (EINTR == result) result = 0;
 501     GC_ASSERT(thread_gc_id->flags & FINISHED);
 502     LOCK();
 503     /* Here the pthread thread id may have been recycled. */
 504     GC_delete_gc_thread(thread, thread_gc_id);
 505     UNLOCK();
 506     return result;
 507 }
 508 
 509 void * GC_start_routine(void * arg)
 510 {
 511     int dummy;
 512     struct start_info * si = arg;
 513     void * result;
 514     GC_thread me;
 515     pthread_t my_pthread;
 516     void *(*start)(void *);
 517     void *start_arg;
 518 
 519     my_pthread = pthread_self();
 520     /* If a GC occurs before the thread is registered, that GC will     */
 521     /* ignore this thread.  That's fine, since it will block trying to  */
 522     /* acquire the allocation lock, and won't yet hold interesting      */
 523     /* pointers.                                                        */
 524     LOCK();
 525     /* We register the thread here instead of in the parent, so that    */
 526     /* we don't need to hold the allocation lock during pthread_create. */
 527     /* Holding the allocation lock there would make REDIRECT_MALLOC     */
 528     /* impossible.  It probably still doesn't work, but we're a little  */
 529     /* closer ...                                                       */
 530     /* This unfortunately means that we have to be careful the parent   */
 531     /* doesn't try to do a pthread_join before we're registered.        */
 532     me = GC_new_thread(my_pthread);
 533     me -> flags = si -> flags;
 534     me -> stack_cold = (ptr_t) &dummy; /* this now the 'start of stack' */
 535     me -> stack_hot = me->stack_cold;/* this field should always be sensible */
 536     UNLOCK();
 537     start = si -> start_routine;
 538     start_arg = si -> arg;
 539 
 540     pthread_mutex_lock(&(si->registeredlock));
 541     si->registereddone = 1;
 542     pthread_cond_signal(&(si->registered));
 543     pthread_mutex_unlock(&(si->registeredlock));
 544     /* si went away as soon as we did this unlock */
 545 
 546     pthread_cleanup_push(GC_thread_exit_proc, 0);
 547     result = (*start)(start_arg);
 548     me -> status = result;
 549     pthread_cleanup_pop(1);
 550         /* This involves acquiring the lock, ensuring that we can't exit */
 551         /* while a collection that thinks we're alive is trying to stop  */
 552         /* us.                                                           */
 553     return(result);
 554 }
 555 
 556 int
 557 GC_pthread_create(pthread_t *new_thread,
 558                   const pthread_attr_t *attr,
 559                   void *(*start_routine)(void *), void *arg)
 560 {
 561     int result;
 562     GC_thread t;
 563     int detachstate;
 564     word my_flags = 0;
 565     struct start_info * si;
 566         /* This is otherwise saved only in an area mmapped by the thread */
 567         /* library, which isn't visible to the collector.                */
 568 
 569     LOCK();
 570     /* GC_INTERNAL_MALLOC implicitly calls GC_init() if required */
 571     si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
 572                                                  NORMAL);
 573     GC_ASSERT(GC_thr_initialized); /* initialized by GC_init() */
 574     UNLOCK();
 575     if (0 == si) return(ENOMEM);
 576     pthread_mutex_init(&(si->registeredlock), NULL);
 577     pthread_cond_init(&(si->registered),NULL);
 578     pthread_mutex_lock(&(si->registeredlock));
 579     si -> start_routine = start_routine;
 580     si -> arg = arg;
 581 
 582     pthread_attr_getdetachstate(attr, &detachstate);
 583     if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
 584     si -> flags = my_flags;
 585     result = pthread_create(new_thread, attr, GC_start_routine, si); 
 586 
 587     /* Wait until child has been added to the thread table.             */
 588     /* This also ensures that we hold onto si until the child is done   */
 589     /* with it.  Thus it doesn't matter whether it is otherwise         */
 590     /* visible to the collector.                                        */
 591 
 592     if (0 == result) {
 593       si->registereddone = 0;
 594       while (!si->registereddone) 
 595         pthread_cond_wait(&(si->registered), &(si->registeredlock));
 596     }
 597     pthread_mutex_unlock(&(si->registeredlock));
 598 
 599     pthread_cond_destroy(&(si->registered));
 600     pthread_mutex_destroy(&(si->registeredlock));
 601     LOCK();
 602     GC_INTERNAL_FREE(si);
 603     UNLOCK();
 604 
 605     return(result);
 606 }
 607 
 608 /* For now we use the pthreads locking primitives on HP/UX */
 609 
 610 VOLATILE GC_bool GC_collecting = 0; /* A hint that we're in the collector and       */
 611                         /* holding the allocation lock for an           */
 612                         /* extended period.                             */
 613 
 614 /* Reasonably fast spin locks.  Basically the same implementation */
 615 /* as STL alloc.h.                                                */
 616 
 617 #define SLEEP_THRESHOLD 3
 618 
 619 volatile unsigned int GC_allocate_lock = 0;
 620 #define GC_TRY_LOCK() !GC_test_and_set(&GC_allocate_lock)
 621 #define GC_LOCK_TAKEN GC_allocate_lock
 622 
 623 void GC_lock()
 624 {
 625 #   define low_spin_max 30  /* spin cycles if we suspect uniprocessor */
 626 #   define high_spin_max 1000 /* spin cycles for multiprocessor */
 627     static unsigned spin_max = low_spin_max;
 628     unsigned my_spin_max;
 629     static unsigned last_spins = 0;
 630     unsigned my_last_spins;
 631     volatile unsigned junk;
 632 #   define PAUSE junk *= junk; junk *= junk; junk *= junk; junk *= junk
 633     int i;
 634 
 635     if (GC_TRY_LOCK()) {
 636         return;
 637     }
 638     junk = 0;
 639     my_spin_max = spin_max;
 640     my_last_spins = last_spins;
 641     for (i = 0; i < my_spin_max; i++) {
 642         if (GC_collecting) goto yield;
 643         if (i < my_last_spins/2 || GC_LOCK_TAKEN) {
 644             PAUSE; 
 645             continue;
 646         }
 647         if (GC_TRY_LOCK()) {
 648             /*
 649              * got it!
 650              * Spinning worked.  Thus we're probably not being scheduled
 651              * against the other process with which we were contending.
 652              * Thus it makes sense to spin longer the next time.
 653              */
 654             last_spins = i;
 655             spin_max = high_spin_max;
 656             return;
 657         }
 658     }
 659     /* We are probably being scheduled against the other process.  Sleep. */
 660     spin_max = low_spin_max;
 661 yield:
 662     for (i = 0;; ++i) {
 663         if (GC_TRY_LOCK()) {
 664             return;
 665         }
 666         if (i < SLEEP_THRESHOLD) {
 667             sched_yield();
 668         } else {
 669             struct timespec ts;
 670         
 671             if (i > 26) i = 26;
 672                         /* Don't wait for more than about 60msecs, even */
 673                         /* under extreme contention.                    */
 674             ts.tv_sec = 0;
 675             ts.tv_nsec = 1 << i;
 676             nanosleep(&ts, 0);
 677         }
 678     }
 679 }
 680 
 681 # else  /* !GC_IRIX_THREADS && !GC_AIX_THREADS */
 682 
 683 #ifndef LINT
 684   int GC_no_Irix_threads;
 685 #endif
 686 
 687 # endif /* IRIX_THREADS */
 688 

/* [<][>][^][v][top][bottom][index][help] */