root/gc/pthread_support.c
DEFINITIONS
This source file includes the following definitions.
- return_freelists
- GC_init_thread_local
- GC_destroy_thread_local
- GC_local_malloc
- GC_local_malloc_atomic
- GC_local_gcj_malloc
- GC_mark_thread
- start_mark_threads
- start_mark_threads
- GC_push_thread_structures
- GC_mark_thread_local_free_lists
- GC_new_thread
- GC_delete_thread
- GC_delete_gc_thread
- GC_lookup_thread
- GC_remove_all_threads_but_me
- GC_segment_is_thread_stack
- GC_get_nprocs
- GC_wait_for_gc_completion
- GC_fork_prepare_proc
- GC_fork_parent_proc
- GC_fork_child_proc
- GC_get_nprocs
- GC_thr_init
- GC_init_parallel
- GC_start_blocking
- GC_end_blocking
- WRAP_FUNC
- GC_thread_exit_proc
- WRAP_FUNC
- GC_start_routine
- GC_compare_and_exchange
- GC_atomic_add
- GC_pause
- GC_generic_lock
- GC_lock
- GC_lock
- GC_acquire_mark_lock
- GC_release_mark_lock
- GC_wait_builder
- GC_wait_for_reclaim
- GC_notify_all_builder
- GC_wait_marker
- GC_notify_all_marker
1 /*
2 * Copyright (c) 1994 by Xerox Corporation. All rights reserved.
3 * Copyright (c) 1996 by Silicon Graphics. All rights reserved.
4 * Copyright (c) 1998 by Fergus Henderson. All rights reserved.
5 * Copyright (c) 2000-2004 by Hewlett-Packard Company. All rights reserved.
6 *
7 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
8 * OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
9 *
10 * Permission is hereby granted to use or copy this program
11 * for any purpose, provided the above notices are retained on all copies.
12 * Permission to modify the code and to distribute modified code is granted,
13 * provided the above notices are retained, and a notice that the code was
14 * modified is included with the above copyright notice.
15 */
16 /*
17 * Support code for LinuxThreads, the clone()-based kernel
18 * thread package for Linux which is included in libc6.
19 *
20 * This code relies on implementation details of LinuxThreads
21 * (i.e. properties not guaranteed by the Pthread standard),
22 * though this version now does less of that than the other Pthreads
23 * support code.
24 *
25 * Note that there is a lot of code duplication between linux_threads.c
26 * and thread support for some of the other Posix platforms; any changes
27 * made here may need to be reflected there too.
28 */
29 /* DG/UX ix86 support <takis@xfree86.org> */
30 /*
31 * Linux_threads.c now also includes some code to support HPUX and
32 * OSF1 (Compaq Tru64 Unix, really). The OSF1 support is based on Eric Benson's
33 * patch.
34 *
35 * Eric also suggested an alternate basis for a lock implementation in
36 * his code:
37 * + #elif defined(OSF1)
38 * + unsigned long GC_allocate_lock = 0;
39 * + msemaphore GC_allocate_semaphore;
40 * + # define GC_TRY_LOCK() \
41 * + ((msem_lock(&GC_allocate_semaphore, MSEM_IF_NOWAIT) == 0) \
42 * + ? (GC_allocate_lock = 1) \
43 * + : 0)
44 * + # define GC_LOCK_TAKEN GC_allocate_lock
45 */
46
47 /*#define DEBUG_THREADS 1*/
48 /*#define GC_ASSERTIONS*/
49
50 # include "private/pthread_support.h"
51
52 # if defined(GC_PTHREADS) && !defined(GC_SOLARIS_THREADS) \
53 && !defined(GC_WIN32_THREADS)
54
55 # if defined(GC_HPUX_THREADS) && !defined(USE_PTHREAD_SPECIFIC) \
56 && !defined(USE_COMPILER_TLS)
57 # ifdef __GNUC__
58 # define USE_PTHREAD_SPECIFIC
59 /* Empirically, as of gcc 3.3, USE_COMPILER_TLS doesn't work. */
60 # else
61 # define USE_COMPILER_TLS
62 # endif
63 # endif
64
65 # if defined USE_HPUX_TLS
66 --> Macro replaced by USE_COMPILER_TLS
67 # endif
68
69 # if (defined(GC_DGUX386_THREADS) || defined(GC_OSF1_THREADS) || \
70 defined(GC_DARWIN_THREADS) || defined(GC_AIX_THREADS)) \
71 && !defined(USE_PTHREAD_SPECIFIC)
72 # define USE_PTHREAD_SPECIFIC
73 # endif
74
75 # if defined(GC_DGUX386_THREADS) && !defined(_POSIX4A_DRAFT10_SOURCE)
76 # define _POSIX4A_DRAFT10_SOURCE 1
77 # endif
78
79 # if defined(GC_DGUX386_THREADS) && !defined(_USING_POSIX4A_DRAFT10)
80 # define _USING_POSIX4A_DRAFT10 1
81 # endif
82
83 # ifdef THREAD_LOCAL_ALLOC
84 # if !defined(USE_PTHREAD_SPECIFIC) && !defined(USE_COMPILER_TLS)
85 # include "private/specific.h"
86 # endif
87 # if defined(USE_PTHREAD_SPECIFIC)
88 # define GC_getspecific pthread_getspecific
89 # define GC_setspecific pthread_setspecific
90 # define GC_key_create pthread_key_create
91 typedef pthread_key_t GC_key_t;
92 # endif
93 # if defined(USE_COMPILER_TLS)
94 # define GC_getspecific(x) (x)
95 # define GC_setspecific(key, v) ((key) = (v), 0)
96 # define GC_key_create(key, d) 0
97 typedef void * GC_key_t;
98 # endif
99 # endif
100 # include <stdlib.h>
101 # include <pthread.h>
102 # include <sched.h>
103 # include <time.h>
104 # include <errno.h>
105 # include <unistd.h>
106 # include <sys/mman.h>
107 # include <sys/time.h>
108 # include <sys/types.h>
109 # include <sys/stat.h>
110 # include <fcntl.h>
111 # include <signal.h>
112
113 #if defined(GC_DARWIN_THREADS)
114 # include "private/darwin_semaphore.h"
115 #else
116 # include <semaphore.h>
117 #endif /* !GC_DARWIN_THREADS */
118
119 #if defined(GC_DARWIN_THREADS)
120 # include <sys/sysctl.h>
121 #endif /* GC_DARWIN_THREADS */
122
123
124
125 #if defined(GC_DGUX386_THREADS)
126 # include <sys/dg_sys_info.h>
127 # include <sys/_int_psem.h>
128 /* sem_t is an unsigned int in DG/UX */
129 typedef unsigned int sem_t;
130 #endif /* GC_DGUX386_THREADS */
131
132 #ifndef __GNUC__
133 # define __inline__
134 #endif
135
136 #ifdef GC_USE_LD_WRAP
137 # define WRAP_FUNC(f) __wrap_##f
138 # define REAL_FUNC(f) __real_##f
139 #else
140 # define WRAP_FUNC(f) GC_##f
141 # if !defined(GC_DGUX386_THREADS)
142 # define REAL_FUNC(f) f
143 # else /* GC_DGUX386_THREADS */
144 # define REAL_FUNC(f) __d10_##f
145 # endif /* GC_DGUX386_THREADS */
146 # undef pthread_create
147 # if !defined(GC_DARWIN_THREADS)
148 # undef pthread_sigmask
149 # endif
150 # undef pthread_join
151 # undef pthread_detach
152 # if defined(GC_OSF1_THREADS) && defined(_PTHREAD_USE_MANGLED_NAMES_) \
153 && !defined(_PTHREAD_USE_PTDNAM_)
154 /* Restore the original mangled names on Tru64 UNIX. */
155 # define pthread_create __pthread_create
156 # define pthread_join __pthread_join
157 # define pthread_detach __pthread_detach
158 # endif
159 #endif
160
161 void GC_thr_init();
162
163 static GC_bool parallel_initialized = FALSE;
164
165 void GC_init_parallel();
166
167 # if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
168
169 /* We don't really support thread-local allocation with DBG_HDRS_ALL */
170
171 #ifdef USE_COMPILER_TLS
172 __thread
173 #endif
174 GC_key_t GC_thread_key;
175
176 static GC_bool keys_initialized;
177
178 /* Recover the contents of the freelist array fl into the global one gfl.*/
179 /* Note that the indexing scheme differs, in that gfl has finer size */
180 /* resolution, even if not all entries are used. */
181 /* We hold the allocator lock. */
182 static void return_freelists(ptr_t *fl, ptr_t *gfl)
183 {
184 int i;
185 ptr_t q, *qptr;
186 size_t nwords;
187
188 for (i = 1; i < NFREELISTS; ++i) {
189 nwords = i * (GRANULARITY/sizeof(word));
190 qptr = fl + i;
191 q = *qptr;
192 if ((word)q >= HBLKSIZE) {
193 if (gfl[nwords] == 0) {
194 gfl[nwords] = q;
195 } else {
196 /* Concatenate: */
197 for (; (word)q >= HBLKSIZE; qptr = &(obj_link(q)), q = *qptr);
198 GC_ASSERT(0 == q);
199 *qptr = gfl[nwords];
200 gfl[nwords] = fl[i];
201 }
202 }
203 /* Clear fl[i], since the thread structure may hang around. */
204 /* Do it in a way that is likely to trap if we access it. */
205 fl[i] = (ptr_t)HBLKSIZE;
206 }
207 }
208
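/* Illustrative example of the index mapping above (a sketch, assuming */
/* for concreteness GRANULARITY = 16 bytes and 4-byte words): local */
/* index i covers objects of i granules, i.e. 4*i words, so fl[2] */
/* (8-word objects) is spliced onto gfl[8], and gfl entries that are */
/* not multiples of GRANULARITY/sizeof(word) are simply never touched */
/* by this routine. */
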
209 /* We statically allocate a single "size 0" object. It is linked to */
210 /* itself, and is thus repeatedly reused for all size 0 allocation */
211 /* requests. (Size 0 gcj allocation requests are incorrect, and */
212 /* we arrange for those to fault asap.) */
213 static ptr_t size_zero_object = (ptr_t)(&size_zero_object);
214
215 /* Each thread structure must be initialized. */
216 /* This call must be made from the new thread. */
217 /* Caller holds allocation lock. */
218 void GC_init_thread_local(GC_thread p)
219 {
220 int i;
221
222 if (!keys_initialized) {
223 if (0 != GC_key_create(&GC_thread_key, 0)) {
224 ABORT("Failed to create key for local allocator");
225 }
226 keys_initialized = TRUE;
227 }
228 if (0 != GC_setspecific(GC_thread_key, p)) {
229 ABORT("Failed to set thread specific allocation pointers");
230 }
231 for (i = 1; i < NFREELISTS; ++i) {
232 p -> ptrfree_freelists[i] = (ptr_t)1;
233 p -> normal_freelists[i] = (ptr_t)1;
234 # ifdef GC_GCJ_SUPPORT
235 p -> gcj_freelists[i] = (ptr_t)1;
236 # endif
237 }
238 /* Set up the size 0 free lists. */
239 p -> ptrfree_freelists[0] = (ptr_t)(&size_zero_object);
240 p -> normal_freelists[0] = (ptr_t)(&size_zero_object);
241 # ifdef GC_GCJ_SUPPORT
242 p -> gcj_freelists[0] = (ptr_t)(-1);
243 # endif
244 }
245
246 #ifdef GC_GCJ_SUPPORT
247 extern ptr_t * GC_gcjobjfreelist;
248 #endif
249
250 /* We hold the allocator lock. */
251 void GC_destroy_thread_local(GC_thread p)
252 {
253 /* We currently only do this from the thread itself or from */
254 /* the fork handler for a child process. */
255 # ifndef HANDLE_FORK
256 GC_ASSERT(GC_getspecific(GC_thread_key) == (void *)p);
257 # endif
258 return_freelists(p -> ptrfree_freelists, GC_aobjfreelist);
259 return_freelists(p -> normal_freelists, GC_objfreelist);
260 # ifdef GC_GCJ_SUPPORT
261 return_freelists(p -> gcj_freelists, GC_gcjobjfreelist);
262 # endif
263 }
264
265 extern GC_PTR GC_generic_malloc_many();
266
267 GC_PTR GC_local_malloc(size_t bytes)
268 {
269 if (EXPECT(!SMALL_ENOUGH(bytes),0)) {
270 return(GC_malloc(bytes));
271 } else {
272 int index = INDEX_FROM_BYTES(bytes);
273 ptr_t * my_fl;
274 ptr_t my_entry;
275 # if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
276 GC_key_t k = GC_thread_key;
277 # endif
278 void * tsd;
279
280 # if defined(REDIRECT_MALLOC) && !defined(USE_PTHREAD_SPECIFIC)
281 if (EXPECT(0 == k, 0)) {
282 /* This can happen if we get called when the world is */
283 /* being initialized. Whether we can actually complete */
284 /* the initialization then is unclear. */
285 GC_init_parallel();
286 k = GC_thread_key;
287 }
288 # endif
289 tsd = GC_getspecific(GC_thread_key);
290 # ifdef GC_ASSERTIONS
291 LOCK();
292 GC_ASSERT(tsd == (void *)GC_lookup_thread(pthread_self()));
293 UNLOCK();
294 # endif
295 my_fl = ((GC_thread)tsd) -> normal_freelists + index;
296 my_entry = *my_fl;
297 if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
298 ptr_t next = obj_link(my_entry);
299 GC_PTR result = (GC_PTR)my_entry;
300 *my_fl = next;
301 obj_link(my_entry) = 0;
302 PREFETCH_FOR_WRITE(next);
303 return result;
304 } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
305 *my_fl = my_entry + index + 1;
306 return GC_malloc(bytes);
307 } else {
308 GC_generic_malloc_many(BYTES_FROM_INDEX(index), NORMAL, my_fl);
309 if (*my_fl == 0) return GC_oom_fn(bytes);
310 return GC_local_malloc(bytes);
311 }
312 }
313 }
314
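/* A summary of the free-list entry encoding relied on above (my */
/* reading of this code, informal rather than normative): */
/* (word)*my_fl >= HBLKSIZE: a genuine pointer to the next free */
/* object; take the fast path. */
/* 1 <= (word)*my_fl <= DIRECT_GRANULES: a count of granules handed */
/* out so far; bump it by index + 1 and defer to GC_malloc, so */
/* rarely used sizes never earn a private free list. */
/* anything else (e.g. 0 after a failed refill): replenish the */
/* list via GC_generic_malloc_many. */
/* Note that return_freelists() stores (ptr_t)HBLKSIZE into retired */
/* lists precisely so that a stale use takes the fast path and */
/* faults immediately. */
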
315 GC_PTR GC_local_malloc_atomic(size_t bytes)
316 {
317 if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
318 return(GC_malloc_atomic(bytes));
319 } else {
320 int index = INDEX_FROM_BYTES(bytes);
321 ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
322 -> ptrfree_freelists + index;
323 ptr_t my_entry = *my_fl;
324
325 if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
326 GC_PTR result = (GC_PTR)my_entry;
327 *my_fl = obj_link(my_entry);
328 return result;
329 } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
330 *my_fl = my_entry + index + 1;
331 return GC_malloc_atomic(bytes);
332 } else {
333 GC_generic_malloc_many(BYTES_FROM_INDEX(index), PTRFREE, my_fl);
334 /* *my_fl is updated while the collector is excluded; */
335 /* the free list is always visible to the collector as */
336 /* such. */
337 if (*my_fl == 0) return GC_oom_fn(bytes);
338 return GC_local_malloc_atomic(bytes);
339 }
340 }
341 }
342
343 #ifdef GC_GCJ_SUPPORT
344
345 #include "include/gc_gcj.h"
346
347 #ifdef GC_ASSERTIONS
348 extern GC_bool GC_gcj_malloc_initialized;
349 #endif
350
351 extern int GC_gcj_kind;
352
353 GC_PTR GC_local_gcj_malloc(size_t bytes,
354 void * ptr_to_struct_containing_descr)
355 {
356 GC_ASSERT(GC_gcj_malloc_initialized);
357 if (EXPECT(!SMALL_ENOUGH(bytes), 0)) {
358 return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
359 } else {
360 int index = INDEX_FROM_BYTES(bytes);
361 ptr_t * my_fl = ((GC_thread)GC_getspecific(GC_thread_key))
362 -> gcj_freelists + index;
363 ptr_t my_entry = *my_fl;
364 if (EXPECT((word)my_entry >= HBLKSIZE, 1)) {
365 GC_PTR result = (GC_PTR)my_entry;
366 GC_ASSERT(!GC_incremental);
367 /* We assert that any concurrent marker will stop us. */
368 /* Thus it is impossible for a mark procedure to see the */
369 /* allocation of the next object, but to see this object */
370 /* still containing a free list pointer. Otherwise the */
371 /* marker might find a random "mark descriptor". */
372 *(volatile ptr_t *)my_fl = obj_link(my_entry);
373 /* We must update the freelist before we store the pointer. */
374 /* Otherwise a GC at this point would see a corrupted */
375 /* free list. */
376 /* A memory barrier is probably never needed, since the */
377 /* action of stopping this thread will cause prior writes */
378 /* to complete. */
379 GC_ASSERT(((void * volatile *)result)[1] == 0);
380 *(void * volatile *)result = ptr_to_struct_containing_descr;
381 return result;
382 } else if ((word)my_entry - 1 < DIRECT_GRANULES) {
383 if (!GC_incremental) *my_fl = my_entry + index + 1;
384 /* In the incremental case, we always have to take this */
385 /* path. Thus we leave the counter alone. */
386 return GC_gcj_malloc(bytes, ptr_to_struct_containing_descr);
387 } else {
388 GC_generic_malloc_many(BYTES_FROM_INDEX(index), GC_gcj_kind, my_fl);
389 if (*my_fl == 0) return GC_oom_fn(bytes);
390 return GC_local_gcj_malloc(bytes, ptr_to_struct_containing_descr);
391 }
392 }
393 }
394
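/* For contrast, a sketch of the ordering that the comments above rule */
/* out (illustration only): */
/* */
/* *(void **)result = ptr_to_struct_containing_descr; */
/* *my_fl = obj_link(my_entry); <-- too late */
/* */
/* The type descriptor and the free-list link share word 0 of the */
/* object, so writing the descriptor first clobbers the link while */
/* my_entry is still the head of the list; a collection between the */
/* two stores would then traverse a corrupted free list. */
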
395 #endif /* GC_GCJ_SUPPORT */
396
397 # else /* !THREAD_LOCAL_ALLOC && !DBG_HDRS_ALL */
398
399 # define GC_destroy_thread_local(t)
400
401 # endif /* !THREAD_LOCAL_ALLOC */
402
403 #if 0
404 /*
405 To make sure that we're using LinuxThreads and not some other thread
406 package, we generate a dummy reference to `pthread_kill_other_threads_np'
407 (was `__pthread_initial_thread_bos' but that disappeared),
408 which is a symbol defined in LinuxThreads, but (hopefully) not in other
409 thread packages.
410
411 We no longer do this, since this code is now portable enough that it might
412 actually work for something else.
413 */
414 void (*dummy_var_to_force_linux_threads)() = pthread_kill_other_threads_np;
415 #endif /* 0 */
416
417 long GC_nprocs = 1; /* Number of processors. We may not have */
418 /* access to all of them, but this is as good */
419 /* a guess as any ... */
420
421 #ifdef PARALLEL_MARK
422
423 # ifndef MAX_MARKERS
424 # define MAX_MARKERS 16
425 # endif
426
427 static ptr_t marker_sp[MAX_MARKERS] = {0};
428
429 void * GC_mark_thread(void * id)
430 {
431 word my_mark_no = 0;
432
433 marker_sp[(word)id] = GC_approx_sp();
434 for (;; ++my_mark_no) {
435 /* GC_mark_no is passed only to allow GC_help_marker to terminate */
436 /* promptly. This would be important if it were called from the signal */
437 /* handler or from the GC lock acquisition code. Under Linux, it's */
438 /* not safe to call it from a signal handler, since it uses mutexes */
439 /* and condition variables. Since it is called only here, the */
440 /* argument is unnecessary. */
441 if (my_mark_no < GC_mark_no || my_mark_no > GC_mark_no + 2) {
442 /* resynchronize if we get far off, e.g. because GC_mark_no */
443 /* wrapped. */
444 my_mark_no = GC_mark_no;
445 }
446 # ifdef DEBUG_THREADS
447 GC_printf1("Starting mark helper for mark number %ld\n", my_mark_no);
448 # endif
449 GC_help_marker(my_mark_no);
450 }
451 }
452
453 extern long GC_markers; /* Number of mark threads we would */
454 /* like to have. Includes the */
455 /* initiating thread. */
456
457 pthread_t GC_mark_threads[MAX_MARKERS];
458
459 #define PTHREAD_CREATE REAL_FUNC(pthread_create)
460
461 static void start_mark_threads()
462 {
463 unsigned i;
464 pthread_attr_t attr;
465
466 if (GC_markers > MAX_MARKERS) {
467 WARN("Limiting number of mark threads\n", 0);
468 GC_markers = MAX_MARKERS;
469 }
470 if (0 != pthread_attr_init(&attr)) ABORT("pthread_attr_init failed");
471
472 if (0 != pthread_attr_setdetachstate(&attr, PTHREAD_CREATE_DETACHED))
473 ABORT("pthread_attr_setdetachstate failed");
474
475 # if defined(HPUX) || defined(GC_DGUX386_THREADS)
476 /* Default stack size is usually too small: fix it. */
477 /* Otherwise marker threads or GC may run out of */
478 /* space. */
479 # define MIN_STACK_SIZE (8*HBLKSIZE*sizeof(word))
480 {
481 size_t old_size;
482 int code;
483
484 if (pthread_attr_getstacksize(&attr, &old_size) != 0)
485 ABORT("pthread_attr_getstacksize failed\n");
486 if (old_size < MIN_STACK_SIZE) {
487 if (pthread_attr_setstacksize(&attr, MIN_STACK_SIZE) != 0)
488 ABORT("pthread_attr_setstacksize failed\n");
489 }
490 }
491 # endif /* HPUX || GC_DGUX386_THREADS */
492 # ifdef CONDPRINT
493 if (GC_print_stats) {
494 GC_printf1("Starting %ld marker threads\n", GC_markers - 1);
495 }
496 # endif
497 for (i = 0; i < GC_markers - 1; ++i) {
498 if (0 != PTHREAD_CREATE(GC_mark_threads + i, &attr,
499 GC_mark_thread, (void *)(word)i)) {
500 WARN("Marker thread creation failed, errno = %ld.\n", errno);
501 }
502 }
503 }
504
505 #else /* !PARALLEL_MARK */
506
507 static __inline__ void start_mark_threads()
508 {
509 }
510
511 #endif /* !PARALLEL_MARK */
512
513 GC_bool GC_thr_initialized = FALSE;
514
515 volatile GC_thread GC_threads[THREAD_TABLE_SZ];
516
517 void GC_push_thread_structures GC_PROTO((void))
518 {
519 GC_push_all((ptr_t)(GC_threads), (ptr_t)(GC_threads)+sizeof(GC_threads));
520 # if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
521 GC_push_all((ptr_t)(&GC_thread_key),
522 (ptr_t)(&GC_thread_key)+sizeof(GC_thread_key));
523 # endif
524 }
525
526 #ifdef THREAD_LOCAL_ALLOC
527 /* We must explicitly mark ptrfree and gcj free lists, since the free */
528 /* list links wouldn't otherwise be found. We also set them in the */
529 /* normal free lists, since that involves touching less memory than if */
530 /* we scanned them normally. */
531 void GC_mark_thread_local_free_lists(void)
532 {
533 int i, j;
534 GC_thread p;
535 ptr_t q;
536
537 for (i = 0; i < THREAD_TABLE_SZ; ++i) {
538 for (p = GC_threads[i]; 0 != p; p = p -> next) {
539 for (j = 1; j < NFREELISTS; ++j) {
540 q = p -> ptrfree_freelists[j];
541 if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
542 q = p -> normal_freelists[j];
543 if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
544 # ifdef GC_GCJ_SUPPORT
545 q = p -> gcj_freelists[j];
546 if ((word)q > HBLKSIZE) GC_set_fl_marks(q);
547 # endif /* GC_GCJ_SUPPORT */
548 }
549 }
550 }
551 }
552 #endif /* THREAD_LOCAL_ALLOC */
553
554 static struct GC_Thread_Rep first_thread;
555
556 /* Add a thread to GC_threads. We assume it wasn't already there. */
557 /* Caller holds allocation lock. */
558 GC_thread GC_new_thread(pthread_t id)
559 {
560 int hv = ((word)id) % THREAD_TABLE_SZ;
561 GC_thread result;
562 static GC_bool first_thread_used = FALSE;
563
564 if (!first_thread_used) {
565 result = &first_thread;
566 first_thread_used = TRUE;
567 } else {
568 result = (struct GC_Thread_Rep *)
569 GC_INTERNAL_MALLOC(sizeof(struct GC_Thread_Rep), NORMAL);
570 }
571 if (result == 0) return(0);
572 result -> id = id;
573 result -> next = GC_threads[hv];
574 GC_threads[hv] = result;
575 GC_ASSERT(result -> flags == 0 && result -> thread_blocked == 0);
576 return(result);
577 }
578
579 /* Delete a thread from GC_threads. We assume it is there. */
580 /* (The code intentionally traps if it wasn't.) */
581 /* Caller holds allocation lock. */
582 void GC_delete_thread(pthread_t id)
583 {
584 int hv = ((word)id) % THREAD_TABLE_SZ;
585 register GC_thread p = GC_threads[hv];
586 register GC_thread prev = 0;
587
588 while (!pthread_equal(p -> id, id)) {
589 prev = p;
590 p = p -> next;
591 }
592 if (prev == 0) {
593 GC_threads[hv] = p -> next;
594 } else {
595 prev -> next = p -> next;
596 }
597 GC_INTERNAL_FREE(p);
598 }
599
600 /* If a thread has been joined, but we have not yet */
601 /* been notified, then there may be more than one thread */
602 /* in the table with the same pthread id. */
603 /* This is OK, but we need a way to delete a specific one. */
604 void GC_delete_gc_thread(pthread_t id, GC_thread gc_id)
605 {
606 int hv = ((word)id) % THREAD_TABLE_SZ;
607 register GC_thread p = GC_threads[hv];
608 register GC_thread prev = 0;
609
610 while (p != gc_id) {
611 prev = p;
612 p = p -> next;
613 }
614 if (prev == 0) {
615 GC_threads[hv] = p -> next;
616 } else {
617 prev -> next = p -> next;
618 }
619 GC_INTERNAL_FREE(p);
620 }
621
622 /* Return a GC_thread corresponding to a given pthread_t. */
623 /* Returns 0 if it's not there. */
624 /* Caller holds allocation lock or otherwise inhibits */
625 /* updates. */
626 /* If there is more than one thread with the given id we */
627 /* return the most recent one. */
628 GC_thread GC_lookup_thread(pthread_t id)
629 {
630 int hv = ((word)id) % THREAD_TABLE_SZ;
631 register GC_thread p = GC_threads[hv];
632
633 while (p != 0 && !pthread_equal(p -> id, id)) p = p -> next;
634 return(p);
635 }
636
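/* Shape of the table, for orientation (a sketch; THREAD_TABLE_SZ is */
/* 128 in the usual configuration): */
/* */
/* GC_threads[(word)id % THREAD_TABLE_SZ] --> t3 --> t1 --> 0 */
/* */
/* GC_new_thread pushes entries on the front of the chain, so when a */
/* reused pthread id appears twice, the walk above naturally returns */
/* the most recent registration first. */
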
637 #ifdef HANDLE_FORK
638 /* Remove all entries from the GC_threads table, except the */
639 /* one for the current thread. We need to do this in the child */
640 /* process after a fork(), since only the current thread */
641 /* survives in the child. */
642 void GC_remove_all_threads_but_me(void)
643 {
644 pthread_t self = pthread_self();
645 int hv;
646 GC_thread p, next, me;
647
648 for (hv = 0; hv < THREAD_TABLE_SZ; ++hv) {
649 me = 0;
650 for (p = GC_threads[hv]; 0 != p; p = next) {
651 next = p -> next;
652 if (pthread_equal(p -> id, self)) {
653 me = p;
654 p -> next = 0;
655 } else {
656 # ifdef THREAD_LOCAL_ALLOC
657 if (!(p -> flags & FINISHED)) {
658 GC_destroy_thread_local(p);
659 }
660 # endif /* THREAD_LOCAL_ALLOC */
661 if (p != &first_thread) GC_INTERNAL_FREE(p);
662 }
663 }
664 GC_threads[hv] = me;
665 }
666 }
667 #endif /* HANDLE_FORK */
668
669 #ifdef USE_PROC_FOR_LIBRARIES
670 int GC_segment_is_thread_stack(ptr_t lo, ptr_t hi)
671 {
672 int i;
673 GC_thread p;
674
675 # ifdef PARALLEL_MARK
676 for (i = 0; i < GC_markers; ++i) {
677 if (marker_sp[i] > lo && marker_sp[i] < hi) return 1;
678 }
679 # endif
680 for (i = 0; i < THREAD_TABLE_SZ; i++) {
681 for (p = GC_threads[i]; p != 0; p = p -> next) {
682 if (0 != p -> stack_end) {
683 # ifdef STACK_GROWS_UP
684 if (p -> stack_end >= lo && p -> stack_end < hi) return 1;
685 # else /* STACK_GROWS_DOWN */
686 if (p -> stack_end > lo && p -> stack_end <= hi) return 1;
687 # endif
688 }
689 }
690 }
691 return 0;
692 }
693 #endif /* USE_PROC_FOR_LIBRARIES */
694
695 #ifdef GC_LINUX_THREADS
696 /* Return the number of processors, or a value <= 0 if it can't be determined. */
697 int GC_get_nprocs()
698 {
699 /* Should be "return sysconf(_SC_NPROCESSORS_ONLN);" but that */
700 /* appears to be buggy in many cases. */
701 /* We look for lines "cpu<n>" in /proc/stat. */
702 # define STAT_BUF_SIZE 4096
703 # define STAT_READ read
704 /* If read is wrapped, this may need to be redefined to call */
705 /* the real one. */
706 char stat_buf[STAT_BUF_SIZE];
707 int f;
708 word result = 1;
709 /* Some old kernels only have a single "cpu nnnn ..." */
710 /* entry in /proc/stat. We identify those as */
711 /* uniprocessors. */
712 ssize_t i, len = 0;
713
714 f = open("/proc/stat", O_RDONLY);
715 if (f < 0 || (len = STAT_READ(f, stat_buf, STAT_BUF_SIZE)) < 100) {
716 WARN("Couldn't read /proc/stat\n", 0);
717 return -1;
718 }
719 for (i = 0; i < len - 100; ++i) {
720 if (stat_buf[i] == '\n' && stat_buf[i+1] == 'c'
721 && stat_buf[i+2] == 'p' && stat_buf[i+3] == 'u') {
722 int cpu_no = atoi(stat_buf + i + 4);
723 if (cpu_no >= result) result = cpu_no + 1;
724 }
725 }
726 close(f);
727 return result;
728 }
729 #endif /* GC_LINUX_THREADS */
730
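/* For orientation, /proc/stat begins with an aggregate line followed */
/* by one line per CPU, e.g. (illustrative values, fields truncated): */
/* */
/* cpu 84982 0 61419 8220557 */
/* cpu0 42490 0 30941 4110278 */
/* cpu1 42492 0 30478 4110279 */
/* */
/* Matching on '\n' followed by "cpu" skips the aggregate first line */
/* (nothing precedes it), and atoi() then yields 0, 1, ... for the */
/* per-CPU lines, so the result is max(cpu_no) + 1. */
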
731 /* We hold the GC lock. Wait until an in-progress GC has finished. */
732 /* Repeatedly RELEASES GC LOCK in order to wait. */
733 /* If wait_for_all is true, then we exit with the GC lock held and no */
734 /* collection in progress; otherwise we just wait for the current GC */
735 /* to finish. */
736 extern GC_bool GC_collection_in_progress();
737 void GC_wait_for_gc_completion(GC_bool wait_for_all)
738 {
739 if (GC_incremental && GC_collection_in_progress()) {
740 int old_gc_no = GC_gc_no;
741
742 /* Make sure that no part of our stack is still on the mark stack, */
743 /* since it's about to be unmapped. */
744 while (GC_incremental && GC_collection_in_progress()
745 && (wait_for_all || old_gc_no == GC_gc_no)) {
746 ENTER_GC();
747 GC_in_thread_creation = TRUE;
748 GC_collect_a_little_inner(1);
749 GC_in_thread_creation = FALSE;
750 EXIT_GC();
751 UNLOCK();
752 sched_yield();
753 LOCK();
754 }
755 }
756 }
757
758 #ifdef HANDLE_FORK
759 /* Procedures called before and after a fork. The goal here is to make */
760 /* it safe to call GC_malloc() in a forked child. It's unclear that this */
761 /* is attainable, since the Single UNIX Specification seems to imply that */
762 /* one should only call async-signal-safe functions, and we probably */
763 /* can't quite guarantee that. But we give it our best shot. (That */
764 /* same spec also implies that it's not safe to call the system malloc */
765 /* between fork() and exec(). Thus we're doing no worse than it.) */
766
767 /* Called before a fork() */
768 void GC_fork_prepare_proc(void)
769 {
770 /* Acquire all relevant locks, so that after releasing the locks */
771 /* the child will see a consistent state in which monitor */
772 /* invariants hold. Unfortunately, we can't acquire libc locks */
773 /* we might need, and there seems to be no guarantee that libc */
774 /* must install a suitable fork handler. */
775 /* Wait for an ongoing GC to finish, since we can't finish it in */
776 /* the (one remaining thread in the) child. */
777 LOCK();
778 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
779 GC_wait_for_reclaim();
780 # endif
781 GC_wait_for_gc_completion(TRUE);
782 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
783 GC_acquire_mark_lock();
784 # endif
785 }
786
787 /* Called in parent after a fork() */
788 void GC_fork_parent_proc(void)
789 {
790 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
791 GC_release_mark_lock();
792 # endif
793 UNLOCK();
794 }
795
796 /* Called in child after a fork() */
797 void GC_fork_child_proc(void)
798 {
799 /* Clean up the thread table, so that just our thread is left. */
800 # if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
801 GC_release_mark_lock();
802 # endif
803 GC_remove_all_threads_but_me();
804 # ifdef PARALLEL_MARK
805 /* Turn off parallel marking in the child, since we are probably */
806 /* just going to exec, and we would have to restart mark threads. */
807 GC_markers = 1;
808 GC_parallel = FALSE;
809 # endif /* PARALLEL_MARK */
810 UNLOCK();
811 }
812 #endif /* HANDLE_FORK */
813
814 #if defined(GC_DGUX386_THREADS)
815 /* Return the number of processors, or a value <= 0 if it can't be determined. */
816 int GC_get_nprocs()
817 {
818 /* <takis@XFree86.Org> */
819 int numCpus;
820 struct dg_sys_info_pm_info pm_sysinfo;
821 int status = 0;
822
823 status = dg_sys_info((long int *) &pm_sysinfo,
824 DG_SYS_INFO_PM_INFO_TYPE, DG_SYS_INFO_PM_CURRENT_VERSION);
825 if (status < 0)
826 /* set -1 for error */
827 numCpus = -1;
828 else
829 /* Active CPUs */
830 numCpus = pm_sysinfo.idle_vp_count;
831
832 # ifdef DEBUG_THREADS
833 GC_printf1("Number of active CPUs in this system: %d\n", numCpus);
834 # endif
835 return(numCpus);
836 }
837 #endif /* GC_DGUX386_THREADS */
838
839 /* We hold the allocation lock. */
840 void GC_thr_init()
841 {
842 # ifndef GC_DARWIN_THREADS
843 int dummy;
844 # endif
845 GC_thread t;
846
847 if (GC_thr_initialized) return;
848 GC_thr_initialized = TRUE;
849
850 # ifdef HANDLE_FORK
851 /* Prepare for a possible fork. */
852 pthread_atfork(GC_fork_prepare_proc, GC_fork_parent_proc,
853 GC_fork_child_proc);
854 # endif /* HANDLE_FORK */
855 /* Add the initial thread, so we can stop it. */
856 t = GC_new_thread(pthread_self());
857 # ifdef GC_DARWIN_THREADS
858 t -> stop_info.mach_thread = mach_thread_self();
859 # else
860 t -> stop_info.stack_ptr = (ptr_t)(&dummy);
861 # endif
862 t -> flags = DETACHED | MAIN_THREAD;
863
864 GC_stop_init();
865
866 /* Set GC_nprocs. */
867 {
868 char * nprocs_string = GETENV("GC_NPROCS");
869 GC_nprocs = -1;
870 if (nprocs_string != NULL) GC_nprocs = atoi(nprocs_string);
871 }
872 if (GC_nprocs <= 0) {
873 # if defined(GC_HPUX_THREADS)
874 GC_nprocs = pthread_num_processors_np();
875 # endif
876 # if defined(GC_OSF1_THREADS) || defined(GC_AIX_THREADS)
877 GC_nprocs = sysconf(_SC_NPROCESSORS_ONLN);
878 if (GC_nprocs <= 0) GC_nprocs = 1;
879 # endif
880 # if defined(GC_FREEBSD_THREADS) || defined(GC_IRIX_THREADS)
881 /* FIXME: For Irix, that's a ridiculous assumption. */
882 GC_nprocs = 1;
883 # endif
884 # if defined(GC_DARWIN_THREADS)
885 int ncpus = 1;
886 size_t len = sizeof(ncpus);
887 sysctl((int[2]) {CTL_HW, HW_NCPU}, 2, &ncpus, &len, NULL, 0);
888 GC_nprocs = ncpus;
889 # endif
890 # if defined(GC_LINUX_THREADS) || defined(GC_DGUX386_THREADS)
891 GC_nprocs = GC_get_nprocs();
892 # endif
893 }
894 if (GC_nprocs <= 0) {
895 WARN("GC_get_nprocs() returned %ld\n", GC_nprocs);
896 GC_nprocs = 2;
897 # ifdef PARALLEL_MARK
898 GC_markers = 1;
899 # endif
900 } else {
901 # ifdef PARALLEL_MARK
902 {
903 char * markers_string = GETENV("GC_MARKERS");
904 if (markers_string != NULL) {
905 GC_markers = atoi(markers_string);
906 } else {
907 GC_markers = GC_nprocs;
908 }
909 }
910 # endif
911 }
912 # ifdef PARALLEL_MARK
913 # ifdef CONDPRINT
914 if (GC_print_stats) {
915 GC_printf2("Number of processors = %ld, "
916 "number of marker threads = %ld\n", GC_nprocs, GC_markers);
917 }
918 # endif
919 if (GC_markers == 1) {
920 GC_parallel = FALSE;
921 # ifdef CONDPRINT
922 if (GC_print_stats) {
923 GC_printf0("Single marker thread, turning off parallel marking\n");
924 }
925 # endif
926 } else {
927 GC_parallel = TRUE;
928 /* Disable true incremental collection, but generational is OK. */
929 GC_time_limit = GC_TIME_UNLIMITED;
930 }
931 /* If we are using a parallel marker, actually start helper threads. */
932 if (GC_parallel) start_mark_threads();
933 # endif
934 }
935
936
937 /* Perform all initializations, including those that */
938 /* may require allocation. */
939 /* Called without allocation lock. */
940 /* Must be called before a second thread is created. */
942 void GC_init_parallel()
943 {
944 if (parallel_initialized) return;
945 parallel_initialized = TRUE;
946
947 /* GC_init() calls us back, so set flag first. */
948 if (!GC_is_initialized) GC_init();
949 /* Initialize thread local free lists if used. */
950 # if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
951 LOCK();
952 GC_init_thread_local(GC_lookup_thread(pthread_self()));
953 UNLOCK();
954 # endif
955 }
956
957
958 #if !defined(GC_DARWIN_THREADS)
959 int WRAP_FUNC(pthread_sigmask)(int how, const sigset_t *set, sigset_t *oset)
960 {
961 sigset_t fudged_set;
962
963 if (set != NULL && (how == SIG_BLOCK || how == SIG_SETMASK)) {
964 fudged_set = *set;
965 sigdelset(&fudged_set, SIG_SUSPEND);
966 set = &fudged_set;
967 }
968 return(REAL_FUNC(pthread_sigmask)(how, set, oset));
969 }
970 #endif /* !GC_DARWIN_THREADS */
971
972 /* Wrappers for functions that are likely to block for an appreciable */
973 /* length of time. Must be called in pairs, if at all. */
974 /* Nothing much beyond the system call itself should be executed */
975 /* between these. */
976
977 void GC_start_blocking(void) {
978 # define SP_SLOP 128
979 GC_thread me;
980 LOCK();
981 me = GC_lookup_thread(pthread_self());
982 GC_ASSERT(!(me -> thread_blocked));
983 # ifdef SPARC
984 me -> stop_info.stack_ptr = (ptr_t)GC_save_regs_in_stack();
985 # else
986 # ifndef GC_DARWIN_THREADS
987 me -> stop_info.stack_ptr = (ptr_t)GC_approx_sp();
988 # endif
989 # endif
990 # ifdef IA64
991 me -> backing_store_ptr = (ptr_t)GC_save_regs_in_stack() + SP_SLOP;
992 # endif
993 /* Add some slop to the stack pointer, since the wrapped call may */
994 /* end up pushing more callee-save registers. */
995 # ifndef GC_DARWIN_THREADS
996 # ifdef STACK_GROWS_UP
997 me -> stop_info.stack_ptr += SP_SLOP;
998 # else
999 me -> stop_info.stack_ptr -= SP_SLOP;
1000 # endif
1001 # endif
1002 me -> thread_blocked = TRUE;
1003 UNLOCK();
1004 }
1005
1006 void GC_end_blocking(void) {
1007 GC_thread me;
1008 LOCK(); /* This will block if the world is stopped. */
1009 me = GC_lookup_thread(pthread_self());
1010 GC_ASSERT(me -> thread_blocked);
1011 me -> thread_blocked = FALSE;
1012 UNLOCK();
1013 }
1014
1015 #if defined(GC_DGUX386_THREADS)
1016 #define __d10_sleep sleep
1017 #endif /* GC_DGUX386_THREADS */
1018
1019 /* A wrapper for the standard C sleep function */
1020 int WRAP_FUNC(sleep) (unsigned int seconds)
1021 {
1022 int result;
1023
1024 GC_start_blocking();
1025 result = REAL_FUNC(sleep)(seconds);
1026 GC_end_blocking();
1027 return result;
1028 }
1029
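/* A minimal sketch of wrapping some other blocking call in the same */
/* way (hypothetical helper, not part of the collector's API): */
/* */
/* ssize_t my_blocking_read(int fd, void *buf, size_t n) */
/* { */
/* ssize_t result; */
/* */
/* GC_start_blocking(); */
/* result = read(fd, buf, n); */
/* GC_end_blocking(); */
/* return result; */
/* } */
/* */
/* Nothing else should run between the two calls: while thread_blocked */
/* is set, the collector scans this thread's stack only up to the */
/* saved (and slop-adjusted) stack pointer. */
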
1030 struct start_info {
1031 void *(*start_routine)(void *);
1032 void *arg;
1033 word flags;
1034 sem_t registered; /* 1 ==> in our thread table, but */
1035 /* parent hasn't yet noticed. */
1036 };
1037
1038 /* Called at thread exit. */
1039 /* Never called for main thread. That's OK, since it */
1040 /* results in at most a tiny one-time leak. And */
1041 /* linuxthreads doesn't reclaim the main thread's */
1042 /* resources or id anyway. */
1043 void GC_thread_exit_proc(void *arg)
1044 {
1045 GC_thread me;
1046
1047 LOCK();
1048 me = GC_lookup_thread(pthread_self());
1049 GC_destroy_thread_local(me);
1050 if (me -> flags & DETACHED) {
1051 GC_delete_thread(pthread_self());
1052 } else {
1053 me -> flags |= FINISHED;
1054 }
1055 # if defined(THREAD_LOCAL_ALLOC) && !defined(USE_PTHREAD_SPECIFIC) \
1056 && !defined(USE_COMPILER_TLS) && !defined(DBG_HDRS_ALL)
1057 GC_remove_specific(GC_thread_key);
1058 # endif
1059 /* The following may run the GC from a "nonexistent" thread. */
1060 GC_wait_for_gc_completion(FALSE);
1061 UNLOCK();
1062 }
1063
1064 int WRAP_FUNC(pthread_join)(pthread_t thread, void **retval)
1065 {
1066 int result;
1067 GC_thread thread_gc_id;
1068
1069 LOCK();
1070 thread_gc_id = GC_lookup_thread(thread);
1071 /* This is guaranteed to be the intended one, since the thread id */
1072 /* can't have been recycled by pthreads. */
1073 UNLOCK();
1074 result = REAL_FUNC(pthread_join)(thread, retval);
1075 # if defined (GC_FREEBSD_THREADS)
1076 /* On FreeBSD, the wrapped pthread_join() sometimes returns (what
1077 appears to be) a spurious EINTR, which causes the test and real code
1078 to gratuitously fail. Having looked at system pthread library source
1079 code, I see how this return code may be generated. In one path of
1080 code, pthread_join() just returns the errno setting of the thread
1081 being joined. This does not match the POSIX specification or the
1082 local man pages; thus I have taken the liberty to catch this one
1083 spurious return value, properly conditionalized on GC_FREEBSD_THREADS. */
1084 if (result == EINTR) result = 0;
1085 # endif
1086 if (result == 0) {
1087 LOCK();
1088 /* Here the pthread thread id may have been recycled. */
1089 GC_delete_gc_thread(thread, thread_gc_id);
1090 UNLOCK();
1091 }
1092 return result;
1093 }
1094
1095 int
1096 WRAP_FUNC(pthread_detach)(pthread_t thread)
1097 {
1098 int result;
1099 GC_thread thread_gc_id;
1100
1101 LOCK();
1102 thread_gc_id = GC_lookup_thread(thread);
1103 UNLOCK();
1104 result = REAL_FUNC(pthread_detach)(thread);
1105 if (result == 0) {
1106 LOCK();
1107 thread_gc_id -> flags |= DETACHED;
1108 /* Here the pthread thread id may have been recycled. */
1109 if (thread_gc_id -> flags & FINISHED) {
1110 GC_delete_gc_thread(thread, thread_gc_id);
1111 }
1112 UNLOCK();
1113 }
1114 return result;
1115 }
1116
1117 GC_bool GC_in_thread_creation = FALSE;
1118
1119 void * GC_start_routine(void * arg)
1120 {
1121 int dummy;
1122 struct start_info * si = arg;
1123 void * result;
1124 GC_thread me;
1125 pthread_t my_pthread;
1126 void *(*start)(void *);
1127 void *start_arg;
1128
1129 my_pthread = pthread_self();
1130 # ifdef DEBUG_THREADS
1131 GC_printf1("Starting thread 0x%lx\n", my_pthread);
1132 GC_printf1("pid = %ld\n", (long) getpid());
1133 GC_printf1("sp = 0x%lx\n", (long) &arg);
1134 # endif
1135 LOCK();
1136 GC_in_thread_creation = TRUE;
1137 me = GC_new_thread(my_pthread);
1138 GC_in_thread_creation = FALSE;
1139 #ifdef GC_DARWIN_THREADS
1140 me -> stop_info.mach_thread = mach_thread_self();
1141 #else
1142 me -> stop_info.stack_ptr = 0;
1143 #endif
1144 me -> flags = si -> flags;
1145 /* me -> stack_end = GC_linux_stack_base(); -- currently (11/99) */
1146 /* doesn't work because the stack base in /proc/self/stat is the */
1147 /* one for the main thread. There is a strong argument that that's */
1148 /* a kernel bug, but a pervasive one. */
1149 # ifdef STACK_GROWS_DOWN
1150 me -> stack_end = (ptr_t)(((word)(&dummy) + (GC_page_size - 1))
1151 & ~(GC_page_size - 1));
1152 # ifndef GC_DARWIN_THREADS
1153 me -> stop_info.stack_ptr = me -> stack_end - 0x10;
1154 # endif
1155 /* Needs to be plausible, since an asynchronous stack mark */
1156 /* should not crash. */
1157 # else
1158 me -> stack_end = (ptr_t)((word)(&dummy) & ~(GC_page_size - 1));
1159 me -> stop_info.stack_ptr = me -> stack_end + 0x10;
1160 # endif
1161 /* This is dubious, since we may be more than a page into the stack, */
1162 /* and hence skip some of it, though it's not clear that matters. */
1163 # ifdef IA64
1164 me -> backing_store_end = (ptr_t)
1165 (GC_save_regs_in_stack() & ~(GC_page_size - 1));
1166 /* This is also < 100% convincing. We should also read this */
1167 /* from /proc, but the hook to do so isn't there yet. */
1168 # endif /* IA64 */
1169 UNLOCK();
1170 start = si -> start_routine;
1171 # ifdef DEBUG_THREADS
1172 GC_printf1("start_routine = 0x%lx\n", start);
1173 # endif
1174 start_arg = si -> arg;
1175 sem_post(&(si -> registered)); /* Last action on si. */
1176 /* OK to deallocate. */
1177 pthread_cleanup_push(GC_thread_exit_proc, 0);
1178 # if defined(THREAD_LOCAL_ALLOC) && !defined(DBG_HDRS_ALL)
1179 LOCK();
1180 GC_init_thread_local(me);
1181 UNLOCK();
1182 # endif
1183 result = (*start)(start_arg);
1184 #ifdef DEBUG_THREADS
1185 GC_printf1("Finishing thread 0x%x\n", pthread_self());
1186 #endif
1187 me -> status = result;
1188 pthread_cleanup_pop(1);
1189 /* Cleanup acquires lock, ensuring that we can't exit */
1190 /* while a collection that thinks we're alive is trying to stop */
1191 /* us. */
1192 return(result);
1193 }
1194
1195 int
1196 WRAP_FUNC(pthread_create)(pthread_t *new_thread,
1197 const pthread_attr_t *attr,
1198 void *(*start_routine)(void *), void *arg)
1199 {
1200 int result;
1201 int detachstate;
1202 word my_flags = 0;
1203 struct start_info * si;
1204 /* This is otherwise saved only in an area mmapped by the thread */
1205 /* library, which isn't visible to the collector. */
1206
1207 /* We resist the temptation to muck with the stack size here, */
1208 /* even if the default is unreasonably small. That's the client's */
1209 /* responsibility. */
1210
1211 LOCK();
1212 si = (struct start_info *)GC_INTERNAL_MALLOC(sizeof(struct start_info),
1213 NORMAL);
1214 UNLOCK();
1215 if (!parallel_initialized) GC_init_parallel();
1216 if (0 == si) return(ENOMEM);
1217 sem_init(&(si -> registered), 0, 0);
1218 si -> start_routine = start_routine;
1219 si -> arg = arg;
1220 LOCK();
1221 if (!GC_thr_initialized) GC_thr_init();
1222 # ifdef GC_ASSERTIONS
1223 {
1224 size_t stack_size;
1225 if (NULL == attr) {
1226 pthread_attr_t my_attr;
1227 pthread_attr_init(&my_attr);
1228 pthread_attr_getstacksize(&my_attr, &stack_size);
1229 } else {
1230 pthread_attr_getstacksize(attr, &stack_size);
1231 }
1232 # ifdef PARALLEL_MARK
1233 GC_ASSERT(stack_size >= (8*HBLKSIZE*sizeof(word)));
1234 # else
1235 /* FreeBSD-5.3/Alpha: default pthread stack is 64K, */
1236 /* HBLKSIZE=8192, sizeof(word)=8 */
1237 GC_ASSERT(stack_size >= 65536);
1238 # endif
1239 /* Our threads may need to do some work for the GC. */
1240 /* Threads with ridiculously small stacks won't */
1241 /* work, and they probably wouldn't be useful anyway. */
1242 }
1243 # endif
1244 if (NULL == attr) {
1245 detachstate = PTHREAD_CREATE_JOINABLE;
1246 } else {
1247 pthread_attr_getdetachstate(attr, &detachstate);
1248 }
1249 if (PTHREAD_CREATE_DETACHED == detachstate) my_flags |= DETACHED;
1250 si -> flags = my_flags;
1251 UNLOCK();
1252 # ifdef DEBUG_THREADS
1253 GC_printf1("About to start new thread from thread 0x%X\n",
1254 pthread_self());
1255 # endif
1256
1257 result = REAL_FUNC(pthread_create)(new_thread, attr, GC_start_routine, si);
1258
1259 # ifdef DEBUG_THREADS
1260 GC_printf1("Started thread 0x%X\n", *new_thread);
1261 # endif
1262 /* Wait until child has been added to the thread table. */
1263 /* This also ensures that we hold onto si until the child is done */
1264 /* with it. Thus it doesn't matter whether it is otherwise */
1265 /* visible to the collector. */
1266 if (0 == result) {
1267 while (0 != sem_wait(&(si -> registered))) {
1268 if (EINTR != errno) ABORT("sem_wait failed");
1269 }
1270 }
1271 sem_destroy(&(si -> registered));
1272 LOCK();
1273 GC_INTERNAL_FREE(si);
1274 UNLOCK();
1275
1276 return(result);
1277 }
1278
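/* How a client normally reaches this wrapper (a sketch, assuming the */
/* usual gc.h macro redirection rather than GC_USE_LD_WRAP): */
/* */
/* #define GC_THREADS */
/* #include "gc.h" <-- redefines pthread_create for us */
/* ... */
/* pthread_t t; */
/* if (pthread_create(&t, NULL, start, arg) != 0) abort(); */
/* */
/* The child is guaranteed to be in GC_threads before the call */
/* returns, thanks to the sem_wait() on si -> registered above. */
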
1279 #ifdef GENERIC_COMPARE_AND_SWAP
1280 pthread_mutex_t GC_compare_and_swap_lock = PTHREAD_MUTEX_INITIALIZER;
1281
1282 GC_bool GC_compare_and_exchange(volatile GC_word *addr,
1283 GC_word old, GC_word new_val)
1284 {
1285 GC_bool result;
1286 pthread_mutex_lock(&GC_compare_and_swap_lock);
1287 if (*addr == old) {
1288 *addr = new_val;
1289 result = TRUE;
1290 } else {
1291 result = FALSE;
1292 }
1293 pthread_mutex_unlock(&GC_compare_and_swap_lock);
1294 return result;
1295 }
1296
1297 GC_word GC_atomic_add(volatile GC_word *addr, GC_word how_much)
1298 {
1299 GC_word old;
1300 pthread_mutex_lock(&GC_compare_and_swap_lock);
1301 old = *addr;
1302 *addr = old + how_much;
1303 pthread_mutex_unlock(&GC_compare_and_swap_lock);
1304 return old;
1305 }
1306
1307 #endif /* GENERIC_COMPARE_AND_SWAP */
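
/* A hedged sketch of the usage pattern these primitives support */
/* (illustration only): */
/* */
/* GC_word old; */
/* do { */
/* old = *addr; */
/* } while (!GC_compare_and_exchange(addr, old, old + 1)); */
/* */
/* i.e. the classic compare-and-swap retry loop; GC_atomic_add is the */
/* same increment done directly under the emulation mutex. */
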
1308 /* Spend a few cycles in a way that can't introduce contention with */
1309 /* other threads. */
1310 void GC_pause()
1311 {
1312 int i;
1313 # if !defined(__GNUC__) || defined(__INTEL_COMPILER)
1314 volatile word dummy = 0;
1315 # endif
1316
1317 for (i = 0; i < 10; ++i) {
1318 # if defined(__GNUC__) && !defined(__INTEL_COMPILER)
1319 __asm__ __volatile__ (" " : : : "memory");
1320 # else
1321 /* Something that's unlikely to be optimized away. */
1322 GC_noop(++dummy);
1323 # endif
1324 }
1325 }
1326
1327 #define SPIN_MAX 128 /* Maximum number of calls to GC_pause before */
1328 /* giving up. */
1329
1330 VOLATILE GC_bool GC_collecting = 0;
1331 /* A hint that we're in the collector and */
1332 /* holding the allocation lock for an */
1333 /* extended period. */
1334
1335 #if !defined(USE_SPIN_LOCK) || defined(PARALLEL_MARK)
1336 /* If we don't want to use the below spinlock implementation, either */
1337 /* because we don't have a GC_test_and_set implementation, or because */
1338 /* we don't want to risk sleeping, we can still try spinning on */
1339 /* pthread_mutex_trylock for a while. This appears to be very */
1340 /* beneficial in many cases. */
1341 /* I suspect that under high contention this is nearly always better */
1342 /* than the spin lock. But it's a bit slower on a uniprocessor. */
1343 /* Hence we still default to the spin lock. */
1344 /* This is also used to acquire the mark lock for the parallel */
1345 /* marker. */
1346
1347 /* Here we use a strict exponential backoff scheme. I don't know */
1348 /* whether that's better or worse than the above. We eventually */
1349 /* yield by calling pthread_mutex_lock(); it never makes sense to */
1350 /* explicitly sleep. */
1351
1352 #define LOCK_STATS
1353 #ifdef LOCK_STATS
1354 unsigned long GC_spin_count = 0;
1355 unsigned long GC_block_count = 0;
1356 unsigned long GC_unlocked_count = 0;
1357 #endif
1358
1359 void GC_generic_lock(pthread_mutex_t * lock)
1360 {
1361 #ifndef NO_PTHREAD_TRYLOCK
1362 unsigned pause_length = 1;
1363 unsigned i;
1364
1365 if (0 == pthread_mutex_trylock(lock)) {
1366 # ifdef LOCK_STATS
1367 ++GC_unlocked_count;
1368 # endif
1369 return;
1370 }
1371 for (; pause_length <= SPIN_MAX; pause_length <<= 1) {
1372 for (i = 0; i < pause_length; ++i) {
1373 GC_pause();
1374 }
1375 switch(pthread_mutex_trylock(lock)) {
1376 case 0:
1377 # ifdef LOCK_STATS
1378 ++GC_spin_count;
1379 # endif
1380 return;
1381 case EBUSY:
1382 break;
1383 default:
1384 ABORT("Unexpected error from pthread_mutex_trylock");
1385 }
1386 }
1387 #endif /* !NO_PTHREAD_TRYLOCK */
1388 # ifdef LOCK_STATS
1389 ++GC_block_count;
1390 # endif
1391 pthread_mutex_lock(lock);
1392 }
1393
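/* Worked example: with SPIN_MAX = 128 the loop above retries the */
/* trylock 8 times, pausing for 1, 2, 4, ..., 128 rounds in turn, */
/* i.e. 255 GC_pause() calls in all, before finally blocking in */
/* pthread_mutex_lock(). */
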
1394 #endif /* !USE_SPIN_LOCK || PARALLEL_MARK */
1395
1396 #if defined(USE_SPIN_LOCK)
1397
1398 /* Reasonably fast spin locks. Basically the same implementation */
1399 /* as STL alloc.h. This isn't really the right way to do this, */
1400 /* but until the POSIX scheduling mess gets straightened out ... */
1401
1402 volatile unsigned int GC_allocate_lock = 0;
1403
1404
1405 void GC_lock()
1406 {
1407 # define low_spin_max 30 /* spin cycles if we suspect uniprocessor */
1408 # define high_spin_max SPIN_MAX /* spin cycles for multiprocessor */
1409 static unsigned spin_max = low_spin_max;
1410 unsigned my_spin_max;
1411 static unsigned last_spins = 0;
1412 unsigned my_last_spins;
1413 int i;
1414
1415 if (!GC_test_and_set(&GC_allocate_lock)) {
1416 return;
1417 }
1418 my_spin_max = spin_max;
1419 my_last_spins = last_spins;
1420 for (i = 0; i < my_spin_max; i++) {
1421 if (GC_collecting || GC_nprocs == 1) goto yield;
1422 if (i < my_last_spins/2 || GC_allocate_lock) {
1423 GC_pause();
1424 continue;
1425 }
1426 if (!GC_test_and_set(&GC_allocate_lock)) {
1427 /*
1428 * got it!
1429 * Spinning worked. Thus we're probably not being scheduled
1430 * against the other process with which we were contending.
1431 * Thus it makes sense to spin longer the next time.
1432 */
1433 last_spins = i;
1434 spin_max = high_spin_max;
1435 return;
1436 }
1437 }
1438 /* We are probably being scheduled against the other process. Sleep. */
1439 spin_max = low_spin_max;
1440 yield:
1441 for (i = 0;; ++i) {
1442 if (!GC_test_and_set(&GC_allocate_lock)) {
1443 return;
1444 }
1445 # define SLEEP_THRESHOLD 12
1446 /* Under Linux very short sleeps tend to wait until */
1447 /* the current time quantum expires. On old Linux */
1448 /* kernels nanosleep(<= 2ms) just spins. */
1449 /* (Under 2.4, this happens only for real-time */
1450 /* processes.) We want to minimize both behaviors */
1451 /* here. */
1452 if (i < SLEEP_THRESHOLD) {
1453 sched_yield();
1454 } else {
1455 struct timespec ts;
1456
1457 if (i > 24) i = 24;
1458 /* Don't wait for more than about 15msecs, even */
1459 /* under extreme contention. */
1460 ts.tv_sec = 0;
1461 ts.tv_nsec = 1 << i;
1462 nanosleep(&ts, 0);
1463 }
1464 }
1465 }
1466
1467 #else /* !USE_SPIN_LOCK */
1468 void GC_lock()
1469 {
1470 #ifndef NO_PTHREAD_TRYLOCK
1471 if (1 == GC_nprocs || GC_collecting) {
1472 pthread_mutex_lock(&GC_allocate_ml);
1473 } else {
1474 GC_generic_lock(&GC_allocate_ml);
1475 }
1476 #else /* !NO_PTHREAD_TRYLOCK */
1477 pthread_mutex_lock(&GC_allocate_ml);
1478 #endif /* !NO_PTHREAD_TRYLOCK */
1479 }
1480
1481 #endif /* !USE_SPIN_LOCK */
1482
1483 #if defined(PARALLEL_MARK) || defined(THREAD_LOCAL_ALLOC)
1484
1485 #ifdef GC_ASSERTIONS
1486 pthread_t GC_mark_lock_holder = NO_THREAD;
1487 #endif
1488
1489 #if 0
1490 /* Ugly workaround for a linux threads bug in the final versions */
1491 /* of glibc2.1. Pthread_mutex_trylock sets the mutex owner */
1492 /* field even when it fails to acquire the mutex. This causes */
1493 /* pthread_cond_wait to die. Remove for glibc2.2. */
1494 /* According to the man page, we should use */
1495 /* PTHREAD_ERRORCHECK_MUTEX_INITIALIZER_NP, but that isn't actually */
1496 /* defined. */
1497 static pthread_mutex_t mark_mutex =
1498 {0, 0, 0, PTHREAD_MUTEX_ERRORCHECK_NP, {0, 0}};
1499 #else
1500 static pthread_mutex_t mark_mutex = PTHREAD_MUTEX_INITIALIZER;
1501 #endif
1502
1503 static pthread_cond_t builder_cv = PTHREAD_COND_INITIALIZER;
1504
1505 void GC_acquire_mark_lock()
1506 {
1507 /*
1508 if (pthread_mutex_lock(&mark_mutex) != 0) {
1509 ABORT("pthread_mutex_lock failed");
1510 }
1511 */
1512 GC_generic_lock(&mark_mutex);
1513 # ifdef GC_ASSERTIONS
1514 GC_mark_lock_holder = pthread_self();
1515 # endif
1516 }
1517
1518 void GC_release_mark_lock()
1519 {
1520 GC_ASSERT(GC_mark_lock_holder == pthread_self());
1521 # ifdef GC_ASSERTIONS
1522 GC_mark_lock_holder = NO_THREAD;
1523 # endif
1524 if (pthread_mutex_unlock(&mark_mutex) != 0) {
1525 ABORT("pthread_mutex_unlock failed");
1526 }
1527 }
1528
1529 /* Collector must wait for all freelist builders for 2 reasons: */
1530 /* 1) Mark bits may still be getting examined without lock. */
1531 /* 2) Partial free lists referenced only by locals may not be scanned */
1532 /* correctly, e.g. if they contain "pointer-free" objects, since the */
1533 /* free-list link may be ignored. */
1534 void GC_wait_builder()
1535 {
1536 GC_ASSERT(GC_mark_lock_holder == pthread_self());
1537 # ifdef GC_ASSERTIONS
1538 GC_mark_lock_holder = NO_THREAD;
1539 # endif
1540 if (pthread_cond_wait(&builder_cv, &mark_mutex) != 0) {
1541 ABORT("pthread_cond_wait failed");
1542 }
1543 GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
1544 # ifdef GC_ASSERTIONS
1545 GC_mark_lock_holder = pthread_self();
1546 # endif
1547 }
1548
1549 void GC_wait_for_reclaim()
1550 {
1551 GC_acquire_mark_lock();
1552 while (GC_fl_builder_count > 0) {
1553 GC_wait_builder();
1554 }
1555 GC_release_mark_lock();
1556 }
1557
1558 void GC_notify_all_builder()
1559 {
1560 GC_ASSERT(GC_mark_lock_holder == pthread_self());
1561 if (pthread_cond_broadcast(&builder_cv) != 0) {
1562 ABORT("pthread_cond_broadcast failed");
1563 }
1564 }
1565
1566 #endif /* PARALLEL_MARK || THREAD_LOCAL_ALLOC */
1567
1568 #ifdef PARALLEL_MARK
1569
1570 static pthread_cond_t mark_cv = PTHREAD_COND_INITIALIZER;
1571
1572 void GC_wait_marker()
1573 {
1574 GC_ASSERT(GC_mark_lock_holder == pthread_self());
1575 # ifdef GC_ASSERTIONS
1576 GC_mark_lock_holder = NO_THREAD;
1577 # endif
1578 if (pthread_cond_wait(&mark_cv, &mark_mutex) != 0) {
1579 ABORT("pthread_cond_wait failed");
1580 }
1581 GC_ASSERT(GC_mark_lock_holder == NO_THREAD);
1582 # ifdef GC_ASSERTIONS
1583 GC_mark_lock_holder = pthread_self();
1584 # endif
1585 }
1586
1587 void GC_notify_all_marker()
1588 {
1589 if (pthread_cond_broadcast(&mark_cv) != 0) {
1590 ABORT("pthread_cond_broadcast failed");
1591 }
1592 }
1593
1594 #endif /* PARALLEL_MARK */
1595
1596 # endif /* GC_LINUX_THREADS and friends */
1597