root/gc/stubborn.c


DEFINITIONS

This source file includes the following definitions.
  1. GC_push_stubborn_structures
  2. GC_stubborn_init
  3. GC_compact_changing_list
  4. GC_change_stubborn
  5. GC_end_stubborn_change
  6. GC_malloc_stubborn
  7. GC_read_changed
  8. GC_page_was_changed
  9. GC_clean_changing_list
  10. GC_malloc_stubborn (stub when STUBBORN_ALLOC is undefined)
  11. GC_end_stubborn_change (stub)
  12. GC_change_stubborn (stub)
  13. GC_push_stubborn_structures (stub)

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, July 31, 1995 5:02 pm PDT */


#include "private/gc_priv.h"

# ifdef STUBBORN_ALLOC
/* Stubborn object (hard to change, nearly immutable) allocation. */

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */

#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
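
/* A minimal usage sketch (illustrative only, hence not compiled): a   */
/* client allocates a stubborn object, fills in its pointer fields,    */
/* and then promises not to change them.  GC_malloc_stubborn,          */
/* GC_end_stubborn_change and GC_change_stubborn are the public entry  */
/* points declared in gc.h; the pair type below is hypothetical.       */
#if 0
    struct pair { GC_PTR head; GC_PTR tail; };

    struct pair * make_pair(x, y)
    GC_PTR x, y;
    {
        struct pair * p = (struct pair *)
                          GC_malloc_stubborn(sizeof(struct pair));

        if (p == 0) return(0);
        p -> head = x;
        p -> tail = y;
        /* Pointer fields are now final; tell the collector so that    */
        /* it may stop tracking writes to this object.                  */
        GC_end_stubborn_change((GC_PTR)p);
        return(p);
    }
#endif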

/* Data structure representing immutable objects that   */
/* are still being initialized.                         */
/* This is a bit baroque in order to avoid acquiring    */
/* the lock twice for a typical allocation.             */

GC_PTR * GC_changing_list_start;

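/* Push the word containing GC_changing_list_start, so that the        */
/* current changing-list buffer is treated as reachable.  Old buffers  */
/* abandoned by GC_compact_changing_list then become collectable.      */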
void GC_push_stubborn_structures GC_PROTO((void))
{
    GC_push_all((ptr_t)(&GC_changing_list_start),
                (ptr_t)(&GC_changing_list_start) + sizeof(GC_PTR *));
}

# ifdef THREADS
  VOLATILE GC_PTR * VOLATILE GC_changing_list_current;
# else
  GC_PTR * GC_changing_list_current;
# endif
        /* Points at last added element.  Also (ab)used for             */
        /* synchronization.  Updates and reads are assumed atomic.      */

GC_PTR * GC_changing_list_limit;
        /* Points at the last word of the buffer, which is always 0.    */
        /* All entries in (GC_changing_list_current,                    */
        /* GC_changing_list_limit] are 0.                               */

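/* A sketch of the changing-list layout implied by the invariants      */
/* above (zero entries before _current are holes left behind by        */
/* GC_end_stubborn_change or GC_clean_changing_list):                  */
/*                                                                     */
/*   GC_changing_list_start            GC_changing_list_limit          */
/*         |                                     |                     */
/*         v                                     v                     */
/*       [ p0 | 0 | p1 | ... | pn | 0 | 0 | ... | 0 ]                  */
/*                             ^                                       */
/*                   GC_changing_list_current                          */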

void GC_stubborn_init()
{
#   define INIT_SIZE 10

    GC_changing_list_start = (GC_PTR *)
                        GC_INTERNAL_MALLOC(
                                (word)(INIT_SIZE * sizeof(GC_PTR)),
                                PTRFREE);
    if (GC_changing_list_start == 0) {
        GC_err_printf0("Insufficient space to start up\n");
        ABORT("GC_stubborn_init: out of space");
    }
    BZERO(GC_changing_list_start,
          INIT_SIZE * sizeof(GC_PTR));
    GC_changing_list_current = GC_changing_list_start;
    GC_changing_list_limit = GC_changing_list_start + INIT_SIZE - 1;
    * GC_changing_list_limit = 0;
}

/* Compact and possibly grow GC_changing_list.  The old copy is        */
/* left alone.  Lock must be held.                                      */
/* When called, GC_changing_list_current == GC_changing_list_limit,     */
/* which is one past the last element.                                  */
/* When we finish, GC_changing_list_current again points one past the   */
/* last element.                                                        */
/* Invariant while this is running: GC_changing_list_current            */
/* points at a word containing 0.                                       */
/* Returns FALSE on failure.                                            */
GC_bool GC_compact_changing_list()
{
    register GC_PTR *p, *q;
    register word count = 0;
    word old_size = (char **)GC_changing_list_limit
                    - (char **)GC_changing_list_start+1;
                    /* The casts are needed as a workaround for an Amiga bug */
    register word new_size = old_size;
    GC_PTR * new_list;

    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) count++;
    }
    if (2 * count > old_size) new_size = 2 * count;
    new_list = (GC_PTR *)
                GC_INTERNAL_MALLOC(
                                new_size * sizeof(GC_PTR), PTRFREE);
                /* PTRFREE is a lie.  But we don't want the collector to  */
                /* consider these.  We do want the list itself to be      */
                /* collectable.                                           */
    if (new_list == 0) return(FALSE);
    BZERO(new_list, new_size * sizeof(GC_PTR));
    q = new_list;
    for (p = GC_changing_list_start; p < GC_changing_list_limit; p++) {
        if (*p != 0) *q++ = *p;
    }
    GC_changing_list_start = new_list;
    GC_changing_list_limit = new_list + new_size - 1;
    GC_changing_list_current = q;
    return(TRUE);
}
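
/* Design note: the list is grown (to twice the number of live         */
/* entries) only when more than half of it is still in use after       */
/* dropping holes; otherwise it is compacted at the same size.  Thus   */
/* steady add/clear traffic is absorbed in place and the amortized     */
/* cost per addition stays constant.                                   */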

/* Add p to changing list.  Clear p on failure. */
# define ADD_CHANGING(p) \
        {       \
            register struct hblk * h = HBLKPTR(p);      \
            register word index = PHT_HASH(h);  \
            \
            set_pht_entry_from_index(GC_changed_pages, index);  \
        }       \
        if (*GC_changing_list_current != 0 \
            && ++GC_changing_list_current == GC_changing_list_limit) { \
            if (!GC_compact_changing_list()) (p) = 0; \
        } \
        *GC_changing_list_current = p;
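/* Note on ADD_CHANGING: the braced block records the page containing  */
/* p in GC_changed_pages.  The if-statement then advances              */
/* GC_changing_list_current only when the current slot is occupied; a  */
/* slot zeroed by GC_end_stubborn_change is simply reused.  Reaching   */
/* the sentinel at GC_changing_list_limit triggers compaction, and if  */
/* that fails the macro argument is cleared, which is how              */
/* GC_malloc_stubborn ends up reporting failure by returning 0.        */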

void GC_change_stubborn(p)
GC_PTR p;
{
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(p);
    UNLOCK();
    ENABLE_SIGNALS();
}

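/* Mark the object p as no longer changeable.  The unlocked quick path */
/* below assumes p is the most recently recorded entry, the common     */
/* case.  Re-reading GC_changing_list_current after clearing the slot  */
/* detects a concurrent compaction: if the pointer moved, the entry    */
/* may have been copied to a new buffer, so we rescan under the lock;  */
/* tried_quick then suppresses the bad-argument abort, since the       */
/* entry may legitimately be gone already.                             */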
void GC_end_stubborn_change(p)
GC_PTR p;
{
#   ifdef THREADS
      register VOLATILE GC_PTR * my_current = GC_changing_list_current;
#   else
      register GC_PTR * my_current = GC_changing_list_current;
#   endif
    register GC_bool tried_quick;
    DCL_LOCK_STATE;

    if (*my_current == p) {
        /* Hopefully the normal case.                                   */
        /* Compaction could not have been running when we started.      */
        *my_current = 0;
#       ifdef THREADS
          if (my_current == GC_changing_list_current) {
            /* Compaction can't have run in the interim.        */
            /* We got away with the quick and dirty approach.   */
            return;
          }
          tried_quick = TRUE;
#       else
          return;
#       endif
    } else {
        tried_quick = FALSE;
    }
    DISABLE_SIGNALS();
    LOCK();
    my_current = GC_changing_list_current;
    for (; my_current >= GC_changing_list_start; my_current--) {
        if (*my_current == p) {
            *my_current = 0;
            UNLOCK();
            ENABLE_SIGNALS();
            return;
        }
    }
    if (!tried_quick) {
        GC_err_printf1("Bad arg to GC_end_stubborn_change: 0x%lx\n",
                       (unsigned long)p);
        ABORT("Bad arg to GC_end_stubborn_change");
    }
    UNLOCK();
    ENABLE_SIGNALS();
}

/* Allocate lb bytes of composite (pointerful) data.    */
/* No pointer fields may be changed after a call to     */
/* GC_end_stubborn_change(p) where p is the value       */
/* returned by GC_malloc_stubborn.                      */
# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
ptr_t result;
DCL_LOCK_STATE;

    if( SMALL_OBJ(lb) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_sobjfreelist[lw]);
        FASTLOCK();
        if( !FASTLOCK_SUCCEEDED() || (op = *opp) == 0 ) {
            FASTUNLOCK();
            result = GC_generic_malloc((word)lb, STUBBORN);
            goto record;
        }
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        result = (GC_PTR) op;
        ADD_CHANGING(result);
        FASTUNLOCK();
        return((GC_PTR)result);
    } else {
        result = (GC_PTR)
                GC_generic_malloc((word)lb, STUBBORN);
    }
record:
    DISABLE_SIGNALS();
    LOCK();
    ADD_CHANGING(result);
    UNLOCK();
    ENABLE_SIGNALS();
    return((GC_PTR)GC_clear_stack(result));
}
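
/* Design note: the small-object path above takes objects directly     */
/* from the size-indexed free list GC_sobjfreelist under FASTLOCK,     */
/* recording the new object as changing before the lock is dropped.    */
/* If the fast lock is unavailable or the free list is empty, we fall  */
/* back to GC_generic_malloc and record the object under the full      */
/* allocation lock instead.                                            */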


/* Functions analogous to GC_read_dirty and GC_page_was_dirty.  */
/* Report pages on which stubborn objects were changed.         */
void GC_read_changed()
{
    register GC_PTR * p = GC_changing_list_start;
    register GC_PTR q;
    register struct hblk * h;
    register word index;

    if (p == 0) /* initializing */ return;
    BCOPY(GC_changed_pages, GC_prev_changed_pages,
          (sizeof GC_changed_pages));
    BZERO(GC_changed_pages, (sizeof GC_changed_pages));
    for (; p <= GC_changing_list_current; p++) {
        if ((q = *p) != 0) {
            h = HBLKPTR(q);
            index = PHT_HASH(h);
            set_pht_entry_from_index(GC_changed_pages, index);
        }
    }
}
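
/* Note: this is conservative.  PHT_HASH maps page addresses into a    */
/* fixed-size hash table, so distinct pages may share an entry; a      */
/* collision can only cause an unchanged page to be reported as        */
/* changed, never the reverse, which is safe for the collector.        */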

GC_bool GC_page_was_changed(h)
struct hblk * h;
{
    register word index = PHT_HASH(h);

    return(get_pht_entry_from_index(GC_prev_changed_pages, index));
}

/* Remove unreachable entries from the changing list.  Should only be  */
/* called with mark bits consistent and lock held.                     */
void GC_clean_changing_list()
{
    register GC_PTR * p = GC_changing_list_start;
    register GC_PTR q;
    register ptr_t r;
    register unsigned long count = 0;
    register unsigned long dropped_count = 0;

    if (p == 0) /* initializing */ return;
    for (; p <= GC_changing_list_current; p++) {
        if ((q = *p) != 0) {
            count++;
            r = (ptr_t)GC_base(q);
            if (r == 0 || !GC_is_marked(r)) {
                *p = 0;
                dropped_count++;
            }
        }
    }
#   ifdef PRINTSTATS
      if (count > 0) {
        GC_printf2("%lu entries in changing list: reclaimed %lu\n",
                  (unsigned long)count, (unsigned long)dropped_count);
      }
#   endif
}

#else /* !STUBBORN_ALLOC */
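
/* If stubborn allocation is not configured, the entry points must     */
/* still exist so that clients can call them unconditionally; they     */
/* degenerate to ordinary garbage-collectable allocation and no-ops.   */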

# ifdef __STDC__
    GC_PTR GC_malloc_stubborn(size_t lb)
# else
    GC_PTR GC_malloc_stubborn(lb)
    size_t lb;
# endif
{
    return(GC_malloc(lb));
}

/*ARGSUSED*/
void GC_end_stubborn_change(p)
GC_PTR p;
{
}

/*ARGSUSED*/
void GC_change_stubborn(p)
GC_PTR p;
{
}

void GC_push_stubborn_structures GC_PROTO((void))
{
}

#endif /* STUBBORN_ALLOC */
