root/gc/malloc.c


DEFINITIONS

This source file includes the following definitions.
  1. GC_alloc_reclaim_list
  2. GC_alloc_large
  3. GC_alloc_large_and_clear
  4. GC_generic_malloc_inner
  5. GC_generic_malloc_inner_ignore_off_page
  6. GC_generic_malloc
  7. GC_malloc_atomic
  8. GC_malloc
  9. malloc
  10. calloc
  11. strdup
  12. GC_free
  13. GC_free_inner
  14. free

/*
 * Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
 * Copyright (c) 1991-1994 by Xerox Corporation.  All rights reserved.
 * Copyright (c) 2000 by Hewlett-Packard Company.  All rights reserved.
 *
 * THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
 * OR IMPLIED.  ANY USE IS AT YOUR OWN RISK.
 *
 * Permission is hereby granted to use or copy this program
 * for any purpose,  provided the above notices are retained on all copies.
 * Permission to modify the code and to distribute modified code is granted,
 * provided the above notices are retained, and a notice that the code was
 * modified is included with the above copyright notice.
 */
/* Boehm, February 7, 1996 4:32 pm PST */

#include <stdio.h>
#include "private/gc_priv.h"

extern ptr_t GC_clear_stack();  /* in misc.c, behaves like identity */
void GC_extend_size_map();      /* in misc.c. */

/* Allocate reclaim list for kind:      */
/* Return TRUE on success               */
GC_bool GC_alloc_reclaim_list(kind)
register struct obj_kind * kind;
{
    struct hblk ** result = (struct hblk **)
                GC_scratch_alloc((MAXOBJSZ+1) * sizeof(struct hblk *));
    if (result == 0) return(FALSE);
    BZERO(result, (MAXOBJSZ+1)*sizeof(struct hblk *));
    kind -> ok_reclaim_list = result;
    return(TRUE);
}
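
/* Illustrative sketch (not part of the collector): the list allocated  */
/* above has one entry per small object size in words, indices 0        */
/* through MAXOBJSZ, each heading a chain of blocks awaiting reclaim.   */
/* The ex_ name below is ours; the real sweep logic lives in reclaim.c. */
#if 0
static void ex_count_reclaim_entries(struct obj_kind * kind)
{
    word sz;
    word nonempty = 0;
    for (sz = 0; sz <= MAXOBJSZ; sz++) {
        /* Each nonzero entry heads a list of blocks whose free objects */
        /* of size sz have not yet been swept onto a free list.         */
        if (kind -> ok_reclaim_list[sz] != 0) nonempty++;
    }
}
#endif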

/* Allocate a large block of size lw words.     */
/* The block is not cleared.                    */
/* Flags is 0 or IGNORE_OFF_PAGE.               */
/* We hold the allocation lock.                 */
ptr_t GC_alloc_large(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    struct hblk * h;
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);
    ptr_t result;

    if (!GC_is_initialized) GC_init_inner();
    /* Do our share of marking work */
        if(GC_incremental && !GC_dont_gc)
            GC_collect_a_little_inner((int)n_blocks);
    h = GC_allochblk(lw, k, flags);
#   ifdef USE_MUNMAP
        if (0 == h) {
            GC_merge_unmapped();
            h = GC_allochblk(lw, k, flags);
        }
#   endif
    while (0 == h && GC_collect_or_expand(n_blocks, (flags != 0))) {
        h = GC_allochblk(lw, k, flags);
    }
    if (h == 0) {
        result = 0;
    } else {
        int total_bytes = n_blocks * HBLKSIZE;
        if (n_blocks > 1) {
            GC_large_allocd_bytes += total_bytes;
            if (GC_large_allocd_bytes > GC_max_large_allocd_bytes)
                GC_max_large_allocd_bytes = GC_large_allocd_bytes;
        }
        result = (ptr_t) (h -> hb_body);
        GC_words_wasted += BYTES_TO_WORDS(total_bytes) - lw;
    }
    return result;
}
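
/* Illustrative sketch (not part of the collector): the rounding that   */
/* OBJ_SZ_TO_BLOCKS performs above, under the assumed values of         */
/* 4096-byte heap blocks and 4-byte words.  The leftover words in the   */
/* last block are what gets charged to GC_words_wasted.                 */
#if 0
# define EX_HBLKSIZE 4096       /* assumption for this example only */
# define EX_WORDSZ 4            /* assumption: bytes per word       */
static word ex_words_to_blocks(word lw)
{
    /* Round a size in words up to whole heap blocks. */
    return (lw * EX_WORDSZ + EX_HBLKSIZE - 1) / EX_HBLKSIZE;
}
/* e.g. lw = 1500 words -> 6000 bytes -> 2 blocks (8192 bytes), so      */
/* 8192/4 - 1500 = 548 words would be recorded as wasted.               */
#endif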


/* Allocate a large block of size lw words.  Clear if appropriate.      */
/* We hold the allocation lock.                                         */
ptr_t GC_alloc_large_and_clear(lw, k, flags)
word lw;
int k;
unsigned flags;
{
    ptr_t result = GC_alloc_large(lw, k, flags);
    word n_blocks = OBJ_SZ_TO_BLOCKS(lw);

    if (0 == result) return 0;
    if (GC_debugging_started || GC_obj_kinds[k].ok_init) {
        /* Clear the whole block, in case of GC_realloc call. */
        BZERO(result, n_blocks * HBLKSIZE);
    }
    return result;
}

/* Allocate lb bytes for an object of kind k.   */
/* Should not be used directly to allocate      */
/* objects such as STUBBORN objects that        */
/* require special handling on allocation.      */
/* First a version that assumes we already      */
/* hold the lock:                               */
ptr_t GC_generic_malloc_inner(lb, k)
register word lb;
register int k;
{
register word lw;
register ptr_t op;
register ptr_t *opp;

    if( SMALL_OBJ(lb) ) {
        register struct obj_kind * kind = GC_obj_kinds + k;
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
          if (lw == 0) lw = MIN_WORDS;
#       endif
        opp = &(kind -> ok_freelist[lw]);
        if( (op = *opp) == 0 ) {
#           ifdef MERGE_SIZES
              if (GC_size_map[lb] == 0) {
                if (!GC_is_initialized)  GC_init_inner();
                if (GC_size_map[lb] == 0) GC_extend_size_map(lb);
                return(GC_generic_malloc_inner(lb, k));
              }
#           else
              if (!GC_is_initialized) {
                GC_init_inner();
                return(GC_generic_malloc_inner(lb, k));
              }
#           endif
            if (kind -> ok_reclaim_list == 0) {
                if (!GC_alloc_reclaim_list(kind)) goto out;
            }
            op = GC_allocobj(lw, k);
            if (op == 0) goto out;
        }
        /* Here everything is in a consistent state.    */
        /* We assume the following assignment is        */
        /* atomic.  If we get aborted                   */
        /* after the assignment, we lose an object,     */
        /* but that's benign.                           */
        /* Volatile declarations may need to be added   */
        /* to prevent the compiler from breaking things.*/
        /* If we only execute the second of the         */
        /* following assignments, we lose the free      */
        /* list, but that should still be OK, at least  */
        /* for garbage collected memory.                */
        *opp = obj_link(op);
        obj_link(op) = 0;
    } else {
        lw = ROUNDED_UP_WORDS(lb);
        op = (ptr_t)GC_alloc_large_and_clear(lw, k, 0);
    }
    GC_words_allocd += lw;

out:
    return op;
}
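
/* Illustrative sketch (not part of the collector): a caller of         */
/* GC_generic_malloc_inner must already hold the allocation lock,       */
/* following the same discipline GC_generic_malloc uses below.  NORMAL  */
/* is the standard collectable kind.                                    */
#if 0
static ptr_t ex_alloc_normal_locked(word lb)
{
    ptr_t op;
    DCL_LOCK_STATE;

    DISABLE_SIGNALS();
    LOCK();
    op = GC_generic_malloc_inner(lb, NORMAL);
    UNLOCK();
    ENABLE_SIGNALS();
    return op;
}
#endif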

/* Allocate a composite object of size lb bytes.  The caller guarantees */
/* that pointers past the first page are not relevant.  Caller holds    */
/* allocation lock.                                                     */
ptr_t GC_generic_malloc_inner_ignore_off_page(lb, k)
register size_t lb;
register int k;
{
    register word lw;
    ptr_t op;

    if (lb <= HBLKSIZE)
        return(GC_generic_malloc_inner((word)lb, k));
    lw = ROUNDED_UP_WORDS(lb);
    op = (ptr_t)GC_alloc_large_and_clear(lw, k, IGNORE_OFF_PAGE);
    GC_words_allocd += lw;
    return op;
}

ptr_t GC_generic_malloc(lb, k)
register word lb;
register int k;
{
    ptr_t result;
    DCL_LOCK_STATE;

    if (GC_have_errors) GC_print_all_errors();
    GC_INVOKE_FINALIZERS();
    if (SMALL_OBJ(lb)) {
        DISABLE_SIGNALS();
        LOCK();
        result = GC_generic_malloc_inner((word)lb, k);
        UNLOCK();
        ENABLE_SIGNALS();
    } else {
        word lw;
        word n_blocks;
        GC_bool init;
        lw = ROUNDED_UP_WORDS(lb);
        n_blocks = OBJ_SZ_TO_BLOCKS(lw);
        init = GC_obj_kinds[k].ok_init;
        DISABLE_SIGNALS();
        LOCK();
        result = (ptr_t)GC_alloc_large(lw, k, 0);
        if (0 != result) {
          if (GC_debugging_started) {
            BZERO(result, n_blocks * HBLKSIZE);
          } else {
#           ifdef THREADS
              /* Clear any memory that might be used for GC descriptors */
              /* before we release the lock.                            */
                ((word *)result)[0] = 0;
                ((word *)result)[1] = 0;
                ((word *)result)[lw-1] = 0;
                ((word *)result)[lw-2] = 0;
#           endif
          }
        }
        GC_words_allocd += lw;
        UNLOCK();
        ENABLE_SIGNALS();
        if (init && !GC_debugging_started && 0 != result) {
            BZERO(result, n_blocks * HBLKSIZE);
        }
    }
    if (0 == result) {
        return((*GC_oom_fn)(lb));
    } else {
        return(result);
    }
}


#define GENERAL_MALLOC(lb,k) \
    (GC_PTR)GC_clear_stack(GC_generic_malloc((word)lb, k))
/* We make the GC_clear_stack() call a tail call, hoping to get more of */
/* the stack.                                                           */

/* Allocate lb bytes of atomic (pointerfree) data */
# ifdef __STDC__
    GC_PTR GC_malloc_atomic(size_t lb)
# else
    GC_PTR GC_malloc_atomic(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t * opp;
register word lw;
DCL_LOCK_STATE;

    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_aobjfreelist[lw]);
        FASTLOCK();
        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, PTRFREE));
        }
        /* See above comment on signals.        */
        *opp = obj_link(op);
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
   } else {
       return(GENERAL_MALLOC((word)lb, PTRFREE));
   }
}
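
/* Usage sketch (illustrative): atomic objects are assumed to contain   */
/* no pointers, so the collector never scans them.  Note that, unlike   */
/* GC_malloc, GC_malloc_atomic does not guarantee cleared memory.       */
#if 0
    char *buf = (char *)GC_malloc_atomic(1024);  /* pointer-free data */
#endif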

/* Allocate lb bytes of composite (pointerful) data */
# ifdef __STDC__
    GC_PTR GC_malloc(size_t lb)
# else
    GC_PTR GC_malloc(lb)
    size_t lb;
# endif
{
register ptr_t op;
register ptr_t *opp;
register word lw;
DCL_LOCK_STATE;

    if( EXPECT(SMALL_OBJ(lb), 1) ) {
#       ifdef MERGE_SIZES
          lw = GC_size_map[lb];
#       else
          lw = ALIGNED_WORDS(lb);
#       endif
        opp = &(GC_objfreelist[lw]);
        FASTLOCK();
        if( EXPECT(!FASTLOCK_SUCCEEDED() || (op = *opp) == 0, 0) ) {
            FASTUNLOCK();
            return(GENERAL_MALLOC((word)lb, NORMAL));
        }
        /* See above comment on signals.        */
        GC_ASSERT(0 == obj_link(op)
                  || (word)obj_link(op)
                        <= (word)GC_greatest_plausible_heap_addr
                     && (word)obj_link(op)
                        >= (word)GC_least_plausible_heap_addr);
        *opp = obj_link(op);
        obj_link(op) = 0;
        GC_words_allocd += lw;
        FASTUNLOCK();
        return((GC_PTR) op);
   } else {
       return(GENERAL_MALLOC((word)lb, NORMAL));
   }
}
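
/* Usage sketch (illustrative): NORMAL objects are scanned for          */
/* pointers, and the memory comes back cleared, so link fields start    */
/* out as valid null pointers.  The ex_node type here is ours.          */
#if 0
    struct ex_node { struct ex_node * next; int value; };
    struct ex_node * n = (struct ex_node *)GC_malloc(sizeof(struct ex_node));
    /* n -> next is already 0; no explicit deallocation is required. */
#endif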

# ifdef REDIRECT_MALLOC

/* Avoid unnecessary nested procedure calls here, by #defining some     */
/* malloc replacements.  Otherwise we end up saving a                   */
/* meaningless return address in the object.  It also speeds things up, */
/* but it is admittedly quite ugly.                                     */
# ifdef GC_ADD_CALLER
#   define RA GC_RETURN_ADDR,
# else
#   define RA
# endif
# define GC_debug_malloc_replacement(lb) \
        GC_debug_malloc(lb, RA "unknown", 0)

# ifdef __STDC__
    GC_PTR malloc(size_t lb)
# else
    GC_PTR malloc(lb)
    size_t lb;
# endif
  {
    /* It might help to manually inline the GC_malloc call here.        */
    /* But any decent compiler should reduce the extra procedure call   */
    /* to at most a jump instruction in this case.                      */
#   if defined(I386) && defined(GC_SOLARIS_THREADS)
      /*
       * Thread initialisation can call malloc before
       * we're ready for it.
       * It's not clear that this is enough to help matters.
       * The thread implementation may well call malloc at other
       * inopportune times.
       */
      if (!GC_is_initialized) return sbrk(lb);
#   endif /* I386 && GC_SOLARIS_THREADS */
    return((GC_PTR)REDIRECT_MALLOC(lb));
  }

# ifdef __STDC__
    GC_PTR calloc(size_t n, size_t lb)
# else
    GC_PTR calloc(n, lb)
    size_t n, lb;
# endif
  {
    /* Guard against overflow in n*lb, which would otherwise quietly    */
    /* allocate a too-small object.                                     */
    if (lb != 0 && n > ((size_t)-1) / lb) return(0);
    return((GC_PTR)REDIRECT_MALLOC(n*lb));
  }

#ifndef strdup
# include <string.h>
# ifdef __STDC__
    char *strdup(const char *s)
# else
    char *strdup(s)
    char *s;
# endif
  {
    size_t len = strlen(s) + 1;
    char * result = ((char *)REDIRECT_MALLOC(len));
    if (result == 0) return 0;
    /* len already counts the terminating NUL, so copy exactly len      */
    /* bytes; copying len+1 would read past the end of s.               */
    BCOPY(s, result, len);
    return result;
  }
#endif /* !defined(strdup) */
 /* If strdup is macro defined, we assume that it actually calls malloc, */
 /* and thus the right thing will happen even without overriding it.     */
 /* This seems to be true on most Linux systems.                         */

#undef GC_debug_malloc_replacement

# endif /* REDIRECT_MALLOC */

/* Explicitly deallocate an object p.                           */
# ifdef __STDC__
    void GC_free(GC_PTR p)
# else
    void GC_free(p)
    GC_PTR p;
# endif
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    if (p == 0) return;
        /* Required by ANSI.  It's not my fault ...     */
    h = HBLKPTR(p);
    hhdr = HDR(h);
    GC_ASSERT(GC_base(p) == p);
#   if defined(REDIRECT_MALLOC) && \
        (defined(GC_SOLARIS_THREADS) || defined(GC_LINUX_THREADS) \
         || defined(__MINGW32__)) /* Should this be MSWIN32 in general? */
        /* For Solaris, we have to redirect malloc calls during         */
        /* initialization.  For the others, this seems to happen        */
        /* implicitly.                                                  */
        /* Don't try to deallocate that memory.                         */
        if (0 == hhdr) return;
#   endif
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (EXPECT((sz <= MAXOBJSZ), 1)) {
#       ifdef THREADS
            DISABLE_SIGNALS();
            LOCK();
#       endif
        GC_mem_freed += sz;
        /* A signal here can make GC_mem_freed and GC_non_gc_bytes      */
        /* inconsistent.  We claim this is benign.                      */
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
                /* It's unnecessary to clear the mark bit.  If the      */
                /* object is reallocated, it doesn't matter.  O.w. the  */
                /* collector will do it, since it's on a free list.     */
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
#       ifdef THREADS
            UNLOCK();
            ENABLE_SIGNALS();
#       endif
    } else {
        DISABLE_SIGNALS();
        LOCK();
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
        UNLOCK();
        ENABLE_SIGNALS();
    }
}
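
/* Usage sketch (illustrative): GC_free must be given the base address  */
/* of an object from this allocator (note the GC_ASSERT(GC_base(p) == p)*/
/* above); interior pointers and pointers from other allocators are     */
/* not acceptable.                                                      */
#if 0
    char *q = (char *)GC_malloc(64);
    GC_free(q);           /* OK: q is the base address */
    /* GC_free(q + 8); */ /* wrong: interior pointer   */
#endif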

/* Explicitly deallocate an object p when we already hold lock.         */
/* Only used for internally allocated objects, so we can take some      */
/* shortcuts.                                                           */
#ifdef THREADS
void GC_free_inner(GC_PTR p)
{
    register struct hblk *h;
    register hdr *hhdr;
    register signed_word sz;
    register ptr_t * flh;
    register int knd;
    register struct obj_kind * ok;
    DCL_LOCK_STATE;

    h = HBLKPTR(p);
    hhdr = HDR(h);
    knd = hhdr -> hb_obj_kind;
    sz = hhdr -> hb_sz;
    ok = &GC_obj_kinds[knd];
    if (sz <= MAXOBJSZ) {
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        if (ok -> ok_init) {
            BZERO((word *)p + 1, WORDS_TO_BYTES(sz-1));
        }
        flh = &(ok -> ok_freelist[sz]);
        obj_link(p) = *flh;
        *flh = (ptr_t)p;
    } else {
        GC_mem_freed += sz;
        if (IS_UNCOLLECTABLE(knd)) GC_non_gc_bytes -= WORDS_TO_BYTES(sz);
        GC_freehblk(h);
    }
}
#endif /* THREADS */

# if defined(REDIRECT_MALLOC) && !defined(REDIRECT_FREE)
#   define REDIRECT_FREE GC_free
# endif
# ifdef REDIRECT_FREE
#   ifdef __STDC__
      void free(GC_PTR p)
#   else
      void free(p)
      GC_PTR p;
#   endif
  {
#   ifndef IGNORE_FREE
      REDIRECT_FREE(p);
#   endif
  }
# endif  /* REDIRECT_FREE */
