/*
* Copyright 1988, 1989 Hans-J. Boehm, Alan J. Demers
* Copyright (c) 1991-1994 by Xerox Corporation. All rights reserved.
*
* THIS MATERIAL IS PROVIDED AS IS, WITH ABSOLUTELY NO WARRANTY EXPRESSED
* OR IMPLIED. ANY USE IS AT YOUR OWN RISK.
*
* Permission is hereby granted to use or copy this program
* for any purpose, provided the above notices are retained on all copies.
* Permission to modify the code and to distribute modified code is granted,
* provided the above notices are retained, and a notice that the code was
* modified is included with the above copyright notice.
*/
/* Boehm, August 9, 1995 6:09 pm PDT */
# include "gc_priv.h"
/*
 * We maintain several hash tables of hblks that have had false hits.
 * Each contains one bit per hash bucket; if any page in the bucket
 * has had a false hit, we assume that all of them have.
 * See the definition of page_hash_table in gc_priv.h.
 * False hits from the stack(s) are much more dangerous than false hits
 * from elsewhere, since the former can pin a large object that spans
 * the block, even though it does not start on the dangerous block.
 */
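/*
 * For illustration only (not used by the collector): a page_hash_table
 * is an array of words treated as a bit vector.  Assuming the PHT_HASH,
 * divWORDSZ, and modWORDSZ macros from gc_priv.h, testing whether the
 * page containing address p has been black listed in table bl comes
 * down to
 *
 *   int index = PHT_HASH(p);
 *   int hit = (bl[divWORDSZ(index)] >> modWORDSZ(index)) & 1;
 *
 * which is roughly what the get_pht_entry_from_index macro used below
 * expands to.
 */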
/*
* Externally callable routines are:
* GC_add_to_black_list_normal
* GC_add_to_black_list_stack
* GC_promote_black_lists
* GC_is_black_listed
*
* All require that the allocator lock is held.
*/
/* Pointers to individual tables.  We replace one table by another by  */
/* switching these pointers.                                           */
word * GC_old_normal_bl;
                /* Nonstack false references seen at last full  */
                /* collection.                                  */
word * GC_incomplete_normal_bl;
                /* Nonstack false references seen since last    */
                /* full collection.                             */
word * GC_old_stack_bl;
word * GC_incomplete_stack_bl;

word GC_total_stack_black_listed;

word GC_black_list_spacing = MINHINCR*HBLKSIZE;  /* Initial rough guess */
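/*
 * Worked example (illustrative numbers only): with a 16 MB heap,
 * 4 KB heap blocks, and 1 MB (256 blocks) of stack-black-listed
 * pages, GC_promote_black_lists below computes
 *
 *   GC_black_list_spacing
 *       = HBLKSIZE * (GC_heapsize / GC_total_stack_black_listed)
 *       = 4096 * (16777216 / 1048576) = 65536 bytes,
 *
 * i.e. false references land roughly every 64 KB, so the allocator
 * should be reluctant to place objects much larger than that at
 * unverified addresses.
 */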
void GC_clear_bl();

void GC_bl_init()
{
#   ifndef ALL_INTERIOR_POINTERS
      GC_old_normal_bl = (word *)
                GC_scratch_alloc((word)(sizeof(page_hash_table)));
      GC_incomplete_normal_bl = (word *)
                GC_scratch_alloc((word)(sizeof(page_hash_table)));
      if (GC_old_normal_bl == 0 || GC_incomplete_normal_bl == 0) {
        GC_err_printf0("Insufficient memory for black list\n");
        EXIT();
      }
      GC_clear_bl(GC_old_normal_bl);
      GC_clear_bl(GC_incomplete_normal_bl);
#   endif
    GC_old_stack_bl = (word *)
                GC_scratch_alloc((word)(sizeof(page_hash_table)));
    GC_incomplete_stack_bl = (word *)
                GC_scratch_alloc((word)(sizeof(page_hash_table)));
    if (GC_old_stack_bl == 0 || GC_incomplete_stack_bl == 0) {
        GC_err_printf0("Insufficient memory for black list\n");
        EXIT();
    }
    GC_clear_bl(GC_old_stack_bl);
    GC_clear_bl(GC_incomplete_stack_bl);
}
void GC_clear_bl(doomed)
word *doomed;
{
    BZERO(doomed, sizeof(page_hash_table));
}

void GC_copy_bl(old, new)
word *new, *old;
{
    BCOPY(old, new, sizeof(page_hash_table));
}
static word total_stack_black_listed();

/* Signal the completion of a collection.  Turn the incomplete black   */
/* lists into new black lists, etc.                                    */
void GC_promote_black_lists()
{
    word * very_old_normal_bl = GC_old_normal_bl;
    word * very_old_stack_bl = GC_old_stack_bl;

    GC_old_normal_bl = GC_incomplete_normal_bl;
    GC_old_stack_bl = GC_incomplete_stack_bl;
#   ifndef ALL_INTERIOR_POINTERS
      GC_clear_bl(very_old_normal_bl);
#   endif
    GC_clear_bl(very_old_stack_bl);
    GC_incomplete_normal_bl = very_old_normal_bl;
    GC_incomplete_stack_bl = very_old_stack_bl;
    GC_total_stack_black_listed = total_stack_black_listed();
#   ifdef PRINTSTATS
      GC_printf1("%ld bytes in heap blacklisted for interior pointers\n",
                 (unsigned long)GC_total_stack_black_listed);
#   endif
    if (GC_total_stack_black_listed != 0) {
        GC_black_list_spacing =
                HBLKSIZE*(GC_heapsize/GC_total_stack_black_listed);
    }
    /* Enforce a minimum spacing, so that a heavily black listed heap   */
    /* does not shut off multi-block allocation entirely.               */
    if (GC_black_list_spacing < 3 * HBLKSIZE) {
        GC_black_list_spacing = 3 * HBLKSIZE;
    }
}
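/*
 * The pointer rotation above is effectively a double-buffer swap.
 * Sketch of the state transition (illustrative only):
 *
 *   before:  old        = A   (false hits up to the previous cycle)
 *            incomplete = B   (false hits seen during this cycle)
 *   after:   old        = B
 *            incomplete = A, cleared for reuse
 *
 * A page therefore ceases to be black listed only after surviving a
 * full collection cycle with no new false hit.
 */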
/* Undo the effect of GC_promote_black_lists, by restoring the          */
/* incomplete lists from the old ones, e.g. after an abandoned          */
/* collection attempt.                                                  */
void GC_unpromote_black_lists()
{
#   ifndef ALL_INTERIOR_POINTERS
      GC_copy_bl(GC_old_normal_bl, GC_incomplete_normal_bl);
#   endif
    GC_copy_bl(GC_old_stack_bl, GC_incomplete_stack_bl);
}
# ifndef ALL_INTERIOR_POINTERS
/* P is not a valid pointer reference, but it falls inside      */
/* the plausible heap bounds.                                   */
/* Add it to the normal incomplete black list if appropriate.   */
void GC_add_to_black_list_normal(p)
word p;
{
    if (!(GC_modws_valid_offsets[p & (sizeof(word)-1)])) return;
    {
        register int index = PHT_HASH(p);

        if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_normal_bl, index)) {
#           ifdef PRINTBLACKLIST
              if (!get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
                GC_printf1("Black listing (normal) 0x%lx\n",
                           (unsigned long)p);
              }
#           endif
            set_pht_entry_from_index(GC_incomplete_normal_bl, index);
        } /* else this is probably just an interior pointer to an allocated */
          /* object, and isn't worth black listing.                         */
    }
}
# endif
/* And the same for false pointers from the stack. */
void GC_add_to_black_list_stack(p)
word p;
{
    register int index = PHT_HASH(p);

    if (HDR(p) == 0 || get_pht_entry_from_index(GC_old_stack_bl, index)) {
#       ifdef PRINTBLACKLIST
          if (!get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
            GC_printf1("Black listing (stack) 0x%lx\n",
                       (unsigned long)p);
          }
#       endif
        set_pht_entry_from_index(GC_incomplete_stack_bl, index);
    }
}
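/*
 * Hypothetical call site, for illustration only: scanning code that
 * encounters a value q inside the plausible heap bounds, but with no
 * valid block header, would record the false hit with
 *
 *   GC_add_to_black_list_stack((word)q);    (q found in a stack or register)
 *
 * or with GC_add_to_black_list_normal for a q found in the heap or
 * static data, in both cases with the allocator lock held.
 */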
/*
 * Is the block starting at h of size len bytes black listed?  If so,
 * return the address of the next plausible r such that (r, len) might
 * not be black listed.  (r may not actually be in the heap.  We
 * guarantee only that every candidate start address between h and r
 * is also black listed.)
 * If (h, len) is not black listed, return 0.
 * Knows about the structure of the black list hash tables.
 */
struct hblk * GC_is_black_listed(h, len)
struct hblk * h;
word len;
{
    register int index = PHT_HASH((word)h);
    register word i;
    word nblocks = divHBLKSZ(len);

#   ifndef ALL_INTERIOR_POINTERS
      if (get_pht_entry_from_index(GC_old_normal_bl, index)
          || get_pht_entry_from_index(GC_incomplete_normal_bl, index)) {
        return(h+1);
      }
#   endif

    for (i = 0; ; ) {
        if (GC_old_stack_bl[divWORDSZ(index)] == 0
            && GC_incomplete_stack_bl[divWORDSZ(index)] == 0) {
            /* An easy case: the entire word of the bit vector is zero. */
            /* Consecutive blocks hash to consecutive bit indices, so   */
            /* none of the blocks mapping into this word can be black   */
            /* listed; skip ahead to the next word boundary.            */
            i += WORDSZ - modWORDSZ(index);
        } else {
            if (get_pht_entry_from_index(GC_old_stack_bl, index)
                || get_pht_entry_from_index(GC_incomplete_stack_bl, index)) {
                return(h+i+1);
            }
            i++;
        }
        if (i >= nblocks) break;
        index = PHT_HASH((word)(h+i));
    }
    return(0);
}
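/*
 * Usage sketch (hypothetical caller, not part of this file): an
 * allocator that has found a candidate block h for a len-byte object
 * can skip past black listed stretches by re-probing at the returned
 * address:
 *
 *   struct hblk * next;
 *   while ((next = GC_is_black_listed(h, len)) != 0) {
 *       h = next;   (every start between the old h and next is bad)
 *   }
 *
 * The caller must still check that h and the len bytes following it
 * remain inside the free region being carved up.
 */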
/* Return the number of blacklisted blocks in a given range.    */
/* Used only for statistical purposes.                          */
/* Looks only at GC_old_stack_bl, which, at the point this is   */
/* called from GC_promote_black_lists, holds the list just      */
/* promoted from GC_incomplete_stack_bl.                        */
word GC_number_stack_black_listed(start, endp1)
struct hblk *start, *endp1;
{
    register struct hblk * h;
    word result = 0;

    for (h = start; h < endp1; h++) {
        register int index = PHT_HASH((word)h);

        if (get_pht_entry_from_index(GC_old_stack_bl, index)) result++;
    }
    return(result);
}
/* Return the total number of (stack) black-listed bytes. */
static word total_stack_black_listed()
{
    register unsigned i;
    word total = 0;

    for (i = 0; i < GC_n_heap_sects; i++) {
        struct hblk * start = (struct hblk *) GC_heap_sects[i].hs_start;
        word len = (word) GC_heap_sects[i].hs_bytes;
        struct hblk * endp1 = start + len/HBLKSIZE;

        total += GC_number_stack_black_listed(start, endp1);
    }
    return(total * HBLKSIZE);
}
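/*
 * For illustration, continuing the 4 KB block assumption above: if
 * GC_number_stack_black_listed finds 32 black listed blocks across all
 * heap sections, the result is 32 * 4096 = 131072 bytes, which is the
 * figure printed by GC_promote_black_lists under PRINTSTATS.
 */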