/* policy.c: POLICY DECISIONS
 *
 * $Id: //info.ravenbrook.com/project/mps/master/code/policy.c#19 $
 * Copyright (c) 2001-2020 Ravenbrook Limited.  See end of file for license.
 *
 * This module collects the decision-making code for the MPS, so that
 * policy can be maintained and adjusted.
 *
 * .sources: <design/strategy>.
 */

#include "locus.h"
#include "mpm.h"

SRCID(policy, "$Id: //info.ravenbrook.com/project/mps/master/code/policy.c#19 $");


/* PolicyAlloc -- allocation policy
 *
 * This is the code responsible for making decisions about where to allocate
 * memory.
 *
 * pref describes the address space preferences for the allocation.
 * size is the amount of memory requested to be allocated, in bytes.
 * pool is the pool that is requesting the memory.
 *
 * If successful, update *tractReturn to point to the initial tract of
 * the allocated memory and return ResOK. Otherwise return a result
 * code describing the problem.
 */
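
/* Illustrative usage sketch (not taken from an MPS caller; "myPool",
 * "myPref" and "size" are hypothetical, and size is assumed to already
 * satisfy SizeIsArenaGrains for the pool's arena):
 *
 *   Tract tract;
 *   Res res = PolicyAlloc(&tract, PoolArena(myPool), myPref, size, myPool);
 *   if (res == ResOK)
 *     UseTract(tract);          hypothetical consumer of the initial tract
 *   else if (res == ResCOMMIT_LIMIT)
 *     RecoverSomeMemory();      hypothetical recovery before retrying
 */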

Res PolicyAlloc(Tract *tractReturn, Arena arena, LocusPref pref,
                Size size, Pool pool)
{
  Res res;
  Tract tract;
  ZoneSet zones, moreZones, evenMoreZones;

  AVER(tractReturn != NULL);
  AVERT(Arena, arena);
  AVERT(LocusPref, pref);
  AVER(size > (Size)0);
  AVER(SizeIsArenaGrains(size, arena));
  AVERT(Pool, pool);
  AVER(arena == PoolArena(pool));

  /* Don't attempt to allocate if doing so would definitely exceed the
   * commit limit. The second comparison in the test below guards
   * against overflow of the unsigned addition. */
  if (arena->spareCommitted < size) {
    Size necessaryCommitIncrease = size - arena->spareCommitted;
    if (arena->committed + necessaryCommitIncrease > arena->commitLimit
        || arena->committed + necessaryCommitIncrease < arena->committed) {
      return ResCOMMIT_LIMIT;
    }
  }

  /* Plan A: allocate from the free land in the requested zones */
  zones = ZoneSetDiff(pref->zones, pref->avoid);
  if (zones != ZoneSetEMPTY) {
    res = ArenaFreeLandAlloc(&tract, arena, zones, pref->high, size, pool);
    if (res == ResOK)
      goto found;
  }

  /* Plan B: add free zones that aren't blacklisted */
  /* TODO: Pools without ambiguous roots might not care about the blacklist. */
  /* TODO: zones are precious and (currently) never deallocated, so we
   * should consider extending the arena first if address space is plentiful.
   * See also job003384. */
  moreZones = ZoneSetUnion(pref->zones, ZoneSetDiff(arena->freeZones, pref->avoid));
  if (moreZones != zones) {
    res = ArenaFreeLandAlloc(&tract, arena, moreZones, pref->high, size, pool);
    if (res == ResOK)
      goto found;
  }

  /* Plan C: Extend the arena, then try A and B again. */
  if (moreZones != ZoneSetEMPTY) {
    res = Method(Arena, arena, grow)(arena, pref, size);
    /* If we can't extend because we hit the commit limit, try purging
       some spare committed memory and try again. */
    /* TODO: This would be a good time to *remap* VM instead of
       returning it to the OS. */
    if (res == ResCOMMIT_LIMIT) {
      if (Method(Arena, arena, purgeSpare)(arena, size) >= size)
        res = Method(Arena, arena, grow)(arena, pref, size);
    }
    if (res == ResOK) {
      if (zones != ZoneSetEMPTY) {
        res = ArenaFreeLandAlloc(&tract, arena, zones, pref->high, size, pool);
        if (res == ResOK)
          goto found;
      }
      if (moreZones != zones) {
        res = ArenaFreeLandAlloc(&tract, arena, moreZones, pref->high,
                                 size, pool);
        if (res == ResOK)
          goto found;
      }
    }
    /* TODO: Log an event here, since something went wrong, before
       trying the next plan anyway. */
  }

  /* Plan D: add every zone that isn't blacklisted.  This might mix GC'd
   * objects with those from other generations, causing the zone check
   * to give false positives and slowing down the collector. */
  /* TODO: log an event for this */
  evenMoreZones = ZoneSetDiff(ZoneSetUNIV, pref->avoid);
  if (evenMoreZones != moreZones) {
    res = ArenaFreeLandAlloc(&tract, arena, evenMoreZones, pref->high,
                             size, pool);
    if (res == ResOK)
      goto found;
  }

  /* Last resort: try anywhere.  This might put GC'd objects in zones where
   * common ambiguous bit patterns pin them down, causing the zone check
   * to give even more false positives permanently, and possibly retaining
   * garbage indefinitely. */
  res = ArenaFreeLandAlloc(&tract, arena, ZoneSetUNIV, pref->high, size, pool);
  if (res == ResOK)
    goto found;

  /* Uh oh. */
  return res;

found:
  *tractReturn = tract;
  return ResOK;
}


/* policyCollectionTime -- estimate time to collect the world, in seconds */

static double policyCollectionTime(Arena arena)
{
  Size collectableSize;
  double collectionRate;
  double collectionTime;

  AVERT(Arena, arena);

  collectableSize = ArenaCollectable(arena);
  /* The condition arena->tracedTime >= 1.0 ensures that the division
   * can't overflow. */
  if (arena->tracedTime >= 1.0)
    collectionRate = arena->tracedWork / arena->tracedTime;
  else
    collectionRate = ARENA_DEFAULT_COLLECTION_RATE;
  collectionTime = (double)collectableSize / collectionRate;
  collectionTime += ARENA_DEFAULT_COLLECTION_OVERHEAD;

  return collectionTime;
}
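
/* Worked example of the estimate above, with made-up figures: if
 * ArenaCollectable reports 256 MiB and past traces have averaged
 * 64 MiB of tracing work per second (tracedWork / tracedTime), then
 * the estimated collection time is 256 / 64 = 4 seconds, plus
 * ARENA_DEFAULT_COLLECTION_OVERHEAD. */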


/* PolicyShouldCollectWorld -- should we collect the world now?
 *
 * Return TRUE if we should try collecting the world now, FALSE if
 * not.
 *
 * This is the policy behind mps_arena_step: we only say yes if the
 * client has provided enough time to collect the world, and enough
 * time has passed since the last time we collected the world
 * opportunistically.
 */
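
/* Illustrative decision, with hypothetical numbers: if
 * policyCollectionTime estimates 2.0 seconds, the client offers
 * availableTime = 3.0 seconds, and ARENA_MAX_COLLECT_FRACTION were 0.1,
 * then we would also require at least 2.0 / 0.1 = 20 seconds to have
 * passed since the last opportunistic world collection before
 * returning TRUE. */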

Bool PolicyShouldCollectWorld(Arena arena, double availableTime,
                              Clock now, Clock clocks_per_sec)
{
  Size collectableSize;
  double collectionTime, sinceLastWorldCollect;

  AVERT(Arena, arena);
  /* Can't collect the world if we're already collecting. */
  AVER(arena->busyTraces == TraceSetEMPTY);

  if (availableTime <= 0.0)
    /* Can't collect the world if we're not given any time. */
    return FALSE;

  /* Don't collect the world if it's very small. */
  collectableSize = ArenaCollectable(arena);
  if (collectableSize < ARENA_MINIMUM_COLLECTABLE_SIZE)
    return FALSE;

  /* How long would it take to collect the world? */
  collectionTime = policyCollectionTime(arena);

  /* How long since we last collected the world? */
  sinceLastWorldCollect = (double)(now - arena->lastWorldCollect) /
    (double)clocks_per_sec;

  /* Offered enough time, and long enough since we last did it? */
  return availableTime > collectionTime
    && sinceLastWorldCollect > collectionTime / ARENA_MAX_COLLECT_FRACTION;
}


/* policyCondemnChain -- condemn appropriate parts of this chain
 *
 * If successful, set *mortalityReturn to an estimate of the mortality
 * of the condemned parts of this chain and return ResOK.
 *
 * This is only called if ChainDeferral returned a value sufficiently
 * low that we decided to start the collection. (Usually such values
 * are less than zero; see <design/strategy#.policy.start.chain>.)
 */
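
/* Illustrative example with hypothetical figures: in a chain of three
 * generations whose GenDescNewSize values are 3 MiB, 9 MiB and 2 MiB
 * against capacities of 2 MiB, 8 MiB and 16 MiB, the loop below stops
 * at generation 1 (the highest generation over capacity), so
 * generations 0 and 1 are condemned for the trace and generation 2 is
 * left alone. */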

static Res policyCondemnChain(double *mortalityReturn, Chain chain, Trace trace)
{
  size_t topCondemnedGen, i;
  GenDesc gen;

  AVER(mortalityReturn != NULL);
  AVERT(Chain, chain);
  AVERT(Trace, trace);
  AVER(chain->arena == trace->arena);

  /* Find the highest generation that's over capacity. We will condemn
   * this and all lower generations in the chain. */
  topCondemnedGen = chain->genCount;
  for (;;) {
    /* It's an error to call this function unless some generation is
     * over capacity as reported by ChainDeferral. */
    AVER(topCondemnedGen > 0);
    if (topCondemnedGen == 0)
      return ResFAIL;
    --topCondemnedGen;
    gen = &chain->gens[topCondemnedGen];
    AVERT(GenDesc, gen);
    if (GenDescNewSize(gen) >= gen->capacity)
      break;
  }

  /* At this point, we've decided to condemn topCondemnedGen and all
   * lower generations. */
  TraceCondemnStart(trace);
  for (i = 0; i <= topCondemnedGen; ++i) {
    gen = &chain->gens[i];
    AVERT(GenDesc, gen);
    GenDescStartTrace(gen, trace);
  }
  EVENT5(ChainCondemnAuto, chain->arena, chain, trace, topCondemnedGen,
         chain->genCount);
  return TraceCondemnEnd(mortalityReturn, trace);
}


/* PolicyStartTrace -- consider starting a trace
 *
 * If collectWorldAllowed is TRUE, consider starting a collection of
 * the world. Otherwise, consider only starting collections of individual
 * chains or generations.
 *
 * If a collection of the world was started, set *collectWorldReturn
 * to TRUE. Otherwise leave it unchanged.
 *
 * If a trace was started, update *traceReturn and return TRUE.
 * Otherwise, leave *traceReturn unchanged and return FALSE.
 */
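
/* Worked example of the dynamic criterion below, using made-up
 * figures: with 100 MiB committed, no spare committed memory, and
 * TraceWorldMortality = 0.5, sSurvivors is 50 MiB; if
 * TraceCopyScanRATIO were 1.5 and there were no foundation,
 * tTracePerScan would be 50 * 2.5 = 125 MiB of scanning work, so
 * sConsTrace = 50 + 125 * 0.25 = 81.25 MiB. A collection of the world
 * is started only if less than that much address space remains
 * available in the arena. */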

Bool PolicyStartTrace(Trace *traceReturn, Bool *collectWorldReturn,
                      Arena arena, Bool collectWorldAllowed)
{
  Res res;
  Trace trace;
  double TraceWorkFactor = 0.25;
  /* Fix the mortality of the world to avoid runaway feedback between the
     dynamic criterion and the mortality of the arena's top generation,
     leading to all traces collecting the world. This is a (hopefully)
     temporary hack, pending an improved scheduling algorithm. */
  double TraceWorldMortality = 0.5;

  AVER(traceReturn != NULL);
  AVERT(Arena, arena);

  if (collectWorldAllowed) {
    Size sFoundation, sCondemned, sSurvivors, sConsTrace;
    double tTracePerScan; /* tTrace/cScan */
    double dynamicDeferral;

    /* Compute dynamic criterion.  See strategy.lisp-machine. */
    sFoundation = (Size)0; /* condemning everything, only roots @@@@ */
    /* @@@@ sCondemned should be scannable only */
    sCondemned = ArenaCommitted(arena) - ArenaSpareCommitted(arena);
    sSurvivors = (Size)((double)sCondemned * (1 - TraceWorldMortality));
    tTracePerScan = (double)sFoundation
      + ((double)sSurvivors * (1 + TraceCopyScanRATIO));
    AVER(TraceWorkFactor >= 0);
    AVER((double)sSurvivors + tTracePerScan * TraceWorkFactor
         <= (double)SizeMAX);
    sConsTrace = (Size)((double)sSurvivors + tTracePerScan * TraceWorkFactor);
    dynamicDeferral = (double)ArenaAvail(arena) - (double)sConsTrace;

    if (dynamicDeferral < 0.0) {
      /* Start full collection. */
      res = TraceStartCollectAll(&trace, arena, TraceStartWhyDYNAMICCRITERION);
      if (res != ResOK)
        goto failStart;
      *collectWorldReturn = TRUE;
      *traceReturn = trace;
      return TRUE;
    }
  }
  {
    /* Find the chain most over its capacity. */
    Ring node, nextNode;
    double firstTime = 0.0;
    Chain firstChain = NULL;

    RING_FOR(node, &arena->chainRing, nextNode) {
      Chain chain = RING_ELT(Chain, chainRing, node);
      double time;

      AVERT(Chain, chain);
      time = ChainDeferral(chain);
      if (time < firstTime) {
        firstTime = time; firstChain = chain;
      }
    }

    /* If one was found, start collection on that chain. */
    if (firstTime < 0) {
      double mortality;

      res = TraceCreate(&trace, arena, TraceStartWhyCHAIN_GEN0CAP);
      AVER(res == ResOK);
      res = policyCondemnChain(&mortality, firstChain, trace);
      if (res != ResOK) /* should try some other trace, really @@@@ */
        goto failCondemn;
      if (TraceIsEmpty(trace))
        goto nothingCondemned;
      res = TraceStart(trace, mortality,
                       (double)trace->condemned * TraceWorkFactor);
      /* We don't expect normal GC traces to fail to start. */
      AVER(res == ResOK);
      *traceReturn = trace;
      return TRUE;
    }
  } /* no world collection started */
  return FALSE;

nothingCondemned:
failCondemn:
  TraceDestroyInit(trace);
failStart:
  return FALSE;
}


/* PolicyPoll -- do some tracing work?
 *
 * Return TRUE if the MPS should do some tracing work; FALSE if it
 * should return to the mutator.
 */
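
/* For example (figures hypothetical): with pollThreshold at 40 MiB of
 * total mutator allocation and fillMutatorSize currently at 42 MiB,
 * the MPS should do some tracing work; at 38 MiB it should return to
 * the mutator instead. */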

Bool PolicyPoll(Arena arena)
{
  Globals globals;
  AVERT(Arena, arena);
  globals = ArenaGlobals(arena);
  return globals->pollThreshold <= globals->fillMutatorSize;
}


/* PolicyPollAgain -- do another unit of work?
 *
 * Return TRUE if the MPS should do another unit of work; FALSE if it
 * should return to the mutator.
 *
 * start is the clock time when the MPS was entered.
 * moreWork and tracedWork are the results of the last call to TracePoll.
 */
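
/* Illustrative example with hypothetical figures: suppose the arena's
 * pause time is 0.1 seconds. If TracePoll reported more work and we
 * have only been inside the MPS for 0.05 seconds, we keep going. Once
 * we stop, the poll threshold advances by ArenaPollALLOCTIME on the
 * allocation clock (fillMutatorSize), so the next poll happens after
 * the mutator has allocated roughly that much more memory. */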

Bool PolicyPollAgain(Arena arena, Clock start, Bool moreWork, Work tracedWork)
{
  Bool moreTime;
  Globals globals;
  double nextPollThreshold;

  AVERT(Arena, arena);
  UNUSED(tracedWork);

  if (ArenaEmergency(arena))
    return TRUE;

  /* Is there more work to do and more time to do it in? */
  moreTime = (double)(ClockNow() - start)
    < ArenaPauseTime(arena) * (double)ClocksPerSec();
  if (moreWork && moreTime)
    return TRUE;

  /* We're not going to do more work now, so calculate when to come back. */

  globals = ArenaGlobals(arena);

  if (moreWork) {
    /* We did one quantum of work; consume one unit of 'time'. */
    nextPollThreshold = globals->pollThreshold + ArenaPollALLOCTIME;
  } else {
    /* No more work to do.  Sleep until NOW + a bit. */
    nextPollThreshold = globals->fillMutatorSize + ArenaPollALLOCTIME;
  }

  /* Advance pollThreshold; check: enough precision? */
  AVER(nextPollThreshold > globals->pollThreshold);
  globals->pollThreshold = nextPollThreshold;

  return FALSE;
}


/* C. COPYRIGHT AND LICENSE
 *
 * Copyright (C) 2001-2020 Ravenbrook Limited <https://www.ravenbrook.com/>.
 *
 * Redistribution and use in source and binary forms, with or without
 * modification, are permitted provided that the following conditions are
 * met:
 *
 * 1. Redistributions of source code must retain the above copyright
 *    notice, this list of conditions and the following disclaimer.
 *
 * 2. Redistributions in binary form must reproduce the above copyright
 *    notice, this list of conditions and the following disclaimer in the
 *    documentation and/or other materials provided with the
 *    distribution.
 *
 * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS "AS
 * IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT LIMITED
 * TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A
 * PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
 * HOLDER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
 * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
 * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
 * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
 * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
 * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
 * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
 */