summaryrefslogtreecommitdiff
path: root/chromium/third_party/blink/renderer/platform/heap/impl/heap_stats_collector.h
blob: c5b6128f611f0173a38e70dad0ad1c0486943e3e (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
181
182
183
184
185
186
187
188
189
190
191
192
193
194
195
196
197
198
199
200
201
202
203
204
205
206
207
208
209
210
211
212
213
214
215
216
217
218
219
220
221
222
223
224
225
226
227
228
229
230
231
232
233
234
235
236
237
238
239
240
241
242
243
244
245
246
247
248
249
250
251
252
253
254
255
256
257
258
259
260
261
262
263
264
265
266
267
268
269
270
271
272
273
274
275
276
277
278
279
280
281
282
283
284
285
286
287
288
289
290
291
292
293
294
295
296
297
298
299
300
301
302
303
304
305
306
307
308
309
310
311
312
313
314
315
316
317
318
319
320
321
322
323
324
325
326
327
328
329
330
331
332
333
334
335
336
337
338
339
340
341
342
343
344
345
346
347
348
349
350
351
352
353
354
355
356
357
358
359
360
361
362
363
364
365
366
367
368
369
370
371
372
373
374
375
376
377
378
379
380
381
382
383
384
385
386
387
388
389
390
391
392
393
394
395
396
397
398
399
400
401
402
403
404
405
406
407
408
409
410
411
412
413
414
415
416
417
418
419
420
421
422
423
424
425
426
427
428
429
430
431
432
433
434
435
436
437
438
439
440
441
442
443
444
445
446
447
448
449
450
451
452
453
454
455
456
457
458
459
460
461
462
463
464
465
466
467
468
469
470
471
472
// Copyright 2018 The Chromium Authors. All rights reserved.
// Use of this source code is governed by a BSD-style license that can be
// found in the LICENSE file.

#ifndef THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_STATS_COLLECTOR_H_
#define THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_STATS_COLLECTOR_H_

#include <stddef.h>

#include <limits>
#include <type_traits>

#include "base/atomicops.h"
#include "third_party/blink/renderer/platform/heap/blink_gc.h"
#include "third_party/blink/renderer/platform/instrumentation/tracing/trace_event.h"
#include "third_party/blink/renderer/platform/platform_export.h"
#include "third_party/blink/renderer/platform/wtf/allocator/allocator.h"

namespace blink {

// Interface for observing changes to heap sizing.
class PLATFORM_EXPORT ThreadHeapStatsObserver {
 public:
  // Called upon allocating/releasing chunks of memory that contain objects.
  //
  // Must not trigger GC or allocate.
  virtual void IncreaseAllocatedSpace(size_t) = 0;
  virtual void DecreaseAllocatedSpace(size_t) = 0;

  // Called once per GC cycle with the accurate number of live |bytes|.
  //
  // Must not trigger GC or allocate.
  virtual void ResetAllocatedObjectSize(size_t bytes) = 0;

  // Called after observing at least
  // |ThreadHeapStatsCollector::kUpdateThreshold| changed bytes through
  // allocation or explicit free. Reports both, negative and positive
  // increments, to allow observer to decide whether absolute values or only the
  // deltas is interesting.
  //
  // May trigger GC but most not allocate.
  virtual void IncreaseAllocatedObjectSize(size_t) = 0;
  virtual void DecreaseAllocatedObjectSize(size_t) = 0;
};

// X-macro listing every mutator-thread scope tracked by the stats collector.
// Each entry V(Name) expands into an enum value ThreadHeapStatsCollector::kName
// and the trace-event name "BlinkGC.Name" (see ToString() below).
// NOTE(review): "MarkProcessNotFullyconstructeddWorklist" is misspelled but
// intentionally kept as-is — the token generates enum values and trace strings
// that external code and trace consumers may already depend on.
#define FOR_ALL_SCOPES(V)                    \
  V(AtomicPauseCompaction)                   \
  V(AtomicPauseMarkEpilogue)                 \
  V(AtomicPauseMarkPrologue)                 \
  V(AtomicPauseMarkRoots)                    \
  V(AtomicPauseMarkTransitiveClosure)        \
  V(AtomicPauseSweepAndCompact)              \
  V(CompleteSweep)                           \
  V(IncrementalMarkingFinalize)              \
  V(IncrementalMarkingStartMarking)          \
  V(IncrementalMarkingStep)                  \
  V(IncrementalMarkingWithDeadline)          \
  V(InvokePreFinalizers)                     \
  V(LazySweepInIdle)                         \
  V(LazySweepOnAllocation)                   \
  V(MarkBailOutObjects)                      \
  V(MarkInvokeEphemeronCallbacks)            \
  V(MarkFlushV8References)                   \
  V(MarkFlushEphemeronPairs)                 \
  V(MarkProcessWorklists)                    \
  V(MarkProcessMarkingWorklist)              \
  V(MarkProcessWriteBarrierWorklist)         \
  V(MarkProcessNotFullyconstructeddWorklist) \
  V(MarkNotFullyConstructedObjects)          \
  V(MarkWeakProcessing)                      \
  V(UnifiedMarkingStep)                      \
  V(VisitCrossThreadPersistents)             \
  V(VisitPersistentRoots)                    \
  V(VisitPersistents)                        \
  V(VisitRoots)                              \
  V(VisitStackRoots)                         \
  V(VisitRememberedSets)

// X-macro listing scopes that run on concurrent (non-mutator) threads. These
// generate ThreadHeapStatsCollector::ConcurrentId values and their trace names.
#define FOR_ALL_CONCURRENT_SCOPES(V)        \
  V(ConcurrentMarkInvokeEphemeronCallbacks) \
  V(ConcurrentMarkingStep)                  \
  V(ConcurrentSweepingStep)

// Manages counters and statistics across garbage collection cycles.
//
// Usage:
//   ThreadHeapStatsCollector stats_collector;
//   stats_collector.NotifyMarkingStarted(<BlinkGC::CollectionType>,
//                                        <BlinkGC::GCReason>);
//   // Use tracer.
//   stats_collector.NotifySweepingCompleted();
//   // Previous event is available using stats_collector.previous().
class PLATFORM_EXPORT ThreadHeapStatsCollector {
  USING_FAST_MALLOC(ThreadHeapStatsCollector);

 public:
  // These ids will form human readable names when used in Scopes.
  enum Id {
#define DECLARE_ENUM(name) k##name,
    FOR_ALL_SCOPES(DECLARE_ENUM)
#undef DECLARE_ENUM
        kNumScopeIds,
  };

  enum ConcurrentId {
#define DECLARE_ENUM(name) k##name,
    FOR_ALL_CONCURRENT_SCOPES(DECLARE_ENUM)
#undef DECLARE_ENUM
        kNumConcurrentScopeIds
  };

  // Maps a mutator-thread scope |id| to its trace-event name. Minor
  // collections get a ".Minor" suffix so they are distinguishable in traces.
  constexpr static const char* ToString(Id id, BlinkGC::CollectionType type) {
    switch (id) {
#define CASE(name)                                                    \
  case k##name:                                                       \
    return type == BlinkGC::CollectionType::kMajor ? "BlinkGC." #name \
                                                   : "BlinkGC." #name \
                                                     ".Minor";
      FOR_ALL_SCOPES(CASE)
#undef CASE
      default:
        NOTREACHED();
    }
    return nullptr;
  }

  // Same as above for concurrent scope ids.
  constexpr static const char* ToString(ConcurrentId id,
                                        BlinkGC::CollectionType type) {
    switch (id) {
#define CASE(name)                                                    \
  case k##name:                                                       \
    return type == BlinkGC::CollectionType::kMajor ? "BlinkGC." #name \
                                                   : "BlinkGC." #name \
                                                     ".Minor";
      FOR_ALL_CONCURRENT_SCOPES(CASE)
#undef CASE
      default:
        NOTREACHED();
    }
    return nullptr;
  }

  enum TraceCategory { kEnabled, kDisabled };
  enum ScopeContext { kMutatorThread, kConcurrentThread };

  // Trace a particular scope. Will emit a trace event and record the time in
  // the corresponding ThreadHeapStatsCollector.
  template <TraceCategory trace_category = kDisabled,
            ScopeContext scope_category = kMutatorThread>
  class PLATFORM_EXPORT InternalScope {
    DISALLOW_NEW();

    // Mutator-thread scopes use Id; concurrent-thread scopes use ConcurrentId.
    using IdType =
        std::conditional_t<scope_category == kMutatorThread, Id, ConcurrentId>;

   public:
    // |args| are optional key/value pairs forwarded to the trace event (zero,
    // one, or two pairs; see the StartTrace overloads).
    template <typename... Args>
    InternalScope(ThreadHeapStatsCollector* tracer, IdType id, Args... args)
        : tracer_(tracer), start_time_(base::TimeTicks::Now()), id_(id) {
      StartTrace(args...);
    }
    InternalScope(const InternalScope&) = delete;
    InternalScope& operator=(const InternalScope&) = delete;

    // Closes the trace event and accounts the elapsed time on the collector.
    ~InternalScope() {
      StopTrace();
      IncreaseScopeTime(id_);
    }

   private:
    inline constexpr static const char* TraceCategory();

    inline void StartTrace();
    template <typename Value1>
    inline void StartTrace(const char* k1, Value1 v1);
    template <typename Value1, typename Value2>
    inline void StartTrace(const char* k1,
                           Value1 v1,
                           const char* k2,
                           Value2 v2);
    inline void StopTrace();

    // Overload selected by IdType; routes to the matching collector method.
    inline void IncreaseScopeTime(Id);
    inline void IncreaseScopeTime(ConcurrentId);

    ThreadHeapStatsCollector* const tracer_;
    const base::TimeTicks start_time_;
    const IdType id_;
  };

  using Scope = InternalScope<kDisabled>;
  using EnabledScope = InternalScope<kEnabled>;
  using ConcurrentScope = InternalScope<kDisabled, kConcurrentThread>;
  using EnabledConcurrentScope = InternalScope<kEnabled, kConcurrentThread>;

  // BlinkGCInV8Scope keeps track of time spent in Blink's GC when called by V8.
  // This is necessary to avoid double-accounting of Blink's time when computing
  // the overall time (V8 + Blink) spent in GC on the main thread.
  class PLATFORM_EXPORT BlinkGCInV8Scope {
    DISALLOW_NEW();

   public:
    BlinkGCInV8Scope(ThreadHeapStatsCollector* tracer)
        : tracer_(tracer), start_time_(base::TimeTicks::Now()) {}
    BlinkGCInV8Scope(const BlinkGCInV8Scope&) = delete;
    BlinkGCInV8Scope& operator=(const BlinkGCInV8Scope&) = delete;

    ~BlinkGCInV8Scope() {
      if (tracer_)
        tracer_->gc_nested_in_v8_ += base::TimeTicks::Now() - start_time_;
    }

   private:
    ThreadHeapStatsCollector* const tracer_;
    const base::TimeTicks start_time_;
  };

  // POD to hold interesting data accumulated during a garbage collection cycle.
  // The event is always fully populated when looking at previous events but
  // is only partially populated when looking at the current event. See
  // members on when they are available.
  //
  // Note that all getters include time for stand-alone as well as unified heap
  // GCs. E.g., |atomic_marking_time()| reports the marking time of the atomic
  // phase, independent of whether the GC was a stand-alone or unified heap GC.
  struct PLATFORM_EXPORT Event {
    Event();

    // Overall time spent in the GC cycle. This includes marking time as well as
    // sweeping time.
    base::TimeDelta gc_cycle_time() const;

    // Time spent in the final atomic pause of a GC cycle.
    base::TimeDelta atomic_pause_time() const;

    // Time spent in the final atomic pause for marking the heap.
    base::TimeDelta atomic_marking_time() const;

    // Time spent in the final atomic pause in sweeping and compacting the heap.
    base::TimeDelta atomic_sweep_and_compact_time() const;

    // Time spent marking the roots.
    base::TimeDelta roots_marking_time() const;

    // Time spent incrementally marking the heap.
    base::TimeDelta incremental_marking_time() const;

    // Time spent processing worklist in the foreground thread.
    base::TimeDelta worklist_processing_time_foreground() const;

    // Time spent flushing v8 references (this is done only in the foreground).
    base::TimeDelta flushing_v8_references_time() const;

    // Time spent in foreground tasks marking the heap.
    base::TimeDelta foreground_marking_time() const;

    // Time spent in background tasks marking the heap.
    base::TimeDelta background_marking_time() const;

    // Overall time spent marking the heap.
    base::TimeDelta marking_time() const;

    // Time spent in foreground tasks sweeping the heap.
    base::TimeDelta foreground_sweeping_time() const;

    // Time spent in background tasks sweeping the heap.
    base::TimeDelta background_sweeping_time() const;

    // Overall time spent sweeping the heap.
    base::TimeDelta sweeping_time() const;

    // Epoch identifier of this GC cycle (reported as "epoch" on trace events).
    // -1 wraps to the maximum size_t and serves as the "unset" sentinel.
    size_t unique_id = -1;
    // Marked bytes collected during sweeping.
    size_t marked_bytes = 0;
    size_t compaction_freed_bytes = 0;
    size_t compaction_freed_pages = 0;
    bool compaction_recorded_events = false;
    // Accumulated wall-clock duration per mutator-thread scope.
    base::TimeDelta scope_data[kNumScopeIds];
    // Accumulated duration per concurrent scope, in microseconds. Atomic32
    // because it is incremented atomically from concurrent threads.
    base::subtle::Atomic32 concurrent_scope_data[kNumConcurrentScopeIds]{0};
    BlinkGC::GCReason reason = static_cast<BlinkGC::GCReason>(0);
    BlinkGC::CollectionType collection_type = BlinkGC::CollectionType::kMajor;
    size_t object_size_in_bytes_before_sweeping = 0;
    size_t allocated_space_in_bytes_before_sweeping = 0;
    size_t partition_alloc_bytes_before_sweeping = 0;
    double live_object_rate = 0;
    base::TimeDelta gc_nested_in_v8;
    bool is_forced_gc = true;
  };

  // Indicates a new garbage collection cycle.
  void NotifyMarkingStarted(BlinkGC::CollectionType,
                            BlinkGC::GCReason,
                            bool is_forced_gc);

  // Indicates that marking of the current garbage collection cycle is
  // completed.
  void NotifyMarkingCompleted(size_t marked_bytes);

  // Indicates the end of a garbage collection cycle. This means that sweeping
  // is finished at this point.
  void NotifySweepingCompleted();

  // Adds |time| to the mutator-thread scope |id| of the current event. Only
  // valid while a GC cycle is in progress (see DCHECK).
  void IncreaseScopeTime(Id id, base::TimeDelta time) {
    DCHECK(is_started_);
    current_.scope_data[id] += time;
  }

  // Adds |time| to the concurrent scope |id| of the current event. Safe to
  // call from concurrent threads via an atomic increment.
  void IncreaseConcurrentScopeTime(ConcurrentId id, base::TimeDelta time) {
    using Atomic32 = base::subtle::Atomic32;
    DCHECK(is_started_);
    // The duration is accumulated in microseconds so it fits an Atomic32.
    // (The original local was misleadingly named |ms|.)
    const int64_t us = time.InMicroseconds();
    DCHECK(us <= std::numeric_limits<Atomic32>::max());
    base::subtle::NoBarrier_AtomicIncrement(&current_.concurrent_scope_data[id],
                                            static_cast<Atomic32>(us));
  }

  void UpdateReason(BlinkGC::GCReason);
  void IncreaseCompactionFreedSize(size_t);
  void IncreaseCompactionFreedPages(size_t);
  void IncreaseAllocatedObjectSize(size_t);
  void DecreaseAllocatedObjectSize(size_t);
  void IncreaseAllocatedSpace(size_t);
  void DecreaseAllocatedSpace(size_t);
  void IncreaseWrapperCount(size_t);
  void DecreaseWrapperCount(size_t);
  void IncreaseCollectedWrapperCount(size_t);

  // Called by the GC when it hits a point where allocated memory may be
  // reported and garbage collection is possible. This is necessary, as
  // increments and decrements are reported as close to their actual
  // allocation/reclamation as possible.
  void AllocatedObjectSizeSafepoint();

  // Size of objects on the heap. Based on marked bytes in the previous cycle
  // and newly allocated bytes since the previous cycle.
  size_t object_size_in_bytes() const;

  size_t marked_bytes() const;
  base::TimeDelta marking_time_so_far() const;

  base::TimeDelta worklist_processing_time_foreground() const;

  base::TimeDelta flushing_v8_references_time() const;

  int64_t allocated_bytes_since_prev_gc() const;

  size_t allocated_space_bytes() const;

  size_t wrapper_count() const;
  size_t collected_wrapper_count() const;

  // True between NotifyMarkingStarted() and the end of the cycle.
  bool is_started() const { return is_started_; }

  // Statistics for the previously running garbage collection.
  const Event& previous() const { return previous_; }

  void RegisterObserver(ThreadHeapStatsObserver* observer);
  void UnregisterObserver(ThreadHeapStatsObserver* observer);

  void IncreaseAllocatedObjectSizeForTesting(size_t);
  void DecreaseAllocatedObjectSizeForTesting(size_t);

 private:
  // Observers are implemented using virtual calls. Avoid notifications below
  // reasonably interesting sizes.
  static constexpr int64_t kUpdateThreshold = 1024;

  // Invokes |callback| for all registered observers.
  template <typename Callback>
  void ForAllObservers(Callback callback);

  void AllocatedObjectSizeSafepointImpl();

  // Statistics for the currently running garbage collection. Note that the
  // Event may not be fully populated yet as some phase may not have been run.
  const Event& current() const { return current_; }

  Event current_;
  Event previous_;

  // Allocated bytes since the last garbage collection. These bytes are reset
  // after marking as they are accounted in marked_bytes then.
  int64_t allocated_bytes_since_prev_gc_ = 0;
  int64_t pos_delta_allocated_bytes_since_prev_gc_ = 0;
  int64_t neg_delta_allocated_bytes_since_prev_gc_ = 0;

  // Allocated space in bytes for all arenas.
  size_t allocated_space_bytes_ = 0;

  bool is_started_ = false;
  bool is_sweeping_ = false;

  // base::TimeDelta for RawScope. These don't need to be nested within a
  // garbage collection cycle to make them easier to use.
  base::TimeDelta gc_nested_in_v8_;

  Vector<ThreadHeapStatsObserver*> observers_;

  FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, InitialEmpty);
  FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, IncreaseScopeTime);
  FRIEND_TEST_ALL_PREFIXES(ThreadHeapStatsCollectorTest, StopResetsCurrent);
};

// Selects the trace category group for this scope: enabled scopes report into
// the devtools timeline; disabled scopes only appear when the
// disabled-by-default "blink_gc" category is explicitly requested.
template <ThreadHeapStatsCollector::TraceCategory trace_category,
          ThreadHeapStatsCollector::ScopeContext scope_category>
constexpr const char*
ThreadHeapStatsCollector::InternalScope<trace_category,
                                        scope_category>::TraceCategory() {
  return trace_category == kEnabled ? "blink_gc,devtools.timeline"
                                    : TRACE_DISABLED_BY_DEFAULT("blink_gc");
}

// Opens the trace event for this scope with no extra arguments. The matching
// TRACE_EVENT_END2 is emitted by StopTrace() from the destructor.
template <ThreadHeapStatsCollector::TraceCategory trace_category,
          ThreadHeapStatsCollector::ScopeContext scope_category>
void ThreadHeapStatsCollector::InternalScope<trace_category,
                                             scope_category>::StartTrace() {
  TRACE_EVENT_BEGIN0(TraceCategory(),
                     ToString(id_, tracer_->current_.collection_type));
}

// Opens the trace event for this scope with one key/value argument attached.
template <ThreadHeapStatsCollector::TraceCategory trace_category,
          ThreadHeapStatsCollector::ScopeContext scope_category>
template <typename Value1>
void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
    StartTrace(const char* k1, Value1 v1) {
  TRACE_EVENT_BEGIN1(TraceCategory(),
                     ToString(id_, tracer_->current_.collection_type), k1, v1);
}

// Opens the trace event for this scope with two key/value arguments attached.
template <ThreadHeapStatsCollector::TraceCategory trace_category,
          ThreadHeapStatsCollector::ScopeContext scope_category>
template <typename Value1, typename Value2>
void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
    StartTrace(const char* k1, Value1 v1, const char* k2, Value2 v2) {
  TRACE_EVENT_BEGIN2(TraceCategory(),
                     ToString(id_, tracer_->current_.collection_type), k1, v1,
                     k2, v2);
}

// Closes the event opened by StartTrace() and attaches the GC epoch id and
// whether the GC was forced, so slices can be correlated across a cycle.
template <ThreadHeapStatsCollector::TraceCategory trace_category,
          ThreadHeapStatsCollector::ScopeContext scope_category>
void ThreadHeapStatsCollector::InternalScope<trace_category,
                                             scope_category>::StopTrace() {
  TRACE_EVENT_END2(TraceCategory(),
                   ToString(id_, tracer_->current_.collection_type), "epoch",
                   tracer_->current_.unique_id, "forced",
                   tracer_->current_.is_forced_gc);
}

// Accounts this scope's elapsed wall-clock time on the collector. The Id
// parameter only selects this overload; the actual scope id is |id_|.
template <ThreadHeapStatsCollector::TraceCategory trace_category,
          ThreadHeapStatsCollector::ScopeContext scope_category>
void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
    IncreaseScopeTime(Id) {
  const base::TimeDelta elapsed = base::TimeTicks::Now() - start_time_;
  tracer_->IncreaseScopeTime(id_, elapsed);
}

// Accounts this scope's elapsed wall-clock time on the collector via the
// atomic concurrent path. The ConcurrentId parameter only selects this
// overload; the actual scope id is |id_|.
template <ThreadHeapStatsCollector::TraceCategory trace_category,
          ThreadHeapStatsCollector::ScopeContext scope_category>
void ThreadHeapStatsCollector::InternalScope<trace_category, scope_category>::
    IncreaseScopeTime(ConcurrentId) {
  const base::TimeDelta elapsed = base::TimeTicks::Now() - start_time_;
  tracer_->IncreaseConcurrentScopeTime(id_, elapsed);
}

#undef FOR_ALL_SCOPES
#undef FOR_ALL_CONCURRENT_SCOPES

}  // namespace blink

#endif  // THIRD_PARTY_BLINK_RENDERER_PLATFORM_HEAP_IMPL_HEAP_STATS_COLLECTOR_H_