path: root/src/heap-profiler.cc
// -*- Mode: C++; c-basic-offset: 2; indent-tabs-mode: nil -*-
// Copyright (c) 2005, Google Inc.
// All rights reserved.
// 
// Redistribution and use in source and binary forms, with or without
// modification, are permitted provided that the following conditions are
// met:
// 
//     * Redistributions of source code must retain the above copyright
// notice, this list of conditions and the following disclaimer.
//     * Redistributions in binary form must reproduce the above
// copyright notice, this list of conditions and the following disclaimer
// in the documentation and/or other materials provided with the
// distribution.
//     * Neither the name of Google Inc. nor the names of its
// contributors may be used to endorse or promote products derived from
// this software without specific prior written permission.
// 
// THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
// "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
// LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
// A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
// OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
// SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
// LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
// DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
// THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
// (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
// OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.

// ---
// Author: Sanjay Ghemawat
//
// TODO: Log large allocations

#include <config.h>
#include <stddef.h>
#include <stdio.h>
#include <stdlib.h>
#ifdef HAVE_UNISTD_H
#include <unistd.h>
#endif
#ifdef HAVE_INTTYPES_H
#include <inttypes.h>
#endif
#ifdef HAVE_FCNTL_H
#include <fcntl.h>    // for open()
#endif
#ifdef HAVE_MMAP
#include <sys/mman.h>
#endif
#include <errno.h>
#include <assert.h>
#include <sys/types.h>
#include <signal.h>

#include <algorithm>
#include <string>

#include <gperftools/heap-profiler.h>

#include "base/logging.h"
#include "base/basictypes.h"   // for PRId64, among other things
#include "base/googleinit.h"
#include "base/commandlineflags.h"
#include "malloc_hook-inl.h"
#include "tcmalloc_guard.h"
#include <gperftools/malloc_hook.h>
#include <gperftools/malloc_extension.h>
#include "base/spinlock.h"
#include "base/low_level_alloc.h"
#include "base/sysinfo.h"      // for GetUniquePathFromEnv()
#include "heap-profile-table.h"
#include "memory_region_map.h"


#ifndef	PATH_MAX
#ifdef MAXPATHLEN
#define	PATH_MAX	MAXPATHLEN
#else
#define	PATH_MAX	4096         // seems conservative for max filename len!
#endif
#endif

using STL_NAMESPACE::string;
using STL_NAMESPACE::sort;

//----------------------------------------------------------------------
// Flags that control heap-profiling
//
// The thread-safety of the profiler depends on these being immutable
// after main starts, so don't change them.
//----------------------------------------------------------------------

DEFINE_int64(heap_profile_allocation_interval,
             EnvToInt64("HEAP_PROFILE_ALLOCATION_INTERVAL", 1 << 30 /*1GB*/),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes allocated by the program since "
             "the last dump.");
DEFINE_int64(heap_profile_deallocation_interval,
             EnvToInt64("HEAP_PROFILE_DEALLOCATION_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of bytes deallocated by the program "
             "since the last dump.");
// We could also add flags that report whenever inuse_bytes changes by
// X or -X, but there hasn't been a need for that yet, so we haven't.
DEFINE_int64(heap_profile_inuse_interval,
             EnvToInt64("HEAP_PROFILE_INUSE_INTERVAL", 100 << 20 /*100MB*/),
             "If non-zero, dump heap profiling information whenever "
             "the high-water memory usage mark increases by the specified "
             "number of bytes.");
DEFINE_int64(heap_profile_time_interval,
             EnvToInt64("HEAP_PROFILE_TIME_INTERVAL", 0),
             "If non-zero, dump heap profiling information once every "
             "specified number of seconds since the last dump.");
DEFINE_bool(mmap_log,
            EnvToBool("HEAP_PROFILE_MMAP_LOG", false),
            "Should mmap/munmap calls be logged?");
DEFINE_bool(mmap_profile,
            EnvToBool("HEAP_PROFILE_MMAP", false),
            "If heap-profiling is on, also profile mmap, mremap, and sbrk)");
DEFINE_bool(only_mmap_profile,
            EnvToBool("HEAP_PROFILE_ONLY_MMAP", false),
            "If heap-profiling is on, only profile mmap, mremap, and sbrk; "
            "do not profile malloc/new/etc");


//----------------------------------------------------------------------
// Locking
//----------------------------------------------------------------------

// A pthread_mutex has way too much lock contention to be used here.
//
// I would like to use Mutex, but it can call malloc(),
// which can cause us to fall into an infinite recursion.
//
// So we use a simple spinlock.
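//
// (The recursion hazard, spelled out: RecordAlloc() below takes
// heap_lock; if acquiring the lock could itself allocate, that
// allocation would fire NewHook -> RecordAlloc() and try to take
// heap_lock again.)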
static SpinLock heap_lock(SpinLock::LINKER_INITIALIZED);

//----------------------------------------------------------------------
// Simple allocator for heap profiler's internal memory
//----------------------------------------------------------------------

static LowLevelAlloc::Arena *heap_profiler_memory;

static void* ProfilerMalloc(size_t bytes) {
  return LowLevelAlloc::AllocWithArena(bytes, heap_profiler_memory);
}
static void ProfilerFree(void* p) {
  LowLevelAlloc::Free(p);
}

// We use buffers of this size in DoGetHeapProfileLocked.
static const int kProfileBufferSize = 1 << 20;

// This is a last-ditch buffer we use in DumpProfileLocked in case we
// can't allocate more memory from ProfilerMalloc.  We expect this
// will be used by HeapProfileEndWriter when the application has to
// exit due to out-of-memory.  This buffer is allocated in
// HeapProfilerStart.  Access to this must be protected by heap_lock.
static char* global_profiler_buffer = NULL;


//----------------------------------------------------------------------
// Profiling control/state data
//----------------------------------------------------------------------

// Access to all of these is protected by heap_lock.
static bool  is_on = false;           // Whether we are on as a subsystem.
static bool  dumping = false;         // Dumping status to prevent recursion
static char* filename_prefix = NULL;  // Prefix used for profile file names
                                      // (NULL if no need for dumping yet)
static int   dump_count = 0;          // How many dumps so far
static int64 last_dump_alloc = 0;     // alloc_size at the last dump
static int64 last_dump_free = 0;      // free_size at the last dump
static int64 high_water_mark = 0;     // In-use-bytes at last high-water dump
static int64 last_dump_time = 0;      // The time of the last dump

static HeapProfileTable* heap_profile = NULL;  // the heap profile table

//----------------------------------------------------------------------
// Profile generation
//----------------------------------------------------------------------

// Input must be a buffer of size at least 1MB.
static char* DoGetHeapProfileLocked(char* buf, int buflen) {
  // We used to be smarter about estimating the required memory and
  // then capping it to 1MB and generating the profile into that.
  if (buf == NULL || buflen < 1)
    return NULL;

  RAW_DCHECK(heap_lock.IsHeld(), "");
  int bytes_written = 0;
  if (is_on) {
    HeapProfileTable::Stats const stats = heap_profile->total();
    (void)stats;   // avoid an unused-variable warning in non-debug mode.
    bytes_written = heap_profile->FillOrderedProfile(buf, buflen - 1);
    // FillOrderedProfile should not reduce the set of active mmap-ed regions,
    // hence MemoryRegionMap will let us remove everything we've added above:
    RAW_DCHECK(stats.Equivalent(heap_profile->total()), "");
    // If this check fails, FillOrderedProfile somehow removed more
    // regions than had been added.
  }
  buf[bytes_written] = '\0';
  RAW_DCHECK(bytes_written == strlen(buf), "");

  return buf;
}

extern "C" char* GetHeapProfile() {
  // Use normal malloc: we return the profile to the user to free it:
  char* buffer = reinterpret_cast<char*>(malloc(kProfileBufferSize));
  SpinLockHolder l(&heap_lock);
  return DoGetHeapProfileLocked(buffer, kProfileBufferSize);
}
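
// A minimal usage sketch for GetHeapProfile() (everything other than
// the two API calls is hypothetical caller code):
//
//   char* profile = GetHeapProfile();
//   if (profile != NULL) {
//     fputs(profile, stderr);  // or persist it however the caller likes
//     free(profile);           // the buffer came from plain malloc() above
//   }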

// defined below
static void NewHook(const void* ptr, size_t size);
static void DeleteHook(const void* ptr);

// Helper for HeapProfilerDump.
static void DumpProfileLocked(const char* reason) {
  RAW_DCHECK(heap_lock.IsHeld(), "");
  RAW_DCHECK(is_on, "");
  RAW_DCHECK(!dumping, "");

  if (filename_prefix == NULL) return;  // we do not yet need dumping

  dumping = true;

  // Make file name
  char file_name[1000];
  dump_count++;
  snprintf(file_name, sizeof(file_name), "%s.%04d%s",
           filename_prefix, dump_count, HeapProfileTable::kFileExt);
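  // For example, a filename_prefix of "/tmp/myprog" (a hypothetical
  // value) yields "/tmp/myprog.0001" followed by
  // HeapProfileTable::kFileExt for the first dump.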

  // Dump the profile
  RAW_VLOG(0, "Dumping heap profile to %s (%s)", file_name, reason);
  // We must use file routines that don't allocate memory, since we
  // hold the heap lock now.
  RawFD fd = RawOpenForWriting(file_name);
  if (fd == kIllegalRawFD) {
    RAW_LOG(ERROR, "Failed dumping heap profile to %s", file_name);
    dumping = false;
    return;
  }

  // This case may be impossible, but it's best to be safe.
  // It's safe to use the global buffer: we're protected by heap_lock.
  if (global_profiler_buffer == NULL) {
    global_profiler_buffer =
        reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));
  }

  char* profile = DoGetHeapProfileLocked(global_profiler_buffer,
                                         kProfileBufferSize);
  RawWrite(fd, profile, strlen(profile));
  RawClose(fd);

  dumping = false;
}

//----------------------------------------------------------------------
// Profile collection
//----------------------------------------------------------------------

// Dump a profile after either an allocation or deallocation, if
// the memory use has changed enough since the last dump.
static void MaybeDumpProfileLocked() {
  if (!dumping) {
    const HeapProfileTable::Stats& total = heap_profile->total();
    const int64 inuse_bytes = total.alloc_size - total.free_size;
    bool need_to_dump = false;
    char buf[128];
    int64 current_time = time(NULL);
    if (FLAGS_heap_profile_allocation_interval > 0 &&
        total.alloc_size >=
        last_dump_alloc + FLAGS_heap_profile_allocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB allocated cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.alloc_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_deallocation_interval > 0 &&
               total.free_size >=
               last_dump_free + FLAGS_heap_profile_deallocation_interval) {
      snprintf(buf, sizeof(buf), ("%" PRId64 " MB freed cumulatively, "
                                  "%" PRId64 " MB currently in use"),
               total.free_size >> 20, inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_inuse_interval > 0 &&
               inuse_bytes >
               high_water_mark + FLAGS_heap_profile_inuse_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " MB currently in use",
               inuse_bytes >> 20);
      need_to_dump = true;
    } else if (FLAGS_heap_profile_time_interval > 0 &&
               current_time - last_dump_time >=
               FLAGS_heap_profile_time_interval) {
      snprintf(buf, sizeof(buf), "%" PRId64 " sec since the last dump",
               current_time - last_dump_time);
      need_to_dump = true;
      last_dump_time = current_time;
    }
    if (need_to_dump) {
      DumpProfileLocked(buf);

      last_dump_alloc = total.alloc_size;
      last_dump_free = total.free_size;
      if (inuse_bytes > high_water_mark)
        high_water_mark = inuse_bytes;
    }
  }
}

// Record an allocation in the profile.
static void RecordAlloc(const void* ptr, size_t bytes, int skip_count) {
  // Take the stack trace outside the critical section.
  void* stack[HeapProfileTable::kMaxStackDepth];
  int depth = HeapProfileTable::GetCallerStackTrace(skip_count + 1, stack);
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordAlloc(ptr, bytes, depth, stack);
    MaybeDumpProfileLocked();
  }
}

// Record a deallocation in the profile.
static void RecordFree(const void* ptr) {
  SpinLockHolder l(&heap_lock);
  if (is_on) {
    heap_profile->RecordFree(ptr);
    MaybeDumpProfileLocked();
  }
}

//----------------------------------------------------------------------
// Allocation/deallocation hooks for MallocHook
//----------------------------------------------------------------------

// static
void NewHook(const void* ptr, size_t size) {
  if (ptr != NULL) RecordAlloc(ptr, size, 0);
}

// static
void DeleteHook(const void* ptr) {
  if (ptr != NULL) RecordFree(ptr);
}

// TODO(jandrews): Re-enable stack tracing
#ifdef TODO_REENABLE_STACK_TRACING
static void RawInfoStackDumper(const char* message, void*) {
  RAW_LOG(INFO, "%.*s", static_cast<int>(strlen(message) - 1), message);
  // -1 is to chop the \n which will be added by RAW_LOG
}
#endif

static void MmapHook(const void* result, const void* start, size_t size,
                     int prot, int flags, int fd, off_t offset) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR rather than '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mmap(start=0x%" PRIxPTR ", len=%" PRIuS ", prot=0x%x, flags=0x%x, "
            "fd=%d, offset=0x%x) = 0x%" PRIxPTR "",
            (uintptr_t) start, size, prot, flags, fd, (unsigned int) offset,
            (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

static void MremapHook(const void* result, const void* old_addr,
                       size_t old_size, size_t new_size,
                       int flags, const void* new_addr) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR rather than '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO,
            "mremap(old_addr=0x%" PRIxPTR ", old_size=%" PRIuS ", "
            "new_size=%" PRIuS ", flags=0x%x, new_addr=0x%" PRIxPTR ") = "
            "0x%" PRIxPTR "",
            (uintptr_t) old_addr, old_size, new_size, flags,
            (uintptr_t) new_addr, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

static void MunmapHook(const void* ptr, size_t size) {
  if (FLAGS_mmap_log) {  // log it
    // We use PRIxPTR rather than '%p' to avoid deadlocks
    // in pretty-printing of NULL as "nil".
    // TODO(maxim): instead should use a safe snprintf reimplementation
    RAW_LOG(INFO, "munmap(start=0x%" PRIxPTR ", len=%" PRIuS ")",
                  (uintptr_t) ptr, size);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

static void SbrkHook(const void* result, ptrdiff_t increment) {
  if (FLAGS_mmap_log) {  // log it
    RAW_LOG(INFO, "sbrk(inc=%" PRIdS ") = 0x%" PRIxPTR "",
                  increment, (uintptr_t) result);
#ifdef TODO_REENABLE_STACK_TRACING
    DumpStackTrace(1, RawInfoStackDumper, NULL);
#endif
  }
}

//----------------------------------------------------------------------
// Starting/stopping/dumping
//----------------------------------------------------------------------
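
// A minimal sketch of driving this section's entry points directly
// from application code (the prefix path is hypothetical):
//
//   HeapProfilerStart("/tmp/myprog");  // dumps go to /tmp/myprog.NNNN...
//   // ... run the workload of interest ...
//   HeapProfilerDump("checkpoint");    // force an intermediate dump
//   HeapProfilerStop();                // remove hooks and free state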

extern "C" void HeapProfilerStart(const char* prefix) {
  SpinLockHolder l(&heap_lock);

  if (is_on) return;

  is_on = true;

  RAW_VLOG(0, "Starting tracking the heap");

  // This should be done before the hooks are set up, since it should
  // call new, and we want that to be accounted for correctly.
  MallocExtension::Initialize();

  if (FLAGS_only_mmap_profile) {
    FLAGS_mmap_profile = true;
  }

  if (FLAGS_mmap_profile) {
    // Ask MemoryRegionMap to record the call stacks of all mmap,
    // mremap, and sbrk calls, keeping at least kMaxStackDepth frames:
    MemoryRegionMap::Init(HeapProfileTable::kMaxStackDepth,
                          /* use_buckets */ true);
  }

  if (FLAGS_mmap_log) {
    // Install our hooks to do the logging:
    RAW_CHECK(MallocHook::AddMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::AddMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::AddMunmapHook(&MunmapHook), "");
    RAW_CHECK(MallocHook::AddSbrkHook(&SbrkHook), "");
  }

  heap_profiler_memory =
    LowLevelAlloc::NewArena(0, LowLevelAlloc::DefaultArena());

  // Reserve space now for the heap profiler, so we can still write a
  // heap profile even if the application runs out of memory.
  global_profiler_buffer =
      reinterpret_cast<char*>(ProfilerMalloc(kProfileBufferSize));

  heap_profile = new(ProfilerMalloc(sizeof(HeapProfileTable)))
      HeapProfileTable(ProfilerMalloc, ProfilerFree, FLAGS_mmap_profile);

  last_dump_alloc = 0;
  last_dump_free = 0;
  high_water_mark = 0;
  last_dump_time = 0;

  // We do not reset dump_count, so if the user does a sequence of
  // HeapProfilerStart/HeapProfilerStop calls, we will get a continuous
  // sequence of profiles.

  if (FLAGS_only_mmap_profile == false) {
    // Now set the hooks that capture new/delete and malloc/free.
    RAW_CHECK(MallocHook::AddNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::AddDeleteHook(&DeleteHook), "");
  }

  // Copy filename prefix
  RAW_DCHECK(filename_prefix == NULL, "");
  const int prefix_length = strlen(prefix);
  filename_prefix = reinterpret_cast<char*>(ProfilerMalloc(prefix_length + 1));
  memcpy(filename_prefix, prefix, prefix_length);
  filename_prefix[prefix_length] = '\0';
}

extern "C" int IsHeapProfilerRunning() {
  SpinLockHolder l(&heap_lock);
  return is_on ? 1 : 0;   // return an int, because C code doesn't have bool
}

extern "C" void HeapProfilerStop() {
  SpinLockHolder l(&heap_lock);

  if (!is_on) return;

  if (FLAGS_only_mmap_profile == false) {
    // Unset our new/delete hooks, checking they were set:
    RAW_CHECK(MallocHook::RemoveNewHook(&NewHook), "");
    RAW_CHECK(MallocHook::RemoveDeleteHook(&DeleteHook), "");
  }
  if (FLAGS_mmap_log) {
    // Remove our mmap/sbrk logging hooks, checking that they were set:
    RAW_CHECK(MallocHook::RemoveMmapHook(&MmapHook), "");
    RAW_CHECK(MallocHook::RemoveMremapHook(&MremapHook), "");
    RAW_CHECK(MallocHook::RemoveSbrkHook(&SbrkHook), "");
    RAW_CHECK(MallocHook::RemoveMunmapHook(&MunmapHook), "");
  }

  // free profile
  heap_profile->~HeapProfileTable();
  ProfilerFree(heap_profile);
  heap_profile = NULL;

  // free output-buffer memory
  ProfilerFree(global_profiler_buffer);

  // free prefix
  ProfilerFree(filename_prefix);
  filename_prefix = NULL;

  if (!LowLevelAlloc::DeleteArena(heap_profiler_memory)) {
    RAW_LOG(FATAL, "Memory leak in HeapProfiler:");
  }

  if (FLAGS_mmap_profile) {
    MemoryRegionMap::Shutdown();
  }

  is_on = false;
}

extern "C" void HeapProfilerDump(const char *reason) {
  SpinLockHolder l(&heap_lock);
  if (is_on && !dumping) {
    DumpProfileLocked(reason);
  }
}

// Signal handler that is registered when a user-selectable signal
// number is defined in the environment variable HEAPPROFILESIGNAL.
static void HeapProfilerDumpSignal(int signal_number) {
  (void)signal_number;
  if (!heap_lock.TryLock()) {
    return;
  }
  if (is_on && !dumping) {
    DumpProfileLocked("signal");
  }
  heap_lock.Unlock();
}
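
// For illustration (the signal number and binary name are made up):
// starting a program as
//
//   HEAPPROFILE=/tmp/myprog HEAPPROFILESIGNAL=12 ./myprog
//
// makes each "kill -12 <pid>" trigger one dump through the handler
// above, as wired up in HeapProfilerInit() below.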


//----------------------------------------------------------------------
// Initialization/finalization code
//----------------------------------------------------------------------

// Initialization code
static void HeapProfilerInit() {
  // Everything below sets up the profiler based on environment variables.
  char fname[PATH_MAX];
  if (!GetUniquePathFromEnv("HEAPPROFILE", fname)) {
    return;
  }
  // We do a uid check so we don't write out files in a setuid executable.
#ifdef HAVE_GETEUID
  if (getuid() != geteuid()) {
    RAW_LOG(WARNING, ("HeapProfiler: ignoring HEAPPROFILE because "
                      "program seems to be setuid\n"));
    return;
  }
#endif

  char *signal_number_str = getenv("HEAPPROFILESIGNAL");
  if (signal_number_str != NULL) {
    long int signal_number = strtol(signal_number_str, NULL, 10);
    intptr_t old_signal_handler = reinterpret_cast<intptr_t>(
        signal(static_cast<int>(signal_number), HeapProfilerDumpSignal));
    if (old_signal_handler == reinterpret_cast<intptr_t>(SIG_ERR)) {
      RAW_LOG(FATAL, "Failed to set signal. Perhaps signal number %s is invalid\n",
              signal_number_str);
    } else if (old_signal_handler == 0) {
      RAW_LOG(INFO, "Using signal %ld as heap profiling switch", signal_number);
    } else {
      RAW_LOG(FATAL, "Signal %ld already in use\n", signal_number);
    }
  }

  HeapProfileTable::CleanupOldProfiles(fname);

  HeapProfilerStart(fname);
}

// class used for finalization -- dumps the heap-profile at program exit
struct HeapProfileEndWriter {
  ~HeapProfileEndWriter() { HeapProfilerDump("Exiting"); }
};

// We want to make sure tcmalloc is up and running before starting the profiler
static const TCMallocGuard tcmalloc_initializer;
REGISTER_MODULE_INITIALIZER(heapprofiler, HeapProfilerInit());
static HeapProfileEndWriter heap_profile_end_writer;