author    jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2014-05-30 13:48:22 +0000
committer jakub <jakub@138bc75d-0d04-0410-961f-82ee72b054a4>  2014-05-30 13:48:22 +0000
commit    c98fd781d8c0752f75b4ae03f1fa652b17265c27 (patch)
tree      76015d23f34435b311f781d9e25f6c528bf3521f /libsanitizer
parent    8776d2d7ff0b3caf49cc3bdc8fc4308c7e4e6211 (diff)
download  gcc-c98fd781d8c0752f75b4ae03f1fa652b17265c27.tar.gz
* sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream r209879.
* sanitizer_common/sanitizer_common.h: Likewise.
* asan/asan_mapping.h: Likewise.
* asan/asan_linux.cc: Likewise.
* tsan/tsan_mman.cc: Cherry pick upstream r209744.
* sanitizer_common/sanitizer_allocator.h: Likewise.

git-svn-id: svn+ssh://gcc.gnu.org/svn/gcc/trunk@211080 138bc75d-0d04-0410-961f-82ee72b054a4
Diffstat (limited to 'libsanitizer')
-rw-r--r--  libsanitizer/ChangeLog                                  10
-rw-r--r--  libsanitizer/asan/asan_linux.cc                          7
-rw-r--r--  libsanitizer/asan/asan_mapping.h                         3
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_allocator.h     42
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_common.h         6
-rw-r--r--  libsanitizer/sanitizer_common/sanitizer_stacktrace.cc    8
-rw-r--r--  libsanitizer/tsan/tsan_mman.cc                          12
7 files changed, 58 insertions(+), 30 deletions(-)
diff --git a/libsanitizer/ChangeLog b/libsanitizer/ChangeLog
index b6ed9bca5d8..b441ea3b705 100644
--- a/libsanitizer/ChangeLog
+++ b/libsanitizer/ChangeLog
@@ -1,3 +1,13 @@
+2014-05-30 Jakub Jelinek <jakub@redhat.com>
+
+ * sanitizer_common/sanitizer_stacktrace.cc: Cherry pick upstream
+ r209879.
+ * sanitizer_common/sanitizer_common.h: Likewise.
+ * asan/asan_mapping.h: Likewise.
+ * asan/asan_linux.cc: Likewise.
+ * tsan/tsan_mman.cc: Cherry pick upstream r209744.
+ * sanitizer_common/sanitizer_allocator.h: Likewise.
+
2014-05-23 Marek Polacek <polacek@redhat.com>
* ubsan/ubsan_value.cc (getFloatValue): Handle 96-bit
diff --git a/libsanitizer/asan/asan_linux.cc b/libsanitizer/asan/asan_linux.cc
index d893b2334d5..c401d9df48b 100644
--- a/libsanitizer/asan/asan_linux.cc
+++ b/libsanitizer/asan/asan_linux.cc
@@ -186,6 +186,13 @@ void GetPcSpBp(void *context, uptr *pc, uptr *sp, uptr *bp) {
*bp = ucontext->uc_mcontext.gregs[REG_EBP];
*sp = ucontext->uc_mcontext.gregs[REG_ESP];
# endif
+#elif defined(__powerpc__) || defined(__powerpc64__)
+ ucontext_t *ucontext = (ucontext_t*)context;
+ *pc = ucontext->uc_mcontext.regs->nip;
+ *sp = ucontext->uc_mcontext.regs->gpr[PT_R1];
+ // The powerpc{,64}-linux ABIs do not specify r31 as the frame
+ // pointer, but GCC always uses r31 when we need a frame pointer.
+ *bp = ucontext->uc_mcontext.regs->gpr[PT_R31];
#elif defined(__sparc__)
ucontext_t *ucontext = (ucontext_t*)context;
uptr *stk_ptr;
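A hedged, standalone sketch (not part of the patch) of the same register extraction: on powerpc{,64}-linux, an SA_SIGINFO handler receives a ucontext_t whose uc_mcontext.regs points at the saved pt_regs, so the faulting pc, stack pointer (r1) and GCC's frame pointer (r31) can be read exactly as the new branch above does. PT_R1 and PT_R31 come from <asm/ptrace.h>; this compiles only on powerpc Linux.

#include <asm/ptrace.h>  // PT_R1, PT_R31
#include <csignal>
#include <cstdio>
#include <ucontext.h>
#include <unistd.h>

static void handler(int, siginfo_t *, void *context) {
  ucontext_t *uc = static_cast<ucontext_t *>(context);
  unsigned long pc = uc->uc_mcontext.regs->nip;          // faulting instruction
  unsigned long sp = uc->uc_mcontext.regs->gpr[PT_R1];   // ABI stack pointer (r1)
  unsigned long bp = uc->uc_mcontext.regs->gpr[PT_R31];  // GCC's frame pointer (r31)
  fprintf(stderr, "pc=%#lx sp=%#lx bp=%#lx\n", pc, sp, bp);  // sketch only; not async-signal-safe
  _exit(1);
}

int main() {
  struct sigaction sa {};
  sa.sa_sigaction = handler;
  sa.sa_flags = SA_SIGINFO;
  sigemptyset(&sa.sa_mask);
  sigaction(SIGSEGV, &sa, nullptr);
  *(volatile int *)16 = 0;  // deliberately fault to enter the handler
  return 0;
}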
diff --git a/libsanitizer/asan/asan_mapping.h b/libsanitizer/asan/asan_mapping.h
index 86e391f5968..a1f84e24c46 100644
--- a/libsanitizer/asan/asan_mapping.h
+++ b/libsanitizer/asan/asan_mapping.h
@@ -85,6 +85,7 @@ static const u64 kDefaultShadowOffset64 = 1ULL << 44;
static const u64 kDefaultShort64bitShadowOffset = 0x7FFF8000; // < 2G.
static const u64 kAArch64_ShadowOffset64 = 1ULL << 36;
static const u64 kMIPS32_ShadowOffset32 = 0x0aaa8000;
+static const u64 kPPC64_ShadowOffset64 = 1ULL << 41;
static const u64 kFreeBSD_ShadowOffset32 = 1ULL << 30; // 0x40000000
static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
@@ -107,6 +108,8 @@ static const u64 kFreeBSD_ShadowOffset64 = 1ULL << 46; // 0x400000000000
# else
# if defined(__aarch64__)
# define SHADOW_OFFSET kAArch64_ShadowOffset64
+# elif defined(__powerpc64__)
+# define SHADOW_OFFSET kPPC64_ShadowOffset64
# elif SANITIZER_FREEBSD
# define SHADOW_OFFSET kFreeBSD_ShadowOffset64
# elif SANITIZER_MAC
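For context, asan_mapping.h maps an application address to its shadow byte as (addr >> SHADOW_SCALE) + SHADOW_OFFSET, with SHADOW_SCALE normally 3 (one shadow byte covers 8 application bytes). A small sketch of that arithmetic with the new PPC64 offset; the application address below is just an illustrative value:

#include <cinttypes>
#include <cstdint>
#include <cstdio>

int main() {
  const uint64_t kPPC64_ShadowOffset64 = 1ULL << 41;  // value added above
  const unsigned kShadowScale = 3;                    // assumed default SHADOW_SCALE
  uint64_t app = 0x100000000ULL;                      // hypothetical application address
  uint64_t shadow = (app >> kShadowScale) + kPPC64_ShadowOffset64;
  printf("app 0x%" PRIx64 " -> shadow 0x%" PRIx64 "\n", app, shadow);
  return 0;
}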
diff --git a/libsanitizer/sanitizer_common/sanitizer_allocator.h b/libsanitizer/sanitizer_common/sanitizer_allocator.h
index 8ba825f14ec..99be09ba9ba 100644
--- a/libsanitizer/sanitizer_common/sanitizer_allocator.h
+++ b/libsanitizer/sanitizer_common/sanitizer_allocator.h
@@ -196,14 +196,12 @@ template<class SizeClassAllocator> struct SizeClassAllocatorLocalCache;
// Memory allocator statistics
enum AllocatorStat {
- AllocatorStatMalloced,
- AllocatorStatFreed,
- AllocatorStatMmapped,
- AllocatorStatUnmapped,
+ AllocatorStatAllocated,
+ AllocatorStatMapped,
AllocatorStatCount
};
-typedef u64 AllocatorStatCounters[AllocatorStatCount];
+typedef uptr AllocatorStatCounters[AllocatorStatCount];
// Per-thread stats, live in per-thread cache.
class AllocatorStats {
@@ -212,16 +210,21 @@ class AllocatorStats {
internal_memset(this, 0, sizeof(*this));
}
- void Add(AllocatorStat i, u64 v) {
+ void Add(AllocatorStat i, uptr v) {
v += atomic_load(&stats_[i], memory_order_relaxed);
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- void Set(AllocatorStat i, u64 v) {
+ void Sub(AllocatorStat i, uptr v) {
+ v = atomic_load(&stats_[i], memory_order_relaxed) - v;
atomic_store(&stats_[i], v, memory_order_relaxed);
}
- u64 Get(AllocatorStat i) const {
+ void Set(AllocatorStat i, uptr v) {
+ atomic_store(&stats_[i], v, memory_order_relaxed);
+ }
+
+ uptr Get(AllocatorStat i) const {
return atomic_load(&stats_[i], memory_order_relaxed);
}
@@ -229,7 +232,7 @@ class AllocatorStats {
friend class AllocatorGlobalStats;
AllocatorStats *next_;
AllocatorStats *prev_;
- atomic_uint64_t stats_[AllocatorStatCount];
+ atomic_uintptr_t stats_[AllocatorStatCount];
};
// Global stats, used for aggregation and querying.
@@ -258,7 +261,7 @@ class AllocatorGlobalStats : public AllocatorStats {
}
void Get(AllocatorStatCounters s) const {
- internal_memset(s, 0, AllocatorStatCount * sizeof(u64));
+ internal_memset(s, 0, AllocatorStatCount * sizeof(uptr));
SpinMutexLock l(&mu_);
const AllocatorStats *stats = this;
for (;;) {
@@ -268,6 +271,9 @@ class AllocatorGlobalStats : public AllocatorStats {
if (stats == this)
break;
}
+ // All stats must be positive.
+ for (int i = 0; i < AllocatorStatCount; i++)
+ s[i] = ((sptr)s[i]) > 0 ? s[i] : 1;
}
private:
@@ -520,7 +526,7 @@ class SizeClassAllocator64 {
map_size += kUserMapSize;
CHECK_GE(region->mapped_user + map_size, end_idx);
MapWithCallback(region_beg + region->mapped_user, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
region->mapped_user += map_size;
}
uptr total_count = (region->mapped_user - beg_idx - size)
@@ -839,7 +845,7 @@ class SizeClassAllocator32 {
uptr res = reinterpret_cast<uptr>(MmapAlignedOrDie(kRegionSize, kRegionSize,
"SizeClassAllocator32"));
MapUnmapCallback().OnMap(res, kRegionSize);
- stat->Add(AllocatorStatMmapped, kRegionSize);
+ stat->Add(AllocatorStatMapped, kRegionSize);
CHECK_EQ(0U, (res & (kRegionSize - 1)));
possible_regions.set(ComputeRegionId(res), static_cast<u8>(class_id));
return res;
@@ -905,7 +911,7 @@ struct SizeClassAllocatorLocalCache {
void *Allocate(SizeClassAllocator *allocator, uptr class_id) {
CHECK_NE(class_id, 0UL);
CHECK_LT(class_id, kNumClasses);
- stats_.Add(AllocatorStatMalloced, SizeClassMap::Size(class_id));
+ stats_.Add(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
if (UNLIKELY(c->count == 0))
Refill(allocator, class_id);
@@ -920,7 +926,7 @@ struct SizeClassAllocatorLocalCache {
// If the first allocator call on a new thread is a deallocation, then
// max_count will be zero, leading to check failure.
InitCache();
- stats_.Add(AllocatorStatFreed, SizeClassMap::Size(class_id));
+ stats_.Sub(AllocatorStatAllocated, SizeClassMap::Size(class_id));
PerClass *c = &per_class_[class_id];
CHECK_NE(c->max_count, 0UL);
if (UNLIKELY(c->count == c->max_count))
@@ -1031,8 +1037,8 @@ class LargeMmapAllocator {
stats.currently_allocated += map_size;
stats.max_allocated = Max(stats.max_allocated, stats.currently_allocated);
stats.by_size_log[size_log]++;
- stat->Add(AllocatorStatMalloced, map_size);
- stat->Add(AllocatorStatMmapped, map_size);
+ stat->Add(AllocatorStatAllocated, map_size);
+ stat->Add(AllocatorStatMapped, map_size);
}
return reinterpret_cast<void*>(res);
}
@@ -1050,8 +1056,8 @@ class LargeMmapAllocator {
chunks_sorted_ = false;
stats.n_frees++;
stats.currently_allocated -= h->map_size;
- stat->Add(AllocatorStatFreed, h->map_size);
- stat->Add(AllocatorStatUnmapped, h->map_size);
+ stat->Sub(AllocatorStatAllocated, h->map_size);
+ stat->Sub(AllocatorStatMapped, h->map_size);
}
MapUnmapCallback().OnUnmap(h->map_beg, h->map_size);
UnmapOrDie(reinterpret_cast<void*>(h->map_beg), h->map_size);
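The refactoring collapses the Malloced/Freed and Mmapped/Unmapped pairs into single net counters: Allocate() calls Add(), Deallocate() calls the new Sub(), and readers take Get() directly instead of subtracting two totals. Note that the per-thread Add/Sub above are a relaxed load followed by a store rather than an atomic read-modify-write, which is safe only because each AllocatorStats instance is owned by one thread; the global aggregation runs under a SpinMutexLock. A standalone sketch of the same scheme using std::atomic (with fetch_add/fetch_sub for brevity):

#include <atomic>
#include <cstdint>
#include <cstdio>

enum AllocatorStat { AllocatorStatAllocated, AllocatorStatMapped, AllocatorStatCount };

class AllocatorStats {
 public:
  void Add(AllocatorStat i, uintptr_t v) { stats_[i].fetch_add(v, std::memory_order_relaxed); }
  void Sub(AllocatorStat i, uintptr_t v) { stats_[i].fetch_sub(v, std::memory_order_relaxed); }
  uintptr_t Get(AllocatorStat i) const { return stats_[i].load(std::memory_order_relaxed); }
 private:
  std::atomic<uintptr_t> stats_[AllocatorStatCount] = {};
};

int main() {
  AllocatorStats stats;
  stats.Add(AllocatorStatAllocated, 256);  // on Allocate(256)
  stats.Add(AllocatorStatMapped, 4096);    // backing mmap
  stats.Sub(AllocatorStatAllocated, 256);  // on Deallocate(256)
  printf("allocated=%zu mapped=%zu\n",
         (size_t)stats.Get(AllocatorStatAllocated),
         (size_t)stats.Get(AllocatorStatMapped));
  return 0;
}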
diff --git a/libsanitizer/sanitizer_common/sanitizer_common.h b/libsanitizer/sanitizer_common/sanitizer_common.h
index 86171068f6e..93317132c49 100644
--- a/libsanitizer/sanitizer_common/sanitizer_common.h
+++ b/libsanitizer/sanitizer_common/sanitizer_common.h
@@ -26,7 +26,11 @@ struct StackTrace;
const uptr kWordSize = SANITIZER_WORDSIZE / 8;
const uptr kWordSizeInBits = 8 * kWordSize;
-const uptr kCacheLineSize = 64;
+#if defined(__powerpc__) || defined(__powerpc64__)
+ const uptr kCacheLineSize = 128;
+#else
+ const uptr kCacheLineSize = 64;
+#endif
const uptr kMaxPathLength = 512;
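kCacheLineSize is used to pad and align hot per-thread structures so that two threads never contend on the same line; POWER hardware uses 128-byte cache lines, hence the new branch. A hedged illustration of the pattern, using plain C++ alignas rather than the sanitizer's own macros:

#include <cstddef>

#if defined(__powerpc__) || defined(__powerpc64__)
constexpr size_t kCacheLineSize = 128;
#else
constexpr size_t kCacheLineSize = 64;
#endif

// One counter per cache line: sizeof rounds up to the alignment, so
// adjacent array elements can never falsely share a line.
struct alignas(kCacheLineSize) PaddedCounter {
  unsigned long value;
};

static_assert(sizeof(PaddedCounter) == kCacheLineSize,
              "each counter occupies exactly one cache line");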
diff --git a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
index 3938f03a4d8..244ac36f5f8 100644
--- a/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
+++ b/libsanitizer/sanitizer_common/sanitizer_stacktrace.cc
@@ -16,11 +16,13 @@
namespace __sanitizer {
uptr StackTrace::GetPreviousInstructionPc(uptr pc) {
-#ifdef __arm__
+#if defined(__arm__)
// Cancel Thumb bit.
pc = pc & (~1);
-#endif
-#if defined(__sparc__)
+#elif defined(__powerpc__) || defined(__powerpc64__)
+ // PCs are always 4 byte aligned.
+ return pc - 4;
+#elif defined(__sparc__)
return pc - 8;
#else
return pc - 1;
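Why the per-target constants: a return address points just past the call, so the symbolizer steps back into the call instruction itself -- a full 4-byte instruction on the fixed-width powerpc ISA, 8 bytes on sparc (the call plus its delay slot), and a conservative 1 byte on variable-length ISAs such as x86, which is enough to land inside the preceding call. A condensed sketch of the same dispatch (omitting the ARM Thumb-bit handling shown above):

#include <cstdint>

uintptr_t PreviousInstructionPc(uintptr_t pc) {
#if defined(__powerpc__) || defined(__powerpc64__)
  return pc - 4;  // instructions are always 4 bytes and 4-byte aligned
#elif defined(__sparc__)
  return pc - 8;  // call instruction plus its delay slot
#else
  return pc - 1;  // lands inside the preceding variable-length call
#endif
}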
diff --git a/libsanitizer/tsan/tsan_mman.cc b/libsanitizer/tsan/tsan_mman.cc
index bd30cd52e63..3df0531f0c8 100644
--- a/libsanitizer/tsan/tsan_mman.cc
+++ b/libsanitizer/tsan/tsan_mman.cc
@@ -215,19 +215,15 @@ using namespace __tsan;
extern "C" {
uptr __tsan_get_current_allocated_bytes() {
- u64 stats[AllocatorStatCount];
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMalloced];
- u64 f = stats[AllocatorStatFreed];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatAllocated];
}
uptr __tsan_get_heap_size() {
- u64 stats[AllocatorStatCount];
+ uptr stats[AllocatorStatCount];
allocator()->GetStats(stats);
- u64 m = stats[AllocatorStatMmapped];
- u64 f = stats[AllocatorStatUnmapped];
- return m >= f ? m - f : 1;
+ return stats[AllocatorStatMapped];
}
uptr __tsan_get_free_bytes() {
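With the counters now net values, the extern "C" queries above return a stats slot directly instead of subtracting paired totals, and the old "m >= f ? m - f : 1" clamp moves into AllocatorGlobalStats::Get. A hedged usage sketch, assuming a 64-bit target where uptr is unsigned long, for a program built with -fsanitize=thread:

#include <cstdio>
#include <cstdlib>

extern "C" unsigned long __tsan_get_current_allocated_bytes();
extern "C" unsigned long __tsan_get_heap_size();

int main() {
  void *p = malloc(1 << 20);
  printf("allocated=%lu heap=%lu\n",
         __tsan_get_current_allocated_bytes(), __tsan_get_heap_size());
  free(p);
  return 0;
}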