author     Aliaksey Kandratsenka <alk@tut.by>  2013-09-09 07:59:25 -0700
committer  Aliaksey Kandratsenka <alk@tut.by>  2013-09-09 07:59:25 -0700
commit     6979583592df555a369a2c975f5117a1f61911af (patch)
tree       3c33a83cbb7a9a6bb601087523859bba4b0965f1
parent     28dd85e2825af71138621a4417e6ab004631924d (diff)
download   gperftools-6979583592df555a369a2c975f5117a1f61911af.tar.gz
issue-564: added atomic ops support for mips{,64}
This merges a patch contributed by Jovan Zelincevic. With that patch, a tcmalloc build configured with --enable-minimal (just the malloc replacement) appears to work (it passes the unit tests).
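For context, here is a minimal sketch of how callers exercise these primitives through the public base/atomicops.h interface. It is illustrative only and not part of the patch; the helper names BumpCounter and CompareAndSet are made up for the example, while the atomic functions themselves are the ones added below.

    #include "base/atomicops.h"

    // Shared counter bumped from multiple threads.
    static Atomic32 counter = 0;

    void BumpCounter() {
      // Returns the incremented value; implies no memory barrier.
      base::subtle::NoBarrier_AtomicIncrement(&counter, 1);
    }

    Atomic32 CompareAndSet(Atomic32 old_value, Atomic32 new_value) {
      // Returns the previous value of counter; the store happens only if it equaled old_value.
      return base::subtle::NoBarrier_CompareAndSwap(&counter, old_value, new_value);
    }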
-rw-r--r--   Makefile.am                          |   1
-rw-r--r--   src/base/atomicops-internals-mips.h  | 392
-rw-r--r--   src/base/atomicops.h                 |   2
-rw-r--r--   src/base/basictypes.h                |   2
-rw-r--r--   src/base/linux_syscall_support.h     |  17
-rw-r--r--   src/stacktrace_config.h              |   4
6 files changed, 418 insertions(+), 0 deletions(-)
diff --git a/Makefile.am b/Makefile.am
index 812c9a5..ed0fb66 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -233,6 +233,7 @@ SPINLOCK_INCLUDES = src/base/spinlock.h \
src/base/atomicops-internals-linuxppc.h \
src/base/atomicops-internals-arm-generic.h \
src/base/atomicops-internals-arm-v6plus.h \
+ src/base/atomicops-internals-mips.h \
src/base/atomicops-internals-windows.h \
src/base/atomicops-internals-x86.h
noinst_LTLIBRARIES += libspinlock.la
diff --git a/src/base/atomicops-internals-mips.h b/src/base/atomicops-internals-mips.h
new file mode 100644
index 0000000..4836027
--- /dev/null
+++ b/src/base/atomicops-internals-mips.h
@@ -0,0 +1,392 @@
+/* Copyright (c) 2013, Google Inc.
+ * All rights reserved.
+ *
+ * Redistribution and use in source and binary forms, with or without
+ * modification, are permitted provided that the following conditions are
+ * met:
+ *
+ * * Redistributions of source code must retain the above copyright
+ * notice, this list of conditions and the following disclaimer.
+ * * Redistributions in binary form must reproduce the above
+ * copyright notice, this list of conditions and the following disclaimer
+ * in the documentation and/or other materials provided with the
+ * distribution.
+ * * Neither the name of Google Inc. nor the names of its
+ * contributors may be used to endorse or promote products derived from
+ * this software without specific prior written permission.
+ *
+ * THIS SOFTWARE IS PROVIDED BY THE COPYRIGHT HOLDERS AND CONTRIBUTORS
+ * "AS IS" AND ANY EXPRESS OR IMPLIED WARRANTIES, INCLUDING, BUT NOT
+ * LIMITED TO, THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR
+ * A PARTICULAR PURPOSE ARE DISCLAIMED. IN NO EVENT SHALL THE COPYRIGHT
+ * OWNER OR CONTRIBUTORS BE LIABLE FOR ANY DIRECT, INDIRECT, INCIDENTAL,
+ * SPECIAL, EXEMPLARY, OR CONSEQUENTIAL DAMAGES (INCLUDING, BUT NOT
+ * LIMITED TO, PROCUREMENT OF SUBSTITUTE GOODS OR SERVICES; LOSS OF USE,
+ * DATA, OR PROFITS; OR BUSINESS INTERRUPTION) HOWEVER CAUSED AND ON ANY
+ * THEORY OF LIABILITY, WHETHER IN CONTRACT, STRICT LIABILITY, OR TORT
+ * (INCLUDING NEGLIGENCE OR OTHERWISE) ARISING IN ANY WAY OUT OF THE USE
+ * OF THIS SOFTWARE, EVEN IF ADVISED OF THE POSSIBILITY OF SUCH DAMAGE.
+ */
+
+// Author: Jovan Zelincevic <jovan.zelincevic@imgtec.com>
+// based on atomicops-internals by Sanjay Ghemawat
+
+// This file is an internal atomic implementation, use base/atomicops.h instead.
+//
+// This code implements MIPS atomics.
+
+#ifndef BASE_ATOMICOPS_INTERNALS_MIPS_H_
+#define BASE_ATOMICOPS_INTERNALS_MIPS_H_
+
+#if (_MIPS_ISA == _MIPS_ISA_MIPS64)
+#define BASE_HAS_ATOMIC64 1
+#endif
+
+typedef int32_t Atomic32;
+
+namespace base {
+namespace subtle {
+
+// Atomically execute:
+// result = *ptr;
+// if (*ptr == old_value)
+// *ptr = new_value;
+// return result;
+//
+// I.e., replace "*ptr" with "new_value" if "*ptr" used to be "old_value".
+// Always return the old value of "*ptr"
+//
+// This routine implies no memory barriers.
+inline Atomic32 NoBarrier_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value)
+{
+ Atomic32 prev, tmp;
+ __asm__ volatile(
+ ".set push \n"
+ ".set noreorder \n"
+
+ "1: \n"
+ "ll %0, %5 \n" // prev = *ptr
+ "bne %0, %3, 2f \n" // if (prev != old_value) goto 2
+ " move %2, %4 \n" // tmp = new_value
+ "sc %2, %1 \n" // *ptr = tmp (with atomic check)
+ "beqz %2, 1b \n" // start again on atomic error
+ " nop \n" // delay slot nop
+ "2: \n"
+
+ ".set pop \n"
+ : "=&r" (prev), "=m" (*ptr),
+ "=&r" (tmp)
+ : "Ir" (old_value), "r" (new_value),
+ "m" (*ptr)
+ : "memory"
+ );
+ return prev;
+}
+
+// Atomically store new_value into *ptr, returning the previous value held in
+// *ptr. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value)
+{
+ Atomic32 temp, old;
+ __asm__ volatile(
+ ".set push \n"
+ ".set noreorder \n"
+
+ "1: \n"
+ "ll %1, %2 \n" // old = *ptr
+ "move %0, %3 \n" // temp = new_value
+ "sc %0, %2 \n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b \n" // start again on atomic error
+ " nop \n" // delay slot nop
+
+ ".set pop \n"
+ : "=&r" (temp), "=&r" (old),
+ "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory"
+ );
+ return old;
+}
+
+// Atomically increment *ptr by "increment". Returns the new value of
+// *ptr with the increment applied. This routine implies no memory barriers.
+inline Atomic32 NoBarrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment)
+{
+ Atomic32 temp, temp2;
+ __asm__ volatile(
+ ".set push \n"
+ ".set noreorder \n"
+
+ "1: \n"
+ "ll %0, %2 \n" // temp = *ptr
+ "addu %1, %0, %3 \n" // temp2 = temp + increment
+ "sc %1, %2 \n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b \n" // start again on atomic error
+ "addu %1, %0, %3 \n" // temp2 = temp + increment
+
+ ".set pop \n"
+ : "=&r" (temp), "=&r" (temp2),
+ "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory"
+ );
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline void MemoryBarrier()
+{
+ __asm__ volatile("sync" : : : "memory");
+}
+
+inline Atomic32 Barrier_AtomicIncrement(volatile Atomic32* ptr,
+ Atomic32 increment)
+{
+ MemoryBarrier();
+ Atomic32 res = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return res;
+}
+
+// "Acquire" operations
+// ensure that no later memory access can be reordered ahead of the operation.
+// "Release" operations ensure that no previous memory access can be reordered
+// after the operation. "Barrier" operations have both "Acquire" and "Release"
+// semantics. A MemoryBarrier() has "Barrier" semantics, but does no memory
+// access.
+inline Atomic32 Acquire_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value)
+{
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic32 Release_CompareAndSwap(volatile Atomic32* ptr,
+ Atomic32 old_value,
+ Atomic32 new_value)
+{
+ MemoryBarrier();
+ Atomic32 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ return res;
+}
+
+inline void NoBarrier_Store(volatile Atomic32* ptr, Atomic32 value)
+{
+ *ptr = value;
+}
+
+inline Atomic32 Acquire_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value)
+{
+ Atomic32 old_value = NoBarrier_AtomicExchange(ptr, new_value);
+ MemoryBarrier();
+ return old_value;
+}
+
+inline Atomic32 Release_AtomicExchange(volatile Atomic32* ptr,
+ Atomic32 new_value)
+{
+ MemoryBarrier();
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
+inline void Acquire_Store(volatile Atomic32* ptr, Atomic32 value)
+{
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic32* ptr, Atomic32 value)
+{
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic32 NoBarrier_Load(volatile const Atomic32* ptr)
+{
+ return *ptr;
+}
+
+inline Atomic32 Acquire_Load(volatile const Atomic32* ptr)
+{
+ Atomic32 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic32 Release_Load(volatile const Atomic32* ptr)
+{
+ MemoryBarrier();
+ return *ptr;
+}
+
+#if (_MIPS_ISA == _MIPS_ISA_MIPS64) || (_MIPS_SIM == _MIPS_SIM_ABI64)
+
+typedef int64_t Atomic64;
+
+inline Atomic64 NoBarrier_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value)
+{
+ Atomic64 prev, tmp;
+ __asm__ volatile(
+ ".set push \n"
+ ".set noreorder \n"
+
+ "1: \n"
+ "lld %0, %5 \n" // prev = *ptr
+ "bne %0, %3, 2f \n" // if (prev != old_value) goto 2
+ " move %2, %4 \n" // tmp = new_value
+ "scd %2, %1 \n" // *ptr = tmp (with atomic check)
+ "beqz %2, 1b \n" // start again on atomic error
+ " nop \n" // delay slot nop
+ "2: \n"
+
+ ".set pop \n"
+ : "=&r" (prev), "=m" (*ptr),
+ "=&r" (tmp)
+ : "Ir" (old_value), "r" (new_value),
+ "m" (*ptr)
+ : "memory"
+ );
+ return prev;
+}
+
+inline Atomic64 NoBarrier_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value)
+{
+ Atomic64 temp, old;
+ __asm__ volatile(
+ ".set push \n"
+ ".set noreorder \n"
+
+ "1: \n"
+ "lld %1, %2 \n" // old = *ptr
+ "move %0, %3 \n" // temp = new_value
+ "scd %0, %2 \n" // *ptr = temp (with atomic check)
+ "beqz %0, 1b \n" // start again on atomic error
+ " nop \n" // delay slot nop
+
+ ".set pop \n"
+ : "=&r" (temp), "=&r" (old),
+ "=m" (*ptr)
+ : "r" (new_value), "m" (*ptr)
+ : "memory"
+ );
+ return old;
+}
+
+inline Atomic64 NoBarrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment)
+{
+ Atomic64 temp, temp2;
+ __asm__ volatile(
+ ".set push \n"
+ ".set noreorder \n"
+
+ "1: \n"
+ "lld %0, %2 \n" // temp = *ptr
+ "daddu %1, %0, %3 \n" // temp2 = temp + increment
+ "scd %1, %2 \n" // *ptr = temp2 (with atomic check)
+ "beqz %1, 1b \n" // start again on atomic error
+ "daddu %1, %0, %3 \n" // temp2 = temp + increment
+
+ ".set pop \n"
+ : "=&r" (temp), "=&r" (temp2),
+ "=m" (*ptr)
+ : "Ir" (increment), "m" (*ptr)
+ : "memory"
+ );
+ // temp2 now holds the final value.
+ return temp2;
+}
+
+inline Atomic64 Acquire_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value)
+{
+ Atomic64 old_value = NoBarrier_AtomicExchange(ptr, new_value);
+ MemoryBarrier();
+ return old_value;
+}
+
+inline Atomic64 Barrier_AtomicIncrement(volatile Atomic64* ptr,
+ Atomic64 increment)
+{
+ MemoryBarrier();
+ Atomic64 res = NoBarrier_AtomicIncrement(ptr, increment);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic64 Acquire_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value)
+{
+ Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ MemoryBarrier();
+ return res;
+}
+
+inline Atomic64 Release_CompareAndSwap(volatile Atomic64* ptr,
+ Atomic64 old_value,
+ Atomic64 new_value)
+{
+ MemoryBarrier();
+ Atomic64 res = NoBarrier_CompareAndSwap(ptr, old_value, new_value);
+ return res;
+}
+
+inline void NoBarrier_Store(volatile Atomic64* ptr, Atomic64 value)
+{
+ *ptr = value;
+}
+
+inline Atomic64 Release_AtomicExchange(volatile Atomic64* ptr,
+ Atomic64 new_value)
+{
+ MemoryBarrier();
+ return NoBarrier_AtomicExchange(ptr, new_value);
+}
+
+inline void Acquire_Store(volatile Atomic64* ptr, Atomic64 value)
+{
+ *ptr = value;
+ MemoryBarrier();
+}
+
+inline void Release_Store(volatile Atomic64* ptr, Atomic64 value)
+{
+ MemoryBarrier();
+ *ptr = value;
+}
+
+inline Atomic64 NoBarrier_Load(volatile const Atomic64* ptr)
+{
+ return *ptr;
+}
+
+inline Atomic64 Acquire_Load(volatile const Atomic64* ptr)
+{
+ Atomic64 value = *ptr;
+ MemoryBarrier();
+ return value;
+}
+
+inline Atomic64 Release_Load(volatile const Atomic64* ptr)
+{
+ MemoryBarrier();
+ return *ptr;
+}
+
+#endif
+
+} // namespace base::subtle
+} // namespace base
+
+#endif // BASE_ATOMICOPS_INTERNALS_MIPS_H_
diff --git a/src/base/atomicops.h b/src/base/atomicops.h
index 44d132c..9a727ad 100644
--- a/src/base/atomicops.h
+++ b/src/base/atomicops.h
@@ -111,6 +111,8 @@
#include "base/atomicops-internals-windows.h"
#elif defined(__linux__) && defined(__PPC__)
#include "base/atomicops-internals-linuxppc.h"
+#elif defined(__GNUC__) && defined(__mips__)
+#include "base/atomicops-internals-mips.h"
#else
// Assume x86 for now. If you need to support a new architecture and
// don't know how to implement atomic ops, you can probably get away
diff --git a/src/base/basictypes.h b/src/base/basictypes.h
index bdea488..c4a5d3b 100644
--- a/src/base/basictypes.h
+++ b/src/base/basictypes.h
@@ -345,6 +345,8 @@ class AssignAttributeStartEnd {
# elif (defined(__arm__))
# define CACHELINE_ALIGNED __attribute__((aligned(64)))
// some ARMs have shorter cache lines (ARM1176JZF-S is 32 bytes for example) but obviously 64-byte aligned implies 32-byte aligned
+# elif (defined(__mips__))
+# define CACHELINE_ALIGNED __attribute__((aligned(128)))
# else
# error Could not determine cache line length - unknown architecture
# endif
diff --git a/src/base/linux_syscall_support.h b/src/base/linux_syscall_support.h
index b3fc8a3..70774c0 100644
--- a/src/base/linux_syscall_support.h
+++ b/src/base/linux_syscall_support.h
@@ -2020,6 +2020,23 @@ struct kernel_stat {
return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8);
}
#endif
+ #if defined(__mips__) && (_MIPS_ISA == _MIPS_ISA_MIPS64)
+ LSS_INLINE _syscall6(void*, mmap, void*, s,
+ size_t, l, int, p,
+ int, f, int, d,
+ __off64_t, o)
+ LSS_INLINE int LSS_NAME(sigaction)(int signum,
+ const struct kernel_sigaction *act,
+ struct kernel_sigaction *oldact) {
+ return LSS_NAME(rt_sigaction)(signum, act, oldact, (KERNEL_NSIG+7)/8);
+
+ }
+ LSS_INLINE int LSS_NAME(sigprocmask)(int how,
+ const struct kernel_sigset_t *set,
+ struct kernel_sigset_t *oldset) {
+ return LSS_NAME(rt_sigprocmask)(how, set, oldset, (KERNEL_NSIG+7)/8);
+ }
+ #endif
#if defined(__x86_64__) || \
defined(__arm__) || \
(defined(__mips__) && _MIPS_SIM != _MIPS_SIM_ABI32)
diff --git a/src/stacktrace_config.h b/src/stacktrace_config.h
index 72d108a..787cdd2 100644
--- a/src/stacktrace_config.h
+++ b/src/stacktrace_config.h
@@ -76,6 +76,10 @@
# error stacktrace without frame pointer is not supported on ARM
# endif
+// The MIPS case
+#elif defined(__mips__) && __GNUC__ >= 2
+# define STACKTRACE_INL_HEADER "stacktrace_generic-inl.h"
+
// The Windows case -- probably cygwin and mingw will use one of the
// x86-includes above, but if not, we can fall back to windows intrinsics.
#elif defined(_WIN32) || defined(__CYGWIN__) || defined(__CYGWIN32__) || defined(__MINGW32__)
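As an aside, a hedged sketch of the publish/consume pattern the Acquire/Release variants in the new header are meant to support (hypothetical example, not part of this patch; the Publish/Consume helpers are made up for illustration):

    #include "base/atomicops.h"

    static int payload = 0;
    static Atomic32 ready = 0;

    void Publish() {
      payload = 42;                             // plain write
      base::subtle::Release_Store(&ready, 1);   // barrier, then store: payload is visible before ready
    }

    int Consume() {
      if (base::subtle::Acquire_Load(&ready) == 1) {  // load, then barrier
        return payload;                               // observes the published value
      }
      return -1;   // not published yet
    }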