//===-- A simple equivalent of std::atomic ----------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef LLVM_LIBC_SRC_SUPPORT_CPP_ATOMIC_H
#define LLVM_LIBC_SRC_SUPPORT_CPP_ATOMIC_H

#include "src/__support/macros/attributes.h"
#include "src/__support/macros/properties/architectures.h"

#include "type_traits.h"

namespace __llvm_libc {
namespace cpp {

enum class MemoryOrder : int {
  RELAXED = __ATOMIC_RELAXED,
  CONSUME = __ATOMIC_CONSUME,
  ACQUIRE = __ATOMIC_ACQUIRE,
  RELEASE = __ATOMIC_RELEASE,
  ACQ_REL = __ATOMIC_ACQ_REL,
  SEQ_CST = __ATOMIC_SEQ_CST
};

template <typename T> struct Atomic {
  // For now, we restrict this to arithmetic types only.
  static_assert(is_arithmetic_v<T>, "Only arithmetic types can be atomic.");

private:
  // The value stored should be appropriately aligned so that
  // hardware instructions used to perform atomic operations work
  // correctly.
  static constexpr int ALIGNMENT = sizeof(T) > alignof(T) ? sizeof(T)
                                                          : alignof(T);

public:
  using value_type = T;

  // We keep the internal value public so that its address can be taken.
  // This is useful in places like the Linux futex operations, where we
  // need pointers to the memory holding the atomic values. However, load
  // and store operations should be performed using the atomic methods.
  alignas(ALIGNMENT) value_type val;

  constexpr Atomic() = default;

  // Initializes the value without using atomic operations.
  constexpr Atomic(value_type v) : val(v) {}

  Atomic(const Atomic &) = delete;
  Atomic &operator=(const Atomic &) = delete;

  // Atomic load
  operator T() { return __atomic_load_n(&val, int(MemoryOrder::SEQ_CST)); }

  T load(MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    return __atomic_load_n(&val, int(mem_ord));
  }

  // Atomic store
  T operator=(T rhs) {
    __atomic_store_n(&val, rhs, int(MemoryOrder::SEQ_CST));
    return rhs;
  }

  void store(T rhs, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    __atomic_store_n(&val, rhs, int(mem_ord));
  }

  // Atomic compare exchange
  bool compare_exchange_strong(T &expected, T desired,
                               MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    return __atomic_compare_exchange_n(&val, &expected, desired, false,
                                       int(mem_ord), int(mem_ord));
  }

  T exchange(T desired, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    return __atomic_exchange_n(&val, desired, int(mem_ord));
  }

  T fetch_add(T increment, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    return __atomic_fetch_add(&val, increment, int(mem_ord));
  }

  T fetch_or(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    return __atomic_fetch_or(&val, mask, int(mem_ord));
  }

  T fetch_and(T mask, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    return __atomic_fetch_and(&val, mask, int(mem_ord));
  }

  T fetch_sub(T decrement, MemoryOrder mem_ord = MemoryOrder::SEQ_CST) {
    return __atomic_fetch_sub(&val, decrement, int(mem_ord));
  }

  // Set the value without using an atomic operation. This is useful
  // for initializing atomic values without running a constructor.
  void set(T rhs) { val = rhs; }
};
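
// Illustrative usage sketch (not part of this header): a minimal spin lock
// built on compare_exchange_strong. The names `lock_flag`, `spin_lock` and
// `spin_unlock` are hypothetical.
//
//   __llvm_libc::cpp::Atomic<int> lock_flag(0);
//
//   void spin_lock() {
//     int expected = 0;
//     // Retry until the flag transitions from 0 (unlocked) to 1 (locked).
//     // On failure, compare_exchange_strong writes the observed value back
//     // into `expected`, so it must be reset before retrying.
//     while (!lock_flag.compare_exchange_strong(
//         expected, 1, __llvm_libc::cpp::MemoryOrder::ACQUIRE))
//       expected = 0;
//   }
//
//   void spin_unlock() {
//     lock_flag.store(0, __llvm_libc::cpp::MemoryOrder::RELEASE);
//   }
//
// Because `val` is public, code such as the Linux futex wrappers can pass
// &lock_flag.val to the kernel directly when the raw address is needed.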

// Issue a thread fence with the given memory ordering.
LIBC_INLINE void atomic_thread_fence(MemoryOrder mem_ord) {
// The NVPTX backend does not currently support atomic thread fences, so we
// issue a full system memory barrier instead.
#ifdef LIBC_TARGET_ARCH_IS_NVPTX
  (void)mem_ord;
  __nvvm_membar_sys();
#else
  __atomic_thread_fence(int(mem_ord));
#endif
}
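
// Illustrative usage sketch (not part of this header): a release fence paired
// with a relaxed store, a common way to publish data before raising a ready
// flag. The names `data` and `ready_flag` are hypothetical.
//
//   data = 42;                                       // plain (non-atomic) write
//   cpp::atomic_thread_fence(cpp::MemoryOrder::RELEASE);
//   ready_flag.store(1, cpp::MemoryOrder::RELAXED);  // flag becomes visible last
//
// A reader that observes the flag with a RELAXED load followed by an ACQUIRE
// fence is then guaranteed to see the write to `data`.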

} // namespace cpp
} // namespace __llvm_libc

#endif // LLVM_LIBC_SRC_SUPPORT_CPP_ATOMIC_H