summaryrefslogtreecommitdiff
path: root/core/cortex-m/atomic.h
blob: 1f432e8d1ea9ad552ae1ed928a7b9e577e96523b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
/* Copyright 2012 The Chromium OS Authors. All rights reserved.
 * Use of this source code is governed by a BSD-style license that can be
 * found in the LICENSE file.
 */

/* Atomic operations for ARMv7 */

#ifndef __CROS_EC_ATOMIC_H
#define __CROS_EC_ATOMIC_H

#include "common.h"

/* Atomic variable type: a plain 32-bit int on this architecture. */
typedef int atomic_t;
/* Value type read from / returned by the atomic_* accessors. */
typedef atomic_t atomic_val_t;

/**
 * Implements atomic arithmetic operations on 32-bit integers.
 *
 * It used load/store exclusive.
 * If you write directly the integer used as an atomic variable,
 * you must either clear explicitly the exclusive monitor (using clrex)
 * or do it in exception context (which clears the monitor).
 */
/*
 * ATOMIC_OP(asm_op, a, v): atomically apply the ARM instruction <asm_op>
 * (e.g. add, sub, orr, bic) to the 32-bit word at address <a> with
 * operand <v>, retrying the ldrex/strex sequence until the
 * store-exclusive succeeds.  The result is written back to memory but
 * not returned to the caller.  Clobbers the condition flags ("cc").
 */
#define ATOMIC_OP(asm_op, a, v) do {				\
	uint32_t reg0, reg1;                                    \
								\
	__asm__ __volatile__("1: ldrex   %0, [%2]\n"            \
			     #asm_op" %0, %0, %3\n"		\
			     "   strex   %1, %0, [%2]\n"        \
			     "   teq     %1, #0\n"              \
			     "   bne     1b"                    \
			     : "=&r" (reg0), "=&r" (reg1)       \
			     : "r" (a), "r" (v) : "cc");        \
} while (0)

/*
 * The atomic_* functions are marked as deprecated as a part of the process of
 * transition to Zephyr compatible atomic functions. These prefixes will be
 * removed in the following patches. Please see b:169151160 for more details.
 */

/*
 * Atomically clear the bits set in <bits> at *addr, using a ldrex/strex
 * read-modify-write with the BIC (bit-clear) instruction.
 */
static inline void deprecated_atomic_clear_bits(uint32_t volatile *addr,
						uint32_t bits)
{
	ATOMIC_OP(bic, addr, bits);
}

/**
 * Atomically clear the bits set in <bits> at *addr.
 *
 * Returns the value *addr held before the bits were cleared, for
 * consistency with atomic_or/atomic_add/atomic_sub below (the previous
 * void signature remains backward compatible for callers that ignore
 * the return value).
 */
static inline atomic_val_t atomic_clear_bits(atomic_t *addr, atomic_val_t bits)
{
	return __atomic_fetch_and(addr, ~bits, __ATOMIC_SEQ_CST);
}

/*
 * Atomically OR <bits> into *addr, using a ldrex/strex
 * read-modify-write with the ORR instruction.
 */
static inline void deprecated_atomic_or(uint32_t volatile *addr, uint32_t bits)
{
	ATOMIC_OP(orr, addr, bits);
}

/**
 * Atomically OR <bits> into *addr.
 *
 * @return the value *addr held before the OR was applied.
 */
static inline atomic_val_t atomic_or(atomic_t *addr, atomic_val_t bits)
{
	atomic_val_t previous;

	previous = __atomic_fetch_or(addr, bits, __ATOMIC_SEQ_CST);
	return previous;
}

/*
 * Atomically add <value> to *addr, using a ldrex/strex
 * read-modify-write with the ADD instruction.
 */
static inline void deprecated_atomic_add(uint32_t volatile *addr,
					 uint32_t value)
{
	ATOMIC_OP(add, addr, value);
}

/**
 * Atomically add <value> to *addr.
 *
 * @return the value *addr held before the addition.
 */
static inline atomic_val_t atomic_add(atomic_t *addr, atomic_val_t value)
{
	atomic_val_t previous;

	previous = __atomic_fetch_add(addr, value, __ATOMIC_SEQ_CST);
	return previous;
}

/*
 * Atomically subtract <value> from *addr, using a ldrex/strex
 * read-modify-write with the SUB instruction.
 */
static inline void deprecated_atomic_sub(uint32_t volatile *addr,
					 uint32_t value)
{
	ATOMIC_OP(sub, addr, value);
}

/**
 * Atomically subtract <value> from *addr.
 *
 * @return the value *addr held before the subtraction.
 */
static inline atomic_val_t atomic_sub(atomic_t *addr, atomic_val_t value)
{
	atomic_val_t previous;

	previous = __atomic_fetch_sub(addr, value, __ATOMIC_SEQ_CST);
	return previous;
}

/*
 * Atomically read *addr and replace its contents with 0, retrying the
 * ldrex/strex sequence until the store-exclusive succeeds.
 *
 * The zero that gets stored is supplied by the "r" (0) input operand.
 * The former "mov %3, #0" instruction was removed: it wrote to an
 * input operand, which GCC's extended-asm rules forbid (input operands
 * must not be modified), and it was redundant because %3 already
 * holds 0.
 *
 * Returns the value *addr held before it was cleared.
 */
static inline uint32_t deprecated_atomic_read_clear(uint32_t volatile *addr)
{
	uint32_t ret, tmp;

	__asm__ __volatile__("1: ldrex   %0, [%2]\n"
			     "   strex   %1, %3, [%2]\n"
			     "   teq     %1, #0\n"
			     "   bne     1b"
			     : "=&r" (ret), "=&r" (tmp)
			     : "r" (addr), "r" (0) : "cc");

	return ret;
}

/**
 * Atomically read *addr and reset it to 0 in one operation.
 *
 * @return the value *addr held before it was cleared.
 */
static inline atomic_val_t atomic_read_clear(atomic_t *addr)
{
	atomic_val_t old_value;

	old_value = __atomic_exchange_n(addr, 0, __ATOMIC_SEQ_CST);
	return old_value;
}

#endif  /* __CROS_EC_ATOMIC_H */