path: root/src/runtime/atomic_arm.go
blob: b1632cdd167fc08e2bc114cb5868e4e5ef3ab4d1
// Copyright 2009 The Go Authors. All rights reserved.
// Use of this source code is governed by a BSD-style
// license that can be found in the LICENSE file.

package runtime

import "unsafe"

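// locktab holds the mutexes used to emulate the 64-bit atomic
// operations below. Each entry is padded to a cache line so that
// locks for different addresses do not share one.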
var locktab [57]struct {
	l   mutex
	pad [_CacheLineSize - unsafe.Sizeof(mutex{})]byte
}

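// addrLock maps the address of a 64-bit value to the locktab entry
// that guards operations on it.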
func addrLock(addr *uint64) *mutex {
	return &locktab[(uintptr(unsafe.Pointer(addr))>>3)%uintptr(len(locktab))].l
}

// Atomic add and return new value.
//go:nosplit
func xadd(val *uint32, delta int32) uint32 {
	for {
		oval := *val
		nval := oval + uint32(delta)
		if cas(val, oval, nval) {
			return nval
		}
	}
}

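// Atomic exchange: store v into *addr and return the previous value,
// implemented as a cas loop.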
//go:nosplit
func xchg(addr *uint32, v uint32) uint32 {
	for {
		old := *addr
		if cas(addr, old, v) {
			return old
		}
	}
}

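// Pointer flavor of xchg, built on casp.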
//go:nosplit
func xchgp(addr *unsafe.Pointer, v unsafe.Pointer) unsafe.Pointer {
	for {
		old := *addr
		if casp(addr, old, v) {
			return old
		}
	}
}

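// On 32-bit ARM uintptr and uint32 have the same width, so the
// exchange can be delegated to xchg.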
//go:nosplit
func xchguintptr(addr *uintptr, v uintptr) uintptr {
	return uintptr(xchg((*uint32)(unsafe.Pointer(addr)), uint32(v)))
}

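// Atomic load, implemented as xadd(addr, 0) so the read goes through
// the cas loop rather than a plain load.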
//go:nosplit
func atomicload(addr *uint32) uint32 {
	return xadd(addr, 0)
}

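// Pointer flavor of atomicload; pointers are 32 bits wide here, so a
// 32-bit xadd suffices.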
//go:nosplit
func atomicloadp(addr unsafe.Pointer) unsafe.Pointer {
	return unsafe.Pointer(uintptr(xadd((*uint32)(addr), 0)))
}

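// Atomic pointer store via an unconditional casp loop.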
//go:nosplit
func atomicstorep(addr unsafe.Pointer, v unsafe.Pointer) {
	for {
		old := *(*unsafe.Pointer)(addr)
		if casp((*unsafe.Pointer)(addr), old, v) {
			return
		}
	}
}

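// Atomic 32-bit store via an unconditional cas loop.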
//go:nosplit
func atomicstore(addr *uint32, v uint32) {
	for {
		old := *addr
		if cas(addr, old, v) {
			return
		}
	}
}

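// The 64-bit operations below have no native support on the ARM
// variants this port targets, so each one is emulated by taking the
// runtime lock for its address (see locktab). onM runs the locked
// section on the g0 stack.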
//go:nosplit
func cas64(addr *uint64, old, new uint64) bool {
	var ok bool
	onM(func() {
		lock(addrLock(addr))
		if *addr == old {
			*addr = new
			ok = true
		}
		unlock(addrLock(addr))
	})
	return ok
}

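// xadd64 atomically adds delta to *addr and returns the new value.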
//go:nosplit
func xadd64(addr *uint64, delta int64) uint64 {
	var r uint64
	onM(func() {
		lock(addrLock(addr))
		r = *addr + uint64(delta)
		*addr = r
		unlock(addrLock(addr))
	})
	return r
}

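// xchg64 atomically stores v into *addr and returns the old value.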
//go:nosplit
func xchg64(addr *uint64, v uint64) uint64 {
	var r uint64
	onM(func() {
		lock(addrLock(addr))
		r = *addr
		*addr = v
		unlock(addrLock(addr))
	})
	return r
}

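// atomicload64 reads *addr under its address lock.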
//go:nosplit
func atomicload64(addr *uint64) uint64 {
	var r uint64
	onM(func() {
		lock(addrLock(addr))
		r = *addr
		unlock(addrLock(addr))
	})
	return r
}

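// atomicstore64 writes v to *addr under its address lock.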
//go:nosplit
func atomicstore64(addr *uint64, v uint64) {
	onM(func() {
		lock(addrLock(addr))
		*addr = v
		unlock(addrLock(addr))
	})
}

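// atomicor8 atomically ORs v into the byte at addr. The only
// primitive available here is the 32-bit cas, so the OR is applied
// to the containing aligned word.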
//go:nosplit
func atomicor8(addr *uint8, v uint8) {
	// Align down to 4 bytes and use 32-bit CAS.
	uaddr := uintptr(unsafe.Pointer(addr))
	addr32 := (*uint32)(unsafe.Pointer(uaddr &^ 3))
	word := uint32(v) << ((uaddr & 3) * 8) // little endian
	for {
		old := *addr32
		if cas(addr32, old, old|word) {
			return
		}
	}
}