summaryrefslogtreecommitdiff
path: root/src/backend/storage/lmgr/spin.c
blob: e406bea201d054d0cc9ddb050d32d0b4d5b15d6b (plain)
1
2
3
4
5
6
7
8
9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
94
95
96
97
98
99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
166
167
168
169
170
171
172
173
174
175
176
177
178
179
180
/*-------------------------------------------------------------------------
 *
 * spin.c
 *	   Hardware-independent implementation of spinlocks.
 *
 *
 * For machines that have test-and-set (TAS) instructions, s_lock.h/.c
 * define the spinlock implementation.  This file contains only a stub
 * implementation for spinlocks using PGSemaphores.  Unless semaphores
 * are implemented in a way that doesn't involve a kernel call, this
 * is too slow to be very useful :-(
 *
 *
 * Portions Copyright (c) 1996-2023, PostgreSQL Global Development Group
 * Portions Copyright (c) 1994, Regents of the University of California
 *
 *
 * IDENTIFICATION
 *	  src/backend/storage/lmgr/spin.c
 *
 *-------------------------------------------------------------------------
 */
#include "postgres.h"

#include "storage/pg_sema.h"
#include "storage/shmem.h"
#include "storage/spin.h"


#ifndef HAVE_SPINLOCKS

/*
 * No TAS, so spinlocks are implemented as PGSemaphores.
 */

#ifndef HAVE_ATOMICS
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES + NUM_ATOMICS_SEMAPHORES)
#else
#define NUM_EMULATION_SEMAPHORES (NUM_SPINLOCK_SEMAPHORES)
#endif							/* HAVE_ATOMICS */

PGSemaphore *SpinlockSemaArray;

#else							/* !HAVE_SPINLOCKS */

#define NUM_EMULATION_SEMAPHORES 0

#endif							/* HAVE_SPINLOCKS */

/*
 * Compute the number of bytes of shared memory required to hold the
 * semaphores backing spinlock emulation (zero when the platform has
 * real spinlocks and no emulation is needed).
 */
Size
SpinlockSemaSize(void)
{
	Size		nbytes = NUM_EMULATION_SEMAPHORES * sizeof(PGSemaphore);

	return nbytes;
}

/*
 * Tell the caller how many semaphores spinlock emulation will consume,
 * so they can be included in the count passed to PGReserveSemaphores().
 */
int
SpinlockSemas(void)
{
	int			nsemas = NUM_EMULATION_SEMAPHORES;

	return nsemas;
}

#ifndef HAVE_SPINLOCKS

/*
 * Set up the semaphore array used for spinlock emulation.
 *
 * Must run after PGReserveSemaphores(), since it creates one semaphore
 * per emulated spinlock slot.
 */
void
SpinlockSemaInit(void)
{
	int			nsemas = SpinlockSemas();
	PGSemaphore *semas;

	/*
	 * ShmemAlloc() is itself protected by a spinlock, which obviously
	 * cannot work yet, so the memory has to come from ShmemAllocUnlocked().
	 */
	semas = (PGSemaphore *) ShmemAllocUnlocked(SpinlockSemaSize());
	for (int i = 0; i < nsemas; i++)
		semas[i] = PGSemaphoreCreate();
	SpinlockSemaArray = semas;
}

/*
 * s_lock.h hardware-spinlock emulation using semaphores
 *
 * We map all spinlocks onto NUM_EMULATION_SEMAPHORES semaphores.  It's okay to
 * map multiple spinlocks onto one semaphore because no process should ever
 * hold more than one at a time.  We just need enough semaphores so that we
 * aren't adding too much extra contention from that.
 *
 * There is one exception to the restriction of only holding one spinlock at a
 * time, which is that it's ok if emulated atomic operations are nested inside
 * spinlocks. To avoid the danger of spinlocks and atomics using the same sema,
 * we make sure "normal" spinlocks and atomics backed by spinlocks use
 * distinct semaphores (see the nested argument to s_init_lock_sema).
 *
 * slock_t is just an int for this implementation; it holds the spinlock
 * number from 1..NUM_EMULATION_SEMAPHORES.  We intentionally ensure that 0
 * is not a valid value, so that testing with this code can help find
 * failures to initialize spinlocks.
 */

/*
 * Verify that a spinlock number lies in the legal range
 * 1..NUM_EMULATION_SEMAPHORES; anything else indicates an uninitialized
 * (or clobbered) spinlock, which we report as an ERROR.
 */
static inline void
s_check_valid(int lockndx)
{
	if (likely(lockndx >= 1 && lockndx <= NUM_EMULATION_SEMAPHORES))
		return;

	elog(ERROR, "invalid spinlock number: %d", lockndx);
}

/*
 * Initialize an emulated spinlock by assigning it a semaphore number.
 *
 * Semaphores are handed out round-robin.  Emulated atomics may be used
 * while an ordinary spinlock is held, so "nested" callers (the atomics
 * layer) draw from a range of the array disjoint from ordinary spinlocks;
 * see the file comments above for the rationale.
 */
void
s_init_lock_sema(volatile slock_t *lock, bool nested)
{
	static uint32 counter = 0;
	uint32		first;
	uint32		nsemas;

	if (!nested)
	{
		first = 1;
		nsemas = NUM_SPINLOCK_SEMAPHORES;
	}
	else
	{
		/* atomics nested inside spinlocks use the trailing range */
		first = 1 + NUM_SPINLOCK_SEMAPHORES;
		nsemas = NUM_ATOMICS_SEMAPHORES;
	}

	*lock = (slock_t) (first + counter++ % nsemas);

	/* cross-check that the assignment landed in the valid range */
	s_check_valid(*lock);
}

/*
 * Release an emulated spinlock by unlocking its backing semaphore.
 */
void
s_unlock_sema(volatile slock_t *lock)
{
	int			semano = *lock;

	s_check_valid(semano);
	PGSemaphoreUnlock(SpinlockSemaArray[semano - 1]);
}

/*
 * S_LOCK_FREE() has no users under this emulation, so it is deliberately
 * left unimplemented; reaching here is a coding error.
 */
bool
s_lock_free_sema(volatile slock_t *lock)
{
	elog(ERROR, "spin.c does not support S_LOCK_FREE()");
	return false;				/* unreachable; silences the compiler */
}

/*
 * Try to acquire an emulated spinlock without blocking.
 *
 * Follows the TAS() macro convention: returns 0 when the lock was
 * successfully acquired, nonzero when it was already held.
 */
int
tas_sema(volatile slock_t *lock)
{
	int			semano = *lock;

	s_check_valid(semano);

	return PGSemaphoreTryLock(SpinlockSemaArray[semano - 1]) ? 0 : 1;
}

#endif							/* !HAVE_SPINLOCKS */