// -*- C++ -*-

// Copyright (C) 2007, 2008, 2009 Free Software Foundation, Inc.
//
// This file is part of the GNU ISO C++ Library.  This library is free
// software; you can redistribute it and/or modify it under the terms
// of the GNU General Public License as published by the Free Software
// Foundation; either version 3, or (at your option) any later
// version.

// This library is distributed in the hope that it will be useful, but
// WITHOUT ANY WARRANTY; without even the implied warranty of
// MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the GNU
// General Public License for more details.

// Under Section 7 of GPL version 3, you are granted additional
// permissions described in the GCC Runtime Library Exception, version
// 3.1, as published by the Free Software Foundation.

// You should have received a copy of the GNU General Public License and
// a copy of the GCC Runtime Library Exception along with this program;
// see the files COPYING3 and COPYING.RUNTIME respectively.  If not, see
// <http://www.gnu.org/licenses/>.

/** @file parallel/compatibility.h
 *  @brief Compatibility layer, mostly concerned with atomic operations.
 *  This file is a GNU parallel extension to the Standard C++ Library.
 */

// Written by Felix Putze.

#ifndef _GLIBCXX_PARALLEL_COMPATIBILITY_H
#define _GLIBCXX_PARALLEL_COMPATIBILITY_H 1

#include <parallel/types.h>
#include <parallel/base.h>

#if defined(__SUNPRO_CC) && defined(__sparc)
#include <sys/atomic.h>
#endif

#if !defined(_WIN32) || defined (__CYGWIN__)
#include <sched.h>
#endif

#if defined(_MSC_VER)
#include <Windows.h>
#include <intrin.h>
#undef max
#undef min
#endif

#ifdef __MINGW32__
// Including <windows.h> would drag in all the Win32 names.  Since
// that can cause portability problems for user code, we just declare
// the one function we need here.
extern "C"
__attribute((dllimport)) void __attribute__((stdcall)) Sleep (unsigned long);
#endif

namespace __gnu_parallel
{
#if defined(__ICC)
  template<typename _MustBeInt = int>
  int32 __faa32(int32* __x, int32 __inc)
  {
    asm volatile("lock xadd %0,%1"
                 : "=__r" (__inc), "=__m" (*__x)
                 : "0" (__inc)
                 : "memory");
    return __inc;
  }
#if defined(__x86_64)
  template<typename _MustBeInt = int>
  int64 __faa64(int64* __x, int64 __inc)
  {
    asm volatile("lock xadd %0,%1"
                 : "=__r" (__inc), "=__m" (*__x)
                 : "0" (__inc)
                 : "memory");
    return __inc;
  }
#endif
#endif

  // The atomic functions below only work on integral types.

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to a 32-bit signed integer.
   *  @param __addend Value to add.
   */
  inline int32
  __fetch_and_add_32(volatile int32* __ptr, int32 __addend)
  {
#if defined(__ICC)      //x86 version
    return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ECC)    //IA-64 version
    return _InterlockedExchangeAdd((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
    return _InterlockedExchangeAdd(reinterpret_cast<volatile long*>(__ptr),
                                   __addend);
#elif defined(__GNUC__)
    return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    volatile int32 __before, __after;
    do
      {
        __before = *__ptr;
        __after = __before + __addend;
      } while (atomic_cas_32((volatile unsigned int*)__ptr, __before,
                             __after) != __before);
    return __before;
#else   //fallback, slow
#pragma message("slow __fetch_and_add_32")
    int32 __res;
#pragma omp critical
    {
      __res = *__ptr;
      *(__ptr) += __addend;
    }
    return __res;
#endif
  }

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to a 64-bit signed integer.
   *  @param __addend Value to add.
   */
  inline int64
  __fetch_and_add_64(volatile int64* __ptr, int64 __addend)
  {
#if defined(__ICC) && defined(__x86_64) //x86-64 version
    return __faa64<int>((int64*)__ptr, __addend);
#elif defined(__ECC)    //IA-64 version
    return _InterlockedExchangeAdd64((void*)__ptr, __addend);
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return 0;
#else
    return _InterlockedExchangeAdd64(__ptr, __addend);
#endif
#elif defined(__GNUC__) && defined(__x86_64)
    return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon))
    return __sync_fetch_and_add(__ptr, __addend);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    volatile int64 __before, __after;
    do
      {
        __before = *__ptr;
        __after = __before + __addend;
      } while (atomic_cas_64((volatile unsigned long long*)__ptr, __before,
                             __after) != __before);
    return __before;
#else   //fallback, slow
#if defined(__GNUC__) && defined(__i386)
    // XXX doesn't work with -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __fetch_and_add_64")
    int64 __res;
#pragma omp critical
    {
      __res = *__ptr;
      *(__ptr) += __addend;
    }
    return __res;
#endif
  }

  /** @brief Add a value to a variable, atomically.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to a signed integer.
   *  @param __addend Value to add.
   */
  template<typename _Tp>
  inline _Tp
  __fetch_and_add(volatile _Tp* __ptr, _Tp __addend)
  {
    if (sizeof(_Tp) == sizeof(int32))
      return (_Tp)__fetch_and_add_32((volatile int32*) __ptr, (int32)__addend);
    else if (sizeof(_Tp) == sizeof(int64))
      return (_Tp)__fetch_and_add_64((volatile int64*) __ptr, (int64)__addend);
    else
      {
        _GLIBCXX_PARALLEL_ASSERT(false);
        // Not reached; keeps this non-void function well-formed.
        return _Tp(0);
      }
  }
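
  // Illustrative usage sketch, not part of this header's interface:
  // __fetch_and_add returns the value held *before* the addition, so
  // it can hand out unique consecutive indices to concurrent threads.
  // The names __next_index and __claim_index are hypothetical.
  //
  // @code
  //   volatile int32 __next_index = 0;
  //
  //   inline int32
  //   __claim_index()
  //   { return __fetch_and_add(&__next_index, (int32)1); }
  // @endcode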


#if defined(__ICC)

  template<typename _MustBeInt = int>
  inline int32
  __cas32(volatile int32* __ptr, int32 __old, int32 __nw)
  {
    int32 __before;
    __asm__ __volatile__("lock; cmpxchgl %1,%2"
                         : "=a"(__before)
                         : "q"(__nw), "__m"(*(volatile long long*)(__ptr)),
                               "0"(__old)
                         : "memory");
    return __before;
  }

#if defined(__x86_64)
  template<typename _MustBeInt = int>
  inline int64
  __cas64(volatile int64 *__ptr, int64 __old, int64 __nw)
  {
    int64 __before;
    __asm__ __volatile__("lock; cmpxchgq %1,%2"
                         : "=a"(__before)
                         : "q"(__nw), "__m"(*(volatile long long*)(__ptr)),
                               "0"(__old)
                         : "memory");
    return __before;
  }
#endif

#endif

  /** @brief Compare @c *__ptr and @c __comparand.  If equal, set
   *  @c *__ptr to @c __replacement and return @c true; otherwise
   *  return @c false.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to 32-bit signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value.
   */
  inline bool
  __compare_and_swap_32(volatile int32* __ptr, int32 __comparand,
                        int32 __replacement)
  {
#if defined(__ICC)      //x86 version
    return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                       __comparand) == __comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange((void*)__ptr, __replacement,
                                       __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
    return _InterlockedCompareExchange(
               reinterpret_cast<volatile long*>(__ptr),
               __replacement, __comparand)
             == __comparand;
#elif defined(__GNUC__)
    return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_32((volatile unsigned int*)__ptr, __comparand,
                         __replacement) == __comparand;
#else
#pragma message("slow __compare_and_swap_32")
    bool __res = false;
#pragma omp critical
    {
      if (*__ptr == __comparand)
        {
          *__ptr = __replacement;
          __res = true;
        }
    }
    return __res;
#endif
  }

  /** @brief Compare @c *__ptr and @c __comparand.  If equal, set
   *  @c *__ptr to @c __replacement and return @c true; otherwise
   *  return @c false.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to 64-bit signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value.
   */
  inline bool
  __compare_and_swap_64(volatile int64* __ptr, int64 __comparand,
                        int64 __replacement)
  {
#if defined(__ICC) && defined(__x86_64) //x86-64 version
    return __cas64<int>(__ptr, __comparand, __replacement) == __comparand;
#elif defined(__ECC)    //IA-64 version
    return _InterlockedCompareExchange64((void*)__ptr, __replacement,
                                         __comparand) == __comparand;
#elif defined(__ICL) || defined(_MSC_VER)
#ifndef _WIN64
    _GLIBCXX_PARALLEL_ASSERT(false);    //not available in this case
    return false;
#else
    return _InterlockedCompareExchange64(__ptr, __replacement,
                                         __comparand) == __comparand;
#endif

#elif defined(__GNUC__) && defined(__x86_64)
    return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__GNUC__) && defined(__i386) &&                   \
  (defined(__i686) || defined(__pentium4) || defined(__athlon))
    return __sync_bool_compare_and_swap(__ptr, __comparand, __replacement);
#elif defined(__SUNPRO_CC) && defined(__sparc)
    return atomic_cas_64((volatile unsigned long long*)__ptr,
                         __comparand, __replacement) == __comparand;
#else
#if defined(__GNUC__) && defined(__i386)
    // XXX doesn't work with -march=native
    //#warning "please compile with -march=i686 or better"
#endif
#pragma message("slow __compare_and_swap_64")
    bool __res = false;
#pragma omp critical
    {
      if (*__ptr == __comparand)
        {
          *__ptr = __replacement;
          __res = true;
        }
    }
    return __res;
#endif
  }

  /** @brief Compare @c *__ptr and @c __comparand.  If equal, set
   *  @c *__ptr to @c __replacement and return @c true; otherwise
   *  return @c false.
   *
   *  Implementation is heavily platform-dependent.
   *  @param __ptr Pointer to signed integer.
   *  @param __comparand Compare value.
   *  @param __replacement Replacement value. */
  template<typename _Tp>
  inline bool
  __compare_and_swap(volatile _Tp* __ptr, _Tp __comparand, _Tp __replacement)
  {
    if (sizeof(_Tp) == sizeof(int32))
      return __compare_and_swap_32((volatile int32*) __ptr, (int32)__comparand,
                                   (int32)__replacement);
    else if (sizeof(_Tp) == sizeof(int64))
      return __compare_and_swap_64((volatile int64*) __ptr, (int64)__comparand,
                                   (int64)__replacement);
    else
      {
        _GLIBCXX_PARALLEL_ASSERT(false);
        // Not reached; keeps this non-void function well-formed.
        return false;
      }
  }
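
  // Illustrative usage sketch, not part of this header's interface:
  // the canonical retry loop around __compare_and_swap, here keeping a
  // shared maximum.  On a failed swap the current value is re-read and
  // the attempt repeated.  __atomic_max is a hypothetical name.
  //
  // @code
  //   template<typename _Tp>
  //     void
  //     __atomic_max(volatile _Tp* __ptr, _Tp __val)
  //     {
  //       _Tp __old = *__ptr;
  //       while (__old < __val
  //              && !__compare_and_swap(__ptr, __old, __val))
  //         __old = *__ptr;
  //     }
  // @endcode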

  /** @brief Yield control to another thread, without waiting for
      the end of the time slice. */
  inline void
  __yield()
  {
#if defined (_WIN32) && !defined (__CYGWIN__)
    Sleep(0);
#else
    sched_yield();
#endif
  }
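
  // Illustrative usage sketch, not part of this header's interface: a
  // minimal test-and-set spin lock combining __compare_and_swap_32 with
  // __yield, so a waiting thread cedes its time slice instead of
  // burning CPU.  __spin_lock is a hypothetical name; a real lock would
  // also need appropriate memory fences when releasing.
  //
  // @code
  //   inline void
  //   __spin_lock(volatile int32* __lock)
  //   {
  //     while (!__compare_and_swap_32(__lock, 0, 1))
  //       __yield();   // lock busy: let another thread run
  //   }
  // @endcode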
} // end namespace __gnu_parallel

#endif /* _GLIBCXX_PARALLEL_COMPATIBILITY_H */