//===-- atomic_helpers.h ----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#ifndef SCUDO_ATOMIC_H_
#define SCUDO_ATOMIC_H_
#include "internal_defs.h"
namespace scudo {
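// Memory orderings mirror the compiler's __ATOMIC_* builtin constants (the
// static_asserts below verify this), so they can be passed straight through
// to the __atomic_* builtins.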
enum memory_order {
  memory_order_relaxed = 0,
  memory_order_consume = 1,
  memory_order_acquire = 2,
  memory_order_release = 3,
  memory_order_acq_rel = 4,
  memory_order_seq_cst = 5
};
static_assert(memory_order_relaxed == __ATOMIC_RELAXED, "");
static_assert(memory_order_consume == __ATOMIC_CONSUME, "");
static_assert(memory_order_acquire == __ATOMIC_ACQUIRE, "");
static_assert(memory_order_release == __ATOMIC_RELEASE, "");
static_assert(memory_order_acq_rel == __ATOMIC_ACQ_REL, "");
static_assert(memory_order_seq_cst == __ATOMIC_SEQ_CST, "");
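// Each atomic type wraps its value in a struct whose field is deliberately
// named ValDoNotUse: accessing it directly would bypass the atomicity and
// ordering guarantees of the helpers below.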
struct atomic_u8 {
  typedef u8 Type;
  volatile Type ValDoNotUse;
};
struct atomic_u16 {
  typedef u16 Type;
  volatile Type ValDoNotUse;
};
struct atomic_s32 {
  typedef s32 Type;
  volatile Type ValDoNotUse;
};
struct atomic_u32 {
  typedef u32 Type;
  volatile Type ValDoNotUse;
};
struct atomic_u64 {
  typedef u64 Type;
  // On 32-bit platforms u64 is not necessarily aligned on 8 bytes.
  ALIGNED(8) volatile Type ValDoNotUse;
};
struct atomic_uptr {
  typedef uptr Type;
  volatile Type ValDoNotUse;
};
template <typename T>
inline typename T::Type atomic_load(const volatile T *A, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type V;
  __atomic_load(&A->ValDoNotUse, &V, MO);
  return V;
}
template <typename T>
inline void atomic_store(volatile T *A, typename T::Type V, memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  __atomic_store(&A->ValDoNotUse, &V, MO);
}
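// A minimal usage sketch for the two helpers above; Flag is a hypothetical
// variable, not part of this header:
//   atomic_u8 Flag = {};
//   atomic_store(&Flag, static_cast<u8>(1), memory_order_release);
//   if (atomic_load(&Flag, memory_order_acquire)) { /* observe the write */ }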
// Note that regardless of the requested ordering, this emits a full barrier:
// __sync_synchronize() is a sequentially consistent fence.
inline void atomic_thread_fence(memory_order) { __sync_synchronize(); }
template <typename T>
inline typename T::Type atomic_fetch_add(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_add(&A->ValDoNotUse, V, MO);
}
template <typename T>
inline typename T::Type atomic_fetch_sub(volatile T *A, typename T::Type V,
                                         memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  return __atomic_fetch_sub(&A->ValDoNotUse, V, MO);
}
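// A minimal sketch of a relaxed statistics counter built on atomic_fetch_add;
// AllocCount is a hypothetical variable, not part of this header:
//   atomic_u64 AllocCount = {};
//   atomic_fetch_add(&AllocCount, static_cast<u64>(1), memory_order_relaxed);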
template <typename T>
inline typename T::Type atomic_exchange(volatile T *A, typename T::Type V,
                                        memory_order MO) {
  DCHECK(!(reinterpret_cast<uptr>(A) % sizeof(*A)));
  typename T::Type R;
  __atomic_exchange(&A->ValDoNotUse, &V, &R, MO);
  return R;
}
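// A test-and-set handoff is one plausible use of atomic_exchange; Lock is a
// hypothetical variable, not part of this header:
//   atomic_u8 Lock = {};
//   while (atomic_exchange(&Lock, static_cast<u8>(1), memory_order_acquire))
//     ; // Spin until the previous value was 0, i.e. we acquired the lock.
//   atomic_store(&Lock, static_cast<u8>(0), memory_order_release);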
// Both variants update *Cmp with the value observed in *A on failure, and
// always use a relaxed ordering for the failure case. The weak variant may
// fail spuriously and should only be used in a loop.
template <typename T>
inline bool atomic_compare_exchange_strong(volatile T *A, typename T::Type *Cmp,
                                           typename T::Type Xchg,
                                           memory_order MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, false, MO,
                                   __ATOMIC_RELAXED);
}
template <typename T>
inline bool atomic_compare_exchange_weak(volatile T *A, typename T::Type *Cmp,
                                         typename T::Type Xchg,
                                         memory_order MO) {
  return __atomic_compare_exchange(&A->ValDoNotUse, Cmp, &Xchg, true, MO,
                                   __ATOMIC_RELAXED);
}
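// The weak variant is intended for retry loops such as the sketch below;
// Counter is a hypothetical variable, not part of this header:
//   atomic_u32 Counter = {};
//   u32 Old = atomic_load(&Counter, memory_order_relaxed);
//   while (!atomic_compare_exchange_weak(&Counter, &Old, Old + 1,
//                                        memory_order_acq_rel)) {
//     // On failure Old was refreshed with the observed value; just retry.
//   }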
// Clutter-reducing helpers.
template <typename T>
inline typename T::Type atomic_load_relaxed(const volatile T *A) {
  return atomic_load(A, memory_order_relaxed);
}
template <typename T>
inline void atomic_store_relaxed(volatile T *A, typename T::Type V) {
  atomic_store(A, V, memory_order_relaxed);
}
// Strong compare-exchange that returns the value observed in *A: the
// exchange succeeded if and only if the returned value equals Cmp.
template <typename T>
inline typename T::Type atomic_compare_exchange(volatile T *A,
                                                typename T::Type Cmp,
                                                typename T::Type Xchg) {
  atomic_compare_exchange_strong(A, &Cmp, Xchg, memory_order_acquire);
  return Cmp;
}
} // namespace scudo
#endif // SCUDO_ATOMIC_H_