//===-- atomic_test.cpp -----------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//
#include "tests/scudo_unit_test.h"
#include "atomic_helpers.h"
namespace scudo {
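
// Sandwiches an atomic variable between two magic values so the tests can
// detect stray writes outside the bounds of the atomic itself.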
template <typename T> struct ValAndMagic {
  typename T::Type Magic0;
  T A;
  typename T::Type Magic1;

  static ValAndMagic<T> *Sink;
};

template <typename T> ValAndMagic<T> *ValAndMagic<T>::Sink;
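
// Directly writes a pattern covering every byte of the value and checks that
// atomic_load observes it, then checks that atomic_store writes it back
// exactly, all without disturbing the surrounding magic fields.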
template <typename T, memory_order LoadMO, memory_order StoreMO>
void checkStoreLoad() {
  typedef typename T::Type Type;
  ValAndMagic<T> Val;
  // Prevent the compiler from scalarizing the struct.
  ValAndMagic<T>::Sink = &Val;
  // Ensure that surrounding memory is not overwritten.
  Val.Magic0 = Val.Magic1 = (Type)-3;
  for (u64 I = 0; I < 100; I++) {
    // Generate a value that occupies all bytes of the variable.
    u64 V = I;
    V |= V << 8;
    V |= V << 16;
    V |= V << 32;
    Val.A.ValDoNotUse = (Type)V;
    EXPECT_EQ(atomic_load(&Val.A, LoadMO), (Type)V);
    Val.A.ValDoNotUse = (Type)-1;
    atomic_store(&Val.A, (Type)V, StoreMO);
    EXPECT_EQ(Val.A.ValDoNotUse, (Type)V);
  }
  EXPECT_EQ(Val.Magic0, (Type)-3);
  EXPECT_EQ(Val.Magic1, (Type)-3);
}
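
// Exercise every supported atomic width with a representative set of valid
// load/store memory-order pairings.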
TEST(ScudoAtomicTest, AtomicStoreLoad) {
  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u8, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u8, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u8, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u8, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u16, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u16, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u16, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u16, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u32, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u32, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u32, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u32, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_u64, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_u64, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_u64, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_u64, memory_order_seq_cst, memory_order_seq_cst>();

  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_relaxed>();
  checkStoreLoad<atomic_uptr, memory_order_consume, memory_order_relaxed>();
  checkStoreLoad<atomic_uptr, memory_order_acquire, memory_order_relaxed>();
  checkStoreLoad<atomic_uptr, memory_order_relaxed, memory_order_release>();
  checkStoreLoad<atomic_uptr, memory_order_seq_cst, memory_order_seq_cst>();
}
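
// On success a CAS swaps NewVal in; on failure it leaves the value unchanged
// and writes the current value back into OldVal. The second call in each
// block therefore fails, leaving OldVal equal to NewVal.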
template <typename T> void checkAtomicCompareExchange() {
  typedef typename T::Type Type;
  {
    Type OldVal = 42;
    Type NewVal = 24;
    Type V = OldVal;
    EXPECT_TRUE(atomic_compare_exchange_strong(
        reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
    EXPECT_FALSE(atomic_compare_exchange_strong(
        reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
    EXPECT_EQ(NewVal, OldVal);
  }
  {
    Type OldVal = 42;
    Type NewVal = 24;
    Type V = OldVal;
    EXPECT_TRUE(atomic_compare_exchange_weak(reinterpret_cast<T *>(&V), &OldVal,
                                             NewVal, memory_order_relaxed));
    EXPECT_FALSE(atomic_compare_exchange_weak(
        reinterpret_cast<T *>(&V), &OldVal, NewVal, memory_order_relaxed));
    EXPECT_EQ(NewVal, OldVal);
  }
}

TEST(ScudoAtomicTest, AtomicCompareExchangeTest) {
  checkAtomicCompareExchange<atomic_u8>();
  checkAtomicCompareExchange<atomic_u16>();
  checkAtomicCompareExchange<atomic_u32>();
  checkAtomicCompareExchange<atomic_u64>();
  checkAtomicCompareExchange<atomic_uptr>();
}

} // namespace scudo