mutex.h
//===-- mutex.h -------------------------------------------------*- C++ -*-===//
//
// Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
// See https://llvm.org/LICENSE.txt for license information.
// SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
//
//===----------------------------------------------------------------------===//

#ifndef SCUDO_MUTEX_H_
#define SCUDO_MUTEX_H_

#include "atomic_helpers.h"
#include "common.h"

#include <string.h>

#if SCUDO_FUCHSIA
#include <lib/sync/mutex.h> // for sync_mutex_t
#endif

namespace scudo {
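
// A hybrid mutex: lock() first spins, calling tryLock() and yielding the
// processor between attempts (up to NumberOfTries rounds of NumberOfYields
// pauses), then falls back to the blocking slow path in lockSlow(), which is
// defined out of line per platform. The lock state is a bare atomic_u32 on
// Linux (presumably backing a futex-based slow path) and the native
// sync_mutex_t on Fuchsia. init() zeroes the whole object, so it must be
// called before first use and only while the mutex is unlocked.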
class HybridMutex {
public:
  void init() { memset(this, 0, sizeof(*this)); }
  bool tryLock();
  NOINLINE void lock() {
    if (LIKELY(tryLock()))
      return;
    // The compiler may try to fully unroll the loop, ending up in a
    // NumberOfTries*NumberOfYields block of pauses mixed with tryLocks. This
    // is large, ugly and unneeded, a compact loop is better for our purpose
    // here. Use a pragma to tell the compiler not to unroll the loop.
#ifdef __clang__
#pragma nounroll
#endif
    for (u8 I = 0U; I < NumberOfTries; I++) {
      yieldProcessor(NumberOfYields);
      if (tryLock())
        return;
    }
    lockSlow();
  }
  void unlock();

private:
  static constexpr u8 NumberOfTries = 8U;
  static constexpr u8 NumberOfYields = 8U;

#if SCUDO_LINUX
  atomic_u32 M;
#elif SCUDO_FUCHSIA
  sync_mutex_t M;
#endif

  void lockSlow();
};
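
// RAII wrapper that holds a HybridMutex for the lifetime of the enclosing
// scope: the constructor acquires the lock and the destructor releases it.
// Copying is deleted so the lock cannot be released twice by accident.
// Hypothetical usage sketch (the names below are illustrative, not from this
// file):
//   HybridMutex Mutex;
//   Mutex.init();
//   {
//     ScopedLock L(Mutex);
//     // ... critical section protected by Mutex ...
//   } // Mutex is unlocked here.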
class ScopedLock {
public:
  explicit ScopedLock(HybridMutex &M) : Mutex(M) { Mutex.lock(); }
  ~ScopedLock() { Mutex.unlock(); }

private:
  HybridMutex &Mutex;

  ScopedLock(const ScopedLock &) = delete;
  void operator=(const ScopedLock &) = delete;
};

} // namespace scudo

#endif // SCUDO_MUTEX_H_