111
|
1 //===-- tsan_clock.h --------------------------------------------*- C++ -*-===//
|
|
2 //
|
145
|
3 // Part of the LLVM Project, under the Apache License v2.0 with LLVM Exceptions.
|
|
4 // See https://llvm.org/LICENSE.txt for license information.
|
|
5 // SPDX-License-Identifier: Apache-2.0 WITH LLVM-exception
|
111
|
6 //
|
|
7 //===----------------------------------------------------------------------===//
|
|
8 //
|
|
9 // This file is a part of ThreadSanitizer (TSan), a race detector.
|
|
10 //
|
|
11 //===----------------------------------------------------------------------===//
|
|
12 #ifndef TSAN_CLOCK_H
|
|
13 #define TSAN_CLOCK_H
|
|
14
|
|
15 #include "tsan_defs.h"
|
|
16 #include "tsan_dense_alloc.h"
|
|
17
|
|
18 namespace __tsan {
|
|
19
|
|
20 typedef DenseSlabAlloc<ClockBlock, 1<<16, 1<<10> ClockAlloc;
|
|
21 typedef DenseSlabAllocCache ClockCache;
|
|
22
|
|
// The clock that lives in sync variables (mutexes, atomics, etc).
class SyncClock {
 public:
  SyncClock();
  ~SyncClock();

  // Number of clock elements currently representable (see size_ below).
  uptr size() const;

  // These are used only in tests.
  u64 get(unsigned tid) const;
  u64 get_clean(unsigned tid) const;

  // Grows the clock so that it can hold at least nclk elements,
  // allocating blocks from the cache c.
  void Resize(ClockCache *c, uptr nclk);
  // Releases all storage back to the cache c and zeroes the clock.
  void Reset(ClockCache *c);

  void DebugDump(int(*printf)(const char *s, ...));

  // Clock element iterator.
  // Note: it iterates only over the table without regard to dirty entries.
  class Iter {
   public:
    explicit Iter(SyncClock* parent);
    Iter& operator++();
    bool operator!=(const Iter& other);
    ClockElem &operator*();

   private:
    SyncClock *parent_;
    // [pos_, end_) is the current continuous range of clock elements.
    ClockElem *pos_;
    ClockElem *end_;
    int block_;  // Current number of second level block.

    NOINLINE void Next();
  };

  Iter begin();
  Iter end();

 private:
  friend class ThreadClock;
  friend class Iter;
  static const uptr kDirtyTids = 2;

  // Compact (tid, epoch) pair; dirty entries hold updates that have not
  // been flushed into the table yet.
  struct Dirty {
    u64 epoch : kClkBits;
    u64 tid : 64 - kClkBits;  // kInvalidId if not active
  };

  unsigned release_store_tid_;
  unsigned release_store_reused_;
  Dirty dirty_[kDirtyTids];
  // If size_ is 0, tab_ is nullptr.
  // If size <= 64 (kClockCount), tab_ contains pointer to an array with
  // 64 ClockElem's (ClockBlock::clock).
  // Otherwise, tab_ points to an array with up to 127 u32 elements,
  // each pointing to the second-level 512b block with 64 ClockElem's.
  // Unused space in the first level ClockBlock is used to store additional
  // clock elements.
  // The last u32 element in the first level ClockBlock is always used as
  // reference counter.
  //
  // See the following scheme for details.
  // All memory blocks are 512 bytes (allocated from ClockAlloc).
  // Clock (clk) elements are 64 bits.
  // Idx and ref are 32 bits.
  //
  // tab_
  //  |
  //  \/
  //  +----------------------------------------------------+
  //  | clk128 | clk129 | ...unused... | idx1 | idx0 | ref |
  //  +----------------------------------------------------+
  //                                    |      |
  //                                    |      \/
  //                                    |      +----------------+
  //                                    |      | clk0 ... clk63 |
  //                                    |      +----------------+
  //                                    \/
  //                                    +------------------+
  //                                    | clk64 ... clk127 |
  //                                    +------------------+
  //
  // Note: dirty entries, if active, always override what's stored in the clock.
  ClockBlock *tab_;
  u32 tab_idx_;
  u16 size_;
  u16 blocks_;  // Number of second level blocks.

  void Unshare(ClockCache *c);
  bool IsShared() const;
  bool Cachable() const;
  void ResetImpl();
  void FlushDirty();
  uptr capacity() const;
  u32 get_block(uptr bi) const;
  void append_block(u32 idx);
  ClockElem &elem(unsigned tid) const;
};
|
|
122
|
|
// The clock that lives in threads.
class ThreadClock {
 public:
  typedef DenseSlabAllocCache Cache;

  explicit ThreadClock(unsigned tid, unsigned reused = 0);

  u64 get(unsigned tid) const;
  void set(ClockCache *c, unsigned tid, u64 v);
  void set(u64 v);
  void tick();
  uptr size() const;

  // Synchronization operations between this thread's clock and a
  // sync-variable clock (definitions live outside this header).
  void acquire(ClockCache *c, SyncClock *src);
  void release(ClockCache *c, SyncClock *dst);
  void acq_rel(ClockCache *c, SyncClock *dst);
  void ReleaseStore(ClockCache *c, SyncClock *dst);
  void ResetCached(ClockCache *c);

  void DebugReset();
  void DebugDump(int(*printf)(const char *s, ...));

 private:
  static const uptr kDirtyTids = SyncClock::kDirtyTids;
  // Index of the thread associated with the clock ("current thread").
  const unsigned tid_;
  const unsigned reused_;  // tid_ reuse count.
  // Current thread time when it acquired something from other threads.
  u64 last_acquire_;

  // Cached SyncClock (without dirty entries and release_store_tid_).
  // We reuse it for subsequent store-release operations without intervening
  // acquire operations. Since it is shared (and thus constant), clock value
  // for the current thread is then stored in dirty entries in the SyncClock.
  // We host a reference to the table while it is cached here.
  u32 cached_idx_;
  u16 cached_size_;
  u16 cached_blocks_;

  // Number of active elements in the clk_ table (the rest is zeros).
  uptr nclk_;
  u64 clk_[kMaxTidInClock];  // Fixed size vector clock.

  bool IsAlreadyAcquired(const SyncClock *src) const;
  void UpdateCurrentThread(ClockCache *c, SyncClock *dst) const;
};
|
|
169
|
|
170 ALWAYS_INLINE u64 ThreadClock::get(unsigned tid) const {
|
|
171 DCHECK_LT(tid, kMaxTidInClock);
|
|
172 return clk_[tid];
|
|
173 }
|
|
174
|
|
175 ALWAYS_INLINE void ThreadClock::set(u64 v) {
|
|
176 DCHECK_GE(v, clk_[tid_]);
|
|
177 clk_[tid_] = v;
|
|
178 }
|
|
179
|
|
180 ALWAYS_INLINE void ThreadClock::tick() {
|
|
181 clk_[tid_]++;
|
|
182 }
|
|
183
|
|
184 ALWAYS_INLINE uptr ThreadClock::size() const {
|
|
185 return nclk_;
|
|
186 }
|
|
187
|
|
188 ALWAYS_INLINE SyncClock::Iter SyncClock::begin() {
|
|
189 return Iter(this);
|
|
190 }
|
|
191
|
|
192 ALWAYS_INLINE SyncClock::Iter SyncClock::end() {
|
|
193 return Iter(nullptr);
|
|
194 }
|
|
195
|
|
196 ALWAYS_INLINE uptr SyncClock::size() const {
|
|
197 return size_;
|
|
198 }
|
|
199
|
|
200 ALWAYS_INLINE SyncClock::Iter::Iter(SyncClock* parent)
|
|
201 : parent_(parent)
|
|
202 , pos_(nullptr)
|
|
203 , end_(nullptr)
|
|
204 , block_(-1) {
|
|
205 if (parent)
|
|
206 Next();
|
|
207 }
|
|
208
|
|
209 ALWAYS_INLINE SyncClock::Iter& SyncClock::Iter::operator++() {
|
|
210 pos_++;
|
|
211 if (UNLIKELY(pos_ >= end_))
|
|
212 Next();
|
|
213 return *this;
|
|
214 }
|
|
215
|
|
216 ALWAYS_INLINE bool SyncClock::Iter::operator!=(const SyncClock::Iter& other) {
|
|
217 return parent_ != other.parent_;
|
|
218 }
|
|
219
|
|
220 ALWAYS_INLINE ClockElem &SyncClock::Iter::operator*() {
|
|
221 return *pos_;
|
|
222 }
|
|
223 } // namespace __tsan
|
|
224
|
|
225 #endif // TSAN_CLOCK_H
|