std/sync/reentrant_lock.rs
use cfg_if::cfg_if;

use crate::cell::UnsafeCell;
use crate::fmt;
use crate::ops::Deref;
use crate::panic::{RefUnwindSafe, UnwindSafe};
use crate::sys::sync as sys;
use crate::thread::{ThreadId, current_id};

/// A re-entrant mutual exclusion lock
///
/// This lock will block *other* threads waiting for the lock to become
/// available. The thread which has already locked the mutex can lock it
/// multiple times without blocking, preventing a common source of deadlocks.
///
/// # Examples
///
/// Allow recursively calling a function needing synchronization from within
/// a callback (this is how [`StdoutLock`](crate::io::StdoutLock) is currently
/// implemented):
///
/// ```
/// #![feature(reentrant_lock)]
///
/// use std::cell::RefCell;
/// use std::sync::ReentrantLock;
///
/// pub struct Log {
///     data: RefCell<String>,
/// }
///
/// impl Log {
///     pub fn append(&self, msg: &str) {
///         self.data.borrow_mut().push_str(msg);
///     }
/// }
///
/// static LOG: ReentrantLock<Log> = ReentrantLock::new(Log { data: RefCell::new(String::new()) });
///
/// pub fn with_log<R>(f: impl FnOnce(&Log) -> R) -> R {
///     let log = LOG.lock();
///     f(&*log)
/// }
///
/// with_log(|log| {
///     log.append("Hello");
///     with_log(|log| log.append(" there!"));
/// });
/// ```
///
// # Implementation details
//
// The 'owner' field tracks which thread has locked the mutex.
//
// We use thread::current_id() as the thread identifier, which is just the
// current thread's ThreadId, so it's unique across the process lifetime.
//
// If `owner` is set to the identifier of the current thread,
// we assume the mutex is already locked and instead of locking it again,
// we increment `lock_count`.
//
// When unlocking, we decrement `lock_count`, and only unlock the mutex when
// it reaches zero.
//
// `lock_count` is protected by the mutex and only accessed by the thread that has
// locked the mutex, so needs no synchronization.
//
// `owner` can be checked by other threads that want to see if they already
// hold the lock, so needs to be atomic. If it compares equal, we're on the
// same thread that holds the mutex and memory access can use relaxed ordering
// since we're not dealing with multiple threads. If it's not equal,
// synchronization is left to the mutex, making relaxed memory ordering for
// the `owner` field fine in all cases.
//
// On systems without 64-bit atomics we also store the address of a TLS variable
// alongside the 64-bit TID. We then first check that address against the address
// of that variable on the current thread, and only if they compare equal do we
// compare the actual TIDs. Because we only ever read the TID on the same thread
// that it was written on (or a thread sharing the TLS block with that writer thread),
// we don't need to further synchronize the TID accesses, so they can be regular 64-bit
// non-atomic accesses.
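//
// A sketch of the resulting behaviour on a single thread `A` (the states
// refer to the fields below; this is illustrative, not part of the API):
//
//     let lock = ReentrantLock::new(());
//     let g1 = lock.lock();  // mutex locked, owner = A, lock_count = 1
//     let g2 = lock.lock();  // owner matches: no blocking, lock_count = 2
//     drop(g2);              // lock_count = 1, mutex stays locked
//     drop(g1);              // lock_count = 0, owner cleared, mutex unlocked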
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLock<T: ?Sized> {
    mutex: sys::Mutex,
    owner: Tid,
    lock_count: UnsafeCell<u32>,
    data: T,
}

cfg_if!(
    if #[cfg(target_has_atomic = "64")] {
        use crate::sync::atomic::{Atomic, AtomicU64, Ordering::Relaxed};

        struct Tid(Atomic<u64>);

        impl Tid {
            const fn new() -> Self {
                Self(AtomicU64::new(0))
            }

            #[inline]
            fn contains(&self, owner: ThreadId) -> bool {
                owner.as_u64().get() == self.0.load(Relaxed)
            }

            #[inline]
            // This is just unsafe to match the API of the Tid type below.
            unsafe fn set(&self, tid: Option<ThreadId>) {
                let value = tid.map_or(0, |tid| tid.as_u64().get());
                self.0.store(value, Relaxed);
            }
        }
    } else {
        /// Returns the address of a TLS variable. This is guaranteed to
        /// be unique across all currently alive threads.
        fn tls_addr() -> usize {
            thread_local! { static X: u8 = const { 0u8 } };

            X.with(|p| <*const u8>::addr(p))
        }

        use crate::sync::atomic::{Atomic, AtomicUsize, Ordering};

        struct Tid {
            // When a thread calls `set()`, this value gets updated to
            // the address of a thread local on that thread. This is
            // used as a first check in `contains()`; if the `tls_addr`
            // doesn't match the TLS address of the current thread, then
            // the ThreadId also can't match. Only if the TLS addresses do
            // match do we read out the actual TID.
            // Note also that we can use relaxed atomic operations here, because
            // we only ever read from the tid if `tls_addr` matches the current
            // TLS address. In that case, either the tid has been set by
            // the current thread, or by a thread that has terminated before
            // the current thread's `tls_addr` was allocated. In either case, no further
            // synchronization is needed (as per <https://github.com/rust-lang/miri/issues/3450>).
            tls_addr: Atomic<usize>,
            tid: UnsafeCell<u64>,
        }

        unsafe impl Send for Tid {}
        unsafe impl Sync for Tid {}

        impl Tid {
            const fn new() -> Self {
                Self { tls_addr: AtomicUsize::new(0), tid: UnsafeCell::new(0) }
            }

            #[inline]
            // NOTE: This assumes that `owner` is the ID of the current
            // thread, and may spuriously return `false` if that's not the case.
            fn contains(&self, owner: ThreadId) -> bool {
                // We must call `tls_addr()` *before* doing the load to ensure that if we reuse an
                // earlier thread's address, the `tls_addr.load()` below happens-after everything
                // that thread did.
                let tls_addr = tls_addr();
                // SAFETY: See the comments in the struct definition.
                self.tls_addr.load(Ordering::Relaxed) == tls_addr
                    && unsafe { *self.tid.get() } == owner.as_u64().get()
            }

            #[inline]
            // This may only be called by one thread at a time; concurrent
            // calls can lead to race conditions.
            unsafe fn set(&self, tid: Option<ThreadId>) {
                // It's important that we set `self.tls_addr` to 0 if the tid is
                // cleared. Otherwise, there might be race conditions between
                // `set()` and `contains()`.
                let tls_addr = if tid.is_some() { tls_addr() } else { 0 };
                let value = tid.map_or(0, |tid| tid.as_u64().get());
                self.tls_addr.store(tls_addr, Ordering::Relaxed);
                unsafe { *self.tid.get() = value };
            }
        }
    }
);

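// Informally, `Sync` only needs `T: Send` here: the lock only ever hands out
// `&T` to the thread currently holding the mutex, so the data is never
// accessed from two threads at once through the lock itself. Sharing a
// *guard* across threads is the case that additionally requires `T: Sync`
// (see the guard's `Sync` impl below).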
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Send for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: Send + ?Sized> Sync for ReentrantLock<T> {}

// Because of the `UnsafeCell`, these traits are not implemented automatically
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: UnwindSafe + ?Sized> UnwindSafe for ReentrantLock<T> {}
#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: RefUnwindSafe + ?Sized> RefUnwindSafe for ReentrantLock<T> {}

/// An RAII implementation of a "scoped lock" of a re-entrant lock. When this
/// structure is dropped (falls out of scope), the lock will be unlocked.
///
/// The data protected by the mutex can be accessed through this guard via its
/// [`Deref`] implementation.
///
/// This structure is created by the [`lock`](ReentrantLock::lock) method on
/// [`ReentrantLock`].
///
/// # Mutability
///
/// Unlike [`MutexGuard`](super::MutexGuard), `ReentrantLockGuard` does not
/// implement [`DerefMut`](crate::ops::DerefMut), because implementing the
/// trait would violate Rust’s reference aliasing rules. Use interior
/// mutability (usually [`RefCell`](crate::cell::RefCell)) in order to mutate
/// the guarded data.
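///
/// A minimal sketch of that pattern: the guard only ever yields a shared
/// reference, and mutation goes through the `RefCell`:
///
/// ```
/// #![feature(reentrant_lock)]
/// use std::cell::RefCell;
/// use std::sync::ReentrantLock;
///
/// let lock = ReentrantLock::new(RefCell::new(0));
/// let guard = lock.lock();
/// // Mutate through the `RefCell`'s interior mutability; the guard itself
/// // only gives out `&RefCell<i32>`.
/// *guard.borrow_mut() += 1;
/// assert_eq!(*guard.borrow(), 1);
/// ```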
#[must_use = "if unused the ReentrantLock will immediately unlock"]
#[unstable(feature = "reentrant_lock", issue = "121440")]
pub struct ReentrantLockGuard<'a, T: ?Sized + 'a> {
    lock: &'a ReentrantLock<T>,
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> !Send for ReentrantLockGuard<'_, T> {}

#[unstable(feature = "reentrant_lock", issue = "121440")]
unsafe impl<T: ?Sized + Sync> Sync for ReentrantLockGuard<'_, T> {}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T> ReentrantLock<T> {
    /// Creates a new re-entrant lock in an unlocked state ready for use.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(0);
    /// ```
    pub const fn new(t: T) -> ReentrantLock<T> {
        ReentrantLock {
            mutex: sys::Mutex::new(),
            owner: Tid::new(),
            lock_count: UnsafeCell::new(0),
            data: t,
        }
    }

    /// Consumes this lock, returning the underlying data.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    ///
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(0);
    /// assert_eq!(lock.into_inner(), 0);
    /// ```
    pub fn into_inner(self) -> T {
        self.data
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> ReentrantLock<T> {
    /// Acquires the lock, blocking the current thread until it is able to do
    /// so.
    ///
    /// This function will block the caller until the lock becomes available.
    /// Upon returning, the calling thread is the only thread with the lock
    /// held. When the thread calling this method already holds the lock, the
    /// call succeeds without blocking.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::cell::Cell;
    /// use std::sync::{Arc, ReentrantLock};
    /// use std::thread;
    ///
    /// let lock = Arc::new(ReentrantLock::new(Cell::new(0)));
    /// let c_lock = Arc::clone(&lock);
    ///
    /// thread::spawn(move || {
    ///     c_lock.lock().set(10);
    /// }).join().expect("thread::spawn failed");
    /// assert_eq!(lock.lock().get(), 10);
    /// ```
    pub fn lock(&self) -> ReentrantLockGuard<'_, T> {
        let this_thread = current_id();
        // Safety: We only touch lock_count when we own the inner mutex.
        // Additionally, we only call `self.owner.set()` while holding
        // the inner mutex, so no two threads can call it concurrently.
        unsafe {
            if self.owner.contains(this_thread) {
                self.increment_lock_count().expect("lock count overflow in reentrant mutex");
            } else {
                self.mutex.lock();
                self.owner.set(Some(this_thread));
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
            }
        }
        ReentrantLockGuard { lock: self }
    }

    /// Returns a mutable reference to the underlying data.
    ///
    /// Since this call borrows the `ReentrantLock` mutably, no actual locking
    /// needs to take place -- the mutable borrow statically guarantees no locks
    /// exist.
    ///
    /// # Examples
    ///
    /// ```
    /// #![feature(reentrant_lock)]
    /// use std::sync::ReentrantLock;
    ///
    /// let mut lock = ReentrantLock::new(0);
    /// *lock.get_mut() = 10;
    /// assert_eq!(*lock.lock(), 10);
    /// ```
    pub fn get_mut(&mut self) -> &mut T {
        &mut self.data
    }

    /// Attempts to acquire this lock.
    ///
    /// If the lock could not be acquired at this time, then `None` is returned.
    /// Otherwise, an RAII guard is returned.
    ///
    /// This function does not block.
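    ///
    /// # Examples
    ///
    /// A minimal sketch of non-blocking acquisition (this method is currently
    /// internal, so the example also assumes the `std_internals` feature is
    /// enabled):
    ///
    /// ```
    /// #![feature(reentrant_lock, std_internals)]
    /// use std::sync::ReentrantLock;
    ///
    /// let lock = ReentrantLock::new(0);
    /// // The lock is free, so this succeeds without blocking.
    /// let guard = lock.try_lock().expect("lock should be free");
    /// // The same thread may re-acquire the lock without blocking.
    /// let reentrant = lock.try_lock().expect("re-entrant try_lock succeeds");
    /// assert_eq!(*reentrant, 0);
    /// ```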
    // FIXME maybe make it a public part of the API?
    #[unstable(issue = "none", feature = "std_internals")]
    #[doc(hidden)]
    pub fn try_lock(&self) -> Option<ReentrantLockGuard<'_, T>> {
        let this_thread = current_id();
        // Safety: We only touch lock_count when we own the inner mutex.
        // Additionally, we only call `self.owner.set()` while holding
        // the inner mutex, so no two threads can call it concurrently.
        unsafe {
            if self.owner.contains(this_thread) {
                self.increment_lock_count()?;
                Some(ReentrantLockGuard { lock: self })
            } else if self.mutex.try_lock() {
                self.owner.set(Some(this_thread));
                debug_assert_eq!(*self.lock_count.get(), 0);
                *self.lock_count.get() = 1;
                Some(ReentrantLockGuard { lock: self })
            } else {
                None
            }
        }
    }

    unsafe fn increment_lock_count(&self) -> Option<()> {
        unsafe {
            *self.lock_count.get() = (*self.lock_count.get()).checked_add(1)?;
        }
        Some(())
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        let mut d = f.debug_struct("ReentrantLock");
        match self.try_lock() {
            Some(v) => d.field("data", &&*v),
            None => d.field("data", &format_args!("<locked>")),
        };
        d.finish_non_exhaustive()
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: Default> Default for ReentrantLock<T> {
    fn default() -> Self {
        Self::new(T::default())
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T> From<T> for ReentrantLock<T> {
    fn from(t: T) -> Self {
        Self::new(t)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Deref for ReentrantLockGuard<'_, T> {
    type Target = T;

    fn deref(&self) -> &T {
        &self.lock.data
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Debug + ?Sized> fmt::Debug for ReentrantLockGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: fmt::Display + ?Sized> fmt::Display for ReentrantLockGuard<'_, T> {
    fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result {
        (**self).fmt(f)
    }
}

#[unstable(feature = "reentrant_lock", issue = "121440")]
impl<T: ?Sized> Drop for ReentrantLockGuard<'_, T> {
    #[inline]
    fn drop(&mut self) {
        // Safety: We own the lock.
        unsafe {
            *self.lock.lock_count.get() -= 1;
            if *self.lock.lock_count.get() == 0 {
                self.lock.owner.set(None);
                self.lock.mutex.unlock();
            }
        }
    }
}