xref: /drstd/src/std/sys/unix/locks/pthread_rwlock.rs (revision 86982c5e9b2eaa583327251616ee822c36288824)
use crate::std::cell::UnsafeCell;
use crate::std::mem::forget;
use crate::std::sync::atomic::{AtomicUsize, Ordering};
use crate::std::sys_common::lazy_box::{LazyBox, LazyInit};
use dlibc;

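/// The actual lock state, heap-allocated via `LazyBox`. Alongside the raw
/// pthread rwlock it tracks `write_locked` and `num_readers`, which the
/// methods below use to detect acquisitions that a non-conforming pthread
/// implementation (see the comments in `read`) would wrongly allow.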
struct AllocatedRwLock {
    inner: UnsafeCell<dlibc::pthread_rwlock_t>,
    write_locked: UnsafeCell<bool>, // guarded by the `inner` RwLock
    num_readers: AtomicUsize,
}

unsafe impl Send for AllocatedRwLock {}
unsafe impl Sync for AllocatedRwLock {}

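/// An rwlock backed by a lazily heap-allocated pthread rwlock: a
/// `pthread_rwlock_t` is not guaranteed to be movable once it may have been
/// used, so the state lives behind a `LazyBox` rather than inline.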
pub struct RwLock {
    inner: LazyBox<AllocatedRwLock>,
}

impl LazyInit for AllocatedRwLock {
    fn init() -> Box<Self> {
        Box::new(AllocatedRwLock {
            inner: UnsafeCell::new(dlibc::PTHREAD_RWLOCK_INITIALIZER),
            write_locked: UnsafeCell::new(false),
            num_readers: AtomicUsize::new(0),
        })
    }

    fn destroy(mut rwlock: Box<Self>) {
        // We're not allowed to pthread_rwlock_destroy a locked rwlock,
        // so check first if it's unlocked.
        if *rwlock.write_locked.get_mut() || *rwlock.num_readers.get_mut() != 0 {
            // The rwlock is locked. This happens if a RwLock{Read,Write}Guard is leaked.
            // In this case, we just leak the RwLock too.
            forget(rwlock);
        }
    }

    fn cancel_init(_: Box<Self>) {
        // In this case, we can just drop it without any checks,
        // since it cannot have been locked yet.
    }
}

impl AllocatedRwLock {
    #[inline]
    unsafe fn raw_unlock(&self) {
        let r = dlibc::pthread_rwlock_unlock(self.inner.get());
        debug_assert_eq!(r, 0);
    }
}

impl Drop for AllocatedRwLock {
    fn drop(&mut self) {
        let r = unsafe { dlibc::pthread_rwlock_destroy(self.inner.get()) };
        // On DragonFly pthread_rwlock_destroy() returns EINVAL if called on a
        // rwlock that was just initialized with
        // dlibc::PTHREAD_RWLOCK_INITIALIZER. Once it is used (locked/unlocked)
        // or pthread_rwlock_init() is called, this behaviour no longer occurs.
        if cfg!(target_os = "dragonfly") {
            debug_assert!(r == 0 || r == dlibc::EINVAL);
        } else {
            debug_assert_eq!(r, 0);
        }
    }
}

impl RwLock {
    #[inline]
    pub const fn new() -> RwLock {
        RwLock {
            inner: LazyBox::new(),
        }
    }

    #[inline]
    pub fn read(&self) {
        let lock = &*self.inner;
        let r = unsafe { dlibc::pthread_rwlock_rdlock(lock.inner.get()) };

        // According to POSIX, when a thread tries to acquire this read lock
        // while it already holds the write lock
        // (or vice versa, or tries to acquire the write lock twice),
        // "the call shall either deadlock or return [EDEADLK]"
        // (https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_wrlock.html,
        // https://pubs.opengroup.org/onlinepubs/9699919799/functions/pthread_rwlock_rdlock.html).
        // So, in principle, all we have to do here is check `r == 0` to be sure we properly
        // got the lock.
        //
        // However, (at least) glibc before version 2.25 does not conform to this spec,
        // and can return `r == 0` even when this thread already holds the write lock.
        // We thus check for this situation ourselves and panic when detecting that a thread
        // got the write lock more than once, or got a read and a write lock.
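        //
        // For example (hypothetical): if a thread that already holds the write
        // lock calls `read()`, a conforming implementation returns EDEADLK and
        // a pre-2.25 glibc may instead return 0 with `write_locked` still set;
        // both cases take the panicking branch below.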
        if r == dlibc::EAGAIN {
            panic!("rwlock maximum reader count exceeded");
        } else if r == dlibc::EDEADLK || (r == 0 && unsafe { *lock.write_locked.get() }) {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_rdlock` succeeded when it should not have.
                unsafe {
                    lock.raw_unlock();
                }
            }
            panic!("rwlock read lock would result in deadlock");
        } else {
            // POSIX does not make guarantees about all the errors that may be returned.
            // See issue #94705 for more details.
            assert_eq!(r, 0, "unexpected error during rwlock read lock: {:?}", r);
            lock.num_readers.fetch_add(1, Ordering::Relaxed);
        }
    }

    #[inline]
    pub fn try_read(&self) -> bool {
        let lock = &*self.inner;
        let r = unsafe { dlibc::pthread_rwlock_tryrdlock(lock.inner.get()) };
        if r == 0 {
            if unsafe { *lock.write_locked.get() } {
                // `pthread_rwlock_tryrdlock` succeeded when it should not have.
                unsafe {
                    lock.raw_unlock();
                }
                false
            } else {
                lock.num_readers.fetch_add(1, Ordering::Relaxed);
                true
            }
        } else {
            false
        }
    }

    #[inline]
    pub fn write(&self) {
        let lock = &*self.inner;
        let r = unsafe { dlibc::pthread_rwlock_wrlock(lock.inner.get()) };
        // See comments above for why we check for EDEADLK and write_locked. For the same reason,
        // we also need to check that there are no readers (tracked in `num_readers`).
        if r == dlibc::EDEADLK
            || (r == 0 && unsafe { *lock.write_locked.get() })
            || lock.num_readers.load(Ordering::Relaxed) != 0
        {
            // Above, we make sure to only access `write_locked` when `r == 0` to avoid
            // data races.
            if r == 0 {
                // `pthread_rwlock_wrlock` succeeded when it should not have.
                unsafe {
                    lock.raw_unlock();
                }
            }
            panic!("rwlock write lock would result in deadlock");
        } else {
            // According to POSIX, for a properly initialized rwlock this can only
            // return EDEADLK or 0. We rely on that.
            debug_assert_eq!(r, 0);
        }

        unsafe {
            *lock.write_locked.get() = true;
        }
    }

    #[inline]
    pub unsafe fn try_write(&self) -> bool {
        let lock = &*self.inner;
        let r = dlibc::pthread_rwlock_trywrlock(lock.inner.get());
        if r == 0 {
            if *lock.write_locked.get() || lock.num_readers.load(Ordering::Relaxed) != 0 {
                // `pthread_rwlock_trywrlock` succeeded when it should not have.
                lock.raw_unlock();
                false
            } else {
                *lock.write_locked.get() = true;
                true
            }
        } else {
            false
        }
    }

    #[inline]
    pub unsafe fn read_unlock(&self) {
        let lock = &*self.inner;
        debug_assert!(!*lock.write_locked.get());
        lock.num_readers.fetch_sub(1, Ordering::Relaxed);
        lock.raw_unlock();
    }

    #[inline]
    pub unsafe fn write_unlock(&self) {
        let lock = &*self.inner;
        debug_assert_eq!(lock.num_readers.load(Ordering::Relaxed), 0);
        debug_assert!(*lock.write_locked.get());
        *lock.write_locked.get() = false;
        lock.raw_unlock();
    }
}
199