use core::cell::UnsafeCell;
use core::default::Default;
use core::fmt;
use core::marker::PhantomData;
use core::mem;
use core::ops::{Deref, DerefMut};
use core::ptr::NonNull;
use core::sync::atomic::{spin_loop_hint as cpu_relax, AtomicUsize, Ordering};

/// A reader-writer lock
///
/// This type of lock allows a number of readers or at most one writer at any
/// point in time. The write portion of this lock typically allows modification
/// of the underlying data (exclusive access) and the read portion of this lock
/// typically allows for read-only access (shared access).
///
/// The type parameter `T` represents the data that this lock protects. It is
/// required that `T` satisfies `Send` to be shared across tasks and `Sync` to
/// allow concurrent access through readers. The RAII guards returned from the
/// locking methods implement `Deref` (and `DerefMut` for the `write` methods)
/// to allow access to the content of the lock.
///
/// An [`RwLockUpgradeableGuard`](RwLockUpgradeableGuard) can be upgraded to a
/// writable guard through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade) and
/// [`RwLockUpgradeableGuard::try_upgrade`](RwLockUpgradeableGuard::try_upgrade) functions.
/// Writable or upgradeable guards can be downgraded through their respective `downgrade`
/// functions.
///
/// Based on Facebook's
/// [`folly/RWSpinLock.h`](https://github.com/facebook/folly/blob/a0394d84f2d5c3e50ebfd0566f9d3acb52cfab5a/folly/synchronization/RWSpinLock.h).
/// This implementation is unfair to writers - if the lock always has readers, then no writers will
/// ever get a chance. Using an upgradeable lock guard can *somewhat* alleviate this issue as no
/// new readers are allowed when an upgradeable guard is held, but upgradeable guards can be taken
/// when there are existing readers. However, if the lock is highly contended and writes are
/// crucial, then this implementation may be a poor choice.
///
/// # Examples
///
/// ```
/// use spin;
///
/// let lock = spin::RwLock::new(5);
///
/// // many reader locks can be held at once
/// {
///     let r1 = lock.read();
///     let r2 = lock.read();
///     assert_eq!(*r1, 5);
///     assert_eq!(*r2, 5);
/// } // read locks are dropped at this point
///
/// // only one write lock may be held, however
/// {
///     let mut w = lock.write();
///     *w += 1;
///     assert_eq!(*w, 6);
/// } // write lock is dropped here
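///
/// // an upgradeable guard can be upgraded to exclusive write access
/// {
///     let upgradeable = lock.upgradeable_read();
///     assert_eq!(*upgradeable, 6);
///     let mut writer = upgradeable.upgrade();
///     *writer += 1;
///     assert_eq!(*writer, 7);
/// } // upgraded write lock is dropped here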
/// ```
pub struct RwLock<T: ?Sized> {
    lock: AtomicUsize,
    data: UnsafeCell<T>,
}

const READER: usize = 1 << 2;
const UPGRADED: usize = 1 << 1;
const WRITER: usize = 1;
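// The two low bits of the lock word are flags: WRITER (bit 0) and UPGRADED
// (bit 1). The remaining bits hold the reader count, so each reader adds
// READER (1 << 2), and a lock word of 0 means the lock is completely free.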
/// A guard from which the protected data can be read
///
/// When the guard falls out of scope it will decrement the read count,
/// potentially releasing the lock.
#[derive(Debug)]
pub struct RwLockReadGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
}

/// A guard to which the protected data can be written
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockWriteGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
    #[doc(hidden)]
    _invariant: PhantomData<&'a mut T>,
}

/// A guard from which the protected data can be read, and can be upgraded
/// to a writable guard if needed
///
/// No writers or other upgradeable guards can exist while this is in scope. New reader
/// creation is prevented (to alleviate writer starvation) but there may be existing readers
/// when the lock is acquired.
///
/// When the guard falls out of scope it will release the lock.
#[derive(Debug)]
pub struct RwLockUpgradeableGuard<'a, T: 'a + ?Sized> {
    lock: &'a AtomicUsize,
    data: NonNull<T>,
    #[doc(hidden)]
    _invariant: PhantomData<&'a mut T>,
}

// Same unsafe impls as `std::sync::RwLock`
unsafe impl<T: ?Sized + Send> Send for RwLock<T> {}
unsafe impl<T: ?Sized + Send + Sync> Sync for RwLock<T> {}

impl<T> RwLock<T> {
    /// Creates a new rwlock wrapping the supplied data.
    ///
    /// May be used statically:
    ///
    /// ```
    /// use spin;
    ///
    /// static RW_LOCK: spin::RwLock<()> = spin::RwLock::new(());
    ///
    /// fn demo() {
    ///     let lock = RW_LOCK.read();
    ///     // do something with lock
    ///     drop(lock);
    /// }
    /// ```
    #[inline]
    pub const fn new(user_data: T) -> RwLock<T> {
        RwLock {
            lock: AtomicUsize::new(0),
            data: UnsafeCell::new(user_data),
        }
    }

    /// Consumes this `RwLock`, returning the underlying data.
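    ///
    /// ```
    /// let lock = spin::RwLock::new(42);
    /// assert_eq!(lock.into_inner(), 42);
    /// ```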
    #[inline]
    pub fn into_inner(self) -> T {
        // We know statically that there are no outstanding references to
        // `self` so there's no need to lock.
        let RwLock { data, .. } = self;
        data.into_inner()
    }
}

impl<T: ?Sized> RwLock<T> {
    /// Locks this rwlock with shared read access, blocking the current thread
    /// until it can be acquired.
    ///
    /// The calling thread will be blocked until there are no more writers which
    /// hold the lock. There may be other readers currently inside the lock when
    /// this method returns. This method does not provide any guarantees with
    /// respect to whether contending readers or writers will acquire the lock
    /// first.
    ///
    /// Returns an RAII guard which will release this thread's shared access
    /// once it is dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let data = mylock.read();
    ///     // The lock is now locked and the data can be read
    ///     println!("{}", *data);
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn read(&self) -> RwLockReadGuard<T> {
        loop {
            match self.try_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Attempt to acquire this lock with shared read access.
    ///
    /// This function will never block and will return immediately. It returns
    /// `Some` of an RAII guard which will release the shared access of this
    /// thread when dropped, or `None` if the access could not be granted.
    /// This method does not provide any guarantees with respect to whether
    /// contending readers or writers will acquire the lock first.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_read() {
    ///         Some(data) => {
    ///             // The lock is now locked and the data can be read
    ///             println!("{}", *data);
    ///             // The lock is dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_read(&self) -> Option<RwLockReadGuard<T>> {
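        // Optimistically add a reader; if a writer or an upgradeable reader
        // turns out to hold the lock, the increment is undone below.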
        let value = self.lock.fetch_add(READER, Ordering::Acquire);

        // We check the UPGRADED bit here so that new readers are prevented when an UPGRADED lock is held.
        // This helps reduce writer starvation.
        if value & (WRITER | UPGRADED) != 0 {
            // Lock is taken, undo.
            self.lock.fetch_sub(READER, Ordering::Release);
            None
        } else {
            Some(RwLockReadGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
            })
        }
    }

    /// Force decrement the reader count.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockReadGuard`s
    /// live, or if called more times than `read` has been called, but can be
    /// useful in FFI contexts where the caller doesn't know how to deal with
    /// RAII. The underlying atomic operation uses `Ordering::Release`.
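    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// core::mem::forget(mylock.read()); // e.g. a guard leaked across FFI
    /// assert!(mylock.try_write().is_none());
    /// unsafe { mylock.force_read_decrement(); } // manually release the leaked read lock
    /// assert!(mylock.try_write().is_some());
    /// ```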
    #[inline]
    pub unsafe fn force_read_decrement(&self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !WRITER > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }

    /// Force unlock exclusive write access.
    ///
    /// This is *extremely* unsafe if there are outstanding `RwLockWriteGuard`s
    /// live, or if called when there are current readers, but can be useful in
    /// FFI contexts where the caller doesn't know how to deal with RAII. The
    /// underlying atomic operation uses `Ordering::Release`.
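    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// core::mem::forget(mylock.write()); // e.g. a guard leaked across FFI
    /// assert!(mylock.try_read().is_none());
    /// unsafe { mylock.force_write_unlock(); } // manually release the leaked write lock
    /// assert!(mylock.try_read().is_some());
    /// ```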
    #[inline]
    pub unsafe fn force_write_unlock(&self) {
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED), 0);
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }

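    // Acquire the write lock only if the whole lock word is zero, i.e. no
    // readers, no writer and no upgradeable reader. `strong` selects between
    // the strong and weak compare-exchange (see `compare_exchange` below).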
    #[inline(always)]
    fn try_write_internal(&self, strong: bool) -> Option<RwLockWriteGuard<T>> {
        if compare_exchange(
            &self.lock,
            0,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            Some(RwLockWriteGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
                _invariant: PhantomData,
            })
        } else {
            None
        }
    }

    /// Lock this rwlock with exclusive write access, blocking the current
    /// thread until it can be acquired.
    ///
    /// This function will not return while other writers or other readers
    /// currently have access to the lock.
    ///
    /// Returns an RAII guard which will drop the write access of this rwlock
    /// when dropped.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     let mut data = mylock.write();
    ///     // The lock is now locked and the data can be written
    ///     *data += 1;
    ///     // The lock is dropped
    /// }
    /// ```
    #[inline]
    pub fn write(&self) -> RwLockWriteGuard<T> {
        loop {
            match self.try_write_internal(false) {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Attempt to lock this rwlock with exclusive write access.
    ///
    /// This function never blocks; it returns `None` if a call to `write`
    /// would otherwise block. If successful, an RAII guard is returned.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// {
    ///     match mylock.try_write() {
    ///         Some(mut data) => {
    ///             // The lock is now locked and the data can be written
    ///             *data += 1;
    ///             // The lock is implicitly dropped
    ///         },
    ///         None => (), // no cigar
    ///     };
    /// }
    /// ```
    #[inline]
    pub fn try_write(&self) -> Option<RwLockWriteGuard<T>> {
        self.try_write_internal(true)
    }

    /// Obtain a readable lock guard that can later be upgraded to a writable lock guard.
    /// Upgrades can be done through the [`RwLockUpgradeableGuard::upgrade`](RwLockUpgradeableGuard::upgrade) method.
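    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// assert_eq!(*upgradeable, 0);
    ///
    /// let mut writable = upgradeable.upgrade();
    /// *writable += 1;
    /// assert_eq!(*writable, 1);
    /// ```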
    #[inline]
    pub fn upgradeable_read(&self) -> RwLockUpgradeableGuard<T> {
        loop {
            match self.try_upgradeable_read() {
                Some(guard) => return guard,
                None => cpu_relax(),
            }
        }
    }

    /// Tries to obtain an upgradeable lock guard.
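    ///
    /// Returns `None` if a writer or another upgradeable guard currently holds the lock.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.try_upgradeable_read().unwrap();
    /// // Only one upgradeable guard may be held at a time
    /// assert!(mylock.try_upgradeable_read().is_none());
    /// ```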
    #[inline]
    pub fn try_upgradeable_read(&self) -> Option<RwLockUpgradeableGuard<T>> {
        if self.lock.fetch_or(UPGRADED, Ordering::Acquire) & (WRITER | UPGRADED) == 0 {
            Some(RwLockUpgradeableGuard {
                lock: &self.lock,
                data: unsafe { NonNull::new_unchecked(self.data.get()) },
                _invariant: PhantomData,
            })
        } else {
            // We can't clear the UPGRADED bit just yet, as another upgradeable or
            // write lock currently holds it. When that lock is released, it will
            // clear the bit for us.
            None
        }
    }
}

impl<T: ?Sized + fmt::Debug> fmt::Debug for RwLock<T> {
    fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result {
        match self.try_read() {
            Some(guard) => write!(f, "RwLock {{ data: ")
                .and_then(|()| (&*guard).fmt(f))
                .and_then(|()| write!(f, "}}")),
            None => write!(f, "RwLock {{ <locked> }}"),
        }
    }
}

impl<T: ?Sized + Default> Default for RwLock<T> {
    fn default() -> RwLock<T> {
        RwLock::new(Default::default())
    }
}

impl<'rwlock, T: ?Sized> RwLockUpgradeableGuard<'rwlock, T> {
    #[inline(always)]
    fn try_upgrade_internal(self, strong: bool) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
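        // The CAS succeeds only when the lock word is exactly UPGRADED, i.e.
        // every reader has released the lock and no writer holds it.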
        if compare_exchange(
            &self.lock,
            UPGRADED,
            WRITER,
            Ordering::Acquire,
            Ordering::Relaxed,
            strong,
        )
        .is_ok()
        {
            // Upgrade successful
            let out = Ok(RwLockWriteGuard {
                lock: &self.lock,
                data: self.data,
                _invariant: PhantomData,
            });

            // Forget the old guard so its destructor doesn't run
            mem::forget(self);

            out
        } else {
            Err(self)
        }
    }

    /// Upgrades an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    /// let writable = upgradeable.upgrade();
    /// ```
    #[inline]
    pub fn upgrade(mut self) -> RwLockWriteGuard<'rwlock, T> {
        loop {
            self = match self.try_upgrade_internal(false) {
                Ok(guard) => return guard,
                Err(e) => e,
            };

            cpu_relax();
        }
    }

    /// Tries to upgrade an upgradeable lock guard to a writable lock guard.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    /// let upgradeable = mylock.upgradeable_read(); // Readable, but not yet writable
    ///
    /// match upgradeable.try_upgrade() {
    ///     Ok(writable) => /* upgrade successful - use writable lock guard */ (),
    ///     Err(upgradeable) => /* upgrade unsuccessful */ (),
    /// };
    /// ```
    #[inline]
    pub fn try_upgrade(self) -> Result<RwLockWriteGuard<'rwlock, T>, Self> {
        self.try_upgrade_internal(true)
    }

    /// Downgrades the upgradeable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(1);
    ///
    /// let upgradeable = mylock.upgradeable_read();
    /// assert!(mylock.try_read().is_none());
    /// assert_eq!(*upgradeable, 1);
    ///
    /// let readable = upgradeable.downgrade(); // This is guaranteed not to spin
    /// assert!(mylock.try_read().is_some());
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);

        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }

        // Dropping self removes the UPGRADED bit
    }
}

impl<'rwlock, T: ?Sized> RwLockWriteGuard<'rwlock, T> {
    /// Downgrades the writable lock guard to a readable, shared lock guard. Cannot fail and is guaranteed not to spin.
    ///
    /// ```
    /// let mylock = spin::RwLock::new(0);
    ///
    /// let mut writable = mylock.write();
    /// *writable = 1;
    ///
    /// let readable = writable.downgrade(); // This is guaranteed not to spin
    /// # let readable_2 = mylock.try_read().unwrap();
    /// assert_eq!(*readable, 1);
    /// ```
    #[inline]
    pub fn downgrade(self) -> RwLockReadGuard<'rwlock, T> {
        // Reserve the read guard for ourselves
        self.lock.fetch_add(READER, Ordering::Acquire);

        RwLockReadGuard {
            lock: &self.lock,
            data: self.data,
        }

        // Dropping self removes the WRITER bit
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockReadGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockUpgradeableGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> Deref for RwLockWriteGuard<'rwlock, T> {
    type Target = T;

    fn deref(&self) -> &T {
        unsafe { self.data.as_ref() }
    }
}

impl<'rwlock, T: ?Sized> DerefMut for RwLockWriteGuard<'rwlock, T> {
    fn deref_mut(&mut self) -> &mut T {
        unsafe { self.data.as_mut() }
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockReadGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert!(self.lock.load(Ordering::Relaxed) & !(WRITER | UPGRADED) > 0);
        self.lock.fetch_sub(READER, Ordering::Release);
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockUpgradeableGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert_eq!(
            self.lock.load(Ordering::Relaxed) & (WRITER | UPGRADED),
            UPGRADED
        );
        self.lock.fetch_sub(UPGRADED, Ordering::AcqRel);
    }
}

impl<'rwlock, T: ?Sized> Drop for RwLockWriteGuard<'rwlock, T> {
    fn drop(&mut self) {
        debug_assert_eq!(self.lock.load(Ordering::Relaxed) & WRITER, WRITER);

        // Writer is responsible for clearing both WRITER and UPGRADED bits.
        // The UPGRADED bit may be set if an upgradeable lock attempts an upgrade while this lock is held.
        self.lock.fetch_and(!(WRITER | UPGRADED), Ordering::Release);
    }
}
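// Dispatches between the strong and weak forms of compare-and-swap. The weak
// form may fail spuriously but can be cheaper on some architectures, which is
// fine inside the retry loops of `write` and `upgrade`; the one-shot
// `try_write` and `try_upgrade` use the strong form so that a spurious failure
// cannot produce a false `None` or `Err`.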
#[inline(always)]
fn compare_exchange(
    atomic: &AtomicUsize,
    current: usize,
    new: usize,
    success: Ordering,
    failure: Ordering,
    strong: bool,
) -> Result<usize, usize> {
    if strong {
        atomic.compare_exchange(current, new, success, failure)
    } else {
        atomic.compare_exchange_weak(current, new, success, failure)
    }
}

#[cfg(test)]
mod tests {
    use std::prelude::v1::*;

    use std::sync::atomic::{AtomicUsize, Ordering};
    use std::sync::mpsc::channel;
    use std::sync::Arc;
    use std::thread;

    use super::*;

    #[derive(Eq, PartialEq, Debug)]
    struct NonCopy(i32);

    #[test]
    fn smoke() {
        let l = RwLock::new(());
        drop(l.read());
        drop(l.write());
        drop((l.read(), l.read()));
        drop(l.write());
    }

    // TODO: needs RNG
    //#[test]
    //fn frob() {
    //    static R: RwLock = RwLock::new();
    //    const N: usize = 10;
    //    const M: usize = 1000;
    //
    //    let (tx, rx) = channel::<()>();
    //    for _ in 0..N {
    //        let tx = tx.clone();
    //        thread::spawn(move|| {
    //            let mut rng = rand::thread_rng();
    //            for _ in 0..M {
    //                if rng.gen_weighted_bool(N) {
    //                    drop(R.write());
    //                } else {
    //                    drop(R.read());
    //                }
    //            }
    //            drop(tx);
    //        });
    //    }
    //    drop(tx);
    //    let _ = rx.recv();
    //    unsafe { R.destroy(); }
    //}

    #[test]
    fn test_rw_arc() {
        let arc = Arc::new(RwLock::new(0));
        let arc2 = arc.clone();
        let (tx, rx) = channel();

        thread::spawn(move || {
            let mut lock = arc2.write();
            for _ in 0..10 {
                let tmp = *lock;
                *lock = -1;
                thread::yield_now();
                *lock = tmp + 1;
            }
            tx.send(()).unwrap();
        });

        // Readers try to catch the writer in the act
        let mut children = Vec::new();
        for _ in 0..5 {
            let arc3 = arc.clone();
            children.push(thread::spawn(move || {
                let lock = arc3.read();
                assert!(*lock >= 0);
            }));
        }

        // Wait for children to pass their asserts
        for r in children {
            assert!(r.join().is_ok());
        }

        // Wait for writer to finish
        rx.recv().unwrap();
        let lock = arc.read();
        assert_eq!(*lock, 10);
    }

    #[test]
    fn test_rw_access_in_unwind() {
        let arc = Arc::new(RwLock::new(1));
        let arc2 = arc.clone();
        let _ = thread::spawn(move || -> () {
            struct Unwinder {
                i: Arc<RwLock<isize>>,
            }
            impl Drop for Unwinder {
                fn drop(&mut self) {
                    let mut lock = self.i.write();
                    *lock += 1;
                }
            }
            let _u = Unwinder { i: arc2 };
            panic!();
        })
        .join();
        let lock = arc.read();
        assert_eq!(*lock, 2);
    }

    #[test]
    fn test_rwlock_unsized() {
        let rw: &RwLock<[i32]> = &RwLock::new([1, 2, 3]);
        {
            let b = &mut *rw.write();
            b[0] = 4;
            b[2] = 5;
        }
        let comp: &[i32] = &[4, 2, 5];
        assert_eq!(&*rw.read(), comp);
    }

    #[test]
    fn test_rwlock_try_write() {
        use std::mem::drop;

        let lock = RwLock::new(0isize);
        let read_guard = lock.read();

        assert!(
            lock.try_write().is_none(),
            "try_write should not succeed while read_guard is in scope"
        );

        drop(read_guard);
    }

    #[test]
    fn test_rw_try_read() {
        let m = RwLock::new(0);
        mem::forget(m.write());
        assert!(m.try_read().is_none());
    }

    #[test]
    fn test_into_inner() {
        let m = RwLock::new(NonCopy(10));
        assert_eq!(m.into_inner(), NonCopy(10));
    }

    #[test]
    fn test_into_inner_drop() {
        struct Foo(Arc<AtomicUsize>);
        impl Drop for Foo {
            fn drop(&mut self) {
                self.0.fetch_add(1, Ordering::SeqCst);
            }
        }
        let num_drops = Arc::new(AtomicUsize::new(0));
        let m = RwLock::new(Foo(num_drops.clone()));
        assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        {
            let _inner = m.into_inner();
            assert_eq!(num_drops.load(Ordering::SeqCst), 0);
        }
        assert_eq!(num_drops.load(Ordering::SeqCst), 1);
    }

    #[test]
    fn test_force_read_decrement() {
        let m = RwLock::new(());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        ::std::mem::forget(m.read());
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
            m.force_read_decrement();
        }
        assert!(m.try_write().is_none());
        unsafe {
            m.force_read_decrement();
        }
        assert!(m.try_write().is_some());
    }

    #[test]
    fn test_force_write_unlock() {
        let m = RwLock::new(());
        ::std::mem::forget(m.write());
        assert!(m.try_read().is_none());
        unsafe {
            m.force_write_unlock();
        }
        assert!(m.try_read().is_some());
    }

    #[test]
    fn test_upgrade_downgrade() {
        let m = RwLock::new(());
        {
            let _r = m.read();
            let upg = m.try_upgradeable_read().unwrap();
            assert!(m.try_read().is_none());
            assert!(m.try_write().is_none());
            assert!(upg.try_upgrade().is_err());
        }
        {
            let w = m.write();
            assert!(m.try_upgradeable_read().is_none());
            let _r = w.downgrade();
            assert!(m.try_upgradeable_read().is_some());
            assert!(m.try_read().is_some());
            assert!(m.try_write().is_none());
        }
        {
            let _u = m.upgradeable_read();
            assert!(m.try_upgradeable_read().is_none());
        }

        assert!(m.try_upgradeable_read().unwrap().try_upgrade().is_ok());
    }
}