diff --git a/src/doc/book b/src/doc/book
index 7ddc46460f09a..871416b85c1a7 160000
--- a/src/doc/book
+++ b/src/doc/book
@@ -1 +1 @@
-Subproject commit 7ddc46460f09a5cd9bd2a620565bdc20b3315ea9
+Subproject commit 871416b85c1a73717d65d6f4a9ea29e5aef3db0e
diff --git a/src/doc/rust-by-example b/src/doc/rust-by-example
index e76be6b2dc84c..67cfbf31df880 160000
--- a/src/doc/rust-by-example
+++ b/src/doc/rust-by-example
@@ -1 +1 @@
-Subproject commit e76be6b2dc84c6a992e186157efe29d625e29b94
+Subproject commit 67cfbf31df880728dcf7cb35b15b028ec92caf31
diff --git a/src/libcore/hint.rs b/src/libcore/hint.rs
index 6439fa0e0c8b8..ee4be6c915119 100644
--- a/src/libcore/hint.rs
+++ b/src/libcore/hint.rs
@@ -49,28 +49,16 @@ pub unsafe fn unreachable_unchecked() -> ! {
     intrinsics::unreachable()
 }
 
-/// Signals the processor that it is entering a busy-wait spin-loop.
+/// Emits a machine instruction hinting to the processor that it is running in a busy-wait
+/// spin-loop ("spin lock").
 ///
-/// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
-/// power or switching hyper-threads.
-///
-/// This function is different than [`std::thread::yield_now`] which directly yields to the
-/// system's scheduler, whereas `spin_loop` only signals the processor that it is entering a
-/// busy-wait spin-loop without yielding control to the system's scheduler.
-///
-/// Using a busy-wait spin-loop with `spin_loop` is ideally used in situations where a
-/// contended lock is held by another thread executed on a different CPU and where the waiting
-/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's
-/// scheduler, no overhead for switching threads occurs. However, if the thread holding the
-/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice
-/// before switching to the thread that holds the lock. If the contending lock is held by a thread
-/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to
-/// use [`std::thread::yield_now`].
+/// For a discussion of different locking strategies and their trade-offs, see
+/// [`core::sync::atomic::spin_loop_hint`].
 ///
 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
 /// do anything at all.
 ///
-/// [`std::thread::yield_now`]: ../../std/thread/fn.yield_now.html
+/// [`core::sync::atomic::spin_loop_hint`]: ../sync/atomic/fn.spin_loop_hint.html
 #[inline]
 #[unstable(feature = "renamed_spin_loop", issue = "55002")]
 pub fn spin_loop() {
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index a73111571c2b0..c9ccef972c2b5 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -124,28 +124,31 @@ use crate::fmt;
 
 use crate::hint::spin_loop;
 
-/// Signals the processor that it is entering a busy-wait spin-loop.
+/// Signals the processor that it is inside a busy-wait spin-loop ("spin lock").
 ///
 /// Upon receiving spin-loop signal the processor can optimize its behavior by, for example, saving
 /// power or switching hyper-threads.
 ///
-/// This function is different than [`std::thread::yield_now`] which directly yields to the
-/// system's scheduler, whereas `spin_loop_hint` only signals the processor that it is entering a
-/// busy-wait spin-loop without yielding control to the system's scheduler.
+/// This function is different from [`std::thread::yield_now`] which directly yields to the
+/// system's scheduler, whereas `spin_loop_hint` does not interact with the operating system.
 ///
-/// Using a busy-wait spin-loop with `spin_loop_hint` is ideally used in situations where a
-/// contended lock is held by another thread executed on a different CPU and where the waiting
-/// times are relatively small. Because entering busy-wait spin-loop does not trigger the system's
-/// scheduler, no overhead for switching threads occurs. However, if the thread holding the
-/// contended lock is running on the same CPU, the spin-loop is likely to occupy an entire CPU slice
-/// before switching to the thread that holds the lock. If the contending lock is held by a thread
-/// on the same CPU or if the waiting times for acquiring the lock are longer, it is often better to
-/// use [`std::thread::yield_now`].
+/// Spin locks can be very efficient for short lock durations because they do not involve context
+/// switches or interaction with the operating system. For long lock durations, however, they
+/// become wasteful because they use CPU cycles for the entire lock duration, and using a
+/// [`std::sync::Mutex`] is likely the better approach. If actively spinning for a long time is
+/// required, e.g. because code polls a non-blocking API, calling [`std::thread::yield_now`]
+/// or [`std::thread::sleep`] may be the best option.
+///
+/// **Note**: Spin locks are based on the underlying assumption that another thread will release
+/// the lock 'soon'. In order for this to work, that other thread must run on a different CPU or
+/// core (at least potentially). Spin locks do not work efficiently on single CPU / core platforms.
 ///
 /// **Note**: On platforms that do not support receiving spin-loop hints this function does not
 /// do anything at all.
 ///
 /// [`std::thread::yield_now`]: ../../../std/thread/fn.yield_now.html
+/// [`std::thread::sleep`]: ../../../std/thread/fn.sleep.html
+/// [`std::sync::Mutex`]: ../../../std/sync/struct.Mutex.html
 #[inline]
 #[stable(feature = "spin_loop_hint", since = "1.24.0")]
 pub fn spin_loop_hint() {
diff --git a/src/librustc_mir/dataflow/mod.rs b/src/librustc_mir/dataflow/mod.rs
index 7fe2a890a5371..319abbbe7b643 100644
--- a/src/librustc_mir/dataflow/mod.rs
+++ b/src/librustc_mir/dataflow/mod.rs
@@ -56,7 +56,7 @@ where
 /// string (as well as that of rendering up-front); in exchange, you
 /// don't have to hand over ownership of your value or deal with
 /// borrowing it.
-pub(crate) struct DebugFormatted(String);
+pub struct DebugFormatted(String);
 
 impl DebugFormatted {
     pub fn new(input: &dyn fmt::Debug) -> DebugFormatted {
@@ -70,7 +70,7 @@ impl fmt::Debug for DebugFormatted {
     }
 }
 
-pub(crate) trait Dataflow<'tcx, BD: BitDenotation<'tcx>> {
+pub trait Dataflow<'tcx, BD: BitDenotation<'tcx>> {
     /// Sets up and runs the dataflow problem, using `p` to render results if
     /// implementation so chooses.
     fn dataflow<P>(&mut self, p: P) where P: Fn(&BD, BD::Idx) -> DebugFormatted {
@@ -121,7 +121,7 @@ pub struct MoveDataParamEnv<'tcx> {
     pub(crate) param_env: ty::ParamEnv<'tcx>,
 }
 
-pub(crate) fn do_dataflow<'a, 'tcx, BD, P>(
+pub fn do_dataflow<'a, 'tcx, BD, P>(
     tcx: TyCtxt<'tcx>,
     body: &'a Body<'tcx>,
     def_id: DefId,
@@ -453,34 +453,10 @@ where
     {
         self.flow_state.each_gen_bit(f)
     }
-}
-
-pub fn state_for_location<'tcx, T: BitDenotation<'tcx>>(loc: Location,
-                                                        analysis: &T,
-                                                        result: &DataflowResults<'tcx, T>,
-                                                        body: &Body<'tcx>)
-    -> BitSet<T::Idx> {
-    let mut trans = GenKill::from_elem(HybridBitSet::new_empty(analysis.bits_per_block()));
-    for stmt in 0..loc.statement_index {
-        let mut stmt_loc = loc;
-        stmt_loc.statement_index = stmt;
-        analysis.before_statement_effect(&mut trans, stmt_loc);
-        analysis.statement_effect(&mut trans, stmt_loc);
+    pub fn get(&self) -> &BitSet<BD::Idx> {
+        self.flow_state.as_dense()
     }
-
-    // Apply the pre-statement effect of the statement we're evaluating.
-    if loc.statement_index == body[loc.block].statements.len() {
-        analysis.before_terminator_effect(&mut trans, loc);
-    } else {
-        analysis.before_statement_effect(&mut trans, loc);
-    }
-
-    // Apply the transfer function for all preceding statements to the fixpoint
-    // at the start of the block.
-    let mut state = result.sets().entry_set_for(loc.block.index()).to_owned();
-    trans.apply(&mut state);
-    state
 }
 
 pub struct DataflowAnalysis<'a, 'tcx, O>
@@ -565,7 +541,7 @@ pub struct GenKill<T> {
     pub(crate) kill_set: T,
 }
 
-type GenKillSet<E> = GenKill<HybridBitSet<E>>;
+pub type GenKillSet<E> = GenKill<HybridBitSet<E>>;
 
 impl<T: Clone> GenKill<T> {
     /// Creates a new tuple where `gen_set == kill_set == elem`.
@@ -580,28 +556,28 @@ impl<T: Clone> GenKill<T> {
 }
 
 impl<E: Idx> GenKillSet<E> {
-    pub(crate) fn clear(&mut self) {
+    pub fn clear(&mut self) {
         self.gen_set.clear();
         self.kill_set.clear();
     }
 
-    fn gen(&mut self, e: E) {
+    pub fn gen(&mut self, e: E) {
         self.gen_set.insert(e);
         self.kill_set.remove(e);
     }
 
-    fn gen_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
+    pub fn gen_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
         for j in i {
             self.gen(*j.borrow());
         }
     }
 
-    fn kill(&mut self, e: E) {
+    pub fn kill(&mut self, e: E) {
         self.gen_set.remove(e);
         self.kill_set.insert(e);
     }
 
-    fn kill_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
+    pub fn kill_all(&mut self, i: impl IntoIterator<Item: Borrow<E>>) {
         for j in i {
             self.kill(*j.borrow());
         }
diff --git a/src/librustc_mir/lib.rs b/src/librustc_mir/lib.rs
index 034ad5b01d346..6a49ed6ed9fa1 100644
--- a/src/librustc_mir/lib.rs
+++ b/src/librustc_mir/lib.rs
@@ -35,7 +35,7 @@ pub mod error_codes;
 
 mod borrow_check;
 mod build;
-mod dataflow;
+pub mod dataflow;
 mod hair;
 mod lints;
 mod shim;
diff --git a/src/librustc_mir/transform/generator.rs b/src/librustc_mir/transform/generator.rs
index 0ce2db93c421d..caf588af851dd 100644
--- a/src/librustc_mir/transform/generator.rs
+++ b/src/librustc_mir/transform/generator.rs
@@ -67,7 +67,7 @@ use crate::transform::{MirPass, MirSource};
 use crate::transform::simplify;
 use crate::transform::no_landing_pads::no_landing_pads;
 use crate::dataflow::{DataflowResults, DataflowResultsConsumer, FlowAtLocation};
-use crate::dataflow::{do_dataflow, DebugFormatted, state_for_location};
+use crate::dataflow::{do_dataflow, DebugFormatted, DataflowResultsCursor};
 use crate::dataflow::{MaybeStorageLive, HaveBeenBorrowedLocals, RequiresStorage};
 use crate::util::dump_mir;
 use crate::util::liveness;
@@ -436,9 +436,10 @@ fn locals_live_across_suspend_points(
     // Calculate when MIR locals have live storage. This gives us an upper bound of their
     // lifetimes.
     let storage_live_analysis = MaybeStorageLive::new(body);
-    let storage_live =
+    let storage_live_results =
         do_dataflow(tcx, body, def_id, &[], &dead_unwinds, storage_live_analysis,
                     |bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
+    let mut storage_live_cursor = DataflowResultsCursor::new(&storage_live_results, body);
 
     // Find the MIR locals which do not use StorageLive/StorageDead statements.
     // The storage of these locals are always live.
@@ -448,17 +449,18 @@ fn locals_live_across_suspend_points(
     // Calculate the MIR locals which have been previously
     // borrowed (even if they are still active).
     let borrowed_locals_analysis = HaveBeenBorrowedLocals::new(body);
-    let borrowed_locals_result =
+    let borrowed_locals_results =
         do_dataflow(tcx, body, def_id, &[], &dead_unwinds, borrowed_locals_analysis,
                     |bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
+    let mut borrowed_locals_cursor = DataflowResultsCursor::new(&borrowed_locals_results, body);
 
     // Calculate the MIR locals that we actually need to keep storage around
     // for.
-    let requires_storage_analysis = RequiresStorage::new(body, &borrowed_locals_result);
-    let requires_storage =
+    let requires_storage_analysis = RequiresStorage::new(body, &borrowed_locals_results);
+    let requires_storage_results =
         do_dataflow(tcx, body, def_id, &[], &dead_unwinds, requires_storage_analysis,
                     |bd, p| DebugFormatted::new(&bd.body().local_decls[p]));
-    let requires_storage_analysis = RequiresStorage::new(body, &borrowed_locals_result);
+    let mut requires_storage_cursor = DataflowResultsCursor::new(&requires_storage_results, body);
 
     // Calculate the liveness of MIR locals ignoring borrows.
     let mut live_locals = liveness::LiveVarSet::new_empty(body.local_decls.len());
@@ -484,10 +486,6 @@ fn locals_live_across_suspend_points(
         };
 
         if !movable {
-            let borrowed_locals = state_for_location(loc,
-                                                     &borrowed_locals_analysis,
-                                                     &borrowed_locals_result,
-                                                     body);
             // The `liveness` variable contains the liveness of MIR locals ignoring borrows.
             // This is correct for movable generators since borrows cannot live across
             // suspension points. However for immovable generators we need to account for
@@ -498,22 +496,19 @@ fn locals_live_across_suspend_points(
             // If a borrow is converted to a raw reference, we must also assume that it lives
            // forever. Note that the final liveness is still bounded by the storage liveness
            // of the local, which happens using the `intersect` operation below.
-            liveness.outs[block].union(&borrowed_locals);
+            borrowed_locals_cursor.seek(loc);
+            liveness.outs[block].union(borrowed_locals_cursor.get());
         }
 
-        let storage_liveness = state_for_location(loc,
-                                                  &storage_live_analysis,
-                                                  &storage_live,
-                                                  body);
+        storage_live_cursor.seek(loc);
+        let storage_liveness = storage_live_cursor.get();
 
         // Store the storage liveness for later use so we can restore the state
         // after a suspension point
         storage_liveness_map.insert(block, storage_liveness.clone());
 
-        let mut storage_required = state_for_location(loc,
-                                                      &requires_storage_analysis,
-                                                      &requires_storage,
-                                                      body);
+        requires_storage_cursor.seek(loc);
+        let mut storage_required = requires_storage_cursor.get().clone();
 
         // Mark locals without storage statements as always requiring storage
         storage_required.union(&ignored.0);
@@ -549,8 +544,7 @@ fn locals_live_across_suspend_points(
         body,
         &live_locals,
         &ignored,
-        requires_storage,
-        requires_storage_analysis);
+        requires_storage_results);
 
     LivenessInfo {
         live_locals,
@@ -588,7 +582,6 @@ fn compute_storage_conflicts(
     stored_locals: &liveness::LiveVarSet,
     ignored: &StorageIgnored,
     requires_storage: DataflowResults<'tcx, RequiresStorage<'mir, 'tcx>>,
-    _requires_storage_analysis: RequiresStorage<'mir, 'tcx>,
 ) -> BitMatrix<Local, Local> {
     assert_eq!(body.local_decls.len(), ignored.0.domain_size());
     assert_eq!(body.local_decls.len(), stored_locals.domain_size());
diff --git a/src/test/debuginfo/issue-22656.rs b/src/test/debuginfo/issue-22656.rs
index 86d31909a0b3b..e4634d96a6f31 100644
--- a/src/test/debuginfo/issue-22656.rs
+++ b/src/test/debuginfo/issue-22656.rs
@@ -15,7 +15,7 @@
 // lldbg-check:[...]$0 = vec![1, 2, 3]
 // lldbr-check:(alloc::vec::Vec<i32>) v = vec![1, 2, 3]
 // lldb-command:print zs
-// lldbg-check:[...]$1 = StructWithZeroSizedField { x: ZeroSizedStruct, y: 123, z: ZeroSizedStruct, w: 456 }
+// lldbg-check:[...]$1 = StructWithZeroSizedField { x: ZeroSizedStruct[...], y: 123, z: ZeroSizedStruct[...], w: 456 }
 // lldbr-check:(issue_22656::StructWithZeroSizedField) zs = StructWithZeroSizedField { x: ZeroSizedStruct { }, y: 123, z: ZeroSizedStruct { }, w: 456 }
 // lldbr-command:continue
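The rewritten `spin_loop_hint` documentation above contrasts short spins with yielding to the scheduler. Below is a minimal standalone sketch of that trade-off, not part of the patch; the `wait_until_ready` helper and the spin budget of 100 iterations are arbitrary choices for illustration.

```rust
use std::sync::atomic::{spin_loop_hint, AtomicBool, Ordering};
use std::sync::Arc;
use std::thread;

/// Spins briefly on `flag`, then falls back to yielding to the OS scheduler.
fn wait_until_ready(flag: &AtomicBool) {
    let mut spins = 0u32;
    while !flag.load(Ordering::Acquire) {
        if spins < 100 {
            // Short wait: only hint the CPU; no system call, no context switch.
            spin_loop_hint();
            spins += 1;
        } else {
            // The wait is no longer "short": give the time slice back instead
            // of burning CPU cycles, as the new docs recommend.
            thread::yield_now();
        }
    }
}

fn main() {
    let ready = Arc::new(AtomicBool::new(false));
    let publisher = {
        let ready = Arc::clone(&ready);
        thread::spawn(move || {
            // Pretend to produce something, then publish it.
            ready.store(true, Ordering::Release);
        })
    };
    wait_until_ready(&ready);
    publisher.join().unwrap();
    println!("observed the flag");
}
```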
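The `GenKillSet` methods made `pub` in `dataflow/mod.rs` keep `gen_set` and `kill_set` disjoint, so a composed per-statement transfer function can be applied to a block's entry state in one step. Here is a self-contained sketch of that bookkeeping, using `BTreeSet<usize>` in place of the compiler's bitset types; the names below are illustrative and are not the rustc API.

```rust
use std::collections::BTreeSet;

/// Toy stand-in for the compiler's `GenKillSet`: records which elements a
/// sequence of statements gens (inserts) and kills (removes).
#[derive(Default)]
struct GenKill {
    gen_set: BTreeSet<usize>,
    kill_set: BTreeSet<usize>,
}

impl GenKill {
    // Mirrors `GenKillSet::gen` (renamed here because `gen` is a reserved
    // keyword in newer Rust editions).
    fn gen_elem(&mut self, e: usize) {
        self.gen_set.insert(e);
        self.kill_set.remove(&e); // a later gen overrides an earlier kill
    }

    // Mirrors `GenKillSet::kill`.
    fn kill_elem(&mut self, e: usize) {
        self.gen_set.remove(&e); // a later kill overrides an earlier gen
        self.kill_set.insert(e);
    }

    /// Applies the composed effect to a block's entry state.
    fn apply(&self, state: &mut BTreeSet<usize>) {
        state.extend(self.gen_set.iter().copied());
        for e in &self.kill_set {
            state.remove(e);
        }
    }
}

fn main() {
    let mut trans = GenKill::default();
    trans.kill_elem(1); // e.g. StorageDead(_1) ...
    trans.gen_elem(1);  // ... followed by StorageLive(_1): the gen wins
    trans.gen_elem(2);

    let mut state: BTreeSet<usize> = [0, 1].iter().copied().collect();
    trans.apply(&mut state);

    let expected: BTreeSet<usize> = [0, 1, 2].iter().copied().collect();
    assert_eq!(state, expected);
    println!("state after block: {:?}", state);
}
```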
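In `generator.rs` the patch swaps `state_for_location`, which re-applied every statement effect from the block entry on each query, for a `DataflowResultsCursor` that is `seek`ed forward to each location. The sketch below illustrates only the cursor idea; the `ResultsCursor` type, its closure-based transfer function, and the toy `State` alias are made up for this example and are not the rustc API.

```rust
use std::collections::BTreeSet;

type State = BTreeSet<usize>;

/// Illustrative cursor over one block of a forward dataflow result: it starts
/// from the precomputed entry state and remembers how many statement effects
/// it has already applied, so forward seeks do not start over from scratch.
struct ResultsCursor<'a, F: Fn(&mut State, usize)> {
    entry_state: &'a State,
    statement_effect: F,
    state: State,
    pos: usize, // number of statements whose effects are already applied
}

impl<'a, F: Fn(&mut State, usize)> ResultsCursor<'a, F> {
    fn new(entry_state: &'a State, statement_effect: F) -> Self {
        ResultsCursor { state: entry_state.clone(), entry_state, statement_effect, pos: 0 }
    }

    /// Advances to the state on entry to statement `target` within the block.
    fn seek(&mut self, target: usize) {
        if target < self.pos {
            // Backwards seek: restart from the block entry state.
            self.state = self.entry_state.clone();
            self.pos = 0;
        }
        while self.pos < target {
            (self.statement_effect)(&mut self.state, self.pos);
            self.pos += 1;
        }
    }

    fn get(&self) -> &State {
        &self.state
    }
}

fn main() {
    // Toy "analysis": statement `i` makes local `i` live.
    let entry: State = BTreeSet::new();
    let mut cursor = ResultsCursor::new(&entry, |state: &mut State, stmt: usize| {
        state.insert(stmt);
    });

    cursor.seek(2);
    assert_eq!(cursor.get().len(), 2); // effects of statements 0 and 1
    cursor.seek(3);
    assert_eq!(cursor.get().len(), 3); // only statement 2's effect applied on top
    println!("state before statement 3: {:?}", cursor.get());
}
```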