From 72645c3e1191097859cbdcba59218a79a8404ad1 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Thu, 15 Aug 2019 21:31:35 +0100 Subject: [PATCH 1/4] Move some of `rustc_mir::build::scope` to a submodule --- src/librustc_mir/build/scope.rs | 244 +------------------------ src/librustc_mir/build/scope/stack.rs | 247 ++++++++++++++++++++++++++ 2 files changed, 250 insertions(+), 241 deletions(-) create mode 100644 src/librustc_mir/build/scope/stack.rs diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index ee6d42de388d9..b05e90026431c 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -89,88 +89,13 @@ use rustc::ty::Ty; use rustc::hir; use rustc::mir::*; use syntax_pos::{DUMMY_SP, Span}; -use rustc_data_structures::fx::FxHashMap; use std::collections::hash_map::Entry; use std::mem; -#[derive(Debug)] -struct Scope { - /// The source scope this scope was created in. - source_scope: SourceScope, - - /// the region span of this scope within source code. - region_scope: region::Scope, - - /// the span of that region_scope - region_scope_span: Span, - - /// Whether there's anything to do for the cleanup path, that is, - /// when unwinding through this scope. This includes destructors, - /// but not StorageDead statements, which don't get emitted at all - /// for unwinding, for several reasons: - /// * clang doesn't emit llvm.lifetime.end for C++ unwinding - /// * LLVM's memory dependency analysis can't handle it atm - /// * polluting the cleanup MIR with StorageDead creates - /// landing pads even though there's no actual destructors - /// * freeing up stack space has no effect during unwinding - /// Note that for generators we do emit StorageDeads, for the - /// use of optimizations in the MIR generator transform. - needs_cleanup: bool, - - /// set of places to drop when exiting this scope. This starts - /// out empty but grows as variables are declared during the - /// building process. This is a stack, so we always drop from the - /// end of the vector (top of the stack) first. - drops: Vec, - - /// The cache for drop chain on “normal” exit into a particular BasicBlock. - cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>, - - /// The cache for drop chain on "generator drop" exit. - cached_generator_drop: Option, - - /// The cache for drop chain on "unwind" exit. - cached_unwind: CachedBlock, -} +crate use stack::{Scope, Scopes}; +use stack::{CachedBlock, DropData}; -#[derive(Debug, Default)] -pub struct Scopes<'tcx> { - scopes: Vec, - /// The current set of breakable scopes. See module comment for more details. - breakable_scopes: Vec>, -} - -#[derive(Debug)] -struct DropData { - /// span where drop obligation was incurred (typically where place was declared) - span: Span, - - /// local to drop - local: Local, - - /// Whether this is a value Drop or a StorageDead. - kind: DropKind, - - /// The cached blocks for unwinds. - cached_block: CachedBlock, -} - -#[derive(Debug, Default, Clone, Copy)] -struct CachedBlock { - /// The cached block for the cleanups-on-diverge path. This block - /// contains code to run the current drop and all the preceding - /// drops (i.e., those having lower index in Drop’s Scope drop - /// array) - unwind: Option, - - /// The cached block for unwinds during cleanups-on-generator-drop path - /// - /// This is split from the standard unwind path here to prevent drop - /// elaboration from creating drop flags that would have to be captured - /// by the generator. 
I'm not sure how important this optimization is,
-    /// but it is here.
-    generator_drop: Option<BasicBlock>,
-}
+mod stack;
 
 #[derive(Debug)]
 pub(crate) enum DropKind {
@@ -200,169 +125,6 @@ pub enum BreakableTarget {
     Return,
 }
 
-impl CachedBlock {
-    fn invalidate(&mut self) {
-        self.generator_drop = None;
-        self.unwind = None;
-    }
-
-    fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
-        if generator_drop {
-            self.generator_drop
-        } else {
-            self.unwind
-        }
-    }
-
-    fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
-        if generator_drop {
-            &mut self.generator_drop
-        } else {
-            &mut self.unwind
-        }
-    }
-}
-
-impl Scope {
-    /// Invalidates all the cached blocks in the scope.
-    ///
-    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
-    /// larger extent of code.
-    ///
-    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
-    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
-    /// top-of-scope (as opposed to dependent scopes).
-    fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
-        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
-        // with lots of `try!`?
-
-        // cached exits drop storage and refer to the top-of-scope
-        self.cached_exits.clear();
-
-        // the current generator drop and unwind refer to top-of-scope
-        self.cached_generator_drop = None;
-
-        let ignore_unwinds = storage_only && !is_generator;
-        if !ignore_unwinds {
-            self.cached_unwind.invalidate();
-        }
-
-        if !ignore_unwinds && !this_scope_only {
-            for drop_data in &mut self.drops {
-                drop_data.cached_block.invalidate();
-            }
-        }
-    }
-
-    /// Given a span and this scope's source scope, make a SourceInfo.
-    fn source_info(&self, span: Span) -> SourceInfo {
-        SourceInfo {
-            span,
-            scope: self.source_scope
-        }
-    }
-}
-
-impl<'tcx> Scopes<'tcx> {
-    fn len(&self) -> usize {
-        self.scopes.len()
-    }
-
-    fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
-        debug!("push_scope({:?})", region_scope);
-        self.scopes.push(Scope {
-            source_scope: vis_scope,
-            region_scope: region_scope.0,
-            region_scope_span: region_scope.1.span,
-            needs_cleanup: false,
-            drops: vec![],
-            cached_generator_drop: None,
-            cached_exits: Default::default(),
-            cached_unwind: CachedBlock::default(),
-        });
-    }
-
-    fn pop_scope(
-        &mut self,
-        region_scope: (region::Scope, SourceInfo),
-    ) -> (Scope, Option<BasicBlock>) {
-        let scope = self.scopes.pop().unwrap();
-        assert_eq!(scope.region_scope, region_scope.0);
-        let unwind_to = self.scopes.last()
-            .and_then(|next_scope| next_scope.cached_unwind.get(false));
-        (scope, unwind_to)
-    }
-
-    fn may_panic(&self, scope_count: usize) -> bool {
-        let len = self.len();
-        self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup)
-    }
-
-    /// Finds the breakable scope for a given label. This is used for
-    /// resolving `return`, `break` and `continue`.
-    fn find_breakable_scope(
-        &self,
-        span: Span,
-        target: BreakableTarget,
-    ) -> (BasicBlock, region::Scope, Option<Place<'tcx>>) {
-        let get_scope = |scope: region::Scope| {
-            // find the loop-scope by its `region::Scope`.
-            self.breakable_scopes.iter()
-                .rfind(|breakable_scope| breakable_scope.region_scope == scope)
-                .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
-        };
-        match target {
-            BreakableTarget::Return => {
-                let scope = &self.breakable_scopes[0];
-                if scope.break_destination != Place::return_place() {
-                    span_bug!(span, "`return` in item with no return scope");
-                }
-                (scope.break_block, scope.region_scope, Some(scope.break_destination.clone()))
-            }
-            BreakableTarget::Break(scope) => {
-                let scope = get_scope(scope);
-                (scope.break_block, scope.region_scope, Some(scope.break_destination.clone()))
-            }
-            BreakableTarget::Continue(scope) => {
-                let scope = get_scope(scope);
-                let continue_block = scope.continue_block
-                    .unwrap_or_else(|| span_bug!(span, "missing `continue` block"));
-                (continue_block, scope.region_scope, None)
-            }
-        }
-    }
-
-    fn num_scopes_above(&self, region_scope: region::Scope, span: Span) -> usize {
-        let scope_count = self.scopes.iter().rev()
-            .position(|scope| scope.region_scope == region_scope)
-            .unwrap_or_else(|| {
-                span_bug!(span, "region_scope {:?} does not enclose", region_scope)
-            });
-        let len = self.len();
-        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
-        scope_count
-    }
-
-    fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
-        self.scopes.iter_mut().rev()
-    }
-
-    fn top_scopes(&mut self, count: usize) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
-        let len = self.len();
-        self.scopes[len - count..].iter_mut()
-    }
-
-    /// Returns the topmost active scope, which is known to be alive until
-    /// the next scope expression.
-    pub(super) fn topmost(&self) -> region::Scope {
-        self.scopes.last().expect("topmost_scope: no scopes present").region_scope
-    }
-
-    fn source_info(&self, index: usize, span: Span) -> SourceInfo {
-        self.scopes[self.len() - index].source_info(span)
-    }
-}
-
 impl<'a, 'tcx> Builder<'a, 'tcx> {
     // Adding and removing scopes
     // ==========================
diff --git a/src/librustc_mir/build/scope/stack.rs b/src/librustc_mir/build/scope/stack.rs
new file mode 100644
index 0000000000000..c56e853a4be24
--- /dev/null
+++ b/src/librustc_mir/build/scope/stack.rs
@@ -0,0 +1,247 @@
+use crate::build::scope::{BreakableScope, BreakableTarget, DropKind};
+use rustc::middle::region;
+use rustc::mir::{BasicBlock, Local, Place, SourceScope, SourceInfo};
+use syntax_pos::Span;
+use rustc_data_structures::fx::FxHashMap;
+
+#[derive(Debug)]
+crate struct Scope {
+    /// The source scope this scope was created in.
+    pub(super) source_scope: SourceScope,
+
+    /// the region span of this scope within source code.
+    pub(super) region_scope: region::Scope,
+
+    /// the span of that region_scope
+    pub(super) region_scope_span: Span,
+
+    /// Whether there's anything to do for the cleanup path, that is,
+    /// when unwinding through this scope. This includes destructors,
+    /// but not StorageDead statements, which don't get emitted at all
+    /// for unwinding, for several reasons:
+    ///  * clang doesn't emit llvm.lifetime.end for C++ unwinding
+    ///  * LLVM's memory dependency analysis can't handle it atm
+    ///  * polluting the cleanup MIR with StorageDead creates
+    ///    landing pads even though there's no actual destructors
+    ///  * freeing up stack space has no effect during unwinding
+    /// Note that for generators we do emit StorageDeads, for the
+    /// use of optimizations in the MIR generator transform.
+    pub(super) needs_cleanup: bool,
+
+    /// set of places to drop when exiting this scope. This starts
+    /// out empty but grows as variables are declared during the
+    /// building process. This is a stack, so we always drop from the
+    /// end of the vector (top of the stack) first.
+    pub(super) drops: Vec<DropData>,
+
+    /// The cache for drop chain on “normal” exit into a particular BasicBlock.
+    pub(super) cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
+
+    /// The cache for drop chain on "generator drop" exit.
+    pub(super) cached_generator_drop: Option<BasicBlock>,
+
+    /// The cache for drop chain on "unwind" exit.
+    pub(super) cached_unwind: CachedBlock,
+}
+
+impl Scope {
+    /// Invalidates all the cached blocks in the scope.
+    ///
+    /// Should always be run for all inner scopes when a drop is pushed into some scope enclosing a
+    /// larger extent of code.
+    ///
+    /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
+    /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
+    /// top-of-scope (as opposed to dependent scopes).
+    pub(super) fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
+        // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
+        // with lots of `try!`?
+
+        // cached exits drop storage and refer to the top-of-scope
+        self.cached_exits.clear();
+
+        // the current generator drop and unwind refer to top-of-scope
+        self.cached_generator_drop = None;
+
+        let ignore_unwinds = storage_only && !is_generator;
+        if !ignore_unwinds {
+            self.cached_unwind.invalidate();
+        }
+
+        if !ignore_unwinds && !this_scope_only {
+            for drop_data in &mut self.drops {
+                drop_data.cached_block.invalidate();
+            }
+        }
+    }
+
+    /// Given a span and this scope's source scope, make a SourceInfo.
+    pub(super) fn source_info(&self, span: Span) -> SourceInfo {
+        SourceInfo {
+            span,
+            scope: self.source_scope
+        }
+    }
+}
+
+#[derive(Debug)]
+pub(super) struct DropData {
+    /// span where drop obligation was incurred (typically where place was declared)
+    pub(super) span: Span,
+
+    /// local to drop
+    pub(super) local: Local,
+
+    /// Whether this is a value Drop or a StorageDead.
+    pub(super) kind: DropKind,
+
+    /// The cached blocks for unwinds.
+    pub(super) cached_block: CachedBlock,
+}
+
+#[derive(Debug, Default, Clone, Copy)]
+pub(super) struct CachedBlock {
+    /// The cached block for the cleanups-on-diverge path. This block
+    /// contains code to run the current drop and all the preceding
+    /// drops (i.e., those having lower index in Drop’s Scope drop
+    /// array)
+    unwind: Option<BasicBlock>,
+
+    /// The cached block for unwinds during cleanups-on-generator-drop path
+    ///
+    /// This is split from the standard unwind path here to prevent drop
+    /// elaboration from creating drop flags that would have to be captured
+    /// by the generator. I'm not sure how important this optimization is,
+    /// but it is here.
+    generator_drop: Option<BasicBlock>,
+}
+
+impl CachedBlock {
+    fn invalidate(&mut self) {
+        self.generator_drop = None;
+        self.unwind = None;
+    }
+
+    pub(super) fn get(&self, generator_drop: bool) -> Option<BasicBlock> {
+        if generator_drop {
+            self.generator_drop
+        } else {
+            self.unwind
+        }
+    }
+
+    pub(super) fn ref_mut(&mut self, generator_drop: bool) -> &mut Option<BasicBlock> {
+        if generator_drop {
+            &mut self.generator_drop
+        } else {
+            &mut self.unwind
+        }
+    }
+}
+
+#[derive(Debug, Default)]
+pub struct Scopes<'tcx> {
+    pub(super) scopes: Vec<Scope>,
+    /// The current set of breakable scopes. See module comment for more details.
+    pub(super) breakable_scopes: Vec<BreakableScope<'tcx>>,
+}
+
+impl<'tcx> Scopes<'tcx> {
+    pub(super) fn len(&self) -> usize {
+        self.scopes.len()
+    }
+
+    pub(super) fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
+        debug!("push_scope({:?})", region_scope);
+        self.scopes.push(Scope {
+            source_scope: vis_scope,
+            region_scope: region_scope.0,
+            region_scope_span: region_scope.1.span,
+            needs_cleanup: false,
+            drops: vec![],
+            cached_generator_drop: None,
+            cached_exits: Default::default(),
+            cached_unwind: CachedBlock::default(),
+        });
+    }
+
+    pub(super) fn pop_scope(
+        &mut self,
+        region_scope: (region::Scope, SourceInfo),
+    ) -> (Scope, Option<BasicBlock>) {
+        let scope = self.scopes.pop().unwrap();
+        assert_eq!(scope.region_scope, region_scope.0);
+        let unwind_to = self.scopes.last()
+            .and_then(|next_scope| next_scope.cached_unwind.get(false));
+        (scope, unwind_to)
+    }
+
+    pub(super) fn may_panic(&self, scope_count: usize) -> bool {
+        let len = self.len();
+        self.scopes[(len - scope_count)..].iter().any(|s| s.needs_cleanup)
+    }
+
+    /// Finds the breakable scope for a given label. This is used for
+    /// resolving `return`, `break` and `continue`.
+    pub(super) fn find_breakable_scope(
+        &self,
+        span: Span,
+        target: BreakableTarget,
+    ) -> (BasicBlock, region::Scope, Option<Place<'tcx>>) {
+        let get_scope = |scope: region::Scope| {
+            // find the loop-scope by its `region::Scope`.
+            self.breakable_scopes.iter()
+                .rfind(|breakable_scope| breakable_scope.region_scope == scope)
+                .unwrap_or_else(|| span_bug!(span, "no enclosing breakable scope found"))
+        };
+        match target {
+            BreakableTarget::Return => {
+                let scope = &self.breakable_scopes[0];
+                if scope.break_destination != Place::return_place() {
+                    span_bug!(span, "`return` in item with no return scope");
+                }
+                (scope.break_block, scope.region_scope, Some(scope.break_destination.clone()))
+            }
+            BreakableTarget::Break(scope) => {
+                let scope = get_scope(scope);
+                (scope.break_block, scope.region_scope, Some(scope.break_destination.clone()))
+            }
+            BreakableTarget::Continue(scope) => {
+                let scope = get_scope(scope);
+                let continue_block = scope.continue_block
+                    .unwrap_or_else(|| span_bug!(span, "missing `continue` block"));
+                (continue_block, scope.region_scope, None)
+            }
+        }
+    }
+
+    pub(super) fn num_scopes_above(&self, region_scope: region::Scope, span: Span) -> usize {
+        let scope_count = self.scopes.iter().rev()
+            .position(|scope| scope.region_scope == region_scope)
+            .unwrap_or_else(|| {
+                span_bug!(span, "region_scope {:?} does not enclose", region_scope)
+            });
+        let len = self.len();
+        assert!(scope_count < len, "should not use `exit_scope` to pop ALL scopes");
+        scope_count
+    }
+
+    pub(super) fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
+        self.scopes.iter_mut().rev()
+    }
+
+    pub(super) fn top_scopes(&mut self, count: usize) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
+        let len = self.len();
+        self.scopes[len - count..].iter_mut()
+    }
+
+    /// Returns the topmost active scope, which is known to be alive until
+    /// the next scope expression.
+ crate fn topmost(&self) -> region::Scope { + self.scopes.last().expect("topmost_scope: no scopes present").region_scope + } + + pub(super) fn source_info(&self, index: usize, span: Span) -> SourceInfo { + self.scopes[self.len() - index].source_info(span) + } +} From fc7c38f6d2d862172479d667a90f48f127a7abae Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 17 Aug 2019 10:36:43 +0100 Subject: [PATCH 2/4] Make `Scope` private to `rustc_mir::build::scope::stack` The logic for finding the correct unwind scopes is going to become more complicated, so we want to limit the amount of code that can directly access the fields of `Scope`. --- src/librustc_mir/build/matches/mod.rs | 2 +- src/librustc_mir/build/scope.rs | 355 ++++++-------------------- src/librustc_mir/build/scope/stack.rs | 316 ++++++++++++++++++++--- 3 files changed, 361 insertions(+), 312 deletions(-) diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index aa261f8eb6fb2..7a2cbb640515d 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -228,7 +228,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { }; // Step 5. Create everything else: the guards and the arms. - let match_scope = self.scopes.topmost(); + let match_scope = self.topmost_scope(); let arm_end_blocks: Vec<_> = arm_candidates.into_iter().map(|(arm, mut candidates)| { let arm_source_info = self.source_info(arm.span); diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index b05e90026431c..dbdd8cf5e6c49 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -83,17 +83,16 @@ should go to. */ use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG}; -use crate::hair::{Expr, ExprRef, LintLevel}; +use crate::hair::{ExprRef, LintLevel}; use rustc::middle::region; use rustc::ty::Ty; use rustc::hir; use rustc::mir::*; use syntax_pos::{DUMMY_SP, Span}; -use std::collections::hash_map::Entry; use std::mem; -crate use stack::{Scope, Scopes}; -use stack::{CachedBlock, DropData}; +crate use stack::Scopes; +use stack::CachedBlock; mod stack; @@ -117,6 +116,23 @@ struct BreakableScope<'tcx> { break_destination: Place<'tcx>, } + +#[derive(Debug)] +struct DropData { + /// span where drop obligation was incurred (typically where place was declared) + span: Span, + + /// local to drop + local: Local, + + /// Whether this is a value Drop or a StorageDead. + kind: DropKind, + + /// The cached blocks for unwinds. + cached_block: CachedBlock, +} + + /// The target of an expression that breaks out of a scope #[derive(Clone, Copy, Debug)] pub enum BreakableTarget { @@ -125,6 +141,19 @@ pub enum BreakableTarget { Return, } +/// A view of the information in a scope that's needed to generate a non-unwind +/// exit from that scope +#[derive(Debug)] +struct ScopeInfo<'a> { + source_scope: SourceScope, + drops: &'a [DropData], + /// Cached block that will start by exiting this scope. + cached_block: &'a mut Option, + /// Cached block for unwind paths that starts at the next scope. This block + /// should be branched to if any of the drops for this scope panic. 
+ unwind_to: BasicBlock, +} + impl<'a, 'tcx> Builder<'a, 'tcx> { // Adding and removing scopes // ========================== @@ -137,7 +166,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { f: F) -> R where F: FnOnce(&mut Builder<'a, 'tcx>) -> R { - let region_scope = self.scopes.topmost(); + let region_scope = self.topmost_scope(); let scope = BreakableScope { region_scope, continue_block: loop_block, @@ -232,15 +261,15 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { if self.scopes.may_panic(1) { self.diverge_cleanup(); } - let (scope, unwind_to) = self.scopes.pop_scope(region_scope); + let (drops, unwind_to, source_scope) = self.scopes.pop_scope(region_scope); let unwind_to = unwind_to.unwrap_or_else(|| self.resume_block()); + let cached_block = &mut Some(block); + let scope = ScopeInfo { drops: &drops, source_scope, cached_block, unwind_to }; unpack!(block = build_scope_drops( &mut self.cfg, self.is_generator, - &scope, - block, - unwind_to, + scope, self.arg_count, false, )); @@ -299,45 +328,31 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.diverge_cleanup(); } - let mut scopes = self.scopes.top_scopes(scope_count + 1).rev(); - let mut scope = scopes.next().unwrap(); - for next_scope in scopes { + let scopes = self.scopes.exit_blocks((target, region_scope)).take(scope_count); + for scope in scopes { if scope.drops.is_empty() { - scope = next_scope; continue; } - let source_info = scope.source_info(span); - block = match scope.cached_exits.entry((target, region_scope)) { - Entry::Occupied(e) => { - self.cfg.terminate(block, source_info, - TerminatorKind::Goto { target: *e.get() }); + let source_info = SourceInfo { scope: scope.source_scope, span }; + match *scope.cached_block { + Some(e) => { + self.cfg.terminate(block, source_info, TerminatorKind::Goto { target: e }); return; } - Entry::Vacant(v) => { + None => { let b = self.cfg.start_new_block(); - self.cfg.terminate(block, source_info, - TerminatorKind::Goto { target: b }); - v.insert(b); - b + self.cfg.terminate(block, source_info, TerminatorKind::Goto { target: b }); + *scope.cached_block = Some(b); } }; - let unwind_to = next_scope.cached_unwind.get(false).unwrap_or_else(|| { - debug_assert!(!may_panic, "cached block not present?"); - START_BLOCK - }); - unpack!(block = build_scope_drops( &mut self.cfg, self.is_generator, scope, - block, - unwind_to, self.arg_count, false, )); - - scope = next_scope; } let source_info = self.scopes.source_info(scope_count, span); @@ -353,36 +368,24 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.diverge_cleanup_gen(true); let src_info = self.scopes.source_info(self.scopes.len(), self.fn_span); - let resume_block = self.resume_block(); - let mut scopes = self.scopes.iter_mut().peekable(); + let scopes = self.scopes.generator_drop_blocks(); let mut block = self.cfg.start_new_block(); let result = block; - while let Some(scope) = scopes.next() { - block = if let Some(b) = scope.cached_generator_drop { - self.cfg.terminate(block, src_info, - TerminatorKind::Goto { target: b }); + for scope in scopes { + if let Some(b) = *scope.cached_block { + self.cfg.terminate(block, src_info, TerminatorKind::Goto { target: b }); return Some(result); } else { let b = self.cfg.start_new_block(); - scope.cached_generator_drop = Some(b); - self.cfg.terminate(block, src_info, - TerminatorKind::Goto { target: b }); - b + *scope.cached_block = Some(b); + self.cfg.terminate(block, src_info, TerminatorKind::Goto { target: b }); }; - let unwind_to = scopes.peek().as_ref().map(|scope| { - scope.cached_unwind.get(true).unwrap_or_else(|| { - 
span_bug!(src_info.span, "cached block not present?") - }) - }).unwrap_or(resume_block); - unpack!(block = build_scope_drops( &mut self.cfg, self.is_generator, scope, - block, - unwind_to, self.arg_count, true, )); @@ -460,7 +463,7 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { None, hir::BodyOwnerKind::Closure | hir::BodyOwnerKind::Fn => - Some(self.scopes.topmost()), + Some(self.topmost_scope()), } } @@ -486,178 +489,8 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { self.schedule_drop(span, region_scope, local, place_ty, DropKind::Value); } - /// Indicates that `place` should be dropped on exit from - /// `region_scope`. - /// - /// When called with `DropKind::Storage`, `place` should be a local - /// with an index higher than the current `self.arg_count`. - pub fn schedule_drop( - &mut self, - span: Span, - region_scope: region::Scope, - local: Local, - place_ty: Ty<'tcx>, - drop_kind: DropKind, - ) { - let needs_drop = self.hir.needs_drop(place_ty); - match drop_kind { - DropKind::Value => if !needs_drop { return }, - DropKind::Storage => { - if local.index() <= self.arg_count { - span_bug!( - span, "`schedule_drop` called with local {:?} and arg_count {}", - local, - self.arg_count, - ) - } - } - } - - for scope in self.scopes.iter_mut() { - let this_scope = scope.region_scope == region_scope; - // When building drops, we try to cache chains of drops in such a way so these drops - // could be reused by the drops which would branch into the cached (already built) - // blocks. This, however, means that whenever we add a drop into a scope which already - // had some blocks built (and thus, cached) for it, we must invalidate all caches which - // might branch into the scope which had a drop just added to it. This is necessary, - // because otherwise some other code might use the cache to branch into already built - // chain of drops, essentially ignoring the newly added drop. - // - // For example consider there’s two scopes with a drop in each. These are built and - // thus the caches are filled: - // - // +--------------------------------------------------------+ - // | +---------------------------------+ | - // | | +--------+ +-------------+ | +---------------+ | - // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | | - // | | +--------+ +-------------+ | +---------------+ | - // | +------------|outer_scope cache|--+ | - // +------------------------------|middle_scope cache|------+ - // - // Now, a new, inner-most scope is added along with a new drop into both inner-most and - // outer-most scopes: - // - // +------------------------------------------------------------+ - // | +----------------------------------+ | - // | | +--------+ +-------------+ | +---------------+ | +-------------+ - // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) | - // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+ - // | | +-+ +-------------+ | | - // | +---|invalid outer_scope cache|----+ | - // +----=----------------|invalid middle_scope cache|-----------+ - // - // If, when adding `drop(new)` we do not invalidate the cached blocks for both - // outer_scope and middle_scope, then, when building drops for the inner (right-most) - // scope, the old, cached blocks, without `drop(new)` will get used, producing the - // wrong results. - // - // The cache and its invalidation for unwind branch is somewhat special. The cache is - // per-drop, rather than per scope, which has a several different implications. 
Adding - // a new drop into a scope will not invalidate cached blocks of the prior drops in the - // scope. That is true, because none of the already existing drops will have an edge - // into a block with the newly added drop. - // - // Note that this code iterates scopes from the inner-most to the outer-most, - // invalidating caches of each scope visited. This way bare minimum of the - // caches gets invalidated. i.e., if a new drop is added into the middle scope, the - // cache of outer scope stays intact. - scope.invalidate_cache(!needs_drop, self.is_generator, this_scope); - if this_scope { - if let DropKind::Value = drop_kind { - scope.needs_cleanup = true; - } - - let region_scope_span = region_scope.span(self.hir.tcx(), - &self.hir.region_scope_tree); - // Attribute scope exit drops to scope's closing brace. - let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); - - scope.drops.push(DropData { - span: scope_end, - local, - kind: drop_kind, - cached_block: CachedBlock::default(), - }); - return; - } - } - span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); - } - // Other // ===== - /// Branch based on a boolean condition. - /// - /// This is a special case because the temporary for the condition needs to - /// be dropped on both the true and the false arm. - pub fn test_bool( - &mut self, - mut block: BasicBlock, - condition: Expr<'tcx>, - source_info: SourceInfo, - ) -> (BasicBlock, BasicBlock) { - let cond = unpack!(block = self.as_local_operand(block, condition)); - let true_block = self.cfg.start_new_block(); - let false_block = self.cfg.start_new_block(); - let term = TerminatorKind::if_( - self.hir.tcx(), - cond.clone(), - true_block, - false_block, - ); - self.cfg.terminate(block, source_info, term); - - match cond { - // Don't try to drop a constant - Operand::Constant(_) => (), - // If constants and statics, we don't generate StorageLive for this - // temporary, so don't try to generate StorageDead for it either. - _ if self.local_scope().is_none() => (), - Operand::Copy(Place { - base: PlaceBase::Local(cond_temp), - projection: box [], - }) - | Operand::Move(Place { - base: PlaceBase::Local(cond_temp), - projection: box [], - }) => { - // Manually drop the condition on both branches. - let top_scope = self.scopes.scopes.last_mut().unwrap(); - let top_drop_data = top_scope.drops.pop().unwrap(); - - match top_drop_data.kind { - DropKind::Value { .. } => { - bug!("Drop scheduled on top of condition variable") - } - DropKind::Storage => { - let source_info = top_scope.source_info(top_drop_data.span); - let local = top_drop_data.local; - assert_eq!(local, cond_temp, "Drop scheduled on top of condition"); - self.cfg.push( - true_block, - Statement { - source_info, - kind: StatementKind::StorageDead(local) - }, - ); - self.cfg.push( - false_block, - Statement { - source_info, - kind: StatementKind::StorageDead(local) - }, - ); - } - } - - top_scope.invalidate_cache(true, self.is_generator, true); - } - _ => bug!("Expected as_local_operand to produce a temporary"), - } - - (true_block, false_block) - } - /// Creates a path that performs all required cleanup for unwinding. /// /// This path terminates in Resume. Returns the start of the path. 
@@ -696,17 +529,21 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Find the last cached block debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes); - let cached_cleanup = self.scopes.iter_mut().enumerate() - .find_map(|(idx, ref scope)| { - let cached_block = scope.cached_unwind.get(generator_drop)?; - Some((cached_block, idx)) - }); + let cached_cleanup = self.scopes.last_cached_unwind(generator_drop); let (mut target, first_uncached) = cached_cleanup - .unwrap_or_else(|| (self.resume_block(), self.scopes.len())); + .unwrap_or_else(|| (self.resume_block(), 0)); - for scope in self.scopes.top_scopes(first_uncached) { - target = build_diverge_scope(&mut self.cfg, scope.region_scope_span, - scope, target, generator_drop, self.is_generator); + let scopes = self.scopes.diverge_blocks(first_uncached, generator_drop); + for (source_scope, drops, cached_unwind) in scopes { + target = build_diverge_scope( + &mut self.cfg, + source_scope, + drops, + target, + generator_drop, + self.is_generator, + ); + *cached_unwind = Some(target); } target @@ -756,34 +593,19 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { success_block } - - // `match` arm scopes - // ================== - /// Unschedules any drops in the top scope. - /// - /// This is only needed for `match` arm scopes, because they have one - /// entrance per pattern, but only one exit. - pub(crate) fn clear_top_scope(&mut self, region_scope: region::Scope) { - let top_scope = self.scopes.scopes.last_mut().unwrap(); - - assert_eq!(top_scope.region_scope, region_scope); - - top_scope.drops.clear(); - top_scope.invalidate_cache(false, self.is_generator, true); - } } /// Builds drops for pop_scope and exit_scope. fn build_scope_drops<'tcx>( cfg: &mut CFG<'tcx>, is_generator: bool, - scope: &Scope, - mut block: BasicBlock, - last_unwind_to: BasicBlock, + scope: ScopeInfo<'_>, arg_count: usize, generator_drop: bool, ) -> BlockAnd<()> { - debug!("build_scope_drops({:?} -> {:?})", block, scope); + debug!("build_scope_drops({:?})", scope); + + let mut block = scope.cached_block.unwrap(); // Build up the drops in evaluation order. The end result will // look like: @@ -804,14 +626,14 @@ fn build_scope_drops<'tcx>( // drops for the unwind path should have already been generated by // `diverge_cleanup_gen`. 
- for drop_idx in (0..scope.drops.len()).rev() { - let drop_data = &scope.drops[drop_idx]; - let source_info = scope.source_info(drop_data.span); + let drops = scope.drops; + for (drop_idx, drop_data) in drops.iter().enumerate().rev() { + let source_info = SourceInfo { scope: scope.source_scope, span: drop_data.span }; let local = drop_data.local; match drop_data.kind { DropKind::Value => { - let unwind_to = get_unwind_to(scope, is_generator, drop_idx, generator_drop) - .unwrap_or(last_unwind_to); + let unwind_to = get_unwind_to(&drops[..drop_idx], is_generator, generator_drop) + .unwrap_or(scope.unwind_to); let next = cfg.start_new_block(); cfg.terminate(block, source_info, TerminatorKind::Drop { @@ -835,20 +657,13 @@ fn build_scope_drops<'tcx>( } fn get_unwind_to( - scope: &Scope, + drops: &[DropData], is_generator: bool, - unwind_from: usize, generator_drop: bool, ) -> Option { - for drop_idx in (0..unwind_from).rev() { - let drop_data = &scope.drops[drop_idx]; + for drop_data in drops.iter().rev() { match (is_generator, &drop_data.kind) { - (true, DropKind::Storage) => { - return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| { - span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) - })); - } - (false, DropKind::Value) => { + (true, DropKind::Storage) | (false, DropKind::Value) => { return Some(drop_data.cached_block.get(generator_drop).unwrap_or_else(|| { span_bug!(drop_data.span, "cached block not present for {:?}", drop_data) })); @@ -860,8 +675,8 @@ fn get_unwind_to( } fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, - span: Span, - scope: &mut Scope, + source_scope: SourceScope, + drops: &mut [DropData], mut target: BasicBlock, generator_drop: bool, is_generator: bool) @@ -877,7 +692,6 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, // remainder. If everything is cached, we'll just walk right to // left reading the cached results but never create anything. - let source_scope = scope.source_scope; let source_info = |span| SourceInfo { span, scope: source_scope @@ -892,8 +706,8 @@ fn build_diverge_scope<'tcx>(cfg: &mut CFG<'tcx>, // Build up the drops. Here we iterate the vector in // *forward* order, so that we generate drops[0] first (right to // left in diagram above). - debug!("build_diverge_scope({:?})", scope.drops); - for (j, drop_data) in scope.drops.iter_mut().enumerate() { + debug!("build_diverge_scope({:?})", drops); + for (j, drop_data) in drops.iter_mut().enumerate() { debug!("build_diverge_scope drop_data[{}]: {:?}", j, drop_data); // Only full value drops are emitted in the diverging path, // not StorageDead, except in the case of generators. 
@@ -948,10 +762,9 @@
         };
     }
     push_storage_deads(cfg, target, &mut storage_deads);
-    *scope.cached_unwind.ref_mut(generator_drop) = Some(target);
     assert!(storage_deads.is_empty());
 
-    debug!("build_diverge_scope({:?}, {:?}) = {:?}", scope, span, target);
+    debug!("build_diverge_scope({:?}) = {:?}", drops, target);
 
     target
 }
diff --git a/src/librustc_mir/build/scope/stack.rs b/src/librustc_mir/build/scope/stack.rs
index c56e853a4be24..29bb2b9da9a92 100644
--- a/src/librustc_mir/build/scope/stack.rs
+++ b/src/librustc_mir/build/scope/stack.rs
@@ -1,19 +1,24 @@
-use crate::build::scope::{BreakableScope, BreakableTarget, DropKind};
+use crate::build::scope::{BreakableScope, BreakableTarget, DropKind, DropData, ScopeInfo};
+use crate::build::{BlockAnd, Builder};
+use crate::hair::Expr;
 use rustc::middle::region;
-use rustc::mir::{BasicBlock, Local, Place, SourceScope, SourceInfo};
+use rustc::mir::{BasicBlock, Local, Operand, Place, PlaceBase, SourceScope};
+use rustc::mir::{SourceInfo, Statement, StatementKind, START_BLOCK, TerminatorKind};
+use rustc::ty::Ty;
 use syntax_pos::Span;
 use rustc_data_structures::fx::FxHashMap;
+use std::mem;
 
 #[derive(Debug)]
-crate struct Scope {
+struct Scope {
     /// The source scope this scope was created in.
-    pub(super) source_scope: SourceScope,
+    source_scope: SourceScope,
 
     /// the region span of this scope within source code.
-    pub(super) region_scope: region::Scope,
+    region_scope: region::Scope,
 
     /// the span of that region_scope
-    pub(super) region_scope_span: Span,
+    region_scope_span: Span,
 
     /// Whether there's anything to do for the cleanup path, that is,
     /// when unwinding through this scope. This includes destructors,
@@ -26,22 +31,22 @@ crate struct Scope {
     ///  * freeing up stack space has no effect during unwinding
     /// Note that for generators we do emit StorageDeads, for the
    /// use of optimizations in the MIR generator transform.
-    pub(super) needs_cleanup: bool,
+    needs_cleanup: bool,
 
     /// set of places to drop when exiting this scope. This starts
     /// out empty but grows as variables are declared during the
     /// building process. This is a stack, so we always drop from the
     /// end of the vector (top of the stack) first.
-    pub(super) drops: Vec<DropData>,
+    drops: Vec<DropData>,
 
     /// The cache for drop chain on “normal” exit into a particular BasicBlock.
-    pub(super) cached_exits: FxHashMap<(BasicBlock, region::Scope), BasicBlock>,
+    cached_exits: FxHashMap<(BasicBlock, region::Scope), Option<BasicBlock>>,
 
     /// The cache for drop chain on "generator drop" exit.
-    pub(super) cached_generator_drop: Option<BasicBlock>,
+    cached_generator_drop: Option<BasicBlock>,
 
     /// The cache for drop chain on "unwind" exit.
-    pub(super) cached_unwind: CachedBlock,
+    cached_unwind: CachedBlock,
 }
 
 impl Scope {
@@ -53,7 +58,7 @@ impl Scope {
     /// `storage_only` controls whether to invalidate only drop paths that run `StorageDead`.
     /// `this_scope_only` controls whether to invalidate only drop paths that refer to the current
     /// top-of-scope (as opposed to dependent scopes).
-    pub(super) fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
+    fn invalidate_cache(&mut self, storage_only: bool, is_generator: bool, this_scope_only: bool) {
         // FIXME: maybe do shared caching of `cached_exits` etc. to handle functions
         // with lots of `try!`?
 
@@ -76,7 +81,7 @@ impl Scope {
     }
 
     /// Given a span and this scope's source scope, make a SourceInfo.
-    pub(super) fn source_info(&self, span: Span) -> SourceInfo {
+    fn source_info(&self, span: Span) -> SourceInfo {
         SourceInfo {
             span,
             scope: self.source_scope
@@ -84,21 +89,6 @@ impl Scope {
     }
 }
 
-#[derive(Debug)]
-pub(super) struct DropData {
-    /// span where drop obligation was incurred (typically where place was declared)
-    pub(super) span: Span,
-
-    /// local to drop
-    pub(super) local: Local,
-
-    /// Whether this is a value Drop or a StorageDead.
-    pub(super) kind: DropKind,
-
-    /// The cached blocks for unwinds.
-    pub(super) cached_block: CachedBlock,
-}
-
 #[derive(Debug, Default, Clone, Copy)]
 pub(super) struct CachedBlock {
@@ -141,7 +131,7 @@
 
 #[derive(Debug, Default)]
 pub struct Scopes<'tcx> {
-    pub(super) scopes: Vec<Scope>,
+    scopes: Vec<Scope>,
     /// The current set of breakable scopes. See module comment for more details.
     pub(super) breakable_scopes: Vec<BreakableScope<'tcx>>,
 }
@@ -168,12 +158,12 @@ impl<'tcx> Scopes<'tcx> {
     pub(super) fn pop_scope(
         &mut self,
         region_scope: (region::Scope, SourceInfo),
-    ) -> (Scope, Option<BasicBlock>) {
+    ) -> (Vec<DropData>, Option<BasicBlock>, SourceScope) {
         let scope = self.scopes.pop().unwrap();
         assert_eq!(scope.region_scope, region_scope.0);
         let unwind_to = self.scopes.last()
             .and_then(|next_scope| next_scope.cached_unwind.get(false));
-        (scope, unwind_to)
+        (scope.drops, unwind_to, scope.source_scope)
     }
@@ -215,6 +205,8 @@ impl<'tcx> Scopes<'tcx> {
         }
     }
 
+    /// Get the number of scopes that are above the scope with the given
+    /// [region::Scope] (exclusive).
     pub(super) fn num_scopes_above(&self, region_scope: region::Scope, span: Span) -> usize {
         let scope_count = self.scopes.iter().rev()
             .position(|scope| scope.region_scope == region_scope)
@@ -226,22 +218,266 @@ impl<'tcx> Scopes<'tcx> {
         scope_count
     }
 
-    pub(super) fn iter_mut(&mut self) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
-        self.scopes.iter_mut().rev()
+    pub(super) fn last_cached_unwind(
+        &mut self,
+        generator_drop: bool,
+    ) -> Option<(BasicBlock, usize)> {
+        self.scopes.iter_mut().enumerate().rev().find_map(move |(idx, scope)| {
+            let cached_block = scope.cached_unwind.get(generator_drop)?;
+            Some((cached_block, idx + 1))
+        })
     }
 
-    pub(super) fn top_scopes(&mut self, count: usize) -> impl DoubleEndedIterator<Item = &mut Scope> + '_ {
-        let len = self.len();
-        self.scopes[len - count..].iter_mut()
+    pub(super) fn diverge_blocks(
+        &mut self,
+        start_at: usize,
+        generator_drop: bool,
+    ) -> impl Iterator<Item = (SourceScope, &mut [DropData], &mut Option<BasicBlock>)> {
+        self.scopes[start_at..].iter_mut().map(move |scope| {
+            (scope.source_scope, &mut *scope.drops, scope.cached_unwind.ref_mut(generator_drop))
+        })
     }
 
-    /// Returns the topmost active scope, which is known to be alive until
-    /// the next scope expression.
-    crate fn topmost(&self) -> region::Scope {
-        self.scopes.last().expect("topmost_scope: no scopes present").region_scope
+    /// Iterate over the [ScopeInfo] for exiting to the given target.
+    /// Scopes are iterated going down the scope stack.
+    pub(super) fn exit_blocks(
+        &mut self,
+        target: (BasicBlock, region::Scope),
+    ) -> impl Iterator<Item = ScopeInfo<'_>> {
+        let mut scopes = self.scopes.iter_mut().rev();
+        let top_scope = scopes.next().expect("Should have at least one scope");
+        scopes.scan(top_scope, move |scope, next_scope| {
+            let unwind_to = next_scope.cached_unwind.get(false).unwrap_or(START_BLOCK);
+            let current_scope = mem::replace(scope, next_scope);
+            Some(ScopeInfo {
+                source_scope: current_scope.source_scope,
+                drops: &*current_scope.drops,
+                cached_block: current_scope.cached_exits.entry(target).or_insert(None),
+                unwind_to,
+            })
+        })
+    }
+
+    /// Iterate over the [ScopeInfo] for dropping a suspended generator.
+    /// Scopes are iterated going down the scope stack.
+    pub(super) fn generator_drop_blocks(&mut self) -> impl Iterator<Item = ScopeInfo<'_>> {
+        // We don't return a ScopeInfo for the outermost scope, so ensure that
+        // it's empty.
+        assert!(self.scopes[0].drops.is_empty());
+        let mut scopes = self.scopes.iter_mut().rev();
+        let top_scope = scopes.next().unwrap();
+
+        scopes.scan(top_scope, move |scope, next_scope| {
+            let unwind_to = next_scope.cached_unwind.get(true)
+                .unwrap_or_else(|| bug!("cached block not present?"));
+            let current_scope = mem::replace(scope, next_scope);
+            Some(ScopeInfo {
+                source_scope: current_scope.source_scope,
+                drops: &*current_scope.drops,
+                cached_block: &mut current_scope.cached_generator_drop,
+                unwind_to,
+            })
+        })
     }
 
     pub(super) fn source_info(&self, index: usize, span: Span) -> SourceInfo {
         self.scopes[self.len() - index].source_info(span)
     }
 }
+
+impl<'tcx> Builder<'_, 'tcx> {
+    /// Returns the topmost active scope, which is known to be alive until
+    /// the next scope expression.
+    crate fn topmost_scope(&self) -> region::Scope {
+        self.scopes.scopes.last().expect("topmost_scope: no scopes present").region_scope
+    }
+
+    /// Indicates that `place` should be dropped on exit from
+    /// `region_scope`.
+    ///
+    /// When called with `DropKind::Storage`, `place` should be a local
+    /// with an index higher than the current `self.arg_count`.
+    crate fn schedule_drop(
+        &mut self,
+        span: Span,
+        region_scope: region::Scope,
+        local: Local,
+        place_ty: Ty<'tcx>,
+        drop_kind: DropKind,
+    ) {
+        match drop_kind {
+            DropKind::Value => if !self.hir.needs_drop(place_ty) { return },
+            DropKind::Storage => {
+                if local.index() <= self.arg_count {
+                    span_bug!(
+                        span, "`schedule_drop` called with local {:?} and arg_count {}",
+                        local,
+                        self.arg_count,
+                    )
+                }
+            }
+        }
+
+        let is_drop = match drop_kind {
+            DropKind::Value => true,
+            DropKind::Storage => false,
+        };
+        for scope in self.scopes.scopes.iter_mut().rev() {
+            let this_scope = scope.region_scope == region_scope;
+            // When building drops, we try to cache chains of drops in such a way so these drops
+            // could be reused by the drops which would branch into the cached (already built)
+            // blocks. This, however, means that whenever we add a drop into a scope which already
+            // had some blocks built (and thus, cached) for it, we must invalidate all caches which
+            // might branch into the scope which had a drop just added to it. This is necessary,
+            // because otherwise some other code might use the cache to branch into already built
+            // chain of drops, essentially ignoring the newly added drop.
+            //
+            // For example consider there’s two scopes with a drop in each.
These are built and + // thus the caches are filled: + // + // +--------------------------------------------------------+ + // | +---------------------------------+ | + // | | +--------+ +-------------+ | +---------------+ | + // | | | return | <-+ | drop(outer) | <-+ | drop(middle) | | + // | | +--------+ +-------------+ | +---------------+ | + // | +------------|outer_scope cache|--+ | + // +------------------------------|middle_scope cache|------+ + // + // Now, a new, inner-most scope is added along with a new drop into both inner-most and + // outer-most scopes: + // + // +------------------------------------------------------------+ + // | +----------------------------------+ | + // | | +--------+ +-------------+ | +---------------+ | +-------------+ + // | | | return | <+ | drop(new) | <-+ | drop(middle) | <--+| drop(inner) | + // | | +--------+ | | drop(outer) | | +---------------+ | +-------------+ + // | | +-+ +-------------+ | | + // | +---|invalid outer_scope cache|----+ | + // +----=----------------|invalid middle_scope cache|-----------+ + // + // If, when adding `drop(new)` we do not invalidate the cached blocks for both + // outer_scope and middle_scope, then, when building drops for the inner (right-most) + // scope, the old, cached blocks, without `drop(new)` will get used, producing the + // wrong results. + // + // The cache and its invalidation for unwind branch is somewhat special. The cache is + // per-drop, rather than per scope, which has a several different implications. Adding + // a new drop into a scope will not invalidate cached blocks of the prior drops in the + // scope. That is true, because none of the already existing drops will have an edge + // into a block with the newly added drop. + // + // Note that this code iterates scopes from the inner-most to the outer-most, + // invalidating caches of each scope visited. This way bare minimum of the + // caches gets invalidated. i.e., if a new drop is added into the middle scope, the + // cache of outer scope stays intact. + scope.invalidate_cache(!is_drop, self.is_generator, this_scope); + if this_scope { + if let DropKind::Value = drop_kind { + scope.needs_cleanup = true; + } + + let region_scope_span = region_scope.span( + self.hir.tcx(), + &self.hir.region_scope_tree, + ); + // Attribute scope exit drops to scope's closing brace. + let scope_end = self.hir.tcx().sess.source_map().end_point(region_scope_span); + + scope.drops.push(DropData { + span: scope_end, + local, + kind: drop_kind, + cached_block: CachedBlock::default(), + }); + return; + } + } + span_bug!(span, "region scope {:?} not in scope to drop {:?}", region_scope, local); + } + + // `match` arm scopes + // ================== + /// Unschedules any drops in the top scope. + /// + /// This is only needed for `match` arm scopes, because they have one + /// entrance per pattern, but only one exit. + crate fn clear_top_scope(&mut self, region_scope: region::Scope) { + let top_scope = self.scopes.scopes.last_mut().unwrap(); + + assert_eq!(top_scope.region_scope, region_scope); + + top_scope.drops.clear(); + top_scope.invalidate_cache(false, self.is_generator, true); + } + + /// Branch based on a boolean condition. + /// + /// This is a special case because the temporary for the condition needs to + /// be dropped on both the true and the false arm. 
+ pub fn test_bool( + &mut self, + mut block: BasicBlock, + condition: Expr<'tcx>, + source_info: SourceInfo, + ) -> (BasicBlock, BasicBlock) { + let cond = unpack!(block = self.as_local_operand(block, condition)); + let true_block = self.cfg.start_new_block(); + let false_block = self.cfg.start_new_block(); + let term = TerminatorKind::if_( + self.hir.tcx(), + cond.clone(), + true_block, + false_block, + ); + self.cfg.terminate(block, source_info, term); + + match cond { + // Don't try to drop a constant + Operand::Constant(_) => (), + // If constants and statics, we don't generate StorageLive for this + // temporary, so don't try to generate StorageDead for it either. + _ if self.local_scope().is_none() => (), + Operand::Copy(Place { + base: PlaceBase::Local(cond_temp), + projection: box [], + }) + | Operand::Move(Place { + base: PlaceBase::Local(cond_temp), + projection: box [], + }) => { + // Manually drop the condition on both branches. + let top_scope = self.scopes.scopes.last_mut().unwrap(); + let top_drop_data = top_scope.drops.pop().unwrap(); + + match top_drop_data.kind { + DropKind::Value { .. } => { + bug!("Drop scheduled on top of condition variable") + } + DropKind::Storage => { + let source_info = top_scope.source_info(top_drop_data.span); + let local = top_drop_data.local; + assert_eq!(local, cond_temp, "Drop scheduled on top of condition"); + self.cfg.push( + true_block, + Statement { + source_info, + kind: StatementKind::StorageDead(local) + }, + ); + self.cfg.push( + false_block, + Statement { + source_info, + kind: StatementKind::StorageDead(local) + }, + ); + } + } + + top_scope.invalidate_cache(true, self.is_generator, true); + } + _ => bug!("Expected as_local_operand to produce a temporary"), + } + + (true_block, false_block) + } +} From 4667439326e989586240a568c39212353e915947 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Sat, 17 Aug 2019 10:46:11 +0100 Subject: [PATCH 3/4] Get the type of a local from `local_decls` in `schedule_drop` Passing around a separate type is unnecessary and error-prone. 
--- src/librustc_mir/build/expr/as_rvalue.rs | 2 -- src/librustc_mir/build/expr/as_temp.rs | 2 -- src/librustc_mir/build/matches/mod.rs | 5 +---- src/librustc_mir/build/mod.rs | 4 ++-- src/librustc_mir/build/scope.rs | 6 ++---- src/librustc_mir/build/scope/stack.rs | 4 +--- 6 files changed, 6 insertions(+), 17 deletions(-) diff --git a/src/librustc_mir/build/expr/as_rvalue.rs b/src/librustc_mir/build/expr/as_rvalue.rs index 7dfe98cbebfc2..7e8c83fe9d3cb 100644 --- a/src/librustc_mir/build/expr/as_rvalue.rs +++ b/src/librustc_mir/build/expr/as_rvalue.rs @@ -128,7 +128,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { expr_span, scope, result, - expr.ty, ); } @@ -569,7 +568,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { upvar_span, temp_lifetime, temp, - upvar_ty, ); } diff --git a/src/librustc_mir/build/expr/as_temp.rs b/src/librustc_mir/build/expr/as_temp.rs index dbcc330eca382..18332ed68f8bd 100644 --- a/src/librustc_mir/build/expr/as_temp.rs +++ b/src/librustc_mir/build/expr/as_temp.rs @@ -103,7 +103,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { expr_span, temp_lifetime, temp, - expr_ty, DropKind::Storage, ); } @@ -117,7 +116,6 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { expr_span, temp_lifetime, temp, - expr_ty, DropKind::Value, ); } diff --git a/src/librustc_mir/build/matches/mod.rs b/src/librustc_mir/build/matches/mod.rs index 7a2cbb640515d..96b020c227b8f 100644 --- a/src/librustc_mir/build/matches/mod.rs +++ b/src/librustc_mir/build/matches/mod.rs @@ -535,21 +535,18 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { kind: StatementKind::StorageLive(local_id), }, ); - let var_ty = self.local_decls[local_id].ty; let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); - self.schedule_drop(span, region_scope, local_id, var_ty, DropKind::Storage); + self.schedule_drop(span, region_scope, local_id, DropKind::Storage); Place::from(local_id) } pub fn schedule_drop_for_binding(&mut self, var: HirId, span: Span, for_guard: ForGuard) { let local_id = self.var_local_id(var, for_guard); - let var_ty = self.local_decls[local_id].ty; let region_scope = self.hir.region_scope_tree.var_scope(var.local_id); self.schedule_drop( span, region_scope, local_id, - var_ty, DropKind::Value, ); } diff --git a/src/librustc_mir/build/mod.rs b/src/librustc_mir/build/mod.rs index 647d7515fe98d..3f4aacce9b6e0 100644 --- a/src/librustc_mir/build/mod.rs +++ b/src/librustc_mir/build/mod.rs @@ -812,12 +812,12 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // Function arguments always get the first Local indices after the return place let local = Local::new(index + 1); let place = Place::from(local); - let &ArgInfo(ty, opt_ty_info, arg_opt, ref self_binding) = arg_info; + let &ArgInfo(_, opt_ty_info, arg_opt, ref self_binding) = arg_info; // Make sure we drop (parts of) the argument even when not matched on. self.schedule_drop( arg_opt.as_ref().map_or(ast_body.span, |arg| arg.pat.span), - argument_scope, local, ty, DropKind::Value, + argument_scope, local, DropKind::Value, ); if let Some(arg) = arg_opt { diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index dbdd8cf5e6c49..6558b28d71050 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -85,7 +85,6 @@ should go to. 
use crate::build::{BlockAnd, BlockAndExtension, BlockFrame, Builder, CFG}; use crate::hair::{ExprRef, LintLevel}; use rustc::middle::region; -use rustc::ty::Ty; use rustc::hir; use rustc::mir::*; use syntax_pos::{DUMMY_SP, Span}; @@ -483,10 +482,9 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { span: Span, region_scope: region::Scope, local: Local, - place_ty: Ty<'tcx>, ) { - self.schedule_drop(span, region_scope, local, place_ty, DropKind::Storage); - self.schedule_drop(span, region_scope, local, place_ty, DropKind::Value); + self.schedule_drop(span, region_scope, local, DropKind::Storage); + self.schedule_drop(span, region_scope, local, DropKind::Value); } // Other diff --git a/src/librustc_mir/build/scope/stack.rs b/src/librustc_mir/build/scope/stack.rs index 29bb2b9da9a92..3dc7785fa02ea 100644 --- a/src/librustc_mir/build/scope/stack.rs +++ b/src/librustc_mir/build/scope/stack.rs @@ -4,7 +4,6 @@ use crate::hair::Expr; use rustc::middle::region; use rustc::mir::{BasicBlock, Local, Operand, Place, PlaceBase, SourceScope}; use rustc::mir::{SourceInfo, Statement, StatementKind, START_BLOCK, TerminatorKind}; -use rustc::ty::Ty; use syntax_pos::Span; use rustc_data_structures::fx::FxHashMap; use std::mem; @@ -301,11 +300,10 @@ impl<'tcx> Builder<'_, 'tcx> { span: Span, region_scope: region::Scope, local: Local, - place_ty: Ty<'tcx>, drop_kind: DropKind, ) { match drop_kind { - DropKind::Value => if !self.hir.needs_drop(place_ty) { return }, + DropKind::Value => if !self.hir.needs_drop(self.local_decls[local].ty) { return }, DropKind::Storage => { if local.index() <= self.arg_count { span_bug!( From 899d10e86b8844440c690c25a8b571d09ee0e0b8 Mon Sep 17 00:00:00 2001 From: Matthew Jasper Date: Fri, 6 Sep 2019 15:48:28 +0100 Subject: [PATCH 4/4] Merge `last_cached_unwind` and `diverge_blocks` --- src/librustc_mir/build/scope.rs | 50 +++++++++++++-------------- src/librustc_mir/build/scope/stack.rs | 45 +++++++++++++++--------- 2 files changed, 53 insertions(+), 42 deletions(-) diff --git a/src/librustc_mir/build/scope.rs b/src/librustc_mir/build/scope.rs index 6558b28d71050..f49d80532b23f 100644 --- a/src/librustc_mir/build/scope.rs +++ b/src/librustc_mir/build/scope.rs @@ -107,15 +107,14 @@ struct BreakableScope<'tcx> { region_scope: region::Scope, /// Where the body of the loop begins. `None` if block continue_block: Option, - /// Block to branch into when the loop or block terminates (either by being `break`-en out - /// from, or by having its condition to become false) + /// Block to branch into when the loop or block terminates (either by being + /// `break`-en out from, or by having its condition to become false) break_block: BasicBlock, - /// The destination of the loop/block expression itself (i.e., where to put the result of a - /// `break` expression) + /// The destination of the loop/block expression itself (i.e., where to put + /// the result of a `break` expression) break_destination: Place<'tcx>, } - #[derive(Debug)] struct DropData { /// span where drop obligation was incurred (typically where place was declared) @@ -131,7 +130,6 @@ struct DropData { cached_block: CachedBlock, } - /// The target of an expression that breaks out of a scope #[derive(Clone, Copy, Debug)] pub enum BreakableTarget { @@ -526,25 +524,27 @@ impl<'a, 'tcx> Builder<'a, 'tcx> { // to left reading the cached results but never created anything. 
        // Find the last cached block
-        debug!("diverge_cleanup_gen(self.scopes = {:?})", self.scopes);
-        let cached_cleanup = self.scopes.last_cached_unwind(generator_drop);
-        let (mut target, first_uncached) = cached_cleanup
-            .unwrap_or_else(|| (self.resume_block(), 0));
-
-        let scopes = self.scopes.diverge_blocks(first_uncached, generator_drop);
-        for (source_scope, drops, cached_unwind) in scopes {
-            target = build_diverge_scope(
-                &mut self.cfg,
-                source_scope,
-                drops,
-                target,
-                generator_drop,
-                self.is_generator,
-            );
-            *cached_unwind = Some(target);
-        }
-
-        target
+        debug!("diverge_cleanup_gen(self.scopes = {:#?})", self.scopes);
+        let resume_block = self.resume_block();
+        let cfg = &mut self.cfg;
+        let is_generator = self.is_generator;
+
+        self.scopes.for_each_diverge_block(
+            generator_drop,
+            resume_block,
+            |source_scope, drops, cached_unwind, mut target| {
+                target = build_diverge_scope(
+                    cfg,
+                    source_scope,
+                    drops,
+                    target,
+                    generator_drop,
+                    is_generator,
+                );
+                *cached_unwind = Some(target);
+                target
+            }
+        )
     }
 
     /// Utility function for *non*-scope code to build their own drops
diff --git a/src/librustc_mir/build/scope/stack.rs b/src/librustc_mir/build/scope/stack.rs
index 3dc7785fa02ea..c7004a3221f59 100644
--- a/src/librustc_mir/build/scope/stack.rs
+++ b/src/librustc_mir/build/scope/stack.rs
@@ -140,7 +140,11 @@ impl<'tcx> Scopes<'tcx> {
         self.scopes.len()
     }
 
-    pub(super) fn push_scope(&mut self, region_scope: (region::Scope, SourceInfo), vis_scope: SourceScope) {
+    pub(super) fn push_scope(
+        &mut self,
+        region_scope: (region::Scope, SourceInfo),
+        vis_scope: SourceScope,
+    ) {
         debug!("push_scope({:?})", region_scope);
         self.scopes.push(Scope {
             source_scope: vis_scope,
@@ -217,24 +221,31 @@ impl<'tcx> Scopes<'tcx> {
         scope_count
     }
 
-    pub(super) fn last_cached_unwind(
-        &mut self,
-        generator_drop: bool,
-    ) -> Option<(BasicBlock, usize)> {
-        self.scopes.iter_mut().enumerate().rev().find_map(move |(idx, scope)| {
-            let cached_block = scope.cached_unwind.get(generator_drop)?;
-            Some((cached_block, idx + 1))
-        })
-    }
-
-    pub(super) fn diverge_blocks(
+    /// Call the given action for each scope that doesn't already have a cached
+    /// unwind block. Scopes are iterated going up the scope stack.
+    pub(super) fn for_each_diverge_block<F>(
         &mut self,
-        start_at: usize,
         generator_drop: bool,
-    ) -> impl Iterator<Item = (SourceScope, &mut [DropData], &mut Option<BasicBlock>)> {
-        self.scopes[start_at..].iter_mut().map(move |scope| {
-            (scope.source_scope, &mut *scope.drops, scope.cached_unwind.ref_mut(generator_drop))
-        })
+        resume_block: BasicBlock,
+        mut action: F,
+    ) -> BasicBlock
+    where
+        F: FnMut(SourceScope, &mut [DropData], &mut Option<BasicBlock>, BasicBlock) -> BasicBlock
+    {
+        let (mut target, start_at) = self.scopes.iter().enumerate().rev()
+            .find_map(move |(idx, scope)| {
+                let cached_block = scope.cached_unwind.get(generator_drop)?;
+                Some((cached_block, idx + 1))
+            }).unwrap_or((resume_block, 0));
+        self.scopes[start_at..].iter_mut().for_each(|scope| {
+            target = action(
+                scope.source_scope,
+                &mut *scope.drops,
+                scope.cached_unwind.ref_mut(generator_drop),
+                target,
+            )
+        });
+        target
     }
 
     /// Iterate over the [ScopeInfo] for exiting to the given target.