diff --git a/src/librustc/mir/repr.rs b/src/librustc/mir/repr.rs
index f511d820fac58..0fe4d870887ef 100644
--- a/src/librustc/mir/repr.rs
+++ b/src/librustc/mir/repr.rs
@@ -21,6 +21,8 @@ use ty::{self, AdtDef, ClosureSubsts, Region, Ty};
 use util::ppaux;
 use rustc_back::slice;
 use hir::InlineAsm;
+use hir::BinOp_ as HirBinOp;
+use hir::UnOp as HirUnOp;
 use std::ascii;
 use std::borrow::{Cow};
 use std::cell::Ref;
@@ -36,7 +38,7 @@ use super::cache::Cache;
 macro_rules! newtype_index {
     ($name:ident, $debug_name:expr) => (
         #[derive(Copy, Clone, PartialEq, Eq, Hash, PartialOrd, Ord,
-         RustcEncodable, RustcDecodable)]
+                 RustcEncodable, RustcDecodable)]
         pub struct $name(u32);
 
         impl Idx for $name {
@@ -393,7 +395,7 @@ pub enum TerminatorKind<'tcx> {
     /// to one of the targets, and otherwise fallback to `otherwise`
     SwitchInt {
         /// discriminant value being tested
-        discr: Lvalue<'tcx>,
+        discr: Operand<'tcx>,
 
         /// type of value being tested
         switch_ty: Ty<'tcx>,
@@ -725,7 +727,7 @@ newtype_index!(Local, "local");
 
 /// A path to a value; something that can be evaluated without
 /// changing or disturbing program state.
-#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
+#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub enum Lvalue<'tcx> {
     /// local variable declared by the user
     Var(Var),
@@ -747,6 +749,16 @@ pub enum Lvalue<'tcx> {
     Projection(Box<LvalueProjection<'tcx>>),
 }
 
+impl<'tcx> Lvalue<'tcx> {
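+    /// Returns the base of this lvalue with every projection stripped; for
+    /// example, for `(*x.field)[idx]` this walks through each `Projection`
+    /// layer and returns the lvalue for `x` itself.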
+    pub fn base(&self) -> &Lvalue<'tcx> {
+        let mut lval = self;
+        while let Lvalue::Projection(ref proj) = *lval {
+            lval = &proj.base;
+        }
+        lval
+    }
+}
+
 /// The `Projection` data structure defines things of the form `B.x`
 /// or `*B` or `B[index]`. Note that it is parameterized because it is
 /// shared between `Constant` and `Lvalue`. See the aliases
@@ -883,7 +895,7 @@ pub struct VisibilityScopeData {
 /// These are values that can appear inside an rvalue (or an index
 /// lvalue). They are intentionally limited to prevent rvalues from
 /// being nested in one another.
-#[derive(Clone, PartialEq, RustcEncodable, RustcDecodable)]
+#[derive(Clone, PartialEq, Eq, Hash, RustcEncodable, RustcDecodable)]
 pub enum Operand<'tcx> {
     Consume(Lvalue<'tcx>),
     Constant(Constant<'tcx>),
@@ -1010,6 +1022,27 @@ impl BinOp {
             _ => false
         }
     }
+
+    pub fn to_hir_binop(self) -> HirBinOp {
+        match self {
+            BinOp::Add => HirBinOp::BiAdd,
+            BinOp::Sub => HirBinOp::BiSub,
+            BinOp::Mul => HirBinOp::BiMul,
+            BinOp::Div => HirBinOp::BiDiv,
+            BinOp::Rem => HirBinOp::BiRem,
+            BinOp::BitXor => HirBinOp::BiBitXor,
+            BinOp::BitAnd => HirBinOp::BiBitAnd,
+            BinOp::BitOr => HirBinOp::BiBitOr,
+            BinOp::Shl => HirBinOp::BiShl,
+            BinOp::Shr => HirBinOp::BiShr,
+            BinOp::Eq => HirBinOp::BiEq,
+            BinOp::Ne => HirBinOp::BiNe,
+            BinOp::Lt => HirBinOp::BiLt,
+            BinOp::Gt => HirBinOp::BiGt,
+            BinOp::Le => HirBinOp::BiLe,
+            BinOp::Ge => HirBinOp::BiGe
+        }
+    }
 }
 
 #[derive(Copy, Clone, Debug, PartialEq, Eq, RustcEncodable, RustcDecodable)]
@@ -1020,6 +1053,32 @@ pub enum UnOp {
     Neg,
 }
 
+impl UnOp {
+    pub fn to_hir_unop(self) -> HirUnOp {
+        match self {
+            UnOp::Not => HirUnOp::UnNot,
+            UnOp::Neg => HirUnOp::UnNeg,
+        }
+    }
+}
+
+impl<'tcx> Rvalue<'tcx> {
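+    /// Returns true if evaluating this rvalue has no observable side effect:
+    /// no inline assembly, no allocation (`Box`) and no overflow assertion
+    /// (`CheckedBinaryOp`), so unused results of such rvalues may be deleted.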
+    pub fn is_pure(&self) -> bool {
+        use self::Rvalue::*;
+        match *self {
+            // Arbitrary side effects
+            InlineAsm { .. } |
+            // Side effect: allocation
+            Box(_) |
+            // Side effect: assertion
+            CheckedBinaryOp(..) => false,
+            // No side effects
+            Use(_) | Repeat(..) | Len(_) | Cast(..) | BinaryOp(..) | UnaryOp(..) |
+            Ref(..) | Aggregate(..) => true
+        }
+    }
+}
+
 impl<'tcx> Debug for Rvalue<'tcx> {
     fn fmt(&self, fmt: &mut Formatter) -> fmt::Result {
         use self::Rvalue::*;
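
`is_pure` gives later passes a conservative test for whether a computation can be deleted outright. A minimal sketch of how a dead-code style pass (such as the `DeadCode` pass wired up in driver.rs below) might consult it; `can_delete` and the `is_live` liveness query are hypothetical and not part of this patch:

```rust
use rustc::mir::repr::{Lvalue, Statement, StatementKind};

fn can_delete<'tcx, F>(statement: &Statement<'tcx>, is_live: F) -> bool
    where F: Fn(&Lvalue<'tcx>) -> bool
{
    match statement.kind {
        // Only drop an assignment if nobody reads the destination and
        // evaluating the rvalue has no side effects (no allocation, no
        // overflow assertion, no inline asm).
        StatementKind::Assign(ref dest, ref rvalue) => !is_live(dest) && rvalue.is_pure(),
        // Leave discriminant writes and storage markers alone.
        _ => false,
    }
}
```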
diff --git a/src/librustc/mir/tcx.rs b/src/librustc/mir/tcx.rs
index cf91229f1c713..0670603ae9ed4 100644
--- a/src/librustc/mir/tcx.rs
+++ b/src/librustc/mir/tcx.rs
@@ -247,26 +247,3 @@ impl BorrowKind {
         }
     }
 }
-
-impl BinOp {
-    pub fn to_hir_binop(self) -> hir::BinOp_ {
-        match self {
-            BinOp::Add => hir::BinOp_::BiAdd,
-            BinOp::Sub => hir::BinOp_::BiSub,
-            BinOp::Mul => hir::BinOp_::BiMul,
-            BinOp::Div => hir::BinOp_::BiDiv,
-            BinOp::Rem => hir::BinOp_::BiRem,
-            BinOp::BitXor => hir::BinOp_::BiBitXor,
-            BinOp::BitAnd => hir::BinOp_::BiBitAnd,
-            BinOp::BitOr => hir::BinOp_::BiBitOr,
-            BinOp::Shl => hir::BinOp_::BiShl,
-            BinOp::Shr => hir::BinOp_::BiShr,
-            BinOp::Eq => hir::BinOp_::BiEq,
-            BinOp::Ne => hir::BinOp_::BiNe,
-            BinOp::Lt => hir::BinOp_::BiLt,
-            BinOp::Gt => hir::BinOp_::BiGt,
-            BinOp::Le => hir::BinOp_::BiLe,
-            BinOp::Ge => hir::BinOp_::BiGe
-        }
-    }
-}
diff --git a/src/librustc/mir/transform.rs b/src/librustc/mir/transform.rs
index 57601e6750432..1a65619542831 100644
--- a/src/librustc/mir/transform.rs
+++ b/src/librustc/mir/transform.rs
@@ -15,7 +15,10 @@ use mir::mir_map::MirMap;
 use mir::repr::{Mir, Promoted};
 use ty::TyCtxt;
 use syntax::ast::NodeId;
+use util::common::time;
+use session::Session;
 
+use std::borrow::Cow;
 use std::fmt;
 
 /// Where a specific Mir comes from.
@@ -72,14 +75,19 @@ impl<'a, 'tcx> MirSource {
 /// Various information about pass.
 pub trait Pass {
     // fn should_run(Session) to check if pass should run?
-    fn name(&self) -> &str {
+    fn name<'a>(&self) -> Cow<'static, str> {
         let name = unsafe { ::std::intrinsics::type_name::<Self>() };
         if let Some(tail) = name.rfind(":") {
-            &name[tail+1..]
+            Cow::from(&name[tail+1..])
         } else {
-            name
+            Cow::from(name)
         }
     }
+    fn should_run(&self, session: &Session) -> bool {
+        // Always run passes by default unless MIR passes have been explicitly turned off with -Z
+        // mir-opt-level=0
+        session.opts.mir_opt_level >= 1
+    }
     fn disambiguator<'a>(&'a self) -> Option<Box<fmt::Display+'a>> { None }
 }
 
@@ -162,11 +170,12 @@ impl<'a, 'tcx> Passes {
     }
 
     pub fn run_passes(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, map: &mut MirMap<'tcx>) {
-        for pass in &mut self.plugin_passes {
-            pass.run_pass(tcx, map, &mut self.pass_hooks);
-        }
-        for pass in &mut self.passes {
-            pass.run_pass(tcx, map, &mut self.pass_hooks);
+        let Passes { ref mut passes, ref mut plugin_passes, ref mut pass_hooks } = *self;
+        for pass in plugin_passes.iter_mut().chain(passes.iter_mut()) {
+            let name = pass.name();
+            if pass.should_run(&tcx.sess) {
+                time(tcx.sess.time_passes(), &*name, || pass.run_pass(tcx, map, pass_hooks));
+            }
         }
     }
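
With `should_run` available on `Pass`, individual passes can gate themselves on the session instead of being filtered by the driver. A minimal sketch of a pass that only runs at `-Z mir-opt-level=2` or higher; the pass `AggressiveOnly` and its empty body are hypothetical:

```rust
use rustc::mir::repr::Mir;
use rustc::mir::transform::{MirPass, MirSource, Pass};
use rustc::session::Session;
use rustc::ty::TyCtxt;

pub struct AggressiveOnly;

impl Pass for AggressiveOnly {
    // Skip this pass below mir-opt-level=2; `name` keeps the
    // type-name based default from the trait.
    fn should_run(&self, session: &Session) -> bool {
        session.opts.mir_opt_level >= 2
    }
}

impl<'tcx> MirPass<'tcx> for AggressiveOnly {
    fn run_pass<'a>(&mut self, _tcx: TyCtxt<'a, 'tcx, 'tcx>, _src: MirSource, _mir: &mut Mir<'tcx>) {
        // transformation body omitted in this sketch
    }
}
```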
 
diff --git a/src/librustc/mir/visit.rs b/src/librustc/mir/visit.rs
index ead8de86dbae4..5aa5526d4fb52 100644
--- a/src/librustc/mir/visit.rs
+++ b/src/librustc/mir/visit.rs
@@ -7,6 +7,69 @@
 // <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
+//! # The MIR Visitor
+//!
+//! ## Overview
+//!
+//! There are two visitors, one for immutable and one for mutable references,
+//! but both are generated by the following macro. The code is written according
+//! to the following conventions:
+//!
+//! - introduce a `visit_foo` and a `super_foo` method for every MIR type
+//! - `visit_foo`, by default, calls `super_foo`
+//! - `super_foo`, by default, destructures the `foo` and calls `visit_foo`
+//!
+//! This allows you as a user to override `visit_foo` for the types you
+//! are interested in, and to call (within that method)
+//! `self.super_foo` to get the default behavior. Just as in an OO
+//! language, you should never call `super` methods ordinarily except
+//! in that circumstance.
+//!
+//! For the most part, we do not destructure things external to the
+//! MIR, e.g. types, spans, etc, but simply visit them and stop. This
+//! avoids duplication with other visitors like `TypeFoldable`. But
+//! there is one exception: we do destructure the `FnOutput` to reach
+//! the type within. Just because.
+//!
+//! ## Updating
+//!
+//! The code is written in a very deliberate style intended to minimize
+//! the chance of things being overlooked. You'll notice that we always
+//! use pattern matching to reference fields and we ensure that all
+//! matches are exhaustive.
+//!
+//! For example, the `super_basic_block_data` method begins like this:
+//!
+//! ```rust
+//! fn super_basic_block_data(&mut self,
+//!                           block: BasicBlock,
+//!                           data: & $($mutability)* BasicBlockData<'tcx>) {
+//!     let BasicBlockData {
+//!         ref $($mutability)* statements,
+//!         ref $($mutability)* terminator,
+//!         is_cleanup: _
+//!     } = *data;
+//!
+//!     for statement in statements {
+//!         self.visit_statement(block, statement);
+//!     }
+//!
+//!     ...
+//! }
+//! ```
+//!
+//! Here we used `let BasicBlockData { <fields> } = *data` deliberately,
+//! rather than writing `data.statements` in the body. This is because if one
+//! adds a new field to `BasicBlockData`, one will be forced to revise this code,
+//! and hence one will (hopefully) invoke the correct visit methods (if any).
+//!
+//! For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS.
+//! That means you never write `..` to skip over fields, nor do you write `_`
+//! to skip over variants in a `match`.
+//!
+//! The only place that `_` is acceptable is to match a field (or
+//! variant argument) that does not require visiting, as in
+//! `is_cleanup` above.
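+//!
+//! As an illustration, a visitor that counts consuming uses of lvalues (the
+//! `UseCounter` type here is hypothetical) overrides `visit_lvalue` and then
+//! delegates to `super_lvalue` to keep the default traversal:
+//!
+//! ```rust
+//! struct UseCounter { uses: usize }
+//!
+//! impl<'tcx> Visitor<'tcx> for UseCounter {
+//!     fn visit_lvalue(&mut self, lvalue: &Lvalue<'tcx>, context: LvalueContext) {
+//!         if context == LvalueContext::Consume {
+//!             self.uses += 1;
+//!         }
+//!         // keep visiting the base and any projections
+//!         self.super_lvalue(lvalue, context);
+//!     }
+//! }
+//! ```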
 
 use middle::const_val::ConstVal;
 use hir::def_id::DefId;
@@ -18,68 +81,6 @@ use rustc_data_structures::tuple_slice::TupleSlice;
 use rustc_data_structures::indexed_vec::Idx;
 use syntax_pos::Span;
 
-// # The MIR Visitor
-//
-// ## Overview
-//
-// There are two visitors, one for immutable and one for mutable references,
-// but both are generated by the following macro. The code is written according
-// to the following conventions:
-//
-// - introduce a `visit_foo` and a `super_foo` method for every MIR type
-// - `visit_foo`, by default, calls `super_foo`
-// - `super_foo`, by default, destructures the `foo` and calls `visit_foo`
-//
-// This allows you as a user to override `visit_foo` for types are
-// interested in, and invoke (within that method) call
-// `self.super_foo` to get the default behavior. Just as in an OO
-// language, you should never call `super` methods ordinarily except
-// in that circumstance.
-//
-// For the most part, we do not destructure things external to the
-// MIR, e.g. types, spans, etc, but simply visit them and stop. This
-// avoids duplication with other visitors like `TypeFoldable`.
-//
-// ## Updating
-//
-// The code is written in a very deliberate style intended to minimize
-// the chance of things being overlooked. You'll notice that we always
-// use pattern matching to reference fields and we ensure that all
-// matches are exhaustive.
-//
-// For example, the `super_basic_block_data` method begins like this:
-//
-// ```rust
-// fn super_basic_block_data(&mut self,
-//                           block: BasicBlock,
-//                           data: & $($mutability)* BasicBlockData<'tcx>) {
-//     let BasicBlockData {
-//         ref $($mutability)* statements,
-//         ref $($mutability)* terminator,
-//         is_cleanup: _
-//     } = *data;
-//
-//     for statement in statements {
-//         self.visit_statement(block, statement);
-//     }
-//
-//     ...
-// }
-// ```
-//
-// Here we used `let BasicBlockData { <fields> } = *data` deliberately,
-// rather than writing `data.statements` in the body. This is because if one
-// adds a new field to `BasicBlockData`, one will be forced to revise this code,
-// and hence one will (hopefully) invoke the correct visit methods (if any).
-//
-// For this to work, ALL MATCHES MUST BE EXHAUSTIVE IN FIELDS AND VARIANTS.
-// That means you never write `..` to skip over fields, nor do you write `_`
-// to skip over variants in a `match`.
-//
-// The only place that `_` is acceptable is to match a field (or
-// variant argument) that does not require visiting, as in
-// `is_cleanup` above.
-
 macro_rules! make_mir_visitor {
     ($visitor_trait_name:ident, $($mutability:ident)*) => {
         pub trait $visitor_trait_name<'tcx> {
@@ -377,7 +378,7 @@ macro_rules! make_mir_visitor {
                                                 ref $($mutability)* switch_ty,
                                                 ref $($mutability)* values,
                                                 ref targets } => {
-                        self.visit_lvalue(discr, LvalueContext::Inspect);
+                        self.visit_operand(discr);
                         self.visit_ty(switch_ty);
                         for value in values {
                             self.visit_const_val(value);
@@ -419,7 +420,7 @@ macro_rules! make_mir_visitor {
                             self.visit_operand(arg);
                         }
                         if let Some((ref $($mutability)* destination, target)) = *destination {
-                            self.visit_lvalue(destination, LvalueContext::Call);
+                            self.visit_lvalue(destination, LvalueContext::CallStore);
                             self.visit_branch(block, target);
                         }
                         cleanup.map(|t| self.visit_branch(block, t));
@@ -529,7 +530,7 @@ macro_rules! make_mir_visitor {
                                         ref $($mutability)* inputs,
                                         asm: _ } => {
                         for output in & $($mutability)* outputs[..] {
-                            self.visit_lvalue(output, LvalueContext::Store);
+                            self.visit_lvalue(output, LvalueContext::AsmOutput);
                         }
                         for input in & $($mutability)* inputs[..] {
                             self.visit_operand(input);
@@ -723,33 +724,38 @@ macro_rules! make_mir_visitor {
 make_mir_visitor!(Visitor,);
 make_mir_visitor!(MutVisitor,mut);
 
-#[derive(Copy, Clone, Debug)]
+#[derive(Copy, Clone, Debug, PartialEq, Eq)]
 pub enum LvalueContext {
-    // Appears as LHS of an assignment
+    /// Appears as LHS of an assignment
     Store,
 
-    // Dest of a call
-    Call,
+    /// Dest of a call
+    CallStore,
 
-    // Being dropped
+    /// Being dropped
     Drop,
 
-    // Being inspected in some way, like loading a len
+    /// Being inspected in some way, like loading a len
     Inspect,
 
-    // Being borrowed
+    /// Being borrowed
     Borrow { region: Region, kind: BorrowKind },
 
-    // Being sliced -- this should be same as being borrowed, probably
+    /// Being sliced -- this should be same as being borrowed, probably
     Slice { from_start: usize, from_end: usize },
 
-    // Used as base for another lvalue, e.g. `x` in `x.y`
+    /// Used as base for another lvalue, e.g. `x` in `x.y`
     Projection,
 
-    // Consumed as part of an operand
+    /// Consumed as part of an operand
     Consume,
 
-    // Starting and ending a storage live range
+    /// Starting and ending a storage live range
     StorageLive,
     StorageDead,
+
+    /// “output” from inline asm.
+    ///
+    /// Cannot be interpreted as a store, because assembly may choose to ignore it.
+    AsmOutput
 }
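
Since call destinations and inline-asm outputs now carry their own contexts, analyses can decide per context whether an lvalue is definitely written. A hedged sketch; `is_definite_write` is illustrative and not part of this patch:

```rust
use rustc::mir::visit::LvalueContext;

fn is_definite_write(context: LvalueContext) -> bool {
    match context {
        // A plain assignment always writes its destination; a call writes
        // its destination once it returns to the target block.
        LvalueContext::Store | LvalueContext::CallStore => true,
        // Inline asm may ignore its outputs, so this is not a guaranteed
        // write, and the remaining contexts only read, borrow or drop.
        LvalueContext::AsmOutput |
        LvalueContext::Drop |
        LvalueContext::Inspect |
        LvalueContext::Borrow { .. } |
        LvalueContext::Slice { .. } |
        LvalueContext::Projection |
        LvalueContext::Consume |
        LvalueContext::StorageLive |
        LvalueContext::StorageDead => false,
    }
}
```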
diff --git a/src/librustc/session/config.rs b/src/librustc/session/config.rs
index e988ddcd97b15..aa1e506ccedd4 100644
--- a/src/librustc/session/config.rs
+++ b/src/librustc/session/config.rs
@@ -67,6 +67,12 @@ pub enum DebugInfoLevel {
     FullDebugInfo,
 }
 
+impl DebugInfoLevel {
+    pub fn is_enabled(&self) -> bool {
+        if let NoDebugInfo = *self { false } else { true }
+    }
+}
+
 #[derive(Clone, Copy, PartialEq, Eq, Hash, Debug, PartialOrd, Ord,
          RustcEncodable, RustcDecodable)]
 pub enum OutputType {
@@ -1334,8 +1340,6 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches)
         debugging_opts.orbit = true;
     }
 
-    let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(1);
-
     let mut output_types = BTreeMap::new();
     if !debugging_opts.parse_only {
         for list in matches.opt_strs("emit") {
@@ -1438,6 +1442,17 @@ pub fn build_session_options_and_crate_config(matches: &getopts::Matches)
             }
         }
     };
+    // MIR opt level defaults to opt-level + 1 (mir-opt-level=0 is considered to disable MIR
+    // passes)
+    let mir_opt_level = debugging_opts.mir_opt_level.unwrap_or(match opt_level {
+        OptLevel::No => 1,
+        OptLevel::Less => 2,
+        OptLevel::Default => 3,
+        OptLevel::Aggressive => 4,
+        OptLevel::Size => 1,
+        OptLevel::SizeMin => 1,
+    });
+
     let debug_assertions = cg.debug_assertions.unwrap_or(opt_level == OptLevel::No);
     let debuginfo = if matches.opt_present("g") {
         if cg.debuginfo.is_some() {
diff --git a/src/librustc_borrowck/borrowck/mir/gather_moves.rs b/src/librustc_borrowck/borrowck/mir/gather_moves.rs
index 8ae40e71bee58..15ec628bb39fa 100644
--- a/src/librustc_borrowck/borrowck/mir/gather_moves.rs
+++ b/src/librustc_borrowck/borrowck/mir/gather_moves.rs
@@ -616,10 +616,8 @@ fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> MoveD
                 }
                 StatementKind::StorageLive(_) |
                 StatementKind::StorageDead(_) => {}
-                StatementKind::SetDiscriminant{ .. } => {
-                    span_bug!(stmt.source_info.span,
-                              "SetDiscriminant should not exist during borrowck");
-                }
+                StatementKind::SetDiscriminant{ .. } =>
+                    bug!("SetDiscriminant and drop elaboration are incompatible")
             }
         }
 
@@ -658,13 +656,12 @@ fn gather_moves<'a, 'tcx>(mir: &Mir<'tcx>, tcx: TyCtxt<'a, 'tcx, 'tcx>) -> MoveD
                 }
             }
 
-            TerminatorKind::SwitchInt { switch_ty: _, values: _, targets: _, ref discr } |
-            TerminatorKind::Switch { adt_def: _, targets: _, ref discr } => {
+            TerminatorKind::SwitchInt { switch_ty: _, values: _, targets: _, discr: _ } |
+            TerminatorKind::Switch { adt_def: _, targets: _, discr: _ } => {
                 // The `discr` is not consumed; that is instead
                 // encoded on specific match arms (and for
                 // SwitchInt`, it is always a copyable integer
                 // type anyway).
-                let _ = discr;
             }
 
             TerminatorKind::Drop { ref location, target: _, unwind: _ } => {
diff --git a/src/librustc_borrowck/borrowck/mir/mod.rs b/src/librustc_borrowck/borrowck/mir/mod.rs
index dbee0ea9b00e9..545caf4f5ebdc 100644
--- a/src/librustc_borrowck/borrowck/mir/mod.rs
+++ b/src/librustc_borrowck/borrowck/mir/mod.rs
@@ -369,9 +369,6 @@ fn drop_flag_effects_for_location<'a, 'tcx, F>(
     let block = &mir[loc.block];
     match block.statements.get(loc.index) {
         Some(stmt) => match stmt.kind {
-            repr::StatementKind::SetDiscriminant{ .. } => {
-                span_bug!(stmt.source_info.span, "SetDiscrimant should not exist during borrowck");
-            }
             repr::StatementKind::Assign(ref lvalue, _) => {
                 debug!("drop_flag_effects: assignment {:?}", stmt);
                  on_all_children_bits(tcx, mir, move_data,
@@ -380,6 +377,8 @@ fn drop_flag_effects_for_location<'a, 'tcx, F>(
             }
             repr::StatementKind::StorageLive(_) |
             repr::StatementKind::StorageDead(_) => {}
+            repr::StatementKind::SetDiscriminant{ .. } =>
+                bug!("SetDiscriminant and drop elaboration are incompatible")
         },
         None => {
             debug!("drop_flag_effects: replace {:?}", block.terminator());
diff --git a/src/librustc_const_eval/eval.rs b/src/librustc_const_eval/eval.rs
index d71add3258fbd..1f708ecf834fd 100644
--- a/src/librustc_const_eval/eval.rs
+++ b/src/librustc_const_eval/eval.rs
@@ -48,10 +48,10 @@ use rustc_const_math::*;
 use rustc_errors::DiagnosticBuilder;
 
 macro_rules! math {
-    ($e:expr, $op:expr) => {
+    ($span:expr, $op:expr) => {
         match $op {
             Ok(val) => val,
-            Err(e) => signal!($e, Math(e)),
+            Err(e) => signal!($span, Math(e)),
         }
     }
 }
@@ -582,11 +582,84 @@ impl<'tcx> EvalHint<'tcx> {
 }
 
 macro_rules! signal {
-    ($e:expr, $exn:expr) => {
-        return Err(ConstEvalErr { span: $e.span, kind: $exn })
+    ($span:expr, $exn:expr) => {
+        return Err(ConstEvalErr { span: $span, kind: $exn })
     }
 }
 
+pub fn eval_const_binop(op: hir::BinOp_, l: &ConstVal, r: &ConstVal, span: Span) -> EvalResult {
+    Ok(match (l, r) {
+        (&Float(a), &Float(b)) => {
+            use std::cmp::Ordering::*;
+            match op {
+                hir::BiAdd => Float(math!(span, a + b)),
+                hir::BiSub => Float(math!(span, a - b)),
+                hir::BiMul => Float(math!(span, a * b)),
+                hir::BiDiv => Float(math!(span, a / b)),
+                hir::BiRem => Float(math!(span, a % b)),
+                hir::BiEq => Bool(math!(span, a.try_cmp(b)) == Equal),
+                hir::BiLt => Bool(math!(span, a.try_cmp(b)) == Less),
+                hir::BiLe => Bool(math!(span, a.try_cmp(b)) != Greater),
+                hir::BiNe => Bool(math!(span, a.try_cmp(b)) != Equal),
+                hir::BiGe => Bool(math!(span, a.try_cmp(b)) != Less),
+                hir::BiGt => Bool(math!(span, a.try_cmp(b)) == Greater),
+                _ => signal!(span, InvalidOpForFloats(op)),
+            }
+        }
+        (&Integral(a), &Integral(b)) => {
+            use std::cmp::Ordering::*;
+            match op {
+                hir::BiAdd => Integral(math!(span, a + b)),
+                hir::BiSub => Integral(math!(span, a - b)),
+                hir::BiMul => Integral(math!(span, a * b)),
+                hir::BiDiv => Integral(math!(span, a / b)),
+                hir::BiRem => Integral(math!(span, a % b)),
+                hir::BiBitAnd => Integral(math!(span, a & b)),
+                hir::BiBitOr => Integral(math!(span, a | b)),
+                hir::BiBitXor => Integral(math!(span, a ^ b)),
+                hir::BiShl => Integral(math!(span, a << b)),
+                hir::BiShr => Integral(math!(span, a >> b)),
+                hir::BiEq => Bool(math!(span, a.try_cmp(b)) == Equal),
+                hir::BiLt => Bool(math!(span, a.try_cmp(b)) == Less),
+                hir::BiLe => Bool(math!(span, a.try_cmp(b)) != Greater),
+                hir::BiNe => Bool(math!(span, a.try_cmp(b)) != Equal),
+                hir::BiGe => Bool(math!(span, a.try_cmp(b)) != Less),
+                hir::BiGt => Bool(math!(span, a.try_cmp(b)) == Greater),
+                _ => signal!(span, InvalidOpForInts(op)),
+            }
+        }
+        (&Bool(a), &Bool(b)) => {
+            Bool(match op {
+                hir::BiAnd => a && b,
+                hir::BiOr => a || b,
+                hir::BiBitXor => a ^ b,
+                hir::BiBitAnd => a & b,
+                hir::BiBitOr => a | b,
+                hir::BiEq => a == b,
+                hir::BiNe => a != b,
+                _ => signal!(span, InvalidOpForBools(op)),
+            })
+        }
+        _ => signal!(span, MiscBinaryOp),
+    })
+}
+
+pub fn eval_const_unop(op: hir::UnOp, const1: &ConstVal, span: Span) -> EvalResult {
+    Ok(match op {
+        hir::UnNeg => match *const1 {
+            Float(f) => Float(-f),
+            Integral(i) => Integral(math!(span, -i)),
+            ref const_val => signal!(span, NegateOn(const_val.clone())),
+        },
+        hir::UnNot => match *const1 {
+            Integral(i) => Integral(math!(span, !i)),
+            Bool(b) => Bool(!b),
+            ref const_val => signal!(span, NotOn(const_val.clone())),
+        },
+        hir::UnDeref => signal!(span, UnimplementedConstVal("deref operation")),
+    })
+}
+
 /// Evaluate a constant expression in a context where the expression isn't
 /// guaranteed to be evaluatable. `ty_hint` is usually ExprTypeChecked,
 /// but a few places need to evaluate constants during type-checking, like
@@ -659,20 +732,16 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                 _ => {},
             }
         }
-        match eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)? {
-          Float(f) => Float(-f),
-          Integral(i) => Integral(math!(e, -i)),
-          const_val => signal!(e, NegateOn(const_val)),
-        }
+        eval_const_unop(hir::UnNeg,
+                        &eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)?,
+                        e.span)?
       }
       hir::ExprUnary(hir::UnNot, ref inner) => {
-        match eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)? {
-          Integral(i) => Integral(math!(e, !i)),
-          Bool(b) => Bool(!b),
-          const_val => signal!(e, NotOn(const_val)),
-        }
+        eval_const_unop(hir::UnNot,
+                        &eval_const_expr_partial(tcx, &inner, ty_hint, fn_args)?,
+                        e.span)?
       }
-      hir::ExprUnary(hir::UnDeref, _) => signal!(e, UnimplementedConstVal("deref operation")),
+      hir::ExprUnary(hir::UnDeref, _) => signal!(e.span, UnimplementedConstVal("deref operation")),
       hir::ExprBinary(op, ref a, ref b) => {
         let b_ty = match op.node {
             hir::BiShl | hir::BiShr => ty_hint.erase_hint(),
@@ -682,62 +751,10 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         // gives us a type through a type-suffix, cast or const def type
         // we need to re-eval the other value of the BinOp if it was
         // not inferred
-        match (eval_const_expr_partial(tcx, &a, ty_hint, fn_args)?,
-               eval_const_expr_partial(tcx, &b, b_ty, fn_args)?) {
-          (Float(a), Float(b)) => {
-            use std::cmp::Ordering::*;
-            match op.node {
-              hir::BiAdd => Float(math!(e, a + b)),
-              hir::BiSub => Float(math!(e, a - b)),
-              hir::BiMul => Float(math!(e, a * b)),
-              hir::BiDiv => Float(math!(e, a / b)),
-              hir::BiRem => Float(math!(e, a % b)),
-              hir::BiEq => Bool(math!(e, a.try_cmp(b)) == Equal),
-              hir::BiLt => Bool(math!(e, a.try_cmp(b)) == Less),
-              hir::BiLe => Bool(math!(e, a.try_cmp(b)) != Greater),
-              hir::BiNe => Bool(math!(e, a.try_cmp(b)) != Equal),
-              hir::BiGe => Bool(math!(e, a.try_cmp(b)) != Less),
-              hir::BiGt => Bool(math!(e, a.try_cmp(b)) == Greater),
-              _ => signal!(e, InvalidOpForFloats(op.node)),
-            }
-          }
-          (Integral(a), Integral(b)) => {
-            use std::cmp::Ordering::*;
-            match op.node {
-              hir::BiAdd => Integral(math!(e, a + b)),
-              hir::BiSub => Integral(math!(e, a - b)),
-              hir::BiMul => Integral(math!(e, a * b)),
-              hir::BiDiv => Integral(math!(e, a / b)),
-              hir::BiRem => Integral(math!(e, a % b)),
-              hir::BiBitAnd => Integral(math!(e, a & b)),
-              hir::BiBitOr => Integral(math!(e, a | b)),
-              hir::BiBitXor => Integral(math!(e, a ^ b)),
-              hir::BiShl => Integral(math!(e, a << b)),
-              hir::BiShr => Integral(math!(e, a >> b)),
-              hir::BiEq => Bool(math!(e, a.try_cmp(b)) == Equal),
-              hir::BiLt => Bool(math!(e, a.try_cmp(b)) == Less),
-              hir::BiLe => Bool(math!(e, a.try_cmp(b)) != Greater),
-              hir::BiNe => Bool(math!(e, a.try_cmp(b)) != Equal),
-              hir::BiGe => Bool(math!(e, a.try_cmp(b)) != Less),
-              hir::BiGt => Bool(math!(e, a.try_cmp(b)) == Greater),
-              _ => signal!(e, InvalidOpForInts(op.node)),
-            }
-          }
-          (Bool(a), Bool(b)) => {
-            Bool(match op.node {
-              hir::BiAnd => a && b,
-              hir::BiOr => a || b,
-              hir::BiBitXor => a ^ b,
-              hir::BiBitAnd => a & b,
-              hir::BiBitOr => a | b,
-              hir::BiEq => a == b,
-              hir::BiNe => a != b,
-              _ => signal!(e, InvalidOpForBools(op.node)),
-             })
-          }
-
-          _ => signal!(e, MiscBinaryOp),
-        }
+        eval_const_binop(op.node,
+                         &eval_const_expr_partial(tcx, &a, ty_hint, fn_args)?,
+                         &eval_const_expr_partial(tcx, &b, b_ty, fn_args)?,
+                         e.span)?
       }
       hir::ExprCast(ref base, ref target_ty) => {
         let ety = tcx.ast_ty_to_prim_ty(&target_ty).or(ety)
@@ -782,7 +799,7 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
           // FIXME: There's probably a better way to make sure we don't panic here.
           let resolution = tcx.expect_resolution(e.id);
           if resolution.depth != 0 {
-              signal!(e, UnresolvedPath);
+              signal!(e.span, UnresolvedPath);
           }
           match resolution.base_def {
               Def::Const(def_id) |
@@ -801,11 +818,11 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                           Ok(val) => val,
                           Err(err) => {
                               debug!("bad reference: {:?}, {:?}", err.description(), err.span);
-                              signal!(e, ErroneousReferencedConstant(box err))
+                              signal!(e.span, ErroneousReferencedConstant(box err))
                           },
                       }
                   } else {
-                      signal!(e, NonConstPath);
+                      signal!(e.span, NonConstPath);
                   }
               },
               Def::Variant(enum_def, variant_def) => {
@@ -814,11 +831,11 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                           Ok(val) => val,
                           Err(err) => {
                               debug!("bad reference: {:?}, {:?}", err.description(), err.span);
-                              signal!(e, ErroneousReferencedConstant(box err))
+                              signal!(e.span, ErroneousReferencedConstant(box err))
                           },
                       }
                   } else {
-                      signal!(e, UnimplementedConstVal("enum variants"));
+                      signal!(e.span, UnimplementedConstVal("enum variants"));
                   }
               }
               Def::Struct(..) => {
@@ -829,11 +846,11 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                   if let Some(val) = fn_args.and_then(|args| args.get(&id)) {
                       val.clone()
                   } else {
-                      signal!(e, NonConstPath);
+                      signal!(e.span, NonConstPath);
                   }
               },
               Def::Method(id) | Def::Fn(id) => Function(id),
-              _ => signal!(e, NonConstPath),
+              _ => signal!(e.span, NonConstPath),
           }
       }
       hir::ExprCall(ref callee, ref args) => {
@@ -841,13 +858,13 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
           let callee_val = eval_const_expr_partial(tcx, callee, sub_ty_hint, fn_args)?;
           let did = match callee_val {
               Function(did) => did,
-              Struct(_) => signal!(e, UnimplementedConstVal("tuple struct constructors")),
-              callee => signal!(e, CallOn(callee)),
+              Struct(_) => signal!(e.span, UnimplementedConstVal("tuple struct constructors")),
+              callee => signal!(e.span, CallOn(callee)),
           };
           let (decl, result) = if let Some(fn_like) = lookup_const_fn_by_id(tcx, did) {
               (fn_like.decl(), &fn_like.body().expr)
           } else {
-              signal!(e, NonConstPath)
+              signal!(e.span, NonConstPath)
           };
           let result = result.as_ref().expect("const fn has no result expression");
           assert_eq!(decl.inputs.len(), args.len());
@@ -870,12 +887,12 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
       },
       hir::ExprLit(ref lit) => match lit_to_const(&lit.node, tcx, ety, lit.span) {
           Ok(val) => val,
-          Err(err) => signal!(e, err),
+          Err(err) => signal!(e.span, err),
       },
       hir::ExprBlock(ref block) => {
         match block.expr {
             Some(ref expr) => eval_const_expr_partial(tcx, &expr, ty_hint, fn_args)?,
-            None => signal!(e, UnimplementedConstVal("empty block")),
+            None => signal!(e.span, UnimplementedConstVal("empty block")),
         }
       }
       hir::ExprType(ref e, _) => eval_const_expr_partial(tcx, &e, ty_hint, fn_args)?,
@@ -883,7 +900,7 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
       hir::ExprStruct(..) => Struct(e.id),
       hir::ExprIndex(ref arr, ref idx) => {
         if !tcx.sess.features.borrow().const_indexing {
-            signal!(e, IndexOpFeatureGated);
+            signal!(e.span, IndexOpFeatureGated);
         }
         let arr_hint = ty_hint.erase_hint();
         let arr = eval_const_expr_partial(tcx, arr, arr_hint, fn_args)?;
@@ -891,12 +908,12 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
         let idx = match eval_const_expr_partial(tcx, idx, idx_hint, fn_args)? {
             Integral(Usize(i)) => i.as_u64(tcx.sess.target.uint_type),
             Integral(_) => bug!(),
-            _ => signal!(idx, IndexNotInt),
+            _ => signal!(idx.span, IndexNotInt),
         };
         assert_eq!(idx as usize as u64, idx);
         match arr {
             Array(_, n) if idx >= n => {
-                signal!(e, IndexOutOfBounds { len: n, index: idx })
+                signal!(e.span, IndexOutOfBounds { len: n, index: idx })
             }
             Array(v, n) => if let hir::ExprVec(ref v) = tcx.map.expect_expr(v).node {
                 assert_eq!(n as usize as u64, n);
@@ -906,7 +923,7 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
             },
 
             Repeat(_, n) if idx >= n => {
-                signal!(e, IndexOutOfBounds { len: n, index: idx })
+                signal!(e.span, IndexOutOfBounds { len: n, index: idx })
             }
             Repeat(elem, _) => eval_const_expr_partial(
                 tcx,
@@ -916,13 +933,13 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
             )?,
 
             ByteStr(ref data) if idx >= data.len() as u64 => {
-                signal!(e, IndexOutOfBounds { len: data.len() as u64, index: idx })
+                signal!(e.span, IndexOutOfBounds { len: data.len() as u64, index: idx })
             }
             ByteStr(data) => {
                 Integral(U8(data[idx as usize]))
             },
 
-            _ => signal!(e, IndexedNonVec),
+            _ => signal!(e.span, IndexedNonVec),
         }
       }
       hir::ExprVec(ref v) => Array(e.id, v.len() as u64),
@@ -932,8 +949,8 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
               e.id,
               match eval_const_expr_partial(tcx, &n, len_hint, fn_args)? {
                   Integral(Usize(i)) => i.as_u64(tcx.sess.target.uint_type),
-                  Integral(_) => signal!(e, RepeatCountNotNatural),
-                  _ => signal!(e, RepeatCountNotInt),
+                  Integral(_) => signal!(e.span, RepeatCountNotNatural),
+                  _ => signal!(e.span, RepeatCountNotInt),
               },
           )
       },
@@ -945,13 +962,13 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                 if index.node < fields.len() {
                     eval_const_expr_partial(tcx, &fields[index.node], ty_hint, fn_args)?
                 } else {
-                    signal!(e, TupleIndexOutOfBounds);
+                    signal!(e.span, TupleIndexOutOfBounds);
                 }
             } else {
                 bug!()
             }
         } else {
-            signal!(base, ExpectedConstTuple);
+            signal!(base.span, ExpectedConstTuple);
         }
       }
       hir::ExprField(ref base, field_name) => {
@@ -966,23 +983,23 @@ pub fn eval_const_expr_partial<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
                                                      == field_name.node) {
                     eval_const_expr_partial(tcx, &f.expr, ty_hint, fn_args)?
                 } else {
-                    signal!(e, MissingStructField);
+                    signal!(e.span, MissingStructField);
                 }
             } else {
                 bug!()
             }
         } else {
-            signal!(base, ExpectedConstStruct);
+            signal!(base.span, ExpectedConstStruct);
         }
       }
-      hir::ExprAddrOf(..) => signal!(e, UnimplementedConstVal("address operator")),
-      _ => signal!(e, MiscCatchAll)
+      hir::ExprAddrOf(..) => signal!(e.span, UnimplementedConstVal("address operator")),
+      _ => signal!(e.span, MiscCatchAll)
     };
 
     match (ety.map(|t| &t.sty), result) {
         (Some(ref ty_hint), Integral(i)) => match infer(i, tcx, ty_hint) {
             Ok(inferred) => Ok(Integral(inferred)),
-            Err(err) => signal!(e, err),
+            Err(err) => signal!(e.span, err),
         },
         (_, result) => Ok(result),
     }
@@ -1170,7 +1187,7 @@ fn cast_const_float<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
     }
 }
 
-fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstVal, ty: ty::Ty) -> CastResult {
+pub fn cast_const<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>, val: ConstVal, ty: ty::Ty) -> CastResult {
     match val {
         Integral(i) => cast_const_int(tcx, i, ty),
         Bool(b) => cast_const_int(tcx, Infer(b as u64), ty),
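
With `eval_const_binop`, `eval_const_unop` and `cast_const` public, MIR-level passes can reuse the same folding logic instead of duplicating it. A minimal sketch of folding a MIR binary operation whose operands are already known constants; `fold_binop` is illustrative and collapses evaluation errors to `None`:

```rust
use rustc::middle::const_val::ConstVal;
use rustc::mir::repr::BinOp;
use rustc_const_eval::eval_const_binop;
use syntax_pos::Span;

fn fold_binop(op: BinOp, lhs: &ConstVal, rhs: &ConstVal, span: Span) -> Option<ConstVal> {
    // `to_hir_binop` was moved onto `BinOp` in repr.rs above; overflow and
    // invalid-operation errors simply yield `None` here.
    eval_const_binop(op.to_hir_binop(), lhs, rhs, span).ok()
}
```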
diff --git a/src/librustc_data_structures/bitvec.rs b/src/librustc_data_structures/bitvec.rs
index 0dab230f47a2d..b8d0042797f34 100644
--- a/src/librustc_data_structures/bitvec.rs
+++ b/src/librustc_data_structures/bitvec.rs
@@ -17,20 +17,15 @@ pub struct BitVector {
 }
 
 impl BitVector {
+    /// Creates a new bitvector with room for at least `num_bits` bits.
     pub fn new(num_bits: usize) -> BitVector {
         let num_words = u64s(num_bits);
         BitVector { data: vec![0; num_words] }
     }
 
-    pub fn clear(&mut self) {
-        for p in &mut self.data {
-            *p = 0;
-        }
-    }
-
     pub fn contains(&self, bit: usize) -> bool {
         let (word, mask) = word_mask(bit);
-        (self.data[word] & mask) != 0
+        (self.data.get(word).cloned().unwrap_or(0) & mask) != 0
     }
 
     /// Returns true if the bit has changed.
@@ -44,14 +39,12 @@ impl BitVector {
     }
 
     pub fn insert_all(&mut self, all: &BitVector) -> bool {
-        assert!(self.data.len() == all.data.len());
+        assert!(self.data.len() >= all.data.len());
         let mut changed = false;
-        for (i, j) in self.data.iter_mut().zip(&all.data) {
-            let value = *i;
-            *i = value | *j;
-            if value != *i {
-                changed = true;
-            }
+        for (i, &j) in self.data.iter_mut().zip(all.data.iter()) {
+            let new_value = *i | j;
+            changed = changed || *i != new_value;
+            *i = new_value;
         }
         changed
     }
@@ -63,6 +56,46 @@ impl BitVector {
         }
     }
 
+    /// Returns the index of the highest set bit and unsets it.
+    pub fn pop(&mut self) -> Option<usize> {
+        for (idx, el) in self.data.iter_mut().enumerate().rev() {
+            if *el != 0 {
+                let bit = 63 - el.leading_zeros() as usize;
+                *el &= !(1 << bit);
+                return Some(idx * 64 + bit);
+            }
+        }
+        None
+    }
+
+    /// Returns true if the bit has changed.
+    pub fn remove(&mut self, bit: usize) -> bool {
+        let (word, mask) = word_mask(bit);
+        self.data.get_mut(word).map(|data| {
+            let value = *data;
+            let new_value = value & !mask;
+            *data = new_value;
+            new_value != value
+        }).unwrap_or(false)
+    }
+
+    /// Clear the bitvector.
+    pub fn clear(&mut self) {
+        for datum in &mut self.data {
+            *datum = 0;
+        }
+    }
+
+    pub fn invert(&mut self) {
+        for datum in &mut self.data {
+            *datum = !*datum;
+        }
+    }
+
+    pub fn len(&self) -> usize {
+        self.data.len() * 64
+    }
+
     /// Iterates over indexes of set bits in a sorted order
     pub fn iter<'a>(&'a self) -> BitVectorIter<'a> {
         BitVectorIter {
@@ -239,17 +272,11 @@ fn word_mask(index: usize) -> (usize, u64) {
 #[test]
 fn bitvec_iter_works() {
     let mut bitvec = BitVector::new(100);
-    bitvec.insert(1);
-    bitvec.insert(10);
-    bitvec.insert(19);
-    bitvec.insert(62);
-    bitvec.insert(63);
-    bitvec.insert(64);
-    bitvec.insert(65);
-    bitvec.insert(66);
-    bitvec.insert(99);
-    assert_eq!(bitvec.iter().collect::<Vec<_>>(),
-               [1, 10, 19, 62, 63, 64, 65, 66, 99]);
+    let ids = [1, 10, 19, 62, 63, 64, 65, 66, 99];
+    for &i in &ids {
+        bitvec.insert(i);
+    }
+    assert_eq!(bitvec.iter().collect::<Vec<_>>(), ids);
 }
 
 
@@ -379,3 +406,54 @@ fn matrix_iter() {
     }
     assert!(iter.next().is_none());
 }
+
+#[test]
+fn bitvec_pop() {
+    let mut bitvec = BitVector::new(100);
+    let ids = [1, 10, 19, 62, 63, 64, 65, 66, 99];
+    for &i in &ids {
+        bitvec.insert(i);
+    }
+    let mut idxs = vec![];
+    while let Some(idx) = bitvec.pop() { idxs.push(idx); }
+    idxs.reverse();
+    assert_eq!(idxs, ids);
+}
+
+#[test]
+fn bitvec_invert() {
+    let mut bitvec = BitVector::new(100);
+    let ids = [1, 10, 19, 62, 63, 64, 65, 66, 99];
+    for &i in &ids {
+        bitvec.insert(i);
+    }
+    bitvec.invert();
+    for &i in &ids {
+        assert!(!bitvec.contains(i));
+    }
+    assert!(bitvec.contains(0));
+    assert!(bitvec.contains(2));
+    assert!(bitvec.contains(9));
+    assert!(bitvec.contains(11));
+    assert!(bitvec.contains(61));
+    assert!(bitvec.contains(67));
+}
+
+#[test]
+fn bitvec_remove() {
+    let mut bitvec = BitVector::new(100);
+    let ids = [1, 10, 19, 62, 63, 64, 65, 66, 99];
+    for &i in &ids {
+        bitvec.insert(i);
+    }
+    // Removing a set bit reports a change and clears it.
+    for &i in &ids {
+        assert!(bitvec.remove(i));
+        assert!(!bitvec.contains(i));
+    }
+    // Removing an already-unset bit reports no change.
+    assert!(!bitvec.remove(0));
+    assert!(!bitvec.remove(2));
+    // Bits past the allocated words are treated as unset.
+    assert!(!bitvec.remove(1000));
+}
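
The new `pop` makes it easy to drain a `BitVector` as a worklist, for example of basic-block indices that still need processing. A small illustrative sketch; `drain_worklist` is not part of this patch:

```rust
use rustc_data_structures::bitvec::BitVector;

fn drain_worklist<F: FnMut(usize)>(worklist: &mut BitVector, mut process: F) {
    // `pop` returns the highest set bit and clears it, so every queued index
    // is visited exactly once unless `process` re-inserts some.
    while let Some(index) = worklist.pop() {
        process(index);
    }
}
```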
diff --git a/src/librustc_data_structures/indexed_vec.rs b/src/librustc_data_structures/indexed_vec.rs
index 9123463149936..c287095397d32 100644
--- a/src/librustc_data_structures/indexed_vec.rs
+++ b/src/librustc_data_structures/indexed_vec.rs
@@ -102,6 +102,11 @@ impl<I: Idx, T> IndexVec<I, T> {
         self.raw.len()
     }
 
+    #[inline]
+    pub unsafe fn set_len(&mut self, len: usize) {
+        self.raw.set_len(len)
+    }
+
     #[inline]
     pub fn is_empty(&self) -> bool {
         self.raw.is_empty()
@@ -149,6 +154,31 @@ impl<I: Idx, T> IndexVec<I, T> {
     pub fn last(&self) -> Option<I> {
         self.len().checked_sub(1).map(I::new)
     }
+
+    #[inline]
+    pub unsafe fn as_ptr(&mut self) -> *const T {
+        self.raw.as_ptr()
+    }
+
+    #[inline]
+    pub unsafe fn as_mut_ptr(&mut self) -> *mut T {
+        self.raw.as_mut_ptr()
+    }
+
+    #[inline]
+    pub fn shrink_to_fit(&mut self) {
+        self.raw.shrink_to_fit()
+    }
+
+    #[inline]
+    pub fn swap(&mut self, a: usize, b: usize) {
+        self.raw.swap(a, b)
+    }
+
+    #[inline]
+    pub fn truncate(&mut self, a: usize) {
+        self.raw.truncate(a)
+    }
 }
 
 impl<I: Idx, T> Index<I> for IndexVec<I, T> {
diff --git a/src/librustc_driver/driver.rs b/src/librustc_driver/driver.rs
index c6100004786be..784458381faa3 100644
--- a/src/librustc_driver/driver.rs
+++ b/src/librustc_driver/driver.rs
@@ -910,16 +910,16 @@ pub fn phase_3_run_analysis_passes<'tcx, F, R>(sess: &'tcx Session,
                  "MIR dump",
                  || mir::mir_map::build_mir_for_crate(tcx));
 
-        time(time_passes, "MIR passes", || {
+        time(time_passes, "MIR validation", || {
             let mut passes = sess.mir_passes.borrow_mut();
             // Push all the built-in passes.
             passes.push_hook(box mir::transform::dump_mir::DumpMir);
-            passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("initial"));
+            passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("initial"));
             passes.push_pass(box mir::transform::qualify_consts::QualifyAndPromoteConstants);
             passes.push_pass(box mir::transform::type_check::TypeckMir);
             passes.push_pass(
                 box mir::transform::simplify_branches::SimplifyBranches::new("initial"));
-            passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("qualify-consts"));
+            passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("qualify-consts"));
             // And run everything.
             passes.run_passes(tcx, &mut mir_map);
         });
@@ -983,21 +983,25 @@ pub fn phase_4_translate_to_llvm<'a, 'tcx>(tcx: TyCtxt<'a, 'tcx, 'tcx>,
 
     // Run the passes that transform the MIR into a more suitable for translation
     // to LLVM code.
-    time(time_passes, "Prepare MIR codegen passes", || {
+    time(time_passes, "MIR optimisations", || {
         let mut passes = ::rustc::mir::transform::Passes::new();
         passes.push_hook(box mir::transform::dump_mir::DumpMir);
         passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads);
-        passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("no-landing-pads"));
+        passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("no-landing-pads"));
 
         passes.push_pass(box mir::transform::erase_regions::EraseRegions);
 
         passes.push_pass(box mir::transform::add_call_guards::AddCallGuards);
         passes.push_pass(box borrowck::ElaborateDrops);
         passes.push_pass(box mir::transform::no_landing_pads::NoLandingPads);
-        passes.push_pass(box mir::transform::simplify_cfg::SimplifyCfg::new("elaborate-drops"));
+        passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("elaborate-drops"));
 
         passes.push_pass(box mir::transform::deaggregator::Deaggregator);
+        passes.push_pass(box mir::transform::const_propagate::ConstPropagate);
+        passes.push_pass(box mir::transform::simplify::SimplifyCfg::new("const-propagate"));
+        passes.push_pass(box mir::transform::deadcode::DeadCode);
 
+        passes.push_pass(box mir::transform::simplify::SimplifyLocals);
         passes.push_pass(box mir::transform::add_call_guards::AddCallGuards);
         passes.push_pass(box mir::transform::dump_mir::Marker("PreTrans"));
 
diff --git a/src/librustc_mir/build/matches/test.rs b/src/librustc_mir/build/matches/test.rs
index 8c9ed53c8ab4d..0757229a1cb09 100644
--- a/src/librustc_mir/build/matches/test.rs
+++ b/src/librustc_mir/build/matches/test.rs
@@ -238,7 +238,7 @@ impl<'a, 'gcx, 'tcx> Builder<'a, 'gcx, 'tcx> {
 
                         (targets.clone(),
                          TerminatorKind::SwitchInt {
-                             discr: lvalue.clone(),
+                             discr: Operand::Consume(lvalue.clone()),
                              switch_ty: switch_ty,
                              values: options.clone(),
                              targets: targets
diff --git a/src/librustc_mir/transform/const_propagate.rs b/src/librustc_mir/transform/const_propagate.rs
new file mode 100644
index 0000000000000..43dc76989a883
--- /dev/null
+++ b/src/librustc_mir/transform/const_propagate.rs
@@ -0,0 +1,474 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+//! This is the Constant-Simplify propagation pass, a composition of two distinct
+//! passes: constant propagation and terminator simplification.
+//!
+//! Note that interleaving these passes results in a strictly more potent optimisation pass,
+//! one which can optimise the CFG in a way the two passes couldn’t manage separately even if
+//! they were run indefinitely one after another.
+//!
+//! Both these passes are very similar in their nature:
+//!
+//! |                | Constant  | Simplify  |
+//! |----------------|-----------|-----------|
+//! | Lattice Domain | Lvalue    | Lvalue    |
+//! | Lattice Value  | Constant  | Constant  |
+//! | Transfer       | x = const | x = const |
+//! | Rewrite        | x → const | T(x) → T' |
+//! | Top            | {}        | {}        |
+//! | Join           | intersect | intersect |
+//! |----------------|-----------|-----------|
+//!
+//! It might be a good idea to eventually interleave move propagation here, as it has a very
+//! similar lattice to the constant-propagation pass.
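+//!
+//! As a source-level illustration (surface Rust rather than MIR), interleaving
+//! the rewrites lets a chain like the following fold away completely: the
+//! comparison is folded once `x` is known, the branch on a constant condition
+//! is then simplified away, and that in turn exposes `y` as a constant.
+//!
+//! ```rust,ignore
+//! let x = 2;
+//! let y = if x == 2 { 10 } else { 20 };
+//! y + x // ends up as the constant 12
+//! ```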
+
+use rustc_data_structures::fnv::FnvHashMap;
+use rustc::middle::const_val::ConstVal;
+use rustc::hir::def_id::DefId;
+use rustc::mir::repr::*;
+use rustc::mir::transform::{Pass, MirPass, MirSource};
+use rustc::mir::visit::{MutVisitor};
+use rustc::ty::TyCtxt;
+use rustc_const_eval::{eval_const_binop, eval_const_unop, cast_const};
+
+use super::dataflow::*;
+
+pub struct ConstPropagate;
+
+impl Pass for ConstPropagate {}
+
+impl<'tcx> MirPass<'tcx> for ConstPropagate {
+    fn run_pass<'a>(&mut self, tcx: TyCtxt<'a, 'tcx, 'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+        *mir = Dataflow::forward(mir, ConstLattice::new(),
+                                 ConstTransfer { tcx: tcx },
+                                 ConstRewrite { tcx: tcx }.and_then(SimplifyRewrite));
+    }
+}
+
+#[derive(Debug, Clone)]
+struct ConstLattice<'tcx> {
+    statics: FnvHashMap<DefId, Constant<'tcx>>,
+    // key must not be a static or a projection.
+    locals: FnvHashMap<Lvalue<'tcx>, Constant<'tcx>>,
+    // List of potentially aliased variables (reference taken), this is joined with union
+    // operation! FIXME: should use proper alias analysis store, similar to one LLVM has.
+    aliased: FnvHashMap<Lvalue<'tcx>, ()>
+}
+
+impl<'tcx> ConstLattice<'tcx> {
+    fn new() -> ConstLattice<'tcx> {
+        ConstLattice {
+            statics: FnvHashMap(),
+            locals: FnvHashMap(),
+            aliased: FnvHashMap(),
+        }
+    }
+
+    fn insert(&mut self, key: &Lvalue<'tcx>, val: Constant<'tcx>) {
+        if self.aliased.contains_key(key) {
+            return;
+        }
+        // FIXME: HashMap has no way to insert stuff without cloning the key even if it exists
+        // already.
+        match *key {
+            Lvalue::Static(defid) => self.statics.insert(defid, val),
+            // I feel like this could be handled, but needs special care. For example in code like
+            // this:
+            //
+            // ```
+            // var.field = false;
+            // something(&mut var);
+            // assert!(var.field);
+            // ```
+            //
+            // taking a reference to var should invalidate knowledge about all the
+            // projections of var and not just var itself. Currently we handle this by not
+            // keeping any knowledge about projections at all, but I think eventually we
+            // want to do so.
+            Lvalue::Projection(_) => None,
+            _ => self.locals.insert(key.clone(), val)
+        };
+    }
+
+    fn get(&self, key: &Lvalue<'tcx>) -> Option<&Constant<'tcx>> {
+        match *key {
+            Lvalue::Static(ref defid) => self.statics.get(defid),
+            Lvalue::Projection(_) => None,
+            _ => self.locals.get(key),
+        }
+    }
+
+    fn top(&mut self, key: &Lvalue<'tcx>) {
+        match *key {
+            Lvalue::Static(ref defid) => { self.statics.remove(defid); }
+            Lvalue::Projection(_) => {}
+            _ => { self.locals.remove(key); }
+        }
+    }
+
+    fn mark_aliased(&mut self, key: &Lvalue<'tcx>) {
+        self.top(key.base());
+        self.aliased.insert(key.base().clone(), ());
+    }
+}
+
+fn intersect_map<K, V>(this: &mut FnvHashMap<K, V>, mut other: FnvHashMap<K, V>) -> bool
+where K: Eq + ::std::hash::Hash, V: PartialEq
+{
+    let mut changed = false;
+    // Calculate the intersection this way:
+    // First, drain `this` into a list of (key, value) pairs whose values are equal in both maps.
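+    // For example, joining `{x ↦ 1, y ↦ 2}` with `{x ↦ 1, y ↦ 3}` keeps only
+    // `{x ↦ 1}`: keys missing from either side, or bound to different
+    // constants, go to Top and are dropped.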
+    let mut common_keys = vec![];
+    for (key, value) in this.drain() {
+        match other.remove(&key) {
+            None => {
+                // `this` had the key but `other` did not, so the key goes to Top (i.e. is dropped)
+                changed = true;
+            }
+            Some(ov) => {
+                // Similarly, if the values are not equal…
+                changed |= if ov != value {
+                    true
+                } else {
+                    common_keys.push((key, value));
+                    false
+                }
+            }
+        }
+    }
+    // Now, put each common key with equal value back into the map.
+    for (key, value) in common_keys {
+        this.insert(key, value);
+    }
+    changed
+}
+
+impl<'tcx> Lattice for ConstLattice<'tcx> {
+    fn bottom() -> Self { unimplemented!() }
+    fn join(&mut self, other: Self) -> bool {
+        self.aliased.join(other.aliased) |
+        intersect_map(&mut self.locals, other.locals) |
+        intersect_map(&mut self.statics, other.statics)
+    }
+}
+
+struct ConstTransfer<'a, 'tcx: 'a> {
+    tcx: TyCtxt<'a, 'tcx, 'tcx>,
+}
+
+impl<'a, 'tcx> Transfer<'tcx> for ConstTransfer<'a, 'tcx> {
+    type Lattice = ConstLattice<'tcx>;
+    type TerminatorReturn = Vec<Self::Lattice>;
+
+    fn stmt(&self, s: &Statement<'tcx>, mut lat: Self::Lattice)
+    -> Self::Lattice
+    {
+        let (lval, rval) = match s.kind {
+            // Could be handled with some extra information, but there’s no point in doing that yet
+            // because currently we would have no consumers of this data (no GetDiscriminant).
+            StatementKind::SetDiscriminant { .. } => return lat,
+            StatementKind::StorageDead(ref lval) => {
+                lat.top(lval);
+                return lat;
+            }
+            StatementKind::StorageLive(_) => return lat,
+            StatementKind::Assign(ref lval, ref rval) => (lval, rval),
+        };
+        match *rval {
+            Rvalue::Use(Operand::Consume(_)) => {
+                lat.top(lval);
+            },
+            Rvalue::Use(Operand::Constant(ref cnst)) => {
+                lat.insert(lval, cnst.clone());
+            },
+            // We do not want to deal with references and pointers here. Not yet and not without
+            // a way to query stuff about reference/pointer aliasing.
+            Rvalue::Ref(_, _, ref referee) => {
+                lat.top(lval);
+                lat.mark_aliased(referee);
+            }
+            // FIXME: should calculate length of statically sized arrays and store it.
+            Rvalue::Len(_) => { lat.top(lval); }
+            // FIXME: should keep length around for Len case above.
+            Rvalue::Repeat(_, _) => { lat.top(lval); }
+            // Not handled. Constant casts should turn out as plain ConstVals by here.
+            Rvalue::Cast(_, _, _) => { lat.top(lval); }
+            // Make sure cases like `var1 = var1 {op} x` do not make our knowledge incorrect.
+            Rvalue::BinaryOp(..) | Rvalue::CheckedBinaryOp(..) | Rvalue::UnaryOp(..) => {
+                lat.top(lval);
+            }
+            // Cannot be handled
+            Rvalue::Box(_) => { lat.top(lval); }
+            // Not handled, but could be; running the deaggregator first means we need not bother.
+            Rvalue::Aggregate(..) => { lat.top(lval); }
+            // Not handled; invalidate knowledge about every lvalue the asm touches. Inline
+            // assembly is dangerous and may mutate anything it can reach.
+            Rvalue::InlineAsm { ref outputs, ref inputs, asm: _ } => {
+                lat.top(lval);
+                for output in outputs { lat.top(output); }
+                for input in inputs {
+                    if let Operand::Consume(ref lval) = *input { lat.top(lval); }
+                }
+                // Clear the statics, because inline assembly may mutate global state at will.
+                lat.statics.clear();
+            }
+        };
+        lat
+    }
+
+    fn term(&self, t: &Terminator<'tcx>, mut lat: Self::Lattice)
+    -> Self::TerminatorReturn
+    {
+        let span = t.source_info.span;
+        let succ_count = t.successors().len();
+        let bool_const = |b: bool| Constant {
+            span: span,
+            ty: self.tcx.mk_bool(),
+            literal: Literal::Value { value: ConstVal::Bool(b) },
+        };
+        match t.kind {
+            TerminatorKind::If { cond: Operand::Consume(ref lval), .. } => {
+                let mut falsy = lat.clone();
+                falsy.insert(lval, bool_const(false));
+                lat.insert(lval, bool_const(true));
+                vec![lat, falsy]
+            }
+            TerminatorKind::SwitchInt { discr: Operand::Consume(ref lval),
+                                        ref values,
+                                        switch_ty, .. } => {
+                let mut vec: Vec<_> = values.iter().map(|val| {
+                    let mut branch = lat.clone();
+                    branch.insert(lval, Constant {
+                        span: span,
+                        ty: switch_ty,
+                        literal: Literal::Value { value: val.clone() }
+                    });
+                    branch
+                }).collect();
+                vec.push(lat);
+                vec
+            }
+            TerminatorKind::Drop { ref location, .. } => {
+                lat.top(location);
+                // See comment in Call.
+                lat.statics.clear();
+                vec![lat; succ_count]
+            }
+            TerminatorKind::DropAndReplace { ref location, ref value, .. } => {
+                match *value {
+                    Operand::Consume(_) => lat.top(location),
+                    Operand::Constant(ref cnst) => lat.insert(location, cnst.clone()),
+                }
+                // See comment in Call.
+                lat.statics.clear();
+                vec![lat; succ_count]
+            }
+            TerminatorKind::Call { ref destination, ref args, .. } => {
+                for arg in args {
+                    if let Operand::Consume(ref lval) = *arg {
+                        // FIXME: Probably safe to not remove any non-projection lvals.
+                        lat.top(lval);
+                    }
+                }
+                if let Some((ref lval, _)) = *destination {
+                    lat.top(lval);
+                }
+                // Clear all knowledge about statics, because the call may mutate any global state
+                // without us knowing about it.
+                lat.statics.clear();
+                vec![lat; succ_count]
+            }
+            TerminatorKind::Assert { ref cond, expected, ref cleanup, .. } => {
+                if let Operand::Consume(ref lval) = *cond {
+                    lat.insert(lval, bool_const(expected));
+                    if cleanup.is_some() {
+                        let mut falsy = lat.clone();
+                        falsy.insert(lval, bool_const(!expected));
+                        vec![lat, falsy]
+                    } else {
+                        vec![lat]
+                    }
+                } else {
+                    vec![lat; succ_count]
+                }
+            }
+            // (unreachable if interleaved with simplify branches pass):
+            TerminatorKind::SwitchInt { .. } | // The condition is constant
+            TerminatorKind::If { .. } |        // The condition is constant
+
+            TerminatorKind::Switch { .. } | // Might make some sense to handle this
+            TerminatorKind::Goto { .. } |
+            TerminatorKind::Unreachable |
+            TerminatorKind::Return |
+            TerminatorKind::Resume => {
+                vec![lat; succ_count]
+            }
+        }
+    }
+}
+
+struct ConstRewrite<'a, 'tcx: 'a> {
+    tcx: TyCtxt<'a, 'tcx, 'tcx>
+}
+impl<'a, 'tcx, T> Rewrite<'tcx, T> for ConstRewrite<'a, 'tcx>
+where T: Transfer<'tcx, Lattice=ConstLattice<'tcx>>
+{
+    fn stmt(&self, stmt: &Statement<'tcx>, fact: &T::Lattice) -> StatementChange<'tcx> {
+        let mut stmt = stmt.clone();
+        let mut vis = RewriteConstVisitor(&fact);
+        vis.visit_statement(START_BLOCK, &mut stmt);
+        ConstEvalVisitor(self.tcx).visit_statement(START_BLOCK, &mut stmt);
+        StatementChange::Statement(stmt)
+    }
+
+    fn term(&self, term: &Terminator<'tcx>, fact: &T::Lattice) -> TerminatorChange<'tcx> {
+        let mut term = term.clone();
+        let mut vis = RewriteConstVisitor(&fact);
+        vis.visit_terminator(START_BLOCK, &mut term);
+        ConstEvalVisitor(self.tcx).visit_terminator(START_BLOCK, &mut term);
+        TerminatorChange::Terminator(term)
+    }
+}
+
+struct RewriteConstVisitor<'a, 'tcx: 'a>(&'a ConstLattice<'tcx>);
+impl<'a, 'tcx> MutVisitor<'tcx> for RewriteConstVisitor<'a, 'tcx> {
+    fn visit_operand(&mut self, op: &mut Operand<'tcx>) {
+        // To satisfy the borrow checker, modify `op` only after inspecting it.
+        let repl = if let Operand::Consume(ref lval) = *op {
+            self.0.get(lval).cloned()
+        } else {
+            None
+        };
+        if let Some(c) = repl {
+            *op = Operand::Constant(c);
+        }
+        self.super_operand(op);
+    }
+}
+struct ConstEvalVisitor<'a, 'tcx: 'a>(TyCtxt<'a, 'tcx, 'tcx>);
+impl<'a, 'tcx> MutVisitor<'tcx> for ConstEvalVisitor<'a, 'tcx> {
+    fn visit_statement(&mut self, _: BasicBlock, stmt: &mut Statement<'tcx>) {
+        let span = stmt.source_info.span;
+        let rval = if let StatementKind::Assign(_, ref mut rval) = stmt.kind {
+            rval
+        } else {
+            return
+        };
+        let repl = match *rval {
+            // FIXME: Rvalue::CheckedBinaryOp could be evaluated to Rvalue::Aggregate of 2-tuple
+            // (or disaggregated version of it; needs replacement with arbitrary graphs)
+            Rvalue::BinaryOp(ref op, Operand::Constant(ref opr1), Operand::Constant(ref opr2)) => {
+                match (&opr1.literal, &opr2.literal) {
+                    (&Literal::Value { value: ref value1 },
+                     &Literal::Value { value: ref value2 }) =>
+                        eval_const_binop(op.to_hir_binop(), &value1, &value2, span).ok().map(|v| {
+                            (v, op.ty(self.0, opr1.ty, opr2.ty))
+                        }),
+                    _ => None
+                }
+            }
+            Rvalue::UnaryOp(ref op, Operand::Constant(ref opr1)) => {
+                if let Literal::Value { ref value } = opr1.literal {
+                    eval_const_unop(op.to_hir_unop(), value, span).ok().map(|v| (v, opr1.ty))
+                } else {
+                    None
+                }
+            }
+            Rvalue::Cast(CastKind::Misc, Operand::Constant(ref opr), ty) => {
+                let ret = if let Literal::Value { ref value } = opr.literal {
+                    cast_const(self.0, value.clone(), ty).ok().map(|v| (v, ty))
+                } else {
+                    None
+                };
+                ret
+            }
+            _ => None
+        };
+        if let Some((constant, ty)) = repl {
+            *rval = Rvalue::Use(Operand::Constant(Constant {
+                span: span,
+                ty: ty,
+                literal: Literal::Value { value: constant }
+            }));
+        }
+
+        self.super_rvalue(rval)
+    }
+}
+
+struct SimplifyRewrite;
+
+impl<'tcx, T: Transfer<'tcx>> Rewrite<'tcx, T> for SimplifyRewrite {
+    fn stmt(&self, s: &Statement<'tcx>, _: &T::Lattice)
+    -> StatementChange<'tcx> {
+        StatementChange::Statement(s.clone())
+    }
+
+    fn term(&self, t: &Terminator<'tcx>, _: &T::Lattice)
+    -> TerminatorChange<'tcx> {
+        match t.kind {
+            TerminatorKind::If { ref targets, .. } if targets.0 == targets.1 => {
+                let mut nt = t.clone();
+                nt.kind = TerminatorKind::Goto { target: targets.0 };
+                TerminatorChange::Terminator(nt)
+            }
+            TerminatorKind::If { ref targets, cond: Operand::Constant(Constant {
+                literal: Literal::Value { value: ConstVal::Bool(cond) }, ..
+            }) } => {
+                let mut nt = t.clone();
+                if cond {
+                    nt.kind = TerminatorKind::Goto { target: targets.0 };
+                } else {
+                    nt.kind = TerminatorKind::Goto { target: targets.1 };
+                }
+                TerminatorChange::Terminator(nt)
+            }
+
+            TerminatorKind::SwitchInt { ref targets, .. } if targets.len() == 1 => {
+                TerminatorChange::Terminator(Terminator {
+                    source_info: t.source_info,
+                    kind: TerminatorKind::Goto { target: targets[0] }
+                })
+            }
+            TerminatorKind::SwitchInt { discr: Operand::Constant(Constant {
+                literal: Literal::Value { value: ref switch }, ..
+            }), ref targets, ref values, .. } => {
+                for (target, value) in targets.iter().zip(values.iter()) {
+                    if value == switch {
+                        return TerminatorChange::Terminator(Terminator {
+                            source_info: t.source_info,
+                            kind: TerminatorKind::Goto { target: *target }
+                        });
+                    }
+                }
+                TerminatorChange::Terminator(Terminator {
+                    source_info: t.source_info,
+                    kind: TerminatorKind::Goto {
+                        target: *targets.last().expect("SwitchInt has the `otherwise` target")
+                    }
+                })
+            }
+
+            TerminatorKind::Assert { target, cond: Operand::Constant(Constant {
+                literal: Literal::Value {
+                    value: ConstVal::Bool(cond)
+                }, ..
+            }), expected, .. } if cond == expected => {
+                // FIXME: once replacements with arbitrary subgraphs get implemented, the
+                // cond != expected case should point the success branch at a block with an
+                // Unreachable terminator.
+                TerminatorChange::Terminator(Terminator {
+                    source_info: t.source_info,
+                    kind: TerminatorKind::Goto { target: target }
+                })
+            }
+            _ => TerminatorChange::Terminator(t.clone())
+        }
+    }
+}
diff --git a/src/librustc_mir/transform/dataflow/combinators.rs b/src/librustc_mir/transform/dataflow/combinators.rs
new file mode 100644
index 0000000000000..2877587e0107f
--- /dev/null
+++ b/src/librustc_mir/transform/dataflow/combinators.rs
@@ -0,0 +1,47 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+use rustc::mir::repr as mir;
+
+use std::marker::PhantomData;
+use super::*;
+
+/// This combinator has the following behaviour:
+///
+/// * Rewrite the node with the first rewriter.
+///   * if the first rewriter replaced the node, the second rewriter rewrites the replacement;
+///   * otherwise the second rewriter rewrites the original node.
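+///
+/// For illustration, a sketch of chaining two of the rewriters defined in this patch (the exact
+/// pass wiring is elided and may differ):
+///
+/// ```ignore
+/// // Fold constants first; whatever comes out is then handed to the branch simplifier.
+/// let rewrite = ConstRewrite { tcx: tcx }.and_then(SimplifyRewrite);
+/// ```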
+pub struct RewriteAndThen<'tcx, T, R1, R2>(R1, R2, PhantomData<(&'tcx (), T)>)
+where T: Transfer<'tcx>, R1: Rewrite<'tcx, T>, R2: Rewrite<'tcx, T>;
+
+impl<'tcx, T, R1, R2> RewriteAndThen<'tcx, T, R1, R2>
+where T: Transfer<'tcx>, R1: Rewrite<'tcx, T>, R2: Rewrite<'tcx, T>
+{
+    pub fn new(r1: R1, r2: R2) -> RewriteAndThen<'tcx, T, R1, R2> {
+        RewriteAndThen(r1, r2, PhantomData)
+    }
+}
+
+impl<'tcx, T, R1, R2> Rewrite<'tcx, T> for RewriteAndThen<'tcx, T, R1, R2>
+where T: Transfer<'tcx>, R1: Rewrite<'tcx, T>, R2: Rewrite<'tcx, T> {
+    fn stmt(&self, stmt: &mir::Statement<'tcx>, fact: &T::Lattice) -> StatementChange<'tcx> {
+        let rs = self.0.stmt(stmt, fact);
+        match rs {
+            StatementChange::Remove => StatementChange::Remove,
+            StatementChange::Statement(ns) => self.1.stmt(&ns, fact),
+        }
+    }
+
+    fn term(&self, term: &mir::Terminator<'tcx>, fact: &T::Lattice) -> TerminatorChange<'tcx> {
+        let rt = self.0.term(term, fact);
+        match rt {
+            TerminatorChange::Terminator(nt) => self.1.term(&nt, fact)
+        }
+    }
+}
diff --git a/src/librustc_mir/transform/dataflow/lattice.rs b/src/librustc_mir/transform/dataflow/lattice.rs
new file mode 100644
index 0000000000000..86bb12fe67ace
--- /dev/null
+++ b/src/librustc_mir/transform/dataflow/lattice.rs
@@ -0,0 +1,182 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+use std::fmt::{Debug, Formatter};
+use std::collections::hash_map::Entry;
+use std::collections::HashMap;
+
+/// A lattice type for forward and backward dataflow.
+///
+/// This lattice requires ⊥ to be defined for both forward and backward analyses. Some analyses
+/// might not need it, and for those it is fine to implement `bottom` as a panic (`bug!`).
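+///
+/// A minimal sketch of an implementation (illustration only; `Reachable` is not part of this
+/// patch): the powerset lattice of block indices, with the empty set as ⊥ and set union as join.
+///
+/// ```ignore
+/// #[derive(Clone)]
+/// struct Reachable(::std::collections::HashSet<usize>);
+///
+/// impl Lattice for Reachable {
+///     fn bottom() -> Self { Reachable(::std::collections::HashSet::new()) }
+///     fn join(&mut self, other: Self) -> bool {
+///         let before = self.0.len();
+///         self.0.extend(other.0);
+///         // The fact changed exactly when the union grew.
+///         self.0.len() != before
+///     }
+/// }
+/// ```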
+pub trait Lattice: Clone {
+    fn bottom() -> Self;
+    fn join(&mut self, other: Self) -> bool;
+}
+
+/// Extend the type with a Top point.
+///
+/// Lattice extended with a top point follows these rules:
+///
+/// ```
+/// v + v = V::join(v, v)
+/// ⊤ + v = ⊤ (no change)
+/// v + ⊤ = ⊤
+/// ⊤ + ⊤ = ⊤ (no change)
+/// ```
+///
+/// where `v` is the wrapped value and `V` is its type.
+#[derive(Clone, PartialEq)]
+pub enum WTop<T> {
+    Top,
+    Value(T)
+}
+
+impl<T: Lattice> Lattice for WTop<T> {
+    fn bottom() -> Self {
+        WTop::Value(<T as Lattice>::bottom())
+    }
+
+    fn join(&mut self, other: Self) -> bool {
+        match other {
+            WTop::Top => match *self {
+                WTop::Top => false,
+                ref mut this@WTop::Value(_) => {
+                    *this = WTop::Top;
+                    true
+                }
+            },
+            WTop::Value(ov) => match *self {
+                WTop::Top => false,
+                WTop::Value(ref mut tv) => <T as Lattice>::join(tv, ov),
+            },
+        }
+    }
+}
+
+impl<T: Debug> Debug for WTop<T> {
+    fn fmt(&self, f: &mut Formatter) -> ::std::fmt::Result {
+        match *self {
+            WTop::Top => f.write_str("⊤"),
+            WTop::Value(ref t) => <T as Debug>::fmt(t, f)
+        }
+    }
+}
+
+
+/// Extend the type with a bottom point.
+///
+/// This guarantees that `bottom()` of the underlying lattice won’t get called, making this a
+/// useful wrapper for lattices with no obvious bottom value.
+///
+/// Lattice extended with a bottom point follows these rules:
+///
+/// ```
+/// v + v = V::join(v, v)
+/// ⊥ + v = v
+/// v + ⊥ = v (no change)
+/// ⊥ + ⊥ = ⊥ (no change)
+/// ```
+///
+/// where `v` is the wrapped value and `V` is its type.
+#[derive(Clone, PartialEq)]
+pub enum WBottom<T> {
+    Bottom,
+    Value(T)
+}
+
+impl<T> WBottom<T> {
+    #[inline]
+    pub fn unwrap_or_else<F: FnOnce() -> T>(self, f: F) -> T {
+        match self {
+            WBottom::Bottom => f(),
+            WBottom::Value(v) => v
+        }
+    }
+}
+
+impl<T: Lattice> Lattice for WBottom<T> {
+    fn bottom() -> Self {
+        WBottom::Bottom
+    }
+
+    fn join(&mut self, other: Self) -> bool {
+        match other {
+            WBottom::Bottom => false,
+            WBottom::Value(ov) => match *self {
+                WBottom::Value(ref mut tv) => <T as Lattice>::join(tv, ov),
+                ref mut this => {
+                    *this = WBottom::Value(ov);
+                    true
+                }
+            }
+        }
+    }
+}
+
+impl<T: Debug> Debug for WBottom<T> {
+    fn fmt(&self, f: &mut Formatter) -> ::std::fmt::Result {
+        match *self {
+            WBottom::Bottom => f.write_str("⊥"),
+            WBottom::Value(ref t) => <T as Debug>::fmt(t, f)
+        }
+    }
+}
+
+/// Extend the type with both bottom and top points.
+///
+/// Lattice extended with both points follows these rules:
+///
+/// ```
+/// v + v = join(v, v)
+/// v + ⊥ = v (no change)
+/// v + ⊤ = ⊤
+/// ⊥ + v = v
+/// ⊥ + ⊥ = ⊥ (no change)
+/// ⊥ + ⊤ = ⊤
+/// ⊤ + v = ⊤ (no change)
+/// ⊤ + ⊤ = ⊤ (no change)
+/// ⊤ + ⊥ = ⊤ (no change)
+/// ```
+///
+/// where `v` is the wrapped value and `V` is its type.
+type WTopBottom<T> = WTop<WBottom<T>>;
+
+
+/// A hashmap lattice whose join operation is a key-wise union, joining values point-wise.
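+///
+/// A small illustration using the trivial `()` value lattice (not compiled as a doctest):
+///
+/// ```ignore
+/// let mut a: HashMap<&str, ()> = [("x", ())].iter().cloned().collect();
+/// let b: HashMap<&str, ()> = [("x", ()), ("y", ())].iter().cloned().collect();
+/// assert!(a.join(b));     // `y` is new to `a`, so the fact changed
+/// assert_eq!(a.len(), 2); // keys are unioned, values are joined point-wise
+/// ```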
+impl<K, T, H> Lattice for HashMap<K, T, H>
+where K: Clone + Eq + ::std::hash::Hash,
+      T: Lattice,
+      H: Clone + ::std::hash::BuildHasher + ::std::default::Default
+{
+    fn bottom() -> Self {
+        HashMap::default()
+    }
+    fn join(&mut self, other: Self) -> bool {
+        let mut changed = false;
+        for (key, val) in other.into_iter() {
+            match self.entry(key.clone()) {
+                Entry::Vacant(e) => {
+                    e.insert(val);
+                    changed = true
+                }
+                Entry::Occupied(mut e) => changed |= e.get_mut().join(val)
+            }
+        }
+        changed
+    }
+}
+
+impl Lattice for () {
+    fn bottom() -> Self { () }
+    fn join(&mut self, _: Self) -> bool {
+        false
+    }
+}
diff --git a/src/librustc_mir/transform/dataflow/mod.rs b/src/librustc_mir/transform/dataflow/mod.rs
new file mode 100644
index 0000000000000..22289b783fefd
--- /dev/null
+++ b/src/librustc_mir/transform/dataflow/mod.rs
@@ -0,0 +1,357 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+//! The interleaved, speculative MIR dataflow framework. This framework is heavily inspired by
+//! Hoopl[1][2], and while it has diverged from its inspiration quite a bit in implementation
+//! approach, the general idea stays the same.
+//!
+//! [1]: https://github.com/haskell/hoopl
+//! [2]: http://research.microsoft.com/en-us/um/people/simonpj/papers/c--/hoopl-haskell10.pdf
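+//!
+//! A pass drives the framework by picking a direction, an initial fact, a transfer function and
+//! a rewriter. A sketch of the forward case (the `My*` names are placeholders; the dead-code
+//! pass in this patch does the analogous thing with `Dataflow::backward`):
+//!
+//! ```ignore
+//! let new_mir = Dataflow::forward(mir, MyLattice::bottom(), MyTransfer, MyRewrite);
+//! *mir = new_mir;
+//! ```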
+
+use rustc::mir::repr as mir;
+use rustc::mir::repr::{BasicBlock, BasicBlockData, Mir, START_BLOCK};
+use rustc_data_structures::bitvec::BitVector;
+use rustc_data_structures::indexed_vec::{IndexVec, Idx};
+pub use self::lattice::*;
+pub use self::combinators::*;
+
+mod lattice;
+mod combinators;
+
+pub enum StatementChange<'tcx> {
+    /// Remove the statement being inspected.
+    Remove,
+    /// Replace the statement with another (or the same) one.
+    Statement(mir::Statement<'tcx>)
+    // FIXME: Should grow a way to do replacements with arbitrary graphs.
+    // Fixing this needs figuring out how to represent such an arbitrary graph in a way that could
+    // be both analysed (by and_then combinator, for example) and applied to the MIR/constructed
+    // out from MIR. Such representation needs to allow at least:
+    // * Adding new blocks, edges between blocks in the replacement as well as edges to the other
+    //   blocks in the MIR;
+    // * Adding new locals and perhaps changing existing ones (e.g. the type)
+}
+pub enum TerminatorChange<'tcx> {
+    /// Replace the terminator with another (or the same) one.
+    Terminator(mir::Terminator<'tcx>)
+    // FIXME: Should grow a way to do replacements with arbitrary graphs.
+}
+
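+/// A transfer function describes how facts propagate through statements and terminators.
+///
+/// A minimal sketch of an implementation (illustration only; `NullTransfer` is not part of this
+/// patch) which uses the `()` lattice and therefore tracks nothing:
+///
+/// ```ignore
+/// struct NullTransfer;
+/// impl<'tcx> Transfer<'tcx> for NullTransfer {
+///     type Lattice = ();
+///     // One outgoing fact per successor edge, i.e. a forward analysis.
+///     type TerminatorReturn = Vec<()>;
+///     fn stmt(&self, _: &mir::Statement<'tcx>, fact: ()) -> () { fact }
+///     fn term(&self, term: &mir::Terminator<'tcx>, _: ()) -> Vec<()> {
+///         vec![(); term.successors().len()]
+///     }
+/// }
+/// ```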
+pub trait Transfer<'tcx> {
+    /// The lattice used with this transfer function.
+    type Lattice: Lattice;
+
+    /// Return type of terminator transfer function.
+    ///
+    /// `Vec<Self::Lattice>` for forward analysis, `Self::Lattice` for backward analysis.
+    type TerminatorReturn;
+
+    /// The transfer function which given a statement and a fact produces another fact which is
+    /// true after the statement.
+    fn stmt(&self, stmt: &mir::Statement<'tcx>, fact: Self::Lattice) -> Self::Lattice;
+
+    /// The transfer function which given a terminator and a fact produces another fact for each
+    /// outgoing edge from the terminator.
+    fn term(&self, term: &mir::Terminator<'tcx>, fact: Self::Lattice) -> Self::TerminatorReturn;
+}
+
+pub trait Rewrite<'tcx, T: Transfer<'tcx>> {
+    /// Calculate changes to the statements.
+    fn stmt(&self, stmt: &mir::Statement<'tcx>, fact: &T::Lattice)
+    -> StatementChange<'tcx>;
+
+    /// Calculate changes to the terminators.
+    fn term(&self, term: &mir::Terminator<'tcx>, fact: &T::Lattice)
+    -> TerminatorChange<'tcx>;
+
+    /// Combine two rewriters using the `RewriteAndThen` combinator.
+    ///
+    /// See the combinator's documentation for an explanation of its behaviour.
+    fn and_then<R2>(self, other: R2)
+    -> RewriteAndThen<'tcx, T, Self, R2>
+    where Self: Sized, R2: Rewrite<'tcx, T>
+    {
+        RewriteAndThen::new(self, other)
+    }
+    // FIXME: should gain at least these combinators:
+    // * Fueled – only runs the rewriter a set number of times (needs saving the state of the
+    // rewriter at certain points);
+    // * Deep – rewrite graph produced by rewriter with the same rewriter again;
+    // * maybe a wrapper to hold a tcx?
+}
+
+#[derive(Clone)]
+struct Knowledge<'tcx, F> {
+    fact: F,
+    new_block: Option<BasicBlockData<'tcx>>
+}
+
+pub struct Dataflow<'a, 'tcx: 'a, T, R>
+where T: Transfer<'tcx>, R: Rewrite<'tcx, T>
+{
+    mir: &'a Mir<'tcx>,
+    // FIXME: a bitvector may not be the best choice; a FIFO queue would likely yield better
+    // results at some cost in space. The queue needs to be a set (no duplicate entries), though,
+    // so a plain linked-list based queue is not suitable.
+    queue: BitVector,
+    knowledge: IndexVec<BasicBlock, Option<Knowledge<'tcx, T::Lattice>>>,
+    rewrite: R,
+    transfer: T
+}
+
+impl<'a, 'tcx, L, T, R> Dataflow<'a, 'tcx, T, R>
+where L: Lattice,
+      T: Transfer<'tcx, Lattice=L, TerminatorReturn=Vec<L>>,
+      R: Rewrite<'tcx, T>
+{
+    /// Execute dataflow in the forward direction.
+    pub fn forward(mir: &'a Mir<'tcx>, initial: L, transfer: T, rewrite: R) -> Mir<'tcx> {
+        let block_count = mir.basic_blocks().len();
+        let mut queue = BitVector::new(block_count);
+        queue.insert(START_BLOCK.index());
+        let mut dataflow = Dataflow {
+            mir: mir,
+            queue: queue,
+            knowledge: IndexVec::with_capacity(block_count),
+            rewrite: rewrite,
+            transfer: transfer,
+        };
+        dataflow.knowledge.extend(::std::iter::repeat(None).take(block_count));
+        dataflow.update_fact(START_BLOCK, initial);
+        dataflow.fixpoint(Self::forward_block);
+        dataflow.construct_mir()
+    }
+
+    fn forward_block(&mut self, bb: BasicBlock, mut fact: T::Lattice) {
+        let bb_data = &self.mir[bb];
+        let mut new_stmts = Vec::with_capacity(bb_data.statements.len());
+        for stmt in &bb_data.statements {
+            match self.rewrite.stmt(stmt, &fact) {
+                StatementChange::Remove => {}
+                StatementChange::Statement(stmt) => {
+                    fact = self.transfer.stmt(&stmt, fact);
+                    new_stmts.push(stmt);
+                }
+            }
+        }
+        let terminator = bb_data.terminator();
+        let (new_facts, new_term) = match self.rewrite.term(terminator, &fact) {
+            TerminatorChange::Terminator(term) => (self.transfer.term(&term, fact), term),
+        };
+        let successors = new_term.successors().into_owned();
+        assert!(successors.len() == new_facts.len(), "new_facts.len() must match successor count");
+        // Replace block first and update facts after. This order is important, because updating
+        // facts for a block invalidates the block replacement. If you consider a case like a block
+        // having a backedge into itself, then this particular ordering will correctly invalidate
+        // the replacement block and put this block back into the queue for repeated analysis,
+        // whereas the swapped ordering would not invalidate the replacement at all.
+        self.replace_block(bb, BasicBlockData {
+            statements: new_stmts,
+            terminator: Some(new_term),
+            is_cleanup: bb_data.is_cleanup,
+        });
+        for (fact, &target) in new_facts.into_iter().zip(successors.iter()) {
+            if self.update_fact(target, fact) {
+                self.queue.insert(target.index());
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx, L, T, R> Dataflow<'a, 'tcx, T, R>
+where L: Lattice,
+      T: Transfer<'tcx, Lattice=L, TerminatorReturn=L>,
+      R: Rewrite<'tcx, T>
+{
+    /// Execute dataflow in the backward direction.
+    pub fn backward(mir: &'a Mir<'tcx>, initial: L, transfer: T, rewrite: R) -> Mir<'tcx> {
+        let block_count = mir.basic_blocks().len();
+        let mut queue = BitVector::new(block_count);
+        mir_exits(mir, &mut queue);
+        let mut knowledge = IndexVec::with_capacity(block_count);
+        knowledge.extend(::std::iter::repeat(None).take(block_count));
+        for block in queue.iter() {
+            knowledge[BasicBlock::new(block)] = Some(Knowledge {
+                fact: initial.clone(),
+                new_block: None,
+            });
+        }
+        let mut dataflow = Dataflow {
+            mir: mir,
+            queue: queue,
+            knowledge: knowledge,
+            rewrite: rewrite,
+            transfer: transfer,
+        };
+        dataflow.fixpoint(Self::backward_block);
+        dataflow.construct_mir()
+    }
+
+    fn backward_block(&mut self, bb: BasicBlock, fact: T::Lattice) {
+        let bb_data = &self.mir[bb];
+        let terminator = bb_data.terminator();
+        let (mut fact, new_term) = match self.rewrite.term(terminator, &fact) {
+            TerminatorChange::Terminator(term) => (self.transfer.term(&term, fact), term),
+        };
+        let mut new_stmts = Vec::with_capacity(bb_data.statements.len());
+        for stmt in bb_data.statements.iter().rev() {
+            match self.rewrite.stmt(stmt, &fact) {
+                StatementChange::Remove => {}
+                StatementChange::Statement(stmt) => {
+                    fact = self.transfer.stmt(&stmt, fact);
+                    new_stmts.push(stmt);
+                }
+            }
+        }
+        new_stmts.reverse();
+        self.replace_block(bb, BasicBlockData {
+            statements: new_stmts,
+            terminator: Some(new_term),
+            is_cleanup: bb_data.is_cleanup,
+        });
+        for &target in self.mir.predecessors_for(bb).iter() {
+            if self.update_fact(target, fact.clone()) {
+                self.queue.insert(target.index());
+            }
+        }
+    }
+}
+
+impl<'a, 'tcx, T, R> Dataflow<'a, 'tcx, T, R>
+where T: Transfer<'tcx>,
+      R: Rewrite<'tcx, T>
+{
+    fn fixpoint<BF>(&mut self, f: BF)
+    where BF: Fn(&mut Self, BasicBlock, T::Lattice)
+    {
+        // In the fixpoint loop we never modify the incoming MIR; instead we build up a new MIR,
+        // in order to avoid problems with speculative analysis. As to why speculative analysis is
+        // problematic, consider a constant propagation pass for a loop.
+        //
+        // → --- { idx = 1 } ---
+        // |    idx = idx + 1    # replaced with `idx = 2`
+        // |   if(...) break;    # consider else branch taken
+        // ↑ --- { idx = 2 } --- # backedge to the top
+        //
+        // Here once we analyse the backedge the fact {idx = 1} is joined with fact {idx = 2}
+        // producing a Top ({idx = ⊤}) and rendering our replacement of `idx = idx + 1` with `idx =
+        // 2` invalid.
+        //
+        // If the input graph is immutable, we can always pass the callback the original block
+        // along with the most recent facts for it, thus avoiding the problem altogether.
+        while let Some(block) = self.queue.pop() {
+            let block = BasicBlock::new(block);
+            let fact: T::Lattice = self.recall_fact(block);
+            f(self, block, fact);
+        }
+    }
+
+    fn recall_fact(&self, bb: BasicBlock) -> T::Lattice {
+        self.knowledge[bb].as_ref().map(|k| k.fact.clone()).unwrap_or_else(|| T::Lattice::bottom())
+    }
+
+    fn update_fact(&mut self, bb: BasicBlock, new_fact: T::Lattice) -> bool {
+        match self.knowledge[bb] {
+            ref mut val@None => {
+                // With no prior knowledge about this block we are introducing new knowledge, and
+                // therefore must return true.
+                *val = Some(Knowledge { fact: new_fact, new_block: None });
+                true
+            },
+            Some(Knowledge { ref mut fact, ref mut new_block }) => {
+                let join = T::Lattice::join(fact, new_fact);
+                // In case of some prior knowledge and provided our knowledge changed, we should
+                // invalidate any replacement block that could already exist.
+                if join {
+                    *new_block = None;
+                }
+                join
+            }
+        }
+    }
+
+    fn replace_block(&mut self, bb: BasicBlock, block_data: BasicBlockData<'tcx>) {
+        match self.knowledge[bb] {
+            ref mut v@None => *v = Some(Knowledge {
+                fact: T::Lattice::bottom(),
+                new_block: Some(block_data)
+            }),
+            Some(Knowledge { ref mut new_block, .. }) => *new_block = Some(block_data),
+        }
+    }
+
+    /// Build the new MIR by combining the replacement blocks and original MIR into a clone.
+    fn construct_mir(mut self) -> Mir<'tcx> {
+        let mut new_blocks = IndexVec::with_capacity(self.mir.basic_blocks().len());
+        for (block, old_data) in self.mir.basic_blocks().iter_enumerated() {
+            let new_block = ::std::mem::replace(&mut self.knowledge[block], None)
+                            .and_then(|k| k.new_block);
+            new_blocks.push(if let Some(new_data) = new_block {
+                new_data
+            } else {
+                old_data.clone()
+            });
+        }
+        Mir::new(
+            new_blocks,
+            self.mir.visibility_scopes.clone(),
+            self.mir.promoted.clone(),
+            self.mir.return_ty,
+            self.mir.var_decls.clone(),
+            self.mir.arg_decls.clone(),
+            self.mir.temp_decls.clone(),
+            self.mir.upvar_decls.clone(),
+            self.mir.span
+        )
+    }
+}
+
+fn mir_exits<'tcx>(mir: &Mir<'tcx>, exits: &mut BitVector) {
+    // Find the exit nodes (by brute force, for now). First, find all the nodes without
+    // successors; these are guaranteed exit nodes.
+    let mir_len = mir.basic_blocks().len();
+    let mut lead_to_exit = BitVector::new(mir_len);
+    for (block, block_data) in mir.basic_blocks().iter_enumerated() {
+        if block_data.terminator().successors().len() == 0 {
+            exits.insert(block.index());
+            lead_to_exit.insert(block.index());
+            // Then, all blocks which have a path to these nodes are not exit nodes by definition.
+            mark_all_paths_to_with(block, &mut lead_to_exit, mir, BitVector::insert);
+        }
+    }
+    // All unmarked blocks, provided there are no unreachable blocks in the graph, must belong to
+    // a `loop {}` of some sort, and each such structure should have one of its blocks picked as
+    // an exit node. The optimal exit node for such a structure is the one containing the backedge
+    // (i.e. the `}` of `loop {}`). Finding that backedge might appear easy, but corner cases like
+    //
+    // ```
+    // 'a: loop { loop { if x { continue 'a } } }
+    // ```
+    //
+    // make it considerably more complex. In the end it does not matter very much which node we
+    // pick here: picking any node inside the loop will make dataflow visit all the nodes, only
+    // potentially doing an extra pass or two on a few blocks.
+    lead_to_exit.invert();
+    while let Some(exit) = lead_to_exit.pop() {
+        // `invert` may set bits past the number of blocks; skip those indices.
+        if exit >= mir_len { continue }
+        exits.insert(exit);
+        mark_all_paths_to_with(BasicBlock::new(exit), &mut lead_to_exit, mir, BitVector::remove);
+    }
+}
+
+fn mark_all_paths_to_with<'tcx, F>(block: BasicBlock, mask: &mut BitVector, mir: &Mir<'tcx>, f: F)
+where F: Copy + Fn(&mut BitVector, usize) -> bool
+{
+    for &predecessor in mir.predecessors_for(block).iter() {
+        if f(mask, predecessor.index()) {
+            mark_all_paths_to_with(predecessor, mask, mir, f);
+        }
+    }
+}
diff --git a/src/librustc_mir/transform/deadcode.rs b/src/librustc_mir/transform/deadcode.rs
new file mode 100644
index 0000000000000..2d6a455b81f4b
--- /dev/null
+++ b/src/librustc_mir/transform/deadcode.rs
@@ -0,0 +1,171 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+use rustc_data_structures::bitvec::BitVector;
+use rustc_data_structures::indexed_vec::Idx;
+use rustc::mir::repr::*;
+use rustc::mir::transform::{Pass, MirPass, MirSource};
+use rustc::mir::visit::{Visitor, LvalueContext};
+use rustc::ty::TyCtxt;
+
+use super::dataflow::*;
+
+pub struct DeadCode;
+
+impl Pass for DeadCode {
+    fn should_run(&self, sess: &::rustc::session::Session) -> bool {
+        if sess.opts.debuginfo.is_enabled() {
+            sess.opts.mir_opt_level >= 2
+        } else {
+            sess.opts.mir_opt_level >= 1
+        }
+    }
+}
+
+impl<'tcx> MirPass<'tcx> for DeadCode {
+    fn run_pass<'a>(&mut self, _: TyCtxt<'a, 'tcx, 'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+        let new_mir = Dataflow::backward(mir, DeadCodeLattice::bottom(),
+                                         DeadCodeTransfer, DeadCodeRewrite);
+        *mir = new_mir;
+    }
+}
+
+#[derive(Debug, Clone)]
+struct DeadCodeLattice {
+    vars: BitVector,
+    args: BitVector,
+    tmps: BitVector,
+}
+
+impl Lattice for DeadCodeLattice {
+    fn bottom() -> Self {
+        DeadCodeLattice {
+            vars: BitVector::new(0),
+            tmps: BitVector::new(0),
+            args: BitVector::new(0)
+        }
+    }
+    fn join(&mut self, other: Self) -> bool {
+        self.vars.grow(other.vars.len());
+        self.tmps.grow(other.tmps.len());
+        self.args.grow(other.args.len());
+        let (r1, r2, r3) = (self.vars.insert_all(&other.vars),
+                            self.tmps.insert_all(&other.tmps),
+                            self.args.insert_all(&other.args));
+        r1 || r2 || r3
+    }
+}
+
+impl DeadCodeLattice {
+    fn set_lvalue_live<'a>(&mut self, l: &Lvalue<'a>) {
+        match *l {
+            Lvalue::Arg(a) => {
+                self.args.grow(a.index() + 1);
+                self.args.insert(a.index());
+            }
+            Lvalue::Temp(t) => {
+                self.tmps.grow(t.index() + 1);
+                self.tmps.insert(t.index());
+            }
+            Lvalue::Var(v) => {
+                self.vars.grow(v.index() + 1);
+                self.vars.insert(v.index());
+            }
+            _ => {}
+        }
+    }
+
+    fn set_lvalue_dead<'a>(&mut self, l: &Lvalue<'a>) {
+        match *l.base() {
+            Lvalue::Arg(a) => self.args.remove(a.index()),
+            Lvalue::Temp(t) => self.tmps.remove(t.index()),
+            Lvalue::Var(v) => self.vars.remove(v.index()),
+            _ => false
+        };
+    }
+
+    fn is_live<'a>(&self, lval: &Lvalue<'a>) -> bool {
+        match *lval {
+            Lvalue::Temp(t) => self.tmps.contains(t.index()),
+            Lvalue::Var(v) => self.vars.contains(v.index()),
+            Lvalue::Arg(a) => self.args.contains(a.index()),
+            // All other stuff is *always* live.
+            _ => true
+        }
+    }
+}
+
+struct DeadCodeTransfer;
+impl<'tcx> Transfer<'tcx> for DeadCodeTransfer {
+    type Lattice = DeadCodeLattice;
+    type TerminatorReturn = DeadCodeLattice;
+
+    fn stmt(&self, s: &Statement<'tcx>, lat: DeadCodeLattice) -> DeadCodeLattice {
+        let mut vis = DeadCodeVisitor(lat);
+        vis.visit_statement(START_BLOCK, s);
+        vis.0
+    }
+
+    fn term(&self, t: &Terminator<'tcx>, lat: DeadCodeLattice) -> DeadCodeLattice {
+        let mut vis = DeadCodeVisitor(lat);
+        vis.visit_terminator(START_BLOCK, t);
+        vis.0
+    }
+}
+
+struct DeadCodeRewrite;
+impl<'tcx, T> Rewrite<'tcx, T> for DeadCodeRewrite
+where T: Transfer<'tcx, Lattice=DeadCodeLattice>
+{
+    fn stmt(&self, s: &Statement<'tcx>, lat: &DeadCodeLattice)
+    -> StatementChange<'tcx>
+    {
+        let keep = match s.kind {
+            StatementKind::Assign(ref lval, ref rval) => !rval.is_pure() || lat.is_live(lval),
+            StatementKind::SetDiscriminant { ref lvalue, .. } => lat.is_live(lvalue),
+            StatementKind::StorageLive(_) => true,
+            StatementKind::StorageDead(_) => true,
+        };
+        if keep {
+            StatementChange::Statement(s.clone())
+        } else {
+            StatementChange::Remove
+        }
+    }
+
+    fn term(&self, t: &Terminator<'tcx>, _: &DeadCodeLattice)
+    -> TerminatorChange<'tcx>
+    {
+        TerminatorChange::Terminator(t.clone())
+    }
+}
+
+struct DeadCodeVisitor(DeadCodeLattice);
+impl<'tcx> Visitor<'tcx> for DeadCodeVisitor {
+    fn visit_lvalue(&mut self, lval: &Lvalue<'tcx>, ctx: LvalueContext) {
+        if ctx == LvalueContext::Store || ctx == LvalueContext::CallStore {
+            match *lval {
+                // This is an assignment to the variable, so any uses dominated by this store do
+                // not count as keeping the value live.
+                ref l@Lvalue::Temp(_) |
+                ref l@Lvalue::Var(_) |
+                ref l@Lvalue::Arg(_) => self.0.set_lvalue_dead(l),
+                _ => {}
+            }
+        } else if ctx == LvalueContext::StorageLive || ctx == LvalueContext::StorageDead {
+            // Do not consider StorageDead a use of the value; otherwise we would never apply any
+            // optimisation in this pass. StorageLive behaves similarly to an assignment, making
+            // the value dead above the statement.
+            self.0.set_lvalue_dead(lval);
+        } else {
+            self.0.set_lvalue_live(lval);
+        }
+        self.super_lvalue(lval, ctx);
+    }
+}
diff --git a/src/librustc_mir/transform/dump_mir.rs b/src/librustc_mir/transform/dump_mir.rs
index 642adeee5cd63..694b017bbd706 100644
--- a/src/librustc_mir/transform/dump_mir.rs
+++ b/src/librustc_mir/transform/dump_mir.rs
@@ -26,7 +26,7 @@ impl<'b, 'tcx> MirPass<'tcx> for Marker<'b> {
 }
 
 impl<'b> Pass for Marker<'b> {
-    fn name(&self) -> &str { self.0 }
+    fn name(&self) -> ::std::borrow::Cow<'static, str> { String::from(self.0).into() }
 }
 
 pub struct Disambiguator<'a> {
@@ -58,7 +58,7 @@ impl<'tcx> MirPassHook<'tcx> for DumpMir {
     {
         pretty::dump_mir(
             tcx,
-            pass.name(),
+            &*pass.name(),
             &Disambiguator {
                 pass: pass,
                 is_after: is_after
diff --git a/src/librustc_mir/transform/mod.rs b/src/librustc_mir/transform/mod.rs
index c3485b8256da1..2efa287f8f41c 100644
--- a/src/librustc_mir/transform/mod.rs
+++ b/src/librustc_mir/transform/mod.rs
@@ -8,8 +8,10 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
+pub mod dataflow;
+
 pub mod simplify_branches;
-pub mod simplify_cfg;
+pub mod simplify;
 pub mod erase_regions;
 pub mod no_landing_pads;
 pub mod type_check;
@@ -18,3 +20,5 @@ pub mod promote_consts;
 pub mod qualify_consts;
 pub mod dump_mir;
 pub mod deaggregator;
+pub mod deadcode;
+pub mod const_propagate;
diff --git a/src/librustc_mir/transform/promote_consts.rs b/src/librustc_mir/transform/promote_consts.rs
index 21b406c3bf5c9..d3542eeb35ade 100644
--- a/src/librustc_mir/transform/promote_consts.rs
+++ b/src/librustc_mir/transform/promote_consts.rs
@@ -99,7 +99,7 @@ impl<'tcx> Visitor<'tcx> for TempCollector {
             if *temp == TempState::Undefined {
                 match context {
                     LvalueContext::Store |
-                    LvalueContext::Call => {
+                    LvalueContext::CallStore => {
                         *temp = TempState::Defined {
                             location: self.location,
                             uses: 0
diff --git a/src/librustc_mir/transform/simplify_cfg.rs b/src/librustc_mir/transform/simplify.rs
similarity index 61%
rename from src/librustc_mir/transform/simplify_cfg.rs
rename to src/librustc_mir/transform/simplify.rs
index c0e7e54050adf..de89f05715960 100644
--- a/src/librustc_mir/transform/simplify_cfg.rs
+++ b/src/librustc_mir/transform/simplify.rs
@@ -8,17 +8,23 @@
 // option. This file may not be copied, modified, or distributed
 // except according to those terms.
 
-//! A pass that removes various redundancies in the CFG. It should be
-//! called after every significant CFG modification to tidy things
-//! up.
+//! A number of passes which remove various redundancies in the CFG.
 //!
-//! This pass must also be run before any analysis passes because it removes
-//! dead blocks, and some of these can be ill-typed.
+//! The `SimplifyCfg` pass gets rid of unnecessary blocks in the CFG, whereas the `SimplifyLocals`
+//! pass gets rid of unnecessary locals.
 //!
-//! The cause of that is that typeck lets most blocks whose end is not
-//! reachable have an arbitrary return type, rather than having the
-//! usual () return type (as a note, typeck's notion of reachability
-//! is in fact slightly weaker than MIR CFG reachability - see #31617).
+//! The `SimplifyLocals` pass is fairly expensive and therefore not well suited to being run
+//! often. Most passes are neither affected by nor care about extra locals, so running it once,
+//! right before translation, should suffice.
+//!
+//! At the other end of the spectrum, the `SimplifyCfg` pass is quite cheap to run, so it should
+//! be run after every pass which may modify the CFG in significant ways. This pass must also be
+//! run before any analysis passes because it removes dead blocks, and some of these can be
+//! ill-typed.
+//!
+//! The cause of that is that typeck lets most blocks whose end is not reachable have an arbitrary
+//! return type, rather than having the usual () return type (as a note, typeck's notion of
+//! reachability is in fact slightly weaker than MIR CFG reachability - see #31617).
 //!
 //! A standard example of the situation is:
 //! ```rust
@@ -27,16 +33,16 @@
 //!   }
 //! ```
 //!
-//! Here the block (`{ return; }`) has the return type `char`,
-//! rather than `()`, but the MIR we naively generate still contains
-//! the `_a = ()` write in the unreachable block "after" the return.
-
+//! Here the block (`{ return; }`) has the return type `char`, rather than `()`, but the MIR we
+//! naively generate still contains the `_a = ()` write in the unreachable block "after" the
+//! return.
 
 use rustc_data_structures::bitvec::BitVector;
 use rustc_data_structures::indexed_vec::{Idx, IndexVec};
 use rustc::ty::TyCtxt;
 use rustc::mir::repr::*;
 use rustc::mir::transform::{MirPass, MirSource, Pass};
+use rustc::mir::visit::{MutVisitor, Visitor, LvalueContext};
 use rustc::mir::traversal;
 use std::fmt;
 
@@ -64,7 +70,7 @@ impl<'l> Pass for SimplifyCfg<'l> {
     }
 
     // avoid calling `type_name` - it contains `<'static>`
-    fn name(&self) -> &str { "SimplifyCfg" }
+    fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyCfg".into() }
 }
 
 pub struct CfgSimplifier<'a, 'tcx: 'a> {
@@ -247,3 +253,105 @@ fn remove_dead_blocks(mir: &mut Mir) {
         }
     }
 }
+
+
+pub struct SimplifyLocals;
+
+impl Pass for SimplifyLocals {
+    fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyLocals".into() }
+    fn should_run(&self, sess: &::rustc::session::Session) -> bool {
+        if sess.opts.debuginfo.is_enabled() {
+            sess.opts.mir_opt_level >= 2
+        } else {
+            sess.opts.mir_opt_level >= 1
+        }
+    }
+}
+
+impl<'tcx> MirPass<'tcx> for SimplifyLocals {
+    fn run_pass<'a>(&mut self, _: TyCtxt<'a, 'tcx, 'tcx>, _: MirSource, mir: &mut Mir<'tcx>) {
+        let (live_vars, live_temps) = count_live_locals(mir);
+        let var_map = make_local_map(&mut mir.var_decls, live_vars);
+        let tmp_map = make_local_map(&mut mir.temp_decls, live_temps);
+        // Update references to all vars and tmps now
+        LocalUpdater { var_map: var_map, tmp_map: tmp_map }.visit_mir(mir);
+        mir.var_decls.shrink_to_fit();
+        mir.temp_decls.shrink_to_fit();
+    }
+}
+
+/// Construct the map from old indices to new ones, swapping unused entries out of `vec`.
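+///
+/// A small illustration (hypothetical `decls`, not compiled as a doctest):
+///
+/// ```ignore
+/// // Suppose `decls` holds three declarations and only indices 0 and 2 are marked live.
+/// let mut mask = BitVector::new(3);
+/// mask.insert(0);
+/// mask.insert(2);
+/// let map = make_local_map(&mut decls, mask);
+/// assert_eq!(map, vec![0, !0, 1]); // the dead declaration 1 maps to !0
+/// assert_eq!(decls.len(), 2);      // `decls` has been compacted in place
+/// ```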
+fn make_local_map<'tcx, I: Idx, V>(vec: &mut IndexVec<I, V>, mask: BitVector) -> Vec<usize> {
+    let mut map: Vec<usize> = ::std::iter::repeat(!0).take(vec.len()).collect();
+    let mut used = 0;
+    for alive_index in mask.iter() {
+        map[alive_index] = used;
+        if alive_index != used {
+            vec.swap(alive_index, used);
+        }
+        used += 1;
+    }
+    vec.truncate(used);
+    map
+}
+
+fn count_live_locals<'tcx>(mir: &Mir<'tcx>) -> (BitVector, BitVector) {
+    let mut marker = DeclMarker {
+        live_vars: BitVector::new(mir.var_decls.len()),
+        live_temps: BitVector::new(mir.temp_decls.len()),
+    };
+    marker.visit_mir(mir);
+    let DeclMarker { live_vars, live_temps } = marker;
+    (live_vars, live_temps)
+}
+
+struct DeclMarker {
+    pub live_vars: BitVector,
+    pub live_temps: BitVector
+}
+
+impl<'tcx> Visitor<'tcx> for DeclMarker {
+    fn visit_lvalue(&mut self, lval: &Lvalue<'tcx>, ctx: LvalueContext) {
+        if ctx == LvalueContext::StorageLive || ctx == LvalueContext::StorageDead {
+            return; // ignore these altogether, they get removed along with their decls.
+        }
+        match *lval {
+            Lvalue::Var(ref v) => self.live_vars.insert(v.index()),
+            Lvalue::Temp(ref t) => self.live_temps.insert(t.index()),
+            _ => false,
+        };
+        self.super_lvalue(lval, ctx);
+    }
+}
+
+struct LocalUpdater {
+    var_map: Vec<usize>,
+    tmp_map: Vec<usize>
+}
+
+impl<'tcx> MutVisitor<'tcx> for LocalUpdater {
+    fn visit_basic_block_data(&mut self, block: BasicBlock, data: &mut BasicBlockData<'tcx>) {
+        // Remove unnecessary StorageLive and StorageDead annotations.
+        data.statements.retain(|stmt| {
+            match stmt.kind {
+                StatementKind::StorageLive(ref lval) | StatementKind::StorageDead(ref lval) => {
+                    match *lval {
+                        Lvalue::Var(v) => self.var_map[v.index()] != !0,
+                        Lvalue::Temp(t) => self.tmp_map[t.index()] != !0,
+                        _ => true
+                    }
+                }
+                _ => true
+            }
+        });
+        self.super_basic_block_data(block, data);
+    }
+    fn visit_lvalue(&mut self, lval: &mut Lvalue<'tcx>, ctx: LvalueContext) {
+        match *lval {
+            Lvalue::Var(ref mut v) => *v = Var::new(self.var_map[v.index()]),
+            Lvalue::Temp(ref mut t) => *t = Temp::new(self.tmp_map[t.index()]),
+            _ => (),
+        };
+        self.super_lvalue(lval, ctx);
+    }
+}
diff --git a/src/librustc_mir/transform/simplify_branches.rs b/src/librustc_mir/transform/simplify_branches.rs
index b4960c677a16e..407e21616102c 100644
--- a/src/librustc_mir/transform/simplify_branches.rs
+++ b/src/librustc_mir/transform/simplify_branches.rs
@@ -62,5 +62,5 @@ impl<'l> Pass for SimplifyBranches<'l> {
     }
 
     // avoid calling `type_name` - it contains `<'static>`
-    fn name(&self) -> &str { "SimplifyBranches" }
+    fn name(&self) -> ::std::borrow::Cow<'static, str> { "SimplifyBranches".into() }
 }
diff --git a/src/librustc_mir/transform/type_check.rs b/src/librustc_mir/transform/type_check.rs
index bbd2a93659b0a..48a632a14afbf 100644
--- a/src/librustc_mir/transform/type_check.rs
+++ b/src/librustc_mir/transform/type_check.rs
@@ -423,7 +423,7 @@ impl<'a, 'gcx, 'tcx> TypeChecker<'a, 'gcx, 'tcx> {
                 }
             }
             TerminatorKind::SwitchInt { ref discr, switch_ty, .. } => {
-                let discr_ty = discr.ty(mir, tcx).to_ty(tcx);
+                let discr_ty = discr.ty(mir, tcx);
                 if let Err(terr) = self.sub_types(self.last_span, discr_ty, switch_ty) {
                     span_mirbug!(self, term, "bad SwitchInt ({:?} on {:?}): {:?}",
                                  switch_ty, discr_ty, terr);
diff --git a/src/librustc_trans/mir/analyze.rs b/src/librustc_trans/mir/analyze.rs
index e0d959f4774a6..13ff2a593b623 100644
--- a/src/librustc_trans/mir/analyze.rs
+++ b/src/librustc_trans/mir/analyze.rs
@@ -159,14 +159,14 @@ impl<'mir, 'bcx, 'tcx> Visitor<'tcx> for LocalAnalyzer<'mir, 'bcx, 'tcx> {
 
         if let Some(index) = self.mir.local_index(lvalue) {
             match context {
-                LvalueContext::Call => {
+                LvalueContext::CallStore => {
                     self.mark_assigned(index);
                 }
 
                 LvalueContext::StorageLive |
                 LvalueContext::StorageDead |
                 LvalueContext::Consume => {}
-
+                LvalueContext::AsmOutput |
                 LvalueContext::Store |
                 LvalueContext::Inspect |
                 LvalueContext::Borrow { .. } |
diff --git a/src/librustc_trans/mir/block.rs b/src/librustc_trans/mir/block.rs
index 56d02fa1fac4f..01681396e2185 100644
--- a/src/librustc_trans/mir/block.rs
+++ b/src/librustc_trans/mir/block.rs
@@ -175,8 +175,7 @@ impl<'bcx, 'tcx> MirContext<'bcx, 'tcx> {
 
             mir::TerminatorKind::SwitchInt { ref discr, switch_ty, ref values, ref targets } => {
                 let (otherwise, targets) = targets.split_last().unwrap();
-                let discr = bcx.load(self.trans_lvalue(&bcx, discr).llval);
-                let discr = bcx.with_block(|bcx| base::to_immediate(bcx, discr, switch_ty));
+                let discr = self.trans_operand(&bcx, discr).immediate();
                 let switch = bcx.switch(discr, llblock(self, *otherwise), values.len());
                 for (value, target) in values.iter().zip(targets) {
                     let val = Const::from_constval(bcx.ccx(), value.clone(), switch_ty);
diff --git a/src/librustc_trans/mir/mod.rs b/src/librustc_trans/mir/mod.rs
index 727b680541dd7..488fe152d6a05 100644
--- a/src/librustc_trans/mir/mod.rs
+++ b/src/librustc_trans/mir/mod.rs
@@ -26,7 +26,6 @@ use syntax::parse::token::keywords;
 
 use std::ops::Deref;
 use std::rc::Rc;
-use std::iter;
 
 use basic_block::BasicBlock;
 
@@ -155,22 +154,21 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
          analyze::cleanup_kinds(bcx, &mir))
     });
 
+
     // Compute debuginfo scopes from MIR scopes.
     let scopes = debuginfo::create_mir_scopes(fcx);
 
     // Allocate variable and temp allocas
     let locals = {
         let args = arg_local_refs(&bcx, &mir, &scopes, &lvalue_locals);
-        let vars = mir.var_decls.iter().enumerate().map(|(i, decl)| {
+        let vars = mir.var_decls.iter_enumerated().map(|(var_idx, decl)| {
             let ty = bcx.monomorphize(&decl.ty);
             let scope = scopes[decl.source_info.scope];
             let dbg = !scope.is_null() && bcx.sess().opts.debuginfo == FullDebugInfo;
-
-            let local = mir.local_index(&mir::Lvalue::Var(mir::Var::new(i))).unwrap();
+            let local = mir.local_index(&mir::Lvalue::Var(var_idx)).unwrap();
             if !lvalue_locals.contains(local.index()) && !dbg {
                 return LocalRef::new_operand(bcx.ccx(), ty);
             }
-
             let lvalue = LvalueRef::alloca(&bcx, ty, &decl.name.as_str());
             if dbg {
                 bcx.with_block(|bcx| {
@@ -181,25 +179,30 @@ pub fn trans_mir<'blk, 'tcx: 'blk>(fcx: &'blk FunctionContext<'blk, 'tcx>) {
             }
             LocalRef::Lvalue(lvalue)
         });
-
-        let locals = mir.temp_decls.iter().enumerate().map(|(i, decl)| {
-            (mir::Lvalue::Temp(mir::Temp::new(i)), decl.ty)
-        }).chain(iter::once((mir::Lvalue::ReturnPointer, mir.return_ty)));
-
-        args.into_iter().chain(vars).chain(locals.map(|(lvalue, ty)| {
-            let ty = bcx.monomorphize(&ty);
-            let local = mir.local_index(&lvalue).unwrap();
-            if lvalue == mir::Lvalue::ReturnPointer && fcx.fn_ty.ret.is_indirect() {
-                let llretptr = llvm::get_param(fcx.llfn, 0);
-                LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
-            } else if lvalue_locals.contains(local.index()) {
-                LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", lvalue)))
+        let temps = mir.temp_decls.iter_enumerated().map(|(temp_idx, decl)| {
+            let ty = bcx.monomorphize(&decl.ty);
+            let local = mir.local_index(&mir::Lvalue::Temp(temp_idx)).unwrap();
+            if lvalue_locals.contains(local.index()) {
+                LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, &format!("{:?}", temp_idx)))
             } else {
                 // If this is an immediate local, we do not create an
                 // alloca in advance. Instead we wait until we see the
                 // definition and update the operand there.
                 LocalRef::new_operand(bcx.ccx(), ty)
             }
+        });
+
+        args.into_iter().chain(vars).chain(temps).chain(Some({
+            let ty = bcx.monomorphize(&mir.return_ty);
+            let lvalue = mir::Lvalue::ReturnPointer;
+            if fcx.fn_ty.ret.is_indirect() {
+                let llretptr = llvm::get_param(fcx.llfn, 0);
+                LocalRef::Lvalue(LvalueRef::new_sized(llretptr, LvalueTy::from_ty(ty)))
+            } else if lvalue_locals.contains(mir.local_index(&lvalue).unwrap().index()) {
+                LocalRef::Lvalue(LvalueRef::alloca(&bcx, ty, "ret".into()))
+            } else {
+                LocalRef::new_operand(bcx.ccx(), ty)
+            }
         })).collect()
     };
 
diff --git a/src/test/codegen/lifetime_start_end.rs b/src/test/codegen/lifetime_start_end.rs
index cf91e7a8bcb66..9930b0b4f1fc5 100644
--- a/src/test/codegen/lifetime_start_end.rs
+++ b/src/test/codegen/lifetime_start_end.rs
@@ -18,14 +18,14 @@
 #[rustc_mir] // FIXME #27840 MIR has different codegen.
 pub fn test() {
     let a = 0;
-    &a; // keep variable in an alloca
+    drop(&a); // keep variable in an alloca
 
 // CHECK: [[S_a:%[0-9]+]] = bitcast i32* %a to i8*
 // CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_a]])
 
     {
         let b = &Some(a);
-        &b; // keep variable in an alloca
+        drop(&b); // keep variable in an alloca
 
 // CHECK: [[S_b:%[0-9]+]] = bitcast %"2.std::option::Option<i32>"** %b to i8*
 // CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_b]])
@@ -41,7 +41,7 @@ pub fn test() {
     }
 
     let c = 1;
-    &c; // keep variable in an alloca
+    drop(&c); // keep variable in an alloca
 
 // CHECK: [[S_c:%[0-9]+]] = bitcast i32* %c to i8*
 // CHECK: call void @llvm.lifetime.start(i{{[0-9 ]+}}, i8* [[S_c]])
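Note on the test edits above: they replace bare borrow statements with `drop(...)` calls. With the added MIR passes an unused `&a;` can be deleted outright, so it no longer forces `a` into an alloca and the lifetime-marker CHECK lines would stop matching; passing the reference to a function is an observable use and keeps the variable addressable. A tiny standalone illustration of the difference (ordinary Rust, not tied to rustc internals):

    fn main() {
        let a = 0;
        // `&a;` produces a reference whose result is unused, so the optimizer may
        // delete the borrow and keep `a` out of memory entirely.
        // Handing the reference to a function instead makes the address observable,
        // so `a` must live in an alloca for the duration of the call:
        drop(&a);
        let b = &Some(a);
        drop(&b);
    }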
diff --git a/src/test/compile-fail/array_const_index-0.rs b/src/test/compile-fail/array_const_index-0.rs
index 501c66e75cded..1efce56aef7f0 100644
--- a/src/test/compile-fail/array_const_index-0.rs
+++ b/src/test/compile-fail/array_const_index-0.rs
@@ -14,5 +14,5 @@ const B: i32 = (&A)[1];
 //~| index out of bounds: the len is 0 but the index is 1
 
 fn main() {
-    let _ = B;
+    drop(B);
 }
diff --git a/src/test/compile-fail/array_const_index-1.rs b/src/test/compile-fail/array_const_index-1.rs
index d3b43e83bfe52..3babc54b3101b 100644
--- a/src/test/compile-fail/array_const_index-1.rs
+++ b/src/test/compile-fail/array_const_index-1.rs
@@ -14,5 +14,5 @@ const B: i32 = A[1];
 //~| index out of bounds: the len is 0 but the index is 1
 
 fn main() {
-    let _ = B;
+    drop(B);
 }
diff --git a/src/test/compile-fail/const-array-oob.rs b/src/test/compile-fail/const-array-oob.rs
index b980bc02c85a2..f7848e16e696a 100644
--- a/src/test/compile-fail/const-array-oob.rs
+++ b/src/test/compile-fail/const-array-oob.rs
@@ -20,5 +20,5 @@ const BLUB: [u32; FOO[4]] = [5, 6];
 //~| index out of bounds: the len is 3 but the index is 4
 
 fn main() {
-    let _ = BAR;
+    drop(BAR);
 }
diff --git a/src/test/compile-fail/const-slice-oob.rs b/src/test/compile-fail/const-slice-oob.rs
index b1b4bfe2d1c39..5017840461617 100644
--- a/src/test/compile-fail/const-slice-oob.rs
+++ b/src/test/compile-fail/const-slice-oob.rs
@@ -14,5 +14,5 @@ const BAR: u32 = FOO[5];
 //~| index out of bounds: the len is 3 but the index is 5
 
 fn main() {
-    let _ = BAR;
+    drop(BAR);
 }
diff --git a/src/test/compile-fail/huge-array.rs b/src/test/compile-fail/huge-array.rs
index 029e9651cb3cd..25452b2bc07ed 100644
--- a/src/test/compile-fail/huge-array.rs
+++ b/src/test/compile-fail/huge-array.rs
@@ -12,6 +12,7 @@
 
 fn generic<T: Copy>(t: T) {
     let s: [T; 1518600000] = [t; 1518600000];
+    drop(s);
 }
 
 fn main() {
diff --git a/src/test/compile-fail/huge-enum.rs b/src/test/compile-fail/huge-enum.rs
index 6e7c05370b99d..a82dafd96b844 100644
--- a/src/test/compile-fail/huge-enum.rs
+++ b/src/test/compile-fail/huge-enum.rs
@@ -15,9 +15,11 @@
 #[cfg(target_pointer_width = "32")]
 fn main() {
     let big: Option<[u32; (1<<29)-1]> = None;
+    drop(big);
 }
 
 #[cfg(target_pointer_width = "64")]
 fn main() {
     let big: Option<[u32; (1<<45)-1]> = None;
+    drop(big);
 }
diff --git a/src/test/compile-fail/huge-struct.rs b/src/test/compile-fail/huge-struct.rs
index a10c61d6606d0..1feccecd662f0 100644
--- a/src/test/compile-fail/huge-struct.rs
+++ b/src/test/compile-fail/huge-struct.rs
@@ -51,4 +51,5 @@ struct S1M<T> { val: S1k<S1k<T>> }
 
 fn main() {
     let fat: Option<S1M<S1M<S1M<u32>>>> = None;
+    drop(fat);
 }
diff --git a/src/test/compile-fail/issue-15919.rs b/src/test/compile-fail/issue-15919.rs
index df7e7c102b213..e63ef3a1696cd 100644
--- a/src/test/compile-fail/issue-15919.rs
+++ b/src/test/compile-fail/issue-15919.rs
@@ -13,9 +13,11 @@
 #[cfg(target_pointer_width = "32")]
 fn main() {
     let x = [0usize; 0xffff_ffff];
+    drop(x);
 }
 
 #[cfg(target_pointer_width = "64")]
 fn main() {
     let x = [0usize; 0xffff_ffff_ffff_ffff];
+    drop(x);
 }
diff --git a/src/test/mir-opt/const-prop-1.rs b/src/test/mir-opt/const-prop-1.rs
new file mode 100644
index 0000000000000..ceed4a48cf6c6
--- /dev/null
+++ b/src/test/mir-opt/const-prop-1.rs
@@ -0,0 +1,78 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+fn main() {
+    let x = 10;
+    let y = 20;
+    if x == 12 {
+        [42u8][y];
+    } else {
+        return;
+    }
+}
+
+// END RUST SOURCE
+// START rustc.node4.ConstPropagate.before.mir
+// bb0: {
+//     StorageLive(var0);
+//     var0 = const 10i32;
+//     StorageLive(var1);
+//     var1 = const 20usize;
+//     StorageLive(tmp0);
+//     StorageLive(tmp1);
+//     tmp1 = var0;
+//     tmp0 = Eq(tmp1, const 12i32);
+//     if(tmp0) -> [true: bb1, false: bb2];
+// }
+//
+// bb1: {
+//     StorageLive(tmp3);
+//     StorageLive(tmp4);
+//     tmp4 = [const 42u8];
+//     StorageLive(tmp5);
+//     tmp5 = var1;
+//     tmp6 = Len(tmp4);
+//     tmp7 = Lt(tmp5, tmp6);
+//     assert(tmp7, "index out of bounds: the len is {} but the index is {}", tmp6, tmp5) -> bb3;
+// }
+//
+// bb2: {
+//     StorageLive(tmp9);
+//     return = ();
+//     StorageDead(var1);
+//     StorageDead(var0);
+//     StorageDead(tmp0);
+//     StorageDead(tmp1);
+//     goto -> bb4;
+// }
+//
+// bb3: {
+//     tmp3 = tmp4[tmp5];
+//     tmp2 = tmp3;
+//     StorageDead(tmp3);
+//     StorageDead(tmp5);
+//     StorageDead(tmp4);
+//     return = ();
+//     StorageDead(var1);
+//     StorageDead(var0);
+//     StorageDead(tmp0);
+//     StorageDead(tmp1);
+//     goto -> bb4;
+// }
+// END rustc.node4.ConstPropagate.before.mir
+// START rustc.node4.SimplifyLocals.after.mir
+// bb0: {
+//     goto -> bb1;
+// }
+//
+// bb1: {
+//     return = ();
+//     return;
+// }
+// END rustc.node4.SimplifyLocals.after.mir
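Note on reading this dump: the mir-opt harness compiles everything above `// END RUST SOURCE` and, roughly, checks that each `// START ... // END` block matches the MIR emitted for the named pass. The transformation the test pins down is constant folding of `Eq(tmp1, const 12i32)` once `tmp1` is known to be `10`, which makes the indexing arm dead. A minimal sketch of that folding step; names and types are illustrative, not rustc's:

    #[derive(Clone, Copy, Debug, PartialEq)]
    enum Const { Int(i64), Bool(bool) }

    #[derive(Clone, Copy)]
    enum BinOp { Add, Eq, Lt }

    // Fold a binary operation whose operands are both known constants;
    // return None to leave the MIR statement untouched.
    fn fold(op: BinOp, l: Const, r: Const) -> Option<Const> {
        match (op, l, r) {
            (BinOp::Add, Const::Int(a), Const::Int(b)) => Some(Const::Int(a + b)),
            (BinOp::Eq,  Const::Int(a), Const::Int(b)) => Some(Const::Bool(a == b)),
            (BinOp::Lt,  Const::Int(a), Const::Int(b)) => Some(Const::Bool(a < b)),
            _ => None,
        }
    }

    fn main() {
        // `tmp0 = Eq(tmp1, const 12i32)` with `tmp1 = const 10i32` folds to false,
        // so `if(tmp0)` always takes the `false` edge and bb1 becomes dead code.
        assert_eq!(fold(BinOp::Eq, Const::Int(10), Const::Int(12)),
                   Some(Const::Bool(false)));
    }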
diff --git a/src/test/mir-opt/const-prop-2.rs b/src/test/mir-opt/const-prop-2.rs
new file mode 100644
index 0000000000000..03e18ded6ef01
--- /dev/null
+++ b/src/test/mir-opt/const-prop-2.rs
@@ -0,0 +1,54 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+fn main() {
+    let cfg = cfg!(never_ever_set);
+    let x = 42 + if cfg { 42 } else { 0 };
+    ::std::process::exit(x - 42);
+}
+
+// END RUST SOURCE
+// START rustc.node4.ConstPropagate.before.mir
+// bb0: {
+//     StorageLive(var0);
+//     var0 = const false;
+//     StorageLive(var1);
+//     StorageLive(tmp0);
+//     StorageLive(tmp1);
+//     tmp1 = var0;
+//     if(tmp1) -> [true: bb1, false: bb2];
+// }
+//
+// bb1: {
+//     tmp0 = const 42i32;
+//     goto -> bb3;
+// }
+//
+// bb2: {
+//     tmp0 = const 0i32;
+//     goto -> bb3;
+// }
+//
+// bb3: {
+//     var1 = Add(const 42i32, tmp0);
+//     StorageDead(tmp0);
+//     StorageDead(tmp1);
+//     StorageLive(tmp3);
+//     StorageLive(tmp4);
+//     StorageLive(tmp5);
+//     tmp5 = var1;
+//     tmp4 = Sub(tmp5, const 42i32);
+//     std::process::exit(tmp4);
+// }
+// END rustc.node4.ConstPropagate.before.mir
+// START rustc.node4.SimplifyLocals.after.mir
+// bb1: {
+//     std::process::exit(const 0i32);
+// }
+// END rustc.node4.SimplifyLocals.after.mir
diff --git a/src/test/mir-opt/const-prop-3.rs b/src/test/mir-opt/const-prop-3.rs
new file mode 100644
index 0000000000000..1bcba9d73920a
--- /dev/null
+++ b/src/test/mir-opt/const-prop-3.rs
@@ -0,0 +1,77 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+fn main() {
+    let x = 0;
+    match x {
+        y@0 => ::std::process::exit(y),
+        y@1 => ::std::process::exit(y),
+        y@2 => ::std::process::exit(y),
+        y@3 => ::std::process::exit(y),
+        y@_ => ::std::process::exit(y),
+    }
+}
+// END RUST SOURCE
+// START rustc.node4.ConstPropagate.before.mir
+// bb0: {
+//     StorageLive(var0);
+//     var0 = const 0i32;
+//     switchInt(var0) -> [0i32: bb2, 1i32: bb3, 2i32: bb4, 3i32: bb5, otherwise: bb1];
+// }
+//
+// bb1: {
+//     StorageLive(var5);
+//     var5 = var0;
+//     StorageLive(tmp8);
+//     StorageLive(tmp9);
+//     tmp9 = var5;
+//     std::process::exit(tmp9);
+// }
+//
+// bb2: {
+//     StorageLive(var1);
+//     var1 = var0;
+//     StorageLive(tmp0);
+//     StorageLive(tmp1);
+//     tmp1 = var1;
+//     std::process::exit(tmp1);
+// }
+//
+// bb3: {
+//     StorageLive(var2);
+//     var2 = var0;
+//     StorageLive(tmp2);
+//     StorageLive(tmp3);
+//     tmp3 = var2;
+//     std::process::exit(tmp3);
+// }
+//
+// bb4: {
+//     StorageLive(var3);
+//     var3 = var0;
+//     StorageLive(tmp4);
+//     StorageLive(tmp5);
+//     tmp5 = var3;
+//     std::process::exit(tmp5);
+// }
+//
+// bb5: {
+//     StorageLive(var4);
+//     var4 = var0;
+//     StorageLive(tmp6);
+//     StorageLive(tmp7);
+//     tmp7 = var4;
+//     std::process::exit(tmp7);
+// }
+// END rustc.node4.ConstPropagate.before.mir
+// START rustc.node4.SimplifyLocals.after.mir
+// bb1: {
+//     std::process::exit(const 0i32);
+// }
+// END rustc.node4.SimplifyLocals.after.mir
diff --git a/src/test/mir-opt/const-prop-loop-1.rs b/src/test/mir-opt/const-prop-loop-1.rs
new file mode 100644
index 0000000000000..4eb2d54fc06b2
--- /dev/null
+++ b/src/test/mir-opt/const-prop-loop-1.rs
@@ -0,0 +1,80 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+fn hdrty() -> usize { 6 }
+
+fn inc(idx: u16) -> usize { 1 }
+
+fn main() {
+    let hdrty = hdrty();
+    let max = match hdrty {
+        0 => 6,
+        1 => 2,
+        _ => 0,
+    };
+    let mut i = 0;
+    while i < max {
+        let next = inc(i as u16);
+        i = next;
+    }
+}
+
+// END RUST SOURCE
+// START rustc.node17.ConstPropagate.before.mir
+// bb6: {
+//     tmp1 = var2;
+//     tmp2 = var1;
+//     tmp0 = Lt(tmp1, tmp2);
+//     if(tmp0) -> [true: bb8, false: bb7];
+// }
+//
+// bb7: {
+//     return = ();
+//     return;
+// }
+//
+// bb8: {
+//     tmp5 = var2;
+//     tmp4 = tmp5 as u16 (Misc);
+//     var3 = inc(tmp4) -> bb9;
+// }
+//
+// bb9: {
+//     tmp6 = var3;
+//     var2 = tmp6;
+//     tmp3 = ();
+//     goto -> bb6;
+// }
+// END rustc.node17.ConstPropagate.before.mir
+// START rustc.node17.ConstPropagate.after.mir
+// bb6: {
+//     tmp1 = var2;
+//     tmp2 = var1;
+//     tmp0 = Lt(tmp1, tmp2);
+//     if(tmp0) -> [true: bb8, false: bb7];
+// }
+//
+// bb7: {
+//     return = ();
+//     return;
+// }
+//
+// bb8: {
+//     tmp5 = var2;
+//     tmp4 = tmp5 as u16 (Misc);
+//     var3 = inc(tmp4) -> bb9;
+// }
+//
+// bb9: {
+//     tmp6 = var3;
+//     var2 = tmp6;
+//     tmp3 = ();
+//     goto -> bb6;
+// }
+// END rustc.node17.ConstPropagate.after.mir
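Note on this loop test: it deliberately expects identical MIR before and after ConstPropagate. `var2` (`i`) starts at a constant but is reassigned on the back edge, so at the loop head its value is no longer known and `Lt(tmp1, tmp2)` must stay a runtime check. A rough sketch of the join/invalidation rule involved, using a hypothetical helper rather than rustc's implementation:

    use std::collections::HashMap;

    // Merge the constants known at the end of each predecessor block; a local
    // keeps its value only if every predecessor agrees on it.
    fn join(preds: &[HashMap<&'static str, i64>]) -> HashMap<&'static str, i64> {
        let mut out = preds.get(0).cloned().unwrap_or_default();
        for p in &preds[1..] {
            out.retain(|k, v| p.get(k) == Some(&*v));
        }
        out
    }

    fn main() {
        let mut entry = HashMap::new();
        entry.insert("i", 0); // `let mut i = 0;` before the loop

        // Back edge: `i = next;` where `next` is a call result, so nothing is known.
        let back_edge: HashMap<&'static str, i64> = HashMap::new();

        let at_loop_head = join(&[entry, back_edge]);
        // The predecessors disagree, so `i` is not constant at the loop head and
        // the `i < max` comparison cannot be folded.
        assert!(at_loop_head.get("i").is_none());
    }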
diff --git a/src/test/mir-opt/const-prop-loop-2.rs b/src/test/mir-opt/const-prop-loop-2.rs
new file mode 100644
index 0000000000000..bb0179689617b
--- /dev/null
+++ b/src/test/mir-opt/const-prop-loop-2.rs
@@ -0,0 +1,87 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+
+fn main() {
+    let mut x = 0;
+    let mut y = true;
+    while y {
+        if x != 0 {
+            x = 1;
+        }
+        y = false;
+    }
+    println(x);
+}
+
+fn println(x: u32) {
+    println!("{}", x);
+}
+// END RUST SOURCE
+// START rustc.node4.ConstPropagate.before.mir
+// bb0: {
+//     var0 = const 0u32;
+//     var1 = const true;
+//     goto -> bb1;
+// }
+//
+// bb1: {
+//     tmp1 = var1;
+//     if(tmp1) -> [true: bb3, false: bb2];
+// }
+//
+// bb2: {
+//     tmp0 = ();
+//     tmp7 = var0;
+//     tmp6 = println(tmp7) -> bb7;
+// }
+//
+// bb3: {
+//     tmp5 = var0;
+//     tmp4 = Ne(tmp5, const 0u32);
+//     if(tmp4) -> [true: bb4, false: bb5];
+// }
+//
+// bb4: {
+//     var0 = const 1u32;
+//     tmp3 = ();
+//     goto -> bb6;
+// }
+//
+// bb5: {
+//     tmp3 = ();
+//     goto -> bb6;
+// }
+//
+// bb6: {
+//     var1 = const false;
+//     tmp2 = ();
+//     goto -> bb1;
+// }
+// END rustc.node4.ConstPropagate.before.mir
+// START rustc.node4.DeadCode.after.mir
+// bb0: {
+//     var1 = const true;
+//     goto -> bb1;
+// }
+//
+// bb1: {
+//     tmp1 = var1;
+//     if(tmp1) -> [true: bb3, false: bb2];
+// }
+//
+// bb2: {
+//     tmp6 = println(const 0u32) -> bb4;
+// }
+//
+// bb3: {
+//     var1 = const false;
+//     goto -> bb1;
+// }
+// END rustc.node4.DeadCode.after.mir
diff --git a/src/test/mir-opt/const-prop-static.rs b/src/test/mir-opt/const-prop-static.rs
new file mode 100644
index 0000000000000..d462a363aa357
--- /dev/null
+++ b/src/test/mir-opt/const-prop-static.rs
@@ -0,0 +1,83 @@
+// Copyright 2016 The Rust Project Developers. See the COPYRIGHT
+// file at the top-level directory of this distribution and at
+// http://rust-lang.org/COPYRIGHT.
+//
+// Licensed under the Apache License, Version 2.0 <LICENSE-APACHE or
+// http://www.apache.org/licenses/LICENSE-2.0> or the MIT license
+// <LICENSE-MIT or http://opensource.org/licenses/MIT>, at your
+// option. This file may not be copied, modified, or distributed
+// except according to those terms.
+static mut BANANA: u32 = 42;
+
+fn assert_eq(x: u32, y: u32) {
+    assert_eq!(x, y);
+}
+
+unsafe fn reset_banana() {
+    BANANA = 42;
+}
+
+fn main() {
+    unsafe {
+        BANANA = 21;
+        let x = BANANA + 21;
+        if x != 42 { assert_eq(x, 42); }
+        reset_banana();
+        assert_eq(BANANA, 42);
+    }
+}
+// END RUST SOURCE
+// START rustc.node101.ConstPropagate.before.mir
+// bb0: {
+//     BANANA = const 21u32;
+//     tmp0 = BANANA;
+//     var0 = Add(tmp0, const 21u32);
+//     tmp3 = var0;
+//     tmp2 = Ne(tmp3, const 42u32);
+//     if(tmp2) -> [true: bb1, false: bb2];
+// }
+//
+// bb1: {
+//     tmp5 = var0;
+//     tmp4 = assert_eq(tmp5, const 42u32) -> bb3;
+// }
+//
+// bb2: {
+//     tmp1 = ();
+//     goto -> bb4;
+// }
+//
+// bb3: {
+//     tmp1 = ();
+//     goto -> bb4;
+// }
+//
+// bb4: {
+//     tmp6 = reset_banana() -> bb5;
+// }
+//
+// bb5: {
+//     tmp8 = BANANA;
+//     tmp7 = assert_eq(tmp8, const 42u32) -> bb6;
+// }
+// END rustc.node101.ConstPropagate.before.mir
+// START rustc.node101.DeadCode.after.mir
+// bb0: {
+//     BANANA = const 21u32;
+//     goto -> bb1;
+// }
+//
+// bb1: {
+//     tmp6 = reset_banana() -> bb2;
+// }
+//
+// bb2: {
+//     tmp8 = BANANA;
+//     tmp7 = assert_eq(tmp8, const 42u32) -> bb3;
+// }
+//
+// bb3: {
+//     return = ();
+//     return;
+// }
+// END rustc.node101.DeadCode.after.mir
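Note on the static test: it pins down a second conservative rule. A `static mut` can be written by any callee, so whatever the propagator learned from `BANANA = const 21u32;` has to be discarded at the `reset_banana()` call, forcing the later `tmp8 = BANANA` to be a real load. A small sketch of that call barrier, with hypothetical helper names rather than rustc's:

    use std::collections::HashMap;

    // Any call may mutate any `static mut`, so treat calls as a full barrier for
    // everything the propagator knows about statics.
    fn call_barrier(known_statics: &mut HashMap<&'static str, u32>) {
        known_statics.clear();
    }

    fn main() {
        let mut known_statics = HashMap::new();
        known_statics.insert("BANANA", 21); // after `BANANA = const 21u32;`

        call_barrier(&mut known_statics);   // the `reset_banana()` call site

        // Nothing is known any more, so `assert_eq(BANANA, 42)` must reload BANANA.
        assert!(known_statics.get("BANANA").is_none());
    }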
diff --git a/src/test/mir-opt/storage_ranges.rs b/src/test/mir-opt/storage_ranges.rs
index f93447b642a20..f99ea1302d495 100644
--- a/src/test/mir-opt/storage_ranges.rs
+++ b/src/test/mir-opt/storage_ranges.rs
@@ -19,29 +19,47 @@ fn main() {
 }
 
 // END RUST SOURCE
+// START rustc.node4.SimplifyCfg.initial-after.mir
+// bb0: {
+//     StorageLive(var0);
+//     var0 = const 0i32;
+//     StorageLive(var1);
+//     StorageLive(tmp1);
+//     StorageLive(tmp2);
+//     tmp2 = var0;
+//     tmp1 = std::option::Option<i32>::Some(tmp2,);
+//     var1 = &tmp1;
+//     StorageDead(tmp2);
+//     tmp0 = ();
+//     StorageDead(tmp1);
+//     StorageDead(var1);
+//     StorageLive(var2);
+//     var2 = const 1i32;
+//     return = ();
+//     StorageDead(var2);
+//     StorageDead(var0);
+//     goto -> bb1;
+// }
+// END rustc.node4.SimplifyCfg.initial-after.mir
+// START rustc.node4.DeadCode.after.mir
+// bb0: {
+//     StorageLive(var0);
+//     StorageLive(var1);
+//     StorageLive(tmp1);
+//     StorageLive(tmp2);
+//     StorageDead(tmp2);
+//     StorageDead(tmp1);
+//     StorageDead(var1);
+//     StorageLive(var2);
+//     return = ();
+//     StorageDead(var2);
+//     StorageDead(var0);
+//     goto -> bb1;
+// }
+// END rustc.node4.DeadCode.after.mir
 // START rustc.node4.PreTrans.after.mir
-//     bb0: {
-//         StorageLive(var0);               // scope 0 at storage_ranges.rs:12:9: 12:10
-//         var0 = const 0i32;               // scope 0 at storage_ranges.rs:12:13: 12:14
-//         StorageLive(var1);               // scope 1 at storage_ranges.rs:14:13: 14:14
-//         StorageLive(tmp1);               // scope 1 at storage_ranges.rs:14:18: 14:25
-//         StorageLive(tmp2);               // scope 1 at storage_ranges.rs:14:23: 14:24
-//         tmp2 = var0;                     // scope 1 at storage_ranges.rs:14:23: 14:24
-//         tmp1 = std::option::Option<i32>::Some(tmp2,); // scope 1 at storage_ranges.rs:14:18: 14:25
-//         var1 = &tmp1;                    // scope 1 at storage_ranges.rs:14:17: 14:25
-//         StorageDead(tmp2);               // scope 1 at storage_ranges.rs:14:23: 14:24
-//         tmp0 = ();                       // scope 2 at storage_ranges.rs:13:5: 15:6
-//         StorageDead(tmp1);               // scope 1 at storage_ranges.rs:14:18: 14:25
-//         StorageDead(var1);               // scope 1 at storage_ranges.rs:14:13: 14:14
-//         StorageLive(var2);               // scope 1 at storage_ranges.rs:16:9: 16:10
-//         var2 = const 1i32;               // scope 1 at storage_ranges.rs:16:13: 16:14
-//         return = ();                     // scope 3 at storage_ranges.rs:11:11: 17:2
-//         StorageDead(var2);               // scope 1 at storage_ranges.rs:16:9: 16:10
-//         StorageDead(var0);               // scope 0 at storage_ranges.rs:12:9: 12:10
-//         goto -> bb1;                     // scope 0 at storage_ranges.rs:11:1: 17:2
-//     }
-//
-//     bb1: {
-//         return;                          // scope 0 at storage_ranges.rs:11:1: 17:2
-//     }
+// bb0: {
+//     return = ();
+//     goto -> bb1;
+// }
 // END rustc.node4.PreTrans.after.mir