diff --git a/src/bootstrap/builder/tests.rs b/src/bootstrap/builder/tests.rs
index cca8ab80c93b1..1e75e67af0c9e 100644
--- a/src/bootstrap/builder/tests.rs
+++ b/src/bootstrap/builder/tests.rs
@@ -54,6 +54,11 @@ fn dist_baseline() {
         &[dist::Std { compiler: Compiler { host: a, stage: 1 }, target: a },]
     );
     assert_eq!(first(builder.cache.all::<dist::Src>()), &[dist::Src]);
+    // Make sure rustdoc is only built once.
+    assert_eq!(
+        first(builder.cache.all::<tool::Rustdoc>()),
+        &[tool::Rustdoc { compiler: Compiler { host: a, stage: 2 } },]
+    );
 }
 
 #[test]
@@ -414,3 +419,77 @@ fn test_exclude() {
     // Ensure other tests are not affected.
     assert!(builder.cache.contains::<test::RustdocUi>());
 }
+
+#[test]
+fn doc_default() {
+    let mut config = configure(&[], &[]);
+    config.compiler_docs = true;
+    config.cmd = Subcommand::Doc { paths: Vec::new(), open: false };
+    let build = Build::new(config);
+    let mut builder = Builder::new(&build);
+    builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Doc), &[]);
+    let a = INTERNER.intern_str("A");
+
+    // error_index_generator uses stage 1 to share rustdoc artifacts with the
+    // rustdoc tool.
+    assert_eq!(
+        first(builder.cache.all::<doc::ErrorIndex>()),
+        &[doc::ErrorIndex { compiler: Compiler { host: a, stage: 1 }, target: a },]
+    );
+    assert_eq!(
+        first(builder.cache.all::<tool::ErrorIndex>()),
+        &[tool::ErrorIndex { compiler: Compiler { host: a, stage: 1 } }]
+    );
+    // This is actually stage 1, but Rustdoc::run swaps the requested compiler
+    // for the one at stage minus 1 whenever --stage is not 0. Very confusing!
+    assert_eq!(
+        first(builder.cache.all::<tool::Rustdoc>()),
+        &[tool::Rustdoc { compiler: Compiler { host: a, stage: 2 } },]
+    );
+}
+
+#[test]
+fn test_docs() {
+    // Behavior of `x.py test` doing various documentation tests.
+    let mut config = configure(&[], &[]);
+    config.cmd = Subcommand::Test {
+        paths: vec![],
+        test_args: vec![],
+        rustc_args: vec![],
+        fail_fast: true,
+        doc_tests: DocTests::Yes,
+        bless: false,
+        compare_mode: None,
+        rustfix_coverage: false,
+        pass: None,
+    };
+    let build = Build::new(config);
+    let mut builder = Builder::new(&build);
+    builder.run_step_descriptions(&Builder::get_step_descriptions(Kind::Test), &[]);
+    let a = INTERNER.intern_str("A");
+
+    // error_index_generator uses stage 1 to share rustdoc artifacts with the
+    // rustdoc tool.
+    assert_eq!(
+        first(builder.cache.all::<doc::ErrorIndex>()),
+        &[doc::ErrorIndex { compiler: Compiler { host: a, stage: 1 }, target: a },]
+    );
+    assert_eq!(
+        first(builder.cache.all::<tool::ErrorIndex>()),
+        &[tool::ErrorIndex { compiler: Compiler { host: a, stage: 1 } }]
+    );
+    // Unfortunately rustdoc is built twice. Once from stage1 for compiletest
+    // (and other things), and once from stage0 for std crates. Ideally it
+    // would only be built once. If someone wants to fix this, it might be
+    // worth investigating if it would be possible to test std from stage1.
+    // Note that the stages here are one higher than they actually are, because
+    // Rustdoc::run swaps the requested compiler for the one at stage minus 1
+    // whenever --stage is not 0.
+    assert_eq!(
+        first(builder.cache.all::<tool::Rustdoc>()),
+        &[
+            tool::Rustdoc { compiler: Compiler { host: a, stage: 1 } },
+            tool::Rustdoc { compiler: Compiler { host: a, stage: 2 } },
+        ]
+    );
+}
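
The stage swap referenced in the comments above happens in `tool::Rustdoc::run`, which is not part of this diff; a minimal, self-contained sketch of its assumed shape:

    // Hedged sketch, not the actual bootstrap code: a Rustdoc step requested
    // with a stage-N compiler is built by the stage N-1 compiler, except at
    // stage 0, which uses the bootstrap rustdoc directly.
    fn build_compiler_stage(requested_stage: u32) -> u32 {
        if requested_stage == 0 { 0 } else { requested_stage - 1 }
    }

    fn main() {
        // The tests above assert a `stage: 2` entry, which under this scheme
        // corresponds to a rustdoc actually built by the stage 1 compiler.
        assert_eq!(build_compiler_stage(2), 1);
    }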
diff --git a/src/bootstrap/doc.rs b/src/bootstrap/doc.rs
index 8b76158f9e564..d02c19467ee68 100644
--- a/src/bootstrap/doc.rs
+++ b/src/bootstrap/doc.rs
@@ -518,8 +518,7 @@ impl Step for Rustc {
         let out = builder.compiler_doc_out(target);
         t!(fs::create_dir_all(&out));
 
-        // Get the correct compiler for this stage.
-        let compiler = builder.compiler_for(stage, builder.config.build, target);
+        let compiler = builder.compiler(stage, builder.config.build);
 
         if !builder.config.compiler_docs {
             builder.info("\tskipping - compiler/librustdoc docs disabled");
@@ -599,8 +598,7 @@ impl Step for Rustdoc {
         let out = builder.compiler_doc_out(target);
         t!(fs::create_dir_all(&out));
 
-        // Get the correct compiler for this stage.
-        let compiler = builder.compiler_for(stage, builder.config.build, target);
+        let compiler = builder.compiler(stage, builder.config.build);
 
         if !builder.config.compiler_docs {
             builder.info("\tskipping - compiler/librustdoc docs disabled");
@@ -639,9 +637,10 @@ impl Step for Rustdoc {
     }
 }
 
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+#[derive(Ord, PartialOrd, Debug, Copy, Clone, Hash, PartialEq, Eq)]
 pub struct ErrorIndex {
-    target: Interned<String>,
+    pub compiler: Compiler,
+    pub target: Interned<String>,
 }
 
 impl Step for ErrorIndex {
@@ -655,26 +654,26 @@ impl Step for ErrorIndex {
     }
 
     fn make_run(run: RunConfig<'_>) {
-        run.builder.ensure(ErrorIndex { target: run.target });
+        let target = run.target;
+        // error_index_generator depends on librustdoc. Use the compiler that
+        // is normally used to build rustdoc for other documentation so that
+        // it shares the same artifacts.
+        let compiler =
+            run.builder.compiler_for(run.builder.top_stage, run.builder.config.build, target);
+        run.builder.ensure(ErrorIndex { compiler, target });
     }
 
     /// Generates the HTML rendered error-index by running the
     /// `error_index_generator` tool.
     fn run(self, builder: &Builder<'_>) {
-        let target = self.target;
-
-        builder.info(&format!("Documenting error index ({})", target));
-        let out = builder.doc_out(target);
+        builder.info(&format!("Documenting error index ({})", self.target));
+        let out = builder.doc_out(self.target);
         t!(fs::create_dir_all(&out));
-        let compiler = builder.compiler(2, builder.config.build);
-        let mut index = tool::ErrorIndex::command(builder, compiler);
+        let mut index = tool::ErrorIndex::command(builder, self.compiler);
         index.arg("html");
         index.arg(out.join("error-index.html"));
         index.arg(crate::channel::CFG_RELEASE_NUM);
 
-        // FIXME: shouldn't have to pass this env var
-        index.env("CFG_BUILD", &builder.config.build);
-
         builder.run(&mut index);
     }
 }
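
Both `make_run` implementations above lean on `Builder::compiler_for`, whose definition is not shown in this diff. A simplified, assumed model of why it lets steps share artifacts:

    // Hedged sketch: unless a full bootstrap is forced, requesting the top
    // stage hands back the stage 1 compiler that other doc/test steps already
    // use, so the builder's step cache dedupes the rustdoc build.
    fn compiler_for(top_stage: u32, full_bootstrap: bool) -> u32 {
        if top_stage >= 2 && !full_bootstrap { 1 } else { top_stage }
    }

    fn main() {
        assert_eq!(compiler_for(2, false), 1); // default: share stage 1 artifacts
        assert_eq!(compiler_for(2, true), 2);  // --full-bootstrap rebuilds
    }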
diff --git a/src/bootstrap/native.rs b/src/bootstrap/native.rs
index 106db90b2d0f0..cceb794165059 100644
--- a/src/bootstrap/native.rs
+++ b/src/bootstrap/native.rs
@@ -175,6 +175,17 @@ impl Step for Llvm {
             cfg.define("LLVM_ENABLE_ZLIB", "OFF");
         }
 
+        // Are we compiling for iOS/tvOS?
+        if target.contains("apple-ios") || target.contains("apple-tvos") {
+            // These two defines prevent CMake from automatically trying to add
+            // a MacOSX sysroot, which leads to a compiler error.
+            cfg.define("CMAKE_OSX_SYSROOT", "/");
+            cfg.define("CMAKE_OSX_DEPLOYMENT_TARGET", "");
+            // Prevent CMake from adding -bundle to CFLAGS automatically, which
+            // leads to a compiler error because "-bitcode_bundle" also gets added.
+            cfg.define("LLVM_ENABLE_PLUGINS", "OFF");
+            // Zlib fails to link properly, leading to a compiler error.
+            cfg.define("LLVM_ENABLE_ZLIB", "OFF");
+        }
+
         if builder.config.llvm_thin_lto {
             cfg.define("LLVM_ENABLE_LTO", "Thin");
             if !target.contains("apple") {
@@ -412,6 +423,14 @@ fn configure_cmake(
     if let Some(ref s) = builder.config.llvm_cflags {
         cflags.push_str(&format!(" {}", s));
     }
+    // Some compiler features used by LLVM (such as thread locals) will not
+    // work with a minimum version below iOS 10.
+    if target.contains("apple-ios") {
+        if target.contains("86-") {
+            cflags.push_str(" -miphonesimulator-version-min=10.0");
+        } else {
+            cflags.push_str(" -miphoneos-version-min=10.0");
+        }
+    }
     cfg.define("CMAKE_C_FLAGS", cflags);
     let mut cxxflags = builder.cflags(target, GitRepo::Llvm).join(" ");
     if builder.config.llvm_static_stdcpp && !target.contains("msvc") && !target.contains("netbsd") {
diff --git a/src/bootstrap/test.rs b/src/bootstrap/test.rs
index 12ab6b1636cc1..77bcc00d75b2b 100644
--- a/src/bootstrap/test.rs
+++ b/src/bootstrap/test.rs
@@ -1454,8 +1454,11 @@ impl Step for ErrorIndex {
     }
 
     fn make_run(run: RunConfig<'_>) {
-        run.builder
-            .ensure(ErrorIndex { compiler: run.builder.compiler(run.builder.top_stage, run.host) });
+        // error_index_generator depends on librustdoc. Use the compiler that
+        // is normally used to build rustdoc for other tests (like compiletest
+        // tests in src/test/rustdoc) so that it shares the same artifacts.
+        let compiler = run.builder.compiler_for(run.builder.top_stage, run.host, run.host);
+        run.builder.ensure(ErrorIndex { compiler });
     }
 
     /// Runs the error index generator tool to execute the tests located in the error
@@ -1467,22 +1470,23 @@ impl Step for ErrorIndex {
     fn run(self, builder: &Builder<'_>) {
         let compiler = self.compiler;
 
-        builder.ensure(compile::Std { compiler, target: compiler.host });
-
         let dir = testdir(builder, compiler.host);
         t!(fs::create_dir_all(&dir));
         let output = dir.join("error-index.md");
 
-        let mut tool = tool::ErrorIndex::command(
-            builder,
-            builder.compiler(compiler.stage, builder.config.build),
-        );
-        tool.arg("markdown").arg(&output).env("CFG_BUILD", &builder.config.build);
+        let mut tool = tool::ErrorIndex::command(builder, compiler);
+        tool.arg("markdown").arg(&output);
 
-        builder.info(&format!("Testing error-index stage{}", compiler.stage));
+        // Use the rustdoc that was built by self.compiler. This copy of
+        // rustdoc is shared with other tests (like compiletest tests in
+        // src/test/rustdoc). This helps avoid building rustdoc multiple
+        // times.
+        let rustdoc_compiler = builder.compiler(builder.top_stage, builder.config.build);
+        builder.info(&format!("Testing error-index stage{}", rustdoc_compiler.stage));
         let _time = util::timeit(&builder);
         builder.run_quiet(&mut tool);
-        markdown_test(builder, compiler, &output);
+        builder.ensure(compile::Std { compiler: rustdoc_compiler, target: rustdoc_compiler.host });
+        markdown_test(builder, rustdoc_compiler, &output);
     }
 }
 
@@ -1797,9 +1801,13 @@ impl Step for CrateRustdoc {
 
     fn run(self, builder: &Builder<'_>) {
         let test_kind = self.test_kind;
+        let target = self.host;
 
-        let compiler = builder.compiler(builder.top_stage, self.host);
-        let target = compiler.host;
+        // Use the previous stage compiler to reuse the artifacts that are
+        // created when running compiletest for src/test/rustdoc. If this used
+        // `compiler`, then it would cause rustdoc to be built *again*, which
+        // isn't really necessary.
+        let compiler = builder.compiler_for(builder.top_stage, target, target);
         builder.ensure(compile::Rustc { compiler, target });
 
         let mut cargo = tool::prepare_tool_cargo(
@@ -1825,6 +1833,32 @@ impl Step for CrateRustdoc {
             cargo.arg("'-Ctarget-feature=-crt-static'");
         }
 
+        // This is needed for running doctests on librustdoc. This is a bit of
+        // an unfortunate interaction with how bootstrap works and how cargo
+        // sets up the dylib path, and the fact that the doctest (in
+        // html/markdown.rs) links to rustc-private libs. For stage1, the
+        // compiler host dylibs (in stage1/lib) are not the same as the target
+        // dylibs (in stage1/lib/rustlib/...). This is different from a normal
+        // rust distribution where they are the same.
+        //
+        // On the cargo side, normal tests use `target_process` which handles
+        // setting up the dylib for a *target* (stage1/lib/rustlib/... in this
+        // case). However, for doctests it uses `rustdoc_process` which only
+        // sets up the dylib path for the *host* (stage1/lib), which is the
+        // wrong directory.
+        //
+        // It may be worth simply not running doctests on librustdoc at all.
+        // There is only one such test, and it doesn't look too important.
+        // There might be other ways to avoid this, but they all seem pretty
+        // convoluted.
+        //
+        // See also https://github.com/rust-lang/rust/issues/13983 where the
+        // host vs target dylibs for rustdoc are consistently tricky to deal
+        // with.
+        let mut dylib_path = dylib_path();
+        dylib_path.insert(0, PathBuf::from(&*builder.sysroot_libdir(compiler, target)));
+        cargo.env(dylib_path_var(), env::join_paths(&dylib_path).unwrap());
+
         if !builder.config.verbose_tests {
             cargo.arg("--quiet");
         }
diff --git a/src/bootstrap/tool.rs b/src/bootstrap/tool.rs
index 063fb8fbb9526..45f5073f43100 100644
--- a/src/bootstrap/tool.rs
+++ b/src/bootstrap/tool.rs
@@ -366,7 +366,7 @@ bootstrap_tool!(
     ExpandYamlAnchors, "src/tools/expand-yaml-anchors", "expand-yaml-anchors";
 );
 
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Ord, PartialOrd)]
 pub struct ErrorIndex {
     pub compiler: Compiler,
 }
@@ -392,9 +392,9 @@ impl Step for ErrorIndex {
     fn make_run(run: RunConfig<'_>) {
         // Compile the error-index in the same stage as rustdoc to avoid
         // recompiling rustdoc twice if we can.
-        let stage = if run.builder.top_stage >= 2 { run.builder.top_stage } else { 0 };
-        run.builder
-            .ensure(ErrorIndex { compiler: run.builder.compiler(stage, run.builder.config.build) });
+        let host = run.builder.config.build;
+        let compiler = run.builder.compiler_for(run.builder.top_stage, host, host);
+        run.builder.ensure(ErrorIndex { compiler });
     }
 
     fn run(self, builder: &Builder<'_>) -> PathBuf {
@@ -449,7 +449,7 @@ impl Step for RemoteTestServer {
     }
 }
 
-#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq)]
+#[derive(Debug, Copy, Clone, Hash, PartialEq, Eq, Ord, PartialOrd)]
 pub struct Rustdoc {
     /// This should only ever be 0 or 2.
     /// We sometimes want to reference the "bootstrap" rustdoc, which is why this option is here.
diff --git a/src/ci/docker/wasm32/Dockerfile b/src/ci/docker/wasm32/Dockerfile
index 8232539edda77..92461305320ee 100644
--- a/src/ci/docker/wasm32/Dockerfile
+++ b/src/ci/docker/wasm32/Dockerfile
@@ -25,7 +25,18 @@ RUN ln `which python3` /usr/bin/python
 
 ENV PATH=$PATH:/emsdk-portable
 ENV PATH=$PATH:/emsdk-portable/upstream/emscripten/
-ENV PATH=$PATH:/emsdk-portable/node/12.9.1_64bit/bin/
+
+# Rust's build system requires NodeJS to be in the path, but the directory in
+# which emsdk stores it contains the version number. This caused breakage in
+# the past when emsdk bumped the Node version, leaving the path pointing to a
+# missing directory.
+#
+# To avoid the problem, this symlinks the latest NodeJS version available to
+# "latest" and adds that directory to the path.
+RUN ln -s /emsdk-portable/node/$(ls /emsdk-portable/node | sort -V | tail -n 1) \
+          /emsdk-portable/node/latest
+ENV PATH=$PATH:/emsdk-portable/node/latest/bin/
+
 ENV BINARYEN_ROOT=/emsdk-portable/upstream/
 ENV EMSDK=/emsdk-portable
 ENV EM_CONFIG=/emsdk-portable/.emscripten
diff --git a/src/libcore/alloc/global.rs b/src/libcore/alloc/global.rs
index 147fe696ac02f..c198797e650f6 100644
--- a/src/libcore/alloc/global.rs
+++ b/src/libcore/alloc/global.rs
@@ -127,9 +127,12 @@ pub unsafe trait GlobalAlloc {
     #[stable(feature = "global_alloc", since = "1.28.0")]
     unsafe fn alloc_zeroed(&self, layout: Layout) -> *mut u8 {
         let size = layout.size();
-        let ptr = self.alloc(layout);
+        // SAFETY: the safety contract for `alloc` must be upheld by the caller.
+        let ptr = unsafe { self.alloc(layout) };
         if !ptr.is_null() {
-            ptr::write_bytes(ptr, 0, size);
+            // SAFETY: as allocation succeeded, the region from `ptr`
+            // of size `size` is guaranteed to be valid for writes.
+            unsafe { ptr::write_bytes(ptr, 0, size) };
         }
         ptr
     }
@@ -187,11 +190,18 @@ pub unsafe trait GlobalAlloc {
     /// [`handle_alloc_error`]: ../../alloc/alloc/fn.handle_alloc_error.html
     #[stable(feature = "global_alloc", since = "1.28.0")]
     unsafe fn realloc(&self, ptr: *mut u8, layout: Layout, new_size: usize) -> *mut u8 {
-        let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
-        let new_ptr = self.alloc(new_layout);
+        // SAFETY: the caller must ensure that the `new_size` does not overflow.
+        // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid.
+        let new_layout = unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
+        // SAFETY: the caller must ensure that `new_layout` is greater than zero.
+        let new_ptr = unsafe { self.alloc(new_layout) };
         if !new_ptr.is_null() {
-            ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
-            self.dealloc(ptr, layout);
+            // SAFETY: the previously allocated block cannot overlap the newly allocated block.
+            // The safety contract for `dealloc` must be upheld by the caller.
+            unsafe {
+                ptr::copy_nonoverlapping(ptr, new_ptr, cmp::min(layout.size(), new_size));
+                self.dealloc(ptr, layout);
+            }
         }
         new_ptr
     }
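
For context (illustrative usage, not part of this diff): implementors only need to provide `alloc` and `dealloc`; the default `alloc_zeroed` and `realloc` shown above are inherited.

    use std::alloc::{GlobalAlloc, Layout, System};

    struct Passthrough;

    unsafe impl GlobalAlloc for Passthrough {
        unsafe fn alloc(&self, layout: Layout) -> *mut u8 {
            // SAFETY: forwarding; the caller upholds `alloc`'s contract.
            unsafe { System.alloc(layout) }
        }
        unsafe fn dealloc(&self, ptr: *mut u8, layout: Layout) {
            // SAFETY: forwarding; the caller upholds `dealloc`'s contract.
            unsafe { System.dealloc(ptr, layout) }
        }
    }

    fn main() {
        let layout = Layout::new::<u64>();
        // SAFETY: `layout` has non-zero size, and alloc/dealloc are paired.
        unsafe {
            let p = Passthrough.alloc(layout);
            assert!(!p.is_null());
            Passthrough.dealloc(p, layout);
        }
    }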
diff --git a/src/libcore/alloc/layout.rs b/src/libcore/alloc/layout.rs
index a09c2387d0de2..ae7ae7044655b 100644
--- a/src/libcore/alloc/layout.rs
+++ b/src/libcore/alloc/layout.rs
@@ -90,7 +90,8 @@ impl Layout {
     #[rustc_const_stable(feature = "alloc_layout", since = "1.28.0")]
     #[inline]
     pub const unsafe fn from_size_align_unchecked(size: usize, align: usize) -> Self {
-        Layout { size_: size, align_: NonZeroUsize::new_unchecked(align) }
+        // SAFETY: the caller must ensure that `align` is greater than zero.
+        Layout { size_: size, align_: unsafe { NonZeroUsize::new_unchecked(align) } }
     }
 
     /// The minimum size in bytes for a memory block of this layout.
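
A quick illustration (standard API, not part of this diff) of the contract the new SAFETY comment describes: the checked constructor verifies exactly what the unchecked one assumes.

    use std::alloc::Layout;

    fn main() {
        let checked = Layout::from_size_align(16, 8).unwrap();
        // SAFETY: 8 is non-zero (and a power of two), matching the conditions
        // that `from_size_align` just verified.
        let unchecked = unsafe { Layout::from_size_align_unchecked(16, 8) };
        assert_eq!(checked, unchecked);
    }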
diff --git a/src/libcore/alloc/mod.rs b/src/libcore/alloc/mod.rs
index 1346fbd481003..be4e051b1ca42 100644
--- a/src/libcore/alloc/mod.rs
+++ b/src/libcore/alloc/mod.rs
@@ -54,7 +54,9 @@ impl AllocInit {
     #[inline]
     #[unstable(feature = "allocator_api", issue = "32838")]
     pub unsafe fn init(self, memory: MemoryBlock) {
-        self.init_offset(memory, 0)
+        // SAFETY: the safety contract for `init_offset` must be
+        // upheld by the caller.
+        unsafe { self.init_offset(memory, 0) }
     }
 
     /// Initialize the memory block like specified by `init` at the specified `offset`.
@@ -78,7 +80,10 @@ impl AllocInit {
         match self {
             AllocInit::Uninitialized => (),
             AllocInit::Zeroed => {
-                memory.ptr.as_ptr().add(offset).write_bytes(0, memory.size - offset)
+                // SAFETY: the caller must guarantee that `offset` is smaller than or equal to `memory.size`,
+                // so the memory from `memory.ptr + offset` of length `memory.size - offset`
+                // is guaranteed to be contained in `memory` and thus valid for writes.
+                unsafe { memory.ptr.as_ptr().add(offset).write_bytes(0, memory.size - offset) }
             }
         }
     }
@@ -281,11 +286,23 @@ pub unsafe trait AllocRef {
                     return Ok(MemoryBlock { ptr, size });
                 }
 
-                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                let new_layout =
+                    // SAFETY: the caller must ensure that the `new_size` does not overflow.
+                    // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid for a Layout.
+                    // The caller must ensure that `new_size` is greater than zero.
+                    unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
                 let new_memory = self.alloc(new_layout, init)?;
-                ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), size);
-                self.dealloc(ptr, layout);
-                Ok(new_memory)
+
+                // SAFETY: because `new_size` must be greater than or equal to `size`, both the old and new
+                // memory allocation are valid for reads and writes for `size` bytes. Also, because the old
+                // allocation wasn't yet deallocated, it cannot overlap `new_memory`. Thus, the call to
+                // `copy_nonoverlapping` is safe.
+                // The safety contract for `dealloc` must be upheld by the caller.
+                unsafe {
+                    ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), size);
+                    self.dealloc(ptr, layout);
+                    Ok(new_memory)
+                }
             }
         }
     }
@@ -356,11 +373,23 @@ pub unsafe trait AllocRef {
                     return Ok(MemoryBlock { ptr, size });
                 }
 
-                let new_layout = Layout::from_size_align_unchecked(new_size, layout.align());
+                let new_layout =
+                    // SAFETY: the caller must ensure that the `new_size` does not overflow.
+                    // `layout.align()` comes from a `Layout` and is thus guaranteed to be valid for a Layout.
+                    // The caller must ensure that `new_size` is greater than zero.
+                    unsafe { Layout::from_size_align_unchecked(new_size, layout.align()) };
                 let new_memory = self.alloc(new_layout, AllocInit::Uninitialized)?;
-                ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), new_size);
-                self.dealloc(ptr, layout);
-                Ok(new_memory)
+
+                // SAFETY: because `new_size` must be less than or equal to `size`, both the old and new
+                // memory allocation are valid for reads and writes for `new_size` bytes. Also, because the
+                // old allocation wasn't yet deallocated, it cannot overlap `new_memory`. Thus, the call to
+                // `copy_nonoverlapping` is safe.
+                // The safety contract for `dealloc` must be upheld by the caller.
+                unsafe {
+                    ptr::copy_nonoverlapping(ptr.as_ptr(), new_memory.ptr.as_ptr(), new_size);
+                    self.dealloc(ptr, layout);
+                    Ok(new_memory)
+                }
             }
         }
     }
@@ -386,7 +415,8 @@ where
 
     #[inline]
     unsafe fn dealloc(&mut self, ptr: NonNull<u8>, layout: Layout) {
-        (**self).dealloc(ptr, layout)
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { (**self).dealloc(ptr, layout) }
     }
 
     #[inline]
@@ -398,7 +428,8 @@ where
         placement: ReallocPlacement,
         init: AllocInit,
     ) -> Result<MemoryBlock, AllocErr> {
-        (**self).grow(ptr, layout, new_size, placement, init)
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { (**self).grow(ptr, layout, new_size, placement, init) }
     }
 
     #[inline]
@@ -409,6 +440,7 @@ where
         new_size: usize,
         placement: ReallocPlacement,
     ) -> Result<MemoryBlock, AllocErr> {
-        (**self).shrink(ptr, layout, new_size, placement)
+        // SAFETY: the safety contract must be upheld by the caller
+        unsafe { (**self).shrink(ptr, layout, new_size, placement) }
     }
 }
diff --git a/src/libcore/cell.rs b/src/libcore/cell.rs
index c4293ed7bcfe2..51d9695687f4a 100644
--- a/src/libcore/cell.rs
+++ b/src/libcore/cell.rs
@@ -1005,7 +1005,12 @@ impl<T: ?Sized> RefCell<T> {
     #[inline]
     pub unsafe fn try_borrow_unguarded(&self) -> Result<&T, BorrowError> {
         if !is_writing(self.borrow.get()) {
-            Ok(&*self.value.get())
+            // SAFETY: We check that nobody is actively writing now, but it is
+            // the caller's responsibility to ensure that nobody writes until
+            // the returned reference is no longer in use.
+            // Also, `self.value.get()` refers to the value owned by `self`
+            // and is thus guaranteed to be valid for the lifetime of `self`.
+            Ok(unsafe { &*self.value.get() })
         } else {
             Err(BorrowError { _private: () })
         }
diff --git a/src/libcore/char/convert.rs b/src/libcore/char/convert.rs
index d7e39946148ed..c329eec76ac3d 100644
--- a/src/libcore/char/convert.rs
+++ b/src/libcore/char/convert.rs
@@ -99,7 +99,8 @@ pub fn from_u32(i: u32) -> Option<char> {
 #[inline]
 #[stable(feature = "char_from_unchecked", since = "1.5.0")]
 pub unsafe fn from_u32_unchecked(i: u32) -> char {
-    if cfg!(debug_assertions) { char::from_u32(i).unwrap() } else { transmute(i) }
+    // SAFETY: the caller must guarantee that `i` is a valid char value.
+    if cfg!(debug_assertions) { char::from_u32(i).unwrap() } else { unsafe { transmute(i) } }
 }
 
 #[stable(feature = "char_convert", since = "1.13.0")]
diff --git a/src/libcore/char/methods.rs b/src/libcore/char/methods.rs
index dd2f01c679f73..72555d781ed38 100644
--- a/src/libcore/char/methods.rs
+++ b/src/libcore/char/methods.rs
@@ -183,7 +183,8 @@ impl char {
     #[unstable(feature = "assoc_char_funcs", reason = "recently added", issue = "71763")]
     #[inline]
     pub unsafe fn from_u32_unchecked(i: u32) -> char {
-        super::convert::from_u32_unchecked(i)
+        // SAFETY: the safety contract must be upheld by the caller.
+        unsafe { super::convert::from_u32_unchecked(i) }
     }
 
     /// Converts a digit in the given radix to a `char`.
diff --git a/src/libcore/convert/num.rs b/src/libcore/convert/num.rs
index 46ba0a279b7ff..336c0b26bc7d7 100644
--- a/src/libcore/convert/num.rs
+++ b/src/libcore/convert/num.rs
@@ -28,7 +28,8 @@ macro_rules! impl_float_to_int {
                 #[doc(hidden)]
                 #[inline]
                 unsafe fn to_int_unchecked(self) -> $Int {
-                    crate::intrinsics::float_to_int_unchecked(self)
+                    // SAFETY: the safety contract must be upheld by the caller.
+                    unsafe { crate::intrinsics::float_to_int_unchecked(self) }
                 }
             }
         )+
diff --git a/src/libcore/ffi.rs b/src/libcore/ffi.rs
index 7bc2866dc2e67..ca4632006509f 100644
--- a/src/libcore/ffi.rs
+++ b/src/libcore/ffi.rs
@@ -333,7 +333,8 @@ impl<'f> VaListImpl<'f> {
     /// Advance to the next arg.
     #[inline]
     pub unsafe fn arg<T: sealed_trait::VaArgSafe>(&mut self) -> T {
-        va_arg(self)
+        // SAFETY: the caller must uphold the safety contract for `va_arg`.
+        unsafe { va_arg(self) }
     }
 
     /// Copies the `va_list` at the current location.
@@ -343,7 +344,10 @@ impl<'f> VaListImpl<'f> {
     {
         let mut ap = self.clone();
         let ret = f(ap.as_va_list());
-        va_end(&mut ap);
+        // SAFETY: the caller must uphold the safety contract for `va_end`.
+        unsafe {
+            va_end(&mut ap);
+        }
         ret
     }
 }
diff --git a/src/libcore/future/mod.rs b/src/libcore/future/mod.rs
index 9dbc23f5c04c5..2555d91ae8d9a 100644
--- a/src/libcore/future/mod.rs
+++ b/src/libcore/future/mod.rs
@@ -85,5 +85,7 @@ where
 #[unstable(feature = "gen_future", issue = "50547")]
 #[inline]
 pub unsafe fn get_context<'a, 'b>(cx: ResumeTy) -> &'a mut Context<'b> {
-    &mut *cx.0.as_ptr().cast()
+    // SAFETY: the caller must guarantee that `cx.0` is a valid pointer
+    // that fulfills all the requirements for a mutable reference.
+    unsafe { &mut *cx.0.as_ptr().cast() }
 }
diff --git a/src/libcore/hash/sip.rs b/src/libcore/hash/sip.rs
index ac058609f45ed..f2bbf646f3272 100644
--- a/src/libcore/hash/sip.rs
+++ b/src/libcore/hash/sip.rs
@@ -130,15 +130,19 @@ unsafe fn u8to64_le(buf: &[u8], start: usize, len: usize) -> u64 {
     let mut i = 0; // current byte index (from LSB) in the output u64
     let mut out = 0;
     if i + 3 < len {
-        out = load_int_le!(buf, start + i, u32) as u64;
+        // SAFETY: `i` cannot be greater than `len`, and the caller must guarantee
+        // that the index start..start+len is in bounds.
+        out = unsafe { load_int_le!(buf, start + i, u32) } as u64;
         i += 4;
     }
     if i + 1 < len {
-        out |= (load_int_le!(buf, start + i, u16) as u64) << (i * 8);
+        // SAFETY: same as above.
+        out |= (unsafe { load_int_le!(buf, start + i, u16) } as u64) << (i * 8);
         i += 2
     }
     if i < len {
-        out |= (*buf.get_unchecked(start + i) as u64) << (i * 8);
+        // SAFETY: same as above.
+        out |= (unsafe { *buf.get_unchecked(start + i) } as u64) << (i * 8);
         i += 1;
     }
     debug_assert_eq!(i, len);
diff --git a/src/libcore/hint.rs b/src/libcore/hint.rs
index 0d794de5fe84b..9ebcde79b633d 100644
--- a/src/libcore/hint.rs
+++ b/src/libcore/hint.rs
@@ -46,7 +46,9 @@ use crate::intrinsics;
 #[inline]
 #[stable(feature = "unreachable", since = "1.27.0")]
 pub unsafe fn unreachable_unchecked() -> ! {
-    intrinsics::unreachable()
+    // SAFETY: the safety contract for `intrinsics::unreachable` must
+    // be upheld by the caller.
+    unsafe { intrinsics::unreachable() }
 }
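
A common use of this hint (illustrative, not part of this diff): marking a branch that the caller's contract rules out.

    /// # Safety
    /// `v` must be non-empty.
    unsafe fn first_unchecked(v: &[u32]) -> u32 {
        match v.first() {
            Some(&x) => x,
            // SAFETY: the caller guarantees `v` is non-empty, so this arm
            // is unreachable.
            None => unsafe { std::hint::unreachable_unchecked() },
        }
    }

    fn main() {
        // SAFETY: the slice is non-empty.
        assert_eq!(unsafe { first_unchecked(&[7, 8]) }, 7);
    }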
 
 /// Emits a machine instruction hinting to the processor that it is running in busy-wait
diff --git a/src/libcore/intrinsics.rs b/src/libcore/intrinsics.rs
index 57ffed19c0085..aff4e4a8d9825 100644
--- a/src/libcore/intrinsics.rs
+++ b/src/libcore/intrinsics.rs
@@ -1958,7 +1958,40 @@ extern "rust-intrinsic" {
     /// generation.
     #[cfg(not(bootstrap))]
     #[lang = "count_code_region"]
-    pub fn count_code_region(index: u32);
+    pub fn count_code_region(index: u32, start_byte_pos: u32, end_byte_pos: u32);
+
+    /// Internal marker for code coverage expressions, injected into the MIR when the
+    /// "instrument-coverage" option is enabled. This intrinsic is not converted into a
+    /// backend intrinsic call, but its arguments are extracted during the production of a
+    /// "coverage map", which is injected into the generated code, as additional data.
+    /// This marker identifies a code region and two other counters or counter expressions
+    /// whose sum is the number of times the code region was executed.
+    #[cfg(not(bootstrap))]
+    pub fn coverage_counter_add(
+        index: u32,
+        left_index: u32,
+        right_index: u32,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    );
+
+    /// This marker identifies a code region and two other counters or counter expressions
+    /// whose difference is the number of times the code region was executed.
+    /// (See `coverage_counter_add` for more information.)
+    #[cfg(not(bootstrap))]
+    pub fn coverage_counter_subtract(
+        index: u32,
+        left_index: u32,
+        right_index: u32,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    );
+
+    /// This marker identifies a code region to be added to the "coverage map" to indicate source
+    /// code that can never be reached.
+    /// (See `coverage_counter_add` for more information.)
+    #[cfg(not(bootstrap))]
+    pub fn coverage_unreachable(start_byte_pos: u32, end_byte_pos: u32);
 
     /// See documentation of `<*const T>::guaranteed_eq` for details.
     #[rustc_const_unstable(feature = "const_raw_ptr_comparison", issue = "53020")]
@@ -2099,7 +2132,10 @@ pub unsafe fn copy_nonoverlapping<T>(src: *const T, dst: *mut T, count: usize) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    copy_nonoverlapping(src, dst, count)
+
+    // SAFETY: the safety contract for `copy_nonoverlapping` must be
+    // upheld by the caller.
+    unsafe { copy_nonoverlapping(src, dst, count) }
 }
 
 /// Copies `count * size_of::<T>()` bytes from `src` to `dst`. The source
@@ -2165,7 +2201,9 @@ pub unsafe fn copy<T>(src: *const T, dst: *mut T, count: usize) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    copy(src, dst, count)
+
+    // SAFETY: the safety contract for `copy` must be upheld by the caller.
+    unsafe { copy(src, dst, count) }
 }
 
 /// Sets `count * size_of::<T>()` bytes of memory starting at `dst` to
@@ -2248,5 +2286,7 @@ pub unsafe fn write_bytes<T>(dst: *mut T, val: u8, count: usize) {
     }
 
     debug_assert!(is_aligned_and_not_null(dst), "attempt to write to unaligned or null pointer");
-    write_bytes(dst, val, count)
+
+    // SAFETY: the safety contract for `write_bytes` must be upheld by the caller.
+    unsafe { write_bytes(dst, val, count) }
 }
diff --git a/src/libcore/iter/adapters/fuse.rs b/src/libcore/iter/adapters/fuse.rs
index 502fc2e631502..d2e2fc04a2b76 100644
--- a/src/libcore/iter/adapters/fuse.rs
+++ b/src/libcore/iter/adapters/fuse.rs
@@ -178,9 +178,10 @@ where
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> I::Item {
         match self.iter {
-            Some(ref mut iter) => iter.get_unchecked(i),
+            // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+            Some(ref mut iter) => unsafe { iter.get_unchecked(i) },
             // SAFETY: the caller asserts there is an item at `i`, so we're not exhausted.
-            None => intrinsics::unreachable(),
+            None => unsafe { intrinsics::unreachable() },
         }
     }
 
diff --git a/src/libcore/iter/adapters/mod.rs b/src/libcore/iter/adapters/mod.rs
index 00529f0e2d54f..133643a0c7f03 100644
--- a/src/libcore/iter/adapters/mod.rs
+++ b/src/libcore/iter/adapters/mod.rs
@@ -272,7 +272,8 @@ where
     T: Copy,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        *self.it.get_unchecked(i)
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { *self.it.get_unchecked(i) }
     }
 
     #[inline]
@@ -402,7 +403,8 @@ where
     T: Clone,
 {
     default unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        self.it.get_unchecked(i).clone()
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { self.it.get_unchecked(i) }.clone()
     }
 
     #[inline]
@@ -418,7 +420,8 @@ where
     T: Copy,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        *self.it.get_unchecked(i)
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { *self.it.get_unchecked(i) }
     }
 
     #[inline]
@@ -930,7 +933,8 @@ where
     F: FnMut(I::Item) -> B,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> Self::Item {
-        (self.f)(self.iter.get_unchecked(i))
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        (self.f)(unsafe { self.iter.get_unchecked(i) })
     }
     #[inline]
     fn may_have_side_effect() -> bool {
@@ -1392,7 +1396,8 @@ where
     I: TrustedRandomAccess,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> (usize, I::Item) {
-        (self.count + i, self.iter.get_unchecked(i))
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        (self.count + i, unsafe { self.iter.get_unchecked(i) })
     }
 
     fn may_have_side_effect() -> bool {
diff --git a/src/libcore/iter/adapters/zip.rs b/src/libcore/iter/adapters/zip.rs
index e83d36a580f06..985e6561665c7 100644
--- a/src/libcore/iter/adapters/zip.rs
+++ b/src/libcore/iter/adapters/zip.rs
@@ -271,7 +271,8 @@ where
     B: TrustedRandomAccess,
 {
     unsafe fn get_unchecked(&mut self, i: usize) -> (A::Item, B::Item) {
-        (self.a.get_unchecked(i), self.b.get_unchecked(i))
+        // SAFETY: the caller must uphold the contract for `TrustedRandomAccess::get_unchecked`.
+        unsafe { (self.a.get_unchecked(i), self.b.get_unchecked(i)) }
     }
 
     fn may_have_side_effect() -> bool {
diff --git a/src/libcore/iter/range.rs b/src/libcore/iter/range.rs
index bd7e6cfa5a750..ee53b6a13f837 100644
--- a/src/libcore/iter/range.rs
+++ b/src/libcore/iter/range.rs
@@ -189,12 +189,14 @@ macro_rules! step_identical_methods {
     () => {
         #[inline]
         unsafe fn forward_unchecked(start: Self, n: usize) -> Self {
-            start.unchecked_add(n as Self)
+            // SAFETY: the caller has to guarantee that `start + n` doesn't overflow.
+            unsafe { start.unchecked_add(n as Self) }
         }
 
         #[inline]
         unsafe fn backward_unchecked(start: Self, n: usize) -> Self {
-            start.unchecked_sub(n as Self)
+            // SAFETY: the caller has to guarantee that `start - n` doesn't overflow.
+            unsafe { start.unchecked_sub(n as Self) }
         }
 
         #[inline]
@@ -450,21 +452,33 @@ unsafe impl Step for char {
     #[inline]
     unsafe fn forward_unchecked(start: char, count: usize) -> char {
         let start = start as u32;
-        let mut res = Step::forward_unchecked(start, count);
+        // SAFETY: the caller must guarantee that this doesn't overflow
+        // the range of values for a char.
+        let mut res = unsafe { Step::forward_unchecked(start, count) };
         if start < 0xD800 && 0xD800 <= res {
-            res = Step::forward_unchecked(res, 0x800);
+            // SAFETY: the caller must guarantee that this doesn't overflow
+            // the range of values for a char.
+            res = unsafe { Step::forward_unchecked(res, 0x800) };
         }
-        char::from_u32_unchecked(res)
+        // SAFETY: because of the previous contract, this is guaranteed
+        // by the caller to be a valid char.
+        unsafe { char::from_u32_unchecked(res) }
     }
 
     #[inline]
     unsafe fn backward_unchecked(start: char, count: usize) -> char {
         let start = start as u32;
-        let mut res = Step::backward_unchecked(start, count);
+        // SAFETY: the caller must guarantee that this doesn't overflow
+        // the range of values for a char.
+        let mut res = unsafe { Step::backward_unchecked(start, count) };
         if start >= 0xE000 && 0xE000 > res {
-            res = Step::backward_unchecked(res, 0x800);
+            // SAFETY: the caller must guarantee that this doesn't overflow
+            // the range of values for a char.
+            res = unsafe { Step::backward_unchecked(res, 0x800) };
         }
-        char::from_u32_unchecked(res)
+        // SAFETY: because of the previous contract, this is guaranteed
+        // by the caller to be a valid char.
+        unsafe { char::from_u32_unchecked(res) }
     }
 }
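
A worked example of the surrogate-gap adjustment above (self-contained illustration; the constants mirror the code):

    fn main() {
        let start = 0xD7FF_u32; // '\u{D7FF}', the last scalar before the surrogates
        let mut res = start + 1; // 0xD800: inside the surrogate range
        if start < 0xD800 && 0xD800 <= res {
            res += 0x800; // skip the 0x800 surrogate code points
        }
        assert_eq!(std::char::from_u32(res), Some('\u{E000}'));
    }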
 
diff --git a/src/libcore/lib.rs b/src/libcore/lib.rs
index 63ddd97eed3df..50c56434fa9a1 100644
--- a/src/libcore/lib.rs
+++ b/src/libcore/lib.rs
@@ -149,6 +149,8 @@
 #![feature(const_type_id)]
 #![feature(const_caller_location)]
 #![feature(no_niche)] // rust-lang/rust#68303
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 #[prelude_import]
 #[allow(unused)]
@@ -279,7 +281,13 @@ pub mod primitive;
 // set up in such a way that directly pulling it here works such that the
 // crate uses this crate as its libcore.
 #[path = "../stdarch/crates/core_arch/src/mod.rs"]
-#[allow(missing_docs, missing_debug_implementations, dead_code, unused_imports)]
+#[allow(
+    missing_docs,
+    missing_debug_implementations,
+    dead_code,
+    unused_imports,
+    unsafe_op_in_unsafe_fn
+)]
 // FIXME: This annotation should be moved into rust-lang/stdarch after clashing_extern_declarations is
 // merged. It currently cannot because bootstrap fails as the lint hasn't been defined yet.
 #[cfg_attr(not(bootstrap), allow(clashing_extern_declarations))]
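
For reference, a minimal example of the pattern these two attributes enforce throughout this patch (illustrative; `unsafe_block_in_unsafe_fn` was nightly-only at the time):

    #![feature(unsafe_block_in_unsafe_fn)]
    #![deny(unsafe_op_in_unsafe_fn)]

    pub unsafe fn read_byte(p: *const u8) -> u8 {
        // SAFETY: the caller must pass a pointer valid for reads of one byte.
        unsafe { *p }
    }

    fn main() {
        let x = 5u8;
        // SAFETY: `&x` is valid for reads.
        assert_eq!(unsafe { read_byte(&x) }, 5);
    }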
diff --git a/src/libcore/mem/manually_drop.rs b/src/libcore/mem/manually_drop.rs
index 18767c482c77e..920f5e9c0bd28 100644
--- a/src/libcore/mem/manually_drop.rs
+++ b/src/libcore/mem/manually_drop.rs
@@ -122,7 +122,9 @@ impl<T> ManuallyDrop<T> {
     #[stable(feature = "manually_drop_take", since = "1.42.0")]
     #[inline]
     pub unsafe fn take(slot: &mut ManuallyDrop<T>) -> T {
-        ptr::read(&slot.value)
+        // SAFETY: we are reading from a reference, which is guaranteed
+        // to be valid for reads.
+        unsafe { ptr::read(&slot.value) }
     }
 }
 
@@ -152,7 +154,10 @@ impl<T: ?Sized> ManuallyDrop<T> {
     #[stable(feature = "manually_drop", since = "1.20.0")]
     #[inline]
     pub unsafe fn drop(slot: &mut ManuallyDrop<T>) {
-        ptr::drop_in_place(&mut slot.value)
+        // SAFETY: we are dropping the value pointed to by a mutable reference
+        // which is guaranteed to be valid for writes.
+        // It is up to the caller to make sure that `slot` isn't dropped again.
+        unsafe { ptr::drop_in_place(&mut slot.value) }
     }
 }
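
Illustrative usage of `take` (standard API, not part of this diff):

    use std::mem::ManuallyDrop;

    fn main() {
        let mut slot = ManuallyDrop::new(String::from("hi"));
        // SAFETY: `slot` is initialized and never used or dropped afterwards.
        let s = unsafe { ManuallyDrop::take(&mut slot) };
        assert_eq!(s, "hi");
    }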
 
diff --git a/src/libcore/mem/maybe_uninit.rs b/src/libcore/mem/maybe_uninit.rs
index 499016545e967..7732525a0fc28 100644
--- a/src/libcore/mem/maybe_uninit.rs
+++ b/src/libcore/mem/maybe_uninit.rs
@@ -494,8 +494,12 @@ impl<T> MaybeUninit<T> {
     #[inline(always)]
     #[rustc_diagnostic_item = "assume_init"]
     pub unsafe fn assume_init(self) -> T {
-        intrinsics::assert_inhabited::<T>();
-        ManuallyDrop::into_inner(self.value)
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // This also means that `self` must be a `value` variant.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            ManuallyDrop::into_inner(self.value)
+        }
     }
 
     /// Reads the value from the `MaybeUninit<T>` container. The resulting `T` is subject
@@ -558,8 +562,12 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_extra", issue = "63567")]
     #[inline(always)]
     pub unsafe fn read(&self) -> T {
-        intrinsics::assert_inhabited::<T>();
-        self.as_ptr().read()
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // Reading from `self.as_ptr()` is safe since `self` should be initialized.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            self.as_ptr().read()
+        }
     }
 
     /// Gets a shared reference to the contained value.
@@ -620,8 +628,12 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_ref", issue = "63568")]
     #[inline(always)]
     pub unsafe fn get_ref(&self) -> &T {
-        intrinsics::assert_inhabited::<T>();
-        &*self.value
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // This also means that `self` must be a `value` variant.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            &*self.value
+        }
     }
 
     /// Gets a mutable (unique) reference to the contained value.
@@ -738,8 +750,12 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_ref", issue = "63568")]
     #[inline(always)]
     pub unsafe fn get_mut(&mut self) -> &mut T {
-        intrinsics::assert_inhabited::<T>();
-        &mut *self.value
+        // SAFETY: the caller must guarantee that `self` is initialized.
+        // This also means that `self` must be a `value` variant.
+        unsafe {
+            intrinsics::assert_inhabited::<T>();
+            &mut *self.value
+        }
     }
 
     /// Assuming all the elements are initialized, get a slice to them.
@@ -752,7 +768,11 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_slice_assume_init", issue = "none")]
     #[inline(always)]
     pub unsafe fn slice_get_ref(slice: &[Self]) -> &[T] {
-        &*(slice as *const [Self] as *const [T])
+        // SAFETY: casting `slice` to a `*const [T]` is safe since the caller guarantees that
+        // `slice` is initialized, and `MaybeUninit` is guaranteed to have the same layout as `T`.
+        // The pointer obtained is valid since it refers to memory owned by `slice` which is a
+        // reference and thus guaranteed to be valid for reads.
+        unsafe { &*(slice as *const [Self] as *const [T]) }
     }
 
     /// Assuming all the elements are initialized, get a mutable slice to them.
@@ -765,7 +785,9 @@ impl<T> MaybeUninit<T> {
     #[unstable(feature = "maybe_uninit_slice_assume_init", issue = "none")]
     #[inline(always)]
     pub unsafe fn slice_get_mut(slice: &mut [Self]) -> &mut [T] {
-        &mut *(slice as *mut [Self] as *mut [T])
+        // SAFETY: similar to safety notes for `slice_get_ref`, but we have a
+        // mutable reference which is also guaranteed to be valid for writes.
+        unsafe { &mut *(slice as *mut [Self] as *mut [T]) }
     }
 
     /// Gets a pointer to the first element of the array.
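
Illustrative usage tying the methods above together (standard API, not part of this diff):

    use std::mem::MaybeUninit;

    fn main() {
        let mut slot = MaybeUninit::<u64>::uninit();
        // SAFETY: `as_mut_ptr` points at valid (if uninitialized) storage.
        unsafe { slot.as_mut_ptr().write(7) };
        // SAFETY: the value was fully initialized just above.
        assert_eq!(unsafe { slot.assume_init() }, 7);
    }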
diff --git a/src/libcore/mem/mod.rs b/src/libcore/mem/mod.rs
index 46e6ea7cd1866..272088815ece9 100644
--- a/src/libcore/mem/mod.rs
+++ b/src/libcore/mem/mod.rs
@@ -623,8 +623,11 @@ pub const fn needs_drop<T>() -> bool {
 #[allow(deprecated)]
 #[rustc_diagnostic_item = "mem_zeroed"]
 pub unsafe fn zeroed<T>() -> T {
-    intrinsics::assert_zero_valid::<T>();
-    MaybeUninit::zeroed().assume_init()
+    // SAFETY: the caller must guarantee that an all-zero value is valid for `T`.
+    unsafe {
+        intrinsics::assert_zero_valid::<T>();
+        MaybeUninit::zeroed().assume_init()
+    }
 }
 
 /// Bypasses Rust's normal memory-initialization checks by pretending to
@@ -656,8 +659,11 @@ pub unsafe fn zeroed<T>() -> T {
 #[allow(deprecated)]
 #[rustc_diagnostic_item = "mem_uninitialized"]
 pub unsafe fn uninitialized<T>() -> T {
-    intrinsics::assert_uninit_valid::<T>();
-    MaybeUninit::uninit().assume_init()
+    // SAFETY: the caller must guarantee that an uninitialized value is valid for `T`.
+    unsafe {
+        intrinsics::assert_uninit_valid::<T>();
+        MaybeUninit::uninit().assume_init()
+    }
 }
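
An illustrative sound use of `zeroed` (standard API, not part of this diff); `uninitialized` has no comparably easy sound example, which is part of why it is deprecated:

    fn main() {
        // SAFETY: all-zero bytes are a valid bit pattern for [u32; 4].
        let xs: [u32; 4] = unsafe { std::mem::zeroed() };
        assert_eq!(xs, [0; 4]);
    }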
 
 /// Swaps the values at two mutable locations, without deinitializing either one.
@@ -922,9 +928,14 @@ pub fn drop<T>(_x: T) {}
 pub unsafe fn transmute_copy<T, U>(src: &T) -> U {
     // If U has a higher alignment requirement, src may not be suitably aligned.
     if align_of::<U>() > align_of::<T>() {
-        ptr::read_unaligned(src as *const T as *const U)
+        // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+        // The caller must guarantee that the actual transmutation is safe.
+        unsafe { ptr::read_unaligned(src as *const T as *const U) }
     } else {
-        ptr::read(src as *const T as *const U)
+        // SAFETY: `src` is a reference which is guaranteed to be valid for reads.
+        // We just checked that `src as *const U` was properly aligned.
+        // The caller must guarantee that the actual transmutation is safe.
+        unsafe { ptr::read(src as *const T as *const U) }
     }
 }
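
Illustrative usage of `transmute_copy` (standard API, not part of this diff): it reads a `U` out of a `&T` without consuming the source.

    use std::mem::transmute_copy;

    fn main() {
        let x: u32 = 0x2A;
        // SAFETY: u32 and [u8; 4] have the same size, and every u32 bit
        // pattern is a valid [u8; 4].
        let bytes: [u8; 4] = unsafe { transmute_copy(&x) };
        assert_eq!(bytes, x.to_ne_bytes());
    }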
 
diff --git a/src/libcore/num/f32.rs b/src/libcore/num/f32.rs
index 6313de31ce4d5..061d1ea6b1c46 100644
--- a/src/libcore/num/f32.rs
+++ b/src/libcore/num/f32.rs
@@ -629,7 +629,9 @@ impl f32 {
     where
         Self: FloatToInt<Int>,
     {
-        FloatToInt::<Int>::to_int_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for
+        // `FloatToInt::to_int_unchecked`.
+        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
     }
 
     /// Raw transmutation to `u32`.
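
Illustrative usage of the method above (standard API, not part of this diff):

    fn main() {
        let x = 3.7_f32;
        // SAFETY: 3.7 is finite and truncates to 3, which fits in an i32.
        let n: i32 = unsafe { x.to_int_unchecked() };
        assert_eq!(n, 3);
    }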
diff --git a/src/libcore/num/f64.rs b/src/libcore/num/f64.rs
index d42e5392c5863..b0df4d64f6ee1 100644
--- a/src/libcore/num/f64.rs
+++ b/src/libcore/num/f64.rs
@@ -643,7 +643,9 @@ impl f64 {
     where
         Self: FloatToInt<Int>,
     {
-        FloatToInt::<Int>::to_int_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for
+        // `FloatToInt::to_int_unchecked`.
+        unsafe { FloatToInt::<Int>::to_int_unchecked(self) }
     }
 
     /// Raw transmutation to `u64`.
diff --git a/src/libcore/num/mod.rs b/src/libcore/num/mod.rs
index d36da90f2adc9..f4a1afd436adb 100644
--- a/src/libcore/num/mod.rs
+++ b/src/libcore/num/mod.rs
@@ -21,6 +21,21 @@ macro_rules! try_opt {
     };
 }
 
+#[cfg(bootstrap)]
+macro_rules! unlikely {
+    ($e: expr) => {
+        $e
+    };
+}
+
+#[cfg(not(bootstrap))]
+#[allow_internal_unstable(const_likely)]
+macro_rules! unlikely {
+    ($e: expr) => {
+        intrinsics::unlikely($e)
+    };
+}
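
A hedged, stable-Rust stand-in for how this macro is used by the checked arithmetic below (the real macro forwards to the unstable `intrinsics::unlikely` branch-prediction hint once bootstrap is done):

    // Stand-in: a plain function instead of the unstable intrinsic.
    #[inline(always)]
    fn unlikely(b: bool) -> bool {
        b
    }

    fn checked_add_sketch(x: u32, y: u32) -> Option<u32> {
        let (a, overflowed) = x.overflowing_add(y);
        if unlikely(overflowed) { None } else { Some(a) }
    }

    fn main() {
        assert_eq!(checked_add_sketch(1, 2), Some(3));
        assert_eq!(checked_add_sketch(u32::MAX, 1), None);
    }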
+
 macro_rules! impl_nonzero_fmt {
     ( #[$stability: meta] ( $( $Trait: ident ),+ ) for $Ty: ident ) => {
         $(
@@ -74,7 +89,8 @@ assert_eq!(size_of::<Option<core::num::", stringify!($Ty), ">>(), size_of::<", s
                 #[rustc_const_stable(feature = "nonzero", since = "1.34.0")]
                 #[inline]
                 pub const unsafe fn new_unchecked(n: $Int) -> Self {
-                    Self(n)
+                    // SAFETY: the caller must guarantee that `n` is not zero.
+                    unsafe { Self(n) }
                 }
 
                 /// Creates a non-zero if the given value is not zero.
@@ -745,7 +761,7 @@ $EndFeature, "
             #[inline]
             pub const fn checked_add(self, rhs: Self) -> Option<Self> {
                 let (a, b) = self.overflowing_add(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -762,7 +778,9 @@ cannot occur. This results in undefined behavior when `self + rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
-                intrinsics::unchecked_add(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_add`.
+                unsafe { intrinsics::unchecked_add(self, rhs) }
             }
         }
 
@@ -787,7 +805,7 @@ $EndFeature, "
             #[inline]
             pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
                 let (a, b) = self.overflowing_sub(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -804,7 +822,9 @@ cannot occur. This results in undefined behavior when `self - rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
-                intrinsics::unchecked_sub(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_sub`.
+                unsafe { intrinsics::unchecked_sub(self, rhs) }
             }
         }
 
@@ -829,7 +849,7 @@ $EndFeature, "
             #[inline]
             pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
                 let (a, b) = self.overflowing_mul(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -846,7 +866,9 @@ cannot occur. This results in undefined behavior when `self * rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
-                intrinsics::unchecked_mul(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_mul`.
+                unsafe { intrinsics::unchecked_mul(self, rhs) }
             }
         }
 
@@ -871,7 +893,7 @@ $EndFeature, "
                           without modifying the original"]
             #[inline]
             pub const fn checked_div(self, rhs: Self) -> Option<Self> {
-                if rhs == 0 || (self == Self::MIN && rhs == -1) {
+                if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
                     None
                 } else {
                     // SAFETY: div by zero and by INT_MIN have been checked above
@@ -900,7 +922,7 @@ assert_eq!((1", stringify!($SelfT), ").checked_div_euclid(0), None);
                           without modifying the original"]
             #[inline]
             pub const fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
-                if rhs == 0 || (self == Self::MIN && rhs == -1) {
+                if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
                     None
                 } else {
                     Some(self.div_euclid(rhs))
@@ -929,7 +951,7 @@ $EndFeature, "
                           without modifying the original"]
             #[inline]
             pub const fn checked_rem(self, rhs: Self) -> Option<Self> {
-                if rhs == 0 || (self == Self::MIN && rhs == -1) {
+                if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
                     None
                 } else {
                     // SAFETY: div by zero and by INT_MIN have been checked above
@@ -957,7 +979,7 @@ assert_eq!(", stringify!($SelfT), "::MIN.checked_rem_euclid(-1), None);
                           without modifying the original"]
             #[inline]
             pub const fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
-                if rhs == 0 || (self == Self::MIN && rhs == -1) {
+                if unlikely!(rhs == 0 || (self == Self::MIN && rhs == -1)) {
                     None
                 } else {
                     Some(self.rem_euclid(rhs))
@@ -983,7 +1005,7 @@ $EndFeature, "
             #[inline]
             pub const fn checked_neg(self) -> Option<Self> {
                 let (a, b) = self.overflowing_neg();
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -1007,7 +1029,7 @@ $EndFeature, "
             #[inline]
             pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
                 let (a, b) = self.overflowing_shl(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -1031,7 +1053,7 @@ $EndFeature, "
             #[inline]
             pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
                 let (a, b) = self.overflowing_shr(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -1738,7 +1760,7 @@ $EndFeature, "
             #[must_use = "this returns the result of the operation, \
                           without modifying the original"]
             pub const fn overflowing_div(self, rhs: Self) -> (Self, bool) {
-                if self == Self::MIN && rhs == -1 {
+                if unlikely!(self == Self::MIN && rhs == -1) {
                     (self, true)
                 } else {
                     (self / rhs, false)
@@ -1771,7 +1793,7 @@ assert_eq!(", stringify!($SelfT), "::MIN.overflowing_div_euclid(-1), (", stringi
             #[must_use = "this returns the result of the operation, \
                           without modifying the original"]
             pub const fn overflowing_div_euclid(self, rhs: Self) -> (Self, bool) {
-                if self == Self::MIN && rhs == -1 {
+                if unlikely!(self == Self::MIN && rhs == -1) {
                     (self, true)
                 } else {
                     (self.div_euclid(rhs), false)
@@ -1805,7 +1827,7 @@ $EndFeature, "
             #[must_use = "this returns the result of the operation, \
                           without modifying the original"]
             pub const fn overflowing_rem(self, rhs: Self) -> (Self, bool) {
-                if self == Self::MIN && rhs == -1 {
+                if unlikely!(self == Self::MIN && rhs == -1) {
                     (0, true)
                 } else {
                     (self % rhs, false)
@@ -1838,7 +1860,7 @@ assert_eq!(", stringify!($SelfT), "::MIN.overflowing_rem_euclid(-1), (0, true));
                           without modifying the original"]
             #[inline]
             pub const fn overflowing_rem_euclid(self, rhs: Self) -> (Self, bool) {
-                if self == Self::MIN && rhs == -1 {
+                if unlikely!(self == Self::MIN && rhs == -1) {
                     (0, true)
                 } else {
                     (self.rem_euclid(rhs), false)
@@ -1869,7 +1891,7 @@ assert_eq!(", stringify!($SelfT), "::MIN.overflowing_neg(), (", stringify!($Self
             #[allow(unused_attributes)]
             #[cfg_attr(bootstrap, allow_internal_unstable(const_if_match))]
             pub const fn overflowing_neg(self) -> (Self, bool) {
-                if self == Self::MIN {
+                if unlikely!(self == Self::MIN) {
                     (Self::MIN, true)
                 } else {
                     (-self, false)
@@ -2981,7 +3003,7 @@ assert_eq!((", stringify!($SelfT), "::MAX - 2).checked_add(3), None);", $EndFeat
             #[inline]
             pub const fn checked_add(self, rhs: Self) -> Option<Self> {
                 let (a, b) = self.overflowing_add(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -2998,7 +3020,9 @@ cannot occur. This results in undefined behavior when `self + rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_add(self, rhs: Self) -> Self {
-                intrinsics::unchecked_add(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_add`.
+                unsafe { intrinsics::unchecked_add(self, rhs) }
             }
         }
 
@@ -3021,7 +3045,7 @@ assert_eq!(0", stringify!($SelfT), ".checked_sub(1), None);", $EndFeature, "
             #[inline]
             pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
                 let (a, b) = self.overflowing_sub(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -3038,7 +3062,9 @@ cannot occur. This results in undefined behavior when `self - rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_sub(self, rhs: Self) -> Self {
-                intrinsics::unchecked_sub(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_sub`.
+                unsafe { intrinsics::unchecked_sub(self, rhs) }
             }
         }
 
@@ -3061,7 +3087,7 @@ assert_eq!(", stringify!($SelfT), "::MAX.checked_mul(2), None);", $EndFeature, "
             #[inline]
             pub const fn checked_mul(self, rhs: Self) -> Option<Self> {
                 let (a, b) = self.overflowing_mul(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -3078,7 +3104,9 @@ cannot occur. This results in undefined behavior when `self * rhs > ", stringify
                           without modifying the original"]
             #[inline]
             pub unsafe fn unchecked_mul(self, rhs: Self) -> Self {
-                intrinsics::unchecked_mul(self, rhs)
+                // SAFETY: the caller must uphold the safety contract for
+                // `unchecked_mul`.
+                unsafe { intrinsics::unchecked_mul(self, rhs) }
             }
         }
 
@@ -3100,11 +3128,12 @@ assert_eq!(1", stringify!($SelfT), ".checked_div(0), None);", $EndFeature, "
                           without modifying the original"]
             #[inline]
             pub const fn checked_div(self, rhs: Self) -> Option<Self> {
-                match rhs {
-                    0 => None,
+                if unlikely!(rhs == 0) {
+                    None
+                } else {
                     // SAFETY: div by zero has been checked above and unsigned types have no other
                     // failure modes for division
-                    rhs => Some(unsafe { intrinsics::unchecked_div(self, rhs) }),
+                    Some(unsafe { intrinsics::unchecked_div(self, rhs) })
                 }
             }
         }
@@ -3127,7 +3156,7 @@ assert_eq!(1", stringify!($SelfT), ".checked_div_euclid(0), None);
                           without modifying the original"]
             #[inline]
             pub const fn checked_div_euclid(self, rhs: Self) -> Option<Self> {
-                if rhs == 0 {
+                if unlikely!(rhs == 0) {
                     None
                 } else {
                     Some(self.div_euclid(rhs))
@@ -3154,7 +3183,7 @@ assert_eq!(5", stringify!($SelfT), ".checked_rem(0), None);", $EndFeature, "
                           without modifying the original"]
             #[inline]
             pub const fn checked_rem(self, rhs: Self) -> Option<Self> {
-                if rhs == 0 {
+                if unlikely!(rhs == 0) {
                     None
                 } else {
                     // SAFETY: div by zero has been checked above and unsigned types have no other
@@ -3182,7 +3211,7 @@ assert_eq!(5", stringify!($SelfT), ".checked_rem_euclid(0), None);
                           without modifying the original"]
             #[inline]
             pub const fn checked_rem_euclid(self, rhs: Self) -> Option<Self> {
-                if rhs == 0 {
+                if unlikely!(rhs == 0) {
                     None
                 } else {
                     Some(self.rem_euclid(rhs))
@@ -3209,7 +3238,7 @@ assert_eq!(1", stringify!($SelfT), ".checked_neg(), None);", $EndFeature, "
             #[inline]
             pub const fn checked_neg(self) -> Option<Self> {
                 let (a, b) = self.overflowing_neg();
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -3232,7 +3261,7 @@ assert_eq!(0x10", stringify!($SelfT), ".checked_shl(129), None);", $EndFeature,
             #[inline]
             pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
                 let (a, b) = self.overflowing_shl(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
 
@@ -3255,7 +3284,7 @@ assert_eq!(0x10", stringify!($SelfT), ".checked_shr(129), None);", $EndFeature,
             #[inline]
             pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
                 let (a, b) = self.overflowing_shr(rhs);
-                if b {None} else {Some(a)}
+                if unlikely!(b) {None} else {Some(a)}
             }
         }
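The `unlikely!` hunks above change codegen, not behavior: the error arm is hinted as cold so the success path becomes the fall-through branch. Below is a minimal sketch of the same pattern, assuming `unlikely!` simply forwards to the (nightly-only) `core::intrinsics::unlikely` hint; `checked_div_i32` is a hypothetical stand-in for the macro-generated methods:

```rust
#![feature(core_intrinsics)]

// Hypothetical stand-in for the macro-generated `checked_div`.
fn checked_div_i32(lhs: i32, rhs: i32) -> Option<i32> {
    // Hint the failure conditions as cold; the result is unchanged.
    if std::intrinsics::unlikely(rhs == 0 || (lhs == i32::MIN && rhs == -1)) {
        None
    } else {
        // Cannot trap: the divisor is non-zero and `MIN / -1` is excluded.
        Some(lhs / rhs)
    }
}

fn main() {
    assert_eq!(checked_div_i32(10, 3), Some(3));
    assert_eq!(checked_div_i32(1, 0), None);
    assert_eq!(checked_div_i32(i32::MIN, -1), None);
}
```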
 
diff --git a/src/libcore/pin.rs b/src/libcore/pin.rs
index 6f5bf7ad9da52..da299f026f8f1 100644
--- a/src/libcore/pin.rs
+++ b/src/libcore/pin.rs
@@ -679,7 +679,10 @@ impl<'a, T: ?Sized> Pin<&'a T> {
     {
         let pointer = &*self.pointer;
         let new_pointer = func(pointer);
-        Pin::new_unchecked(new_pointer)
+
+        // SAFETY: the safety contract for `new_unchecked` must be
+        // upheld by the caller.
+        unsafe { Pin::new_unchecked(new_pointer) }
     }
 
     /// Gets a shared reference out of a pin.
@@ -769,9 +772,13 @@ impl<'a, T: ?Sized> Pin<&'a mut T> {
         U: ?Sized,
         F: FnOnce(&mut T) -> &mut U,
     {
-        let pointer = Pin::get_unchecked_mut(self);
+        // SAFETY: the caller is responsible for not moving the
+        // value out of this reference.
+        let pointer = unsafe { Pin::get_unchecked_mut(self) };
         let new_pointer = func(pointer);
-        Pin::new_unchecked(new_pointer)
+        // SAFETY: as the value of `this` is guaranteed to not have
+        // been moved out, this call to `new_unchecked` is safe.
+        unsafe { Pin::new_unchecked(new_pointer) }
     }
 }
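These hunks (and the ones below) stop relying on the implicit `unsafe` context of `unsafe fn` bodies: each unsafe operation gets its own block plus a SAFETY comment. A sketch of the lint this appears to prepare for, `unsafe_op_in_unsafe_fn`; the lint was nightly-gated at the time of this change, and `deref` here is an illustrative function, not library code:

```rust
// Opt in explicitly: with this lint, the body of an `unsafe fn` is no
// longer one big implicit `unsafe` block.
#![deny(unsafe_op_in_unsafe_fn)]

/// # Safety
/// `ptr` must be non-null, aligned, and point to an initialized `u32`.
unsafe fn deref(ptr: *const u32) -> u32 {
    // Without this inner block, the dereference would now be rejected.
    // SAFETY: the caller upholds the contract documented above.
    unsafe { *ptr }
}

fn main() {
    let x = 7u32;
    // SAFETY: `&x` satisfies all of `deref`'s requirements.
    assert_eq!(unsafe { deref(&x) }, 7);
}
```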
 
diff --git a/src/libcore/ptr/const_ptr.rs b/src/libcore/ptr/const_ptr.rs
index 64a506a6377f2..d1d7a71523822 100644
--- a/src/libcore/ptr/const_ptr.rs
+++ b/src/libcore/ptr/const_ptr.rs
@@ -95,7 +95,9 @@ impl<T: ?Sized> *const T {
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
     pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
-        if self.is_null() { None } else { Some(&*self) }
+        // SAFETY: the caller must guarantee that `self` is valid
+        // for a reference if it isn't null.
+        if self.is_null() { None } else { unsafe { Some(&*self) } }
     }
 
     /// Calculates the offset from a pointer.
@@ -157,7 +159,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        intrinsics::offset(self, count)
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { intrinsics::offset(self, count) }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -292,7 +295,8 @@ impl<T: ?Sized> *const T {
     {
         let pointee_size = mem::size_of::<T>();
         assert!(0 < pointee_size && pointee_size <= isize::MAX as usize);
-        intrinsics::ptr_offset_from(self, origin)
+        // SAFETY: the caller must uphold the safety contract for `ptr_offset_from`.
+        unsafe { intrinsics::ptr_offset_from(self, origin) }
     }
 
     /// Returns whether two pointers are guaranteed to be equal.
@@ -471,7 +475,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        self.offset(count as isize)
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset(count as isize) }
     }
 
     /// Calculates the offset from a pointer (convenience for
@@ -534,7 +539,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        self.offset((count as isize).wrapping_neg())
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset((count as isize).wrapping_neg()) }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -663,7 +669,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        read(self)
+        // SAFETY: the caller must uphold the safety contract for `read`.
+        unsafe { read(self) }
     }
 
     /// Performs a volatile read of the value from `self` without moving it. This
@@ -682,7 +689,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        read_volatile(self)
+        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+        unsafe { read_volatile(self) }
     }
 
     /// Reads the value from `self` without moving it. This leaves the
@@ -699,7 +707,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        read_unaligned(self)
+        // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+        unsafe { read_unaligned(self) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -716,7 +725,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        copy(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy`.
+        unsafe { copy(self, dest, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -733,7 +743,8 @@ impl<T: ?Sized> *const T {
     where
         T: Sized,
     {
-        copy_nonoverlapping(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+        unsafe { copy_nonoverlapping(self, dest, count) }
     }
 
     /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
diff --git a/src/libcore/ptr/mod.rs b/src/libcore/ptr/mod.rs
index ca2b0c85ec121..5f028f9ea76ca 100644
--- a/src/libcore/ptr/mod.rs
+++ b/src/libcore/ptr/mod.rs
@@ -184,7 +184,9 @@ mod mut_ptr;
 pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
     // Code here does not matter - this is replaced by the
     // real drop glue by the compiler.
-    drop_in_place(to_drop)
+
+    // SAFETY: see comment above
+    unsafe { drop_in_place(to_drop) }
 }
 
 /// Creates a null raw pointer.
@@ -374,9 +376,15 @@ pub unsafe fn swap<T>(x: *mut T, y: *mut T) {
     let mut tmp = MaybeUninit::<T>::uninit();
 
     // Perform the swap
-    copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
-    copy(y, x, 1); // `x` and `y` may overlap
-    copy_nonoverlapping(tmp.as_ptr(), y, 1);
+    // SAFETY: the caller must guarantee that `x` and `y` are
+    // valid for writes and properly aligned. `tmp` cannot be
+    // overlapping either `x` or `y` because `tmp` was just allocated
+    // on the stack as a separate allocated object.
+    unsafe {
+        copy_nonoverlapping(x, tmp.as_mut_ptr(), 1);
+        copy(y, x, 1); // `x` and `y` may overlap
+        copy_nonoverlapping(tmp.as_ptr(), y, 1);
+    }
 }
 
 /// Swaps `count * size_of::<T>()` bytes between the two regions of memory
@@ -432,7 +440,9 @@ pub unsafe fn swap_nonoverlapping<T>(x: *mut T, y: *mut T, count: usize) {
     let x = x as *mut u8;
     let y = y as *mut u8;
     let len = mem::size_of::<T>() * count;
-    swap_nonoverlapping_bytes(x, y, len)
+    // SAFETY: the caller must guarantee that `x` and `y` are
+    // valid for writes and properly aligned.
+    unsafe { swap_nonoverlapping_bytes(x, y, len) }
 }
 
 #[inline]
@@ -440,11 +450,16 @@ pub(crate) unsafe fn swap_nonoverlapping_one<T>(x: *mut T, y: *mut T) {
     // For types smaller than the block optimization below,
     // just swap directly to avoid pessimizing codegen.
     if mem::size_of::<T>() < 32 {
-        let z = read(x);
-        copy_nonoverlapping(y, x, 1);
-        write(y, z);
+        // SAFETY: the caller must guarantee that `x` and `y` are valid
+        // for writes, properly aligned, and non-overlapping.
+        unsafe {
+            let z = read(x);
+            copy_nonoverlapping(y, x, 1);
+            write(y, z);
+        }
     } else {
-        swap_nonoverlapping(x, y, 1);
+        // SAFETY: the caller must uphold the safety contract for `swap_nonoverlapping`.
+        unsafe { swap_nonoverlapping(x, y, 1) };
     }
 }
 
@@ -471,14 +486,23 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
         // Declaring `t` here avoids aligning the stack when this loop is unused
         let mut t = mem::MaybeUninit::<Block>::uninit();
         let t = t.as_mut_ptr() as *mut u8;
-        let x = x.add(i);
-        let y = y.add(i);
 
-        // Swap a block of bytes of x & y, using t as a temporary buffer
-        // This should be optimized into efficient SIMD operations where available
-        copy_nonoverlapping(x, t, block_size);
-        copy_nonoverlapping(y, x, block_size);
-        copy_nonoverlapping(t, y, block_size);
+        // SAFETY: since `i < len` and the caller must guarantee that `x` and `y` are valid
+        // for `len` bytes, `x + i` and `y + i` must be valid addresses, which fulfills the
+        // safety contract for `add`.
+        //
+        // Also, the caller must guarantee that `x` and `y` are valid for writes, properly aligned,
+        // and non-overlapping, which fulfills the safety contract for `copy_nonoverlapping`.
+        unsafe {
+            let x = x.add(i);
+            let y = y.add(i);
+
+            // Swap a block of bytes of x & y, using t as a temporary buffer
+            // This should be optimized into efficient SIMD operations where available
+            copy_nonoverlapping(x, t, block_size);
+            copy_nonoverlapping(y, x, block_size);
+            copy_nonoverlapping(t, y, block_size);
+        }
         i += block_size;
     }
 
@@ -488,12 +512,16 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
         let rem = len - i;
 
         let t = t.as_mut_ptr() as *mut u8;
-        let x = x.add(i);
-        let y = y.add(i);
 
-        copy_nonoverlapping(x, t, rem);
-        copy_nonoverlapping(y, x, rem);
-        copy_nonoverlapping(t, y, rem);
+        // SAFETY: see previous safety comment.
+        unsafe {
+            let x = x.add(i);
+            let y = y.add(i);
+
+            copy_nonoverlapping(x, t, rem);
+            copy_nonoverlapping(y, x, rem);
+            copy_nonoverlapping(t, y, rem);
+        }
     }
 }
 
@@ -540,7 +568,13 @@ unsafe fn swap_nonoverlapping_bytes(x: *mut u8, y: *mut u8, len: usize) {
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
-    mem::swap(&mut *dst, &mut src); // cannot overlap
+    // SAFETY: the caller must guarantee that `dst` is valid to be
+    // cast to a mutable reference (valid for writes, aligned, initialized),
+    // and cannot overlap `src` since `dst` must point to a distinct
+    // allocated object.
+    unsafe {
+        mem::swap(&mut *dst, &mut src); // cannot overlap
+    }
     src
 }
 
@@ -658,8 +692,16 @@ pub unsafe fn replace<T>(dst: *mut T, mut src: T) -> T {
 pub unsafe fn read<T>(src: *const T) -> T {
     // `copy_nonoverlapping` takes care of debug_assert.
     let mut tmp = MaybeUninit::<T>::uninit();
-    copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
-    tmp.assume_init()
+    // SAFETY: the caller must guarantee that `src` is valid for reads.
+    // `src` cannot overlap `tmp` because `tmp` was just allocated on
+    // the stack as a separate allocated object.
+    //
+    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
+    // to be properly initialized.
+    unsafe {
+        copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
+        tmp.assume_init()
+    }
 }
 
 /// Reads the value from `src` without moving it. This leaves the
@@ -752,8 +794,16 @@ pub unsafe fn read<T>(src: *const T) -> T {
 pub unsafe fn read_unaligned<T>(src: *const T) -> T {
     // `copy_nonoverlapping` takes care of debug_assert.
     let mut tmp = MaybeUninit::<T>::uninit();
-    copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
-    tmp.assume_init()
+    // SAFETY: the caller must guarantee that `src` is valid for reads.
+    // `src` cannot overlap `tmp` because `tmp` was just allocated on
+    // the stack as a separate allocated object.
+    //
+    // Also, since we just wrote a valid value into `tmp`, it is guaranteed
+    // to be properly initialized.
+    unsafe {
+        copy_nonoverlapping(src as *const u8, tmp.as_mut_ptr() as *mut u8, mem::size_of::<T>());
+        tmp.assume_init()
+    }
 }
 
 /// Overwrites a memory location with the given value without reading or
@@ -847,7 +897,8 @@ pub unsafe fn write<T>(dst: *mut T, src: T) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    intrinsics::move_val_init(&mut *dst, src)
+    // SAFETY: the caller must uphold the safety contract for `move_val_init`.
+    unsafe { intrinsics::move_val_init(&mut *dst, src) }
 }
 
 /// Overwrites a memory location with the given value without reading or
@@ -939,8 +990,13 @@ pub unsafe fn write<T>(dst: *mut T, src: T) {
 #[inline]
 #[stable(feature = "ptr_unaligned", since = "1.17.0")]
 pub unsafe fn write_unaligned<T>(dst: *mut T, src: T) {
-    // `copy_nonoverlapping` takes care of debug_assert.
-    copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
+    // SAFETY: the caller must guarantee that `dst` is valid for writes.
+    // `dst` cannot overlap `src` because the caller has mutable access
+    // to `dst` while `src` is owned by this function.
+    unsafe {
+        // `copy_nonoverlapping` takes care of debug_assert.
+        copy_nonoverlapping(&src as *const T as *const u8, dst as *mut u8, mem::size_of::<T>());
+    }
     mem::forget(src);
 }
 
@@ -1015,7 +1071,8 @@ pub unsafe fn read_volatile<T>(src: *const T) -> T {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    intrinsics::volatile_load(src)
+    // SAFETY: the caller must uphold the safety contract for `volatile_load`.
+    unsafe { intrinsics::volatile_load(src) }
 }
 
 /// Performs a volatile write of a memory location with the given value without
@@ -1087,7 +1144,10 @@ pub unsafe fn write_volatile<T>(dst: *mut T, src: T) {
         // Not panicking to keep codegen impact smaller.
         abort();
     }
-    intrinsics::volatile_store(dst, src);
+    // SAFETY: the caller must uphold the safety contract for `volatile_store`.
+    unsafe {
+        intrinsics::volatile_store(dst, src);
+    }
 }
 
 /// Align pointer `p`.
@@ -1173,8 +1233,8 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
     }
 
     let smoda = stride & a_minus_one;
-    // a is power-of-two so cannot be 0. stride = 0 is handled above.
-    let gcdpow = intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a));
+    // SAFETY: a is power-of-two so cannot be 0. stride = 0 is handled above.
+    let gcdpow = unsafe { intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)) };
     let gcd = 1usize << gcdpow;
 
     if p as usize & (gcd.wrapping_sub(1)) == 0 {
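The new comments on `read` and `read_unaligned` hinge on one fact: the freshly created `MaybeUninit` is a separate stack allocation, so it cannot overlap `src`. A self-contained re-derivation of that argument using only public APIs (`read_sketch` is an illustrative name; real code should call `std::ptr::read`):

```rust
use std::mem::MaybeUninit;
use std::ptr;

/// # Safety
/// `src` must be valid for reads, aligned, and point to an initialized `T`.
unsafe fn read_sketch<T>(src: *const T) -> T {
    let mut tmp = MaybeUninit::<T>::uninit();
    // SAFETY: `src` is valid for reads (caller contract); `tmp` is a
    // distinct stack allocation, so the regions cannot overlap. After the
    // copy, `tmp` holds a valid `T`, making `assume_init` sound.
    unsafe {
        ptr::copy_nonoverlapping(src, tmp.as_mut_ptr(), 1);
        tmp.assume_init()
    }
}

fn main() {
    let v = 42i64;
    // SAFETY: `&v` is valid and aligned; `i64: Copy`, so duplicating
    // the value is fine.
    assert_eq!(unsafe { read_sketch(&v) }, 42);
}
```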
diff --git a/src/libcore/ptr/mut_ptr.rs b/src/libcore/ptr/mut_ptr.rs
index 6b5cd9fdb854d..7d4b6339b511f 100644
--- a/src/libcore/ptr/mut_ptr.rs
+++ b/src/libcore/ptr/mut_ptr.rs
@@ -89,7 +89,9 @@ impl<T: ?Sized> *mut T {
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
     pub unsafe fn as_ref<'a>(self) -> Option<&'a T> {
-        if self.is_null() { None } else { Some(&*self) }
+        // SAFETY: the caller must guarantee that `self` is valid for a
+        // reference if it isn't null.
+        if self.is_null() { None } else { unsafe { Some(&*self) } }
     }
 
     /// Calculates the offset from a pointer.
@@ -151,7 +153,10 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        intrinsics::offset(self, count) as *mut T
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        // The obtained pointer is valid for writes since the caller must
+        // guarantee that it points to the same allocated object as `self`.
+        unsafe { intrinsics::offset(self, count) as *mut T }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -270,7 +275,9 @@ impl<T: ?Sized> *mut T {
     #[stable(feature = "ptr_as_ref", since = "1.9.0")]
     #[inline]
     pub unsafe fn as_mut<'a>(self) -> Option<&'a mut T> {
-        if self.is_null() { None } else { Some(&mut *self) }
+        // SAFETY: the caller must guarantee that `self` is valid for
+        // a mutable reference if it isn't null.
+        if self.is_null() { None } else { unsafe { Some(&mut *self) } }
     }
 
     /// Returns whether two pointers are guaranteed to be equal.
@@ -406,7 +413,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        (self as *const T).offset_from(origin)
+        // SAFETY: the caller must uphold the safety contract for `offset_from`.
+        unsafe { (self as *const T).offset_from(origin) }
     }
 
     /// Calculates the distance between two pointers. The returned value is in
@@ -518,7 +526,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        self.offset(count as isize)
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset(count as isize) }
     }
 
     /// Calculates the offset from a pointer (convenience for
@@ -581,7 +590,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        self.offset((count as isize).wrapping_neg())
+        // SAFETY: the caller must uphold the safety contract for `offset`.
+        unsafe { self.offset((count as isize).wrapping_neg()) }
     }
 
     /// Calculates the offset from a pointer using wrapping arithmetic.
@@ -710,7 +720,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        read(self)
+        // SAFETY: the caller must uphold the safety contract for `read`.
+        unsafe { read(self) }
     }
 
     /// Performs a volatile read of the value from `self` without moving it. This
@@ -729,7 +740,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        read_volatile(self)
+        // SAFETY: the caller must uphold the safety contract for `read_volatile`.
+        unsafe { read_volatile(self) }
     }
 
     /// Reads the value from `self` without moving it. This leaves the
@@ -746,7 +758,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        read_unaligned(self)
+        // SAFETY: the caller must uphold the safety contract for `read_unaligned`.
+        unsafe { read_unaligned(self) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -763,7 +776,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy`.
+        unsafe { copy(self, dest, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `self` to `dest`. The source
@@ -780,7 +794,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy_nonoverlapping(self, dest, count)
+        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+        unsafe { copy_nonoverlapping(self, dest, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
@@ -797,7 +812,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy(src, self, count)
+        // SAFETY: the caller must uphold the safety contract for `copy`.
+        unsafe { copy(src, self, count) }
     }
 
     /// Copies `count * size_of<T>` bytes from `src` to `self`. The source
@@ -814,7 +830,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        copy_nonoverlapping(src, self, count)
+        // SAFETY: the caller must uphold the safety contract for `copy_nonoverlapping`.
+        unsafe { copy_nonoverlapping(src, self, count) }
     }
 
     /// Executes the destructor (if any) of the pointed-to value.
@@ -825,7 +842,8 @@ impl<T: ?Sized> *mut T {
     #[stable(feature = "pointer_methods", since = "1.26.0")]
     #[inline]
     pub unsafe fn drop_in_place(self) {
-        drop_in_place(self)
+        // SAFETY: the caller must uphold the safety contract for `drop_in_place`.
+        unsafe { drop_in_place(self) }
     }
 
     /// Overwrites a memory location with the given value without reading or
@@ -840,7 +858,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write(self, val)
+        // SAFETY: the caller must uphold the safety contract for `write`.
+        unsafe { write(self, val) }
     }
 
     /// Invokes memset on the specified pointer, setting `count * size_of::<T>()`
@@ -855,7 +874,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write_bytes(self, val, count)
+        // SAFETY: the caller must uphold the safety contract for `write_bytes`.
+        unsafe { write_bytes(self, val, count) }
     }
 
     /// Performs a volatile write of a memory location with the given value without
@@ -874,7 +894,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write_volatile(self, val)
+        // SAFETY: the caller must uphold the safety contract for `write_volatile`.
+        unsafe { write_volatile(self, val) }
     }
 
     /// Overwrites a memory location with the given value without reading or
@@ -891,7 +912,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        write_unaligned(self, val)
+        // SAFETY: the caller must uphold the safety contract for `write_unaligned`.
+        unsafe { write_unaligned(self, val) }
     }
 
     /// Replaces the value at `self` with `src`, returning the old
@@ -906,7 +928,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        replace(self, src)
+        // SAFETY: the caller must uphold the safety contract for `replace`.
+        unsafe { replace(self, src) }
     }
 
     /// Swaps the values at two mutable locations of the same type, without
@@ -922,7 +945,8 @@ impl<T: ?Sized> *mut T {
     where
         T: Sized,
     {
-        swap(self, with)
+        // SAFETY: the caller must uphold the safety contract for `swap`.
+        unsafe { swap(self, with) }
     }
 
     /// Computes the offset that needs to be applied to the pointer in order to make it aligned to
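One usage note on the `as_ref`/`as_mut` hunks: the null check itself is safe, which is why the new `unsafe` block wraps only the reborrowing arm. For example:

```rust
fn main() {
    let mut x = 5i32;
    let p: *mut i32 = &mut x;
    // SAFETY: `p` points to a live, aligned `i32` and no other reference
    // to `x` is alive, so producing `&mut` from it is sound.
    if let Some(r) = unsafe { p.as_mut() } {
        *r += 1;
    }
    assert_eq!(x, 6);

    let null: *const i32 = std::ptr::null();
    // SAFETY: `as_ref` returns `None` for null without dereferencing.
    assert!(unsafe { null.as_ref() }.is_none());
}
```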
diff --git a/src/libcore/ptr/non_null.rs b/src/libcore/ptr/non_null.rs
index 870364a61dd47..c2d31bfb6a4ee 100644
--- a/src/libcore/ptr/non_null.rs
+++ b/src/libcore/ptr/non_null.rs
@@ -87,7 +87,8 @@ impl<T: ?Sized> NonNull<T> {
     #[rustc_const_stable(feature = "const_nonnull_new_unchecked", since = "1.32.0")]
     #[inline]
     pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
-        NonNull { pointer: ptr as _ }
+        // SAFETY: the caller must guarantee that `ptr` is non-null.
+        unsafe { NonNull { pointer: ptr as _ } }
     }
 
     /// Creates a new `NonNull` if `ptr` is non-null.
@@ -118,7 +119,9 @@ impl<T: ?Sized> NonNull<T> {
     #[stable(feature = "nonnull", since = "1.25.0")]
     #[inline]
     pub unsafe fn as_ref(&self) -> &T {
-        &*self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a reference.
+        unsafe { &*self.as_ptr() }
     }
 
     /// Mutably dereferences the content.
@@ -129,7 +132,9 @@ impl<T: ?Sized> NonNull<T> {
     #[stable(feature = "nonnull", since = "1.25.0")]
     #[inline]
     pub unsafe fn as_mut(&mut self) -> &mut T {
-        &mut *self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a mutable reference.
+        unsafe { &mut *self.as_ptr() }
     }
 
     /// Casts to a pointer of another type.
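For the `new_unchecked` hunk it is worth contrasting the safe constructor: `NonNull::new` does the null check at runtime, so `new_unchecked` is only for cases where non-nullness is already proven, as the new SAFETY comment states:

```rust
use std::ptr::NonNull;

fn main() {
    let mut x = 1u8;
    // Checked constructor: the null test happens at runtime.
    assert!(NonNull::new(&mut x as *mut u8).is_some());
    assert!(NonNull::new(std::ptr::null_mut::<u8>()).is_none());

    // Unchecked constructor: the burden moves to a SAFETY argument.
    // SAFETY: a pointer derived from a reference can never be null.
    let nn = unsafe { NonNull::new_unchecked(&mut x as *mut u8) };
    assert_eq!(nn.as_ptr(), &mut x as *mut u8);
}
```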
diff --git a/src/libcore/ptr/unique.rs b/src/libcore/ptr/unique.rs
index f58d35f06137d..78647eee3389a 100644
--- a/src/libcore/ptr/unique.rs
+++ b/src/libcore/ptr/unique.rs
@@ -87,7 +87,8 @@ impl<T: ?Sized> Unique<T> {
     /// `ptr` must be non-null.
     #[inline]
     pub const unsafe fn new_unchecked(ptr: *mut T) -> Self {
-        Unique { pointer: ptr as _, _marker: PhantomData }
+        // SAFETY: the caller must guarantee that `ptr` is non-null.
+        unsafe { Unique { pointer: ptr as _, _marker: PhantomData } }
     }
 
     /// Creates a new `Unique` if `ptr` is non-null.
@@ -114,7 +115,9 @@ impl<T: ?Sized> Unique<T> {
     /// (unbound) lifetime is needed, use `&*my_ptr.as_ptr()`.
     #[inline]
     pub unsafe fn as_ref(&self) -> &T {
-        &*self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a reference.
+        unsafe { &*self.as_ptr() }
     }
 
     /// Mutably dereferences the content.
@@ -124,7 +127,9 @@ impl<T: ?Sized> Unique<T> {
     /// (unbound) lifetime is needed, use `&mut *my_ptr.as_ptr()`.
     #[inline]
     pub unsafe fn as_mut(&mut self) -> &mut T {
-        &mut *self.as_ptr()
+        // SAFETY: the caller must guarantee that `self` meets all the
+        // requirements for a mutable reference.
+        unsafe { &mut *self.as_ptr() }
     }
 
     /// Casts to a pointer of another type.
diff --git a/src/libcore/slice/mod.rs b/src/libcore/slice/mod.rs
index 6a50cdbc1d9fb..e7a2d7adedea0 100644
--- a/src/libcore/slice/mod.rs
+++ b/src/libcore/slice/mod.rs
@@ -310,7 +310,8 @@ impl<T> [T] {
     where
         I: SliceIndex<Self>,
     {
-        index.get_unchecked(self)
+        // SAFETY: the caller must uphold the safety requirements for `get_unchecked`.
+        unsafe { index.get_unchecked(self) }
     }
 
     /// Returns a mutable reference to an element or subslice, without doing
@@ -341,7 +342,8 @@ impl<T> [T] {
     where
         I: SliceIndex<Self>,
     {
-        index.get_unchecked_mut(self)
+        // SAFETY: the caller must uphold the safety requirements for `get_unchecked_mut`.
+        unsafe { index.get_unchecked_mut(self) }
     }
 
     /// Returns a raw pointer to the slice's buffer.
@@ -1459,6 +1461,68 @@ impl<T> [T] {
         m >= n && needle == &self[m - n..]
     }
 
+    /// Returns a subslice with the prefix removed.
+    ///
+    /// This method returns [`None`] if the slice does not start with `prefix`.
+    /// If `prefix` is empty, the original slice is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(slice_strip)]
+    /// let v = &[10, 40, 30];
+    /// assert_eq!(v.strip_prefix(&[10]), Some(&[40, 30][..]));
+    /// assert_eq!(v.strip_prefix(&[10, 40]), Some(&[30][..]));
+    /// assert_eq!(v.strip_prefix(&[50]), None);
+    /// assert_eq!(v.strip_prefix(&[10, 50]), None);
+    /// ```
+    #[must_use = "returns the subslice without modifying the original"]
+    #[unstable(feature = "slice_strip", issue = "73413")]
+    pub fn strip_prefix(&self, prefix: &[T]) -> Option<&[T]>
+    where
+        T: PartialEq,
+    {
+        let n = prefix.len();
+        if n <= self.len() {
+            let (head, tail) = self.split_at(n);
+            if head == prefix {
+                return Some(tail);
+            }
+        }
+        None
+    }
+
+    /// Returns a subslice with the suffix removed.
+    ///
+    /// This method returns [`None`] if the slice does not end with `suffix`.
+    /// If `suffix` is empty, the original slice is returned.
+    ///
+    /// # Examples
+    ///
+    /// ```
+    /// #![feature(slice_strip)]
+    /// let v = &[10, 40, 30];
+    /// assert_eq!(v.strip_suffix(&[30]), Some(&[10, 40][..]));
+    /// assert_eq!(v.strip_suffix(&[40, 30]), Some(&[10][..]));
+    /// assert_eq!(v.strip_suffix(&[50]), None);
+    /// assert_eq!(v.strip_suffix(&[50, 30]), None);
+    /// ```
+    #[must_use = "returns the subslice without modifying the original"]
+    #[unstable(feature = "slice_strip", issue = "73413")]
+    pub fn strip_suffix(&self, suffix: &[T]) -> Option<&[T]>
+    where
+        T: PartialEq,
+    {
+        let (len, n) = (self.len(), suffix.len());
+        if n <= len {
+            let (head, tail) = self.split_at(len - n);
+            if tail == suffix {
+                return Some(head);
+            }
+        }
+        None
+    }
+
     /// Binary searches this sorted slice for a given element.
     ///
     /// If the value is found then [`Result::Ok`] is returned, containing the
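The new `strip_prefix`/`strip_suffix` methods above mirror the `str` methods of the same names but are still gated behind the unstable `slice_strip` feature at this point. For readers on stable, a free-function sketch of the identical logic (`strip_prefix` below is a hypothetical helper, not the library API):

```rust
fn strip_prefix<'a, T: PartialEq>(v: &'a [T], prefix: &[T]) -> Option<&'a [T]> {
    if prefix.len() <= v.len() {
        let (head, tail) = v.split_at(prefix.len());
        if head == prefix {
            return Some(tail);
        }
    }
    None
}

fn main() {
    let v = [10, 40, 30];
    assert_eq!(strip_prefix(&v, &[10, 40]), Some(&[30][..]));
    assert_eq!(strip_prefix(&v, &[50]), None);
    // An empty prefix strips nothing and returns the whole slice.
    assert_eq!(strip_prefix(&v, &[]), Some(&v[..]));
}
```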
@@ -2519,18 +2583,21 @@ impl<T> [T] {
         // First, find at what point do we split between the first and 2nd slice. Easy with
         // ptr.align_offset.
         let ptr = self.as_ptr();
-        let offset = crate::ptr::align_offset(ptr, mem::align_of::<U>());
+        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
         if offset > self.len() {
             (self, &[], &[])
         } else {
             let (left, rest) = self.split_at(offset);
-            // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay
             let (us_len, ts_len) = rest.align_to_offsets::<U>();
-            (
-                left,
-                from_raw_parts(rest.as_ptr() as *const U, us_len),
-                from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
-            )
+            // SAFETY: now `rest` is definitely aligned, so `from_raw_parts` below is okay,
+            // since the caller guarantees that we can transmute `T` to `U` safely.
+            unsafe {
+                (
+                    left,
+                    from_raw_parts(rest.as_ptr() as *const U, us_len),
+                    from_raw_parts(rest.as_ptr().add(rest.len() - ts_len), ts_len),
+                )
+            }
         }
     }
 
@@ -2575,21 +2642,23 @@ impl<T> [T] {
         // First, find at what point do we split between the first and 2nd slice. Easy with
         // ptr.align_offset.
         let ptr = self.as_ptr();
-        let offset = crate::ptr::align_offset(ptr, mem::align_of::<U>());
+        let offset = unsafe { crate::ptr::align_offset(ptr, mem::align_of::<U>()) };
         if offset > self.len() {
             (self, &mut [], &mut [])
         } else {
             let (left, rest) = self.split_at_mut(offset);
-            // now `rest` is definitely aligned, so `from_raw_parts_mut` below is okay
             let (us_len, ts_len) = rest.align_to_offsets::<U>();
             let rest_len = rest.len();
             let mut_ptr = rest.as_mut_ptr();
             // We can't use `rest` again after this, that would invalidate its alias `mut_ptr`!
-            (
-                left,
-                from_raw_parts_mut(mut_ptr as *mut U, us_len),
-                from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
-            )
+            // SAFETY: see comments for `align_to`.
+            unsafe {
+                (
+                    left,
+                    from_raw_parts_mut(mut_ptr as *mut U, us_len),
+                    from_raw_parts_mut(mut_ptr.add(rest_len - ts_len), ts_len),
+                )
+            }
         }
     }
 
@@ -2914,12 +2983,18 @@ impl<T> SliceIndex<[T]> for usize {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &T {
-        &*slice.as_ptr().add(self)
+        // SAFETY: `slice` cannot be longer than `isize::MAX`, and the caller
+        // guarantees that `self` is in bounds of `slice`, so `self` cannot
+        // overflow an `isize`, which makes the call to `add` safe.
+        // The obtained pointer comes from a reference which is guaranteed
+        // to be valid.
+        unsafe { &*slice.as_ptr().add(self) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut T {
-        &mut *slice.as_mut_ptr().add(self)
+        // SAFETY: see comments for `get_unchecked` above.
+        unsafe { &mut *slice.as_mut_ptr().add(self) }
     }
 
     #[inline]
@@ -2959,12 +3034,18 @@ impl<T> SliceIndex<[T]> for ops::Range<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start)
+        // SAFETY: `slice` cannot be longer than `isize::MAX`, and the caller
+        // guarantees that `self` is in bounds of `slice`, so `self` cannot
+        // overflow an `isize`, which makes the call to `add` safe.
+        // Also, since the caller guarantees that `self` is in bounds of `slice`,
+        // `from_raw_parts` will give a subslice of `slice`, which is always safe.
+        unsafe { from_raw_parts(slice.as_ptr().add(self.start), self.end - self.start) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start)
+        // SAFETY: see comments for `get_unchecked` above.
+        unsafe { from_raw_parts_mut(slice.as_mut_ptr().add(self.start), self.end - self.start) }
     }
 
     #[inline]
@@ -3004,12 +3085,14 @@ impl<T> SliceIndex<[T]> for ops::RangeTo<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (0..self.end).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (0..self.end).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (0..self.end).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (0..self.end).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3039,12 +3122,14 @@ impl<T> SliceIndex<[T]> for ops::RangeFrom<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (self.start..slice.len()).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (self.start..slice.len()).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (self.start..slice.len()).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (self.start..slice.len()).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3113,12 +3198,14 @@ impl<T> SliceIndex<[T]> for ops::RangeInclusive<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (*self.start()..self.end() + 1).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (*self.start()..self.end() + 1).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (*self.start()..self.end() + 1).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (*self.start()..self.end() + 1).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3154,12 +3241,14 @@ impl<T> SliceIndex<[T]> for ops::RangeToInclusive<usize> {
 
     #[inline]
     unsafe fn get_unchecked(self, slice: &[T]) -> &[T] {
-        (0..=self.end).get_unchecked(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked`.
+        unsafe { (0..=self.end).get_unchecked(slice) }
     }
 
     #[inline]
     unsafe fn get_unchecked_mut(self, slice: &mut [T]) -> &mut [T] {
-        (0..=self.end).get_unchecked_mut(slice)
+        // SAFETY: the caller has to uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (0..=self.end).get_unchecked_mut(slice) }
     }
 
     #[inline]
@@ -3308,7 +3397,9 @@ macro_rules! iterator {
                     self.ptr.as_ptr()
                 } else {
                     let old = self.ptr.as_ptr();
-                    self.ptr = NonNull::new_unchecked(self.ptr.as_ptr().offset(offset));
+                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
+                    // so this new pointer is inside `self` and thus guaranteed to be non-null.
+                    self.ptr = unsafe { NonNull::new_unchecked(self.ptr.as_ptr().offset(offset)) };
                     old
                 }
             }
@@ -3322,7 +3413,10 @@ macro_rules! iterator {
                     zst_shrink!(self, offset);
                     self.ptr.as_ptr()
                 } else {
-                    self.end = self.end.offset(-offset);
+                    // SAFETY: the caller guarantees that `offset` doesn't exceed `self.len()`,
+                    // which is guaranteed to not overflow an `isize`. Also, the resulting
+                    // pointer is in bounds of the underlying slice, which fulfills the
+                    // other requirements for `offset`.
+                    self.end = unsafe { self.end.offset(-offset) };
                     self.end
                 }
             }
@@ -4640,7 +4734,11 @@ impl<T> FusedIterator for Windows<'_, T> {}
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for Windows<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
-        from_raw_parts(self.v.as_ptr().add(i), self.size)
+        // SAFETY: the caller guarantees that `i` is in bounds, so `i`
+        // cannot overflow an `isize`, and the slice created by
+        // `from_raw_parts` is a subslice of `self.v` and is thus
+        // guaranteed to be valid for the lifetime `'a` of `self.v`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(i), self.size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -4784,7 +4882,14 @@ unsafe impl<'a, T> TrustedRandomAccess for Chunks<'a, T> {
             None => self.v.len(),
             Some(end) => cmp::min(end, self.v.len()),
         };
-        from_raw_parts(self.v.as_ptr().add(start), end - start)
+        // SAFETY: the caller guarantees that `i` is in bounds,
+        // which means that `start` must be in bounds of the
+        // underlying `self.v` slice, and we made sure that `end`
+        // is also in bounds of `self.v`. Thus, `start` cannot overflow
+        // an `isize`, and the slice constructed by `from_raw_parts`
+        // is a subslice of `self.v` which is guaranteed to be valid
+        // for the lifetime `'a` of `self.v`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -4926,7 +5031,8 @@ unsafe impl<'a, T> TrustedRandomAccess for ChunksMut<'a, T> {
             None => self.v.len(),
             Some(end) => cmp::min(end, self.v.len()),
         };
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start)
+        // SAFETY: see comments for `Chunks::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5063,7 +5169,8 @@ impl<T> FusedIterator for ChunksExact<'_, T> {}
 unsafe impl<'a, T> TrustedRandomAccess for ChunksExact<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
         let start = i * self.chunk_size;
-        from_raw_parts(self.v.as_ptr().add(start), self.chunk_size)
+        // SAFETY: mostly identical to `Chunks::get_unchecked`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5197,7 +5304,8 @@ impl<T> FusedIterator for ChunksExactMut<'_, T> {}
 unsafe impl<'a, T> TrustedRandomAccess for ChunksExactMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
         let start = i * self.chunk_size;
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size)
+        // SAFETY: see comments for `ChunksExact::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5344,7 +5452,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunks<'a, T> {
             None => 0,
             Some(start) => start,
         };
-        from_raw_parts(self.v.as_ptr().add(start), end - start)
+        // SAFETY: mostly identical to `Chunks::get_unchecked`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5489,7 +5598,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunksMut<'a, T> {
             None => 0,
             Some(start) => start,
         };
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start)
+        // SAFETY: see comments for `RChunks::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), end - start) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5630,7 +5740,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunksExact<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a [T] {
         let end = self.v.len() - i * self.chunk_size;
         let start = end - self.chunk_size;
-        from_raw_parts(self.v.as_ptr().add(start), self.chunk_size)
+        // SAFETY: mostly identical to `Chunks::get_unchecked`.
+        unsafe { from_raw_parts(self.v.as_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5769,7 +5880,8 @@ unsafe impl<'a, T> TrustedRandomAccess for RChunksExactMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut [T] {
         let end = self.v.len() - i * self.chunk_size;
         let start = end - self.chunk_size;
-        from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size)
+        // SAFETY: see comments for `RChunksExact::get_unchecked`.
+        unsafe { from_raw_parts_mut(self.v.as_mut_ptr().add(start), self.chunk_size) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -5865,7 +5977,8 @@ pub unsafe fn from_raw_parts<'a, T>(data: *const T, len: usize) -> &'a [T] {
         mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
         "attempt to create slice covering at least half the address space"
     );
-    &*ptr::slice_from_raw_parts(data, len)
+    // SAFETY: the caller must uphold the safety contract for `from_raw_parts`.
+    unsafe { &*ptr::slice_from_raw_parts(data, len) }
 }
 
 /// Performs the same functionality as [`from_raw_parts`], except that a
@@ -5905,7 +6018,8 @@ pub unsafe fn from_raw_parts_mut<'a, T>(data: *mut T, len: usize) -> &'a mut [T]
         mem::size_of::<T>().saturating_mul(len) <= isize::MAX as usize,
         "attempt to create slice covering at least half the address space"
     );
-    &mut *ptr::slice_from_raw_parts_mut(data, len)
+    // SAFETY: the caller must uphold the safety contract for `from_raw_parts_mut`.
+    unsafe { &mut *ptr::slice_from_raw_parts_mut(data, len) }
 }
 
 /// Converts a reference to T into a slice of length 1 (without copying).
@@ -6181,7 +6295,11 @@ impl_marker_for!(BytewiseEquality,
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a T {
-        &*self.ptr.as_ptr().add(i)
+        // SAFETY: the caller must guarantee that `i` is in bounds
+        // of the underlying slice, so `i` cannot overflow an `isize`,
+        // and the returned reference is guaranteed to refer to an element
+        // of the slice and thus guaranteed to be valid.
+        unsafe { &*self.ptr.as_ptr().add(i) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -6191,7 +6309,8 @@ unsafe impl<'a, T> TrustedRandomAccess for Iter<'a, T> {
 #[doc(hidden)]
 unsafe impl<'a, T> TrustedRandomAccess for IterMut<'a, T> {
     unsafe fn get_unchecked(&mut self, i: usize) -> &'a mut T {
-        &mut *self.ptr.as_ptr().add(i)
+        // SAFETY: see comments for `Iter::get_unchecked`.
+        unsafe { &mut *self.ptr.as_ptr().add(i) }
     }
     fn may_have_side_effect() -> bool {
         false
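The `align_to` hunks move the alignment reasoning into SAFETY comments; what remains for callers is the transmute claim named in those comments. A short example of that contract in use:

```rust
fn main() {
    let bytes = [0u8; 13];
    // SAFETY: every bit pattern of two `u8`s is a valid `u16`, so
    // reinterpreting the aligned middle of the slice is sound.
    let (prefix, middle, suffix) = unsafe { bytes.align_to::<u16>() };
    // The three parts always cover the original slice exactly.
    assert_eq!(prefix.len() + 2 * middle.len() + suffix.len(), bytes.len());
}
```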
diff --git a/src/libcore/slice/rotate.rs b/src/libcore/slice/rotate.rs
index f73e14f27e092..a89596b15ef94 100644
--- a/src/libcore/slice/rotate.rs
+++ b/src/libcore/slice/rotate.rs
@@ -1,3 +1,5 @@
+// ignore-tidy-undocumented-unsafe
+
 use crate::cmp;
 use crate::mem::{self, MaybeUninit};
 use crate::ptr;
@@ -77,9 +79,9 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // the way until about `left + right == 32`, but the worst case performance breaks even
             // around 16. 24 was chosen as middle ground. If the size of `T` is larger than 4
             // `usize`s, this algorithm also outperforms other algorithms.
-            let x = mid.sub(left);
+            let x = unsafe { mid.sub(left) };
             // beginning of first round
-            let mut tmp: T = x.read();
+            let mut tmp: T = unsafe { x.read() };
             let mut i = right;
             // `gcd` can be found before hand by calculating `gcd(left + right, right)`,
             // but it is faster to do one loop which calculates the gcd as a side effect, then
@@ -90,7 +92,7 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // the very end. This is possibly due to the fact that swapping or replacing temporaries
             // uses only one memory address in the loop instead of needing to manage two.
             loop {
-                tmp = x.add(i).replace(tmp);
+                tmp = unsafe { x.add(i).replace(tmp) };
                 // instead of incrementing `i` and then checking if it is outside the bounds, we
                 // check if `i` will go outside the bounds on the next increment. This prevents
                 // any wrapping of pointers or `usize`.
@@ -98,7 +100,7 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
                     i -= left;
                     if i == 0 {
                         // end of first round
-                        x.write(tmp);
+                        unsafe { x.write(tmp) };
                         break;
                     }
                     // this conditional must be here if `left + right >= 15`
@@ -111,14 +113,14 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             }
             // finish the chunk with more rounds
             for start in 1..gcd {
-                tmp = x.add(start).read();
+                tmp = unsafe { x.add(start).read() };
                 i = start + right;
                 loop {
-                    tmp = x.add(i).replace(tmp);
+                    tmp = unsafe { x.add(i).replace(tmp) };
                     if i >= left {
                         i -= left;
                         if i == start {
-                            x.add(start).write(tmp);
+                            unsafe { x.add(start).write(tmp) };
                             break;
                         }
                     } else {
@@ -133,15 +135,19 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // The `[T; 0]` here is to ensure this is appropriately aligned for T
             let mut rawarray = MaybeUninit::<(BufType, [T; 0])>::uninit();
             let buf = rawarray.as_mut_ptr() as *mut T;
-            let dim = mid.sub(left).add(right);
+            let dim = unsafe { mid.sub(left).add(right) };
             if left <= right {
-                ptr::copy_nonoverlapping(mid.sub(left), buf, left);
-                ptr::copy(mid, mid.sub(left), right);
-                ptr::copy_nonoverlapping(buf, dim, left);
+                unsafe {
+                    ptr::copy_nonoverlapping(mid.sub(left), buf, left);
+                    ptr::copy(mid, mid.sub(left), right);
+                    ptr::copy_nonoverlapping(buf, dim, left);
+                }
             } else {
-                ptr::copy_nonoverlapping(mid, buf, right);
-                ptr::copy(mid.sub(left), dim, left);
-                ptr::copy_nonoverlapping(buf, mid.sub(left), right);
+                unsafe {
+                    ptr::copy_nonoverlapping(mid, buf, right);
+                    ptr::copy(mid.sub(left), dim, left);
+                    ptr::copy_nonoverlapping(buf, mid.sub(left), right);
+                }
             }
             return;
         } else if left >= right {
@@ -150,8 +156,10 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
             // of this algorithm would be, and swapping using that last chunk instead of swapping
             // adjacent chunks like this algorithm is doing, but this way is still faster.
             loop {
-                ptr::swap_nonoverlapping(mid.sub(right), mid, right);
-                mid = mid.sub(right);
+                unsafe {
+                    ptr::swap_nonoverlapping(mid.sub(right), mid, right);
+                    mid = mid.sub(right);
+                }
                 left -= right;
                 if left < right {
                     break;
@@ -160,8 +168,10 @@ pub unsafe fn ptr_rotate<T>(mut left: usize, mut mid: *mut T, mut right: usize)
         } else {
             // Algorithm 3, `left < right`
             loop {
-                ptr::swap_nonoverlapping(mid.sub(left), mid, left);
-                mid = mid.add(left);
+                unsafe {
+                    ptr::swap_nonoverlapping(mid.sub(left), mid, left);
+                    mid = mid.add(left);
+                }
                 right -= left;
                 if right < left {
                     break;
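`ptr_rotate` is the unsafe core behind the safe `rotate_left`/`rotate_right` slice methods, so all three algorithms it dispatches between must preserve the same observable result; a quick check of that invariant:

```rust
fn main() {
    let mut v = [1, 2, 3, 4, 5, 6];
    v.rotate_left(2); // backed by `ptr_rotate`
    assert_eq!(v, [3, 4, 5, 6, 1, 2]);
    v.rotate_right(2); // rotating back restores the original order
    assert_eq!(v, [1, 2, 3, 4, 5, 6]);
}
```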
diff --git a/src/libcore/str/mod.rs b/src/libcore/str/mod.rs
index 6c4b28499a60b..0014501d2c4d0 100644
--- a/src/libcore/str/mod.rs
+++ b/src/libcore/str/mod.rs
@@ -419,7 +419,11 @@ pub fn from_utf8_mut(v: &mut [u8]) -> Result<&mut str, Utf8Error> {
 #[inline]
 #[stable(feature = "rust1", since = "1.0.0")]
 pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
-    &*(v as *const [u8] as *const str)
+    // SAFETY: the caller must guarantee that the bytes `v`
+    // are valid UTF-8, thus the cast to `*const str` is safe.
+    // Also, the pointer dereference is safe because that pointer
+    // comes from a reference which is guaranteed to be valid for reads.
+    unsafe { &*(v as *const [u8] as *const str) }
 }
 
 /// Converts a slice of bytes to a string slice without checking
@@ -444,7 +448,11 @@ pub unsafe fn from_utf8_unchecked(v: &[u8]) -> &str {
 #[inline]
 #[stable(feature = "str_mut_extras", since = "1.20.0")]
 pub unsafe fn from_utf8_unchecked_mut(v: &mut [u8]) -> &mut str {
-    &mut *(v as *mut [u8] as *mut str)
+    // SAFETY: the caller must guarantee that the bytes `v`
+    // are valid UTF-8, thus the cast to `*mut str` is safe.
+    // Also, the pointer dereference is safe because that pointer
+    // comes from a reference which is guaranteed to be valid for writes.
+    unsafe { &mut *(v as *mut [u8] as *mut str) }
 }
 
 #[stable(feature = "rust1", since = "1.0.0")]
@@ -867,7 +875,9 @@ unsafe impl TrustedLen for Bytes<'_> {}
 #[doc(hidden)]
 unsafe impl TrustedRandomAccess for Bytes<'_> {
     unsafe fn get_unchecked(&mut self, i: usize) -> u8 {
-        self.0.get_unchecked(i)
+        // SAFETY: the caller must uphold the safety contract
+        // for `TrustedRandomAccess::get_unchecked`.
+        unsafe { self.0.get_unchecked(i) }
     }
     fn may_have_side_effect() -> bool {
         false
@@ -1904,15 +1914,27 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            let ptr = slice.as_ptr().add(self.start);
+            // SAFETY: the caller guarantees that `self` is in bounds of `slice`
+            // which satisfies all the conditions for `add`.
+            let ptr = unsafe { slice.as_ptr().add(self.start) };
             let len = self.end - self.start;
-            super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
+            // SAFETY: as the caller guarantees that `self` is in bounds of `slice`,
+            // we can safely construct a subslice with `from_raw_parts` and use it
+            // since we return a shared (and thus immutable) reference.
+            // The call to `from_utf8_unchecked` is safe since the data comes from
+            // a `str`, which is guaranteed to be valid UTF-8, and since the caller
+            // must guarantee that `self.start` and `self.end` are char boundaries.
+            unsafe { super::from_utf8_unchecked(slice::from_raw_parts(ptr, len)) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            let ptr = slice.as_mut_ptr().add(self.start);
+            // SAFETY: see comments for `get_unchecked`.
+            let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
             let len = self.end - self.start;
-            super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len))
+            // SAFETY: mostly identical to the comments for `get_unchecked`, except that we
+            // can return a mutable reference since the caller passed a mutable reference
+            // and is thus guaranteed to have exclusive write access to `slice`.
+            unsafe { super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len)) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -1974,12 +1996,21 @@ mod traits {
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
             let ptr = slice.as_ptr();
-            super::from_utf8_unchecked(slice::from_raw_parts(ptr, self.end))
+            // SAFETY: as the caller guarantees that `self` is in bounds of `slice`,
+            // we can safely construct a subslice with `from_raw_parts` and use it
+            // since we return a shared (and thus immutable) reference.
+            // The call to `from_utf8_unchecked` is safe since the data comes from
+            // a `str`, which is guaranteed to be valid UTF-8, and since the caller
+            // must guarantee that `self.end` is a char boundary.
+            unsafe { super::from_utf8_unchecked(slice::from_raw_parts(ptr, self.end)) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
             let ptr = slice.as_mut_ptr();
-            super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, self.end))
+            // SAFETY: mostly identical to `get_unchecked`, except that we can safely
+            // return a mutable reference since the caller passed a mutable reference
+            // and is thus guaranteed to have exclusive write access to `slice`.
+            unsafe { super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, self.end)) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2036,15 +2067,27 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            let ptr = slice.as_ptr().add(self.start);
+            // SAFETY: the caller guarantees that `self` is in bounds of `slice`
+            // which satisfies all the conditions for `add`.
+            let ptr = unsafe { slice.as_ptr().add(self.start) };
             let len = slice.len() - self.start;
-            super::from_utf8_unchecked(slice::from_raw_parts(ptr, len))
+            // SAFETY: as the caller guarantees that `self` is in bounds of `slice`,
+            // we can safely construct a subslice with `from_raw_parts` and use it
+            // since we return a shared (and thus immutable) reference.
+            // The call to `from_utf8_unchecked` is safe since the data comes from
+            // a `str`, which is guaranteed to be valid UTF-8, and since the caller
+            // must guarantee that `self.start` is a char boundary.
+            unsafe { super::from_utf8_unchecked(slice::from_raw_parts(ptr, len)) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            let ptr = slice.as_mut_ptr().add(self.start);
+            // SAFETY: identical to `get_unchecked`.
+            let ptr = unsafe { slice.as_mut_ptr().add(self.start) };
             let len = slice.len() - self.start;
-            super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len))
+            // SAFETY: mostly identical to `get_unchecked`, except that we can safely
+            // return a mutable reference since the caller passed a mutable reference
+            // and is thus guaranteed to have exclusive write access to `slice`.
+            unsafe { super::from_utf8_unchecked_mut(slice::from_raw_parts_mut(ptr, len)) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2099,11 +2142,13 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            (*self.start()..self.end() + 1).get_unchecked(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+            unsafe { (*self.start()..self.end() + 1).get_unchecked(slice) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            (*self.start()..self.end() + 1).get_unchecked_mut(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+            unsafe { (*self.start()..self.end() + 1).get_unchecked_mut(slice) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2148,11 +2193,13 @@ mod traits {
         }
         #[inline]
         unsafe fn get_unchecked(self, slice: &str) -> &Self::Output {
-            (..self.end + 1).get_unchecked(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+            unsafe { (..self.end + 1).get_unchecked(slice) }
         }
         #[inline]
         unsafe fn get_unchecked_mut(self, slice: &mut str) -> &mut Self::Output {
-            (..self.end + 1).get_unchecked_mut(slice)
+            // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+            unsafe { (..self.end + 1).get_unchecked_mut(slice) }
         }
         #[inline]
         fn index(self, slice: &str) -> &Self::Output {
@@ -2373,7 +2420,11 @@ impl str {
     #[stable(feature = "str_mut_extras", since = "1.20.0")]
     #[inline(always)]
     pub unsafe fn as_bytes_mut(&mut self) -> &mut [u8] {
-        &mut *(self as *mut str as *mut [u8])
+        // SAFETY: the cast from `&mut str` to `&mut [u8]` is safe since `str`
+        // has the same layout as `[u8]` (only libstd can make this guarantee).
+        // The pointer dereference is safe since it comes from a mutable reference which
+        // is guaranteed to be valid for writes.
+        unsafe { &mut *(self as *mut str as *mut [u8]) }
     }
 
     /// Converts a string slice to a raw pointer.
@@ -2509,7 +2560,8 @@ impl str {
     #[stable(feature = "str_checked_slicing", since = "1.20.0")]
     #[inline]
     pub unsafe fn get_unchecked<I: SliceIndex<str>>(&self, i: I) -> &I::Output {
-        i.get_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+        unsafe { i.get_unchecked(self) }
     }
 
     /// Returns a mutable, unchecked subslice of `str`.
@@ -2541,7 +2593,8 @@ impl str {
     #[stable(feature = "str_checked_slicing", since = "1.20.0")]
     #[inline]
     pub unsafe fn get_unchecked_mut<I: SliceIndex<str>>(&mut self, i: I) -> &mut I::Output {
-        i.get_unchecked_mut(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+        unsafe { i.get_unchecked_mut(self) }
     }
 
     /// Creates a string slice from another string slice, bypassing safety
@@ -2591,7 +2644,8 @@ impl str {
     #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked(begin..end)` instead")]
     #[inline]
     pub unsafe fn slice_unchecked(&self, begin: usize, end: usize) -> &str {
-        (begin..end).get_unchecked(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked`.
+        unsafe { (begin..end).get_unchecked(self) }
     }
 
     /// Creates a string slice from another string slice, bypassing safety
@@ -2622,7 +2676,8 @@ impl str {
     #[rustc_deprecated(since = "1.29.0", reason = "use `get_unchecked_mut(begin..end)` instead")]
     #[inline]
     pub unsafe fn slice_mut_unchecked(&mut self, begin: usize, end: usize) -> &mut str {
-        (begin..end).get_unchecked_mut(self)
+        // SAFETY: the caller must uphold the safety contract for `get_unchecked_mut`.
+        unsafe { (begin..end).get_unchecked_mut(self) }
     }
 
     /// Divide one string slice into two at an index.
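// A caller-side sketch (editorial, not part of this patch) of the contract the
// SAFETY comments above lean on: `str::get_unchecked` is sound only when both
// endpoints are in-bounds char boundaries, which a safe wrapper can check first.
fn slice_if_valid(s: &str, start: usize, end: usize) -> Option<&str> {
    if start <= end && s.is_char_boundary(start) && s.is_char_boundary(end) {
        // SAFETY: both endpoints were just checked to be in-bounds char boundaries.
        Some(unsafe { s.get_unchecked(start..end) })
    } else {
        None
    }
}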
diff --git a/src/libcore/sync/atomic.rs b/src/libcore/sync/atomic.rs
index 1cd68f2881b7c..fcae6c86774f2 100644
--- a/src/libcore/sync/atomic.rs
+++ b/src/libcore/sync/atomic.rs
@@ -2335,35 +2335,44 @@ fn strongest_failure_ordering(order: Ordering) -> Ordering {
 
 #[inline]
 unsafe fn atomic_store<T: Copy>(dst: *mut T, val: T, order: Ordering) {
-    match order {
-        Release => intrinsics::atomic_store_rel(dst, val),
-        Relaxed => intrinsics::atomic_store_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_store(dst, val),
-        Acquire => panic!("there is no such thing as an acquire store"),
-        AcqRel => panic!("there is no such thing as an acquire/release store"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_store`.
+    unsafe {
+        match order {
+            Release => intrinsics::atomic_store_rel(dst, val),
+            Relaxed => intrinsics::atomic_store_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_store(dst, val),
+            Acquire => panic!("there is no such thing as an acquire store"),
+            AcqRel => panic!("there is no such thing as an acquire/release store"),
+        }
     }
 }
 
 #[inline]
 unsafe fn atomic_load<T: Copy>(dst: *const T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_load_acq(dst),
-        Relaxed => intrinsics::atomic_load_relaxed(dst),
-        SeqCst => intrinsics::atomic_load(dst),
-        Release => panic!("there is no such thing as a release load"),
-        AcqRel => panic!("there is no such thing as an acquire/release load"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_load`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_load_acq(dst),
+            Relaxed => intrinsics::atomic_load_relaxed(dst),
+            SeqCst => intrinsics::atomic_load(dst),
+            Release => panic!("there is no such thing as a release load"),
+            AcqRel => panic!("there is no such thing as an acquire/release load"),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xchg_acq(dst, val),
-        Release => intrinsics::atomic_xchg_rel(dst, val),
-        AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xchg(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_swap`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xchg_acq(dst, val),
+            Release => intrinsics::atomic_xchg_rel(dst, val),
+            AcqRel => intrinsics::atomic_xchg_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xchg_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xchg(dst, val),
+        }
     }
 }
 
@@ -2371,12 +2380,15 @@ unsafe fn atomic_swap<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xadd_acq(dst, val),
-        Release => intrinsics::atomic_xadd_rel(dst, val),
-        AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xadd(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_add`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xadd_acq(dst, val),
+            Release => intrinsics::atomic_xadd_rel(dst, val),
+            AcqRel => intrinsics::atomic_xadd_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xadd_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xadd(dst, val),
+        }
     }
 }
 
@@ -2384,12 +2396,15 @@ unsafe fn atomic_add<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_sub<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xsub_acq(dst, val),
-        Release => intrinsics::atomic_xsub_rel(dst, val),
-        AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xsub(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_sub`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xsub_acq(dst, val),
+            Release => intrinsics::atomic_xsub_rel(dst, val),
+            AcqRel => intrinsics::atomic_xsub_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xsub_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xsub(dst, val),
+        }
     }
 }
 
@@ -2402,19 +2417,22 @@ unsafe fn atomic_compare_exchange<T: Copy>(
     success: Ordering,
     failure: Ordering,
 ) -> Result<T, T> {
-    let (val, ok) = match (success, failure) {
-        (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
-        (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
-        (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
-        (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
-        (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
-        (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
-        (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
-        (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
-        (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
-        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
-        (_, Release) => panic!("there is no such thing as a release failure ordering"),
-        _ => panic!("a failure ordering can't be stronger than a success ordering"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange`.
+    let (val, ok) = unsafe {
+        match (success, failure) {
+            (Acquire, Acquire) => intrinsics::atomic_cxchg_acq(dst, old, new),
+            (Release, Relaxed) => intrinsics::atomic_cxchg_rel(dst, old, new),
+            (AcqRel, Acquire) => intrinsics::atomic_cxchg_acqrel(dst, old, new),
+            (Relaxed, Relaxed) => intrinsics::atomic_cxchg_relaxed(dst, old, new),
+            (SeqCst, SeqCst) => intrinsics::atomic_cxchg(dst, old, new),
+            (Acquire, Relaxed) => intrinsics::atomic_cxchg_acq_failrelaxed(dst, old, new),
+            (AcqRel, Relaxed) => intrinsics::atomic_cxchg_acqrel_failrelaxed(dst, old, new),
+            (SeqCst, Relaxed) => intrinsics::atomic_cxchg_failrelaxed(dst, old, new),
+            (SeqCst, Acquire) => intrinsics::atomic_cxchg_failacq(dst, old, new),
+            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+            (_, Release) => panic!("there is no such thing as a release failure ordering"),
+            _ => panic!("a failure ordering can't be stronger than a success ordering"),
+        }
     };
     if ok { Ok(val) } else { Err(val) }
 }
@@ -2428,19 +2446,22 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
     success: Ordering,
     failure: Ordering,
 ) -> Result<T, T> {
-    let (val, ok) = match (success, failure) {
-        (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
-        (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
-        (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
-        (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
-        (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
-        (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
-        (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
-        (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
-        (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
-        (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
-        (_, Release) => panic!("there is no such thing as a release failure ordering"),
-        _ => panic!("a failure ordering can't be stronger than a success ordering"),
+    // SAFETY: the caller must uphold the safety contract for `atomic_compare_exchange_weak`.
+    let (val, ok) = unsafe {
+        match (success, failure) {
+            (Acquire, Acquire) => intrinsics::atomic_cxchgweak_acq(dst, old, new),
+            (Release, Relaxed) => intrinsics::atomic_cxchgweak_rel(dst, old, new),
+            (AcqRel, Acquire) => intrinsics::atomic_cxchgweak_acqrel(dst, old, new),
+            (Relaxed, Relaxed) => intrinsics::atomic_cxchgweak_relaxed(dst, old, new),
+            (SeqCst, SeqCst) => intrinsics::atomic_cxchgweak(dst, old, new),
+            (Acquire, Relaxed) => intrinsics::atomic_cxchgweak_acq_failrelaxed(dst, old, new),
+            (AcqRel, Relaxed) => intrinsics::atomic_cxchgweak_acqrel_failrelaxed(dst, old, new),
+            (SeqCst, Relaxed) => intrinsics::atomic_cxchgweak_failrelaxed(dst, old, new),
+            (SeqCst, Acquire) => intrinsics::atomic_cxchgweak_failacq(dst, old, new),
+            (_, AcqRel) => panic!("there is no such thing as an acquire/release failure ordering"),
+            (_, Release) => panic!("there is no such thing as a release failure ordering"),
+            _ => panic!("a failure ordering can't be stronger than a success ordering"),
+        }
     };
     if ok { Ok(val) } else { Err(val) }
 }
@@ -2448,48 +2469,60 @@ unsafe fn atomic_compare_exchange_weak<T: Copy>(
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_and<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_and_acq(dst, val),
-        Release => intrinsics::atomic_and_rel(dst, val),
-        AcqRel => intrinsics::atomic_and_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_and_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_and(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_and`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_and_acq(dst, val),
+            Release => intrinsics::atomic_and_rel(dst, val),
+            AcqRel => intrinsics::atomic_and_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_and_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_and(dst, val),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_nand<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_nand_acq(dst, val),
-        Release => intrinsics::atomic_nand_rel(dst, val),
-        AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_nand(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_nand`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_nand_acq(dst, val),
+            Release => intrinsics::atomic_nand_rel(dst, val),
+            AcqRel => intrinsics::atomic_nand_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_nand_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_nand(dst, val),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_or<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_or_acq(dst, val),
-        Release => intrinsics::atomic_or_rel(dst, val),
-        AcqRel => intrinsics::atomic_or_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_or_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_or(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_or`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_or_acq(dst, val),
+            Release => intrinsics::atomic_or_rel(dst, val),
+            AcqRel => intrinsics::atomic_or_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_or_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_or(dst, val),
+        }
     }
 }
 
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_xor_acq(dst, val),
-        Release => intrinsics::atomic_xor_rel(dst, val),
-        AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_xor(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_xor`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_xor_acq(dst, val),
+            Release => intrinsics::atomic_xor_rel(dst, val),
+            AcqRel => intrinsics::atomic_xor_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_xor_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_xor(dst, val),
+        }
     }
 }
 
@@ -2497,12 +2530,15 @@ unsafe fn atomic_xor<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_max_acq(dst, val),
-        Release => intrinsics::atomic_max_rel(dst, val),
-        AcqRel => intrinsics::atomic_max_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_max_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_max(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_max`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_max_acq(dst, val),
+            Release => intrinsics::atomic_max_rel(dst, val),
+            AcqRel => intrinsics::atomic_max_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_max_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_max(dst, val),
+        }
     }
 }
 
@@ -2510,12 +2546,15 @@ unsafe fn atomic_max<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_min_acq(dst, val),
-        Release => intrinsics::atomic_min_rel(dst, val),
-        AcqRel => intrinsics::atomic_min_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_min_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_min(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_min`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_min_acq(dst, val),
+            Release => intrinsics::atomic_min_rel(dst, val),
+            AcqRel => intrinsics::atomic_min_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_min_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_min(dst, val),
+        }
     }
 }
 
@@ -2523,12 +2562,15 @@ unsafe fn atomic_min<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_umax_acq(dst, val),
-        Release => intrinsics::atomic_umax_rel(dst, val),
-        AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_umax(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_umax`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_umax_acq(dst, val),
+            Release => intrinsics::atomic_umax_rel(dst, val),
+            AcqRel => intrinsics::atomic_umax_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_umax_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_umax(dst, val),
+        }
     }
 }
 
@@ -2536,12 +2578,15 @@ unsafe fn atomic_umax<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
 #[inline]
 #[cfg(target_has_atomic = "8")]
 unsafe fn atomic_umin<T: Copy>(dst: *mut T, val: T, order: Ordering) -> T {
-    match order {
-        Acquire => intrinsics::atomic_umin_acq(dst, val),
-        Release => intrinsics::atomic_umin_rel(dst, val),
-        AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
-        Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
-        SeqCst => intrinsics::atomic_umin(dst, val),
+    // SAFETY: the caller must uphold the safety contract for `atomic_umin`.
+    unsafe {
+        match order {
+            Acquire => intrinsics::atomic_umin_acq(dst, val),
+            Release => intrinsics::atomic_umin_rel(dst, val),
+            AcqRel => intrinsics::atomic_umin_acqrel(dst, val),
+            Relaxed => intrinsics::atomic_umin_relaxed(dst, val),
+            SeqCst => intrinsics::atomic_umin(dst, val),
+        }
     }
 }
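// A user-level analogue (editorial, not part of this patch) of the shape used
// above: the whole `match` moves inside a single `unsafe` block, so one SAFETY
// comment covers every intrinsic arm, and the `panic!` arms (which are safe)
// simply ride along.
use std::sync::atomic::{AtomicU32, Ordering};

/// # Safety
/// `dst` must point to a live, properly initialized `AtomicU32`.
unsafe fn load_raw(dst: *const AtomicU32, order: Ordering) -> u32 {
    // SAFETY: the caller must uphold the pointer-validity contract above.
    unsafe {
        match order {
            Ordering::Acquire | Ordering::Relaxed | Ordering::SeqCst => (*dst).load(order),
            _ => panic!("there is no such thing as a release or acquire/release load"),
        }
    }
}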
 
diff --git a/src/libcore/tests/lib.rs b/src/libcore/tests/lib.rs
index c60ce8ec837d5..68a5e20a66fdc 100644
--- a/src/libcore/tests/lib.rs
+++ b/src/libcore/tests/lib.rs
@@ -44,6 +44,8 @@
 #![feature(option_unwrap_none)]
 #![feature(peekable_next_if)]
 #![feature(partition_point)]
+#![feature(unsafe_block_in_unsafe_fn)]
+#![deny(unsafe_op_in_unsafe_fn)]
 
 extern crate test;
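// A minimal sketch (editorial, not part of this patch) of what the two crate
// attributes above do: the feature gate enables the lint (it was nightly-only at
// the time), and `deny` turns any bare unsafe operation in an `unsafe fn` into a
// hard error until it is wrapped in an explicit `unsafe` block.
#![feature(unsafe_block_in_unsafe_fn)]
#![deny(unsafe_op_in_unsafe_fn)]

unsafe fn first_byte(p: *const u8) -> u8 {
    // `*p` written bare here would be rejected by the lint.
    // SAFETY: the caller must guarantee that `p` is valid for reads.
    unsafe { *p }
}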
 
diff --git a/src/librustc_ast_pretty/pprust.rs b/src/librustc_ast_pretty/pprust.rs
index 86faa1f086ce2..501cd3748282b 100644
--- a/src/librustc_ast_pretty/pprust.rs
+++ b/src/librustc_ast_pretty/pprust.rs
@@ -9,7 +9,7 @@ use rustc_ast::ast::{InlineAsmOptions, InlineAsmTemplatePiece};
 use rustc_ast::attr;
 use rustc_ast::ptr::P;
 use rustc_ast::token::{self, BinOpToken, DelimToken, Nonterminal, Token, TokenKind};
-use rustc_ast::tokenstream::{self, TokenStream, TokenTree};
+use rustc_ast::tokenstream::{TokenStream, TokenTree};
 use rustc_ast::util::parser::{self, AssocOp, Fixity};
 use rustc_ast::util::{classify, comments};
 use rustc_span::edition::Edition;
@@ -293,7 +293,7 @@ pub fn nonterminal_to_string(nt: &Nonterminal) -> String {
         token::NtIdent(e, is_raw) => IdentPrinter::for_ast_ident(e, is_raw).to_string(),
         token::NtLifetime(e) => e.to_string(),
         token::NtLiteral(ref e) => expr_to_string(e),
-        token::NtTT(ref tree) => tt_to_string(tree.clone()),
+        token::NtTT(ref tree) => tt_to_string(tree),
         token::NtVis(ref e) => vis_to_string(e),
     }
 }
@@ -314,11 +314,11 @@ pub fn expr_to_string(e: &ast::Expr) -> String {
     to_string(|s| s.print_expr(e))
 }
 
-pub fn tt_to_string(tt: tokenstream::TokenTree) -> String {
+pub fn tt_to_string(tt: &TokenTree) -> String {
     to_string(|s| s.print_tt(tt, false))
 }
 
-pub fn tts_to_string(tokens: TokenStream) -> String {
+pub fn tts_to_string(tokens: &TokenStream) -> String {
     to_string(|s| s.print_tts(tokens, false))
 }
 
@@ -585,7 +585,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
                 false,
                 None,
                 delim.to_token(),
-                tokens.clone(),
+                tokens,
                 true,
                 span,
             ),
@@ -594,7 +594,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
                 if let MacArgs::Eq(_, tokens) = &item.args {
                     self.space();
                     self.word_space("=");
-                    self.print_tts(tokens.clone(), true);
+                    self.print_tts(tokens, true);
                 }
             }
         }
@@ -635,9 +635,9 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
     /// appropriate macro, transcribe back into the grammar we just parsed from,
     /// and then pretty-print the resulting AST nodes (so, e.g., we print
     /// expression arguments as expressions). It can be done! I think.
-    fn print_tt(&mut self, tt: tokenstream::TokenTree, convert_dollar_crate: bool) {
+    fn print_tt(&mut self, tt: &TokenTree, convert_dollar_crate: bool) {
         match tt {
-            TokenTree::Token(ref token) => {
+            TokenTree::Token(token) => {
                 self.word(token_to_string_ext(&token, convert_dollar_crate));
                 if let token::DocComment(..) = token.kind {
                     self.hardbreak()
@@ -648,7 +648,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
                     None,
                     false,
                     None,
-                    delim,
+                    *delim,
                     tts,
                     convert_dollar_crate,
                     dspan.entire(),
@@ -657,14 +657,14 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
         }
     }
 
-    fn print_tts(&mut self, tts: tokenstream::TokenStream, convert_dollar_crate: bool) {
-        let mut iter = tts.into_trees().peekable();
+    fn print_tts(&mut self, tts: &TokenStream, convert_dollar_crate: bool) {
+        let mut iter = tts.trees().peekable();
         while let Some(tt) = iter.next() {
-            let show_space =
-                if let Some(next) = iter.peek() { tt_prepend_space(next, &tt) } else { false };
-            self.print_tt(tt, convert_dollar_crate);
-            if show_space {
-                self.space();
+            self.print_tt(&tt, convert_dollar_crate);
+            if let Some(next) = iter.peek() {
+                if tt_prepend_space(next, &tt) {
+                    self.space();
+                }
             }
         }
     }
@@ -675,7 +675,7 @@ pub trait PrintState<'a>: std::ops::Deref<Target = pp::Printer> + std::ops::Dere
         has_bang: bool,
         ident: Option<Ident>,
         delim: DelimToken,
-        tts: TokenStream,
+        tts: &TokenStream,
         convert_dollar_crate: bool,
         span: Span,
     ) {
@@ -1253,7 +1253,7 @@ impl<'a> State<'a> {
                     has_bang,
                     Some(item.ident),
                     macro_def.body.delim(),
-                    macro_def.body.inner_tokens(),
+                    &macro_def.body.inner_tokens(),
                     true,
                     item.span,
                 );
@@ -1577,7 +1577,7 @@ impl<'a> State<'a> {
             true,
             None,
             m.args.delim(),
-            m.args.inner_tokens(),
+            &m.args.inner_tokens(),
             true,
             m.span(),
         );
diff --git a/src/librustc_builtin_macros/log_syntax.rs b/src/librustc_builtin_macros/log_syntax.rs
index ae3a889428ae4..ede34a7612589 100644
--- a/src/librustc_builtin_macros/log_syntax.rs
+++ b/src/librustc_builtin_macros/log_syntax.rs
@@ -7,7 +7,7 @@ pub fn expand_log_syntax<'cx>(
     sp: rustc_span::Span,
     tts: TokenStream,
 ) -> Box<dyn base::MacResult + 'cx> {
-    println!("{}", pprust::tts_to_string(tts));
+    println!("{}", pprust::tts_to_string(&tts));
 
     // any so that `log_syntax` can be invoked as an expression and item.
     base::DummyResult::any_valid(sp)
diff --git a/src/librustc_builtin_macros/source_util.rs b/src/librustc_builtin_macros/source_util.rs
index 1b164eae5a345..e46cf67e64d66 100644
--- a/src/librustc_builtin_macros/source_util.rs
+++ b/src/librustc_builtin_macros/source_util.rs
@@ -71,7 +71,7 @@ pub fn expand_stringify(
     tts: TokenStream,
 ) -> Box<dyn base::MacResult + 'static> {
     let sp = cx.with_def_site_ctxt(sp);
-    let s = pprust::tts_to_string(tts);
+    let s = pprust::tts_to_string(&tts);
     base::MacEager::expr(cx.expr_str(sp, Symbol::intern(&s)))
 }
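// A minimal sketch (editorial; names are hypothetical, not this crate's API) of
// the ownership shift above: `tts_to_string` and the other printer entry points
// now borrow the token stream, so call sites pass `&tts` and keep ownership
// instead of cloning.
fn render(tokens: &[String]) -> String {
    tokens.join(" ")
}

fn main() {
    let tokens = vec!["stringify".to_string(), "!".to_string()];
    let s = render(&tokens); // borrow: no `.clone()` needed at the call site
    assert_eq!(s, "stringify !");
    drop(tokens); // the caller still owns the tokens afterwards
}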
 
diff --git a/src/librustc_codegen_llvm/base.rs b/src/librustc_codegen_llvm/base.rs
index e99fb8dcae1e5..d5e0d7d36ee7a 100644
--- a/src/librustc_codegen_llvm/base.rs
+++ b/src/librustc_codegen_llvm/base.rs
@@ -150,6 +150,11 @@ pub fn compile_codegen_unit(
                 cx.create_used_variable()
             }
 
+            // Finalize code coverage by injecting the coverage map
+            if cx.sess().opts.debugging_opts.instrument_coverage {
+                cx.coverageinfo_finalize();
+            }
+
             // Finalize debuginfo
             if cx.sess().opts.debuginfo != DebugInfo::None {
                 cx.debuginfo_finalize();
diff --git a/src/librustc_codegen_llvm/context.rs b/src/librustc_codegen_llvm/context.rs
index 7ff5ac5cbdc10..e9a001bab7540 100644
--- a/src/librustc_codegen_llvm/context.rs
+++ b/src/librustc_codegen_llvm/context.rs
@@ -1,5 +1,6 @@
 use crate::attributes;
 use crate::callee::get_fn;
+use crate::coverageinfo;
 use crate::debuginfo;
 use crate::llvm;
 use crate::llvm_util;
@@ -77,6 +78,7 @@ pub struct CodegenCx<'ll, 'tcx> {
     pub pointee_infos: RefCell<FxHashMap<(Ty<'tcx>, Size), Option<PointeeInfo>>>,
     pub isize_ty: &'ll Type,
 
+    pub coverage_cx: Option<coverageinfo::CrateCoverageContext<'tcx>>,
     pub dbg_cx: Option<debuginfo::CrateDebugContext<'ll, 'tcx>>,
 
     eh_personality: Cell<Option<&'ll Value>>,
@@ -256,6 +258,13 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
 
         let (llcx, llmod) = (&*llvm_module.llcx, llvm_module.llmod());
 
+        let coverage_cx = if tcx.sess.opts.debugging_opts.instrument_coverage {
+            let covctx = coverageinfo::CrateCoverageContext::new();
+            Some(covctx)
+        } else {
+            None
+        };
+
         let dbg_cx = if tcx.sess.opts.debuginfo != DebugInfo::None {
             let dctx = debuginfo::CrateDebugContext::new(llmod);
             debuginfo::metadata::compile_unit_metadata(tcx, &codegen_unit.name().as_str(), &dctx);
@@ -285,6 +294,7 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
             scalar_lltypes: Default::default(),
             pointee_infos: Default::default(),
             isize_ty,
+            coverage_cx,
             dbg_cx,
             eh_personality: Cell::new(None),
             rust_try_fn: Cell::new(None),
@@ -296,6 +306,11 @@ impl<'ll, 'tcx> CodegenCx<'ll, 'tcx> {
     crate fn statics_to_rauw(&self) -> &RefCell<Vec<(&'ll Value, &'ll Value)>> {
         &self.statics_to_rauw
     }
+
+    #[inline]
+    pub fn coverage_context(&'a self) -> &'a coverageinfo::CrateCoverageContext<'tcx> {
+        self.coverage_cx.as_ref().unwrap()
+    }
 }
 
 impl MiscMethods<'tcx> for CodegenCx<'ll, 'tcx> {
@@ -749,8 +764,6 @@ impl CodegenCx<'b, 'tcx> {
         ifn!("llvm.lifetime.start.p0i8", fn(t_i64, i8p) -> void);
         ifn!("llvm.lifetime.end.p0i8", fn(t_i64, i8p) -> void);
 
-        ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
-
         ifn!("llvm.expect.i1", fn(i1, i1) -> i1);
         ifn!("llvm.eh.typeid.for", fn(i8p) -> t_i32);
         ifn!("llvm.localescape", fn(...) -> void);
@@ -765,6 +778,10 @@ impl CodegenCx<'b, 'tcx> {
         ifn!("llvm.va_end", fn(i8p) -> void);
         ifn!("llvm.va_copy", fn(i8p, i8p) -> void);
 
+        if self.sess().opts.debugging_opts.instrument_coverage {
+            ifn!("llvm.instrprof.increment", fn(i8p, t_i64, t_i32, t_i32) -> void);
+        }
+
         if self.sess().opts.debuginfo != DebugInfo::None {
             ifn!("llvm.dbg.declare", fn(self.type_metadata(), self.type_metadata()) -> void);
             ifn!("llvm.dbg.value", fn(self.type_metadata(), t_i64, self.type_metadata()) -> void);
diff --git a/src/librustc_codegen_llvm/coverageinfo/mod.rs b/src/librustc_codegen_llvm/coverageinfo/mod.rs
new file mode 100644
index 0000000000000..ff9f8f7aeaa54
--- /dev/null
+++ b/src/librustc_codegen_llvm/coverageinfo/mod.rs
@@ -0,0 +1,126 @@
+use crate::builder::Builder;
+use crate::common::CodegenCx;
+use log::debug;
+use rustc_codegen_ssa::coverageinfo::map::*;
+use rustc_codegen_ssa::traits::{CoverageInfoBuilderMethods, CoverageInfoMethods};
+use rustc_data_structures::fx::FxHashMap;
+use rustc_middle::ty::Instance;
+
+use std::cell::RefCell;
+
+/// A context object for maintaining all state needed by the coverageinfo module.
+pub struct CrateCoverageContext<'tcx> {
+    // Coverage region data for each instrumented function, keyed by its `Instance`.
+    pub(crate) coverage_regions: RefCell<FxHashMap<Instance<'tcx>, FunctionCoverageRegions>>,
+}
+
+impl<'tcx> CrateCoverageContext<'tcx> {
+    pub fn new() -> Self {
+        Self { coverage_regions: Default::default() }
+    }
+}
+
+/// Generates and exports the Coverage Map.
+// FIXME(richkadel): Actually generate and export the coverage map to LLVM.
+// The current implementation only emits debug messages showing that the data is available.
+pub fn finalize(cx: &CodegenCx<'_, '_>) {
+    let coverage_regions = &*cx.coverage_context().coverage_regions.borrow();
+    for instance in coverage_regions.keys() {
+        let coverageinfo = cx.tcx.coverageinfo(instance.def_id());
+        debug_assert!(coverageinfo.num_counters > 0);
+        debug!(
+            "Generate coverage map for: {:?}, hash: {}, num_counters: {}",
+            instance, coverageinfo.hash, coverageinfo.num_counters
+        );
+        let function_coverage_regions = &coverage_regions[instance];
+        for (index, region) in function_coverage_regions.indexed_regions() {
+            match region.kind {
+                CoverageKind::Counter => debug!(
+                    "  Counter {}, for {}..{}",
+                    index, region.coverage_span.start_byte_pos, region.coverage_span.end_byte_pos
+                ),
+                CoverageKind::CounterExpression(lhs, op, rhs) => debug!(
+                    "  CounterExpression {} = {} {:?} {}, for {}..{}",
+                    index,
+                    lhs,
+                    op,
+                    rhs,
+                    region.coverage_span.start_byte_pos,
+                    region.coverage_span.end_byte_pos
+                ),
+            }
+        }
+        for unreachable in function_coverage_regions.unreachable_regions() {
+            debug!(
+                "  Unreachable code region: {}..{}",
+                unreachable.start_byte_pos, unreachable.end_byte_pos
+            );
+        }
+    }
+}
+
+impl CoverageInfoMethods for CodegenCx<'ll, 'tcx> {
+    fn coverageinfo_finalize(&self) {
+        finalize(self)
+    }
+}
+
+impl CoverageInfoBuilderMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+    fn add_counter_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        index: u32,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    ) {
+        debug!(
+            "adding counter to coverage map: instance={:?}, index={}, byte range {}..{}",
+            instance, index, start_byte_pos, end_byte_pos,
+        );
+        let mut coverage_regions = self.coverage_context().coverage_regions.borrow_mut();
+        coverage_regions.entry(instance).or_default().add_counter(
+            index,
+            start_byte_pos,
+            end_byte_pos,
+        );
+    }
+
+    fn add_counter_expression_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        index: u32,
+        lhs: u32,
+        op: CounterOp,
+        rhs: u32,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    ) {
+        debug!(
+            "adding counter expression to coverage map: instance={:?}, index={}, {} {:?} {}, byte range {}..{}",
+            instance, index, lhs, op, rhs, start_byte_pos, end_byte_pos,
+        );
+        let mut coverage_regions = self.coverage_context().coverage_regions.borrow_mut();
+        coverage_regions.entry(instance).or_default().add_counter_expression(
+            index,
+            lhs,
+            op,
+            rhs,
+            start_byte_pos,
+            end_byte_pos,
+        );
+    }
+
+    fn add_unreachable_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    ) {
+        debug!(
+            "adding unreachable code to coverage map: instance={:?}, byte range {}..{}",
+            instance, start_byte_pos, end_byte_pos,
+        );
+        let mut coverage_regions = self.coverage_context().coverage_regions.borrow_mut();
+        coverage_regions.entry(instance).or_default().add_unreachable(start_byte_pos, end_byte_pos);
+    }
+}
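// A minimal sketch (editorial, not part of this patch) of the interior-mutability
// shape used above: the codegen context is shared immutably, so the per-instance
// region map lives in a `RefCell` and each `add_*` method takes a short-lived
// mutable borrow. `HashMap` stands in for rustc's `FxHashMap`.
use std::cell::RefCell;
use std::collections::HashMap;

#[derive(Default)]
struct Ctx {
    // function id -> recorded (start, end) byte ranges
    regions: RefCell<HashMap<u32, Vec<(u32, u32)>>>,
}

impl Ctx {
    fn add_counter_region(&self, func: u32, start: u32, end: u32) {
        // Keep the borrow short: `borrow_mut` panics if another borrow is live.
        self.regions.borrow_mut().entry(func).or_default().push((start, end));
    }
}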
diff --git a/src/librustc_codegen_llvm/intrinsic.rs b/src/librustc_codegen_llvm/intrinsic.rs
index 130c0cf1877c6..de90ac0bac1d3 100644
--- a/src/librustc_codegen_llvm/intrinsic.rs
+++ b/src/librustc_codegen_llvm/intrinsic.rs
@@ -13,12 +13,15 @@ use rustc_ast::ast;
 use rustc_codegen_ssa::base::{compare_simd_types, to_immediate, wants_msvc_seh};
 use rustc_codegen_ssa::common::span_invalid_monomorphization_error;
 use rustc_codegen_ssa::common::{IntPredicate, TypeKind};
+use rustc_codegen_ssa::coverageinfo::CounterOp;
 use rustc_codegen_ssa::glue;
 use rustc_codegen_ssa::mir::operand::{OperandRef, OperandValue};
 use rustc_codegen_ssa::mir::place::PlaceRef;
 use rustc_codegen_ssa::traits::*;
 use rustc_codegen_ssa::MemFlags;
 use rustc_hir as hir;
+use rustc_middle::mir::coverage;
+use rustc_middle::mir::Operand;
 use rustc_middle::ty::layout::{FnAbiExt, HasTyCtxt};
 use rustc_middle::ty::{self, Ty};
 use rustc_middle::{bug, span_bug};
@@ -81,6 +84,53 @@ fn get_simple_intrinsic(cx: &CodegenCx<'ll, '_>, name: &str) -> Option<&'ll Valu
 }
 
 impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
+    fn is_codegen_intrinsic(
+        &mut self,
+        intrinsic: &str,
+        args: &[Operand<'tcx>],
+        caller_instance: ty::Instance<'tcx>,
+    ) -> bool {
+        match intrinsic {
+            "count_code_region" => {
+                use coverage::count_code_region_args::*;
+                self.add_counter_region(
+                    caller_instance,
+                    op_to_u32(&args[COUNTER_INDEX]),
+                    op_to_u32(&args[START_BYTE_POS]),
+                    op_to_u32(&args[END_BYTE_POS]),
+                );
+                true // Also inject the counter increment in the backend
+            }
+            "coverage_counter_add" | "coverage_counter_subtract" => {
+                use coverage::coverage_counter_expression_args::*;
+                self.add_counter_expression_region(
+                    caller_instance,
+                    op_to_u32(&args[COUNTER_EXPRESSION_INDEX]),
+                    op_to_u32(&args[LEFT_INDEX]),
+                    if intrinsic == "coverage_counter_add" {
+                        CounterOp::Add
+                    } else {
+                        CounterOp::Subtract
+                    },
+                    op_to_u32(&args[RIGHT_INDEX]),
+                    op_to_u32(&args[START_BYTE_POS]),
+                    op_to_u32(&args[END_BYTE_POS]),
+                );
+                false // Does not inject backend code
+            }
+            "coverage_unreachable" => {
+                use coverage::coverage_unreachable_args::*;
+                self.add_unreachable_region(
+                    caller_instance,
+                    op_to_u32(&args[START_BYTE_POS]),
+                    op_to_u32(&args[END_BYTE_POS]),
+                );
+                false // Does not inject backend code
+            }
+            _ => true, // Unhandled intrinsics should be passed to `codegen_intrinsic_call()`
+        }
+    }
+
     fn codegen_intrinsic_call(
         &mut self,
         instance: ty::Instance<'tcx>,
@@ -143,15 +193,16 @@ impl IntrinsicCallMethods<'tcx> for Builder<'a, 'll, 'tcx> {
                 // FIXME(richkadel): The current implementation assumes the MIR for the given
                 // caller_instance represents a single function. Validate and/or correct if inlining
                 // and/or monomorphization invalidates these assumptions.
-                let coverage_data = tcx.coverage_data(caller_instance.def_id());
+                let coverageinfo = tcx.coverageinfo(caller_instance.def_id());
                 let mangled_fn = tcx.symbol_name(caller_instance);
                 let (mangled_fn_name, _len_val) = self.const_str(mangled_fn.name);
-                let hash = self.const_u64(coverage_data.hash);
-                let num_counters = self.const_u32(coverage_data.num_counters);
-                let index = args[0].immediate();
+                let hash = self.const_u64(coverageinfo.hash);
+                let num_counters = self.const_u32(coverageinfo.num_counters);
+                use coverage::count_code_region_args::*;
+                let index = args[COUNTER_INDEX].immediate();
                 debug!(
                     "count_code_region to LLVM intrinsic instrprof.increment(fn_name={}, hash={:?}, num_counters={:?}, index={:?})",
-                    mangled_fn.name, hash, num_counters, index
+                    mangled_fn.name, hash, num_counters, index,
                 );
                 self.instrprof_increment(mangled_fn_name, hash, num_counters, index)
             }
@@ -2131,3 +2182,7 @@ fn float_type_width(ty: Ty<'_>) -> Option<u64> {
         _ => None,
     }
 }
+
+fn op_to_u32<'tcx>(op: &Operand<'tcx>) -> u32 {
+    Operand::scalar_from_const(op).to_u32().expect("Scalar is u32")
+}
diff --git a/src/librustc_codegen_llvm/lib.rs b/src/librustc_codegen_llvm/lib.rs
index 55ee660d9f700..565968f9952e5 100644
--- a/src/librustc_codegen_llvm/lib.rs
+++ b/src/librustc_codegen_llvm/lib.rs
@@ -55,6 +55,7 @@ mod callee;
 mod common;
 mod consts;
 mod context;
+mod coverageinfo;
 mod debuginfo;
 mod declare;
 mod intrinsic;
diff --git a/src/librustc_codegen_ssa/back/link.rs b/src/librustc_codegen_ssa/back/link.rs
index a34029410784a..3adaa07db91b0 100644
--- a/src/librustc_codegen_ssa/back/link.rs
+++ b/src/librustc_codegen_ssa/back/link.rs
@@ -1597,6 +1597,9 @@ fn linker_with_args<'a, B: ArchiveBuilder<'a>>(
         cmd.arg(format!("--dynamic-linker={}ld.so.1", prefix));
     }
 
+    // NO-OPT-OUT, OBJECT-FILES-NO, AUDIT-ORDER
+    cmd.add_eh_frame_header();
+
     // NO-OPT-OUT, OBJECT-FILES-NO
     if crt_objects_fallback {
         cmd.no_crt_objects();
diff --git a/src/librustc_codegen_ssa/back/linker.rs b/src/librustc_codegen_ssa/back/linker.rs
index d6ef94bfd1727..54f55c806d035 100644
--- a/src/librustc_codegen_ssa/back/linker.rs
+++ b/src/librustc_codegen_ssa/back/linker.rs
@@ -129,6 +129,7 @@ pub trait Linker {
     fn group_start(&mut self);
     fn group_end(&mut self);
     fn linker_plugin_lto(&mut self);
+    fn add_eh_frame_header(&mut self) {}
     fn finalize(&mut self);
 }
 
@@ -615,6 +616,19 @@ impl<'a> Linker for GccLinker<'a> {
             }
         }
     }
+
+    // Add the `GNU_EH_FRAME` program header which is required to locate unwinding information.
+    // Some versions of `gcc` add it implicitly, some (e.g. `musl-gcc`) don't,
+    // so we just always add it.
+    fn add_eh_frame_header(&mut self) {
+        // The condition here is basically "the target uses ELF".
+        if !self.sess.target.target.options.is_like_osx
+            && !self.sess.target.target.options.is_like_windows
+            && self.sess.target.target.target_os != "uefi"
+        {
+            self.linker_arg("--eh-frame-hdr");
+        }
+    }
 }
 
 pub struct MsvcLinker<'a> {
diff --git a/src/librustc_codegen_ssa/coverageinfo/map.rs b/src/librustc_codegen_ssa/coverageinfo/map.rs
new file mode 100644
index 0000000000000..3bd262cf2b213
--- /dev/null
+++ b/src/librustc_codegen_ssa/coverageinfo/map.rs
@@ -0,0 +1,83 @@
+use rustc_data_structures::fx::FxHashMap;
+use std::collections::hash_map;
+use std::slice;
+
+#[derive(Copy, Clone, Debug)]
+pub enum CounterOp {
+    Add,
+    Subtract,
+}
+
+pub enum CoverageKind {
+    Counter,
+    CounterExpression(u32, CounterOp, u32),
+}
+
+pub struct CoverageSpan {
+    pub start_byte_pos: u32,
+    pub end_byte_pos: u32,
+}
+
+pub struct CoverageRegion {
+    pub kind: CoverageKind,
+    pub coverage_span: CoverageSpan,
+}
+
+/// Collects all of the coverage regions associated with (a) injected counters, (b) counter
+/// expressions (additions or subtractions), and (c) unreachable regions (always counted as zero),
+/// for a given function. Counters and counter expressions are indexed because they can be operands
+/// in an expression.
+///
+/// Note that it's important to distinguish the `unreachable` region type from what LLVM refers to
+/// as a "gap region" (or "gap area"). A gap region is a code region within a counted region (either
+/// a counter or an expression), but the line or lines in the gap region are not executable (such
+/// as lines with only whitespace or comments). According to the LLVM Code Coverage Mapping
+/// documentation, "A count for a gap area is only used as the line execution count if there are
+/// no other regions on a line."
+#[derive(Default)]
+pub struct FunctionCoverageRegions {
+    indexed: FxHashMap<u32, CoverageRegion>,
+    unreachable: Vec<CoverageSpan>,
+}
+
+impl FunctionCoverageRegions {
+    pub fn add_counter(&mut self, index: u32, start_byte_pos: u32, end_byte_pos: u32) {
+        self.indexed.insert(
+            index,
+            CoverageRegion {
+                kind: CoverageKind::Counter,
+                coverage_span: CoverageSpan { start_byte_pos, end_byte_pos },
+            },
+        );
+    }
+
+    pub fn add_counter_expression(
+        &mut self,
+        index: u32,
+        lhs: u32,
+        op: CounterOp,
+        rhs: u32,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    ) {
+        self.indexed.insert(
+            index,
+            CoverageRegion {
+                kind: CoverageKind::CounterExpression(lhs, op, rhs),
+                coverage_span: CoverageSpan { start_byte_pos, end_byte_pos },
+            },
+        );
+    }
+
+    pub fn add_unreachable(&mut self, start_byte_pos: u32, end_byte_pos: u32) {
+        self.unreachable.push(CoverageSpan { start_byte_pos, end_byte_pos });
+    }
+
+    pub fn indexed_regions(&self) -> hash_map::Iter<'_, u32, CoverageRegion> {
+        self.indexed.iter()
+    }
+
+    pub fn unreachable_regions(&self) -> slice::Iter<'_, CoverageSpan> {
+        self.unreachable.iter()
+    }
+}
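// A usage sketch (editorial, not part of this patch) of the map above: record one
// counter, one expression, and one unreachable span, then iterate the way the
// LLVM backend's `finalize` does.
fn demo() {
    let mut regions = FunctionCoverageRegions::default();
    regions.add_counter(0, 10, 20);
    regions.add_counter_expression(1, 0, CounterOp::Add, 0, 20, 30);
    regions.add_unreachable(30, 40);
    for (index, region) in regions.indexed_regions() {
        let span = &region.coverage_span;
        println!("region {}: {}..{}", index, span.start_byte_pos, span.end_byte_pos);
    }
    assert_eq!(regions.unreachable_regions().len(), 1);
}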
diff --git a/src/librustc_codegen_ssa/coverageinfo/mod.rs b/src/librustc_codegen_ssa/coverageinfo/mod.rs
new file mode 100644
index 0000000000000..304f8e19da4e6
--- /dev/null
+++ b/src/librustc_codegen_ssa/coverageinfo/mod.rs
@@ -0,0 +1,3 @@
+pub mod map;
+
+pub use map::CounterOp;
diff --git a/src/librustc_codegen_ssa/lib.rs b/src/librustc_codegen_ssa/lib.rs
index bd3721850f35f..618df15f5bcbe 100644
--- a/src/librustc_codegen_ssa/lib.rs
+++ b/src/librustc_codegen_ssa/lib.rs
@@ -34,6 +34,7 @@ use std::path::{Path, PathBuf};
 pub mod back;
 pub mod base;
 pub mod common;
+pub mod coverageinfo;
 pub mod debuginfo;
 pub mod glue;
 pub mod meth;
diff --git a/src/librustc_codegen_ssa/mir/block.rs b/src/librustc_codegen_ssa/mir/block.rs
index 480f9a5032087..7514eb8e889a8 100644
--- a/src/librustc_codegen_ssa/mir/block.rs
+++ b/src/librustc_codegen_ssa/mir/block.rs
@@ -651,6 +651,18 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
         }
 
         if intrinsic.is_some() && intrinsic != Some("drop_in_place") {
+            let intrinsic = intrinsic.unwrap();
+
+            // `is_codegen_intrinsic()` allows the backend implementation to perform compile-time
+            // operations before converting the `args` to backend values.
+            if !bx.is_codegen_intrinsic(intrinsic, &args, self.instance) {
+                // If the intrinsic call was fully addressed by the `is_codegen_intrinsic()` call
+                // (as a compile-time operation), return immediately. This avoids the need to
+                // convert the arguments, call `codegen_intrinsic_call()`, and handle the
+                // return value.
+                return;
+            }
+
             let dest = match ret_dest {
                 _ if fn_abi.ret.is_indirect() => llargs[0],
                 ReturnDest::Nothing => {
@@ -670,7 +682,7 @@ impl<'a, 'tcx, Bx: BuilderMethods<'a, 'tcx>> FunctionCx<'a, 'tcx, Bx> {
                     // third argument must be constant. This is
                     // checked by const-qualification, which also
                     // promotes any complex rvalues to constants.
-                    if i == 2 && intrinsic.unwrap().starts_with("simd_shuffle") {
+                    if i == 2 && intrinsic.starts_with("simd_shuffle") {
                         if let mir::Operand::Constant(constant) = arg {
                             let c = self.eval_mir_constant(constant);
                             let (llval, ty) = self.simd_shuffle_indices(
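// A minimal analogue (editorial; hypothetical free functions, not rustc's API) of
// the control flow added above: a pre-codegen hook may fully consume an intrinsic
// at compile time, in which case the caller skips argument lowering and codegen.
fn handle_call(intrinsic: &str) -> Option<&'static str> {
    if !is_codegen_intrinsic(intrinsic) {
        return None; // fully handled at compile time; nothing to emit
    }
    Some("codegen_intrinsic_call") // otherwise fall through to normal codegen
}

fn is_codegen_intrinsic(intrinsic: &str) -> bool {
    match intrinsic {
        // These only record coverage map data; they inject no backend code.
        "coverage_counter_add" | "coverage_counter_subtract" | "coverage_unreachable" => false,
        _ => true,
    }
}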
diff --git a/src/librustc_codegen_ssa/traits/builder.rs b/src/librustc_codegen_ssa/traits/builder.rs
index 7ffc9f15bffdc..994d8e0395dc7 100644
--- a/src/librustc_codegen_ssa/traits/builder.rs
+++ b/src/librustc_codegen_ssa/traits/builder.rs
@@ -1,5 +1,6 @@
 use super::abi::AbiBuilderMethods;
 use super::asm::AsmBuilderMethods;
+use super::coverageinfo::CoverageInfoBuilderMethods;
 use super::debuginfo::DebugInfoBuilderMethods;
 use super::intrinsic::IntrinsicCallMethods;
 use super::type_::ArgAbiMethods;
@@ -29,6 +30,7 @@ pub enum OverflowOp {
 
 pub trait BuilderMethods<'a, 'tcx>:
     HasCodegen<'tcx>
+    + CoverageInfoBuilderMethods<'tcx>
     + DebugInfoBuilderMethods
     + ArgAbiMethods<'tcx>
     + AbiBuilderMethods<'tcx>
diff --git a/src/librustc_codegen_ssa/traits/coverageinfo.rs b/src/librustc_codegen_ssa/traits/coverageinfo.rs
new file mode 100644
index 0000000000000..d80f90fa4fa0d
--- /dev/null
+++ b/src/librustc_codegen_ssa/traits/coverageinfo.rs
@@ -0,0 +1,35 @@
+use super::BackendTypes;
+use crate::coverageinfo::CounterOp;
+use rustc_middle::ty::Instance;
+
+pub trait CoverageInfoMethods: BackendTypes {
+    fn coverageinfo_finalize(&self);
+}
+
+pub trait CoverageInfoBuilderMethods<'tcx>: BackendTypes {
+    fn add_counter_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        index: u32,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    );
+
+    fn add_counter_expression_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        index: u32,
+        lhs: u32,
+        op: CounterOp,
+        rhs: u32,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    );
+
+    fn add_unreachable_region(
+        &mut self,
+        instance: Instance<'tcx>,
+        start_byte_pos: u32,
+        end_byte_pos: u32,
+    );
+}
diff --git a/src/librustc_codegen_ssa/traits/intrinsic.rs b/src/librustc_codegen_ssa/traits/intrinsic.rs
index f62019498511c..e713cc948c10d 100644
--- a/src/librustc_codegen_ssa/traits/intrinsic.rs
+++ b/src/librustc_codegen_ssa/traits/intrinsic.rs
@@ -1,5 +1,6 @@
 use super::BackendTypes;
 use crate::mir::operand::OperandRef;
+use rustc_middle::mir::Operand;
 use rustc_middle::ty::{self, Ty};
 use rustc_span::Span;
 use rustc_target::abi::call::FnAbi;
@@ -18,6 +19,16 @@ pub trait IntrinsicCallMethods<'tcx>: BackendTypes {
         caller_instance: ty::Instance<'tcx>,
     );
 
+    /// Intrinsic-specific pre-codegen processing, if any is required. Some intrinsics are handled
+    /// entirely at compile time and do not generate code. Returns `true` if codegen is required,
+    /// or `false` if the intrinsic was fully handled here and no code generation is needed.
+    fn is_codegen_intrinsic(
+        &mut self,
+        intrinsic: &str,
+        args: &Vec<Operand<'tcx>>,
+        caller_instance: ty::Instance<'tcx>,
+    ) -> bool;
+
     fn abort(&mut self);
     fn assume(&mut self, val: Self::Value);
     fn expect(&mut self, cond: Self::Value, expected: bool) -> Self::Value;
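
A sketch of the caller-side contract (mirroring the `block.rs` change earlier in this diff): when the backend fully handles an intrinsic at compile time, `is_codegen_intrinsic()` returns `false` and the generic code skips argument conversion and codegen entirely. The trait and types below are simplified stand-ins, not the real rustc types.

```rust
trait IntrinsicCalls {
    fn is_codegen_intrinsic(&mut self, intrinsic: &str) -> bool;
}

struct Backend;

impl IntrinsicCalls for Backend {
    fn is_codegen_intrinsic(&mut self, intrinsic: &str) -> bool {
        // Hypothetical rule: coverage intrinsics are consumed at compile time.
        !intrinsic.starts_with("coverage_")
    }
}

fn codegen_call(bx: &mut impl IntrinsicCalls, intrinsic: &str) {
    if !bx.is_codegen_intrinsic(intrinsic) {
        return; // fully handled at compile time; nothing to emit
    }
    println!("codegen {}", intrinsic); // stands in for codegen_intrinsic_call()
}

fn main() {
    let mut bx = Backend;
    codegen_call(&mut bx, "coverage_counter_add"); // prints nothing
    codegen_call(&mut bx, "sqrtf32"); // prints "codegen sqrtf32"
}
```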
diff --git a/src/librustc_codegen_ssa/traits/mod.rs b/src/librustc_codegen_ssa/traits/mod.rs
index 6b782731d535c..0ac519dd0b17c 100644
--- a/src/librustc_codegen_ssa/traits/mod.rs
+++ b/src/librustc_codegen_ssa/traits/mod.rs
@@ -19,6 +19,7 @@ mod asm;
 mod backend;
 mod builder;
 mod consts;
+mod coverageinfo;
 mod debuginfo;
 mod declare;
 mod intrinsic;
@@ -32,6 +33,7 @@ pub use self::asm::{AsmBuilderMethods, AsmMethods, InlineAsmOperandRef};
 pub use self::backend::{Backend, BackendTypes, CodegenBackend, ExtraBackendMethods};
 pub use self::builder::{BuilderMethods, OverflowOp};
 pub use self::consts::ConstMethods;
+pub use self::coverageinfo::{CoverageInfoBuilderMethods, CoverageInfoMethods};
 pub use self::debuginfo::{DebugInfoBuilderMethods, DebugInfoMethods};
 pub use self::declare::{DeclareMethods, PreDefineMethods};
 pub use self::intrinsic::IntrinsicCallMethods;
@@ -56,6 +58,7 @@ pub trait CodegenMethods<'tcx>:
     + MiscMethods<'tcx>
     + ConstMethods<'tcx>
     + StaticMethods
+    + CoverageInfoMethods
     + DebugInfoMethods<'tcx>
     + DeclareMethods<'tcx>
     + AsmMethods
@@ -72,6 +75,7 @@ impl<'tcx, T> CodegenMethods<'tcx> for T where
         + MiscMethods<'tcx>
         + ConstMethods<'tcx>
         + StaticMethods
+        + CoverageInfoMethods
         + DebugInfoMethods<'tcx>
         + DeclareMethods<'tcx>
         + AsmMethods
diff --git a/src/librustc_expand/mbe/macro_rules.rs b/src/librustc_expand/mbe/macro_rules.rs
index 8cdb5b09c9e8b..28a3970918ee6 100644
--- a/src/librustc_expand/mbe/macro_rules.rs
+++ b/src/librustc_expand/mbe/macro_rules.rs
@@ -224,7 +224,7 @@ fn generic_extension<'cx>(
     let sess = cx.parse_sess;
 
     if cx.trace_macros() {
-        let msg = format!("expanding `{}! {{ {} }}`", name, pprust::tts_to_string(arg.clone()));
+        let msg = format!("expanding `{}! {{ {} }}`", name, pprust::tts_to_string(&arg));
         trace_macros_note(&mut cx.expansions, sp, msg);
     }
 
@@ -300,7 +300,7 @@ fn generic_extension<'cx>(
                 }
 
                 if cx.trace_macros() {
-                    let msg = format!("to `{}`", pprust::tts_to_string(tts.clone()));
+                    let msg = format!("to `{}`", pprust::tts_to_string(&tts));
                     trace_macros_note(&mut cx.expansions, sp, msg);
                 }
 
diff --git a/src/librustc_expand/proc_macro_server.rs b/src/librustc_expand/proc_macro_server.rs
index c88b5a37f718a..e5e530227e43a 100644
--- a/src/librustc_expand/proc_macro_server.rs
+++ b/src/librustc_expand/proc_macro_server.rs
@@ -413,7 +413,7 @@ impl server::TokenStream for Rustc<'_> {
         )
     }
     fn to_string(&mut self, stream: &Self::TokenStream) -> String {
-        pprust::tts_to_string(stream.clone())
+        pprust::tts_to_string(stream)
     }
     fn from_token_tree(
         &mut self,
diff --git a/src/librustc_index/vec.rs b/src/librustc_index/vec.rs
index 4dde33283f575..c5dedab979326 100644
--- a/src/librustc_index/vec.rs
+++ b/src/librustc_index/vec.rs
@@ -536,7 +536,8 @@ impl<I: Idx, T> IndexVec<I, T> {
     }
 
     /// Create an `IndexVec` with `n` elements, where the value of each
-    /// element is the result of `func(i)`
+    /// element is the result of `func(i)`. (The underlying vector will
+    /// be allocated only once, with a capacity of at least `n`.)
     #[inline]
     pub fn from_fn_n(func: impl FnMut(I) -> T, n: usize) -> Self {
         let indices = (0..n).map(I::new);
diff --git a/src/librustc_middle/mir/coverage/mod.rs b/src/librustc_middle/mir/coverage/mod.rs
new file mode 100644
index 0000000000000..327f321bd7534
--- /dev/null
+++ b/src/librustc_middle/mir/coverage/mod.rs
@@ -0,0 +1,24 @@
+//! Metadata from source code coverage analysis and instrumentation.
+
+/// Positional arguments to `libcore::count_code_region()`
+pub mod count_code_region_args {
+    pub const COUNTER_INDEX: usize = 0;
+    pub const START_BYTE_POS: usize = 1;
+    pub const END_BYTE_POS: usize = 2;
+}
+
+/// Positional arguments to `libcore::coverage_counter_add()` and
+/// `libcore::coverage_counter_subtract()`
+pub mod coverage_counter_expression_args {
+    pub const COUNTER_EXPRESSION_INDEX: usize = 0;
+    pub const LEFT_INDEX: usize = 1;
+    pub const RIGHT_INDEX: usize = 2;
+    pub const START_BYTE_POS: usize = 3;
+    pub const END_BYTE_POS: usize = 4;
+}
+
+/// Positional arguments to `libcore::coverage_unreachable()`
+pub mod coverage_unreachable_args {
+    pub const START_BYTE_POS: usize = 0;
+    pub const END_BYTE_POS: usize = 1;
+}
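
For illustration, the named positional constants keep call sites free of magic numbers when pulling operands out of a `count_code_region` call. In this sketch, `args` is a plain `Vec<u32>` standing in for the MIR operand list.

```rust
pub mod count_code_region_args {
    pub const COUNTER_INDEX: usize = 0;
    pub const START_BYTE_POS: usize = 1;
    pub const END_BYTE_POS: usize = 2;
}

fn main() {
    use count_code_region_args::*;
    let args = vec![0u32, 484, 513]; // count_code_region(index, start, end)
    println!(
        "counter {} covers bytes {}..{}",
        args[COUNTER_INDEX], args[START_BYTE_POS], args[END_BYTE_POS]
    );
}
```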
diff --git a/src/librustc_middle/mir/mod.rs b/src/librustc_middle/mir/mod.rs
index ae59f8c542d8c..ad4e8b55b75e1 100644
--- a/src/librustc_middle/mir/mod.rs
+++ b/src/librustc_middle/mir/mod.rs
@@ -39,6 +39,7 @@ use std::{iter, mem, option};
 use self::predecessors::{PredecessorCache, Predecessors};
 pub use self::query::*;
 
+pub mod coverage;
 pub mod interpret;
 pub mod mono;
 mod predecessors;
@@ -1817,6 +1818,18 @@ impl<'tcx> Operand<'tcx> {
         })
     }
 
+    /// Convenience helper to make a `Scalar` from the given `Operand`, assuming that `Operand`
+    /// wraps a constant literal value. Panics if this is not the case.
+    pub fn scalar_from_const(operand: &Operand<'tcx>) -> Scalar {
+        match operand {
+            Operand::Constant(constant) => match constant.literal.val.try_to_scalar() {
+                Some(scalar) => scalar,
+                _ => panic!("{:?}: Scalar value expected", constant.literal.val),
+            },
+            _ => panic!("{:?}: Constant expected", operand),
+        }
+    }
+
     pub fn to_copy(&self) -> Self {
         match *self {
             Operand::Copy(_) | Operand::Constant(_) => self.clone(),
@@ -2490,18 +2503,3 @@ impl Location {
         }
     }
 }
-
-/// Coverage data associated with each function (MIR) instrumented with coverage counters, when
-/// compiled with `-Zinstrument_coverage`. The query `tcx.coverage_data(DefId)` computes these
-/// values on demand (during code generation). This query is only valid after executing the MIR pass
-/// `InstrumentCoverage`.
-#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)]
-pub struct CoverageData {
-    /// A hash value that can be used by the consumer of the coverage profile data to detect
-    /// changes to the instrumented source of the associated MIR body (typically, for an
-    /// individual function).
-    pub hash: u64,
-
-    /// The total number of coverage region counters added to the MIR `Body`.
-    pub num_counters: u32,
-}
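
A self-contained stand-in for the contract of the new `Operand::scalar_from_const`: return the scalar when the operand is a constant literal, and panic otherwise. `Op` is a hypothetical simplification of `mir::Operand`.

```rust
#[derive(Debug)]
enum Op {
    Constant(u32),      // a constant literal whose value is a scalar
    Copy(&'static str), // any non-constant operand
}

fn scalar_from_const(operand: &Op) -> u32 {
    match operand {
        Op::Constant(scalar) => *scalar,
        _ => panic!("{:?}: Constant expected", operand),
    }
}

fn main() {
    assert_eq!(scalar_from_const(&Op::Constant(7)), 7);
    // scalar_from_const(&Op::Copy("_1")) would panic, by design.
}
```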
diff --git a/src/librustc_middle/mir/query.rs b/src/librustc_middle/mir/query.rs
index 9ad79230a4f6d..8dddaf40c8264 100644
--- a/src/librustc_middle/mir/query.rs
+++ b/src/librustc_middle/mir/query.rs
@@ -309,3 +309,17 @@ pub struct DestructuredConst<'tcx> {
     pub variant: Option<VariantIdx>,
     pub fields: &'tcx [&'tcx ty::Const<'tcx>],
 }
+
+/// Coverage information summarized from a MIR body, if instrumented for source code coverage (see
+/// the compiler option `-Zinstrument-coverage`). This information is generated by the
+/// `InstrumentCoverage` MIR pass and can be retrieved via the `coverageinfo` query.
+#[derive(Clone, RustcEncodable, RustcDecodable, Debug, HashStable)]
+pub struct CoverageInfo {
+    /// A hash value that can be used by the consumer of the coverage profile data to detect
+    /// changes to the instrumented source of the associated MIR body (typically, for an
+    /// individual function).
+    pub hash: u64,
+
+    /// The total number of coverage region counters added to the MIR `Body`.
+    pub num_counters: u32,
+}
diff --git a/src/librustc_middle/query/mod.rs b/src/librustc_middle/query/mod.rs
index ba5a8c3ec2052..c9201855462ee 100644
--- a/src/librustc_middle/query/mod.rs
+++ b/src/librustc_middle/query/mod.rs
@@ -231,8 +231,10 @@ rustc_queries! {
             cache_on_disk_if { key.is_local() }
         }
 
-        query coverage_data(key: DefId) -> mir::CoverageData {
-            desc { |tcx| "retrieving coverage data from MIR for `{}`", tcx.def_path_str(key) }
+        /// Returns coverage summary info for a function, after executing the `InstrumentCoverage`
+        /// MIR pass (assuming the `-Zinstrument-coverage` option is enabled).
+        query coverageinfo(key: DefId) -> mir::CoverageInfo {
+            desc { |tcx| "retrieving coverage info from MIR for `{}`", tcx.def_path_str(key) }
             storage(ArenaCacheSelector<'tcx>)
             cache_on_disk_if { key.is_local() }
         }
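
A sketch of the consumer side, with simplified types: codegen would request the `coverageinfo` query for a `DefId` and thread `hash` and `num_counters` into the arguments of `llvm.instrprof.increment`. `CoverageInfo` below mirrors the struct added in `query.rs` above; the rest is hypothetical scaffolding.

```rust
struct CoverageInfo {
    hash: u64,
    num_counters: u32,
}

// Assemble the (hash, num_counters, index) triple that feeds
// llvm.instrprof.increment for one injected counter.
fn instrprof_increment_args(info: &CoverageInfo, index: u32) -> (u64, u32, u32) {
    assert!(index < info.num_counters, "counter index out of range");
    (info.hash, info.num_counters, index)
}

fn main() {
    let info = CoverageInfo { hash: 0x1234_5678_9abc_def0, num_counters: 2 };
    let (hash, num_counters, index) = instrprof_increment_args(&info, 1);
    println!("llvm.instrprof.increment({:#x}, {}, {})", hash, num_counters, index);
}
```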
diff --git a/src/librustc_mir/interpret/intrinsics.rs b/src/librustc_mir/interpret/intrinsics.rs
index 01eb2d0b8e266..851862640226f 100644
--- a/src/librustc_mir/interpret/intrinsics.rs
+++ b/src/librustc_mir/interpret/intrinsics.rs
@@ -414,7 +414,10 @@ impl<'mir, 'tcx: 'mir, M: Machine<'mir, 'tcx>> InterpCx<'mir, 'tcx, M> {
                 self.copy_op(args[0], dest)?;
             }
             // FIXME(#73156): Handle source code coverage in const eval
-            sym::count_code_region => (),
+            sym::count_code_region
+            | sym::coverage_counter_add
+            | sym::coverage_counter_subtract
+            | sym::coverage_unreachable => (),
             _ => return Ok(false),
         }
 
diff --git a/src/librustc_mir/transform/inline.rs b/src/librustc_mir/transform/inline.rs
index 068d055fa78f8..c03be2a8fcd69 100644
--- a/src/librustc_mir/transform/inline.rs
+++ b/src/librustc_mir/transform/inline.rs
@@ -39,7 +39,14 @@ struct CallSite<'tcx> {
 impl<'tcx> MirPass<'tcx> for Inline {
     fn run_pass(&self, tcx: TyCtxt<'tcx>, source: MirSource<'tcx>, body: &mut Body<'tcx>) {
         if tcx.sess.opts.debugging_opts.mir_opt_level >= 2 {
-            Inliner { tcx, source }.run_pass(body);
+            if tcx.sess.opts.debugging_opts.instrument_coverage {
+                // The current implementation of source code coverage injects code region counters
+                // into the MIR, and assumes a 1-to-1 correspondence between the MIR and its
+                // source-code-based function, which inlining would invalidate.
+                debug!("function inlining is disabled when compiling with `instrument_coverage`");
+            } else {
+                Inliner { tcx, source }.run_pass(body);
+            }
         }
     }
 }
diff --git a/src/librustc_mir/transform/instrument_coverage.rs b/src/librustc_mir/transform/instrument_coverage.rs
index 06b648ab5a908..25e154f2e9597 100644
--- a/src/librustc_mir/transform/instrument_coverage.rs
+++ b/src/librustc_mir/transform/instrument_coverage.rs
@@ -5,65 +5,71 @@ use rustc_data_structures::stable_hasher::{HashStable, StableHasher};
 use rustc_hir::lang_items;
 use rustc_middle::hir;
 use rustc_middle::ich::StableHashingContext;
-use rustc_middle::mir::interpret::{ConstValue, Scalar};
+use rustc_middle::mir::coverage::*;
+use rustc_middle::mir::interpret::Scalar;
+use rustc_middle::mir::CoverageInfo;
 use rustc_middle::mir::{
-    self, traversal, BasicBlock, BasicBlockData, CoverageData, Operand, Place, SourceInfo,
-    StatementKind, Terminator, TerminatorKind, START_BLOCK,
+    self, traversal, BasicBlock, BasicBlockData, Operand, Place, SourceInfo, StatementKind,
+    Terminator, TerminatorKind, START_BLOCK,
 };
 use rustc_middle::ty;
 use rustc_middle::ty::query::Providers;
+use rustc_middle::ty::FnDef;
 use rustc_middle::ty::TyCtxt;
-use rustc_middle::ty::{ConstKind, FnDef};
 use rustc_span::def_id::DefId;
-use rustc_span::Span;
+use rustc_span::{Pos, Span};
 
 /// Inserts call to count_code_region() as a placeholder to be replaced during code generation with
 /// the intrinsic llvm.instrprof.increment.
 pub struct InstrumentCoverage;
 
-/// The `query` provider for `CoverageData`, requested by `codegen_intrinsic_call()` when
+/// The `query` provider for `CoverageInfo`, requested by `codegen_intrinsic_call()` when
 /// constructing the arguments for `llvm.instrprof.increment`.
 pub(crate) fn provide(providers: &mut Providers<'_>) {
-    providers.coverage_data = |tcx, def_id| {
-        let mir_body = tcx.optimized_mir(def_id);
-        // FIXME(richkadel): The current implementation assumes the MIR for the given DefId
-        // represents a single function. Validate and/or correct if inlining and/or monomorphization
-        // invalidates these assumptions.
-        let count_code_region_fn =
-            tcx.require_lang_item(lang_items::CountCodeRegionFnLangItem, None);
-        let mut num_counters: u32 = 0;
-        // The `num_counters` argument to `llvm.instrprof.increment` is the number of injected
-        // counters, with each counter having an index from `0..num_counters-1`. MIR optimization
-        // may split and duplicate some BasicBlock sequences. Simply counting the calls may not
-        // not work; but computing the num_counters by adding `1` to the highest index (for a given
-        // instrumented function) is valid.
-        for (_, data) in traversal::preorder(mir_body) {
-            if let Some(terminator) = &data.terminator {
-                if let TerminatorKind::Call { func: Operand::Constant(func), args, .. } =
-                    &terminator.kind
-                {
-                    if let FnDef(called_fn_def_id, _) = func.literal.ty.kind {
-                        if called_fn_def_id == count_code_region_fn {
-                            if let Operand::Constant(constant) =
-                                args.get(0).expect("count_code_region has at least one arg")
-                            {
-                                if let ConstKind::Value(ConstValue::Scalar(value)) =
-                                    constant.literal.val
-                                {
-                                    let index = value
-                                        .to_u32()
-                                        .expect("count_code_region index at arg0 is u32");
-                                    num_counters = std::cmp::max(num_counters, index + 1);
-                                }
-                            }
-                        }
-                    }
+    providers.coverageinfo = |tcx, def_id| coverageinfo_from_mir(tcx, def_id);
+}
+
+fn coverageinfo_from_mir<'tcx>(tcx: TyCtxt<'tcx>, mir_def_id: DefId) -> CoverageInfo {
+    let mir_body = tcx.optimized_mir(mir_def_id);
+    // FIXME(richkadel): The current implementation assumes the MIR for the given DefId
+    // represents a single function. Validate and/or correct if inlining (which should be disabled
+    // if -Zinstrument-coverage is enabled) and/or monomorphization invalidates these assumptions.
+    let count_code_region_fn = tcx.require_lang_item(lang_items::CountCodeRegionFnLangItem, None);
+
+    // The `num_counters` argument to `llvm.instrprof.increment` is the number of injected
+    // counters, with each counter having an index from `0..num_counters-1`. MIR optimization
+    // may split and duplicate some BasicBlock sequences. Simply counting the calls may not
+    // work; instead, computing `num_counters` by adding `1` to the highest index (for a given
+    // instrumented function) is valid.
+    let mut num_counters: u32 = 0;
+    for terminator in traversal::preorder(mir_body)
+        .map(|(_, data)| (data, count_code_region_fn))
+        .filter_map(terminators_that_call_given_fn)
+    {
+        if let TerminatorKind::Call { args, .. } = &terminator.kind {
+            let index_arg = args.get(count_code_region_args::COUNTER_INDEX).expect("arg found");
+            let index =
+                mir::Operand::scalar_from_const(index_arg).to_u32().expect("index arg is u32");
+            num_counters = std::cmp::max(num_counters, index + 1);
+        }
+    }
+    let hash = if num_counters > 0 { hash_mir_source(tcx, mir_def_id) } else { 0 };
+    CoverageInfo { num_counters, hash }
+}
+
+fn terminators_that_call_given_fn(
+    (data, fn_def_id): (&'tcx BasicBlockData<'tcx>, DefId),
+) -> Option<&'tcx Terminator<'tcx>> {
+    if let Some(terminator) = &data.terminator {
+        if let TerminatorKind::Call { func: Operand::Constant(func), .. } = &terminator.kind {
+            if let FnDef(called_fn_def_id, _) = func.literal.ty.kind {
+                if called_fn_def_id == fn_def_id {
+                    return Some(&terminator);
                 }
             }
         }
-        let hash = if num_counters > 0 { hash_mir_source(tcx, def_id) } else { 0 };
-        CoverageData { num_counters, hash }
-    };
+    }
+    None
 }
 
 struct Instrumentor<'tcx> {
@@ -102,17 +108,16 @@ impl<'tcx> Instrumentor<'tcx> {
     fn inject_counters(&mut self, mir_body: &mut mir::Body<'tcx>) {
         // FIXME(richkadel): As a first step, counters are only injected at the top of each
         // function. The complete solution will inject counters at each conditional code branch.
-        let top_of_function = START_BLOCK;
-        let entire_function = mir_body.span;
-
-        self.inject_counter(mir_body, top_of_function, entire_function);
+        let code_region = mir_body.span;
+        let next_block = START_BLOCK;
+        self.inject_counter(mir_body, code_region, next_block);
     }
 
     fn inject_counter(
         &mut self,
         mir_body: &mut mir::Body<'tcx>,
-        next_block: BasicBlock,
         code_region: Span,
+        next_block: BasicBlock,
     ) {
         let injection_point = code_region.shrink_to_lo();
 
@@ -121,12 +126,20 @@ impl<'tcx> Instrumentor<'tcx> {
             self.tcx.require_lang_item(lang_items::CountCodeRegionFnLangItem, None),
             injection_point,
         );
-        let counter_index = Operand::const_from_scalar(
-            self.tcx,
-            self.tcx.types.u32,
-            Scalar::from_u32(self.next_counter()),
-            injection_point,
-        );
+
+        let index = self.next_counter();
+
+        let mut args = Vec::new();
+
+        use count_code_region_args::*;
+        debug_assert_eq!(COUNTER_INDEX, args.len());
+        args.push(self.const_u32(index, injection_point));
+
+        debug_assert_eq!(START_BYTE_POS, args.len());
+        args.push(self.const_u32(code_region.lo().to_u32(), injection_point));
+
+        debug_assert_eq!(END_BYTE_POS, args.len());
+        args.push(self.const_u32(code_region.hi().to_u32(), injection_point));
 
         let mut patch = MirPatch::new(mir_body);
 
@@ -136,7 +149,7 @@ impl<'tcx> Instrumentor<'tcx> {
             new_block,
             TerminatorKind::Call {
                 func: count_code_region_fn,
-                args: vec![counter_index],
+                args,
                 // new_block will be swapped with the next_block, after applying the patch
                 destination: Some((Place::from(temp), new_block)),
                 cleanup: None,
@@ -154,6 +167,10 @@ impl<'tcx> Instrumentor<'tcx> {
         // `next_block`), just swap the indexes, leaving the rest of the graph unchanged.
         mir_body.basic_blocks_mut().swap(next_block, new_block);
     }
+
+    fn const_u32(&self, value: u32, span: Span) -> Operand<'tcx> {
+        Operand::const_from_scalar(self.tcx, self.tcx.types.u32, Scalar::from_u32(value), span)
+    }
 }
 
 fn function_handle<'tcx>(tcx: TyCtxt<'tcx>, fn_def_id: DefId, span: Span) -> Operand<'tcx> {
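
The `num_counters` rule from `coverageinfo_from_mir`, reduced to its core: MIR optimization may split and duplicate `BasicBlock` sequences, so simply counting the `count_code_region` calls can miscount, while `max(index) + 1` over every observed counter index is robust to duplicates.

```rust
fn num_counters(observed_indices: &[u32]) -> u32 {
    observed_indices.iter().map(|&index| index + 1).max().unwrap_or(0)
}

fn main() {
    // Index 1 appears twice (a duplicated block), but the count is still 3.
    assert_eq!(num_counters(&[0, 1, 1, 2]), 3);
    assert_eq!(num_counters(&[]), 0); // uninstrumented function
}
```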
diff --git a/src/librustc_session/options.rs b/src/librustc_session/options.rs
index 9337f241d7022..f65785d15d379 100644
--- a/src/librustc_session/options.rs
+++ b/src/librustc_session/options.rs
@@ -881,8 +881,9 @@ options! {DebuggingOptions, DebuggingSetter, basic_debugging_options,
         (default: no)"),
     instrument_coverage: bool = (false, parse_bool, [TRACKED],
         "instrument the generated code with LLVM code region counters to (in the \
-        future) generate coverage reports (default: no; note, the compiler build \
-        config must include `profiler = true`)"),
+        future) generate coverage reports; disables/overrides some optimization \
+        options (note, the compiler build config must include `profiler = true`) \
+        (default: no)"),
     instrument_mcount: bool = (false, parse_bool, [TRACKED],
         "insert function instrument code for mcount-based tracing (default: no)"),
     keep_hygiene_data: bool = (false, parse_bool, [UNTRACKED],
diff --git a/src/librustc_span/lib.rs b/src/librustc_span/lib.rs
index 6dcb1430cdc17..046554067f4ce 100644
--- a/src/librustc_span/lib.rs
+++ b/src/librustc_span/lib.rs
@@ -309,7 +309,9 @@ impl Ord for Span {
     }
 }
 
-/// A collection of spans. Spans have two orthogonal attributes:
+/// A collection of `Span`s.
+///
+/// Spans have two orthogonal attributes:
 ///
 /// - They can be *primary spans*. In this case they are the locus of
 ///   the error, and would be rendered with `^^^`.
diff --git a/src/librustc_span/symbol.rs b/src/librustc_span/symbol.rs
index e2f0d0b94c46c..56dcef3a0e7a2 100644
--- a/src/librustc_span/symbol.rs
+++ b/src/librustc_span/symbol.rs
@@ -242,6 +242,9 @@ symbols! {
         core,
         core_intrinsics,
         count_code_region,
+        coverage_counter_add,
+        coverage_counter_subtract,
+        coverage_unreachable,
         crate_id,
         crate_in_paths,
         crate_local,
diff --git a/src/librustc_target/spec/cloudabi_base.rs b/src/librustc_target/spec/cloudabi_base.rs
index 3659c9ecdfca6..39039435f5815 100644
--- a/src/librustc_target/spec/cloudabi_base.rs
+++ b/src/librustc_target/spec/cloudabi_base.rs
@@ -7,7 +7,6 @@ pub fn opts() -> TargetOptions {
         vec![
             "-Wl,-Bstatic".to_string(),
             "-Wl,--no-dynamic-linker".to_string(),
-            "-Wl,--eh-frame-hdr".to_string(),
             "-Wl,--gc-sections".to_string(),
         ],
     );
diff --git a/src/librustc_target/spec/fuchsia_base.rs b/src/librustc_target/spec/fuchsia_base.rs
index dd55788b664ea..6f432dc11718d 100644
--- a/src/librustc_target/spec/fuchsia_base.rs
+++ b/src/librustc_target/spec/fuchsia_base.rs
@@ -6,7 +6,6 @@ pub fn opts() -> TargetOptions {
         LinkerFlavor::Lld(LldFlavor::Ld),
         vec![
             "--build-id".to_string(),
-            "--eh-frame-hdr".to_string(),
             "--hash-style=gnu".to_string(),
             "-z".to_string(),
             "max-page-size=4096".to_string(),
diff --git a/src/librustc_target/spec/linux_musl_base.rs b/src/librustc_target/spec/linux_musl_base.rs
index 0fdd876080677..b90e91d2901a8 100644
--- a/src/librustc_target/spec/linux_musl_base.rs
+++ b/src/librustc_target/spec/linux_musl_base.rs
@@ -1,15 +1,9 @@
 use crate::spec::crt_objects::{self, CrtObjectsFallback};
-use crate::spec::{LinkerFlavor, TargetOptions};
+use crate::spec::TargetOptions;
 
 pub fn opts() -> TargetOptions {
     let mut base = super::linux_base::opts();
 
-    // At least when this was tested, the linker would not add the
-    // `GNU_EH_FRAME` program header to executables generated, which is required
-    // when unwinding to locate the unwinding information. I'm not sure why this
-    // argument is *not* necessary for normal builds, but it can't hurt!
-    base.pre_link_args.get_mut(&LinkerFlavor::Gcc).unwrap().push("-Wl,--eh-frame-hdr".to_string());
-
     base.pre_link_objects_fallback = crt_objects::pre_musl_fallback();
     base.post_link_objects_fallback = crt_objects::post_musl_fallback();
     base.crt_objects_fallback = Some(CrtObjectsFallback::Musl);
diff --git a/src/librustc_target/spec/mipsel_sony_psp.rs b/src/librustc_target/spec/mipsel_sony_psp.rs
index 0c74454d0c5fe..b3bda97c8a570 100644
--- a/src/librustc_target/spec/mipsel_sony_psp.rs
+++ b/src/librustc_target/spec/mipsel_sony_psp.rs
@@ -6,10 +6,7 @@ const LINKER_SCRIPT: &str = include_str!("./mipsel_sony_psp_linker_script.ld");
 
 pub fn target() -> TargetResult {
     let mut pre_link_args = LinkArgs::new();
-    pre_link_args.insert(
-        LinkerFlavor::Lld(LldFlavor::Ld),
-        vec!["--eh-frame-hdr".to_string(), "--emit-relocs".to_string()],
-    );
+    pre_link_args.insert(LinkerFlavor::Lld(LldFlavor::Ld), vec!["--emit-relocs".to_string()]);
 
     Ok(Target {
         llvm_target: "mipsel-sony-psp".to_string(),
diff --git a/src/librustc_target/spec/x86_64_fortanix_unknown_sgx.rs b/src/librustc_target/spec/x86_64_fortanix_unknown_sgx.rs
index d01545619c8fa..3b5233a3e677d 100644
--- a/src/librustc_target/spec/x86_64_fortanix_unknown_sgx.rs
+++ b/src/librustc_target/spec/x86_64_fortanix_unknown_sgx.rs
@@ -5,7 +5,6 @@ use super::{LinkerFlavor, LldFlavor, PanicStrategy, Target, TargetOptions};
 pub fn target() -> Result<Target, String> {
     const PRE_LINK_ARGS: &[&str] = &[
         "--as-needed",
-        "--eh-frame-hdr",
         "-z",
         "noexecstack",
         "-e",
diff --git a/src/librustc_typeck/check/intrinsic.rs b/src/librustc_typeck/check/intrinsic.rs
index 1c0b22ca7370b..6205088adadb7 100644
--- a/src/librustc_typeck/check/intrinsic.rs
+++ b/src/librustc_typeck/check/intrinsic.rs
@@ -352,7 +352,17 @@ pub fn check_intrinsic_type(tcx: TyCtxt<'_>, it: &hir::ForeignItem<'_>) {
                 return;
             }
 
-            "count_code_region" => (0, vec![tcx.types.u32], tcx.mk_unit()),
+            "count_code_region" => {
+                (0, vec![tcx.types.u32, tcx.types.u32, tcx.types.u32], tcx.mk_unit())
+            }
+
+            "coverage_counter_add" | "coverage_counter_subtract" => (
+                0,
+                vec![tcx.types.u32, tcx.types.u32, tcx.types.u32, tcx.types.u32, tcx.types.u32],
+                tcx.mk_unit(),
+            ),
+
+            "coverage_unreachable" => (0, vec![tcx.types.u32, tcx.types.u32], tcx.mk_unit()),
 
             ref other => {
                 struct_span_err!(
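
For reference, the tuples above encode (generic parameter count, argument types, return type). Written out as declarations they would take roughly the following shape; this is a sketch only, since the actual `extern "rust-intrinsic"` declarations are added to libcore elsewhere in this PR and require the nightly `intrinsics` feature.

```rust
#![feature(intrinsics)]

extern "rust-intrinsic" {
    fn count_code_region(index: u32, start_byte_pos: u32, end_byte_pos: u32);
    fn coverage_counter_add(
        index: u32,
        left_index: u32,
        right_index: u32,
        start_byte_pos: u32,
        end_byte_pos: u32,
    );
    fn coverage_counter_subtract(
        index: u32,
        left_index: u32,
        right_index: u32,
        start_byte_pos: u32,
        end_byte_pos: u32,
    );
    fn coverage_unreachable(start_byte_pos: u32, end_byte_pos: u32);
}

fn main() {}
```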
diff --git a/src/test/mir-opt/instrument_coverage/rustc.bar.InstrumentCoverage.diff b/src/test/mir-opt/instrument_coverage/rustc.bar.InstrumentCoverage.diff
index 8a079f0da4b01..af2899c887a94 100644
--- a/src/test/mir-opt/instrument_coverage/rustc.bar.InstrumentCoverage.diff
+++ b/src/test/mir-opt/instrument_coverage/rustc.bar.InstrumentCoverage.diff
@@ -7,19 +7,31 @@
   
       bb0: {
 +         StorageLive(_1);                 // scope 0 at $DIR/instrument_coverage.rs:18:1: 20:2
-+         _1 = const std::intrinsics::count_code_region(const 0_u32) -> bb2; // scope 0 at $DIR/instrument_coverage.rs:18:1: 20:2
++         _1 = const std::intrinsics::count_code_region(const 0_u32, const 484_u32, const 513_u32) -> bb2; // scope 0 at $DIR/instrument_coverage.rs:18:1: 20:2
 +                                          // ty::Const
-+                                          // + ty: unsafe extern "rust-intrinsic" fn(u32) {std::intrinsics::count_code_region}
++                                          // + ty: unsafe extern "rust-intrinsic" fn(u32, u32, u32) {std::intrinsics::count_code_region}
 +                                          // + val: Value(Scalar(<ZST>))
 +                                          // mir::Constant
 +                                          // + span: $DIR/instrument_coverage.rs:18:1: 18:1
-+                                          // + literal: Const { ty: unsafe extern "rust-intrinsic" fn(u32) {std::intrinsics::count_code_region}, val: Value(Scalar(<ZST>)) }
++                                          // + literal: Const { ty: unsafe extern "rust-intrinsic" fn(u32, u32, u32) {std::intrinsics::count_code_region}, val: Value(Scalar(<ZST>)) }
 +                                          // ty::Const
 +                                          // + ty: u32
 +                                          // + val: Value(Scalar(0x00000000))
 +                                          // mir::Constant
 +                                          // + span: $DIR/instrument_coverage.rs:18:1: 18:1
 +                                          // + literal: Const { ty: u32, val: Value(Scalar(0x00000000)) }
++                                          // ty::Const
++                                          // + ty: u32
++                                          // + val: Value(Scalar(0x000001e4))
++                                          // mir::Constant
++                                          // + span: $DIR/instrument_coverage.rs:18:1: 18:1
++                                          // + literal: Const { ty: u32, val: Value(Scalar(0x000001e4)) }
++                                          // ty::Const
++                                          // + ty: u32
++                                          // + val: Value(Scalar(0x00000201))
++                                          // mir::Constant
++                                          // + span: $DIR/instrument_coverage.rs:18:1: 18:1
++                                          // + literal: Const { ty: u32, val: Value(Scalar(0x00000201)) }
 +     }
 + 
 +     bb1 (cleanup): {
diff --git a/src/test/mir-opt/instrument_coverage/rustc.main.InstrumentCoverage.diff b/src/test/mir-opt/instrument_coverage/rustc.main.InstrumentCoverage.diff
index 3c2ec1dc06b70..4a300230f8a97 100644
--- a/src/test/mir-opt/instrument_coverage/rustc.main.InstrumentCoverage.diff
+++ b/src/test/mir-opt/instrument_coverage/rustc.main.InstrumentCoverage.diff
@@ -11,19 +11,31 @@
       bb0: {
 -         falseUnwind -> [real: bb1, cleanup: bb2]; // scope 0 at $DIR/instrument_coverage.rs:10:5: 14:6
 +         StorageLive(_4);                 // scope 0 at $DIR/instrument_coverage.rs:9:1: 15:2
-+         _4 = const std::intrinsics::count_code_region(const 0_u32) -> bb7; // scope 0 at $DIR/instrument_coverage.rs:9:1: 15:2
++         _4 = const std::intrinsics::count_code_region(const 0_u32, const 387_u32, const 465_u32) -> bb7; // scope 0 at $DIR/instrument_coverage.rs:9:1: 15:2
 +                                          // ty::Const
-+                                          // + ty: unsafe extern "rust-intrinsic" fn(u32) {std::intrinsics::count_code_region}
++                                          // + ty: unsafe extern "rust-intrinsic" fn(u32, u32, u32) {std::intrinsics::count_code_region}
 +                                          // + val: Value(Scalar(<ZST>))
 +                                          // mir::Constant
 +                                          // + span: $DIR/instrument_coverage.rs:9:1: 9:1
-+                                          // + literal: Const { ty: unsafe extern "rust-intrinsic" fn(u32) {std::intrinsics::count_code_region}, val: Value(Scalar(<ZST>)) }
++                                          // + literal: Const { ty: unsafe extern "rust-intrinsic" fn(u32, u32, u32) {std::intrinsics::count_code_region}, val: Value(Scalar(<ZST>)) }
 +                                          // ty::Const
 +                                          // + ty: u32
 +                                          // + val: Value(Scalar(0x00000000))
 +                                          // mir::Constant
 +                                          // + span: $DIR/instrument_coverage.rs:9:1: 9:1
 +                                          // + literal: Const { ty: u32, val: Value(Scalar(0x00000000)) }
++                                          // ty::Const
++                                          // + ty: u32
++                                          // + val: Value(Scalar(0x00000183))
++                                          // mir::Constant
++                                          // + span: $DIR/instrument_coverage.rs:9:1: 9:1
++                                          // + literal: Const { ty: u32, val: Value(Scalar(0x00000183)) }
++                                          // ty::Const
++                                          // + ty: u32
++                                          // + val: Value(Scalar(0x000001d1))
++                                          // mir::Constant
++                                          // + span: $DIR/instrument_coverage.rs:9:1: 9:1
++                                          // + literal: Const { ty: u32, val: Value(Scalar(0x000001d1)) }
       }
   
       bb1: {
diff --git a/src/test/ui/consts/miri_unleashed/drop.stderr b/src/test/ui/consts/miri_unleashed/drop.stderr
index 34ab5155e22d0..1fa3992cc5afd 100644
--- a/src/test/ui/consts/miri_unleashed/drop.stderr
+++ b/src/test/ui/consts/miri_unleashed/drop.stderr
@@ -4,7 +4,9 @@ error[E0080]: could not evaluate static initializer
 LL | / pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
 LL | |     // Code here does not matter - this is replaced by the
 LL | |     // real drop glue by the compiler.
-LL | |     drop_in_place(to_drop)
+LL | |
+LL | |     // SAFETY: see comment above
+LL | |     unsafe { drop_in_place(to_drop) }
 LL | | }
    | | ^
    | | |
diff --git a/src/test/ui/consts/offset_from_ub.stderr b/src/test/ui/consts/offset_from_ub.stderr
index cde2fe3262692..aa65f4de3e17e 100644
--- a/src/test/ui/consts/offset_from_ub.stderr
+++ b/src/test/ui/consts/offset_from_ub.stderr
@@ -1,12 +1,12 @@
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |           intrinsics::ptr_offset_from(self, origin)
-   |           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |           |
-   |           ptr_offset_from cannot compute offset of pointers into different allocations.
-   |           inside `std::ptr::const_ptr::<impl *const Struct>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |           inside `DIFFERENT_ALLOC` at $DIR/offset_from_ub.rs:17:27
+LL |           unsafe { intrinsics::ptr_offset_from(self, origin) }
+   |                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                    |
+   |                    ptr_offset_from cannot compute offset of pointers into different allocations.
+   |                    inside `std::ptr::const_ptr::<impl *const Struct>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                    inside `DIFFERENT_ALLOC` at $DIR/offset_from_ub.rs:17:27
    | 
   ::: $DIR/offset_from_ub.rs:11:1
    |
@@ -24,12 +24,12 @@ LL | | };
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |           intrinsics::ptr_offset_from(self, origin)
-   |           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |           |
-   |           unable to turn bytes into a pointer
-   |           inside `std::ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |           inside `NOT_PTR` at $DIR/offset_from_ub.rs:23:14
+LL |           unsafe { intrinsics::ptr_offset_from(self, origin) }
+   |                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                    |
+   |                    unable to turn bytes into a pointer
+   |                    inside `std::ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                    inside `NOT_PTR` at $DIR/offset_from_ub.rs:23:14
    | 
   ::: $DIR/offset_from_ub.rs:21:1
    |
@@ -42,12 +42,12 @@ LL | | };
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |           intrinsics::ptr_offset_from(self, origin)
-   |           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |           |
-   |           exact_div: 1_isize cannot be divided by 2_isize without remainder
-   |           inside `std::ptr::const_ptr::<impl *const u16>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |           inside `NOT_MULTIPLE_OF_SIZE` at $DIR/offset_from_ub.rs:31:14
+LL |           unsafe { intrinsics::ptr_offset_from(self, origin) }
+   |                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                    |
+   |                    exact_div: 1_isize cannot be divided by 2_isize without remainder
+   |                    inside `std::ptr::const_ptr::<impl *const u16>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                    inside `NOT_MULTIPLE_OF_SIZE` at $DIR/offset_from_ub.rs:31:14
    | 
   ::: $DIR/offset_from_ub.rs:26:1
    |
@@ -63,12 +63,12 @@ LL | | };
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |           intrinsics::ptr_offset_from(self, origin)
-   |           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |           |
-   |           inbounds test failed: 0x0 is not a valid pointer
-   |           inside `std::ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |           inside `OFFSET_FROM_NULL` at $DIR/offset_from_ub.rs:37:14
+LL |           unsafe { intrinsics::ptr_offset_from(self, origin) }
+   |                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                    |
+   |                    inbounds test failed: 0x0 is not a valid pointer
+   |                    inside `std::ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                    inside `OFFSET_FROM_NULL` at $DIR/offset_from_ub.rs:37:14
    | 
   ::: $DIR/offset_from_ub.rs:34:1
    |
@@ -82,12 +82,12 @@ LL | | };
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |           intrinsics::ptr_offset_from(self, origin)
-   |           ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |           |
-   |           unable to turn bytes into a pointer
-   |           inside `std::ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |           inside `DIFFERENT_INT` at $DIR/offset_from_ub.rs:44:14
+LL |           unsafe { intrinsics::ptr_offset_from(self, origin) }
+   |                    ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                    |
+   |                    unable to turn bytes into a pointer
+   |                    inside `std::ptr::const_ptr::<impl *const u8>::offset_from` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                    inside `DIFFERENT_INT` at $DIR/offset_from_ub.rs:44:14
    | 
   ::: $DIR/offset_from_ub.rs:40:1
    |
diff --git a/src/test/ui/consts/offset_ub.stderr b/src/test/ui/consts/offset_ub.stderr
index 0ab81cc0c5b31..0a144a6bac2f1 100644
--- a/src/test/ui/consts/offset_ub.stderr
+++ b/src/test/ui/consts/offset_ub.stderr
@@ -1,12 +1,12 @@
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         overflowing in-bounds pointer arithmetic
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `BEFORE_START` at $DIR/offset_ub.rs:7:46
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  overflowing in-bounds pointer arithmetic
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `BEFORE_START` at $DIR/offset_ub.rs:7:46
    | 
   ::: $DIR/offset_ub.rs:7:1
    |
@@ -18,12 +18,12 @@ LL | pub const BEFORE_START: *const u8 = unsafe { (&0u8 as *const u8).offset(-1)
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         inbounds test failed: pointer must be in-bounds at offset 2, but is outside bounds of allocN which has size 1
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `AFTER_END` at $DIR/offset_ub.rs:8:43
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  inbounds test failed: pointer must be in-bounds at offset 2, but is outside bounds of allocN which has size 1
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `AFTER_END` at $DIR/offset_ub.rs:8:43
    | 
   ::: $DIR/offset_ub.rs:8:1
    |
@@ -33,12 +33,12 @@ LL | pub const AFTER_END: *const u8 = unsafe { (&0u8 as *const u8).offset(2) };
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         inbounds test failed: pointer must be in-bounds at offset 101, but is outside bounds of allocN which has size 100
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `AFTER_ARRAY` at $DIR/offset_ub.rs:9:45
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  inbounds test failed: pointer must be in-bounds at offset 101, but is outside bounds of allocN which has size 100
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `AFTER_ARRAY` at $DIR/offset_ub.rs:9:45
    | 
   ::: $DIR/offset_ub.rs:9:1
    |
@@ -48,12 +48,12 @@ LL | pub const AFTER_ARRAY: *const u8 = unsafe { [0u8; 100].as_ptr().offset(101)
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         overflowing in-bounds pointer arithmetic
-   |         inside `std::ptr::const_ptr::<impl *const u16>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `OVERFLOW` at $DIR/offset_ub.rs:11:43
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  overflowing in-bounds pointer arithmetic
+   |                  inside `std::ptr::const_ptr::<impl *const u16>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `OVERFLOW` at $DIR/offset_ub.rs:11:43
    | 
   ::: $DIR/offset_ub.rs:11:1
    |
@@ -63,12 +63,12 @@ LL | pub const OVERFLOW: *const u16 = unsafe { [0u16; 1].as_ptr().offset(isize::
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         overflowing in-bounds pointer arithmetic
-   |         inside `std::ptr::const_ptr::<impl *const u16>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `UNDERFLOW` at $DIR/offset_ub.rs:12:44
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  overflowing in-bounds pointer arithmetic
+   |                  inside `std::ptr::const_ptr::<impl *const u16>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `UNDERFLOW` at $DIR/offset_ub.rs:12:44
    | 
   ::: $DIR/offset_ub.rs:12:1
    |
@@ -78,12 +78,12 @@ LL | pub const UNDERFLOW: *const u16 = unsafe { [0u16; 1].as_ptr().offset(isize:
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         overflowing in-bounds pointer arithmetic
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `OVERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:13:56
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  overflowing in-bounds pointer arithmetic
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `OVERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:13:56
    | 
   ::: $DIR/offset_ub.rs:13:1
    |
@@ -93,12 +93,12 @@ LL | pub const OVERFLOW_ADDRESS_SPACE: *const u8 = unsafe { (usize::MAX as *cons
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         overflowing in-bounds pointer arithmetic
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `UNDERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:14:57
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  overflowing in-bounds pointer arithmetic
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `UNDERFLOW_ADDRESS_SPACE` at $DIR/offset_ub.rs:14:57
    | 
   ::: $DIR/offset_ub.rs:14:1
    |
@@ -108,12 +108,12 @@ LL | pub const UNDERFLOW_ADDRESS_SPACE: *const u8 = unsafe { (1 as *const u8).of
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         inbounds test failed: pointer must be in-bounds at offset 1, but is outside bounds of allocN which has size 0
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `ZERO_SIZED_ALLOC` at $DIR/offset_ub.rs:16:50
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  inbounds test failed: pointer must be in-bounds at offset 1, but is outside bounds of allocN which has size 0
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `ZERO_SIZED_ALLOC` at $DIR/offset_ub.rs:16:50
    | 
   ::: $DIR/offset_ub.rs:16:1
    |
@@ -123,12 +123,12 @@ LL | pub const ZERO_SIZED_ALLOC: *const u8 = unsafe { [0u8; 0].as_ptr().offset(1
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/mut_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count) as *mut T
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         unable to turn bytes into a pointer
-   |         inside `std::ptr::mut_ptr::<impl *mut u8>::offset` at $SRC_DIR/libcore/ptr/mut_ptr.rs:LL:COL
-   |         inside `DANGLING` at $DIR/offset_ub.rs:17:42
+LL |         unsafe { intrinsics::offset(self, count) as *mut T }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  unable to turn bytes into a pointer
+   |                  inside `std::ptr::mut_ptr::<impl *mut u8>::offset` at $SRC_DIR/libcore/ptr/mut_ptr.rs:LL:COL
+   |                  inside `DANGLING` at $DIR/offset_ub.rs:17:42
    | 
   ::: $DIR/offset_ub.rs:17:1
    |
@@ -138,12 +138,12 @@ LL | pub const DANGLING: *const u8 = unsafe { ptr::NonNull::<u8>::dangling().as_
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         inbounds test failed: 0x0 is not a valid pointer
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `NULL_OFFSET_ZERO` at $DIR/offset_ub.rs:20:50
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  inbounds test failed: 0x0 is not a valid pointer
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `NULL_OFFSET_ZERO` at $DIR/offset_ub.rs:20:50
    | 
   ::: $DIR/offset_ub.rs:20:1
    |
@@ -153,12 +153,12 @@ LL | pub const NULL_OFFSET_ZERO: *const u8 = unsafe { ptr::null::<u8>().offset(0
 error: any use of this value will cause an error
   --> $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
    |
-LL |         intrinsics::offset(self, count)
-   |         ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
-   |         |
-   |         unable to turn bytes into a pointer
-   |         inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
-   |         inside `UNDERFLOW_ABS` at $DIR/offset_ub.rs:23:47
+LL |         unsafe { intrinsics::offset(self, count) }
+   |                  ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^
+   |                  |
+   |                  unable to turn bytes into a pointer
+   |                  inside `std::ptr::const_ptr::<impl *const u8>::offset` at $SRC_DIR/libcore/ptr/const_ptr.rs:LL:COL
+   |                  inside `UNDERFLOW_ABS` at $DIR/offset_ub.rs:23:47
    | 
   ::: $DIR/offset_ub.rs:23:1
    |
diff --git a/src/test/ui/recursion/issue-38591-non-regular-dropck-recursion.stderr b/src/test/ui/recursion/issue-38591-non-regular-dropck-recursion.stderr
index 0552847c48ca9..3296a2cb094a1 100644
--- a/src/test/ui/recursion/issue-38591-non-regular-dropck-recursion.stderr
+++ b/src/test/ui/recursion/issue-38591-non-regular-dropck-recursion.stderr
@@ -4,7 +4,9 @@ error: reached the recursion limit while instantiating `std::intrinsics::drop_in
 LL | / pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
 LL | |     // Code here does not matter - this is replaced by the
 LL | |     // real drop glue by the compiler.
-LL | |     drop_in_place(to_drop)
+LL | |
+LL | |     // SAFETY: see comment above
+LL | |     unsafe { drop_in_place(to_drop) }
 LL | | }
    | |_^
    |
@@ -14,7 +16,9 @@ note: `std::intrinsics::drop_in_place` defined here
 LL | / pub unsafe fn drop_in_place<T: ?Sized>(to_drop: *mut T) {
 LL | |     // Code here does not matter - this is replaced by the
 LL | |     // real drop glue by the compiler.
-LL | |     drop_in_place(to_drop)
+LL | |
+LL | |     // SAFETY: see comment above
+LL | |     unsafe { drop_in_place(to_drop) }
 LL | | }
    | |_^