diff --git a/compiler/rustc_codegen_gcc/src/back/lto.rs b/compiler/rustc_codegen_gcc/src/back/lto.rs index dda777a540027..9a9040708ef89 100644 --- a/compiler/rustc_codegen_gcc/src/back/lto.rs +++ b/compiler/rustc_codegen_gcc/src/back/lto.rs @@ -30,6 +30,7 @@ use rustc_codegen_ssa::back::write::{CodegenContext, FatLtoInput, SharedEmitter} use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_data_structures::memmap::Mmap; +use rustc_data_structures::profiling::SelfProfilerRef; use rustc_errors::{DiagCtxt, DiagCtxtHandle}; use rustc_log::tracing::info; use rustc_middle::bug; @@ -112,6 +113,7 @@ fn save_as_file(obj: &[u8], path: &Path) -> Result<(), LtoBitcodeFromRlib> { /// for further optimization. pub(crate) fn run_fat( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, each_linked_rlib_for_lto: &[PathBuf], modules: Vec>, @@ -123,6 +125,7 @@ pub(crate) fn run_fat( lto_data.symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::>();*/ fat_lto( cgcx, + prof, dcx, modules, lto_data.upstream_modules, @@ -133,13 +136,14 @@ pub(crate) fn run_fat( fn fat_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, _dcx: DiagCtxtHandle<'_>, modules: Vec>, mut serialized_modules: Vec<(SerializedModule, CString)>, tmp_path: TempDir, //symbols_below_threshold: &[String], ) -> ModuleCodegen { - let _timer = cgcx.prof.generic_activity("GCC_fat_lto_build_monolithic_module"); + let _timer = prof.generic_activity("GCC_fat_lto_build_monolithic_module"); info!("going for a fat lto"); // Sort out all our lists of incoming modules into two lists. @@ -223,8 +227,7 @@ fn fat_lto( // We add the object files and save in should_combine_object_files that we should combine // them into a single object file when compiling later. for (bc_decoded, name) in serialized_modules { - let _timer = cgcx - .prof + let _timer = prof .generic_activity_with_arg_recorder("GCC_fat_lto_link_module", |recorder| { recorder.record_arg(format!("{:?}", name)) }); @@ -284,6 +287,7 @@ impl ModuleBufferMethods for ModuleBuffer { /// can simply be copied over from the incr. comp. cache. pub(crate) fn run_thin( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, each_linked_rlib_for_lto: &[PathBuf], modules: Vec<(String, ThinBuffer)>, @@ -298,6 +302,7 @@ pub(crate) fn run_thin( } thin_lto( cgcx, + prof, dcx, modules, lto_data.upstream_modules, @@ -345,7 +350,8 @@ pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBu /// all of the `LtoModuleCodegen` units returned below and destroyed once /// they all go out of scope. 
fn thin_lto( - cgcx: &CodegenContext, + _cgcx: &CodegenContext, + prof: &SelfProfilerRef, _dcx: DiagCtxtHandle<'_>, modules: Vec<(String, ThinBuffer)>, serialized_modules: Vec<(SerializedModule, CString)>, @@ -353,7 +359,7 @@ fn thin_lto( cached_modules: Vec<(SerializedModule, WorkProduct)>, //_symbols_below_threshold: &[String], ) -> (Vec>, Vec) { - let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis"); + let _timer = prof.generic_activity("LLVM_thin_lto_global_analysis"); info!("going for that thin, thin LTO"); /*let green_modules: FxHashMap<_, _> = diff --git a/compiler/rustc_codegen_gcc/src/back/write.rs b/compiler/rustc_codegen_gcc/src/back/write.rs index 5e96447234758..ddf13558027bd 100644 --- a/compiler/rustc_codegen_gcc/src/back/write.rs +++ b/compiler/rustc_codegen_gcc/src/back/write.rs @@ -6,6 +6,7 @@ use rustc_codegen_ssa::back::write::{ BitcodeSection, CodegenContext, EmitObj, ModuleConfig, SharedEmitter, }; use rustc_codegen_ssa::{CompiledModule, ModuleCodegen}; +use rustc_data_structures::profiling::SelfProfilerRef; use rustc_errors::DiagCtxt; use rustc_fs_util::link_or_copy; use rustc_log::tracing::debug; @@ -18,6 +19,7 @@ use crate::{GccContext, LtoMode}; pub(crate) fn codegen( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, @@ -25,7 +27,7 @@ pub(crate) fn codegen( let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); let dcx = dcx.handle(); - let _timer = cgcx.prof.generic_activity_with_arg("GCC_module_codegen", &*module.name); + let _timer = prof.generic_activity_with_arg("GCC_module_codegen", &*module.name); { let context = &module.module_llvm.context; @@ -44,9 +46,8 @@ pub(crate) fn codegen( ); if config.bitcode_needed() { - let _timer = cgcx - .prof - .generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name); + let _timer = + prof.generic_activity_with_arg("GCC_module_codegen_make_bitcode", &*module.name); // TODO(antoyo) /*if let Some(bitcode_filename) = bc_out.file_name() { @@ -58,8 +59,7 @@ pub(crate) fn codegen( }*/ if config.emit_bc || config.emit_obj == EmitObj::Bitcode { - let _timer = cgcx - .prof + let _timer = prof .generic_activity_with_arg("GCC_module_codegen_emit_bitcode", &*module.name); if lto_supported { context.add_command_line_option("-flto=auto"); @@ -70,8 +70,7 @@ pub(crate) fn codegen( } if config.emit_obj == EmitObj::ObjectCode(BitcodeSection::Full) { - let _timer = cgcx - .prof + let _timer = prof .generic_activity_with_arg("GCC_module_codegen_embed_bitcode", &*module.name); if lto_supported { // TODO(antoyo): maybe we should call embed_bitcode to have the proper iOS fixes? 
@@ -98,7 +97,7 @@ pub(crate) fn codegen( if config.emit_asm { let _timer = - cgcx.prof.generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name); + prof.generic_activity_with_arg("GCC_module_codegen_emit_asm", &*module.name); let path = cgcx.output_filenames.temp_path_for_cgu( OutputType::Assembly, &module.name, @@ -109,9 +108,8 @@ pub(crate) fn codegen( match config.emit_obj { EmitObj::ObjectCode(_) => { - let _timer = cgcx - .prof - .generic_activity_with_arg("GCC_module_codegen_emit_obj", &*module.name); + let _timer = + prof.generic_activity_with_arg("GCC_module_codegen_emit_obj", &*module.name); if env::var("CG_GCCJIT_DUMP_MODULE_NAMES").as_deref() == Ok("1") { println!("Module {}", module.name); } diff --git a/compiler/rustc_codegen_gcc/src/lib.rs b/compiler/rustc_codegen_gcc/src/lib.rs index d490650c37f76..24a065d69ecac 100644 --- a/compiler/rustc_codegen_gcc/src/lib.rs +++ b/compiler/rustc_codegen_gcc/src/lib.rs @@ -90,6 +90,7 @@ use rustc_codegen_ssa::target_features::cfg_target_feature; use rustc_codegen_ssa::traits::{CodegenBackend, ExtraBackendMethods, WriteBackendMethods}; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig}; use rustc_data_structures::fx::FxIndexMap; +use rustc_data_structures::profiling::SelfProfilerRef; use rustc_data_structures::sync::IntoDynSyncSend; use rustc_errors::DiagCtxtHandle; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; @@ -427,6 +428,7 @@ impl WriteBackendMethods for GccCodegenBackend { fn run_and_optimize_fat_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, _tm_factory: TargetMachineFactoryFn, // FIXME(bjorn3): Limit LTO exports to these symbols @@ -434,11 +436,12 @@ impl WriteBackendMethods for GccCodegenBackend { each_linked_rlib_for_lto: &[PathBuf], modules: Vec>, ) -> ModuleCodegen { - back::lto::run_fat(cgcx, shared_emitter, each_linked_rlib_for_lto, modules) + back::lto::run_fat(cgcx, prof, shared_emitter, each_linked_rlib_for_lto, modules) } fn run_thin_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, // FIXME(bjorn3): Limit LTO exports to these symbols _exported_symbols_for_lto: &[String], @@ -446,7 +449,7 @@ impl WriteBackendMethods for GccCodegenBackend { modules: Vec<(String, Self::ThinBuffer)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, ) -> (Vec>, Vec) { - back::lto::run_thin(cgcx, dcx, each_linked_rlib_for_lto, modules, cached_modules) + back::lto::run_thin(cgcx, prof, dcx, each_linked_rlib_for_lto, modules, cached_modules) } fn print_pass_timings(&self) { @@ -459,6 +462,7 @@ impl WriteBackendMethods for GccCodegenBackend { fn optimize( _cgcx: &CodegenContext, + _prof: &SelfProfilerRef, _shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, @@ -468,6 +472,7 @@ impl WriteBackendMethods for GccCodegenBackend { fn optimize_thin( cgcx: &CodegenContext, + _prof: &SelfProfilerRef, _shared_emitter: &SharedEmitter, _tm_factory: TargetMachineFactoryFn, thin: ThinModule, @@ -477,11 +482,12 @@ impl WriteBackendMethods for GccCodegenBackend { fn codegen( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, ) -> CompiledModule { - back::write::codegen(cgcx, shared_emitter, module, config) + back::write::codegen(cgcx, prof, shared_emitter, module, config) } fn prepare_thin(module: ModuleCodegen) -> (String, Self::ThinBuffer) { diff --git a/compiler/rustc_codegen_llvm/src/attributes.rs 
b/compiler/rustc_codegen_llvm/src/attributes.rs index f5eb9c10db996..b5ab26aea4922 100644 --- a/compiler/rustc_codegen_llvm/src/attributes.rs +++ b/compiler/rustc_codegen_llvm/src/attributes.rs @@ -301,17 +301,6 @@ fn stackprotector_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll A Some(sspattr.create_attr(cx.llcx)) } -fn backchain_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> Option<&'ll Attribute> { - if sess.target.arch != Arch::S390x { - return None; - } - - let requested_features = sess.opts.cg.target_feature.split(','); - let found_positive = requested_features.clone().any(|r| r == "+backchain"); - - if found_positive { Some(llvm::CreateAttrString(cx.llcx, "backchain")) } else { None } -} - pub(crate) fn target_cpu_attr<'ll>(cx: &SimpleCx<'ll>, sess: &Session) -> &'ll Attribute { let target_cpu = llvm_util::target_cpu(sess); llvm::CreateAttrStringValue(cx.llcx, "target-cpu", target_cpu) @@ -530,9 +519,6 @@ pub(crate) fn llfn_attrs_from_instance<'ll, 'tcx>( if let Some(align) = codegen_fn_attrs.alignment { llvm::set_alignment(llfn, align); } - if let Some(backchain) = backchain_attr(cx, sess) { - to_add.push(backchain); - } to_add.extend(patchable_function_entry_attrs( cx, sess, diff --git a/compiler/rustc_codegen_llvm/src/back/lto.rs b/compiler/rustc_codegen_llvm/src/back/lto.rs index 5bd856a3ac435..5d272d10930b1 100644 --- a/compiler/rustc_codegen_llvm/src/back/lto.rs +++ b/compiler/rustc_codegen_llvm/src/back/lto.rs @@ -16,6 +16,7 @@ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{ModuleCodegen, ModuleKind, looks_like_rust_object_file}; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::memmap::Mmap; +use rustc_data_structures::profiling::SelfProfilerRef; use rustc_errors::{DiagCtxt, DiagCtxtHandle}; use rustc_hir::attrs::SanitizerSet; use rustc_middle::bug; @@ -152,6 +153,7 @@ fn get_bitcode_slice_from_object_data<'a>( /// for further optimization. pub(crate) fn run_fat( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], @@ -166,6 +168,7 @@ pub(crate) fn run_fat( symbols_below_threshold.iter().map(|c| c.as_ptr()).collect::>(); fat_lto( cgcx, + prof, dcx, shared_emitter, tm_factory, @@ -180,6 +183,7 @@ pub(crate) fn run_fat( /// can simply be copied over from the incr. comp. cache. pub(crate) fn run_thin( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -196,7 +200,7 @@ pub(crate) fn run_thin( is deferred to the linker" ); } - thin_lto(cgcx, dcx, modules, upstream_modules, cached_modules, &symbols_below_threshold) + thin_lto(cgcx, prof, dcx, modules, upstream_modules, cached_modules, &symbols_below_threshold) } pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBuffer) { @@ -207,6 +211,7 @@ pub(crate) fn prepare_thin(module: ModuleCodegen) -> (String, ThinBu fn fat_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, shared_emitter: &SharedEmitter, tm_factory: TargetMachineFactoryFn, @@ -214,7 +219,7 @@ fn fat_lto( mut serialized_modules: Vec<(SerializedModule, CString)>, symbols_below_threshold: &[*const libc::c_char], ) -> ModuleCodegen { - let _timer = cgcx.prof.generic_activity("LLVM_fat_lto_build_monolithic_module"); + let _timer = prof.generic_activity("LLVM_fat_lto_build_monolithic_module"); info!("going for a fat lto"); // Sort out all our lists of incoming modules into two lists. 
@@ -303,8 +308,7 @@ fn fat_lto( // above, this is all mostly handled in C++. let mut linker = Linker::new(llmod); for (bc_decoded, name) in serialized_modules { - let _timer = cgcx - .prof + let _timer = prof .generic_activity_with_arg_recorder("LLVM_fat_lto_link_module", |recorder| { recorder.record_arg(format!("{name:?}")) }); @@ -394,13 +398,14 @@ impl Drop for Linker<'_> { /// they all go out of scope. fn thin_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, modules: Vec<(String, ThinBuffer)>, serialized_modules: Vec<(SerializedModule, CString)>, cached_modules: Vec<(SerializedModule, WorkProduct)>, symbols_below_threshold: &[*const libc::c_char], ) -> (Vec>, Vec) { - let _timer = cgcx.prof.generic_activity("LLVM_thin_lto_global_analysis"); + let _timer = prof.generic_activity("LLVM_thin_lto_global_analysis"); unsafe { info!("going for that thin, thin LTO"); @@ -598,11 +603,12 @@ pub(crate) fn enable_autodiff_settings(ad: &[config::AutoDiff]) { pub(crate) fn run_pass_manager( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, module: &mut ModuleCodegen, thin: bool, ) { - let _timer = cgcx.prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name); + let _timer = prof.generic_activity_with_arg("LLVM_lto_optimize", &*module.name); let config = &cgcx.module_config; // Now we have one massive module inside of llmod. Time to run the @@ -628,7 +634,7 @@ pub(crate) fn run_pass_manager( }; unsafe { - write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage); + write::llvm_optimize(cgcx, prof, dcx, module, None, config, opt_level, opt_stage, stage); } if cfg!(feature = "llvm_enzyme") && enable_ad && !thin { @@ -636,7 +642,9 @@ pub(crate) fn run_pass_manager( let stage = write::AutodiffStage::PostAD; if !config.autodiff.contains(&config::AutoDiff::NoPostopt) { unsafe { - write::llvm_optimize(cgcx, dcx, module, None, config, opt_level, opt_stage, stage); + write::llvm_optimize( + cgcx, prof, dcx, module, None, config, opt_level, opt_stage, stage, + ); } } @@ -739,6 +747,7 @@ impl Drop for ThinBuffer { pub(crate) fn optimize_thin_module( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, tm_factory: TargetMachineFactoryFn, thin_module: ThinModule, @@ -773,8 +782,7 @@ pub(crate) fn optimize_thin_module( // You can find some more comments about these functions in the LLVM // bindings we've got (currently `PassWrapper.cpp`) { - let _timer = - cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name()); + let _timer = prof.generic_activity_with_arg("LLVM_thin_lto_rename", thin_module.name()); unsafe { llvm::LLVMRustPrepareThinLTORename(thin_module.shared.data.0, llmod, target.raw()) }; @@ -782,9 +790,8 @@ pub(crate) fn optimize_thin_module( } { - let _timer = cgcx - .prof - .generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name()); + let _timer = + prof.generic_activity_with_arg("LLVM_thin_lto_resolve_weak", thin_module.name()); if unsafe { !llvm::LLVMRustPrepareThinLTOResolveWeak(thin_module.shared.data.0, llmod) } { write::llvm_err(dcx, LlvmError::PrepareThinLtoModule); @@ -793,9 +800,8 @@ pub(crate) fn optimize_thin_module( } { - let _timer = cgcx - .prof - .generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name()); + let _timer = + prof.generic_activity_with_arg("LLVM_thin_lto_internalize", thin_module.name()); if unsafe { !llvm::LLVMRustPrepareThinLTOInternalize(thin_module.shared.data.0, llmod) } { write::llvm_err(dcx, 
LlvmError::PrepareThinLtoModule); @@ -804,8 +810,7 @@ pub(crate) fn optimize_thin_module( } { - let _timer = - cgcx.prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name()); + let _timer = prof.generic_activity_with_arg("LLVM_thin_lto_import", thin_module.name()); if unsafe { !llvm::LLVMRustPrepareThinLTOImport(thin_module.shared.data.0, llmod, target.raw()) } { @@ -821,7 +826,7 @@ pub(crate) fn optimize_thin_module( // little differently. { info!("running thin lto passes over {}", module.name); - run_pass_manager(cgcx, dcx, &mut module, true); + run_pass_manager(cgcx, prof, dcx, &mut module, true); save_temp_bitcode(cgcx, &module, "thin-lto-after-pm"); } } diff --git a/compiler/rustc_codegen_llvm/src/back/write.rs b/compiler/rustc_codegen_llvm/src/back/write.rs index 2bb5b5db5e485..3e3ccd39e674c 100644 --- a/compiler/rustc_codegen_llvm/src/back/write.rs +++ b/compiler/rustc_codegen_llvm/src/back/write.rs @@ -560,6 +560,7 @@ pub(crate) enum AutodiffStage { pub(crate) unsafe fn llvm_optimize( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, module: &ModuleCodegen, thin_lto_buffer: Option<&mut *mut llvm::ThinLTOBuffer>, @@ -756,10 +757,9 @@ pub(crate) unsafe fn llvm_optimize( } } - let mut llvm_profiler = cgcx - .prof + let mut llvm_profiler = prof .llvm_recording_enabled() - .then(|| LlvmSelfProfiler::new(cgcx.prof.get_self_profiler().unwrap())); + .then(|| LlvmSelfProfiler::new(prof.get_self_profiler().unwrap())); let llvm_selfprofiler = llvm_profiler.as_mut().map(|s| s as *mut _ as *mut c_void).unwrap_or(std::ptr::null_mut()); @@ -878,7 +878,7 @@ pub(crate) unsafe fn llvm_optimize( &out_obj, None, llvm::FileType::ObjectFile, - &cgcx.prof, + prof, true, ); // We ignore cgcx.save_temps here and unconditionally always keep our `host.out` artifact. @@ -892,11 +892,12 @@ pub(crate) unsafe fn llvm_optimize( // Unsafe due to LLVM calls. 
pub(crate) fn optimize( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, ) { - let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name); + let _timer = prof.generic_activity_with_arg("LLVM_module_optimize", &*module.name); let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); let dcx = dcx.handle(); @@ -943,6 +944,7 @@ pub(crate) fn optimize( unsafe { llvm_optimize( cgcx, + prof, dcx, module, thin_lto_buffer.as_mut(), @@ -964,12 +966,12 @@ pub(crate) fn optimize( && let Some(thin_link_bitcode_filename) = bc_summary_out.file_name() { let summary_data = thin_lto_buffer.thin_link_data(); - cgcx.prof.artifact_size( + prof.artifact_size( "llvm_bitcode_summary", thin_link_bitcode_filename.to_string_lossy(), summary_data.len() as u64, ); - let _timer = cgcx.prof.generic_activity_with_arg( + let _timer = prof.generic_activity_with_arg( "LLVM_module_codegen_emit_bitcode_summary", &*module.name, ); @@ -983,11 +985,12 @@ pub(crate) fn optimize( pub(crate) fn codegen( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, ) -> CompiledModule { - let _timer = cgcx.prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name); + let _timer = prof.generic_activity_with_arg("LLVM_module_codegen", &*module.name); let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); let dcx = dcx.handle(); @@ -1026,18 +1029,17 @@ pub(crate) fn codegen( if config.bitcode_needed() { if config.emit_bc || config.emit_obj == EmitObj::Bitcode { let thin = { - let _timer = cgcx.prof.generic_activity_with_arg( + let _timer = prof.generic_activity_with_arg( "LLVM_module_codegen_make_bitcode", &*module.name, ); ThinBuffer::new(llmod, config.emit_thin_lto) }; let data = thin.data(); - let _timer = cgcx - .prof + let _timer = prof .generic_activity_with_arg("LLVM_module_codegen_emit_bitcode", &*module.name); if let Some(bitcode_filename) = bc_out.file_name() { - cgcx.prof.artifact_size( + prof.artifact_size( "llvm_bitcode", bitcode_filename.to_string_lossy(), data.len() as u64, @@ -1049,8 +1051,7 @@ pub(crate) fn codegen( } if config.embed_bitcode() && module.kind == ModuleKind::Regular { - let _timer = cgcx - .prof + let _timer = prof .generic_activity_with_arg("LLVM_module_codegen_embed_bitcode", &*module.name); let thin_bc = module.thin_lto_buffer.as_deref().expect("cannot find embedded bitcode"); @@ -1060,7 +1061,7 @@ pub(crate) fn codegen( if config.emit_ir { let _timer = - cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name); + prof.generic_activity_with_arg("LLVM_module_codegen_emit_ir", &*module.name); let out = cgcx.output_filenames.temp_path_for_cgu( OutputType::LlvmAssembly, &module.name, @@ -1098,7 +1099,7 @@ pub(crate) fn codegen( unsafe { llvm::LLVMRustPrintModule(llmod, out_c.as_ptr(), demangle_callback) }; if result == llvm::LLVMRustResult::Success { - record_artifact_size(&cgcx.prof, "llvm_ir", &out); + record_artifact_size(prof, "llvm_ir", &out); } result @@ -1108,7 +1109,7 @@ pub(crate) fn codegen( if config.emit_asm { let _timer = - cgcx.prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name); + prof.generic_activity_with_arg("LLVM_module_codegen_emit_asm", &*module.name); let path = cgcx.output_filenames.temp_path_for_cgu( OutputType::Assembly, &module.name, @@ -1132,16 +1133,15 @@ pub(crate) fn codegen( &path, None, llvm::FileType::AssemblyFile, - &cgcx.prof, + 
prof, config.verify_llvm_ir, ); } match config.emit_obj { EmitObj::ObjectCode(_) => { - let _timer = cgcx - .prof - .generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name); + let _timer = + prof.generic_activity_with_arg("LLVM_module_codegen_emit_obj", &*module.name); let dwo_out = cgcx .output_filenames @@ -1168,7 +1168,7 @@ pub(crate) fn codegen( &obj_out, dwo_out, llvm::FileType::ObjectFile, - &cgcx.prof, + prof, config.verify_llvm_ir, ); } @@ -1188,7 +1188,7 @@ pub(crate) fn codegen( EmitObj::None => {} } - record_llvm_cgu_instructions_stats(&cgcx.prof, &module.name, llmod); + record_llvm_cgu_instructions_stats(prof, &module.name, llmod); } // `.dwo` files are only emitted if: diff --git a/compiler/rustc_codegen_llvm/src/lib.rs b/compiler/rustc_codegen_llvm/src/lib.rs index 577c8a98b18c6..c92de64a3349f 100644 --- a/compiler/rustc_codegen_llvm/src/lib.rs +++ b/compiler/rustc_codegen_llvm/src/lib.rs @@ -35,6 +35,7 @@ use rustc_codegen_ssa::back::write::{ use rustc_codegen_ssa::traits::*; use rustc_codegen_ssa::{CodegenResults, CompiledModule, ModuleCodegen, TargetConfig}; use rustc_data_structures::fx::FxIndexMap; +use rustc_data_structures::profiling::SelfProfilerRef; use rustc_errors::{DiagCtxt, DiagCtxtHandle}; use rustc_metadata::EncodedMetadata; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; @@ -163,6 +164,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { } fn run_and_optimize_fat_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], @@ -171,6 +173,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { ) -> ModuleCodegen { let mut module = back::lto::run_fat( cgcx, + prof, shared_emitter, tm_factory, exported_symbols_for_lto, @@ -180,12 +183,13 @@ impl WriteBackendMethods for LlvmCodegenBackend { let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); let dcx = dcx.handle(); - back::lto::run_pass_manager(cgcx, dcx, &mut module, false); + back::lto::run_pass_manager(cgcx, prof, dcx, &mut module, false); module } fn run_thin_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -194,6 +198,7 @@ impl WriteBackendMethods for LlvmCodegenBackend { ) -> (Vec>, Vec) { back::lto::run_thin( cgcx, + prof, dcx, exported_symbols_for_lto, each_linked_rlib_for_lto, @@ -203,27 +208,30 @@ impl WriteBackendMethods for LlvmCodegenBackend { } fn optimize( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, ) { - back::write::optimize(cgcx, shared_emitter, module, config) + back::write::optimize(cgcx, prof, shared_emitter, module, config) } fn optimize_thin( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, tm_factory: TargetMachineFactoryFn, thin: ThinModule, ) -> ModuleCodegen { - back::lto::optimize_thin_module(cgcx, shared_emitter, tm_factory, thin) + back::lto::optimize_thin_module(cgcx, prof, shared_emitter, tm_factory, thin) } fn codegen( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, ) -> CompiledModule { - back::write::codegen(cgcx, shared_emitter, module, config) + back::write::codegen(cgcx, prof, shared_emitter, module, config) } fn prepare_thin(module: ModuleCodegen) -> (String, Self::ThinBuffer) { back::lto::prepare_thin(module) diff --git 
a/compiler/rustc_codegen_ssa/src/back/write.rs b/compiler/rustc_codegen_ssa/src/back/write.rs index c814f8db521cf..87a043fbdf245 100644 --- a/compiler/rustc_codegen_ssa/src/back/write.rs +++ b/compiler/rustc_codegen_ssa/src/back/write.rs @@ -23,6 +23,7 @@ use rustc_hir::find_attr; use rustc_incremental::{ copy_cgu_workproduct_to_incr_comp_cache_dir, in_incr_comp_dir, in_incr_comp_dir_sess, }; +use rustc_macros::{Decodable, Encodable}; use rustc_metadata::fs::copy_to_stdout; use rustc_middle::bug; use rustc_middle::dep_graph::{WorkProduct, WorkProductId}; @@ -50,7 +51,7 @@ use crate::{ const PRE_LTO_BC_EXT: &str = "pre-lto.bc"; /// What kind of object file to emit. -#[derive(Clone, Copy, PartialEq)] +#[derive(Clone, Copy, PartialEq, Encodable, Decodable)] pub enum EmitObj { // No object file. None, @@ -64,7 +65,7 @@ pub enum EmitObj { } /// What kind of llvm bitcode section to embed in an object file. -#[derive(Clone, Copy, PartialEq)] +#[derive(Clone, Copy, PartialEq, Encodable, Decodable)] pub enum BitcodeSection { // No bitcode section. None, @@ -74,6 +75,7 @@ pub enum BitcodeSection { } /// Module-specific configuration for `optimize_and_codegen`. +#[derive(Encodable, Decodable)] pub struct ModuleConfig { /// Names of additional optimization passes to run. pub passes: Vec, @@ -319,10 +321,9 @@ pub type TargetMachineFactoryFn = Arc< >; /// Additional resources used by optimize_and_codegen (not module specific) -#[derive(Clone)] +#[derive(Clone, Encodable, Decodable)] pub struct CodegenContext { // Resources needed when running LTO - pub prof: SelfProfilerRef, pub lto: Lto, pub use_linker_plugin_lto: bool, pub dylib_lto: bool, @@ -363,16 +364,18 @@ pub struct CodegenContext { fn generate_thin_lto_work( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], needs_thin_lto: Vec<(String, B::ThinBuffer)>, import_only_modules: Vec<(SerializedModule, WorkProduct)>, ) -> Vec<(ThinLtoWorkItem, u64)> { - let _prof_timer = cgcx.prof.generic_activity("codegen_thin_generate_lto_work"); + let _prof_timer = prof.generic_activity("codegen_thin_generate_lto_work"); let (lto_modules, copy_jobs) = B::run_thin_lto( cgcx, + prof, dcx, exported_symbols_for_lto, each_linked_rlib_for_lto, @@ -841,12 +844,13 @@ pub(crate) fn compute_per_cgu_lto_type( fn execute_optimize_work_item( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: SharedEmitter, mut module: ModuleCodegen, ) -> WorkItemResult { - let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_optimize", &*module.name); + let _timer = prof.generic_activity_with_arg("codegen_module_optimize", &*module.name); - B::optimize(cgcx, &shared_emitter, &mut module, &cgcx.module_config); + B::optimize(cgcx, prof, &shared_emitter, &mut module, &cgcx.module_config); // After we've done the initial round of optimizations we need to // decide whether to synchronously codegen this module or ship it @@ -867,7 +871,7 @@ fn execute_optimize_work_item( match lto_type { ComputedLtoType::No => { - let module = B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config); + let module = B::codegen(cgcx, &prof, &shared_emitter, module, &cgcx.module_config); WorkItemResult::Finished(module) } ComputedLtoType::Thin => { @@ -897,12 +901,12 @@ fn execute_optimize_work_item( fn execute_copy_from_cache_work_item( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: SharedEmitter, module: CachedModuleCodegen, ) -> CompiledModule { - let _timer = cgcx - 
.prof - .generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name); + let _timer = + prof.generic_activity_with_arg("codegen_copy_artifacts_from_incr_cache", &*module.name); let dcx = DiagCtxt::new(Box::new(shared_emitter)); let dcx = dcx.handle(); @@ -985,6 +989,7 @@ fn execute_copy_from_cache_work_item( fn do_fat_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: SharedEmitter, tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], @@ -992,7 +997,7 @@ fn do_fat_lto( mut needs_fat_lto: Vec>, import_only_modules: Vec<(SerializedModule, WorkProduct)>, ) -> CompiledModule { - let _timer = cgcx.prof.verbose_generic_activity("LLVM_fatlto"); + let _timer = prof.verbose_generic_activity("LLVM_fatlto"); let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); let dcx = dcx.handle(); @@ -1005,17 +1010,19 @@ fn do_fat_lto( let module = B::run_and_optimize_fat_lto( cgcx, + prof, &shared_emitter, tm_factory, exported_symbols_for_lto, each_linked_rlib_for_lto, needs_fat_lto, ); - B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config) + B::codegen(cgcx, prof, &shared_emitter, module, &cgcx.module_config) } -fn do_thin_lto<'a, B: ExtraBackendMethods>( - cgcx: &'a CodegenContext, +fn do_thin_lto( + cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: SharedEmitter, tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: Arc>, @@ -1026,7 +1033,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>( WorkProduct, )>, ) -> Vec { - let _timer = cgcx.prof.verbose_generic_activity("LLVM_thinlto"); + let _timer = prof.verbose_generic_activity("LLVM_thinlto"); let dcx = DiagCtxt::new(Box::new(shared_emitter.clone())); let dcx = dcx.handle(); @@ -1056,6 +1063,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>( // we don't worry about tokens. for (work, cost) in generate_thin_lto_work::( cgcx, + prof, dcx, &exported_symbols_for_lto, &each_linked_rlib_for_lto, @@ -1100,6 +1108,7 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>( { spawn_thin_lto_work( &cgcx, + prof, shared_emitter.clone(), Arc::clone(&tm_factory), coordinator_send.clone(), @@ -1166,14 +1175,15 @@ fn do_thin_lto<'a, B: ExtraBackendMethods>( fn execute_thin_lto_work_item( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: SharedEmitter, tm_factory: TargetMachineFactoryFn, module: lto::ThinModule, ) -> CompiledModule { - let _timer = cgcx.prof.generic_activity_with_arg("codegen_module_perform_lto", module.name()); + let _timer = prof.generic_activity_with_arg("codegen_module_perform_lto", module.name()); - let module = B::optimize_thin(cgcx, &shared_emitter, tm_factory, module); - B::codegen(cgcx, &shared_emitter, module, &cgcx.module_config) + let module = B::optimize_thin(cgcx, prof, &shared_emitter, tm_factory, module); + B::codegen(cgcx, prof, &shared_emitter, module, &cgcx.module_config) } /// Messages sent to the coordinator. 
@@ -1273,6 +1283,7 @@ fn start_executing_work( coordinator_send: Sender>, ) -> thread::JoinHandle, ()>> { let sess = tcx.sess; + let prof = sess.prof.clone(); let mut each_linked_rlib_for_lto = Vec::new(); let mut each_linked_rlib_file_for_lto = Vec::new(); @@ -1323,7 +1334,6 @@ fn start_executing_work( fewer_names: sess.fewer_names(), save_temps: sess.opts.cg.save_temps, time_trace: sess.opts.unstable_opts.llvm_time_trace, - prof: sess.prof.clone(), remark: sess.opts.cg.remark.clone(), remark_dir, incr_comp_session_dir: sess.incr_comp_session_dir_opt().map(|r| r.clone()), @@ -1524,7 +1534,7 @@ fn start_executing_work( let mut llvm_start_time: Option> = None; if let Some(allocator_module) = &mut allocator_module { - B::optimize(&cgcx, &shared_emitter, allocator_module, &allocator_config); + B::optimize(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config); } // Run the message loop while there's still anything that needs message @@ -1564,6 +1574,7 @@ fn start_executing_work( main_thread_state = MainThreadState::Lending; spawn_work( &cgcx, + &prof, shared_emitter.clone(), coordinator_send.clone(), &mut llvm_start_time, @@ -1588,6 +1599,7 @@ fn start_executing_work( main_thread_state = MainThreadState::Lending; spawn_work( &cgcx, + &prof, shared_emitter.clone(), coordinator_send.clone(), &mut llvm_start_time, @@ -1630,6 +1642,7 @@ fn start_executing_work( { spawn_work( &cgcx, + &prof, shared_emitter.clone(), coordinator_send.clone(), &mut llvm_start_time, @@ -1788,6 +1801,7 @@ fn start_executing_work( if cgcx.lto == Lto::ThinLocal { compiled_modules.extend(do_thin_lto::( &cgcx, + &prof, shared_emitter.clone(), tm_factory, exported_symbols_for_lto, @@ -1814,7 +1828,7 @@ fn start_executing_work( Ok(MaybeLtoModules::NoLto { modules: compiled_modules, allocator_module: allocator_module.map(|allocator_module| { - B::codegen(&cgcx, &shared_emitter, allocator_module, &allocator_config) + B::codegen(&cgcx, &prof, &shared_emitter, allocator_module, &allocator_config) }), }) }) @@ -1883,23 +1897,25 @@ fn start_executing_work( pub(crate) struct WorkerFatalError; fn spawn_work<'a, B: ExtraBackendMethods>( - cgcx: &'a CodegenContext, + cgcx: &CodegenContext, + prof: &'a SelfProfilerRef, shared_emitter: SharedEmitter, coordinator_send: Sender>, llvm_start_time: &mut Option>, work: WorkItem, ) { if llvm_start_time.is_none() { - *llvm_start_time = Some(cgcx.prof.verbose_generic_activity("LLVM_passes")); + *llvm_start_time = Some(prof.verbose_generic_activity("LLVM_passes")); } let cgcx = cgcx.clone(); + let prof = prof.clone(); B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || { let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work { - WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, shared_emitter, m), + WorkItem::Optimize(m) => execute_optimize_work_item(&cgcx, &prof, shared_emitter, m), WorkItem::CopyPostLtoArtifacts(m) => WorkItemResult::Finished( - execute_copy_from_cache_work_item(&cgcx, shared_emitter, m), + execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m), ), })); @@ -1920,22 +1936,24 @@ fn spawn_work<'a, B: ExtraBackendMethods>( .expect("failed to spawn work thread"); } -fn spawn_thin_lto_work<'a, B: ExtraBackendMethods>( - cgcx: &'a CodegenContext, +fn spawn_thin_lto_work( + cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: SharedEmitter, tm_factory: TargetMachineFactoryFn, coordinator_send: Sender, work: ThinLtoWorkItem, ) { let cgcx = cgcx.clone(); + let prof = prof.clone(); 
B::spawn_named_thread(cgcx.time_trace, work.short_description(), move || { let result = std::panic::catch_unwind(AssertUnwindSafe(|| match work { ThinLtoWorkItem::CopyPostLtoArtifacts(m) => { - execute_copy_from_cache_work_item(&cgcx, shared_emitter, m) + execute_copy_from_cache_work_item(&cgcx, &prof, shared_emitter, m) } ThinLtoWorkItem::ThinLto(m) => { - execute_thin_lto_work_item(&cgcx, shared_emitter, tm_factory, m) + execute_thin_lto_work_item(&cgcx, &prof, shared_emitter, tm_factory, m) } })); @@ -2183,6 +2201,7 @@ impl OngoingCodegen { CompiledModules { modules: vec![do_fat_lto( &cgcx, + &sess.prof, shared_emitter, tm_factory, &exported_symbols_for_lto, @@ -2209,6 +2228,7 @@ impl OngoingCodegen { CompiledModules { modules: do_thin_lto::( &cgcx, + &sess.prof, shared_emitter, tm_factory, exported_symbols_for_lto, diff --git a/compiler/rustc_codegen_ssa/src/traits/write.rs b/compiler/rustc_codegen_ssa/src/traits/write.rs index 0232ba39ac1f3..5f5d0ac5d9fc4 100644 --- a/compiler/rustc_codegen_ssa/src/traits/write.rs +++ b/compiler/rustc_codegen_ssa/src/traits/write.rs @@ -1,5 +1,6 @@ use std::path::PathBuf; +use rustc_data_structures::profiling::SelfProfilerRef; use rustc_errors::DiagCtxtHandle; use rustc_middle::dep_graph::WorkProduct; @@ -20,6 +21,7 @@ pub trait WriteBackendMethods: Clone + 'static { /// if necessary and running any further optimizations fn run_and_optimize_fat_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, tm_factory: TargetMachineFactoryFn, exported_symbols_for_lto: &[String], @@ -31,6 +33,7 @@ pub trait WriteBackendMethods: Clone + 'static { /// can simply be copied over from the incr. comp. cache. fn run_thin_lto( cgcx: &CodegenContext, + prof: &SelfProfilerRef, dcx: DiagCtxtHandle<'_>, exported_symbols_for_lto: &[String], each_linked_rlib_for_lto: &[PathBuf], @@ -41,18 +44,21 @@ pub trait WriteBackendMethods: Clone + 'static { fn print_statistics(&self); fn optimize( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: &mut ModuleCodegen, config: &ModuleConfig, ); fn optimize_thin( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, tm_factory: TargetMachineFactoryFn, thin: ThinModule, ) -> ModuleCodegen; fn codegen( cgcx: &CodegenContext, + prof: &SelfProfilerRef, shared_emitter: &SharedEmitter, module: ModuleCodegen, config: &ModuleConfig, diff --git a/compiler/rustc_hir_typeck/src/coercion.rs b/compiler/rustc_hir_typeck/src/coercion.rs index 36a07b361d9de..88d2e80f1521e 100644 --- a/compiler/rustc_hir_typeck/src/coercion.rs +++ b/compiler/rustc_hir_typeck/src/coercion.rs @@ -644,7 +644,7 @@ impl<'f, 'tcx> Coerce<'f, 'tcx> { .infcx .visit_proof_tree( Goal::new(self.tcx, self.param_env, pred), - &mut CoerceVisitor { fcx: self.fcx, span: self.cause.span }, + &mut CoerceVisitor { fcx: self.fcx, span: self.cause.span, errored: false }, ) .is_break() { @@ -1961,6 +1961,10 @@ impl<'tcx> CoerceMany<'tcx> { struct CoerceVisitor<'a, 'tcx> { fcx: &'a FnCtxt<'a, 'tcx>, span: Span, + /// Whether the coercion is impossible. If so we sometimes still try to + /// coerce in these cases to emit better errors. This changes the behavior + /// when hitting the recursion limit. + errored: bool, } impl<'tcx> ProofTreeVisitor<'tcx> for CoerceVisitor<'_, 'tcx> { @@ -1987,6 +1991,7 @@ impl<'tcx> ProofTreeVisitor<'tcx> for CoerceVisitor<'_, 'tcx> { // If we prove the `Unsize` or `CoerceUnsized` goal, continue recursing. 
Ok(Certainty::Yes) => ControlFlow::Continue(()), Err(NoSolution) => { + self.errored = true; // Even if we find no solution, continue recursing if we find a single candidate // for which we're shallowly certain it holds to get the right error source. if let [only_candidate] = &goal.candidates()[..] @@ -2019,4 +2024,15 @@ impl<'tcx> ProofTreeVisitor<'tcx> for CoerceVisitor<'_, 'tcx> { } } } + + fn on_recursion_limit(&mut self) -> Self::Result { + if self.errored { + // This prevents accidentally committing unfulfilled unsized coercions while trying to + // find the error source for diagnostics. + // See https://github.com/rust-lang/trait-system-refactor-initiative/issues/266. + ControlFlow::Break(()) + } else { + ControlFlow::Continue(()) + } + } } diff --git a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp index 97f95ac01e861..7aa4ddea78e1d 100644 --- a/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp +++ b/compiler/rustc_llvm/llvm-wrapper/PassWrapper.cpp @@ -812,12 +812,12 @@ extern "C" LLVMRustResult LLVMRustOptimize( auto ThinLTOBuffer = std::make_unique(); raw_string_ostream ThinLTODataOS(ThinLTOBuffer->data); raw_string_ostream ThinLinkDataOS(ThinLTOBuffer->thin_link_data); + bool IsLTO = OptStage == LLVMRustOptStage::ThinLTO || + OptStage == LLVMRustOptStage::FatLTO; if (!NoPrepopulatePasses) { // The pre-link pipelines don't support O0 and require using // buildO0DefaultPipeline() instead. At the same time, the LTO pipelines do // support O0 and using them is required. - bool IsLTO = OptStage == LLVMRustOptStage::ThinLTO || - OptStage == LLVMRustOptStage::FatLTO; if (OptLevel == OptimizationLevel::O0 && !IsLTO) { for (const auto &C : PipelineStartEPCallbacks) PB.registerPipelineStartEPCallback(C); @@ -908,7 +908,10 @@ extern "C" LLVMRustResult LLVMRustOptimize( // now load "-enzyme" pass: // With dlopen, ENZYME macro may not be defined, so check EnzymePtr directly - if (EnzymePtr) { + // In the case of debug builds with multiple codegen units, we might not + // have all function definitions available during the early compiler + // invocations. We therefore wait for the final lto step to run Enzyme. + if (EnzymePtr && IsLTO) { if (PrintBeforeEnzyme) { // Handle the Rust flag `-Zautodiff=PrintModBefore`. @@ -929,6 +932,7 @@ extern "C" LLVMRustResult LLVMRustOptimize( MPM.addPass(PrintModulePass(outs(), Banner, true, false)); } } + if (PrintPasses) { // Print all passes from the PM: std::string Pipeline; diff --git a/compiler/rustc_macros/src/query.rs b/compiler/rustc_macros/src/query.rs index 6b5c427c8bc1b..5b869dc3409a7 100644 --- a/compiler/rustc_macros/src/query.rs +++ b/compiler/rustc_macros/src/query.rs @@ -1,4 +1,5 @@ use proc_macro::TokenStream; +use proc_macro2::Span; use quote::{quote, quote_spanned}; use syn::parse::{Parse, ParseStream, Result}; use syn::punctuated::Punctuated; @@ -62,7 +63,7 @@ impl Parse for Query { // If there are no doc-comments, give at least some idea of what // it does by showing the query description. if doc_comments.is_empty() { - doc_comments.push(doc_comment_from_desc(&modifiers.desc.1)?); + doc_comments.push(doc_comment_from_desc(&modifiers.desc.expr_list)?); } Ok(Query { doc_comments, modifiers, name, key, arg, result }) @@ -82,15 +83,27 @@ impl Parse for List { } } +struct Desc { + modifier: Ident, + tcx_binding: Option, + expr_list: Punctuated, +} + +struct CacheOnDiskIf { + modifier: Ident, + tcx_binding: Option, + block: Block, +} + struct QueryModifiers { /// The description of the query. 
- desc: (Option&lt;Ident&gt;, Punctuated&lt;Expr, Token![,]&gt;), + desc: Desc, /// Use this type for the in-memory cache. arena_cache: Option&lt;Ident&gt;, /// Cache the query to disk if the `Block` returns true. - cache_on_disk_if: Option&lt;(Option&lt;Pat&gt;, Block)&gt;, + cache_on_disk_if: Option&lt;CacheOnDiskIf&gt;, /// A cycle error for this query aborting the compilation with a fatal error. cycle_fatal: Option&lt;Ident&gt;, @@ -164,7 +177,7 @@ fn parse_query_modifiers(input: ParseStream&lt;'_&gt;) -> Result&lt;QueryModifiers&gt; { // `desc { |tcx| "foo {}", tcx.item_path(key) }` let attr_content; braced!(attr_content in input); - let tcx = if attr_content.peek(Token![|]) { + let tcx_binding = if attr_content.peek(Token![|]) { attr_content.parse::&lt;Token![|]&gt;()?; let tcx = attr_content.parse()?; attr_content.parse::&lt;Token![|]&gt;()?; @@ -172,15 +185,15 @@ fn parse_query_modifiers(input: ParseStream&lt;'_&gt;) -> Result&lt;QueryModifiers&gt; { } else { None }; - let list = attr_content.parse_terminated(Expr::parse, Token![,])?; - try_insert!(desc = (tcx, list)); + let expr_list = attr_content.parse_terminated(Expr::parse, Token![,])?; + try_insert!(desc = Desc { modifier, tcx_binding, expr_list }); } else if modifier == "cache_on_disk_if" { // Parse a cache-on-disk modifier like: // // `cache_on_disk_if { true }` // `cache_on_disk_if { key.is_local() }` // `cache_on_disk_if(tcx) { tcx.is_typeck_child(key.to_def_id()) }` - let args = if input.peek(token::Paren) { + let tcx_binding = if input.peek(token::Paren) { let args; parenthesized!(args in input); let tcx = Pat::parse_single(&args)?; @@ -189,7 +202,7 @@ fn parse_query_modifiers(input: ParseStream&lt;'_&gt;) -> Result&lt;QueryModifiers&gt; { None }; let block = input.parse()?; - try_insert!(cache_on_disk_if = (args, block)); + try_insert!(cache_on_disk_if = CacheOnDiskIf { modifier, tcx_binding, block }); } else if modifier == "arena_cache" { try_insert!(arena_cache = modifier); } else if modifier == "cycle_fatal" { @@ -275,44 +288,32 @@ struct HelperTokenStreams { } fn make_helpers_for_query(query: &Query, streams: &mut HelperTokenStreams) { - let Query { name, key, modifiers, .. } = &query; + let Query { name, key, modifiers, arg, .. } = &query; - // This dead code exists to instruct rust-analyzer about the link between the `rustc_queries` - // query names and the corresponding produced provider. The issue is that by nature of this - // macro producing a higher order macro that has all its token in the macro declaration we lose - // any meaningful spans, resulting in rust-analyzer being unable to make the connection between - // the query name and the corresponding providers field. The trick to fix this is to have - // `rustc_queries` emit a field access with the given name's span which allows it to successfully - // show references / go to definition to the corresponding provider assignment which is usually - // the more interesting place. - let ra_hint = quote! { - let crate::query::Providers { #name: _, .. }; - }; + // Replace span for `name` to make rust-analyzer ignore it. + let mut erased_name = name.clone(); + erased_name.set_span(Span::call_site()); // Generate a function to check whether we should cache the query to disk, for some key. - if let Some((args, expr)) = modifiers.cache_on_disk_if.as_ref() { - let tcx = args.as_ref().map(|t| quote! { #t }).unwrap_or_else(|| quote! { _ }); - // expr is a `Block`, meaning that `{ #expr }` gets expanded - // to `{ { stmts... } }`, which triggers the `unused_braces` lint. + if let Some(CacheOnDiskIf { tcx_binding, block, .. }) = modifiers.cache_on_disk_if.as_ref() { + let tcx = tcx_binding.as_ref().map(|t| quote! { #t }).unwrap_or_else(|| quote!
{ _ }); // we're taking `key` by reference, but some rustc types usually prefer being passed by value streams.cache_on_disk_if_fns_stream.extend(quote! { - #[allow(unused_variables, unused_braces, rustc::pass_by_value)] + #[allow(unused_variables, rustc::pass_by_value)] #[inline] - pub fn #name<'tcx>(#tcx: TyCtxt<'tcx>, #key: &crate::queries::#name::Key<'tcx>) -> bool { - #ra_hint - #expr - } + pub fn #erased_name<'tcx>(#tcx: TyCtxt<'tcx>, #key: &crate::queries::#name::Key<'tcx>) -> bool + #block }); } - let (tcx, desc) = &modifiers.desc; - let tcx = tcx.as_ref().map_or_else(|| quote! { _ }, |t| quote! { #t }); + let Desc { tcx_binding, expr_list, .. } = &modifiers.desc; + let tcx = tcx_binding.as_ref().map_or_else(|| quote! { _ }, |t| quote! { #t }); let desc = quote! { #[allow(unused_variables)] - pub fn #name<'tcx>(tcx: TyCtxt<'tcx>, key: crate::queries::#name::Key<'tcx>) -> String { + pub fn #erased_name<'tcx>(tcx: TyCtxt<'tcx>, key: #arg) -> String { let (#tcx, #key) = (tcx, key); - format!(#desc) + format!(#expr_list) } }; @@ -321,12 +322,88 @@ fn make_helpers_for_query(query: &Query, streams: &mut HelperTokenStreams) { }); } +/// Add hints for rust-analyzer +fn add_to_analyzer_stream(query: &Query, analyzer_stream: &mut proc_macro2::TokenStream) { + // Add links to relevant modifiers + + let modifiers = &query.modifiers; + + let mut modifiers_stream = quote! {}; + + let name = &modifiers.desc.modifier; + modifiers_stream.extend(quote! { + crate::query::modifiers::#name; + }); + + if let Some(CacheOnDiskIf { modifier, .. }) = &modifiers.cache_on_disk_if { + modifiers_stream.extend(quote! { + crate::query::modifiers::#modifier; + }); + } + + macro_rules! doc_link { + ( $( $modifier:ident ),+ $(,)? ) => { + $( + if let Some(name) = &modifiers.$modifier { + modifiers_stream.extend(quote! { + crate::query::modifiers::#name; + }); + } + )+ + } + } + + doc_link!( + arena_cache, + cycle_fatal, + cycle_delay_bug, + cycle_stash, + no_hash, + anon, + eval_always, + depth_limit, + separate_provide_extern, + feedable, + return_result_from_ensure_ok, + ); + + let name = &query.name; + + // Replace span for `name` to make rust-analyzer ignore it. + let mut erased_name = name.clone(); + erased_name.set_span(Span::call_site()); + + let result = &query.result; + + // This dead code exists to instruct rust-analyzer about the link between the `rustc_queries` + // query names and the corresponding produced provider. The issue is that by nature of this + // macro producing a higher order macro that has all its token in the macro declaration we lose + // any meaningful spans, resulting in rust-analyzer being unable to make the connection between + // the query name and the corresponding providers field. The trick to fix this is to have + // `rustc_queries` emit a field access with the given name's span which allows it to successfully + // show references / go to definition to the corresponding provider assignment which is usually + // the more interesting place. + let ra_hint = quote! { + let crate::query::Providers { #name: _, .. }; + }; + + analyzer_stream.extend(quote! { + #[inline(always)] + fn #erased_name<'tcx>() #result { + #ra_hint + #modifiers_stream + loop {} + } + }); +} + pub(super) fn rustc_queries(input: TokenStream) -> TokenStream { let queries = parse_macro_input!(input as List); let mut query_stream = quote! {}; let mut helpers = HelperTokenStreams::default(); let mut feedable_queries = quote! {}; + let mut analyzer_stream = quote! {}; let mut errors = quote! {}; macro_rules! 
assert { @@ -409,6 +486,7 @@ pub(super) fn rustc_queries(input: TokenStream) -> TokenStream { }); } + add_to_analyzer_stream(&query, &mut analyzer_stream); make_helpers_for_query(&query, &mut helpers); } @@ -442,6 +520,12 @@ pub(super) fn rustc_queries(input: TokenStream) -> TokenStream { } } + // Add hints for rust-analyzer + mod _analyzer_hints { + use super::*; + #analyzer_stream + } + /// Functions that format a human-readable description of each query /// and its key, as specified by the `desc` query modifier. /// diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 24a38e70ff6f2..62d5c1f9dd208 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs @@ -13,6 +13,7 @@ mod keys; pub mod on_disk_cache; #[macro_use] pub mod plumbing; +pub(crate) mod modifiers; pub fn describe_as_module(def_id: impl Into, tcx: TyCtxt<'_>) -> String { let def_id = def_id.into(); diff --git a/compiler/rustc_middle/src/query/modifiers.rs b/compiler/rustc_middle/src/query/modifiers.rs new file mode 100644 index 0000000000000..81b9f0da6446c --- /dev/null +++ b/compiler/rustc_middle/src/query/modifiers.rs @@ -0,0 +1,77 @@ +//! This contains documentation which is linked from query modifiers used in the `rustc_queries!` proc macro. +#![allow(unused, non_camel_case_types)] +// FIXME: Update and clarify documentation for these modifiers. + +/// # `desc` query modifier +/// +/// The description of the query. This modifier is required on every query. +pub struct desc; + +/// # `arena_cache` query modifier +/// +/// Use this type for the in-memory cache. +pub struct arena_cache; + +/// # `cache_on_disk_if` query modifier +/// +/// Cache the query to disk if the `Block` returns true. +pub struct cache_on_disk_if; + +/// # `cycle_fatal` query modifier +/// +/// A cycle error for this query aborting the compilation with a fatal error. +pub struct cycle_fatal; + +/// # `cycle_delay_bug` query modifier +/// +/// A cycle error results in a delay_bug call +pub struct cycle_delay_bug; + +/// # `cycle_stash` query modifier +/// +/// A cycle error results in a stashed cycle error that can be unstashed and canceled later +pub struct cycle_stash; + +/// # `no_hash` query modifier +/// +/// Don't hash the result, instead just mark a query red if it runs +pub struct no_hash; + +/// # `anon` query modifier +/// +/// Generate a dep node based on the dependencies of the query +pub struct anon; + +/// # `eval_always` query modifier +/// +/// Always evaluate the query, ignoring its dependencies +pub struct eval_always; + +/// # `depth_limit` query modifier +/// +/// Whether the query has a call depth limit +pub struct depth_limit; + +/// # `separate_provide_extern` query modifier +/// +/// Use a separate query provider for local and extern crates +pub struct separate_provide_extern; + +/// # `feedable` query modifier +/// +/// Generate a `feed` method to set the query's value from another query. +pub struct feedable; + +/// # `return_result_from_ensure_ok` query modifier +/// +/// When this query is called via `tcx.ensure_ok()`, it returns +/// `Result<(), ErrorGuaranteed>` instead of `()`. If the query needs to +/// be executed, and that execution returns an error, the error result is +/// returned to the caller. +/// +/// If execution is skipped, a synthetic `Ok(())` is returned, on the +/// assumption that a query with all-green inputs must have succeeded. 
+/// +/// Can only be applied to queries with a return value of +/// `Result<_, ErrorGuaranteed>`. +pub struct return_result_from_ensure_ok; diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index 3c844eac1fca0..9a709d2c43c80 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs @@ -462,10 +462,14 @@ macro_rules! define_callbacks { } pub struct Providers { - $(pub $name: for<'tcx> fn( - TyCtxt<'tcx>, - $name::LocalKey<'tcx>, - ) -> $name::ProvidedValue<'tcx>,)* + $( + /// This is the provider for the query. Use `Find references` on this to + /// navigate between the provider assignment and the query definition. + pub $name: for<'tcx> fn( + TyCtxt<'tcx>, + $name::LocalKey<'tcx>, + ) -> $name::ProvidedValue<'tcx>, + )* } pub struct ExternProviders { diff --git a/compiler/rustc_middle/src/traits/cache.rs b/compiler/rustc_middle/src/traits/cache.rs index ed41a69f97148..9391764bf1ce2 100644 --- a/compiler/rustc_middle/src/traits/cache.rs +++ b/compiler/rustc_middle/src/traits/cache.rs @@ -5,7 +5,8 @@ use std::hash::Hash; use rustc_data_structures::fx::FxHashMap; use rustc_data_structures::sync::Lock; -use crate::dep_graph::{DepContext, DepNodeIndex}; +use crate::dep_graph::DepNodeIndex; +use crate::ty::TyCtxt; pub struct WithDepNodeCache { hashmap: Lock>>, @@ -24,7 +25,7 @@ impl Default for WithDepNodeCache { } impl WithDepNodeCache { - pub fn get(&self, key: &Key, tcx: Tcx) -> Option { + pub fn get<'tcx>(&self, key: &Key, tcx: TyCtxt<'tcx>) -> Option { Some(self.hashmap.borrow().get(key)?.get(tcx)) } @@ -40,12 +41,12 @@ pub struct WithDepNode { } impl WithDepNode { - pub fn new(dep_node: DepNodeIndex, cached_value: T) -> Self { + pub(crate) fn new(dep_node: DepNodeIndex, cached_value: T) -> Self { WithDepNode { dep_node, cached_value } } - pub fn get(&self, tcx: Tcx) -> T { - tcx.dep_graph().read_index(self.dep_node); + pub(crate) fn get<'tcx>(&self, tcx: TyCtxt<'tcx>) -> T { + tcx.dep_graph.read_index(self.dep_node); self.cached_value.clone() } } diff --git a/compiler/rustc_parse/src/parser/expr.rs b/compiler/rustc_parse/src/parser/expr.rs index 05216be06ff56..5c44bfb0cf3f3 100644 --- a/compiler/rustc_parse/src/parser/expr.rs +++ b/compiler/rustc_parse/src/parser/expr.rs @@ -1524,7 +1524,7 @@ impl<'a> Parser<'a> { }, ) } else if this.check_inline_const(0) { - this.parse_const_block(lo) + this.parse_const_block(lo, false) } else if this.may_recover() && this.is_do_catch_block() { this.recover_do_catch() } else if this.is_try_block() { diff --git a/compiler/rustc_parse/src/parser/item.rs b/compiler/rustc_parse/src/parser/item.rs index 5b81acb0f91f0..b7ba92bac5249 100644 --- a/compiler/rustc_parse/src/parser/item.rs +++ b/compiler/rustc_parse/src/parser/item.rs @@ -534,7 +534,7 @@ impl<'a> Parser<'a> { match self.parse_delim_args() { // `( .. )` or `[ .. ]` (followed by `;`), or `{ .. }`. 
Ok(args) => { - self.eat_semi_for_macro_if_needed(&args); + self.eat_semi_for_macro_if_needed(&args, Some(&path)); self.complain_if_pub_macro(vis, false); Ok(MacCall { path, args }) } @@ -2392,7 +2392,7 @@ impl<'a> Parser<'a> { } let body = self.parse_delim_args()?; - self.eat_semi_for_macro_if_needed(&body); + self.eat_semi_for_macro_if_needed(&body, None); self.complain_if_pub_macro(vis, true); Ok(ItemKind::MacroDef( @@ -2417,13 +2417,13 @@ impl<'a> Parser<'a> { } } - fn eat_semi_for_macro_if_needed(&mut self, args: &DelimArgs) { + fn eat_semi_for_macro_if_needed(&mut self, args: &DelimArgs, path: Option<&Path>) { if args.need_semicolon() && !self.eat(exp!(Semi)) { - self.report_invalid_macro_expansion_item(args); + self.report_invalid_macro_expansion_item(args, path); } } - fn report_invalid_macro_expansion_item(&self, args: &DelimArgs) { + fn report_invalid_macro_expansion_item(&self, args: &DelimArgs, path: Option<&Path>) { let span = args.dspan.entire(); let mut err = self.dcx().struct_span_err( span, @@ -2433,17 +2433,32 @@ impl<'a> Parser<'a> { // macros within the same crate (that we can fix), which is sad. if !span.from_expansion() { let DelimSpan { open, close } = args.dspan; - err.multipart_suggestion( - "change the delimiters to curly braces", - vec![(open, "{".to_string()), (close, '}'.to_string())], - Applicability::MaybeIncorrect, - ); - err.span_suggestion( - span.with_neighbor(self.token.span).shrink_to_hi(), - "add a semicolon", - ';', - Applicability::MaybeIncorrect, - ); + // Check if this looks like `macro_rules!(name) { ... }` + // a common mistake when trying to define a macro. + if let Some(path) = path + && path.segments.first().is_some_and(|seg| seg.ident.name == sym::macro_rules) + && args.delim == Delimiter::Parenthesis + { + let replace = + if path.span.hi() + rustc_span::BytePos(1) < open.lo() { "" } else { " " }; + err.multipart_suggestion( + "to define a macro, remove the parentheses around the macro name", + vec![(open, replace.to_string()), (close, String::new())], + Applicability::MachineApplicable, + ); + } else { + err.multipart_suggestion( + "change the delimiters to curly braces", + vec![(open, "{".to_string()), (close, '}'.to_string())], + Applicability::MaybeIncorrect, + ); + err.span_suggestion( + span.with_neighbor(self.token.span).shrink_to_hi(), + "add a semicolon", + ';', + Applicability::MaybeIncorrect, + ); + } } err.emit(); } diff --git a/compiler/rustc_parse/src/parser/mod.rs b/compiler/rustc_parse/src/parser/mod.rs index a6b956b09bc16..f95fe61b0abdc 100644 --- a/compiler/rustc_parse/src/parser/mod.rs +++ b/compiler/rustc_parse/src/parser/mod.rs @@ -1270,7 +1270,7 @@ impl<'a> Parser<'a> { } /// Parses inline const expressions. - fn parse_const_block(&mut self, span: Span) -> PResult<'a, Box> { + fn parse_const_block(&mut self, span: Span, pat: bool) -> PResult<'a, Box> { self.expect_keyword(exp!(Const))?; let (attrs, blk) = self.parse_inner_attrs_and_block(None)?; let anon_const = AnonConst { @@ -1279,7 +1279,18 @@ impl<'a> Parser<'a> { mgca_disambiguation: MgcaDisambiguation::AnonConst, }; let blk_span = anon_const.value.span; - let kind = ExprKind::ConstBlock(anon_const); + let kind = if pat { + let guar = self + .dcx() + .struct_span_err(blk_span, "const blocks cannot be used as patterns") + .with_help( + "use a named `const`-item or an `if`-guard (`x if x == const { ... 
}`) instead", + ) + .emit(); + ExprKind::Err(guar) + } else { + ExprKind::ConstBlock(anon_const) + }; Ok(self.mk_expr_with_attrs(span.to(blk_span), kind, attrs)) } diff --git a/compiler/rustc_parse/src/parser/pat.rs b/compiler/rustc_parse/src/parser/pat.rs index c1e864985e190..bc73c3a2007a0 100644 --- a/compiler/rustc_parse/src/parser/pat.rs +++ b/compiler/rustc_parse/src/parser/pat.rs @@ -785,10 +785,8 @@ impl<'a> Parser<'a> { } else if self.eat_keyword(exp!(Box)) { self.parse_pat_box()? } else if self.check_inline_const(0) { - // Parse `const pat`. - // NOTE: This will always error later during AST lowering because - // inline const cannot be used as patterns. - let const_expr = self.parse_const_block(lo.to(self.token.span))?; + // Parse `const pat` + let const_expr = self.parse_const_block(lo.to(self.token.span), true)?; if let Some(re) = self.parse_range_end() { self.parse_pat_range_begin_with(const_expr, re)? @@ -1283,7 +1281,7 @@ impl<'a> Parser<'a> { .then_some(self.prev_token.span); let bound = if self.check_inline_const(0) { - self.parse_const_block(self.token.span) + self.parse_const_block(self.token.span, true) } else if self.check_path() { let lo = self.token.span; let (qself, path) = if self.eat_lt() { diff --git a/compiler/rustc_session/src/config.rs b/compiler/rustc_session/src/config.rs index 5e2671ef4ef6b..b278a6179fe7f 100644 --- a/compiler/rustc_session/src/config.rs +++ b/compiler/rustc_session/src/config.rs @@ -88,7 +88,7 @@ pub enum CFProtection { Full, } -#[derive(Clone, Copy, Debug, PartialEq, Hash, HashStable_Generic)] +#[derive(Clone, Copy, Debug, PartialEq, Hash, HashStable_Generic, Encodable, Decodable)] pub enum OptLevel { /// `-Copt-level=0` No, @@ -108,7 +108,7 @@ pub enum OptLevel { /// and taking other command line options into account. /// /// Note that linker plugin-based LTO is a different mechanism entirely. -#[derive(Clone, PartialEq)] +#[derive(Clone, PartialEq, Encodable, Decodable)] pub enum Lto { /// Don't do any LTO whatsoever. No, @@ -190,7 +190,7 @@ pub enum CoverageLevel { } // The different settings that the `-Z offload` flag can have. -#[derive(Clone, PartialEq, Hash, Debug)] +#[derive(Clone, PartialEq, Hash, Debug, Encodable, Decodable)] pub enum Offload { /// Entry point for `std::offload`, enables kernel compilation for a gpu device Device, @@ -201,7 +201,7 @@ pub enum Offload { } /// The different settings that the `-Z autodiff` flag can have. -#[derive(Clone, PartialEq, Hash, Debug)] +#[derive(Clone, PartialEq, Hash, Debug, Encodable, Decodable)] pub enum AutoDiff { /// Enable the autodiff opt pipeline Enable, @@ -528,7 +528,7 @@ impl FmtDebug { } } -#[derive(Clone, PartialEq, Hash, Debug)] +#[derive(Clone, PartialEq, Hash, Debug, Encodable, Decodable)] pub enum SwitchWithOptPath { Enabled(Option), Disabled, @@ -583,7 +583,7 @@ pub enum MirStripDebugInfo { /// DWARF provides a mechanism which allows the linker to skip the sections which don't require /// link-time relocation - either by putting those sections in DWARF object files, or by keeping /// them in the object file in such a way that the linker will skip them. -#[derive(Clone, Copy, Debug, PartialEq, Hash)] +#[derive(Clone, Copy, Debug, PartialEq, Hash, Encodable, Decodable)] pub enum SplitDwarfKind { /// Sections which do not require relocation are written into object file but ignored by the /// linker. 
@@ -1539,7 +1539,7 @@ pub enum EntryFnType { pub use rustc_hir::attrs::CrateType; -#[derive(Clone, Hash, Debug, PartialEq, Eq)] +#[derive(Clone, Hash, Debug, PartialEq, Eq, Encodable, Decodable)] pub enum Passes { Some(Vec), All, diff --git a/compiler/rustc_span/src/symbol.rs b/compiler/rustc_span/src/symbol.rs index b9a9d8029d286..03dd630c61a12 100644 --- a/compiler/rustc_span/src/symbol.rs +++ b/compiler/rustc_span/src/symbol.rs @@ -2300,6 +2300,7 @@ symbols! { three_way_compare, thumb2, thumb_mode: "thumb-mode", + tinyencoding, tmm_reg, to_owned_method, to_string, diff --git a/compiler/rustc_target/src/asm/avr.rs b/compiler/rustc_target/src/asm/avr.rs index 55d393c81d3ec..3d20734553a15 100644 --- a/compiler/rustc_target/src/asm/avr.rs +++ b/compiler/rustc_target/src/asm/avr.rs @@ -1,8 +1,10 @@ use std::fmt; -use rustc_span::Symbol; +use rustc_data_structures::fx::FxIndexSet; +use rustc_span::{Symbol, sym}; use super::{InlineAsmArch, InlineAsmType, ModifierInfo}; +use crate::spec::{RelocModel, Target}; def_reg_class! { Avr AvrInlineAsmRegClass { @@ -52,24 +54,44 @@ impl AvrInlineAsmRegClass { } } +pub(crate) fn is_tiny(target_features: &FxIndexSet) -> bool { + target_features.contains(&sym::tinyencoding) +} + +fn not_tiny( + _arch: InlineAsmArch, + _reloc_model: RelocModel, + target_features: &FxIndexSet, + _target: &Target, + _is_clobber: bool, +) -> Result<(), &'static str> { + if is_tiny(target_features) { + Err( + "on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM", + ) + } else { + Ok(()) + } +} + def_regs! { Avr AvrInlineAsmReg AvrInlineAsmRegClass { - r2: reg = ["r2"], - r3: reg = ["r3"], - r4: reg = ["r4"], - r5: reg = ["r5"], - r6: reg = ["r6"], - r7: reg = ["r7"], - r8: reg = ["r8"], - r9: reg = ["r9"], - r10: reg = ["r10"], - r11: reg = ["r11"], - r12: reg = ["r12"], - r13: reg = ["r13"], - r14: reg = ["r14"], - r15: reg = ["r15"], - r16: reg, reg_upper = ["r16"], - r17: reg, reg_upper = ["r17"], + r2: reg = ["r2"] % not_tiny, + r3: reg = ["r3"] % not_tiny, + r4: reg = ["r4"] % not_tiny, + r5: reg = ["r5"] % not_tiny, + r6: reg = ["r6"] % not_tiny, + r7: reg = ["r7"] % not_tiny, + r8: reg = ["r8"] % not_tiny, + r9: reg = ["r9"] % not_tiny, + r10: reg = ["r10"] % not_tiny, + r11: reg = ["r11"] % not_tiny, + r12: reg = ["r12"] % not_tiny, + r13: reg = ["r13"] % not_tiny, + r14: reg = ["r14"] % not_tiny, + r15: reg = ["r15"] % not_tiny, + r16: reg, reg_upper = ["r16"] % not_tiny, + r17: reg, reg_upper = ["r17"] % not_tiny, r18: reg, reg_upper = ["r18"], r19: reg, reg_upper = ["r19"], r20: reg, reg_upper = ["r20"], @@ -83,14 +105,14 @@ def_regs! 
{ r30: reg, reg_upper = ["r30", "ZL"], r31: reg, reg_upper = ["r31", "ZH"], - r3r2: reg_pair = ["r3r2"], - r5r4: reg_pair = ["r5r4"], - r7r6: reg_pair = ["r7r6"], - r9r8: reg_pair = ["r9r8"], - r11r10: reg_pair = ["r11r10"], - r13r12: reg_pair = ["r13r12"], - r15r14: reg_pair = ["r15r14"], - r17r16: reg_pair = ["r17r16"], + r3r2: reg_pair = ["r3r2"] % not_tiny, + r5r4: reg_pair = ["r5r4"] % not_tiny, + r7r6: reg_pair = ["r7r6"] % not_tiny, + r9r8: reg_pair = ["r9r8"] % not_tiny, + r11r10: reg_pair = ["r11r10"] % not_tiny, + r13r12: reg_pair = ["r13r12"] % not_tiny, + r15r14: reg_pair = ["r15r14"] % not_tiny, + r17r16: reg_pair = ["r17r16"] % not_tiny, r19r18: reg_pair = ["r19r18"], r21r20: reg_pair = ["r21r20"], r23r22: reg_pair = ["r23r22"], diff --git a/compiler/rustc_target/src/asm/mod.rs b/compiler/rustc_target/src/asm/mod.rs index a10699bbce884..d465973766095 100644 --- a/compiler/rustc_target/src/asm/mod.rs +++ b/compiler/rustc_target/src/asm/mod.rs @@ -932,6 +932,7 @@ pub enum InlineAsmClobberAbi { AArch64NoX18, Arm64EC, Avr, + AvrTiny, RiscV, RiscVE, LoongArch, @@ -993,7 +994,11 @@ impl InlineAsmClobberAbi { _ => Err(&["C", "system", "efiapi"]), }, InlineAsmArch::Avr => match name { - "C" | "system" => Ok(InlineAsmClobberAbi::Avr), + "C" | "system" => Ok(if avr::is_tiny(target_features) { + InlineAsmClobberAbi::AvrTiny + } else { + InlineAsmClobberAbi::Avr + }), _ => Err(&["C", "system"]), }, InlineAsmArch::LoongArch32 | InlineAsmArch::LoongArch64 => match name { @@ -1168,6 +1173,13 @@ impl InlineAsmClobberAbi { // is used. } }, + InlineAsmClobberAbi::AvrTiny => clobbered_regs! { + Avr AvrInlineAsmReg { + // Refs: https://gcc.gnu.org/wiki/avr-gcc#Reduced_Tiny + + r20, r21, r22, r23, r24, r25, r26, r27, r30, r31, + } + }, InlineAsmClobberAbi::RiscV => clobbered_regs! { RiscV RiscVInlineAsmReg { // ra diff --git a/compiler/rustc_target/src/spec/mod.rs b/compiler/rustc_target/src/spec/mod.rs index 537185f536ab1..2d2f15651c431 100644 --- a/compiler/rustc_target/src/spec/mod.rs +++ b/compiler/rustc_target/src/spec/mod.rs @@ -1082,7 +1082,7 @@ crate::target_spec_enum! { } crate::target_spec_enum! { - #[derive(Default)] + #[derive(Default, Encodable, Decodable)] pub enum SplitDebuginfo { /// Split debug-information is disabled, meaning that on supported platforms /// you can find all debug information in the executable itself. 
This is diff --git a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs index ea8360c10b6fe..b8da64b9729a2 100644 --- a/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs +++ b/compiler/rustc_trait_selection/src/solve/inspect/analyse.rs @@ -443,9 +443,10 @@ impl<'a, 'tcx> InspectGoal<'a, 'tcx> { pub(crate) fn visit_with>(&self, visitor: &mut V) -> V::Result { if self.depth < visitor.config().max_depth { try_visit!(visitor.visit_goal(self)); + V::Result::output() + } else { + visitor.on_recursion_limit() } - - V::Result::output() } } @@ -460,6 +461,10 @@ pub trait ProofTreeVisitor<'tcx> { } fn visit_goal(&mut self, goal: &InspectGoal<'_, 'tcx>) -> Self::Result; + + fn on_recursion_limit(&mut self) -> Self::Result { + Self::Result::output() + } } #[extension(pub trait InferCtxtProofTreeExt<'tcx>)] diff --git a/tests/codegen-llvm/asm/avr-clobbers.rs b/tests/codegen-llvm/asm/avr-clobbers.rs index 472ee328465b6..d881f88b0b3ca 100644 --- a/tests/codegen-llvm/asm/avr-clobbers.rs +++ b/tests/codegen-llvm/asm/avr-clobbers.rs @@ -1,7 +1,11 @@ //@ add-minicore //@ assembly-output: emit-asm -//@ compile-flags: --target avr-none -C target-cpu=atmega328p -//@ needs-llvm-components: avr +//@ revisions: avr avrtiny +//@[avr] compile-flags: --target avr-none -C target-cpu=atmega328p +//@[avr] needs-llvm-components: avr +//@[avrtiny] compile-flags: --target avr-none -C target-cpu=attiny104 +//@[avrtiny] needs-llvm-components: avr +// ignore-tidy-linelength #![crate_type = "rlib"] #![feature(no_core, asm_experimental_arch)] @@ -25,14 +29,16 @@ pub unsafe fn sreg_is_not_clobbered_if_preserve_flags_is_used() { } // CHECK-LABEL: @clobber_abi -// CHECK: asm sideeffect "", "={r18},={r19},={r20},={r21},={r22},={r23},={r24},={r25},={r26},={r27},={r30},={r31},~{sreg}"() +// avr: asm sideeffect "", "={r18},={r19},={r20},={r21},={r22},={r23},={r24},={r25},={r26},={r27},={r30},={r31},~{sreg}"() +// avrtiny: asm sideeffect "", "={r20},={r21},={r22},={r23},={r24},={r25},={r26},={r27},={r30},={r31},~{sreg}"() #[no_mangle] pub unsafe fn clobber_abi() { asm!("", clobber_abi("C"), options(nostack, nomem)); } // CHECK-LABEL: @clobber_abi_with_preserved_flags -// CHECK: asm sideeffect "", "={r18},={r19},={r20},={r21},={r22},={r23},={r24},={r25},={r26},={r27},={r30},={r31}"() +// avr: asm sideeffect "", "={r18},={r19},={r20},={r21},={r22},={r23},={r24},={r25},={r26},={r27},={r30},={r31}"() +// avrtiny: asm sideeffect "", "={r20},={r21},={r22},={r23},={r24},={r25},={r26},={r27},={r30},={r31}"() #[no_mangle] pub unsafe fn clobber_abi_with_preserved_flags() { asm!("", clobber_abi("C"), options(nostack, nomem, preserves_flags)); diff --git a/tests/codegen-llvm/backchain.rs b/tests/codegen-llvm/backchain.rs new file mode 100644 index 0000000000000..fae494dcad1b4 --- /dev/null +++ b/tests/codegen-llvm/backchain.rs @@ -0,0 +1,15 @@ +//@ add-minicore +//@ compile-flags: -Copt-level=3 --crate-type=lib --target=s390x-unknown-linux-gnu -Ctarget-feature=+backchain +//@ needs-llvm-components: systemz +#![crate_type = "lib"] +#![feature(no_core, lang_items)] +#![no_core] + +extern crate minicore; +use minicore::*; + +#[no_mangle] +pub fn test_backchain() { + // CHECK: @test_backchain() unnamed_addr #0 +} +// CHECK: attributes #0 = { {{.*}}"target-features"="{{[^"]*}}+backchain{{.*}} } diff --git a/tests/ui/abi/compatibility.rs b/tests/ui/abi/compatibility.rs index 84294ab343111..6071ad9bb435b 100644 --- a/tests/ui/abi/compatibility.rs +++ 
b/tests/ui/abi/compatibility.rs @@ -78,7 +78,6 @@ // FIXME: some targets are broken in various ways. // Hence there are `cfg` throughout this test to disable parts of it on those targets. -// sparc64: https://github.com/rust-lang/rust/issues/115336 // mips64: https://github.com/rust-lang/rust/issues/115404 extern crate minicore; @@ -246,7 +245,7 @@ test_transparent!(zst, Zst); test_transparent!(unit, ()); test_transparent!(enum_, Option); test_transparent!(enum_niched, Option<&'static i32>); -#[cfg(not(any(target_arch = "mips64", target_arch = "sparc64")))] +#[cfg(not(any(target_arch = "mips64")))] mod tuples { use super::*; // mixing in some floats since they often get special treatment @@ -260,7 +259,6 @@ mod tuples { test_transparent!(tuple, (i32, f32, i64, f64)); } // Some targets have special rules for arrays. -#[cfg(not(any(target_arch = "mips64", target_arch = "sparc64")))] mod arrays { use super::*; test_transparent!(empty_array, [u32; 0]); diff --git a/tests/ui/asm/avr/bad-reg.avr.stderr b/tests/ui/asm/avr/bad-reg.avr.stderr new file mode 100644 index 0000000000000..4d65560b5342d --- /dev/null +++ b/tests/ui/asm/avr/bad-reg.avr.stderr @@ -0,0 +1,56 @@ +error: invalid register `Y`: the frame pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:19:18 + | +LL | asm!("", out("Y") _); + | ^^^^^^^^^^ + +error: invalid register `YL`: the frame pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:21:18 + | +LL | asm!("", out("YL") _); + | ^^^^^^^^^^^ + +error: invalid register `YH`: the frame pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:23:18 + | +LL | asm!("", out("YH") _); + | ^^^^^^^^^^^ + +error: invalid register `SP`: the stack pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:25:18 + | +LL | asm!("", out("SP") _); + | ^^^^^^^^^^^ + +error: invalid register `SPL`: the stack pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:27:18 + | +LL | asm!("", out("SPL") _); + | ^^^^^^^^^^^^ + +error: invalid register `SPH`: the stack pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:29:18 + | +LL | asm!("", out("SPH") _); + | ^^^^^^^^^^^^ + +error: invalid register `r0`: LLVM reserves r0 (scratch register) and r1 (zero register) + --> $DIR/bad-reg.rs:31:18 + | +LL | asm!("", out("r0") _); + | ^^^^^^^^^^^ + +error: invalid register `r1`: LLVM reserves r0 (scratch register) and r1 (zero register) + --> $DIR/bad-reg.rs:33:18 + | +LL | asm!("", out("r1") _); + | ^^^^^^^^^^^ + +error: invalid register `r1r0`: LLVM reserves r0 (scratch register) and r1 (zero register) + --> $DIR/bad-reg.rs:35:18 + | +LL | asm!("", out("r1r0") _); + | ^^^^^^^^^^^^^ + +error: aborting due to 9 previous errors + diff --git a/tests/ui/asm/avr/bad-reg.avrtiny.stderr b/tests/ui/asm/avr/bad-reg.avrtiny.stderr new file mode 100644 index 0000000000000..6f0ecdb6e4348 --- /dev/null +++ b/tests/ui/asm/avr/bad-reg.avrtiny.stderr @@ -0,0 +1,200 @@ +error: invalid register `Y`: the frame pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:19:18 + | +LL | asm!("", out("Y") _); + | ^^^^^^^^^^ + +error: invalid register `YL`: the frame pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:21:18 + | +LL | asm!("", out("YL") _); + | ^^^^^^^^^^^ + +error: invalid register `YH`: the frame pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:23:18 + | +LL | asm!("", out("YH") _); + | ^^^^^^^^^^^ + +error: invalid register `SP`: the stack 
pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:25:18 + | +LL | asm!("", out("SP") _); + | ^^^^^^^^^^^ + +error: invalid register `SPL`: the stack pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:27:18 + | +LL | asm!("", out("SPL") _); + | ^^^^^^^^^^^^ + +error: invalid register `SPH`: the stack pointer cannot be used as an operand for inline asm + --> $DIR/bad-reg.rs:29:18 + | +LL | asm!("", out("SPH") _); + | ^^^^^^^^^^^^ + +error: invalid register `r0`: LLVM reserves r0 (scratch register) and r1 (zero register) + --> $DIR/bad-reg.rs:31:18 + | +LL | asm!("", out("r0") _); + | ^^^^^^^^^^^ + +error: invalid register `r1`: LLVM reserves r0 (scratch register) and r1 (zero register) + --> $DIR/bad-reg.rs:33:18 + | +LL | asm!("", out("r1") _); + | ^^^^^^^^^^^ + +error: invalid register `r1r0`: LLVM reserves r0 (scratch register) and r1 (zero register) + --> $DIR/bad-reg.rs:35:18 + | +LL | asm!("", out("r1r0") _); + | ^^^^^^^^^^^^^ + +error: cannot use register `r2`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:39:18 + | +LL | asm!("", out("r2") _); + | ^^^^^^^^^^^ + +error: cannot use register `r3`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:41:18 + | +LL | asm!("", out("r3") _); + | ^^^^^^^^^^^ + +error: cannot use register `r4`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:43:18 + | +LL | asm!("", out("r4") _); + | ^^^^^^^^^^^ + +error: cannot use register `r5`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:45:18 + | +LL | asm!("", out("r5") _); + | ^^^^^^^^^^^ + +error: cannot use register `r6`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:47:18 + | +LL | asm!("", out("r6") _); + | ^^^^^^^^^^^ + +error: cannot use register `r7`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:49:18 + | +LL | asm!("", out("r7") _); + | ^^^^^^^^^^^ + +error: cannot use register `r8`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:51:18 + | +LL | asm!("", out("r8") _); + | ^^^^^^^^^^^ + +error: cannot use register `r9`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:53:18 + | +LL | asm!("", out("r9") _); + | ^^^^^^^^^^^ + +error: cannot use register `r10`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:55:18 + | +LL | asm!("", out("r10") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r11`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:57:18 + | +LL | asm!("", out("r11") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r12`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:59:18 + | +LL | asm!("", out("r12") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r13`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> 
$DIR/bad-reg.rs:61:18 + | +LL | asm!("", out("r13") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r14`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:63:18 + | +LL | asm!("", out("r14") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r15`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:65:18 + | +LL | asm!("", out("r15") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r16`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:67:18 + | +LL | asm!("", out("r16") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r17`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:69:18 + | +LL | asm!("", out("r17") _); + | ^^^^^^^^^^^^ + +error: cannot use register `r3r2`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:71:18 + | +LL | asm!("", out("r3r2") _); + | ^^^^^^^^^^^^^ + +error: cannot use register `r5r4`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:73:18 + | +LL | asm!("", out("r5r4") _); + | ^^^^^^^^^^^^^ + +error: cannot use register `r7r6`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:75:18 + | +LL | asm!("", out("r7r6") _); + | ^^^^^^^^^^^^^ + +error: cannot use register `r9r8`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:77:18 + | +LL | asm!("", out("r9r8") _); + | ^^^^^^^^^^^^^ + +error: cannot use register `r11r10`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:79:18 + | +LL | asm!("", out("r11r10") _); + | ^^^^^^^^^^^^^^^ + +error: cannot use register `r13r12`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:81:18 + | +LL | asm!("", out("r13r12") _); + | ^^^^^^^^^^^^^^^ + +error: cannot use register `r15r14`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:83:18 + | +LL | asm!("", out("r15r14") _); + | ^^^^^^^^^^^^^^^ + +error: cannot use register `r17r16`: on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + --> $DIR/bad-reg.rs:85:18 + | +LL | asm!("", out("r17r16") _); + | ^^^^^^^^^^^^^^^ + +error: aborting due to 33 previous errors + diff --git a/tests/ui/asm/avr/bad-reg.rs b/tests/ui/asm/avr/bad-reg.rs new file mode 100644 index 0000000000000..82b9ccf69cfc1 --- /dev/null +++ b/tests/ui/asm/avr/bad-reg.rs @@ -0,0 +1,88 @@ +//@ add-minicore +//@ revisions: avr avrtiny +//@[avr] compile-flags: --target avr-none -C target-cpu=atmega328p +//@[avr] needs-llvm-components: avr +//@[avrtiny] compile-flags: --target avr-none -C target-cpu=attiny104 +//@[avrtiny] needs-llvm-components: avr +//@ ignore-backends: gcc + +#![crate_type = "rlib"] +#![feature(no_core, asm_experimental_arch)] +#![no_core] + +extern crate minicore; +use minicore::*; + +fn f() { + unsafe { + // Unsupported registers + asm!("", out("Y") _); + //~^ ERROR the frame pointer cannot be used as 
an operand for inline asm + asm!("", out("YL") _); + //~^ ERROR the frame pointer cannot be used as an operand for inline asm + asm!("", out("YH") _); + //~^ ERROR the frame pointer cannot be used as an operand for inline asm + asm!("", out("SP") _); + //~^ ERROR the stack pointer cannot be used as an operand for inline asm + asm!("", out("SPL") _); + //~^ ERROR the stack pointer cannot be used as an operand for inline asm + asm!("", out("SPH") _); + //~^ ERROR the stack pointer cannot be used as an operand for inline asm + asm!("", out("r0") _); + //~^ ERROR LLVM reserves r0 (scratch register) and r1 (zero register) + asm!("", out("r1") _); + //~^ ERROR LLVM reserves r0 (scratch register) and r1 (zero register) + asm!("", out("r1r0") _); + //~^ ERROR LLVM reserves r0 (scratch register) and r1 (zero register) + + // Unsupported only on AVRTiny + asm!("", out("r2") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r3") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r4") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r5") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r6") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r7") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r8") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r9") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r10") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r11") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r12") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r13") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r14") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r15") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r16") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r17") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r3r2") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r5r4") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch 
register) and r17 (zero register) are reserved by LLVM + asm!("", out("r7r6") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r9r8") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r11r10") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r13r12") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r15r14") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + asm!("", out("r17r16") _); + //[avrtiny]~^ ERROR on AVRTiny, r[2-15] are unavailable, r16 (scratch register) and r17 (zero register) are reserved by LLVM + } +} diff --git a/tests/ui/autodiff/incremental.rs b/tests/ui/autodiff/incremental.rs index a79059deaa778..00dd632712aba 100644 --- a/tests/ui/autodiff/incremental.rs +++ b/tests/ui/autodiff/incremental.rs @@ -1,6 +1,6 @@ //@ revisions: DEBUG RELEASE //@[RELEASE] compile-flags: -Zautodiff=Enable,NoTT -C opt-level=3 -Clto=fat -//@[DEBUG] compile-flags: -Zautodiff=Enable,NoTT -C opt-level=0 -Clto=fat -C debuginfo=2 +//@[DEBUG] compile-flags: -Zautodiff=Enable,NoTT -Copt-level=0 -Clto=fat -Cdebuginfo=2 -Ccodegen-units=8 //@ needs-enzyme //@ incremental //@ no-prefer-dynamic @@ -13,6 +13,10 @@ // dropped. We now use globals instead and add this test to verify that incremental // keeps working. Also testing debug mode while at it. +// We extended this test to use 8 codegen-units in debug mode and call an intrinsic like powi, +// rather than just simple arithmetic. This caused a compilation failure, since the definition of +// the intrinsic was not available in the same cgu as the function being differentiated. + use std::autodiff::autodiff_reverse; #[autodiff_reverse(bar, Duplicated, Duplicated)] @@ -20,7 +24,7 @@ pub fn foo(r: &[f64; 10], res: &mut f64) { let mut output = [0.0; 10]; output[0] = r[0]; output[1] = r[1] * r[2]; - output[2] = r[4] * r[5]; + output[2] = r[4] * r[5].powi(2); output[3] = r[2] * r[6]; output[4] = r[1] * r[7]; output[5] = r[2] * r[8]; diff --git a/tests/ui/issues/issue-3026.rs b/tests/ui/borrowck/borrow-box-in-map-3026.rs similarity index 73% rename from tests/ui/issues/issue-3026.rs rename to tests/ui/borrowck/borrow-box-in-map-3026.rs index 05dc46c3cc096..dd63075eecba2 100644 --- a/tests/ui/issues/issue-3026.rs +++ b/tests/ui/borrowck/borrow-box-in-map-3026.rs @@ -1,3 +1,5 @@ +//! 
Regression test for https://github.com/rust-lang/rust/issues/3026 + //@ run-pass use std::collections::HashMap; diff --git a/tests/ui/inline-const/in-pat-recovery.rs b/tests/ui/inline-const/in-pat-recovery.rs index d519217fad3b5..037c58d3bf98c 100644 --- a/tests/ui/inline-const/in-pat-recovery.rs +++ b/tests/ui/inline-const/in-pat-recovery.rs @@ -4,63 +4,63 @@ fn main() { match 1 { const { 1 + 7 } => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns 2 => {} _ => {} } match 5 { const { 1 } ..= 10 => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } match 5 { 1 ..= const { 10 } => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } match 5 { const { 1 } ..= const { 10 } => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns - //~| ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns + //~| ERROR const blocks cannot be used as patterns _ => {} } match 5 { const { 1 } .. 10 => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } match 5 { 1 .. const { 10 } => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } match 5 { const { 1 + 2 } ..= 10 => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } match 5 { 1 ..= const { 5 + 5 } => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } match 5 { const { 3 } .. => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } match 5 { ..= const { 7 } => {} - //~^ ERROR arbitrary expressions aren't allowed in patterns + //~^ ERROR const blocks cannot be used as patterns _ => {} } } diff --git a/tests/ui/inline-const/in-pat-recovery.stderr b/tests/ui/inline-const/in-pat-recovery.stderr index 376c43aaecca6..55adb5c49a6d1 100644 --- a/tests/ui/inline-const/in-pat-recovery.stderr +++ b/tests/ui/inline-const/in-pat-recovery.stderr @@ -1,88 +1,88 @@ -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:6:9 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:6:15 | LL | const { 1 + 7 } => {} - | ^^^^^^^^^^^^^^^ + | ^^^^^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:13:9 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:13:15 | LL | const { 1 } ..= 10 => {} - | ^^^^^^^^^^^ + | ^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:19:15 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:19:21 | LL | 1 ..= const { 10 } => {} - | ^^^^^^^^^^^^ + | ^^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... 
}`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:25:9 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:25:15 | LL | const { 1 } ..= const { 10 } => {} - | ^^^^^^^^^^^ + | ^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:25:25 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:25:31 | LL | const { 1 } ..= const { 10 } => {} - | ^^^^^^^^^^^^ + | ^^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:32:9 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:32:15 | LL | const { 1 } .. 10 => {} - | ^^^^^^^^^^^ + | ^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:38:14 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:38:20 | LL | 1 .. const { 10 } => {} - | ^^^^^^^^^^^^ + | ^^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:44:9 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:44:15 | LL | const { 1 + 2 } ..= 10 => {} - | ^^^^^^^^^^^^^^^ + | ^^^^^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:50:15 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:50:21 | LL | 1 ..= const { 5 + 5 } => {} - | ^^^^^^^^^^^^^^^ + | ^^^^^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:56:9 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:56:15 | LL | const { 3 } .. => {} - | ^^^^^^^^^^^ + | ^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead -error: arbitrary expressions aren't allowed in patterns - --> $DIR/in-pat-recovery.rs:62:13 +error: const blocks cannot be used as patterns + --> $DIR/in-pat-recovery.rs:62:19 | LL | ..= const { 7 } => {} - | ^^^^^^^^^^^ + | ^^^^^ | = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead diff --git a/tests/ui/inline-const/reject-const-block-pat-pre-expansion.rs b/tests/ui/inline-const/reject-const-block-pat-pre-expansion.rs new file mode 100644 index 0000000000000..9af4925c0432a --- /dev/null +++ b/tests/ui/inline-const/reject-const-block-pat-pre-expansion.rs @@ -0,0 +1,12 @@ +//! Regression test for : reject inline const +//! patterns pre-expansion when possible. + +macro_rules! 
analyze { ($p:pat) => {}; } +analyze!(const { 0 }); +//~^ ERROR: const blocks cannot be used as patterns + +#[cfg(false)] +fn scope() { let const { 0 }; } +//~^ ERROR: const blocks cannot be used as patterns + +fn main() {} diff --git a/tests/ui/inline-const/reject-const-block-pat-pre-expansion.stderr b/tests/ui/inline-const/reject-const-block-pat-pre-expansion.stderr new file mode 100644 index 0000000000000..034b97699396e --- /dev/null +++ b/tests/ui/inline-const/reject-const-block-pat-pre-expansion.stderr @@ -0,0 +1,18 @@ +error: const blocks cannot be used as patterns + --> $DIR/reject-const-block-pat-pre-expansion.rs:9:24 + | +LL | fn scope() { let const { 0 }; } + | ^^^^^ + | + = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead + +error: const blocks cannot be used as patterns + --> $DIR/reject-const-block-pat-pre-expansion.rs:5:16 + | +LL | analyze!(const { 0 }); + | ^^^^^ + | + = help: use a named `const`-item or an `if`-guard (`x if x == const { ... }`) instead + +error: aborting due to 2 previous errors + diff --git a/tests/ui/issues/issue-2904.rs b/tests/ui/issues/issue-2904.rs deleted file mode 100644 index 1ae3a8ad656ea..0000000000000 --- a/tests/ui/issues/issue-2904.rs +++ /dev/null @@ -1,79 +0,0 @@ -//@ build-pass -#![allow(unused_must_use)] -#![allow(dead_code)] -#![allow(unused_mut)] -#![allow(non_camel_case_types)] - -// Map representation - -use std::fmt; -use std::io::prelude::*; -use square::{bot, wall, rock, lambda, closed_lift, open_lift, earth, empty}; - -enum square { - bot, - wall, - rock, - lambda, - closed_lift, - open_lift, - earth, - empty -} - -impl fmt::Debug for square { - fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { - write!(f, "{}", match *self { - bot => { "R".to_string() } - wall => { "#".to_string() } - rock => { "*".to_string() } - lambda => { "\\".to_string() } - closed_lift => { "L".to_string() } - open_lift => { "O".to_string() } - earth => { ".".to_string() } - empty => { " ".to_string() } - }) - } -} - -fn square_from_char(c: char) -> square { - match c { - 'R' => { bot } - '#' => { wall } - '*' => { rock } - '\\' => { lambda } - 'L' => { closed_lift } - 'O' => { open_lift } - '.' 
=> { earth } - ' ' => { empty } - _ => { - println!("invalid square: {}", c); - panic!() - } - } -} - -fn read_board_grid(mut input: rdr) - -> Vec> { - let mut input: &mut dyn Read = &mut input; - let mut grid = Vec::new(); - let mut line = [0; 10]; - input.read(&mut line); - let mut row = Vec::new(); - for c in &line { - row.push(square_from_char(*c as char)) - } - grid.push(row); - let width = grid[0].len(); - for row in &grid { assert_eq!(row.len(), width) } - grid -} - -mod test { - #[test] - pub fn trivial_to_string() { - assert_eq!(lambda.to_string(), "\\") - } -} - -pub fn main() {} diff --git a/tests/ui/issues/issue-3121.rs b/tests/ui/issues/issue-3121.rs deleted file mode 100644 index aa150f11cf400..0000000000000 --- a/tests/ui/issues/issue-3121.rs +++ /dev/null @@ -1,24 +0,0 @@ -//@ run-pass -#![allow(dead_code)] -#![allow(non_camel_case_types)] - -#[derive(Copy, Clone)] -enum side { mayo, catsup, vinegar } -#[derive(Copy, Clone)] -enum order { hamburger, fries(side), shake } -#[derive(Copy, Clone)] -enum meal { to_go(order), for_here(order) } - -fn foo(m: Box, cond: bool) { - match *m { - meal::to_go(_) => { } - meal::for_here(_) if cond => {} - meal::for_here(order::hamburger) => {} - meal::for_here(order::fries(_s)) => {} - meal::for_here(order::shake) => {} - } -} - -pub fn main() { - foo(Box::new(meal::for_here(order::hamburger)), true) -} diff --git a/tests/ui/macros/issue-118786.fixed b/tests/ui/macros/issue-118786.fixed new file mode 100644 index 0000000000000..5d4006acd6c81 --- /dev/null +++ b/tests/ui/macros/issue-118786.fixed @@ -0,0 +1,20 @@ +#![allow(unused_macros)] +//@ compile-flags: --crate-type lib +//@ dont-require-annotations: NOTE +//@ run-rustfix + +// Regression test for issue 118786 + +macro_rules! make_macro { + ($macro_name:tt) => { + macro_rules! $macro_name { + //~^ ERROR macro expansion ignores `{` and any tokens following + //~| ERROR cannot find macro `macro_rules` in this scope + //~| NOTE put a macro name here + () => {} + } + } +} + +make_macro!(meow); +//~^ ERROR macros that expand to items must be delimited with braces or followed by a semicolon diff --git a/tests/ui/macros/issue-118786.rs b/tests/ui/macros/issue-118786.rs index 78fd6ab6edddf..b79a2c7eedd74 100644 --- a/tests/ui/macros/issue-118786.rs +++ b/tests/ui/macros/issue-118786.rs @@ -1,5 +1,7 @@ -//@ compile-flags: --crate-type lib -O -C debug-assertions=yes +#![allow(unused_macros)] +//@ compile-flags: --crate-type lib //@ dont-require-annotations: NOTE +//@ run-rustfix // Regression test for issue 118786 diff --git a/tests/ui/macros/issue-118786.stderr b/tests/ui/macros/issue-118786.stderr index ddec281b82325..02b26e5a1f31b 100644 --- a/tests/ui/macros/issue-118786.stderr +++ b/tests/ui/macros/issue-118786.stderr @@ -1,21 +1,17 @@ error: macros that expand to items must be delimited with braces or followed by a semicolon - --> $DIR/issue-118786.rs:17:13 + --> $DIR/issue-118786.rs:19:13 | LL | make_macro!((meow)); | ^^^^^^ | -help: change the delimiters to curly braces +help: to define a macro, remove the parentheses around the macro name | LL - make_macro!((meow)); -LL + make_macro!({meow}); +LL + make_macro!(meow); | -help: add a semicolon - | -LL | macro_rules! $macro_name; { - | + error: macro expansion ignores `{` and any tokens following - --> $DIR/issue-118786.rs:8:34 + --> $DIR/issue-118786.rs:10:34 | LL | macro_rules! 
$macro_name { | ^ @@ -26,7 +22,7 @@ LL | make_macro!((meow)); = note: the usage of `make_macro!` is likely invalid in item context error: cannot find macro `macro_rules` in this scope - --> $DIR/issue-118786.rs:8:9 + --> $DIR/issue-118786.rs:10:9 | LL | macro_rules! $macro_name { | ^^^^^^^^^^^ @@ -35,7 +31,7 @@ LL | make_macro!((meow)); | ------------------- in this macro invocation | note: maybe you have forgotten to define a name for this `macro_rules!` - --> $DIR/issue-118786.rs:8:20 + --> $DIR/issue-118786.rs:10:20 | LL | macro_rules! $macro_name { | ^ put a macro name here diff --git a/tests/ui/match/match-nested-enum-box-3121.rs b/tests/ui/match/match-nested-enum-box-3121.rs new file mode 100644 index 0000000000000..f2ab4bf080750 --- /dev/null +++ b/tests/ui/match/match-nested-enum-box-3121.rs @@ -0,0 +1,37 @@ +//! Regression test for https://github.com/rust-lang/rust/issues/3121 + +//@ run-pass +#![allow(dead_code)] +#![allow(non_camel_case_types)] + +#[derive(Copy, Clone)] +enum side { + mayo, + catsup, + vinegar, +} +#[derive(Copy, Clone)] +enum order { + hamburger, + fries(side), + shake, +} +#[derive(Copy, Clone)] +enum meal { + to_go(order), + for_here(order), +} + +fn foo(m: Box, cond: bool) { + match *m { + meal::to_go(_) => {} + meal::for_here(_) if cond => {} + meal::for_here(order::hamburger) => {} + meal::for_here(order::fries(_s)) => {} + meal::for_here(order::shake) => {} + } +} + +pub fn main() { + foo(Box::new(meal::for_here(order::hamburger)), true) +} diff --git a/tests/ui/issues/issue-3029.rs b/tests/ui/panics/vec-extend-after-panic-3029.rs similarity index 74% rename from tests/ui/issues/issue-3029.rs rename to tests/ui/panics/vec-extend-after-panic-3029.rs index 22d0906ccf701..3ae708d91e197 100644 --- a/tests/ui/issues/issue-3029.rs +++ b/tests/ui/panics/vec-extend-after-panic-3029.rs @@ -1,3 +1,5 @@ +//! Regression test for https://github.com/rust-lang/rust/issues/3029 + //@ run-fail //@ error-pattern:so long //@ needs-subprocess diff --git a/tests/ui/parser/macro-rules-paren-name-issue-150899.rs b/tests/ui/parser/macro-rules-paren-name-issue-150899.rs new file mode 100644 index 0000000000000..174a6e7e7de8f --- /dev/null +++ b/tests/ui/parser/macro-rules-paren-name-issue-150899.rs @@ -0,0 +1,7 @@ +macro_rules!(i_think_the_name_should_go_here) { + //~^ ERROR macros that expand to items must be delimited with braces or followed by a semicolon + //~| ERROR expected item, found `{` + () => {} +} + +fn main() {} diff --git a/tests/ui/parser/macro-rules-paren-name-issue-150899.stderr b/tests/ui/parser/macro-rules-paren-name-issue-150899.stderr new file mode 100644 index 0000000000000..f5b6ff40f27ea --- /dev/null +++ b/tests/ui/parser/macro-rules-paren-name-issue-150899.stderr @@ -0,0 +1,22 @@ +error: macros that expand to items must be delimited with braces or followed by a semicolon + --> $DIR/macro-rules-paren-name-issue-150899.rs:1:13 + | +LL | macro_rules!(i_think_the_name_should_go_here) { + | ^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^^ + | +help: to define a macro, remove the parentheses around the macro name + | +LL - macro_rules!(i_think_the_name_should_go_here) { +LL + macro_rules! 
i_think_the_name_should_go_here { + | + +error: expected item, found `{` + --> $DIR/macro-rules-paren-name-issue-150899.rs:1:47 + | +LL | macro_rules!(i_think_the_name_should_go_here) { + | ^ expected item + | + = note: for a full list of items that can appear in modules, see + +error: aborting due to 2 previous errors + diff --git a/tests/ui/resolve/enum-variant-import-2904.rs b/tests/ui/resolve/enum-variant-import-2904.rs new file mode 100644 index 0000000000000..3272ee5fb5005 --- /dev/null +++ b/tests/ui/resolve/enum-variant-import-2904.rs @@ -0,0 +1,101 @@ +//! Regression test for https://github.com/rust-lang/rust/issues/2904 + +//@ build-pass +#![allow(unused_must_use)] +#![allow(dead_code)] +#![allow(unused_mut)] + +// Map representation + +use Square::{Bot, ClosedLift, Earth, Empty, Lambda, OpenLift, Rock, Wall}; +use std::fmt; +use std::io::prelude::*; + +enum Square { + Bot, + Wall, + Rock, + Lambda, + ClosedLift, + OpenLift, + Earth, + Empty, +} + +impl fmt::Debug for Square { + fn fmt(&self, f: &mut fmt::Formatter) -> fmt::Result { + write!( + f, + "{}", + match *self { + Bot => { + "R".to_string() + } + Wall => { + "#".to_string() + } + Rock => { + "*".to_string() + } + Lambda => { + "\\".to_string() + } + ClosedLift => { + "L".to_string() + } + OpenLift => { + "O".to_string() + } + Earth => { + ".".to_string() + } + Empty => { + " ".to_string() + } + } + ) + } +} + +fn square_from_char(c: char) -> Square { + match c { + 'R' => Bot, + '#' => Wall, + '*' => Rock, + '\\' => Lambda, + 'L' => ClosedLift, + 'O' => OpenLift, + '.' => Earth, + ' ' => Empty, + _ => { + println!("invalid Square: {}", c); + panic!() + } + } +} + +fn read_board_grid(mut input: Rdr) -> Vec> { + let mut input: &mut dyn Read = &mut input; + let mut grid = Vec::new(); + let mut line = [0; 10]; + input.read(&mut line); + let mut row = Vec::new(); + for c in &line { + row.push(square_from_char(*c as char)) + } + grid.push(row); + let width = grid[0].len(); + for row in &grid { + assert_eq!(row.len(), width) + } + grid +} + +mod test { + #[test] + pub fn trivial_to_string() { + assert_eq!(Lambda.to_string(), "\\") + } +} + +pub fn main() {} diff --git a/tests/ui/issues/issue-2708.rs b/tests/ui/resolve/struct-function-same-name-2708.rs similarity index 69% rename from tests/ui/issues/issue-2708.rs rename to tests/ui/resolve/struct-function-same-name-2708.rs index 09d19f87aa647..729a5819ae4e5 100644 --- a/tests/ui/issues/issue-2708.rs +++ b/tests/ui/resolve/struct-function-same-name-2708.rs @@ -1,15 +1,13 @@ +//! Regression test for https://github.com/rust-lang/rust/issues/2708 + //@ run-pass #![allow(dead_code)] #![allow(non_snake_case)] - - - struct Font { fontbuf: usize, cairo_font: usize, font_dtor: usize, - } impl Drop for Font { @@ -17,11 +15,7 @@ impl Drop for Font { } fn Font() -> Font { - Font { - fontbuf: 0, - cairo_font: 0, - font_dtor: 0 - } + Font { fontbuf: 0, cairo_font: 0, font_dtor: 0 } } pub fn main() { diff --git a/tests/ui/issues/issue-2895.rs b/tests/ui/structs/struct-size-with-drop-2895.rs similarity index 84% rename from tests/ui/issues/issue-2895.rs rename to tests/ui/structs/struct-size-with-drop-2895.rs index 6301a86375344..9540d340ff68b 100644 --- a/tests/ui/issues/issue-2895.rs +++ b/tests/ui/structs/struct-size-with-drop-2895.rs @@ -1,10 +1,12 @@ +//! 
Regression test for https://github.com/rust-lang/rust/issues/2895 + //@ run-pass #![allow(dead_code)] use std::mem; struct Cat { - x: isize + x: isize, } struct Kitty { diff --git a/tests/ui/traits/next-solver/coercion/unfulfilled-unsize-coercion-recursion-limit.rs b/tests/ui/traits/next-solver/coercion/unfulfilled-unsize-coercion-recursion-limit.rs new file mode 100644 index 0000000000000..24b32db3060d3 --- /dev/null +++ b/tests/ui/traits/next-solver/coercion/unfulfilled-unsize-coercion-recursion-limit.rs @@ -0,0 +1,35 @@ +//@ check-pass +//@ compile-flags: -Znext-solver + +// A regression test for https://github.com/rust-lang/trait-system-refactor-initiative/issues/266. +// Ensure that we do not accidentally try unfulfilled unsized coercions due to hitting recursion +// limits while trying to find the right fulfillment error source. + +fn argument_coercion(_: &U) {} + +pub fn test() { + argument_coercion(&{ + Nested(0.0, 0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + .add(0.0) + }); +} + +struct Nested(T, R); + +impl Nested { + fn add(self, value: U) -> Nested> { + Nested(value, self) + } +} + +fn main() {} diff --git a/tests/ui/traits/next-solver/coercion/unsize-coercion-recursion-limit.rs b/tests/ui/traits/next-solver/coercion/unsize-coercion-recursion-limit.rs new file mode 100644 index 0000000000000..42802e85cda10 --- /dev/null +++ b/tests/ui/traits/next-solver/coercion/unsize-coercion-recursion-limit.rs @@ -0,0 +1,25 @@ +//@ check-pass +//@ compile-flags: -Znext-solver + +// A test to ensure that unsized coercion is not aborted when visiting a nested goal that +// exceeds the recursion limit and evaluates to `Certainty::Maybe`. +// See https://github.com/rust-lang/rust/pull/152444. + +#![allow(warnings)] + +struct W(T); +type Four = W>>>; +type Sixteen = Four>>>; + +fn ret(x: T) -> Sixteen { + todo!(); +} + +fn please_coerce() { + let mut y = Default::default(); + let x = ret(y); + let _: &Sixteen = &x; + y = 1u32; +} + +fn main() {} diff --git a/tests/ui/issues/issue-2935.rs b/tests/ui/traits/trait-object-method-call-2935.rs similarity index 60% rename from tests/ui/issues/issue-2935.rs rename to tests/ui/traits/trait-object-method-call-2935.rs index bcc25f6187b5d..ea24aae89462c 100644 --- a/tests/ui/issues/issue-2935.rs +++ b/tests/ui/traits/trait-object-method-call-2935.rs @@ -1,3 +1,5 @@ +//! Regression test for https://github.com/rust-lang/rust/issues/2935 + //@ run-pass #![allow(dead_code)] #![allow(non_camel_case_types)] @@ -11,14 +13,14 @@ trait it { } impl it for t { - fn f(&self) { } + fn f(&self) {} } pub fn main() { - // let x = ({a: 4} as it); - // let y = box ({a: 4}); - // let z = box ({a: 4} as it); - // let z = box ({a: true} as it); + // let x = ({a: 4} as it); + // let y = box ({a: 4}); + // let z = box ({a: 4} as it); + // let z = box ({a: true} as it); let z: Box<_> = Box::new(Box::new(true) as Box); // x.f(); // y.f(); diff --git a/tests/ui/issues/issue-3052.rs b/tests/ui/traits/trait-object-type-alias-3052.rs similarity index 69% rename from tests/ui/issues/issue-3052.rs rename to tests/ui/traits/trait-object-type-alias-3052.rs index ab3519fe7147b..e601c76713dcf 100644 --- a/tests/ui/issues/issue-3052.rs +++ b/tests/ui/traits/trait-object-type-alias-3052.rs @@ -1,3 +1,5 @@ +//! Regression test for https://github.com/rust-lang/rust/issues/3052 + //@ run-pass #![allow(dead_code)] @@ -8,5 +10,4 @@ fn f() -> Option { Some(mock_connection) } -pub fn main() { -} +pub fn main() {}
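As a companion to the `parse_const_block` change and the `in-pat-recovery` tests above, here is a minimal, self-contained sketch of the two alternatives the new help message points to: a named `const` item used as a pattern, and an `if`-guard comparing against an inline `const { ... }` block. This snippet is not part of this patch; the names `FIVE` and `classify` are illustrative only.

```rust
// Illustrative sketch only; not taken from this patch.
const FIVE: u32 = 5;

fn classify(x: u32) -> &'static str {
    match x {
        // A named `const` item is still a valid pattern.
        FIVE => "named const",
        // An `if`-guard may compare against an inline const block, as the new
        // help message suggests, instead of writing `const { 2 + 8 } => ...`
        // in pattern position, which this patch rejects while parsing.
        n if n == const { 2u32 + 8 } => "if-guard with const block",
        _ => "other",
    }
}

fn main() {
    assert_eq!(classify(5), "named const");
    assert_eq!(classify(10), "if-guard with const block");
    assert_eq!(classify(7), "other");
}
```

Both forms compile on current stable Rust, whereas `const { ... }` directly in pattern position is now reported with the "const blocks cannot be used as patterns" error during parsing.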