Rollup merge of #154297 - RalfJung:mir-opt-comments, r=dianqk fix/extend some mir-opt comments Looks like CopyProp was refactored without updating that comment. And for GVN, I think this is what you had in mind @cjgillot but would be great if you could have a look.
diff --git a/Cargo.lock b/Cargo.lock index 1f2b417..85997e1 100644 --- a/Cargo.lock +++ b/Cargo.lock
@@ -4573,7 +4573,6 @@ "rustc_data_structures", "rustc_errors", "rustc_hir", - "rustc_index", "rustc_macros", "rustc_middle", "rustc_serialize",
diff --git a/compiler/rustc_codegen_cranelift/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch b/compiler/rustc_codegen_cranelift/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch deleted file mode 100644 index 2aa9316..0000000 --- a/compiler/rustc_codegen_cranelift/patches/0028-stdlib-Ensure-va_end-doesn-t-get-emitted-unless-VaList-is-a.patch +++ /dev/null
@@ -1,25 +0,0 @@ -From 116abc64add4d617104993a7a3011f20bcf31ef2 Mon Sep 17 00:00:00 2001 -From: bjorn3 <17426603+bjorn3@users.noreply.github.com> -Date: Mon, 26 Jan 2026 16:20:58 +0000 -Subject: [PATCH] Ensure va_end doesn't get emitted unless VaList is actually - used - ---- - library/core/src/ffi/va_list.rs | 1 + - 1 file changed, 1 insertion(+) - -diff --git a/library/core/src/ffi/va_list.rs b/library/core/src/ffi/va_list.rs -index d0f1553..75129af 100644 ---- a/library/core/src/ffi/va_list.rs -+++ b/library/core/src/ffi/va_list.rs -@@ -217,6 +217,7 @@ impl Clone for VaList<'_> { - - #[rustc_const_unstable(feature = "const_c_variadic", issue = "151787")] - impl<'f> const Drop for VaList<'f> { -+ #[inline] - fn drop(&mut self) { - // SAFETY: this variable argument list is being dropped, so won't be read from again. - unsafe { va_end(self) } --- -2.43.0 -
diff --git a/compiler/rustc_data_structures/src/vec_cache.rs b/compiler/rustc_data_structures/src/vec_cache.rs index aea5924..6d026bb 100644 --- a/compiler/rustc_data_structures/src/vec_cache.rs +++ b/compiler/rustc_data_structures/src/vec_cache.rs
@@ -6,12 +6,16 @@ //! //! This is currently used for query caching. -use std::fmt::Debug; +use std::fmt::{self, Debug}; use std::marker::PhantomData; +use std::ops::{Index, IndexMut}; use std::sync::atomic::{AtomicPtr, AtomicU32, AtomicUsize, Ordering}; use rustc_index::Idx; +#[cfg(test)] +mod tests; + struct Slot<V> { // We never construct &Slot<V> so it's fine for this to not be in an UnsafeCell. value: V, @@ -28,7 +32,7 @@ struct Slot<V> { #[derive(Copy, Clone, Debug)] struct SlotIndex { // the index of the bucket in VecCache (0 to 20) - bucket_idx: usize, + bucket_idx: BucketIndex, // the index of the slot within the bucket index_in_bucket: usize, } @@ -42,7 +46,7 @@ struct SlotIndex { let mut key = 0; loop { let si = SlotIndex::from_index(key); - entries[si.bucket_idx] = si.entries(); + entries[si.bucket_idx.to_usize()] = si.bucket_idx.capacity(); if key == 0 { key = 1; } else if key == (1 << 31) { @@ -57,48 +61,24 @@ struct SlotIndex { const BUCKETS: usize = 21; impl SlotIndex { - /// The total possible number of entries in the bucket - const fn entries(&self) -> usize { - if self.bucket_idx == 0 { 1 << 12 } else { 1 << (self.bucket_idx + 11) } - } - - // This unpacks a flat u32 index into identifying which bucket it belongs to and the offset - // within that bucket. As noted in the VecCache docs, buckets double in size with each index. - // Typically that would mean 31 buckets (2^0 + 2^1 ... + 2^31 = u32::MAX - 1), but to reduce - // the size of the VecCache struct and avoid uselessly small allocations, we instead have the - // first bucket have 2**12 entries. To simplify the math, the second bucket also 2**12 entries, - // and buckets double from there. - // - // We assert that [0, 2**32 - 1] uniquely map through this function to individual, consecutive - // slots (see `slot_index_exhaustive` in tests). + /// Unpacks a flat 32-bit index into a [`BucketIndex`] and a slot offset within that bucket. 
#[inline] const fn from_index(idx: u32) -> Self { - const FIRST_BUCKET_SHIFT: usize = 12; - if idx < (1 << FIRST_BUCKET_SHIFT) { - return SlotIndex { bucket_idx: 0, index_in_bucket: idx as usize }; - } - // We already ruled out idx 0, so this `ilog2` never panics (and the check optimizes away) - let bucket = idx.ilog2() as usize; - let entries = 1 << bucket; - SlotIndex { - bucket_idx: bucket - FIRST_BUCKET_SHIFT + 1, - index_in_bucket: idx as usize - entries, - } + let (bucket_idx, index_in_bucket) = BucketIndex::from_flat_index(idx as usize); + SlotIndex { bucket_idx, index_in_bucket } } // SAFETY: Buckets must be managed solely by functions here (i.e., get/put on SlotIndex) and // `self` comes from SlotIndex::from_index #[inline] unsafe fn get<V: Copy>(&self, buckets: &[AtomicPtr<Slot<V>>; 21]) -> Option<(V, u32)> { - // SAFETY: `bucket_idx` is ilog2(u32).saturating_sub(11), which is at most 21, i.e., - // in-bounds of buckets. See `from_index` for computation. - let bucket = unsafe { buckets.get_unchecked(self.bucket_idx) }; + let bucket = &buckets[self.bucket_idx]; let ptr = bucket.load(Ordering::Acquire); // Bucket is not yet initialized: then we obviously won't find this entry in that bucket. if ptr.is_null() { return None; } - debug_assert!(self.index_in_bucket < self.entries()); + debug_assert!(self.index_in_bucket < self.bucket_idx.capacity()); // SAFETY: `bucket` was allocated (so <= isize in total bytes) to hold `entries`, so this // must be inbounds. let slot = unsafe { ptr.add(self.index_in_bucket) }; @@ -131,7 +111,7 @@ fn bucket_ptr<V>(&self, bucket: &AtomicPtr<Slot<V>>) -> *mut Slot<V> { #[cold] #[inline(never)] - fn initialize_bucket<V>(bucket: &AtomicPtr<Slot<V>>, bucket_idx: usize) -> *mut Slot<V> { + fn initialize_bucket<V>(bucket: &AtomicPtr<Slot<V>>, bucket_idx: BucketIndex) -> *mut Slot<V> { static LOCK: std::sync::Mutex<()> = std::sync::Mutex::new(()); // If we are initializing the bucket, then acquire a global lock. 
@@ -145,8 +125,8 @@ fn initialize_bucket<V>(bucket: &AtomicPtr<Slot<V>>, bucket_idx: usize) -> *mut // OK, now under the allocator lock, if we're still null then it's definitely us that will // initialize this bucket. if ptr.is_null() { - let bucket_len = SlotIndex { bucket_idx, index_in_bucket: 0 }.entries(); - let bucket_layout = std::alloc::Layout::array::<Slot<V>>(bucket_len).unwrap(); + let bucket_layout = + std::alloc::Layout::array::<Slot<V>>(bucket_idx.capacity()).unwrap(); // This is more of a sanity check -- this code is very cold, so it's safe to pay a // little extra cost here. assert!(bucket_layout.size() > 0); @@ -167,12 +147,10 @@ fn initialize_bucket<V>(bucket: &AtomicPtr<Slot<V>>, bucket_idx: usize) -> *mut /// Returns true if this successfully put into the map. #[inline] fn put<V>(&self, buckets: &[AtomicPtr<Slot<V>>; 21], value: V, extra: u32) -> bool { - // SAFETY: `bucket_idx` is ilog2(u32).saturating_sub(11), which is at most 21, i.e., - // in-bounds of buckets. - let bucket = unsafe { buckets.get_unchecked(self.bucket_idx) }; + let bucket = &buckets[self.bucket_idx]; let ptr = self.bucket_ptr(bucket); - debug_assert!(self.index_in_bucket < self.entries()); + debug_assert!(self.index_in_bucket < self.bucket_idx.capacity()); // SAFETY: `bucket` was allocated (so <= isize in total bytes) to hold `entries`, so this // must be inbounds. let slot = unsafe { ptr.add(self.index_in_bucket) }; @@ -209,12 +187,10 @@ fn initialize_bucket<V>(bucket: &AtomicPtr<Slot<V>>, bucket_idx: usize) -> *mut /// Inserts into the map, given that the slot is unique, so it won't race with other threads. #[inline] unsafe fn put_unique<V>(&self, buckets: &[AtomicPtr<Slot<V>>; 21], value: V, extra: u32) { - // SAFETY: `bucket_idx` is ilog2(u32).saturating_sub(11), which is at most 21, i.e., - // in-bounds of buckets. 
- let bucket = unsafe { buckets.get_unchecked(self.bucket_idx) }; + let bucket = &buckets[self.bucket_idx]; let ptr = self.bucket_ptr(bucket); - debug_assert!(self.index_in_bucket < self.entries()); + debug_assert!(self.index_in_bucket < self.bucket_idx.capacity()); // SAFETY: `bucket` was allocated (so <= isize in total bytes) to hold `entries`, so this // must be inbounds. let slot = unsafe { ptr.add(self.index_in_bucket) }; @@ -254,7 +230,7 @@ pub struct VecCache<K: Idx, V, I> { // ... // Bucket 19: 1073741824 // Bucket 20: 2147483648 - // The total number of entries if all buckets are initialized is u32::MAX-1. + // The total number of entries if all buckets are initialized is 2^32. buckets: [AtomicPtr<Slot<V>>; BUCKETS], // In the compiler's current usage these are only *read* during incremental and self-profiling. @@ -289,7 +265,7 @@ fn drop(&mut self) { assert!(!std::mem::needs_drop::<K>()); assert!(!std::mem::needs_drop::<V>()); - for (idx, bucket) in self.buckets.iter().enumerate() { + for (idx, bucket) in BucketIndex::enumerate_buckets(&self.buckets) { let bucket = bucket.load(Ordering::Acquire); if !bucket.is_null() { let layout = std::alloc::Layout::array::<Slot<V>>(ENTRIES_BY_BUCKET[idx]).unwrap(); @@ -299,7 +275,7 @@ fn drop(&mut self) { } } - for (idx, bucket) in self.present.iter().enumerate() { + for (idx, bucket) in BucketIndex::enumerate_buckets(&self.present) { let bucket = bucket.load(Ordering::Acquire); if !bucket.is_null() { let layout = std::alloc::Layout::array::<Slot<()>>(ENTRIES_BY_BUCKET[idx]).unwrap(); @@ -365,5 +341,164 @@ pub fn len(&self) -> usize { } } -#[cfg(test)] -mod tests; +/// Index into an array of buckets. +/// +/// Using an enum lets us tell the compiler that values range from 0 to 20, +/// allowing array bounds checks to be optimized away, +/// without having to resort to pattern types or other unstable features. 
+#[derive(Clone, Copy, PartialEq, Eq)] +#[repr(usize)] +enum BucketIndex { + // tidy-alphabetical-start + Bucket00, + Bucket01, + Bucket02, + Bucket03, + Bucket04, + Bucket05, + Bucket06, + Bucket07, + Bucket08, + Bucket09, + Bucket10, + Bucket11, + Bucket12, + Bucket13, + Bucket14, + Bucket15, + Bucket16, + Bucket17, + Bucket18, + Bucket19, + Bucket20, + // tidy-alphabetical-end +} + +impl Debug for BucketIndex { + fn fmt(&self, f: &mut fmt::Formatter<'_>) -> fmt::Result { + Debug::fmt(&self.to_usize(), f) + } +} + +impl BucketIndex { + /// Capacity of bucket 0 (and also of bucket 1). + const BUCKET_0_CAPACITY: usize = 1 << (Self::NONZERO_BUCKET_SHIFT_ADJUST + 1); + /// Adjustment factor from the highest-set-bit-position of a flat index, + /// to its corresponding bucket number. + /// + /// For example, the first flat-index in bucket 2 is 8192. + /// Its highest-set-bit-position is `(8192).ilog2() == 13`, and subtracting + /// the adjustment factor of 11 gives the bucket number of 2. + const NONZERO_BUCKET_SHIFT_ADJUST: usize = 11; + + #[inline(always)] + const fn to_usize(self) -> usize { + self as usize + } + + #[inline(always)] + const fn from_raw(raw: usize) -> Self { + match raw { + // tidy-alphabetical-start + 00 => Self::Bucket00, + 01 => Self::Bucket01, + 02 => Self::Bucket02, + 03 => Self::Bucket03, + 04 => Self::Bucket04, + 05 => Self::Bucket05, + 06 => Self::Bucket06, + 07 => Self::Bucket07, + 08 => Self::Bucket08, + 09 => Self::Bucket09, + 10 => Self::Bucket10, + 11 => Self::Bucket11, + 12 => Self::Bucket12, + 13 => Self::Bucket13, + 14 => Self::Bucket14, + 15 => Self::Bucket15, + 16 => Self::Bucket16, + 17 => Self::Bucket17, + 18 => Self::Bucket18, + 19 => Self::Bucket19, + 20 => Self::Bucket20, + // tidy-alphabetical-end + _ => panic!("bucket index out of range"), + } + } + + /// Total number of slots in this bucket. 
+    #[inline(always)]
+    const fn capacity(self) -> usize {
+        match self {
+            Self::Bucket00 => Self::BUCKET_0_CAPACITY,
+            // Bucket 1 has a capacity of `1 << (1 + 11) == pow(2, 12) == 4096`.
+            // Bucket 2 has a capacity of `1 << (2 + 11) == pow(2, 13) == 8192`.
+            _ => 1 << (self.to_usize() + Self::NONZERO_BUCKET_SHIFT_ADJUST),
+        }
+    }
+
+    /// Converts a flat index in the range `0..=u32::MAX` into a bucket index,
+    /// and a slot offset within that bucket.
+    ///
+    /// Panics if `flat > u32::MAX`.
+    #[inline(always)]
+    const fn from_flat_index(flat: usize) -> (Self, usize) {
+        if flat > u32::MAX as usize {
+            panic!();
+        }
+
+        // If the index is in bucket 0, the conversion is trivial.
+        // This also avoids calling `ilog2` when `flat == 0`.
+        if flat < Self::BUCKET_0_CAPACITY {
+            return (Self::Bucket00, flat);
+        }
+
+        // General-case conversion for a non-zero bucket index.
+        //
+        //        | bucket | slot
+        //  flat  | ilog2  | index | offset
+        // ------------------------------
+        //  4096  |   12   |   1   |   0
+        //  4097  |   12   |   1   |   1
+        //  ...
+        //  8191  |   12   |   1   | 4095
+        //  8192  |   13   |   2   |   0
+        let highest_bit_pos = flat.ilog2() as usize;
+        let bucket_index =
+            BucketIndex::from_raw(highest_bit_pos - Self::NONZERO_BUCKET_SHIFT_ADJUST);
+
+        // Clear the highest-set bit (which selects the bucket) to get the
+        // slot offset within this bucket.
+        let slot_offset = flat - (1 << highest_bit_pos);
+
+        (bucket_index, slot_offset)
+    }
+
+    #[inline(always)]
+    fn iter_all() -> impl ExactSizeIterator<Item = Self> {
+        (0usize..BUCKETS).map(BucketIndex::from_raw)
+    }
+
+    #[inline(always)]
+    fn enumerate_buckets<T>(buckets: &[T; BUCKETS]) -> impl ExactSizeIterator<Item = (Self, &T)> {
+        BucketIndex::iter_all().zip(buckets)
+    }
+}
+
+impl<T> Index<BucketIndex> for [T; BUCKETS] {
+    type Output = T;
+
+    #[inline(always)]
+    fn index(&self, index: BucketIndex) -> &Self::Output {
+        // The optimizer should be able to see that a bucket index is
+        // always in-bounds, and omit the runtime bounds check.
+ &self[index.to_usize()] + } +} + +impl<T> IndexMut<BucketIndex> for [T; BUCKETS] { + #[inline(always)] + fn index_mut(&mut self, index: BucketIndex) -> &mut Self::Output { + &mut self[index.to_usize()] + } +}
diff --git a/compiler/rustc_data_structures/src/vec_cache/tests.rs b/compiler/rustc_data_structures/src/vec_cache/tests.rs index f588442..f12937a 100644 --- a/compiler/rustc_data_structures/src/vec_cache/tests.rs +++ b/compiler/rustc_data_structures/src/vec_cache/tests.rs
@@ -1,10 +1,46 @@ use super::*; #[test] +#[should_panic(expected = "bucket index out of range")] +fn bucket_index_n_buckets() { + BucketIndex::from_raw(BUCKETS); +} + +#[test] +fn bucket_index_round_trip() { + for i in 0..BUCKETS { + assert_eq!(BucketIndex::from_raw(i).to_usize(), i); + } +} + +#[test] +fn bucket_index_iter_all_len() { + let len = BucketIndex::iter_all().len(); + assert_eq!(len, BUCKETS); + + let len = BucketIndex::iter_all().collect::<Vec<_>>().len(); + assert_eq!(len, BUCKETS); + + let len = BucketIndex::enumerate_buckets(&[(); BUCKETS]).len(); + assert_eq!(len, BUCKETS); +} + +#[test] +fn bucket_index_capacity() { + // Check that the combined capacity of all buckets is 2^32 slots. + // That's 1 larger than `u32::MAX`, so store the total as a `u64`. + let mut total = 0u64; + for i in BucketIndex::iter_all() { + total += u64::try_from(i.capacity()).unwrap(); + } + assert_eq!(total, 1 << 32); +} + +#[test] #[cfg(not(miri))] -fn vec_cache_empty() { +fn vec_cache_empty_exhaustive() { let cache: VecCache<u32, u32, u32> = VecCache::default(); - for key in 0..u32::MAX { + for key in 0..=u32::MAX { assert!(cache.lookup(&key).is_none()); } } @@ -70,8 +106,8 @@ fn slot_entries_table() { #[test] fn bucket_entries_matches() { - for i in 0..BUCKETS { - assert_eq!(SlotIndex { bucket_idx: i, index_in_bucket: 0 }.entries(), ENTRIES_BY_BUCKET[i]); + for i in BucketIndex::iter_all() { + assert_eq!(i.capacity(), ENTRIES_BY_BUCKET[i]); } } @@ -84,13 +120,13 @@ fn slot_index_exhaustive() { } let slot_idx = SlotIndex::from_index(0); assert_eq!(slot_idx.index_in_bucket, 0); - assert_eq!(slot_idx.bucket_idx, 0); + assert_eq!(slot_idx.bucket_idx, BucketIndex::Bucket00); let mut prev = slot_idx; for idx in 1..=u32::MAX { let slot_idx = SlotIndex::from_index(idx); // SAFETY: Ensure indices don't go out of bounds of buckets. 
- assert!(slot_idx.index_in_bucket < slot_idx.entries()); + assert!(slot_idx.index_in_bucket < slot_idx.bucket_idx.capacity()); if prev.bucket_idx == slot_idx.bucket_idx { assert_eq!(prev.index_in_bucket + 1, slot_idx.index_in_bucket); @@ -98,8 +134,8 @@ fn slot_index_exhaustive() { assert_eq!(slot_idx.index_in_bucket, 0); } - assert_eq!(buckets[slot_idx.bucket_idx], slot_idx.entries() as u32); - assert_eq!(ENTRIES_BY_BUCKET[slot_idx.bucket_idx], slot_idx.entries(), "{}", idx); + assert_eq!(buckets[slot_idx.bucket_idx], slot_idx.bucket_idx.capacity() as u32); + assert_eq!(ENTRIES_BY_BUCKET[slot_idx.bucket_idx], slot_idx.bucket_idx.capacity(), "{idx}",); prev = slot_idx; }
diff --git a/compiler/rustc_incremental/src/persist/save.rs b/compiler/rustc_incremental/src/persist/save.rs index 1de64bb..dc14a8c 100644 --- a/compiler/rustc_incremental/src/persist/save.rs +++ b/compiler/rustc_incremental/src/persist/save.rs
@@ -6,6 +6,7 @@ use rustc_middle::dep_graph::{ DepGraph, SerializedDepGraph, WorkProduct, WorkProductId, WorkProductMap, }; +use rustc_middle::query::on_disk_cache; use rustc_middle::ty::TyCtxt; use rustc_serialize::Encodable as RustcEncodable; use rustc_serialize::opaque::FileEncoder; @@ -82,7 +83,7 @@ pub(crate) fn save_dep_graph(tcx: TyCtxt<'_>) { file_format::save_in(sess, query_cache_path, "query cache", |encoder| { tcx.sess.time("incr_comp_serialize_result_cache", || { - on_disk_cache.serialize(tcx, encoder) + on_disk_cache::OnDiskCache::serialize(tcx, encoder) }) }); });
diff --git a/compiler/rustc_middle/src/dep_graph/graph.rs b/compiler/rustc_middle/src/dep_graph/graph.rs index 0f50abb..444d025 100644 --- a/compiler/rustc_middle/src/dep_graph/graph.rs +++ b/compiler/rustc_middle/src/dep_graph/graph.rs
@@ -709,7 +709,7 @@ fn encode_side_effect<'tcx>( // side effect. std::iter::once(DepNodeIndex::FOREVER_RED_NODE).collect(), ); - tcx.store_side_effect(dep_node_index, side_effect); + tcx.query_system.side_effects.borrow_mut().insert(dep_node_index, side_effect); dep_node_index } @@ -718,7 +718,13 @@ fn encode_side_effect<'tcx>( #[inline] fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNodeIndex) { with_deps(TaskDepsRef::Ignore, || { - let side_effect = tcx.load_side_effect(prev_index).unwrap(); + let side_effect = tcx + .query_system + .on_disk_cache + .as_ref() + .unwrap() + .load_side_effect(tcx, prev_index) + .unwrap(); // Use `send_and_color` as `promote_node_and_deps_to_current` expects all // green dependencies. `send_and_color` will also prevent multiple nodes @@ -745,7 +751,7 @@ fn force_side_effect<'tcx>(&self, tcx: TyCtxt<'tcx>, prev_index: SerializedDepNo } // This will just overwrite the same value for concurrent calls. - tcx.store_side_effect(dep_node_index, side_effect); + tcx.query_system.side_effects.borrow_mut().insert(dep_node_index, side_effect); }) } @@ -1549,23 +1555,4 @@ impl<'tcx> TyCtxt<'tcx> { fn is_eval_always(self, kind: DepKind) -> bool { self.dep_kind_vtable(kind).is_eval_always } - - // Interactions with on_disk_cache - fn load_side_effect( - self, - prev_dep_node_index: SerializedDepNodeIndex, - ) -> Option<QuerySideEffect> { - self.query_system - .on_disk_cache - .as_ref() - .and_then(|c| c.load_side_effect(self, prev_dep_node_index)) - } - - #[inline(never)] - #[cold] - fn store_side_effect(self, dep_node_index: DepNodeIndex, side_effect: QuerySideEffect) { - if let Some(c) = self.query_system.on_disk_cache.as_ref() { - c.store_side_effect(dep_node_index, side_effect) - } - } }
diff --git a/compiler/rustc_middle/src/dep_graph/serialized.rs b/compiler/rustc_middle/src/dep_graph/serialized.rs index e84d117..ef5e3d9 100644 --- a/compiler/rustc_middle/src/dep_graph/serialized.rs +++ b/compiler/rustc_middle/src/dep_graph/serialized.rs
@@ -72,6 +72,15 @@ pub struct SerializedDepNodeIndex {} } +impl SerializedDepNodeIndex { + /// Converts a current-session dep node index to a "serialized" index, + /// for the purpose of serializing data to be loaded by future sessions. + #[inline(always)] + pub fn from_curr_for_serialization(index: DepNodeIndex) -> Self { + SerializedDepNodeIndex::from_u32(index.as_u32()) + } +} + const DEP_NODE_SIZE: usize = size_of::<SerializedDepNodeIndex>(); /// Amount of padding we need to add to the edge list data so that we can retrieve every /// SerializedDepNodeIndex with a fixed-size read then mask.
diff --git a/compiler/rustc_middle/src/hooks/mod.rs b/compiler/rustc_middle/src/hooks/mod.rs index 691096f..c70ceef 100644 --- a/compiler/rustc_middle/src/hooks/mod.rs +++ b/compiler/rustc_middle/src/hooks/mod.rs
@@ -9,7 +9,7 @@ use rustc_span::{ExpnHash, ExpnId}; use crate::mir; -use crate::query::on_disk_cache::{CacheEncoder, EncodedDepNodeIndex}; +use crate::query::on_disk_cache::CacheEncoder; use crate::ty::{Ty, TyCtxt}; macro_rules! declare_hooks { @@ -111,10 +111,8 @@ fn clone(&self) -> Self { *self } /// Creates the MIR for a given `DefId`, including unreachable code. hook build_mir_inner_impl(def: LocalDefId) -> mir::Body<'tcx>; - hook encode_query_values( - encoder: &mut CacheEncoder<'_, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex - ) -> (); + /// Serializes all eligible query return values into the on-disk cache. + hook encode_query_values(encoder: &mut CacheEncoder<'_, 'tcx>) -> (); } #[cold]
diff --git a/compiler/rustc_middle/src/queries.rs b/compiler/rustc_middle/src/queries.rs index c5dab80..ff96f50 100644 --- a/compiler/rustc_middle/src/queries.rs +++ b/compiler/rustc_middle/src/queries.rs
@@ -131,10 +131,10 @@ // `Providers` that the driver creates (using several `rustc_*` crates). // // The result type of each query must implement `Clone`. Additionally -// `ty::query::from_cycle_error::FromCycleError` can be implemented which produces an appropriate +// `QueryVTable::handle_cycle_error_fn` can be used to produce an appropriate // placeholder (error) value if the query resulted in a query cycle. -// Queries without a `FromCycleError` implementation will raise a fatal error on query -// cycles instead. +// Queries without a custom `handle_cycle_error_fn` implementation will raise a +// fatal error on query cycles instead. rustc_queries! { /// Caches the expansion of a derive proc macro, e.g. `#[derive(Serialize)]`. /// The key is: @@ -577,7 +577,7 @@ /// Checks whether a type is representable or infinitely sized // - // Infinitely sized types will cause a cycle. The `value_from_cycle_error` impl will print + // Infinitely sized types will cause a cycle. The query's `handle_cycle_error_fn` will print // a custom error about the infinite size and then abort compilation. (In the past we // recovered and continued, but in practice that leads to confusing subsequent error // messages about cycles that then abort.)
diff --git a/compiler/rustc_middle/src/query/job.rs b/compiler/rustc_middle/src/query/job.rs index 3bf37a7..24c4daf 100644 --- a/compiler/rustc_middle/src/query/job.rs +++ b/compiler/rustc_middle/src/query/job.rs
@@ -6,7 +6,7 @@ use parking_lot::{Condvar, Mutex}; use rustc_span::Span; -use crate::query::CycleError; +use crate::query::Cycle; use crate::ty::TyCtxt; /// A value uniquely identifying an active query job. @@ -59,7 +59,7 @@ pub struct QueryWaiter<'tcx> { pub parent: Option<QueryJobId>, pub condvar: Condvar, pub span: Span, - pub cycle: Mutex<Option<CycleError<'tcx>>>, + pub cycle: Mutex<Option<Cycle<'tcx>>>, } #[derive(Clone, Debug)] @@ -79,7 +79,7 @@ pub fn wait_on( tcx: TyCtxt<'tcx>, query: Option<QueryJobId>, span: Span, - ) -> Result<(), CycleError<'tcx>> { + ) -> Result<(), Cycle<'tcx>> { let mut waiters_guard = self.waiters.lock(); let Some(waiters) = &mut *waiters_guard else { return Ok(()); // already complete
diff --git a/compiler/rustc_middle/src/query/mod.rs b/compiler/rustc_middle/src/query/mod.rs index 9c012f2..b7e5e9b 100644 --- a/compiler/rustc_middle/src/query/mod.rs +++ b/compiler/rustc_middle/src/query/mod.rs
@@ -5,8 +5,8 @@ pub use self::job::{QueryJob, QueryJobId, QueryLatch, QueryWaiter}; pub use self::keys::{AsLocalQueryKey, LocalCrate, QueryKey}; pub use self::plumbing::{ - ActiveKeyStatus, CycleError, EnsureMode, QueryMode, QueryState, QuerySystem, QueryVTable, - TyCtxtAt, TyCtxtEnsureDone, TyCtxtEnsureOk, TyCtxtEnsureResult, + ActiveKeyStatus, Cycle, EnsureMode, QueryMode, QueryState, QuerySystem, QueryVTable, TyCtxtAt, + TyCtxtEnsureDone, TyCtxtEnsureOk, TyCtxtEnsureResult, }; pub use self::stack::QueryStackFrame; pub use crate::queries::Providers;
diff --git a/compiler/rustc_middle/src/query/on_disk_cache.rs b/compiler/rustc_middle/src/query/on_disk_cache.rs index 607891d..b6fd306 100644 --- a/compiler/rustc_middle/src/query/on_disk_cache.rs +++ b/compiler/rustc_middle/src/query/on_disk_cache.rs
@@ -2,14 +2,14 @@ use std::sync::Arc; use std::{fmt, mem}; -use rustc_data_structures::fx::{FxHashMap, FxIndexMap, FxIndexSet}; +use rustc_data_structures::fx::{FxHashMap, FxIndexSet}; use rustc_data_structures::memmap::Mmap; use rustc_data_structures::sync::{HashMapExt, Lock, RwLock}; use rustc_data_structures::unhash::UnhashMap; use rustc_data_structures::unord::{UnordMap, UnordSet}; use rustc_hir::def_id::{CrateNum, DefId, DefIndex, LOCAL_CRATE, LocalDefId, StableCrateId}; use rustc_hir::definitions::DefPathHash; -use rustc_index::{Idx, IndexVec}; +use rustc_index::IndexVec; use rustc_macros::{Decodable, Encodable}; use rustc_serialize::opaque::{FileEncodeResult, FileEncoder, IntEncodedWithFixedSize, MemDecoder}; use rustc_serialize::{Decodable, Decoder, Encodable, Encoder}; @@ -53,22 +53,18 @@ pub struct OnDiskCache { // The complete cache data in serialized form. serialized_data: RwLock<Option<Mmap>>, - // Collects all `QuerySideEffect` created during the current compilation - // session. - current_side_effects: Lock<FxIndexMap<DepNodeIndex, QuerySideEffect>>, - file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>, // Caches that are populated lazily during decoding. file_index_to_file: Lock<FxHashMap<SourceFileIndex, Arc<SourceFile>>>, - // A map from dep-node to the position of the cached query result in - // `serialized_data`. - query_result_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, + /// For query dep nodes that have a disk-cached return value, maps the node + /// index to the position of its serialized value in `serialized_data`. + query_values_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, - // A map from dep-node to the position of any associated `QuerySideEffect` in - // `serialized_data`. 
- prev_side_effects_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, + /// For `DepKind::SideEffect` dep nodes, maps the node index to the position + /// of its serialized [`QuerySideEffect`] in `serialized_data`. + side_effects_index: FxHashMap<SerializedDepNodeIndex, AbsoluteBytePos>, alloc_decoding_state: AllocDecodingState, @@ -101,8 +97,8 @@ pub struct OnDiskCache { #[derive(Encodable, Decodable)] struct Footer { file_index_to_stable_id: FxHashMap<SourceFileIndex, EncodedSourceFileId>, - query_result_index: EncodedDepNodeIndex, - side_effects_index: EncodedDepNodeIndex, + query_values_index: Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>, + side_effects_index: Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>, // The location of all allocations. // Most uses only need values up to u32::MAX, but benchmarking indicates that we can use a u64 // without measurable overhead. This permits larger const allocations without ICEing. @@ -114,8 +110,6 @@ struct Footer { foreign_expn_data: UnhashMap<ExpnHash, u32>, } -pub type EncodedDepNodeIndex = Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>; - #[derive(Copy, Clone, PartialEq, Eq, Hash, Debug, Encodable, Decodable)] struct SourceFileIndex(u32); @@ -174,9 +168,8 @@ pub fn new(sess: &Session, data: Mmap, start_pos: usize) -> Result<Self, ()> { serialized_data: RwLock::new(Some(data)), file_index_to_stable_id: footer.file_index_to_stable_id, file_index_to_file: Default::default(), - current_side_effects: Default::default(), - query_result_index: footer.query_result_index.into_iter().collect(), - prev_side_effects_index: footer.side_effects_index.into_iter().collect(), + query_values_index: footer.query_values_index.into_iter().collect(), + side_effects_index: footer.side_effects_index.into_iter().collect(), alloc_decoding_state: AllocDecodingState::new(footer.interpret_alloc_index), syntax_contexts: footer.syntax_contexts, expn_data: footer.expn_data, @@ -190,9 +183,8 @@ pub fn new_empty() -> Self { serialized_data: 
RwLock::new(None), file_index_to_stable_id: Default::default(), file_index_to_file: Default::default(), - current_side_effects: Default::default(), - query_result_index: Default::default(), - prev_side_effects_index: Default::default(), + query_values_index: Default::default(), + side_effects_index: Default::default(), alloc_decoding_state: AllocDecodingState::new(Vec::new()), syntax_contexts: FxHashMap::default(), expn_data: UnhashMap::default(), @@ -207,7 +199,9 @@ pub fn close_serialized_data_mmap(&self) { *self.serialized_data.write() = None; } - pub fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult { + /// Serialize the current-session data that will be loaded by [`OnDiskCache`] + /// in a subsequent incremental compilation session. + pub fn serialize(tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResult { // Serializing the `DepGraph` should not modify it. tcx.dep_graph.with_ignore(|| { // Allocate `SourceFileIndex`es. @@ -241,30 +235,19 @@ pub fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResu file_to_file_index, hygiene_context: &hygiene_encode_context, symbol_index_table: Default::default(), + query_values_index: Default::default(), + side_effects_index: Default::default(), }; - // Encode query results. - let mut query_result_index = EncodedDepNodeIndex::new(); - + // Encode query return values. tcx.sess.time("encode_query_values", || { - let enc = &mut encoder; - let qri = &mut query_result_index; - tcx.encode_query_values(enc, qri); + tcx.encode_query_values(&mut encoder); }); // Encode side effects. 
- let side_effects_index: EncodedDepNodeIndex = self - .current_side_effects - .borrow() - .iter() - .map(|(dep_node_index, side_effect)| { - let pos = AbsoluteBytePos::new(encoder.position()); - let dep_node_index = SerializedDepNodeIndex::new(dep_node_index.index()); - encoder.encode_tagged(dep_node_index, side_effect); - - (dep_node_index, pos) - }) - .collect(); + for (&dep_node_index, side_effect) in tcx.query_system.side_effects.borrow().iter() { + encoder.encode_side_effect(dep_node_index, side_effect); + } let interpret_alloc_index = { let mut interpret_alloc_index = Vec::new(); @@ -315,11 +298,13 @@ pub fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResu // Encode the file footer. let footer_pos = encoder.position() as u64; + let query_values_index = mem::take(&mut encoder.query_values_index); + let side_effects_index = mem::take(&mut encoder.side_effects_index); encoder.encode_tagged( TAG_FILE_FOOTER, &Footer { file_index_to_stable_id, - query_result_index, + query_values_index, side_effects_index, interpret_alloc_index, syntax_contexts, @@ -340,35 +325,25 @@ pub fn serialize(&self, tcx: TyCtxt<'_>, encoder: FileEncoder) -> FileEncodeResu } /// Loads a `QuerySideEffect` created during the previous compilation session. - pub fn load_side_effect( + pub(crate) fn load_side_effect( &self, tcx: TyCtxt<'_>, dep_node_index: SerializedDepNodeIndex, ) -> Option<QuerySideEffect> { let side_effect: Option<QuerySideEffect> = - self.load_indexed(tcx, dep_node_index, &self.prev_side_effects_index); + self.load_indexed(tcx, dep_node_index, &self.side_effects_index); side_effect } - /// Stores a `QuerySideEffect` emitted during the current compilation session. - /// Anything stored like this will be available via `load_side_effect` in - /// the next compilation session. 
- pub fn store_side_effect(&self, dep_node_index: DepNodeIndex, side_effect: QuerySideEffect) { - let mut current_side_effects = self.current_side_effects.borrow_mut(); - let prev = current_side_effects.insert(dep_node_index, side_effect); - debug_assert!(prev.is_none()); - } - - /// Return whether the cached query result can be decoded. + /// Returns true if there is a disk-cached query return value for the given node. #[inline] pub fn loadable_from_disk(&self, dep_node_index: SerializedDepNodeIndex) -> bool { - self.query_result_index.contains_key(&dep_node_index) + self.query_values_index.contains_key(&dep_node_index) // with_decoder is infallible, so we can stop here } - /// Returns the cached query result if there is something in the cache for - /// the given `SerializedDepNodeIndex`; otherwise returns `None`. - pub fn try_load_query_result<'tcx, T>( + /// Returns the disk-cached query return value for the given node, if there is one. + pub fn try_load_query_value<'tcx, T>( &self, tcx: TyCtxt<'tcx>, dep_node_index: SerializedDepNodeIndex, @@ -376,9 +351,7 @@ pub fn try_load_query_result<'tcx, T>( where T: for<'a> Decodable<CacheDecoder<'a, 'tcx>>, { - let opt_value = self.load_indexed(tcx, dep_node_index, &self.query_result_index); - debug_assert_eq!(opt_value.is_some(), self.loadable_from_disk(dep_node_index)); - opt_value + self.load_indexed(tcx, dep_node_index, &self.query_values_index) } fn load_indexed<'tcx, T>( @@ -815,6 +788,9 @@ pub struct CacheEncoder<'a, 'tcx> { hygiene_context: &'a HygieneEncodeContext, // Used for both `Symbol`s and `ByteSymbol`s. 
symbol_index_table: FxHashMap<u32, usize>, + + query_values_index: Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>, + side_effects_index: Vec<(SerializedDepNodeIndex, AbsoluteBytePos)>, } impl<'a, 'tcx> fmt::Debug for CacheEncoder<'a, 'tcx> { @@ -835,7 +811,7 @@ fn source_file_index(&mut self, source_file: Arc<SourceFile>) -> SourceFileIndex /// encode the specified tag, then the given value, then the number of /// bytes taken up by tag and value. On decoding, we can then verify that /// we get the expected tag and read the expected number of bytes. - pub fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) { + fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, value: &V) { let start_pos = self.position(); tag.encode(self); @@ -845,6 +821,20 @@ pub fn encode_tagged<T: Encodable<Self>, V: Encodable<Self>>(&mut self, tag: T, ((end_pos - start_pos) as u64).encode(self); } + pub fn encode_query_value<V: Encodable<Self>>(&mut self, index: DepNodeIndex, value: &V) { + let index = SerializedDepNodeIndex::from_curr_for_serialization(index); + + self.query_values_index.push((index, AbsoluteBytePos::new(self.position()))); + self.encode_tagged(index, value); + } + + fn encode_side_effect(&mut self, index: DepNodeIndex, side_effect: &QuerySideEffect) { + let index = SerializedDepNodeIndex::from_curr_for_serialization(index); + + self.side_effects_index.push((index, AbsoluteBytePos::new(self.position()))); + self.encode_tagged(index, side_effect); + } + // copy&paste impl from rustc_metadata fn encode_symbol_or_byte_symbol( &mut self,
diff --git a/compiler/rustc_middle/src/query/plumbing.rs b/compiler/rustc_middle/src/query/plumbing.rs index 7d94d60..ef6259b 100644 --- a/compiler/rustc_middle/src/query/plumbing.rs +++ b/compiler/rustc_middle/src/query/plumbing.rs
@@ -2,13 +2,14 @@ use std::ops::Deref; use rustc_data_structures::fingerprint::Fingerprint; +use rustc_data_structures::fx::FxIndexMap; use rustc_data_structures::hash_table::HashTable; use rustc_data_structures::sharded::Sharded; -use rustc_data_structures::sync::{AtomicU64, WorkerLocal}; +use rustc_data_structures::sync::{AtomicU64, Lock, WorkerLocal}; use rustc_errors::Diag; use rustc_span::Span; -use crate::dep_graph::{DepKind, DepNodeIndex, SerializedDepNodeIndex}; +use crate::dep_graph::{DepKind, DepNodeIndex, QuerySideEffect, SerializedDepNodeIndex}; use crate::ich::StableHashingContext; use crate::queries::{ExternProviders, Providers, QueryArenas, QueryVTables, TaggedQueryKey}; use crate::query::on_disk_cache::OnDiskCache; @@ -47,12 +48,12 @@ pub enum ActiveKeyStatus<'tcx> { } #[derive(Debug)] -pub struct CycleError<'tcx> { +pub struct Cycle<'tcx> { /// The query and related span that uses the cycle. pub usage: Option<QueryStackFrame<'tcx>>, /// The span here corresponds to the reason for which this query was required. - pub cycle: Vec<QueryStackFrame<'tcx>>, + pub frames: Vec<QueryStackFrame<'tcx>>, } #[derive(Debug)] @@ -111,13 +112,10 @@ pub struct QueryVTable<'tcx, C: QueryCache> { /// Function pointer that handles a cycle error. `error` must be consumed, e.g. with `emit` (if /// it should be emitted) or `delay_as_bug` (if it need not be emitted because an alternative - /// error is created and emitted). - pub value_from_cycle_error: fn( - tcx: TyCtxt<'tcx>, - key: C::Key, - cycle_error: CycleError<'tcx>, - error: Diag<'_>, - ) -> C::Value, + /// error is created and emitted). A value may be returned, or (more commonly) the function may + /// just abort after emitting the error. 
+ pub handle_cycle_error_fn: + fn(tcx: TyCtxt<'tcx>, key: C::Key, cycle: Cycle<'tcx>, error: Diag<'_>) -> C::Value, pub format_value: fn(&C::Value) -> String, @@ -151,6 +149,13 @@ pub struct QuerySystem<'tcx> { pub arenas: WorkerLocal<QueryArenas<'tcx>>, pub query_vtables: QueryVTables<'tcx>, + /// Side-effect associated with each [`DepKind::SideEffect`] node in the + /// current incremental-compilation session. Side effects will be written + /// to disk, and loaded by [`OnDiskCache`] in the next session. + /// + /// Always empty if incremental compilation is off. + pub side_effects: Lock<FxIndexMap<DepNodeIndex, QuerySideEffect>>, + /// This provides access to the incremental compilation on-disk cache for query results. /// Do not access this directly. It is only meant to be used by /// `DepGraph::try_mark_green()` and the query infrastructure.
diff --git a/compiler/rustc_query_impl/Cargo.toml b/compiler/rustc_query_impl/Cargo.toml index 1c3f0f9..02d3b01 100644 --- a/compiler/rustc_query_impl/Cargo.toml +++ b/compiler/rustc_query_impl/Cargo.toml
@@ -10,7 +10,6 @@ rustc_data_structures = { path = "../rustc_data_structures" } rustc_errors = { path = "../rustc_errors" } rustc_hir = { path = "../rustc_hir" } -rustc_index = { path = "../rustc_index" } rustc_macros = { path = "../rustc_macros" } rustc_middle = { path = "../rustc_middle" } rustc_serialize = { path = "../rustc_serialize" }
diff --git a/compiler/rustc_query_impl/src/dep_kind_vtables.rs b/compiler/rustc_query_impl/src/dep_kind_vtables.rs index 44b92dc..b70fe30 100644 --- a/compiler/rustc_query_impl/src/dep_kind_vtables.rs +++ b/compiler/rustc_query_impl/src/dep_kind_vtables.rs
@@ -4,7 +4,7 @@ use rustc_middle::query::QueryCache; use crate::GetQueryVTable; -use crate::plumbing::{force_from_dep_node_inner, promote_from_disk_inner}; +use crate::plumbing::promote_from_disk_inner; /// [`DepKindVTable`] constructors for special dep kinds that aren't queries. #[expect(non_snake_case, reason = "use non-snake case to avoid collision with query names")] @@ -111,10 +111,16 @@ pub(crate) fn make_dep_kind_vtable_for_query<'tcx, Q>( DepKindVTable { is_eval_always, key_fingerprint_style, - force_from_dep_node_fn: (can_recover && !is_no_force) - .then_some(force_from_dep_node_inner::<Q>), - promote_from_disk_fn: (can_recover && is_cache_on_disk) - .then_some(promote_from_disk_inner::<Q>), + force_from_dep_node_fn: (can_recover && !is_no_force).then_some( + |tcx, dep_node, _prev_index| { + let query = Q::query_vtable(tcx); + crate::execution::force_query_dep_node(tcx, query, dep_node) + }, + ), + promote_from_disk_fn: (can_recover && is_cache_on_disk).then_some(|tcx, dep_node| { + let query = Q::query_vtable(tcx); + promote_from_disk_inner(tcx, query, dep_node) + }), } }
diff --git a/compiler/rustc_query_impl/src/execution.rs b/compiler/rustc_query_impl/src/execution.rs index 42d5a78..8844d40 100644 --- a/compiler/rustc_query_impl/src/execution.rs +++ b/compiler/rustc_query_impl/src/execution.rs
@@ -8,8 +8,8 @@ use rustc_errors::FatalError; use rustc_middle::dep_graph::{DepGraphData, DepNodeKey, SerializedDepNodeIndex}; use rustc_middle::query::{ - ActiveKeyStatus, CycleError, EnsureMode, QueryCache, QueryJob, QueryJobId, QueryKey, - QueryLatch, QueryMode, QueryState, QueryVTable, + ActiveKeyStatus, Cycle, EnsureMode, QueryCache, QueryJob, QueryJobId, QueryKey, QueryLatch, + QueryMode, QueryState, QueryVTable, }; use rustc_middle::ty::TyCtxt; use rustc_middle::verify_ich::incremental_verify_ich; @@ -17,7 +17,7 @@ use tracing::warn; use crate::dep_graph::{DepNode, DepNodeIndex}; -use crate::job::{QueryJobInfo, QueryJobMap, find_cycle_in_stack, report_cycle}; +use crate::job::{QueryJobInfo, QueryJobMap, create_cycle_error, find_cycle_in_stack}; use crate::plumbing::{current_query_job, loadable_from_disk, next_job_id, start_query}; use crate::query_impl::for_each_query_vtable; @@ -108,14 +108,14 @@ fn collect_active_query_jobs_inner<'tcx, C>( #[cold] #[inline(never)] -fn mk_cycle<'tcx, C: QueryCache>( +fn handle_cycle<'tcx, C: QueryCache>( query: &'tcx QueryVTable<'tcx, C>, tcx: TyCtxt<'tcx>, key: C::Key, - cycle_error: CycleError<'tcx>, + cycle: Cycle<'tcx>, ) -> C::Value { - let error = report_cycle(tcx, &cycle_error); - (query.value_from_cycle_error)(tcx, key, cycle_error, error) + let error = create_cycle_error(tcx, &cycle); + (query.handle_cycle_error_fn)(tcx, key, cycle, error) } /// Guard object representing the responsibility to execute a query job and @@ -194,7 +194,7 @@ fn drop(&mut self) { #[cold] #[inline(never)] -fn cycle_error<'tcx, C: QueryCache>( +fn find_and_handle_cycle<'tcx, C: QueryCache>( query: &'tcx QueryVTable<'tcx, C>, tcx: TyCtxt<'tcx>, key: C::Key, @@ -205,8 +205,8 @@ fn cycle_error<'tcx, C: QueryCache>( // We need the complete map to ensure we find a cycle to break. 
 let job_map = collect_active_query_jobs(tcx, CollectActiveJobsKind::FullNoContention); - let error = find_cycle_in_stack(try_execute, job_map, &current_query_job(), span); - (mk_cycle(query, tcx, key, error), None) + let cycle = find_cycle_in_stack(try_execute, job_map, &current_query_job(), span); + (handle_cycle(query, tcx, key, cycle), None) } #[inline(always)] @@ -250,7 +250,7 @@ fn wait_for_query<'tcx, C: QueryCache>( (v, Some(index)) } - Err(cycle) => (mk_cycle(query, tcx, key, cycle), None), + Err(cycle) => (handle_cycle(query, tcx, key, cycle), None), } } @@ -330,7 +330,7 @@ fn try_execute_query<'tcx, C: QueryCache, const INCR: bool>( // If we are single-threaded we know that we have cycle error, // so we just return the error. - cycle_error(query, tcx, key, id, span) + find_and_handle_cycle(query, tcx, key, id, span) } } ActiveKeyStatus::Poisoned => FatalError.raise(), @@ -624,20 +624,26 @@ pub(super) fn execute_query_incr_inner<'tcx, C: QueryCache>( Some(result) } -pub(crate) fn force_query<'tcx, C: QueryCache>( - query: &'tcx QueryVTable<'tcx, C>, +/// Inner implementation of [`DepKindVTable::force_from_dep_node_fn`][force_fn] +/// for query nodes. +/// +/// [force_fn]: rustc_middle::dep_graph::DepKindVTable::force_from_dep_node_fn +pub(crate) fn force_query_dep_node<'tcx, C: QueryCache>( tcx: TyCtxt<'tcx>, - key: C::Key, + query: &'tcx QueryVTable<'tcx, C>, dep_node: DepNode, -) { - // We may be concurrently trying both execute and force a query. - // Ensure that only one of them runs the query. - if let Some((_, index)) = query.cache.lookup(&key) { - tcx.prof.query_cache_hit(index.into()); - return; - } +) -> bool { + let Some(key) = C::Key::try_recover_key(tcx, &dep_node) else { + // We couldn't recover a key from the node's key fingerprint. + // Tell the caller that we couldn't force the node. 
+ return false; + }; ensure_sufficient_stack(|| { try_execute_query::<C, true>(query, tcx, DUMMY_SP, key, Some(dep_node)) }); + + // We did manage to recover a key and force the node, though it's up to + // the caller to check whether the node ended up marked red or green. + true }
diff --git a/compiler/rustc_query_impl/src/from_cycle_error.rs b/compiler/rustc_query_impl/src/handle_cycle_error.rs similarity index 92% rename from compiler/rustc_query_impl/src/from_cycle_error.rs rename to compiler/rustc_query_impl/src/handle_cycle_error.rs index 0c6082e..5676669 100644 --- a/compiler/rustc_query_impl/src/from_cycle_error.rs +++ b/compiler/rustc_query_impl/src/handle_cycle_error.rs
@@ -10,33 +10,33 @@ use rustc_hir::def::{DefKind, Res}; use rustc_middle::bug; use rustc_middle::queries::{QueryVTables, TaggedQueryKey}; -use rustc_middle::query::CycleError; +use rustc_middle::query::Cycle; use rustc_middle::query::erase::erase_val; use rustc_middle::ty::layout::LayoutError; use rustc_middle::ty::{self, Ty, TyCtxt}; use rustc_span::def_id::{DefId, LocalDefId}; use rustc_span::{ErrorGuaranteed, Span}; -use crate::job::report_cycle; +use crate::job::create_cycle_error; pub(crate) fn specialize_query_vtables<'tcx>(vtables: &mut QueryVTables<'tcx>) { - vtables.fn_sig.value_from_cycle_error = |tcx, key, _, err| { + vtables.fn_sig.handle_cycle_error_fn = |tcx, key, _, err| { let guar = err.delay_as_bug(); erase_val(fn_sig(tcx, key, guar)) }; - vtables.check_representability.value_from_cycle_error = + vtables.check_representability.handle_cycle_error_fn = |tcx, _, cycle, _err| check_representability(tcx, cycle); - vtables.check_representability_adt_ty.value_from_cycle_error = + vtables.check_representability_adt_ty.handle_cycle_error_fn = |tcx, _, cycle, _err| check_representability(tcx, cycle); - vtables.variances_of.value_from_cycle_error = |tcx, key, _, err| { + vtables.variances_of.handle_cycle_error_fn = |tcx, key, _, err| { let _guar = err.delay_as_bug(); erase_val(variances_of(tcx, key)) }; - vtables.layout_of.value_from_cycle_error = |tcx, _, cycle, err| { + vtables.layout_of.handle_cycle_error_fn = |tcx, _, cycle, err| { let _guar = err.delay_as_bug(); erase_val(Err(layout_of(tcx, cycle))) } @@ -72,10 +72,10 @@ fn fn_sig<'tcx>( ))) } -fn check_representability<'tcx>(tcx: TyCtxt<'tcx>, cycle_error: CycleError<'tcx>) -> ! { +fn check_representability<'tcx>(tcx: TyCtxt<'tcx>, cycle: Cycle<'tcx>) -> ! 
{ let mut item_and_field_ids = Vec::new(); let mut representable_ids = FxHashSet::default(); - for frame in &cycle_error.cycle { + for frame in &cycle.frames { if let TaggedQueryKey::check_representability(def_id) = frame.tagged_key && tcx.def_kind(def_id) == DefKind::Field { @@ -88,7 +88,7 @@ fn check_representability<'tcx>(tcx: TyCtxt<'tcx>, cycle_error: CycleError<'tcx> item_and_field_ids.push((item_id.expect_local(), field_id)); } } - for frame in &cycle_error.cycle { + for frame in &cycle.frames { if let TaggedQueryKey::check_representability_adt_ty(key) = frame.tagged_key && let Some(adt) = key.ty_adt_def() && let Some(def_id) = adt.did().as_local() @@ -127,14 +127,11 @@ fn search_for_cycle_permutation<Q, T>( otherwise() } -fn layout_of<'tcx>( - tcx: TyCtxt<'tcx>, - cycle_error: CycleError<'tcx>, -) -> &'tcx ty::layout::LayoutError<'tcx> { +fn layout_of<'tcx>(tcx: TyCtxt<'tcx>, cycle: Cycle<'tcx>) -> &'tcx ty::layout::LayoutError<'tcx> { let diag = search_for_cycle_permutation( - &cycle_error.cycle, - |cycle| { - if let TaggedQueryKey::layout_of(key) = cycle[0].tagged_key + &cycle.frames, + |frames| { + if let TaggedQueryKey::layout_of(key) = frames[0].tagged_key && let ty::Coroutine(def_id, _) = key.value.kind() && let Some(def_id) = def_id.as_local() && let def_kind = tcx.def_kind(def_id) @@ -158,7 +155,7 @@ fn layout_of<'tcx>( tcx.def_kind_descr_article(def_kind, def_id.to_def_id()), tcx.def_kind_descr(def_kind, def_id.to_def_id()), ); - for (i, frame) in cycle.iter().enumerate() { + for (i, frame) in frames.iter().enumerate() { let TaggedQueryKey::layout_of(frame_key) = frame.tagged_key else { continue; }; @@ -169,7 +166,7 @@ fn layout_of<'tcx>( continue; }; let frame_span = - frame.tagged_key.default_span(tcx, cycle[(i + 1) % cycle.len()].span); + frame.tagged_key.default_span(tcx, frames[(i + 1) % frames.len()].span); if frame_span.is_dummy() { continue; } @@ -203,7 +200,7 @@ fn layout_of<'tcx>( ControlFlow::Continue(()) } }, - || report_cycle(tcx, 
&cycle_error), + || create_cycle_error(tcx, &cycle), ); let guar = diag.emit();
diff --git a/compiler/rustc_query_impl/src/job.rs b/compiler/rustc_query_impl/src/job.rs index 213fc79..a27a6f4 100644 --- a/compiler/rustc_query_impl/src/job.rs +++ b/compiler/rustc_query_impl/src/job.rs
@@ -7,9 +7,7 @@ use rustc_errors::{Diag, DiagCtxtHandle}; use rustc_hir::def::DefKind; use rustc_middle::queries::TaggedQueryKey; -use rustc_middle::query::{ - CycleError, QueryJob, QueryJobId, QueryLatch, QueryStackFrame, QueryWaiter, -}; +use rustc_middle::query::{Cycle, QueryJob, QueryJobId, QueryLatch, QueryStackFrame, QueryWaiter}; use rustc_middle::ty::TyCtxt; use rustc_span::{DUMMY_SP, Span}; @@ -58,29 +56,28 @@ pub(crate) fn find_cycle_in_stack<'tcx>( job_map: QueryJobMap<'tcx>, current_job: &Option<QueryJobId>, span: Span, -) -> CycleError<'tcx> { - // Find the waitee amongst `current_job` parents - let mut cycle = Vec::new(); +) -> Cycle<'tcx> { + // Find the waitee amongst `current_job` parents. + let mut frames = Vec::new(); let mut current_job = Option::clone(current_job); while let Some(job) = current_job { let info = &job_map.map[&job]; - cycle.push(QueryStackFrame { span: info.job.span, tagged_key: info.tagged_key }); + frames.push(QueryStackFrame { span: info.job.span, tagged_key: info.tagged_key }); if job == id { - cycle.reverse(); + frames.reverse(); - // This is the end of the cycle - // The span entry we included was for the usage - // of the cycle itself, and not part of the cycle - // Replace it with the span which caused the cycle to form - cycle[0].span = span; - // Find out why the cycle itself was used + // This is the end of the cycle. The span entry we included was for + // the usage of the cycle itself, and not part of the cycle. + // Replace it with the span which caused the cycle to form. + frames[0].span = span; + // Find out why the cycle itself was used. 
let usage = try { let parent = info.job.parent?; QueryStackFrame { span: info.job.span, tagged_key: job_map.tagged_key_of(parent) } }; - return CycleError { usage, cycle }; + return Cycle { usage, frames }; } current_job = info.job.parent; @@ -319,9 +316,9 @@ struct EntryPoint { .map(|(span, job)| QueryStackFrame { span, tagged_key: job_map.tagged_key_of(job) }); // Create the cycle error - let error = CycleError { + let error = Cycle { usage, - cycle: stack + frames: stack .iter() .map(|&(span, job)| QueryStackFrame { span, @@ -454,27 +451,27 @@ pub fn print_query_stack<'tcx>( #[inline(never)] #[cold] -pub(crate) fn report_cycle<'tcx>( +pub(crate) fn create_cycle_error<'tcx>( tcx: TyCtxt<'tcx>, - CycleError { usage, cycle: stack }: &CycleError<'tcx>, + Cycle { usage, frames }: &Cycle<'tcx>, ) -> Diag<'tcx> { - assert!(!stack.is_empty()); + assert!(!frames.is_empty()); - let span = stack[0].tagged_key.default_span(tcx, stack[1 % stack.len()].span); + let span = frames[0].tagged_key.default_span(tcx, frames[1 % frames.len()].span); let mut cycle_stack = Vec::new(); use crate::error::StackCount; - let stack_bottom = stack[0].tagged_key.description(tcx); - let stack_count = if stack.len() == 1 { + let stack_bottom = frames[0].tagged_key.description(tcx); + let stack_count = if frames.len() == 1 { StackCount::Single { stack_bottom: stack_bottom.clone() } } else { StackCount::Multiple { stack_bottom: stack_bottom.clone() } }; - for i in 1..stack.len() { - let frame = &stack[i]; - let span = frame.tagged_key.default_span(tcx, stack[(i + 1) % stack.len()].span); + for i in 1..frames.len() { + let frame = &frames[i]; + let span = frame.tagged_key.default_span(tcx, frames[(i + 1) % frames.len()].span); cycle_stack .push(crate::error::CycleStack { span, desc: frame.tagged_key.description(tcx) }); } @@ -484,12 +481,12 @@ pub(crate) fn report_cycle<'tcx>( usage: usage.tagged_key.description(tcx), }); - let alias = if stack + let alias = if frames .iter() .all(|frame| 
frame.tagged_key.def_kind(tcx) == Some(DefKind::TyAlias)) { Some(crate::error::Alias::Ty) - } else if stack.iter().all(|frame| frame.tagged_key.def_kind(tcx) == Some(DefKind::TraitAlias)) + } else if frames.iter().all(|frame| frame.tagged_key.def_kind(tcx) == Some(DefKind::TraitAlias)) { Some(crate::error::Alias::Trait) } else {
diff --git a/compiler/rustc_query_impl/src/lib.rs b/compiler/rustc_query_impl/src/lib.rs index 2f69082..de03b48 100644 --- a/compiler/rustc_query_impl/src/lib.rs +++ b/compiler/rustc_query_impl/src/lib.rs
@@ -22,7 +22,7 @@ mod dep_kind_vtables; mod error; mod execution; -mod from_cycle_error; +mod handle_cycle_error; mod job; mod plumbing; mod profiling_support; @@ -49,10 +49,11 @@ pub fn query_system<'tcx>( incremental: bool, ) -> QuerySystem<'tcx> { let mut query_vtables = query_impl::make_query_vtables(incremental); - from_cycle_error::specialize_query_vtables(&mut query_vtables); + handle_cycle_error::specialize_query_vtables(&mut query_vtables); QuerySystem { arenas: Default::default(), query_vtables, + side_effects: Default::default(), on_disk_cache, local_providers, extern_providers,
diff --git a/compiler/rustc_query_impl/src/plumbing.rs b/compiler/rustc_query_impl/src/plumbing.rs index b8aa125..ef4fff2 100644 --- a/compiler/rustc_query_impl/src/plumbing.rs +++ b/compiler/rustc_query_impl/src/plumbing.rs
@@ -2,28 +2,24 @@ use rustc_data_structures::unord::UnordMap; use rustc_hir::limit::Limit; -use rustc_index::Idx; use rustc_middle::bug; #[expect(unused_imports, reason = "used by doc comments")] use rustc_middle::dep_graph::DepKindVTable; -use rustc_middle::dep_graph::{DepKind, DepNode, DepNodeIndex, DepNodeKey, SerializedDepNodeIndex}; +use rustc_middle::dep_graph::{DepNode, DepNodeIndex, DepNodeKey, SerializedDepNodeIndex}; use rustc_middle::query::erase::{Erasable, Erased}; -use rustc_middle::query::on_disk_cache::{ - AbsoluteBytePos, CacheDecoder, CacheEncoder, EncodedDepNodeIndex, -}; +use rustc_middle::query::on_disk_cache::{CacheDecoder, CacheEncoder}; use rustc_middle::query::{QueryCache, QueryJobId, QueryMode, QueryVTable, erase}; use rustc_middle::ty::TyCtxt; -use rustc_middle::ty::codec::TyEncoder; use rustc_middle::ty::tls::{self, ImplicitCtxt}; use rustc_serialize::{Decodable, Encodable}; use rustc_span::DUMMY_SP; use rustc_span::def_id::LOCAL_CRATE; use crate::error::{QueryOverflow, QueryOverflowNote}; -use crate::execution::{all_inactive, force_query}; +use crate::execution::all_inactive; use crate::job::find_dep_kind_root; use crate::query_impl::for_each_query_vtable; -use crate::{CollectActiveJobsKind, GetQueryVTable, collect_active_query_jobs}; +use crate::{CollectActiveJobsKind, collect_active_query_jobs}; fn depth_limit_error<'tcx>(tcx: TyCtxt<'tcx>, job: QueryJobId) { let job_map = collect_active_query_jobs(tcx, CollectActiveJobsKind::Full); @@ -79,13 +75,9 @@ pub(crate) fn start_query<R>( }) } -pub(crate) fn encode_query_values<'tcx>( - tcx: TyCtxt<'tcx>, - encoder: &mut CacheEncoder<'_, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex, -) { +pub(crate) fn encode_query_values<'tcx>(tcx: TyCtxt<'tcx>, encoder: &mut CacheEncoder<'_, 'tcx>) { for_each_query_vtable!(CACHE_ON_DISK, tcx, |query| { - encode_query_values_inner(tcx, query, encoder, query_result_index) + encode_query_values_inner(tcx, query, encoder) }); } @@ -93,7 +85,6 @@ fn 
encode_query_values_inner<'a, 'tcx, C, V>( tcx: TyCtxt<'tcx>, query: &'tcx QueryVTable<'tcx, C>, encoder: &mut CacheEncoder<'a, 'tcx>, - query_result_index: &mut EncodedDepNodeIndex, ) where C: QueryCache<Value = Erased<V>>, V: Erasable + Encodable<CacheEncoder<'a, 'tcx>>, @@ -103,14 +94,7 @@ fn encode_query_values_inner<'a, 'tcx, C, V>( assert!(all_inactive(&query.state)); query.cache.for_each(&mut |key, value, dep_node| { if (query.will_cache_on_disk_for_key_fn)(tcx, *key) { - let dep_node = SerializedDepNodeIndex::new(dep_node.index()); - - // Record position of the cache entry. - query_result_index.push((dep_node, AbsoluteBytePos::new(encoder.position()))); - - // Encode the type check tables with the `SerializedDepNodeIndex` - // as tag. - encoder.encode_tagged(dep_node, &erase::restore_val::<V>(*value)); + encoder.encode_query_value::<V>(dep_node, &erase::restore_val::<V>(*value)); } }); } @@ -151,15 +135,15 @@ fn verify_query_key_hashes_inner<'tcx, C: QueryCache>( }); } -/// Implementation of [`DepKindVTable::promote_from_disk_fn`] for queries. -pub(crate) fn promote_from_disk_inner<'tcx, Q: GetQueryVTable<'tcx>>( +/// Inner implementation of [`DepKindVTable::promote_from_disk_fn`] for queries. +pub(crate) fn promote_from_disk_inner<'tcx, C: QueryCache>( tcx: TyCtxt<'tcx>, + query: &'tcx QueryVTable<'tcx, C>, dep_node: DepNode, ) { - let query = Q::query_vtable(tcx); debug_assert!(tcx.dep_graph.is_green(&dep_node)); - let key = <Q::Cache as QueryCache>::Key::try_recover_key(tcx, &dep_node).unwrap_or_else(|| { + let key = C::Key::try_recover_key(tcx, &dep_node).unwrap_or_else(|| { panic!( "Failed to recover key for {dep_node:?} with key fingerprint {}", dep_node.key_fingerprint @@ -214,44 +198,9 @@ pub(crate) fn try_load_from_disk<'tcx, V>( // details. 
let value = tcx .dep_graph - .with_query_deserialization(|| on_disk_cache.try_load_query_result(tcx, prev_index)); + .with_query_deserialization(|| on_disk_cache.try_load_query_value(tcx, prev_index)); prof_timer.finish_with_query_invocation_id(index.into()); value } - -/// Implementation of [`DepKindVTable::force_from_dep_node_fn`] for queries. -pub(crate) fn force_from_dep_node_inner<'tcx, Q: GetQueryVTable<'tcx>>( - tcx: TyCtxt<'tcx>, - dep_node: DepNode, - // Needed by the vtable function signature, but not used when forcing queries. - _prev_index: SerializedDepNodeIndex, -) -> bool { - let query = Q::query_vtable(tcx); - - // We must avoid ever having to call `force_from_dep_node()` for a - // `DepNode::codegen_unit`: - // Since we cannot reconstruct the query key of a `DepNode::codegen_unit`, we - // would always end up having to evaluate the first caller of the - // `codegen_unit` query that *is* reconstructible. This might very well be - // the `compile_codegen_unit` query, thus re-codegenning the whole CGU just - // to re-trigger calling the `codegen_unit` query with the right key. At - // that point we would already have re-done all the work we are trying to - // avoid doing in the first place. - // The solution is simple: Just explicitly call the `codegen_unit` query for - // each CGU, right after partitioning. This way `try_mark_green` will always - // hit the cache instead of having to go through `force_from_dep_node`. - // This assertion makes sure, we actually keep applying the solution above. - debug_assert!( - dep_node.kind != DepKind::codegen_unit, - "calling force_from_dep_node() on dep_kinds::codegen_unit" - ); - - if let Some(key) = <Q::Cache as QueryCache>::Key::try_recover_key(tcx, &dep_node) { - force_query(query, tcx, key, dep_node); - true - } else { - false - } -}
diff --git a/compiler/rustc_query_impl/src/query_impl.rs b/compiler/rustc_query_impl/src/query_impl.rs index 1a8ed05..60ddf3e 100644 --- a/compiler/rustc_query_impl/src/query_impl.rs +++ b/compiler/rustc_query_impl/src/query_impl.rs
@@ -166,10 +166,10 @@ pub(crate) fn make_query_vtable<'tcx>(incremental: bool) try_load_from_disk_fn: |_tcx, _key, _prev_index, _index| None, // The default just emits `err` and then aborts. - // `from_cycle_error::specialize_query_vtables` overwrites this default for - // certain queries. - value_from_cycle_error: |_tcx, _key, _cycle, err| { - $crate::from_cycle_error::default(err) + // `handle_cycle_error::specialize_query_vtables` overwrites this default + // for certain queries. + handle_cycle_error_fn: |_tcx, _key, _cycle, err| { + $crate::handle_cycle_error::default(err) }, #[cfg($no_hash)]
diff --git a/compiler/rustc_ty_utils/src/representability.rs b/compiler/rustc_ty_utils/src/representability.rs index 9fb16af..c83e0bb 100644 --- a/compiler/rustc_ty_utils/src/representability.rs +++ b/compiler/rustc_ty_utils/src/representability.rs
@@ -58,7 +58,7 @@ fn check_representability_ty<'tcx>(tcx: TyCtxt<'tcx>, ty: Ty<'tcx>) { // -> check_representability_adt_ty(Bar<Foo>) // -> check_representability(Foo) // -// For the diagnostic output (in `Value::from_cycle_error`), we want to detect +// For the diagnostic output (in `check_representability`), we want to detect // that the `Foo` in the *second* field of the struct is culpable. This // requires traversing the HIR of the struct and calling `params_in_repr(Bar)`. // But we can't call params_in_repr for a given type unless it is known to be
diff --git a/library/core/src/ffi/va_list.rs b/library/core/src/ffi/va_list.rs index b6dfe3a..f0f58a0 100644 --- a/library/core/src/ffi/va_list.rs +++ b/library/core/src/ffi/va_list.rs
@@ -244,7 +244,7 @@ pub(crate) const fn duplicate(&self) -> Self { #[rustc_const_unstable(feature = "const_c_variadic", issue = "151787")] impl<'f> const Clone for VaList<'f> { - #[inline] + #[inline] // Avoid codegen when not used to help backends that don't support VaList. fn clone(&self) -> Self { // We only implement Clone and not Copy because some future target might not be able to // implement Copy (e.g. because it allocates). For the same reason we use an intrinsic @@ -256,6 +256,7 @@ fn clone(&self) -> Self { #[rustc_const_unstable(feature = "const_c_variadic", issue = "151787")] impl<'f> const Drop for VaList<'f> { + #[inline] // Avoid codegen when not used to help backends that don't support VaList. fn drop(&mut self) { // SAFETY: this variable argument list is being dropped, so won't be read from again. unsafe { va_end(self) } @@ -326,7 +327,7 @@ impl<'f> VaList<'f> { /// /// Calling this function with an incompatible type, an invalid value, or when there /// are no more variable arguments, is unsound. - #[inline] + #[inline] // Avoid codegen when not used to help backends that don't support VaList. #[rustc_const_unstable(feature = "const_c_variadic", issue = "151787")] pub const unsafe fn arg<T: VaArgSafe>(&mut self) -> T { // SAFETY: the caller must uphold the safety contract for `va_arg`.
diff --git a/library/core/src/num/uint_macros.rs b/library/core/src/num/uint_macros.rs index ae8324c..27dbe6d 100644 --- a/library/core/src/num/uint_macros.rs +++ b/library/core/src/num/uint_macros.rs
@@ -447,7 +447,7 @@ pub const fn rotate_right(self, n: u32) -> Self { pub const fn funnel_shl(self, rhs: Self, n: u32) -> Self { assert!(n < Self::BITS, "attempt to funnel shift left with overflow"); // SAFETY: just checked that `shift` is in-range - unsafe { intrinsics::unchecked_funnel_shl(self, rhs, n) } + unsafe { self.unchecked_funnel_shl(rhs, n) } } /// Performs a right funnel shift (concatenates `self` and `rhs`, with `self` @@ -482,7 +482,61 @@ pub const fn funnel_shl(self, rhs: Self, n: u32) -> Self { pub const fn funnel_shr(self, rhs: Self, n: u32) -> Self { assert!(n < Self::BITS, "attempt to funnel shift right with overflow"); // SAFETY: just checked that `shift` is in-range - unsafe { intrinsics::unchecked_funnel_shr(self, rhs, n) } + unsafe { self.unchecked_funnel_shr(rhs, n) } + } + + /// Unchecked funnel shift left. + /// + /// # Safety + /// + /// This results in undefined behavior if `n` is greater than or equal to + #[doc = concat!("`", stringify!($SelfT) , "::BITS`,")] + /// i.e. when [`funnel_shl`](Self::funnel_shl) would panic. + /// + #[rustc_const_unstable(feature = "funnel_shifts", issue = "145686")] + #[unstable(feature = "funnel_shifts", issue = "145686")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + #[track_caller] + pub const unsafe fn unchecked_funnel_shl(self, low: Self, n: u32) -> Self { + assert_unsafe_precondition!( + check_language_ub, + concat!(stringify!($SelfT), "::unchecked_funnel_shl cannot overflow"), + (n: u32 = n) => n < <$ActualT>::BITS, + ); + + // SAFETY: this is guaranteed to be safe by the caller. + unsafe { + intrinsics::unchecked_funnel_shl(self, low, n) + } + } + + /// Unchecked funnel shift right. + /// + /// # Safety + /// + /// This results in undefined behavior if `n` is greater than or equal to + #[doc = concat!("`", stringify!($SelfT) , "::BITS`,")] + /// i.e. when [`funnel_shr`](Self::funnel_shr) would panic. 
+ /// + #[rustc_const_unstable(feature = "funnel_shifts", issue = "145686")] + #[unstable(feature = "funnel_shifts", issue = "145686")] + #[must_use = "this returns the result of the operation, \ + without modifying the original"] + #[inline(always)] + #[track_caller] + pub const unsafe fn unchecked_funnel_shr(self, low: Self, n: u32) -> Self { + assert_unsafe_precondition!( + check_language_ub, + concat!(stringify!($SelfT), "::unchecked_funnel_shr cannot overflow"), + (n: u32 = n) => n < <$ActualT>::BITS, + ); + + // SAFETY: this is guaranteed to be safe by the caller. + unsafe { + intrinsics::unchecked_funnel_shr(self, low, n) + } } /// Performs a carry-less multiplication, returning the lower bits.
diff --git a/library/coretests/benches/lib.rs b/library/coretests/benches/lib.rs index 6b65eca..e8dc815 100644 --- a/library/coretests/benches/lib.rs +++ b/library/coretests/benches/lib.rs
@@ -8,7 +8,9 @@ #![feature(iter_array_chunks)] #![feature(iter_next_chunk)] #![feature(iter_advance_by)] +#![feature(num_internals)] #![feature(uint_gather_scatter_bits)] +#![allow(internal_features)] extern crate test;
diff --git a/src/doc/embedded-book b/src/doc/embedded-book index e88aa44..2463ede 160000 --- a/src/doc/embedded-book +++ b/src/doc/embedded-book
@@ -1 +1 @@ -Subproject commit e88aa4403b4bf2071c8df9509160477e40179099 +Subproject commit 2463edeb8003c5743918b3739a9f6870b86396f5
diff --git a/src/doc/reference b/src/doc/reference index c49e89c..7446bf9 160000 --- a/src/doc/reference +++ b/src/doc/reference
@@ -1 +1 @@ -Subproject commit c49e89cc8c7c2c43ca625a8d5b7ad9a53a9ce978 +Subproject commit 7446bf9697c95d155eef33c6a9d91fbd29a5e359
diff --git a/src/doc/rustc-dev-guide/ci/sembr/src/main.rs b/src/doc/rustc-dev-guide/ci/sembr/src/main.rs index 4038f11..a9b4d18 100644 --- a/src/doc/rustc-dev-guide/ci/sembr/src/main.rs +++ b/src/doc/rustc-dev-guide/ci/sembr/src/main.rs
@@ -15,7 +15,7 @@ struct Cli { /// Modify files that do not comply overwrite: bool, /// Applies to lines that are to be split - #[arg(long, default_value_t = 100)] + #[arg(long, default_value_t = 80)] line_length_limit: usize, }
diff --git a/src/doc/rustc-dev-guide/rust-version b/src/doc/rustc-dev-guide/rust-version index db94926..a51e46f 100644 --- a/src/doc/rustc-dev-guide/rust-version +++ b/src/doc/rustc-dev-guide/rust-version
@@ -1 +1 @@ -eda4fc7733ee89e484d7120cafbd80dcb2fce66e +562dee4820c458d823175268e41601d4c060588a
diff --git a/src/doc/rustc-dev-guide/src/backend/updating-llvm.md b/src/doc/rustc-dev-guide/src/backend/updating-llvm.md index 56fa49e..0b6bb0d 100644 --- a/src/doc/rustc-dev-guide/src/backend/updating-llvm.md +++ b/src/doc/rustc-dev-guide/src/backend/updating-llvm.md
@@ -1,6 +1,5 @@ # Updating LLVM -<!-- date-check: Aug 2024 --> Rust supports building against multiple LLVM versions: * Tip-of-tree for the current LLVM development branch is usually supported within a few days. @@ -91,7 +90,6 @@ ## New LLVM Release Updates -<!-- date-check: Jul 2023 --> Unlike bugfixes, updating to a new release of LLVM typically requires a lot more work. @@ -172,12 +170,14 @@ You'll change at least `src/llvm-project` and will likely also change [`llvm-wrapper`] as well. - <!-- date-check: mar 2025 --> + <!-- date-check: March 2026 --> > For prior art, here are some previous LLVM updates: > - [LLVM 17](https://github.com/rust-lang/rust/pull/115959) > - [LLVM 18](https://github.com/rust-lang/rust/pull/120055) > - [LLVM 19](https://github.com/rust-lang/rust/pull/127513) > - [LLVM 20](https://github.com/rust-lang/rust/pull/135763) + > - [LLVM 21](https://github.com/rust-lang/rust/pull/143684) + > - [LLVM 22](https://github.com/rust-lang/rust/pull/150722) Note that sometimes it's easiest to land [`llvm-wrapper`] compatibility as a PR before actually updating `src/llvm-project`.
diff --git a/src/doc/rustc-dev-guide/src/borrow-check/region-inference/member-constraints.md b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/member-constraints.md index 2804c97..21a56da 100644 --- a/src/doc/rustc-dev-guide/src/borrow-check/region-inference/member-constraints.md +++ b/src/doc/rustc-dev-guide/src/borrow-check/region-inference/member-constraints.md
@@ -1,17 +1,18 @@ # Member constraints A member constraint `'m member of ['c_1..'c_N]` expresses that the -region `'m` must be *equal* to some **choice regions** `'c_i` (for -some `i`). These constraints cannot be expressed by users, but they -arise from `impl Trait` due to its lifetime capture rules. Consider a -function such as the following: +region `'m` must be *equal* to some **choice regions** `'c_i` (for some `i`). +These constraints cannot be expressed by users, but they +arise from `impl Trait` due to its lifetime capture rules. +Consider a function such as the following: ```rust,ignore fn make(a: &'a u32, b: &'b u32) -> impl Trait<'a, 'b> { .. } ``` Here, the true return type (often called the "hidden type") is only -permitted to capture the lifetimes `'a` or `'b`. You can kind of see +permitted to capture the lifetimes `'a` or `'b`. +You can kind of see this more clearly by desugaring that `impl Trait` return type into its more explicit form: @@ -23,7 +24,8 @@ Here, the idea is that the hidden type must be some type that could have been written in place of the `impl Trait<'x, 'y>` -- but clearly such a type can only reference the regions `'x` or `'y` (or -`'static`!), as those are the only names in scope. This limitation is +`'static`!), as those are the only names in scope. +This limitation is then translated into a restriction to only access `'a` or `'b` because we are returning `MakeReturn<'a, 'b>`, where `'x` and `'y` have been replaced with `'a` and `'b` respectively. @@ -31,8 +33,8 @@ ## Detailed example To help us explain member constraints in more detail, let's spell out -the `make` example in a bit more detail. First off, let's assume that -you have some dummy trait: +the `make` example in a bit more detail. 
+First off, let's assume that you have some dummy trait: ```rust,ignore trait Trait<'a, 'b> { } @@ -49,8 +51,8 @@ ``` What happens in this case is that the return type will be `(&'0 u32, &'1 u32)`, -where `'0` and `'1` are fresh region variables. We will have the following -region constraints: +where `'0` and `'1` are fresh region variables. +We will have the following region constraints: ```txt '0 live at {L} @@ -67,11 +69,11 @@ `'1` might have slightly different liveness sets, but that's not very interesting to the point we are illustrating here). -The `'a: '0` and `'b: '1` constraints arise from subtyping. When we -construct the `(a, b)` value, it will be assigned type `(&'0 u32, &'1 +The `'a: '0` and `'b: '1` constraints arise from subtyping. +When we construct the `(a, b)` value, it will be assigned type `(&'0 u32, &'1 u32)` -- the region variables reflect that the lifetimes of these -references could be made smaller. For this value to be created from -`a` and `b`, however, we do require that: +references could be made smaller. +For this value to be created from `a` and `b`, however, we do require that: ```txt (&'a u32, &'b u32) <: (&'0 u32, &'1 u32) @@ -82,35 +84,39 @@ Note that if we ignore member constraints, the value of `'0` would be inferred to some subset of the function body (from the liveness -constraints, which we did not write explicitly). It would never become +constraints, which we did not write explicitly). +It would never become `'a`, because there is no need for it too -- we have a constraint that -`'a: '0`, but that just puts a "cap" on how *large* `'0` can grow to -become. Since we compute the *minimal* value that we can, we are happy -to leave `'0` as being just equal to the liveness set. This is where -member constraints come in. +`'a: '0`, but that just puts a "cap" on how *large* `'0` can grow to become. +Since we compute the *minimal* value that we can, we are happy +to leave `'0` as being just equal to the liveness set. 
+This is where member constraints come in. ## Choices are always lifetime parameters At present, the "choice" regions from a member constraint are always lifetime -parameters from the current function. As of <!-- date-check --> October 2021, +parameters from the current function. As of <!-- date-check --> March 2026, this falls out from the placement of impl Trait, though in the future it may not -be the case. We take some advantage of this fact, as it simplifies the current -code. In particular, we don't have to consider a case like `'0 member of ['1, +be the case. +We take some advantage of this fact, as it simplifies the current code. +In particular, we don't have to consider a case like `'0 member of ['1, 'static]`, in which the value of both `'0` and `'1` are being inferred and hence -changing. See [rust-lang/rust#61773][#61773] for more information. +changing. +See [rust-lang/rust#61773][#61773] for more information. [#61773]: https://github.com/rust-lang/rust/issues/61773 ## Applying member constraints -Member constraints are a bit more complex than other forms of -constraints. This is because they have a "or" quality to them -- that +Member constraints are a bit more complex than other forms of constraints. +This is because they have a "or" quality to them -- that is, they describe multiple choices that we must select from. E.g., in our example constraint `'0 member of ['a, 'b, 'static]`, it might be -that `'0` is equal to `'a`, `'b`, *or* `'static`. How can we pick the -correct one? What we currently do is to look for a *minimal choice* --- if we find one, then we will grow `'0` to be equal to that minimal -choice. To find that minimal choice, we take two factors into +that `'0` is equal to `'a`, `'b`, *or* `'static`. +How can we pick the correct one? +What we currently do is to look for a *minimal choice* +-- if we find one, then we will grow `'0` to be equal to that minimal choice. 
+To find that minimal choice, we take two factors into consideration: lower and upper bounds. ### Lower bounds @@ -121,30 +127,34 @@ `'0` because we computed its minimal value (or at least, the lower bounds considering everything but member constraints). -Let `LB` be the current value of `'0`. We know then that `'0: LB` must -hold, whatever the final value of `'0` is. Therefore, we can rule out +Let `LB` be the current value of `'0`. +We know then that `'0: LB` must hold, whatever the final value of `'0` is. +Therefore, we can rule out any choice `'choice` where `'choice: LB` does not hold. -Unfortunately, in our example, this is not very helpful. The lower -bound for `'0` will just be the liveness set `{L}`, and we know that -all the lifetime parameters outlive that set. So we are left with the -same set of choices here. (But in other examples, particularly those +Unfortunately, in our example, this is not very helpful. +The lower bound for `'0` will just be the liveness set `{L}`, and we know that +all the lifetime parameters outlive that set. +So we are left with the same set of choices here. +(But in other examples, particularly those with different variance, lower bound constraints may be relevant.) ### Upper bounds The *upper bounds* are those lifetimes that *must outlive* `'0` -- i.e., that `'0` must be *smaller* than. In our example, this would be -`'a`, because we have the constraint that `'a: '0`. In more complex -examples, the chain may be more indirect. +`'a`, because we have the constraint that `'a: '0`. +In more complex examples, the chain may be more indirect. We can use upper bounds to rule out members in a very similar way to -lower bounds. If UB is some upper bound, then we know that `UB: +lower bounds. +If UB is some upper bound, then we know that `UB: '0` must hold, so we can rule out any choice `'choice` where `UB: 'choice` does not hold. In our example, we would be able to reduce our choice set from `['a, -'b, 'static]` to just `['a]`. 
This is because `'0` has an upper bound +'b, 'static]` to just `['a]`. +This is because `'0` has an upper bound of `'a`, and neither `'a: 'b` nor `'a: 'static` is known to hold. (For notes on how we collect upper bounds in the implementation, see @@ -153,39 +163,45 @@ ### Minimal choice After applying lower and upper bounds, we can still sometimes have -multiple possibilities. For example, imagine a variant of our example -using types with the opposite variance. In that case, we would have -the constraint `'0: 'a` instead of `'a: '0`. Hence the current value -of `'0` would be `{L, 'a}`. Using this as a lower bound, we would be +multiple possibilities. +For example, imagine a variant of our example +using types with the opposite variance. +In that case, we would have the constraint `'0: 'a` instead of `'a: '0`. +Hence the current value of `'0` would be `{L, 'a}`. +Using this as a lower bound, we would be able to narrow down the member choices to `['a, 'static]` because `'b: -'a` is not known to hold (but `'a: 'a` and `'static: 'a` do hold). We -would not have any upper bounds, so that would be our final set of choices. +'a` is not known to hold (but `'a: 'a` and `'static: 'a` do hold). +We would not have any upper bounds, so that would be our final set of choices. In that case, we apply the **minimal choice** rule -- basically, if -one of our choices if smaller than the others, we can use that. In -this case, we would opt for `'a` (and not `'static`). +one of our choices if smaller than the others, we can use that. +In this case, we would opt for `'a` (and not `'static`). This choice is consistent with the general 'flow' of region propagation, which always aims to compute a minimal value for the -region being inferred. However, it is somewhat arbitrary. +region being inferred. +However, it is somewhat arbitrary. 
<a id="collecting"></a> ### Collecting upper bounds in the implementation In practice, computing upper bounds is a bit inconvenient, because our -data structures are setup for the opposite. What we do is to compute +data structures are setup for the opposite. +What we do is to compute the **reverse SCC graph** (we do this lazily and cache the result) -- -that is, a graph where `'a: 'b` induces an edge `SCC('b) -> -SCC('a)`. Like the normal SCC graph, this is a DAG. We can then do a -depth-first search starting from `SCC('0)` in this graph. This will -take us to all the SCCs that must outlive `'0`. +that is, a graph where `'a: 'b` induces an edge `SCC('b) -> SCC('a)`. +Like the normal SCC graph, this is a DAG. +We can then do a depth-first search starting from `SCC('0)` in this graph. +This will take us to all the SCCs that must outlive `'0`. One wrinkle is that, as we walk the "upper bound" SCCs, their values -will not yet have been fully computed. However, we **have** already +will not yet have been fully computed. +However, we **have** already applied their liveness constraints, so we have some information about -their value. In particular, for any regions representing lifetime +their value. +In particular, for any regions representing lifetime parameters, their value will contain themselves (i.e., the initial -value for `'a` includes `'a` and the value for `'b` contains `'b`). So -we can collect all of the lifetime parameters that are reachable, +value for `'a` includes `'a` and the value for `'b` contains `'b`). +So we can collect all of the lifetime parameters that are reachable, which is precisely what we are interested in.
diff --git a/src/doc/rustc-dev-guide/src/diagnostics/error-codes.md b/src/doc/rustc-dev-guide/src/diagnostics/error-codes.md index 9a302f9..b5bb44c 100644 --- a/src/doc/rustc-dev-guide/src/diagnostics/error-codes.md +++ b/src/doc/rustc-dev-guide/src/diagnostics/error-codes.md
@@ -10,7 +10,7 @@ The explanations are written in Markdown (see the [CommonMark Spec] for specifics around syntax), and all of them are linked in the [`rustc_error_codes`] crate. Please read [RFC 1567] for details on how to format and write long error codes. -As of <!-- date-check --> February 2023, there is an +As of <!-- date-check --> March 2026, there is an effort[^new-explanations] to replace this largely outdated RFC with a new more flexible standard. Error explanations should expand on the error message and provide details about
diff --git a/src/doc/rustc-dev-guide/src/diagnostics/lintstore.md b/src/doc/rustc-dev-guide/src/diagnostics/lintstore.md index 7b98bc6..cda975a 100644 --- a/src/doc/rustc-dev-guide/src/diagnostics/lintstore.md +++ b/src/doc/rustc-dev-guide/src/diagnostics/lintstore.md
@@ -3,15 +3,14 @@ This page documents some of the machinery around lint registration and how we run lints in the compiler. -The [`LintStore`] is the central piece of infrastructure, around which -everything rotates. The `LintStore` is held as part of the [`Session`], and it +The [`LintStore`] is the central piece of infrastructure, around which everything rotates. +The `LintStore` is held as part of the [`Session`], and it gets populated with the list of lints shortly after the `Session` is created. ## Lints vs. lint passes -There are two parts to the linting mechanism within the compiler: lints and -lint passes. Unfortunately, a lot of the documentation we have refers to both -of these as just "lints." +There are two parts to the linting mechanism within the compiler: lints and lint passes. +Unfortunately, a lot of the documentation we have refers to both of these as just "lints." First, we have the lint declarations themselves, and this is where the name and default lint level and other metadata come from. @@ -21,14 +20,14 @@ as the macro is somewhat unwieldy to add new fields to, like all macros). -As of <!-- date-check --> Aug 2022, we lint against direct declarations without the use of the macro. Lint declarations don't carry any "state" - they are merely global identifiers -and descriptions of lints. We assert at runtime that they are not registered -twice (by lint name). +and descriptions of lints. +We assert at runtime that they are not registered twice (by lint name). -Lint passes are the meat of any lint. Notably, there is not a one-to-one +Lint passes are the meat of any lint. 
+Notably, there is not a one-to-one relationship between lints and lint passes; a lint might not have any lint pass that emits it, it could have many, or just one -- the compiler doesn't track whether a pass is in any way associated with a particular lint, and frequently @@ -45,36 +44,33 @@ There are three 'sources' of lints: * internal lints: lints only used by the rustc codebase -* builtin lints: lints built into the compiler and not provided by some outside - source -* `rustc_interface::Config`[`register_lints`]: lints passed into the compiler - during construction +* builtin lints: lints built into the compiler and not provided by some outside source +* `rustc_interface::Config`[`register_lints`]: lints passed into the compiler during construction -Lints are registered via the [`LintStore::register_lint`] function. This should -happen just once for any lint, or an ICE will occur. +Lints are registered via the [`LintStore::register_lint`] function. +This should happen just once for any lint, or an ICE will occur. -Once the registration is complete, we "freeze" the lint store by placing it in -an `Arc`. +Once the registration is complete, we "freeze" the lint store by placing it in an `Arc`. Lint passes are registered separately into one of the categories -(pre-expansion, early, late, late module). Passes are registered as a closure +(pre-expansion, early, late, late module). +Passes are registered as a closure -- i.e., `impl Fn() -> Box<dyn X>`, where `dyn X` is either an early or late -lint pass trait object. When we run the lint passes, we run the closure and -then invoke the lint pass methods. The lint pass methods take `&mut self` so -they can keep track of state internally. +lint pass trait object. +When we run the lint passes, we run the closure and then invoke the lint pass methods. +The lint pass methods take `&mut self` so they can keep track of state internally. #### Internal lints -These are lints used just by the compiler or drivers like `clippy`. 
They can be -found in [`rustc_lint::internal`]. +These are lints used just by the compiler or drivers like `clippy`. +They can be found in [`rustc_lint::internal`]. An example of such a lint is the check that lint passes are implemented using -the `declare_lint_pass!` macro and not by hand. This is accomplished with the -`LINT_PASS_IMPL_WITHOUT_MACRO` lint. +the `declare_lint_pass!` macro and not by hand. +This is accomplished with the `LINT_PASS_IMPL_WITHOUT_MACRO` lint. Registration of these lints happens in the [`rustc_lint::register_internals`] -function which is called when constructing a new lint store inside -[`rustc_lint::new_lint_store`]. +function which is called when constructing a new lint store inside [`rustc_lint::new_lint_store`]. #### Builtin Lints @@ -84,19 +80,18 @@ and the latter provides the lint pass definitions (and implementations), but this is not always true. -The builtin lint registration happens in -the [`rustc_lint::register_builtins`] function. +The builtin lint registration happens in the [`rustc_lint::register_builtins`] function. Just like with internal lints, this happens inside of [`rustc_lint::new_lint_store`]. #### Driver lints These are the lints provided by drivers via the `rustc_interface::Config` -[`register_lints`] field, which is a callback. Drivers should, if finding it -already set, call the function currently set within the callback they add. The -best way for drivers to get access to this is by overriding the -`Callbacks::config` function which gives them direct access to the `Config` -structure. +[`register_lints`] field, which is a callback. +Drivers should, if finding it +already set, call the function currently set within the callback they add. +The best way for drivers to get access to this is by overriding the +`Callbacks::config` function which gives them direct access to the `Config` structure. 
## Compiler lint passes are combined into one pass @@ -106,8 +101,8 @@ individual lint passes; this is because then we get the benefits of static over dynamic dispatch for each of the (often empty) trait methods. -Ideally, we'd not have to do this, since it adds to the complexity of -understanding the code. However, with the current type-erased lint store +Ideally, we'd not have to do this, since it adds to the complexity of understanding the code. +However, with the current type-erased lint store approach, it is beneficial to do so for performance reasons. [`LintStore`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_lint/struct.LintStore.html
diff --git a/src/doc/rustc-dev-guide/src/early-late-parameters.md b/src/doc/rustc-dev-guide/src/early-late-parameters.md index 7651dee..d78d533 100644 --- a/src/doc/rustc-dev-guide/src/early-late-parameters.md +++ b/src/doc/rustc-dev-guide/src/early-late-parameters.md
@@ -11,7 +11,8 @@ ## What does it mean to be "early" bound or "late" bound -Every function definition has a corresponding ZST that implements the `Fn*` traits known as a [function item type][function_item_type]. This part of the chapter will talk a little bit about the "desugaring" of function item types as it is useful context for explaining the difference between early bound and late bound generic parameters. +Every function definition has a corresponding ZST that implements the `Fn*` traits known as a [function item type][function_item_type]. +This part of the chapter will talk a little bit about the "desugaring" of function item types as it is useful context for explaining the difference between early bound and late bound generic parameters. Let's start with a very trivial example involving no generic parameters: @@ -36,7 +37,7 @@ A slightly more complicated example would involve introducing generic parameters to the function: ```rust -fn foo<T: Sized>(a: T) -> T { +fn foo<T: Sized>(a: T) -> T { # a /* snip */ } @@ -51,7 +52,8 @@ } ``` -Note that the function item type `FooFnItem` is generic over some type parameter `T` as defined on the function `foo`. However, not all generic parameters defined on functions are also defined on the function item type as demonstrated here: +Note that the function item type `FooFnItem` is generic over some type parameter `T` as defined on the function `foo`. +However, not all generic parameters defined on functions are also defined on the function item type as demonstrated here: ```rust fn foo<'a, T: Sized>(a: &'a T) -> &'a T { # a @@ -71,12 +73,13 @@ The lifetime parameter `'a` from the function `foo` is not present on the function item type `FooFnItem` and is instead introduced on the builtin impl solely for use in representing the argument types. Generic parameters not all being defined on the function item type means that there are two steps where generic arguments are provided when calling a function. -1. 
Naming the function (e.g. `let a = foo;`) the arguments for `FooFnItem` are provided. +1. Naming the function (e.g. `let a = foo;`) the arguments for `FooFnItem` are provided. 2. Calling the function (e.g. `a(&10);`) any parameters defined on the builtin impl are provided. -This two-step system is where the early vs late naming scheme comes from, early bound parameters are provided in the *earliest* step (naming the function), whereas late bound parameters are provided in the *latest* step (calling the function). +This two-step system is where the early vs late naming scheme comes from, early bound parameters are provided in the *earliest* step (naming the function), whereas late bound parameters are provided in the *latest* step (calling the function). -Looking at the desugaring from the previous example we can tell that `T` is an early bound type parameter and `'a` is a late bound lifetime parameter as `T` is present on the function item type but `'a` is not. See this example of calling `foo` annotated with where each generic parameter has an argument provided: +Looking at the desugaring from the previous example we can tell that `T` is an early bound type parameter and `'a` is a late bound lifetime parameter as `T` is present on the function item type but `'a` is not. +See this example of calling `foo` annotated with where each generic parameter has an argument provided: ```rust fn foo<'a, T: Sized>(a: &'a T) -> &'a T { # a @@ -96,15 +99,17 @@ ## Differences between early and late bound parameters -### Higher ranked function pointers and trait bounds +### Higher ranked function pointers and trait bounds -A generic parameter being late bound allows for more flexible usage of the function item. 
For example if we have some function `foo` with an early bound lifetime parameter and some function `bar` with a late bound lifetime parameter `'a` we would have the following builtin `Fn` impls: +A generic parameter being late bound allows for more flexible usage of the function item. +For example, if we have some function `foo` with an early bound lifetime parameter and some function `bar` with a late bound lifetime parameter `'a`, we would have the following builtin `Fn` impls: ```rust,ignore impl<'a> Fn<(&'a String,)> for FooFnItem<'a> { /* ... */ } impl<'a> Fn<(&'a String,)> for BarFnItem { /* ... */ } ``` -The `bar` function has a strictly more flexible signature as the function item type can be called with a borrow with *any* lifetime, whereas the `foo` function item type would only be callable with a borrow with the same lifetime on the function item type. We can show this by simply trying to call `foo`'s function item type multiple times with different lifetimes: +The `bar` function has a strictly more flexible signature as the function item type can be called with a borrow with *any* lifetime, whereas the `foo` function item type would only be callable with a borrow with the same lifetime on the function item type. +We can show this by simply trying to call `foo`'s function item type multiple times with different lifetimes: ```rust // The `'a: 'a` bound forces this lifetime to be early bound. @@ -125,9 +130,12 @@ f(&String::new()); ``` -In this example we call `foo`'s function item type twice, each time with a borrow of a temporary. These two borrows could not possible have lifetimes that overlap as the temporaries are only alive during the function call, not after. The lifetime parameter on `foo` being early bound requires all callers of `f` to provide a borrow with the same lifetime, as this is not possible the borrow checker errors. +In this example, we call `foo`'s function item type twice, each time with a borrow of a temporary. 
+These two borrows could not possibly have lifetimes that overlap as the temporaries are only alive during the function call, not after. +The lifetime parameter on `foo` being early bound requires all callers of `f` to provide a borrow with the same lifetime, as this is not possible the borrow checker errors. -If the lifetime parameter on `foo` was late bound this would be able to compile as each caller could provide a different lifetime argument for its borrow. See the following example which demonstrates this using the `bar` function defined above: +If the lifetime parameter on `foo` was late bound, this would be able to compile as each caller could provide a different lifetime argument for its borrow. +See the following example, which demonstrates this using the `bar` function defined above: ```rust # fn foo<'a: 'a>(b: &'a String) -> &'a String { b } @@ -143,7 +151,8 @@ b(&String::new()); ``` -This is reflected in the ability to coerce function item types to higher ranked function pointers and prove higher ranked `Fn` trait bounds. We can demonstrate this with the following example: +This is reflected in the ability to coerce function item types to higher ranked function pointers and prove higher ranked `Fn` trait bounds. +We can demonstrate this with the following example: ```rust // The `'a: 'a` bound forces this lifetime to be early bound. fn foo<'a: 'a>(b: &'a String) -> &'a String { b } @@ -163,14 +172,15 @@ fn higher_ranked_fn_ptr() { let bar_fn_item = bar; let fn_ptr: for<'a> fn(&'a String) -> &'a String = bar_fn_item; - + let foo_fn_item = foo::<'_>; // errors let fn_ptr: for<'a> fn(&'a String) -> &'a String = foo_fn_item; } ``` -In both of these cases the borrow checker errors as it does not consider `foo_fn_item` to be callable with a borrow of any lifetime. 
This is due to the fact that the lifetime parameter on `foo` is early bound, causing `foo_fn_item` to have a type of `FooFnItem<'_>` which (as demonstrated by the desugared `Fn` impl) is only callable with a borrow of the same lifetime `'_`. +In both of these cases, the borrow checker errors as it does not consider `foo_fn_item` to be callable with a borrow of any lifetime. +This is due to the fact that the lifetime parameter on `foo` is early bound, causing `foo_fn_item` to have a type of `FooFnItem<'_>` which (as demonstrated by the desugared `Fn` impl) is only callable with a borrow of the same lifetime `'_`. ### Turbofishing in the presence of late bound parameters @@ -188,15 +198,17 @@ let f /* : FooFnItem<????> */ = foo::<'static>; ``` -The above example errors as the lifetime parameter `'a` is late bound and so cannot be instantiated as part of the "naming a function" step. If we make the lifetime parameter early bound we will see this code start to compile: +The above example errors as the lifetime parameter `'a` is late bound and so cannot be instantiated as part of the "naming a function" step. +If we make the lifetime parameter early bound we will see this code start to compile: ```rust fn foo<'a: 'a>(b: &'a u32) -> &'a u32 { b } let f /* : FooFnItem<'static> */ = foo::<'static>; ``` -What the current implementation of the compiler aims to do is error when specifying lifetime arguments to a function that has both early *and* late bound lifetime parameters. In practice, due to excessive breakage, some cases are actually only future compatibility warnings ([#42868](https://github.com/rust-lang/rust/issues/42868)): -- When the amount of lifetime arguments is the same as the number of early bound lifetime parameters a FCW is emitted instead of an error +What the current implementation of the compiler aims to do is error when specifying lifetime arguments to a function that has both early *and* late bound lifetime parameters. 
+In practice, due to excessive breakage, some cases are actually only future compatibility warnings ([#42868](https://github.com/rust-lang/rust/issues/42868)): +- When the amount of lifetime arguments is the same as the number of early bound lifetime parameters, a FCW is emitted instead of an error - An error is always downgraded to a FCW when using method call syntax To demonstrate this we can write out the different kinds of functions and give them both a late and early bound lifetime: @@ -287,7 +299,8 @@ free_function::<'static, 'static, 'static>(&(), &()); ``` -Even when specifying enough lifetime arguments for both the late and early bound lifetime parameter, these arguments are not actually used to annotate the lifetime provided to late bound parameters. We can demonstrate this by turbofishing `'static` to a function while providing a non-static borrow: +Even when specifying enough lifetime arguments for both the late and early bound lifetime parameter, these arguments are not actually used to annotate the lifetime provided to late bound parameters. +We can demonstrate this by turbofishing `'static` to a function while providing a non-static borrow: ```rust struct Foo; @@ -302,7 +315,8 @@ ### Liveness of types with late bound parameters -When checking type outlives bounds involving function item types we take into account early bound parameters. For example: +When checking type outlives bounds involving function item types we take into account early bound parameters. +For example: ```rust fn foo<T>(_: T) {} @@ -315,9 +329,11 @@ } ``` -As the type parameter `T` is early bound, the desugaring of the function item type for `foo` would look something like `struct FooFnItem<T>`. Then in order for `FooFnItem<T>: 'static` to hold we must also require `T: 'static` to hold as otherwise we would wind up with soundness bugs. +As the type parameter `T` is early bound, the desugaring of the function item type for `foo` would look something like `struct FooFnItem<T>`. 
+Then, in order for `FooFnItem<T>: 'static` to hold, we must also require `T: 'static` to hold as otherwise we would wind up with soundness bugs. -Unfortunately, due to bugs in the compiler, we do not take into account early bound *lifetimes*, which is the cause of the open soundness bug [#84366](https://github.com/rust-lang/rust/issues/84366). This means that it's impossible to demonstrate a "difference" between early/late bound parameters for liveness/type outlives bounds as the only kind of generic parameters that are able to be late bound are lifetimes which are handled incorrectly. +Unfortunately, due to bugs in the compiler, we do not take into account early bound *lifetimes*, which is the cause of the open soundness bug [#84366](https://github.com/rust-lang/rust/issues/84366). +This means that it's impossible to demonstrate a "difference" between early/late bound parameters for liveness/type outlives bounds as the only kind of generic parameters that are able to be late bound are lifetimes which are handled incorrectly. Regardless, in theory the code example below *should* demonstrate such a difference once [#84366](https://github.com/rust-lang/rust/issues/84366) is fixed: ```rust @@ -341,17 +357,20 @@ ### Must be a lifetime parameter -Type and Const parameters are not able to be late bound as we do not have a way to support types such as `dyn for<T> Fn(Box<T>)` or `for<T> fn(Box<T>)`. Calling such types requires being able to monomorphize the underlying function which is not possible with indirection through dynamic dispatch. +Type and Const parameters are not able to be late bound as we do not have a way to support types such as `dyn for<T> Fn(Box<T>)` or `for<T> fn(Box<T>)`. +Calling such types requires being able to monomorphize the underlying function which is not possible with indirection through dynamic dispatch. ### Must not be used in a where clause -Currently when a generic parameter is used in a where clause it must be early bound. 
For example: +Currently when a generic parameter is used in a where clause it must be early bound. +For example: ```rust # trait Trait<'a> {} fn foo<'a, T: Trait<'a>>(_: &'a String, _: T) {} ``` -In this example the lifetime parameter `'a` is considered to be early bound as it appears in the where clause `T: Trait<'a>`. This is true even for "trivial" where clauses such as `'a: 'a` or those implied by wellformedness of function arguments, for example: +In this example the lifetime parameter `'a` is considered to be early bound as it appears in the where clause `T: Trait<'a>`. +This is true even for "trivial" where clauses such as `'a: 'a` or those implied by wellformedness of function arguments, for example: ```rust fn foo<'a: 'a>(_: &'a String) {} fn bar<'a, T: 'a>(_: &'a T) {} @@ -375,9 +394,12 @@ At *some point* during type checking an error should be emitted for this code as `String` does not implement `Trait` for any lifetime. -If the lifetime `'a` were late bound then this becomes difficult to check. When naming `foo` we do not know what lifetime should be used as part of the `T: Trait<'a>` trait bound as it has not yet been instantiated. When coercing the function item type to a function pointer we have no way of tracking the `String: Trait<'a>` trait bound that must be proven when calling the function. +If the lifetime `'a` were late bound then this becomes difficult to check. +When naming `foo` we do not know what lifetime should be used as part of the `T: Trait<'a>` trait bound as it has not yet been instantiated. +When coercing the function item type to a function pointer we have no way of tracking the `String: Trait<'a>` trait bound that must be proven when calling the function. -If the lifetime `'a` is early bound (which it is in the current implementation in rustc), then the trait bound can be checked when naming the function `foo`. 
Requiring parameters used in where clauses to be early bound gives a natural place to check where clauses defined on the function. +If the lifetime `'a` is early bound (which it is in the current implementation in rustc), then the trait bound can be checked when naming the function `foo`. +Requiring parameters used in where clauses to be early bound gives a natural place to check where clauses defined on the function. Finally, we do not require lifetimes to be early bound if they are used in *implied bounds*, for example: ```rust @@ -388,11 +410,13 @@ f(&String::new()); ``` -This code compiles, demonstrating that the lifetime parameter is late bound, even though `'a` is used in the type `&'a T` which implicitly requires `T: 'a` to hold. Implied bounds can be treated specially as any types introducing implied bounds are in the signature of the function pointer type, which means that when calling the function we know to prove `T: 'a`. +This code compiles, demonstrating that the lifetime parameter is late bound, even though `'a` is used in the type `&'a T` which implicitly requires `T: 'a` to hold. +Implied bounds can be treated specially as any types introducing implied bounds are in the signature of the function pointer type, which means that when calling the function we know to prove `T: 'a`. ### Must be constrained by argument types -It is important that builtin impls on function item types do not wind up with unconstrained generic parameters as this can lead to unsoundness. This is the same kind of restriction as applies to user written impls, for example the following code results in an error: +It is important that builtin impls on function item types do not wind up with unconstrained generic parameters as this can lead to unsoundness. +This is the same kind of restriction as applies to user written impls, for example the following code results in an error: ```rust trait Trait { type Assoc;
diff --git a/src/doc/rustc-dev-guide/src/parallel-rustc.md b/src/doc/rustc-dev-guide/src/parallel-rustc.md index f83aaa6..c4e0663 100644 --- a/src/doc/rustc-dev-guide/src/parallel-rustc.md +++ b/src/doc/rustc-dev-guide/src/parallel-rustc.md
@@ -11,7 +11,8 @@ As of <!-- date-check --> November 2024, most of the rust compiler is now parallelized. -- The codegen part is executed concurrently by default. You can use the `-C +- The codegen part is executed concurrently by default. + You can use the `-C codegen-units=n` option to control the number of concurrent tasks. - The parts after HIR lowering to codegen such as type checking, borrowing checking, and mir optimization are parallelized in the nightly version. @@ -31,17 +32,19 @@ ## Code generation During monomorphization the compiler splits up all the code to -be generated into smaller chunks called _codegen units_. These are then generated by -independent instances of LLVM running in parallel. At the end, the linker -is run to combine all the codegen units together into one binary. This process -occurs in the [`rustc_codegen_ssa::base`] module. +be generated into smaller chunks called _codegen units_. +These are then generated by independent instances of LLVM running in parallel. +At the end, the linker +is run to combine all the codegen units together into one binary. +This process occurs in the [`rustc_codegen_ssa::base`] module. [`rustc_codegen_ssa::base`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_codegen_ssa/base/index.html ## Data structures The underlying thread-safe data-structures used in the parallel compiler -can be found in the [`rustc_data_structures::sync`] module. These data structures +can be found in the [`rustc_data_structures::sync`] module. +These data structures are implemented differently depending on whether `parallel-compiler` is true. | data structure | parallel | non-parallel | @@ -61,21 +64,25 @@ or the authoring of persistent documentation covering the specific of the invariants, the atomicity, and the lock orderings. 
-- On the other hand, we still need to figure out what other invariants +- On the other hand, we still need to figure out what other invariants during compilation might not hold in parallel compilation. [`rustc_data_structures::sync`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_data_structures/sync/index.html ### WorkerLocal -[`WorkerLocal`] is a special data structure implemented for parallel compilers. It -holds worker-locals values for each thread in a thread pool. You can only +[`WorkerLocal`] is a special data structure implemented for parallel compilers. +It holds worker-local values for each thread in a thread pool. +You can only access the worker local value through the `Deref` `impl` on the thread pool it -was constructed on. It panics otherwise. +was constructed on. +It panics otherwise. `WorkerLocal` is used to implement the `Arena` allocator in the parallel -environment, which is critical in parallel queries. Its implementation is -located in the [`rustc_data_structures::sync::worker_local`] module. However, +environment, which is critical in parallel queries. +Its implementation is +located in the [`rustc_data_structures::sync::worker_local`] module. +However, in the non-parallel compiler, it is implemented as `(OneThread<T>)`, whose `T` can be accessed directly through `Deref::deref`. @@ -85,10 +92,11 @@ ## Parallel iterator The parallel iterators provided by the [`rayon`] crate are easy ways to -implement parallelism. In the current implementation of the parallel compiler -we use a custom [fork][rustc-rayon] of `rayon` to run tasks in parallel. +implement parallelism. +In the current implementation of the parallel compiler, +we use [a custom fork of `rayon`][rustc-rayon] to run tasks in parallel. -Some iterator functions are implemented to run loops in parallel +Some iterator functions are implemented to run loops in parallel when `parallel-compiler` is true. 
| Function(Omit `Send` and `Sync`) | Introduction | Owning Module | @@ -142,15 +150,17 @@ start evaluating. - If there *is* another query invocation for the same key in progress, we release the lock, and just block the thread until the other invocation has - computed the result we are waiting for. **Cycle error detection** in the parallel - compiler requires more complex logic than in single-threaded mode. When - worker threads in parallel queries stop making progress due to interdependence, - the compiler uses an extra thread *(named deadlock handler)* to detect, remove and + computed the result we are waiting for. + **Cycle error detection** in the parallel + compiler requires more complex logic than in single-threaded mode. + When + worker threads in parallel queries stop making progress due to interdependence, + the compiler uses an extra thread *(named deadlock handler)* to detect, remove and report the cycle error. The parallel query feature still has implementation to do, most of which is -related to the previous `Data Structures` and `Parallel Iterators`. See [this -open feature tracking issue][tracking]. +related to the previous `Data Structures` and `Parallel Iterators`. +See [this open feature tracking issue][tracking]. ## Rustdoc
diff --git a/src/doc/rustc-dev-guide/src/solve/candidate-preference.md b/src/doc/rustc-dev-guide/src/solve/candidate-preference.md index 4615234..7fc3926 100644 --- a/src/doc/rustc-dev-guide/src/solve/candidate-preference.md +++ b/src/doc/rustc-dev-guide/src/solve/candidate-preference.md
@@ -1,16 +1,23 @@ # Candidate preference -There are multiple ways to prove `Trait` and `NormalizesTo` goals. Each such option is called a [`Candidate`]. If there are multiple applicable candidates, we prefer some candidates over others. We store the relevant information in their [`CandidateSource`]. +There are multiple ways to prove `Trait` and `NormalizesTo` goals. +Each such option is called a [`Candidate`]. +If there are multiple applicable candidates, we prefer some candidates over others. +We store the relevant information in their [`CandidateSource`]. -This preference may result in incorrect inference or region constraints and would therefore be unsound during coherence. Because of this, we simply try to merge all candidates in coherence. +This preference may result in incorrect inference or region constraints and would therefore be unsound during coherence. +Because of this, we simply try to merge all candidates in coherence. ## `Trait` goals -Trait goals merge their applicable candidates in [`fn merge_trait_candidates`]. This document provides additional details and references to explain *why* we've got the current preference rules. +Trait goals merge their applicable candidates in [`fn merge_trait_candidates`]. +This document provides additional details and references to explain *why* we've got the current preference rules. ### `CandidateSource::BuiltinImpl(BuiltinImplSource::Trivial))` -Trivial builtin impls are builtin impls which are known to be always applicable for well-formed types. This means that if one exists, using another candidate should never have fewer constraints. We currently only consider `Sized` - and `MetaSized` - impls to be trivial. +Trivial builtin impls are builtin impls which are known to be always applicable for well-formed types. +This means that if one exists, using another candidate should never have fewer constraints. +We currently only consider `Sized` - and `MetaSized` - impls to be trivial. 
This is necessary to prevent a lifetime error for the following pattern @@ -25,7 +32,7 @@ { // Elaborating the `&'a str: Trait<T>` where-bound results in a // `&'a str: Sized` where-bound. We do not want to prefer this - // over the builtin impl. + // over the builtin impl. is_sized(x); } ``` @@ -50,7 +57,8 @@ ### `CandidateSource::ParamEnv` Once there's at least one *non-global* `ParamEnv` candidate, we prefer *all* `ParamEnv` candidates over other candidate kinds. -A where-bound is global if it is not higher-ranked and doesn't contain any generic parameters. It may contain `'static`. +A where-bound is global if it is not higher-ranked and doesn't contain any generic parameters. +It may contain `'static`. We try to apply where-bounds over other candidates as users tends to have the most control over them, so they can most easily adjust them in case our candidate preference is incorrect. @@ -68,7 +76,8 @@ } ``` -We also need this as shadowed impls can result in currently ambiguous solver cycles: [trait-system-refactor-initiative#76]. Without preference we'd be forced to fail with ambiguity +We also need this as shadowed impls can result in currently ambiguous solver cycles: [trait-system-refactor-initiative#76]. +Without preference, we'd be forced to fail with ambiguity errors if the where-bound results in region constraints to avoid incompleteness. ```rust trait Super { @@ -89,13 +98,15 @@ fn overflow<T: Trait>() { // We can use the elaborated `Super<SuperAssoc = Self::TraitAssoc>` where-bound // to prove the where-bound of the `T: Trait` implementation. This currently results in - // overflow. + // overflow. let x: <T as Trait>::TraitAssoc; } ``` -This preference causes a lot of issues. See [#24066]. Most of the -issues are caused by preferring where-bounds over impls even if the where-bound guides type inference: +This preference causes a lot of issues. +See [#24066]. 
+Most of the +issues are caused by preferring where-bounds over impls even if the where-bound guides type inference: ```rust trait Trait<T> { fn call_me(&self, x: T) {} @@ -167,7 +178,10 @@ #### Why no preference for global where-bounds -Global where-bounds are either fully implied by an impl or unsatisfiable. If they are unsatisfiable, we don't really care what happens. If a where-bound is fully implied then using the impl to prove the trait goal cannot result in additional constraints. For trait goals this is only useful for where-bounds which use `'static`: +Global where-bounds are either fully implied by an impl or unsatisfiable. +If they are unsatisfiable, we don't really care what happens. +If a where-bound is fully implied, then using the impl to prove the trait goal cannot result in additional constraints. +For trait goals, this is only useful for where-bounds which use `'static`: ```rust trait A { @@ -181,13 +195,15 @@ x.test(); } ``` -More importantly, by using impls here we prevent global where-bounds from shadowing impls when normalizing associated types. There are no known issues from preferring impls over global where-bounds. +More importantly, by using impls here, we prevent global where-bounds from shadowing impls when normalizing associated types. +There are no known issues from preferring impls over global where-bounds. #### Why still consider global where-bounds Given that we just use impls even if there exists a global where-bounds, you may ask why we don't just ignore these global where-bounds entirely: we use them to weaken the inference guidance from non-global where-bounds. -Without a global where-bound, we currently prefer non-global where bounds even though there would be an applicable impl as well. 
By adding a non-global where-bound, this unnecessary inference guidance is disabled, allowing the following to compile: +Without a global where-bound, we currently prefer non-global where bounds even though there would be an applicable impl as well. +By adding a non-global where-bound, this unnecessary inference guidance is disabled, allowing the following to compile: ```rust fn check<Color>(color: Color) where @@ -209,7 +225,9 @@ ### `CandidateSource::AliasBound` -We prefer alias-bound candidates over impls. We currently use this preference to guide type inference, causing the following to compile. I personally don't think this preference is desirable 🤷 +We prefer alias-bound candidates over impls. +We currently use this preference to guide type inference, causing the following to compile. +I personally don't think this preference is desirable 🤷 ```rust pub trait Dyn { type Word: Into<u64>; @@ -254,7 +272,9 @@ ### `CandidateSource::BuiltinImpl(BuiltinImplSource::Object(_))` -We prefer builtin trait object impls over user-written impls. This is **unsound** and should be remoed in the future. See [#57893](https://github.com/rust-lang/rust/issues/57893) and [#141347](https://github.com/rust-lang/rust/pull/141347) for more details. +We prefer builtin trait object impls over user-written impls. +This is **unsound** and should be removed in the future. +See [#57893] and [#141347] for more details. ## `NormalizesTo` goals @@ -336,7 +356,7 @@ #### We prefer "orphaned" where-bounds We add "orphaned" `Projection` clauses into the `ParamEnv` when normalizing item bounds of GATs and RPITIT in `fn check_type_bounds`. -We need to prefer these `ParamEnv` candidates over impls and other where-bounds. +We need to prefer these `ParamEnv` candidates over impls and other where-bounds. ```rust #![feature(associated_type_defaults)] trait Foo { @@ -355,7 +375,8 @@ #### We prefer global where-bounds over impls -This is necessary for the following to compile. 
I don't know whether anything relies on it in practice 🤷 +This is necessary for the following to compile. +I don't know whether anything relies on it in practice 🤷 ```rust trait Id { type This; @@ -423,7 +444,8 @@ #### RPITIT `type_of` cycles -We currently have to avoid impl candidates if there are where-bounds to avoid query cycles for RPITIT, see [#139762]. It feels desirable to me to stop relying on auto-trait leakage of during RPITIT computation to remove this issue, see [#139788]. +We currently have to avoid impl candidates if there are where-bounds to avoid query cycles for RPITIT, see [#139762]. +It feels desirable to me to stop relying on auto-trait leakage during RPITIT computation to remove this issue, see [#139788]. ```rust use std::future::Future; @@ -454,6 +476,31 @@ } ``` +<!-- date-check: Mar 2026 --> +#### Trait definition cannot use associated types from always applicable impls + +The `T: Trait` assumption in the trait definition prevents it from normalizing +`<Self as Trait>::Assoc` to `T` by using the blanket impl. +This feels like a somewhat desirable constraint, if not incredibly so. 
+ +```rust +trait Eq<T> {} +impl<T> Eq<T> for T {} +struct IsEqual<T: Eq<U>, U>(T, U); + +trait Trait: Sized { + type Assoc; + fn foo() -> IsEqual<Self, Self::Assoc> { + //~^ ERROR the trait bound `Self: Eq<<Self as Trait>::Assoc>` is not satisfied + todo!() + } +} + +impl<T> Trait for T { + type Assoc = T; +} +``` + [`Candidate`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_next_trait_solver/solve/assembly/struct.Candidate.html [`CandidateSource`]: https://doc.rust-lang.org/nightly/nightly-rustc/rustc_next_trait_solver/solve/enum.CandidateSource.html [`fn merge_trait_candidates`]: https://github.com/rust-lang/rust/blob/e3ee7f7aea5b45af3b42b5e4713da43876a65ac9/compiler/rustc_next_trait_solver/src/solve/trait_goals.rs#L1342-L1424 @@ -462,4 +509,6 @@ [#24066]: https://github.com/rust-lang/rust/issues/24066 [#133044]: https://github.com/rust-lang/rust/issues/133044 [#139762]: https://github.com/rust-lang/rust/pull/139762 -[#139788]: https://github.com/rust-lang/rust/issues/139788 \ No newline at end of file +[#139788]: https://github.com/rust-lang/rust/issues/139788 +[#57893]: https://github.com/rust-lang/rust/issues/57893 +[#141347]: https://github.com/rust-lang/rust/pull/141347
diff --git a/src/doc/rustc-dev-guide/src/tests/ci.md b/src/doc/rustc-dev-guide/src/tests/ci.md index 5161194..5581b7e 100644 --- a/src/doc/rustc-dev-guide/src/tests/ci.md +++ b/src/doc/rustc-dev-guide/src/tests/ci.md
@@ -109,11 +109,13 @@ At any given time, at most a single `auto` build is being executed. Find out more in [Merging PRs serially with bors](#merging-prs-serially-with-bors). -Normally, when an auto job fails, the whole CI workflow immediately ends. However, it can be useful to +Normally, when an auto job fails, the whole CI workflow immediately ends. +However, it can be useful to create auto jobs that are "non-blocking", or optional, to test them on CI for some time before blocking -merges on them. This can be useful if those jobs can be flaky. +merges on them. +This can be useful if those jobs can be flaky. -To do that, prefix such a job with `optional-`, and set `continue_on_error: true` for it in [`jobs.yml`]. +To do that, prefix such a job with `optional-`, and set `continue_on_error: true` for it in [`jobs.yml`]. [platform tiers]: https://forge.rust-lang.org/release/platform-support.html#rust-platform-support [auto]: https://github.com/rust-lang/rust/tree/automation/bors/auto
diff --git a/src/librustdoc/clean/mod.rs b/src/librustdoc/clean/mod.rs index 13a9c78..f543394 100644 --- a/src/librustdoc/clean/mod.rs +++ b/src/librustdoc/clean/mod.rs
@@ -2776,7 +2776,12 @@ fn get_name( // These kinds of item either don't need a `name` or accept a `None` one so we handle them // before. match item.kind { - ItemKind::Impl(ref impl_) => return clean_impl(impl_, item.owner_id.def_id, cx), + ItemKind::Impl(ref impl_) => { + // If `renamed` is `Some()` for an `impl`, it means it's been inlined because we use + // it as a marker to indicate that this is an inlined impl and that we should + // generate an impl placeholder and not a "real" impl item. + return clean_impl(impl_, item.owner_id.def_id, cx, renamed.is_some()); + } ItemKind::Use(path, kind) => { return clean_use_statement( item, @@ -2909,10 +2914,27 @@ fn clean_impl<'tcx>( impl_: &hir::Impl<'tcx>, def_id: LocalDefId, cx: &mut DocContext<'tcx>, + // If true, this is an inlined impl and it will be handled later on in the code. + // In here, we will generate a placeholder for it in order to be able to compute its + // `doc_cfg` info. + is_inlined: bool, ) -> Vec<Item> { let tcx = cx.tcx; let mut ret = Vec::new(); - let trait_ = impl_.of_trait.map(|t| clean_trait_ref(&t.trait_ref, cx)); + let trait_ = match impl_.of_trait { + Some(t) => { + if is_inlined { + return vec![Item::from_def_id_and_parts( + def_id.to_def_id(), + None, + PlaceholderImplItem, + cx, + )]; + } + Some(clean_trait_ref(&t.trait_ref, cx)) + } + None => None, + }; let items = impl_ .items .iter()
diff --git a/src/librustdoc/clean/types.rs b/src/librustdoc/clean/types.rs index 431e9ff..ad70fc1 100644 --- a/src/librustdoc/clean/types.rs +++ b/src/librustdoc/clean/types.rs
@@ -885,6 +885,9 @@ pub(crate) enum ItemKind { TraitItem(Box<Trait>), TraitAliasItem(TraitAlias), ImplItem(Box<Impl>), + /// This variant is used only as a placeholder for trait impls in order to correctly compute + /// `doc_cfg` as trait impls are added to `clean::Crate` after we went through the whole tree. + PlaceholderImplItem, /// A required method in a trait declaration meaning it's only a function signature. RequiredMethodItem(Box<Function>, Defaultness), /// A method in a trait impl or a provided method in a trait declaration. @@ -964,7 +967,8 @@ pub(crate) fn inner_items(&self) -> impl Iterator<Item = &Item> { | AssocTypeItem(..) | StrippedItem(_) | KeywordItem - | AttributeItem => [].iter(), + | AttributeItem + | PlaceholderImplItem => [].iter(), } } }
diff --git a/src/librustdoc/fold.rs b/src/librustdoc/fold.rs index c970fdb..8b9db46 100644 --- a/src/librustdoc/fold.rs +++ b/src/librustdoc/fold.rs
@@ -97,7 +97,8 @@ fn fold_inner_recur(&mut self, kind: ItemKind) -> ItemKind { | RequiredAssocTypeItem(..) | AssocTypeItem(..) | KeywordItem - | AttributeItem => kind, + | AttributeItem + | PlaceholderImplItem => kind, } }
diff --git a/src/librustdoc/formats/cache.rs b/src/librustdoc/formats/cache.rs index 5a97d8e..35071f4 100644 --- a/src/librustdoc/formats/cache.rs +++ b/src/librustdoc/formats/cache.rs
@@ -389,6 +389,8 @@ fn is_from_private_dep(tcx: TyCtxt<'_>, cache: &Cache, def_id: DefId) -> bool { // So would rather leave them to an expert, // as at least the list is better than `_ => {}`. } + + clean::PlaceholderImplItem => return None, } // Maintain the parent stack.
diff --git a/src/librustdoc/formats/item_type.rs b/src/librustdoc/formats/item_type.rs index 6830c1e..eb3492e 100644 --- a/src/librustdoc/formats/item_type.rs +++ b/src/librustdoc/formats/item_type.rs
@@ -122,7 +122,7 @@ fn from(item: &'a clean::Item) -> ItemType { clean::StaticItem(..) => ItemType::Static, clean::ConstantItem(..) => ItemType::Constant, clean::TraitItem(..) => ItemType::Trait, - clean::ImplItem(..) => ItemType::Impl, + clean::ImplItem(..) | clean::PlaceholderImplItem => ItemType::Impl, clean::RequiredMethodItem(..) => ItemType::TyMethod, clean::MethodItem(..) => ItemType::Method, clean::StructFieldItem(..) => ItemType::StructField,
diff --git a/src/librustdoc/json/conversions.rs b/src/librustdoc/json/conversions.rs index bc9ad16..5d1f477 100644 --- a/src/librustdoc/json/conversions.rs +++ b/src/librustdoc/json/conversions.rs
@@ -353,6 +353,8 @@ fn from_clean_item(item: &clean::Item, renderer: &JsonRenderer<'_>) -> ItemEnum name: name.as_ref().unwrap().to_string(), rename: src.map(|x| x.to_string()), }, + // All placeholder impl items should have been removed in the stripper passes. + PlaceholderImplItem => unreachable!(), } }
diff --git a/src/librustdoc/passes/calculate_doc_coverage.rs b/src/librustdoc/passes/calculate_doc_coverage.rs index 77b3a2e..ac5e780 100644 --- a/src/librustdoc/passes/calculate_doc_coverage.rs +++ b/src/librustdoc/passes/calculate_doc_coverage.rs
@@ -203,6 +203,10 @@ fn visit_item(&mut self, i: &clean::Item) { // don't count items in stripped modules return; } + clean::PlaceholderImplItem => { + // The "real" impl items are handled below. + return; + } // docs on `use` and `extern crate` statements are not displayed, so they're not // worth counting clean::ImportItem(..) | clean::ExternCrateItem { .. } => {}
diff --git a/src/librustdoc/passes/check_doc_test_visibility.rs b/src/librustdoc/passes/check_doc_test_visibility.rs index a1578ab..ff7535f 100644 --- a/src/librustdoc/passes/check_doc_test_visibility.rs +++ b/src/librustdoc/passes/check_doc_test_visibility.rs
@@ -80,6 +80,7 @@ pub(crate) fn should_have_doc_example(cx: &DocContext<'_>, item: &clean::Item) - | clean::ImplAssocConstItem(..) | clean::RequiredAssocTypeItem(..) | clean::ImplItem(_) + | clean::PlaceholderImplItem ) { return false;
diff --git a/src/librustdoc/passes/propagate_doc_cfg.rs b/src/librustdoc/passes/propagate_doc_cfg.rs index 54da158..f73db25 100644 --- a/src/librustdoc/passes/propagate_doc_cfg.rs +++ b/src/librustdoc/passes/propagate_doc_cfg.rs
@@ -1,10 +1,11 @@ //! Propagates [`#[doc(cfg(...))]`](https://github.com/rust-lang/rust/issues/43781) to child items. +use rustc_data_structures::fx::FxHashMap; use rustc_hir::Attribute; use rustc_hir::attrs::{AttributeKind, DocAttribute}; use crate::clean::inline::{load_attrs, merge_attrs}; -use crate::clean::{CfgInfo, Crate, Item, ItemKind}; +use crate::clean::{CfgInfo, Crate, Item, ItemId, ItemKind}; use crate::core::DocContext; use crate::fold::DocFolder; use crate::passes::Pass; @@ -17,7 +18,8 @@ pub(crate) fn propagate_doc_cfg(cr: Crate, cx: &mut DocContext<'_>) -> Crate { if cx.tcx.features().doc_cfg() { - CfgPropagator { cx, cfg_info: CfgInfo::default() }.fold_crate(cr) + CfgPropagator { cx, cfg_info: CfgInfo::default(), impl_cfg_info: FxHashMap::default() } + .fold_crate(cr) } else { cr } @@ -26,6 +28,10 @@ pub(crate) fn propagate_doc_cfg(cr: Crate, cx: &mut DocContext<'_>) -> Crate { struct CfgPropagator<'a, 'tcx> { cx: &'a mut DocContext<'tcx>, cfg_info: CfgInfo, + + /// To ensure the `doc_cfg` feature works with how `rustdoc` handles impls, we need to store + /// the `cfg` info of `impl`s placeholder to use them later on the "real" impl item. + impl_cfg_info: FxHashMap<ItemId, CfgInfo>, } /// This function goes through the attributes list (`new_attrs`) and extract the `cfg` tokens from @@ -78,7 +84,22 @@ impl DocFolder for CfgPropagator<'_, '_> { fn fold_item(&mut self, mut item: Item) -> Option<Item> { let old_cfg_info = self.cfg_info.clone(); - self.merge_with_parent_attributes(&mut item); + // If we have an impl, we check if it has an associated `cfg` "context", and if so we will + // use that context instead of the actual (wrong) one. 
+ if let ItemKind::ImplItem(_) = item.kind + && let Some(cfg_info) = self.impl_cfg_info.remove(&item.item_id) + { + self.cfg_info = cfg_info; + } + + if let ItemKind::PlaceholderImplItem = item.kind { + // If we have a placeholder impl, we store the current `cfg` "context" to be used + // on the actual impl later on (the impls are generated after we go through the whole + // AST so they're stored in the `krate` object at the end). + self.impl_cfg_info.insert(item.item_id, self.cfg_info.clone()); + } else { + self.merge_with_parent_attributes(&mut item); + } let result = self.fold_item_recur(item); self.cfg_info = old_cfg_info;
diff --git a/src/librustdoc/passes/propagate_stability.rs b/src/librustdoc/passes/propagate_stability.rs index 5139ca3..c8691fd 100644 --- a/src/librustdoc/passes/propagate_stability.rs +++ b/src/librustdoc/passes/propagate_stability.rs
@@ -107,7 +107,8 @@ fn fold_item(&mut self, mut item: Item) -> Option<Item> { | ItemKind::AssocTypeItem(..) | ItemKind::PrimitiveItem(..) | ItemKind::KeywordItem - | ItemKind::AttributeItem => own_stability, + | ItemKind::AttributeItem + | ItemKind::PlaceholderImplItem => own_stability, ItemKind::StrippedItem(..) => unreachable!(), }
diff --git a/src/librustdoc/passes/stripper.rs b/src/librustdoc/passes/stripper.rs index 99d2252..bf4e842 100644 --- a/src/librustdoc/passes/stripper.rs +++ b/src/librustdoc/passes/stripper.rs
@@ -120,6 +120,10 @@ fn fold_item(&mut self, i: Item) -> Option<Item> { clean::ImplItem(..) => {} + // Since the `doc_cfg` propagation was handled before the current pass, we can (and + // should) remove all placeholder impl items. + clean::PlaceholderImplItem => return None, + // tymethods etc. have no control over privacy clean::RequiredMethodItem(..) | clean::RequiredAssocConstItem(..)
diff --git a/src/librustdoc/scrape_examples.rs b/src/librustdoc/scrape_examples.rs index cc78dec..63b869c 100644 --- a/src/librustdoc/scrape_examples.rs +++ b/src/librustdoc/scrape_examples.rs
@@ -3,7 +3,7 @@ use std::fs; use std::path::PathBuf; -use rustc_data_structures::fx::FxIndexMap; +use rustc_data_structures::fx::{FxHashSet, FxIndexMap}; use rustc_errors::DiagCtxtHandle; use rustc_hir as hir; use rustc_hir::intravisit::{self, Visitor}; @@ -15,7 +15,7 @@ use rustc_session::getopts; use rustc_span::def_id::{CrateNum, DefPathHash, LOCAL_CRATE}; use rustc_span::edition::Edition; -use rustc_span::{BytePos, FileName, SourceFile}; +use rustc_span::{BytePos, FileName, SourceFile, Span}; use tracing::{debug, trace, warn}; use crate::html::render::Context; @@ -114,6 +114,7 @@ struct FindCalls<'a, 'tcx> { target_crates: Vec<CrateNum>, calls: &'a mut AllCallLocations, bin_crate: bool, + call_ident_spans: FxHashSet<Span>, } impl<'a, 'tcx> Visitor<'tcx> for FindCalls<'a, 'tcx> @@ -165,6 +166,10 @@ fn visit_expr(&mut self, ex: &'tcx hir::Expr<'tcx>) { } }; + if !self.call_ident_spans.insert(ident_span) { + return; + } + // If this span comes from a macro expansion, then the source code may not actually show // a use of the given item, so it would be a poor example. Hence, we skip all uses in // macros. @@ -300,7 +305,13 @@ pub(crate) fn run( // Run call-finder on all items let mut calls = FxIndexMap::default(); - let mut finder = FindCalls { calls: &mut calls, cx, target_crates, bin_crate }; + let mut finder = FindCalls { + calls: &mut calls, + cx, + target_crates, + bin_crate, + call_ident_spans: FxHashSet::default(), + }; tcx.hir_visit_all_item_likes_in_crate(&mut finder); // The visitor might have found a type error, which we need to
diff --git a/src/librustdoc/visit.rs b/src/librustdoc/visit.rs index 9f6bf00..9cf7d6b 100644 --- a/src/librustdoc/visit.rs +++ b/src/librustdoc/visit.rs
@@ -50,7 +50,8 @@ fn visit_inner_recur(&mut self, kind: &'a ItemKind) { | RequiredAssocTypeItem(..) | AssocTypeItem(..) | KeywordItem - | AttributeItem => {} + | AttributeItem + | PlaceholderImplItem => {} } }
diff --git a/src/librustdoc/visit_ast.rs b/src/librustdoc/visit_ast.rs index fd6ea21..906289b 100644 --- a/src/librustdoc/visit_ast.rs +++ b/src/librustdoc/visit_ast.rs
@@ -373,6 +373,19 @@ fn reexport_public_and_not_hidden( } #[inline] + fn add_impl_to_current_mod(&mut self, item: &'tcx hir::Item<'_>, impl_: hir::Impl<'_>) { + self.add_to_current_mod( + item, + // The symbol here is used as a "sentinel" value and has no meaning in + // itself. It just tells that this is an inlined impl and that it should not + // be cleaned as a normal `ImplItem` but instead as a `PlaceholderImplItem`. + // It's to ensure that `doc_cfg` inheritance works as expected. + if impl_.of_trait.is_none() { None } else { Some(rustc_span::symbol::kw::Impl) }, + None, + ); + } + + #[inline] fn add_to_current_mod( &mut self, item: &'tcx hir::Item<'_>, @@ -426,12 +439,8 @@ fn visit_item_inner( // } // Bar::bar(); // ``` - if let hir::ItemKind::Impl(impl_) = item.kind && - // Don't duplicate impls when inlining or if it's implementing a trait, we'll pick - // them up regardless of where they're located. - impl_.of_trait.is_none() - { - self.add_to_current_mod(item, None, None); + if let hir::ItemKind::Impl(impl_) = item.kind { + self.add_impl_to_current_mod(item, impl_); } return; } @@ -530,10 +539,10 @@ fn visit_item_inner( } } hir::ItemKind::Impl(impl_) => { - // Don't duplicate impls when inlining or if it's implementing a trait, we'll pick + // Don't duplicate impls when inlining, we'll pick // them up regardless of where they're located. - if !self.inlining && impl_.of_trait.is_none() { - self.add_to_current_mod(item, None, None); + if !self.inlining { + self.add_impl_to_current_mod(item, impl_); } } }
diff --git a/src/tools/enzyme b/src/tools/enzyme index 0b86a67..3244024 160000 --- a/src/tools/enzyme +++ b/src/tools/enzyme
@@ -1 +1 @@ -Subproject commit 0b86a6759e5f250d6691a94a4a779a44d846e25b +Subproject commit 324402444ac48874d8ebd3ac767330bdc7cb1c06
diff --git a/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/Cargo.toml b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/Cargo.toml new file mode 100644 index 0000000..4563b63 --- /dev/null +++ b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/Cargo.toml
@@ -0,0 +1,10 @@ +[workspace] + +[package] +edition = "2024" +name = "tester" +version = "0.1.0" + +[[example]] +doc-scrape-examples = true +name = "window"
diff --git a/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/examples/window.rs b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/examples/window.rs new file mode 100644 index 0000000..19d9ad8 --- /dev/null +++ b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/examples/window.rs
@@ -0,0 +1,24 @@ +#![allow(dead_code)] +use tester::Window; + +macro_rules! info { + ($s:literal, $x:expr) => {{ + let _ = $x; + }}; +} + +struct WindowState { + window: Window, +} + +impl WindowState { + fn takes_ref(&self) { + info!("{:?}", self.window.id()); + } + + fn takes_mut(&mut self) { + info!("{:?}", self.window.id()); + } +} + +fn main() {}
diff --git a/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/rmake.rs b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/rmake.rs new file mode 100644 index 0000000..67a328f --- /dev/null +++ b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/rmake.rs
@@ -0,0 +1,11 @@
+//! This test ensures that the call locations are not duplicated when generating scraped examples.
+//! To verify this, we run `htmldocck` on the generated documentation and check that it succeeds.
+//! Regression test for <https://github.com/rust-lang/rust/issues/153837>.
+
+use run_make_support::{cargo, htmldocck};
+
+fn main() {
+    cargo().args(["rustdoc", "-Zunstable-options", "-Zrustdoc-scrape-examples"]).run();
+
+    htmldocck().arg("target/doc").arg("src/lib.rs").run();
+}
diff --git a/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/src/lib.rs b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/src/lib.rs new file mode 100644 index 0000000..8c86a37 --- /dev/null +++ b/tests/run-make-cargo/rustdoc-scrape-examples-duplicated-calls/src/lib.rs
@@ -0,0 +1,13 @@ +//@has tester/struct.Window.html +//@count - '//*[@class="docblock scraped-example-list"]//span[@class="highlight"]' 1 +//@has - '//*[@class="docblock scraped-example-list"]//span[@class="highlight"]' 'id' +//@count - '//*[@class="docblock scraped-example-list"]//span[@class="highlight focus"]' 1 +//@has - '//*[@class="docblock scraped-example-list"]//span[@class="highlight focus"]' 'id' + +pub struct Window {} + +impl Window { + pub fn id(&self) -> u64 { + todo!() + } +}
diff --git a/tests/rustdoc-html/doc-cfg/trait-impls-manual.rs b/tests/rustdoc-html/doc-cfg/trait-impls-manual.rs new file mode 100644 index 0000000..fbd96cc --- /dev/null +++ b/tests/rustdoc-html/doc-cfg/trait-impls-manual.rs
@@ -0,0 +1,92 @@ +// This test ensures that `doc_cfg` feature is working as expected on trait impls. +// Regression test for <https://github.com/rust-lang/rust/issues/153655>. + +#![feature(doc_cfg)] +#![doc(auto_cfg(hide( + target_pointer_width = "64", +)))] + +#![crate_name = "foo"] + +pub trait Trait { + fn f(&self) {} +} + +pub trait Bob { + fn bob(&self) {} +} + +pub trait Foo { + fn foo(&self) {} +} + +pub struct X; + +//@has 'foo/struct.X.html' +//@count - '//*[@id="impl-Bob-for-X"]' 1 +//@count - '//*[@id="impl-Bob-for-X"]/*[@class="item-info"]' 0 +//@count - '//*[@id="impl-Trait-for-X"]' 1 +//@count - '//*[@id="impl-Trait-for-X"]/*[@class="item-info"]' 0 + +// If you need to update this XPath, in particular `item-info`, update all +// the others in this file. +//@count - '//*[@id="impl-Foo-for-X"]/*[@class="item-info"]' 1 + +//@has 'foo/trait.Trait.html' +//@count - '//*[@id="impl-Trait-for-X"]' 1 +//@count - '//*[@id="impl-Trait-for-X"]/*[@class="item-info"]' 0 +#[doc(cfg(any(target_pointer_width = "64", target_arch = "wasm32")))] +#[doc(auto_cfg(hide(target_arch = "wasm32")))] +mod imp { + impl super::Trait for super::X { fn f(&self) {} } +} + +//@has 'foo/trait.Bob.html' +//@count - '//*[@id="impl-Bob-for-X"]' 1 +//@count - '//*[@id="impl-Bob-for-X"]/*[@class="item-info"]' 0 +#[doc(cfg(any(target_pointer_width = "64", target_arch = "wasm32")))] +#[doc(auto_cfg = false)] +mod imp2 { + impl super::Bob for super::X { fn bob(&self) {} } +} + +//@has 'foo/trait.Foo.html' +//@count - '//*[@id="impl-Foo-for-X"]/*[@class="item-info"]' 1 +// We use this to force xpath tests to be updated if `item-info` class is changed. 
+#[doc(cfg(any(target_pointer_width = "64", target_arch = "wasm32")))] +mod imp3 { + impl super::Foo for super::X { fn foo(&self) {} } +} + +pub struct Y; + +//@has 'foo/struct.Y.html' +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]' 1 +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]/*[@class="item-info"]' 0 +#[doc(cfg(any(target_pointer_width = "64", target_arch = "wasm32")))] +#[doc(auto_cfg(hide(target_arch = "wasm32")))] +mod imp4 { + impl super::Y { pub fn plain_auto() {} } +} + +pub struct Z; + +//@has 'foo/struct.Z.html' +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]' 1 +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]/*[@class="item-info"]' 0 +#[doc(cfg(any(target_pointer_width = "64", target_arch = "wasm32")))] +#[doc(auto_cfg = false)] +mod imp5 { + impl super::Z { pub fn plain_auto() {} } +} + +// The "witness" which has the item info. +pub struct W; + +//@has 'foo/struct.W.html' +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]' 1 +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]/*[@class="item-info"]' 1 +#[doc(cfg(any(target_pointer_width = "64", target_arch = "wasm32")))] +mod imp6 { + impl super::W { pub fn plain_auto() {} } +}
diff --git a/tests/rustdoc-html/doc-cfg/trait-impls.rs b/tests/rustdoc-html/doc-cfg/trait-impls.rs new file mode 100644 index 0000000..581d171 --- /dev/null +++ b/tests/rustdoc-html/doc-cfg/trait-impls.rs
@@ -0,0 +1,92 @@ +// This test ensures that `doc_cfg` feature is working as expected on trait impls. +// Regression test for <https://github.com/rust-lang/rust/issues/153655>. + +#![feature(doc_cfg)] +#![doc(auto_cfg(hide( + target_pointer_width = "64", +)))] + +#![crate_name = "foo"] + +pub trait Trait { + fn f(&self) {} +} + +pub trait Bob { + fn bob(&self) {} +} + +pub trait Foo { + fn foo(&self) {} +} + +pub struct X; + +//@has 'foo/struct.X.html' +//@count - '//*[@id="impl-Bob-for-X"]' 1 +//@count - '//*[@id="impl-Bob-for-X"]/*[@class="item-info"]' 0 +//@count - '//*[@id="impl-Trait-for-X"]' 1 +//@count - '//*[@id="impl-Trait-for-X"]/*[@class="item-info"]' 0 + +// If you need to update this XPath, in particular `item-info`, update all +// the others in this file. +//@count - '//*[@id="impl-Foo-for-X"]/*[@class="item-info"]' 1 + +//@has 'foo/trait.Trait.html' +//@count - '//*[@id="impl-Trait-for-X"]' 1 +//@count - '//*[@id="impl-Trait-for-X"]/*[@class="item-info"]' 0 +#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] +#[doc(auto_cfg(hide(target_arch = "wasm32")))] +mod imp { + impl super::Trait for super::X { fn f(&self) {} } +} + +//@has 'foo/trait.Bob.html' +//@count - '//*[@id="impl-Bob-for-X"]' 1 +//@count - '//*[@id="impl-Bob-for-X"]/*[@class="item-info"]' 0 +#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] +#[doc(auto_cfg = false)] +mod imp2 { + impl super::Bob for super::X { fn bob(&self) {} } +} + +//@has 'foo/trait.Foo.html' +//@count - '//*[@id="impl-Foo-for-X"]/*[@class="item-info"]' 1 +// We use this to force xpath tests to be updated if `item-info` class is changed. 
+#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] +mod imp3 { + impl super::Foo for super::X { fn foo(&self) {} } +} + +pub struct Y; + +//@has 'foo/struct.Y.html' +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]' 1 +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]/*[@class="item-info"]' 0 +#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] +#[doc(auto_cfg(hide(target_arch = "wasm32")))] +mod imp4 { + impl super::Y { pub fn plain_auto() {} } +} + +pub struct Z; + +//@has 'foo/struct.Z.html' +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]' 1 +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]/*[@class="item-info"]' 0 +#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] +#[doc(auto_cfg = false)] +mod imp5 { + impl super::Z { pub fn plain_auto() {} } +} + +// The "witness" which has the item info. +pub struct W; + +//@has 'foo/struct.W.html' +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]' 1 +//@count - '//*[@id="implementations-list"]/*[@class="impl-items"]/*[@class="item-info"]' 1 +#[cfg(any(target_pointer_width = "64", target_arch = "wasm32"))] +mod imp6 { + impl super::W { pub fn plain_auto() {} } +}
diff --git a/tests/ui/parser/shebang/multiline-attrib.rs b/tests/ui/parser/shebang/multiline-attrib.rs index 2d2e029..d67ba5f 100644 --- a/tests/ui/parser/shebang/multiline-attrib.rs +++ b/tests/ui/parser/shebang/multiline-attrib.rs
@@ -1,7 +1,7 @@ #! [allow(unused_variables)] //@ check-pass -//@ reference: input.shebang.inner-attribute +//@ reference: shebang.syntax-description fn main() { let x = 5;
diff --git a/tests/ui/parser/shebang/regular-attrib.rs b/tests/ui/parser/shebang/regular-attrib.rs index c2ac256..f711b81 100644 --- a/tests/ui/parser/shebang/regular-attrib.rs +++ b/tests/ui/parser/shebang/regular-attrib.rs
@@ -1,6 +1,6 @@ #![allow(unused_variables)] //@ check-pass -//@ reference: input.shebang.inner-attribute +//@ reference: shebang.syntax-description fn main() { let x = 5; }
diff --git a/tests/ui/parser/shebang/shebang-and-attrib.rs b/tests/ui/parser/shebang/shebang-and-attrib.rs index d73db6b..e3e52b5 100644 --- a/tests/ui/parser/shebang/shebang-and-attrib.rs +++ b/tests/ui/parser/shebang/shebang-and-attrib.rs
@@ -1,7 +1,7 @@ #!/usr/bin/env run-cargo-script //@ check-pass -//@ reference: input.shebang.inner-attribute +//@ reference: shebang.syntax-description #![allow(unused_variables)]
diff --git a/tests/ui/parser/shebang/shebang-doc-comment.rs b/tests/ui/parser/shebang/shebang-doc-comment.rs index 4992c75..976a2b3 100644 --- a/tests/ui/parser/shebang/shebang-doc-comment.rs +++ b/tests/ui/parser/shebang/shebang-doc-comment.rs
@@ -2,4 +2,4 @@ [allow(unused_variables)] //~^ ERROR expected item, found `[` -//@ reference: input.shebang.inner-attribute +//@ reference: shebang.syntax-description
diff --git a/tests/ui/parser/shebang/sneaky-attrib.rs b/tests/ui/parser/shebang/sneaky-attrib.rs index e22c45c..b9c4adb 100644 --- a/tests/ui/parser/shebang/sneaky-attrib.rs +++ b/tests/ui/parser/shebang/sneaky-attrib.rs
@@ -11,7 +11,7 @@ [allow(unused_variables)] //@ check-pass -//@ reference: input.shebang.inner-attribute +//@ reference: shebang.syntax-description fn main() { let x = 5; }