
Commit 5f4c412

[TEST] Use a fallback impl instead of backend-specific ones

1 parent c5093a2

File tree: 4 files changed, +34 -135 lines
  • compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs
  • compiler/rustc_codegen_gcc/src/intrinsic/mod.rs
  • compiler/rustc_const_eval/src/interpret/intrinsics.rs
  • library/core/src/intrinsics/mod.rs


compiler/rustc_codegen_cranelift/src/intrinsics/mod.rs

Lines changed: 0 additions & 56 deletions
@@ -655,62 +655,6 @@ fn codegen_regular_intrinsic_call<'tcx>(
             let res = fx.bcx.ins().rotr(x, y);
             ret.write_cvalue(fx, CValue::by_val(res, layout));
         }
-        sym::funnel_shl => {
-            intrinsic_args!(fx, args => (x, y, z); intrinsic);
-            let layout = x.layout();
-
-            let width_bits = layout.size.bits() as i64;
-
-            let lhs_bits = x.load_scalar(fx);
-            let rhs_bits = y.load_scalar(fx);
-            let raw_shift_bits = z.load_scalar(fx);
-
-            let ty = fx.bcx.func.dfg.value_type(lhs_bits);
-            let zero = fx.bcx.ins().iconst(ty, 0);
-
-            let shift_bits = fx.bcx.ins().band_imm(raw_shift_bits, width_bits - 1);
-            let inv_shift_bits = fx.bcx.ins().irsub_imm(shift_bits, width_bits);
-            let is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, shift_bits, 0);
-
-            // lhs_bits << shift_bits
-            let shl = fx.bcx.ins().ishl(lhs_bits, shift_bits);
-
-            // rhs_bits.unbounded_shr(inv_shift_bits)
-            // we don't need a modulo here because `ushr` implicitly does it
-            let shr = fx.bcx.ins().ushr(rhs_bits, inv_shift_bits);
-            let shr = fx.bcx.ins().select(is_zero, zero, shr);
-
-            let res = fx.bcx.ins().bor(shr, shl);
-            ret.write_cvalue(fx, CValue::by_val(res, layout));
-        }
-        sym::funnel_shr => {
-            intrinsic_args!(fx, args => (x, y, z); intrinsic);
-            let layout = x.layout();
-
-            let width_bits = layout.size.bits() as i64;
-
-            let lhs_bits = x.load_scalar(fx);
-            let rhs_bits = y.load_scalar(fx);
-            let raw_shift_bits = z.load_scalar(fx);
-
-            let ty = fx.bcx.func.dfg.value_type(lhs_bits);
-            let zero = fx.bcx.ins().iconst(ty, 0);
-
-            let shift_bits = fx.bcx.ins().band_imm(raw_shift_bits, width_bits - 1);
-            let inv_shift_bits = fx.bcx.ins().irsub_imm(shift_bits, width_bits);
-            let is_zero = fx.bcx.ins().icmp_imm(IntCC::Equal, shift_bits, 0);
-
-            // rhs_bits >> shift_bits
-            let shr = fx.bcx.ins().ushr(rhs_bits, shift_bits);
-
-            // lhs_bits.unbounded_shl(inv_shift_bits)
-            // we don't need a modulo here because `ishl` implicitly does it
-            let shl = fx.bcx.ins().ishl(lhs_bits, inv_shift_bits);
-            let shl = fx.bcx.ins().select(is_zero, zero, shl);
-
-            let res = fx.bcx.ins().bor(shr, shl);
-            ret.write_cvalue(fx, CValue::by_val(res, layout));
-        }
 
         // The only difference between offset and arith_offset is regarding UB. Because Cranelift
         // doesn't have UB both are codegen'ed the same way
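
For reference, the removed Cranelift lowering corresponds to the following plain-Rust sketch (a hypothetical helper, specialized to u32 for illustration): `band_imm` with `width_bits - 1` is the power-of-two modulo, `irsub_imm` computes the inverse shift, and the `select` zeroes out the second operand's contribution when the effective shift is zero (where `ushr`'s implicit masking would otherwise turn the shift-by-width into a shift-by-zero).

    /// Hypothetical stand-in for the removed `funnel_shl` lowering, u32 only.
    fn funnel_shl_sketch(lhs_bits: u32, rhs_bits: u32, raw_shift_bits: u32) -> u32 {
        let width_bits = 32;
        // band_imm: power-of-two modulo keeps the shift in 0..width_bits
        let shift_bits = raw_shift_bits & (width_bits - 1);
        // irsub_imm: the inverse shift; equals width_bits when shift_bits == 0
        let inv_shift_bits = width_bits - shift_bits;
        let shl = lhs_bits << shift_bits;
        // ushr masks its shift amount, so 32 would behave like 0 and leak all of
        // rhs_bits into the result; the select replaces that case with zero
        let shr = if shift_bits == 0 {
            0
        } else {
            rhs_bits >> (inv_shift_bits & (width_bits - 1))
        };
        shl | shr
    }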

compiler/rustc_codegen_gcc/src/intrinsic/mod.rs

Lines changed: 0 additions & 49 deletions
@@ -442,8 +442,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
             | sym::bitreverse
             | sym::rotate_left
             | sym::rotate_right
-            | sym::funnel_shl
-            | sym::funnel_shr
             | sym::saturating_add
             | sym::saturating_sub => {
                 match int_type_width_signed(args[0].layout.ty, self) {
@@ -507,53 +505,6 @@ impl<'a, 'gcc, 'tcx> IntrinsicCallBuilderMethods<'tcx> for Builder<'a, 'gcc, 'tc
                            self.rotate_right(val, raw_shift, width)
                        }
                    }
-                   sym::funnel_shl => {
-                       let lhs_bits = args[0].immediate();
-                       let rhs_bits = args[1].immediate();
-                       let raw_shift_bits = args[2].immediate();
-
-                       let width_ty = raw_shift_bits.get_type();
-                       let width_bits = self.cx.gcc_uint(width_ty, width as u64);
-                       let shift_bits = self.gcc_urem(raw_shift_bits, width_bits);
-
-                       // lhs_bits << shift_bits
-                       let shl = self.gcc_shl(lhs_bits, shift_bits);
-
-                       // rhs_bits.bounded_shr(inv_shift_bits)
-                       let inv_shift_bits = self.gcc_sub(width_bits, shift_bits);
-                       let inv_shift_bits_mod = self.gcc_urem(inv_shift_bits, width_bits);
-                       let shr = self.gcc_lshr(rhs_bits, inv_shift_bits_mod);
-                       let zero = self.cx.gcc_uint(lhs_bits.get_type(), 0);
-                       let is_zero =
-                           self.gcc_icmp(IntPredicate::IntEQ, inv_shift_bits_mod, zero);
-                       let shr = self.select(is_zero, zero, shr);
-
-                       self.or(shl, shr)
-                   }
-                   sym::funnel_shr => {
-                       let lhs_bits = args[0].immediate();
-                       let rhs_bits = args[1].immediate();
-                       let raw_shift_bits = args[2].immediate();
-
-                       let width_ty = raw_shift_bits.get_type();
-                       let width_bits = self.cx.gcc_uint(width_ty, width as u64);
-                       let shift_bits = self.gcc_urem(raw_shift_bits, width_bits);
-
-                       // rhs_bits >> shift_bits
-                       let shr = self.gcc_lshr(rhs_bits, shift_bits);
-
-                       let inv_shift_bits = self.gcc_sub(width_bits, shift_bits);
-
-                       // lhs_bits.bounded_shl(inv_shift_bits)
-                       let inv_shift_bits_mod = self.gcc_urem(inv_shift_bits, width_bits);
-                       let shl = self.gcc_shl(lhs_bits, inv_shift_bits_mod);
-                       let zero = self.cx.gcc_uint(lhs_bits.get_type(), 0);
-                       let is_zero =
-                           self.gcc_icmp(IntPredicate::IntEQ, inv_shift_bits_mod, zero);
-                       let shl = self.select(is_zero, zero, shl);
-
-                       self.or(shl, shr)
-                   }
                    sym::saturating_add => self.saturating_add(
                        args[0].immediate(),
                        args[1].immediate(),
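
The GCC lowering above follows the same recipe with `urem` in place of the mask. A plain-Rust sketch of its `funnel_shr` arm (again a hypothetical helper, u32 only): the second `urem` folds the out-of-range inverse shift back to zero, and the `select` then discards the wrapped contribution.

    /// Hypothetical stand-in for the removed GCC `funnel_shr` lowering, u32 only.
    fn funnel_shr_sketch(lhs_bits: u32, rhs_bits: u32, raw_shift_bits: u32) -> u32 {
        let width_bits = 32;
        let shift_bits = raw_shift_bits % width_bits; // gcc_urem
        let shr = rhs_bits >> shift_bits;
        // (width - shift) % width is 0 exactly when shift_bits == 0 ...
        let inv_shift_bits_mod = (width_bits - shift_bits) % width_bits;
        // ... and in that case the select drops lhs_bits' contribution entirely
        let shl = if inv_shift_bits_mod == 0 {
            0
        } else {
            lhs_bits << inv_shift_bits_mod
        };
        shl | shr
    }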

compiler/rustc_const_eval/src/interpret/intrinsics.rs

Lines changed: 0 additions & 27 deletions
@@ -302,33 +302,6 @@ impl<'tcx, M: Machine<'tcx>> InterpCx<'tcx, M> {
                 let result = Scalar::from_uint(truncated_bits, layout_val.size);
                 self.write_scalar(result, dest)?;
             }
-            sym::funnel_shl | sym::funnel_shr => {
-                // funnel_shl: (A << (S % BW)) | (B >> ((BW - S) % BW))
-                // funnel_shr: (A << ((BW - S) % BW)) | (B >> (S % BW))
-                let layout_val = self.layout_of(instance_args.type_at(0))?;
-
-                let lhs = self.read_scalar(&args[0])?;
-                let lhs_bits = lhs.to_bits(layout_val.size)?; // sign is ignored here
-
-                let rhs = self.read_scalar(&args[1])?;
-                let rhs_bits = rhs.to_bits(layout_val.size)?; // sign is ignored here
-
-                let raw_shift = self.read_scalar(&args[2])?;
-                let raw_shift_bits = raw_shift.to_u32()?;
-
-                // The funnel shifts modulo by T::BITS to circumvent panics/UB.
-                let width_bits = u32::try_from(layout_val.size.bits()).unwrap();
-                let shift_bits = raw_shift_bits % width_bits;
-                let inv_shift_bits = width_bits - shift_bits;
-                let result_bits = if intrinsic_name == sym::funnel_shl {
-                    (lhs_bits << shift_bits) | rhs_bits.unbounded_shr(inv_shift_bits)
-                } else {
-                    (rhs_bits >> shift_bits) | lhs_bits.unbounded_shl(inv_shift_bits)
-                };
-                let truncated_bits = layout_val.size.truncate(result_bits);
-                let result = Scalar::from_uint(truncated_bits, layout_val.size);
-                self.write_scalar(result, dest)?;
-            }
             sym::copy => {
                 self.copy_intrinsic(&args[0], &args[1], &args[2], /*nonoverlapping*/ false)?;
             }
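
The two formulas in the removed comments are easy to verify by hand; a quick sanity check with hypothetical u8 values (BW = 8):

    fn main() {
        // funnel_shl: (A << (S % BW)) | (B >> ((BW - S) % BW))
        let (a, b, s): (u8, u8, u32) = (0b1000_0001, 0b0101_0101, 3);
        let shl = a << (s % 8);       // 0b0000_1000: low 5 bits of `a`, shifted up
        let shr = b >> ((8 - s) % 8); // 0b0000_0010: top 3 bits of `b`
        assert_eq!(shl | shr, 0b0000_1010);
    }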

library/core/src/intrinsics/mod.rs

Lines changed: 34 additions & 3 deletions
@@ -56,6 +56,7 @@
 
 use crate::ffi::va_list::{VaArgSafe, VaListImpl};
 use crate::marker::{ConstParamTy, DiscriminantKind, PointeeSized, Tuple};
+use crate::ops::BitOr;
 use crate::ptr;
 
 mod bounds;
@@ -2105,7 +2106,7 @@ pub const fn saturating_sub<T: Copy>(a: T, b: T) -> T;
 /// Funnel Shift left.
 ///
 /// Concatenates `a` and `b` (with `a` in the most significant half),
-/// creating an integer twice as wide. Then shift this inetegr left
+/// creating an integer twice as wide. Then shift this integer left
 /// by `shift` (taken modulo the bit size of `T`), and extract the
 /// most significant half. If `a` and `b` are the same, this is equivalent
 /// to a rotate left operation.
@@ -2121,7 +2122,22 @@ pub const fn saturating_sub<T: Copy>(a: T, b: T) -> T;
 #[rustc_nounwind]
 #[rustc_const_unstable(feature = "funnel_shifts", issue = "145686")]
 #[unstable(feature = "funnel_shifts", issue = "145686")]
-pub const fn funnel_shl<T: Copy>(a: T, b: T, shift: u32) -> T;
+#[miri::intrinsic_fallback_is_spec]
+pub const fn funnel_shl<T: Copy + BitOr<Output = T>>(a: T, b: T, shift: u32) -> T {
+    let width = 8 * size_of::<T>() as u32;
+    let shift = shift % width;
+
+    if shift == 0 {
+        return a;
+    }
+
+    // SAFETY: `shift` is non-zero and less than `width`,
+    // and `width - shift` is less than `width` because `shift > 0`
+    let shl = unsafe { unchecked_shl(a, shift) };
+    let shr = unsafe { unchecked_shr(b, width - shift) };
+
+    shl | shr
+}
 
 /// Funnel Shift right.
 ///
@@ -2142,7 +2158,22 @@ pub const fn funnel_shl<T: Copy>(a: T, b: T, shift: u32) -> T;
 #[rustc_nounwind]
 #[rustc_const_unstable(feature = "funnel_shifts", issue = "145686")]
 #[unstable(feature = "funnel_shifts", issue = "145686")]
-pub const fn funnel_shr<T: Copy>(a: T, b: T, shift: u32) -> T;
+#[miri::intrinsic_fallback_is_spec]
+pub const fn funnel_shr<T: Copy + BitOr<Output = T>>(a: T, b: T, shift: u32) -> T {
+    let width = 8 * size_of::<T>() as u32;
+    let shift = shift % width;
+
+    if shift == 0 {
+        return b;
+    }
+
+    // SAFETY: `shift` is non-zero and less than `width`,
+    // and `width - shift` is less than `width` because `shift > 0`
+    let shr = unsafe { unchecked_shr(b, shift) };
+    let shl = unsafe { unchecked_shl(a, width - shift) };
+
+    shr | shl
+}
 
 /// This is an implementation detail of [`crate::ptr::read`] and should
 /// not be used anywhere else. See its comments for why this exists.
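
With the fallback body in place, the intrinsics can be exercised directly on nightly; a usage sketch (the `core_intrinsics` and `internal_features` gates are assumptions, not part of this diff):

    #![feature(core_intrinsics, funnel_shifts)]
    #![allow(internal_features)]
    use core::intrinsics::{funnel_shl, funnel_shr};

    fn main() {
        let a: u8 = 0b1000_0001;
        let b: u8 = 0b0101_0101;
        // High half of the 16-bit concatenation (a:b) shifted left by 3.
        assert_eq!(funnel_shl(a, b, 3), 0b0000_1010);
        // With both halves equal, a funnel shift degenerates to a rotate.
        assert_eq!(funnel_shl(a, a, 3), a.rotate_left(3));
        assert_eq!(funnel_shr(a, a, 3), a.rotate_right(3));
    }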
