Skip to content

Commit b5ed099

Browse files
committed
Make checked ops emit *unchecked* LLVM operations where feasible
For things with easily pre-checked overflow conditions -- shifts and unsigned subtraction -- write the checked methods in such a way that we stop emitting wrapping versions of them. For example, today <https://rust.godbolt.org/z/qM9YK8Txb> neither ```rust a.checked_sub(b).unwrap() ``` nor ```rust a.checked_sub(b).unwrap_unchecked() ``` actually optimizes to `sub nuw`. After this PR they do.
1 parent 5260893 commit b5ed099

File tree

4 files changed

+146
-46
lines changed

4 files changed

+146
-46
lines changed

library/core/src/num/int_macros.rs

+18-4
Original file line numberDiff line numberDiff line change
@@ -1163,12 +1163,19 @@ macro_rules! int_impl {
11631163
/// ```
11641164
#[stable(feature = "wrapping", since = "1.7.0")]
11651165
#[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
1166+
// We could always go back to wrapping
1167+
#[rustc_allow_const_fn_unstable(unchecked_shifts)]
11661168
#[must_use = "this returns the result of the operation, \
11671169
without modifying the original"]
11681170
#[inline]
11691171
pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
1170-
let (a, b) = self.overflowing_shl(rhs);
1171-
if unlikely!(b) { None } else { Some(a) }
1172+
// Not using overflowing_shl as that's a wrapping shift
1173+
if rhs < Self::BITS {
1174+
// SAFETY: just checked the RHS is in-range
1175+
Some(unsafe { self.unchecked_shl(rhs) })
1176+
} else {
1177+
None
1178+
}
11721179
}
11731180

11741181
/// Strict shift left. Computes `self << rhs`, panicking if `rhs` is larger
@@ -1254,12 +1261,19 @@ macro_rules! int_impl {
12541261
/// ```
12551262
#[stable(feature = "wrapping", since = "1.7.0")]
12561263
#[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
1264+
// We could always go back to wrapping
1265+
#[rustc_allow_const_fn_unstable(unchecked_shifts)]
12571266
#[must_use = "this returns the result of the operation, \
12581267
without modifying the original"]
12591268
#[inline]
12601269
pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
1261-
let (a, b) = self.overflowing_shr(rhs);
1262-
if unlikely!(b) { None } else { Some(a) }
1270+
// Not using overflowing_shr as that's a wrapping shift
1271+
if rhs < Self::BITS {
1272+
// SAFETY: just checked the RHS is in-range
1273+
Some(unsafe { self.unchecked_shr(rhs) })
1274+
} else {
1275+
None
1276+
}
12631277
}
12641278

12651279
/// Strict shift right. Computes `self >> rhs`, panicking `rhs` is

library/core/src/num/uint_macros.rs

+29-6
Original file line numberDiff line numberDiff line change
@@ -579,8 +579,17 @@ macro_rules! uint_impl {
579579
without modifying the original"]
580580
#[inline]
581581
pub const fn checked_sub(self, rhs: Self) -> Option<Self> {
582-
let (a, b) = self.overflowing_sub(rhs);
583-
if unlikely!(b) { None } else { Some(a) }
582+
// Per PR#103299, there's no advantage to the `overflowing` intrinsic
583+
// for *unsigned* subtraction and we just emit the manual check anyway.
584+
// Thus, rather than using `overflowing_sub` that produces a wrapping
585+
// subtraction, check it ourself so we can use an unchecked one.
586+
587+
if self >= rhs {
588+
// SAFETY: just checked this can't overflow
589+
Some(unsafe { intrinsics::unchecked_sub(self, rhs) })
590+
} else {
591+
None
592+
}
584593
}
585594

586595
/// Strict integer subtraction. Computes `self - rhs`, panicking if
@@ -1222,12 +1231,19 @@ macro_rules! uint_impl {
12221231
/// ```
12231232
#[stable(feature = "wrapping", since = "1.7.0")]
12241233
#[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
1234+
// We could always go back to wrapping
1235+
#[rustc_allow_const_fn_unstable(unchecked_shifts)]
12251236
#[must_use = "this returns the result of the operation, \
12261237
without modifying the original"]
12271238
#[inline]
12281239
pub const fn checked_shl(self, rhs: u32) -> Option<Self> {
1229-
let (a, b) = self.overflowing_shl(rhs);
1230-
if unlikely!(b) { None } else { Some(a) }
1240+
// Not using overflowing_shl as that's a wrapping shift
1241+
if rhs < Self::BITS {
1242+
// SAFETY: just checked the RHS is in-range
1243+
Some(unsafe { self.unchecked_shl(rhs) })
1244+
} else {
1245+
None
1246+
}
12311247
}
12321248

12331249
/// Strict shift left. Computes `self << rhs`, panicking if `rhs` is larger
@@ -1313,12 +1329,19 @@ macro_rules! uint_impl {
13131329
/// ```
13141330
#[stable(feature = "wrapping", since = "1.7.0")]
13151331
#[rustc_const_stable(feature = "const_checked_int_methods", since = "1.47.0")]
1332+
// We could always go back to wrapping
1333+
#[rustc_allow_const_fn_unstable(unchecked_shifts)]
13161334
#[must_use = "this returns the result of the operation, \
13171335
without modifying the original"]
13181336
#[inline]
13191337
pub const fn checked_shr(self, rhs: u32) -> Option<Self> {
1320-
let (a, b) = self.overflowing_shr(rhs);
1321-
if unlikely!(b) { None } else { Some(a) }
1338+
// Not using overflowing_shr as that's a wrapping shift
1339+
if rhs < Self::BITS {
1340+
// SAFETY: just checked the RHS is in-range
1341+
Some(unsafe { self.unchecked_shr(rhs) })
1342+
} else {
1343+
None
1344+
}
13221345
}
13231346

13241347
/// Strict shift right. Computes `self >> rhs`, panicking `rhs` is

tests/codegen/checked_math.rs

+86
Original file line numberDiff line numberDiff line change
@@ -0,0 +1,86 @@
1+
//@ compile-flags: -O -Z merge-functions=disabled
2+
3+
#![crate_type = "lib"]
4+
#![feature(unchecked_shifts)]
5+
6+
// Because the result of something like `u32::checked_sub` can only be used if it
7+
// didn't overflow, make sure that LLVM actually knows that in optimized builds.
8+
// Thanks to poison semantics, this doesn't even need branches.
9+
10+
// CHECK-LABEL: @checked_sub_unsigned
11+
// CHECK-SAME: (i16 noundef %a, i16 noundef %b)
12+
#[no_mangle]
13+
pub fn checked_sub_unsigned(a: u16, b: u16) -> Option<u16> {
14+
// CHECK-DAG: %[[IS_SOME:.+]] = icmp uge i16 %a, %b
15+
// CHECK-DAG: %[[DIFF_P:.+]] = sub nuw i16 %a, %b
16+
// CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i16
17+
// CHECK-DAG: %[[DIFF_U:.+]] = select i1 %[[IS_SOME]], i16 %[[DIFF_P]], i16 undef
18+
19+
// CHECK: %[[R0:.+]] = insertvalue { i16, i16 } poison, i16 %[[DISCR]], 0
20+
// CHECK: %[[R1:.+]] = insertvalue { i16, i16 } %[[R0]], i16 %[[DIFF_U]], 1
21+
// CHECK: ret { i16, i16 } %[[R1]]
22+
a.checked_sub(b)
23+
}
24+
25+
// Note that `shl` and `shr` in LLVM are already unchecked. So rather than
26+
// looking for no-wrap flags, we just need there to not be any masking.
27+
28+
// CHECK-LABEL: @checked_shl_unsigned
29+
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
30+
#[no_mangle]
31+
pub fn checked_shl_unsigned(a: u32, b: u32) -> Option<u32> {
32+
// CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
33+
// CHECK-DAG: %[[SHIFTED_P:.+]] = shl i32 %a, %b
34+
// CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
35+
// CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
36+
37+
// CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
38+
// CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
39+
// CHECK: ret { i32, i32 } %[[R1]]
40+
a.checked_shl(b)
41+
}
42+
43+
// CHECK-LABEL: @checked_shr_unsigned
44+
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
45+
#[no_mangle]
46+
pub fn checked_shr_unsigned(a: u32, b: u32) -> Option<u32> {
47+
// CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
48+
// CHECK-DAG: %[[SHIFTED_P:.+]] = lshr i32 %a, %b
49+
// CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
50+
// CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
51+
52+
// CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
53+
// CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
54+
// CHECK: ret { i32, i32 } %[[R1]]
55+
a.checked_shr(b)
56+
}
57+
58+
// CHECK-LABEL: @checked_shl_signed
59+
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
60+
#[no_mangle]
61+
pub fn checked_shl_signed(a: i32, b: u32) -> Option<i32> {
62+
// CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
63+
// CHECK-DAG: %[[SHIFTED_P:.+]] = shl i32 %a, %b
64+
// CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
65+
// CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
66+
67+
// CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
68+
// CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
69+
// CHECK: ret { i32, i32 } %[[R1]]
70+
a.checked_shl(b)
71+
}
72+
73+
// CHECK-LABEL: @checked_shr_signed
74+
// CHECK-SAME: (i32 noundef %a, i32 noundef %b)
75+
#[no_mangle]
76+
pub fn checked_shr_signed(a: i32, b: u32) -> Option<i32> {
77+
// CHECK-DAG: %[[IS_SOME:.+]] = icmp ult i32 %b, 32
78+
// CHECK-DAG: %[[SHIFTED_P:.+]] = ashr i32 %a, %b
79+
// CHECK-DAG: %[[DISCR:.+]] = zext i1 %[[IS_SOME]] to i32
80+
// CHECK-DAG: %[[SHIFTED_U:.+]] = select i1 %[[IS_SOME]], i32 %[[SHIFTED_P]], i32 undef
81+
82+
// CHECK: %[[R0:.+]] = insertvalue { i32, i32 } poison, i32 %[[DISCR]], 0
83+
// CHECK: %[[R1:.+]] = insertvalue { i32, i32 } %[[R0]], i32 %[[SHIFTED_U]], 1
84+
// CHECK: ret { i32, i32 } %[[R1]]
85+
a.checked_shr(b)
86+
}

tests/mir-opt/pre-codegen/checked_ops.checked_shl.PreCodegen.after.mir

+13-36
Original file line numberDiff line numberDiff line change
@@ -7,58 +7,35 @@ fn checked_shl(_1: u32, _2: u32) -> Option<u32> {
77
scope 1 (inlined core::num::<impl u32>::checked_shl) {
88
debug self => _1;
99
debug rhs => _2;
10-
let mut _6: bool;
11-
scope 2 {
12-
debug a => _4;
13-
debug b => _5;
14-
}
15-
scope 3 (inlined core::num::<impl u32>::overflowing_shl) {
10+
let mut _3: bool;
11+
let mut _4: u32;
12+
scope 2 (inlined core::num::<impl u32>::unchecked_shl) {
1613
debug self => _1;
1714
debug rhs => _2;
18-
let mut _4: u32;
19-
let mut _5: bool;
20-
scope 4 (inlined core::num::<impl u32>::wrapping_shl) {
21-
debug self => _1;
22-
debug rhs => _2;
23-
let mut _3: u32;
24-
scope 5 (inlined core::num::<impl u32>::unchecked_shl) {
25-
debug self => _1;
26-
debug rhs => _3;
27-
}
28-
}
2915
}
3016
}
3117

3218
bb0: {
33-
StorageLive(_4);
34-
StorageLive(_5);
3519
StorageLive(_3);
36-
_3 = BitAnd(_2, const 31_u32);
37-
_4 = ShlUnchecked(_1, _3);
38-
StorageDead(_3);
39-
_5 = Ge(_2, const core::num::<impl u32>::BITS);
40-
StorageLive(_6);
41-
_6 = unlikely(move _5) -> [return: bb1, unwind unreachable];
20+
_3 = Lt(_2, const core::num::<impl u32>::BITS);
21+
switchInt(move _3) -> [0: bb1, otherwise: bb2];
4222
}
4323

4424
bb1: {
45-
switchInt(move _6) -> [0: bb2, otherwise: bb3];
25+
_0 = const Option::<u32>::None;
26+
goto -> bb3;
4627
}
4728

4829
bb2: {
49-
_0 = Option::<u32>::Some(_4);
50-
goto -> bb4;
30+
StorageLive(_4);
31+
_4 = ShlUnchecked(_1, _2);
32+
_0 = Option::<u32>::Some(move _4);
33+
StorageDead(_4);
34+
goto -> bb3;
5135
}
5236

5337
bb3: {
54-
_0 = const Option::<u32>::None;
55-
goto -> bb4;
56-
}
57-
58-
bb4: {
59-
StorageDead(_6);
60-
StorageDead(_5);
61-
StorageDead(_4);
38+
StorageDead(_3);
6239
return;
6340
}
6441
}

0 commit comments

Comments
 (0)