@@ -1557,11 +1557,10 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
     // FIXME(#75598): Direct use of these intrinsics improves codegen significantly at opt-level <=
     // 1, where the method versions of these operations are not inlined.
     use intrinsics::{
-        unchecked_shl, unchecked_shr, unchecked_sub, wrapping_add, wrapping_mul, wrapping_sub,
+        cttz_nonzero, exact_div, unchecked_rem, unchecked_shl, unchecked_shr, unchecked_sub,
+        wrapping_add, wrapping_mul, wrapping_sub,
     };
 
-    let addr = p.addr();
-
     /// Calculate multiplicative modular inverse of `x` modulo `m`.
     ///
     /// This implementation is tailored for `align_offset` and has following preconditions:
@@ -1611,36 +1610,61 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
         }
     }
 
+    let addr = p.addr();
     let stride = mem::size_of::<T>();
     // SAFETY: `a` is a power-of-two, therefore non-zero.
     let a_minus_one = unsafe { unchecked_sub(a, 1) };
-    if stride == 1 {
-        // `stride == 1` case can be computed more simply through `-p (mod a)`, but doing so
-        // inhibits LLVM's ability to select instructions like `lea`. Instead we compute
+
+    if stride == 0 {
+        // SPECIAL_CASE: handle 0-sized types. No matter how many times we step, the address will
+        // stay the same, so no offset will be able to align the pointer unless it is already
+        // aligned. This branch _will_ be optimized out as `stride` is known at compile-time.
+        let p_mod_a = addr & a_minus_one;
+        return if p_mod_a == 0 { 0 } else { usize::MAX };
+    }
+
+    // SAFETY: `stride == 0` case has been handled by the special case above.
+    let a_mod_stride = unsafe { unchecked_rem(a, stride) };
+    if a_mod_stride == 0 {
+        // SPECIAL_CASE: In cases where the `a` is divisible by `stride`, byte offset to align a
+        // pointer can be computed more simply through `-p (mod a)`. In the off-chance the byte
+        // offset is not a multiple of `stride`, the input pointer was misaligned and no pointer
+        // offset will be able to produce a `p` aligned to the specified `a`.
         //
-        //    round_up_to_next_alignment(p, a) - p
+        // The naive `-p (mod a)` equation inhibits LLVM's ability to select instructions
+        // like `lea`. We compute `(round_up_to_next_alignment(p, a) - p)` instead. This
+        // redistributes operations around the load-bearing, but pessimizing `and` instruction
+        // sufficiently for LLVM to be able to utilize the various optimizations it knows about.
         //
-        // which distributes operations around the load-bearing, but pessimizing `and` sufficiently
-        // for LLVM to be able to utilize the various optimizations it knows about.
-        return wrapping_sub(wrapping_add(addr, a_minus_one) & wrapping_sub(0, a), addr);
-    }
+        // LLVM handles the branch here particularly nicely. If this branch needs to be evaluated
+        // at runtime, it will produce a mask `if addr_mod_stride == 0 { 0 } else { usize::MAX }`
+        // in a branch-free way and then bitwise-OR it with whatever result the `-p mod a`
+        // computation produces.
+
+        // SAFETY: `stride == 0` case has been handled by the special case above.
+        let addr_mod_stride = unsafe { unchecked_rem(addr, stride) };
 
-    let pmoda = addr & a_minus_one;
-    if pmoda == 0 {
-        // Already aligned. Yay!
-        return 0;
-    } else if stride == 0 {
-        // If the pointer is not aligned, and the element is zero-sized, then no amount of
-        // elements will ever align the pointer.
-        return usize::MAX;
+        return if addr_mod_stride == 0 {
+            let aligned_address = wrapping_add(addr, a_minus_one) & wrapping_sub(0, a);
+            let byte_offset = wrapping_sub(aligned_address, addr);
+            // SAFETY: `stride` is non-zero. This is guaranteed to divide exactly as well, because
+            // addr has been verified to be aligned to the original type’s alignment requirements.
+            unsafe { exact_div(byte_offset, stride) }
+        } else {
+            usize::MAX
+        };
     }
 
-    let smoda = stride & a_minus_one;
+    // GENERAL_CASE: From here on we’re handling the very general case where `addr` may be
+    // misaligned, there isn’t an obvious relationship between `stride` and `a` that we can take an
+    // advantage of, etc. This case produces machine code that isn’t particularly high quality,
+    // compared to the special cases above. The code produced here is still within the realm of
+    // miracles, given the situations this case has to deal with.
+
     // SAFETY: a is power-of-two hence non-zero. stride == 0 case is handled above.
-    let gcdpow = unsafe { intrinsics::cttz_nonzero(stride).min(intrinsics::cttz_nonzero(a)) };
+    let gcdpow = unsafe { cttz_nonzero(stride).min(cttz_nonzero(a)) };
     // SAFETY: gcdpow has an upper-bound that’s at most the number of bits in a usize.
     let gcd = unsafe { unchecked_shl(1usize, gcdpow) };
-
     // SAFETY: gcd is always greater or equal to 1.
     if addr & unsafe { unchecked_sub(gcd, 1) } == 0 {
         // This branch solves for the following linear congruence equation:
@@ -1656,14 +1680,13 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
         // ` p' + s'o = 0 mod a' `
         // ` o = (a' - (p' mod a')) * (s'^-1 mod a') `
         //
-        // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the second
-        // term is "how does incrementing `p` by `s` bytes change the relative alignment of `p`" (again
-        // divided by `g`).
-        // Division by `g` is necessary to make the inverse well formed if `a` and `s` are not
-        // co-prime.
+        // The first term is "the relative alignment of `p` to `a`" (divided by the `g`), the
+        // second term is "how does incrementing `p` by `s` bytes change the relative alignment of
+        // `p`" (again divided by `g`). Division by `g` is necessary to make the inverse well
+        // formed if `a` and `s` are not co-prime.
         //
         // Furthermore, the result produced by this solution is not "minimal", so it is necessary
-        // to take the result `o mod lcm(s, a)`. We can replace `lcm(s, a)` with just a `a'`.
+        // to take the result `o mod lcm(s, a)`. This `lcm(s, a)` is the same as `a'`.
 
         // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
         // `a`.
@@ -1673,11 +1696,11 @@ pub(crate) unsafe fn align_offset<T: Sized>(p: *const T, a: usize) -> usize {
         let a2minus1 = unsafe { unchecked_sub(a2, 1) };
         // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
         // `a`.
-        let s2 = unsafe { unchecked_shr(smoda, gcdpow) };
+        let s2 = unsafe { unchecked_shr(stride & a_minus_one, gcdpow) };
         // SAFETY: `gcdpow` has an upper-bound not greater than the number of trailing 0-bits in
         // `a`. Furthermore, the subtraction cannot overflow, because `a2 = a >> gcdpow` will
        // always be strictly greater than `(p % a) >> gcdpow`.
-        let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(pmoda, gcdpow)) };
+        let minusp2 = unsafe { unchecked_sub(a2, unchecked_shr(addr & a_minus_one, gcdpow)) };
         // SAFETY: `a2` is a power-of-two, as proven above. `s2` is strictly less than `a2`
         // because `(s % a) >> gcdpow` is strictly less than `a >> gcdpow`.
         return wrapping_mul(minusp2, unsafe { mod_inv(s2, a2) }) & a2minus1;
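
For readers following the `a_mod_stride == 0` special case above, here is a minimal standalone sketch of the same computation on stable Rust, using plain wrapping arithmetic in place of the `exact_div`/`unchecked_rem` intrinsics. The function name and the bare `usize` parameters are illustrative only and are not part of the patch:

```rust
/// Sketch of the fast path: `align` is a power of two and a multiple of `stride`.
fn align_offset_when_stride_divides_align(addr: usize, stride: usize, align: usize) -> usize {
    assert!(align.is_power_of_two() && stride != 0 && align % stride == 0);
    if addr % stride != 0 {
        // The address is misaligned for the element type itself; stepping by whole
        // elements can never repair that, so alignment is unreachable.
        return usize::MAX;
    }
    // `round_up_to_next_alignment(addr, align) - addr`, written in the shape the patch
    // uses so the compiler can fold it into address-generation instructions.
    let aligned_address = addr.wrapping_add(align - 1) & 0usize.wrapping_sub(align);
    let byte_offset = aligned_address.wrapping_sub(addr);
    // `byte_offset` is a multiple of `stride` here, so plain division is exact.
    byte_offset / stride
}

fn main() {
    // A 4-byte element at address 12 needs one element step to reach 16-byte alignment.
    assert_eq!(align_offset_when_stride_divides_align(12, 4, 16), 1);
    // Address 13 is not even aligned for a 4-byte element, so alignment is unreachable.
    assert_eq!(align_offset_when_stride_divides_align(13, 4, 16), usize::MAX);
    // An already aligned address needs zero steps.
    assert_eq!(align_offset_when_stride_divides_align(32, 4, 16), 0);
}
```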
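Likewise, the GENERAL_CASE comments and the congruence `p' + s'o = 0 mod a'` can be checked with a small self-contained model. The sketch below is not the in-tree `mod_inv` (which seeds its inverse from a lookup table and uses the unchecked intrinsics); it only demonstrates, under those simplifications, the Newton/Hensel lifting of an inverse modulo a power of two and the final `(a' - p') * (s'^-1 mod a') mod a'` step. All names here are illustrative:

```rust
/// Multiplicative inverse of `x` modulo the power of two `m`, via Newton/Hensel lifting:
/// if `x*y == 1 (mod 2^k)`, then `x*(y*(2 - x*y)) == 1 (mod 2^(2k))`.
fn mod_inv_pow2(x: usize, m: usize) -> usize {
    debug_assert!(m.is_power_of_two() && (m == 1 || x % 2 == 1));
    let mut inverse = 1usize; // correct modulo 2
    let mut valid_bits = 1u32;
    while valid_bits < m.trailing_zeros() {
        inverse = inverse.wrapping_mul(2usize.wrapping_sub(x.wrapping_mul(inverse)));
        valid_bits *= 2;
    }
    inverse & (m - 1)
}

/// Smallest `o` with `addr + o * stride == 0 (mod align)`, or `usize::MAX` if none exists.
fn align_offset_general(addr: usize, stride: usize, align: usize) -> usize {
    assert!(align.is_power_of_two() && stride != 0);
    // `g = gcd(stride, align)` is a power of two; dividing through by it leaves an odd
    // `s'`, which is therefore invertible modulo the power of two `a'`.
    let gcdpow = stride.trailing_zeros().min(align.trailing_zeros());
    let gcd = 1usize << gcdpow;
    if addr & (gcd - 1) != 0 {
        // `addr mod g` never changes when stepping by `stride`, so alignment is unreachable.
        return usize::MAX;
    }
    let a2 = align >> gcdpow; // a'
    let s2 = (stride & (align - 1)) >> gcdpow; // s' = (stride mod align) / g
    let p2 = (addr & (align - 1)) >> gcdpow; // p' = (addr mod align) / g
    // o = (a' - p') * (s'^-1 mod a')  (mod a')
    (a2 - p2).wrapping_mul(mod_inv_pow2(s2, a2)) & (a2 - 1)
}

fn main() {
    // Address 1, 3-byte elements, 8-byte alignment: five steps reach address 16.
    assert_eq!(align_offset_general(1, 3, 8), 5);
    // Address 6, 6-byte elements, 8-byte alignment: three steps reach address 24.
    assert_eq!(align_offset_general(6, 6, 8), 3);
    // Address 1 with 2-byte elements can never clear the low bit.
    assert_eq!(align_offset_general(1, 2, 8), usize::MAX);
}
```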