re-format and re-enable rustfmt on CI
gnzlbg committed Jun 15, 2018
1 parent 2777950 · commit 40cae09
Showing 59 changed files with 1,038 additions and 1,711 deletions.
1 change: 0 additions & 1 deletion .travis.yml
@@ -55,7 +55,6 @@ matrix:
cargo clippy --all -- -D clippy-pedantic
allow_failures:
- env: CLIPPY=On TARGET=x86_64-unknown-linux-gnu NO_ADD=1
-     - env: RUSTFMT=On TARGET=x86_64-unknown-linux-gnu NO_ADD=1

before_install:
# FIXME (travis-ci/travis-ci#8920) shouldn't be necessary...
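With the RUSTFMT entry gone from allow_failures, a formatting diff now fails the build instead of being tolerated. A minimal sketch of such a gate, assuming a rustfmt recent enough to support --check (the repository's actual CI scripts may differ):

    rustup component add rustfmt-preview
    cargo fmt --all -- --check  # exits non-zero if any file would be re-formatted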
104 changes: 22 additions & 82 deletions coresimd/aarch64/crypto.rs
@@ -16,36 +16,36 @@ extern "C" {
fn vsha1h_u32_(hash_e: u32) -> u32;
#[link_name = "llvm.aarch64.crypto.sha1su0"]
fn vsha1su0q_u32_(
- w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t
+ w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t,
) -> uint32x4_t;
#[link_name = "llvm.aarch64.crypto.sha1su1"]
fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
#[link_name = "llvm.aarch64.crypto.sha1c"]
fn vsha1cq_u32_(
- hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
) -> uint32x4_t;
#[link_name = "llvm.aarch64.crypto.sha1p"]
fn vsha1pq_u32_(
- hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
) -> uint32x4_t;
#[link_name = "llvm.aarch64.crypto.sha1m"]
fn vsha1mq_u32_(
- hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
) -> uint32x4_t;

#[link_name = "llvm.aarch64.crypto.sha256h"]
fn vsha256hq_u32_(
- hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t,
) -> uint32x4_t;
#[link_name = "llvm.aarch64.crypto.sha256h2"]
fn vsha256h2q_u32_(
- hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t
+ hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t,
) -> uint32x4_t;
#[link_name = "llvm.aarch64.crypto.sha256su0"]
fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t;
#[link_name = "llvm.aarch64.crypto.sha256su1"]
fn vsha256su1q_u32_(
- tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t
+ tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t,
) -> uint32x4_t;
}

@@ -97,7 +97,7 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha1c))]
pub unsafe fn vsha1cq_u32(
- hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
) -> uint32x4_t {
vsha1cq_u32_(hash_abcd, hash_e, wk)
}
@@ -107,7 +107,7 @@ pub unsafe fn vsha1cq_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha1m))]
pub unsafe fn vsha1mq_u32(
- hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
) -> uint32x4_t {
vsha1mq_u32_(hash_abcd, hash_e, wk)
}
@@ -117,7 +117,7 @@ pub unsafe fn vsha1mq_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha1p))]
pub unsafe fn vsha1pq_u32(
- hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
) -> uint32x4_t {
vsha1pq_u32_(hash_abcd, hash_e, wk)
}
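The three round intrinsics above differ only in the round function they apply (choose, majority, parity). A minimal sketch of how they chain with vsha1h_u32, assuming hash_abcd, hash_e, and wk hold the working state and the prepared message words (names are illustrative, not from this commit):

    // Hypothetical usage sketch; vsha1h_u32 computes the fixed rotate
    // used to derive the next e value.
    let e_rot = vsha1h_u32(hash_e);
    let abcd = vsha1cq_u32(hash_abcd, hash_e, wk); // "choose" rounds
    // Later round groups swap in vsha1pq_u32 (parity) and vsha1mq_u32 (majority).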
@@ -127,7 +127,7 @@ pub unsafe fn vsha1pq_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha1su0))]
pub unsafe fn vsha1su0q_u32(
- w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t
+ w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t,
) -> uint32x4_t {
vsha1su0q_u32_(w0_3, w4_7, w8_11)
}
@@ -137,7 +137,7 @@ pub unsafe fn vsha1su0q_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha1su1))]
pub unsafe fn vsha1su1q_u32(
- tw0_3: uint32x4_t, w12_15: uint32x4_t
+ tw0_3: uint32x4_t, w12_15: uint32x4_t,
) -> uint32x4_t {
vsha1su1q_u32_(tw0_3, w12_15)
}
@@ -147,7 +147,7 @@ pub unsafe fn vsha1su1q_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha256h))]
pub unsafe fn vsha256hq_u32(
- hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t
+ hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t,
) -> uint32x4_t {
vsha256hq_u32_(hash_abcd, hash_efgh, wk)
}
@@ -157,7 +157,7 @@ pub unsafe fn vsha256hq_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha256h2))]
pub unsafe fn vsha256h2q_u32(
- hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t
+ hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t,
) -> uint32x4_t {
vsha256h2q_u32_(hash_efgh, hash_abcd, wk)
}
@@ -167,7 +167,7 @@ pub unsafe fn vsha256h2q_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha256su0))]
pub unsafe fn vsha256su0q_u32(
- w0_3: uint32x4_t, w4_7: uint32x4_t
+ w0_3: uint32x4_t, w4_7: uint32x4_t,
) -> uint32x4_t {
vsha256su0q_u32_(w0_3, w4_7)
}
@@ -177,7 +177,7 @@ pub unsafe fn vsha256su0q_u32(
#[target_feature(enable = "crypto")]
#[cfg_attr(test, assert_instr(sha256su1))]
pub unsafe fn vsha256su1q_u32(
- tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t
+ tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t,
) -> uint32x4_t {
vsha256su1q_u32_(tw0_3, w8_11, w12_15)
}
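The su0/su1 pair above implements the SHA-256 message-schedule update. A minimal sketch of how they compose, assuming w0_3 through w12_15 hold the sixteen current schedule words (variable names are illustrative, not from this commit):

    // Hypothetical usage sketch built only from the intrinsics above.
    let tw0_3 = vsha256su0q_u32(w0_3, w4_7);            // partial schedule mix
    let w16_19 = vsha256su1q_u32(tw0_3, w8_11, w12_15); // next four message words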
@@ -199,22 +199,8 @@ mod tests {
assert_eq!(
r,
u8x16::new(
-     124,
-     123,
-     124,
-     118,
-     124,
-     123,
-     124,
-     197,
-     124,
-     123,
-     124,
-     118,
-     124,
-     123,
-     124,
-     197
+     124, 123, 124, 118, 124, 123, 124, 197, 124, 123, 124, 118,
+     124, 123, 124, 197
)
);
}
@@ -229,22 +215,7 @@ mod tests {
assert_eq!(
r,
u8x16::new(
-     9,
-     213,
-     9,
-     251,
-     9,
-     213,
-     9,
-     56,
-     9,
-     213,
-     9,
-     251,
-     9,
-     213,
-     9,
-     56
+     9, 213, 9, 251, 9, 213, 9, 56, 9, 213, 9, 251, 9, 213, 9, 56
)
);
}
@@ -256,24 +227,7 @@ mod tests {
let r: u8x16 = vaesmcq_u8(data).into_bits();
assert_eq!(
r,
- u8x16::new(
-     3,
-     4,
-     9,
-     10,
-     15,
-     8,
-     21,
-     30,
-     3,
-     4,
-     9,
-     10,
-     15,
-     8,
-     21,
-     30
- )
+ u8x16::new(3, 4, 9, 10, 15, 8, 21, 30, 3, 4, 9, 10, 15, 8, 21, 30)
);
}

@@ -285,22 +239,8 @@ mod tests {
assert_eq!(
r,
u8x16::new(
-     43,
-     60,
-     33,
-     50,
-     103,
-     80,
-     125,
-     70,
-     43,
-     60,
-     33,
-     50,
-     103,
-     80,
-     125,
-     70
+     43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80,
+     125, 70
)
);
}
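The MixColumns intrinsic exercised by these tests combines with the AESE step to form one full AES encryption round. A minimal sketch, assuming state and round_key are uint8x16_t values already in hand (illustrative only, not part of this commit):

    // Hypothetical usage sketch: vaeseq_u8 performs AddRoundKey + SubBytes +
    // ShiftRows; vaesmcq_u8 then applies MixColumns.
    let state = vaesmcq_u8(vaeseq_u8(state, round_key));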
61 changes: 12 additions & 49 deletions coresimd/aarch64/neon.rs
@@ -546,7 +546,6 @@ pub unsafe fn vpmaxq_f64(a: float64x2_t, b: float64x2_t) -> float64x2_t {
vpmaxq_f64_(a, b)
}
-

#[cfg(test)]
mod tests {
use coresimd::aarch64::*;
@@ -800,20 +799,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpminq_s8() {
#[cfg_attr(rustfmt, skip)]
- let a = i8x16::new(
-     1, -2, 3, -4, 5, 6, 7, 8,
-     1, 2, 3, 4, 5, 6, 7, 8
- );
+ let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
#[cfg_attr(rustfmt, skip)]
- let b = i8x16::new(
-     0, 3, 2, 5, 4, 7, 6, 9,
-     0, 3, 2, 5, 4, 7, 6, 9
- );
+ let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
#[cfg_attr(rustfmt, skip)]
- let e = i8x16::new(
-     -2, -4, 5, 7, 1, 3, 5, 7,
-     0, 2, 4, 6, 0, 2, 4, 6,
- );
+ let e = i8x16::new(-2, -4, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
let r: i8x16 = vpminq_s8(a.into_bits(), b.into_bits()).into_bits();
assert_eq!(r, e);
}
@@ -839,20 +829,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpminq_u8() {
#[cfg_attr(rustfmt, skip)]
- let a = u8x16::new(
-     1, 2, 3, 4, 5, 6, 7, 8,
-     1, 2, 3, 4, 5, 6, 7, 8
- );
+ let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
#[cfg_attr(rustfmt, skip)]
- let b = u8x16::new(
-     0, 3, 2, 5, 4, 7, 6, 9,
-     0, 3, 2, 5, 4, 7, 6, 9
- );
+ let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
#[cfg_attr(rustfmt, skip)]
- let e = u8x16::new(
-     1, 3, 5, 7, 1, 3, 5, 7,
-     0, 2, 4, 6, 0, 2, 4, 6,
- );
+ let e = u8x16::new(1, 3, 5, 7, 1, 3, 5, 7, 0, 2, 4, 6, 0, 2, 4, 6);
let r: u8x16 = vpminq_u8(a.into_bits(), b.into_bits()).into_bits();
assert_eq!(r, e);
}
@@ -896,20 +877,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpmaxq_s8() {
#[cfg_attr(rustfmt, skip)]
- let a = i8x16::new(
-     1, -2, 3, -4, 5, 6, 7, 8,
-     1, 2, 3, 4, 5, 6, 7, 8
- );
+ let a = i8x16::new(1, -2, 3, -4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
#[cfg_attr(rustfmt, skip)]
- let b = i8x16::new(
-     0, 3, 2, 5, 4, 7, 6, 9,
-     0, 3, 2, 5, 4, 7, 6, 9
- );
+ let b = i8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
#[cfg_attr(rustfmt, skip)]
- let e = i8x16::new(
-     1, 3, 6, 8, 2, 4, 6, 8,
-     3, 5, 7, 9, 3, 5, 7, 9,
- );
+ let e = i8x16::new(1, 3, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
let r: i8x16 = vpmaxq_s8(a.into_bits(), b.into_bits()).into_bits();
assert_eq!(r, e);
}
@@ -935,20 +907,11 @@ mod tests {
#[simd_test(enable = "neon")]
unsafe fn test_vpmaxq_u8() {
#[cfg_attr(rustfmt, skip)]
- let a = u8x16::new(
-     1, 2, 3, 4, 5, 6, 7, 8,
-     1, 2, 3, 4, 5, 6, 7, 8
- );
+ let a = u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8);
#[cfg_attr(rustfmt, skip)]
- let b = u8x16::new(
-     0, 3, 2, 5, 4, 7, 6, 9,
-     0, 3, 2, 5, 4, 7, 6, 9
- );
+ let b = u8x16::new(0, 3, 2, 5, 4, 7, 6, 9, 0, 3, 2, 5, 4, 7, 6, 9);
#[cfg_attr(rustfmt, skip)]
- let e = u8x16::new(
-     2, 4, 6, 8, 2, 4, 6, 8,
-     3, 5, 7, 9, 3, 5, 7, 9,
- );
+ let e = u8x16::new(2, 4, 6, 8, 2, 4, 6, 8, 3, 5, 7, 9, 3, 5, 7, 9);
let r: u8x16 = vpmaxq_u8(a.into_bits(), b.into_bits()).into_bits();
assert_eq!(r, e);
}
20 changes: 14 additions & 6 deletions coresimd/arm/mod.rs
@@ -19,11 +19,19 @@ pub use self::v7::*;

// NEON is supported on AArch64, and on ARM when built with the v7 and neon
// features. Building ARM without neon produces incorrect codegen.
- #[cfg(any(target_arch = "aarch64",
-           all(target_feature = "v7", target_feature = "neon"),
-           dox))]
+ #[cfg(
+     any(
+         target_arch = "aarch64",
+         all(target_feature = "v7", target_feature = "neon"),
+         dox
+     )
+ )]
mod neon;
- #[cfg(any(target_arch = "aarch64",
-           all(target_feature = "v7", target_feature = "neon"),
-           dox))]
+ #[cfg(
+     any(
+         target_arch = "aarch64",
+         all(target_feature = "v7", target_feature = "neon"),
+         dox
+     )
+ )]
pub use self::neon::*;
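On 32-bit ARM, the cfg gates above are satisfied by compiling with the v7 and neon target features enabled; a hypothetical invocation (the exact target triple and flags depend on the toolchain and CI setup):

    RUSTFLAGS="-C target-feature=+v7,+neon" \
        cargo build --target armv7-unknown-linux-gnueabihf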
(Diffs for the remaining 56 changed files are not shown here.)
