Skip to content

Commit d49025c

Browse files
authored
Run rustfmt on stable, delete rustfmt.toml (#619)
This commit switches CI to running `rustfmt` on the stable compiler (as rustfmt is stable now!). Additionally it deletes `rustfmt.toml` to ensure we're following the same style as the rest of the ecosystem.
1 parent e3cdea8 commit d49025c

File tree

34 files changed

+920
-1502
lines changed

34 files changed

+920
-1502
lines changed

.travis.yml

Lines changed: 2 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -96,6 +96,7 @@ matrix:
9696
- name: "rustfmt"
9797
install: rustup component add rustfmt-preview
9898
script: cargo fmt --all -- --check
99+
rust: stable
99100
- name: "clippy"
100101
install: true
101102
script: |
@@ -113,7 +114,7 @@ matrix:
113114
- env: TARGET=i686-pc-windows-gnu
114115

115116
install: rustup target add $TARGET
116-
script:
117+
script:
117118
- cargo generate-lockfile
118119
- ci/run-docker.sh $TARGET $FEATURES
119120

coresimd/aarch64/crc.rs

Lines changed: 0 additions & 1 deletion
Original file line numberDiff line numberDiff line change
@@ -1,4 +1,3 @@
1-
21
extern "C" {
32
#[link_name = "llvm.aarch64.crc32b"]
43
fn crc32b_(crc: u32, data: u32) -> u32;

coresimd/aarch64/crypto.rs

Lines changed: 34 additions & 78 deletions
Original file line numberDiff line numberDiff line change
@@ -15,38 +15,24 @@ extern "C" {
1515
#[link_name = "llvm.aarch64.crypto.sha1h"]
1616
fn vsha1h_u32_(hash_e: u32) -> u32;
1717
#[link_name = "llvm.aarch64.crypto.sha1su0"]
18-
fn vsha1su0q_u32_(
19-
w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t,
20-
) -> uint32x4_t;
18+
fn vsha1su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t;
2119
#[link_name = "llvm.aarch64.crypto.sha1su1"]
2220
fn vsha1su1q_u32_(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
2321
#[link_name = "llvm.aarch64.crypto.sha1c"]
24-
fn vsha1cq_u32_(
25-
hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
26-
) -> uint32x4_t;
22+
fn vsha1cq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
2723
#[link_name = "llvm.aarch64.crypto.sha1p"]
28-
fn vsha1pq_u32_(
29-
hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
30-
) -> uint32x4_t;
24+
fn vsha1pq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
3125
#[link_name = "llvm.aarch64.crypto.sha1m"]
32-
fn vsha1mq_u32_(
33-
hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
34-
) -> uint32x4_t;
26+
fn vsha1mq_u32_(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t;
3527

3628
#[link_name = "llvm.aarch64.crypto.sha256h"]
37-
fn vsha256hq_u32_(
38-
hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t,
39-
) -> uint32x4_t;
29+
fn vsha256hq_u32_(hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
4030
#[link_name = "llvm.aarch64.crypto.sha256h2"]
41-
fn vsha256h2q_u32_(
42-
hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t,
43-
) -> uint32x4_t;
31+
fn vsha256h2q_u32_(hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t) -> uint32x4_t;
4432
#[link_name = "llvm.aarch64.crypto.sha256su0"]
4533
fn vsha256su0q_u32_(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t;
4634
#[link_name = "llvm.aarch64.crypto.sha256su1"]
47-
fn vsha256su1q_u32_(
48-
tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t,
49-
) -> uint32x4_t;
35+
fn vsha256su1q_u32_(tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t;
5036
}
5137

5238
#[cfg(test)]
@@ -96,49 +82,39 @@ pub unsafe fn vsha1h_u32(hash_e: u32) -> u32 {
9682
#[inline]
9783
#[target_feature(enable = "crypto")]
9884
#[cfg_attr(test, assert_instr(sha1c))]
99-
pub unsafe fn vsha1cq_u32(
100-
hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
101-
) -> uint32x4_t {
85+
pub unsafe fn vsha1cq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
10286
vsha1cq_u32_(hash_abcd, hash_e, wk)
10387
}
10488

10589
/// SHA1 hash update accelerator, majority.
10690
#[inline]
10791
#[target_feature(enable = "crypto")]
10892
#[cfg_attr(test, assert_instr(sha1m))]
109-
pub unsafe fn vsha1mq_u32(
110-
hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
111-
) -> uint32x4_t {
93+
pub unsafe fn vsha1mq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
11294
vsha1mq_u32_(hash_abcd, hash_e, wk)
11395
}
11496

11597
/// SHA1 hash update accelerator, parity.
11698
#[inline]
11799
#[target_feature(enable = "crypto")]
118100
#[cfg_attr(test, assert_instr(sha1p))]
119-
pub unsafe fn vsha1pq_u32(
120-
hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t,
121-
) -> uint32x4_t {
101+
pub unsafe fn vsha1pq_u32(hash_abcd: uint32x4_t, hash_e: u32, wk: uint32x4_t) -> uint32x4_t {
122102
vsha1pq_u32_(hash_abcd, hash_e, wk)
123103
}
124104

125105
/// SHA1 schedule update accelerator, first part.
126106
#[inline]
127107
#[target_feature(enable = "crypto")]
128108
#[cfg_attr(test, assert_instr(sha1su0))]
129-
pub unsafe fn vsha1su0q_u32(
130-
w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t,
131-
) -> uint32x4_t {
109+
pub unsafe fn vsha1su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t, w8_11: uint32x4_t) -> uint32x4_t {
132110
vsha1su0q_u32_(w0_3, w4_7, w8_11)
133111
}
134112

135113
/// SHA1 schedule update accelerator, second part.
136114
#[inline]
137115
#[target_feature(enable = "crypto")]
138116
#[cfg_attr(test, assert_instr(sha1su1))]
139-
pub unsafe fn vsha1su1q_u32(
140-
tw0_3: uint32x4_t, w12_15: uint32x4_t,
141-
) -> uint32x4_t {
117+
pub unsafe fn vsha1su1q_u32(tw0_3: uint32x4_t, w12_15: uint32x4_t) -> uint32x4_t {
142118
vsha1su1q_u32_(tw0_3, w12_15)
143119
}
144120

@@ -147,7 +123,9 @@ pub unsafe fn vsha1su1q_u32(
147123
#[target_feature(enable = "crypto")]
148124
#[cfg_attr(test, assert_instr(sha256h))]
149125
pub unsafe fn vsha256hq_u32(
150-
hash_abcd: uint32x4_t, hash_efgh: uint32x4_t, wk: uint32x4_t,
126+
hash_abcd: uint32x4_t,
127+
hash_efgh: uint32x4_t,
128+
wk: uint32x4_t,
151129
) -> uint32x4_t {
152130
vsha256hq_u32_(hash_abcd, hash_efgh, wk)
153131
}
@@ -157,7 +135,9 @@ pub unsafe fn vsha256hq_u32(
157135
#[target_feature(enable = "crypto")]
158136
#[cfg_attr(test, assert_instr(sha256h2))]
159137
pub unsafe fn vsha256h2q_u32(
160-
hash_efgh: uint32x4_t, hash_abcd: uint32x4_t, wk: uint32x4_t,
138+
hash_efgh: uint32x4_t,
139+
hash_abcd: uint32x4_t,
140+
wk: uint32x4_t,
161141
) -> uint32x4_t {
162142
vsha256h2q_u32_(hash_efgh, hash_abcd, wk)
163143
}
@@ -166,9 +146,7 @@ pub unsafe fn vsha256h2q_u32(
166146
#[inline]
167147
#[target_feature(enable = "crypto")]
168148
#[cfg_attr(test, assert_instr(sha256su0))]
169-
pub unsafe fn vsha256su0q_u32(
170-
w0_3: uint32x4_t, w4_7: uint32x4_t,
171-
) -> uint32x4_t {
149+
pub unsafe fn vsha256su0q_u32(w0_3: uint32x4_t, w4_7: uint32x4_t) -> uint32x4_t {
172150
vsha256su0q_u32_(w0_3, w4_7)
173151
}
174152

@@ -177,7 +155,9 @@ pub unsafe fn vsha256su0q_u32(
177155
#[target_feature(enable = "crypto")]
178156
#[cfg_attr(test, assert_instr(sha256su1))]
179157
pub unsafe fn vsha256su1q_u32(
180-
tw0_3: uint32x4_t, w8_11: uint32x4_t, w12_15: uint32x4_t,
158+
tw0_3: uint32x4_t,
159+
w8_11: uint32x4_t,
160+
w12_15: uint32x4_t,
181161
) -> uint32x4_t {
182162
vsha256su1q_u32_(tw0_3, w8_11, w12_15)
183163
}
@@ -191,44 +171,31 @@ mod tests {
191171

192172
#[simd_test(enable = "crypto")]
193173
unsafe fn test_vaeseq_u8() {
194-
let data = ::mem::transmute(u8x16::new(
195-
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
196-
));
197-
let key = ::mem::transmute(u8x16::new(
198-
0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
199-
));
174+
let data = ::mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
175+
let key = ::mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
200176
let r: u8x16 = ::mem::transmute(vaeseq_u8(data, key));
201177
assert_eq!(
202178
r,
203179
u8x16::new(
204-
124, 123, 124, 118, 124, 123, 124, 197, 124, 123, 124, 118,
205-
124, 123, 124, 197
180+
124, 123, 124, 118, 124, 123, 124, 197, 124, 123, 124, 118, 124, 123, 124, 197
206181
)
207182
);
208183
}
209184

210185
#[simd_test(enable = "crypto")]
211186
unsafe fn test_vaesdq_u8() {
212-
let data = ::mem::transmute(u8x16::new(
213-
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
214-
));
215-
let key = ::mem::transmute(u8x16::new(
216-
0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7,
217-
));
187+
let data = ::mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
188+
let key = ::mem::transmute(u8x16::new(0, 1, 2, 3, 4, 5, 6, 7, 0, 1, 2, 3, 4, 5, 6, 7));
218189
let r: u8x16 = ::mem::transmute(vaesdq_u8(data, key));
219190
assert_eq!(
220191
r,
221-
u8x16::new(
222-
9, 213, 9, 251, 9, 213, 9, 56, 9, 213, 9, 251, 9, 213, 9, 56
223-
)
192+
u8x16::new(9, 213, 9, 251, 9, 213, 9, 56, 9, 213, 9, 251, 9, 213, 9, 56)
224193
);
225194
}
226195

227196
#[simd_test(enable = "crypto")]
228197
unsafe fn test_vaesmcq_u8() {
229-
let data = ::mem::transmute(u8x16::new(
230-
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
231-
));
198+
let data = ::mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
232199
let r: u8x16 = ::mem::transmute(vaesmcq_u8(data));
233200
assert_eq!(
234201
r,
@@ -238,16 +205,11 @@ mod tests {
238205

239206
#[simd_test(enable = "crypto")]
240207
unsafe fn test_vaesimcq_u8() {
241-
let data = ::mem::transmute(u8x16::new(
242-
1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8,
243-
));
208+
let data = ::mem::transmute(u8x16::new(1, 2, 3, 4, 5, 6, 7, 8, 1, 2, 3, 4, 5, 6, 7, 8));
244209
let r: u8x16 = ::mem::transmute(vaesimcq_u8(data));
245210
assert_eq!(
246211
r,
247-
u8x16::new(
248-
43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80,
249-
125, 70
250-
)
212+
u8x16::new(43, 60, 33, 50, 103, 80, 125, 70, 43, 60, 33, 50, 103, 80, 125, 70)
251213
);
252214
}
253215

@@ -260,15 +222,9 @@ mod tests {
260222
#[simd_test(enable = "crypto")]
261223
unsafe fn test_vsha1su0q_u32() {
262224
let r: u32x4 = ::mem::transmute(vsha1su0q_u32(
263-
::mem::transmute(u32x4::new(
264-
0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32,
265-
)),
266-
::mem::transmute(u32x4::new(
267-
0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32,
268-
)),
269-
::mem::transmute(u32x4::new(
270-
0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32,
271-
)),
225+
::mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
226+
::mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
227+
::mem::transmute(u32x4::new(0x1234_u32, 0x5678_u32, 0x9abc_u32, 0xdef0_u32)),
272228
));
273229
assert_eq!(r, u32x4::new(0x9abc, 0xdef0, 0x1234, 0x5678));
274230
}

0 commit comments

Comments (0)