@@ -1,5 +1,8 @@
 //! This is a copy of `core::hash::sip` adapted to providing 128 bit hashes.
 
+// This code is very hot and uses lots of arithmetic, avoid overflow checks for performance.
+// See https://github.com/rust-lang/rust/pull/119440#issuecomment-1874255727
+use rustc_serialize::int_overflow::{DebugStrictAdd, DebugStrictSub};
 use std::hash::Hasher;
 use std::mem::{self, MaybeUninit};
 use std::ptr;
@@ -103,19 +106,19 @@ unsafe fn copy_nonoverlapping_small(src: *const u8, dst: *mut u8, count: usize)
     }
 
     let mut i = 0;
-    if i + 3 < count {
+    if i.debug_strict_add(3) < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 4);
-        i += 4;
+        i = i.debug_strict_add(4);
     }
 
-    if i + 1 < count {
+    if i.debug_strict_add(1) < count {
         ptr::copy_nonoverlapping(src.add(i), dst.add(i), 2);
-        i += 2
+        i = i.debug_strict_add(2)
     }
 
     if i < count {
         *dst.add(i) = *src.add(i);
-        i += 1;
+        i = i.debug_strict_add(1);
     }
 
     debug_assert_eq!(i, count);
@@ -211,14 +214,14 @@ impl SipHasher128 {
         debug_assert!(nbuf < BUFFER_SIZE);
         debug_assert!(nbuf + LEN < BUFFER_WITH_SPILL_SIZE);
 
-        if nbuf + LEN < BUFFER_SIZE {
+        if nbuf.debug_strict_add(LEN) < BUFFER_SIZE {
             unsafe {
                 // The memcpy call is optimized away because the size is known.
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
                 ptr::copy_nonoverlapping(bytes.as_ptr(), dst, LEN);
             }
 
-            self.nbuf = nbuf + LEN;
+            self.nbuf = nbuf.debug_strict_add(LEN);
 
             return;
         }
@@ -265,8 +268,9 @@ impl SipHasher128 {
             // This function should only be called when the write fills the buffer.
             // Therefore, when LEN == 1, the new `self.nbuf` must be zero.
             // LEN is statically known, so the branch is optimized away.
-            self.nbuf = if LEN == 1 { 0 } else { nbuf + LEN - BUFFER_SIZE };
-            self.processed += BUFFER_SIZE;
+            self.nbuf =
+                if LEN == 1 { 0 } else { nbuf.debug_strict_add(LEN).debug_strict_sub(BUFFER_SIZE) };
+            self.processed = self.processed.debug_strict_add(BUFFER_SIZE);
         }
     }
 
@@ -277,7 +281,7 @@ impl SipHasher128 {
         let nbuf = self.nbuf;
         debug_assert!(nbuf < BUFFER_SIZE);
 
-        if nbuf + length < BUFFER_SIZE {
+        if nbuf.debug_strict_add(length) < BUFFER_SIZE {
             unsafe {
                 let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
 
@@ -289,7 +293,7 @@ impl SipHasher128 {
                 }
             }
 
-            self.nbuf = nbuf + length;
+            self.nbuf = nbuf.debug_strict_add(length);
 
             return;
         }
@@ -315,7 +319,7 @@ impl SipHasher128 {
             // This function should only be called when the write fills the buffer,
             // so we know that there is enough input to fill the current element.
             let valid_in_elem = nbuf % ELEM_SIZE;
-            let needed_in_elem = ELEM_SIZE - valid_in_elem;
+            let needed_in_elem = ELEM_SIZE.debug_strict_sub(valid_in_elem);
 
             let src = msg.as_ptr();
             let dst = (self.buf.as_mut_ptr() as *mut u8).add(nbuf);
@@ -327,7 +331,7 @@ impl SipHasher128 {
             // ELEM_SIZE` to show the compiler that this loop's upper bound is > 0.
             // We know that is true, because last step ensured we have a full
             // element in the buffer.
-            let last = nbuf / ELEM_SIZE + 1;
+            let last = (nbuf / ELEM_SIZE).debug_strict_add(1);
 
             for i in 0..last {
                 let elem = self.buf.get_unchecked(i).assume_init().to_le();
@@ -338,7 +342,7 @@ impl SipHasher128 {
 
             // Process the remaining element-sized chunks of input.
             let mut processed = needed_in_elem;
-            let input_left = length - processed;
+            let input_left = length.debug_strict_sub(processed);
             let elems_left = input_left / ELEM_SIZE;
             let extra_bytes_left = input_left % ELEM_SIZE;
 
@@ -347,7 +351,7 @@ impl SipHasher128 {
                 self.state.v3 ^= elem;
                 Sip13Rounds::c_rounds(&mut self.state);
                 self.state.v0 ^= elem;
-                processed += ELEM_SIZE;
+                processed = processed.debug_strict_add(ELEM_SIZE);
             }
 
             // Copy remaining input into start of buffer.
@@ -356,7 +360,7 @@ impl SipHasher128 {
             copy_nonoverlapping_small(src, dst, extra_bytes_left);
 
             self.nbuf = extra_bytes_left;
-            self.processed += nbuf + processed;
+            self.processed = self.processed.debug_strict_add(nbuf.debug_strict_add(processed));
         }
     }
 
@@ -394,7 +398,7 @@ impl SipHasher128 {
         };
 
         // Finalize the hash.
-        let length = self.processed + self.nbuf;
+        let length = self.processed.debug_strict_add(self.nbuf);
 
         let b: u64 = ((length as u64 & 0xff) << 56) | elem;
         state.v3 ^= b;
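For context, below is a minimal sketch of what a `debug_strict_add`-style helper can look like, assuming the usual `cfg!(debug_assertions)` split; the exact `rustc_serialize::int_overflow` implementation may differ in details.

pub trait DebugStrictAdd {
    /// Add two numbers, keeping the overflow check only in debug builds.
    fn debug_strict_add(self, other: Self) -> Self;
}

impl DebugStrictAdd for usize {
    #[inline]
    fn debug_strict_add(self, other: Self) -> Self {
        if cfg!(debug_assertions) {
            // With debug assertions enabled, `+` keeps its overflow check
            // and panics on overflow.
            self + other
        } else {
            // Otherwise, wrapping_add guarantees no overflow branch is
            // emitted, even if the crate is compiled with overflow checks.
            self.wrapping_add(other)
        }
    }
}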