use alloc::collections::VecDeque;
-use core::{cmp, hint::unreachable_unchecked, mem::MaybeUninit, slice};
+use core::cmp;

pub struct RingBuffer {
-    buf: VecDeque<MaybeUninit<u8>>,
+    buf: VecDeque<u8>,
}

impl RingBuffer {
@@ -24,12 +24,10 @@ impl RingBuffer {
    }

    /// Return the amount of available space (in bytes) of the buffer.
+    #[cfg(test)]
    pub fn free(&self) -> usize {
        let len = self.buf.len();
        let capacity = self.buf.capacity();
-        if len > capacity {
-            unsafe { unreachable_unchecked() }
-        }

        capacity - len
    }
@@ -46,41 +44,23 @@ impl RingBuffer {

    /// Ensure that there's space for `amount` elements in the buffer.
    pub fn reserve(&mut self, additional: usize) {
-        if self.free() < additional {
-            self.reserve_amortized(additional);
-        }
-
-        if self.free() < additional {
-            unsafe { unreachable_unchecked() }
-        }
-    }
-
-    #[inline(never)]
-    #[cold]
-    fn reserve_amortized(&mut self, additional: usize) {
        self.buf.reserve(additional);
    }

    #[allow(dead_code)]
    pub fn push_back(&mut self, byte: u8) {
-        self.reserve(1);
-        self.buf.push_back(MaybeUninit::new(byte));
+        self.buf.push_back(byte);
    }

    /// Fetch the byte stored at the selected index from the buffer, returning it, or
    /// `None` if the index is out of bounds.
    #[allow(dead_code)]
    pub fn get(&self, idx: usize) -> Option<u8> {
-        self.buf
-            .get(idx)
-            .map(|&byte| unsafe { MaybeUninit::assume_init(byte) })
+        self.buf.get(idx).copied()
    }

    /// Append the provided data to the end of `self`.
    pub fn extend(&mut self, data: &[u8]) {
-        let len = data.len();
-        let data = data.as_ptr().cast::<MaybeUninit<u8>>();
-        let data = unsafe { slice::from_raw_parts(data, len) };
        self.buf.extend(data);
    }

@@ -94,16 +74,12 @@ impl RingBuffer {

    /// Return references to each part of the ring buffer.
    pub fn as_slices(&self) -> (&[u8], &[u8]) {
-        let (a, b) = self.buf.as_slices();
-
-        (unsafe { slice_assume_init_ref_polyfill(a) }, unsafe {
-            slice_assume_init_ref_polyfill(b)
-        })
+        self.buf.as_slices()
    }

    /// Copies elements from the provided range to the end of the buffer.
    #[allow(dead_code)]
-    pub fn extend_from_within(&mut self, start: usize, len: usize) {
+    pub fn extend_from_within(&mut self, mut start: usize, len: usize) {
        if start + len > self.len() {
            panic!(
                "Calls to this functions must respect start ({}) + len ({}) <= self.len() ({})!",
@@ -113,43 +89,26 @@ impl RingBuffer {
            );
        }

-        self.reserve(len);
-
-        // SAFETY: Requirements checked:
-        // 1. explicitly checked above, resulting in a panic if it does not hold
-        // 2. explicitly reserved enough memory
-        unsafe { self.extend_from_within_unchecked(start, len) }
-    }
+        // Naive and cheaper implementation (for small lengths)
+        if len <= 12 {
+            self.reserve(len);
+            for i in 0..len {
+                let byte = self.get(start + i).unwrap();
+                self.push_back(byte);
+            }

-    /// Copies data from the provided range to the end of the buffer, without
-    /// first verifying that the unoccupied capacity is available.
-    ///
-    /// SAFETY:
-    /// For this to be safe two requirements need to hold:
-    /// 1. start + len <= self.len() so we do not copy uninitialised memory
-    /// 2. More then len reserved space so we do not write out-of-bounds
-    #[warn(unsafe_op_in_unsafe_fn)]
-    pub unsafe fn extend_from_within_unchecked(&mut self, mut start: usize, len: usize) {
-        debug_assert!(start + len <= self.len());
-        debug_assert!(self.free() >= len);
-
-        if self.free() < len {
-            unsafe { unreachable_unchecked() }
+            return;
        }

        let original_len = self.len();
        let mut intermediate = {
            IntermediateRingBuffer {
                this: self,
                original_len,
-                disarmed: false,
            }
        };

-        intermediate
-            .this
-            .buf
-            .extend((0..len).map(|_| MaybeUninit::uninit()));
+        intermediate.this.buf.extend((0..len).map(|_| 0));
        debug_assert_eq!(intermediate.this.buf.len(), original_len + len);

        let (a, b, a_spare, b_spare) = intermediate.as_slices_spare_mut();
@@ -158,7 +117,7 @@ impl RingBuffer {
        let skip = cmp::min(a.len(), start);
        start -= skip;
        let a = &a[skip..];
-        let b = unsafe { b.get_unchecked(start..) };
+        let b = &b[start..];

        let mut remaining_copy_len = len;

@@ -168,7 +127,6 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
            return;
        }

@@ -181,7 +139,6 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
            return;
        }

@@ -193,7 +150,6 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        if remaining_copy_len == 0 {
-            intermediate.disarmed = true;
            return;
        }

@@ -205,22 +161,17 @@ impl RingBuffer {
        remaining_copy_len -= copy_at_least;

        debug_assert_eq!(remaining_copy_len, 0);
-
-        intermediate.disarmed = true;
    }
}

struct IntermediateRingBuffer<'a> {
    this: &'a mut RingBuffer,
    original_len: usize,
-    disarmed: bool,
}

impl<'a> IntermediateRingBuffer<'a> {
    // inspired by `Vec::split_at_spare_mut`
-    fn as_slices_spare_mut(
-        &mut self,
-    ) -> (&[u8], &[u8], &mut [MaybeUninit<u8>], &mut [MaybeUninit<u8>]) {
+    fn as_slices_spare_mut(&mut self) -> (&[u8], &[u8], &mut [u8], &mut [u8]) {
        let (a, b) = self.this.buf.as_mut_slices();
        debug_assert!(a.len() + b.len() >= self.original_len);

@@ -230,26 +181,11 @@ impl<'a> IntermediateRingBuffer<'a> {
        let b_mid = remaining_init_len;
        debug_assert!(b.len() >= b_mid);

-        let (a, a_spare) = unsafe { a.split_at_mut_unchecked(a_mid) };
-        let (b, b_spare) = unsafe { b.split_at_mut_unchecked(b_mid) };
+        let (a, a_spare) = a.split_at_mut(a_mid);
+        let (b, b_spare) = b.split_at_mut(b_mid);
        debug_assert!(a_spare.is_empty() || b.is_empty());

-        (
-            unsafe { slice_assume_init_ref_polyfill(a) },
-            unsafe { slice_assume_init_ref_polyfill(b) },
-            a_spare,
-            b_spare,
-        )
-    }
-}
-
-impl<'a> Drop for IntermediateRingBuffer<'a> {
-    fn drop(&mut self) {
-        if self.disarmed {
-            return;
-        }
-
-        self.this.buf.truncate(self.original_len);
+        (a, b, a_spare, b_spare)
    }
}

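The safe `extend_from_within` above has to track up to four slices, because both the occupied bytes and the spare capacity of the `VecDeque` can wrap around the ring. For illustration only, here is a minimal sketch of the intended semantics on a flat `Vec<u8>`; the helper name is hypothetical and, unlike the real method, it pushes byte by byte instead of copying into the spare slices via `copy_bytes_overshooting`:

// Reference behaviour: append a copy of data[start..start + len] to the
// end of `data`; callers must uphold start + len <= data.len().
fn extend_from_within_vec(data: &mut Vec<u8>, start: usize, len: usize) {
    assert!(start + len <= data.len());
    for i in 0..len {
        let byte = data[start + i];
        data.push(byte);
    }
}

// e.g. [1, 2, 3, 4] with start = 1, len = 2 becomes [1, 2, 3, 4, 2, 3]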
@@ -266,48 +202,11 @@ impl<'a> Drop for IntermediateRingBuffer<'a> {
/// The chunk size is not part of the contract and may change depending on the target platform.
///
/// If that isn't possible we just fall back to ptr::copy_nonoverlapping
-fn copy_bytes_overshooting(src: &[u8], dst: &mut [MaybeUninit<u8>], copy_at_least: usize) {
-    // this assert is required for this function to be safe
-    // the optimizer should be able to remove it given how the caller
-    // has somehow to figure out `copy_at_least <= src.len() && copy_at_least <= dst.len()`
-    assert!(src.len() >= copy_at_least && dst.len() >= copy_at_least);
-
-    type CopyType = usize;
-
-    const COPY_AT_ONCE_SIZE: usize = core::mem::size_of::<CopyType>();
-    let min_buffer_size = usize::min(src.len(), dst.len());
-
-    // this check should be removed by the optimizer thanks to the above assert
-    // if `src.len() >= copy_at_least && dst.len() >= copy_at_least` then `min_buffer_size >= copy_at_least`
-    assert!(min_buffer_size >= copy_at_least);
-
-    // these bounds checks are removed because this is guaranteed:
-    // `min_buffer_size <= src.len() && min_buffer_size <= dst.len()`
-    let src = &src[..min_buffer_size];
-    let dst = &mut dst[..min_buffer_size];
-
-    // Can copy in just one read+write, very common case
-    if min_buffer_size >= COPY_AT_ONCE_SIZE && copy_at_least <= COPY_AT_ONCE_SIZE {
-        let chunk = unsafe { src.as_ptr().cast::<CopyType>().read_unaligned() };
-        unsafe { dst.as_mut_ptr().cast::<CopyType>().write_unaligned(chunk) };
-    } else {
-        unsafe {
-            dst.as_mut_ptr()
-                .cast::<u8>()
-                .copy_from_nonoverlapping(src.as_ptr(), copy_at_least)
-        };
-    }
-
-    debug_assert_eq!(&src[..copy_at_least], unsafe {
-        slice_assume_init_ref_polyfill(&dst[..copy_at_least])
-    });
-}
+fn copy_bytes_overshooting(src: &[u8], dst: &mut [u8], copy_at_least: usize) {
+    let src = &src[..copy_at_least];
+    let dst = &mut dst[..copy_at_least];

-#[inline(always)]
-unsafe fn slice_assume_init_ref_polyfill(slice: &[MaybeUninit<u8>]) -> &[u8] {
-    let len = slice.len();
-    let data = slice.as_ptr().cast::<u8>();
-    slice::from_raw_parts(data, len)
+    dst.copy_from_slice(src);
}

#[cfg(test)]
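A hypothetical round-trip check of the safe API touched by this diff; the constructor name `RingBuffer::new()` and the test itself are assumptions, only the methods shown in the hunks above come from the change:

#[test]
fn extend_from_within_appends_a_copy() {
    let mut rb = RingBuffer::new(); // assumed constructor
    rb.extend(b"abcd"); // buffer now holds a b c d
    rb.extend_from_within(1, 3); // appends b c d
    let (a, b) = rb.as_slices();
    let joined: Vec<u8> = a.iter().chain(b).copied().collect();
    assert_eq!(joined, b"abcdbcd".to_vec());
}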