bytes/bytes_mut.rs
1use core::mem::{self, ManuallyDrop, MaybeUninit};
2use core::ops::{Deref, DerefMut};
3use core::ptr::{self, NonNull};
4use core::{cmp, fmt, hash, slice};
5
6use alloc::{
7 borrow::{Borrow, BorrowMut},
8 boxed::Box,
9 string::String,
10 vec,
11 vec::Vec,
12};
13
14use crate::buf::{IntoIter, UninitSlice};
15use crate::bytes::Vtable;
16#[allow(unused)]
17use crate::loom::sync::atomic::AtomicMut;
18use crate::loom::sync::atomic::{AtomicPtr, AtomicUsize, Ordering};
19use crate::{Buf, BufMut, Bytes, TryGetError};
20
21/// A unique reference to a contiguous slice of memory.
22///
23/// `BytesMut` represents a unique view into a potentially shared memory region.
24/// Given the uniqueness guarantee, owners of `BytesMut` handles are able to
25/// mutate the memory.
26///
27/// `BytesMut` can be thought of as containing a `buf: Arc<Vec<u8>>`, an offset
28/// into `buf`, a slice length, and a guarantee that no other `BytesMut` for the
29/// same `buf` overlaps with its slice. That guarantee means that a write lock
30/// is not required.
31///
32/// # Growth
33///
34/// `BytesMut`'s `BufMut` implementation will implicitly grow its buffer as
35/// necessary. However, explicitly reserving the required space up-front before
36/// a series of inserts will be more efficient.
37///
38/// # Examples
39///
40/// ```
41/// use bytes::{BytesMut, BufMut};
42///
43/// let mut buf = BytesMut::with_capacity(64);
44///
45/// buf.put_u8(b'h');
46/// buf.put_u8(b'e');
47/// buf.put(&b"llo"[..]);
48///
49/// assert_eq!(&buf[..], b"hello");
50///
51/// // Freeze the buffer so that it can be shared
52/// let a = buf.freeze();
53///
54/// // This does not allocate, instead `b` points to the same memory.
55/// let b = a.clone();
56///
57/// assert_eq!(&a[..], b"hello");
58/// assert_eq!(&b[..], b"hello");
59/// ```
60pub struct BytesMut {
61 ptr: NonNull<u8>,
62 len: usize,
63 cap: usize,
64 data: *mut Shared,
65}
66
67// Thread-safe reference-counted container for the shared storage. This is mostly
68// the same as `alloc::sync::Arc` but without the weak counter. The ref counting
69// fns are based on the ones found in `std`.
70//
71// The main reason to use `Shared` instead of `core::sync::Arc` is that it ends
72// up making the overall code simpler and easier to reason about. This is due to
73// some of the logic around setting `Inner::arc` and other ways the `arc` field
74// is used. Using `Arc` ended up requiring a number of funky transmutes and
75// other shenanigans to make it work.
76struct Shared {
77 vec: Vec<u8>,
78 original_capacity_repr: usize,
79 ref_count: AtomicUsize,
80}
81
82// Assert that the alignment of `Shared` is divisible by 2.
83// This is a necessary invariant since we depend on the allocation of the
84// `Shared` object to implicitly carry the `KIND_ARC` flag in its pointer.
85// This flag is set when the LSB is 0.
86const _: [(); 0 - mem::align_of::<Shared>() % 2] = [];
87
88// Buffer storage strategy flags.
89const KIND_ARC: usize = 0b0;
90const KIND_VEC: usize = 0b1;
91const KIND_MASK: usize = 0b1;
92
93// The max original capacity value. Any `BytesMut` allocated with a greater
94// initial capacity will default to this.
95const MAX_ORIGINAL_CAPACITY_WIDTH: usize = 17;
96// The original capacity algorithm will not take effect unless the originally
97// allocated capacity was at least 1kb in size.
98const MIN_ORIGINAL_CAPACITY_WIDTH: usize = 10;
99// The original capacity is stored in powers of 2 starting at 1kb to a max of
100// 64kb. Representing it as such requires only 3 bits of storage.
101const ORIGINAL_CAPACITY_MASK: usize = 0b11100;
102const ORIGINAL_CAPACITY_OFFSET: usize = 2;
103
104const VEC_POS_OFFSET: usize = 5;
105// When the storage is in the `Vec` representation, the pointer can be advanced
106// by at most this value. This is because the storage available to track the
107// offset is a `usize` minus the number of KIND bits and the number of
108// ORIGINAL_CAPACITY bits.
109const MAX_VEC_POS: usize = usize::MAX >> VEC_POS_OFFSET;
110const NOT_VEC_POS_MASK: usize = 0b11111;
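// Illustrative sketch (not additional API): with the constants above, a
// KIND_VEC `data` word decomposes as
//
//   bit 0       -> kind flag (KIND_VEC / KIND_ARC)
//   bit 1       -> currently unused
//   bits 2..=4  -> original capacity repr
//   bits 5..    -> vec position (offset of the view into the original `Vec`)
//
// i.e. kind = data & KIND_MASK,
//      repr = (data & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET,
//      pos  = data >> VEC_POS_OFFSET.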
111
112#[cfg(target_pointer_width = "64")]
113const PTR_WIDTH: usize = 64;
114#[cfg(target_pointer_width = "32")]
115const PTR_WIDTH: usize = 32;
116
117/*
118 *
119 * ===== BytesMut =====
120 *
121 */
122
123impl BytesMut {
124 /// Creates a new `BytesMut` with the specified capacity.
125 ///
126 /// The returned `BytesMut` will be able to hold at least `capacity` bytes
127 /// without reallocating.
128 ///
129 /// It is important to note that this function does not specify the length
130 /// of the returned `BytesMut`, but only the capacity.
131 ///
132 /// # Examples
133 ///
134 /// ```
135 /// use bytes::{BytesMut, BufMut};
136 ///
137 /// let mut bytes = BytesMut::with_capacity(64);
138 ///
139 /// // `bytes` contains no data, even though there is capacity
140 /// assert_eq!(bytes.len(), 0);
141 ///
142 /// bytes.put(&b"hello world"[..]);
143 ///
144 /// assert_eq!(&bytes[..], b"hello world");
145 /// ```
146 #[inline]
147 pub fn with_capacity(capacity: usize) -> BytesMut {
148 BytesMut::from_vec(Vec::with_capacity(capacity))
149 }
150
151 /// Creates a new `BytesMut` with default capacity.
152 ///
153 /// Resulting object has length 0 and unspecified capacity.
154 /// This function does not allocate.
155 ///
156 /// # Examples
157 ///
158 /// ```
159 /// use bytes::{BytesMut, BufMut};
160 ///
161 /// let mut bytes = BytesMut::new();
162 ///
163 /// assert_eq!(0, bytes.len());
164 ///
165 /// bytes.reserve(2);
166 /// bytes.put_slice(b"xy");
167 ///
168 /// assert_eq!(&b"xy"[..], &bytes[..]);
169 /// ```
170 #[inline]
171 pub fn new() -> BytesMut {
172 BytesMut::with_capacity(0)
173 }
174
175 /// Returns the number of bytes contained in this `BytesMut`.
176 ///
177 /// # Examples
178 ///
179 /// ```
180 /// use bytes::BytesMut;
181 ///
182 /// let b = BytesMut::from(&b"hello"[..]);
183 /// assert_eq!(b.len(), 5);
184 /// ```
185 #[inline]
186 pub fn len(&self) -> usize {
187 self.len
188 }
189
190 /// Returns true if the `BytesMut` has a length of 0.
191 ///
192 /// # Examples
193 ///
194 /// ```
195 /// use bytes::BytesMut;
196 ///
197 /// let b = BytesMut::with_capacity(64);
198 /// assert!(b.is_empty());
199 /// ```
200 #[inline]
201 pub fn is_empty(&self) -> bool {
202 self.len == 0
203 }
204
205 /// Returns the number of bytes the `BytesMut` can hold without reallocating.
206 ///
207 /// # Examples
208 ///
209 /// ```
210 /// use bytes::BytesMut;
211 ///
212 /// let b = BytesMut::with_capacity(64);
213 /// assert_eq!(b.capacity(), 64);
214 /// ```
215 #[inline]
216 pub fn capacity(&self) -> usize {
217 self.cap
218 }
219
220 /// Converts `self` into an immutable `Bytes`.
221 ///
222 /// The conversion is zero cost and is used to indicate that the slice
223 /// referenced by the handle will no longer be mutated. Once the conversion
224 /// is done, the handle can be cloned and shared across threads.
225 ///
226 /// # Examples
227 ///
228 /// ```ignore-wasm
229 /// use bytes::{BytesMut, BufMut};
230 /// use std::thread;
231 ///
232 /// let mut b = BytesMut::with_capacity(64);
233 /// b.put(&b"hello world"[..]);
234 /// let b1 = b.freeze();
235 /// let b2 = b1.clone();
236 ///
237 /// let th = thread::spawn(move || {
238 /// assert_eq!(&b1[..], b"hello world");
239 /// });
240 ///
241 /// assert_eq!(&b2[..], b"hello world");
242 /// th.join().unwrap();
243 /// ```
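 ///
 /// As a rough illustration of the zero-cost claim, the frozen handle points
 /// at the same memory as the original `BytesMut`:
 ///
 /// ```
 /// use bytes::{BytesMut, BufMut};
 ///
 /// let mut buf = BytesMut::with_capacity(16);
 /// buf.put(&b"abc"[..]);
 /// let ptr = buf.as_ptr();
 ///
 /// // `freeze` consumes the handle without copying the bytes.
 /// let frozen = buf.freeze();
 /// assert_eq!(frozen.as_ptr(), ptr);
 /// ```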
244 #[inline]
245 pub fn freeze(self) -> Bytes {
246 let bytes = ManuallyDrop::new(self);
247 if bytes.kind() == KIND_VEC {
248 // Just re-use `Bytes` internal Vec vtable
249 unsafe {
250 let off = bytes.get_vec_pos();
251 let vec = rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off);
252 let mut b: Bytes = vec.into();
253 b.advance(off);
254 b
255 }
256 } else {
257 debug_assert_eq!(bytes.kind(), KIND_ARC);
258
259 let ptr = bytes.ptr.as_ptr();
260 let len = bytes.len;
261 let data = AtomicPtr::new(bytes.data.cast());
262 unsafe { Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE) }
263 }
264 }
265
266 /// Creates a new `BytesMut` containing `len` zeros.
267 ///
268 /// The resulting object has a length of `len` and a capacity greater
269 /// than or equal to `len`. The entire length of the object will be filled
270 /// with zeros.
271 ///
272 /// On some platforms or allocators this function may be faster than
273 /// a manual implementation.
274 ///
275 /// # Examples
276 ///
277 /// ```
278 /// use bytes::BytesMut;
279 ///
280 /// let zeros = BytesMut::zeroed(42);
281 ///
282 /// assert!(zeros.capacity() >= 42);
283 /// assert_eq!(zeros.len(), 42);
284 /// zeros.into_iter().for_each(|x| assert_eq!(x, 0));
285 /// ```
286 pub fn zeroed(len: usize) -> BytesMut {
287 BytesMut::from_vec(vec![0; len])
288 }
289
290 /// Splits the bytes into two at the given index.
291 ///
292 /// Afterwards `self` contains elements `[0, at)`, and the returned
293 /// `BytesMut` contains elements `[at, capacity)`. It's guaranteed that the
294 /// memory does not move, that is, the address of `self` does not change,
295 /// and the address of the returned slice is `at` bytes after that.
296 ///
297 /// This is an `O(1)` operation that just increases the reference count
298 /// and sets a few indices.
299 ///
300 /// # Examples
301 ///
302 /// ```
303 /// use bytes::BytesMut;
304 ///
305 /// let mut a = BytesMut::from(&b"hello world"[..]);
306 /// let mut b = a.split_off(5);
307 ///
308 /// a[0] = b'j';
309 /// b[0] = b'!';
310 ///
311 /// assert_eq!(&a[..], b"jello");
312 /// assert_eq!(&b[..], b"!world");
313 /// ```
314 ///
315 /// # Panics
316 ///
317 /// Panics if `at > capacity`.
318 #[must_use = "consider BytesMut::truncate if you don't need the other half"]
319 pub fn split_off(&mut self, at: usize) -> BytesMut {
320 assert!(
321 at <= self.capacity(),
322 "split_off out of bounds: {:?} <= {:?}",
323 at,
324 self.capacity(),
325 );
326 unsafe {
327 let mut other = self.shallow_clone();
328 // SAFETY: We've checked that `at` <= `self.capacity()` above.
329 other.advance_unchecked(at);
330 self.cap = at;
331 self.len = cmp::min(self.len, at);
332 other
333 }
334 }
335
336 /// Removes the bytes from the current view, returning them in a new
337 /// `BytesMut` handle.
338 ///
339 /// Afterwards, `self` will be empty, but will retain any additional
340 /// capacity that it had before the operation. This is identical to
341 /// `self.split_to(self.len())`.
342 ///
343 /// This is an `O(1)` operation that just increases the reference count and
344 /// sets a few indices.
345 ///
346 /// # Examples
347 ///
348 /// ```
349 /// use bytes::{BytesMut, BufMut};
350 ///
351 /// let mut buf = BytesMut::with_capacity(1024);
352 /// buf.put(&b"hello world"[..]);
353 ///
354 /// let other = buf.split();
355 ///
356 /// assert!(buf.is_empty());
357 /// assert_eq!(1013, buf.capacity());
358 ///
359 /// assert_eq!(other, b"hello world"[..]);
360 /// ```
361 #[must_use = "consider BytesMut::clear if you don't need the other half"]
362 pub fn split(&mut self) -> BytesMut {
363 let len = self.len();
364 self.split_to(len)
365 }
366
367 /// Splits the buffer into two at the given index.
368 ///
369 /// Afterwards `self` contains elements `[at, len)`, and the returned `BytesMut`
370 /// contains elements `[0, at)`.
371 ///
372 /// This is an `O(1)` operation that just increases the reference count and
373 /// sets a few indices.
374 ///
375 /// # Examples
376 ///
377 /// ```
378 /// use bytes::BytesMut;
379 ///
380 /// let mut a = BytesMut::from(&b"hello world"[..]);
381 /// let mut b = a.split_to(5);
382 ///
383 /// a[0] = b'!';
384 /// b[0] = b'j';
385 ///
386 /// assert_eq!(&a[..], b"!world");
387 /// assert_eq!(&b[..], b"jello");
388 /// ```
389 ///
390 /// # Panics
391 ///
392 /// Panics if `at > len`.
393 #[must_use = "consider BytesMut::advance if you don't need the other half"]
394 pub fn split_to(&mut self, at: usize) -> BytesMut {
395 assert!(
396 at <= self.len(),
397 "split_to out of bounds: {:?} <= {:?}",
398 at,
399 self.len(),
400 );
401
402 unsafe {
403 let mut other = self.shallow_clone();
404 // SAFETY: We've checked that `at` <= `self.len()` and we know that `self.len()` <=
405 // `self.capacity()`.
406 self.advance_unchecked(at);
407 other.cap = at;
408 other.len = at;
409 other
410 }
411 }
412
413 /// Shortens the buffer, keeping the first `len` bytes and dropping the
414 /// rest.
415 ///
416 /// If `len` is greater than the buffer's current length, this has no
417 /// effect.
418 ///
419 /// Existing underlying capacity is preserved.
420 ///
421 /// The [split_off](`Self::split_off()`) method can emulate `truncate`, but this causes the
422 /// excess bytes to be returned instead of dropped.
423 ///
424 /// # Examples
425 ///
426 /// ```
427 /// use bytes::BytesMut;
428 ///
429 /// let mut buf = BytesMut::from(&b"hello world"[..]);
430 /// buf.truncate(5);
431 /// assert_eq!(buf, b"hello"[..]);
432 /// ```
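 ///
 /// For example, emulating `truncate` with [split_off](`Self::split_off()`)
 /// returns the excess bytes instead of dropping them:
 ///
 /// ```
 /// use bytes::BytesMut;
 ///
 /// let mut buf = BytesMut::from(&b"hello world"[..]);
 /// let tail = buf.split_off(5);
 /// assert_eq!(buf, b"hello"[..]);
 /// assert_eq!(tail, b" world"[..]);
 /// ```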
433 pub fn truncate(&mut self, len: usize) {
434 if len <= self.len() {
435 // SAFETY: Shrinking the buffer cannot expose uninitialized bytes.
436 unsafe { self.set_len(len) };
437 }
438 }
439
440 /// Clears the buffer, removing all data. Existing capacity is preserved.
441 ///
442 /// # Examples
443 ///
444 /// ```
445 /// use bytes::BytesMut;
446 ///
447 /// let mut buf = BytesMut::from(&b"hello world"[..]);
448 /// buf.clear();
449 /// assert!(buf.is_empty());
450 /// ```
451 pub fn clear(&mut self) {
452 // SAFETY: Setting the length to zero cannot expose uninitialized bytes.
453 unsafe { self.set_len(0) };
454 }
455
456 /// Resizes the buffer so that `len` is equal to `new_len`.
457 ///
458 /// If `new_len` is greater than `len`, the buffer is extended by the
459 /// difference with each additional byte set to `value`. If `new_len` is
460 /// less than `len`, the buffer is simply truncated.
461 ///
462 /// # Examples
463 ///
464 /// ```
465 /// use bytes::BytesMut;
466 ///
467 /// let mut buf = BytesMut::new();
468 ///
469 /// buf.resize(3, 0x1);
470 /// assert_eq!(&buf[..], &[0x1, 0x1, 0x1]);
471 ///
472 /// buf.resize(2, 0x2);
473 /// assert_eq!(&buf[..], &[0x1, 0x1]);
474 ///
475 /// buf.resize(4, 0x3);
476 /// assert_eq!(&buf[..], &[0x1, 0x1, 0x3, 0x3]);
477 /// ```
478 pub fn resize(&mut self, new_len: usize, value: u8) {
479 let additional = if let Some(additional) = new_len.checked_sub(self.len()) {
480 additional
481 } else {
482 self.truncate(new_len);
483 return;
484 };
485
486 if additional == 0 {
487 return;
488 }
489
490 self.reserve(additional);
491 let dst = self.spare_capacity_mut().as_mut_ptr();
492 // SAFETY: `spare_capacity_mut` returns a valid, properly aligned pointer and we've
493 // reserved enough space to write `additional` bytes.
494 unsafe { ptr::write_bytes(dst, value, additional) };
495
496 // SAFETY: There are at least `new_len` initialized bytes in the buffer so no
497 // uninitialized bytes are being exposed.
498 unsafe { self.set_len(new_len) };
499 }
500
501 /// Sets the length of the buffer.
502 ///
503 /// This will explicitly set the size of the buffer without actually
504 /// modifying the data, so it is up to the caller to ensure that the data
505 /// has been initialized.
506 ///
507 /// # Examples
508 ///
509 /// ```
510 /// use bytes::BytesMut;
511 ///
512 /// let mut b = BytesMut::from(&b"hello world"[..]);
513 ///
514 /// unsafe {
515 /// b.set_len(5);
516 /// }
517 ///
518 /// assert_eq!(&b[..], b"hello");
519 ///
520 /// unsafe {
521 /// b.set_len(11);
522 /// }
523 ///
524 /// assert_eq!(&b[..], b"hello world");
525 /// ```
526 #[inline]
527 pub unsafe fn set_len(&mut self, len: usize) {
528 debug_assert!(len <= self.cap, "set_len out of bounds");
529 self.len = len;
530 }
531
532 /// Reserves capacity for at least `additional` more bytes to be inserted
533 /// into the given `BytesMut`.
534 ///
535 /// More than `additional` bytes may be reserved in order to avoid frequent
536 /// reallocations. A call to `reserve` may result in an allocation.
537 ///
538 /// Before allocating new buffer space, the function will attempt to reclaim
539 /// space in the existing buffer. If the current handle references a view
540 /// into a larger original buffer, and all other handles referencing part
541 /// of the same original buffer have been dropped, then the current view
542 /// can be copied/shifted to the front of the buffer and the handle can take
543 /// ownership of the full buffer, provided that the full buffer is large
544 /// enough to fit the requested additional capacity.
545 ///
546 /// This optimization will only happen if shifting the data from the current
547 /// view to the front of the buffer is not too expensive in terms of the
548 /// (amortized) time required. The precise condition is subject to change;
549 /// as of now, the length of the data being shifted needs to be at least as
550 /// large as the distance that it's shifted by. If the current view is empty
551 /// and the original buffer is large enough to fit the requested additional
552 /// capacity, then reallocations will never happen.
553 ///
554 /// # Examples
555 ///
556 /// In the following example, a new buffer is allocated.
557 ///
558 /// ```
559 /// use bytes::BytesMut;
560 ///
561 /// let mut buf = BytesMut::from(&b"hello"[..]);
562 /// buf.reserve(64);
563 /// assert!(buf.capacity() >= 69);
564 /// ```
565 ///
566 /// In the following example, the existing buffer is reclaimed.
567 ///
568 /// ```
569 /// use bytes::{BytesMut, BufMut};
570 ///
571 /// let mut buf = BytesMut::with_capacity(128);
572 /// buf.put(&[0; 64][..]);
573 ///
574 /// let ptr = buf.as_ptr();
575 /// let other = buf.split();
576 ///
577 /// assert!(buf.is_empty());
578 /// assert_eq!(buf.capacity(), 64);
579 ///
580 /// drop(other);
581 /// buf.reserve(128);
582 ///
583 /// assert_eq!(buf.capacity(), 128);
584 /// assert_eq!(buf.as_ptr(), ptr);
585 /// ```
586 ///
587 /// # Panics
588 ///
589 /// Panics if the new capacity overflows `usize`.
590 #[inline]
591 pub fn reserve(&mut self, additional: usize) {
592 let len = self.len();
593 let rem = self.capacity() - len;
594
595 if additional <= rem {
596 // The handle can already store at least `additional` more bytes, so
597 // there is no further work needed to be done.
598 return;
599 }
600
601 // will always succeed
602 let _ = self.reserve_inner(additional, true);
603 }
604
605 // In separate function to allow the short-circuits in `reserve` and `try_reclaim` to
606 // be inline-able. Significantly helps performance. Returns false if it did not succeed.
607 fn reserve_inner(&mut self, additional: usize, allocate: bool) -> bool {
608 let len = self.len();
609 let kind = self.kind();
610
611 if kind == KIND_VEC {
612 // If there's enough free space before the start of the buffer, then
613 // just copy the data backwards and reuse the already-allocated
614 // space.
615 //
616 // Otherwise, since backed by a vector, use `Vec::reserve`
617 //
618 // We need to make sure that this optimization does not kill the
619 // amortized runtimes of BytesMut's operations.
620 unsafe {
621 let off = self.get_vec_pos();
622
623 // Only reuse space if we can satisfy the requested additional space.
624 //
625 // Also check if the value of `off` suggests that enough bytes
626 // have been read to account for the overhead of shifting all
627 // the data (in an amortized analysis).
628 // Hence the condition `off >= self.len()`.
629 //
630 // This condition also already implies that the buffer is going
631 // to be (at least) half-empty in the end; so we do not break
632 // the (amortized) runtime with future resizes of the underlying
633 // `Vec`.
634 //
635 // [For more details check issue #524, and PR #525.]
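 // As a purely illustrative example: with cap = 60, len = 10 and
 // off = 40, shifting back yields 60 - 10 + 40 = 90 usable bytes, and
 // off (40) >= len (10), so the shift is cheap enough to amortize.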
636 if self.capacity() - self.len() + off >= additional && off >= self.len() {
637 // There's enough space, and it's not too much overhead:
638 // reuse the space!
639 //
640 // Just move the pointer back to the start after copying
641 // data back.
642 let base_ptr = self.ptr.as_ptr().sub(off);
643 // Since `off >= self.len()`, the two regions don't overlap.
644 ptr::copy_nonoverlapping(self.ptr.as_ptr(), base_ptr, self.len);
645 self.ptr = vptr(base_ptr);
646 self.set_vec_pos(0);
647
648 // Length stays constant, but since we moved backwards we
649 // can gain capacity back.
650 self.cap += off;
651 } else {
652 if !allocate {
653 return false;
654 }
655 // Not enough space, or reusing might be too much overhead:
656 // allocate more space!
657 let mut v =
658 ManuallyDrop::new(rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off));
659 v.reserve(additional);
660
661 // Update the info
662 self.ptr = vptr(v.as_mut_ptr().add(off));
663 self.cap = v.capacity() - off;
664 debug_assert_eq!(self.len, v.len() - off);
665 }
666
667 return true;
668 }
669 }
670
671 debug_assert_eq!(kind, KIND_ARC);
672 let shared: *mut Shared = self.data;
673
674 // Reserving involves abandoning the currently shared buffer and
675 // allocating a new vector with the requested capacity.
676 //
677 // Compute the new capacity
678 let mut new_cap = match len.checked_add(additional) {
679 Some(new_cap) => new_cap,
680 None if !allocate => return false,
681 None => panic!("overflow"),
682 };
683
684 unsafe {
685 // First, try to reclaim the buffer. This is possible if the current
686 // handle is the only outstanding handle pointing to the buffer.
687 if (*shared).is_unique() {
688 // This is the only handle to the buffer. It can be reclaimed.
689 // However, before doing the work of copying data, check to make
690 // sure that the vector has enough capacity.
691 let v = &mut (*shared).vec;
692
693 let v_capacity = v.capacity();
694 let ptr = v.as_mut_ptr();
695
696 let offset = self.ptr.as_ptr().offset_from(ptr) as usize;
697
698 let new_cap_plus_offset = match new_cap.checked_add(offset) {
699 Some(new_cap_plus_offset) => new_cap_plus_offset,
700 None if !allocate => return false,
701 None => panic!("overflow"),
702 };
703
704 // Compare the condition in the `kind == KIND_VEC` case above
705 // for more details.
706 if v_capacity >= new_cap_plus_offset {
707 self.cap = new_cap;
708 // no copy is necessary
709 } else if v_capacity >= new_cap && offset >= len {
710 // The capacity is sufficient, and copying is not too much
711 // overhead: reclaim the buffer!
712
713 // `offset >= len` means: no overlap
714 ptr::copy_nonoverlapping(self.ptr.as_ptr(), ptr, len);
715
716 self.ptr = vptr(ptr);
717 self.cap = v.capacity();
718 } else {
719 if !allocate {
720 return false;
721 }
722
723 // new_cap is calculated in terms of `BytesMut`, not the underlying
724 // `Vec`, so it does not take the offset into account.
725 //
726 // Thus we have to manually add it here.
727 new_cap = new_cap_plus_offset;
728
729 // The vector capacity is not sufficient. The reserve request is
730 // asking for more than the initial buffer capacity. Allocate more
731 // than requested if `new_cap` is not much bigger than the current
732 // capacity.
733 //
734 // There are some situations (e.g. when `reserve_exact` was used) in
735 // which the buffer capacity could be below `original_capacity`, so do
736 // a check.
737 let double = v.capacity().checked_shl(1).unwrap_or(new_cap);
738
739 new_cap = cmp::max(double, new_cap);
740
741 // No space - allocate more
742 //
743 // The length field of `Shared::vec` is not used by the `BytesMut`;
744 // instead we use the `len` field in the `BytesMut` itself. However,
745 // when calling `reserve`, it doesn't guarantee that data stored in
746 // the unused capacity of the vector is copied over to the new
747 // allocation, so we need to ensure that we don't have any data we
748 // care about in the unused capacity before calling `reserve`.
749 debug_assert!(offset + len <= v.capacity());
750 v.set_len(offset + len);
751 v.reserve(new_cap - v.len());
752
753 // Update the info
754 self.ptr = vptr(v.as_mut_ptr().add(offset));
755 self.cap = v.capacity() - offset;
756 }
757
758 return true;
759 }
760 }
761 if !allocate {
762 return false;
763 }
764
765 let original_capacity_repr = unsafe { (*shared).original_capacity_repr };
766 let original_capacity = original_capacity_from_repr(original_capacity_repr);
767
768 new_cap = cmp::max(new_cap, original_capacity);
769
770 // Create a new vector to store the data
771 let mut v = ManuallyDrop::new(Vec::with_capacity(new_cap));
772
773 // Copy the bytes
774 v.extend_from_slice(self.as_ref());
775
776 // Release the shared handle. This must be done *after* the bytes are
777 // copied.
778 unsafe { release_shared(shared) };
779
780 // Update self
781 let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
782 self.data = invalid_ptr(data);
783 self.ptr = vptr(v.as_mut_ptr());
784 self.cap = v.capacity();
785 debug_assert_eq!(self.len, v.len());
786 true
787 }
788
789 /// Attempts to cheaply reclaim already allocated capacity for at least `additional` more
790 /// bytes to be inserted into the given `BytesMut` and returns `true` if it succeeded.
791 ///
792 /// `try_reclaim` behaves exactly like `reserve`, except that it never allocates new storage
793 /// and returns a `bool` indicating whether it was successful in doing so:
794 ///
795 /// `try_reclaim` returns false under these conditions:
796 /// - The spare capacity left is less than `additional` bytes AND
797 /// - The existing allocation cannot be reclaimed cheaply or it was less than
798 /// `additional` bytes in size
799 ///
800 /// Reclaiming the allocation cheaply is possible if the `BytesMut` has no outstanding
801 /// references through other `BytesMut`s or `Bytes` which point to the same underlying
802 /// storage.
803 ///
804 /// # Examples
805 ///
806 /// ```
807 /// use bytes::BytesMut;
808 ///
809 /// let mut buf = BytesMut::with_capacity(64);
810 /// assert_eq!(true, buf.try_reclaim(64));
811 /// assert_eq!(64, buf.capacity());
812 ///
813 /// buf.extend_from_slice(b"abcd");
814 /// let mut split = buf.split();
815 /// assert_eq!(60, buf.capacity());
816 /// assert_eq!(4, split.capacity());
817 /// assert_eq!(false, split.try_reclaim(64));
818 /// assert_eq!(false, buf.try_reclaim(64));
819 /// // The split buffer is filled with "abcd"
820 /// assert_eq!(false, split.try_reclaim(4));
821 /// // buf is empty and has capacity for 60 bytes
822 /// assert_eq!(true, buf.try_reclaim(60));
823 ///
824 /// drop(buf);
825 /// assert_eq!(false, split.try_reclaim(64));
826 ///
827 /// split.clear();
828 /// assert_eq!(4, split.capacity());
829 /// assert_eq!(true, split.try_reclaim(64));
830 /// assert_eq!(64, split.capacity());
831 /// ```
832 // I tried splitting out try_reclaim_inner after the short circuits, but it was inlined
833 // regardless with Rust 1.78.0 so probably not worth it
834 #[inline]
835 #[must_use = "consider BytesMut::reserve if you need an infallible reservation"]
836 pub fn try_reclaim(&mut self, additional: usize) -> bool {
837 let len = self.len();
838 let rem = self.capacity() - len;
839
840 if additional <= rem {
841 // The handle can already store at least `additional` more bytes, so
842 // there is no further work needed to be done.
843 return true;
844 }
845
846 self.reserve_inner(additional, false)
847 }
848
849 /// Appends given bytes to this `BytesMut`.
850 ///
851 /// If this `BytesMut` object does not have enough capacity, it is resized
852 /// first.
853 ///
854 /// # Examples
855 ///
856 /// ```
857 /// use bytes::BytesMut;
858 ///
859 /// let mut buf = BytesMut::with_capacity(0);
860 /// buf.extend_from_slice(b"aaabbb");
861 /// buf.extend_from_slice(b"cccddd");
862 ///
863 /// assert_eq!(b"aaabbbcccddd", &buf[..]);
864 /// ```
865 #[inline]
866 pub fn extend_from_slice(&mut self, extend: &[u8]) {
867 let cnt = extend.len();
868 self.reserve(cnt);
869
870 unsafe {
871 let dst = self.spare_capacity_mut();
872 // Reserved above
873 debug_assert!(dst.len() >= cnt);
874
875 ptr::copy_nonoverlapping(extend.as_ptr(), dst.as_mut_ptr().cast(), cnt);
876 }
877
878 unsafe {
879 self.advance_mut(cnt);
880 }
881 }
882
883 /// Absorbs a `BytesMut` that was previously split off.
884 ///
885 /// If the two `BytesMut` objects were previously contiguous and not mutated
886 /// in a way that causes re-allocation, i.e., if `other` was created by
887 /// calling `split_off` on this `BytesMut`, then this is an `O(1)` operation
888 /// that just decreases a reference count and sets a few indices.
889 /// Otherwise this method degenerates to
890 /// `self.extend_from_slice(other.as_ref())`.
891 ///
892 /// # Examples
893 ///
894 /// ```
895 /// use bytes::BytesMut;
896 ///
897 /// let mut buf = BytesMut::with_capacity(64);
898 /// buf.extend_from_slice(b"aaabbbcccddd");
899 ///
900 /// let split = buf.split_off(6);
901 /// assert_eq!(b"aaabbb", &buf[..]);
902 /// assert_eq!(b"cccddd", &split[..]);
903 ///
904 /// buf.unsplit(split);
905 /// assert_eq!(b"aaabbbcccddd", &buf[..]);
906 /// ```
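 ///
 /// If the two halves are not contiguous (for example, because they come from
 /// different allocations), the bytes are still appended, just by copying:
 ///
 /// ```
 /// use bytes::BytesMut;
 ///
 /// let mut buf = BytesMut::from(&b"aaa"[..]);
 /// let other = BytesMut::from(&b"bbb"[..]);
 ///
 /// buf.unsplit(other);
 /// assert_eq!(b"aaabbb", &buf[..]);
 /// ```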
907 pub fn unsplit(&mut self, other: BytesMut) {
908 if self.is_empty() {
909 *self = other;
910 return;
911 }
912
913 if let Err(other) = self.try_unsplit(other) {
914 self.extend_from_slice(other.as_ref());
915 }
916 }
917
918 // private
919
920 // For now, use a `Vec` to manage the memory for us, but we may want to
921 // change that in the future to some alternate allocator strategy.
922 //
923 // Thus, we don't expose an easy way to construct from a `Vec` since an
924 // internal change could make a simple pattern (`BytesMut::from(vec)`)
925 // suddenly a lot more expensive.
926 #[inline]
927 pub(crate) fn from_vec(vec: Vec<u8>) -> BytesMut {
928 let mut vec = ManuallyDrop::new(vec);
929 let ptr = vptr(vec.as_mut_ptr());
930 let len = vec.len();
931 let cap = vec.capacity();
932
933 let original_capacity_repr = original_capacity_to_repr(cap);
934 let data = (original_capacity_repr << ORIGINAL_CAPACITY_OFFSET) | KIND_VEC;
935
936 BytesMut {
937 ptr,
938 len,
939 cap,
940 data: invalid_ptr(data),
941 }
942 }
943
944 #[inline]
945 fn as_slice(&self) -> &[u8] {
946 unsafe { slice::from_raw_parts(self.ptr.as_ptr(), self.len) }
947 }
948
949 #[inline]
950 fn as_slice_mut(&mut self) -> &mut [u8] {
951 unsafe { slice::from_raw_parts_mut(self.ptr.as_ptr(), self.len) }
952 }
953
954 /// Advance the buffer without bounds checking.
955 ///
956 /// # SAFETY
957 ///
958 /// The caller must ensure that `count` <= `self.cap`.
959 pub(crate) unsafe fn advance_unchecked(&mut self, count: usize) {
960 // Setting the start to 0 is a no-op, so return early if this is the
961 // case.
962 if count == 0 {
963 return;
964 }
965
966 debug_assert!(count <= self.cap, "internal: advance_unchecked out of bounds");
967
968 let kind = self.kind();
969
970 if kind == KIND_VEC {
971 // Setting the start when in vec representation is a little more
972 // complicated. First, we have to track how far ahead the
973 // "start" of the byte buffer from the beginning of the vec. We
974 // also have to ensure that we don't exceed the maximum shift.
975 let pos = self.get_vec_pos() + count;
976
977 if pos <= MAX_VEC_POS {
978 self.set_vec_pos(pos);
979 } else {
980 // The repr must be upgraded to ARC. This will never happen
981 // on 64 bit systems and will only happen on 32 bit systems
982 // when shifting past 134,217,727 bytes. As such, we don't
983 // worry too much about performance here.
984 self.promote_to_shared(/*ref_count = */ 1);
985 }
986 }
987
988 // Updating the start of the view is setting `ptr` to point to the
989 // new start and updating the `len` field to reflect the new length
990 // of the view.
991 self.ptr = vptr(self.ptr.as_ptr().add(count));
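 // `count` may exceed `len` (e.g. when `split_off` advances past the
 // initialized bytes), so saturate rather than underflow.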
992 self.len = self.len.saturating_sub(count);
993 self.cap -= count;
994 }
995
996 fn try_unsplit(&mut self, other: BytesMut) -> Result<(), BytesMut> {
997 if other.capacity() == 0 {
998 return Ok(());
999 }
1000
1001 let ptr = unsafe { self.ptr.as_ptr().add(self.len) };
1002 if ptr == other.ptr.as_ptr()
1003 && self.kind() == KIND_ARC
1004 && other.kind() == KIND_ARC
1005 && self.data == other.data
1006 {
1007 // Contiguous blocks, just combine directly
1008 self.len += other.len;
1009 self.cap += other.cap;
1010 Ok(())
1011 } else {
1012 Err(other)
1013 }
1014 }
1015
1016 #[inline]
1017 fn kind(&self) -> usize {
1018 self.data as usize & KIND_MASK
1019 }
1020
1021 unsafe fn promote_to_shared(&mut self, ref_cnt: usize) {
1022 debug_assert_eq!(self.kind(), KIND_VEC);
1023 debug_assert!(ref_cnt == 1 || ref_cnt == 2);
1024
1025 let original_capacity_repr =
1026 (self.data as usize & ORIGINAL_CAPACITY_MASK) >> ORIGINAL_CAPACITY_OFFSET;
1027
1028 // The vec offset cannot be concurrently mutated, so there
1029 // should be no danger reading it.
1030 let off = (self.data as usize) >> VEC_POS_OFFSET;
1031
1032 // First, allocate a new `Shared` instance containing the
1033 // `Vec` fields. It's important to note that `ptr`, `len`,
1034 // and `cap` cannot be mutated without having `&mut self`.
1035 // This means that these fields will not be concurrently
1036 // updated and since the buffer hasn't been promoted to an
1037 // `Arc`, those three fields still are the components of the
1038 // vector.
1039 let shared = Box::new(Shared {
1040 vec: rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off),
1041 original_capacity_repr,
1042 ref_count: AtomicUsize::new(ref_cnt),
1043 });
1044
1045 let shared = Box::into_raw(shared);
1046
1047 // The pointer should be aligned, so this assert should
1048 // always succeed.
1049 debug_assert_eq!(shared as usize & KIND_MASK, KIND_ARC);
1050
1051 self.data = shared;
1052 }
1053
1054 /// Makes an exact shallow clone of `self`.
1055 ///
1056 /// The kind of `self` doesn't matter, but this is unsafe
1057 /// because the clone will have the same offsets. You must
1058 /// be sure the returned value to the user doesn't allow
1059 /// two views into the same range.
1060 #[inline]
1061 unsafe fn shallow_clone(&mut self) -> BytesMut {
1062 if self.kind() == KIND_ARC {
1063 increment_shared(self.data);
1064 ptr::read(self)
1065 } else {
1066 self.promote_to_shared(/*ref_count = */ 2);
1067 ptr::read(self)
1068 }
1069 }
1070
1071 #[inline]
1072 unsafe fn get_vec_pos(&self) -> usize {
1073 debug_assert_eq!(self.kind(), KIND_VEC);
1074
1075 self.data as usize >> VEC_POS_OFFSET
1076 }
1077
1078 #[inline]
1079 unsafe fn set_vec_pos(&mut self, pos: usize) {
1080 debug_assert_eq!(self.kind(), KIND_VEC);
1081 debug_assert!(pos <= MAX_VEC_POS);
1082
1083 self.data = invalid_ptr((pos << VEC_POS_OFFSET) | (self.data as usize & NOT_VEC_POS_MASK));
1084 }
1085
1086 /// Returns the remaining spare capacity of the buffer as a slice of `MaybeUninit<u8>`.
1087 ///
1088 /// The returned slice can be used to fill the buffer with data (e.g. by
1089 /// reading from a file) before marking the data as initialized using the
1090 /// [`set_len`] method.
1091 ///
1092 /// [`set_len`]: BytesMut::set_len
1093 ///
1094 /// # Examples
1095 ///
1096 /// ```
1097 /// use bytes::BytesMut;
1098 ///
1099 /// // Allocate buffer big enough for 10 bytes.
1100 /// let mut buf = BytesMut::with_capacity(10);
1101 ///
1102 /// // Fill in the first 3 elements.
1103 /// let uninit = buf.spare_capacity_mut();
1104 /// uninit[0].write(0);
1105 /// uninit[1].write(1);
1106 /// uninit[2].write(2);
1107 ///
1108 /// // Mark the first 3 bytes of the buffer as being initialized.
1109 /// unsafe {
1110 /// buf.set_len(3);
1111 /// }
1112 ///
1113 /// assert_eq!(&buf[..], &[0, 1, 2]);
1114 /// ```
1115 #[inline]
1116 pub fn spare_capacity_mut(&mut self) -> &mut [MaybeUninit<u8>] {
1117 unsafe {
1118 let ptr = self.ptr.as_ptr().add(self.len);
1119 let len = self.cap - self.len;
1120
1121 slice::from_raw_parts_mut(ptr.cast(), len)
1122 }
1123 }
1124}
1125
1126impl Drop for BytesMut {
1127 fn drop(&mut self) {
1128 let kind = self.kind();
1129
1130 if kind == KIND_VEC {
1131 unsafe {
1132 let off = self.get_vec_pos();
1133
1134 // Vector storage, free the vector
1135 let _ = rebuild_vec(self.ptr.as_ptr(), self.len, self.cap, off);
1136 }
1137 } else if kind == KIND_ARC {
1138 unsafe { release_shared(self.data) };
1139 }
1140 }
1141}
1142
1143impl Buf for BytesMut {
1144 #[inline]
1145 fn remaining(&self) -> usize {
1146 self.len()
1147 }
1148
1149 #[inline]
1150 fn chunk(&self) -> &[u8] {
1151 self.as_slice()
1152 }
1153
1154 #[inline]
1155 fn advance(&mut self, cnt: usize) {
1156 assert!(
1157 cnt <= self.remaining(),
1158 "cannot advance past `remaining`: {:?} <= {:?}",
1159 cnt,
1160 self.remaining(),
1161 );
1162 unsafe {
1163 // SAFETY: We've checked that `cnt` <= `self.remaining()` and we know that
1164 // `self.remaining()` <= `self.cap`.
1165 self.advance_unchecked(cnt);
1166 }
1167 }
1168
1169 fn copy_to_bytes(&mut self, len: usize) -> Bytes {
1170 self.split_to(len).freeze()
1171 }
1172}
1173
1174unsafe impl BufMut for BytesMut {
1175 #[inline]
1176 fn remaining_mut(&self) -> usize {
1177 // Max allocation size is isize::MAX.
1178 isize::MAX as usize - self.len()
1179 }
1180
1181 #[inline]
1182 unsafe fn advance_mut(&mut self, cnt: usize) {
1183 let remaining = self.cap - self.len();
1184 if cnt > remaining {
1185 super::panic_advance(&TryGetError {
1186 requested: cnt,
1187 available: remaining,
1188 });
1189 }
1190 // Addition won't overflow since it is at most `self.cap`.
1191 self.len = self.len() + cnt;
1192 }
1193
1194 #[inline]
1195 fn chunk_mut(&mut self) -> &mut UninitSlice {
1196 if self.capacity() == self.len() {
1197 self.reserve(64);
1198 }
1199 self.spare_capacity_mut().into()
1200 }
1201
1202 // Specialize these methods so they can skip checking `remaining_mut`
1203 // and `advance_mut`.
1204
1205 fn put<T: Buf>(&mut self, mut src: T)
1206 where
1207 Self: Sized,
1208 {
1209 if !src.has_remaining() {
1210 // prevent calling `copy_to_bytes`->`put`->`copy_to_bytes` infinitely when src is empty
1211 return;
1212 } else if self.capacity() == 0 {
1213 // When capacity is zero, try reusing allocation of `src`.
1214 let src_copy = src.copy_to_bytes(src.remaining());
1215 drop(src);
1216 match src_copy.try_into_mut() {
1217 Ok(bytes_mut) => *self = bytes_mut,
1218 Err(bytes) => self.extend_from_slice(&bytes),
1219 }
1220 } else {
1221 // In case the src isn't contiguous, reserve upfront.
1222 self.reserve(src.remaining());
1223
1224 while src.has_remaining() {
1225 let s = src.chunk();
1226 let l = s.len();
1227 self.extend_from_slice(s);
1228 src.advance(l);
1229 }
1230 }
1231 }
1232
1233 fn put_slice(&mut self, src: &[u8]) {
1234 self.extend_from_slice(src);
1235 }
1236
1237 fn put_bytes(&mut self, val: u8, cnt: usize) {
1238 self.reserve(cnt);
1239 unsafe {
1240 let dst = self.spare_capacity_mut();
1241 // Reserved above
1242 debug_assert!(dst.len() >= cnt);
1243
1244 ptr::write_bytes(dst.as_mut_ptr(), val, cnt);
1245
1246 self.advance_mut(cnt);
1247 }
1248 }
1249}
1250
1251impl AsRef<[u8]> for BytesMut {
1252 #[inline]
1253 fn as_ref(&self) -> &[u8] {
1254 self.as_slice()
1255 }
1256}
1257
1258impl Deref for BytesMut {
1259 type Target = [u8];
1260
1261 #[inline]
1262 fn deref(&self) -> &[u8] {
1263 self.as_ref()
1264 }
1265}
1266
1267impl AsMut<[u8]> for BytesMut {
1268 #[inline]
1269 fn as_mut(&mut self) -> &mut [u8] {
1270 self.as_slice_mut()
1271 }
1272}
1273
1274impl DerefMut for BytesMut {
1275 #[inline]
1276 fn deref_mut(&mut self) -> &mut [u8] {
1277 self.as_mut()
1278 }
1279}
1280
1281impl<'a> From<&'a [u8]> for BytesMut {
1282 fn from(src: &'a [u8]) -> BytesMut {
1283 BytesMut::from_vec(src.to_vec())
1284 }
1285}
1286
1287impl<'a> From<&'a str> for BytesMut {
1288 fn from(src: &'a str) -> BytesMut {
1289 BytesMut::from(src.as_bytes())
1290 }
1291}
1292
1293impl From<BytesMut> for Bytes {
1294 fn from(src: BytesMut) -> Bytes {
1295 src.freeze()
1296 }
1297}
1298
1299impl PartialEq for BytesMut {
1300 fn eq(&self, other: &BytesMut) -> bool {
1301 self.as_slice() == other.as_slice()
1302 }
1303}
1304
1305impl PartialOrd for BytesMut {
1306 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1307 Some(self.cmp(other))
1308 }
1309}
1310
1311impl Ord for BytesMut {
1312 fn cmp(&self, other: &BytesMut) -> cmp::Ordering {
1313 self.as_slice().cmp(other.as_slice())
1314 }
1315}
1316
1317impl Eq for BytesMut {}
1318
1319impl Default for BytesMut {
1320 #[inline]
1321 fn default() -> BytesMut {
1322 BytesMut::new()
1323 }
1324}
1325
1326impl hash::Hash for BytesMut {
1327 fn hash<H>(&self, state: &mut H)
1328 where
1329 H: hash::Hasher,
1330 {
1331 let s: &[u8] = self.as_ref();
1332 s.hash(state);
1333 }
1334}
1335
1336impl Borrow<[u8]> for BytesMut {
1337 fn borrow(&self) -> &[u8] {
1338 self.as_ref()
1339 }
1340}
1341
1342impl BorrowMut<[u8]> for BytesMut {
1343 fn borrow_mut(&mut self) -> &mut [u8] {
1344 self.as_mut()
1345 }
1346}
1347
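// `core::fmt::Write` support: `write!(buf, ...)` appends the formatted text
// via `put_slice`. The length check against `remaining_mut` only fails if the
// buffer would exceed the maximum size, so in practice writes grow the buffer.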
1348impl fmt::Write for BytesMut {
1349 #[inline]
1350 fn write_str(&mut self, s: &str) -> fmt::Result {
1351 if self.remaining_mut() >= s.len() {
1352 self.put_slice(s.as_bytes());
1353 Ok(())
1354 } else {
1355 Err(fmt::Error)
1356 }
1357 }
1358
1359 #[inline]
1360 fn write_fmt(&mut self, args: fmt::Arguments<'_>) -> fmt::Result {
1361 fmt::write(self, args)
1362 }
1363}
1364
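// Cloning a `BytesMut` must copy the bytes into a fresh buffer: the handle
// guarantees unique ownership, so a shallow clone (as `Bytes` does) would
// violate that invariant.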
1365impl Clone for BytesMut {
1366 fn clone(&self) -> BytesMut {
1367 BytesMut::from(&self[..])
1368 }
1369}
1370
1371impl IntoIterator for BytesMut {
1372 type Item = u8;
1373 type IntoIter = IntoIter<BytesMut>;
1374
1375 fn into_iter(self) -> Self::IntoIter {
1376 IntoIter::new(self)
1377 }
1378}
1379
1380impl<'a> IntoIterator for &'a BytesMut {
1381 type Item = &'a u8;
1382 type IntoIter = core::slice::Iter<'a, u8>;
1383
1384 fn into_iter(self) -> Self::IntoIter {
1385 self.as_ref().iter()
1386 }
1387}
1388
1389impl Extend<u8> for BytesMut {
1390 fn extend<T>(&mut self, iter: T)
1391 where
1392 T: IntoIterator<Item = u8>,
1393 {
1394 let iter = iter.into_iter();
1395
1396 let (lower, _) = iter.size_hint();
1397 self.reserve(lower);
1398
1399 // TODO: optimize
1400 // 1. If self.kind() == KIND_VEC, use Vec::extend
1401 for b in iter {
1402 self.put_u8(b);
1403 }
1404 }
1405}
1406
1407impl<'a> Extend<&'a u8> for BytesMut {
1408 fn extend<T>(&mut self, iter: T)
1409 where
1410 T: IntoIterator<Item = &'a u8>,
1411 {
1412 self.extend(iter.into_iter().copied())
1413 }
1414}
1415
1416impl Extend<Bytes> for BytesMut {
1417 fn extend<T>(&mut self, iter: T)
1418 where
1419 T: IntoIterator<Item = Bytes>,
1420 {
1421 for bytes in iter {
1422 self.extend_from_slice(&bytes)
1423 }
1424 }
1425}
1426
1427impl FromIterator<u8> for BytesMut {
1428 fn from_iter<T: IntoIterator<Item = u8>>(into_iter: T) -> Self {
1429 BytesMut::from_vec(Vec::from_iter(into_iter))
1430 }
1431}
1432
1433impl<'a> FromIterator<&'a u8> for BytesMut {
1434 fn from_iter<T: IntoIterator<Item = &'a u8>>(into_iter: T) -> Self {
1435 BytesMut::from_iter(into_iter.into_iter().copied())
1436 }
1437}
1438
1439/*
1440 *
1441 * ===== Inner =====
1442 *
1443 */
1444
1445unsafe fn increment_shared(ptr: *mut Shared) {
1446 let old_size = (*ptr).ref_count.fetch_add(1, Ordering::Relaxed);
1447
1448 if old_size > isize::MAX as usize {
1449 crate::abort();
1450 }
1451}
1452
1453unsafe fn release_shared(ptr: *mut Shared) {
1454 // `Shared` storage... follow the drop steps from Arc.
1455 if (*ptr).ref_count.fetch_sub(1, Ordering::Release) != 1 {
1456 return;
1457 }
1458
1459 // This fence is needed to prevent reordering of use of the data and
1460 // deletion of the data. Because it is marked `Release`, the decreasing
1461 // of the reference count synchronizes with this `Acquire` fence. This
1462 // means that use of the data happens before decreasing the reference
1463 // count, which happens before this fence, which happens before the
1464 // deletion of the data.
1465 //
1466 // As explained in the [Boost documentation][1],
1467 //
1468 // > It is important to enforce any possible access to the object in one
1469 // > thread (through an existing reference) to *happen before* deleting
1470 // > the object in a different thread. This is achieved by a "release"
1471 // > operation after dropping a reference (any access to the object
1472 // > through this reference must obviously happened before), and an
1473 // > "acquire" operation before deleting the object.
1474 //
1475 // [1]: (www.boost.org/doc/libs/1_55_0/doc/html/atomic/usage_examples.html)
1476 //
1477 // Thread sanitizer does not support atomic fences. Use an atomic load
1478 // instead.
1479 (*ptr).ref_count.load(Ordering::Acquire);
1480
1481 // Drop the data
1482 drop(Box::from_raw(ptr));
1483}
1484
1485impl Shared {
1486 fn is_unique(&self) -> bool {
1487 // The goal is to check if the current handle is the only handle
1488 // that currently has access to the buffer. This is done by
1489 // checking if the `ref_count` is currently 1.
1490 //
1491 // The `Acquire` ordering synchronizes with the `Release` as
1492 // part of the `fetch_sub` in `release_shared`. The `fetch_sub`
1493 // operation guarantees that any mutations done in other threads
1494 // are ordered before the `ref_count` is decremented. As such,
1495 // this `Acquire` will guarantee that those mutations are
1496 // visible to the current thread.
1497 self.ref_count.load(Ordering::Acquire) == 1
1498 }
1499}
1500
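// A worked example (assuming a 64-bit target): for cap = 4096 = 2^12,
// `cap >> MIN_ORIGINAL_CAPACITY_WIDTH` is 4, which has 61 leading zeros, so
// width = 64 - 61 = 3; 3 is below the max repr of 7, so the repr is 3 and
// `original_capacity_from_repr(3)` gives back 1 << (3 + 9) = 4096.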
1501#[inline]
1502fn original_capacity_to_repr(cap: usize) -> usize {
1503 let width = PTR_WIDTH - ((cap >> MIN_ORIGINAL_CAPACITY_WIDTH).leading_zeros() as usize);
1504 cmp::min(
1505 width,
1506 MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH,
1507 )
1508}
1509
1510fn original_capacity_from_repr(repr: usize) -> usize {
1511 if repr == 0 {
1512 return 0;
1513 }
1514
1515 1 << (repr + (MIN_ORIGINAL_CAPACITY_WIDTH - 1))
1516}
1517
1518#[cfg(test)]
1519mod tests {
1520 use super::*;
1521
1522 #[test]
1523 fn test_original_capacity_to_repr() {
1524 assert_eq!(original_capacity_to_repr(0), 0);
1525
1526 let max_width = 32;
1527
1528 for width in 1..(max_width + 1) {
1529 let cap = 1 << (width - 1);
1530
1531 let expected = if width < MIN_ORIGINAL_CAPACITY_WIDTH {
1532 0
1533 } else if width < MAX_ORIGINAL_CAPACITY_WIDTH {
1534 width - MIN_ORIGINAL_CAPACITY_WIDTH
1535 } else {
1536 MAX_ORIGINAL_CAPACITY_WIDTH - MIN_ORIGINAL_CAPACITY_WIDTH
1537 };
1538
1539 assert_eq!(original_capacity_to_repr(cap), expected);
1540
1541 if width > 1 {
1542 assert_eq!(original_capacity_to_repr(cap + 1), expected);
1543 }
1544
1545 // MIN_ORIGINAL_CAPACITY_WIDTH must be bigger than 7 to pass tests below
1546 if width == MIN_ORIGINAL_CAPACITY_WIDTH + 1 {
1547 assert_eq!(original_capacity_to_repr(cap - 24), expected - 1);
1548 assert_eq!(original_capacity_to_repr(cap + 76), expected);
1549 } else if width == MIN_ORIGINAL_CAPACITY_WIDTH + 2 {
1550 assert_eq!(original_capacity_to_repr(cap - 1), expected - 1);
1551 assert_eq!(original_capacity_to_repr(cap - 48), expected - 1);
1552 }
1553 }
1554 }
1555
1556 #[test]
1557 fn test_original_capacity_from_repr() {
1558 assert_eq!(0, original_capacity_from_repr(0));
1559
1560 let min_cap = 1 << MIN_ORIGINAL_CAPACITY_WIDTH;
1561
1562 assert_eq!(min_cap, original_capacity_from_repr(1));
1563 assert_eq!(min_cap * 2, original_capacity_from_repr(2));
1564 assert_eq!(min_cap * 4, original_capacity_from_repr(3));
1565 assert_eq!(min_cap * 8, original_capacity_from_repr(4));
1566 assert_eq!(min_cap * 16, original_capacity_from_repr(5));
1567 assert_eq!(min_cap * 32, original_capacity_from_repr(6));
1568 assert_eq!(min_cap * 64, original_capacity_from_repr(7));
1569 }
1570}
1571
1572unsafe impl Send for BytesMut {}
1573unsafe impl Sync for BytesMut {}
1574
1575/*
1576 *
1577 * ===== PartialEq / PartialOrd =====
1578 *
1579 */
1580
1581impl PartialEq<[u8]> for BytesMut {
1582 fn eq(&self, other: &[u8]) -> bool {
1583 &**self == other
1584 }
1585}
1586
1587impl PartialOrd<[u8]> for BytesMut {
1588 fn partial_cmp(&self, other: &[u8]) -> Option<cmp::Ordering> {
1589 (**self).partial_cmp(other)
1590 }
1591}
1592
1593impl PartialEq<BytesMut> for [u8] {
1594 fn eq(&self, other: &BytesMut) -> bool {
1595 *other == *self
1596 }
1597}
1598
1599impl PartialOrd<BytesMut> for [u8] {
1600 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1601 <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1602 }
1603}
1604
1605impl PartialEq<str> for BytesMut {
1606 fn eq(&self, other: &str) -> bool {
1607 &**self == other.as_bytes()
1608 }
1609}
1610
1611impl PartialOrd<str> for BytesMut {
1612 fn partial_cmp(&self, other: &str) -> Option<cmp::Ordering> {
1613 (**self).partial_cmp(other.as_bytes())
1614 }
1615}
1616
1617impl PartialEq<BytesMut> for str {
1618 fn eq(&self, other: &BytesMut) -> bool {
1619 *other == *self
1620 }
1621}
1622
1623impl PartialOrd<BytesMut> for str {
1624 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1625 <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1626 }
1627}
1628
1629impl PartialEq<Vec<u8>> for BytesMut {
1630 fn eq(&self, other: &Vec<u8>) -> bool {
1631 *self == other[..]
1632 }
1633}
1634
1635impl PartialOrd<Vec<u8>> for BytesMut {
1636 fn partial_cmp(&self, other: &Vec<u8>) -> Option<cmp::Ordering> {
1637 (**self).partial_cmp(&other[..])
1638 }
1639}
1640
1641impl PartialEq<BytesMut> for Vec<u8> {
1642 fn eq(&self, other: &BytesMut) -> bool {
1643 *other == *self
1644 }
1645}
1646
1647impl PartialOrd<BytesMut> for Vec<u8> {
1648 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1649 other.partial_cmp(self)
1650 }
1651}
1652
1653impl PartialEq<String> for BytesMut {
1654 fn eq(&self, other: &String) -> bool {
1655 *self == other[..]
1656 }
1657}
1658
1659impl PartialOrd<String> for BytesMut {
1660 fn partial_cmp(&self, other: &String) -> Option<cmp::Ordering> {
1661 (**self).partial_cmp(other.as_bytes())
1662 }
1663}
1664
1665impl PartialEq<BytesMut> for String {
1666 fn eq(&self, other: &BytesMut) -> bool {
1667 *other == *self
1668 }
1669}
1670
1671impl PartialOrd<BytesMut> for String {
1672 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1673 <[u8] as PartialOrd<[u8]>>::partial_cmp(self.as_bytes(), other)
1674 }
1675}
1676
1677impl<'a, T: ?Sized> PartialEq<&'a T> for BytesMut
1678where
1679 BytesMut: PartialEq<T>,
1680{
1681 fn eq(&self, other: &&'a T) -> bool {
1682 *self == **other
1683 }
1684}
1685
1686impl<'a, T: ?Sized> PartialOrd<&'a T> for BytesMut
1687where
1688 BytesMut: PartialOrd<T>,
1689{
1690 fn partial_cmp(&self, other: &&'a T) -> Option<cmp::Ordering> {
1691 self.partial_cmp(*other)
1692 }
1693}
1694
1695impl PartialEq<BytesMut> for &[u8] {
1696 fn eq(&self, other: &BytesMut) -> bool {
1697 *other == *self
1698 }
1699}
1700
1701impl PartialOrd<BytesMut> for &[u8] {
1702 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1703 <[u8] as PartialOrd<[u8]>>::partial_cmp(self, other)
1704 }
1705}
1706
1707impl PartialEq<BytesMut> for &str {
1708 fn eq(&self, other: &BytesMut) -> bool {
1709 *other == *self
1710 }
1711}
1712
1713impl PartialOrd<BytesMut> for &str {
1714 fn partial_cmp(&self, other: &BytesMut) -> Option<cmp::Ordering> {
1715 other.partial_cmp(self)
1716 }
1717}
1718
1719impl PartialEq<BytesMut> for Bytes {
1720 fn eq(&self, other: &BytesMut) -> bool {
1721 other[..] == self[..]
1722 }
1723}
1724
1725impl PartialEq<Bytes> for BytesMut {
1726 fn eq(&self, other: &Bytes) -> bool {
1727 other[..] == self[..]
1728 }
1729}
1730
1731impl From<BytesMut> for Vec<u8> {
1732 fn from(bytes: BytesMut) -> Self {
1733 let kind = bytes.kind();
1734 let bytes = ManuallyDrop::new(bytes);
1735
1736 let mut vec = if kind == KIND_VEC {
1737 unsafe {
1738 let off = bytes.get_vec_pos();
1739 rebuild_vec(bytes.ptr.as_ptr(), bytes.len, bytes.cap, off)
1740 }
1741 } else {
1742 let shared = bytes.data;
1743
1744 if unsafe { (*shared).is_unique() } {
1745 let vec = core::mem::take(unsafe { &mut (*shared).vec });
1746
1747 unsafe { release_shared(shared) };
1748
1749 vec
1750 } else {
1751 return ManuallyDrop::into_inner(bytes).deref().to_vec();
1752 }
1753 };
1754
1755 let len = bytes.len;
1756
1757 unsafe {
1758 ptr::copy(bytes.ptr.as_ptr(), vec.as_mut_ptr(), len);
1759 vec.set_len(len);
1760 }
1761
1762 vec
1763 }
1764}
1765
1766#[inline]
1767fn vptr(ptr: *mut u8) -> NonNull<u8> {
1768 if cfg!(debug_assertions) {
1769 NonNull::new(ptr).expect("Vec pointer should be non-null")
1770 } else {
1771 unsafe { NonNull::new_unchecked(ptr) }
1772 }
1773}
1774
1775/// Returns a dangling pointer with the given address. This is used to store
1776/// integer data in pointer fields.
1777///
1778/// It is equivalent to `addr as *mut T`, but that cast fails on Miri when
1779/// strict provenance checking is enabled.
1780#[inline]
1781fn invalid_ptr<T>(addr: usize) -> *mut T {
1782 let ptr = core::ptr::null_mut::<u8>().wrapping_add(addr);
1783 debug_assert_eq!(ptr as usize, addr);
1784 ptr.cast::<T>()
1785}
1786
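// Reconstructs the original `Vec` backing a view that starts `off` bytes past
// the allocation: the pointer is walked back by `off` and both the length and
// capacity are grown by `off`. The caller must pass the same values the view
// was derived from.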
1787unsafe fn rebuild_vec(ptr: *mut u8, mut len: usize, mut cap: usize, off: usize) -> Vec<u8> {
1788 let ptr = ptr.sub(off);
1789 len += off;
1790 cap += off;
1791
1792 Vec::from_raw_parts(ptr, len, cap)
1793}
1794
1795// ===== impl SharedVtable =====
1796
1797static SHARED_VTABLE: Vtable = Vtable {
1798 clone: shared_v_clone,
1799 into_vec: shared_v_to_vec,
1800 into_mut: shared_v_to_mut,
1801 is_unique: shared_v_is_unique,
1802 drop: shared_v_drop,
1803};
1804
1805unsafe fn shared_v_clone(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Bytes {
1806 let shared = data.load(Ordering::Relaxed) as *mut Shared;
1807 increment_shared(shared);
1808
1809 let data = AtomicPtr::new(shared as *mut ());
1810 Bytes::with_vtable(ptr, len, data, &SHARED_VTABLE)
1811}
1812
1813unsafe fn shared_v_to_vec(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> Vec<u8> {
1814 let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
1815
1816 if (*shared).is_unique() {
1817 let shared = &mut *shared;
1818
1819 // Drop shared
1820 let mut vec = core::mem::take(&mut shared.vec);
1821 release_shared(shared);
1822
1823 // Copy back buffer
1824 ptr::copy(ptr, vec.as_mut_ptr(), len);
1825 vec.set_len(len);
1826
1827 vec
1828 } else {
1829 let v = slice::from_raw_parts(ptr, len).to_vec();
1830 release_shared(shared);
1831 v
1832 }
1833}
1834
1835unsafe fn shared_v_to_mut(data: &AtomicPtr<()>, ptr: *const u8, len: usize) -> BytesMut {
1836 let shared: *mut Shared = data.load(Ordering::Relaxed).cast();
1837
1838 if (*shared).is_unique() {
1839 let shared = &mut *shared;
1840
1841 // The capacity is always the original capacity of the buffer
1842 // minus the offset from the start of the buffer
1843 let v = &mut shared.vec;
1844 let v_capacity = v.capacity();
1845 let v_ptr = v.as_mut_ptr();
1846 let offset = ptr.offset_from(v_ptr) as usize;
1847 let cap = v_capacity - offset;
1848
1849 let ptr = vptr(ptr as *mut u8);
1850
1851 BytesMut {
1852 ptr,
1853 len,
1854 cap,
1855 data: shared,
1856 }
1857 } else {
1858 let v = slice::from_raw_parts(ptr, len).to_vec();
1859 release_shared(shared);
1860 BytesMut::from_vec(v)
1861 }
1862}
1863
1864unsafe fn shared_v_is_unique(data: &AtomicPtr<()>) -> bool {
1865 let shared = data.load(Ordering::Acquire);
1866 let ref_count = (*shared.cast::<Shared>()).ref_count.load(Ordering::Relaxed);
1867 ref_count == 1
1868}
1869
1870unsafe fn shared_v_drop(data: &mut AtomicPtr<()>, _ptr: *const u8, _len: usize) {
1871 data.with_mut(|shared| {
1872 release_shared(*shared as *mut Shared);
1873 });
1874}
1875
1876// compile-fails
1877
1878/// ```compile_fail
1879/// use bytes::BytesMut;
1880/// #[deny(unused_must_use)]
1881/// {
1882/// let mut b1 = BytesMut::from("hello world");
1883/// b1.split_to(6);
1884/// }
1885/// ```
1886fn _split_to_must_use() {}
1887
1888/// ```compile_fail
1889/// use bytes::BytesMut;
1890/// #[deny(unused_must_use)]
1891/// {
1892/// let mut b1 = BytesMut::from("hello world");
1893/// b1.split_off(6);
1894/// }
1895/// ```
1896fn _split_off_must_use() {}
1897
1898/// ```compile_fail
1899/// use bytes::BytesMut;
1900/// #[deny(unused_must_use)]
1901/// {
1902/// let mut b1 = BytesMut::from("hello world");
1903/// b1.split();
1904/// }
1905/// ```
1906fn _split_must_use() {}
1907
1908// fuzz tests
1909#[cfg(all(test, loom))]
1910mod fuzz {
1911 use loom::sync::Arc;
1912 use loom::thread;
1913
1914 use super::BytesMut;
1915 use crate::Bytes;
1916
1917 #[test]
1918 fn bytes_mut_cloning_frozen() {
1919 loom::model(|| {
1920 let a = BytesMut::from(&b"abcdefgh"[..]).split().freeze();
1921 let addr = a.as_ptr() as usize;
1922
1923 // test the Bytes::clone is Sync by putting it in an Arc
1924 let a1 = Arc::new(a);
1925 let a2 = a1.clone();
1926
1927 let t1 = thread::spawn(move || {
1928 let b: Bytes = (*a1).clone();
1929 assert_eq!(b.as_ptr() as usize, addr);
1930 });
1931
1932 let t2 = thread::spawn(move || {
1933 let b: Bytes = (*a2).clone();
1934 assert_eq!(b.as_ptr() as usize, addr);
1935 });
1936
1937 t1.join().unwrap();
1938 t2.join().unwrap();
1939 });
1940 }
1941}