From e3a193a24da8e8f7dfa42c31b418f9aee7de1cb0 Mon Sep 17 00:00:00 2001 From: neevek Date: Wed, 5 Apr 2023 00:26:16 +0800 Subject: [PATCH 1/2] ensure len and capacity methods reflect their intentions --- src/lib.rs | 2 +- src/pool.rs | 64 ++++++++++++++++++++++++++++++++++++++++++++----- src/poolable.rs | 52 +++++++++++++++++++++++++++++++++++++--- 3 files changed, 108 insertions(+), 10 deletions(-) diff --git a/src/lib.rs b/src/lib.rs index 1dcb781..bf56388 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -8,7 +8,7 @@ //! let pool = BytePool::>::new(); //! //! // Allocate a buffer with capacity 1024. -//! let mut buf = pool.alloc(1024); +//! let mut buf = pool.alloc_and_fill(1024); //! //! // write some data into it //! for i in 0..100 { diff --git a/src/pool.rs b/src/pool.rs index 05e413e..49a6002 100644 --- a/src/pool.rs +++ b/src/pool.rs @@ -56,6 +56,14 @@ impl BytePool { /// The returned `Block` contains arbitrary data, and must be zeroed or overwritten, /// in cases this is needed. pub fn alloc(&self, size: usize) -> Block<'_, T> { + self.alloc_internal(size, false) + } + + pub fn alloc_and_fill(&self, size: usize) -> Block<'_, T> { + self.alloc_internal(size, true) + } + + pub fn alloc_internal(&self, size: usize, fill: bool) -> Block<'_, T> { assert!(size > 0, "Can not allocate empty blocks"); // check the last 4 blocks @@ -64,9 +72,12 @@ impl BytePool { } else { &self.list_large }; - if let Some(el) = list.pop() { - if el.capacity() == size { + if let Some(mut el) = list.pop() { + if el.capacity() >= size && el.capacity() < size + 1024 { // found one, reuse it + if fill { + el.resize(size) + } return Block::new(el, self); } else { // put it back @@ -75,7 +86,11 @@ impl BytePool { } // allocate a new block - let data = T::alloc(size); + let data = if fill { + T::alloc_and_fill(size) + } else { + T::alloc(size) + }; Block::new(data, self) } @@ -90,7 +105,8 @@ impl BytePool { impl<'a, T: Poolable> Drop for Block<'a, T> { fn drop(&mut self) { - let data = 
mem::ManuallyDrop::into_inner(unsafe { ptr::read(&self.data) }); + let mut data = mem::ManuallyDrop::into_inner(unsafe { ptr::read(&self.data) }); + data.reset(); self.pool.push_raw_block(data); } } @@ -138,6 +154,41 @@ unsafe impl<'a, T: StableDeref + Poolable> StableDeref for Block<'a, T> {} #[cfg(test)] mod tests { use super::*; + #[test] + fn append() { + let pool = BytePool::>::new(); + let mut buf = pool.alloc(4); + assert_eq!(0, buf.len()); + assert_eq!(4, buf.capacity()); + buf.push(12u8); + assert_eq!(1, buf.len()); + buf.extend_from_slice("hello".as_bytes()); + assert_eq!(6, buf.len()); + buf.clear(); + assert_eq!(0, buf.len()); + assert!(buf.capacity() > 0); + } + + #[test] + fn len_and_capacity() { + let pool = BytePool::>::new(); + for i in 1..10 { + let buf = pool.alloc_and_fill(i); + assert_eq!(buf.len(), i) + } + for i in 1..10 { + let buf = pool.alloc(i); + assert_eq!(buf.len(), 0) + } + for i in 1..10 { + let buf = pool.alloc_and_fill(i * 10000); + assert_eq!(buf.len(), i * 10000) + } + for i in 1..10 { + let buf = pool.alloc(i * 10000); + assert_eq!(buf.len(), 0) + } + } #[test] fn basics_vec_u8() { @@ -174,6 +225,7 @@ mod tests { let _slice: &[u8] = &buf; assert_eq!(buf.capacity(), 10); + buf.resize(10, 0); for i in 0..10 { buf[i] = 1; } @@ -198,7 +250,7 @@ mod tests { let pool1 = pool.clone(); let h1 = std::thread::spawn(move || { for _ in 0..100 { - let mut buf = pool1.alloc(64); + let mut buf = pool1.alloc_and_fill(64); buf[10] = 10; } }); @@ -206,7 +258,7 @@ mod tests { let pool2 = pool.clone(); let h2 = std::thread::spawn(move || { for _ in 0..100 { - let mut buf = pool2.alloc(64); + let mut buf = pool2.alloc_and_fill(64); buf[10] = 10; } }); diff --git a/src/poolable.rs b/src/poolable.rs index 23fd0cc..b851e97 100644 --- a/src/poolable.rs +++ b/src/poolable.rs @@ -3,16 +3,41 @@ use std::hash::{BuildHasher, Hash}; /// The trait required to be able to use a type in `BytePool`. 
pub trait Poolable { + fn empty(&self) -> bool; + fn len(&self) -> usize; fn capacity(&self) -> usize; + fn resize(&mut self, count: usize); + fn reset(&mut self); fn alloc(size: usize) -> Self; + fn alloc_and_fill(size: usize) -> Self; } impl Poolable for Vec { - fn capacity(&self) -> usize { + fn empty(&self) -> bool { + self.len() == 0 + } + + fn len(&self) -> usize { self.len() } + fn capacity(&self) -> usize { + self.capacity() + } + + fn resize(&mut self, count: usize) { + self.resize(count, T::default()); + } + + fn reset(&mut self) { + self.clear(); + } + fn alloc(size: usize) -> Self { + Vec::::with_capacity(size) + } + + fn alloc_and_fill(size: usize) -> Self { vec![T::default(); size] } } @@ -22,11 +47,32 @@ where K: Eq + Hash, S: BuildHasher + Default, { - fn capacity(&self) -> usize { + fn empty(&self) -> bool { + self.len() == 0 + } + + fn len(&self) -> usize { self.len() } + fn capacity(&self) -> usize { + self.capacity() + } + + fn resize(&mut self, _count: usize) { + // do nothing + } + + fn reset(&mut self) { + self.clear(); + } + fn alloc(size: usize) -> Self { + Self::alloc_and_fill(size) + } + + fn alloc_and_fill(size: usize) -> Self { + // not actually filling the HashMap though HashMap::with_capacity_and_hasher(size, Default::default()) } } @@ -42,7 +88,7 @@ impl Realloc for Vec { assert!(new_size > 0); match new_size.cmp(&self.capacity()) { - Greater => self.resize(new_size, T::default()), + Greater => self.reserve(new_size - self.capacity()), Less => { self.truncate(new_size); self.shrink_to_fit(); From 5eebfff50a8cd1b2dab1404222afdc5421b15be7 Mon Sep 17 00:00:00 2001 From: neevek Date: Thu, 12 Feb 2026 18:55:38 +0800 Subject: [PATCH 2/2] added alloc_from_slice and set_filled_len --- README.md | 20 ++++++++++---------- src/lib.rs | 20 ++++++++++---------- src/pool.rs | 52 ++++++++++++++++++++++++++++++++++++++++++++++++++++ 3 files changed, 72 insertions(+), 20 deletions(-) diff --git a/README.md b/README.md index ffa0a28..4fdcdcb 100644 ---
a/README.md +++ b/README.md @@ -52,19 +52,19 @@ use byte_pool::BytePool; // Create a pool let pool = BytePool::>::new(); -// Allocate a buffer -let mut buf = pool.alloc(1024); +// Allocate and copy from existing bytes. +let payload = pool.alloc_from_slice(b"hello"); +assert_eq!(&payload[..], b"hello"); -// write some data into it -for i in 0..100 { - buf[i] = 12; -} - -// Check that we actually wrote sth. -assert_eq!(buf[55], 12); +// Allocate a writable frame and shrink it after recv/write. +let mut frame = pool.alloc_and_fill(1024); +let received = 128; +frame.set_filled_len(received); +assert_eq!(frame.len(), received); // Returns the underlying memory to the pool. -drop(buf); +drop(payload); +drop(frame); // Frees all memory in the pool. drop(pool); diff --git a/src/lib.rs b/src/lib.rs index bf56388..ea0bcac 100644 --- a/src/lib.rs +++ b/src/lib.rs @@ -7,19 +7,19 @@ //! // Create a pool //! let pool = BytePool::>::new(); //! -//! // Allocate a buffer with capacity 1024. -//! let mut buf = pool.alloc_and_fill(1024); +//! // Allocate and copy from existing bytes. +//! let payload = pool.alloc_from_slice(b"hello"); +//! assert_eq!(&payload[..], b"hello"); //! -//! // write some data into it -//! for i in 0..100 { -//! buf[i] = 12; -//! } -//! -//! // Check that we actually wrote sth. -//! assert_eq!(buf[55], 12); +//! // Allocate a writable frame and then shrink to received size. +//! let mut frame = pool.alloc_and_fill(1024); +//! let received = 128; +//! frame.set_filled_len(received); +//! assert_eq!(frame.len(), received); //! //! // Returns the underlying memory to the pool. -//! drop(buf); +//! drop(payload); +//! drop(frame); //! //! // Frees all memory in the pool. //! drop(pool); diff --git a/src/pool.rs b/src/pool.rs index 49a6002..1e47c37 100644 --- a/src/pool.rs +++ b/src/pool.rs @@ -103,6 +103,15 @@ impl BytePool { } } +impl BytePool> { + /// Allocates a new block and fills it with a copy of `data`. 
+ pub fn alloc_from_slice(&self, data: &[T]) -> Block<'_, Vec> { + let mut block = self.alloc(data.len()); + block.extend_from_slice(data); + block + } +} + impl<'a, T: Poolable> Drop for Block<'a, T> { fn drop(&mut self) { let mut data = mem::ManuallyDrop::into_inner(unsafe { ptr::read(&self.data) }); @@ -132,6 +141,23 @@ impl<'a, T: Poolable + Realloc> Block<'a, T> { } } +impl<'a, T: Default + Clone> Block<'a, Vec> { + /// Updates the logical length after writing into a pre-sized buffer. + /// + /// This is intended for buffers created by `alloc_and_fill`, where `len()` + /// starts at the maximum writable size. `filled_len` must not exceed the + /// current length. + pub fn set_filled_len(&mut self, filled_len: usize) { + assert!( + filled_len <= self.len(), + "filled_len ({}) must be <= current len ({})", + filled_len, + self.len() + ); + self.truncate(filled_len); + } +} + impl<'a, T: Poolable> Deref for Block<'a, T> { type Target = T; @@ -169,6 +195,14 @@ mod tests { assert!(buf.capacity() > 0); } + #[test] + fn alloc_from_slice() { + let pool = BytePool::>::new(); + let buf = pool.alloc_from_slice(b"hello"); + assert_eq!(buf.len(), 5); + assert_eq!(&buf[..], b"hello"); + } + #[test] fn len_and_capacity() { let pool = BytePool::>::new(); @@ -190,6 +224,24 @@ mod tests { } } + #[test] + fn set_filled_len() { + let pool = BytePool::>::new(); + let mut frame = pool.alloc_and_fill(64); + frame[10] = 123; + frame.set_filled_len(16); + assert_eq!(frame.len(), 16); + assert_eq!(frame[10], 123); + } + + #[test] + #[should_panic(expected = "filled_len")] + fn set_filled_len_panics_when_too_large() { + let pool = BytePool::>::new(); + let mut frame = pool.alloc_and_fill(16); + frame.set_filled_len(17); + } + #[test] fn basics_vec_u8() { let pool: BytePool> = BytePool::new();