diff --git a/src/collections/id_map/direct_map.rs b/src/collections/id_map/direct_map.rs
index 236bffb51..bd168f908 100644
--- a/src/collections/id_map/direct_map.rs
+++ b/src/collections/id_map/direct_map.rs
@@ -27,9 +27,9 @@ impl<E: From<u64> + Into<u64> + Copy, I: From<u64> + Into<u64> + Copy>
     #[inline(always)]
     pub fn insert_with_new_id(&mut self, internal_id: I) -> Option<E> {
-        let higher_order_bits: u64 = self.generate_id() as u64;
+        let higher_order_bits = self.generate_id() as u64;
         // Use random number for higher order bits and the offset for lower order bits.
-        let external_id: u64 = higher_order_bits << 32 | <I as Into<u64>>::into(internal_id);
+        let external_id = higher_order_bits << 32 | <I as Into<u64>>::into(internal_id);
         Some(external_id.into())
     }
 
@@ -39,7 +39,7 @@ impl<E: From<u64> + Into<u64> + Copy, I: From<u64> + Into<u64> + Copy>
     }
 
     fn mask_id(external_id: &E) -> I {
-        let masked_id: u32 = <E as Into<u64>>::into(*external_id) as u32;
+        let masked_id = <E as Into<u64>>::into(*external_id) as u32;
         <I as From<u64>>::from(masked_id as u64)
     }
 }
diff --git a/src/collections/id_map/indirect_map.rs b/src/collections/id_map/indirect_map.rs
index 69272b196..fc8268aa1 100644
--- a/src/collections/id_map/indirect_map.rs
+++ b/src/collections/id_map/indirect_map.rs
@@ -39,7 +39,7 @@ impl<E: From<u64> + Into<u64> + Copy, I: From<u64> + Into<u64> + Copy>
     pub fn insert_with_new_id(&mut self, internal_id: I) -> Option<E> {
         // Otherwise, allocate a new external id.
         for _ in 0..MAX_RETRIES_ID_ALLOC {
-            let external_id: E = E::from(self.generate_id());
+            let external_id = E::from(self.generate_id());
             if let Entry::Vacant(e) = self.ids.entry(external_id) {
                 e.insert(internal_id);
                 return Some(external_id);
@@ -76,7 +76,7 @@ impl<E: From<u64> + Into<u64> + Copy, I: From<u64> + Into<u64> + Copy>
     /// until we find an unused id (up to a maximum number of tries).
     pub fn insert_with_new_id(&mut self, internal_id: I) -> Option<E> {
         for _ in 0..MAX_RETRIES_ID_ALLOC {
-            let external_id: E = E::from(self.generate_id());
+            let external_id = E::from(self.generate_id());
             if let Entry::Vacant(e) = self.ids.entry(external_id) {
                 e.insert(internal_id);
                 return Some(external_id);
diff --git a/src/lib.rs b/src/lib.rs
index 9ef915e8a..2f6650f91 100644
--- a/src/lib.rs
+++ b/src/lib.rs
@@ -203,7 +203,7 @@ macro_rules! expect_ok {
 macro_rules! timer {
     ($name:expr) => {
         #[cfg(feature = "profiler")]
-        let _guard = $crate::perftools::profiler::PROFILER.with(|p| p.clone().create_and_enter_sync_scope($name));
+        let _guard = $crate::perftools::profiler::PROFILER.with(|p| p.clone().enter_sync($name));
     };
 }
diff --git a/src/perftools/profiler/mod.rs b/src/perftools/profiler/mod.rs
index be653cdf9..48e2f3a9b 100644
--- a/src/perftools/profiler/mod.rs
+++ b/src/perftools/profiler/mod.rs
@@ -30,7 +30,7 @@ use ::std::{
     ops::{Deref, DerefMut},
     pin::Pin,
     thread,
-    time::{Duration, SystemTime},
+    time::SystemTime,
 };
 
 //======================================================================================================================
@@ -45,10 +45,10 @@ thread_local!(
 /// thread-local instance of `Profiler` in [`PROFILER`](constant.PROFILER.html), so it is not possible to manually
 /// create an instance of `Profiler`.
 pub struct Profiler {
-    root_scopes: Vec<SharedScope>,
-    current_sync_scope: Option<SharedScope>,
-    current_async_scope: Option<SharedScope>,
-    perf_callback: Option<demi_callback_t>,
+    roots: Vec<SharedScope>,
+    current_sync: Option<SharedScope>,
+    current_async: Option<SharedScope>,
+    callback: Option<demi_callback_t>,
 }
 
 #[derive(Clone)]
@@ -62,8 +62,8 @@ pub fn reset() {
     PROFILER.with(|p| p.clone().reset());
 }
 
-pub fn set_callback(perf_callback: demi_callback_t) {
-    PROFILER.with(|p| p.clone().set_callback(perf_callback));
+pub fn set_callback(callback: demi_callback_t) {
+    PROFILER.with(|p| p.clone().set_callback(callback));
 }
 
 /// Create a special async scopes that is rooted because it does not run under other scopes.
@@ -77,48 +77,47 @@ impl Profiler {
         let thread_id = thread::current().id();
         let ns_per_cycle = Self::measure_ns_per_cycle();
 
-        // Header row
-        writeln!(
-            out,
-            "call_depth,thread_id,function_name,num_calls,cycles_per_call,nanoseconds_per_call,total_duration,total_duration_exclusive"
-        )?;
+        const HEADER_ROW: &'static str = "call_depth,thread_id,function_name,num_calls,cycles_per_call,nanoseconds_per_call,total_duration,total_duration_exclusive";
+        writeln!(out, "{}", HEADER_ROW)?;
 
-        for s in self.root_scopes.iter() {
-            s.write_recursive(out, thread_id, 0, ns_per_cycle)?;
+        for scope in self.roots.iter() {
+            scope.write_recursive(out, thread_id, 0, ns_per_cycle)?;
         }
 
         out.flush()
     }
 
     fn measure_ns_per_cycle() -> f64 {
-        let start: SystemTime = SystemTime::now();
-        let start_cycle: u64 = unsafe { x86::time::rdtscp().0 };
+        let start_ts = SystemTime::now();
+        let start_cycles = unsafe { x86::time::rdtscp().0 };
 
-        test::black_box((0..10000).fold(0, |old, new| old ^ new)); // dummy calculations for measurement
+        // dummy calculations for measurement
+        test::black_box((0..10000).fold(0, |old, new| old ^ new));
 
-        let end_cycle: u64 = unsafe { x86::time::rdtscp().0 };
-        let since_the_epoch: Duration = SystemTime::now().duration_since(start).expect("Time went backwards");
-        let in_ns: u64 = since_the_epoch.as_secs() * 1_000_000_000 + since_the_epoch.subsec_nanos() as u64;
+        let end_cycles = unsafe { x86::time::rdtscp().0 };
+        let duration = SystemTime::now().duration_since(start_ts).expect("Time went backwards");
+        let duration_in_ns = duration.as_secs() * 1_000_000_000 + duration.subsec_nanos() as u64;
+        let ns_per_cycle = duration_in_ns as f64 / (end_cycles - start_cycles) as f64;
 
-        in_ns as f64 / (end_cycle - start_cycle) as f64
+        ns_per_cycle
     }
 }
 
 impl SharedProfiler {
-    pub fn set_callback(&mut self, perf_callback: demi_callback_t) {
-        self.perf_callback = Some(perf_callback)
+    pub fn set_callback(&mut self, callback: demi_callback_t) {
+        self.callback = Some(callback)
     }
 
     fn find_or_create_new_scope(
         scopes: &mut Vec<SharedScope>,
         name: &'static str,
-        parent_scope: Option<SharedScope>,
-        perf_callback: Option<demi_callback_t>,
+        parent: Option<SharedScope>,
+        callback: Option<demi_callback_t>,
     ) -> SharedScope {
         match scopes.iter().find(|s| s.name == name) {
-            Some(existing_scope) => existing_scope.clone(),
+            Some(s) => s.clone(),
             None => {
-                let new_scope: SharedScope = SharedScope::new(name, parent_scope, perf_callback);
+                let new_scope = SharedScope::new(name, parent, callback);
                 scopes.push(new_scope.clone());
                 new_scope
             },
@@ -128,57 +127,60 @@ impl SharedProfiler {
     /// Create and enter a syncronous scope. Returns a [`Guard`](struct.Guard.html) that should be dropped upon
     /// leaving the scope. Usually, this method will be called by the [`profile`](macro.profile.html) macro,
     /// so it does not need to be used directly.
-    fn create_scope(&mut self, current_scope: &mut Option<SharedScope>, name: &'static str) -> SharedScope {
-        let perf_callback: Option<demi_callback_t> = self.perf_callback;
-        match current_scope {
-            Some(current_scope) => {
-                let parent_scope: Option<SharedScope> = Some(current_scope.clone());
-                Self::find_or_create_new_scope(&mut current_scope.children_scopes, name, parent_scope, perf_callback)
+    fn create_scope(&mut self, current: &mut Option<SharedScope>, name: &'static str) -> SharedScope {
+        let callback = self.callback;
+        match current {
+            Some(current) => {
+                let parent = Some(current.clone());
+                Self::find_or_create_new_scope(&mut current.children, name, parent, callback)
             },
-            None => Self::find_or_create_new_scope(&mut self.root_scopes, name, None, perf_callback),
+            None => Self::find_or_create_new_scope(&mut self.roots, name, None, callback),
         }
     }
 
-    pub fn create_and_enter_sync_scope(&mut self, name: &'static str) -> SyncScopeGuard {
-        let mut current_scope: Option<SharedScope> = self.current_sync_scope.clone();
-        let scope = self.create_scope(&mut current_scope, name);
-        self.current_sync_scope = Some(scope.clone());
-        scope.enter_sync_scope()
+    pub fn enter_sync(&mut self, name: &'static str) -> SyncScopeGuard {
+        let mut current = self.current_sync.clone();
+        let scope = self.create_scope(&mut current, name);
+        self.current_sync = Some(scope.clone());
+        scope.enter_sync()
     }
 
     #[inline]
-    fn leave_sync_scope(&mut self, duration: u64) {
+    fn leave_sync(&mut self, duration: u64) {
         // Note that we could now still be anywhere in the previous profiling
         // tree, so we can not simply reset `self.current`. However, as the
         // frame comes to an end we will eventually leave a root node, at which
         // point `self.current` will be set to `None`.
-        self.current_sync_scope = if let Some(mut current_scope) = self.current_sync_scope.take() {
-            current_scope.add_duration(duration);
-            current_scope.parent_scope.as_ref().cloned()
+        self.current_sync = if let Some(mut current) = self.current_sync.take() {
+            current.add_duration(duration);
+            let parent = current.parent.as_ref().cloned();
+            parent
         } else {
             // This should not happen with proper usage.
-            unreachable!("Called perftools::profiler::leave() while not in any scope");
+            unreachable!("Called perftools::profiler::leave_sync() while not in any scope");
         };
     }
 
-    pub fn create_and_enter_async_scope(&mut self, name: &'static str) {
-        let mut current_scope: Option<SharedScope> = self.current_async_scope.clone();
-        let scope = self.create_scope(&mut current_scope, name);
-        self.current_async_scope = Some(scope.clone());
+    pub fn enter_async(&mut self, name: &'static str) {
+        let mut current = self.current_async.clone();
+        let scope = self.create_scope(&mut current, name);
+        self.current_async = Some(scope.clone());
     }
 
     #[inline]
-    fn leave_async_scope(&mut self, duration: u64) {
-        self.current_async_scope = if let Some(mut current_scope) = self.current_async_scope.take() {
-            current_scope.add_duration(duration);
-            current_scope.parent_scope.as_ref().cloned()
+    fn leave_async(&mut self, duration: u64) {
+        self.current_async = if let Some(mut current) = self.current_async.take() {
+            current.add_duration(duration);
+            let parent = current.parent.as_ref().cloned();
+            parent
         } else {
             // This should not happen with proper usage.
- unreachable!("Called perftools::profiler::leave() while not in any scope"); + unreachable!("Called perftools::profiler::leave_async() while not in any scope"); }; } + fn reset(&mut self) { - self.root_scopes.clear(); + self.roots.clear(); } } @@ -203,10 +205,10 @@ impl DerefMut for SharedProfiler { impl Default for SharedProfiler { fn default() -> Self { Self(SharedObject::new(Profiler { - root_scopes: Vec::new(), - current_sync_scope: None, - current_async_scope: None, - perf_callback: None, + roots: Vec::new(), + current_sync: None, + current_async: None, + callback: None, })) } } diff --git a/src/perftools/profiler/scope.rs b/src/perftools/profiler/scope.rs index bc5fc2d54..eea84df4a 100644 --- a/src/perftools/profiler/scope.rs +++ b/src/perftools/profiler/scope.rs @@ -29,10 +29,10 @@ use ::std::{ /// profiled blocks. pub struct Scope { pub name: &'static str, - pub parent_scope: Option, - pub children_scopes: Vec, + pub parent: Option, + pub children: Vec, /// Callback to report statistics. If this is set to None, we collect averages by default. - pub perf_callback: Option, + pub callback: Option, pub num_calls: usize, /// In total, how much time has been spent in this scope? pub duration_sum: u64, @@ -57,32 +57,28 @@ pub struct AsyncScope<'a, F: Future> { //====================================================================================================================== impl SharedScope { - pub fn new( - name: &'static str, - parent_scope: Option, - perf_callback: Option, - ) -> SharedScope { + pub fn new(name: &'static str, parent: Option, callback: Option) -> SharedScope { Self(SharedObject::new(Scope { name, - parent_scope, - children_scopes: Vec::new(), + parent, + children: Vec::new(), num_calls: 0, duration_sum: 0, - perf_callback, + callback, })) } /// Enter this scope. Returns a `Guard` instance that should be dropped when leaving the scope. #[inline] - pub fn enter_sync_scope(&self) -> SyncScopeGuard { + pub fn enter_sync(&self) -> SyncScopeGuard { SyncScopeGuard::enter() } /// Leave this scope. Called automatically by the `Guard` instance. #[inline] pub fn add_duration(&mut self, duration: u64) { - if let Some(callback_fn) = self.perf_callback { - callback_fn(self.name.as_ptr() as *const i8, self.name.len() as u32, duration); + if let Some(callback) = self.callback { + callback(self.name.as_ptr() as *const i8, self.name.len() as u32, duration); } else { self.num_calls += 1; // Even though this is extremely unlikely, let's not panic on overflow. @@ -91,10 +87,10 @@ impl SharedScope { } pub fn compute_exclusive_duration(&self) -> u64 { - let mut children_total_duration: u64 = 0; + let mut children_total_duration = 0; - for s in &self.children_scopes { - children_total_duration += s.duration_sum; + for scope in &self.children { + children_total_duration += scope.duration_sum; } self.duration_sum - children_total_duration @@ -107,7 +103,7 @@ impl SharedScope { depth: usize, ns_per_cycle: f64, ) -> io::Result<()> { - let duration_sum: f64 = self.duration_sum as f64; + let duration_sum = self.duration_sum as f64; // Write markers. 
let mut markers = "+".to_string(); @@ -126,8 +122,8 @@ impl SharedScope { self.compute_exclusive_duration(), )?; - for child_scope in &self.children_scopes { - child_scope.write_recursive(out, thread_id, depth + 1, ns_per_cycle)?; + for child in &self.children { + child.write_recursive(out, thread_id, depth + 1, ns_per_cycle)?; } Ok(()) @@ -143,7 +139,7 @@ impl<'a, F: Future> AsyncScope<'a, F> { impl SyncScopeGuard { #[inline] pub fn enter() -> Self { - let now: u64 = unsafe { x86::time::rdtscp().0 }; + let now = unsafe { x86::time::rdtscp().0 }; Self { enter_time: now } } } @@ -176,14 +172,14 @@ impl<'a, F: Future> Future for AsyncScope<'a, F> { type Output = F::Output; fn poll(self: Pin<&mut Self>, ctx: &mut Context<'_>) -> Poll { - let self_: &mut Self = self.get_mut(); + let self_ = self.get_mut(); - PROFILER.with(|p| p.clone().create_and_enter_async_scope(self_.name)); - let start: u64 = unsafe { x86::time::rdtscp().0 }; + PROFILER.with(|p| p.clone().enter_async(self_.name)); + let start = unsafe { x86::time::rdtscp().0 }; let result = Future::poll(self_.future.as_mut(), ctx); - let end: u64 = unsafe { x86::time::rdtscp().0 }; - let duration: u64 = end - start; - PROFILER.with(|p| p.clone().leave_async_scope(duration)); + let end = unsafe { x86::time::rdtscp().0 }; + let duration = end - start; + PROFILER.with(|p| p.clone().leave_async(duration)); result } } @@ -191,9 +187,9 @@ impl<'a, F: Future> Future for AsyncScope<'a, F> { impl Drop for SyncScopeGuard { #[inline] fn drop(&mut self) { - let now: u64 = unsafe { x86::time::rdtscp().0 }; - let duration: u64 = now - self.enter_time; + let now = unsafe { x86::time::rdtscp().0 }; + let duration = now - self.enter_time; - PROFILER.with(|p| p.clone().leave_sync_scope(duration)); + PROFILER.with(|p| p.clone().leave_sync(duration)); } } diff --git a/src/perftools/profiler/tests.rs b/src/perftools/profiler/tests.rs index 751c58f7e..ca9d05b22 100644 --- a/src/perftools/profiler/tests.rs +++ b/src/perftools/profiler/tests.rs @@ -23,18 +23,18 @@ fn test_multiple_roots() -> Result<()> { } profiler::PROFILER.with(|p| -> Result<()> { - crate::ensure_eq!(p.root_scopes.len(), 2); + crate::ensure_eq!(p.roots.len(), 2); - for root in p.root_scopes.iter() { - crate::ensure_eq!(root.parent_scope.is_none(), true); - crate::ensure_eq!(root.children_scopes.is_empty(), true); + for root in p.roots.iter() { + crate::ensure_eq!(root.parent.is_none(), true); + crate::ensure_eq!(root.children.is_empty(), true); } - crate::ensure_eq!(p.root_scopes[0].name, "b"); - crate::ensure_eq!(p.root_scopes[1].name, "a"); + crate::ensure_eq!(p.roots[0].name, "b"); + crate::ensure_eq!(p.roots[1].name, "a"); - crate::ensure_eq!(p.root_scopes[0].num_calls, 6); - crate::ensure_eq!(p.root_scopes[1].num_calls, 1); + crate::ensure_eq!(p.roots[0].num_calls, 6); + crate::ensure_eq!(p.roots[1].num_calls, 1); Ok(()) }) @@ -51,21 +51,21 @@ fn test_succ_reuse() -> Result<()> { } } - crate::ensure_eq!(profiler::PROFILER.with(|p| p.root_scopes.len()), 1); + crate::ensure_eq!(profiler::PROFILER.with(|p| p.roots.len()), 1); profiler::PROFILER.with(|p| -> Result<()> { - crate::ensure_eq!(p.root_scopes.len(), 1); + crate::ensure_eq!(p.roots.len(), 1); - let root = p.root_scopes[0].clone(); + let root = p.roots[0].clone(); crate::ensure_eq!(root.name, "a"); - crate::ensure_eq!(root.parent_scope.is_none(), true); - crate::ensure_eq!(root.children_scopes.len(), 1); + crate::ensure_eq!(root.parent.is_none(), true); + crate::ensure_eq!(root.children.len(), 1); crate::ensure_eq!(root.num_calls, 6); 
- let child = root.children_scopes[0].clone(); + let child = root.children[0].clone(); crate::ensure_eq!(child.name, "b"); - crate::ensure_eq!(child.parent_scope.clone().unwrap().name, p.root_scopes[0].clone().name); - crate::ensure_eq!(child.children_scopes.is_empty(), true); + crate::ensure_eq!(child.parent.clone().unwrap().name, p.roots[0].clone().name); + crate::ensure_eq!(child.children.is_empty(), true); crate::ensure_eq!(child.num_calls, 3); Ok(()) @@ -85,15 +85,15 @@ fn test_reset_during_frame() -> Result<()> { profiler::reset(); } - crate::ensure_eq!(profiler::PROFILER.with(|p| p.current_sync_scope.is_some()), true); + crate::ensure_eq!(profiler::PROFILER.with(|p| p.current_sync.is_some()), true); timer!("d"); } } profiler::PROFILER.with(|p| -> Result<()> { - crate::ensure_eq!(p.root_scopes.is_empty(), true); - crate::ensure_eq!(p.current_sync_scope.is_none(), true); + crate::ensure_eq!(p.roots.is_empty(), true); + crate::ensure_eq!(p.current_sync.is_none(), true); Ok(()) }) } @@ -107,9 +107,9 @@ impl Future for DummyCoroutine { fn poll(self: Pin<&mut Self>, _ctx: &mut Context) -> Poll { match profiler::PROFILER.with(|p| -> Result<()> { - crate::ensure_eq!(p.root_scopes.len(), 1); + crate::ensure_eq!(p.roots.len(), 1); - let root = p.root_scopes[0].clone(); + let root = p.roots[0].clone(); crate::ensure_eq!(root.name, "dummy"); crate::ensure_eq!(root.num_calls, self.as_ref().iterations); Ok(()) diff --git a/src/runtime/memory/buffer_pool.rs b/src/runtime/memory/buffer_pool.rs index ce57f1e57..b2ff9e783 100644 --- a/src/runtime/memory/buffer_pool.rs +++ b/src/runtime/memory/buffer_pool.rs @@ -5,8 +5,6 @@ // Imports //====================================================================================================================== -use std::{alloc::LayoutError, num::NonZeroUsize, rc::Rc}; - use crate::{ pal::CPU_DATA_CACHE_LINE_SIZE_IN_BYTES, runtime::memory::{ @@ -14,6 +12,7 @@ use crate::{ memory_pool::MemoryPool, }, }; +use ::std::{alloc::LayoutError, num::NonZeroUsize, rc::Rc}; //====================================================================================================================== // Structures @@ -48,26 +47,22 @@ impl BufferPool { } } -// Unit tests for `BufferPool` type. 
#[cfg(test)] mod tests { - use std::{mem::MaybeUninit, num::NonZeroUsize, ptr::NonNull}; - - use ::anyhow::Result; - use anyhow::{anyhow, ensure}; - use crate::{ ensure_eq, runtime::memory::{BufferPool, DemiBuffer, MetaData}, }; + use ::anyhow::{anyhow, ensure, Result}; + use ::std::{mem::MaybeUninit, num::NonZeroUsize, ptr::NonNull}; #[test] fn get_buffer_from_pool() -> Result<()> { const BUFFER_SIZE: usize = 0x1000; const PAGE_SIZE: usize = 0x80000000; - let mut buffer: Vec> = Vec::with_capacity(BUFFER_SIZE); + let mut buffer = Vec::with_capacity(BUFFER_SIZE); buffer.resize(buffer.capacity(), MaybeUninit::uninit()); - let pool: BufferPool = BufferPool::new(u16::try_from(buffer.len() - std::mem::size_of::())?)?; + let pool = BufferPool::new(u16::try_from(buffer.len() - std::mem::size_of::())?)?; unsafe { pool.pool().populate( @@ -78,7 +73,7 @@ mod tests { ensure_eq!(pool.pool().len(), 1); - let buffer: DemiBuffer = DemiBuffer::new_in_pool(&pool).ok_or(anyhow!("could not create buffer"))?; + let buffer = DemiBuffer::new_in_pool(&pool).ok_or(anyhow!("could not create buffer"))?; ensure_eq!(buffer.len(), BUFFER_SIZE - std::mem::size_of::()); ensure!(pool.pool().is_empty()); diff --git a/src/runtime/memory/demibuffer.rs b/src/runtime/memory/demibuffer.rs index e6c7f1b26..44ee0f535 100644 --- a/src/runtime/memory/demibuffer.rs +++ b/src/runtime/memory/demibuffer.rs @@ -30,7 +30,7 @@ #[cfg(feature = "libdpdk")] use crate::runtime::libdpdk::{ - rte_errno, rte_mbuf, rte_mbuf_from_indirect, rte_mempool, rte_pktmbuf_adj, rte_pktmbuf_clone, rte_pktmbuf_detach, + rte_errno, rte_mbuf, rte_mbuf_from_indirect, rte_pktmbuf_adj, rte_pktmbuf_clone, rte_pktmbuf_detach, rte_pktmbuf_free, rte_pktmbuf_prepend, rte_pktmbuf_trim, }; use crate::{ @@ -215,17 +215,15 @@ impl MetaData { } } - // Increments the reference count and returns the new value. #[inline] fn inc_refcnt(&mut self) -> u16 { self.refcnt += 1; self.refcnt } - // Decrements the reference count and returns the new value. #[inline] fn dec_refcnt(&mut self) -> u16 { - // We should never decrement an already zero reference count. Check this on debug builds. + // We should never decrement an already zero reference count. Check this on debug builds. debug_assert_ne!(self.refcnt, 0); self.refcnt -= 1; self.refcnt @@ -234,7 +232,7 @@ impl MetaData { // Gets the MetaData for the last segment in the buffer chain. #[inline] fn get_last_segment(&mut self) -> &mut MetaData { - let mut md: &mut MetaData = self; + let mut md = self; while md.next.is_some() { // Safety: The call to as_mut is safe, as the pointer is aligned and dereferenceable, and the MetaData // struct it points to is initialized properly. @@ -319,20 +317,14 @@ impl DemiBuffer { // propagate actual allocation failures outward, if we determine that would be helpful. For now, we stick to the // status quo, and assume this allocation never fails. pub fn new(capacity: u16) -> Self { - // Allocate some memory off the heap. - let (metadata_buf, buffer): (&mut MaybeUninit, &mut [MaybeUninit]) = - allocate_metadata_data(capacity); - + let (metadata_buf, buffer) = allocate_metadata_data(capacity); Self::new_from_parts(metadata_buf, buffer.as_mut_ptr(), 0, capacity, None) } /// Creates a new heap-allocated DemiBuffer with reserved headroom. The application cannot use this space but it /// is reserved for Demikernel to use for headers. pub fn new_with_headroom(capacity: u16, headroom: u16) -> Self { - // Allocate some memory off the heap. 
- let (metadata_buf, buffer): (&mut MaybeUninit, &mut [MaybeUninit]) = - allocate_metadata_data(capacity + headroom); - + let (metadata_buf, buffer) = allocate_metadata_data(capacity + headroom); Self::new_from_parts(metadata_buf, buffer.as_mut_ptr(), headroom, capacity, None) } @@ -343,12 +335,11 @@ impl DemiBuffer { /// Possibly this requirement could be relaxed with more buffer reference types, or buffers which carry an explicit /// lifetime. Until a compelling use case arises, this will cap to `'static`. pub fn new_in_pool(pool: &BufferPool) -> Option { - let buffer: PoolBuf = pool.pool().get()?; - let (mut buffer, pool): (NonNull<[MaybeUninit]>, Rc) = PoolBuf::into_raw(buffer); + let buffer = pool.pool().get()?; + let (mut buffer, pool) = PoolBuf::into_raw(buffer); // Safety: the buffer size and alignment requirements are enforced by BufferPool. - let (metadata_buf, buffer): (&mut MaybeUninit, &mut [MaybeUninit]) = - unsafe { split_buffer_for_metadata(buffer.as_mut()) }; + let (metadata_buf, buffer) = unsafe { split_buffer_for_metadata(buffer.as_mut()) }; assert!(buffer.len() <= (u16::MAX as usize)); @@ -369,14 +360,14 @@ impl DemiBuffer { capacity: u16, pool: Option>, ) -> Self { - let buf_addr: *mut u8 = if capacity + headroom > 0 { + let buf_addr = if capacity + headroom > 0 { // TODO: casting the MaybeUninit away can cause UB (when deref'd). Change the exposed data type from // DemiBuffer to better expose un/initialized values. buf_addr.cast() } else { ptr::null_mut() }; - let metadata: NonNull = NonNull::from(metadata_buf.write(MetaData::new(DemiMetaData { + let metadata = NonNull::from(metadata_buf.write(MetaData::new(DemiMetaData { buf_addr, data_off: headroom, refcnt: 1, @@ -392,9 +383,8 @@ impl DemiBuffer { }))); // Embed the buffer type into the lower bits of the pointer. - let tagged: NonNull = metadata.with_addr(metadata.addr() | Tag::Heap); + let tagged = metadata.with_addr(metadata.addr() | Tag::Heap); - // Return the new DemiBuffer. DemiBuffer { tagged_ptr: tagged, _phantom: PhantomData, @@ -411,17 +401,16 @@ impl DemiBuffer { /// Allocates a new DemiBuffer with some headroom and copies the slice contents. pub fn from_slice_with_headroom(slice: &[u8], headroom: usize) -> Result { // Check size of the slice to ensure a single DemiBuffer can hold it. - let size: u16 = if slice.len() + headroom < u16::MAX as usize { + let size = if slice.len() + headroom < u16::MAX as usize { (slice.len() + headroom) as u16 } else { return Err(Fail::new(libc::EINVAL, "slice is larger than a DemiBuffer can hold")); }; - // Allocate some memory off the heap. - let (temp, buffer): (&mut MaybeUninit, &mut [MaybeUninit]) = allocate_metadata_data(size); + let (temp, buffer) = allocate_metadata_data(size); // Point buf_addr at the newly allocated data space (if any). - let buf_addr: *mut u8 = if size == 0 { + let buf_addr = if size == 0 { // No direct data, so don't point buf_addr at anything. null_mut() } else { @@ -435,7 +424,7 @@ impl DemiBuffer { }; // Set field values as appropriate. - let metadata: NonNull = NonNull::from(temp.write(MetaData::new(DemiMetaData { + let metadata = NonNull::from(temp.write(MetaData::new(DemiMetaData { buf_addr, data_off: headroom as u16, refcnt: 1, @@ -449,9 +438,8 @@ impl DemiBuffer { }))); // Embed the buffer type into the lower bits of the pointer. - let tagged: NonNull = metadata.with_addr(metadata.addr() | Tag::Heap); + let tagged = metadata.with_addr(metadata.addr() | Tag::Heap); - // Return the new DemiBuffer. 
Ok(DemiBuffer { tagged_ptr: tagged, _phantom: PhantomData, @@ -475,8 +463,8 @@ impl DemiBuffer { // It is the caller's responsibility to guarantee this, which is why this function is marked "unsafe". pub unsafe fn from_mbuf(mbuf_ptr: *mut rte_mbuf) -> Self { // Convert the raw pointer into a NonNull and add a tag indicating it is a DPDK buffer (i.e. a MBuf). - let temp: NonNull = NonNull::new_unchecked(mbuf_ptr as *mut _); - let tagged: NonNull = temp.with_addr(temp.addr() | Tag::Dpdk); + let temp = NonNull::new_unchecked(mbuf_ptr as *mut _); + let tagged = temp.with_addr(temp.addr() | Tag::Dpdk); DemiBuffer { tagged_ptr: tagged, @@ -503,13 +491,11 @@ impl DemiBuffer { } } - /// Returns `true` if this `DemiBuffer` was allocated off of the heap, and `false` otherwise. pub fn is_heap_allocated(&self) -> bool { self.get_tag() == Tag::Heap } #[cfg(feature = "libdpdk")] - /// Returns `true` if this `DemiBuffer` was allocated by DPDK, and `false` otherwise. pub fn is_dpdk_allocated(&self) -> bool { self.get_tag() == Tag::Dpdk } @@ -539,11 +525,11 @@ impl DemiBuffer { } fn heap_buf_into_direct(self) -> DemiBuffer { - let metadata: &mut MetaData = self.as_metadata(); + let metadata = self.as_metadata(); // Step 1: get the direct buffer. - let offset: isize = -(size_of::() as isize); - let direct: &mut MetaData = unsafe { + let offset = -(size_of::() as isize); + let direct = unsafe { // Safety: The offset call is safe as `offset` is known to be "in bounds" for buf_addr. // Safety: The as_mut call is safe as the pointer is aligned, dereferenceable, and // points to an initialized MetaData instance. @@ -557,8 +543,8 @@ impl DemiBuffer { metadata.ol_flags &= !METADATA_F_INDIRECT; // Step 3: reconstitute the direct DemiBuffer. - let direct: NonNull = NonNull::from(direct); - let tagged: NonNull = direct.with_addr(direct.addr() | Tag::Heap); + let direct = NonNull::from(direct); + let tagged = direct.with_addr(direct.addr() | Tag::Heap); unsafe { DemiBuffer::from_raw(tagged.cast()) } } @@ -566,7 +552,7 @@ impl DemiBuffer { #[cfg(feature = "libdpdk")] unsafe fn dpdk_buf_into_direct(self) -> DemiBuffer { // Step 1: get the direct buffer. - let direct: *mut rte_mbuf = rte_mbuf_from_indirect(self.as_mbuf()); + let direct = rte_mbuf_from_indirect(self.as_mbuf()); // Step 2: detach the indirect buffer. rte_pktmbuf_detach(self.as_mbuf()); @@ -596,7 +582,7 @@ impl DemiBuffer { // TODO: Review having this "match", since MetaData and MBuf are laid out the same, these are equivalent cases. match self.get_tag() { Tag::Heap => { - let metadata: &mut MetaData = self.as_metadata(); + let metadata = self.as_metadata(); if nbytes > metadata.data_len as usize { return Err(Fail::new(libc::EINVAL, "tried to remove more bytes than are present")); } @@ -607,7 +593,7 @@ impl DemiBuffer { }, #[cfg(feature = "libdpdk")] Tag::Dpdk => { - let mbuf: *mut rte_mbuf = self.as_mbuf(); + let mbuf = self.as_mbuf(); unsafe { // Safety: The `mbuf` dereference below is safe, as it is aligned and dereferenceable. if ((*mbuf).data_len as usize) < nbytes { @@ -617,8 +603,8 @@ impl DemiBuffer { // Safety: rte_pktmbuf_adj is a FFI, which is safe since we call it with an actual MBuf pointer. 
if unsafe { rte_pktmbuf_adj(mbuf, nbytes as u16) } == ptr::null_mut() { - let rte_errno: libc::c_int = unsafe { rte_errno() }; - let cause: String = format!("tried to remove more bytes than are present: {:?}", rte_errno); + let rte_errno = unsafe { rte_errno() }; + let cause = format!("tried to remove more bytes than are present: {:?}", rte_errno); warn!("adjust(): {}", cause); return Err(Fail::new(libc::EINVAL, &cause)); } @@ -636,8 +622,8 @@ impl DemiBuffer { // TODO: Review having this "match", since MetaData and MBuf are laid out the same, these are equivalent cases. match self.get_tag() { Tag::Heap => { - let md_first: &mut MetaData = self.as_metadata(); - let md_last: &mut MetaData = md_first.get_last_segment(); + let md_first = self.as_metadata(); + let md_last = md_first.get_last_segment(); if nbytes > md_last.data_len as usize { return Err(Fail::new(libc::EINVAL, "tried to remove more bytes than are present")); @@ -648,7 +634,7 @@ impl DemiBuffer { }, #[cfg(feature = "libdpdk")] Tag::Dpdk => { - let mbuf: *mut rte_mbuf = self.as_mbuf(); + let mbuf = self.as_mbuf(); unsafe { // Safety: The `mbuf` dereference below is safe, as it is aligned and dereferenceable. if ((*mbuf).data_len as usize) < nbytes { @@ -657,9 +643,9 @@ impl DemiBuffer { } // Safety: rte_pktmbuf_trim is a FFI, which is safe since we call it with an actual MBuf pointer. - let rte_errno: libc::c_int = unsafe { rte_pktmbuf_trim(mbuf, nbytes as u16) }; + let rte_errno = unsafe { rte_pktmbuf_trim(mbuf, nbytes as u16) }; if rte_errno != 0 { - let cause: String = format!("tried to remove more bytes than are present: {:?}", rte_errno); + let cause = format!("tried to remove more bytes than are present: {:?}", rte_errno); warn!("trim(): {}", cause); return Err(Fail::new(libc::EINVAL, &cause)); } @@ -673,7 +659,7 @@ impl DemiBuffer { pub fn prepend(&mut self, nbytes: usize) -> Result<(), Fail> { match self.get_tag() { Tag::Heap => { - let metadata: &mut MetaData = self.as_metadata(); + let metadata = self.as_metadata(); if nbytes > metadata.data_off as usize { return Err(Fail::new( libc::EINVAL, @@ -690,14 +676,14 @@ impl DemiBuffer { }, #[cfg(feature = "libdpdk")] Tag::Dpdk => { - let mbuf: *mut rte_mbuf = unsafe { + let mbuf = unsafe { // Safety: rte_pktmbuf_prepend does both sanity and headroom space checks. rte_pktmbuf_prepend(self.as_mbuf(), nbytes as u16) as *mut rte_mbuf }; if mbuf.is_null() { - let rte_errno: libc::c_int = unsafe { rte_errno() }; - let cause: String = format!("tried to prepend more bytes than are allowed: {:?}", rte_errno); + let rte_errno = unsafe { rte_errno() }; + let cause = format!("tried to prepend more bytes than are allowed: {:?}", rte_errno); warn!("prepend(): {}", cause); return Err(Fail::new(libc::EINVAL, &cause)); @@ -765,20 +751,20 @@ impl DemiBuffer { fn split(&mut self, split_front: bool, offset: usize) -> Result { // Check if this is a multi-segment buffer. if self.is_multi_segment() { - let cause: &'static str = "cannot split a multi-segment buffer"; + let cause = "cannot split a multi-segment buffer"; error!("split_front(): {}", cause); return Err(Fail::new(libc::EINVAL, cause)); } // Check if split offset is valid. if self.len() < offset { - let cause: String = format!("cannot split buffer at given offset (offset={:?})", offset); + let cause = format!("cannot split buffer at given offset (offset={:?})", offset); error!("split_front(): {}", &cause); return Err(Fail::new(libc::EINVAL, &cause)); } // Clone the target buffer before any changes are applied. 
- let mut cloned_buf: DemiBuffer = self.clone(); + let mut cloned_buf = self.clone(); if split_front { // Remove data starting at `offset` from the front half buffer (cloned buffer). @@ -802,7 +788,6 @@ impl DemiBuffer { cloned_buf.adjust(offset).unwrap(); } - // Return the cloned buffer. Ok(cloned_buf) } @@ -849,8 +834,7 @@ impl DemiBuffer { #[inline] fn get_ptr(&self) -> NonNull { // Safety: The call to NonZeroUsize::new_unchecked is safe, as its argument is guaranteed to be non-zero. - let address: NonZeroUsize = - unsafe { NonZeroUsize::new_unchecked(usize::from(self.tagged_ptr.addr()) & !Tag::MASK) }; + let address = unsafe { NonZeroUsize::new_unchecked(usize::from(self.tagged_ptr.addr()) & !Tag::MASK) }; self.tagged_ptr.with_addr(address).cast::() } @@ -872,8 +856,8 @@ impl DemiBuffer { // Gets a raw pointer to the DemiBuffer data. fn data_ptr(&self) -> *mut u8 { - let metadata: &mut MetaData = self.as_metadata(); - let buf_ptr: *mut u8 = metadata.buf_addr; + let metadata = self.as_metadata(); + let buf_ptr = metadata.buf_addr; // Safety: The call to offset is safe, as its argument is known to remain within the allocated region. unsafe { buf_ptr.offset(metadata.data_off as isize) } } @@ -890,12 +874,12 @@ impl DemiBuffer { fn is_multi_segment(&self) -> bool { match self.get_tag() { Tag::Heap => { - let md_front: &MetaData = self.as_metadata(); + let md_front = self.as_metadata(); md_front.nb_segs != 1 }, #[cfg(feature = "libdpdk")] Tag::Dpdk => { - let mbuf: *const rte_mbuf = self.as_mbuf(); + let mbuf = self.as_mbuf(); // Safety: The `mbuf` dereferences in this block are safe, as it is aligned and dereferenceable. unsafe { (*mbuf).nb_segs != 1 } }, @@ -910,10 +894,10 @@ impl DemiBuffer { // Allocates the MetaData (plus the space for any directly attached data) for a new heap-allocated DemiBuffer. fn allocate_metadata_data<'a>(direct_data_size: u16) -> (&'a mut MaybeUninit, &'a mut [MaybeUninit]) { // We need space for the MetaData struct, plus any extra memory for directly attached data. - let amount: usize = size_of::() + direct_data_size as usize; + let amount = size_of::() + direct_data_size as usize; // Given our limited allocation amount (u16::MAX) and fixed alignment size, this unwrap cannot panic. - let layout: Layout = Layout::from_size_align(amount, CPU_DATA_CACHE_LINE_SIZE_IN_BYTES).unwrap(); + let layout = Layout::from_size_align(amount, CPU_DATA_CACHE_LINE_SIZE_IN_BYTES).unwrap(); // Safety: This is safe, as we check for a null return value before dereferencing "allocation". let allocation: *mut MaybeUninit = unsafe { alloc(layout) }.cast(); @@ -922,7 +906,7 @@ fn allocate_metadata_data<'a>(direct_data_size: u16) -> (&'a mut MaybeUninit] = unsafe { slice::from_raw_parts_mut(allocation, amount) }; + let buffer = unsafe { slice::from_raw_parts_mut(allocation, amount) }; // Safety: buffer is aligned to CPU_DATA_CACHE_LINE_SIZE (which is overaligned for MetaData) and will always be no // smaller than MetaData. @@ -945,17 +929,16 @@ unsafe fn split_buffer_for_metadata( // Safety: buffer is not null and properly aligned since it comes from a reference. MaybeUninit does not // require initialization. - let metadata: &mut MaybeUninit = - unsafe { &mut *metadata_buf.as_mut_ptr().cast::>() }; + let metadata = unsafe { &mut *metadata_buf.as_mut_ptr().cast::>() }; (metadata, data_buf) } // Frees the MetaData (plus the space for any directly attached data) for a heap-allocated DemiBuffer. 
fn free_metadata_data(mut buffer: NonNull) { - let (amount, pool): (usize, Option>) = { + let (amount, pool) = { // Safety: This is safe, as `buffer` is aligned, dereferenceable, and we don't let `metadata` escape this function. - let metadata: &mut MetaData = unsafe { buffer.as_mut() }; + let metadata = unsafe { buffer.as_mut() }; // Determine the size of the original allocation. // Note that this code currently assumes we're not using a "private data" feature akin to DPDK's. @@ -977,8 +960,8 @@ fn free_metadata_data(mut buffer: NonNull) { // `buffer` will also be valid and dereferenceable. The `MetaData` buffer is created from // `PoolBuf::into_raw` by the constructor, so the buffer may be passed back to `PoolBuf::from_raw`. unsafe { - let pool_layout: Layout = pool.layout(); - let mem_slice: &mut [MaybeUninit] = + let pool_layout = pool.layout(); + let mem_slice = slice::from_raw_parts_mut(buffer.cast::>().as_ptr(), pool_layout.size()); mem::drop(PoolBuf::from_raw(NonNull::from(mem_slice), pool)); } @@ -986,8 +969,8 @@ fn free_metadata_data(mut buffer: NonNull) { None => { // Convert buffer pointer into a raw allocation pointer. - let allocation: *mut u8 = buffer.cast::().as_ptr(); - let layout: Layout = Layout::from_size_align(amount, CPU_DATA_CACHE_LINE_SIZE_IN_BYTES).unwrap(); + let allocation = buffer.cast::().as_ptr(); + let layout = Layout::from_size_align(amount, CPU_DATA_CACHE_LINE_SIZE_IN_BYTES).unwrap(); // Safety: this is safe because we're using the same (de)allocator and Layout used for allocation. unsafe { dealloc(allocation, layout) }; @@ -1011,15 +994,15 @@ impl Clone for DemiBuffer { // Allocate space for a new MetaData struct without any direct data. This will become the clone. // TODO: Pooled MetaData should be reallocated from the pool. - let (head, _): (&mut MaybeUninit, _) = allocate_metadata_data(0); - let mut temp: NonNull> = NonNull::from(&*head); + let (head, _) = allocate_metadata_data(0); + let mut temp = NonNull::from(&*head); // This might be a chain of buffers. If so, we'll walk the list. There is always a first one. - let mut next_entry: Option> = Some(self.get_ptr::()); + let mut next_entry = Some(self.get_ptr::()); while let Some(mut entry) = next_entry { // Safety: This is safe, as `entry` is aligned, dereferenceable, and the MetaData struct it // points to is initialized. - let original: &mut MetaData = unsafe { entry.as_mut() }; + let original = unsafe { entry.as_mut() }; // Remember the next entry in the chain. next_entry = original.next; @@ -1027,10 +1010,10 @@ impl Clone for DemiBuffer { // Initialize the MetaData of the indirect buffer. { // Safety: Safe, as `temp` is aligned, dereferenceable, and `clone` isn't aliased in this block. - let clone: &mut MaybeUninit = unsafe { temp.as_mut() }; + let clone = unsafe { temp.as_mut() }; // Next needs to point to the next entry in the cloned chain, not the original. - let next: Option> = if next_entry.is_none() { + let next = if next_entry.is_none() { None } else { // Allocate space for the next segment's MetaData struct. @@ -1041,11 +1024,10 @@ impl Clone for DemiBuffer { // Add indirect flag to clone for non-empty buffers. Empty buffers don't reference any data, so // aren't indirect. - let ol_flags: u64 = - original.ol_flags | if original.buf_len != 0 { METADATA_F_INDIRECT } else { 0 }; + let ol_flags = original.ol_flags | if original.buf_len != 0 { METADATA_F_INDIRECT } else { 0 }; // Copy other relevant fields from our progenitor. 
- let values: DemiMetaData = DemiMetaData { + let values = DemiMetaData { // Our cloned segment has only one reference (the one we return from this function). refcnt: 1, next, @@ -1079,8 +1061,8 @@ impl Clone for DemiBuffer { } else { // Cloning an indirect buffer. Increment the ref count on the direct buffer with the data. // The direct buffer's MetaData struct should immediately preceed the actual data. - let offset: isize = -(size_of::() as isize); - let direct: &mut MetaData = unsafe { + let offset = -(size_of::() as isize); + let direct = unsafe { // Safety: The offset call is safe as `offset` is known to be "in bounds" for buf_addr. // Safety: The as_mut call is safe as the pointer is aligned, dereferenceable, and // points to an initialized MetaData instance. @@ -1093,10 +1075,9 @@ impl Clone for DemiBuffer { // Embed the buffer type into the lower bits of the pointer. // Safety: head is initialized by the above loop. - let head_ptr: NonNull = NonNull::from(unsafe { head.assume_init_mut() }); - let tagged: NonNull = head_ptr.with_addr(head_ptr.addr() | Tag::Heap); + let head_ptr = NonNull::from(unsafe { head.assume_init_mut() }); + let tagged = head_ptr.with_addr(head_ptr.addr() | Tag::Heap); - // Return the new DemiBuffer. DemiBuffer { tagged_ptr: tagged, _phantom: PhantomData, @@ -1104,16 +1085,16 @@ impl Clone for DemiBuffer { }, #[cfg(feature = "libdpdk")] Tag::Dpdk => unsafe { - let mbuf_ptr: *mut rte_mbuf = self.as_mbuf(); + let mbuf_ptr = self.as_mbuf(); // TODO: This allocates the clone MBuf from the same MBuf pool as the original MBuf. Since the clone // never has any direct data, we could potentially save memory by allocating these from a special pool. // Safety: it is safe to dereference "mbuf_ptr" as it is known to point to a valid MBuf. - let mempool_ptr: *mut rte_mempool = (*mbuf_ptr).pool; + let mempool_ptr = (*mbuf_ptr).pool; // Safety: rte_pktmbuf_clone is a FFI, which is safe to call since we call it with valid arguments and // properly check its return value for null (failure) before using. - let mbuf_ptr_clone: *mut rte_mbuf = rte_pktmbuf_clone(mbuf_ptr, mempool_ptr); + let mbuf_ptr_clone = rte_pktmbuf_clone(mbuf_ptr, mempool_ptr); if mbuf_ptr_clone.is_null() { - let rte_errno: libc::c_int = rte_errno(); + let rte_errno = rte_errno(); panic!("failed to clone mbuf: {:?}", rte_errno); } @@ -1155,7 +1136,7 @@ impl Drop for DemiBuffer { match self.get_tag() { Tag::Heap => { // This might be a chain of buffers. If so, we'll walk the list. - let mut next_entry: Option> = Some(self.get_ptr()); + let mut next_entry = Some(self.get_ptr()); while let Some(mut entry) = next_entry { // Safety: This is safe, as `entry` is aligned, dereferenceable, and the MetaData struct it points // to is initialized. @@ -1171,8 +1152,8 @@ impl Drop for DemiBuffer { // See if the data is directly attached, or indirectly attached. if metadata.ol_flags & METADATA_F_INDIRECT != 0 { // This is an indirect buffer. Find the direct buffer that holds the actual data. - let offset: isize = -(size_of::() as isize); - let direct: &mut MetaData = unsafe { + let offset = -(size_of::() as isize); + let direct = unsafe { // Safety: The offset call is safe as `offset` is known to be "in bounds" for buf_addr. // Safety: The as_mut call is safe as the pointer is aligned, dereferenceable, and // points to an initialized MetaData instance. @@ -1192,7 +1173,7 @@ impl Drop for DemiBuffer { // Convert to NonNull type. // Safety: The NonNull::new_unchecked call is safe, as `direct` is known to be non-null. 
- let allocation: NonNull = unsafe { NonNull::new_unchecked(direct as *mut _) }; + let allocation = unsafe { NonNull::new_unchecked(direct as *mut _) }; // Free the direct buffer. free_metadata_data(allocation); @@ -1206,7 +1187,7 @@ impl Drop for DemiBuffer { }, #[cfg(feature = "libdpdk")] Tag::Dpdk => { - let mbuf_ptr: *mut rte_mbuf = self.as_mbuf(); + let mbuf_ptr = self.as_mbuf(); // Safety: This is safe, as mbuf_ptr does indeed point to a valid MBuf. unsafe { // Note: This DPDK routine properly handles MBuf chains, as well as indirect, and external MBufs. @@ -1223,21 +1204,20 @@ impl TryFrom<&[u8]> for DemiBuffer { fn try_from(slice: &[u8]) -> Result { // Check size of the slice to ensure a single DemiBuffer can hold it. - let size: u16 = if slice.len() < u16::MAX as usize { + let size = if slice.len() < u16::MAX as usize { slice.len() as u16 } else { return Err(Fail::new(libc::EINVAL, "slice is larger than a DemiBuffer can hold")); }; - // Allocate some memory off the heap. - let (temp, buffer): (&mut MaybeUninit, &mut [MaybeUninit]) = allocate_metadata_data(size); + let (temp, buffer) = allocate_metadata_data(size); // Point buf_addr at the newly allocated data space (if any). - let buf_addr: *mut u8 = if size == 0 { + let buf_addr = if size == 0 { // No direct data, so don't point buf_addr at anything. null_mut() } else { - let buf_addr: *mut u8 = buffer.as_mut_ptr().cast(); + let buf_addr = buffer.as_mut_ptr().cast(); // Copy the data from the slice into the DemiBuffer. // Safety: This is safe, as the src/dst argument pointers are valid for reads/writes of `size` bytes, @@ -1247,7 +1227,7 @@ impl TryFrom<&[u8]> for DemiBuffer { }; // Set field values as appropriate. - let metadata: NonNull = NonNull::from(temp.write(MetaData::new(DemiMetaData { + let metadata = NonNull::from(temp.write(MetaData::new(DemiMetaData { buf_addr, data_off: 0, refcnt: 1, @@ -1261,9 +1241,8 @@ impl TryFrom<&[u8]> for DemiBuffer { }))); // Embed the buffer type into the lower bits of the pointer. - let tagged: NonNull = metadata.with_addr(metadata.addr() | Tag::Heap); + let tagged = metadata.with_addr(metadata.addr() | Tag::Heap); - // Return the new DemiBuffer. Ok(DemiBuffer { tagged_ptr: tagged, _phantom: PhantomData, @@ -1283,43 +1262,43 @@ mod tests { #[test] fn basic() -> Result<()> { // Create a new (heap-allocated) `DemiBuffer` with a 42 byte data area. - let mut buf: DemiBuffer = DemiBuffer::new(42); - crate::ensure_eq!(buf.is_heap_allocated(), true); - crate::ensure_eq!(buf.len(), 42); + let mut buffer = DemiBuffer::new(42); + crate::ensure_eq!(buffer.is_heap_allocated(), true); + crate::ensure_eq!(buffer.len(), 42); // Remove 7 bytes from the beginning of the data area. Length should now be 35. - crate::ensure_eq!(buf.adjust(7).is_ok(), true); - crate::ensure_eq!(buf.len(), 35); + crate::ensure_eq!(buffer.adjust(7).is_ok(), true); + crate::ensure_eq!(buffer.len(), 35); // Remove 7 bytes from the end of the data area. Length should now be 28. - crate::ensure_eq!(buf.trim(7).is_ok(), true); - crate::ensure_eq!(buf.len(), 28); + crate::ensure_eq!(buffer.trim(7).is_ok(), true); + crate::ensure_eq!(buffer.len(), 28); // Verify bad requests actually fail. 
- crate::ensure_eq!(buf.adjust(30).is_err(), true); - crate::ensure_eq!(buf.trim(30).is_err(), true); - crate::ensure_eq!(buf.prepend(30).is_err(), true); + crate::ensure_eq!(buffer.adjust(30).is_err(), true); + crate::ensure_eq!(buffer.trim(30).is_err(), true); + crate::ensure_eq!(buffer.prepend(30).is_err(), true); Ok(()) } #[test] fn headroom() -> Result<()> { - let mut buf: DemiBuffer = DemiBuffer::new_with_headroom(0, 42); - crate::ensure_eq!(buf.is_heap_allocated(), true); - crate::ensure_eq!(buf.len(), 0); + let mut buffer = DemiBuffer::new_with_headroom(0, 42); + crate::ensure_eq!(buffer.is_heap_allocated(), true); + crate::ensure_eq!(buffer.len(), 0); // Add 7 bytes to the beginning of the data area. Length should now be 7. - crate::ensure_eq!(buf.prepend(7).is_ok(), true); - crate::ensure_eq!(buf.len(), 7); + crate::ensure_eq!(buffer.prepend(7).is_ok(), true); + crate::ensure_eq!(buffer.len(), 7); // Remove 7 bytes from the end of the data area. Length should now be 0. - crate::ensure_eq!(buf.trim(7).is_ok(), true); - crate::ensure_eq!(buf.len(), 0); + crate::ensure_eq!(buffer.trim(7).is_ok(), true); + crate::ensure_eq!(buffer.len(), 0); // Verify bad requests actually fail. - crate::ensure_eq!(buf.adjust(30).is_err(), true); - crate::ensure_eq!(buf.trim(30).is_err(), true); + crate::ensure_eq!(buffer.adjust(30).is_err(), true); + crate::ensure_eq!(buffer.trim(30).is_err(), true); Ok(()) } @@ -1328,36 +1307,34 @@ mod tests { #[test] fn advanced() -> Result<()> { fn clone_me(buf: DemiBuffer) -> DemiBuffer { - // Clone and return the buffer. buf.clone() // `buf` should be dropped here. } fn convert_to_token(buf: DemiBuffer) -> NonNull { - // Convert the buffer into a raw token. buf.into_raw() // `buf` was consumed by into_raw(), so it isn't dropped here. The token holds the reference. } // Create a buffer and clone it. - let fortytwo: DemiBuffer = DemiBuffer::new(42); + let fortytwo = DemiBuffer::new(42); crate::ensure_eq!(fortytwo.len(), 42); - let clone: DemiBuffer = clone_me(fortytwo); + let clone = clone_me(fortytwo); crate::ensure_eq!(clone.len(), 42); // Convert a buffer into a raw token and bring it back. - let token: NonNull = convert_to_token(clone); - let reconstituted: DemiBuffer = unsafe { DemiBuffer::from_raw(token) }; + let token = convert_to_token(clone); + let reconstituted = unsafe { DemiBuffer::from_raw(token) }; crate::ensure_eq!(reconstituted.len(), 42); // Create a zero-sized buffer. - let zero: DemiBuffer = DemiBuffer::new(0); + let zero = DemiBuffer::new(0); crate::ensure_eq!(zero.len(), 0); // Clone it, and keep the original around. - let clone: DemiBuffer = zero.clone(); + let clone = zero.clone(); crate::ensure_eq!(clone.len(), 0); // Clone the clone, and drop the first clone. - let another: DemiBuffer = clone_me(clone); + let another = clone_me(clone); crate::ensure_eq!(another.len(), 0); Ok(()) @@ -1367,10 +1344,10 @@ mod tests { #[test] fn split_back() -> Result<()> { // Create a new (heap-allocated) `DemiBuffer` by copying a slice of a `String`. - let str: &'static str = "word one two three four five six seven eight nine"; - let slice: &[u8] = str.as_bytes(); + let str = "word one two three four five six seven eight nine"; + let slice = str.as_bytes(); // `DemiBuffer::from_slice` shouldn't fail, as we passed it a valid slice of a `DemiBuffer`-allowable length. 
- let mut buf: DemiBuffer = match DemiBuffer::from_slice(slice) { + let mut buffer = match DemiBuffer::from_slice(slice) { Ok(buf) => buf, Err(e) => anyhow::bail!( "DemiBuffer::from_slice should return a DemiBuffer for this slice: {}", @@ -1379,37 +1356,37 @@ mod tests { }; // The `DemiBuffer` data length should equal the original string length. - crate::ensure_eq!(buf.len(), str.len()); + crate::ensure_eq!(buffer.len(), str.len()); // The `DemiBuffer` data (content) should match that of the original string. - crate::ensure_eq!(&*buf, slice); + crate::ensure_eq!(&*buffer, slice); // Split this `DemiBuffer` into two. // `DemiBuffer::split_back` shouldn't fail, as we passed it a valid offset. - let mut split_buf: DemiBuffer = match buf.split_back(24) { + let mut split_buf = match buffer.split_back(24) { Ok(buf) => buf, Err(e) => anyhow::bail!("DemiBuffer::split_back shouldn't fail for this offset: {}", e), }; - crate::ensure_eq!(buf.len(), 24); + crate::ensure_eq!(buffer.len(), 24); crate::ensure_eq!(split_buf.len(), 25); // Compare contents. - crate::ensure_eq!(&buf[..], &str.as_bytes()[..24]); + crate::ensure_eq!(&buffer[..], &str.as_bytes()[..24]); crate::ensure_eq!(&split_buf[..], &str.as_bytes()[24..]); // Split another `DemiBuffer` off of the already-split-off one. // `DemiBuffer::split_back` shouldn't fail, as we passed it a valid offset. - let another_buf: DemiBuffer = match split_buf.split_back(9) { + let another_buf = match split_buf.split_back(9) { Ok(buf) => buf, Err(e) => anyhow::bail!("DemiBuffer::split_back shouldn't fail for this offset: {}", e), }; - crate::ensure_eq!(buf.len(), 24); + crate::ensure_eq!(buffer.len(), 24); crate::ensure_eq!(split_buf.len(), 9); crate::ensure_eq!(another_buf.len(), 16); // Compare contents (including the unaffected original to ensure that it is actually unaffected). - crate::ensure_eq!(&buf[..], &str.as_bytes()[..24]); + crate::ensure_eq!(&buffer[..], &str.as_bytes()[..24]); crate::ensure_eq!(&split_buf[..], &str.as_bytes()[24..33]); crate::ensure_eq!(&another_buf[..], &str.as_bytes()[33..]); @@ -1420,10 +1397,10 @@ mod tests { #[test] fn split_front() -> Result<()> { // Create a new (heap-allocated) `DemiBuffer` by copying a slice of a `String`. - let str: &'static str = "word one two three four five six seven eight nine"; - let slice: &[u8] = str.as_bytes(); + let str = "word one two three four five six seven eight nine"; + let slice = str.as_bytes(); // `DemiBuffer::from_slice` shouldn't fail, as we passed it a valid slice of a `DemiBuffer`-allowable length. - let mut buf: DemiBuffer = match DemiBuffer::from_slice(slice) { + let mut buffer = match DemiBuffer::from_slice(slice) { Ok(buf) => buf, Err(e) => anyhow::bail!( "DemiBuffer::from_slice should return a DemiBuffer for this slice: {}", @@ -1432,36 +1409,36 @@ mod tests { }; // The `DemiBuffer` data length should equal the original string length. - crate::ensure_eq!(buf.len(), str.len()); + crate::ensure_eq!(buffer.len(), str.len()); // The `DemiBuffer` data (content) should match that of the original string. - crate::ensure_eq!(&*buf, slice); + crate::ensure_eq!(&*buffer, slice); // Split this `DemiBuffer` into two. // `DemiBuffer::split_off` shouldn't fail, as we passed it a valid offset. 
- let mut split_buf: DemiBuffer = match buf.split_front(24) { + let mut split_buf = match buffer.split_front(24) { Ok(buf) => buf, Err(e) => anyhow::bail!("DemiBuffer::split_off shouldn't fail for this offset: {}", e), }; - crate::ensure_eq!(buf.len(), 25); + crate::ensure_eq!(buffer.len(), 25); crate::ensure_eq!(split_buf.len(), 24); // Compare contents. - crate::ensure_eq!(&buf[..], &str.as_bytes()[24..]); + crate::ensure_eq!(&buffer[..], &str.as_bytes()[24..]); crate::ensure_eq!(&split_buf[..], &str.as_bytes()[..24]); // Split another `DemiBuffer` off of the already-split-off one. // `DemiBuffer::split_off` shouldn't fail, as we passed it a valid offset. - let another_buf: DemiBuffer = match split_buf.split_front(9) { + let another_buf = match split_buf.split_front(9) { Ok(buf) => buf, Err(e) => anyhow::bail!("DemiBuffer::split_off shouldn't fail for this offset: {}", e), }; - crate::ensure_eq!(buf.len(), 25); + crate::ensure_eq!(buffer.len(), 25); crate::ensure_eq!(split_buf.len(), 15); crate::ensure_eq!(another_buf.len(), 9); // Compare contents (including the unaffected original to ensure that it is actually unaffected). - crate::ensure_eq!(&buf[..], &str.as_bytes()[24..]); + crate::ensure_eq!(&buffer[..], &str.as_bytes()[24..]); crate::ensure_eq!(&split_buf[..], &str.as_bytes()[9..24]); crate::ensure_eq!(&another_buf[..], &str.as_bytes()[..9]); diff --git a/src/runtime/memory/memory_pool.rs b/src/runtime/memory/memory_pool.rs index ba5c7c9dd..3ef0a3966 100644 --- a/src/runtime/memory/memory_pool.rs +++ b/src/runtime/memory/memory_pool.rs @@ -81,14 +81,10 @@ impl MemoryPool { /// Get one buffer from the pool. If no buffers remain, returns None. pub fn get(self: &Rc) -> Option { - let buffers: &mut Vec]>> = unsafe { &mut *self.buffers.get() }; - let pool: Rc = self.clone(); + let buffers = unsafe { &mut *self.buffers.get() }; + let pool = self.clone(); buffers.pop().map(|buffer: NonNull<[MaybeUninit]>| { - trace!( - "get: buffer = {:?}, pool = {:?}", - buffer.as_ptr(), - self.as_ref() as *const _ - ); + trace!("get = {:?}, pool = {:?}", buffer.as_ptr(), self.as_ref() as *const _); PoolBuf { buffer, pool } }) } @@ -96,14 +92,14 @@ impl MemoryPool { /// Return a buffer to the pool. fn return_buffer(self: &Rc, buffer: NonNull<[MaybeUninit]>) { trace!( - "return_buffer: buffer = {:?}, pool = {:?}", + "return_buffer = {:?}, pool = {:?}", buffer.as_ptr(), self.as_ref() as *const _ ); // Safety: buffers is only granted a &mut alias during the methods of this class. As long as these methods are // neither called asynchronously nor nested, aliasing is obeyed. - let buffers: &mut Vec]>> = unsafe { &mut *self.buffers.get() }; + let buffers = unsafe { &mut *self.buffers.get() }; buffers.push(buffer); } @@ -118,11 +114,11 @@ impl MemoryPool { mut buffer: NonNull<[MaybeUninit]>, page_size: NonZeroUsize, ) -> Result]>, Fail> { - let mut iter: PackingIterator = PackingIterator::new(unsafe { buffer.as_mut() }, page_size, self.buf_layout)?; + let mut iter = PackingIterator::new(unsafe { buffer.as_mut() }, page_size, self.buf_layout)?; // Safety: buffers is only granted a &mut alias during the methods of this class. As long as these methods are // neither called asynchronously nor nested, aliasing is obeyed. 
- let buffers: &mut Vec]>> = unsafe { &mut *self.buffers.get() }; + let buffers = unsafe { &mut *self.buffers.get() }; buffers.extend(std::iter::from_fn(|| iter.next()).map(NonNull::from)); Ok(NonNull::from(iter.into_slice())) @@ -132,7 +128,7 @@ impl MemoryPool { pub fn len(self: &Rc) -> usize { // Safety: buffers is only granted a &mut alias during the methods of this class. As long as these methods are // neither called asynchronously nor nested, aliasing is obeyed. - let buffers: &Vec]>> = unsafe { &*self.buffers.get() }; + let buffers = unsafe { &*self.buffers.get() }; buffers.len() } @@ -155,10 +151,10 @@ impl PoolBuf { /// /// Note that this is an associated function to match idioms in e.g., [`Box::into_raw`]. pub fn into_raw(b: PoolBuf) -> (NonNull<[MaybeUninit]>, Rc) { - let b: ManuallyDrop = ManuallyDrop::new(b); + let b = ManuallyDrop::new(b); // Safety: pool field is valid and readable. - let pool: Rc = unsafe { std::ptr::read(&b.pool) }; + let pool = unsafe { std::ptr::read(&b.pool) }; (b.buffer, pool) } @@ -197,10 +193,10 @@ impl BufferCursor { pub fn take_at_most<'a>(&mut self, bytes: usize) -> &'a mut [MaybeUninit] { debug_assert!(bytes <= isize::MAX as usize); - let bytes: usize = std::cmp::min(bytes, self.len); + let bytes = std::cmp::min(bytes, self.len); // Safety: the offset from cursor is within the originally allocated span and not larger than isize::MAX. - let result: &mut [MaybeUninit] = unsafe { std::slice::from_raw_parts_mut(self.cursor.as_ptr(), bytes) }; + let result = unsafe { std::slice::from_raw_parts_mut(self.cursor.as_ptr(), bytes) }; self.cursor = unsafe { NonNull::new_unchecked(self.cursor.as_ptr().add(bytes)) }; self.len -= bytes; result @@ -215,7 +211,7 @@ impl BufferCursor { /// Align the cursor to `align`, skipping at most align bytes. Returns true iff the cursor is aligned to `align` and /// points to at least one byte. pub fn skip_to_align(&mut self, align: usize) -> bool { - let bytes: usize = self.cursor.as_ptr().align_offset(align); + let bytes = self.cursor.as_ptr().align_offset(align); self.skip(bytes) } @@ -299,7 +295,7 @@ impl<'a> Iterator for PackingIterator<'a> { fn next(&mut self) -> Option<&'a mut [MaybeUninit]> { // Reborrow cursor into a temporary so we can back out our changes if we fail. - let mut temp: BufferCursor = self.cursor.reborrow(); + let mut temp = self.cursor.reborrow(); if !temp.skip_to_align(self.layout.align()) { return None; @@ -307,7 +303,7 @@ impl<'a> Iterator for PackingIterator<'a> { // The number of bytes required to be in a page to span the minimum number of pages. The algorithm here // prioritizes minimizing the number of pages per object, which can result in sparse "packing". - let req_bytes_in_page: usize = self.layout.size() % self.page_size; + let req_bytes_in_page = self.layout.size() % self.page_size; // Check how many bytes left in the page; see if we need to realign to reduce page spanning. match temp.next_align_offset(self.page_size) { @@ -340,10 +336,9 @@ impl<'a> Iterator for PackingIterator<'a> { // Unit Tests //====================================================================================================================== -// Unit tests for `BufferPool` type. 
#[cfg(test)] mod tests { - use std::{mem::MaybeUninit, num::NonZeroUsize, ptr::NonNull, rc::Rc}; + use std::{mem::MaybeUninit, num::NonZeroUsize, ptr::NonNull}; use ::anyhow::{anyhow, Result}; use anyhow::ensure; @@ -360,7 +355,7 @@ mod tests { store.reserve(total_size); store.extend(std::iter::repeat_n(MaybeUninit::::zeroed(), total_size)); - let align_bytes: usize = store.as_ptr().align_offset(page_size); + let align_bytes = store.as_ptr().align_offset(page_size); assert!(align_bytes + alloc_size <= store.len()); &mut store.as_mut_slice()[align_bytes..alloc_size + align_bytes] } @@ -378,23 +373,23 @@ mod tests { } fn run_basic_test(settings: BasicTestSettings, results: BasicTestResults) -> Result<()> { - let page_size: NonZeroUsize = NonZeroUsize::new(settings.page_size).ok_or(anyhow!("bad page size"))?; - let buf_size_ea: NonZeroUsize = NonZeroUsize::new(settings.buf_size_ea).ok_or(anyhow!("bad buffer size"))?; - let buf_align: NonZeroUsize = NonZeroUsize::new(settings.buf_align).ok_or(anyhow!("bad buffer alignment"))?; + let page_size = NonZeroUsize::new(settings.page_size).ok_or(anyhow!("bad page size"))?; + let buf_size_ea = NonZeroUsize::new(settings.buf_size_ea).ok_or(anyhow!("bad buffer size"))?; + let buf_align = NonZeroUsize::new(settings.buf_align).ok_or(anyhow!("bad buffer alignment"))?; - let mut store: Vec> = Vec::new(); - let buffer: &mut [MaybeUninit] = alloc_page_buf(settings.page_size, settings.pool_size, &mut store); - let pool: Rc = MemoryPool::new(buf_size_ea, buf_align)?; + let mut store = Vec::new(); + let buffer = alloc_page_buf(settings.page_size, settings.pool_size, &mut store); + let pool = MemoryPool::new(buf_size_ea, buf_align)?; ensure_eq!(pool.len(), 0); ensure!(pool.get().is_none()); - let remaining: &mut [MaybeUninit] = unsafe { pool.populate(NonNull::from(buffer), page_size)?.as_mut() }; + let remaining = unsafe { pool.populate(NonNull::from(buffer), page_size)?.as_mut() }; ensure_eq!(remaining.len(), results.bytes_left_over); ensure_eq!(pool.len(), results.number_of_buffers); - let mut bufs: Vec> = Vec::from_iter(std::iter::from_fn(|| Some(pool.get())).take(pool.len())); + let mut bufs = Vec::from_iter(std::iter::from_fn(|| Some(pool.get())).take(pool.len())); ensure_eq!(bufs.len(), results.number_of_buffers); ensure!(bufs.iter().all(|o: &Option<_>| o.is_some())); @@ -410,16 +405,16 @@ mod tests { // NB if the buffer size is a factor or multiple of the page size, no bytes will be wasted at the end of the // page. - let span: usize = if settings.buf_size_ea.is_power_of_two() { + let span = if settings.buf_size_ea.is_power_of_two() { 0 } else { settings.buf_size_ea % settings.page_size }; - let align: usize = std::cmp::max(settings.buf_size_ea, settings.buf_align); - let mut last_buffer_ptr: usize = bufs[0].as_ref().unwrap().as_ptr().addr() - align; + let align = std::cmp::max(settings.buf_size_ea, settings.buf_align); + let mut last_buffer_ptr = bufs[0].as_ref().unwrap().as_ptr().addr() - align; for buf_holder in bufs.iter_mut() { - let mut buf: PoolBuf = buf_holder.take().unwrap(); + let mut buf = buf_holder.take().unwrap(); ensure_eq!(buf.len(), buf_size_ea.get()); ensure!( @@ -434,7 +429,7 @@ mod tests { // NB MemoryPool does not guarantee LIFO, but since the pool only has one buffer in it, it must be the same // buffer. 
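The `alloc_page_buf` test helper above obtains a page-aligned region without a custom allocator by over-allocating a `Vec` and skipping forward with `pointer::align_offset`. A stand-alone sketch of the same trick, with illustrative sizes rather than the test's settings:

use std::mem::MaybeUninit;

// Over-allocate by one page, then skip to the first page boundary so the
// returned window starts page-aligned, in the spirit of the helper above.
fn page_aligned_window(
    page_size: usize,
    want: usize,
    store: &mut Vec<MaybeUninit<u8>>,
) -> &mut [MaybeUninit<u8>] {
    store.clear();
    store.resize(want + page_size, MaybeUninit::zeroed());
    let skip = store.as_ptr().align_offset(page_size);
    assert!(skip + want <= store.len());
    &mut store[skip..skip + want]
}

fn main() {
    let mut store = Vec::new();
    let window = page_aligned_window(4096, 8192, &mut store);
    assert_eq!(window.as_ptr().align_offset(4096), 0);
    assert_eq!(window.len(), 8192);
}

Over-allocating by one page leaves room for whatever offset `align_offset` reports; the assert guards the degenerate case where no aligned offset can be found.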
- let buf: PoolBuf = pool.get().ok_or(anyhow!("pool should not be empty"))?; + let buf = pool.get().ok_or(anyhow!("pool should not be empty"))?; ensure_eq!(expected, unsafe { std::slice::from_raw_parts(buf.as_ptr().cast::(), buf.len()) }); diff --git a/src/runtime/memory/mod.rs b/src/runtime/memory/mod.rs index 195fc9490..f7402688c 100644 --- a/src/runtime/memory/mod.rs +++ b/src/runtime/memory/mod.rs @@ -70,7 +70,7 @@ pub fn sgaalloc(size: usize, mem_alloc: &M) -> Result Res } // Calculate the amount the new starting address is ahead of the old. And then adjust `clone` to match. - let adjustment_amount: usize = sga_data.addr() - clone_data.addr(); + let adjustment_amount = sga_data.addr() - clone_data.addr(); clone.adjust(adjustment_amount)?; // An adjustment above would have reduced clone.len() by the adjustment amount. @@ -173,7 +173,7 @@ fn check_demi_buf_limits(sga_seg: &demi_sgaseg_t, clone: &mut DemiBuffer) -> Res debug_assert_eq!(clone_len, clone.len()); // Trim the clone down to size. - let trim_amount: usize = clone_len - sga_len; + let trim_amount = clone_len - sga_len; clone.trim(trim_amount)?; } Ok(()) diff --git a/src/runtime/network/socket/state.rs b/src/runtime/network/socket/state.rs index 427f390a2..273fa9f91 100644 --- a/src/runtime/network/socket/state.rs +++ b/src/runtime/network/socket/state.rs @@ -89,7 +89,7 @@ impl SocketStateMachine { } pub fn listen(&mut self) { - let new_state: SocketState = match self.current.get() { + let new_state = match self.current.get() { SocketState::Bound(local) => SocketState::PassiveListening { local }, _ => return, }; @@ -131,7 +131,7 @@ impl SocketStateMachine { } pub fn connecting(&mut self, remote: SocketAddr) { - let local: Option = match self.current.get() { + let local = match self.current.get() { SocketState::Bound(local) => Some(local), _ => None, }; @@ -139,7 +139,7 @@ impl SocketStateMachine { } pub fn connected(&mut self, remote: SocketAddr) { - let local: Option = match self.current.get() { + let local = match self.current.get() { SocketState::Bound(local) => Some(local), SocketState::ActiveConnecting { local, remote: _ } => local, _ => None, diff --git a/src/runtime/queue/mod.rs b/src/runtime/queue/mod.rs index fbb91a5f6..486b6e874 100644 --- a/src/runtime/queue/mod.rs +++ b/src/runtime/queue/mod.rs @@ -58,12 +58,10 @@ pub struct IoQueueTable { // Associated Functions //====================================================================================================================== -/// Associated functions for I/O queue descriptors tables. impl IoQueueTable { - /// Allocates a new entry in the target I/O queue descriptors table. pub fn alloc(&mut self, queue: T) -> QDesc { - let index: usize = self.table.insert(Box::new(queue)); - let qd: QDesc = expect_some!( + let index = self.table.insert(Box::new(queue)); + let qd = expect_some!( self.qd_to_offset.insert_with_new_id(InternalId(index)), "should be able to allocate an id" ); @@ -71,7 +69,6 @@ impl IoQueueTable { qd } - /// Gets the type of the queue. pub fn get_type(&self, qd: &QDesc) -> Result { Ok(self.get_queue_ref(qd)?.get_qtype()) } @@ -88,10 +85,10 @@ impl IoQueueTable { /// Releases the entry associated with an I/O queue descriptor. 
pub fn free(&mut self, qd: &QDesc) -> Result { - let internal_id: InternalId = match self.qd_to_offset.remove(qd) { + let internal_id = match self.qd_to_offset.remove(qd) { Some(id) => id, None => { - let cause: String = format!("invalid queue descriptor (qd={:?})", qd); + let cause = format!("invalid queue descriptor (qd={:?})", qd); error!("free(): {}", &cause); return Err(Fail::new(libc::EBADF, &cause)); }, @@ -116,7 +113,7 @@ impl IoQueueTable { } } - let cause: String = format!("invalid queue descriptor (qd={:?})", qd); + let cause = format!("invalid queue descriptor (qd={:?})", qd); error!("get(): {}", &cause); Err(Fail::new(libc::EBADF, &cause)) } @@ -128,7 +125,7 @@ impl IoQueueTable { } } - let cause: String = format!("invalid queue descriptor (qd={:?})", qd); + let cause = format!("invalid queue descriptor (qd={:?})", qd); error!("get(): {}", &cause); Err(Fail::new(libc::EBADF, &cause)) } @@ -141,14 +138,14 @@ impl IoQueueTable { /// Downcasts a [IoQueue] reference to a concrete queue type reference `&T`. pub fn downcast_queue_ptr(boxed_queue_ptr: &Box) -> Result<&T, Fail> { // 1. Get reference to queue inside the box. - let queue_ptr: &dyn IoQueue = boxed_queue_ptr.as_ref(); + let queue_ptr = boxed_queue_ptr.as_ref(); // 2. Cast that reference to a void pointer for downcasting. - let void_ptr: &dyn Any = queue_ptr.as_any_ref(); + let void_ptr = queue_ptr.as_any_ref(); // 3. Downcast to concrete type T match void_ptr.downcast_ref::() { Some(ptr) => Ok(ptr), None => { - let cause: &'static str = "invalid queue type"; + let cause = "invalid queue type"; error!("downcast_queue_ptr(): {}", cause); Err(Fail::new(libc::EINVAL, cause)) }, @@ -157,14 +154,14 @@ pub fn downcast_queue_ptr(boxed_queue_ptr: &Box) -> Res pub fn downcast_mut_ptr(boxed_queue_ptr: &mut Box) -> Result<&mut T, Fail> { // 1. Get reference to queue inside the box. - let queue_ptr: &mut dyn IoQueue = boxed_queue_ptr.as_mut(); + let queue_ptr = boxed_queue_ptr.as_mut(); // 2. Cast that reference to a void pointer for downcasting. - let void_ptr: &mut dyn Any = queue_ptr.as_any_mut(); + let void_ptr = queue_ptr.as_any_mut(); // 3. 
Downcast to concrete type T match void_ptr.downcast_mut::() { Some(ptr) => Ok(ptr), None => { - let cause: &'static str = "invalid queue type"; + let cause = "invalid queue type"; error!("downcast_mut_ptr(): {}", cause); Err(Fail::new(libc::EINVAL, cause)) }, @@ -177,7 +174,7 @@ pub fn downcast_queue(boxed_queue: Box) -> Result() { Ok(queue) => Ok(*queue), Err(_) => { - let cause: &'static str = "invalid queue type"; + let cause = "invalid queue type"; error!("downcast_queue(): {}", cause); Err(Fail::new(libc::EINVAL, cause)) }, @@ -238,7 +235,7 @@ mod tests { use crate::{ expect_ok, runtime::{IoQueue, IoQueueTable}, - QDesc, QType, + QType, }; use ::std::any::Any; use ::test::{black_box, Bencher}; @@ -264,12 +261,12 @@ mod tests { #[bench] fn alloc_free_bench(b: &mut Bencher) { - let mut ioqueue_table: IoQueueTable = IoQueueTable::default(); + let mut ioqueue_table = IoQueueTable::default(); b.iter(|| { - let qd: QDesc = ioqueue_table.alloc::(TestQueue {}); + let qd = ioqueue_table.alloc::(TestQueue {}); black_box(qd); - let queue: TestQueue = expect_ok!(ioqueue_table.free::(&qd), "must be TestQueue"); + let queue = expect_ok!(ioqueue_table.free::(&qd), "must be TestQueue"); black_box(queue); }); } diff --git a/src/runtime/queue/qdesc.rs b/src/runtime/queue/qdesc.rs index 3dd019e20..ece0a060b 100644 --- a/src/runtime/queue/qdesc.rs +++ b/src/runtime/queue/qdesc.rs @@ -22,28 +22,24 @@ impl QDesc { //====================================================================================================================== impl From for i32 { - /// Converts a [QDesc] to a [i32]. fn from(val: QDesc) -> Self { val.0 as i32 } } impl From for QDesc { - /// Converts a [i32] to a [QDesc]. fn from(val: i32) -> Self { QDesc(val as u32) } } impl From for u32 { - /// Converts a [QDesc] to a [u32]. fn from(val: QDesc) -> Self { val.0 } } impl From for QDesc { - /// Converts a [u32] to a [QDesc]. fn from(val: u32) -> Self { QDesc(val) } diff --git a/src/runtime/queue/qtoken.rs b/src/runtime/queue/qtoken.rs index 92d6596c8..eb101c338 100644 --- a/src/runtime/queue/qtoken.rs +++ b/src/runtime/queue/qtoken.rs @@ -5,8 +5,6 @@ // Structures //====================================================================================================================== -/// Queue Token -/// /// This is used to uniquely identify operations on IO queues. #[derive(Clone, Copy, Debug, Eq, PartialEq, Hash)] #[repr(C)] @@ -17,14 +15,12 @@ pub struct QToken(u64); //====================================================================================================================== impl From for QToken { - /// Converts a [QToken] to a [u64]. fn from(value: u64) -> Self { QToken(value) } } impl From for u64 { - /// Converts a [QToken] to a [u64]. fn from(value: QToken) -> Self { value.0 } diff --git a/src/runtime/queue/qtype.rs b/src/runtime/queue/qtype.rs index 2f35cf6d4..56f187fd7 100644 --- a/src/runtime/queue/qtype.rs +++ b/src/runtime/queue/qtype.rs @@ -19,7 +19,6 @@ pub enum QType { // Trait Implementations //====================================================================================================================== -/// From Trait Implementation for IO Queue Types impl From for u32 { fn from(value: QType) -> Self { match value { @@ -31,7 +30,6 @@ impl From for u32 { } } -/// From Trait Implementation for IO Queue Types impl TryFrom for QType { type Error = &'static str;