Skip to content

Commit

Permalink
introduce StagingWriteBeltBufferTyped
Browse files Browse the repository at this point in the history
  • Loading branch information
Wumpf committed Dec 19, 2022
1 parent ace5d36 commit ac4b395
Show file tree
Hide file tree
Showing 3 changed files with 62 additions and 34 deletions.
36 changes: 20 additions & 16 deletions crates/re_renderer/src/point_cloud_builder.rs
Original file line number Diff line number Diff line change
Expand Up @@ -4,7 +4,7 @@ use crate::{
renderer::{
PointCloudBatchInfo, PointCloudDrawData, PointCloudDrawDataError, PointCloudVertex,
},
staging_write_belt::StagingWriteBeltBuffer,
staging_write_belt::StagingWriteBeltBufferTyped,
Color32, DebugLabel, RenderContext, Size,
};

Expand All @@ -22,8 +22,8 @@ pub struct PointCloudBuilder<PerPointUserData> {

pub batches: Vec<PointCloudBatchInfo>,

pub(crate) vertices_gpu: StagingWriteBeltBuffer,
pub(crate) colors_gpu: StagingWriteBeltBuffer,
pub(crate) vertices_gpu: StagingWriteBeltBufferTyped<PointCloudVertex>,
pub(crate) colors_gpu: StagingWriteBeltBufferTyped<Color32>,

/// z value given to the next 2d point.
pub next_2d_z: f32,
Expand All @@ -37,21 +37,25 @@ where
// TODO: check max_num_points bound

let mut staging_belt = ctx.staging_belt.lock();
let vertices_gpu = staging_belt.allocate(
&ctx.device,
&mut ctx.gpu_resources.buffers,
(std::mem::size_of::<PointCloudVertex>() * max_num_points) as wgpu::BufferAddress,
wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as u64,
);
let mut colors_gpu = staging_belt.allocate(
&ctx.device,
&mut ctx.gpu_resources.buffers,
(std::mem::size_of::<PointCloudVertex>() * max_num_points) as wgpu::BufferAddress,
wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as u64,
);
let vertices_gpu = staging_belt
.allocate(
&ctx.device,
&mut ctx.gpu_resources.buffers,
(std::mem::size_of::<PointCloudVertex>() * max_num_points) as wgpu::BufferAddress,
wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as u64,
)
.typed_view();
let mut colors_gpu = staging_belt
.allocate(
&ctx.device,
&mut ctx.gpu_resources.buffers,
(std::mem::size_of::<PointCloudVertex>() * max_num_points) as wgpu::BufferAddress,
wgpu::COPY_BYTES_PER_ROW_ALIGNMENT as u64,
)
.typed_view::<Color32>();
// Default unassigned colors to white.
// TODO(andreas): Do we actually need this? Can we do this lazily if no color was specified?
colors_gpu.memset(255);
colors_gpu.buffer.memset(255);

Self {
vertices: Vec::with_capacity(max_num_points),
Expand Down
4 changes: 2 additions & 2 deletions crates/re_renderer/src/renderer/point_cloud.rs
Original file line number Diff line number Diff line change
Expand Up @@ -218,7 +218,7 @@ impl PointCloudDrawData {
depth_or_array_layers: 1,
};

vertices_gpu.copy_to_texture(
vertices_gpu.buffer.copy_to_texture(
&mut ctx.frame_global_commands,
wgpu::ImageCopyTexture {
texture: &ctx
Expand All @@ -239,7 +239,7 @@ impl PointCloudDrawData {
size,
);

colors_gpu.copy_to_texture(
colors_gpu.buffer.copy_to_texture(
&mut ctx.frame_global_commands,
wgpu::ImageCopyTexture {
texture: &ctx
Expand Down
56 changes: 40 additions & 16 deletions crates/re_renderer/src/staging_write_belt.rs
Original file line number Diff line number Diff line change
Expand Up @@ -23,36 +23,60 @@ pub struct StagingWriteBeltBuffer {
offset_in_chunk: wgpu::BufferAddress,
}

impl StagingWriteBeltBuffer {
/// Writes bytes to the buffer at a given location.
///
/// We do *not* allow reading from this buffer as it is typically write-combined memory.
/// Reading would work, but it can be *insanely* slow.
#[inline]
#[allow(dead_code)]
pub fn write_bytes(&mut self, bytes: &[u8], offset: usize) {
self.write_view[offset..(offset + bytes.len())].clone_from_slice(bytes);
}
/// A typed view into a [`StagingWriteBeltBuffer`].
///
/// Bundles the raw staging buffer together with a typed slice over its mapped
/// memory, so callers can write `T` elements instead of raw bytes.
///
/// NOTE(review): `memory` is a borrow of `buffer`'s mapped write view stored as
/// `&'static mut` — a self-referential construction (see `typed_view`). The
/// `'static` lifetime is not real; it is upheld only by keeping `buffer` alive
/// in the same struct and never exposing `memory` outside it — confirm no code
/// path drops `buffer` while `memory` is still reachable.
pub struct StagingWriteBeltBufferTyped<T: bytemuck::Pod + 'static> {
    pub buffer: StagingWriteBeltBuffer,
    memory: &'static mut [T],
}

impl<T> StagingWriteBeltBufferTyped<T>
where
T: bytemuck::Pod + 'static,
{
/// Writes several objects to the buffer at a given location.
/// User is responsible for ensuring the element offset is valid with the element types's alignment requirement.
/// (panics otherwise)
///
/// We do *not* allow reading from this buffer as it is typically write-combined memory.
/// Reading would work, but it can be *insanely* slow.
#[inline]
pub fn write<T: bytemuck::Pod>(&mut self, elements: &[T], offset_in_element_sizes: usize) {
bytemuck::cast_slice_mut(&mut self.write_view)
[offset_in_element_sizes..(offset_in_element_sizes + elements.len())]
.clone_from_slice(elements);
pub fn write(&mut self, elements: &[T], offset_in_element_sizes: usize) {
self.memory[offset_in_element_sizes..(offset_in_element_sizes + elements.len())]
.copy_from_slice(elements);
}

/// Writes a single objects to the buffer at a given location.
/// User is responsible for ensuring the element offset is valid with the element types's alignment requirement.
/// (panics otherwise)
#[inline]
pub fn write_single<T: bytemuck::Pod>(&mut self, element: &T, offset_in_element_sizes: usize) {
bytemuck::cast_slice_mut(&mut self.write_view)[offset_in_element_sizes] = *element;
pub fn write_single(&mut self, element: &T, offset_in_element_sizes: usize) {
self.memory[offset_in_element_sizes] = *element;
}
}

impl StagingWriteBeltBuffer {
    /// Consumes this buffer and returns a typed view over its mapped memory.
    ///
    /// The returned [`StagingWriteBeltBufferTyped`] keeps `self` alive alongside
    /// a `T`-typed slice aliasing the same mapped write view.
    #[allow(unsafe_code)]
    pub fn typed_view<T: bytemuck::Pod + 'static>(mut self) -> StagingWriteBeltBufferTyped<T> {
        // Raw pointer to our own `write_view` field; note that `self` is moved
        // into the returned struct further down, after this pointer is taken.
        let view_ptr = &mut self.write_view as *mut wgpu::BufferViewMut<'static>;

        // SAFETY:
        // The memory pointer lives as long as the view since we store the view into StagingWriteBeltBufferTyped as well.
        //
        // NOTE(review): `Box::from_raw` requires a pointer previously obtained from
        // `Box::into_raw`; `view_ptr` instead points into `self`, so this is library
        // UB even though the box is immediately leaked and never dropped. The slice
        // produced below appears to remain usable only because it points at the
        // mapped buffer memory *behind* the view (via `DerefMut` to `[u8]`), not at
        // the `BufferViewMut` struct itself, which moves with `self`. Presumably
        // intentional, but this should be re-expressed without a fake `Box`
        // (e.g. `slice::from_raw_parts_mut` over the view's bytes) — TODO confirm.
        let static_view = Box::leak(unsafe { Box::from_raw(view_ptr) });

        // Reinterpret the mapped bytes as a `T` slice; `bytemuck` checks
        // size/alignment compatibility at runtime (panics on mismatch).
        let memory = bytemuck::cast_slice_mut(static_view);
        StagingWriteBeltBufferTyped {
            buffer: self,
            memory,
        }
    }

/// Writes bytes to the buffer at a given location.
///
/// We do *not* allow reading from this buffer as it is typically write-combined memory.
/// Reading would work, but it can be *insanely* slow.
#[inline]
#[allow(dead_code)]
pub fn write_bytes(&mut self, bytes: &[u8], offset: usize) {
self.write_view[offset..(offset + bytes.len())].clone_from_slice(bytes);
}

/// Sets all bytes in the buffer to a given value
Expand Down

0 comments on commit ac4b395

Please sign in to comment.