Compare commits

...

6 Commits

Author SHA1 Message Date
eb1bbee3eb Use best multisampling available 2024-09-26 20:08:59 +03:00
bf38462c78 Add FPS counter 2024-09-26 20:00:33 +03:00
45ed4dff90 Enable multisampling 2024-09-26 00:47:29 +03:00
ec6f2e3c57 Extract viewport 2024-09-26 00:29:33 +03:00
aa94681ab9 Extract line rendering 2024-09-25 23:53:29 +03:00
2e2c93792b Extract camera 2024-09-25 23:09:07 +03:00
4 changed files with 518 additions and 302 deletions

123
src/bin/wireframe/camera.rs Normal file
View File

@ -0,0 +1,123 @@
use std::mem;
use glam::{mat4, vec3, vec4, Mat4, Vec2, Vec3};
/// GPU-side camera data, laid out to match the WGSL uniform block.
///
/// `pad` keeps the struct a multiple of 16 bytes, as required by
/// uniform-buffer layout rules.
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
    // Combined model-view-projection matrix, column-major (see `Camera::uniform`).
    mvp: [[f32; 4]; 4],
    // Per-axis scale factors derived from the viewport aspect (see `Camera::uniform`).
    scale: [f32; 2],
    // Explicit padding; always written as zeros.
    pad: [u32; 2],
}
/// Owns the camera uniform buffer plus the bind group (and its layout)
/// that exposes it to vertex shaders at binding 0.
pub struct Camera {
    // Uniform buffer holding one `CameraUniform`.
    buf: wgpu::Buffer,
    // Bind group pointing at `buf`.
    bind: wgpu::BindGroup,
    // Layout retained so render pipelines can be created against it.
    layout: wgpu::BindGroupLayout,
}
impl Camera {
pub fn new(device: &wgpu::Device) -> Camera {
let buf = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Camera Buffer"),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
size: mem::size_of::<CameraUniform>() as u64,
mapped_at_creation: false,
});
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("Camera BindGroupLayout"),
});
let bind = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: buf.as_entire_binding(),
}],
label: Some("Camera BindGroup"),
});
Camera { buf, bind, layout }
}
pub fn bind_group_layout(&self) -> &wgpu::BindGroupLayout {
&self.layout
}
pub fn bind_group(&self) -> &wgpu::BindGroup {
&self.bind
}
fn uniform(view_mtx: Mat4, view_size: Vec2) -> CameraUniform {
const M: Mat4 = mat4(
vec4(0., 0., 1., 0.),
vec4(-1., 0., 0., 0.),
vec4(0., 1., 0., 0.),
vec4(0., 0., 0., 1.),
);
let size = view_size.normalize() * std::f32::consts::SQRT_2;
let proj = make_proj_matrix(vec3(size.x, size.y, 2.), (1., 4096.)) * M;
let mvp = proj * view_mtx;
CameraUniform {
mvp: mvp.to_cols_array_2d(),
scale: (1. / size).to_array(),
pad: [0; 2],
}
}
pub fn set(&self, queue: &wgpu::Queue, view_mtx: Mat4, view_size: Vec2) {
let uniform = Self::uniform(view_mtx, view_size);
queue.write_buffer(&self.buf, 0, bytemuck::bytes_of(&uniform));
}
}
/// Make a projection matrix, assuming input coordinates are (right, up, forward).
///
/// `corner` is a vector that will be mapped to (x=1, y=1) after the perspective division.
/// `zrange` is the Z range that will be mapped to z∈[-1, 1]. It has no other effect.
/// Both ends have to be positive though.
fn make_proj_matrix(corner: Vec3, zrange: (f32, f32)) -> Mat4 {
    let (znear, zfar) = zrange;
    let scale = 1.0 / corner;
    let zspan = zfar - znear;
    // Standard perspective shape: w receives the (scaled) input z, while the
    // third/fourth columns remap [znear, zfar] onto [-1, 1] after division by w.
    let col_x = scale.x * vec4(1., 0., 0., 0.);
    let col_y = scale.y * vec4(0., 1., 0., 0.);
    let col_z = scale.z * vec4(0., 0., (znear + zfar) / zspan, 1.);
    let col_w = scale.z * vec4(0., 0., -2. * znear * zfar / zspan, 0.);
    mat4(col_x, col_y, col_z, col_w)
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;
    use glam::vec3;

    // Checks that `make_proj_matrix` maps the corner vector to (1, 1) and
    // the near/far planes to z/w = -1 and +1 respectively.
    #[test]
    fn test_proj_matrix() {
        let m = make_proj_matrix(vec3(2., 3., 4.), (0.5, 20.0));

        // The corner itself lands on (1, 1) with z inside the clip range.
        let v = m * vec4(2., 3., 4., 1.);
        assert_abs_diff_eq!(v.x / v.w, 1.0);
        assert_abs_diff_eq!(v.y / v.w, 1.0);
        assert!(-v.w < v.z && v.z < v.w, "z out of range in {v}");

        // Near plane: x/y magnified by corner.z/znear = 4/0.5 = 8; z maps to -1.
        let v = m * vec4(2., 3., 0.5, 1.);
        assert_abs_diff_eq!(v.x / v.w, 8.0);
        assert_abs_diff_eq!(v.y / v.w, 8.0);
        assert_abs_diff_eq!(v.z / v.w, -1.0);

        // Far plane: x/y shrunk by corner.z/zfar = 4/20 = 0.2; z maps to +1.
        let v = m * vec4(2., 3., 20.0, 1.);
        assert_abs_diff_eq!(v.x / v.w, 0.2);
        assert_abs_diff_eq!(v.y / v.w, 0.2);
        assert_abs_diff_eq!(v.z / v.w, 1.0);
    }
}

170
src/bin/wireframe/lines.rs Normal file
View File

@ -0,0 +1,170 @@
use std::mem;
use bytemuck::{bytes_of, cast_slice, Pod, Zeroable};
use glam::Vec3;
use refraction::types::Ray;
use wgpu::util::DeviceExt as _;
/// One polyline sample as uploaded to the GPU: the ray's origin as the
/// position plus the ray's direction as the tangent at that point.
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
struct Vertex {
    pub position: [f32; 3],
    pub tangent: [f32; 3],
}
/// Push-constant block handed to the vertex stage for each line.
/// `_pad` rounds the size up to 16 bytes.
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct PushConsts {
    pub color: [f32; 3],
    pub _pad: f32,
}
/// User-facing line attributes (currently just the color).
#[derive(Copy, Clone)]
pub struct Attrs {
    pub color: Vec3,
}
impl Attrs {
    /// Pack these attributes into the `#[repr(C)]` push-constant layout,
    /// zeroing the padding.
    fn consts(&self) -> PushConsts {
        let color = self.color.to_array();
        PushConsts { color, _pad: 0. }
    }
}
/// A GPU-resident polyline: its vertex buffer, vertex count, and the
/// push constants (color) used when drawing it.
pub struct Line {
    consts: PushConsts,
    // Number of vertices in `buf`; the strip has `npoints - 1` segments.
    npoints: u32,
    buf: wgpu::Buffer,
}
impl Line {
    /// Upload a line strip to the GPU. Each ray contributes one vertex:
    /// its origin becomes the position and its direction the tangent.
    pub fn new_strip(device: &wgpu::Device, attrs: Attrs, points: Vec<Ray>) -> Line {
        let vertices: Vec<Vertex> = points
            .into_iter()
            .map(|ray| Vertex {
                position: ray.pos.to_array(),
                tangent: ray.dir.to_array(),
            })
            .collect();
        let npoints = vertices.len() as u32;
        let buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: cast_slice(&vertices),
            usage: wgpu::BufferUsages::VERTEX,
        });
        Line {
            consts: attrs.consts(),
            npoints,
            buf,
        }
    }
}
/// Wraps the render pipeline that draws `Line`s: each segment is one
/// instance expanded into a 4-vertex triangle strip by the shader.
pub struct LineRenderer {
    pipeline: wgpu::RenderPipeline,
}
// WGSL source for the line pipeline, embedded at compile time.
// (`'static` is implied on a `static` item, so it is not spelled out.)
static SHADER: &str = include_str!("ray.wgsl");
impl LineRenderer {
pub fn new(
device: &wgpu::Device,
cam_layout: &wgpu::BindGroupLayout,
target_format: wgpu::TextureFormat,
depth_stencil: Option<wgpu::DepthStencilState>,
multisample: wgpu::MultisampleState,
) -> LineRenderer {
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Line Shader"),
source: wgpu::ShaderSource::Wgsl(SHADER.into()),
});
let consts_range = wgpu::PushConstantRange {
stages: wgpu::ShaderStages::VERTEX,
range: 0..mem::size_of::<PushConsts>() as u32,
};
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Line RenderPipelineLayout"),
bind_group_layouts: &[cam_layout],
push_constant_ranges: &[consts_range],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Line RenderPipeline"),
layout: Some(&layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[wgpu::VertexBufferLayout {
array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Instance,
attributes: &[
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, position) as u64,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, tangent) as u64,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, position))
as u64,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, tangent))
as u64,
shader_location: 3,
format: wgpu::VertexFormat::Float32x3,
},
],
}],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: target_format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent::OVER,
alpha: wgpu::BlendComponent::OVER,
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
..Default::default()
},
depth_stencil,
multisample,
multiview: None,
cache: None,
});
LineRenderer { pipeline }
}
pub fn render<'a>(
&self,
pass: &mut wgpu::RenderPass,
cam_bind: &wgpu::BindGroup,
lines: impl Iterator<Item = &'a Line>,
) {
pass.set_pipeline(&self.pipeline);
pass.set_bind_group(0, cam_bind, &[]);
for line in lines {
pass.set_push_constants(wgpu::ShaderStages::VERTEX, 0, bytes_of(&line.consts));
pass.set_vertex_buffer(0, line.buf.slice(..));
pass.draw(0..4, 0..line.npoints - 1);
}
}
}

View File

@ -1,7 +1,6 @@
use std::{iter, mem, time::Instant}; use std::time::Instant;
use glam::{mat4, vec2, vec3, vec4, Mat4, Vec3}; use glam::{uvec2, vec3, Vec3};
use wgpu::{util::DeviceExt, ShaderStages};
use winit::{ use winit::{
event::*, event::*,
event_loop::EventLoop, event_loop::EventLoop,
@ -9,47 +8,20 @@ use winit::{
window::{Window, WindowBuilder}, window::{Window, WindowBuilder},
}; };
mod camera;
mod lines;
mod scene; mod scene;
mod viewport;
// The coordinate system: // The coordinate system:
// * X: forward // * X: forward
// * Y: left // * Y: left
// * Z: up // * Z: up
#[repr(C)] fn prepare_scene(device: &wgpu::Device) -> Vec<lines::Line> {
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
position: [f32; 3],
tangent: [f32; 3],
}
struct Wireframe {
color: Vec3,
data: wgpu::Buffer,
size: u32,
}
fn prepare_scene(device: &wgpu::Device) -> Vec<Wireframe> {
scene::build() scene::build()
.into_iter() .into_iter()
.map(|line| { .map(|line| lines::Line::new_strip(device, lines::Attrs { color: line.color }, line.pts))
let color = line.color;
let data: Vec<Vertex> = line
.pts
.into_iter()
.map(|r| Vertex {
position: r.pos.to_array(),
tangent: r.dir.to_array(),
})
.collect();
let size = data.len() as u32;
let data = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(&data),
usage: wgpu::BufferUsages::VERTEX,
});
Wireframe { color, data, size }
})
.collect() .collect()
} }
@ -140,37 +112,20 @@ static KEYS_ROTATE: &'static [(PhysicalKey, Vec3)] = &[
(PhysicalKey::Code(KeyCode::Numpad6), vec3(0., 0., -1.)), (PhysicalKey::Code(KeyCode::Numpad6), vec3(0., 0., -1.)),
]; ];
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
mvp: [[f32; 4]; 4],
scale: [f32; 2],
pad: [u32; 2],
}
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct LineUniform {
color: [f32; 3],
_pad: f32,
}
struct State<'a> { struct State<'a> {
surface: wgpu::Surface<'a>,
device: wgpu::Device, device: wgpu::Device,
queue: wgpu::Queue, queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
fps: fps::Counter,
kbd: keyctl::Keyboard, kbd: keyctl::Keyboard,
cam: camctl::CameraLocation,
t1: Instant, t1: Instant,
camera_buffer: wgpu::Buffer, viewport: viewport::Viewport<'a>,
camera_bind_group: wgpu::BindGroup, cam_loc: camctl::CameraLocation,
cam_obj: camera::Camera,
line_rend: lines::LineRenderer,
scene: Vec<Wireframe>, scene: Vec<lines::Line>,
window: &'a Window, window: &'a Window,
} }
@ -196,9 +151,10 @@ impl<'a> State<'a> {
.request_device( .request_device(
&wgpu::DeviceDescriptor { &wgpu::DeviceDescriptor {
label: None, label: None,
required_features: wgpu::Features::PUSH_CONSTANTS, required_features: wgpu::Features::PUSH_CONSTANTS
| wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
required_limits: wgpu::Limits { required_limits: wgpu::Limits {
max_push_constant_size: mem::size_of::<LineUniform>() as u32, max_push_constant_size: 16,
..wgpu::Limits::default() ..wgpu::Limits::default()
}, },
memory_hints: Default::default(), memory_hints: Default::default(),
@ -208,152 +164,44 @@ impl<'a> State<'a> {
.await .await
.unwrap(); .unwrap();
let surface_caps = surface.get_capabilities(&adapter); let viewport =
let surface_format = surface_caps viewport::Viewport::new(&adapter, &device, surface, uvec2(size.width, size.height));
.formats
.iter()
.copied()
.find(|f| !f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let kbd = keyctl::Keyboard::new(); let kbd = keyctl::Keyboard::new();
let cam = camctl::CameraLocation::new(); let cam_loc = camctl::CameraLocation::new();
let t1 = Instant::now(); let t1 = Instant::now();
let camera_buffer = device.create_buffer(&wgpu::BufferDescriptor { let depth = None;
label: Some("Camera Buffer"), let msaa = wgpu::MultisampleState {
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST, count: viewport.sample_count(),
size: mem::size_of::<CameraUniform>() as u64,
mapped_at_creation: false,
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("camera_bind_group_layout"),
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}],
label: Some("camera_bind_group"),
});
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("ray.wgsl").into()),
});
let line_push_constant_range = wgpu::PushConstantRange {
stages: ShaderStages::VERTEX,
range: 0..mem::size_of::<LineUniform>() as u32,
};
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&camera_bind_group_layout],
push_constant_ranges: &[line_push_constant_range],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[wgpu::VertexBufferLayout {
array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Instance,
attributes: &[
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, position) as u64,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, tangent) as u64,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, position))
as u64,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, tangent))
as u64,
shader_location: 3,
format: wgpu::VertexFormat::Float32x3,
},
],
}],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent::OVER,
alpha: wgpu::BlendComponent::OVER,
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
..Default::default()
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0, mask: !0,
alpha_to_coverage_enabled: false, alpha_to_coverage_enabled: false,
}, };
multiview: None,
cache: None, let cam_obj = camera::Camera::new(&device);
}); let line_rend = lines::LineRenderer::new(
&device,
cam_obj.bind_group_layout(),
viewport.format(),
depth,
msaa,
);
let scene = prepare_scene(&device); let scene = prepare_scene(&device);
let fps = fps::Counter::new();
Self { Self {
surface,
device, device,
queue, queue,
config, viewport,
size, line_rend,
render_pipeline,
kbd, kbd,
cam, fps,
cam_loc,
cam_obj,
t1, t1,
scene, scene,
camera_buffer,
camera_bind_group,
window, window,
} }
} }
@ -364,10 +212,8 @@ impl<'a> State<'a> {
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) { fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 { if new_size.width > 0 && new_size.height > 0 {
self.size = new_size; self.viewport
self.config.width = new_size.width; .resize(&self.device, uvec2(new_size.width, new_size.height));
self.config.height = new_size.height;
self.surface.configure(&self.device, &self.config);
} }
} }
@ -378,81 +224,27 @@ impl<'a> State<'a> {
self.t1 = t2; self.t1 = t2;
dt.as_secs_f32() dt.as_secs_f32()
}; };
self.cam.move_rel(100. * dt * self.kbd.control(&KEYS_MOVE)); let size = self.viewport.size().as_vec2();
self.cam
self.cam_loc
.move_rel(100. * dt * self.kbd.control(&KEYS_MOVE));
self.cam_loc
.rotate_rel_ypr(2. * dt * self.kbd.control(&KEYS_ROTATE)); .rotate_rel_ypr(2. * dt * self.kbd.control(&KEYS_ROTATE));
self.cam_obj.set(&self.queue, self.cam_loc.view_mtx(), size);
let size = vec2(self.config.width as f32, self.config.height as f32);
let size = size.normalize() * std::f32::consts::SQRT_2;
let proj = make_proj_matrix(vec3(size.x, size.y, 2.), (1., 4096.));
let my_to_gl = mat4(
vec4(0., 0., 1., 0.),
vec4(-1., 0., 0., 0.),
vec4(0., 1., 0., 0.),
vec4(0., 0., 0., 1.),
);
let view = my_to_gl * self.cam.view_mtx();
let mvp = proj * view;
let camera_uniform = CameraUniform {
mvp: mvp.to_cols_array_2d(),
scale: (1. / size).to_array(),
pad: [0; 2],
};
self.queue
.write_buffer(&self.camera_buffer, 0, bytemuck::bytes_of(&camera_uniform));
} }
fn render(&mut self) -> Result<(), wgpu::SurfaceError> { fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
let output = self.surface.get_current_texture()?; self.fps.on_frame();
let view = output self.window
.texture .set_title(&format!("Space Refraction ({:.1} FPS)", self.fps.get()));
.create_view(&wgpu::TextureViewDescriptor::default()); self.viewport
.render_single_pass(&self.device, &self.queue, |mut render_pass| {
let mut encoder = self self.line_rend.render(
.device &mut render_pass,
.create_command_encoder(&wgpu::CommandEncoderDescriptor { self.cam_obj.bind_group(),
label: Some("Render Encoder"), self.scene.iter(),
}); );
})
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.,
g: 0.,
b: 0.,
a: 1.,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.camera_bind_group, &[]);
for wireframe in &self.scene {
let line = LineUniform {
color: wireframe.color.to_array(),
_pad: 0.,
};
render_pass.set_push_constants(ShaderStages::VERTEX, 0, bytemuck::bytes_of(&line));
render_pass.set_vertex_buffer(0, wireframe.data.slice(..));
render_pass.draw(0..4, 0..wireframe.size - 1);
}
}
self.queue.submit(iter::once(encoder.finish()));
output.present();
Ok(())
} }
} }
@ -500,7 +292,7 @@ pub async fn run() {
Ok(_) => {} Ok(_) => {}
// Reconfigure the surface if it's lost or outdated // Reconfigure the surface if it's lost or outdated
Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => { Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
state.resize(state.size) state.viewport.configure(&state.device);
} }
// The system is out of memory, we should probably quit // The system is out of memory, we should probably quit
Err(wgpu::SurfaceError::OutOfMemory) => { Err(wgpu::SurfaceError::OutOfMemory) => {
@ -527,44 +319,39 @@ fn main() {
pollster::block_on(run()); pollster::block_on(run());
} }
/// Make a projection matrix, assuming input coordinates are (right, up, forward). mod fps {
/// use std::time::{Duration, Instant};
/// `corner` is a vector that will be mapped to (x=1, y=1) after the perspective division.
/// `zrange` is the Z range that will be mapped to z∈[-1, 1]. It has no other effect. Both ends have to be positive though. pub struct Counter {
fn make_proj_matrix(corner: Vec3, zrange: (f32, f32)) -> Mat4 { fps: f32,
let scale = 1.0 / corner; t1: Instant,
let zspan = zrange.1 - zrange.0; frames: u32,
mat4(
scale.x * vec4(1., 0., 0., 0.),
scale.y * vec4(0., 1., 0., 0.),
scale.z * vec4(0., 0., (zrange.0 + zrange.1) / zspan, 1.),
scale.z * vec4(0., 0., -2. * zrange.0 * zrange.1 / zspan, 0.),
)
} }
#[cfg(test)] impl Counter {
mod tests { pub fn new() -> Self {
use super::*; Self {
use approx::assert_abs_diff_eq; fps: 0.,
use glam::vec3; t1: Instant::now(),
frames: 0,
#[test] }
fn test_proj_matrix() { }
let m = make_proj_matrix(vec3(2., 3., 4.), (0.5, 20.0));
pub fn get(&self) -> f32 {
let v = m * vec4(2., 3., 4., 1.); self.fps
assert_abs_diff_eq!(v.x / v.w, 1.0); }
assert_abs_diff_eq!(v.y / v.w, 1.0);
assert!(-v.w < v.z && v.z < v.w, "z out of range in {v}"); pub fn on_frame(&mut self) {
self.frames += 1;
let v = m * vec4(2., 3., 0.5, 1.); let t2 = Instant::now();
assert_abs_diff_eq!(v.x / v.w, 8.0); let dt = t2 - self.t1;
assert_abs_diff_eq!(v.y / v.w, 8.0); if dt >= Duration::from_secs(1) {
assert_abs_diff_eq!(v.z / v.w, -1.0); *self = Self {
fps: self.frames as f32 / dt.as_secs_f32(),
let v = m * vec4(2., 3., 20.0, 1.); t1: t2,
assert_abs_diff_eq!(v.x / v.w, 0.2); frames: 0,
assert_abs_diff_eq!(v.y / v.w, 0.2); }
assert_abs_diff_eq!(v.z / v.w, 1.0); }
}
} }
} }

View File

@ -0,0 +1,136 @@
use glam::{uvec2, UVec2};
/// Owns the window surface, its configuration, and the MSAA color target
/// that render passes draw into before resolving to the swapchain frame.
pub struct Viewport<'a> {
    surface: wgpu::Surface<'a>,
    config: wgpu::SurfaceConfiguration,
    // Sample count chosen at creation; reused when rebuilding `multisample`.
    sample_count: u32,
    multisample: Multisample,
}
impl<'a> Viewport<'a> {
pub fn new(
adapter: &wgpu::Adapter,
device: &wgpu::Device,
surface: wgpu::Surface<'a>,
size: UVec2,
) -> Self {
let caps = surface.get_capabilities(adapter);
let format = wgpu::TextureFormat::Bgra8Unorm;
let sample_count = adapter
.get_texture_format_features(format)
.flags
.supported_sample_counts()
.into_iter()
.max()
.unwrap();
eprintln!("Using x{sample_count} mutlisampling");
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format,
width: size.x,
height: size.y,
present_mode: caps.present_modes[0],
alpha_mode: caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let multisample = Multisample::new(device, format, size, sample_count);
Self {
surface,
config,
sample_count,
multisample,
}
}
pub fn configure(&mut self, device: &wgpu::Device) {
self.surface.configure(&device, &self.config);
self.multisample = Multisample::new(device, self.format(), self.size(), self.sample_count);
}
pub fn resize(&mut self, device: &wgpu::Device, size: UVec2) {
self.config.width = size.x;
self.config.height = size.y;
self.configure(&device);
}
pub fn size(&self) -> UVec2 {
uvec2(self.config.width, self.config.height)
}
pub fn format(&self) -> wgpu::TextureFormat {
self.config.format
}
pub fn sample_count(&self) -> u32 {
self.sample_count
}
pub fn render_single_pass(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
f: impl FnOnce(wgpu::RenderPass),
) -> Result<(), wgpu::SurfaceError> {
let output = self.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render CommandEncoder"),
});
let render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("RenderPass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &self.multisample.view,
resolve_target: Some(&view),
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.,
g: 0.,
b: 0.,
a: 1.,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
f(render_pass);
queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
}
/// Default view of the multisampled color texture used as the render
/// pass's color attachment (resolved to the swapchain each frame).
struct Multisample {
    view: wgpu::TextureView,
}
impl Multisample {
    /// Allocate a multisampled color attachment of the given size/format and
    /// keep its default view; the texture handle is not needed afterwards.
    fn new(
        device: &wgpu::Device,
        format: wgpu::TextureFormat,
        size: UVec2,
        sample_count: u32,
    ) -> Multisample {
        let extent = wgpu::Extent3d {
            width: size.x,
            height: size.y,
            depth_or_array_layers: 1,
        };
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Multisample texture"),
            size: extent,
            mip_level_count: 1,
            sample_count,
            dimension: wgpu::TextureDimension::D2,
            format,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            view_formats: &[],
        });
        Multisample {
            view: texture.create_view(&wgpu::TextureViewDescriptor::default()),
        }
    }
}