Compare commits

..

No commits in common. "eb1bbee3eb7ed74d931a573f48fc89b2a6927d71" and "d3d4048a5c397d4e759b39bdcc5126ad397aee5f" have entirely different histories.

4 changed files with 306 additions and 522 deletions

View File

@@ -1,123 +0,0 @@
use std::mem;
use glam::{mat4, vec3, vec4, Mat4, Vec2, Vec3};
/// GPU-side camera uniform. `#[repr(C)]` so the byte layout is stable and
/// matches the uniform struct declared in the WGSL shader — do not reorder
/// fields without updating the shader.
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
    // Combined model-view-projection matrix, column-major (`to_cols_array_2d`).
    mvp: [[f32; 4]; 4],
    // Reciprocal of the normalized viewport size (see `Camera::uniform`).
    scale: [f32; 2],
    // Explicit padding so the struct size is a multiple of 16 bytes.
    pad: [u32; 2],
}
/// Owns the camera uniform buffer together with its bind group and layout.
pub struct Camera {
    // Uniform buffer holding one `CameraUniform`, rewritten by `set`.
    buf: wgpu::Buffer,
    // Bind group exposing `buf` at binding 0.
    bind: wgpu::BindGroup,
    // Layout shared with pipelines that consume the camera bind group.
    layout: wgpu::BindGroupLayout,
}
impl Camera {
pub fn new(device: &wgpu::Device) -> Camera {
let buf = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Camera Buffer"),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
size: mem::size_of::<CameraUniform>() as u64,
mapped_at_creation: false,
});
let layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("Camera BindGroupLayout"),
});
let bind = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: buf.as_entire_binding(),
}],
label: Some("Camera BindGroup"),
});
Camera { buf, bind, layout }
}
pub fn bind_group_layout(&self) -> &wgpu::BindGroupLayout {
&self.layout
}
pub fn bind_group(&self) -> &wgpu::BindGroup {
&self.bind
}
fn uniform(view_mtx: Mat4, view_size: Vec2) -> CameraUniform {
const M: Mat4 = mat4(
vec4(0., 0., 1., 0.),
vec4(-1., 0., 0., 0.),
vec4(0., 1., 0., 0.),
vec4(0., 0., 0., 1.),
);
let size = view_size.normalize() * std::f32::consts::SQRT_2;
let proj = make_proj_matrix(vec3(size.x, size.y, 2.), (1., 4096.)) * M;
let mvp = proj * view_mtx;
CameraUniform {
mvp: mvp.to_cols_array_2d(),
scale: (1. / size).to_array(),
pad: [0; 2],
}
}
pub fn set(&self, queue: &wgpu::Queue, view_mtx: Mat4, view_size: Vec2) {
let uniform = Self::uniform(view_mtx, view_size);
queue.write_buffer(&self.buf, 0, bytemuck::bytes_of(&uniform));
}
}
/// Make a projection matrix, assuming input coordinates are (right, up, forward).
///
/// `corner` is a vector that will be mapped to (x=1, y=1) after the perspective division.
/// `zrange` is the Z range that will be mapped to z∈[-1, 1]. It has no other effect. Both ends have to be positive though.
fn make_proj_matrix(corner: Vec3, zrange: (f32, f32)) -> Mat4 {
    let (znear, zfar) = zrange;
    let scale = 1.0 / corner;
    let zspan = zfar - znear;
    // Column-major layout: x/y columns are pure scales; the z column carries
    // the depth remap together with the w = z / corner.z perspective term;
    // the last column is the translational part of the depth remap.
    let col_x = scale.x * vec4(1., 0., 0., 0.);
    let col_y = scale.y * vec4(0., 1., 0., 0.);
    let col_z = scale.z * vec4(0., 0., (znear + zfar) / zspan, 1.);
    let col_w = scale.z * vec4(0., 0., -2. * znear * zfar / zspan, 0.);
    mat4(col_x, col_y, col_z, col_w)
}
#[cfg(test)]
mod tests {
    use super::*;
    use approx::assert_abs_diff_eq;
    use glam::vec3;

    #[test]
    fn test_proj_matrix() {
        let m = make_proj_matrix(vec3(2., 3., 4.), (0.5, 20.0));
        // The corner vector itself maps to (1, 1) after the perspective
        // divide, with its depth somewhere inside the clip volume.
        let v = m * vec4(2., 3., 4., 1.);
        assert_abs_diff_eq!(v.x / v.w, 1.0);
        assert_abs_diff_eq!(v.y / v.w, 1.0);
        assert!(-v.w < v.z && v.z < v.w, "z out of range in {v}");
        // At the near plane (z = 0.5) the same x/y blow up by 4/0.5 = 8 and
        // depth lands exactly on the near clip value -1.
        let v = m * vec4(2., 3., 0.5, 1.);
        assert_abs_diff_eq!(v.x / v.w, 8.0);
        assert_abs_diff_eq!(v.y / v.w, 8.0);
        assert_abs_diff_eq!(v.z / v.w, -1.0);
        // At the far plane (z = 20) x/y shrink by 4/20 = 0.2 and depth lands
        // exactly on the far clip value +1.
        let v = m * vec4(2., 3., 20.0, 1.);
        assert_abs_diff_eq!(v.x / v.w, 0.2);
        assert_abs_diff_eq!(v.y / v.w, 0.2);
        assert_abs_diff_eq!(v.z / v.w, 1.0);
    }
}

View File

@@ -1,170 +0,0 @@
use std::mem;
use bytemuck::{bytes_of, cast_slice, Pod, Zeroable};
use glam::Vec3;
use refraction::types::Ray;
use wgpu::util::DeviceExt as _;
/// One point of a line strip. `#[repr(C)]` so field offsets match the
/// `wgpu::VertexBufferLayout` attributes computed with `mem::offset_of!`.
#[repr(C)]
#[derive(Copy, Clone, Debug, Pod, Zeroable)]
struct Vertex {
    // Point position (copied from `Ray::pos`).
    pub position: [f32; 3],
    // Direction at the point (copied from `Ray::dir`).
    pub tangent: [f32; 3],
}
/// Per-line push-constant payload; `#[repr(C)]` and padded to 16 bytes.
#[repr(C)]
#[derive(Copy, Clone, Pod, Zeroable)]
struct PushConsts {
    // Line color (RGB).
    pub color: [f32; 3],
    // Padding only; keeps the struct a 16-byte multiple.
    pub _pad: f32,
}
/// Display attributes for one line.
#[derive(Copy, Clone)]
pub struct Attrs {
    // Line color (RGB).
    pub color: Vec3,
}
impl Attrs {
    /// Pack the attributes into their push-constant representation.
    fn consts(&self) -> PushConsts {
        let color = self.color.to_array();
        PushConsts { color, _pad: 0. }
    }
}
/// A renderable line strip: push constants, point count and vertex data.
pub struct Line {
    // Push-constant data derived from `Attrs`.
    consts: PushConsts,
    // Number of `Vertex` entries uploaded into `buf`.
    npoints: u32,
    // Vertex buffer with one `Vertex` per strip point.
    buf: wgpu::Buffer,
}
impl Line {
    /// Upload `points` as a vertex buffer and bundle it with `attrs`.
    pub fn new_strip(device: &wgpu::Device, attrs: Attrs, points: Vec<Ray>) -> Line {
        let vertices: Vec<Vertex> = points
            .into_iter()
            .map(|ray| Vertex {
                position: ray.pos.to_array(),
                tangent: ray.dir.to_array(),
            })
            .collect();
        let vertex_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
            label: Some("Vertex Buffer"),
            contents: cast_slice(&vertices),
            usage: wgpu::BufferUsages::VERTEX,
        });
        Line {
            consts: attrs.consts(),
            npoints: vertices.len() as u32,
            buf: vertex_buf,
        }
    }
}
/// Renders `Line` strips; owns the render pipeline they are drawn with.
pub struct LineRenderer {
    // Pipeline built in `LineRenderer::new` from `ray.wgsl`.
    pipeline: wgpu::RenderPipeline,
}
/// WGSL shader source for line rendering, embedded at compile time.
// `'static` is implied on a `static` item (clippy: redundant_static_lifetimes).
static SHADER: &str = include_str!("ray.wgsl");
impl LineRenderer {
pub fn new(
device: &wgpu::Device,
cam_layout: &wgpu::BindGroupLayout,
target_format: wgpu::TextureFormat,
depth_stencil: Option<wgpu::DepthStencilState>,
multisample: wgpu::MultisampleState,
) -> LineRenderer {
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Line Shader"),
source: wgpu::ShaderSource::Wgsl(SHADER.into()),
});
let consts_range = wgpu::PushConstantRange {
stages: wgpu::ShaderStages::VERTEX,
range: 0..mem::size_of::<PushConsts>() as u32,
};
let layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Line RenderPipelineLayout"),
bind_group_layouts: &[cam_layout],
push_constant_ranges: &[consts_range],
});
let pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Line RenderPipeline"),
layout: Some(&layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[wgpu::VertexBufferLayout {
array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Instance,
attributes: &[
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, position) as u64,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, tangent) as u64,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, position))
as u64,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, tangent))
as u64,
shader_location: 3,
format: wgpu::VertexFormat::Float32x3,
},
],
}],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: target_format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent::OVER,
alpha: wgpu::BlendComponent::OVER,
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
..Default::default()
},
depth_stencil,
multisample,
multiview: None,
cache: None,
});
LineRenderer { pipeline }
}
pub fn render<'a>(
&self,
pass: &mut wgpu::RenderPass,
cam_bind: &wgpu::BindGroup,
lines: impl Iterator<Item = &'a Line>,
) {
pass.set_pipeline(&self.pipeline);
pass.set_bind_group(0, cam_bind, &[]);
for line in lines {
pass.set_push_constants(wgpu::ShaderStages::VERTEX, 0, bytes_of(&line.consts));
pass.set_vertex_buffer(0, line.buf.slice(..));
pass.draw(0..4, 0..line.npoints - 1);
}
}
}

View File

@@ -1,6 +1,7 @@
use std::time::Instant;
use std::{iter, mem, time::Instant};
use glam::{uvec2, vec3, Vec3};
use glam::{mat4, vec2, vec3, vec4, Mat4, Vec3};
use wgpu::{util::DeviceExt, ShaderStages};
use winit::{
event::*,
event_loop::EventLoop,
@@ -8,20 +9,47 @@ use winit::{
window::{Window, WindowBuilder},
};
mod camera;
mod lines;
mod scene;
mod viewport;
// The coordinate system:
// * X: forward
// * Y: left
// * Z: up
fn prepare_scene(device: &wgpu::Device) -> Vec<lines::Line> {
/// One point of a line strip. `#[repr(C)]` so field offsets match the vertex
/// attributes configured on the render pipeline via `mem::offset_of!`.
#[repr(C)]
#[derive(Copy, Clone, Debug, bytemuck::Pod, bytemuck::Zeroable)]
struct Vertex {
    // Point position (copied from `Ray::pos`).
    position: [f32; 3],
    // Direction at the point (copied from `Ray::dir`).
    tangent: [f32; 3],
}
/// One line strip ready for drawing.
struct Wireframe {
    // Line color (RGB), sent per draw via push constants.
    color: Vec3,
    // Vertex buffer holding `size` consecutive `Vertex` entries.
    data: wgpu::Buffer,
    // Number of points stored in `data`.
    size: u32,
}
fn prepare_scene(device: &wgpu::Device) -> Vec<Wireframe> {
scene::build()
.into_iter()
.map(|line| lines::Line::new_strip(device, lines::Attrs { color: line.color }, line.pts))
.map(|line| {
let color = line.color;
let data: Vec<Vertex> = line
.pts
.into_iter()
.map(|r| Vertex {
position: r.pos.to_array(),
tangent: r.dir.to_array(),
})
.collect();
let size = data.len() as u32;
let data = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: Some("Vertex Buffer"),
contents: bytemuck::cast_slice(&data),
usage: wgpu::BufferUsages::VERTEX,
});
Wireframe { color, data, size }
})
.collect()
}
@@ -112,20 +140,37 @@ static KEYS_ROTATE: &'static [(PhysicalKey, Vec3)] = &[
(PhysicalKey::Code(KeyCode::Numpad6), vec3(0., 0., -1.)),
];
/// GPU camera uniform. `#[repr(C)]` layout must match the uniform struct in
/// the WGSL shader — do not reorder fields without updating the shader.
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct CameraUniform {
    // Combined model-view-projection matrix, column-major.
    mvp: [[f32; 4]; 4],
    // Reciprocal of the normalized viewport size.
    scale: [f32; 2],
    // Explicit padding to a 16-byte multiple.
    pad: [u32; 2],
}
/// Per-line push-constant data; `#[repr(C)]` and padded to 16 bytes.
#[repr(C)]
#[derive(Copy, Clone, bytemuck::Pod, bytemuck::Zeroable)]
struct LineUniform {
    // Line color (RGB).
    color: [f32; 3],
    // Padding only.
    _pad: f32,
}
struct State<'a> {
surface: wgpu::Surface<'a>,
device: wgpu::Device,
queue: wgpu::Queue,
config: wgpu::SurfaceConfiguration,
size: winit::dpi::PhysicalSize<u32>,
render_pipeline: wgpu::RenderPipeline,
fps: fps::Counter,
kbd: keyctl::Keyboard,
cam: camctl::CameraLocation,
t1: Instant,
viewport: viewport::Viewport<'a>,
cam_loc: camctl::CameraLocation,
cam_obj: camera::Camera,
line_rend: lines::LineRenderer,
camera_buffer: wgpu::Buffer,
camera_bind_group: wgpu::BindGroup,
scene: Vec<lines::Line>,
scene: Vec<Wireframe>,
window: &'a Window,
}
@@ -151,10 +196,9 @@ impl<'a> State<'a> {
.request_device(
&wgpu::DeviceDescriptor {
label: None,
required_features: wgpu::Features::PUSH_CONSTANTS
| wgpu::Features::TEXTURE_ADAPTER_SPECIFIC_FORMAT_FEATURES,
required_features: wgpu::Features::PUSH_CONSTANTS,
required_limits: wgpu::Limits {
max_push_constant_size: 16,
max_push_constant_size: mem::size_of::<LineUniform>() as u32,
..wgpu::Limits::default()
},
memory_hints: Default::default(),
@@ -164,44 +208,152 @@ impl<'a> State<'a> {
.await
.unwrap();
let viewport =
viewport::Viewport::new(&adapter, &device, surface, uvec2(size.width, size.height));
let kbd = keyctl::Keyboard::new();
let cam_loc = camctl::CameraLocation::new();
let t1 = Instant::now();
let depth = None;
let msaa = wgpu::MultisampleState {
count: viewport.sample_count(),
mask: !0,
alpha_to_coverage_enabled: false,
let surface_caps = surface.get_capabilities(&adapter);
let surface_format = surface_caps
.formats
.iter()
.copied()
.find(|f| !f.is_srgb())
.unwrap_or(surface_caps.formats[0]);
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format: surface_format,
width: size.width,
height: size.height,
present_mode: surface_caps.present_modes[0],
alpha_mode: surface_caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let cam_obj = camera::Camera::new(&device);
let line_rend = lines::LineRenderer::new(
&device,
cam_obj.bind_group_layout(),
viewport.format(),
depth,
msaa,
);
let kbd = keyctl::Keyboard::new();
let cam = camctl::CameraLocation::new();
let t1 = Instant::now();
let camera_buffer = device.create_buffer(&wgpu::BufferDescriptor {
label: Some("Camera Buffer"),
usage: wgpu::BufferUsages::UNIFORM | wgpu::BufferUsages::COPY_DST,
size: mem::size_of::<CameraUniform>() as u64,
mapped_at_creation: false,
});
let camera_bind_group_layout =
device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::VERTEX,
ty: wgpu::BindingType::Buffer {
ty: wgpu::BufferBindingType::Uniform,
has_dynamic_offset: false,
min_binding_size: None,
},
count: None,
}],
label: Some("camera_bind_group_layout"),
});
let camera_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &camera_bind_group_layout,
entries: &[wgpu::BindGroupEntry {
binding: 0,
resource: camera_buffer.as_entire_binding(),
}],
label: Some("camera_bind_group"),
});
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: Some("Shader"),
source: wgpu::ShaderSource::Wgsl(include_str!("ray.wgsl").into()),
});
let line_push_constant_range = wgpu::PushConstantRange {
stages: ShaderStages::VERTEX,
range: 0..mem::size_of::<LineUniform>() as u32,
};
let render_pipeline_layout =
device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&camera_bind_group_layout],
push_constant_ranges: &[line_push_constant_range],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &shader,
entry_point: "vs_main",
buffers: &[wgpu::VertexBufferLayout {
array_stride: mem::size_of::<Vertex>() as wgpu::BufferAddress,
step_mode: wgpu::VertexStepMode::Instance,
attributes: &[
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, position) as u64,
shader_location: 0,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: mem::offset_of!(Vertex, tangent) as u64,
shader_location: 1,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, position))
as u64,
shader_location: 2,
format: wgpu::VertexFormat::Float32x3,
},
wgpu::VertexAttribute {
offset: (mem::size_of::<Vertex>() + mem::offset_of!(Vertex, tangent))
as u64,
shader_location: 3,
format: wgpu::VertexFormat::Float32x3,
},
],
}],
compilation_options: Default::default(),
},
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: config.format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent::OVER,
alpha: wgpu::BlendComponent::OVER,
}),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: Default::default(),
}),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
..Default::default()
},
depth_stencil: None,
multisample: wgpu::MultisampleState {
count: 1,
mask: !0,
alpha_to_coverage_enabled: false,
},
multiview: None,
cache: None,
});
let scene = prepare_scene(&device);
let fps = fps::Counter::new();
Self {
surface,
device,
queue,
viewport,
line_rend,
config,
size,
render_pipeline,
kbd,
fps,
cam_loc,
cam_obj,
cam,
t1,
scene,
camera_buffer,
camera_bind_group,
window,
}
}
@@ -212,8 +364,10 @@ impl<'a> State<'a> {
fn resize(&mut self, new_size: winit::dpi::PhysicalSize<u32>) {
if new_size.width > 0 && new_size.height > 0 {
self.viewport
.resize(&self.device, uvec2(new_size.width, new_size.height));
self.size = new_size;
self.config.width = new_size.width;
self.config.height = new_size.height;
self.surface.configure(&self.device, &self.config);
}
}
@@ -224,27 +378,81 @@ impl<'a> State<'a> {
self.t1 = t2;
dt.as_secs_f32()
};
let size = self.viewport.size().as_vec2();
self.cam_loc
.move_rel(100. * dt * self.kbd.control(&KEYS_MOVE));
self.cam_loc
self.cam.move_rel(100. * dt * self.kbd.control(&KEYS_MOVE));
self.cam
.rotate_rel_ypr(2. * dt * self.kbd.control(&KEYS_ROTATE));
self.cam_obj.set(&self.queue, self.cam_loc.view_mtx(), size);
let size = vec2(self.config.width as f32, self.config.height as f32);
let size = size.normalize() * std::f32::consts::SQRT_2;
let proj = make_proj_matrix(vec3(size.x, size.y, 2.), (1., 4096.));
let my_to_gl = mat4(
vec4(0., 0., 1., 0.),
vec4(-1., 0., 0., 0.),
vec4(0., 1., 0., 0.),
vec4(0., 0., 0., 1.),
);
let view = my_to_gl * self.cam.view_mtx();
let mvp = proj * view;
let camera_uniform = CameraUniform {
mvp: mvp.to_cols_array_2d(),
scale: (1. / size).to_array(),
pad: [0; 2],
};
self.queue
.write_buffer(&self.camera_buffer, 0, bytemuck::bytes_of(&camera_uniform));
}
fn render(&mut self) -> Result<(), wgpu::SurfaceError> {
self.fps.on_frame();
self.window
.set_title(&format!("Space Refraction ({:.1} FPS)", self.fps.get()));
self.viewport
.render_single_pass(&self.device, &self.queue, |mut render_pass| {
self.line_rend.render(
&mut render_pass,
self.cam_obj.bind_group(),
self.scene.iter(),
);
})
let output = self.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = self
.device
.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render Encoder"),
});
{
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("Render Pass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.,
g: 0.,
b: 0.,
a: 1.,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&self.render_pipeline);
render_pass.set_bind_group(0, &self.camera_bind_group, &[]);
for wireframe in &self.scene {
let line = LineUniform {
color: wireframe.color.to_array(),
_pad: 0.,
};
render_pass.set_push_constants(ShaderStages::VERTEX, 0, bytemuck::bytes_of(&line));
render_pass.set_vertex_buffer(0, wireframe.data.slice(..));
render_pass.draw(0..4, 0..wireframe.size - 1);
}
}
self.queue.submit(iter::once(encoder.finish()));
output.present();
Ok(())
}
}
@@ -292,7 +500,7 @@ pub async fn run() {
Ok(_) => {}
// Reconfigure the surface if it's lost or outdated
Err(wgpu::SurfaceError::Lost | wgpu::SurfaceError::Outdated) => {
state.viewport.configure(&state.device);
state.resize(state.size)
}
// The system is out of memory, we should probably quit
Err(wgpu::SurfaceError::OutOfMemory) => {
@@ -319,39 +527,44 @@ fn main() {
pollster::block_on(run());
}
mod fps {
use std::time::{Duration, Instant};
/// Make a projection matrix, assuming input coordinates are (right, up, forward).
///
/// `corner` is a vector that will be mapped to (x=1, y=1) after the perspective division.
/// `zrange` is the Z range that will be mapped to z∈[-1, 1]. It has no other effect. Both ends have to be positive though.
fn make_proj_matrix(corner: Vec3, zrange: (f32, f32)) -> Mat4 {
    let inv = 1.0 / corner;
    let depth = zrange.1 - zrange.0;
    mat4(
        // x and y columns are pure scales toward the frustum corner.
        inv.x * vec4(1., 0., 0., 0.),
        inv.y * vec4(0., 1., 0., 0.),
        // The z column also writes w = z / corner.z, the perspective term.
        inv.z * vec4(0., 0., (zrange.0 + zrange.1) / depth, 1.),
        inv.z * vec4(0., 0., -2. * zrange.0 * zrange.1 / depth, 0.),
    )
}
pub struct Counter {
fps: f32,
t1: Instant,
frames: u32,
}
#[cfg(test)]
mod tests {
use super::*;
use approx::assert_abs_diff_eq;
use glam::vec3;
impl Counter {
pub fn new() -> Self {
Self {
fps: 0.,
t1: Instant::now(),
frames: 0,
}
}
#[test]
fn test_proj_matrix() {
let m = make_proj_matrix(vec3(2., 3., 4.), (0.5, 20.0));
pub fn get(&self) -> f32 {
self.fps
}
let v = m * vec4(2., 3., 4., 1.);
assert_abs_diff_eq!(v.x / v.w, 1.0);
assert_abs_diff_eq!(v.y / v.w, 1.0);
assert!(-v.w < v.z && v.z < v.w, "z out of range in {v}");
pub fn on_frame(&mut self) {
self.frames += 1;
let t2 = Instant::now();
let dt = t2 - self.t1;
if dt >= Duration::from_secs(1) {
*self = Self {
fps: self.frames as f32 / dt.as_secs_f32(),
t1: t2,
frames: 0,
}
}
}
let v = m * vec4(2., 3., 0.5, 1.);
assert_abs_diff_eq!(v.x / v.w, 8.0);
assert_abs_diff_eq!(v.y / v.w, 8.0);
assert_abs_diff_eq!(v.z / v.w, -1.0);
let v = m * vec4(2., 3., 20.0, 1.);
assert_abs_diff_eq!(v.x / v.w, 0.2);
assert_abs_diff_eq!(v.y / v.w, 0.2);
assert_abs_diff_eq!(v.z / v.w, 1.0);
}
}

View File

@ -1,136 +0,0 @@
use glam::{uvec2, UVec2};
/// Owns the window surface, its configuration and the MSAA color target.
pub struct Viewport<'a> {
    // Presentable window surface.
    surface: wgpu::Surface<'a>,
    // Current surface configuration (size, format, present/alpha modes).
    config: wgpu::SurfaceConfiguration,
    // MSAA sample count chosen once at creation.
    sample_count: u32,
    // Multisampled color texture resolved into the surface each frame.
    multisample: Multisample,
}
impl<'a> Viewport<'a> {
pub fn new(
adapter: &wgpu::Adapter,
device: &wgpu::Device,
surface: wgpu::Surface<'a>,
size: UVec2,
) -> Self {
let caps = surface.get_capabilities(adapter);
let format = wgpu::TextureFormat::Bgra8Unorm;
let sample_count = adapter
.get_texture_format_features(format)
.flags
.supported_sample_counts()
.into_iter()
.max()
.unwrap();
eprintln!("Using x{sample_count} mutlisampling");
let config = wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
format,
width: size.x,
height: size.y,
present_mode: caps.present_modes[0],
alpha_mode: caps.alpha_modes[0],
view_formats: vec![],
desired_maximum_frame_latency: 2,
};
let multisample = Multisample::new(device, format, size, sample_count);
Self {
surface,
config,
sample_count,
multisample,
}
}
pub fn configure(&mut self, device: &wgpu::Device) {
self.surface.configure(&device, &self.config);
self.multisample = Multisample::new(device, self.format(), self.size(), self.sample_count);
}
pub fn resize(&mut self, device: &wgpu::Device, size: UVec2) {
self.config.width = size.x;
self.config.height = size.y;
self.configure(&device);
}
pub fn size(&self) -> UVec2 {
uvec2(self.config.width, self.config.height)
}
pub fn format(&self) -> wgpu::TextureFormat {
self.config.format
}
pub fn sample_count(&self) -> u32 {
self.sample_count
}
pub fn render_single_pass(
&mut self,
device: &wgpu::Device,
queue: &wgpu::Queue,
f: impl FnOnce(wgpu::RenderPass),
) -> Result<(), wgpu::SurfaceError> {
let output = self.surface.get_current_texture()?;
let view = output
.texture
.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render CommandEncoder"),
});
let render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("RenderPass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &self.multisample.view,
resolve_target: Some(&view),
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color {
r: 0.,
g: 0.,
b: 0.,
a: 1.,
}),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
f(render_pass);
queue.submit(std::iter::once(encoder.finish()));
output.present();
Ok(())
}
}
/// Holds the view of the multisampled color texture used as render target.
struct Multisample {
    // View of the MSAA texture created in `Multisample::new`.
    view: wgpu::TextureView,
}
impl Multisample {
    /// Allocate a fresh multisampled color texture and keep its default view.
    fn new(
        device: &wgpu::Device,
        format: wgpu::TextureFormat,
        size: UVec2,
        sample_count: u32,
    ) -> Multisample {
        let extent = wgpu::Extent3d {
            width: size.x,
            height: size.y,
            depth_or_array_layers: 1,
        };
        let texture = device.create_texture(&wgpu::TextureDescriptor {
            label: Some("Multisample texture"),
            size: extent,
            mip_level_count: 1,
            sample_count,
            dimension: wgpu::TextureDimension::D2,
            format,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT,
            view_formats: &[],
        });
        Multisample {
            view: texture.create_view(&wgpu::TextureViewDescriptor::default()),
        }
    }
}