Use WebGPU to present the image

It is just *insanely* verbose
This commit is contained in:
numzero 2024-12-08 20:30:19 +03:00
parent 2b95e32b13
commit b27472fbb3
2 changed files with 265 additions and 35 deletions

View File

@ -1,25 +1,31 @@
use glam::*;
use glam::{vec3, Vec3};
use image::RgbImage;
use refraction::mesh_loader::load_mesh;
use refraction::mesh_tracer::{trace_to_mesh, Mesh};
use show_image::event::{ElementState, VirtualKeyCode, WindowEvent};
use show_image::{exit, ImageInfo, ImageView, WindowOptions};
use std::env;
use std::error::Error;
use std::f32::consts::PI;
use std::fs::File;
use std::io::BufReader;
use std::process::exit;
use std::sync::atomic::{AtomicUsize, Ordering::Relaxed};
use winit::keyboard::{KeyCode, PhysicalKey};
use winit::{
event::*,
event_loop::EventLoop,
window::{Window, WindowBuilder},
};
// Framebuffer dimensions.
// NOTE(diff view): both the removed signed and the added unsigned definitions
// appear below; after this commit only the `u32` pair exists (they feed
// directly into `wgpu::Extent3d`, whose fields are `u32`).
const W: i32 = 320;
const H: i32 = 240;
const W: u32 = 320;
const H: u32 = 240;
// Simple 8-bit RGB color triple.
#[derive(Copy, Clone)]
struct Color(u8, u8, u8);
// CPU-side framebuffer the ray tracer writes into, later uploaded to a GPU
// texture via `queue.write_texture`.
// NOTE(diff view): both the removed `i32` fields and the added `u32` fields
// appear below; only the `u32` pair exists after this commit.
struct Image {
w: i32,
h: i32,
w: u32,
h: u32,
// Pixel bytes, row-major. RGBA (4 bytes/pixel) after this commit — see
// `put_pixel`, which forces alpha to 255.
data: Vec<u8>,
}
@ -28,14 +34,15 @@ impl Image {
// (tail of an accessor returning the raw pixel bytes; its start lies outside
// this diff hunk)
self.data.as_slice()
}
// OLD signature (removed): signed coordinates, 3-byte RGB pixels. Note the
// old bounds check used `y > self.h` instead of `>=` — an off-by-one that
// allowed writing one row past the image.
fn put_pixel(&mut self, x: i32, y: i32, color: Color) {
if x < 0 || x >= self.w || y < 0 || y > self.h {
// NEW signature (added): unsigned coordinates make the negative checks
// unnecessary, and the y bound is now correctly exclusive.
fn put_pixel(&mut self, x: u32, y: u32, color: Color) {
if x >= self.w || y >= self.h {
return;
}
// Row-major byte index: 3 bytes/pixel before the commit, 4 (RGBA) after.
let index = 3 * (x + self.w * y) as usize;
let index = 4 * (x + self.w * y) as usize;
self.data[index] = color.0;
self.data[index + 1] = color.1;
self.data[index + 2] = color.2;
// Alpha channel is always fully opaque.
self.data[index + 3] = 255;
}
}
@ -68,7 +75,7 @@ fn render(mesh: &Mesh, texture: &RgbImage, camera: impl Fn(Vec2) -> (Vec3, Vec3)
// Allocate the output image. The buffer grows from 3 bytes/pixel (RGB) to
// 4 bytes/pixel (RGBA) in this commit, matching the GPU texture format.
let mut img = Image {
w: W,
h: H,
data: vec![0; (3 * W * H) as usize],
data: vec![0; (4 * W * H) as usize],
};
let img_size = vec2(W as f32, H as f32);
// Per-pixel tracing loop; its body continues beyond this diff hunk.
for y in 0..H {
@ -116,7 +123,6 @@ fn test_projs() {
// Index into PROJS, toggled by the Tab key. Atomic presumably because the
// (old) event handler could run on a different thread — TODO confirm for the
// new winit event loop.
static PROJ_INDEX: AtomicUsize = AtomicUsize::new(0);
// Available camera projections: perspective and orthographic.
static PROJS: [fn(dist: f32, off: Vec2) -> (Vec3, Vec3); 2] = [persp, ortho];
// Removed by this commit: main is no longer a show_image entry point.
#[show_image::main]
// Entry point: load a mesh (argv[1]) and a texture (argv[2]), then present
// CPU-ray-traced frames in a window. This commit replaces the show_image
// presentation path with a hand-rolled winit + WebGPU pipeline.
fn main() -> Result<(), Box<dyn Error>> {
let args: Vec<String> = env::args().collect();
if args.len() != 3 {
// (diff hunk boundary — the usage message and mesh-file opening are elided)
@ -129,29 +135,227 @@ fn main() -> Result<(), Box<dyn Error>> {
load_mesh(&mut f)?
};
let texture = image::io::Reader::open(&args[2])?.decode()?.into_rgb8();
// ---- REMOVED by this commit: show_image-based window and render loop ----
let window = show_image::create_window("Raytracing", WindowOptions::default())?;
window.add_event_handler(|_wnd, ev, _ctl| {
if let WindowEvent::KeyboardInput(ev) = ev {
if ev.input.state != ElementState::Pressed {
return;
}
// Tab cycles between the perspective and orthographic projections.
if let Some(VirtualKeyCode::Tab) = ev.input.key_code {
PROJ_INDEX.store((PROJ_INDEX.load(Relaxed) + 1) % PROJS.len(), Relaxed);
}
}
})?;
loop {
for phi in 0..360 {
let proj = PROJS[PROJ_INDEX.load(Relaxed)];
let m_view = ypr_to_mat(vec3((135.0 + phi as f32) * PI / 180.0, -30.0 * PI / 180.0, 0.0f32));
let m_camera = m_view.transpose();
let img = render(mesh.as_slice(), &texture, |off| {
let (base, ray) = proj(40., 20. * off);
(m_camera * base, m_camera * ray)
});
let image = ImageView::new(ImageInfo::rgb8(W as u32, H as u32), img.data());
window.set_image("image", image)?;
}
}
// ---- ADDED by this commit: winit window + WebGPU presentation ----
let event_loop = EventLoop::new().unwrap();
let window = &WindowBuilder::new()
.with_title("Refraction: Textured")
.build(&event_loop)
.unwrap();
let (device, queue, surface) = pollster::block_on(init_gpu(window))?;
// GPU texture that receives the CPU-traced RGBA image every frame
// (COPY_DST so write_texture can fill it, TEXTURE_BINDING so the fragment
// shader can sample it).
let traced_size = wgpu::Extent3d {
width: W,
height: H,
depth_or_array_layers: 1,
};
let traced_texture = device.create_texture(&wgpu::TextureDescriptor {
size: traced_size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
label: Some("tracing output"),
view_formats: &[],
});
let traced_view = traced_texture.create_view(&wgpu::TextureViewDescriptor::default());
// Nearest-neighbour, clamped sampler — the traced image is shown unfiltered.
let traced_sampler = device.create_sampler(&wgpu::SamplerDescriptor {
address_mode_u: wgpu::AddressMode::ClampToEdge,
address_mode_v: wgpu::AddressMode::ClampToEdge,
address_mode_w: wgpu::AddressMode::ClampToEdge,
mag_filter: wgpu::FilterMode::Nearest,
min_filter: wgpu::FilterMode::Nearest,
mipmap_filter: wgpu::FilterMode::Nearest,
..Default::default()
});
// Bind group layout: binding 0 = the traced texture, binding 1 = its
// sampler, both visible only to the fragment stage (matches present.wgsl).
let traced_bind_group_layout = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
multisampled: false,
view_dimension: wgpu::TextureViewDimension::D2,
sample_type: wgpu::TextureSampleType::Float { filterable: true },
},
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
],
label: Some("traced_bind_group_layout"),
});
let traced_bind_group = device.create_bind_group(&wgpu::BindGroupDescriptor {
layout: &traced_bind_group_layout,
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::TextureView(&traced_view),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::Sampler(&traced_sampler),
},
],
label: Some("traced_bind_group"),
});
// Fullscreen-quad pipeline: vertex/fragment entry points live in
// present.wgsl; no vertex buffers (positions derived from vertex_index).
let present_shader = device.create_shader_module(wgpu::include_wgsl!("present.wgsl"));
let render_pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: Some("Render Pipeline Layout"),
bind_group_layouts: &[&traced_bind_group_layout],
push_constant_ranges: &[],
});
let render_pipeline = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: Some("Render Pipeline"),
layout: Some(&render_pipeline_layout),
vertex: wgpu::VertexState {
module: &present_shader,
entry_point: "vs_main",
buffers: &[],
compilation_options: wgpu::PipelineCompilationOptions::default(),
},
fragment: Some(wgpu::FragmentState {
module: &present_shader,
// NOTE(review): hard-coded Bgra8UnormSrgb here and in the surface
// configuration below — assumes the surface supports it; confirm
// against surface.get_capabilities().
entry_point: "fs_main",
targets: &[Some(wgpu::ColorTargetState {
format: wgpu::TextureFormat::Bgra8UnormSrgb,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
compilation_options: wgpu::PipelineCompilationOptions::default(),
}),
primitive: wgpu::PrimitiveState {
// 4 vertices as a strip form the fullscreen quad.
topology: wgpu::PrimitiveTopology::TriangleStrip,
..Default::default()
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
multiview: None,
cache: None,
});
// Can't render until the first Resized event has configured the surface.
let mut surface_configured = false;
// Animation angle in degrees, advanced once per redraw.
let mut phi = 0;
event_loop.run(move |event, control_flow| match event {
Event::WindowEvent { ref event, window_id } if window_id == window.id() => match event {
WindowEvent::KeyboardInput {
device_id: _,
event,
is_synthetic: _,
} => {
// Tab cycles between the perspective and orthographic projections.
// NOTE(review): unlike the removed handler, this does not check
// for key *press* state, so press and release each advance the
// index — net effect is skipping every other projection.
if event.physical_key == PhysicalKey::Code(KeyCode::Tab) {
PROJ_INDEX.store((PROJ_INDEX.load(Relaxed) + 1) % PROJS.len(), Relaxed);
}
}
WindowEvent::CloseRequested => control_flow.exit(),
WindowEvent::Resized(physical_size) => {
// (Re)configure the swapchain to the new window size.
surface.configure(
&device,
&wgpu::SurfaceConfiguration {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_DST,
format: wgpu::TextureFormat::Bgra8UnormSrgb,
width: physical_size.width,
height: physical_size.height,
present_mode: wgpu::PresentMode::Fifo,
alpha_mode: wgpu::CompositeAlphaMode::Auto,
view_formats: vec![],
desired_maximum_frame_latency: 2,
},
);
surface_configured = true;
}
WindowEvent::RedrawRequested => {
// Immediately request the next frame so the animation runs
// continuously.
window.request_redraw();
if !surface_configured {
return;
}
// Trace this frame on the CPU with the current projection and a
// camera orbiting at `phi` degrees.
let proj = PROJS[PROJ_INDEX.load(Relaxed)];
let m_view = ypr_to_mat(vec3((135.0 + phi as f32) * PI / 180.0, -30.0 * PI / 180.0, 0.0f32));
let m_camera = m_view.transpose();
let img = render(mesh.as_slice(), &texture, |off| {
let (base, ray) = proj(40., 20. * off);
(m_camera * base, m_camera * ray)
});
phi += 1;
phi %= 360;
// Upload the traced RGBA pixels into the GPU texture.
queue.write_texture(
wgpu::ImageCopyTexture {
texture: &traced_texture,
mip_level: 0,
origin: wgpu::Origin3d::ZERO,
aspect: wgpu::TextureAspect::All,
},
&img.data,
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(4 * img.w),
rows_per_image: Some(img.h),
},
traced_size,
);
// Draw the fullscreen quad sampling that texture, then present.
let output = surface.get_current_texture().unwrap();
let view = output.texture.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor {
label: Some("Render CommandEncoder"),
});
let mut render_pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: Some("RenderPass"),
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &view,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
render_pass.set_pipeline(&render_pipeline);
render_pass.set_bind_group(0, &traced_bind_group, &[]);
// 4-vertex triangle strip = fullscreen quad (see present.wgsl).
render_pass.draw(0..4, 0..1);
// End the pass before finishing the encoder.
drop(render_pass);
queue.submit(std::iter::once(encoder.finish()));
output.present();
}
_ => {}
},
_ => {}
})?;
Ok(())
}
/// Initialize WebGPU for the given window: create an instance, a surface,
/// and request a device + queue from a suitable adapter.
///
/// Returns the device, its command queue, and the window surface, or an
/// error if no compatible adapter/device is available or surface creation
/// fails.
async fn init_gpu(wnd: &Window) -> Result<(wgpu::Device, wgpu::Queue, wgpu::Surface), Box<dyn Error>> {
    let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
        backends: wgpu::Backends::PRIMARY,
        ..Default::default()
    });
    let surface = instance.create_surface(wnd)?;
    // Ask for an adapter compatible with our surface: passing `None` here
    // could select a GPU that cannot present to this window at all.
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::default(),
            compatible_surface: Some(&surface),
            force_fallback_adapter: false,
        })
        .await
        // request_adapter returns Option; propagate as an error instead of
        // panicking — main() already handles Box<dyn Error>.
        .ok_or("no compatible GPU adapter found")?;
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                required_features: wgpu::Features::empty(),
                required_limits: wgpu::Limits::default(),
                memory_hints: Default::default(),
            },
            None, // no API trace output
        )
        .await?; // RequestDeviceError implements std::error::Error
    Ok((device, queue, surface))
}

View File

@ -0,0 +1,26 @@
// Data interpolated from the vertex stage to the fragment stage.
struct VertexOutput {
// Vertex position in clip space.
@builtin(position) clip_position: vec4<f32>,
// Quad-relative coordinates in [0, 1], used as texture coordinates
// when sampling the traced image in fs_main.
@location(0) rel_coords: vec2<f32>,
};
// Fullscreen-quad vertex stage: each corner of a 4-vertex triangle strip is
// derived purely from the vertex index, so no vertex buffer is required.
@vertex
fn vs_main(
    @builtin(vertex_index) in_vertex_index: u32,
) -> VertexOutput {
    // Bit 0 picks the horizontal corner, bit 1 the vertical one.
    let u = f32(in_vertex_index & 1u);
    let v = f32((in_vertex_index & 2u) >> 1);
    var result: VertexOutput;
    // Map [0,1]^2 to clip space, flipping y so v grows downward on screen.
    result.clip_position = vec4<f32>(2.0 * u - 1.0, 1.0 - 2.0 * v, 0.0, 1.0);
    result.rel_coords = vec2(u, v);
    return result;
}
// The ray-traced image uploaded each frame from the CPU side.
@group(0) @binding(0)
var t_traced: texture_2d<f32>;
// Its sampler (configured nearest-neighbour, clamp-to-edge on the CPU side).
@group(0) @binding(1)
var s_traced: sampler;
// Fragment stage: sample the traced texture at the quad-relative coordinates.
@fragment
fn fs_main(in: VertexOutput) -> @location(0) vec4<f32> {
return textureSample(t_traced, s_traced, in.rel_coords);
}