// minitracing/src/bin/rec.rs

use std::env::args;
use std::error::Error;
use std::io::{stdout, Write};
use std::path::{Path, PathBuf};
use std::sync::Arc;
use std::{fs, io};
use glam::{uvec2, UVec2};
use image::buffer::ConvertBuffer;
use raytracing3::present::{self, Presenter};
use raytracing3::scene::{load_envmap, Renderer, SceneParams};
/// Output resolution of every rendered frame, in pixels.
const SIZE: UVec2 = uvec2(1920, 1080);
/// Playback frame rate; also used to convert subframe indices to seconds
/// and to print a progress line once per second of output video.
const FRAME_RATE: u32 = 60;
/// Sphere count handed to `SceneParams::new`.
const N_SPHERES: u32 = 100;
/// Ray samples accumulated per pixel for each output frame
/// (one `render_frame` call per sample).
const RAYS_PER_PIXEL: u32 = 1024;
/// CLI entry point.
///
/// Usage: `rec <out_dir> [stop_frame | start_frame stop_frame]`
/// Renders the half-open frame range `start..stop` into `<out_dir>` as
/// `frameNNNNNN.webp` files, with defaults `0..5` when only a path is given.
fn main() {
    let path: PathBuf;
    let start_frame: u32;
    let stop_frame: u32;
    match args().collect::<Vec<_>>().as_slice() {
        // rec <dir> — render a short default clip (frames 0..5).
        [_, apath] => {
            path = apath.into();
            start_frame = 0;
            stop_frame = 5;
        }
        // rec <dir> <frames> — render frames 0..<frames>.
        [_, apath, aframes] => {
            path = apath.into();
            start_frame = 0;
            stop_frame = aframes.parse().expect("frame count must be an unsigned integer");
        }
        // rec <dir> <start> <stop> — render the half-open range <start>..<stop>.
        [_, apath, astart_frame, astop_frame] => {
            path = apath.into();
            start_frame = astart_frame.parse().expect("start frame must be an unsigned integer");
            stop_frame = astop_frame.parse().expect("stop frame must be an unsigned integer");
        }
        _ => {
            panic!("usage: rec <out_dir> [stop_frame | start_frame stop_frame]");
        }
    }
    // Guard the u32 subtraction below: start > stop would panic in debug
    // builds and silently wrap in release builds.
    assert!(
        start_frame <= stop_frame,
        "start frame ({start_frame}) must not exceed stop frame ({stop_frame})"
    );
    println!(
        "Rendering frames {start_frame} to {stop_frame} ({} total)",
        stop_frame - start_frame
    );
    // Create the output directory; one that already exists is fine.
    match fs::create_dir(&path) {
        Ok(_) => (),
        Err(err) if err.kind() == io::ErrorKind::AlreadyExists => (),
        Err(err) => panic!("failed to create the output directory: {err}"),
    }
    // One saver thread per core: WebP encoding is CPU-bound and would
    // otherwise stall the GPU render loop.
    let n_threads = std::thread::available_parallelism().map(|n| n.get()).unwrap_or(2);
    // Bounded channel applies back-pressure so mapped readback buffers don't
    // accumulate faster than the savers can encode them.
    let (img_sender, img_receiver) = async_channel::bounded::<Frame>(2 * n_threads);
    std::thread::scope(|s| {
        for _ in 0..n_threads {
            start_saver(s, &path, &img_receiver);
        }
        do_work(img_sender, start_frame, stop_frame);
    });
}
type Frame = (u32, Arc<wgpu::Buffer>);
/// Spawns one scoped worker that drains `img_receiver`, converting each mapped
/// readback buffer to RGB and writing it to `path` as `frameNNNNNN.webp`.
/// The thread exits once every sender is dropped and the channel is empty.
fn start_saver<'a>(
    scope: &'a std::thread::Scope<'a, '_>,
    path: &'a Path,
    img_receiver: &'a async_channel::Receiver<Frame>,
) {
    scope.spawn(move || loop {
        // `recv_blocking` errors only when the channel is closed and drained.
        let (frame, gpu_buffer) = match img_receiver.recv_blocking() {
            Ok(msg) => msg,
            Err(_) => break,
        };
        // Pull the mapped RGBA bytes out of the wgpu buffer into CPU memory.
        let pixels = gpu_buffer.slice(..).get_mapped_range().to_vec();
        let rgba = image::RgbaImage::from_raw(SIZE.x, SIZE.y, pixels)
            .expect("read failure!");
        // Strip the alpha channel before encoding.
        let rgb: image::RgbImage = rgba.convert();
        let out_path = path.join(format!("frame{frame:06}.webp"));
        rgb.save(out_path).expect("save failure!");
    });
}
/// Renders frames `start_frame..stop_frame` (half-open) on the GPU.
///
/// Each frame accumulates `RAYS_PER_PIXEL` subframes into an HDR texture, which
/// the presenter resolves into an 8-bit sRGB texture; that texture is copied
/// into a mappable buffer and, once the asynchronous map completes, forwarded
/// as a `Frame` through `img_sender` for a saver thread to encode.
fn do_work(img_sender: async_channel::Sender<Frame>, start_frame: u32, stop_frame: u32) {
    println!("Loading...");
    // Arc so each frame's map_async callback can hold its own sender handle.
    let img_sender = Arc::new(img_sender);
    let (device, queue) = pollster::block_on(init_gpu()).unwrap();
    let envmap = load_envmap(&device, &queue);
    // NOTE(review): empty submit — presumably flushes uploads staged by
    // `load_envmap` (e.g. via `Queue::write_texture`); confirm it is needed.
    queue.submit([]);
    let texsize = wgpu::Extent3d {
        width: SIZE.x,
        height: SIZE.y,
        depth_or_array_layers: 1,
    };
    // 8-bit sRGB output that gets copied to disk; accumulation happens in a
    // 16-bit float texture first.
    let output_format = wgpu::TextureFormat::Rgba8UnormSrgb;
    let hdr_format = wgpu::TextureFormat::Rgba16Float;
    let scene = SceneParams::new(N_SPHERES);
    let renderer = Renderer::new(&device, envmap);
    let presenter = Presenter::new(&device, output_format);
    println!("Rendering...");
    for frame in start_frame..stop_frame {
        // Progress line once per second of output video.
        if frame % FRAME_RATE == 0 {
            println!("{frame}");
        }
        // Final 8-bit frame; COPY_SRC so it can be read back into a buffer.
        let output = device.create_texture(&wgpu::TextureDescriptor {
            label: None,
            size: texsize,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: output_format,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::COPY_SRC,
            view_formats: &[],
        });
        let view = output.create_view(&wgpu::TextureViewDescriptor::default());
        // HDR accumulation target: rendered into by the ray tracer, then
        // sampled by the presenter (hence TEXTURE_BINDING).
        let hdr = device.create_texture(&wgpu::TextureDescriptor {
            label: None,
            size: texsize,
            mip_level_count: 1,
            sample_count: 1,
            dimension: wgpu::TextureDimension::D2,
            format: hdr_format,
            usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
            view_formats: &[],
        });
        let hdr = hdr.create_view(&wgpu::TextureViewDescriptor::default());
        let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
        // Inner scope so the render pass is ended (dropped) before the
        // presenter records its own pass on the same encoder.
        {
            let mut render_pass = renderer.prepare(&mut encoder, &hdr);
            // Accumulate RAYS_PER_PIXEL samples; each subframe gets a globally
            // unique index so time/seed advance monotonically across frames.
            for subframe in 0..RAYS_PER_PIXEL {
                let subframe = frame * RAYS_PER_PIXEL + subframe;
                let time = subframe as f32 / (RAYS_PER_PIXEL * FRAME_RATE) as f32;
                renderer.render_frame(&device, &mut render_pass, SIZE, &scene, time, subframe);
            }
        }
        // Resolve the HDR accumulation into the 8-bit output; `divisor`
        // averages over the sample count.
        presenter.render(
            &device,
            &mut encoder,
            &hdr,
            &view,
            present::Params {
                divisor: RAYS_PER_PIXEL as f32,
            },
        );
        // CPU-readable staging buffer: 4 bytes per RGBA pixel.
        let buffer = device.create_buffer(&wgpu::BufferDescriptor {
            label: None,
            size: (4 * SIZE.x * SIZE.y) as u64,
            usage: wgpu::BufferUsages::COPY_DST | wgpu::BufferUsages::MAP_READ,
            mapped_at_creation: false,
        });
        encoder.copy_texture_to_buffer(
            wgpu::ImageCopyTexture {
                texture: &output,
                mip_level: 0,
                origin: wgpu::Origin3d::ZERO,
                aspect: wgpu::TextureAspect::All,
            },
            wgpu::ImageCopyBuffer {
                buffer: &buffer,
                layout: wgpu::ImageDataLayout {
                    offset: 0,
                    // 4 * 1920 = 7680 bytes per row — a multiple of wgpu's
                    // 256-byte row alignment for copies, so no padding needed.
                    bytes_per_row: Some(4 * SIZE.x),
                    rows_per_image: Some(SIZE.y),
                },
            },
            texsize,
        );
        queue.submit([encoder.finish()]);
        let buffer = Arc::new(buffer);
        let img_sender = Arc::clone(&img_sender);
        // Once the map completes, hand the buffer to a saver thread.
        // NOTE(review): `send_blocking` runs inside the map callback; if the
        // bounded channel is full this blocks whichever thread drives the
        // poll — relies on the savers keeping up. Verify this cannot deadlock.
        Arc::clone(&buffer)
            .slice(..)
            .map_async(wgpu::MapMode::Read, move |res| {
                res.unwrap();
                img_sender.send_blocking((frame, buffer)).unwrap();
            });
        print!(".");
        stdout().flush().unwrap();
    }
    println!("{stop_frame}");
    // Block until all submitted GPU work finishes so any remaining map_async
    // callbacks fire before the sender is dropped (which closes the channel).
    device.poll(wgpu::Maintain::Wait);
    println!("Done!");
}
/// Creates a headless wgpu device/queue pair on a primary-backend adapter.
///
/// Requests the `PUSH_CONSTANTS` feature with a 128-byte push-constant limit
/// (presumably required by the render pipelines — TODO confirm).
///
/// # Errors
/// Returns an error when no compatible adapter is found or when device
/// creation fails, rather than panicking: the function already declares
/// `Result`, so internal `unwrap()`s just discarded that contract.
async fn init_gpu() -> Result<(wgpu::Device, wgpu::Queue), Box<dyn Error>> {
    let instance = wgpu::Instance::new(wgpu::InstanceDescriptor {
        backends: wgpu::Backends::PRIMARY,
        ..Default::default()
    });
    // `request_adapter` yields None when no compatible GPU exists; surface
    // that as an error instead of unwrapping.
    let adapter = instance
        .request_adapter(&wgpu::RequestAdapterOptions {
            power_preference: wgpu::PowerPreference::default(),
            compatible_surface: None, // headless: render targets are textures, not a window
            force_fallback_adapter: false,
        })
        .await
        .ok_or("no compatible GPU adapter found")?;
    let (device, queue) = adapter
        .request_device(
            &wgpu::DeviceDescriptor {
                label: None,
                required_features: wgpu::Features::PUSH_CONSTANTS,
                required_limits: wgpu::Limits {
                    max_push_constant_size: 128,
                    ..Default::default()
                },
                memory_hints: Default::default(),
            },
            None, // no API trace path
        )
        .await?; // RequestDeviceError implements Error, so `?` boxes it
    Ok((device, queue))
}