Compare commits

..

No commits in common. "daylight-glare" and "master" have entirely different histories.

9 changed files with 191 additions and 351 deletions

View File

@ -1,9 +1,8 @@
use std::error::Error;
use glam::{uvec2, vec2};
use raytracing3::glare::Glarer;
use glam::uvec2;
use raytracing3::present::{self, Presenter};
use raytracing3::scene::{Renderer, SceneParams};
use raytracing3::scene::{load_envmap, Renderer, SceneParams};
use winit::{
event::{Event, WindowEvent},
event_loop::EventLoop,
@ -22,13 +21,13 @@ fn main() {
.unwrap();
let (device, queue, surface) = pollster::block_on(init_gpu(window)).unwrap();
let envmap = load_envmap(&device, &queue);
queue.submit([]);
let output_format = wgpu::TextureFormat::Bgra8UnormSrgb;
let hdr_format = wgpu::TextureFormat::Rgba16Float;
let scene = SceneParams::new(N_SPHERES);
let renderer = Renderer::new(&device);
let glarer = Glarer::new(&device, hdr_format);
let renderer = Renderer::new(&device, envmap);
let presenter = Presenter::new(&device, output_format);
let mut frame = 0;
@ -74,13 +73,13 @@ fn main() {
usage: wgpu::TextureUsages::RENDER_ATTACHMENT | wgpu::TextureUsages::TEXTURE_BINDING,
view_formats: &[],
});
let hdr1v = hdr.create_view(&wgpu::TextureViewDescriptor::default());
let hdr = hdr.create_view(&wgpu::TextureViewDescriptor::default());
let mut encoder = device.create_command_encoder(&wgpu::CommandEncoderDescriptor { label: None });
{
let mut render_pass = renderer.prepare(&mut encoder, &hdr1v);
let time = frame as f32 / (60. * RAYS_PER_PIXEL as f32);
let mut render_pass = renderer.prepare(&mut encoder, &hdr);
for _ in 0..RAYS_PER_PIXEL {
frame += 1;
let time = frame as f32 / (60. * RAYS_PER_PIXEL as f32);
renderer.render_frame(
&device,
&mut render_pass,
@ -91,12 +90,10 @@ fn main() {
);
}
}
let sigma = vec2(size.width as f32, size.height as f32).length();
glarer.render(&device, &mut encoder, &hdr, raytracing3::glare::Params { sigma });
presenter.render(
&device,
&mut encoder,
&hdr1v,
&hdr,
&view,
present::Params {
divisor: RAYS_PER_PIXEL as f32,

View File

@ -8,7 +8,7 @@ use std::{fs, io};
use glam::{uvec2, UVec2};
use image::buffer::ConvertBuffer;
use raytracing3::present::{self, Presenter};
use raytracing3::scene::{Renderer, SceneParams};
use raytracing3::scene::{load_envmap, Renderer, SceneParams};
const SIZE: UVec2 = uvec2(1920, 1080);
const FRAME_RATE: u32 = 60;
@ -82,6 +82,7 @@ fn do_work(img_sender: async_channel::Sender<Frame>, start_frame: u32, stop_fram
let img_sender = Arc::new(img_sender);
let (device, queue) = pollster::block_on(init_gpu()).unwrap();
let envmap = load_envmap(&device, &queue);
queue.submit([]);
let texsize = wgpu::Extent3d {
@ -94,7 +95,7 @@ fn do_work(img_sender: async_channel::Sender<Frame>, start_frame: u32, stop_fram
let hdr_format = wgpu::TextureFormat::Rgba16Float;
let scene = SceneParams::new(N_SPHERES);
let renderer = Renderer::new(&device);
let renderer = Renderer::new(&device, envmap);
let presenter = Presenter::new(&device, output_format);
println!("Rendering...");

View File

@ -1,222 +0,0 @@
use std::mem::offset_of;
use bytemuck::{bytes_of, Pod, Zeroable};
use glam::{vec2, UVec2, Vec2};
use wgpu::util::DeviceExt;
/// One corner of the full-screen quad.
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
struct Vertex {
    /// Screen position in [0, 1]² (the vertex shader maps it to clip space,
    /// flipping Y).
    pub screen: Vec2,
}
/// Uniform parameters for the glare blur; layout must match `Params`
/// in glare.wgsl.
#[derive(Debug, Clone, Copy, Pod, Zeroable)]
#[repr(C)]
pub struct Params {
    pub sigma: f32, // in pixels; the shader rescales it by 1/1e4 before use
}
/// Two-pass separable glare filter: a horizontal blur into a scratch
/// texture, then a vertical blur onto the target.
pub struct Glarer {
    // Full-screen quad (4 vertices, triangle strip).
    view_buf: wgpu::Buffer,
    // Color format shared by the source, scratch, and target textures.
    format: wgpu::TextureFormat,
    // Horizontal pass; REPLACE blend into the scratch texture.
    pipeline_h: wgpu::RenderPipeline,
    // Vertical pass; additive (One + One) blend onto the target.
    pipeline_v: wgpu::RenderPipeline,
}
// WGSL module with the shared vertex shader and the `pass_h`/`pass_v`
// fragment entry points.
static SHADER: &str = include_str!("glare.wgsl");
impl Glarer {
pub fn new(device: &wgpu::Device, format: wgpu::TextureFormat) -> Self {
let view_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytes_of(&[(0., 0.), (1., 0.), (0., 1.), (1., 1.)].map(|(x, y)| Vertex { screen: vec2(x, y) })),
usage: wgpu::BufferUsages::VERTEX,
});
let shader = device.create_shader_module(wgpu::ShaderModuleDescriptor {
label: None,
source: wgpu::ShaderSource::Wgsl(SHADER.into()),
});
let vertex_state = wgpu::VertexState {
module: &shader,
entry_point: None,
compilation_options: wgpu::PipelineCompilationOptions::default(),
buffers: &[wgpu::VertexBufferLayout {
array_stride: size_of::<Vertex>() as u64,
step_mode: wgpu::VertexStepMode::Vertex,
attributes: &[wgpu::VertexAttribute {
shader_location: 0,
offset: offset_of!(Vertex, screen) as u64,
format: wgpu::VertexFormat::Float32x2,
}],
}],
};
let pipeline1 = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: None,
layout: None,
vertex: vertex_state.clone(),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
..Default::default()
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("pass_h"),
compilation_options: wgpu::PipelineCompilationOptions::default(),
targets: &[Some(wgpu::ColorTargetState {
format,
blend: Some(wgpu::BlendState::REPLACE),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
multiview: None,
cache: None,
});
let pipeline2 = device.create_render_pipeline(&wgpu::RenderPipelineDescriptor {
label: None,
layout: None,
vertex: vertex_state.clone(),
primitive: wgpu::PrimitiveState {
topology: wgpu::PrimitiveTopology::TriangleStrip,
..Default::default()
},
depth_stencil: None,
multisample: wgpu::MultisampleState::default(),
fragment: Some(wgpu::FragmentState {
module: &shader,
entry_point: Some("pass_v"),
compilation_options: wgpu::PipelineCompilationOptions::default(),
targets: &[Some(wgpu::ColorTargetState {
format,
blend: Some(wgpu::BlendState {
color: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
alpha: wgpu::BlendComponent {
src_factor: wgpu::BlendFactor::One,
dst_factor: wgpu::BlendFactor::One,
operation: wgpu::BlendOperation::Add,
},
}),
write_mask: wgpu::ColorWrites::ALL,
})],
}),
multiview: None,
cache: None,
});
Self {
view_buf,
format,
pipeline_h: pipeline1,
pipeline_v: pipeline2,
}
}
pub fn render(
&self,
device: &wgpu::Device,
encoder: &mut wgpu::CommandEncoder,
source: &wgpu::Texture,
// target: &wgpu::Texture,
params: Params,
) {
let target = source;
let size = target.size();
// assert_eq!(source.size(), size);
let intermediate = device.create_texture(&wgpu::TextureDescriptor {
label: None,
size,
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: self.format,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::RENDER_ATTACHMENT,
view_formats: &[],
});
let intermediate = intermediate.create_view(&wgpu::TextureViewDescriptor::default());
let params_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytes_of(&params),
usage: wgpu::BufferUsages::UNIFORM,
});
let source = &source.create_view(&wgpu::TextureViewDescriptor::default());
let target = &target.create_view(&wgpu::TextureViewDescriptor::default());
{
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: None,
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: &intermediate,
resolve_target: None,
ops: wgpu::Operations {
load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
let bindings = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &self.pipeline_h.get_bind_group_layout(0),
entries: &[
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(source),
},
wgpu::BindGroupEntry {
binding: 2,
resource: params_buf.as_entire_binding(),
},
],
});
pass.set_pipeline(&self.pipeline_h);
pass.set_vertex_buffer(0, self.view_buf.slice(..));
pass.set_bind_group(0, &bindings, &[]);
pass.draw(0..4, 0..1);
}
{
let mut pass = encoder.begin_render_pass(&wgpu::RenderPassDescriptor {
label: None,
color_attachments: &[Some(wgpu::RenderPassColorAttachment {
view: target,
resolve_target: None,
ops: wgpu::Operations {
// load: wgpu::LoadOp::Load,
load: wgpu::LoadOp::Clear(wgpu::Color::TRANSPARENT),
store: wgpu::StoreOp::Store,
},
})],
depth_stencil_attachment: None,
occlusion_query_set: None,
timestamp_writes: None,
});
let params_buf = device.create_buffer_init(&wgpu::util::BufferInitDescriptor {
label: None,
contents: bytes_of(&params),
usage: wgpu::BufferUsages::UNIFORM,
});
let bindings = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &self.pipeline_v.get_bind_group_layout(0),
entries: &[
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(&intermediate),
},
wgpu::BindGroupEntry {
binding: 2,
resource: params_buf.as_entire_binding(),
},
],
});
pass.set_pipeline(&self.pipeline_v);
pass.set_vertex_buffer(0, self.view_buf.slice(..));
pass.set_bind_group(0, &bindings, &[]);
pass.draw(0..4, 0..1);
}
}
}

View File

@ -1,67 +0,0 @@
// Uniform blur parameters; layout must match the Rust `Params` struct.
struct Params {
    sigma: f32,
}
// Per-vertex input: full-screen-quad corner in [0,1]² screen coordinates.
struct Vertex {
    @location(0) screen: vec2f,
}
// Vertex -> fragment interface: texture coordinate plus clip-space position.
struct Varying {
    @location(0) tex: vec2f,
    @builtin(position) v: vec4f,
}
// Input texture for the current pass: the source image for pass_h, the
// intermediate (horizontally blurred) image for pass_v.
@group(0) @binding(1) var tex: texture_2d<f32>;
@group(0) @binding(2) var<uniform> params: Params;
// Full-screen quad vertex shader: forwards the [0,1]² screen coordinate as
// the texture coordinate and maps it to clip space, flipping Y.
@vertex
fn on_vertex(in: Vertex) -> Varying {
    let clip = vec2(in.screen.x * 2. - 1., -(in.screen.y * 2. - 1.));
    return Varying(in.screen, vec4(clip, 0., 1.));
}
// Half-width of the blur window, in pixels (each pass sums ±CUTOFF texels).
const CUTOFF: i32 = 50;
// Horizontal blur: weighted sum over a window of ±CUTOFF texels on the same
// row, clamped to the texture edge. Alpha is set to params.sigma
// (presumably a debug channel — confirm).
@fragment
fn pass_h(in: Varying) -> @location(0) vec4f {
    let dims = vec2i(textureDimensions(tex));
    let center = vec2i(floor(in.tex * vec2f(dims)));
    let lo = max(0, center.x - CUTOFF);
    let hi = min(dims.x, center.x + CUTOFF + 1);
    var acc = vec4(0.);
    for (var x = lo; x < hi; x++) {
        let texel = textureLoad(tex, vec2i(x, center.y), 0);
        // A NaN component fails every comparison, so this skips NaN texels.
        if (all(texel >= vec4(0.))) {
            acc += weight(x - center.x) * texel;
        }
    }
    return vec4(acc.xyz, params.sigma);
}
// Vertical blur: weighted sum over a window of ±CUTOFF texels in the same
// column, clamped to the texture edge. Alpha is set to params.sigma
// (presumably a debug channel — confirm).
@fragment
fn pass_v(in: Varying) -> @location(0) vec4f {
    let dims = vec2i(textureDimensions(tex));
    let center = vec2i(floor(in.tex * vec2f(dims)));
    let lo = max(0, center.y - CUTOFF);
    let hi = min(dims.y, center.y + CUTOFF + 1);
    var acc = vec4(0.);
    for (var y = lo; y < hi; y++) {
        let texel = textureLoad(tex, vec2i(center.x, y), 0);
        // A NaN component fails every comparison, so this skips NaN texels.
        if (all(texel >= vec4(0.))) {
            acc += weight(y - center.y) * texel;
        }
    }
    return vec4(acc.xyz, params.sigma);
}
// Lorentzian falloff 1 / (1 + (dist/scale)²), with the scale derived from
// params.sigma divided by 1e4.
fn weight(dist: i32) -> f32 {
    let scale = params.sigma / 1e4;
    let x = f32(dist) / scale;
    return 1. / (1. + x * x);
}

View File

@ -1,5 +1,4 @@
pub mod anim;
pub mod glare;
pub mod perlin;
pub mod present;
pub mod scene;

View File

@ -24,7 +24,7 @@ fn on_vertex(in: Vertex) -> Varying {
@fragment
fn on_fragment(in: Varying) -> @location(0) vec4f {
let pixel = .1 * textureSample(tex, smp, in.tex) / params.divisor;
let pixel = textureSample(tex, smp, in.tex) / params.divisor;
return vec4(rational_tone_map(pixel.xyz), 1.0);
}

View File

@ -1,10 +1,9 @@
use crate::anim::{self, SphereParams};
use crate::trace::{self, Tracer, TracerData};
use crate::Sphere;
use crate::trace::{self, Tracer, TracerData, TracerEnv};
use glam::{mat3, uvec2, vec3, UVec2, Vec3};
use image::ImageReader;
use std::f32::consts::PI;
#[derive(Debug, Clone, Copy)]
struct CamLoc {
eye: Vec3,
forward: Vec3,
@ -34,39 +33,47 @@ const CAMERA_LAG: f32 = 0.03;
pub struct Renderer {
tracer: Tracer,
env: TracerEnv,
}
impl Renderer {
pub fn new(device: &wgpu::Device) -> Self {
pub fn new(device: &wgpu::Device, env: wgpu::TextureView) -> Self {
let hdr_format = wgpu::TextureFormat::Rgba16Float;
let tracer = Tracer::new(&device, hdr_format);
Self { tracer }
let env = TracerEnv::new(&device, &tracer, &env);
Self { tracer, env }
}
}
pub struct SceneParams {
spheres: Vec<Sphere>,
camera: CamLoc,
pub spheres: Vec<SphereParams>,
pub camera: SphereParams,
pub target: SphereParams,
}
impl SceneParams {
pub fn new(n_spheres: u32) -> Self {
let mut rng = rand_pcg::Pcg32::new(42, 0);
const R: f32 = 100.;
let sphere = Sphere {
center: vec3(0., 0., -R),
radius: R,
emit_color: vec3(0., 0., 0.),
reflect_color: Vec3::splat(0.3),
glossiness: 0.,
let spheres: Vec<_> = {
let distr = anim::distr();
(0..n_spheres).map(|_| distr(&mut rng)).collect()
};
let spheres = vec![sphere];
let camera = CamLoc {
eye: vec3(-1., -1., 1.),
forward: vec3(1., 1., 0.).normalize(),
right: vec3(1., -1., 0.).normalize(),
let camera = {
let mut p = anim::distr()(&mut rng);
p.amplitudes *= 2.0;
p.frequencies *= 0.1;
p
};
Self { spheres, camera }
let target = {
let mut p = spheres[0];
p.phases -= 2. * PI * CAMERA_LAG * p.frequencies;
p
};
Self {
spheres,
camera,
target,
}
}
}
@ -96,24 +103,104 @@ impl Renderer {
time: f32,
seed: u32,
) {
let target = scene.target.to_sphere(time).center;
let eye = scene.camera.to_sphere(time).center;
let right = scene.camera.deriv(time);
let forward = target - eye;
let viewport = make_viewport(size.x, size.y);
let location = convert_location(scene.camera);
let data = TracerData::new(&device, &self.tracer, &scene.spheres);
let location = convert_location(CamLoc { eye, forward, right });
let spheres: Vec<_> = scene.spheres.iter().map(|p| p.to_sphere(time)).collect();
let data = TracerData::new(&device, &self.tracer, &spheres);
self.tracer.render(
&mut render_pass.0,
&data,
&self.env,
trace::Params {
max_reflections: 3,
min_strength: 0.1,
sphere_count: scene.spheres.len() as u32,
sphere_count: N_SPHERES,
seed,
},
viewport,
trace::Aperture {
radius: 0.0003,
focal_distance: std::f32::INFINITY,
glare_strength: 0.1,
glare_radius: 0.1,
},
location,
);
}
}
pub fn load_envmap(device: &wgpu::Device, queue: &wgpu::Queue) -> wgpu::TextureView {
let imgs = std::thread::scope(|s| {
[0, 1, 2, 3, 4, 5]
.map(|face| {
s.spawn(move || {
let img = ImageReader::open(format!("textures/env{face}.webp"))
.unwrap()
.with_guessed_format()
.unwrap()
.decode()
.unwrap();
img.to_rgba8()
})
})
.map(|t| t.join().unwrap())
});
let size = imgs[0].width();
for img in &imgs {
assert!(img.width() == size);
assert!(img.height() == size);
}
let texture = device.create_texture(&wgpu::TextureDescriptor {
label: None,
size: wgpu::Extent3d {
width: size,
height: size,
depth_or_array_layers: 6,
},
mip_level_count: 1,
sample_count: 1,
dimension: wgpu::TextureDimension::D2,
format: wgpu::TextureFormat::Rgba8UnormSrgb,
usage: wgpu::TextureUsages::TEXTURE_BINDING | wgpu::TextureUsages::COPY_DST,
view_formats: &[],
});
for (face, img) in imgs.iter().enumerate() {
queue.write_texture(
wgpu::ImageCopyTexture {
texture: &texture,
mip_level: 0,
origin: wgpu::Origin3d {
x: 0,
y: 0,
z: face as u32,
},
aspect: wgpu::TextureAspect::All,
},
img.as_raw(),
wgpu::ImageDataLayout {
offset: 0,
bytes_per_row: Some(4 * size),
rows_per_image: Some(size),
},
wgpu::Extent3d {
width: size,
height: size,
depth_or_array_layers: 1,
},
);
}
texture.create_view(&wgpu::TextureViewDescriptor {
label: None,
format: None,
dimension: Some(wgpu::TextureViewDimension::Cube),
aspect: wgpu::TextureAspect::All,
base_mip_level: 0,
mip_level_count: None,
base_array_layer: 0,
array_layer_count: None,
})
}

View File

@ -22,6 +22,8 @@ pub struct Viewport {
pub struct Aperture {
pub radius: f32,
pub focal_distance: f32, // from 0 (exclusive) to +∞ (inclusive)
pub glare_strength: f32,
pub glare_radius: f32, // at distance 1
}
#[derive(Debug, Clone, Copy)]
@ -48,6 +50,8 @@ struct CameraData {
aperture: f32,
eye: Vec3,
antifocal: f32,
glare_strength: f32,
glare_radius: f32,
}
impl From<(Viewport, CameraLocation, Aperture)> for CameraData {
@ -61,6 +65,8 @@ impl From<(Viewport, CameraLocation, Aperture)> for CameraData {
height: value.0.corner.y / value.0.corner.z,
aperture: value.2.radius,
antifocal: 1. / value.2.focal_distance,
glare_strength: value.2.glare_strength,
glare_radius: value.2.glare_radius,
}
}
}
@ -113,6 +119,10 @@ pub struct TracerData {
bindings: wgpu::BindGroup,
}
pub struct TracerEnv {
bindings: wgpu::BindGroup,
}
static SHADER: &str = include_str!("trace.wgsl");
impl Tracer {
@ -140,9 +150,30 @@ impl Tracer {
count: None,
}],
});
let envmap_bgl = device.create_bind_group_layout(&wgpu::BindGroupLayoutDescriptor {
label: None,
entries: &[
wgpu::BindGroupLayoutEntry {
binding: 0,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Sampler(wgpu::SamplerBindingType::Filtering),
count: None,
},
wgpu::BindGroupLayoutEntry {
binding: 1,
visibility: wgpu::ShaderStages::FRAGMENT,
ty: wgpu::BindingType::Texture {
sample_type: wgpu::TextureSampleType::Float { filterable: true },
view_dimension: wgpu::TextureViewDimension::Cube,
multisampled: false,
},
count: None,
},
],
});
let pipeline_layout = device.create_pipeline_layout(&wgpu::PipelineLayoutDescriptor {
label: None,
bind_group_layouts: &[&spheres_bgl],
bind_group_layouts: &[&spheres_bgl, &envmap_bgl],
push_constant_ranges: &[wgpu::PushConstantRange {
stages: wgpu::ShaderStages::FRAGMENT,
range: 0..mem::size_of::<ParamsData>() as u32,
@ -227,6 +258,7 @@ impl Tracer {
&self,
pass: &mut wgpu::RenderPass,
data: &TracerData,
env: &TracerEnv,
params: Params,
viewport: Viewport,
aperture: Aperture,
@ -243,6 +275,7 @@ impl Tracer {
);
pass.set_vertex_buffer(0, self.view_buf.slice(..));
pass.set_bind_group(0, &data.bindings, &[]);
pass.set_bind_group(1, &env.bindings, &[]);
pass.draw(0..4, 0..1);
}
}
@ -266,3 +299,24 @@ impl TracerData {
Self { bindings }
}
}
impl TracerEnv {
pub fn new(device: &wgpu::Device, tracer: &Tracer, view: &wgpu::TextureView) -> Self {
let sampler = device.create_sampler(&wgpu::SamplerDescriptor::default());
let bindings = device.create_bind_group(&wgpu::BindGroupDescriptor {
label: None,
layout: &tracer.pipeline.get_bind_group_layout(1),
entries: &[
wgpu::BindGroupEntry {
binding: 0,
resource: wgpu::BindingResource::Sampler(&sampler),
},
wgpu::BindGroupEntry {
binding: 1,
resource: wgpu::BindingResource::TextureView(view),
},
],
});
Self { bindings }
}
}

View File

@ -12,6 +12,9 @@ struct Params {
aperture: f32,
eye: vec3f,
antifocal: f32,
glare_strength: f32,
glare_radius: f32,
}
struct Sphere {
@ -33,6 +36,8 @@ struct Varying {
var<push_constant> params: Params;
@group(0) @binding(1) var<storage, read> spheres: array<Sphere>;
@group(1) @binding(0) var env_sampler: sampler;
@group(1) @binding(1) var env_texture: texture_cube<f32>;
@vertex
fn on_vertex(in: Vertex) -> Varying {
@ -79,7 +84,13 @@ fn trace_fragment(in: Varying) -> vec3f {
pos = params.eye + aperture_offset_abs;
let off_px = vec2(rand_float(), rand_float()) - .5;
let off_w = mat2x3(dpdx(in.dir), dpdy(in.dir));
let dir = in.dir + off_w * off_px;
var dir = in.dir + off_w * off_px;
if (rand_float() < params.glare_strength) {
let p = rand_float();
let d = params.glare_radius * pow(p, 2.);
let glare_off = d * rand_disc();
dir += view_mtx * vec3(glare_off, 0.);
}
ray = normalize(dir - params.antifocal * aperture_offset_abs);
for (var k = 0; k < params.max_reflections; k++) {
@ -93,28 +104,8 @@ fn trace_fragment(in: Varying) -> vec3f {
}
}
if (sphere == -1) {
let theta = dot(ray, normalize(vec3(1., 2., 1.)));
var env: vec3f; // in kilonits
const ILLUMINANCE_LUX = 1e5;
const ANGULAR_DIAMETER_DEG = 20.0; // Sun: 0.5°
const PI = 3.141592653589793;
const ANGULAR_DIAMETER_RAD = PI / 180.0 * ANGULAR_DIAMETER_DEG;
const THETA = 0.5 * ANGULAR_DIAMETER_RAD;
const COS_THETA = 1.0 - 0.5 * THETA * THETA; // approximately
const SOLID_ANGLE_SR = PI * THETA * THETA; // approximately
const LUMINANCE_NIT = ILLUMINANCE_LUX / SOLID_ANGLE_SR;
const LUMINANCE_KNIT = 1e-3 * LUMINANCE_NIT;
if (theta > COS_THETA) {
env = vec3(1.0, 0.9, 0.6) * LUMINANCE_KNIT;
} else {
env = mix(vec3(0.5, 1.0, 2.0), vec3(2.0, 3.0, 4.0), 0.5 * theta + 0.5);
}
result += color * env.xyz;
let env = textureSampleLevel(env_texture, env_sampler, ray, 0.0);
result += 3.0 * color * env.xyz;
break;
}
let s = spheres[sphere];