This commit is contained in:
2026-02-11 15:05:56 +01:00
parent 311ca48092
commit fecc1f1718
8 changed files with 1051 additions and 60 deletions

Binary file not shown.

Binary file not shown.

View File

@@ -54,6 +54,6 @@ fn fragment(in: FullscreenVertexOutput) -> @location(0) vec4<f32> {
// Sample each color channel with an arbitrary shift
return vec4<f32>(
final_gray, final_gray, final_gray, a
final_gray, final_gray, final_gray, 0.0
);
}

View File

@@ -0,0 +1,305 @@
//! Edge Detection using 3x3 Sobel Filter
//!
//! This shader implements edge detection based on depth, normal, and color gradients using a 3x3 Sobel filter.
//! It combines the results of depth, normal, and color edge detection to produce a final edge map.
#import bevy_core_pipeline::fullscreen_vertex_shader::FullscreenVertexOutput
#import bevy_render::view::View
#import bevy_pbr::view_transformations::uv_to_ndc
@group(0) @binding(0) var screen_texture: texture_2d<f32>;
#ifdef MULTISAMPLED
@group(0) @binding(1) var depth_prepass_texture: texture_depth_multisampled_2d;
#else
@group(0) @binding(1) var depth_prepass_texture: texture_depth_2d;
#endif
#ifdef MULTISAMPLED
@group(0) @binding(2) var normal_prepass_texture: texture_multisampled_2d<f32>;
#else
@group(0) @binding(2) var normal_prepass_texture: texture_2d<f32>;
#endif
@group(0) @binding(3) var filtering_sampler: sampler;
@group(0) @binding(4) var depth_sampler: sampler;
@group(0) @binding(5) var noise_texture: texture_2d<f32>;
@group(0) @binding(6) var noise_sampler: sampler;
@group(0) @binding(7) var<uniform> view: View;
@group(0) @binding(8) var<uniform> ed_uniform: EdgeDetectionUniform;
// Per-view edge-detection settings.
// NOTE(review): field order/types must stay in sync with the Rust
// `EdgeDetectionUniform` (ShaderType) struct — verify when changing either side.
struct EdgeDetectionUniform {
    // Minimum view-z gradient treated as a depth edge.
    depth_threshold: f32,
    // Minimum normal gradient treated as a normal edge.
    normal_threshold: f32,
    // Minimum color gradient treated as a color edge.
    color_threshold: f32,
    // Sobel sample offset (in texels) for the depth pass.
    depth_thickness: f32,
    // Sobel sample offset (in texels) for the normal pass.
    normal_thickness: f32,
    // Sobel sample offset (in texels) for the color pass.
    color_thickness: f32,
    // Fresnel value at which steep-angle compensation starts.
    steep_angle_threshold: f32,
    // Strength of the steep-angle depth-threshold boost.
    steep_angle_multiplier: f32,
    // xy: distortion frequency; zw: distortion strength
    uv_distortion: vec4f,
    // Color drawn on detected edges.
    edge_color: vec4f,
    // Pixelation block size in pixels (1 = no pixelation).
    block_pixel: u32,
}
// -----------------------
// View Transformation ---
// -----------------------
// Clamp a scalar to [0, 1].
fn saturate(x: f32) -> f32 { return clamp(x, 0.0, 1.0); }
/// Retrieve the perspective camera near clipping plane.
/// NOTE(review): reads `clip_from_view[3][2]`; assumes Bevy's reverse-z
/// perspective projection layout — confirm for custom projection matrices.
fn perspective_camera_near() -> f32 {
    return view.clip_from_view[3][2];
}
/// Convert ndc depth to linear view z.
/// Note: Depth values in front of the camera will be negative as -z is forward
fn depth_ndc_to_view_z(ndc_depth: f32) -> f32 {
#ifdef VIEW_PROJECTION_PERSPECTIVE
    // Reverse-z perspective: view_z = -near / ndc_depth.
    return -perspective_camera_near() / ndc_depth;
#else ifdef VIEW_PROJECTION_ORTHOGRAPHIC
    // Orthographic: depth is an affine function of view z; invert it.
    return -(view.clip_from_view[3][2] - ndc_depth) / view.clip_from_view[2][2];
#else
    // Generic fallback: full unprojection through the inverse projection matrix.
    let view_pos = view.view_from_clip * vec4f(0.0, 0.0, ndc_depth, 1.0);
    return view_pos.z / view_pos.w;
#endif
}
/// Convert a ndc space position to world space
fn position_ndc_to_world(ndc_pos: vec3<f32>) -> vec3<f32> {
    let world_pos = view.world_from_clip * vec4f(ndc_pos, 1.0);
    // Perspective divide after the matrix multiply.
    return world_pos.xyz / world_pos.w;
}
/// Compute the (normalized) view direction toward `world_position`.
fn calculate_view(world_position: vec3f) -> vec3f {
#ifdef VIEW_PROJECTION_ORTHOGRAPHIC
    // Orthographic view vector: constant forward axis taken from clip_from_world.
    return normalize(vec3f(view.clip_from_world[0].z, view.clip_from_world[1].z, view.clip_from_world[2].z));
#else
    // Only valid for a perspective projection
    return normalize(view.world_position.xyz - world_position.xyz);
#endif
}
// -----------------------
// Depth Detection -------
// -----------------------
/// Sample the depth prepass at `uv`.
/// With MSAA the depth texture cannot be sampled, so it is loaded per-sample.
fn prepass_depth(uv: vec2f) -> f32 {
#ifdef MULTISAMPLED
    let pixel_coord = vec2i(uv * texture_size);
    let depth = textureLoad(depth_prepass_texture, pixel_coord, sample_index_i);
#else
    let depth = textureSample(depth_prepass_texture, depth_sampler, uv);
#endif
    return depth;
}
/// Depth prepass sample converted to linear view z (negative in front of camera).
fn prepass_view_z(uv: vec2f) -> f32 {
    let depth = prepass_depth(uv);
    return depth_ndc_to_view_z(depth);
}
/// Horizontal view-z difference at row offset `y`, sampled `thickness` texels apart.
fn view_z_gradient_x(uv: vec2f, y: f32, thickness: f32) -> f32 {
    let l_coord = uv + texel_size * vec2f(-thickness, y); // left coordinate
    let r_coord = uv + texel_size * vec2f( thickness, y); // right coordinate
    return prepass_view_z(r_coord) - prepass_view_z(l_coord);
}
/// Vertical view-z difference at column offset `x`, sampled `thickness` texels apart.
fn view_z_gradient_y(uv: vec2f, x: f32, thickness: f32) -> f32 {
    let d_coord = uv + texel_size * vec2f(x, -thickness); // down coordinate
    let t_coord = uv + texel_size * vec2f(x, thickness);  // top coordinate
    return prepass_view_z(t_coord) - prepass_view_z(d_coord);
}
/// Returns 1.0 when a depth edge is detected at `uv`, 0.0 otherwise.
/// `fresnel` is used to relax the threshold on steep viewing angles.
fn detect_edge_depth(uv: vec2f, thickness: f32, fresnel: f32) -> f32 {
    // 3x3 Sobel over linear view z: horizontal derivative...
    let deri_x =
        view_z_gradient_x(uv, thickness, thickness) +
        2.0 * view_z_gradient_x(uv, 0.0, thickness) +
        view_z_gradient_x(uv, -thickness, thickness);
    // ...and vertical derivative.
    let deri_y =
        view_z_gradient_y(uv, thickness, thickness) +
        2.0 * view_z_gradient_y(uv, 0.0, thickness) +
        view_z_gradient_y(uv, -thickness, thickness);
    // why not `let grad = sqrt(deri_x * deri_x + deri_y * deri_y);`?
    //
    // Because `deri_x` or `deri_y` might be too large,
    // causing overflow in the calculation and resulting in incorrect results.
    let grad = max(abs(deri_x), abs(deri_y));
    let view_z = abs(prepass_view_z(uv));
    // At grazing angles the depth gradient explodes even on flat surfaces;
    // scale the threshold up with fresnel and distance to suppress false edges.
    let steep_angle_adjustment =
        smoothstep(ed_uniform.steep_angle_threshold, 1.0, fresnel) * ed_uniform.steep_angle_multiplier * view_z;
    return f32(grad > ed_uniform.depth_threshold * (1.0 + steep_angle_adjustment));
}
// -----------------------
// Normal Detection ------
// -----------------------
/// Sample the normal prepass and unpack from [0,1] storage range to a
/// normalized [-1,1] world-space normal.
fn prepass_normal_unpack(uv: vec2f) -> vec3f {
    let normal_packed = prepass_normal(uv);
    return normalize(normal_packed.xyz * 2.0 - vec3f(1.0));
}
/// Raw (still packed) normal prepass sample at `uv`.
/// With MSAA the texture is loaded per-sample instead of sampled.
fn prepass_normal(uv: vec2f) -> vec3f {
#ifdef MULTISAMPLED
    let pixel_coord = vec2i(uv * texture_size);
    let normal = textureLoad(normal_prepass_texture, pixel_coord, sample_index_i);
#else
    let normal = textureSample(normal_prepass_texture, filtering_sampler, uv);
#endif
    return normal.xyz;
}
/// Horizontal packed-normal difference at row offset `y`.
/// Note: gradients are taken on the packed normals, which is fine since the
/// packing is affine and only relative differences are thresholded.
fn normal_gradient_x(uv: vec2f, y: f32, thickness: f32) -> vec3f {
    let l_coord = uv + texel_size * vec2f(-thickness, y); // left coordinate
    let r_coord = uv + texel_size * vec2f( thickness, y); // right coordinate
    return prepass_normal(r_coord) - prepass_normal(l_coord);
}
/// Vertical packed-normal difference at column offset `x`.
fn normal_gradient_y(uv: vec2f, x: f32, thickness: f32) -> vec3f {
    let d_coord = uv + texel_size * vec2f(x, -thickness); // down coordinate
    let t_coord = uv + texel_size * vec2f(x, thickness);  // top coordinate
    return prepass_normal(t_coord) - prepass_normal(d_coord);
}
/// Returns 1.0 when a normal edge is detected at `uv`, 0.0 otherwise.
/// Applies a 3x3 Sobel filter per channel and thresholds the largest
/// absolute component of either derivative.
fn detect_edge_normal(uv: vec2f, thickness: f32) -> f32 {
    // Horizontal Sobel derivative (per channel, absolute value).
    let grad_x = abs(
        normal_gradient_x(uv, thickness, thickness) +
        2.0 * normal_gradient_x(uv, 0.0, thickness) +
        normal_gradient_x(uv, -thickness, thickness));
    // Vertical Sobel derivative (per channel, absolute value).
    let grad_y = abs(
        normal_gradient_y(uv, thickness, thickness) +
        2.0 * normal_gradient_y(uv, 0.0, thickness) +
        normal_gradient_y(uv, -thickness, thickness));
    // Componentwise max of both directions, then reduce to a scalar.
    let combined = max(grad_x, grad_y);
    let grad = max(combined.x, max(combined.y, combined.z));
    return f32(grad > ed_uniform.normal_threshold);
}
// ----------------------
// Color Detection ------
// ----------------------
/// Fetch the scene color at `uv` from the main color attachment.
fn prepass_color(uv: vec2f) -> vec3f {
    let sampled = textureSample(screen_texture, filtering_sampler, uv);
    return sampled.rgb;
}
/// Horizontal color difference at row offset `y`, `thickness` texels apart.
fn color_gradient_x(uv: vec2f, y: f32, thickness: f32) -> vec3f {
    let left = uv + texel_size * vec2f(-thickness, y);
    let right = uv + texel_size * vec2f(thickness, y);
    return prepass_color(right) - prepass_color(left);
}
/// Vertical color difference at column offset `x`, `thickness` texels apart.
fn color_gradient_y(uv: vec2f, x: f32, thickness: f32) -> vec3f {
    let below = uv + texel_size * vec2f(x, -thickness);
    let above = uv + texel_size * vec2f(x, thickness);
    return prepass_color(above) - prepass_color(below);
}
/// Returns 1.0 when a color edge is detected at `uv`, 0.0 otherwise.
/// 3x3 Sobel on the RGB color; the gradient magnitude is the larger of the
/// two directional derivative lengths.
fn detect_edge_color(uv: vec2f, thickness: f32) -> f32 {
    // Horizontal Sobel derivative.
    let gx =
        color_gradient_x(uv, thickness, thickness) +
        2.0 * color_gradient_x(uv, 0.0, thickness) +
        color_gradient_x(uv, -thickness, thickness);
    // Vertical Sobel derivative.
    let gy =
        color_gradient_y(uv, thickness, thickness) +
        2.0 * color_gradient_y(uv, 0.0, thickness) +
        color_gradient_y(uv, -thickness, thickness);
    let magnitude = max(length(gx), length(gy));
    return f32(magnitude > ed_uniform.color_threshold);
}
/// Snap `uv` to the center of its pixel block.
/// `dims` is the texture size in pixels, `block_px` the block size
/// (clamped to at least one pixel).
fn pixelate_uv(uv: vec2f, dims: vec2f, block_px: f32) -> vec2f {
    let size = max(block_px, 1.0);
    // Index of the block this uv falls into.
    let block_index = floor(uv * dims / size);
    // Back to UV space, sampling at the block center.
    return (block_index * size + 0.5 * size) / dims;
}
var<private> texture_size: vec2f;
var<private> texel_size: vec2f;
var<private> sample_index_i: i32 = 0;
// Fullscreen edge-detection pass: combines depth, normal and color edges
// (each behind its own shader define) and blends the edge color over the scene.
@fragment
fn fragment(
#ifdef MULTISAMPLED
    @builtin(sample_index) sample_index: u32,
#endif
    in: FullscreenVertexOutput
) -> @location(0) vec4f {
#ifdef MULTISAMPLED
    sample_index_i = i32(sample_index);
#endif
    // Cache the screen dimensions for the sampling helpers above.
    texture_size = vec2f(textureDimensions(screen_texture, 0));
    texel_size = 1.0 / texture_size;
    // Reconstruct the view direction for this fragment (near-plane point).
    let near_ndc_pos = vec3f(uv_to_ndc(in.uv), 1.0);
    let near_world_pos = position_ndc_to_world(near_ndc_pos);
    let view_direction = calculate_view(near_world_pos);
    let normal = prepass_normal_unpack(in.uv);
    // Fresnel term feeds the steep-angle compensation in the depth pass.
    let fresnel = 1.0 - saturate(dot(normal, view_direction));
    // Distort the sampling UV with tiling noise for a hand-drawn wobble.
    let sample_uv = in.position.xy * min(texel_size.x, texel_size.y);
    let noise = textureSample(noise_texture, noise_sampler, sample_uv * ed_uniform.uv_distortion.xy);
    let uv_noise = in.uv + noise.xy * ed_uniform.uv_distortion.zw;
    // block_pixel is already f32 after the conversion here, so no further
    // casts are needed below (redundant f32(...) calls removed).
    let block_pixel = max(f32(ed_uniform.block_pixel), 1.0);
    let uv_noise_px = pixelate_uv(uv_noise, texture_size, block_pixel);
    let uv_px = pixelate_uv(in.uv, texture_size, block_pixel);
    // Each detector contributes 0 or 1; combine with max so any hit is an edge.
    var edge = 0.0;
#ifdef ENABLE_DEPTH
    let edge_depth = detect_edge_depth(uv_noise_px, ed_uniform.depth_thickness, fresnel);
    edge = max(edge, edge_depth);
#endif
#ifdef ENABLE_NORMAL
    let edge_normal = detect_edge_normal(uv_noise_px, ed_uniform.normal_thickness);
    edge = max(edge, edge_normal);
#endif
#ifdef ENABLE_COLOR
    let edge_color = detect_edge_color(uv_noise_px, ed_uniform.color_thickness);
    edge = max(edge, edge_color);
#endif
    // Blend the configured edge color over the (pixelated) scene color.
    var color = textureSample(screen_texture, filtering_sampler, uv_px).rgb;
    color = mix(color, ed_uniform.edge_color.rgb, edge);
    return vec4f(color, 1.0);
}

View File

@@ -0,0 +1,669 @@
// https://github.com/Mediocre-AI/bevy_edge_detection_outline
use bevy::{
asset::{embedded_asset, load_embedded_asset},
core_pipeline::{
FullscreenShader,
core_3d::{
DEPTH_TEXTURE_SAMPLING_SUPPORTED,
graph::{Core3d, Node3d},
},
prepass::{DepthPrepass, NormalPrepass, ViewPrepassTextures},
},
ecs::query::QueryItem,
prelude::*,
render::{
Extract, Render, RenderApp, RenderSystems,
extract_component::{
ComponentUniforms, DynamicUniformIndex, ExtractComponent, ExtractComponentPlugin,
UniformComponentPlugin,
},
render_asset::RenderAssets,
render_graph::{
NodeRunError, RenderGraphContext, RenderGraphExt, RenderLabel, ViewNode, ViewNodeRunner,
},
render_resource::{
binding_types::{
sampler, texture_2d, texture_2d_multisampled, texture_depth_2d,
texture_depth_2d_multisampled, uniform_buffer,
},
*,
},
renderer::{RenderContext, RenderDevice},
sync_component::SyncComponentPlugin,
sync_world::RenderEntity,
texture::GpuImage,
view::{ViewTarget, ViewUniform, ViewUniformOffset, ViewUniforms},
},
};
// ──────────────────────────────────────────────
// Plugin Setup
// ──────────────────────────────────────────────
/// Adds the edge-detection post-process pass to an app.
pub struct EdgeDetectionPlugin {
    /// The render-graph node the edge-detection node should run before.
    pub before: Node3d,
}
impl Default for EdgeDetectionPlugin {
    fn default() -> Self {
        Self {
            // Run before FXAA by default so the drawn edges are anti-aliased too.
            before: Node3d::Fxaa,
        }
    }
}
impl Plugin for EdgeDetectionPlugin {
    fn build(&self, app: &mut App) {
        // Embed the shader and the noise texture into the binary.
        embedded_asset!(app, "edge_detection_shader.wgsl");
        embedded_asset!(app, "perlin_noise.png");
        app.register_type::<EdgeDetection>();
        // Sync the settings component into the render world and upload its
        // uniform representation each frame.
        app.add_plugins(SyncComponentPlugin::<EdgeDetection>::default())
            .add_plugins((
                ExtractComponentPlugin::<EdgeDetectionUniform>::default(),
                UniformComponentPlugin::<EdgeDetectionUniform>::default(),
            ));
        // We need to get the render app from the main app
        let Some(render_app) = app.get_sub_app_mut(RenderApp) else {
            return;
        };
        render_app
            .init_resource::<SpecializedRenderPipelines<EdgeDetectionPipeline>>()
            .add_systems(
                ExtractSchedule,
                EdgeDetectionUniform::extract_edge_detection_settings,
            )
            .add_systems(
                Render,
                prepare_edge_detection_pipelines.in_set(RenderSystems::Prepare),
            )
            // Insert the node between PostProcessing and the configured `before` node.
            .add_render_graph_node::<ViewNodeRunner<EdgeDetectionNode>>(Core3d, EdgeDetectionLabel)
            .add_render_graph_edges(
                Core3d,
                (
                    Node3d::PostProcessing,
                    EdgeDetectionLabel,
                    self.before.clone(),
                ),
            );
    }
    fn finish(&self, app: &mut App) {
        // Deferred to `finish` because the pipeline needs the RenderDevice,
        // which only exists after the renderer is initialized.
        app.sub_app_mut(RenderApp)
            .init_resource::<EdgeDetectionPipeline>();
    }
}
// This contains global data used by the render pipeline. This will be created once on startup.
#[derive(Resource)]
pub struct EdgeDetectionPipeline {
    /// Handle to the embedded edge-detection WGSL shader.
    pub shader: Handle<Shader>,
    /// Handle to the embedded perlin-noise texture used for UV distortion.
    pub noise_texture: Handle<Image>,
    /// Linear sampler for the color and normal textures.
    pub linear_sampler: Sampler,
    /// Nearest sampler for the depth prepass (depth can't use filtering here).
    pub nonfiltering_sampler: Sampler,
    /// Repeating linear sampler for the noise texture.
    pub noise_sampler: Sampler,
    /// Bind-group layout used when the view is multisampled.
    pub layout_with_msaa: BindGroupLayoutDescriptor,
    /// Bind-group layout used for non-multisampled views.
    pub layout_without_msaa: BindGroupLayoutDescriptor,
    /// Shared fullscreen-triangle vertex stage.
    pub fullscreen_shader: FullscreenShader,
}
impl EdgeDetectionPipeline {
    /// Select the bind-group layout matching whether the view is multisampled.
    pub fn bind_group_layout(&self, multisampled: bool) -> &BindGroupLayoutDescriptor {
        match multisampled {
            true => &self.layout_with_msaa,
            false => &self.layout_without_msaa,
        }
    }
}
impl FromWorld for EdgeDetectionPipeline {
    /// Build the pipeline resource: load embedded assets, declare both
    /// bind-group layouts (MSAA / non-MSAA) and create the samplers.
    fn from_world(world: &mut World) -> Self {
        let render_device = world.resource::<RenderDevice>();
        // let noise_texture = world.load_asset("embedded://bevy_edge_detection/perlin_noise.png");
        let shader = load_embedded_asset!(world, "edge_detection_shader.wgsl");
        let noise_texture = load_embedded_asset!(world, "perlin_noise.png");
        // Binding order here must match the @binding indices in the shader.
        let layout_with_msaa = BindGroupLayoutDescriptor::new(
            "edge_detection: bind_group_layout with msaa",
            &BindGroupLayoutEntries::sequential(
                // The layout entries will only be visible in the fragment stage
                ShaderStages::FRAGMENT,
                (
                    // color attachment
                    texture_2d(TextureSampleType::Float { filterable: true }),
                    // depth prepass
                    texture_depth_2d_multisampled(),
                    // normal prepass
                    texture_2d_multisampled(TextureSampleType::Float { filterable: false }),
                    // filtering sampler for color/normal
                    sampler(SamplerBindingType::Filtering),
                    // non-filtering sampler for depth prepass
                    sampler(SamplerBindingType::NonFiltering),
                    // perlin-noise texture
                    texture_2d(TextureSampleType::Float { filterable: true }),
                    // perlin-noise sampler
                    sampler(SamplerBindingType::Filtering),
                    // view
                    uniform_buffer::<ViewUniform>(true),
                    // The uniform that will control the effect
                    uniform_buffer::<EdgeDetectionUniform>(true),
                ),
            ),
        );
        let layout_without_msaa = BindGroupLayoutDescriptor::new(
            "edge_detection: bind_group_layout without msaa",
            &BindGroupLayoutEntries::sequential(
                // The layout entries will only be visible in the fragment stage
                ShaderStages::FRAGMENT,
                (
                    // color attachment
                    texture_2d(TextureSampleType::Float { filterable: true }),
                    // depth prepass
                    texture_depth_2d(),
                    // normal prepass
                    texture_2d(TextureSampleType::Float { filterable: true }),
                    // texture sampler
                    sampler(SamplerBindingType::Filtering),
                    // for depth
                    sampler(SamplerBindingType::NonFiltering),
                    // perlin-noise texture
                    texture_2d(TextureSampleType::Float { filterable: true }),
                    // perlin-noise sampler
                    sampler(SamplerBindingType::Filtering),
                    // view
                    uniform_buffer::<ViewUniform>(true),
                    // The uniform that will control the effect
                    uniform_buffer::<EdgeDetectionUniform>(true),
                ),
            ),
        );
        let linear_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: Some("edge detection linear sampler"),
            mag_filter: FilterMode::Linear,
            min_filter: FilterMode::Linear,
            ..default()
        });
        let nonfiltering_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: Some("edge detection nonfiltering sampler"),
            mag_filter: FilterMode::Nearest,
            min_filter: FilterMode::Nearest,
            ..default()
        });
        // Noise sampler repeats so the distortion texture tiles across the screen.
        let noise_sampler = render_device.create_sampler(&SamplerDescriptor {
            label: Some("edge detection noise sampler"),
            mag_filter: FilterMode::Linear,
            min_filter: FilterMode::Linear,
            address_mode_u: AddressMode::Repeat,
            address_mode_v: AddressMode::Repeat,
            ..default()
        });
        Self {
            shader,
            noise_texture,
            linear_sampler,
            nonfiltering_sampler,
            noise_sampler,
            layout_with_msaa,
            layout_without_msaa,
            fullscreen_shader: world.resource::<FullscreenShader>().clone(),
        }
    }
}
impl SpecializedRenderPipeline for EdgeDetectionPipeline {
    type Key = EdgeDetectionKey;
    /// Build a render-pipeline descriptor specialized for `key`
    /// (HDR format, enabled detectors, MSAA and projection defines).
    fn specialize(&self, key: Self::Key) -> RenderPipelineDescriptor {
        // Output format must match the view target.
        let targets = vec![Some(ColorTargetState {
            format: if key.hdr {
                ViewTarget::TEXTURE_FORMAT_HDR
            } else {
                TextureFormat::bevy_default()
            },
            blend: None,
            write_mask: ColorWrites::ALL,
        })];
        // Shader defines toggle the optional detection passes and MSAA paths.
        let mut shader_defs = vec![];
        if key.enable_depth {
            shader_defs.push("ENABLE_DEPTH".into());
        }
        if key.enable_normal {
            shader_defs.push("ENABLE_NORMAL".into());
        }
        if key.enable_color {
            shader_defs.push("ENABLE_COLOR".into());
        }
        if key.multisampled {
            shader_defs.push("MULTISAMPLED".into());
        }
        // Exhaustive match (no `_` arm) so adding a new `ProjectionType`
        // variant becomes a compile error here instead of silently emitting
        // no projection define.
        match key.projection {
            ProjectionType::Perspective => shader_defs.push("VIEW_PROJECTION_PERSPECTIVE".into()),
            ProjectionType::Orthographic => shader_defs.push("VIEW_PROJECTION_ORTHOGRAPHIC".into()),
            ProjectionType::None => {}
        }
        RenderPipelineDescriptor {
            label: Some("edge_detection: pipeline".into()),
            layout: vec![self.bind_group_layout(key.multisampled).clone()],
            vertex: self.fullscreen_shader.to_vertex_state(),
            fragment: Some(FragmentState {
                shader: self.shader.clone(),
                shader_defs,
                entry_point: Some("fragment".into()),
                targets,
            }),
            primitive: default(),
            depth_stencil: None,
            multisample: default(),
            push_constant_ranges: vec![],
            zero_initialize_workgroup_memory: false,
        }
    }
}
/// Per-view cached pipeline id, inserted by `prepare_edge_detection_pipelines`
/// and consumed by the render-graph node.
#[derive(Component, Clone, Copy)]
pub struct EdgeDetectionPipelineId(CachedRenderPipelineId);
/// Render-world Prepare system: specializes the edge-detection pipeline for
/// every view that carries [`EdgeDetection`] and stores the cached pipeline
/// id on the view entity.
pub fn prepare_edge_detection_pipelines(
    mut commands: Commands,
    pipeline_cache: Res<PipelineCache>,
    mut pipelines: ResMut<SpecializedRenderPipelines<EdgeDetectionPipeline>>,
    edge_detection_pipeline: Res<EdgeDetectionPipeline>,
    query: Query<(Entity, &EdgeDetection, Option<&Projection>, &ViewTarget)>,
) {
    for (entity, edge_detection, projection, view_target) in &query {
        // A sampled main texture view only exists when MSAA is active.
        let multisampled = view_target.sampled_main_texture_view().is_some();
        let hdr = view_target.is_hdr();
        let id = pipelines.specialize(
            &pipeline_cache,
            &edge_detection_pipeline,
            EdgeDetectionKey::new(edge_detection, hdr, multisampled, projection),
        );
        commands.entity(entity).insert(EdgeDetectionPipelineId(id));
    }
}
// ──────────────────────────────────────────────
// Core structs and types
// ──────────────────────────────────────────────
/// The projection type of a view, used to choose the shader define.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub enum ProjectionType {
    None,
    Perspective,
    Orthographic,
}
impl From<Option<&Projection>> for ProjectionType {
    /// Map an optional camera projection onto the key's projection type.
    /// Custom projections and missing projections both fall back to `None`,
    /// which selects the generic unprojection path in the shader.
    fn from(proj: Option<&Projection>) -> Self {
        match proj {
            Some(Projection::Perspective(_)) => Self::Perspective,
            Some(Projection::Orthographic(_)) => Self::Orthographic,
            Some(Projection::Custom(_)) | None => Self::None,
        }
    }
}
/// Specialization key for the edge-detection pipeline. Two views with equal
/// keys share a cached pipeline.
#[derive(Clone, Copy, PartialEq, Eq, Hash)]
pub struct EdgeDetectionKey {
    /// Whether to enable depth-based edge detection.
    /// If `true`, edges will be detected based on depth variations.
    pub enable_depth: bool,
    /// Whether to enable normal-based edge detection.
    /// If `true`, edges will be detected based on normal direction variations.
    pub enable_normal: bool,
    /// Whether to enable color-based edge detection.
    /// If `true`, edges will be detected based on color variations.
    pub enable_color: bool,
    /// Whether we're using HDR.
    pub hdr: bool,
    /// Whether the render target is multisampled.
    pub multisampled: bool,
    /// The projection type of view
    pub projection: ProjectionType,
}
impl EdgeDetectionKey {
    /// Build a key from the view's settings and render-target properties.
    pub fn new(
        edge_detection: &EdgeDetection,
        hdr: bool,
        multisampled: bool,
        projection: Option<&Projection>,
    ) -> Self {
        Self {
            enable_depth: edge_detection.enable_depth,
            enable_normal: edge_detection.enable_normal,
            enable_color: edge_detection.enable_color,
            hdr,
            multisampled,
            projection: projection.into(),
        }
    }
}
/// Camera component that enables the edge-detection post-process for a view.
/// Requires the depth and normal prepasses, which are added automatically.
#[derive(Component, Clone, Copy, Debug, Reflect)]
#[reflect(Component, Default)]
#[require(DepthPrepass, NormalPrepass)]
pub struct EdgeDetection {
    /// Depth threshold, used to detect edges with significant depth changes.
    /// Areas where the depth variation exceeds this threshold will be marked as edges.
    pub depth_threshold: f32,
    /// Normal threshold, used to detect edges with significant normal direction changes.
    /// Areas where the normal direction variation exceeds this threshold will be marked as edges.
    pub normal_threshold: f32,
    /// Color threshold, used to detect edges with significant color changes.
    /// Areas where the color variation exceeds this threshold will be marked as edges.
    pub color_threshold: f32,
    /// Thickness of the edges detected based on depth variations.
    /// This value controls the width of the edges drawn when depth-based edge detection is enabled.
    /// Higher values result in thicker edges.
    pub depth_thickness: f32,
    /// Thickness of the edges detected based on normal direction variations.
    /// This value controls the width of the edges drawn when normal-based edge detection is enabled.
    /// Higher values result in thicker edges.
    pub normal_thickness: f32,
    /// Thickness of the edges detected based on color variations.
    /// This value controls the width of the edges drawn when color-based edge detection is enabled.
    /// Higher values result in thicker edges.
    pub color_thickness: f32,
    /// Steep angle threshold, used to adjust the depth threshold when viewing surfaces at steep angles.
    /// When the angle between the view direction and the surface normal is very steep, the depth gradient
    /// can appear artificially large, causing non-edge regions to be mistakenly detected as edges.
    /// This threshold defines the angle at which the depth threshold adjustment begins to take effect.
    ///
    /// Range: [0.0, 1.0]
    pub steep_angle_threshold: f32,
    /// Multiplier applied to the depth threshold when the view angle is steep.
    /// When the angle between the view direction and the surface normal exceeds the `steep_angle_threshold`,
    /// the depth threshold is scaled by this multiplier to reduce the likelihood of false edge detection.
    ///
    /// A value of 1.0 means no adjustment, while values greater than 1.0 increase the depth threshold,
    /// making edge detection less sensitive in steep angles.
    ///
    /// Range: [0.0, inf)
    pub steep_angle_multiplier: f32,
    /// Frequency of UV distortion applied to the edge detection process.
    /// This controls how often the distortion effect repeats across the UV coordinates.
    /// Higher values result in more frequent distortion patterns.
    pub uv_distortion_frequency: Vec2,
    /// Strength of UV distortion applied to the edge detection process.
    /// This controls the intensity of the distortion effect.
    /// Higher values result in more pronounced distortion.
    pub uv_distortion_strength: Vec2,
    /// Edge color, used to draw the detected edges.
    /// Typically a high-contrast color (e.g., red or black) to visually highlight the edges.
    pub edge_color: Color,
    /// Whether to enable depth-based edge detection.
    /// If `true`, edges will be detected based on depth variations.
    pub enable_depth: bool,
    /// Whether to enable normal-based edge detection.
    /// If `true`, edges will be detected based on normal direction variations.
    pub enable_normal: bool,
    /// Whether to enable color-based edge detection.
    /// If `true`, edges will be detected based on color variations.
    pub enable_color: bool,
    /// Pixel block size. 1 disables pixelation; larger values snap sampling
    /// to the center of NxN pixel blocks.
    pub block_pixel: u32,
}
impl Default for EdgeDetection {
    /// Defaults: depth + normal detection on, color detection off,
    /// no pixelation, black edges with a slight UV wobble.
    fn default() -> Self {
        Self {
            depth_threshold: 1.0,
            normal_threshold: 0.8,
            color_threshold: 0.1,
            depth_thickness: 1.0,
            normal_thickness: 1.0,
            color_thickness: 1.0,
            // 0.0 means steep-angle compensation kicks in immediately.
            steep_angle_threshold: 0.00,
            steep_angle_multiplier: 0.30,
            uv_distortion_frequency: Vec2::splat(0.0),
            uv_distortion_strength: Vec2::splat(0.004),
            edge_color: Color::BLACK,
            enable_depth: true,
            enable_normal: true,
            enable_color: false,
            block_pixel: 1,
        }
    }
}
/// GPU-side uniform mirror of [`EdgeDetection`].
/// NOTE(review): field order/types must match the WGSL `EdgeDetectionUniform`
/// struct in `edge_detection_shader.wgsl` — verify when changing either side.
#[derive(Component, Clone, Copy, ShaderType, ExtractComponent)]
pub struct EdgeDetectionUniform {
    pub depth_threshold: f32,
    pub normal_threshold: f32,
    pub color_threshold: f32,
    pub depth_thickness: f32,
    pub normal_thickness: f32,
    pub color_thickness: f32,
    pub steep_angle_threshold: f32,
    pub steep_angle_multiplier: f32,
    // xy: distortion frequency; zw: distortion strength (packed for the shader).
    pub uv_distortion: Vec4,
    pub edge_color: LinearRgba,
    pub block_pixel: u32,
}
impl From<&EdgeDetection> for EdgeDetectionUniform {
    /// Pack the user-facing settings into the GPU uniform layout.
    fn from(ed: &EdgeDetection) -> Self {
        // Frequency goes into xy, strength into zw — matching the shader's
        // interpretation of `uv_distortion`.
        let frequency = ed.uv_distortion_frequency;
        let strength = ed.uv_distortion_strength;
        Self {
            depth_threshold: ed.depth_threshold,
            normal_threshold: ed.normal_threshold,
            color_threshold: ed.color_threshold,
            depth_thickness: ed.depth_thickness,
            normal_thickness: ed.normal_thickness,
            color_thickness: ed.color_thickness,
            steep_angle_threshold: ed.steep_angle_threshold,
            steep_angle_multiplier: ed.steep_angle_multiplier,
            uv_distortion: Vec4::new(frequency.x, frequency.y, strength.x, strength.y),
            edge_color: ed.edge_color.into(),
            block_pixel: ed.block_pixel,
        }
    }
}
impl EdgeDetectionUniform {
    /// Extract-schedule system: copies [`EdgeDetection`] from the main world
    /// onto the synced render-world entity together with its uniform form.
    ///
    /// The query is read-only, so no `mut` binding or `iter_mut()` is needed
    /// (the original needlessly requested mutable iteration).
    pub fn extract_edge_detection_settings(
        mut commands: Commands,
        query: Extract<Query<(RenderEntity, &EdgeDetection)>>,
    ) {
        // Bail out entirely on platforms where sampling depth textures is
        // broken; the shader depends on it.
        if !DEPTH_TEXTURE_SAMPLING_SUPPORTED {
            info_once!(
                "Disable edge detection on this platform because depth textures aren't supported correctly"
            );
            return;
        }
        for (entity, edge_detection) in query.iter() {
            let mut entity_commands = commands
                .get_entity(entity)
                .expect("Edge Detection entity wasn't synced.");
            entity_commands.insert((*edge_detection, EdgeDetectionUniform::from(edge_detection)));
        }
    }
}
/// Render-graph label identifying the edge-detection node.
#[derive(Debug, Hash, PartialEq, Eq, Clone, RenderLabel)]
pub struct EdgeDetectionLabel;
// ──────────────────────────────────────────────
// Render graph nodes
// ──────────────────────────────────────────────
/// The render-graph node that records the edge-detection fullscreen pass.
#[derive(Default)]
pub struct EdgeDetectionNode;
impl ViewNode for EdgeDetectionNode {
    type ViewQuery = (
        &'static ViewTarget,
        &'static ViewPrepassTextures,
        &'static ViewUniformOffset,
        &'static DynamicUniformIndex<EdgeDetectionUniform>,
        &'static EdgeDetectionPipelineId,
    );
    /// Record the fullscreen edge-detection pass for one view.
    /// Silently skips the pass (returns `Ok`) if any required resource
    /// is not ready yet — pipeline still compiling, prepass textures,
    /// noise texture or uniform bindings missing.
    fn run(
        &self,
        _graph: &mut RenderGraphContext,
        render_context: &mut RenderContext,
        (
            view_target,
            prepass_textures,
            view_uniform_index,
            ed_uniform_index,
            edge_detection_pipeline_id,
        ): QueryItem<Self::ViewQuery>,
        world: &World,
    ) -> Result<(), NodeRunError> {
        let edge_detection_pipeline = world.resource::<EdgeDetectionPipeline>();
        // The specialized pipeline may not have finished compiling yet.
        let Some(pipeline) = world
            .resource::<PipelineCache>()
            .get_render_pipeline(edge_detection_pipeline_id.0)
        else {
            return Ok(());
        };
        let (Some(depth_texture), Some(normal_texture)) =
            (&prepass_textures.depth, &prepass_textures.normal)
        else {
            info!("depth or normal texture not found");
            return Ok(());
        };
        // The embedded noise image may still be loading into GPU memory.
        let Some(noise_texture) = world
            .resource::<RenderAssets<GpuImage>>()
            .get(&edge_detection_pipeline.noise_texture)
        else {
            return Ok(());
        };
        let Some(view_uniforms_binding) = world.resource::<ViewUniforms>().uniforms.binding()
        else {
            info!("view uniforms not found");
            return Ok(());
        };
        let Some(ed_uniform_binding) = world
            .resource::<ComponentUniforms<EdgeDetectionUniform>>()
            .uniforms()
            .binding()
        else {
            info!("edge detection uniform not found");
            return Ok(());
        };
        // This will start a new "post process write", obtaining two texture
        // views from the view target - a `source` and a `destination`.
        // `source` is the "current" main texture and you _must_ write into
        // `destination` because calling `post_process_write()` on the
        // [`ViewTarget`] will internally flip the [`ViewTarget`]'s main
        // texture to the `destination` texture. Failing to do so will cause
        // the current main texture information to be lost.
        let post_process = view_target.post_process_write();
        // The bind_group gets created each frame.
        //
        // Normally, you would create a bind_group in the Queue set,
        // but this doesn't work with the post_process_write().
        // The reason it doesn't work is because each post_process_write will alternate the source/destination.
        // The only way to have the correct source/destination for the bind_group
        // is to make sure you get it during the node execution.
        // let multisampled = *msaa != Msaa::Off;
        let bind_group = render_context.render_device().create_bind_group(
            "edge_detection_bind_group",
            // Use the layout baked into the compiled pipeline, so it always
            // matches the MSAA/non-MSAA variant actually in use.
            &bevy::render::render_resource::BindGroupLayout::from(
                pipeline.get_bind_group_layout(0),
            ),
            // edge_detection_pipeline.bind_group_layout(multisampled),
            // It's important for this to match the BindGroupLayout defined in the PostProcessPipeline
            &BindGroupEntries::sequential((
                // Make sure to use the source view
                post_process.source,
                // Use depth prepass
                &depth_texture.texture.default_view,
                // Use normal prepass
                &normal_texture.texture.default_view,
                // Use simple texture sampler
                &edge_detection_pipeline.linear_sampler,
                // nonfiltering sampler for depth
                &edge_detection_pipeline.nonfiltering_sampler,
                // Use noise texture
                &noise_texture.texture_view,
                // Use noise texture sampler
                &edge_detection_pipeline.noise_sampler,
                // view uniform binding
                view_uniforms_binding,
                // Set the uniform binding
                ed_uniform_binding,
            )),
        );
        let mut render_pass = render_context.begin_tracked_render_pass(RenderPassDescriptor {
            label: Some("edge_detection_pass"),
            color_attachments: &[Some(RenderPassColorAttachment {
                view: post_process.destination,
                depth_slice: None,
                resolve_target: None,
                ops: Operations::default(),
            })],
            depth_stencil_attachment: None,
            timestamp_writes: None,
            occlusion_query_set: None,
        });
        render_pass.set_render_pipeline(pipeline);
        // Both uniforms use dynamic offsets; order must match the layout
        // declaration (view first, then the edge-detection uniform).
        render_pass.set_bind_group(
            0,
            &bind_group,
            &[view_uniform_index.offset, ed_uniform_index.index()],
        );
        // Fullscreen triangle: 3 vertices, 1 instance.
        render_pass.draw(0..3, 0..1);
        Ok(())
    }
}

Binary file not shown.

After

Width:  |  Height:  |  Size: 154 KiB

View File

@@ -7,6 +7,7 @@ use bevy::color::Color;
use bevy::prelude::*;
use wgpu::{TextureFormat, TextureUsages};
mod edge_detection;
mod post_process;
use crate::post_process::PostProcessSettings;
@@ -16,87 +17,103 @@ use crate::shared::Spinner;
mod renderer;
mod shared;
const USE_G13: bool = false;
fn main() {
App::new()
.add_plugins((
DefaultPlugins.set(WindowPlugin {
primary_window: Some(Window {
resolution: bevy::window::WindowResolution::new(160 * 4, 43 * 4)
.with_scale_factor_override(4.0),
..default()
}),
let mut app = App::new();
app.add_plugins((
DefaultPlugins.set(WindowPlugin {
primary_window: Some(Window {
resolution: bevy::window::WindowResolution::new(160 * 4, 43 * 4)
.with_scale_factor_override(4.0),
..default()
}),
ScheduleRunnerPlugin::run_loop(Duration::from_secs_f64(1. / 30.)),
post_process::PostProcessPlugin,
ImageExportPlugin,
))
.insert_resource(ClearColor(Color::linear_rgba(0.0, 0.0, 0.0, 0.0)))
.add_systems(Startup, setup_scene_system)
.add_systems(PostStartup, spawn_in_ui)
..default()
}),
edge_detection::EdgeDetectionPlugin::default(),
post_process::PostProcessPlugin,
ImageExportPlugin,
));
if USE_G13 {
app.add_plugins(ScheduleRunnerPlugin::run_loop(Duration::from_secs_f64(
1.0 / 30.0,
)));
}
app.insert_resource(ClearColor(Color::linear_rgba(0.0, 0.0, 0.0, 0.0)))
.add_systems(Startup, (setup_camera, shared::spawn_3d_scene))
// .add_systems(PostStartup, spawn_ui)
.add_systems(Update, rotate_cube)
.run();
}
fn setup_scene_system(
fn setup_camera(
mut commands: Commands,
meshes: ResMut<Assets<Mesh>>,
materials: ResMut<Assets<StandardMaterial>>,
mut images: ResMut<Assets<Image>>,
mut export_sources: ResMut<Assets<ImageExportSource>>,
) {
shared::spawn_3d_scene(commands.reborrow(), meshes, materials);
let mut image = Image::new_target_texture(
160,
43,
TextureFormat::Rgba8Unorm,
Some(TextureFormat::Rgba8UnormSrgb),
);
image.texture_descriptor.usage = TextureUsages::TEXTURE_BINDING
| TextureUsages::COPY_SRC
| TextureUsages::COPY_DST
| TextureUsages::RENDER_ATTACHMENT;
let image_handle = images.add(image);
commands.spawn((
let mut camera_commands = commands.spawn((
Camera3d::default(),
Camera::default(),
RenderTarget::Image(image_handle.clone().into()),
Transform::from_xyz(0.0, 1.0, 2.5).looking_at(Vec3::ZERO, Vec3::Y),
UiAntiAlias::Off,
bevy::core_pipeline::prepass::DepthPrepass,
bevy::core_pipeline::prepass::NormalPrepass,
edge_detection::EdgeDetection {
depth_thickness: 0.5,
normal_thickness: 0.5,
color_thickness: 0.5,
..default()
},
PostProcessSettings { scale: 1.0 },
));
commands.spawn((ImageExport(export_sources.add(image_handle)),));
if USE_G13 {
let mut image = Image::new_target_texture(
160,
43,
TextureFormat::Rgba8Unorm,
Some(TextureFormat::Rgba8UnormSrgb),
);
image.texture_descriptor.usage = TextureUsages::TEXTURE_BINDING
| TextureUsages::COPY_SRC
| TextureUsages::COPY_DST
| TextureUsages::RENDER_ATTACHMENT;
let image_handle = images.add(image);
camera_commands.insert(RenderTarget::Image(image_handle.clone().into()));
commands.spawn(ImageExport(export_sources.add(image_handle)));
}
}
fn spawn_in_ui(mut commands: Commands, assets: Res<AssetServer>, camera: Query<(Entity, &Camera)>) {
let font: Handle<Font> = assets.load("fonts/Roboto-Bold.ttf");
// fn spawn_ui(mut commands: Commands, assets: Res<AssetServer>, camera: Query<(Entity, &Camera)>) {
// let font: Handle<Font> = assets.load("fonts/falconded.otf");
let text_font = TextFont::from(font.clone())
.with_font_size(12.)
.with_font_smoothing(bevy::text::FontSmoothing::None);
// let text_font = TextFont::from(font.clone())
// .with_font_size(16.)
// .with_font_smoothing(bevy::text::FontSmoothing::None);
let Ok(main_camera) = camera.single() else {
return; // no camera... yet?
};
// let Ok(main_camera) = camera.single() else {
// return; // no camera... yet?
// };
commands.spawn((
Node {
position_type: PositionType::Absolute,
top: Val::Px(5.0),
left: Val::Px(5.0),
..default()
},
Text::new("Hello World!"),
text_font.clone(),
TextColor(Color::WHITE),
UiTargetCamera(main_camera.0),
));
}
// commands.spawn((
// Node {
// position_type: PositionType::Absolute,
// top: Val::Px(-3.5),
// left: Val::Px(0.0),
// ..default()
// },
// Text::new("I CAN HAZ UI?!"),
// text_font.clone(),
// TextColor(Color::WHITE),
// UiTargetCamera(main_camera.0), // <- IMPORTANT
// ));
// }
fn rotate_cube(
mut cubes: Query<(&mut Transform, &Spinner)>,

View File

@@ -17,7 +17,7 @@ pub fn spawn_3d_scene(
mut materials: ResMut<Assets<StandardMaterial>>,
) {
commands.spawn((
Mesh3d(meshes.add(Cuboid::default())),
Mesh3d(meshes.add(Cylinder::default())),
MeshMaterial3d(materials.add(Color::from(bevy::color::palettes::css::WHITE))),
Transform::from_xyz(0.0, 0.0, 0.0),
Spinner,