# Source code for simvx.graphics.renderer.shadow_renderer

"""Shadow map rendering — directional CSM and point/spot shadows."""

from __future__ import annotations

import logging
from typing import TYPE_CHECKING, Any

import numpy as np
import vulkan as vk

from ..gpu.memory import upload_numpy

if TYPE_CHECKING:
    from .forward import ForwardRenderer

__all__ = ["ShadowRenderer"]

log = logging.getLogger(__name__)


class ShadowRenderer:
    """Handles shadow depth map rendering for directional, point, and spot lights.

    Renders depth-only shadow passes via the owning :class:`ForwardRenderer`'s
    shadow-pass objects, then packs the parameters the forward shaders need to
    sample those maps into a 336-byte shadow SSBO with this layout:

    ======  ====  =================================================
    offset  size  field
    ======  ====  =================================================
    0       192   3 cascade view-projection matrices (mat4 each)
    192     16    cascade split distances (vec4)
    208     4     directional shadow texture index (uint32)
    212     4     IBL enabled flag (uint32)
    216     4     HDR output flag (uint32)
    220     4     point shadow texture index (uint32, 0xFFFFFFFF = none)
    224     4     spot shadow texture index (uint32, 0xFFFFFFFF = none)
    240     16    point light position + range (vec4)
    256     64    spot light view-projection matrix (mat4)
    320     16    spot light position + range (vec4)
    ======  ====  =================================================

    Light type is packed in ``position.w`` of each entry of the renderer's
    light array: < 0.5 directional, 0.5–1.5 point, > 1.5 spot.
    """

    # Sentinel texture index meaning "no shadow map bound".
    _NO_TEXTURE = 0xFFFFFFFF

    def __init__(self, renderer: ForwardRenderer) -> None:
        """Bind this shadow renderer to its owning forward renderer."""
        self._r = renderer

    def render_shadows(self, cmd: Any, registry: Any) -> None:
        """Render cascaded shadow depth maps for the first directional light.

        Computes cascade matrices from the primary viewport's camera, records
        the cascade depth passes into *cmd*, and uploads the full shadow SSBO
        (point/spot slots initialised to the "no texture" sentinel, to be
        patched later by :meth:`render_point_spot_shadows`).

        :param cmd: Vulkan command buffer being recorded.
        :param registry: Mesh/material registry passed through to the pass.
        """
        r = self._r
        sp = r._shadow_pass
        # Nothing to do without a shadow pass or any renderable instances.
        if not sp or not r._instances:
            return
        viewports = r.viewport_manager.get_all()
        if not viewports:
            return
        _, viewport = viewports[0]

        # Find the first directional light (position.w < 0.5); its "position"
        # field actually stores the light direction.
        light_dir = None
        if r._lights is not None and len(r._lights) > 0:
            for i in range(len(r._lights)):
                if r._lights[i]["position"][3] < 0.5:  # directional
                    light_dir = r._lights[i]["position"][:3].copy()
                    break
        if light_dir is None:
            # Fallback: default sun direction matching shader fallback
            light_dir = np.array([-1.0, -1.0, -1.0], dtype=np.float32)

        # Transforms already uploaded by _upload_transforms() in pre_render()

        # Compute cascade matrices and record the depth-only passes.
        sp.compute_cascades(viewport.camera_view, viewport.camera_proj, light_dir)
        sp.render(cmd, r._instances[: r._max_objects], r._ssbo_set, registry)

        # Upload shadow cascade data to SSBO for forward pass sampling.
        shadow_data = np.zeros(336, dtype=np.uint8)
        # 3 cascade VP matrices: 3 * 64 = 192 bytes
        for c in range(3):
            shadow_data[c * 64 : (c + 1) * 64] = np.frombuffer(
                sp.cascade_vps[c].tobytes(), dtype=np.uint8
            )
        # Cascade splits: 4 floats at offset 192
        shadow_data[192:208] = np.frombuffer(sp.cascade_splits.tobytes(), dtype=np.uint8)
        # Shadow texture index: uint32 at offset 208
        shadow_data[208:212] = np.array(
            [sp.shadow_texture_index], dtype=np.uint32
        ).view(np.uint8)
        # IBL enabled flag: uint32 at offset 212
        shadow_data[212:216] = np.array(
            [1 if r._ibl_enabled else 0], dtype=np.uint32
        ).view(np.uint8)
        # HDR output flag: uint32 at offset 216
        pp = r._post_process
        hdr_flag = 1 if (pp and pp.enabled) else 0
        shadow_data[216:220] = np.array([hdr_flag], dtype=np.uint32).view(np.uint8)
        # Point/spot shadow sentinel (will be overwritten by
        # render_point_spot_shadows if those lights cast shadows).
        sentinel = np.array([self._NO_TEXTURE], dtype=np.uint32).view(np.uint8)
        shadow_data[220:224] = sentinel  # point_shadow_tex
        shadow_data[224:228] = sentinel  # spot_shadow_tex
        upload_numpy(r._engine.ctx.device, r._shadow_mem, shadow_data)

    def render_point_spot_shadows(self, cmd: Any, registry: Any) -> None:
        """Render point and spot light shadow maps, then patch the shadow SSBO.

        Picks the first point light and first spot light from the renderer's
        light array, records their shadow passes into *cmd*, and rewrites the
        point/spot fields of the shadow SSBO previously uploaded by
        :meth:`render_shadows` (the rest of the buffer is read back and
        preserved).

        :param cmd: Vulkan command buffer being recorded.
        :param registry: Mesh/material registry passed through to the pass.
        """
        r = self._r
        psp = r._point_shadow_pass
        if not psp or not r._instances:
            return

        # Scan lights for the first point and first spot light.
        point_light = None
        spot_light = None
        if r._lights is not None and len(r._lights) > 0:
            for i in range(len(r._lights)):
                light_type = r._lights[i]["position"][3]
                if 0.5 < light_type < 1.5 and point_light is None:
                    point_light = r._lights[i]
                elif light_type > 1.5 and spot_light is None:
                    spot_light = r._lights[i]
                # BUGFIX: was `if point_light and spot_light`, which depends on
                # the truth value of numpy structured scalars (ambiguous for
                # multi-field records) and was inconsistent with the explicit
                # `is not None` checks used below.
                if point_light is not None and spot_light is not None:
                    break

        # Read the current shadow SSBO so we only patch point/spot fields.
        ffi = vk.ffi
        device = r._engine.ctx.device
        shadow_data = np.zeros(336, dtype=np.uint8)
        src = vk.vkMapMemory(device, r._shadow_mem, 0, 336, 0)
        ffi.memmove(ffi.cast("void*", shadow_data.ctypes.data), src, 336)
        vk.vkUnmapMemory(device, r._shadow_mem)

        sentinel = np.array([self._NO_TEXTURE], dtype=np.uint32).view(np.uint8)

        # Render point light shadow (cube map) if a usable point light exists.
        if point_light is not None:
            light_pos = point_light["position"][:3].copy()
            light_range = point_light["params"][0]  # range
            if light_range > 0:
                psp.render_point_shadow(
                    cmd,
                    light_pos,
                    light_range,
                    r._instances,
                    r._ssbo_set,
                    registry,
                )
                # Write point shadow texture index and light data.
                shadow_data[220:224] = np.array(
                    [psp.point_shadow_texture_index], dtype=np.uint32
                ).view(np.uint8)
                # point_light_pos_range at offset 240 (vec4)
                pos_range = np.array([*light_pos, light_range], dtype=np.float32)
                shadow_data[240:256] = pos_range.view(np.uint8)
            else:
                shadow_data[220:224] = sentinel
        else:
            shadow_data[220:224] = sentinel

        # Render spot light shadow if a usable spot light exists.
        if spot_light is not None:
            light_pos = spot_light["position"][:3].copy()
            light_dir = spot_light["direction"][:3].copy()
            light_range = spot_light["params"][0]  # range
            outer_cone = spot_light["params"][2]  # outer_cone angle in degrees
            if light_range > 0 and outer_cone > 0:
                psp.render_spot_shadow(
                    cmd,
                    light_pos,
                    light_dir,
                    outer_cone,
                    light_range,
                    r._instances,
                    r._ssbo_set,
                    registry,
                )
                # Write spot shadow texture index.
                shadow_data[224:228] = np.array(
                    [psp.spot_shadow_texture_index], dtype=np.uint32
                ).view(np.uint8)
                # spot_vp at offset 256 (mat4 = 64 bytes)
                spot_vp = psp.get_spot_vp_matrix(
                    light_pos, light_dir, outer_cone, light_range
                )
                shadow_data[256:320] = (
                    np.ascontiguousarray(spot_vp, dtype=np.float32)
                    .ravel()
                    .view(np.uint8)
                )
                # spot_light_pos_range at offset 320 (vec4)
                pos_range = np.array([*light_pos, light_range], dtype=np.float32)
                shadow_data[320:336] = pos_range.view(np.uint8)
            else:
                shadow_data[224:228] = sentinel
        else:
            shadow_data[224:228] = sentinel

        upload_numpy(device, r._shadow_mem, shadow_data)