From f74fdbf110368b7a494f3d33cfaff6e9c9778ce5 Mon Sep 17 00:00:00 2001 From: Giorgio Gori Date: Fri, 27 Mar 2026 14:34:54 -0700 Subject: [PATCH 1/5] Sync with 6d9ea6b --- .github/workflows/continuous.yaml | 2 +- .gitignore | 9 + LagrangeOptions.cmake.sample | 47 +- cmake/lagrange/lagrangeMklModules.txt | 2 +- .../lagrange_add_python_binding.cmake | 7 + cmake/lagrange/lagrange_add_test.cmake | 13 + cmake/lagrange/lagrange_find_package.cmake | 1 + .../lagrange/lagrange_limit_parallelism.cmake | 94 +- cmake/recipes/external/CPM.cmake | 10 +- cmake/recipes/external/Eigen3.cmake | 30 +- cmake/recipes/external/cista.cmake | 38 + cmake/recipes/external/cpptrace.cmake | 2 + .../external/instant-meshes-core.cmake | 5 +- .../external/instant-meshes-core.patch | 11 - cmake/recipes/external/piqp.cmake | 82 -- cmake/recipes/external/polyscope.cmake | 1 + cmake/recipes/external/quadprog.cmake | 4 +- cmake/recipes/external/zstd.cmake | 42 + modules/bvh/examples/CMakeLists.txt | 4 + modules/bvh/examples/uv_overlap.cpp | 452 ++++++++ .../include/lagrange/bvh/compute_uv_overlap.h | 161 +++ modules/bvh/python/src/bvh.cpp | 89 ++ modules/bvh/python/tests/asset.py | 76 -- .../python/tests/conftest.py} | 19 +- modules/bvh/python/tests/test_EdgeAABBTree.py | 2 - .../bvh/python/tests/test_TriangleAABBTree.py | 2 - .../tests/test_compute_mesh_distances.py | 4 - .../python/tests/test_compute_uv_overlap.py | 230 +++++ .../tests/test_remove_interior_shells.py | 2 - modules/bvh/src/compute_uv_overlap.cpp | 976 ++++++++++++++++++ modules/bvh/tests/test_compute_uv_overlap.cpp | 670 ++++++++++++ .../python/tests/assets.py => conftest.py} | 113 +- modules/core/include/lagrange/Attribute.h | 32 +- .../core/include/lagrange/ExactPredicates.h | 7 + .../lagrange/ExactPredicatesShewchuk.h | 7 + modules/core/include/lagrange/SurfaceMesh.h | 15 +- .../core/include/lagrange/attribute_names.h | 7 + .../lagrange/compute_facet_circumcenter.h | 7 + .../lagrange/compute_facet_facet_adjacency.h | 52 + 
.../lagrange/compute_mesh_covariance.h | 7 + .../include/lagrange/compute_pointcloud_pca.h | 7 + .../include/lagrange/compute_seam_edges.h | 7 + .../core/include/lagrange/compute_uv_charts.h | 7 + .../lagrange/internal/SurfaceMeshInfo.h | 72 ++ .../lagrange/internal/get_uv_attribute.h | 8 +- .../internal/set_invalid_indexed_values.h | 69 ++ .../internal/surface_mesh_info_convert.h | 52 + modules/core/include/lagrange/mesh_bbox.h | 46 + .../lagrange/mesh_cleanup/close_small_holes.h | 9 + .../mesh_cleanup/detect_degenerate_facets.h | 7 + .../mesh_cleanup/remove_degenerate_facets.h | 7 + .../mesh_cleanup/remove_duplicate_facets.h | 7 + .../mesh_cleanup/remove_duplicate_vertices.h | 7 + .../mesh_cleanup/remove_isolated_vertices.h | 7 + .../mesh_cleanup/remove_null_area_facets.h | 7 + .../mesh_cleanup/remove_short_edges.h | 7 + .../remove_topologically_degenerate_facets.h | 7 + .../lagrange/mesh_cleanup/rescale_uv_charts.h | 7 + .../mesh_cleanup/resolve_nonmanifoldness.h | 7 + .../resolve_vertex_nonmanifoldness.h | 7 + .../lagrange/mesh_cleanup/split_long_edges.h | 7 + .../mesh_cleanup/unflip_uv_triangles.h | 7 + .../core/include/lagrange/orient_outward.h | 7 + modules/core/include/lagrange/reorder_mesh.h | 7 + .../segment_segment_squared_distance.h | 7 + .../lagrange/split_facets_by_material.h | 7 + .../include/lagrange/thicken_and_close_mesh.h | 7 + .../core/include/lagrange/utils/StackVector.h | 2 +- modules/core/include/lagrange/uv_mesh.h | 30 +- modules/core/python/src/bind_utilities.h | 18 +- modules/core/python/tests/test_attribute.py | 1 - .../core/python/tests/test_cast_attribute.py | 1 - .../python/tests/test_close_small_holes.py | 2 - .../core/python/tests/test_combine_meshes.py | 2 - .../python/tests/test_compute_centroid.py | 2 - .../python/tests/test_compute_components.py | 2 - .../tests/test_compute_dihedral_angles.py | 2 - .../tests/test_compute_dijkstra_distance.py | 3 - .../python/tests/test_compute_edge_lengths.py | 2 - 
.../python/tests/test_compute_facet_area.py | 2 - .../tests/test_compute_facet_circumcenter.py | 2 - .../python/tests/test_compute_facet_normal.py | 2 - .../tests/test_compute_mesh_covariance.py | 2 - .../core/python/tests/test_compute_normal.py | 2 - .../python/tests/test_compute_seam_edges.py | 2 - .../tests/test_compute_tangent_bitangent.py | 2 - .../tests/test_compute_vertex_normal.py | 2 - .../tests/test_compute_vertex_valence.py | 2 - .../tests/test_detect_degenerate_facets.py | 1 - .../core/python/tests/test_extract_submesh.py | 161 +++ .../python/tests/test_filter_attributes.py | 3 - .../python/tests/test_indexed_attribute.py | 2 - modules/core/python/tests/test_isoline.py | 1 - .../core/python/tests/test_orient_outward.py | 1 - .../core/python/tests/test_permute_facets.py | 1 - .../python/tests/test_permute_vertices.py | 2 - .../core/python/tests/test_remap_vertices.py | 2 - .../tests/test_remove_degenerate_facets.py | 1 - .../tests/test_remove_duplicate_vertices.py | 1 - .../tests/test_remove_null_area_facets.py | 1 - .../python/tests/test_remove_short_edges.py | 2 - .../tests/test_resolve_nonmanifoldness.py | 1 - ...test_select_facets_by_normal_similarity.py | 2 - .../tests/test_select_facets_in_frustum.py | 2 - .../python/tests/test_split_long_edges.py | 2 - .../core/python/tests/test_surface_mesh.py | 1 - .../tests/test_thicken_and_close_mesh.py | 2 - .../core/python/tests/test_transform_mesh.py | 2 - .../test_triangulate_polygonal_facets.py | 2 - .../python/tests/test_unify_index_buffer.py | 2 - .../tests/test_weld_indexed_attribute.py | 2 - modules/core/src/Attribute.cpp | 24 +- modules/core/src/SurfaceMesh.cpp | 172 ++- .../src/compute_facet_facet_adjacency.cpp | 63 ++ .../core/src/internal/get_uv_attribute.cpp | 48 +- modules/core/src/mesh_bbox.cpp | 45 + modules/core/src/normalize_meshes.cpp | 4 + modules/core/src/uv_mesh.cpp | 121 ++- modules/core/src/weld_indexed_attribute.cpp | 14 + .../test_compute_facet_facet_adjacency.cpp | 208 ++++ 
modules/core/tests/test_compute_normal.cpp | 6 +- modules/core/tests/test_mesh_bbox.cpp | 78 ++ .../tests/test_set_invalid_indexed_values.cpp | 132 +++ modules/core/tests/test_utils_geometry3d.cpp | 9 +- modules/core/tests/test_uv_mesh.cpp | 258 +++++ .../tests/test_weld_indexed_attribute.cpp | 56 + .../python/tests/test_mesh_smoothing.py | 3 - modules/io/CMakeLists.txt | 3 +- modules/io/examples/mesh_convert.cpp | 39 +- modules/io/python/src/io.cpp | 2 +- modules/io/src/load_gltf.cpp | 6 +- modules/io/src/load_mesh.cpp | 14 +- modules/io/src/load_obj.cpp | 27 +- modules/io/src/load_scene.cpp | 9 +- modules/io/src/load_simple_scene.cpp | 10 +- modules/io/src/save_gltf.cpp | 40 +- modules/io/src/save_mesh.cpp | 5 +- modules/io/src/save_scene.cpp | 5 +- modules/io/src/save_simple_scene.cpp | 5 +- modules/io/tests/test_obj.cpp | 81 ++ modules/polyddg/CMakeLists.txt | 6 +- .../lagrange/polyddg/DifferentialOperators.h | 94 +- .../polyddg/compute_smooth_direction_field.h | 83 ++ .../lagrange/polyddg/hodge_decomposition.h | 175 ++++ .../python/examples/hodge_decomposition.py | 195 ++++ modules/polyddg/python/src/polyddg.cpp | 372 ++++++- modules/polyddg/python/tests/conftest.py | 96 ++ modules/polyddg/python/tests/test_polyddg.py | 228 +++- modules/polyddg/src/DifferentialOperators.cpp | 107 +- .../src/compute_principal_curvatures.cpp | 32 +- .../src/compute_smooth_direction_field.cpp | 216 ++++ modules/polyddg/src/hodge_decomposition.cpp | 523 ++++++++++ modules/polyddg/src/nrosy_utils.h | 57 + modules/polyddg/tests/CMakeLists.txt | 3 + .../test_compute_principal_curvatures.cpp | 160 +++ .../test_compute_smooth_direction_field.cpp | 340 ++++++ .../tests/test_differential_operators.cpp | 94 ++ .../tests/test_hodge_decomposition.cpp | 471 +++++++++ modules/polyscope/examples/CMakeLists.txt | 2 +- modules/polyscope/examples/mesh_viewer.cpp | 36 +- modules/polyscope/src/register_attributes.h | 56 +- .../polyscope/src/register_edge_network.cpp | 15 +- 
modules/polyscope/src/register_mesh.cpp | 15 +- .../polyscope/src/register_point_cloud.cpp | 15 +- modules/polyscope/src/register_structure.cpp | 15 +- modules/polyscope/tests/CMakeLists.txt | 7 + modules/polyscope/tests/test_polyscope.cpp | 46 + modules/python/CMakeLists.txt | 32 +- modules/raycasting/examples/picking_demo.cpp | 6 +- .../include/lagrange/raycasting/RayCaster.h | 2 + .../raycasting/compute_local_feature_size.h | 115 +++ modules/raycasting/python/src/raycasting.cpp | 63 ++ .../python/tests/test_raycasting.py | 139 +++ modules/raycasting/raycasting.md | 1 + modules/raycasting/src/RayCaster.cpp | 38 +- .../src/closest_vertex_from_barycentric.h | 31 + .../src/compute_local_feature_size.cpp | 269 +++++ .../raycasting/src/project_closest_vertex.cpp | 13 +- .../tests/test_compute_local_feature_size.cpp | 179 ++++ modules/remeshing_im/src/remesh.cpp | 2 +- modules/scene/CMakeLists.txt | 3 +- modules/scene/include/lagrange/scene/Scene.h | 4 +- .../lagrange/scene/internal/shared_utils.h | 213 ++++ modules/scene/python/tests/test_scene.py | 2 - .../scene/python/tests/test_simple_scene.py | 2 - .../scene/src/internal/scene_string_utils.cpp | 26 +- modules/serialization2/CMakeLists.txt | 38 + .../include/lagrange/serialization/api.h | 34 + .../lagrange/serialization/serialize_mesh.h | 119 +++ .../lagrange/serialization/serialize_scene.h | 113 ++ .../serialization/serialize_simple_scene.h | 115 +++ .../include/lagrange/serialization/types.h | 80 ++ modules/serialization2/python/CMakeLists.txt | 12 + .../include/lagrange/python/serialization2.h | 18 + .../python/src/serialization2.cpp | 375 +++++++ .../python/tests/test_serialization2.py | 240 +++++ modules/serialization2/src/CistaMesh.h | 69 ++ modules/serialization2/src/CistaScene.h | 175 ++++ modules/serialization2/src/CistaSimpleScene.h | 54 + modules/serialization2/src/CistaValue.h | 60 ++ modules/serialization2/src/compress.cpp | 135 +++ modules/serialization2/src/compress.h | 47 + 
modules/serialization2/src/detect_type.cpp | 68 ++ modules/serialization2/src/detect_type.h | 40 + modules/serialization2/src/mesh_convert.h | 28 + modules/serialization2/src/serialize_mesh.cpp | 306 ++++++ .../serialization2/src/serialize_scene.cpp | 878 ++++++++++++++++ .../src/serialize_simple_scene.cpp | 425 ++++++++ .../tests/CMakeLists.txt} | 16 +- .../tests/test_serialization2_benchmark.cpp | 87 ++ .../tests/test_serialize_mesh.cpp | 395 +++++++ .../tests/test_serialize_scene.cpp | 541 ++++++++++ .../tests/test_serialize_simple_scene.cpp | 353 +++++++ .../subdivision/examples/mesh_subdivision.cpp | 19 +- .../lagrange/subdivision/mesh_subdivision.h | 25 +- .../subdivision/python/src/subdivision.cpp | 8 +- .../python/tests/test_mesh_subdivision.py | 91 +- .../subdivision/src/TopologyRefinerFactory.h | 2 +- .../subdivision/src/subdivide_adaptive.cpp | 316 +++++- modules/subdivision/src/subdivide_uniform.cpp | 31 +- modules/subdivision/tests/CMakeLists.txt | 2 + .../tests/test_mesh_subdivision.cpp | 377 ++++++- modules/testing/CMakeLists.txt | 3 +- .../lagrange/testing/check_meshes_equal.h | 126 +++ .../lagrange/testing/check_scenes_equal.h | 200 ++++ .../testing/check_simple_scenes_equal.h | 60 ++ .../examples/extract_mesh_with_alpha_mask.cpp | 88 +- modules/texproc/examples/io_helpers.h | 1 + .../examples/texture_rasterization.cpp | 2 +- .../texproc/extract_mesh_with_alpha_mask.h | 4 +- modules/texproc/python/src/texproc.cpp | 12 +- .../python/tests/{assets.py => conftest.py} | 0 .../python/tests/test_mesh_with_alpha_mask.py | 28 +- modules/texproc/python/tests/test_texproc.py | 2 - modules/texproc/shared/shared_utils.h | 148 +-- modules/texproc/src/clip_triangle_by_bbox.cpp | 164 +++ modules/texproc/src/clip_triangle_by_bbox.h | 44 + .../src/extract_mesh_with_alpha_mask.cpp | 513 ++++++--- .../tests/test_mesh_with_alpha_mask.cpp | 96 +- .../texproc/tests/test_texture_processing.cpp | 6 +- modules/ui/src/utils/colormap.cpp | 2 + 
.../include/lagrange/volume/mesh_to_volume.h | 2 +- modules/volume/python/CMakeLists.txt | 11 +- modules/volume/python/src/volume.cpp | 15 + modules/volume/python/tests/assets.py | 47 - modules/volume/python/tests/test_volume.py | 4 +- modules/volume/src/mesh_to_volume.cpp | 35 +- modules/volume/tests/test_voxelization.cpp | 38 + .../examples/sample_points_in_mesh.cpp | 6 +- .../tests/test_fast_winding_number.cpp | 6 +- pyproject.toml | 4 +- 251 files changed, 16253 insertions(+), 1211 deletions(-) create mode 100644 cmake/recipes/external/cista.cmake delete mode 100644 cmake/recipes/external/instant-meshes-core.patch delete mode 100644 cmake/recipes/external/piqp.cmake create mode 100644 cmake/recipes/external/zstd.cmake create mode 100644 modules/bvh/examples/uv_overlap.cpp create mode 100644 modules/bvh/include/lagrange/bvh/compute_uv_overlap.h delete mode 100644 modules/bvh/python/tests/asset.py rename modules/{filtering/python/tests/assets.py => bvh/python/tests/conftest.py} (80%) create mode 100644 modules/bvh/python/tests/test_compute_uv_overlap.py create mode 100644 modules/bvh/src/compute_uv_overlap.cpp create mode 100644 modules/bvh/tests/test_compute_uv_overlap.cpp rename modules/{core/python/tests/assets.py => conftest.py} (57%) create mode 100644 modules/core/include/lagrange/compute_facet_facet_adjacency.h create mode 100644 modules/core/include/lagrange/internal/SurfaceMeshInfo.h create mode 100644 modules/core/include/lagrange/internal/set_invalid_indexed_values.h create mode 100644 modules/core/include/lagrange/internal/surface_mesh_info_convert.h create mode 100644 modules/core/include/lagrange/mesh_bbox.h create mode 100644 modules/core/python/tests/test_extract_submesh.py create mode 100644 modules/core/src/compute_facet_facet_adjacency.cpp create mode 100644 modules/core/src/mesh_bbox.cpp create mode 100644 modules/core/tests/test_compute_facet_facet_adjacency.cpp create mode 100644 modules/core/tests/test_mesh_bbox.cpp create mode 100644 
modules/core/tests/test_set_invalid_indexed_values.cpp create mode 100644 modules/core/tests/test_uv_mesh.cpp create mode 100644 modules/polyddg/include/lagrange/polyddg/compute_smooth_direction_field.h create mode 100644 modules/polyddg/include/lagrange/polyddg/hodge_decomposition.h create mode 100644 modules/polyddg/python/examples/hodge_decomposition.py create mode 100644 modules/polyddg/python/tests/conftest.py create mode 100644 modules/polyddg/src/compute_smooth_direction_field.cpp create mode 100644 modules/polyddg/src/hodge_decomposition.cpp create mode 100644 modules/polyddg/src/nrosy_utils.h create mode 100644 modules/polyddg/tests/test_compute_principal_curvatures.cpp create mode 100644 modules/polyddg/tests/test_compute_smooth_direction_field.cpp create mode 100644 modules/polyddg/tests/test_hodge_decomposition.cpp create mode 100644 modules/raycasting/include/lagrange/raycasting/compute_local_feature_size.h create mode 100644 modules/raycasting/src/closest_vertex_from_barycentric.h create mode 100644 modules/raycasting/src/compute_local_feature_size.cpp create mode 100644 modules/raycasting/tests/test_compute_local_feature_size.cpp create mode 100644 modules/scene/include/lagrange/scene/internal/shared_utils.h create mode 100644 modules/serialization2/CMakeLists.txt create mode 100644 modules/serialization2/include/lagrange/serialization/api.h create mode 100644 modules/serialization2/include/lagrange/serialization/serialize_mesh.h create mode 100644 modules/serialization2/include/lagrange/serialization/serialize_scene.h create mode 100644 modules/serialization2/include/lagrange/serialization/serialize_simple_scene.h create mode 100644 modules/serialization2/include/lagrange/serialization/types.h create mode 100644 modules/serialization2/python/CMakeLists.txt create mode 100644 modules/serialization2/python/include/lagrange/python/serialization2.h create mode 100644 modules/serialization2/python/src/serialization2.cpp create mode 100644 
modules/serialization2/python/tests/test_serialization2.py create mode 100644 modules/serialization2/src/CistaMesh.h create mode 100644 modules/serialization2/src/CistaScene.h create mode 100644 modules/serialization2/src/CistaSimpleScene.h create mode 100644 modules/serialization2/src/CistaValue.h create mode 100644 modules/serialization2/src/compress.cpp create mode 100644 modules/serialization2/src/compress.h create mode 100644 modules/serialization2/src/detect_type.cpp create mode 100644 modules/serialization2/src/detect_type.h create mode 100644 modules/serialization2/src/mesh_convert.h create mode 100644 modules/serialization2/src/serialize_mesh.cpp create mode 100644 modules/serialization2/src/serialize_scene.cpp create mode 100644 modules/serialization2/src/serialize_simple_scene.cpp rename modules/{scene/python/tests/assets.py => serialization2/tests/CMakeLists.txt} (69%) create mode 100644 modules/serialization2/tests/test_serialization2_benchmark.cpp create mode 100644 modules/serialization2/tests/test_serialize_mesh.cpp create mode 100644 modules/serialization2/tests/test_serialize_scene.cpp create mode 100644 modules/serialization2/tests/test_serialize_simple_scene.cpp create mode 100644 modules/testing/include/lagrange/testing/check_meshes_equal.h create mode 100644 modules/testing/include/lagrange/testing/check_scenes_equal.h create mode 100644 modules/testing/include/lagrange/testing/check_simple_scenes_equal.h rename modules/texproc/python/tests/{assets.py => conftest.py} (100%) create mode 100644 modules/texproc/src/clip_triangle_by_bbox.cpp create mode 100644 modules/texproc/src/clip_triangle_by_bbox.h delete mode 100644 modules/volume/python/tests/assets.py diff --git a/.github/workflows/continuous.yaml b/.github/workflows/continuous.yaml index c104e7bd..131a7b7a 100644 --- a/.github/workflows/continuous.yaml +++ b/.github/workflows/continuous.yaml @@ -109,7 +109,7 @@ jobs: echo "CC=clang" >> $GITHUB_ENV echo "CXX=clang++" >> $GITHUB_ENV fi - 
sudo apt-get install xorg-dev + sudo apt-get install -y xorg-dev - name: Dependencies (macOS) if: runner.os == 'macOS' diff --git a/.gitignore b/.gitignore index 3361782a..c3aa2dea 100644 --- a/.gitignore +++ b/.gitignore @@ -43,6 +43,10 @@ uv.lock *.out *.app +# Lagrange serialized mesh/scene +*.lgs +*.lgm + # Mesh files *.obj *.off @@ -96,3 +100,8 @@ keyfile.txt # ctags tags + +# Claude +.claude/agent-memory/ +/.tmp/ +.claude/settings.local.json diff --git a/LagrangeOptions.cmake.sample b/LagrangeOptions.cmake.sample index a05af14e..1de610b6 100644 --- a/LagrangeOptions.cmake.sample +++ b/LagrangeOptions.cmake.sample @@ -48,29 +48,30 @@ # option(LAGRANGE_ALL "Build all lagrange modules" ON) # Optional modules in alphabetical order. -# option(LAGRANGE_MODULE_BVH "Build module lagrange::bvh" ON) -# option(LAGRANGE_MODULE_FILTERING "Build module lagrange::filtering" ON) -# option(LAGRANGE_MODULE_FS "Build module lagrange::fs" ON) -# option(LAGRANGE_MODULE_GEODESIC "Build module lagrange::geodesic" ON) -# option(LAGRANGE_MODULE_IMAGE "Build module lagrange::image" ON) -# option(LAGRANGE_MODULE_IMAGE_IO "Build module lagrange::image_io" ON) -# option(LAGRANGE_MODULE_IO "Build module lagrange::io" ON) -# option(LAGRANGE_MODULE_PACKING "Build module lagrange::packing" ON) -# option(LAGRANGE_MODULE_PARTITIONING "Build module lagrange::partitioning" ON) -# option(LAGRANGE_MODULE_POISSON "Build module lagrange::poisson" ON) -# option(LAGRANGE_MODULE_POLYDDG "Build module lagrange::polyddg" ON) -# option(LAGRANGE_MODULE_POLYSCOPE "Build module lagrange::polyscope" ON) -# option(LAGRANGE_MODULE_PRIMITIVE "Build module lagrange::primitive" ON) -# option(LAGRANGE_MODULE_PYTHON "Build module lagrange::python" ON) -# option(LAGRANGE_MODULE_RAYCASTING "Build module lagrange::raycasting" ON) -# option(LAGRANGE_MODULE_REMESHING_IM "Build module lagrange::remeshing_im" ON) -# option(LAGRANGE_MODULE_SCENE "Build module lagrange::scene" ON) -# option(LAGRANGE_MODULE_SOLVER "Build 
module lagrange::solver" ON) -# option(LAGRANGE_MODULE_SUBDIVISION "Build module lagrange::subdivision" ON) -# option(LAGRANGE_MODULE_TEXPROC "Build module lagrange::texproc" ON) -# option(LAGRANGE_MODULE_UI "Build module lagrange::ui" ON) -# option(LAGRANGE_MODULE_VOLUME "Build module lagrange::volume" ON) -# option(LAGRANGE_MODULE_WINDING "Build module lagrange::winding" ON) +# option(LAGRANGE_MODULE_BVH "Build module lagrange::bvh" ON) +# option(LAGRANGE_MODULE_FILTERING "Build module lagrange::filtering" ON) +# option(LAGRANGE_MODULE_FS "Build module lagrange::fs" ON) +# option(LAGRANGE_MODULE_GEODESIC "Build module lagrange::geodesic" ON) +# option(LAGRANGE_MODULE_IMAGE "Build module lagrange::image" ON) +# option(LAGRANGE_MODULE_IMAGE_IO "Build module lagrange::image_io" ON) +# option(LAGRANGE_MODULE_IO "Build module lagrange::io" ON) +# option(LAGRANGE_MODULE_PACKING "Build module lagrange::packing" ON) +# option(LAGRANGE_MODULE_PARTITIONING "Build module lagrange::partitioning" ON) +# option(LAGRANGE_MODULE_POISSON "Build module lagrange::poisson" ON) +# option(LAGRANGE_MODULE_POLYDDG "Build module lagrange::polyddg" ON) +# option(LAGRANGE_MODULE_POLYSCOPE "Build module lagrange::polyscope" ON) +# option(LAGRANGE_MODULE_PRIMITIVE "Build module lagrange::primitive" ON) +# option(LAGRANGE_MODULE_PYTHON "Build module lagrange::python" ON) +# option(LAGRANGE_MODULE_RAYCASTING "Build module lagrange::raycasting" ON) +# option(LAGRANGE_MODULE_REMESHING_IM "Build module lagrange::remeshing_im" ON) +# option(LAGRANGE_MODULE_SCENE "Build module lagrange::scene" ON) +# option(LAGRANGE_MODULE_SERIALIZATION2 "Build module lagrange::serialization2" ON) +# option(LAGRANGE_MODULE_SOLVER "Build module lagrange::solver" ON) +# option(LAGRANGE_MODULE_SUBDIVISION "Build module lagrange::subdivision" ON) +# option(LAGRANGE_MODULE_TEXPROC "Build module lagrange::texproc" ON) +# option(LAGRANGE_MODULE_UI "Build module lagrange::ui" ON) +# option(LAGRANGE_MODULE_VOLUME "Build 
module lagrange::volume" ON) +# option(LAGRANGE_MODULE_WINDING "Build module lagrange::winding" ON) # General options # option(LAGRANGE_COMPILE_TESTS "Enable compilation tests" ON) diff --git a/cmake/lagrange/lagrangeMklModules.txt b/cmake/lagrange/lagrangeMklModules.txt index 47ca1447..f8bbc7c7 100644 --- a/cmake/lagrange/lagrangeMklModules.txt +++ b/cmake/lagrange/lagrangeMklModules.txt @@ -1 +1 @@ -anorigami;baking;cad_io;contouring;decal;deformers;filtering;meshproc;quadrangulation;solver;texproc +anorigami;baking;cad_io;contouring;decal;deformers;filtering;meshproc;polyddg;quadrangulation;solver;texproc diff --git a/cmake/lagrange/lagrange_add_python_binding.cmake b/cmake/lagrange/lagrange_add_python_binding.cmake index 4616d34e..8bd9334c 100644 --- a/cmake/lagrange/lagrange_add_python_binding.cmake +++ b/cmake/lagrange/lagrange_add_python_binding.cmake @@ -10,6 +10,8 @@ # governing permissions and limitations under the License. # function(lagrange_add_python_binding) + cmake_parse_arguments(PARSE_ARGV 0 ARG "" "PYTHON_NAME" "") + # Retrieve module name get_filename_component(module_path "${CMAKE_CURRENT_SOURCE_DIR}/.." REALPATH) get_filename_component(module_name "${module_path}" NAME) @@ -39,4 +41,9 @@ function(lagrange_add_python_binding) # Keep track of active modules set_property(TARGET lagrange_python APPEND PROPERTY LAGRANGE_ACTIVE_MODULES ${module_name}) + + # Optional: override the Python submodule name (e.g. 
expose "serialization2" as "serialization") + if(ARG_PYTHON_NAME) + set_property(TARGET lagrange_python PROPERTY LAGRANGE_PYTHON_NAME_${module_name} ${ARG_PYTHON_NAME}) + endif() endfunction() diff --git a/cmake/lagrange/lagrange_add_test.cmake b/cmake/lagrange/lagrange_add_test.cmake index 1ef414d8..5ccc56b8 100644 --- a/cmake/lagrange/lagrange_add_test.cmake +++ b/cmake/lagrange/lagrange_add_test.cmake @@ -56,15 +56,28 @@ function(lagrange_add_test) # Register tests file(MAKE_DIRECTORY "${CMAKE_BINARY_DIR}/reports") + # When cross-compiling with Emscripten, test discovery at build time can produce truncated JSON + # output due to stdout flushing issues with PROXY_TO_PTHREAD. Use PRE_TEST to defer discovery to + # ctest runtime instead. See: + # - https://github.com/emscripten-core/emscripten/issues/15186 + # - https://github.com/emscripten-core/emscripten/issues/20059 + if(EMSCRIPTEN) + set(_discovery_mode PRE_TEST) + else() + set(_discovery_mode POST_BUILD) + endif() + if(LAGRANGE_TOPLEVEL_PROJECT AND NOT USE_SANITIZER MATCHES "([Tt]hread)") catch_discover_tests(${test_target} REPORTER junit OUTPUT_DIR "${CMAKE_BINARY_DIR}/reports" OUTPUT_SUFFIX ".xml" + DISCOVERY_MODE ${_discovery_mode} PROPERTIES ENVIRONMENT ${LAGRANGE_TESTS_ENVIRONMENT} ) else() catch_discover_tests(${test_target} + DISCOVERY_MODE ${_discovery_mode} PROPERTIES ENVIRONMENT ${LAGRANGE_TESTS_ENVIRONMENT} ) endif() diff --git a/cmake/lagrange/lagrange_find_package.cmake b/cmake/lagrange/lagrange_find_package.cmake index fc82db97..ec87aae2 100644 --- a/cmake/lagrange/lagrange_find_package.cmake +++ b/cmake/lagrange/lagrange_find_package.cmake @@ -24,6 +24,7 @@ function(lagrange_find_package name) span-lite spdlog TBB + zstd ) # Defer to find_package() if desired. In the future we will switch to a dependency provider. 
diff --git a/cmake/lagrange/lagrange_limit_parallelism.cmake b/cmake/lagrange/lagrange_limit_parallelism.cmake index eeef25b9..8dd9d046 100644 --- a/cmake/lagrange/lagrange_limit_parallelism.cmake +++ b/cmake/lagrange/lagrange_limit_parallelism.cmake @@ -10,81 +10,57 @@ # governing permissions and limitations under the License. # function(lagrange_limit_parallelism) - # Query system information cmake_host_system_information(RESULT NUMBER_OF_PHYSICAL_CORES QUERY NUMBER_OF_PHYSICAL_CORES) - cmake_host_system_information(RESULT AVAILABLE_PHYSICAL_MEMORY QUERY AVAILABLE_PHYSICAL_MEMORY) - cmake_host_system_information(RESULT AVAILABLE_VIRTUAL_MEMORY QUERY AVAILABLE_VIRTUAL_MEMORY) - cmake_host_system_information(RESULT TOTAL_VIRTUAL_MEMORY QUERY TOTAL_VIRTUAL_MEMORY) cmake_host_system_information(RESULT TOTAL_PHYSICAL_MEMORY QUERY TOTAL_PHYSICAL_MEMORY) - # Peak memory computed "manually" for each platform (in MB) - # Use a hard coded limit of 2 parallel linking jobs - set(max_rss_linux_debug 3000) - set(max_rss_linux_release 6000) # Force -j3 on a 16G memory machine. - set(max_rss_darwin_debug 729) - set(max_rss_darwin_release 395) - set(max_rss_windows_debug 2100) - set(max_rss_windows_release 1300) - - # Use "release" limit only for matching single-config mode - if(GENERATOR_IS_MULTI_CONFIG OR NOT DEFINED CMAKE_BUILD_TYPE) - message(STATUS "Defaulting to debug") - set(_postfix "debug") + # Determine build type for memory estimation + get_property(_is_multi_config GLOBAL PROPERTY GENERATOR_IS_MULTI_CONFIG) + if(_is_multi_config OR NOT DEFINED CMAKE_BUILD_TYPE) + set(_build_type "debug") else() - string(TOLOWER ${CMAKE_BUILD_TYPE} _type) - if(_type STREQUAL release) - set(_postfix "release") - else() - set(_postfix "debug") - endif() + string(TOLOWER "${CMAKE_BUILD_TYPE}" _build_type) endif() - string(TOLOWER ${CMAKE_HOST_SYSTEM_NAME} _system) + # Estimated peak RSS per link job (in MB), by build type. 
+ # Measure with: /usr/bin/time -v cmake --build --preset -j1 --target lagrange_python + set(_default_link_memory_release 16000) # 16GB + set(_default_link_memory_relwithdebinfo 32000) # 32GB + set(_default_link_memory_debug 32000) # 32GB - # Use a 3/2 factor safety margin compared to observed memory usage - math(EXPR num_cpu_memory "${AVAILABLE_PHYSICAL_MEMORY} * 3 / 2 / ${max_rss_${_system}_${_postfix}}") + # LTO links can require significantly more memory + if(CMAKE_INTERPROCEDURAL_OPTIMIZATION) + set(_default_link_memory_release 64000) # 64GB + set(_default_link_memory_relwithdebinfo 64000) # 64GB + endif() - # Compute limits for link/compile steps - set(num_cpu_link 2) - set(num_cpu_compile ${NUMBER_OF_PHYSICAL_CORES}) - if(num_cpu_link GREATER num_cpu_memory) - set(num_cpu_link ${num_cpu_memory}) + if(DEFINED _default_link_memory_${_build_type}) + set(_link_memory ${_default_link_memory_${_build_type}}) + else() + set(_link_memory ${_default_link_memory_debug}) endif() - if(num_cpu_compile GREATER num_cpu_memory) - set(num_cpu_compile ${num_cpu_memory}) + + math(EXPR num_link_jobs "${TOTAL_PHYSICAL_MEMORY} / ${_link_memory}") + if(num_link_jobs LESS 1) + set(num_link_jobs 1) endif() if(CMAKE_SCRIPT_MODE_FILE) - # The message() command, without any mode, will print to stderr. But jenkins only allows us - # to capture stdout. To print a clean message without hyphens, we use cmake's echo command. - execute_process(COMMAND ${CMAKE_COMMAND} -E echo "${num_cpu_compile}") + # Script mode: echo the number of physical cores for use as the -j flag in Jenkins. + # Link parallelism is handled separately via Ninja job pools at configure time. 
+ execute_process(COMMAND ${CMAKE_COMMAND} -E echo "${NUMBER_OF_PHYSICAL_CORES}") else() - message(STATUS "Parallelism: Available physical memory: ${AVAILABLE_PHYSICAL_MEMORY} / ${TOTAL_PHYSICAL_MEMORY}") - message(STATUS "Parallelism: Available virtual memory: ${AVAILABLE_VIRTUAL_MEMORY} / ${TOTAL_VIRTUAL_MEMORY}") - message(STATUS "Parallelism: Number of physical cores: ${NUMBER_OF_PHYSICAL_CORES}") - message(STATUS "Parallelism: Limiting link pool to ${num_cpu_link}") - message(STATUS "Parallelism: Limiting compile pool to ${num_cpu_compile}") - endif() + message(STATUS "Parallelism: Total physical memory: ${TOTAL_PHYSICAL_MEMORY} MB") + message(STATUS "Parallelism: Link job memory budget: ${_link_memory} MB (${_build_type})") + message(STATUS "Parallelism: Limiting link pool to ${num_link_jobs}") - # Limit parallelism based on number of physical cores + available memory. - set_property(GLOBAL PROPERTY JOB_POOLS - pool-link=${num_cpu_link} - pool-compile=${num_cpu_compile} - pool-precompile-header=${num_cpu_compile} - ) - set(CMAKE_JOB_POOL_LINK "pool-link" CACHE STRING "Job pool for linking" FORCE) - set(CMAKE_JOB_POOL_COMPILE "pool-compile" CACHE STRING "Job pool for compiling" FORCE) - set(CMAKE_JOB_POOL_PRECOMPILE_HEADER "pool-precompile-header" CACHE STRING "Job pool for generating pre-compiled headers" FORCE) - - # Note: We cannot set directly CMAKE_BUILD_PARALLEL_LEVEL or CTEST_PARALLEL_LEVEL from this CMake file, - # since those are environment variables [1]: they are not cached and do not affect subsequent CMake calls. - # In practice, the parallelism for Ninja should be limited by our job pools, so the only thing we need is - # to run ctest in parallel. 
- # [1]: https://cmake.org/cmake/help/latest/manual/cmake-language.7.html#cmake-language-environment-variables + set_property(GLOBAL PROPERTY JOB_POOLS pool-link=${num_link_jobs}) + set(CMAKE_JOB_POOL_LINK "pool-link" CACHE STRING "Job pool for linking" FORCE) + endif() endfunction() -# If this file is run in script mode, calling this function will simply echo the total number of -# cores that we desire to build/test with. +# If this file is run in script mode, it echoes the number of physical cores for use as +# the -j flag for cmake --build and ctest. Link parallelism is not relevant here — it is +# enforced by Ninja job pools set during the configure step. if(CMAKE_SCRIPT_MODE_FILE) if(DEFINED CMAKE_ARGV3) # We need to extract build type from preset diff --git a/cmake/recipes/external/CPM.cmake b/cmake/recipes/external/CPM.cmake index ff4b772a..97e694a9 100644 --- a/cmake/recipes/external/CPM.cmake +++ b/cmake/recipes/external/CPM.cmake @@ -6,7 +6,9 @@ # accordance with the terms of the Adobe license agreement accompanying # it. 
# -set(CPM_DOWNLOAD_VERSION 0.42.1) + +# we use a fork slightly ahead of this until https://github.com/cpm-cmake/CPM.cmake/pull/688 is merged +set(CPM_DOWNLOAD_VERSION 0.42.1-f50a6c0) if(CPM_SOURCE_CACHE) set(CPM_DOWNLOAD_LOCATION "${CPM_SOURCE_CACHE}/cpm/CPM_${CPM_DOWNLOAD_VERSION}.cmake") @@ -22,8 +24,10 @@ get_filename_component(CPM_DOWNLOAD_LOCATION ${CPM_DOWNLOAD_LOCATION} ABSOLUTE) function(download_cpm) message(STATUS "Downloading CPM.cmake to ${CPM_DOWNLOAD_LOCATION}") file(DOWNLOAD - https://github.com/cpm-cmake/CPM.cmake/releases/download/v${CPM_DOWNLOAD_VERSION}/CPM.cmake - ${CPM_DOWNLOAD_LOCATION} + # Revert to upstream URL once https://github.com/cpm-cmake/CPM.cmake/pull/688 is merged: + # https://github.com/cpm-cmake/CPM.cmake/releases/download/v${CPM_DOWNLOAD_VERSION}/CPM.cmake + https://raw.githubusercontent.com/jdumas/CPM.cmake/f50a6c0ad986fdd407ae14a46f08b38716f36bc8/cmake/CPM.cmake + ${CPM_DOWNLOAD_LOCATION} ) endfunction() diff --git a/cmake/recipes/external/Eigen3.cmake b/cmake/recipes/external/Eigen3.cmake index bc86f2ae..726e6053 100644 --- a/cmake/recipes/external/Eigen3.cmake +++ b/cmake/recipes/external/Eigen3.cmake @@ -16,25 +16,19 @@ endif() option(EIGEN_WITH_MKL "Use Eigen with MKL" OFF) option(EIGEN_DONT_VECTORIZE "Disable Eigen vectorization" OFF) -if(EIGEN_ROOT) - message(STATUS "Third-party (external): creating target 'Eigen3::Eigen' for external path: ${EIGEN_ROOT}") - set(EIGEN_INCLUDE_DIRS ${EIGEN_ROOT}) -else() - message(STATUS "Third-party (external): creating target 'Eigen3::Eigen'") +message(STATUS "Third-party (external): creating target 'Eigen3::Eigen'") - include(CPM) - CPMAddPackage( - NAME eigen - GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git - GIT_TAG 3.4.1 - DOWNLOAD_ONLY ON - ) - set(EIGEN_INCLUDE_DIRS ${eigen_SOURCE_DIR}) +set(EIGEN_VERSION "5.0.1" CACHE STRING "Version of Eigen to use") - install(DIRECTORY ${EIGEN_INCLUDE_DIRS}/Eigen - DESTINATION include - ) -endif() +include(CPM) +CPMAddPackage( + NAME 
eigen + GIT_REPOSITORY https://gitlab.com/libeigen/eigen.git + GIT_TAG ${EIGEN_VERSION} + DOWNLOAD_ONLY ON +) +FetchContent_GetProperties(eigen) +set(EIGEN_INCLUDE_DIRS ${eigen_SOURCE_DIR}) add_library(Eigen3_Eigen INTERFACE) add_library(Eigen3::Eigen ALIAS Eigen3_Eigen) @@ -44,6 +38,8 @@ target_include_directories(Eigen3_Eigen SYSTEM INTERFACE $ $ ) + +# Not necessary after Eigen 5, but required for older versions. Doesn't hurt to keep it. target_compile_definitions(Eigen3_Eigen INTERFACE EIGEN_MPL2_ONLY) if(EIGEN_DONT_VECTORIZE) diff --git a/cmake/recipes/external/cista.cmake b/cmake/recipes/external/cista.cmake new file mode 100644 index 00000000..b5438622 --- /dev/null +++ b/cmake/recipes/external/cista.cmake @@ -0,0 +1,38 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. 
+# +if(TARGET cista::cista) + return() +endif() + +message(STATUS "Third-party (external): creating target 'cista::cista'") + +include(CPM) +CPMAddPackage( + NAME cista + GITHUB_REPOSITORY felixguendling/cista + GIT_TAG 4356022b0020dd924ab8afb3bf0199c07e9a9943 # ahead of v0.16 because of 6f9a254 + DOWNLOAD_ONLY ON + OPTIONS + "CISTA_FMT OFF" +) + +add_library(cista INTERFACE) +add_library(cista::cista ALIAS cista) + +target_include_directories(cista SYSTEM INTERFACE "${cista_SOURCE_DIR}/include") + +set_target_properties(cista PROPERTIES FOLDER "third_party") + +# Install rules +set(CMAKE_INSTALL_DEFAULT_COMPONENT_NAME cista) +install(TARGETS cista EXPORT Cista_Targets INCLUDES DESTINATION ${CMAKE_INSTALL_INCLUDEDIR}) +install(EXPORT Cista_Targets DESTINATION ${CMAKE_INSTALL_LIBDIR}/cmake/cista NAMESPACE cista::) diff --git a/cmake/recipes/external/cpptrace.cmake b/cmake/recipes/external/cpptrace.cmake index 2bb1bc0c..8bc74e1a 100644 --- a/cmake/recipes/external/cpptrace.cmake +++ b/cmake/recipes/external/cpptrace.cmake @@ -16,6 +16,8 @@ endif() message(STATUS "Third-party (external): creating target 'cpptrace::cpptrace'") +lagrange_find_package(zstd CONFIG REQUIRED GLOBAL) + include(CPM) CPMAddPackage( NAME cpptrace diff --git a/cmake/recipes/external/instant-meshes-core.cmake b/cmake/recipes/external/instant-meshes-core.cmake index f5766719..3b651718 100644 --- a/cmake/recipes/external/instant-meshes-core.cmake +++ b/cmake/recipes/external/instant-meshes-core.cmake @@ -19,10 +19,7 @@ include(CPM) CPMAddPackage( NAME instant-meshes-core GITHUB_REPOSITORY qnzhou/instant-meshes-core - GIT_TAG 7e2b804d533e10578a730bb9d06dee2a5418730d - PATCHES - # Fix memory leak: adj_sets buffer not freed in generate_adjacency_matrix_pointcloud. 
- instant-meshes-core.patch + GIT_TAG 8c87f12bec4b98ce29febcf5dd63ebb90e957104 ) add_library(instant-meshes-core::instant-meshes-core ALIAS instant-meshes-core) diff --git a/cmake/recipes/external/instant-meshes-core.patch b/cmake/recipes/external/instant-meshes-core.patch deleted file mode 100644 index 2d4b5bf9..00000000 --- a/cmake/recipes/external/instant-meshes-core.patch +++ /dev/null @@ -1,11 +0,0 @@ ---- a/src/adjacency.cpp -+++ b/src/adjacency.cpp -@@ -320,6 +320,8 @@ AdjacencyMatrix generate_adjacency_matrix_pointcloud( - } - }); - -+ delete[] adj_sets; -+ - /* Use a heuristic to estimate some useful quantities for point clouds (this - is a biased estimate due to the kNN queries, but it's convenient and - reasonably accurate) */ diff --git a/cmake/recipes/external/piqp.cmake b/cmake/recipes/external/piqp.cmake deleted file mode 100644 index c9264bd9..00000000 --- a/cmake/recipes/external/piqp.cmake +++ /dev/null @@ -1,82 +0,0 @@ -# -# Copyright 2025 Adobe. All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. 
-# -if(TARGET piqp::piqp) - return() -endif() - -message(STATUS "Third-party (external): creating target 'piqp::piqp'") - -block() - macro(push_variable var value) - if(DEFINED CACHE{${var}}) - set(PIQP_OLD_${var}_VALUE "${${var}}") - set(PIQP_OLD_${var}_TYPE CACHE_TYPE) - elseif(DEFINED ${var}) - set(PIQP_OLD_${var}_VALUE "${${var}}") - set(PIQP_OLD_${var}_TYPE NORMAL_TYPE) - else() - set(PIQP_OLD_${var}_TYPE NONE_TYPE) - endif() - set(${var} "${value}") - endmacro() - - macro(pop_variable var) - if(PIQP_OLD_${var}_TYPE STREQUAL CACHE_TYPE) - set(${var} "${PIQP_OLD_${var}_VALUE}" CACHE PATH "" FORCE) - elseif(PIQP_OLD_${var}_TYPE STREQUAL NORMAL_TYPE) - unset(${var} CACHE) - set(${var} "${PIQP_OLD_${var}_VALUE}") - elseif(PIQP_OLD_${var}_TYPE STREQUAL NONE_TYPE) - unset(${var} CACHE) - else() - message(FATAL_ERROR "Trying to pop a variable that has not been pushed: ${var}") - endif() - endmacro() - - macro(ignore_package NAME VERSION) - set(PIQP_DUMMY_DIR "${CMAKE_CURRENT_BINARY_DIR}/piqp_cmake/${NAME}") - file(WRITE ${PIQP_DUMMY_DIR}/${NAME}Config.cmake "") - include(CMakePackageConfigHelpers) - write_basic_package_version_file(${PIQP_DUMMY_DIR}/${NAME}ConfigVersion.cmake - VERSION ${VERSION} - COMPATIBILITY AnyNewerVersion - ARCH_INDEPENDENT - ) - push_variable(${NAME}_DIR ${PIQP_DUMMY_DIR}) - push_variable(${NAME}_ROOT ${PIQP_DUMMY_DIR}) - endmacro() - - macro(unignore_package NAME) - pop_variable(${NAME}_DIR) - pop_variable(${NAME}_ROOT) - endmacro() - - # Prefer Config mode before Module mode to prevent PIQP from loading its own FindEigen3.cmake - set(CMAKE_FIND_PACKAGE_PREFER_CONFIG TRUE) - - # Import our own targets - lagrange_find_package(Eigen3 REQUIRED GLOBAL) - ignore_package(Eigen3 3.4) - - # Ready to include openvdb CMake - include(CPM) - CPMAddPackage( - NAME piqp - GITHUB_REPOSITORY PREDICT-EPFL/piqp - GIT_TAG v0.6.0 - ) - - unignore_package(Eigen3) - - set_target_properties(piqp PROPERTIES FOLDER third_party) - set_target_properties(piqp_c 
PROPERTIES FOLDER third_party) -endblock() diff --git a/cmake/recipes/external/polyscope.cmake b/cmake/recipes/external/polyscope.cmake index fe723894..85a73cd2 100644 --- a/cmake/recipes/external/polyscope.cmake +++ b/cmake/recipes/external/polyscope.cmake @@ -38,3 +38,4 @@ endblock() add_library(polyscope::polyscope ALIAS polyscope) set_target_properties(polyscope PROPERTIES FOLDER third_party) set_target_properties(glm PROPERTIES FOLDER third_party) +target_compile_features(polyscope PUBLIC cxx_std_17) diff --git a/cmake/recipes/external/quadprog.cmake b/cmake/recipes/external/quadprog.cmake index b61d90f0..377b43aa 100644 --- a/cmake/recipes/external/quadprog.cmake +++ b/cmake/recipes/external/quadprog.cmake @@ -18,8 +18,8 @@ message(STATUS "Third-party (external): creating target 'quadprog::quadprog'") include(CPM) CPMAddPackage( NAME quadprog - GITHUB_REPOSITORY ggael/QuadProg - GIT_TAG c031c027671488fd13ef3569f9d6319b4e2fec5c + GITHUB_REPOSITORY jdumas/QuadProg + GIT_TAG 8e6e38ad5b257d1795a9ef995e4fd7db72ccf551 ) add_library(quadprog diff --git a/cmake/recipes/external/zstd.cmake b/cmake/recipes/external/zstd.cmake new file mode 100644 index 00000000..910d4241 --- /dev/null +++ b/cmake/recipes/external/zstd.cmake @@ -0,0 +1,42 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. 
+# +if(TARGET zstd::libzstd) + return() +endif() + +message(STATUS "Third-party (external): creating target 'zstd::libzstd'") + +include(CPM) +CPMAddPackage( + NAME zstd + GITHUB_REPOSITORY facebook/zstd + GIT_TAG v1.5.6 + SOURCE_SUBDIR build/cmake + OPTIONS + "ZSTD_BUILD_SHARED OFF" + "ZSTD_BUILD_PROGRAMS OFF" + "ZSTD_BUILD_TESTS OFF" + "ZSTD_BUILD_CONTRIB OFF" + "ZSTD_MULTITHREAD_SUPPORT ON" +) + +if(NOT TARGET zstd::libzstd) + if(TARGET libzstd_static) + add_library(zstd::libzstd ALIAS libzstd_static) + elseif(TARGET libzstd_shared) + add_library(zstd::libzstd ALIAS libzstd_shared) + endif() +endif() + +if(TARGET libzstd_static) + set_target_properties(libzstd_static PROPERTIES FOLDER "third_party") +endif() diff --git a/modules/bvh/examples/CMakeLists.txt b/modules/bvh/examples/CMakeLists.txt index 74963230..a82342e7 100644 --- a/modules/bvh/examples/CMakeLists.txt +++ b/modules/bvh/examples/CMakeLists.txt @@ -17,3 +17,7 @@ target_link_libraries(weld_vertices lagrange::bvh lagrange::io CLI11::CLI11) lagrange_add_example(remove_interior_shells remove_interior_shells.cpp) target_link_libraries(remove_interior_shells lagrange::bvh lagrange::io CLI11::CLI11) + +lagrange_include_modules(packing polyscope) +lagrange_add_example(uv_overlap uv_overlap.cpp WITH_UI) +target_link_libraries(uv_overlap lagrange::bvh lagrange::packing lagrange::polyscope lagrange::io CLI11::CLI11) diff --git a/modules/bvh/examples/uv_overlap.cpp b/modules/bvh/examples/uv_overlap.cpp new file mode 100644 index 00000000..422c7d3e --- /dev/null +++ b/modules/bvh/examples/uv_overlap.cpp @@ -0,0 +1,452 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include +#include + +// clang-format off +#include +#include +#include +#include +#include +// clang-format on + +#include + +// ============================================================================ +// Mesh processing utilities +// ============================================================================ + +using SurfaceMesh = lagrange::SurfaceMesh32d; + +/// +/// Prepare a mesh for 3D display in polyscope by unifying non-UV index buffers +/// and converting indexed UV attributes to corner attributes. 
+/// +void prepare_mesh_for_display(SurfaceMesh& mesh) +{ + lagrange::AttributeMatcher matcher; + matcher.element_types = lagrange::AttributeElement::Indexed; + + matcher.usages = ~lagrange::BitField(lagrange::AttributeUsage::UV); + auto ids = lagrange::find_matching_attributes(mesh, matcher); + if (!ids.empty()) { + std::vector attr_names; + for (auto id : ids) { + attr_names.emplace_back(mesh.get_attribute_name(id)); + } + lagrange::logger().info( + "Unifying index buffers for {} non-UV indexed attributes: {}", + ids.size(), + fmt::join(attr_names, ", ")); + mesh = lagrange::unify_index_buffer(mesh, ids); + } + + matcher.usages = lagrange::AttributeUsage::UV; + ids = lagrange::find_matching_attributes(mesh, matcher); + for (auto id : ids) { + lagrange::logger().info( + "Converting indexed UV attribute to corner attribute: {}", + mesh.get_attribute_name(id)); + map_attribute_in_place(mesh, id, lagrange::AttributeElement::Corner); + } +} + +/// +/// Repack UV charts by splitting overlapping regions into separate packing layers. +/// +void repack_overlapping_charts( + SurfaceMesh& mesh, + lagrange::AttributeId overlap_coloring_id, + const std::string& uv_attribute_name) +{ + lagrange::logger().info("Repacking UV charts using overlap coloring..."); + + // 1. Compute connectivity-based UV charts + lagrange::UVChartOptions chart_options; + chart_options.uv_attribute_name = uv_attribute_name; + chart_options.output_attribute_name = "@chart_id"; + size_t num_charts = lagrange::compute_uv_charts(mesh, chart_options); + + // 2. 
Combine chart ID and overlap color into a split chart attribute: + // split_id = chart_id * num_colors + overlap_color + auto chart_ids = + lagrange::attribute_vector_view(mesh, mesh.get_attribute_id("@chart_id")); + auto color_ids = lagrange::attribute_vector_view(mesh, overlap_coloring_id); + + uint32_t num_colors = color_ids.maxCoeff() + 1; + auto num_facets = mesh.get_num_facets(); + std::vector split_ids(num_facets); + for (uint32_t f = 0; f < num_facets; ++f) { + split_ids[f] = static_cast(chart_ids[f] * num_colors + color_ids[f]); + } + mesh.template create_attribute( + "@split_chart_id", + lagrange::AttributeElement::Facet, + lagrange::AttributeUsage::Scalar, + 1, + split_ids); + + // 3. Split UV indices so that facets in different split charts don't share UV vertices + lagrange::AttributeMatcher uv_matcher; + uv_matcher.usages = lagrange::AttributeUsage::UV; + uv_matcher.element_types = lagrange::AttributeElement::Indexed; + auto uv_attr_id = lagrange::find_matching_attribute(mesh, uv_matcher); + la_runtime_assert(uv_attr_id.has_value(), "No indexed UV attribute found."); + + auto& uv_attr = mesh.template ref_indexed_attribute(*uv_attr_id); + auto old_values = lagrange::matrix_view(uv_attr.values()); + auto uv_indices = lagrange::vector_ref(uv_attr.indices()); + + std::unordered_map< + std::pair, + uint32_t, + lagrange::OrderedPairHash>> + remap; + std::vector new_values; + new_values.reserve(old_values.rows()); + + for (uint32_t f = 0; f < num_facets; ++f) { + auto c_begin = mesh.get_facet_corner_begin(f); + auto c_end = mesh.get_facet_corner_end(f); + for (auto c = c_begin; c < c_end; ++c) { + auto key = std::make_pair(static_cast(uv_indices[c]), split_ids[f]); + auto [it, inserted] = remap.emplace(key, static_cast(new_values.size())); + if (inserted) { + new_values.push_back(old_values.row(key.first)); + } + uv_indices[c] = it->second; + } + } + + uv_attr.values().resize_elements(new_values.size()); + auto new_val_ref = 
lagrange::matrix_ref(uv_attr.values()); + for (size_t i = 0; i < new_values.size(); ++i) { + new_val_ref.row(i) = new_values[i]; + } + + lagrange::logger().info( + "Split UV vertices: {} -> {} (charts={}, colors={}).", + old_values.rows(), + new_values.size(), + num_charts, + num_colors); + + // 4. Repack using the split chart attribute + lagrange::packing::RepackOptions repack_options; + repack_options.chart_attribute_name = "@split_chart_id"; + lagrange::packing::repack_uv_charts(mesh, repack_options); +} + +// ============================================================================ +// GUI state and callbacks +// ============================================================================ + +struct DemoState +{ + // Original mesh with indexed UVs + SurfaceMesh mesh_original; + // Prepared mesh for polyscope display (unified indices, corner UVs) + SurfaceMesh mesh_display; + + // Repacked versions (populated on button click) + SurfaceMesh repacked_mesh_original; + SurfaceMesh repacked_mesh_display; + + // Overlap coloring + lagrange::AttributeId coloring_id = lagrange::invalid_attribute_id(); + std::string uv_attribute_name; + std::string mesh_name; + std::string output_path; + + // UI state + bool uv_view = false; + bool repacked = false; + + // Saved view state for each view mode + struct SavedView + { + std::string camera_json; + std::tuple bounding_box = {glm::vec3{0.f}, glm::vec3{0.f}}; + float length_scale = 0.f; + bool valid = false; + }; + SavedView saved_3d_view; + SavedView saved_uv_view; + + bool has_coloring() const { return coloring_id != lagrange::invalid_attribute_id(); } +}; + +/// Register a mesh as a 2D UV polyscope structure, with optional overlap coloring. 
+void register_uv_mesh( + const std::string& name, + SurfaceMesh& mesh, + lagrange::AttributeId coloring_id, + double x_offset = 0.0, + const std::string& uv_attribute_name = "") +{ + lagrange::UVMeshOptions uv_opts; + uv_opts.uv_attribute_name = uv_attribute_name; + auto uv = lagrange::uv_mesh_view(mesh, uv_opts); + auto* ps = + static_cast<::polyscope::SurfaceMesh*>(lagrange::polyscope::register_structure(name, uv)); + if (x_offset != 0.0) { + glm::mat4 T = glm::translate(glm::mat4(1.0f), glm::vec3(x_offset, 0, 0)); + ps->setTransform(T); + } + if (coloring_id != lagrange::invalid_attribute_id()) { + auto& ca = mesh.template get_attribute(coloring_id); + lagrange::polyscope::register_attribute(*ps, "uv_overlap_color", ca); + } +} + +/// Remove all polyscope structures and re-register them for the current view mode. +void register_view(DemoState& state) +{ + // Horizontal offset and scene extent for side-by-side UV layout display + constexpr float k_uv_mesh_spacing = 1.2f; + constexpr float k_uv_scene_extent_single = 1.f; + constexpr float k_uv_scene_extent_dual = k_uv_mesh_spacing + k_uv_scene_extent_single; + + polyscope::removeAllStructures(); + + auto repacked_name = state.mesh_name + "_repacked"; + + if (state.uv_view) { + register_uv_mesh( + state.mesh_name, + state.mesh_original, + state.coloring_id, + 0.0, + state.uv_attribute_name); + if (state.repacked) { + register_uv_mesh( + repacked_name, + state.repacked_mesh_original, + state.coloring_id, + k_uv_mesh_spacing, + state.uv_attribute_name); + } + + polyscope::options::automaticallyComputeSceneExtents = false; + float x_extent = state.repacked ? 
k_uv_scene_extent_dual : k_uv_scene_extent_single; + polyscope::state::boundingBox = + std::make_tuple(glm::vec3{0.f, 0.f, 0.f}, glm::vec3{x_extent, 1.f, 0.f}); + polyscope::state::lengthScale = x_extent; + + polyscope::view::setUpDir(polyscope::UpDir::YUp); + polyscope::view::setNavigateStyle(polyscope::NavigateStyle::Planar); + } else { + auto* ps3d = lagrange::polyscope::register_structure(state.mesh_name, state.mesh_display); + ps3d->setTransform(glm::mat4(1.0f)); + if (state.repacked) { + auto* ps3d_repacked = + lagrange::polyscope::register_structure(repacked_name, state.repacked_mesh_display); + ps3d_repacked->setTransform(glm::mat4(1.0f)); + } + + polyscope::options::automaticallyComputeSceneExtents = true; + polyscope::view::setNavigateStyle(polyscope::NavigateStyle::Turntable); + } + + polyscope::view::resetCameraToHomeView(); +} + +/// Toggle between 3D and UV view, saving/restoring camera and scene bounds. +void toggle_uv_view(DemoState& state) +{ + // Save state for the view we're leaving + auto& leaving = state.uv_view ? state.saved_uv_view : state.saved_3d_view; + leaving.camera_json = polyscope::view::getViewAsJson(); + leaving.bounding_box = polyscope::state::boundingBox; + leaving.length_scale = polyscope::state::lengthScale; + leaving.valid = true; + + state.uv_view = !state.uv_view; + register_view(state); + + // Restore state for the view we're entering + auto& entering = state.uv_view ? state.saved_uv_view : state.saved_3d_view; + if (entering.valid) { + polyscope::state::boundingBox = entering.bounding_box; + polyscope::state::lengthScale = entering.length_scale; + polyscope::view::setViewFromJson(entering.camera_json, false); + } +} + +/// ImGui callback for the polyscope UI. 
+void user_callback(DemoState& state) +{ + ImGuiIO& io = ImGui::GetIO(); + if (!io.WantCaptureKeyboard && ImGui::IsKeyPressed(ImGuiKey_U)) { + toggle_uv_view(state); + } + + bool prev_uv_view = state.uv_view; + ImGui::Checkbox("Show UV Layout (U)", &state.uv_view); + if (state.uv_view != prev_uv_view) { + // Undo checkbox toggle, then let toggle_uv_view handle state consistently + state.uv_view = prev_uv_view; + toggle_uv_view(state); + } + + if (state.has_coloring()) { + ImGui::BeginDisabled(state.repacked); + if (ImGui::Button("Repack UV Charts")) { + state.repacked_mesh_original = state.mesh_original; + repack_overlapping_charts( + state.repacked_mesh_original, + state.coloring_id, + state.uv_attribute_name); + state.repacked_mesh_display = state.repacked_mesh_original; + prepare_mesh_for_display(state.repacked_mesh_display); + state.repacked = true; + + auto camera_json = polyscope::view::getViewAsJson(); + register_view(state); + polyscope::view::setViewFromJson(camera_json, false); + } + ImGui::EndDisabled(); + + ImGui::BeginDisabled(!state.repacked); + if (ImGui::Button("Export Repacked Mesh")) { + lagrange::logger().info("Saving repacked mesh: {}", state.output_path); + lagrange::io::save_mesh(state.output_path, state.repacked_mesh_original); + } + ImGui::EndDisabled(); + } +} + +// ============================================================================ +// Main +// ============================================================================ + +int main(int argc, char** argv) +{ + struct + { + std::string input; + std::string output = "output.obj"; + std::string method = "hybrid"; + std::string uv_attribute_name; + bool gui = false; + bool uv_view = false; + bool repack = false; + int log_level = 2; + } args; + + CLI::App app{argv[0]}; + app.option_defaults()->always_capture_default(); + app.add_option("input", args.input, "Input mesh.")->required()->check(CLI::ExistingFile); + app.add_option("output", args.output, "Output mesh."); + 
app.add_option("-m,--method", args.method, "Candidate detection method: sweep, bvh, hybrid.") + ->check(CLI::IsMember({"sweep", "bvh", "hybrid"})); + app.add_option("--uv", args.uv_attribute_name, "UV attribute name (default: first UV found)."); + app.add_flag("--gui", args.gui, "Launch the Polyscope GUI to visualize results."); + app.add_flag("--uv-view", args.uv_view, "Start in the 2D UV layout view (implies --gui)."); + app.add_flag("--repack", args.repack, "Repack UV charts per overlap color layer."); + app.add_option("-l,--level", args.log_level, "Log level (0 = most verbose, 6 = off)."); + CLI11_PARSE(app, argc, argv) + + if (args.uv_view) { + args.gui = true; + } + + args.log_level = std::max(0, std::min(6, args.log_level)); + spdlog::set_level(static_cast(args.log_level)); + + // Load and triangulate + lagrange::logger().info("Loading input mesh: {}", args.input); + auto mesh = lagrange::io::load_mesh(args.input); + lagrange::triangulate_polygonal_facets(mesh); + lagrange::logger().info( + "Mesh has {} vertices and {} facets.", + mesh.get_num_vertices(), + mesh.get_num_facets()); + + // Compute UV overlap + lagrange::bvh::UVOverlapOptions options; + options.uv_attribute_name = args.uv_attribute_name; + options.compute_overlap_coloring = true; + if (args.method == "bvh") { + options.method = lagrange::bvh::UVOverlapMethod::BVH; + } else if (args.method == "hybrid") { + options.method = lagrange::bvh::UVOverlapMethod::Hybrid; + } else { + options.method = lagrange::bvh::UVOverlapMethod::SweepAndPrune; + } + + lagrange::logger().info("Computing UV overlap (method={})...", args.method); + lagrange::VerboseTimer timer("compute_uv_overlap", nullptr, spdlog::level::info); + timer.tick(); + auto result = lagrange::bvh::compute_uv_overlap(mesh, options); + timer.tock(); + + if (result.has_overlap) { + if (result.overlap_area.has_value()) { + lagrange::logger().info("Total overlap area: {}", result.overlap_area.value()); + } else { + lagrange::logger().info("UV 
overlap detected."); + } + } else { + lagrange::logger().info("No UV overlap detected."); + } + + // GUI or CLI output + if (args.gui) { + polyscope::options::configureImGuiStyleCallback = []() { + ImGui::Spectrum::StyleColorsSpectrum(); + ImGui::Spectrum::LoadFont(); + }; + polyscope::init(); + + DemoState state; + state.mesh_original = std::move(mesh); + state.mesh_display = state.mesh_original; + prepare_mesh_for_display(state.mesh_display); + state.coloring_id = result.overlap_coloring_id; + state.uv_attribute_name = args.uv_attribute_name; + state.mesh_name = lagrange::fs::path(args.input).stem().string(); + state.output_path = args.output; + state.uv_view = args.uv_view; + + register_view(state); + polyscope::state::userCallback = [&]() { user_callback(state); }; + polyscope::show(); + } else { + if (args.repack && result.overlap_coloring_id != lagrange::invalid_attribute_id()) { + repack_overlapping_charts(mesh, result.overlap_coloring_id, args.uv_attribute_name); + } + lagrange::logger().info("Saving result: {}", args.output); + lagrange::io::save_mesh(args.output, mesh); + } + + return 0; +} diff --git a/modules/bvh/include/lagrange/bvh/compute_uv_overlap.h b/modules/bvh/include/lagrange/bvh/compute_uv_overlap.h new file mode 100644 index 00000000..ec20f389 --- /dev/null +++ b/modules/bvh/include/lagrange/bvh/compute_uv_overlap.h @@ -0,0 +1,161 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. 
See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include + +#include +#include +#include +#include + +namespace lagrange::bvh { + +/// @addtogroup module-bvh +/// @{ + +/// +/// Algorithm used to find candidate bounding-box pairs in compute_uv_overlap. +/// +enum class UVOverlapMethod { + /// + /// Zomorodian-Edelsbrunner sweep-and-prune. + /// + /// Sorts triangle bounding-box x-intervals and sweeps a 1-D active set to enumerate + /// all overlapping pairs in O((n + k) log n) time, where k is the output size. + /// + SweepAndPrune, + + /// + /// AABB tree per-triangle query. + /// + /// Builds an @ref AABB tree on all triangle bounding boxes, then queries + /// each box against the tree in parallel. Useful for benchmarking and cross-validation + /// against the sweep-and-prune path. + /// + BVH, + + /// + /// Zomorodian-Edelsbrunner HYBRID algorithm (recursive divide-and-conquer). + /// + /// Based on the HYBRID procedure from Figure 5 of "Fast Software for Box + /// Intersections" (Zomorodian & Edelsbrunner, 2002), simplified for the 2-D + /// complete case. Recursively splits on the y-dimension median, handles spanning + /// intervals at each node with a one-dimensional OneWayScan, and falls through to + /// scanning for subsets below a cutoff size. See the implementation file for a + /// detailed description of the differences from the paper's algorithm. + /// + Hybrid, +}; + +/// +/// Options for compute_uv_overlap. +/// +struct UVOverlapOptions +{ + /// UV attribute name. Empty string = use the first compatible UV attribute found on the mesh. + /// Vertex, indexed, and corner attributes are all supported. + std::string uv_attribute_name; + + /// If true (default), compute the total overlap area via Sutherland-Hodgman clipping. + /// Set to false when only overlap detection or coloring is needed (faster). 
+ bool compute_overlap_area = true; + + /// + /// If true, compute a per-facet integer attribute that assigns each triangle a color + /// (greedy graph coloring of the overlap graph). Color 0 = not overlapping with any + /// other triangle; colors >= 1 are assigned so that no two triangles with the same + /// color have a positive-area intersection. This allows splitting UV charts into + /// non-overlapping layers before repacking. + /// + bool compute_overlap_coloring = false; + + /// Name of the per-facet output integer attribute written when compute_overlap_coloring + /// is true. + std::string overlap_coloring_attribute_name = "@uv_overlap_color"; + + /// + /// If true, populate UVOverlapResult::overlapping_pairs with the sorted list of + /// (i, j) facet-index pairs that have a positive-area interior intersection. + /// + bool compute_overlapping_pairs = false; + + /// Candidate pair detection algorithm. + UVOverlapMethod method = UVOverlapMethod::Hybrid; +}; + +/// +/// Result of compute_uv_overlap. +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +template +struct UVOverlapResult +{ + /// True when at least one pair of UV triangles has a positive-area interior intersection. + /// This is the canonical overlap indicator and is valid regardless of compute_overlap_area. + bool has_overlap = false; + + /// + /// Sum of intersection areas over all overlapping triangle pairs. std::nullopt when + /// UVOverlapOptions::compute_overlap_area is false or when has_overlap is false. + /// Check has_overlap (not this field) to test for overlap existence. + /// + std::optional overlap_area; + + /// + /// Sorted list of (i, j) facet-index pairs (i < j) that have a positive-area interior + /// intersection. Empty when UVOverlapOptions::compute_overlapping_pairs is false or + /// when there are no overlapping triangles. 
+ /// + std::vector> overlapping_pairs; + + /// + /// AttributeId of the per-facet integer coloring attribute written to the mesh. + /// Equals invalid_attribute_id() when UVOverlapOptions::compute_overlap_coloring is + /// false or when there are no overlapping triangles. + /// + AttributeId overlap_coloring_id = invalid_attribute_id(); +}; + +/// +/// Compute pairwise UV triangle overlap. +/// +/// For every pair of UV-space triangles whose 2-D axis-aligned bounding boxes intersect +/// (detected via the Zomorodian-Edelsbrunner sweep-and-prune or a BVH), an exact +/// separating-axis test using orient2D predicates is applied to confirm a genuine +/// interior intersection before computing the intersection area using +/// Sutherland-Hodgman polygon clipping. +/// +/// Triangles that share only a boundary edge or a single vertex are never counted as +/// overlapping — the exact orient2D predicate handles boundary contacts correctly. +/// +/// @param[in,out] mesh Input triangle mesh with a UV attribute. +/// @param[in] options Options controlling algorithm and output. +/// +/// @return @ref UVOverlapResult containing the optional total overlap area and the +/// optional AttributeId of the per-facet coloring attribute. +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +template +LA_BVH_API UVOverlapResult compute_uv_overlap( + SurfaceMesh& mesh, + const UVOverlapOptions& options = {}); + +/// @} + +} // namespace lagrange::bvh diff --git a/modules/bvh/python/src/bvh.cpp b/modules/bvh/python/src/bvh.cpp index 87e83d17..37065f40 100644 --- a/modules/bvh/python/src/bvh.cpp +++ b/modules/bvh/python/src/bvh.cpp @@ -13,6 +13,7 @@ #include #include #include +#include #include #include #include @@ -469,6 +470,94 @@ Both meshes must have the same spatial dimension and must be triangle meshes. :param mesh: Input mesh to process. 
:return: A new mesh with interior shells removed.)"); + + // UV overlap enum + nb::enum_(m, "UVOverlapMethod") + .value( + "SweepAndPrune", + bvh::UVOverlapMethod::SweepAndPrune, + "Zomorodian-Edelsbrunner sweep-and-prune.") + .value("BVH", bvh::UVOverlapMethod::BVH, "AABB tree per-triangle query.") + .value( + "Hybrid", + bvh::UVOverlapMethod::Hybrid, + "Zomorodian-Edelsbrunner HYBRID algorithm (recursive divide-and-conquer)."); + + // UV overlap result (Python NamedTuple) + // Note: No direct nanobind support for NamedTuple yet, see: + // https://github.com/wjakob/nanobind/discussions/1279 + nb::object typing = nb::module_::import_("typing"); + nb::object builtins = nb::module_::import_("builtins"); + nb::object UVOverlapResult = typing.attr("NamedTuple")( + "UVOverlapResult", + nb::make_tuple( + nb::make_tuple("has_overlap", builtins.attr("bool")), + nb::make_tuple("overlap_area", typing.attr("Optional")[builtins.attr("float")]), + nb::make_tuple("overlapping_pairs", builtins.attr("list")), + nb::make_tuple("overlap_coloring_id", builtins.attr("int")))); + m.attr("UVOverlapResult") = UVOverlapResult; + + // compute_uv_overlap function + m.def( + "compute_uv_overlap", + [UVOverlapResult]( + MeshType& mesh, + std::string uv_attribute_name, + bool compute_overlap_area, + bool compute_overlap_coloring, + std::string overlap_coloring_attribute_name, + bool compute_overlapping_pairs, + bvh::UVOverlapMethod method) -> nb::object { + bvh::UVOverlapOptions opts; + opts.uv_attribute_name = std::move(uv_attribute_name); + opts.compute_overlap_area = compute_overlap_area; + opts.compute_overlap_coloring = compute_overlap_coloring; + opts.overlap_coloring_attribute_name = std::move(overlap_coloring_attribute_name); + opts.compute_overlapping_pairs = compute_overlapping_pairs; + opts.method = method; + auto result = bvh::compute_uv_overlap(mesh, opts); + + nb::object area = result.overlap_area.has_value() + ? 
nb::cast(result.overlap_area.value()) + : nb::none(); + + nb::list pairs; + for (auto& [i, j] : result.overlapping_pairs) { + pairs.append(nb::make_tuple(i, j)); + } + + return UVOverlapResult( + nb::cast(result.has_overlap), + area, + pairs, + nb::cast(result.overlap_coloring_id)); + }, + "mesh"_a, + "uv_attribute_name"_a = bvh::UVOverlapOptions{}.uv_attribute_name, + "compute_overlap_area"_a = bvh::UVOverlapOptions{}.compute_overlap_area, + "compute_overlap_coloring"_a = bvh::UVOverlapOptions{}.compute_overlap_coloring, + "overlap_coloring_attribute_name"_a = + bvh::UVOverlapOptions{}.overlap_coloring_attribute_name, + "compute_overlapping_pairs"_a = bvh::UVOverlapOptions{}.compute_overlapping_pairs, + "method"_a = bvh::UVOverlapOptions{}.method, + R"(Compute pairwise UV triangle overlap. + +For every pair of UV-space triangles whose 2-D bounding boxes intersect, an exact +separating-axis test using orient2D predicates confirms a genuine interior intersection +before computing the intersection area via Sutherland-Hodgman clipping. + +Triangles that share only a boundary edge or a single vertex are never counted as +overlapping. + +:param mesh: Input triangle mesh with a UV attribute. +:param uv_attribute_name: UV attribute name. Empty string uses the first UV attribute found. Vertex, indexed and corner attributes are supported. +:param compute_overlap_area: If True, compute the total overlap area (default: True). +:param compute_overlap_coloring: If True, compute a per-facet coloring attribute (default: False). +:param overlap_coloring_attribute_name: Name of the coloring attribute (default: "@uv_overlap_color"). +:param compute_overlapping_pairs: If True, return the list of overlapping pairs (default: False). +:param method: Candidate detection algorithm (default: UVOverlapMethod.Hybrid). 
+ +:return: UVOverlapResult containing overlap detection results.)"); } } // namespace lagrange::python diff --git a/modules/bvh/python/tests/asset.py b/modules/bvh/python/tests/asset.py deleted file mode 100644 index c5dfcf9c..00000000 --- a/modules/bvh/python/tests/asset.py +++ /dev/null @@ -1,76 +0,0 @@ -# -# Copyright 2025 Adobe. All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. -# -import lagrange -import numpy as np -import pytest - - -@pytest.fixture -def cube(): - vertices = np.array( - [ - [0, 0, 0], - [1, 0, 0], - [1, 1, 0], - [0, 1, 0], - [0, 0, 1], - [1, 0, 1], - [1, 1, 1], - [0, 1, 1], - ], - dtype=float, - ) - facets = np.array( - [ - [0, 3, 2], - [2, 1, 0], - [4, 5, 6], - [6, 7, 4], - [1, 2, 6], - [6, 5, 1], - [4, 7, 3], - [3, 0, 4], - [2, 3, 7], - [7, 6, 2], - [0, 1, 5], - [5, 4, 0], - ], - dtype=np.uint32, - ) - mesh = lagrange.SurfaceMesh() - mesh.vertices = vertices - mesh.facets = facets - return mesh - - -@pytest.fixture -def square(): - vertices = np.array( - [ - [0, 0], - [1, 0], - [1, 1], - [0, 1], - ], - dtype=float, - ) - facets = np.array( - [ - [0, 1, 2], - [2, 3, 0], - ], - dtype=np.uint32, - ) - mesh = lagrange.SurfaceMesh(2) - mesh.vertices = vertices - mesh.facets = facets - return mesh diff --git a/modules/filtering/python/tests/assets.py b/modules/bvh/python/tests/conftest.py similarity index 80% rename from modules/filtering/python/tests/assets.py rename to modules/bvh/python/tests/conftest.py index 
f1666094..c03be3da 100644 --- a/modules/filtering/python/tests/assets.py +++ b/modules/bvh/python/tests/conftest.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. # import lagrange - import numpy as np import pytest @@ -32,12 +31,18 @@ def cube(): ) facets = np.array( [ - [0, 3, 2, 1], - [4, 5, 6, 7], - [1, 2, 6, 5], - [4, 7, 3, 0], - [2, 3, 7, 6], - [0, 1, 5, 4], + [0, 3, 2], + [2, 1, 0], + [4, 5, 6], + [6, 7, 4], + [1, 2, 6], + [6, 5, 1], + [4, 7, 3], + [3, 0, 4], + [2, 3, 7], + [7, 6, 2], + [0, 1, 5], + [5, 4, 0], ], dtype=np.uint32, ) diff --git a/modules/bvh/python/tests/test_EdgeAABBTree.py b/modules/bvh/python/tests/test_EdgeAABBTree.py index ccbaa53d..0e2107bb 100644 --- a/modules/bvh/python/tests/test_EdgeAABBTree.py +++ b/modules/bvh/python/tests/test_EdgeAABBTree.py @@ -13,8 +13,6 @@ import numpy as np import pytest -from .asset import cube, square # noqa: F401 - class TestEdgeAABBTree: def test_closest_point_3D(self, cube): diff --git a/modules/bvh/python/tests/test_TriangleAABBTree.py b/modules/bvh/python/tests/test_TriangleAABBTree.py index 14510b27..94d7f97a 100644 --- a/modules/bvh/python/tests/test_TriangleAABBTree.py +++ b/modules/bvh/python/tests/test_TriangleAABBTree.py @@ -13,8 +13,6 @@ import numpy as np import pytest -from .asset import cube, square # noqa: F401 - class TestTriangleAABBTree: def test_elements_in_radius_3D(self, cube): diff --git a/modules/bvh/python/tests/test_compute_mesh_distances.py b/modules/bvh/python/tests/test_compute_mesh_distances.py index 06a81281..1b8939a0 100644 --- a/modules/bvh/python/tests/test_compute_mesh_distances.py +++ b/modules/bvh/python/tests/test_compute_mesh_distances.py @@ -13,14 +13,10 @@ import numpy as np import pytest -from .asset import cube # noqa: F401 - # --------------------------------------------------------------------------- # Helpers # --------------------------------------------------------------------------- - - def make_parallel_squares(d: float): """Two 
axis-aligned unit squares at z=0 and z=d, each made of 2 triangles. Every vertex of sq_a is directly below a vertex of sq_b, so the diff --git a/modules/bvh/python/tests/test_compute_uv_overlap.py b/modules/bvh/python/tests/test_compute_uv_overlap.py new file mode 100644 index 00000000..77edacaf --- /dev/null +++ b/modules/bvh/python/tests/test_compute_uv_overlap.py @@ -0,0 +1,230 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +# +import lagrange +import numpy as np +import pytest + + +# --------------------------------------------------------------------------- +# Helpers +# --------------------------------------------------------------------------- +def make_uv_mesh(uv_coords, faces): + """Build a triangle mesh with per-vertex UV attribute. + + The 3-D positions are set to (u, v, 0) for convenience. 
+ """ + mesh = lagrange.SurfaceMesh() + uv = np.array(uv_coords, dtype=float) + mesh.vertices = np.column_stack([uv, np.zeros(len(uv))]) + mesh.facets = np.array(faces, dtype=np.uint32) + mesh.create_attribute( + "@uv", + element=lagrange.AttributeElement.Vertex, + usage=lagrange.AttributeUsage.UV, + num_channels=2, + initial_values=uv, + ) + return mesh + + +# --------------------------------------------------------------------------- +# Basic overlap detection +# --------------------------------------------------------------------------- +class TestNoOverlap: + def test_disjoint_triangles(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [2, 0], [3, 0], [2, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh) + assert not result.has_overlap + assert result.overlap_area is None + + def test_adjacent_shared_edge(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [1, 1]], + [[0, 1, 2], [1, 3, 2]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh) + assert not result.has_overlap + assert result.overlap_area is None + + def test_adjacent_shared_vertex(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [1, 0], [2, 0], [1, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh) + assert not result.has_overlap + + +class TestOverlap: + def test_identical_triangles(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh) + assert result.has_overlap + assert result.overlap_area == pytest.approx(0.5, abs=1e-5) + + def test_partial_overlap_known_area(self): + # A: (0,0)-(1,0)-(0,1) B: (0.5,0)-(1.5,0)-(0.5,1) + # Intersection area = 0.125 + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0.5, 0], [1.5, 0], [0.5, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh) + assert result.has_overlap + assert result.overlap_area == pytest.approx(0.125, abs=1e-5) + + def 
test_cw_oriented_triangle(self): + # CW triangle overlaps identically with CCW triangle. + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [0, 1], [1, 0]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh) + assert result.has_overlap + assert result.overlap_area == pytest.approx(0.5, abs=1e-5) + + +# --------------------------------------------------------------------------- +# Edge cases +# --------------------------------------------------------------------------- +class TestEdgeCases: + def test_single_triangle(self): + mesh = make_uv_mesh([[0, 0], [1, 0], [0, 1]], [[0, 1, 2]]) + result = lagrange.bvh.compute_uv_overlap(mesh) + assert not result.has_overlap + assert result.overlap_area is None + + def test_three_triangles_one_pair_overlaps(self): + # A and C overlap, B is disjoint. + mesh = make_uv_mesh( + [ + [0, 0], + [1, 0], + [0, 1], # A + [5, 5], + [6, 5], + [5, 6], # B (far away) + [0, 0], + [1, 0], + [0, 1], + ], # C (identical to A) + [[0, 1, 2], [3, 4, 5], [6, 7, 8]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh, compute_overlapping_pairs=True) + assert result.has_overlap + assert result.overlapping_pairs == [(0, 2)] + + +# --------------------------------------------------------------------------- +# Options / kwargs +# --------------------------------------------------------------------------- +class TestOptions: + def test_overlap_area_disabled(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh, compute_overlap_area=False) + assert result.has_overlap + assert result.overlap_area is None + + def test_overlapping_pairs(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh, compute_overlapping_pairs=True) + assert result.overlapping_pairs == [(0, 1)] + + def 
test_overlapping_pairs_empty_when_disabled(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh, compute_overlapping_pairs=False) + assert result.overlapping_pairs == [] + + def test_coloring(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh, compute_overlap_coloring=True) + assert result.has_overlap + assert mesh.has_attribute("@uv_overlap_color") + + def test_coloring_not_created_when_disabled(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + lagrange.bvh.compute_uv_overlap(mesh, compute_overlap_coloring=False) + assert not mesh.has_attribute("@uv_overlap_color") + + +# --------------------------------------------------------------------------- +# All methods produce consistent results +# --------------------------------------------------------------------------- +class TestMethods: + @pytest.mark.parametrize("method", list(lagrange.bvh.UVOverlapMethod)) + def test_method_overlap_area(self, method): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0.5, 0], [1.5, 0], [0.5, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap( + mesh, method=method, compute_overlapping_pairs=True + ) + assert result.has_overlap + assert result.overlap_area == pytest.approx(0.125, abs=1e-5) + assert result.overlapping_pairs == [(0, 1)] + + @pytest.mark.parametrize("method", list(lagrange.bvh.UVOverlapMethod)) + def test_method_no_overlap(self, method): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [2, 0], [3, 0], [2, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh, method=method) + assert not result.has_overlap + + +# --------------------------------------------------------------------------- +# NamedTuple behavior +# 
--------------------------------------------------------------------------- +class TestResultType: + def test_unpacking(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + has_overlap, area, pairs, coloring_id = lagrange.bvh.compute_uv_overlap(mesh) + assert has_overlap is True + assert area == pytest.approx(0.5, abs=1e-5) + + def test_asdict(self): + mesh = make_uv_mesh( + [[0, 0], [1, 0], [0, 1], [0, 0], [1, 0], [0, 1]], + [[0, 1, 2], [3, 4, 5]], + ) + result = lagrange.bvh.compute_uv_overlap(mesh) + d = result._asdict() + assert "has_overlap" in d + assert "overlap_area" in d + assert "overlapping_pairs" in d + assert "overlap_coloring_id" in d diff --git a/modules/bvh/python/tests/test_remove_interior_shells.py b/modules/bvh/python/tests/test_remove_interior_shells.py index b7cc50b7..9460bb2e 100644 --- a/modules/bvh/python/tests/test_remove_interior_shells.py +++ b/modules/bvh/python/tests/test_remove_interior_shells.py @@ -11,8 +11,6 @@ # import lagrange -from .asset import cube # noqa: F401 - class TestRemoveInteriorShells: def test_remove_interior_shells(self, cube): diff --git a/modules/bvh/src/compute_uv_overlap.cpp b/modules/bvh/src/compute_uv_overlap.cpp new file mode 100644 index 00000000..0b401cc9 --- /dev/null +++ b/modules/bvh/src/compute_uv_overlap.cpp @@ -0,0 +1,976 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// clang-format off +#include +#include +#include +#include +#include +#include +// clang-format on + +#include +#include +#include +#include +#include +#include +#include +#include + +namespace lagrange::bvh { + +namespace { + +// --------------------------------------------------------------------------- +// TBB helper +// --------------------------------------------------------------------------- + +/// Flatten a tbb::enumerable_thread_specific> into a single vector. +template +std::vector flatten_thread_local(tbb::enumerable_thread_specific>& tls) +{ + size_t total = 0; + for (auto& v : tls) total += v.size(); + std::vector result; + result.reserve(total); + for (auto& v : tls) { + result.insert(result.end(), v.begin(), v.end()); + } + return result; +} + +// --------------------------------------------------------------------------- +// 2-D geometry types +// --------------------------------------------------------------------------- + +template +using Vec2 = Eigen::Matrix; + +/// A small convex polygon with compile-time max vertex count (no heap allocation). +template +struct SmallPoly2D +{ + std::array, MaxVerts> verts; + int n = 0; +}; + +// Clipping a triangle (3 verts) against 3 half-planes adds at most 1 vertex per step: +// 3 → 4 → 5 → 6. Use 8 for safety. +template +using ClipPoly = SmallPoly2D; + +/// Convert a Vec2 to a double[2] array for orient2D predicates. +template +std::array to_double2(const Vec2& v) +{ + return {static_cast(v.x()), static_cast(v.y())}; +} + +// --------------------------------------------------------------------------- +// Sutherland-Hodgman clipping helpers +// --------------------------------------------------------------------------- + +/// +/// Clip @p poly_in against the CCW half-plane defined by edge (edge_a → edge_b). +/// "Inside" = orient2D(edge_a, edge_b, p) >= 0 (left side or on the line). 
+/// Intersection points are computed with floating-point arithmetic. +/// Results are placed in @p poly_out (which is overwritten). +/// +template +void clip_by_halfplane( + const Vec2& edge_a, + const Vec2& edge_b, + const ClipPoly& poly_in, + ClipPoly& poly_out, + const ExactPredicates& pred) +{ + poly_out.n = 0; + + auto da = to_double2(edge_a); + auto db = to_double2(edge_b); + + for (int k = 0; k < poly_in.n; ++k) { + const Vec2& curr = poly_in.verts[k]; + const Vec2& next = poly_in.verts[(k + 1) % poly_in.n]; + + auto dc = to_double2(curr); + auto dn = to_double2(next); + const short s_curr = pred.orient2D(da.data(), db.data(), dc.data()); + const short s_next = pred.orient2D(da.data(), db.data(), dn.data()); + + if (s_curr >= 0) { + // curr is inside or on the clipping line → include it + poly_out.verts[poly_out.n++] = curr; + } + + if ((s_curr > 0 && s_next < 0) || (s_curr < 0 && s_next > 0)) { + // The segment (curr → next) straddles the clipping line. + // Compute floating-point intersection via cross-product parameterisation. + const Scalar ex = edge_b.x() - edge_a.x(); + const Scalar ey = edge_b.y() - edge_a.y(); + const Scalar d_curr = ex * (curr.y() - edge_a.y()) - ey * (curr.x() - edge_a.x()); + const Scalar d_next = ex * (next.y() - edge_a.y()) - ey * (next.x() - edge_a.x()); + const Scalar t = d_curr / (d_curr - d_next); + poly_out.verts[poly_out.n++] = curr + t * (next - curr); + } + } +} + +// --------------------------------------------------------------------------- +// Exact separating-axis test +// --------------------------------------------------------------------------- + +/// +/// Return true iff triangles @p a and @p b have a positive-area interior intersection. +/// +/// Uses exact orient2D predicates for a separating-axis test (SAT). Two triangles are +/// rejected (no interior overlap) if there exists an edge of either triangle such that all +/// three vertices of the other triangle lie on the exterior-or-boundary side. 
This +/// correctly handles adjacent triangles that share only an edge or a vertex. +/// +template +bool triangles_have_interior_overlap( + const Vec2 a[3], + const Vec2 b[3], + const ExactPredicates& pred) +{ + // Determine orientations to normalize to CCW. + auto da0 = to_double2(a[0]); + auto da1 = to_double2(a[1]); + auto da2 = to_double2(a[2]); + const short orient_a = pred.orient2D(da0.data(), da1.data(), da2.data()); + if (orient_a == 0) return false; // degenerate A — no interior + + auto db0 = to_double2(b[0]); + auto db1 = to_double2(b[1]); + auto db2 = to_double2(b[2]); + const short orient_b = pred.orient2D(db0.data(), db1.data(), db2.data()); + if (orient_b == 0) return false; // degenerate B — no interior + + // Normalize to CCW: if CW, swap vertices 1 and 2 to reverse orientation. + const Vec2* pa[3] = {&a[0], &a[1], &a[2]}; + const Vec2* pb[3] = {&b[0], &b[1], &b[2]}; + if (orient_a < 0) std::swap(pa[1], pa[2]); + if (orient_b < 0) std::swap(pb[1], pb[2]); + + // For a CCW triangle, the interior of edge (p0 → p1) is the LEFT half-plane, + // i.e. orient2D(p0, p1, q) > 0. + // + // A separating axis exists if ALL three vertices of the opposite triangle lie on the + // exterior-or-boundary side: orient2D(p0, p1, q) <= 0 for all three. + // + // We test edges of A against B and edges of B against A. 
+ + auto has_separating_edge = + [&pred](const Vec2* tri[3], const Vec2* opp[3]) -> bool { + for (int k = 0; k < 3; ++k) { + auto dp0 = to_double2(*tri[k]); + auto dp1 = to_double2(*tri[(k + 1) % 3]); + bool all_outside = true; + for (int j = 0; j < 3; ++j) { + auto dq = to_double2(*opp[j]); + if (pred.orient2D(dp0.data(), dp1.data(), dq.data()) > 0) { + all_outside = false; + break; + } + } + if (all_outside) return true; + } + return false; + }; + + if (has_separating_edge(pa, pb)) return false; + if (has_separating_edge(pb, pa)) return false; + return true; +} + +// --------------------------------------------------------------------------- +// Triangle-triangle intersection area (Sutherland-Hodgman) +// --------------------------------------------------------------------------- + +/// +/// Compute the area of the intersection of two UV-space triangles. +/// +/// Returns 0 if the triangles do not overlap or are degenerate. +/// Caller should first run triangles_have_interior_overlap() to skip most zero-area cases. +/// +template +Scalar triangle_intersection_area_2d( + const Vec2 a[3], + const Vec2 b[3], + const ExactPredicates& pred) +{ + // Normalize a to CCW so that the S-H half-planes are oriented correctly. + // Use exact orientation predicate for robustness with near-degenerate or + // large-coordinate triangles. + Vec2 a_ccw[3] = {a[0], a[1], a[2]}; + { + auto pa = to_double2(a_ccw[0]); + auto pb = to_double2(a_ccw[1]); + auto pc = to_double2(a_ccw[2]); + const short o = pred.orient2D(pa.data(), pb.data(), pc.data()); + if (o < 0) { + std::swap(a_ccw[1], a_ccw[2]); + } else if (o == 0) { + return Scalar(0); // degenerate triangle + } + } + + // Initialize polygon with the vertices of b. + ClipPoly poly, tmp; + poly.verts[0] = b[0]; + poly.verts[1] = b[1]; + poly.verts[2] = b[2]; + poly.n = 3; + + // Clip b against each CCW edge of a. 
+ for (int k = 0; k < 3 && poly.n > 0; ++k) { + clip_by_halfplane(a_ccw[k], a_ccw[(k + 1) % 3], poly, tmp, pred); + std::swap(poly, tmp); + } + + if (poly.n < 3) return Scalar(0); + + // Shoelace formula for the area of the clipped polygon. + Scalar area2 = Scalar(0); + for (int i = 0; i < poly.n; ++i) { + const int j = (i + 1) % poly.n; + area2 += poly.verts[i].x() * poly.verts[j].y() - poly.verts[j].x() * poly.verts[i].y(); + } + return std::abs(area2) * Scalar(0.5); +} + +// --------------------------------------------------------------------------- +// Candidate pair processor (SAT + area with fast adjacent-triangle rejection) +// --------------------------------------------------------------------------- + +/// +/// Process a single candidate pair of triangles: check for overlap and compute intersection area. +/// +/// For adjacent triangles sharing exactly 2 UV vertex indices (a shared edge), a fast orient2D +/// check determines whether the opposite vertices lie on the same side of the shared edge. +/// The common case (opposite sides = properly unfolded) is rejected with a single orient2D call +/// per vertex, avoiding the full 6-edge SAT. Only when both opposite vertices are on the same +/// side (a UV fold/overlap) do we proceed to area computation. +/// +/// @param area_out If non-null, the intersection area is computed via Sutherland-Hodgman +/// clipping and written to *area_out. If null, only overlap detection is +/// performed (faster). +/// @return true if the pair has an interior overlap. +/// +template +bool process_candidate_pair( + Index i, + Index j, + const UVVerts& uv_verts, + const UVFaces& uv_facets, + const ExactPredicates& pred, + Scalar* area_out) +{ + if (area_out) *area_out = Scalar(0); + + // Count shared UV vertex indices (cheap index comparisons only). 
+ int shared = 0; + int sa[2] = {}, sb[2] = {}; + for (int ka = 0; ka < 3; ++ka) { + for (int kb = 0; kb < 3; ++kb) { + if (uv_facets(i, ka) == uv_facets(j, kb)) { + if (shared < 2) { + sa[shared] = ka; + sb[shared] = kb; + } + ++shared; + break; // each vertex of i matches at most one vertex of j + } + } + } + + auto load_vert = [&](Index f, int k) -> Vec2 { + const Index v = uv_facets(f, k); + return Vec2(uv_verts(v, 0), uv_verts(v, 1)); + }; + + if (shared == 2) { + // Adjacent triangles sharing an edge. Check whether the opposite vertices lie on the + // same side of the shared edge line. If they are on opposite sides (the common case + // for properly unfolded UV maps), there is no interior overlap. + const int opp_a = 3 - sa[0] - sa[1]; + const int opp_b = 3 - sb[0] - sb[1]; + auto ds0 = to_double2(load_vert(i, sa[0])); + auto ds1 = to_double2(load_vert(i, sa[1])); + auto da = to_double2(load_vert(i, opp_a)); + auto db = to_double2(load_vert(j, opp_b)); + const short oa = pred.orient2D(ds0.data(), ds1.data(), da.data()); + const short ob = pred.orient2D(ds0.data(), ds1.data(), db.data()); + // Different sides or collinear → no interior overlap. + // Same side (oa * ob > 0) → UV fold, overlap confirmed, skip SAT. + // oa or ob == 0 means a degenerate triangle (opposite vertex on the shared edge), + // which has no interior and cannot produce positive-area overlap. + if (oa * ob <= 0) return false; + + // Overlap confirmed (shared==2 fast path). Compute area if requested. + if (area_out) { + Vec2 tri_a[3], tri_b[3]; + for (int k = 0; k < 3; ++k) { + tri_a[k] = load_vert(i, k); + tri_b[k] = load_vert(j, k); + } + *area_out = triangle_intersection_area_2d(tri_a, tri_b, pred); + } + return true; + } else { + Vec2 tri_a[3], tri_b[3]; + for (int k = 0; k < 3; ++k) { + tri_a[k] = load_vert(i, k); + tri_b[k] = load_vert(j, k); + } + if (!triangles_have_interior_overlap(tri_a, tri_b, pred)) return false; + + // Overlap confirmed. 
Compute area if requested (reuse already-loaded vertices). + if (area_out) { + *area_out = triangle_intersection_area_2d(tri_a, tri_b, pred); + } + return true; + } +} + +// --------------------------------------------------------------------------- +// Candidate pair finders +// --------------------------------------------------------------------------- + +/// +/// Zomorodian-Edelsbrunner OneWayScan for 2-D axis-aligned boxes (complete case). +/// +/// Sort boxes by x low endpoint. For each box i, scan forward through boxes j +/// (in sorted order, so x.lo[j] >= x.lo[i]) while x.lo[j] <= x.hi[i]. By +/// Property 2 of the ZE paper, two intervals intersect iff one contains the low +/// endpoint of the other; since j comes after i in sorted order, the only possible +/// containment is x.lo[j] ∈ [x.lo[i], x.hi[i]], so this one-directional scan +/// reports every intersecting x-interval pair exactly once. Then verify y-interval +/// overlap before emitting the candidate pair. +/// +/// Total work is O(n log n + k) where k = number of candidate pairs. +/// The outer loop is run in parallel via TBB. +/// +/// Reference: "Fast Software for Box Intersections", A. Zomorodian & H. Edelsbrunner, +/// Int. J. Computational Geometry & Applications 12(1-2), 2002. +/// +template +std::vector> sweep_and_prune_candidates( + const std::vector>& boxes) +{ + const Index n = static_cast(boxes.size()); + if (n < 2) return {}; + + // Sort box indices by x low endpoint. + std::vector order(n); + std::iota(order.begin(), order.end(), Index(0)); + tbb::parallel_sort(order.begin(), order.end(), [&](Index a, Index b) { + return boxes[a].min().x() < boxes[b].min().x(); + }); + + // Parallel outer loop: each ii is independent (read-only access to order/boxes). 
+ using PairVec = std::vector>; + tbb::enumerable_thread_specific local_pairs; + + tbb::parallel_for(Index(0), n, [&](Index ii) { + const Index i = order[ii]; + const auto& bi = boxes[i]; + auto& lp = local_pairs.local(); + + // Scan forward: j comes after i in x.lo order. + // Stop as soon as x.lo[j] >= x.hi[i] (no further j can strictly overlap i in x). + for (Index jj = ii + 1; jj < n; ++jj) { + const Index j = order[jj]; + const auto& bj = boxes[j]; + if (bj.min().x() >= bi.max().x()) break; + // Strict overlap in both axes. + if (bj.max().x() > bi.min().x() && bj.max().y() > bi.min().y() && + bj.min().y() < bi.max().y()) { + lp.push_back(std::minmax(i, j)); + } + } + }); + + return flatten_thread_local(local_pairs); +} + +/// +/// Zomorodian-Edelsbrunner HYBRID algorithm for 2-D axis-aligned boxes (complete case). +/// +/// Reference paper: "Fast Software for Box Intersections", +/// A. Zomorodian & H. Edelsbrunner, Int. J. Comp. Geom. & Appl. 12(1-2), 2002. +/// https://pub.ista.ac.at/~edels/Papers/2002-01-FastBoxIntersection.pdf +/// +/// The paper's Figure 5 defines HYBRID(I, P, lo, hi, d) as a streamed segment tree that +/// maintains two separate sets — I (intervals) and P (points) — which can diverge through +/// recursion even for the complete case (I = P = S initially). At each node it: +/// (4) extracts I_m = intervals spanning the segment [lo, hi), processes I_m × P +/// and P × I_m at d−1; +/// (5-7) splits P by a median mi, partitions I − I_m by intersection with [lo, mi) +/// and [mi, hi), and recurses on both children at d. Note that I_l and I_r are +/// NOT disjoint: a box straddling mi (but not spanning [lo, hi)) appears in both. +/// +/// This implementation is a simplification for the 2-D complete case (d ∈ {0, 1}) that +/// differs from the paper in several ways: +/// +/// 1. Single-set recursion. Instead of tracking I and P separately, we maintain one +/// set S and always perform self-intersection. 
This avoids the bookkeeping of two +/// diverging pointer arrays at the cost of generality (bipartite / 3-D would need +/// the paper's two-set structure). +/// +/// 2. Eager spanning extraction. Where the paper gradually identifies spanning +/// intervals as [lo, hi) narrows (I_m is often ∅ at the root), and splits the +/// remaining intervals into overlapping I_l / I_r subsets, we eagerly partition +/// S into three disjoint subsets at every level: +/// S_m (spanning): y_lo ≤ mi ∧ y_hi > mi (all pairwise overlap in y) +/// S_l (below): y_lo < mi ∧ y_hi ≤ mi +/// S_r (above): y_lo ≥ mi ∧ ¬spanning +/// No explicit [lo, hi) segment bounds are tracked. S_l and S_r are +/// y-separated (no pair can overlap in y), so S_l × S_r is skipped entirely. +/// +/// 3. Decomposition of S_m pairs. The paper handles I_m × P recursively at d−1 +/// (covering both self-pairs and cross-pairs in one call). We split this into: +/// • S_m × S_m self-pairs → hybrid(s_m, d−1) +/// • S_m × (S_l ∪ S_r) → flat bipartite OneWayScan +/// For 2-D these are equivalent because d−1 = 0 immediately reaches the scan base +/// case; a 3-D extension would need the paper's recursive approach. +/// +/// 4. Imbalanced partition fallback. When max(|S_l|, |S_r|) > ¾ |S_l ∪ S_r|, the +/// recursion is replaced by a single flat OneWayScan on S_l ∪ S_r. The paper +/// always recurses on both children. +/// +/// d=0 (x dimension): base case — OneWayScan on x. +/// d=1 (y dimension): partition by median on y, recurse on left/right/spanning, +/// bipartite scan for spanning × non-spanning pairs. +/// +template +class HybridSolver +{ + using Box2 = Eigen::AlignedBox; + using PairVec = std::vector>; + + // Scan cutoff: switch to flat OneWayScan when a subset has ≤ this many boxes. 
+ static constexpr Index k_cutoff = 512; + + const std::vector& m_boxes; + tbb::enumerable_thread_specific& m_tls_results; + +public: + HybridSolver( + const std::vector& boxes, + tbb::enumerable_thread_specific& tls_results) + : m_boxes(boxes) + , m_tls_results(tls_results) + {} + + // OneWayScan in x (complete case: S sorted by x.lo, emit each pair once). + void one_way_scan(span S) + { + const Index n = static_cast(S.size()); + tbb::parallel_for(Index(0), n, [&](Index ii) { + auto& lp = m_tls_results.local(); + const Index i = S[ii]; + const auto& bi = m_boxes[i]; + + for (Index jj = ii + 1; jj < n; ++jj) { + const Index j = S[jj]; + const auto& bj = m_boxes[j]; + if (bj.min().x() >= bi.max().x()) break; + if (bj.max().x() > bi.min().x() && bj.max().y() > bi.min().y() && + bj.min().y() < bi.max().y()) { + lp.push_back(std::minmax(i, j)); + } + } + }); + } + + // OneWayScan in x (bipartite case: I and P are disjoint sets, both sorted by x.lo). + // + // The caller invokes this twice with swapped arguments to cover both + // containment directions. To avoid duplicate reports when x.lo[i] == x.lo[j], + // the dual call should use strict=true. 
+ void one_way_scan_bipartite(span I, span P, bool strict = false) + { + const Index nI = static_cast(I.size()); + const Index nP = static_cast(P.size()); + if (nI == 0 || nP == 0) return; + + auto scan_one = [&](Index ii, PairVec& lp) { + const Index i = I[ii]; + const auto& bi = m_boxes[i]; + const Scalar xlo_i = bi.min().x(); + + Index start; + if (strict) { + start = static_cast( + std::upper_bound( + P.begin(), + P.end(), + xlo_i, + [&](Scalar v, Index jj) { return v < m_boxes[jj].min().x(); }) - + P.begin()); + } else { + start = static_cast( + std::lower_bound( + P.begin(), + P.end(), + xlo_i, + [&](Index jj, Scalar v) { return m_boxes[jj].min().x() < v; }) - + P.begin()); + } + + for (Index jj = start; jj < nP; ++jj) { + const Index j = P[jj]; + const auto& bj = m_boxes[j]; + if (bj.min().x() >= bi.max().x()) break; + if (bj.max().x() > bi.min().x() && bj.max().y() > bi.min().y() && + bj.min().y() < bi.max().y()) { + lp.push_back(std::minmax(i, j)); + } + } + }; + + tbb::parallel_for(Index(0), nI, [&](Index ii) { scan_one(ii, m_tls_results.local()); }); + } + + // HYBRID recursive procedure (complete case only: self-intersection of a single set). + // + // Corresponds to HYBRID(I, P, lo, hi, d) in Figure 5 of the ZE paper, specialized + // for the complete case (I = P = S) in 2-D. See the class-level comment above for + // a detailed description of the differences from the paper. + // + // Operates in-place on the mutable span S. The input buffer is partitioned + // in-place at each level into [S_l | S_r | S_m] so that S_l ∪ S_r is contiguous + // and S_m is at the end. + void hybrid(span S, int d) + { + const Index n = static_cast(S.size()); + if (n < 2) return; + + // Paper steps 2-3: base cases (d = 0 → scan, small set → scan). + if (d == 0 || n <= k_cutoff) { + sort_by_xlo(S); + one_way_scan(S); + return; + } + + // Paper step 5: compute median of y_lo values. 
+ const Scalar mi = median_ylo(S); + + // Paper steps 4, 6-7 combined: three-way in-place partition [S_l | S_r | S_m]. + // Unlike the paper (which uses non-disjoint I_l / I_r and a separate I_m that + // spans [lo, hi)), we eagerly extract S_m as boxes spanning the median point: + // S_m (spanning): ylo <= mi && yhi > mi (all pairwise overlap in y) + // S_l (below): ylo <= mi && yhi <= mi + // S_r (above): ylo > mi && yhi > mi (S_l × S_r never overlap in y) + // + // First pass: separate non-spanning from spanning → [non-spanning | S_m] + Index* mid_span = std::partition(S.data(), S.data() + S.size(), [&](Index i) { + return !(m_boxes[i].min().y() <= mi && m_boxes[i].max().y() > mi); + }); + // Second pass within non-spanning: separate S_l from S_r → [S_l | S_r | S_m] + Index* mid_lr = + std::partition(S.data(), mid_span, [&](Index i) { return m_boxes[i].min().y() <= mi; }); + + const Index n_l = static_cast(mid_lr - S.data()); + const Index n_r = static_cast(mid_span - mid_lr); + const Index n_m = static_cast(S.data() + S.size() - mid_span); + + span s_l(S.data(), n_l); + span s_r(mid_lr, n_r); + span s_m(mid_span, n_m); + span s_rest(S.data(), n_l + n_r); // S_l ∪ S_r is contiguous + + // Paper step 4 (partial): S_m self-pairs at d-1. + // Paper steps 6-7: recurse on left and right halves (y-separated, so S_l × S_r + // pairs never overlap in y and can be skipped). Must happen BEFORE the bipartite + // scan because sorting s_rest by x.lo would mix S_l and S_r elements, destroying + // the partition boundary. + // + // When the partition is very lopsided (one branch > 75%), fall through to a flat + // scan on all of S_rest instead of recursing on nearly-empty + nearly-full halves. 
+ const Index max_branch = std::max(n_l, n_r); + if (max_branch > static_cast(s_rest.size()) * 3 / 4) { + tbb::parallel_invoke( + [&] { + if (n_m >= 2) hybrid(s_m, d - 1); + }, + [&] { + sort_by_xlo(s_rest); + one_way_scan(s_rest); + }); + } else { + tbb::parallel_invoke( + [&] { + if (n_m >= 2) hybrid(s_m, d - 1); + }, + [&] { hybrid(s_l, d); }, + [&] { hybrid(s_r, d); }); + } + + // Paper step 4 (remaining): S_m × (S_l ∪ S_r) cross-pairs via bipartite scan. + // Both scan directions are needed to find all x-overlapping pairs. + if (n_m > 0 && !s_rest.empty()) { + sort_by_xlo(s_m); + sort_by_xlo(s_rest); + tbb::parallel_invoke( + [&] { one_way_scan_bipartite(s_m, s_rest); }, + [&] { one_way_scan_bipartite(s_rest, s_m, true); }); + } + } + +private: + void sort_by_xlo(span v) + { + tbb::parallel_sort(v.begin(), v.end(), [&](Index a, Index b) { + return m_boxes[a].min().x() < m_boxes[b].min().x(); + }); + } + + Scalar median_ylo(span S) + { + auto* mid = S.data() + S.size() / 2; + std::nth_element(S.data(), mid, S.data() + S.size(), [&](Index a, Index b) { + return m_boxes[a].min().y() < m_boxes[b].min().y(); + }); + return m_boxes[*mid].min().y(); + } +}; + +template +std::vector> hybrid_candidates( + const std::vector>& boxes) +{ + using PairVec = std::vector>; + const Index n = static_cast(boxes.size()); + if (n < 2) return {}; + + // Build initial index array sorted by y.lo (required by HYBRID entry point at d=1). + std::vector order(n); + std::iota(order.begin(), order.end(), Index(0)); + tbb::parallel_sort(order.begin(), order.end(), [&](Index a, Index b) { + return boxes[a].min().y() < boxes[b].min().y(); + }); + + tbb::enumerable_thread_specific tls_results; + HybridSolver solver(boxes, tls_results); + solver.hybrid(span(order), 1); + + return flatten_thread_local(tls_results); +} + +/// +/// BVH-based candidate pair detection for benchmarking. 
+/// +/// Builds an AABB tree on all triangle bounding boxes, then performs a +/// per-triangle box query in parallel. Emits each pair (i,j) with i < j exactly once. +/// +template +std::vector> bvh_candidates(std::vector> boxes) +{ + const Index n = static_cast(boxes.size()); + if (n < 2) return {}; + + la_runtime_assert( + n <= static_cast(std::numeric_limits::max()), + "BVH method: number of facets exceeds uint32_t range"); + + // Build AABB tree (its Index type is always uint32_t). + AABB tree; + tree.build(boxes); + + // Per-thread pair accumulator. + using PairVec = std::vector>; + tbb::enumerable_thread_specific local_pairs; + + tbb::parallel_for(Index(0), n, [&](Index i) { + auto& lp = local_pairs.local(); + const auto& bi = boxes[i]; + tree.intersect(bi, [&](uint32_t j_u32) -> bool { + const Index j = static_cast(j_u32); + if (j <= i) return true; // emit each (i,j) with i < j only once; skip self + // Strict interior overlap: the AABB tree uses non-strict intersection + // for internal node traversal, so we filter at the leaf level. + const auto& bj = boxes[j]; + if (bj.max().x() <= bi.min().x() || bj.min().x() >= bi.max().x()) return true; + if (bj.max().y() <= bi.min().y() || bj.min().y() >= bi.max().y()) return true; + lp.push_back({i, j}); + return true; + }); + }); + + return flatten_thread_local(local_pairs); +} + +// --------------------------------------------------------------------------- +// Chart-aware graph coloring +// --------------------------------------------------------------------------- + +/// +/// Chart-aware graph coloring of the overlap graph. +/// +/// Color 0 is reserved for non-overlapping triangles. Overlapping triangles +/// receive colors 1, 2, ... such that no two overlapping triangles share the +/// same color. +/// +/// Unlike a simple greedy coloring that processes facets in index order, this +/// performs a BFS traversal through each UV chart (via @p uv_adj). 
When +/// visiting a facet, it tries to reuse the BFS parent's color (if not +/// forbidden by an overlap conflict). This produces spatially coherent color +/// regions, minimizing the number of connected components per (chart, color) +/// pair when the coloring is later used to split charts for repacking. +/// +template +std::vector chart_aware_graph_color( + Index num_facets, + const std::vector>& overlap_edges, + const AdjacencyList& uv_adj) +{ + // Build overlap adjacency list. + std::vector> overlap_adj(num_facets); + for (auto& [i, j] : overlap_edges) { + overlap_adj[i].push_back(j); + overlap_adj[j].push_back(i); + } + + std::vector color(num_facets, Index(0)); // 0 = not involved in any overlap + std::vector visited(num_facets, false); + std::vector forbidden; + std::deque> queue; // (facet, parent_facet) + + for (Index start = 0; start < num_facets; ++start) { + if (visited[start]) continue; + + // BFS through one UV chart. + queue.clear(); + queue.push_back({start, start}); + visited[start] = true; + + while (!queue.empty()) { + auto [v, parent] = queue.front(); + queue.pop_front(); + + if (!overlap_adj[v].empty()) { + // Collect forbidden colors from overlap neighbors. + forbidden.clear(); + for (Index nb : overlap_adj[v]) { + if (color[nb] > 0) { + const size_t c = static_cast(color[nb]) - 1; + if (c >= forbidden.size()) forbidden.resize(c + 1, false); + forbidden[c] = true; + } + } + + // Try to reuse the BFS parent's color first. + Index best = 0; + if (color[parent] > 0) { + const size_t pc = static_cast(color[parent]) - 1; + if (pc >= forbidden.size() || !forbidden[pc]) { + best = color[parent]; + } + } + + // If parent's color is unavailable, try other UV neighbors' colors. + if (best == 0) { + for (Index nb : uv_adj.get_neighbors(v)) { + if (color[nb] > 0) { + const size_t nc = static_cast(color[nb]) - 1; + if (nc >= forbidden.size() || !forbidden[nc]) { + best = color[nb]; + break; + } + } + } + } + + // Fallback: smallest available color >= 1. 
+ if (best == 0) { + size_t c = 0; + while (c < forbidden.size() && forbidden[c]) ++c; + best = static_cast(c + 1); + } + + color[v] = best; + } + + // Enqueue UV-adjacent unvisited facets. + for (Index nb : uv_adj.get_neighbors(v)) { + if (!visited[nb]) { + visited[nb] = true; + queue.push_back({nb, v}); + } + } + } + } + + return color; +} + +} // namespace + +// --------------------------------------------------------------------------- +// compute_uv_overlap — main entry point +// --------------------------------------------------------------------------- + +template +UVOverlapResult compute_uv_overlap( + SurfaceMesh& mesh, + const UVOverlapOptions& options) +{ + la_runtime_assert(mesh.is_triangle_mesh(), "compute_uv_overlap: mesh must be triangulated."); + + // ----- Phase 1: extract UV mesh ----- + UVMeshOptions uv_opts; + uv_opts.uv_attribute_name = options.uv_attribute_name; + uv_opts.element_types = UVMeshOptions::ElementTypes::All; + // uv_mesh_view creates a 2-D mesh whose vertex positions are UV coordinates. 
+ auto uv_mesh = uv_mesh_view(mesh, uv_opts); + + const Index num_facets = uv_mesh.get_num_facets(); + if (num_facets == 0) return {}; + + const auto uv_verts = vertex_view(uv_mesh); // (num_uv_verts × 2) + const auto uv_facets = facet_view(uv_mesh); // (num_facets × 3) + + // ----- Phase 2: compute per-triangle 2-D bounding boxes ----- + using Box2 = Eigen::AlignedBox; + std::vector boxes(num_facets); + for (Index f = 0; f < num_facets; ++f) { + boxes[f].setEmpty(); + for (int k = 0; k < 3; ++k) { + const Index vid = uv_facets(f, k); + boxes[f].extend(Vec2(uv_verts(vid, 0), uv_verts(vid, 1))); + } + } + + // ----- Phase 3: candidate pair detection ----- + std::vector> candidates; + switch (options.method) { + case UVOverlapMethod::SweepAndPrune: + candidates = sweep_and_prune_candidates(boxes); + break; + case UVOverlapMethod::BVH: candidates = bvh_candidates(std::move(boxes)); break; + case UVOverlapMethod::Hybrid: candidates = hybrid_candidates(boxes); break; + } + + if (candidates.empty()) return {}; + + // ----- Phase 4: exact intersection test + optional area (parallel) ----- + ExactPredicatesShewchuk pred; + const bool compute_area = options.compute_overlap_area; + const bool collect_pairs = + options.compute_overlapping_pairs || options.compute_overlap_coloring; + + struct ThreadLocal + { + std::vector> pairs; // only used when collect_pairs + size_t count = 0; + Scalar area = 0.f; + }; + tbb::enumerable_thread_specific tls; + + tbb::parallel_for( + tbb::blocked_range(0, candidates.size()), + [&](const tbb::blocked_range& r) { + auto& loc = tls.local(); + for (size_t p = r.begin(); p != r.end(); ++p) { + const Index i = candidates[p].first; + const Index j = candidates[p].second; + Scalar area = 0; + Scalar* area_ptr = compute_area ? 
&area : nullptr; + if (process_candidate_pair(i, j, uv_verts, uv_facets, pred, area_ptr)) { + loc.count++; + loc.area += area; + if (collect_pairs) { + loc.pairs.push_back({i, j}); + } + } + } + }); + + // ----- Phase 5: accumulate results ----- + size_t total_results = 0; + Scalar total_area = Scalar(0); + std::vector> overlap_edges; + + for (auto& loc : tls) { + total_results += loc.count; + if (compute_area) total_area += loc.area; + } + if (collect_pairs && total_results > 0) { + overlap_edges.reserve(total_results); + for (auto& loc : tls) { + overlap_edges.insert(overlap_edges.end(), loc.pairs.begin(), loc.pairs.end()); + } + } + + if (total_results == 0) return {}; + + UVOverlapResult result; + result.has_overlap = true; + if (compute_area) result.overlap_area = total_area; + + // ----- Phase 6: optional per-facet overlap coloring ----- + if (options.compute_overlap_coloring && !overlap_edges.empty()) { + const auto uv_adj = compute_facet_facet_adjacency(uv_mesh); + std::vector colors = + chart_aware_graph_color(num_facets, overlap_edges, uv_adj); + + const AttributeId attr_id = internal::find_or_create_attribute( + mesh, + options.overlap_coloring_attribute_name, + AttributeElement::Facet, + AttributeUsage::Scalar, + 1, + internal::ResetToDefault::No); + + auto& attr = mesh.template ref_attribute(attr_id); + auto attr_data = attr.ref_all(); + la_debug_assert(static_cast(attr_data.size()) == num_facets); + for (Index f = 0; f < num_facets; ++f) { + attr_data[f] = colors[f]; + } + + result.overlap_coloring_id = attr_id; + } + + if (options.compute_overlapping_pairs) { + tbb::parallel_sort(overlap_edges.begin(), overlap_edges.end()); + result.overlapping_pairs = std::move(overlap_edges); + } + + return result; +} + +// Explicit template instantiations. 
+#define LA_X_compute_uv_overlap(_, Scalar, Index) \ + template LA_BVH_API UVOverlapResult compute_uv_overlap( \ + SurfaceMesh&, \ + const UVOverlapOptions&); +LA_SURFACE_MESH_X(compute_uv_overlap, 0) + +} // namespace lagrange::bvh diff --git a/modules/bvh/tests/test_compute_uv_overlap.cpp b/modules/bvh/tests/test_compute_uv_overlap.cpp new file mode 100644 index 00000000..0d1b78ba --- /dev/null +++ b/modules/bvh/tests/test_compute_uv_overlap.cpp @@ -0,0 +1,670 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include + +#include +#include +#include +#include + +#include + +#include +#include + +namespace { + +using namespace lagrange; +using Scalar = float; +using Index = uint32_t; + +/// +/// Build a triangle mesh with a per-vertex UV attribute. +/// +/// @p uv_coords UV coordinates for each vertex (u, v pairs). +/// @p faces Triangle connectivity (indices into uv_coords). +/// +/// The 3-D vertex positions are set to (u, v, 0) for convenience; they are not +/// used by compute_uv_overlap. +/// +SurfaceMesh make_uv_mesh( + const std::vector>& uv_coords, + const std::vector>& faces) +{ + SurfaceMesh mesh; + for (auto& uv : uv_coords) { + mesh.add_vertex({uv[0], uv[1], Scalar(0)}); + } + for (auto& f : faces) { + mesh.add_triangle(f[0], f[1], f[2]); + } + + // Create a per-vertex UV attribute so that uv_mesh_view can extract UV positions. 
+ const AttributeId uv_id = mesh.template create_attribute( + "@uv", + AttributeElement::Vertex, + AttributeUsage::UV, + 2); + auto& attr = mesh.template ref_attribute(uv_id); + auto values = attr.ref_all(); + for (Index i = 0; i < static_cast(uv_coords.size()); ++i) { + values[i * 2 + 0] = uv_coords[i][0]; + values[i * 2 + 1] = uv_coords[i][1]; + } + return mesh; +} + +/// +/// Run compute_uv_overlap with all three methods, requesting overlapping pairs. +/// Verify that the sorted pair lists are identical across methods. +/// Returns the SweepAndPrune result. +/// +/// Must be called from within a Catch2 TEST_CASE (uses REQUIRE/INFO macros). +/// +bvh::UVOverlapResult run_all_methods_and_check_pairs( + SurfaceMesh& mesh) +{ + bvh::UVOverlapOptions opts; + opts.compute_overlapping_pairs = true; + + opts.method = bvh::UVOverlapMethod::SweepAndPrune; + auto result_ze = bvh::compute_uv_overlap(mesh, opts); + + opts.method = bvh::UVOverlapMethod::BVH; + auto result_bvh = bvh::compute_uv_overlap(mesh, opts); + + opts.method = bvh::UVOverlapMethod::Hybrid; + auto result_h = bvh::compute_uv_overlap(mesh, opts); + + INFO("SweepAndPrune pairs: " << result_ze.overlapping_pairs.size()); + INFO("BVH pairs: " << result_bvh.overlapping_pairs.size()); + INFO("Hybrid pairs: " << result_h.overlapping_pairs.size()); + + REQUIRE(result_bvh.overlapping_pairs == result_ze.overlapping_pairs); + REQUIRE(result_h.overlapping_pairs == result_ze.overlapping_pairs); + + return result_ze; +} + +} // namespace + +// --------------------------------------------------------------------------- +// Basic overlap detection +// --------------------------------------------------------------------------- + +TEST_CASE("compute_uv_overlap: no overlap", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // Two unit right triangles placed far apart in UV space. 
+ // A: (0,0)-(1,0)-(0,1) B: (2,0)-(3,0)-(2,1) + auto mesh = + make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {2, 0}, {3, 0}, {2, 1}}, {{{0, 1, 2}}, {{3, 4, 5}}}); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE_FALSE(result.overlap_area.has_value()); + REQUIRE(result.overlap_coloring_id == invalid_attribute_id()); +} + +TEST_CASE("compute_uv_overlap: full overlap (identical triangles)", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // Two triangles with exactly the same UV coordinates. + // Area of each = 0.5; intersection = the whole triangle, area = 0.5. + auto mesh = + make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {0, 0}, {1, 0}, {0, 1}}, {{{0, 1, 2}}, {{3, 4, 5}}}); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE(result.overlap_area.has_value()); + REQUIRE_THAT(*result.overlap_area, Catch::Matchers::WithinAbs(0.5f, 1e-5f)); +} + +TEST_CASE("compute_uv_overlap: partial overlap (known area)", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // A: (0,0)-(1,0)-(0,1) + // B: (0.5,0)-(1.5,0)-(0.5,1) + // + // Sutherland-Hodgman gives intersection polygon: (0.5,0)-(1,0)-(0.5,0.5) + // Area = 0.5 * 0.5 * 0.5 = 0.125 + auto mesh = make_uv_mesh( + {{0, 0}, {1, 0}, {0, 1}, {0.5f, 0}, {1.5f, 0}, {0.5f, 1}}, + {{{0, 1, 2}}, {{3, 4, 5}}}); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE(result.overlap_area.has_value()); + REQUIRE_THAT(*result.overlap_area, Catch::Matchers::WithinAbs(0.125f, 1e-5f)); +} + +TEST_CASE("compute_uv_overlap: CW-oriented triangle", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // Triangle A CCW: (0,0)-(1,0)-(0,1) + // Triangle B CW (same triangle, reversed): (0,0)-(0,1)-(1,0) + // These are the same triangle; intersection area = 0.5. 
+ auto mesh = + make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {0, 0}, {0, 1}, {1, 0}}, {{{0, 1, 2}}, {{3, 4, 5}}}); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE(result.overlap_area.has_value()); + REQUIRE_THAT(*result.overlap_area, Catch::Matchers::WithinAbs(0.5f, 1e-5f)); +} + +// --------------------------------------------------------------------------- +// Adjacent triangles must NOT be counted as overlapping +// --------------------------------------------------------------------------- + +TEST_CASE("compute_uv_overlap: adjacent triangles sharing an edge", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // Standard quad split into two triangles sharing edge (1,0)-(0,1). + // v0=(0,0) v1=(1,0) v2=(0,1) v3=(1,1) + // T0: v0-v1-v2 T1: v1-v3-v2 + auto mesh = make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {1, 1}}, {{{0, 1, 2}}, {{1, 3, 2}}}); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE_FALSE(result.overlap_area.has_value()); +} + +TEST_CASE("compute_uv_overlap: adjacent triangles sharing a vertex", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // T0: (0,0)-(1,0)-(0,1) T1: (1,0)-(2,0)-(1,1) + // They share vertex (1,0) only; no interior overlap. + auto mesh = + make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {1, 0}, {2, 0}, {1, 1}}, {{{0, 1, 2}}, {{3, 4, 5}}}); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE_FALSE(result.overlap_area.has_value()); +} + +// --------------------------------------------------------------------------- +// Edge cases +// --------------------------------------------------------------------------- + +TEST_CASE("compute_uv_overlap: empty mesh", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + SurfaceMesh mesh; + // Create a UV attribute even on the empty mesh so uv_mesh_view can find it. 
+ mesh.template create_attribute("@uv", AttributeElement::Vertex, AttributeUsage::UV, 2); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE_FALSE(result.overlap_area.has_value()); + REQUIRE(result.overlap_coloring_id == invalid_attribute_id()); +} + +TEST_CASE("compute_uv_overlap: single triangle", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + auto mesh = make_uv_mesh({{0, 0}, {1, 0}, {0, 1}}, {{{0, 1, 2}}}); + + auto result = bvh::compute_uv_overlap(mesh); + REQUIRE_FALSE(result.overlap_area.has_value()); +} + +TEST_CASE("compute_uv_overlap: coloring flag false no attribute created", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + auto mesh = + make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {0, 0}, {1, 0}, {0, 1}}, {{{0, 1, 2}}, {{3, 4, 5}}}); + + bvh::UVOverlapOptions opts; + opts.compute_overlap_coloring = false; + auto result = bvh::compute_uv_overlap(mesh, opts); + + REQUIRE(result.overlap_area.has_value()); + REQUIRE(result.overlap_coloring_id == invalid_attribute_id()); + REQUIRE_FALSE(mesh.has_attribute("@uv_overlap_color")); +} + +TEST_CASE("compute_uv_overlap: custom UV attribute name", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(3, 4, 5); + + // Create a UV attribute with a custom name. + const AttributeId uv_id = mesh.template create_attribute( + "my_uvs", + AttributeElement::Vertex, + AttributeUsage::UV, + 2); + { + auto& attr = mesh.template ref_attribute(uv_id); + auto v = attr.ref_all(); + // Both triangles identical in UV → full overlap. 
+ float coords[] = {0, 0, 1, 0, 0, 1, 0, 0, 1, 0, 0, 1}; + std::copy(std::begin(coords), std::end(coords), v.begin()); + } + + bvh::UVOverlapOptions opts; + opts.uv_attribute_name = "my_uvs"; + auto result = bvh::compute_uv_overlap(mesh, opts); + + REQUIRE(result.overlap_area.has_value()); + REQUIRE_THAT(*result.overlap_area, Catch::Matchers::WithinAbs(0.5f, 1e-5f)); +} + +// --------------------------------------------------------------------------- +// Per-facet overlap coloring +// --------------------------------------------------------------------------- + +TEST_CASE("compute_uv_overlap: coloring chain A-B-C, A does not overlap C", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // A: (0,0)-(1,0)-(0,1) + // B: (0.5,0)-(1.5,0)-(0.5,1) overlaps A + // C: (1.0,0)-(2.0,0)-(1.0,1) overlaps B; shares only vertex (1,0) with A → no overlap + // + // Overlap graph: A-B, B-C (A and C are not adjacent) + // Greedy coloring: A→1, B→2, C→1 + auto mesh = make_uv_mesh( + {{0, 0}, {1, 0}, {0, 1}, {0.5f, 0}, {1.5f, 0}, {0.5f, 1}, {1.0f, 0}, {2.0f, 0}, {1.0f, 1}}, + {{{0, 1, 2}}, {{3, 4, 5}}, {{6, 7, 8}}}); + + bvh::UVOverlapOptions opts; + opts.compute_overlap_coloring = true; + auto result = bvh::compute_uv_overlap(mesh, opts); + + REQUIRE(result.overlap_area.has_value()); + REQUIRE(result.overlap_coloring_id != invalid_attribute_id()); + + auto colors = attribute_vector_view(mesh, result.overlap_coloring_id); + REQUIRE(colors.size() == 3); + + const Index color_a = colors[0]; + const Index color_b = colors[1]; + const Index color_c = colors[2]; + + // All overlapping triangles must have a non-zero color. + REQUIRE(color_a > 0); + REQUIRE(color_b > 0); + REQUIRE(color_c > 0); + + // Adjacent (overlapping) triangles must have different colors. + REQUIRE(color_a != color_b); // A overlaps B + REQUIRE(color_b != color_c); // B overlaps C + + // Non-adjacent triangles (A and C) may share a color. 
+ REQUIRE(color_a == color_c); +} + +TEST_CASE("compute_uv_overlap: coloring three mutually overlapping triangles", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // Three triangles all nearly identical → mutually overlapping → need 3 distinct colors. + // A: (0,0)-(1,0)-(0,1) + // B: (0.1,0)-(1.1,0)-(0.1,1) + // C: (0.2,0)-(1.2,0)-(0.2,1) + auto mesh = make_uv_mesh( + {{0, 0}, {1, 0}, {0, 1}, {0.1f, 0}, {1.1f, 0}, {0.1f, 1}, {0.2f, 0}, {1.2f, 0}, {0.2f, 1}}, + {{{0, 1, 2}}, {{3, 4, 5}}, {{6, 7, 8}}}); + + bvh::UVOverlapOptions opts; + opts.compute_overlap_coloring = true; + auto result = bvh::compute_uv_overlap(mesh, opts); + + REQUIRE(result.overlap_area.has_value()); + REQUIRE(result.overlap_coloring_id != invalid_attribute_id()); + + auto colors = attribute_vector_view(mesh, result.overlap_coloring_id); + REQUIRE(colors.size() == 3); + + // All three must be overlapping → non-zero colors. + REQUIRE(colors[0] > 0); + REQUIRE(colors[1] > 0); + REQUIRE(colors[2] > 0); + + // No two may share a color since all three pairwise-overlap. + REQUIRE(colors[0] != colors[1]); + REQUIRE(colors[1] != colors[2]); + REQUIRE(colors[0] != colors[2]); +} + +TEST_CASE("compute_uv_overlap: coloring custom attribute name is used", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + auto mesh = + make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {0, 0}, {1, 0}, {0, 1}}, {{{0, 1, 2}}, {{3, 4, 5}}}); + + bvh::UVOverlapOptions opts; + opts.compute_overlap_coloring = true; + opts.overlap_coloring_attribute_name = "my_colors"; + auto result = bvh::compute_uv_overlap(mesh, opts); + + REQUIRE(result.overlap_area.has_value()); + REQUIRE(mesh.has_attribute("my_colors")); + REQUIRE_FALSE(mesh.has_attribute("@uv_overlap_color")); +} + +TEST_CASE("compute_uv_overlap: coloring and pairs requested together", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // A overlaps B, B overlaps C, A does not overlap C. 
+ auto mesh = make_uv_mesh( + {{0, 0}, {1, 0}, {0, 1}, {0.5f, 0}, {1.5f, 0}, {0.5f, 1}, {1.0f, 0}, {2.0f, 0}, {1.0f, 1}}, + {{{0, 1, 2}}, {{3, 4, 5}}, {{6, 7, 8}}}); + + bvh::UVOverlapOptions opts; + opts.compute_overlap_coloring = true; + opts.compute_overlapping_pairs = true; + auto result = bvh::compute_uv_overlap(mesh, opts); + + // Coloring must be valid. + REQUIRE(result.overlap_coloring_id != invalid_attribute_id()); + auto colors = attribute_vector_view(mesh, result.overlap_coloring_id); + REQUIRE(colors[0] != colors[1]); + REQUIRE(colors[1] != colors[2]); + + // Pairs must also be populated (not destroyed by the coloring phase). + REQUIRE(result.overlapping_pairs.size() == 2); + REQUIRE(result.overlapping_pairs[0] == std::make_pair(Index(0), Index(1))); + REQUIRE(result.overlapping_pairs[1] == std::make_pair(Index(1), Index(2))); +} + +// --------------------------------------------------------------------------- +// Method cross-validation: all three methods must produce identical pair lists +// --------------------------------------------------------------------------- + +TEST_CASE("compute_uv_overlap: all methods agree on partial overlap pairs", "[bvh][uv_overlap]") +{ + auto mesh = make_uv_mesh( + {{0, 0}, {1, 0}, {0, 1}, {0.5f, 0}, {1.5f, 0}, {0.5f, 1}}, + {{{0, 1, 2}}, {{3, 4, 5}}}); + + auto result = run_all_methods_and_check_pairs(mesh); + REQUIRE(result.overlap_area.has_value()); + REQUIRE(result.overlapping_pairs.size() == 1); + REQUIRE(result.overlapping_pairs[0] == std::make_pair(Index(0), Index(1))); +} + +TEST_CASE("compute_uv_overlap: all methods agree on no-overlap mesh", "[bvh][uv_overlap]") +{ + auto mesh = + make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {2, 0}, {3, 0}, {2, 1}}, {{{0, 1, 2}}, {{3, 4, 5}}}); + + auto result = run_all_methods_and_check_pairs(mesh); + REQUIRE_FALSE(result.overlap_area.has_value()); + REQUIRE(result.overlapping_pairs.empty()); +} + +TEST_CASE("compute_uv_overlap: all methods agree on adjacent-only mesh", 
"[bvh][uv_overlap]") +{ + auto mesh = make_uv_mesh({{0, 0}, {1, 0}, {0, 1}, {1, 1}}, {{{0, 1, 2}}, {{1, 3, 2}}}); + + auto result = run_all_methods_and_check_pairs(mesh); + REQUIRE_FALSE(result.overlap_area.has_value()); + REQUIRE(result.overlapping_pairs.empty()); +} + +TEST_CASE("compute_uv_overlap: all methods agree on chain A-B-C", "[bvh][uv_overlap]") +{ + // A overlaps B, B overlaps C, A does not overlap C. + auto mesh = make_uv_mesh( + {{0, 0}, {1, 0}, {0, 1}, {0.5f, 0}, {1.5f, 0}, {0.5f, 1}, {1.0f, 0}, {2.0f, 0}, {1.0f, 1}}, + {{{0, 1, 2}}, {{3, 4, 5}}, {{6, 7, 8}}}); + + auto result = run_all_methods_and_check_pairs(mesh); + REQUIRE(result.overlap_area.has_value()); + REQUIRE(result.overlapping_pairs.size() == 2); + REQUIRE(result.overlapping_pairs[0] == std::make_pair(Index(0), Index(1))); + REQUIRE(result.overlapping_pairs[1] == std::make_pair(Index(1), Index(2))); +} + +TEST_CASE("compute_uv_overlap: all methods agree on dense overlapping grid", "[bvh][uv_overlap]") +{ + using namespace lagrange; + + // 10x10 grid of unit right-triangles spaced at 0.9 (< step=1.0) so adjacent + // columns overlap by 0.1 in x, guaranteeing many overlapping pairs. 
+ std::vector> uvs; + std::vector> faces; + const Index n = 10; + const Scalar step = 1.0f; + const Scalar spacing = 0.9f; // < step → adjacent triangles overlap + Index vi = 0; + for (Index row = 0; row < n; ++row) { + for (Index col = 0; col < n; ++col) { + const Scalar x = static_cast(col) * spacing; + const Scalar y = static_cast(row) * spacing; + uvs.push_back({x, y}); + uvs.push_back({x + step, y}); + uvs.push_back({x, y + step}); + faces.push_back({vi, vi + 1, vi + 2}); + vi += 3; + } + } + auto mesh = make_uv_mesh(uvs, faces); + + auto result = run_all_methods_and_check_pairs(mesh); + REQUIRE(result.overlap_area.has_value()); + REQUIRE(result.overlapping_pairs.size() > 0); +} + +// --------------------------------------------------------------------------- +// Method cross-validation on real meshes +// --------------------------------------------------------------------------- + +TEST_CASE( + "compute_uv_overlap: all methods agree on Grenade_H", + "[bvh][uv_overlap][corp]" LA_CORP_FLAG LA_SLOW_DEBUG_FLAG) +{ + using namespace lagrange; + + auto mesh = lagrange::testing::load_surface_mesh("corp/io/Grenade_H.obj"); + lagrange::triangulate_polygonal_facets(mesh); + REQUIRE(mesh.is_triangle_mesh()); + + auto result = run_all_methods_and_check_pairs(mesh); + REQUIRE(result.overlap_area.has_value()); + REQUIRE(result.overlapping_pairs.size() > 0); +} + +// --------------------------------------------------------------------------- +// Benchmarks (disabled by default; run with [!benchmark] tag) +// --------------------------------------------------------------------------- + +namespace { + +/// +/// Build a mesh with @p n unit right triangles tiled in UV space with no overlap. +/// Every other triangle is then shifted to create a controllable overlap fraction. 
+/// +SurfaceMesh make_benchmark_mesh(Index n_per_side, float overlap_shift) +{ + std::vector> uvs; + std::vector> faces; + + const Scalar step = 1.0f; + Index vi = 0; + for (Index row = 0; row < n_per_side; ++row) { + for (Index col = 0; col < n_per_side; ++col) { + const Scalar x = static_cast(col) * step * 1.1f; + const Scalar y = static_cast(row) * step * 1.1f; + // Apply a small overlap shift to odd triangles. + const Scalar dx = ((row + col) % 2 == 1) ? overlap_shift : 0.0f; + uvs.push_back({x + dx, y}); + uvs.push_back({x + dx + step, y}); + uvs.push_back({x + dx, y + step}); + faces.push_back({vi, vi + 1, vi + 2}); + vi += 3; + } + } + return make_uv_mesh(uvs, faces); +} + +} // namespace + +TEST_CASE("compute_uv_overlap benchmark: synthetic grid", "[bvh][!benchmark]") +{ + using namespace lagrange; + + // 20×20 = 400 triangles, with a small shift so ~half overlap their neighbour. + auto mesh = make_benchmark_mesh(20, 0.05f); + + BENCHMARK("hybrid (ZE)") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::Hybrid; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("sweep-and-prune (ZE)") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::SweepAndPrune; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("BVH per-triangle") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::BVH; + return bvh::compute_uv_overlap(mesh, opts); + }; +} + +TEST_CASE("compute_uv_overlap benchmark: dragon", "[bvh][!benchmark]") +{ + using namespace lagrange; + + // Load the dragon mesh and assign UV coordinates via an orthographic XY projection. + // This gives every vertex u = x and v = y (using the mesh's existing spatial scale), + // which causes front-facing and back-facing triangles to overlap in UV space — + // a realistic stress-test that exercises the full overlap pipeline on a + // complex high-resolution mesh (~870 K triangles). 
+ auto mesh = lagrange::testing::load_surface_mesh("open/core/dragon.obj"); + la_runtime_assert(mesh.is_triangle_mesh(), "dragon.obj must be triangulated"); + + { + const auto verts = vertex_view(mesh); + const Index nv = mesh.get_num_vertices(); + + // Compute XY bounding box so we can report it, but do not normalise — + // keeping the original scale preserves the geometric relationship between + // triangles and avoids artificially concentrating all UVs into [0,1]^2. + Scalar x_min = std::numeric_limits::max(); + Scalar x_max = std::numeric_limits::lowest(); + Scalar y_min = std::numeric_limits::max(); + Scalar y_max = std::numeric_limits::lowest(); + for (Index i = 0; i < nv; ++i) { + x_min = std::min(x_min, verts(i, 0)); + x_max = std::max(x_max, verts(i, 0)); + y_min = std::min(y_min, verts(i, 1)); + y_max = std::max(y_max, verts(i, 1)); + } + INFO( + "Dragon XY UV extent: [" << x_min << ", " << x_max << "] x [" << y_min << ", " << y_max + << "]"); + + const AttributeId uv_id = mesh.template create_attribute( + "@uv", + AttributeElement::Vertex, + AttributeUsage::UV, + 2); + auto& attr = mesh.template ref_attribute(uv_id); + auto values = attr.ref_all(); + for (Index i = 0; i < nv; ++i) { + values[i * 2 + 0] = verts(i, 0); // u = x + values[i * 2 + 1] = verts(i, 1); // v = y + } + } + + BENCHMARK("hybrid (ZE)") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::Hybrid; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("sweep-and-prune (ZE)") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::SweepAndPrune; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("BVH per-triangle") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::BVH; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("hybrid (ZE) + coloring") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::Hybrid; + opts.compute_overlap_coloring = true; + return 
bvh::compute_uv_overlap(mesh, opts); + }; +} + +TEST_CASE("compute_uv_overlap benchmark: grenade", "[bvh][!benchmark]" LA_CORP_FLAG) +{ + using namespace lagrange; + + // Largest mesh in data/ with real UV coordinates (708 K triangles after triangulation). + auto mesh = lagrange::testing::load_surface_mesh("corp/io/Grenade_H.obj"); + lagrange::triangulate_polygonal_facets(mesh); + la_runtime_assert(mesh.is_triangle_mesh()); + + BENCHMARK("hybrid (ZE)") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::Hybrid; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("sweep-and-prune (ZE)") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::SweepAndPrune; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("BVH per-triangle") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::BVH; + return bvh::compute_uv_overlap(mesh, opts); + }; + + BENCHMARK("hybrid (ZE) + coloring") + { + bvh::UVOverlapOptions opts; + opts.method = bvh::UVOverlapMethod::Hybrid; + opts.compute_overlap_coloring = true; + return bvh::compute_uv_overlap(mesh, opts); + }; +} diff --git a/modules/core/python/tests/assets.py b/modules/conftest.py similarity index 57% rename from modules/core/python/tests/assets.py rename to modules/conftest.py index a3678fbf..375e9f8c 100644 --- a/modules/core/python/tests/assets.py +++ b/modules/conftest.py @@ -9,14 +9,26 @@ # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. # -import lagrange +""" +Global pytest fixtures for Lagrange tests. + +This conftest.py provides common fixtures that are shared across all modules. +Fixtures defined here are automatically available to all test files without imports. 
+""" +import lagrange import numpy as np import pytest +# ============================================================================= +# Basic Mesh Fixtures +# ============================================================================= + + @pytest.fixture def single_triangle(): + """A simple triangle mesh with vertices at the identity matrix positions.""" mesh = lagrange.SurfaceMesh() mesh.add_vertices(np.eye(3)) mesh.add_triangle(0, 1, 2) @@ -27,6 +39,7 @@ def single_triangle(): @pytest.fixture def single_triangle_with_index(single_triangle): + """A single triangle mesh with vertex_index and facet_index attributes.""" mesh = single_triangle mesh.wrap_as_attribute( "vertex_index", @@ -47,6 +60,7 @@ def single_triangle_with_index(single_triangle): @pytest.fixture def single_triangle_with_uv(single_triangle): + """A single triangle mesh with UV coordinates.""" mesh = single_triangle mesh.wrap_as_indexed_attribute( "uv", @@ -60,6 +74,7 @@ def single_triangle_with_uv(single_triangle): @pytest.fixture def cube(): + """A unit cube mesh with quad facets.""" vertices = np.array( [ [0, 0, 0], @@ -92,6 +107,7 @@ def cube(): @pytest.fixture def cube_triangular(): + """A unit cube mesh with triangular facets.""" vertices = np.array( [ [0, 0, 0], @@ -130,6 +146,7 @@ def cube_triangular(): @pytest.fixture def cube_with_uv(cube): + """A unit cube mesh with UV coordinates.""" mesh = cube mesh.create_attribute( "uv", @@ -165,3 +182,97 @@ def cube_with_uv(cube): ), ) return mesh + + +@pytest.fixture +def house(): + """A house-shaped mesh (quad + triangle) with UV coordinates.""" + vertices = np.array( + [ + [0.25, 0.25, 0.0], + [0.75, 0.25, 0.0], + [0.75, 0.75, 0.0], + [0.25, 0.75, 0.0], + [0.5, 1.0, 0.0], + ], + dtype=float, + ) + mesh = lagrange.SurfaceMesh() + mesh.vertices = vertices + mesh.add_quad(0, 1, 2, 3) + mesh.add_triangle(3, 2, 4) + texcoord_values = np.array( + [ + [0.25, 0.25], + [0.75, 0.25], + [0.75, 0.75], + [0.25, 0.75], + [0.25, 0.75], + [0.75, 0.75], + [0.5, 
1.0], + ], + dtype=float, + ) + texcoord_indices = np.arange(len(texcoord_values), dtype=np.uint32) + texcoord_id = mesh.create_attribute( + name="texcoord_0", + element=lagrange.AttributeElement.Indexed, + usage=lagrange.AttributeUsage.UV, + initial_values=texcoord_values, + initial_indices=texcoord_indices, + ) + lagrange.weld_indexed_attribute(mesh, texcoord_id) + return mesh + + +@pytest.fixture +def triangle(): + mesh = lagrange.SurfaceMesh() + mesh.add_vertices(np.array([[0, 0, 0], [1, 0, 0], [0, 1, 0]], dtype=np.float64)) + mesh.add_triangle(0, 1, 2) + return mesh + + +@pytest.fixture +def quad(): + mesh = lagrange.SurfaceMesh() + mesh.add_vertices(np.array([[0, 0, 0], [1, 0, 0], [1, 1, 0], [0, 1, 0]], dtype=np.float64)) + mesh.add_quad(0, 1, 2, 3) + return mesh + + +@pytest.fixture +def triangle_with_attribute(triangle): + mesh = triangle + mesh.create_attribute( + "temperature", + element=lagrange.AttributeElement.Vertex, + usage=lagrange.AttributeUsage.Scalar, + initial_values=np.array([1.0, 2.0, 3.0], dtype=np.float64), + ) + return mesh + + +@pytest.fixture +def square(): + """A 2D unit square mesh with triangular facets.""" + vertices = np.array( + [ + [0, 0], + [1, 0], + [1, 1], + [0, 1], + ], + dtype=float, + ) + facets = np.array( + [ + [0, 1, 2], + [2, 3, 0], + ], + dtype=np.uint32, + ) + mesh = lagrange.SurfaceMesh(2) + mesh.vertices = vertices + mesh.facets = facets + return mesh diff --git a/modules/core/include/lagrange/Attribute.h b/modules/core/include/lagrange/Attribute.h index 81d6b20c..0f7266d4 100644 --- a/modules/core/include/lagrange/Attribute.h +++ b/modules/core/include/lagrange/Attribute.h @@ -19,6 +19,27 @@ namespace lagrange { +/// @cond LA_INTERNAL_DOCS +namespace internal { + +/// +/// Returns a read-only view of the attribute buffer (num elements x num channels), after +/// unpoisoning the padding region [size, capacity) under ASan for internal buffers. This allows +/// external libraries (e.g. 
Embree) to safely perform SIMD reads that may overshoot the logical +/// buffer size. +/// +/// @param[in] attr The attribute to access. +/// +/// @tparam ValueType Attribute value type. +/// +/// @return A read-only view of the attribute buffer (same as get_all()). +/// +template +lagrange::span get_all_unpoisoned(const Attribute& attr); + +} // namespace internal +/// @endcond + /// /// @defgroup group-surfacemesh-attr Attributes /// @ingroup group-surfacemesh @@ -565,13 +586,10 @@ class Attribute : public AttributeBase /// lagrange::span get_all() const; - /// - /// Returns a read-only view of the full allocated buffer, including any padding entries added - /// via reserve_entries(). For external attributes, this is equivalent to get_all(). - /// - /// @return A read-only view of the full attribute buffer including padding. - /// - lagrange::span get_all_with_padding() const; + /// @cond LA_INTERNAL_DOCS + template + friend lagrange::span internal::get_all_unpoisoned(const Attribute& attr); + /// @endcond /// /// Returns a writable view of the buffer spanning num elements x num channels. 
The actual diff --git a/modules/core/include/lagrange/ExactPredicates.h b/modules/core/include/lagrange/ExactPredicates.h index efcf8e6b..6b1447e9 100644 --- a/modules/core/include/lagrange/ExactPredicates.h +++ b/modules/core/include/lagrange/ExactPredicates.h @@ -19,6 +19,11 @@ namespace lagrange { +/// +/// @addtogroup module-core +/// @{ +/// + class LA_CORE_API ExactPredicates { public: @@ -110,4 +115,6 @@ class LA_CORE_API ExactPredicates virtual short insphere(double p1[3], double p2[3], double p3[3], double p4[3], double p5[3]) const = 0; }; +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/ExactPredicatesShewchuk.h b/modules/core/include/lagrange/ExactPredicatesShewchuk.h index 7492e149..44c92cc9 100644 --- a/modules/core/include/lagrange/ExactPredicatesShewchuk.h +++ b/modules/core/include/lagrange/ExactPredicatesShewchuk.h @@ -15,6 +15,11 @@ namespace lagrange { +/// +/// @addtogroup module-core +/// @{ +/// + class LA_CORE_API ExactPredicatesShewchuk : public ExactPredicates { public: @@ -43,4 +48,6 @@ class LA_CORE_API ExactPredicatesShewchuk : public ExactPredicates const; }; +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/SurfaceMesh.h b/modules/core/include/lagrange/SurfaceMesh.h index d60b53d4..bbc5ac94 100644 --- a/modules/core/include/lagrange/SurfaceMesh.h +++ b/modules/core/include/lagrange/SurfaceMesh.h @@ -25,10 +25,17 @@ namespace lagrange { /// @cond LA_INTERNAL_DOCS /// Forward declarations +template +class SurfaceMesh; namespace internal { template class weak_ptr; -} +struct SurfaceMeshInfo; +template +SurfaceMeshInfo from_surface_mesh(const SurfaceMesh&); +template +SurfaceMesh to_surface_mesh(const SurfaceMeshInfo&); +} // namespace internal /// @endcond /// @defgroup group-surfacemesh SurfaceMesh @@ -2789,6 +2796,12 @@ class SurfaceMesh template friend class SurfaceMesh; + /// Allow from_surface_mesh/to_surface_mesh to access internal data. 
+ template + friend internal::SurfaceMeshInfo internal::from_surface_mesh(const SurfaceMesh&); + template + friend SurfaceMesh internal::to_surface_mesh(const internal::SurfaceMeshInfo&); + /// /// Hidden attribute manager class. /// diff --git a/modules/core/include/lagrange/attribute_names.h b/modules/core/include/lagrange/attribute_names.h index a3ae455c..849f2475 100644 --- a/modules/core/include/lagrange/attribute_names.h +++ b/modules/core/include/lagrange/attribute_names.h @@ -15,6 +15,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-attr +/// @{ +/// + // Valid attribute semantic properties are listed below. // Attribute semantic property names can be in the form [semantic]_[set_index], e.g. texcoord_0, // texcoord_1, etc. @@ -75,4 +80,6 @@ struct AttributeName static constexpr std::string_view indexed_joint = "indexed_joint"; }; +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/compute_facet_circumcenter.h b/modules/core/include/lagrange/compute_facet_circumcenter.h index c9961009..c0b4d428 100644 --- a/modules/core/include/lagrange/compute_facet_circumcenter.h +++ b/modules/core/include/lagrange/compute_facet_circumcenter.h @@ -15,6 +15,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + struct FacetCircumcenterOptions { /// Output facet circumcenter attribute name. @@ -34,4 +39,6 @@ AttributeId compute_facet_circumcenter( SurfaceMesh& mesh, FacetCircumcenterOptions options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/compute_facet_facet_adjacency.h b/modules/core/include/lagrange/compute_facet_facet_adjacency.h new file mode 100644 index 00000000..0d8c70eb --- /dev/null +++ b/modules/core/include/lagrange/compute_facet_facet_adjacency.h @@ -0,0 +1,52 @@ +/* + * Copyright 2026 Adobe. All rights reserved. 
+ * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include + +namespace lagrange { + +/// +/// @defgroup group-surfacemesh-utils Mesh utilities +/// @ingroup group-surfacemesh +/// +/// Various mesh processing utilities. +/// +/// @{ + +/// +/// Compute facet-facet adjacency information based on shared edges. +/// +/// Two facets are considered adjacent if they share an edge. For non-manifold edges with 3 or more +/// incident facets, a complete clique is formed (every pair of facets around the edge is adjacent). +/// +/// @note If two facets share multiple edges, the neighboring facet will appear in the +/// adjacency list once for each time the shared edge is referenced by its incident +/// facets (i.e., neighbors are not deduplicated). +/// +/// @note This function calls @c initialize_edges() if edges have not been initialized yet, +/// which mutates the mesh. +/// +/// @param mesh The input mesh (edges will be initialized if needed). +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +/// @return The facet-facet adjacency list. 
+/// +template +AdjacencyList compute_facet_facet_adjacency(SurfaceMesh& mesh); + +/// @} + +} // namespace lagrange diff --git a/modules/core/include/lagrange/compute_mesh_covariance.h b/modules/core/include/lagrange/compute_mesh_covariance.h index 80b93093..26d67817 100644 --- a/modules/core/include/lagrange/compute_mesh_covariance.h +++ b/modules/core/include/lagrange/compute_mesh_covariance.h @@ -20,6 +20,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + /// /// Options struct for computing mesh covariance. /// @@ -52,4 +57,6 @@ std::array, 3> compute_mesh_covariance( const SurfaceMesh& mesh, const MeshCovarianceOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/compute_pointcloud_pca.h b/modules/core/include/lagrange/compute_pointcloud_pca.h index 6e1cc23a..58c58b9a 100644 --- a/modules/core/include/lagrange/compute_pointcloud_pca.h +++ b/modules/core/include/lagrange/compute_pointcloud_pca.h @@ -19,6 +19,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + struct ComputePointcloudPCAOptions { /** @@ -75,4 +80,6 @@ PointcloudPCAOutput compute_pointcloud_pca( span points, ComputePointcloudPCAOptions options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/compute_seam_edges.h b/modules/core/include/lagrange/compute_seam_edges.h index 612833ca..9b3096c7 100644 --- a/modules/core/include/lagrange/compute_seam_edges.h +++ b/modules/core/include/lagrange/compute_seam_edges.h @@ -13,6 +13,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + /// /// Options for computing seam edges. 
/// @@ -44,4 +49,6 @@ AttributeId compute_seam_edges( AttributeId indexed_attribute_id, const SeamEdgesOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/compute_uv_charts.h b/modules/core/include/lagrange/compute_uv_charts.h index eb1d2faf..fe53c0ee 100644 --- a/modules/core/include/lagrange/compute_uv_charts.h +++ b/modules/core/include/lagrange/compute_uv_charts.h @@ -16,6 +16,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + struct UVChartOptions { using ConnectivityType = lagrange::ConnectivityType; @@ -50,4 +55,6 @@ struct UVChartOptions template size_t compute_uv_charts(SurfaceMesh& mesh, const UVChartOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/internal/SurfaceMeshInfo.h b/modules/core/include/lagrange/internal/SurfaceMeshInfo.h new file mode 100644 index 00000000..9ca1a6fb --- /dev/null +++ b/modules/core/include/lagrange/internal/SurfaceMeshInfo.h @@ -0,0 +1,72 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include + +#include +#include +#include + +namespace lagrange::internal { + +/// +/// Metadata and raw byte views for a single serialized attribute. +/// +/// Byte views are non-owning — the underlying data must outlive this struct. 
+/// +struct AttributeInfo +{ + std::string_view name; + AttributeId attribute_id = 0; + std::underlying_type_t value_type = 0; + std::underlying_type_t element_type = 0; + std::underlying_type_t usage = 0; + size_t num_channels = 0; + size_t num_elements = 0; + bool is_indexed = false; + + /// Non-indexed attribute: raw data bytes (num_elements * num_channels * sizeof(ValueType)). + span data_bytes; + + /// Indexed attribute: values and indices stored separately. + span values_bytes; + size_t values_num_elements = 0; + size_t values_num_channels = 0; + span indices_bytes; + size_t indices_num_elements = 0; + uint8_t index_type_size = 0; +}; + +/// +/// Complete serialized mesh representation using standard types. +/// +/// All byte views are non-owning — the source data must outlive this struct. +/// +struct SurfaceMeshInfo +{ + uint8_t scalar_type_size = 0; ///< sizeof(Scalar): 4 for float, 8 for double. + uint8_t index_type_size = 0; ///< sizeof(Index): 4 for uint32_t, 8 for uint64_t. + + size_t num_vertices = 0; + size_t num_facets = 0; + size_t num_corners = 0; + size_t num_edges = 0; + size_t dimension = 0; + size_t vertex_per_facet = 0; ///< >0 for regular meshes, 0 for hybrid meshes. + + std::vector attributes; +}; + +} // namespace lagrange::internal diff --git a/modules/core/include/lagrange/internal/get_uv_attribute.h b/modules/core/include/lagrange/internal/get_uv_attribute.h index 2c0bb094..66ed376a 100644 --- a/modules/core/include/lagrange/internal/get_uv_attribute.h +++ b/modules/core/include/lagrange/internal/get_uv_attribute.h @@ -23,6 +23,7 @@ */ #include +#include #include #include @@ -35,7 +36,9 @@ namespace lagrange::internal { /// /// @param mesh The mesh to get the UV attribute from. /// @param uv_attribute_name The name of the UV attribute. If empty, use the first indexed or -/// vertex UV attribute. +/// vertex UV attribute or, if element_types is set to +/// UVMeshOptions::ElementTypes::All, the first corner attribute. 
+/// @param element_types Supported element types for the UV attribute lookup. /// /// @tparam Scalar Mesh scalar type. /// @tparam Index Mesh index type. @@ -46,7 +49,8 @@ namespace lagrange::internal { template AttributeId get_uv_id( const SurfaceMesh& mesh, - std::string_view uv_attribute_name = ""); + std::string_view uv_attribute_name = "", + UVMeshOptions::ElementTypes element_types = UVMeshOptions::ElementTypes::IndexedOrVertex); /// /// Get the constant UV attribute buffers of a mesh. diff --git a/modules/core/include/lagrange/internal/set_invalid_indexed_values.h b/modules/core/include/lagrange/internal/set_invalid_indexed_values.h new file mode 100644 index 00000000..0cda06f4 --- /dev/null +++ b/modules/core/include/lagrange/internal/set_invalid_indexed_values.h @@ -0,0 +1,69 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include + +namespace lagrange::internal { + +/// +/// @addtogroup group-surfacemesh-attr-utils +/// @{ +/// + +/// +/// For each element in the index buffer set to `invalid()`, appends a new +/// value set to invalid() and updates the index to point to it. +/// +/// @tparam ValueType Attribute value type. +/// @tparam Index Attribute index type. +/// +/// @param[in,out] attr The indexed attribute to fix up. 
+/// +template +void set_invalid_indexed_values(IndexedAttribute& attr) +{ + auto indices = attr.indices().ref_all(); + const size_t num_indices = attr.indices().get_num_elements(); + + // Count the number of invalid indices. + size_t num_invalid = 0; + for (size_t i = 0; i < num_indices; ++i) { + if (indices[i] == invalid()) { + ++num_invalid; + } + } + + if (num_invalid == 0) return; + + // Append invalid() values for each invalid index. + const size_t num_valid = attr.values().get_num_elements(); + + const ValueType old_default_value = attr.values().get_default_value(); + auto scope = make_scope_guard([&] { attr.values().set_default_value(old_default_value); }); + attr.values().set_default_value(invalid()); + attr.values().insert_elements(num_invalid); + + // Assign each invalid index to its own new value. + Index next_value = static_cast(num_valid); + for (size_t i = 0; i < num_indices; ++i) { + if (indices[i] == invalid()) { + indices[i] = next_value++; + } + } +} + +/// @} + +} // namespace lagrange::internal diff --git a/modules/core/include/lagrange/internal/surface_mesh_info_convert.h b/modules/core/include/lagrange/internal/surface_mesh_info_convert.h new file mode 100644 index 00000000..3d3eb835 --- /dev/null +++ b/modules/core/include/lagrange/internal/surface_mesh_info_convert.h @@ -0,0 +1,52 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#pragma once + +#include +#include + +namespace lagrange::internal { + +/// +/// Extract a SurfaceMeshInfo from a SurfaceMesh. +/// +/// The returned info contains non-owning spans pointing into the mesh's attribute buffers. The mesh +/// must outlive the returned SurfaceMeshInfo. +/// +/// @param[in] mesh The source mesh. +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +/// @return A SurfaceMeshInfo with spans into the mesh's data. +/// +template +SurfaceMeshInfo from_surface_mesh(const SurfaceMesh& mesh); + +/// +/// Reconstruct a SurfaceMesh from a SurfaceMeshInfo. +/// +/// This directly populates the mesh's internal data structures, avoiding expensive topology +/// reconstruction (e.g. initialize_edges). All attributes, including reserved ones, are restored +/// from the stored byte data. +/// +/// @param[in] info The mesh info to restore from. +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +/// @return The reconstructed mesh. +/// +template +SurfaceMesh to_surface_mesh(const SurfaceMeshInfo& info); + +} // namespace lagrange::internal diff --git a/modules/core/include/lagrange/mesh_bbox.h b/modules/core/include/lagrange/mesh_bbox.h new file mode 100644 index 00000000..2d91dbab --- /dev/null +++ b/modules/core/include/lagrange/mesh_bbox.h @@ -0,0 +1,46 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ + +#pragma once + +#include + +#include + +namespace lagrange { + +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + +/// +/// Compute the axis-aligned bounding box of a mesh. +/// +/// If the mesh has no vertices, the returned bounding box is empty (default-constructed +/// `Eigen::AlignedBox`, where `isEmpty()` returns true). +/// +/// @param[in] mesh Input mesh. Its dimension must match the @p Dimension template parameter. +/// +/// @tparam Dimension Spatial dimension of the bounding box. Must be 2 or 3. +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +/// @return The axis-aligned bounding box of the mesh vertices. +/// +template +Eigen::AlignedBox(Dimension)> mesh_bbox( + const SurfaceMesh& mesh); + +/// @} + +} // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/close_small_holes.h b/modules/core/include/lagrange/mesh_cleanup/close_small_holes.h index b834c111..c985010f 100644 --- a/modules/core/include/lagrange/mesh_cleanup/close_small_holes.h +++ b/modules/core/include/lagrange/mesh_cleanup/close_small_holes.h @@ -17,6 +17,13 @@ #include namespace lagrange { +/// +/// @defgroup group-surfacemesh-cleanup Mesh cleanup +/// @ingroup group-surfacemesh +/// +/// Mesh cleanup and repair utilities. +/// +/// @{ /// /// Option struct for closing small holes. 
@@ -39,4 +46,6 @@ struct CloseSmallHolesOptions template void close_small_holes(SurfaceMesh& mesh, CloseSmallHolesOptions options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/detect_degenerate_facets.h b/modules/core/include/lagrange/mesh_cleanup/detect_degenerate_facets.h index e14514a6..fa260694 100644 --- a/modules/core/include/lagrange/mesh_cleanup/detect_degenerate_facets.h +++ b/modules/core/include/lagrange/mesh_cleanup/detect_degenerate_facets.h @@ -17,6 +17,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Detects degenerate facets in a mesh. /// @@ -35,4 +40,6 @@ namespace lagrange { template std::vector detect_degenerate_facets(const SurfaceMesh& mesh); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/remove_degenerate_facets.h b/modules/core/include/lagrange/mesh_cleanup/remove_degenerate_facets.h index 2c70c53b..b050f3f5 100644 --- a/modules/core/include/lagrange/mesh_cleanup/remove_degenerate_facets.h +++ b/modules/core/include/lagrange/mesh_cleanup/remove_degenerate_facets.h @@ -15,6 +15,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Removes degenerate facets from a mesh. 
/// @@ -31,4 +36,6 @@ namespace lagrange { template void remove_degenerate_facets(SurfaceMesh& mesh); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_facets.h b/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_facets.h index 4ac0629f..1609049b 100644 --- a/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_facets.h +++ b/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_facets.h @@ -19,6 +19,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Options for remove_duplicate_facets /// @@ -48,4 +53,6 @@ void remove_duplicate_facets( SurfaceMesh& mesh, const RemoveDuplicateFacetOptions& opts = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_vertices.h b/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_vertices.h index b1dfd363..401b2eb9 100644 --- a/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_vertices.h +++ b/modules/core/include/lagrange/mesh_cleanup/remove_duplicate_vertices.h @@ -21,6 +21,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Option struct for remove_duplicate_vertices. /// @@ -48,4 +53,6 @@ void remove_duplicate_vertices( SurfaceMesh& mesh, const RemoveDuplicateVerticesOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/remove_isolated_vertices.h b/modules/core/include/lagrange/mesh_cleanup/remove_isolated_vertices.h index 08a793b2..1048eb5b 100644 --- a/modules/core/include/lagrange/mesh_cleanup/remove_isolated_vertices.h +++ b/modules/core/include/lagrange/mesh_cleanup/remove_isolated_vertices.h @@ -19,6 +19,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Removes isolated vertices of a mesh. Facets incident to any removed vertex will be deleted. 
/// @@ -30,4 +35,6 @@ namespace lagrange { template void remove_isolated_vertices(SurfaceMesh& mesh); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/remove_null_area_facets.h b/modules/core/include/lagrange/mesh_cleanup/remove_null_area_facets.h index 320222c8..6fba3241 100644 --- a/modules/core/include/lagrange/mesh_cleanup/remove_null_area_facets.h +++ b/modules/core/include/lagrange/mesh_cleanup/remove_null_area_facets.h @@ -15,6 +15,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Option struct for remove_null_area_facets. /// @@ -41,4 +46,6 @@ void remove_null_area_facets( SurfaceMesh& mesh, const RemoveNullAreaFacetsOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/remove_short_edges.h b/modules/core/include/lagrange/mesh_cleanup/remove_short_edges.h index c0fa63fe..8c6f9267 100644 --- a/modules/core/include/lagrange/mesh_cleanup/remove_short_edges.h +++ b/modules/core/include/lagrange/mesh_cleanup/remove_short_edges.h @@ -19,6 +19,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Collapse all edges shorter than a given tolerance. /// @@ -28,4 +33,6 @@ namespace lagrange { template void remove_short_edges(SurfaceMesh& mesh, Scalar threshold = 0); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/remove_topologically_degenerate_facets.h b/modules/core/include/lagrange/mesh_cleanup/remove_topologically_degenerate_facets.h index d4bf61bc..0b96ea41 100644 --- a/modules/core/include/lagrange/mesh_cleanup/remove_topologically_degenerate_facets.h +++ b/modules/core/include/lagrange/mesh_cleanup/remove_topologically_degenerate_facets.h @@ -15,6 +15,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Remove topologically degenerate facets (i.e. triangles like (0, 1, 1)). 
/// @@ -28,4 +33,6 @@ namespace lagrange { template void remove_topologically_degenerate_facets(SurfaceMesh& mesh); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/rescale_uv_charts.h b/modules/core/include/lagrange/mesh_cleanup/rescale_uv_charts.h index 5f98a5f3..61e60bfb 100644 --- a/modules/core/include/lagrange/mesh_cleanup/rescale_uv_charts.h +++ b/modules/core/include/lagrange/mesh_cleanup/rescale_uv_charts.h @@ -17,6 +17,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + struct RescaleUVOptions { /** @@ -51,4 +56,6 @@ struct RescaleUVOptions template void rescale_uv_charts(SurfaceMesh& mesh, const RescaleUVOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/resolve_nonmanifoldness.h b/modules/core/include/lagrange/mesh_cleanup/resolve_nonmanifoldness.h index 03813d65..e66a8b8d 100644 --- a/modules/core/include/lagrange/mesh_cleanup/resolve_nonmanifoldness.h +++ b/modules/core/include/lagrange/mesh_cleanup/resolve_nonmanifoldness.h @@ -17,6 +17,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Resolve both non-manifold vertices and non-manifold edges in the input mesh. /// @@ -28,4 +33,6 @@ namespace lagrange { template void resolve_nonmanifoldness(SurfaceMesh& mesh); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/resolve_vertex_nonmanifoldness.h b/modules/core/include/lagrange/mesh_cleanup/resolve_vertex_nonmanifoldness.h index a3f0cc14..3eb1b5c3 100644 --- a/modules/core/include/lagrange/mesh_cleanup/resolve_vertex_nonmanifoldness.h +++ b/modules/core/include/lagrange/mesh_cleanup/resolve_vertex_nonmanifoldness.h @@ -19,6 +19,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + /// /// Resolve nonmanifold vertices by pulling disconnected 1-ring /// neighborhood apart. 
@@ -34,4 +39,6 @@ namespace lagrange { template void resolve_vertex_nonmanifoldness(SurfaceMesh& mesh); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/split_long_edges.h b/modules/core/include/lagrange/mesh_cleanup/split_long_edges.h index a393073a..f9fc6d14 100644 --- a/modules/core/include/lagrange/mesh_cleanup/split_long_edges.h +++ b/modules/core/include/lagrange/mesh_cleanup/split_long_edges.h @@ -22,6 +22,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + struct SplitLongEdgesOptions { /// Maximum edge length. Edges longer than this value will be split. @@ -51,4 +56,6 @@ struct SplitLongEdgesOptions template void split_long_edges(SurfaceMesh& mesh, SplitLongEdgesOptions options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/mesh_cleanup/unflip_uv_triangles.h b/modules/core/include/lagrange/mesh_cleanup/unflip_uv_triangles.h index f6cbfc6d..f8b5c915 100644 --- a/modules/core/include/lagrange/mesh_cleanup/unflip_uv_triangles.h +++ b/modules/core/include/lagrange/mesh_cleanup/unflip_uv_triangles.h @@ -17,6 +17,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-cleanup +/// @{ +/// + struct UnflipUVOptions { /// Name of the attribute containing the UV coordinates. @@ -44,4 +49,6 @@ struct UnflipUVOptions template void unflip_uv_triangles(SurfaceMesh& mesh, const UnflipUVOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/orient_outward.h b/modules/core/include/lagrange/orient_outward.h index aaaa87f0..d1ce3ad3 100644 --- a/modules/core/include/lagrange/orient_outward.h +++ b/modules/core/include/lagrange/orient_outward.h @@ -19,6 +19,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + /// /// Options for orienting the facets of a mesh. 
/// @@ -41,4 +46,6 @@ struct OrientOptions template void orient_outward(lagrange::SurfaceMesh& mesh, const OrientOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/reorder_mesh.h b/modules/core/include/lagrange/reorder_mesh.h index 5c1e2fc9..d850e068 100644 --- a/modules/core/include/lagrange/reorder_mesh.h +++ b/modules/core/include/lagrange/reorder_mesh.h @@ -15,6 +15,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + /// /// Mesh reordering method to apply before decimation. /// @@ -37,4 +42,6 @@ enum class ReorderingMethod { template void reorder_mesh(SurfaceMesh& mesh, ReorderingMethod method); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/segment_segment_squared_distance.h b/modules/core/include/lagrange/segment_segment_squared_distance.h index f81ba7d3..42adb2a0 100644 --- a/modules/core/include/lagrange/segment_segment_squared_distance.h +++ b/modules/core/include/lagrange/segment_segment_squared_distance.h @@ -18,6 +18,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + /// /// Computes the squared distance between two N-d line segments, and the closest pair of points /// whose separation is this distance. @@ -136,4 +141,6 @@ auto segment_segment_squared_distance( return (closest_pointU - closest_pointV).squaredNorm(); } +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/split_facets_by_material.h b/modules/core/include/lagrange/split_facets_by_material.h index df31af9d..8668a74c 100644 --- a/modules/core/include/lagrange/split_facets_by_material.h +++ b/modules/core/include/lagrange/split_facets_by_material.h @@ -18,6 +18,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + /// /// Split mesh facets based on material labels. 
/// @@ -37,4 +42,6 @@ void split_facets_by_material( SurfaceMesh& mesh, std::string_view material_attribute_name); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/thicken_and_close_mesh.h b/modules/core/include/lagrange/thicken_and_close_mesh.h index 316930e9..9ede3f8e 100644 --- a/modules/core/include/lagrange/thicken_and_close_mesh.h +++ b/modules/core/include/lagrange/thicken_and_close_mesh.h @@ -22,6 +22,11 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + /// /// Options for thicken_and_close_mesh. /// @@ -68,4 +73,6 @@ SurfaceMesh thicken_and_close_mesh( SurfaceMesh input_mesh, const ThickenAndCloseOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/include/lagrange/utils/StackVector.h b/modules/core/include/lagrange/utils/StackVector.h index e4de193f..32e34d86 100644 --- a/modules/core/include/lagrange/utils/StackVector.h +++ b/modules/core/include/lagrange/utils/StackVector.h @@ -35,7 +35,7 @@ template struct StackVector { private: - std::array m_array; + std::array m_array = {}; size_t m_size = 0; public: diff --git a/modules/core/include/lagrange/uv_mesh.h b/modules/core/include/lagrange/uv_mesh.h index 6903840e..748f83b2 100644 --- a/modules/core/include/lagrange/uv_mesh.h +++ b/modules/core/include/lagrange/uv_mesh.h @@ -17,13 +17,31 @@ namespace lagrange { +/// +/// @addtogroup group-surfacemesh-utils +/// @{ +/// + struct UVMeshOptions { + /// Supported element types for UV mesh extraction. + enum class ElementTypes { + IndexedOrVertex, ///< Only indexed/vertex attributes (zero-copy, no allocation). + All, ///< Also supports corner attributes (may allocate an index buffer). + }; + /// Input UV attribute name. /// - /// The attribute must be a vertex or indexed attribute of type `Scalar`. - /// If empty, the first UV attribute will be used. + /// The attribute must be a UV attribute of type `UVScalar` with 2 channels. 
Supported element + /// types depend on the `element_types` option. If empty, the first matching UV attribute will be + /// used. std::string_view uv_attribute_name = ""; + + /// Supported element types for UV attribute lookup. + /// + /// By default, only indexed and vertex attributes are supported (zero-copy). Set to + /// ElementTypes::All to also support corner attributes, which may allocate an index buffer. + ElementTypes element_types = ElementTypes::IndexedOrVertex; }; /** @@ -33,9 +51,6 @@ struct UVMeshOptions * positions set to the corresponding UV coordinates. Modification of UV mesh vertices will be * reflected in the input mesh. * - * @warning This method requires that the input UV attribute is an indexed or vertex attribute. - * Corner attributes are not supported by this function. - * * @param mesh Input mesh. * @param options Options to control UV mesh extraction. * @@ -58,9 +73,6 @@ SurfaceMesh uv_mesh_ref( * This method will create a new mesh with the same topology as the input mesh, but with vertex * positions set to the corresponding UV coordinates. The output UV mesh cannot be modified. * - * @warning This method requires that the input UV attribute is an indexed or vertex attribute. - * Corner attributes are not supported by this function. - * * @param mesh Input mesh. * @param options Options to control UV mesh extraction. * @@ -77,4 +89,6 @@ SurfaceMesh uv_mesh_view( const SurfaceMesh& mesh, const UVMeshOptions& options = {}); +/// @} + } // namespace lagrange diff --git a/modules/core/python/src/bind_utilities.h b/modules/core/python/src/bind_utilities.h index 1b6428cf..ba7c9fd1 100644 --- a/modules/core/python/src/bind_utilities.h +++ b/modules/core/python/src/bind_utilities.h @@ -1326,7 +1326,7 @@ Basel: Birkhäuser Basel, 2008. 175-188. 
m.def( "extract_submesh", [](MeshType& mesh, - Tensor selected_facets, + std::variant, nb::list> selected_facets, std::string_view source_vertex_attr_name, std::string_view source_facet_attr_name, bool map_attributes) { @@ -1334,9 +1334,17 @@ Basel: Birkhäuser Basel, 2008. 175-188. options.source_vertex_attr_name = source_vertex_attr_name; options.source_facet_attr_name = source_facet_attr_name; options.map_attributes = map_attributes; - auto [data, shape, stride] = tensor_to_span(selected_facets); - la_runtime_assert(is_dense(shape, stride)); - return extract_submesh(mesh, data, options); + if (std::holds_alternative(selected_facets)) { + auto selected_facets_list = + nb::cast>(std::get(selected_facets)); + span data{selected_facets_list.data(), selected_facets_list.size()}; + return extract_submesh(mesh, data, options); + } else { + auto selected_facets_tensor = std::get>(selected_facets); + auto [data, shape, stride] = tensor_to_span(selected_facets_tensor); + la_runtime_assert(is_dense(shape, stride)); + return extract_submesh(mesh, data, options); + } }, "mesh"_a, "selected_facets"_a, @@ -1346,7 +1354,7 @@ Basel: Birkhäuser Basel, 2008. 175-188. R"(Extract a submesh based on the selected facets. :param mesh: The source mesh. -:param selected_facets: A listed of facet ids to extract. +:param selected_facets: A list or tensor of facet ids to extract. :param source_vertex_attr_name: The optional attribute name to track source vertices. :param source_facet_attr_name: The optional attribute name to track source facets. :param map_attributes: Map attributes from the source to target meshes. 
diff --git a/modules/core/python/tests/test_attribute.py b/modules/core/python/tests/test_attribute.py index fccff5a0..b94088f7 100644 --- a/modules/core/python/tests/test_attribute.py +++ b/modules/core/python/tests/test_attribute.py @@ -15,7 +15,6 @@ import pytest import sys -from .assets import single_triangle, single_triangle_with_index, cube # noqa: F401 from .utils import address, assert_sharing_raw_data diff --git a/modules/core/python/tests/test_cast_attribute.py b/modules/core/python/tests/test_cast_attribute.py index cd1054e8..1b775465 100644 --- a/modules/core/python/tests/test_cast_attribute.py +++ b/modules/core/python/tests/test_cast_attribute.py @@ -12,7 +12,6 @@ import lagrange import numpy as np -from .assets import single_triangle # noqa: F401 class TestCastAttribute: diff --git a/modules/core/python/tests/test_close_small_holes.py b/modules/core/python/tests/test_close_small_holes.py index 5c1ab613..09255aca 100644 --- a/modules/core/python/tests/test_close_small_holes.py +++ b/modules/core/python/tests/test_close_small_holes.py @@ -11,8 +11,6 @@ # import lagrange -from .assets import cube, cube_with_uv # noqa: F401 - class TestCloseSmallHoles: def test_close_small_holes(self, cube): diff --git a/modules/core/python/tests/test_combine_meshes.py b/modules/core/python/tests/test_combine_meshes.py index f5892e69..119aabca 100644 --- a/modules/core/python/tests/test_combine_meshes.py +++ b/modules/core/python/tests/test_combine_meshes.py @@ -13,8 +13,6 @@ import numpy as np -from .assets import cube # noqa: F401 - class TestCombineMeshes: def test_empty(self): diff --git a/modules/core/python/tests/test_compute_centroid.py b/modules/core/python/tests/test_compute_centroid.py index e76a1266..4f4cce72 100644 --- a/modules/core/python/tests/test_compute_centroid.py +++ b/modules/core/python/tests/test_compute_centroid.py @@ -14,8 +14,6 @@ import numpy as np import pytest -from .assets import single_triangle, cube # noqa: F401 - class 
TestComputeCentroid: def test_cube(self, cube): diff --git a/modules/core/python/tests/test_compute_components.py b/modules/core/python/tests/test_compute_components.py index fe78c5ae..94a3f68f 100644 --- a/modules/core/python/tests/test_compute_components.py +++ b/modules/core/python/tests/test_compute_components.py @@ -12,8 +12,6 @@ import lagrange import numpy as np -from .assets import cube # noqa: F401 - class TestComputeComponents: def test_empty(self): diff --git a/modules/core/python/tests/test_compute_dihedral_angles.py b/modules/core/python/tests/test_compute_dihedral_angles.py index 2ae8d49a..6c29b3d1 100644 --- a/modules/core/python/tests/test_compute_dihedral_angles.py +++ b/modules/core/python/tests/test_compute_dihedral_angles.py @@ -14,8 +14,6 @@ import numpy as np import pytest -from .assets import single_triangle, cube # noqa: F401 - class TestComputeDihedralAngles: def test_cube(self, cube): diff --git a/modules/core/python/tests/test_compute_dijkstra_distance.py b/modules/core/python/tests/test_compute_dijkstra_distance.py index 5b5c2e5e..a9477f69 100644 --- a/modules/core/python/tests/test_compute_dijkstra_distance.py +++ b/modules/core/python/tests/test_compute_dijkstra_distance.py @@ -12,9 +12,6 @@ import lagrange -from .assets import single_triangle, cube # noqa: F401 - - class TestComputeDijkstraDistance: def test_cube(self, cube): mesh = cube diff --git a/modules/core/python/tests/test_compute_edge_lengths.py b/modules/core/python/tests/test_compute_edge_lengths.py index ab95b3be..9952b515 100644 --- a/modules/core/python/tests/test_compute_edge_lengths.py +++ b/modules/core/python/tests/test_compute_edge_lengths.py @@ -14,8 +14,6 @@ import numpy as np import pytest -from .assets import cube # noqa: F401 - class TestComputeEdgeLengths: def test_cube(self, cube): diff --git a/modules/core/python/tests/test_compute_facet_area.py b/modules/core/python/tests/test_compute_facet_area.py index 86d5a4b5..c6cd88e9 100644 --- 
a/modules/core/python/tests/test_compute_facet_area.py +++ b/modules/core/python/tests/test_compute_facet_area.py @@ -15,8 +15,6 @@ import math import pytest -from .assets import single_triangle, cube, single_triangle_with_uv # noqa: F401 - class TestComputeFacetArea: def test_cube(self, cube): diff --git a/modules/core/python/tests/test_compute_facet_circumcenter.py b/modules/core/python/tests/test_compute_facet_circumcenter.py index 93897924..a4942e92 100644 --- a/modules/core/python/tests/test_compute_facet_circumcenter.py +++ b/modules/core/python/tests/test_compute_facet_circumcenter.py @@ -13,8 +13,6 @@ import numpy as np -from .assets import cube_triangular # noqa: F401 - class TestComputeFacetCircumcenter: def check_centroid(self, mesh, centroid_attr_id): diff --git a/modules/core/python/tests/test_compute_facet_normal.py b/modules/core/python/tests/test_compute_facet_normal.py index 6c725f78..79abc1cc 100644 --- a/modules/core/python/tests/test_compute_facet_normal.py +++ b/modules/core/python/tests/test_compute_facet_normal.py @@ -14,8 +14,6 @@ import numpy as np import pytest -from .assets import single_triangle, cube # noqa: F401 - class TestComputeFacetNormal: def validate_facet_normal(self, mesh, normal_attr): diff --git a/modules/core/python/tests/test_compute_mesh_covariance.py b/modules/core/python/tests/test_compute_mesh_covariance.py index 6b3662ec..5bd3d647 100644 --- a/modules/core/python/tests/test_compute_mesh_covariance.py +++ b/modules/core/python/tests/test_compute_mesh_covariance.py @@ -12,8 +12,6 @@ import lagrange import numpy as np -from .assets import cube, single_triangle # noqa: F401 - class TestComputeMeshCovariance: def test_triangle(self, single_triangle): diff --git a/modules/core/python/tests/test_compute_normal.py b/modules/core/python/tests/test_compute_normal.py index fb131b4c..0dcd7381 100644 --- a/modules/core/python/tests/test_compute_normal.py +++ b/modules/core/python/tests/test_compute_normal.py @@ -15,8 +15,6 @@ 
import math import pytest -from .assets import single_triangle, cube # noqa: F401 - class TestComputeNormal: def validate_normal(self, mesh, normal_attr): diff --git a/modules/core/python/tests/test_compute_seam_edges.py b/modules/core/python/tests/test_compute_seam_edges.py index 77ca82f3..56ff5d39 100644 --- a/modules/core/python/tests/test_compute_seam_edges.py +++ b/modules/core/python/tests/test_compute_seam_edges.py @@ -13,8 +13,6 @@ import numpy as np -from .assets import cube_with_uv, cube # noqa: F401 - class TestComputeCentroid: def test_cube(self, cube_with_uv): diff --git a/modules/core/python/tests/test_compute_tangent_bitangent.py b/modules/core/python/tests/test_compute_tangent_bitangent.py index 1d51db32..4db2623f 100644 --- a/modules/core/python/tests/test_compute_tangent_bitangent.py +++ b/modules/core/python/tests/test_compute_tangent_bitangent.py @@ -13,8 +13,6 @@ import pytest -from .assets import single_triangle, single_triangle_with_uv, cube, cube_with_uv # noqa: F401 - class TestComputeTangentBitangent: def check_orthogonality(self, mesh, normal_id, tangent_id, bitangent_id): diff --git a/modules/core/python/tests/test_compute_vertex_normal.py b/modules/core/python/tests/test_compute_vertex_normal.py index c43af739..7f0cf6da 100644 --- a/modules/core/python/tests/test_compute_vertex_normal.py +++ b/modules/core/python/tests/test_compute_vertex_normal.py @@ -15,8 +15,6 @@ import math import pytest -from .assets import cube # noqa: F401 - class TestComputeVertexNormal: def test_cube(self, cube): diff --git a/modules/core/python/tests/test_compute_vertex_valence.py b/modules/core/python/tests/test_compute_vertex_valence.py index 1ea44fc2..3a1827dd 100644 --- a/modules/core/python/tests/test_compute_vertex_valence.py +++ b/modules/core/python/tests/test_compute_vertex_valence.py @@ -13,8 +13,6 @@ import numpy as np -from .assets import single_triangle, cube # noqa: F401 - class TestComputeVertexValence: def test_simple(self, single_triangle): 
diff --git a/modules/core/python/tests/test_detect_degenerate_facets.py b/modules/core/python/tests/test_detect_degenerate_facets.py index cbb22f34..c74d8338 100644 --- a/modules/core/python/tests/test_detect_degenerate_facets.py +++ b/modules/core/python/tests/test_detect_degenerate_facets.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. # import lagrange -from .assets import cube # noqa: F401 import numpy as np diff --git a/modules/core/python/tests/test_extract_submesh.py b/modules/core/python/tests/test_extract_submesh.py new file mode 100644 index 00000000..ecfdb183 --- /dev/null +++ b/modules/core/python/tests/test_extract_submesh.py @@ -0,0 +1,161 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. 
+# +import lagrange + +import numpy as np + + +class TestExtractSubmesh: + def test_extract_submesh_with_tensor(self, cube): + """Test extract_submesh with Tensor input (existing behavior).""" + mesh = cube + # Select first 3 facets using numpy array (automatically converted to Tensor) + selected_facets = np.array([0, 1, 2], dtype=np.uint32) + + submesh = lagrange.extract_submesh( + mesh, + selected_facets, + source_vertex_attr_name="source_vertex", + source_facet_attr_name="source_facet", + map_attributes=True, + ) + + assert submesh.num_facets == 3 + assert submesh.num_vertices <= mesh.num_vertices + assert submesh.has_attribute("source_vertex") + assert submesh.has_attribute("source_facet") + + # Verify the source facet mapping + source_facet = submesh.attribute("source_facet").data + assert np.array_equal(source_facet, [0, 1, 2]) + + def test_extract_submesh_with_list(self, cube): + """Test extract_submesh with Python list input (new behavior).""" + mesh = cube + # Select first 3 facets using Python list + selected_facets = [0, 1, 2] + + submesh = lagrange.extract_submesh( + mesh, + selected_facets, + source_vertex_attr_name="source_vertex", + source_facet_attr_name="source_facet", + map_attributes=True, + ) + + assert submesh.num_facets == 3 + assert submesh.num_vertices <= mesh.num_vertices + assert submesh.has_attribute("source_vertex") + assert submesh.has_attribute("source_facet") + + # Verify the source facet mapping + source_facet = submesh.attribute("source_facet").data + assert np.array_equal(source_facet, [0, 1, 2]) + + def test_extract_submesh_tensor_vs_list(self, cube): + """Verify that tensor and list inputs produce identical results.""" + mesh = cube + selected_indices = [0, 2, 4] + + # Extract using numpy array (tensor) + submesh_tensor = lagrange.extract_submesh( + mesh, + np.array(selected_indices, dtype=np.uint32), + source_vertex_attr_name="source_vertex", + source_facet_attr_name="source_facet", + map_attributes=True, + ) + + # Extract using 
Python list + submesh_list = lagrange.extract_submesh( + mesh, + selected_indices, + source_vertex_attr_name="source_vertex", + source_facet_attr_name="source_facet", + map_attributes=True, + ) + + # Both should produce identical results + assert submesh_tensor.num_facets == submesh_list.num_facets + assert submesh_tensor.num_vertices == submesh_list.num_vertices + assert np.allclose(submesh_tensor.vertices, submesh_list.vertices) + assert np.array_equal(submesh_tensor.facets, submesh_list.facets) + + # Verify source attributes match + source_facet_tensor = submesh_tensor.attribute("source_facet").data + source_facet_list = submesh_list.attribute("source_facet").data + assert np.array_equal(source_facet_tensor, source_facet_list) + + def test_extract_submesh_with_attributes(self, cube_with_uv): + """Test extract_submesh preserves and maps attributes correctly.""" + mesh = cube_with_uv + assert mesh.has_attribute("uv") + + # Select subset of facets using list + selected_facets = [0, 1, 2] + + submesh = lagrange.extract_submesh( + mesh, + selected_facets, + source_vertex_attr_name="source_vertex", + source_facet_attr_name="source_facet", + map_attributes=True, + ) + + assert submesh.num_facets == 3 + assert submesh.has_attribute("source_vertex") + assert submesh.has_attribute("source_facet") + assert submesh.has_attribute("uv") + assert submesh.is_attribute_indexed("uv") + + def test_extract_submesh_single_facet_list(self, cube): + """Test extract_submesh with a single facet in a list.""" + mesh = cube + selected_facets = [3] + + submesh = lagrange.extract_submesh( + mesh, + selected_facets, + source_facet_attr_name="source_facet", + ) + + assert submesh.num_facets == 1 + assert submesh.has_attribute("source_facet") + source_facet = submesh.attribute("source_facet").data + assert source_facet[0] == 3 + + def test_extract_submesh_all_facets_list(self, cube): + """Test extract_submesh with all facets using a list.""" + mesh = cube + selected_facets = 
list(range(mesh.num_facets)) + + submesh = lagrange.extract_submesh( + mesh, + selected_facets, + map_attributes=True, + ) + + assert submesh.num_facets == mesh.num_facets + assert submesh.num_vertices == mesh.num_vertices + + def test_extract_submesh_empty_list(self, cube): + """Test extract_submesh with an empty list.""" + mesh = cube + selected_facets = [] + + submesh = lagrange.extract_submesh( + mesh, + selected_facets, + ) + + assert submesh.num_facets == 0 + assert submesh.num_vertices == 0 diff --git a/modules/core/python/tests/test_filter_attributes.py b/modules/core/python/tests/test_filter_attributes.py index 5cd5881f..2bfe6273 100644 --- a/modules/core/python/tests/test_filter_attributes.py +++ b/modules/core/python/tests/test_filter_attributes.py @@ -12,9 +12,6 @@ import lagrange -from .assets import cube, cube_with_uv # noqa: F401 - - class TestFilterAttributes: def test_included(self, cube_with_uv): mesh = cube_with_uv diff --git a/modules/core/python/tests/test_indexed_attribute.py b/modules/core/python/tests/test_indexed_attribute.py index 4855fddd..15379937 100644 --- a/modules/core/python/tests/test_indexed_attribute.py +++ b/modules/core/python/tests/test_indexed_attribute.py @@ -13,8 +13,6 @@ import numpy as np -from .assets import cube # noqa: F401 - class TestIndexedAttribute: def test_attribute_basics(self, cube): diff --git a/modules/core/python/tests/test_isoline.py b/modules/core/python/tests/test_isoline.py index 79bb239f..69d2b9d5 100644 --- a/modules/core/python/tests/test_isoline.py +++ b/modules/core/python/tests/test_isoline.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. 
# import lagrange -from .assets import single_triangle # noqa: F401 import numpy as np diff --git a/modules/core/python/tests/test_orient_outward.py b/modules/core/python/tests/test_orient_outward.py index e1ad8bc1..175330d0 100644 --- a/modules/core/python/tests/test_orient_outward.py +++ b/modules/core/python/tests/test_orient_outward.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. # import lagrange -from .assets import cube # noqa: F401 import numpy as np diff --git a/modules/core/python/tests/test_permute_facets.py b/modules/core/python/tests/test_permute_facets.py index a61cd4f7..7b23bfb7 100644 --- a/modules/core/python/tests/test_permute_facets.py +++ b/modules/core/python/tests/test_permute_facets.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. # import lagrange -from .assets import cube # noqa: F401 import numpy as np import pytest diff --git a/modules/core/python/tests/test_permute_vertices.py b/modules/core/python/tests/test_permute_vertices.py index caf7d0fd..617632e8 100644 --- a/modules/core/python/tests/test_permute_vertices.py +++ b/modules/core/python/tests/test_permute_vertices.py @@ -14,8 +14,6 @@ import numpy as np import pytest -from .assets import cube, cube_with_uv # noqa: F401 - class TestPermuteVertices: def test_cube(self, cube): diff --git a/modules/core/python/tests/test_remap_vertices.py b/modules/core/python/tests/test_remap_vertices.py index eb936795..0d4f924f 100644 --- a/modules/core/python/tests/test_remap_vertices.py +++ b/modules/core/python/tests/test_remap_vertices.py @@ -14,8 +14,6 @@ import numpy as np import pytest -from .assets import cube, cube_with_uv # noqa: F401 - class TestRemapVertices: def test_cube(self, cube): diff --git a/modules/core/python/tests/test_remove_degenerate_facets.py b/modules/core/python/tests/test_remove_degenerate_facets.py index c214c29d..5648d649 100644 --- a/modules/core/python/tests/test_remove_degenerate_facets.py +++ 
b/modules/core/python/tests/test_remove_degenerate_facets.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. # import lagrange -from .assets import cube, cube_triangular, cube_with_uv # noqa: F401 import numpy as np diff --git a/modules/core/python/tests/test_remove_duplicate_vertices.py b/modules/core/python/tests/test_remove_duplicate_vertices.py index cafcafcd..3bdcb6ad 100644 --- a/modules/core/python/tests/test_remove_duplicate_vertices.py +++ b/modules/core/python/tests/test_remove_duplicate_vertices.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. # import lagrange -from .assets import cube # noqa: F401 import numpy as np diff --git a/modules/core/python/tests/test_remove_null_area_facets.py b/modules/core/python/tests/test_remove_null_area_facets.py index cce8ee3e..e9f838eb 100644 --- a/modules/core/python/tests/test_remove_null_area_facets.py +++ b/modules/core/python/tests/test_remove_null_area_facets.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. # import lagrange -from .assets import cube # noqa: F401 import numpy as np diff --git a/modules/core/python/tests/test_remove_short_edges.py b/modules/core/python/tests/test_remove_short_edges.py index 257b7cce..0e43cc78 100644 --- a/modules/core/python/tests/test_remove_short_edges.py +++ b/modules/core/python/tests/test_remove_short_edges.py @@ -11,8 +11,6 @@ # import lagrange -from .assets import single_triangle # noqa: F401 - class TestRemoveShortEdges: def test_triangle(self, single_triangle): diff --git a/modules/core/python/tests/test_resolve_nonmanifoldness.py b/modules/core/python/tests/test_resolve_nonmanifoldness.py index 25576ab8..2f87b38d 100644 --- a/modules/core/python/tests/test_resolve_nonmanifoldness.py +++ b/modules/core/python/tests/test_resolve_nonmanifoldness.py @@ -10,7 +10,6 @@ # governing permissions and limitations under the License. 
# import lagrange -from .assets import cube # noqa: F401 class TestRemoveVertexNonmanifoldness: diff --git a/modules/core/python/tests/test_select_facets_by_normal_similarity.py b/modules/core/python/tests/test_select_facets_by_normal_similarity.py index 6ed53d1f..8707e85a 100644 --- a/modules/core/python/tests/test_select_facets_by_normal_similarity.py +++ b/modules/core/python/tests/test_select_facets_by_normal_similarity.py @@ -11,8 +11,6 @@ # import lagrange -from .assets import single_triangle # noqa: F401 - class TestSelectFacetsByNormalSimilarity: def test_cube(self, single_triangle): diff --git a/modules/core/python/tests/test_select_facets_in_frustum.py b/modules/core/python/tests/test_select_facets_in_frustum.py index 03f09aa7..40af7763 100644 --- a/modules/core/python/tests/test_select_facets_in_frustum.py +++ b/modules/core/python/tests/test_select_facets_in_frustum.py @@ -12,8 +12,6 @@ import lagrange import numpy as np -from .assets import cube # noqa: F401 - class TestSelectFacetsInFrustum: def test_cube_big_frustum(self, cube): diff --git a/modules/core/python/tests/test_split_long_edges.py b/modules/core/python/tests/test_split_long_edges.py index 9e8f66ff..73a57782 100644 --- a/modules/core/python/tests/test_split_long_edges.py +++ b/modules/core/python/tests/test_split_long_edges.py @@ -12,8 +12,6 @@ import lagrange import numpy as np -from .assets import single_triangle # noqa: F401 - class TestSplitLongEdges: def test_triangle(self, single_triangle): diff --git a/modules/core/python/tests/test_surface_mesh.py b/modules/core/python/tests/test_surface_mesh.py index 0f5d0923..d79add63 100644 --- a/modules/core/python/tests/test_surface_mesh.py +++ b/modules/core/python/tests/test_surface_mesh.py @@ -14,7 +14,6 @@ import numpy as np import pytest -from .assets import single_triangle, single_triangle_with_index, cube # noqa: F401 from .utils import address, assert_sharing_raw_data diff --git a/modules/core/python/tests/test_thicken_and_close_mesh.py 
b/modules/core/python/tests/test_thicken_and_close_mesh.py index 38c95a1b..9eebea03 100644 --- a/modules/core/python/tests/test_thicken_and_close_mesh.py +++ b/modules/core/python/tests/test_thicken_and_close_mesh.py @@ -11,8 +11,6 @@ # import lagrange -from .assets import single_triangle, cube # noqa: F401 - class TestThickenAndCloseMesh: def test_triangle(self, single_triangle): diff --git a/modules/core/python/tests/test_transform_mesh.py b/modules/core/python/tests/test_transform_mesh.py index 817d5cf2..539edb6a 100644 --- a/modules/core/python/tests/test_transform_mesh.py +++ b/modules/core/python/tests/test_transform_mesh.py @@ -13,8 +13,6 @@ import numpy as np -from .assets import single_triangle # noqa: F401 - class TestTransformMesh: def test_identity(self, single_triangle): diff --git a/modules/core/python/tests/test_triangulate_polygonal_facets.py b/modules/core/python/tests/test_triangulate_polygonal_facets.py index c8396678..61fc3c7d 100644 --- a/modules/core/python/tests/test_triangulate_polygonal_facets.py +++ b/modules/core/python/tests/test_triangulate_polygonal_facets.py @@ -12,8 +12,6 @@ import lagrange import pytest -from .assets import cube # noqa: F401 - class TestTriangulatePolygonalFacets: def test_empty_mesh(self): diff --git a/modules/core/python/tests/test_unify_index_buffer.py b/modules/core/python/tests/test_unify_index_buffer.py index 2c1caf5c..58d4c1e3 100644 --- a/modules/core/python/tests/test_unify_index_buffer.py +++ b/modules/core/python/tests/test_unify_index_buffer.py @@ -11,8 +11,6 @@ # import lagrange -from .assets import cube # noqa: F401 - class TestUnifyIndexBuffer: def test_empty_mesh(self): diff --git a/modules/core/python/tests/test_weld_indexed_attribute.py b/modules/core/python/tests/test_weld_indexed_attribute.py index 793fee39..a22b0515 100644 --- a/modules/core/python/tests/test_weld_indexed_attribute.py +++ b/modules/core/python/tests/test_weld_indexed_attribute.py @@ -13,8 +13,6 @@ import numpy as np -from 
.assets import cube # noqa: F401 - class TestWeldIndexedAttribute: def test_unique_values(self, cube): diff --git a/modules/core/src/Attribute.cpp b/modules/core/src/Attribute.cpp index be74e750..1341ab2b 100644 --- a/modules/core/src/Attribute.cpp +++ b/modules/core/src/Attribute.cpp @@ -527,24 +527,27 @@ lagrange::span Attribute::get_all() const return m_const_view.first(m_num_elements * get_num_channels()); } +namespace internal { + template -lagrange::span Attribute::get_all_with_padding() const +lagrange::span get_all_unpoisoned(const Attribute& attr) { - if (!is_external()) { + if (!attr.is_external()) { #if LAGRANGE_ASAN_ENABLED // Unpoison the [size, capacity) region so that external libraries (e.g. Embree) can safely - // read padding entries without triggering ASan's container-overflow detection. - if (m_data.capacity() > m_data.size()) { + // perform SIMD reads that overshoot the logical buffer size. + if (attr.m_data.capacity() > attr.m_data.size()) { ASAN_UNPOISON_MEMORY_REGION( - m_data.data() + m_data.size(), - (m_data.capacity() - m_data.size()) * sizeof(ValueType)); + attr.m_data.data() + attr.m_data.size(), + (attr.m_data.capacity() - attr.m_data.size()) * sizeof(ValueType)); } #endif - return {m_data.data(), m_data.capacity()}; } - return get_all(); + return attr.get_all(); } +} // namespace internal + template lagrange::span Attribute::ref_all() { @@ -687,6 +690,11 @@ void Attribute::clear_views() #define LA_X_attr(_, ValueType) template class LA_CORE_API Attribute; LA_ATTRIBUTE_X(attr, 0) +#define LA_X_get_all_unpoisoned(_, ValueType) \ + template LA_CORE_API lagrange::span internal::get_all_unpoisoned( \ + const Attribute&); +LA_ATTRIBUTE_X(get_all_unpoisoned, 0) + // Workaround for cartesian product of attr type with itself... 
// clang-format off #define LA_ATTRIBUTE2_X(mode, data) \ diff --git a/modules/core/src/SurfaceMesh.cpp b/modules/core/src/SurfaceMesh.cpp index ceb8e62d..a40888e0 100644 --- a/modules/core/src/SurfaceMesh.cpp +++ b/modules/core/src/SurfaceMesh.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include #include @@ -2307,8 +2308,8 @@ void SurfaceMesh::compress_if_regular() delete_attribute(s_reserved_names.facet_to_first_corner(), AttributeDeletePolicy::Force); delete_attribute(s_reserved_names.corner_to_facet(), AttributeDeletePolicy::Force); m_vertex_per_facet = nvpf; + la_debug_assert(is_regular()); } - la_debug_assert(is_regular()); } //////////////////////////////////////////////////////////////////////////////// @@ -3590,6 +3591,175 @@ AttributeId SurfaceMesh::wrap_as_attribute_internal( } } +//////////////////////////////////////////////////////////////////////////////// +// SurfaceMeshInfo conversion +//////////////////////////////////////////////////////////////////////////////// + +namespace internal { + +template +SurfaceMeshInfo from_surface_mesh(const SurfaceMesh& mesh) +{ + SurfaceMeshInfo info; + info.scalar_type_size = sizeof(Scalar); + info.index_type_size = sizeof(Index); + info.num_vertices = mesh.m_num_vertices; + info.num_facets = mesh.m_num_facets; + info.num_corners = mesh.m_num_corners; + info.num_edges = mesh.m_num_edges; + info.dimension = mesh.m_dimension; + info.vertex_per_facet = mesh.m_vertex_per_facet; + + seq_foreach_named_attribute_read(mesh, [&](std::string_view name, auto&& attr) { + using AttributeType = std::decay_t; + + AttributeInfo ai; + ai.name = name; + ai.attribute_id = mesh.get_attribute_id(name); + ai.value_type = static_cast(attr.get_value_type()); + ai.element_type = static_cast(attr.get_element_type()); + ai.usage = static_cast(attr.get_usage()); + ai.num_channels = attr.get_num_channels(); + + if constexpr (AttributeType::IsIndexed) { + ai.is_indexed = true; + const auto& values = attr.values(); + 
const auto& indices = attr.indices(); + ai.num_elements = 0; + ai.values_num_elements = values.get_num_elements(); + ai.values_num_channels = values.get_num_channels(); + ai.indices_num_elements = indices.get_num_elements(); + ai.index_type_size = sizeof(Index); + + auto vals = values.get_all(); + ai.values_bytes = span( + reinterpret_cast(vals.data()), + vals.size() * sizeof(typename AttributeType::ValueType)); + auto inds = indices.get_all(); + ai.indices_bytes = span( + reinterpret_cast(inds.data()), + inds.size() * sizeof(Index)); + } else { + ai.is_indexed = false; + ai.num_elements = attr.get_num_elements(); + + auto data = attr.get_all(); + ai.data_bytes = span( + reinterpret_cast(data.data()), + data.size() * sizeof(typename AttributeType::ValueType)); + } + + info.attributes.push_back(std::move(ai)); + }); + + return info; +} + +template +SurfaceMesh to_surface_mesh(const SurfaceMeshInfo& info) +{ + la_runtime_assert( + info.scalar_type_size == sizeof(Scalar), + "Scalar type size mismatch: expected " + std::to_string(sizeof(Scalar)) + ", got " + + std::to_string(info.scalar_type_size)); + la_runtime_assert( + info.index_type_size == sizeof(Index), + "Index type size mismatch: expected " + std::to_string(sizeof(Index)) + ", got " + + std::to_string(info.index_type_size)); + + using MeshType = SurfaceMesh; + MeshType mesh(typename MeshType::BareMeshTag{}); + + mesh.m_num_vertices = static_cast(info.num_vertices); + mesh.m_num_facets = static_cast(info.num_facets); + mesh.m_num_corners = static_cast(info.num_corners); + mesh.m_num_edges = static_cast(info.num_edges); + mesh.m_dimension = static_cast(info.dimension); + mesh.m_vertex_per_facet = static_cast(info.vertex_per_facet); + + // Sort attributes by attribute_id to restore original creation order + std::vector sorted_attrs; + sorted_attrs.reserve(info.attributes.size()); + for (const auto& ai : info.attributes) { + sorted_attrs.push_back(&ai); + } + std::sort(sorted_attrs.begin(), sorted_attrs.end(), 
[](const auto* a, const auto* b) { + return a->attribute_id < b->attribute_id; + }); + + for (const auto* ai : sorted_attrs) { + auto value_type = static_cast(ai->value_type); + auto element = static_cast(ai->element_type); + auto usage = static_cast(ai->usage); + + AttributeId id = invalid_attribute_id(); + + if (ai->is_indexed) { + switch (value_type) { +#define LA_X_restore_indexed(_, ValueType) \ + case make_attribute_value_type(): { \ + auto values = span( \ + reinterpret_cast(ai->values_bytes.data()), \ + ai->values_bytes.size() / sizeof(ValueType)); \ + auto indices = span( \ + reinterpret_cast(ai->indices_bytes.data()), \ + ai->indices_bytes.size() / sizeof(Index)); \ + id = mesh.template create_attribute_internal( \ + ai->name, \ + element, \ + usage, \ + ai->values_num_channels, \ + values, \ + indices); \ + break; \ + } + LA_ATTRIBUTE_X(restore_indexed, 0) +#undef LA_X_restore_indexed + } + } else { + switch (value_type) { +#define LA_X_restore_attr(_, ValueType) \ + case make_attribute_value_type(): { \ + auto data = span( \ + reinterpret_cast(ai->data_bytes.data()), \ + ai->data_bytes.size() / sizeof(ValueType)); \ + id = mesh.template create_attribute_internal( \ + ai->name, \ + element, \ + usage, \ + ai->num_channels, \ + data); \ + break; \ + } + LA_ATTRIBUTE_X(restore_attr, 0) +#undef LA_X_restore_attr + } + } + + // Set reserved attribute IDs + constexpr int N = MeshType::ReservedAttributeIds::size(); + for (int i = 0; i < N; ++i) { + if (ai->name == MeshType::s_reserved_names.items[i]) { + mesh.m_reserved_ids.items[i] = id; + break; + } + } + } + + return mesh; +} + +// Explicit instantiations for from_surface_mesh / to_surface_mesh +#define LA_X_mesh_info(_, Scalar, Index) \ + template LA_CORE_API SurfaceMeshInfo from_surface_mesh( \ + const SurfaceMesh&); \ + template LA_CORE_API SurfaceMesh to_surface_mesh( \ + const SurfaceMeshInfo&); +LA_SURFACE_MESH_X(mesh_info, 0) +#undef LA_X_mesh_info + +} // namespace internal + 
//////////////////////////////////////////////////////////////////////////////// // Explicit template instantiations //////////////////////////////////////////////////////////////////////////////// diff --git a/modules/core/src/compute_facet_facet_adjacency.cpp b/modules/core/src/compute_facet_facet_adjacency.cpp new file mode 100644 index 00000000..5a95bf66 --- /dev/null +++ b/modules/core/src/compute_facet_facet_adjacency.cpp @@ -0,0 +1,63 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include +#include + +// clang-format off +#include +#include +#include +// clang-format on + +#include +#include + +namespace lagrange { + +template +AdjacencyList compute_facet_facet_adjacency(SurfaceMesh& mesh) +{ + if (!mesh.has_edges()) { + mesh.initialize_edges(); + } + + const Index num_facets = mesh.get_num_facets(); + + using ValueArray = typename AdjacencyList::ValueArray; + using IndexArray = typename AdjacencyList::IndexArray; + + // Count neighbors per facet using foreach_facet_around_facet, which already + // skips self and handles non-manifold edges (complete clique). + IndexArray adjacency_index(num_facets + 1, 0); + tbb::parallel_for(Index(0), num_facets, [&](Index f) { + mesh.foreach_facet_around_facet(f, [&](Index) { adjacency_index[f + 1]++; }); + }); + + // Prefix sum to get offsets. 
+ std::partial_sum(adjacency_index.begin(), adjacency_index.end(), adjacency_index.begin()); + + // Fill adjacency data. + ValueArray adjacency_data(adjacency_index.back()); + tbb::parallel_for(Index(0), num_facets, [&](Index f) { + size_t pos = adjacency_index[f]; + mesh.foreach_facet_around_facet(f, [&](Index g) { adjacency_data[pos++] = g; }); + }); + + return AdjacencyList(std::move(adjacency_data), std::move(adjacency_index)); +} + +#define LA_X_compute_facet_facet_adjacency(_, Scalar, Index) \ + template LA_CORE_API AdjacencyList compute_facet_facet_adjacency( \ + SurfaceMesh&); +LA_SURFACE_MESH_X(compute_facet_facet_adjacency, 0) + +} // namespace lagrange diff --git a/modules/core/src/internal/get_uv_attribute.cpp b/modules/core/src/internal/get_uv_attribute.cpp index 8b1bc198..0bb2f891 100644 --- a/modules/core/src/internal/get_uv_attribute.cpp +++ b/modules/core/src/internal/get_uv_attribute.cpp @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ #include +#include #include #include #include @@ -21,7 +22,10 @@ namespace lagrange::internal { template -AttributeId get_uv_id(const SurfaceMesh& mesh, std::string_view uv_attribute_name) +AttributeId get_uv_id( + const SurfaceMesh& mesh, + std::string_view uv_attribute_name, + UVMeshOptions::ElementTypes element_types) { AttributeId uv_attr_id; if (uv_attribute_name.empty()) { @@ -42,28 +46,45 @@ AttributeId get_uv_id(const SurfaceMesh& mesh, std::string_view u 2); } if (uv_attr_id == invalid_attribute_id()) { - // Still no UV attribute found. Look for a corner attribute and issue a warning if one - // is found. + // Still no UV attribute found. Look for a corner attribute. 
auto corner_attr_id = internal::find_matching_attribute( mesh, "", AttributeElement::Corner, AttributeUsage::UV, 2); - // Ideally we would be able to extract a proxy mesh from a corner attribute, but this - // requires creating a temporary buffer for indices (since corner indices are implicit - // for pure triangle or pure quad meshes). But maybe we should bite the bullet and do - // this? if (corner_attr_id != invalid_attribute_id()) { - logger().warn( - "Unable to find an indexed or per-vertex UV attribute to extract a mesh from. " - "Instead, we found a corner UV attribute '{}'. Consider converting it to a " - "vertex or indexed attribute for compatibility.", - mesh.get_attribute_name(corner_attr_id)); + if (element_types == UVMeshOptions::ElementTypes::All) { + uv_attr_id = corner_attr_id; + } else { + uv_attr_id = invalid_attribute_id(); + logger().warn( + "Unable to find an indexed or per-vertex UV attribute. Found a corner UV " + "attribute instead. Set element_types to UVMeshOptions::ElementTypes::All " + "to enable corner attribute support."); + } } } } else { uv_attr_id = mesh.get_attribute_id(uv_attribute_name); + const auto& attr = mesh.get_attribute_base(uv_attr_id); + la_runtime_assert( + attr.get_value_type() == make_attribute_value_type(), + "UV attribute value type does not match the requested UVScalar type."); + la_runtime_assert( + attr.get_num_channels() == 2, + "UV attribute must have exactly 2 channels."); + la_runtime_assert( + attr.get_element_type() == AttributeElement::Vertex || + attr.get_element_type() == AttributeElement::Indexed || + attr.get_element_type() == AttributeElement::Corner, + "UV attribute must be a vertex, indexed, or corner attribute."); + if (element_types != UVMeshOptions::ElementTypes::All) { + la_runtime_assert( + attr.get_element_type() != AttributeElement::Corner, + "UV attribute is a corner attribute. 
Set element_types to " + "UVMeshOptions::ElementTypes::All to enable corner attribute support."); + } } return uv_attr_id; } @@ -119,7 +140,8 @@ std::tuple, VectorView> ref_uv_attribute( #define LA_X_get_uv_attribute(UVScalar, Scalar, Index) \ template LA_CORE_API AttributeId get_uv_id( \ const SurfaceMesh&, \ - std::string_view); \ + std::string_view, \ + UVMeshOptions::ElementTypes); \ template LA_CORE_API std::tuple, ConstVectorView> \ get_uv_attribute( \ const SurfaceMesh&, \ diff --git a/modules/core/src/mesh_bbox.cpp b/modules/core/src/mesh_bbox.cpp new file mode 100644 index 00000000..0a5a7311 --- /dev/null +++ b/modules/core/src/mesh_bbox.cpp @@ -0,0 +1,45 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ + +#include + +#include +#include +#include + +namespace lagrange { + +template +Eigen::AlignedBox(Dimension)> mesh_bbox( + const SurfaceMesh& mesh) +{ + static_assert(Dimension == 2 || Dimension == 3, "Only 2D and 3D meshes are supported."); + la_runtime_assert( + mesh.get_dimension() == Dimension, + "Mesh dimension does not match the requested bounding box dimension."); + const auto vertices = vertex_view(mesh); + Eigen::AlignedBox(Dimension)> bbox; + if (vertices.rows() > 0) { + bbox.min() = vertices.colwise().minCoeff().transpose(); + bbox.max() = vertices.colwise().maxCoeff().transpose(); + } + return bbox; +} + +#define LA_X_mesh_bbox(_, Scalar, Index) \ + template LA_CORE_API Eigen::AlignedBox mesh_bbox<2u, Scalar, Index>( \ + const SurfaceMesh&); \ + template LA_CORE_API Eigen::AlignedBox mesh_bbox<3u, Scalar, Index>( \ + const SurfaceMesh&); +LA_SURFACE_MESH_X(mesh_bbox, 0) + +} // namespace lagrange diff --git a/modules/core/src/normalize_meshes.cpp b/modules/core/src/normalize_meshes.cpp index c72228ea..2c698c57 100644 --- a/modules/core/src/normalize_meshes.cpp +++ b/modules/core/src/normalize_meshes.cpp @@ -10,7 +10,11 @@ * governing permissions and limitations under the License. */ +#include + +LA_IGNORE_MAYBE_UNINITIALIZED_START #include +LA_IGNORE_MAYBE_UNINITIALIZED_END #include #include diff --git a/modules/core/src/uv_mesh.cpp b/modules/core/src/uv_mesh.cpp index a18a5afa..48b351d6 100644 --- a/modules/core/src/uv_mesh.cpp +++ b/modules/core/src/uv_mesh.cpp @@ -14,35 +14,69 @@ #include #include #include +#include + +#include namespace lagrange { +namespace { + +/// Add facets to the UV mesh using iota indices (for corner attributes). 
+template +void add_iota_facets(SurfaceMesh& uv_mesh, const SurfaceMesh& mesh) +{ + uv_mesh.add_hybrid( + mesh.get_num_facets(), + [&](Index f) { return mesh.get_facet_size(f); }, + [&](Index f, span t) { + std::iota(t.begin(), t.end(), mesh.get_facet_corner_begin(f)); + }); +} + +} // namespace + template SurfaceMesh uv_mesh_ref( SurfaceMesh& mesh, const UVMeshOptions& options) { - auto [uv_values, uv_indices] = - internal::ref_uv_attribute(mesh, options.uv_attribute_name); + AttributeId uv_attr_id = internal::get_uv_id( + mesh, + options.uv_attribute_name, + options.element_types); + la_runtime_assert(uv_attr_id != invalid_attribute_id(), "No UV attribute found."); SurfaceMesh uv_mesh(2); - uv_mesh.wrap_as_vertices( - {uv_values.data(), static_cast(uv_values.size())}, - static_cast(uv_values.rows())); - - if (mesh.is_regular()) { - uv_mesh.wrap_as_facets( - {uv_indices.data(), static_cast(uv_indices.size())}, - mesh.get_num_facets(), - mesh.get_vertex_per_facet()); + + if (mesh.get_attribute_base(uv_attr_id).get_element_type() == AttributeElement::Corner) { + auto& uv_attr = mesh.template ref_attribute(uv_attr_id); + auto uv_values = matrix_ref(uv_attr); + uv_mesh.wrap_as_vertices( + {uv_values.data(), static_cast(uv_values.size())}, + static_cast(uv_values.rows())); + add_iota_facets(uv_mesh, mesh); } else { - AttributeId facet_offset_id = mesh.attr_id_facet_to_first_corner(); - auto& facet_offset = mesh.template ref_attribute(facet_offset_id); - uv_mesh.wrap_as_facets( - facet_offset.ref_all(), - mesh.get_num_facets(), - {uv_indices.data(), static_cast(uv_indices.size())}, - mesh.get_num_corners()); + auto [uv_values, uv_indices] = + internal::ref_uv_attribute(mesh, options.uv_attribute_name); + uv_mesh.wrap_as_vertices( + {uv_values.data(), static_cast(uv_values.size())}, + static_cast(uv_values.rows())); + + if (mesh.is_regular()) { + uv_mesh.wrap_as_facets( + {uv_indices.data(), static_cast(uv_indices.size())}, + mesh.get_num_facets(), + 
mesh.get_vertex_per_facet()); + } else { + AttributeId facet_offset_id = mesh.attr_id_facet_to_first_corner(); + auto& facet_offset = mesh.template ref_attribute(facet_offset_id); + uv_mesh.wrap_as_facets( + facet_offset.ref_all(), + mesh.get_num_facets(), + {uv_indices.data(), static_cast(uv_indices.size())}, + mesh.get_num_corners()); + } } return uv_mesh; @@ -53,27 +87,42 @@ SurfaceMesh uv_mesh_view( const SurfaceMesh& mesh, const UVMeshOptions& options) { - auto [uv_values, uv_indices] = - internal::get_uv_attribute(mesh, options.uv_attribute_name); + AttributeId uv_attr_id = internal::get_uv_id( + mesh, + options.uv_attribute_name, + options.element_types); + la_runtime_assert(uv_attr_id != invalid_attribute_id(), "No UV attribute found."); SurfaceMesh uv_mesh(2); - uv_mesh.wrap_as_const_vertices( - {uv_values.data(), static_cast(uv_values.size())}, - static_cast(uv_values.rows())); - - if (mesh.is_regular()) { - uv_mesh.wrap_as_const_facets( - {uv_indices.data(), static_cast(uv_indices.size())}, - mesh.get_num_facets(), - mesh.get_vertex_per_facet()); + + if (mesh.get_attribute_base(uv_attr_id).get_element_type() == AttributeElement::Corner) { + const auto& uv_attr = mesh.template get_attribute(uv_attr_id); + auto uv_values = matrix_view(uv_attr); + uv_mesh.wrap_as_const_vertices( + {uv_values.data(), static_cast(uv_values.size())}, + static_cast(uv_values.rows())); + add_iota_facets(uv_mesh, mesh); } else { - AttributeId facet_offset_id = mesh.attr_id_facet_to_first_corner(); - const auto& facet_offset = mesh.template get_attribute(facet_offset_id); - uv_mesh.wrap_as_const_facets( - facet_offset.get_all(), - mesh.get_num_facets(), - {uv_indices.data(), static_cast(uv_indices.size())}, - mesh.get_num_corners()); + auto [uv_values, uv_indices] = + internal::get_uv_attribute(mesh, options.uv_attribute_name); + uv_mesh.wrap_as_const_vertices( + {uv_values.data(), static_cast(uv_values.size())}, + static_cast(uv_values.rows())); + + if (mesh.is_regular()) { + 
uv_mesh.wrap_as_const_facets( + {uv_indices.data(), static_cast(uv_indices.size())}, + mesh.get_num_facets(), + mesh.get_vertex_per_facet()); + } else { + AttributeId facet_offset_id = mesh.attr_id_facet_to_first_corner(); + const auto& facet_offset = mesh.template get_attribute(facet_offset_id); + uv_mesh.wrap_as_const_facets( + facet_offset.get_all(), + mesh.get_num_facets(), + {uv_indices.data(), static_cast(uv_indices.size())}, + mesh.get_num_corners()); + } } return uv_mesh; diff --git a/modules/core/src/weld_indexed_attribute.cpp b/modules/core/src/weld_indexed_attribute.cpp index d534eec0..d206546f 100644 --- a/modules/core/src/weld_indexed_attribute.cpp +++ b/modules/core/src/weld_indexed_attribute.cpp @@ -346,6 +346,20 @@ void weld_indexed_attribute( options.exclude_vertices, options.merge_across_vertices, [&, eps_rel, eps_abs, cos_angle_abs](Index i, Index j) -> bool { + if (values.row(i) == values.row(j)) { + return true; + } + const bool invalid_i = + (values.row(i).array() == lagrange::invalid()).any(); + const bool invalid_j = + (values.row(j).array() == lagrange::invalid()).any(); + if (invalid_i || invalid_j) { + // Along with the equality check above, this ensures that we only merge + // invalid values with other invalid values, and we don't merge valid values + // with invalid values. + return false; + } + la_debug_assert(values.row(i).allFinite() && values.row(j).allFinite()); return allclose( values.row(i).template cast(), values.row(j).template cast(), diff --git a/modules/core/tests/test_compute_facet_facet_adjacency.cpp b/modules/core/tests/test_compute_facet_facet_adjacency.cpp new file mode 100644 index 00000000..a2388f2d --- /dev/null +++ b/modules/core/tests/test_compute_facet_facet_adjacency.cpp @@ -0,0 +1,208 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include + +#include + +#include + +TEST_CASE("compute_facet_facet_adjacency", "[surface][adjacency]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + SECTION("single triangle") + { + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_triangle(0, 1, 2); + + auto adj = compute_facet_facet_adjacency(mesh); + REQUIRE(adj.get_num_entries() == 1); + REQUIRE(adj.get_neighbors(0).size() == 0); + } + + SECTION("two triangles sharing an edge") + { + // 2 + // / \. + // 0--1 + // \ / + // 3 + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0.5, 1, 0}); + mesh.add_vertex({0.5, -1, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(1, 0, 3); + + auto adj = compute_facet_facet_adjacency(mesh); + REQUIRE(adj.get_num_entries() == 2); + + auto n0 = adj.get_neighbors(0); + auto n1 = adj.get_neighbors(1); + REQUIRE(n0.size() == 1); + REQUIRE(n1.size() == 1); + REQUIRE(n0[0] == 1); + REQUIRE(n1[0] == 0); + } + + SECTION("triangle strip") + { + // 0--2--4 + // |\ |\ | + // | \| \| + // 1--3--5 + SurfaceMesh mesh; + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 1, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({2, 1, 0}); + mesh.add_vertex({2, 0, 0}); + mesh.add_triangle(0, 1, 3); // f0 + mesh.add_triangle(0, 3, 2); // f1 + mesh.add_triangle(2, 3, 5); // f2 + mesh.add_triangle(2, 5, 4); // f3 + + auto adj = compute_facet_facet_adjacency(mesh); + REQUIRE(adj.get_num_entries() 
== 4); + + // f0 shares edge (0,3) with f1, edge (1,3) with nobody + // f1 shares edge (0,3) with f0, edge (2,3) with f2 + // f2 shares edge (2,3) with f1, edge (3,5) with nobody, edge (2,5) with f3 + // f3 shares edge (2,5) with f2 + + auto n0 = adj.get_neighbors(0); + auto n1 = adj.get_neighbors(1); + auto n2 = adj.get_neighbors(2); + auto n3 = adj.get_neighbors(3); + + REQUIRE(n0.size() == 1); + CHECK(n0[0] == 1); + + REQUIRE(n1.size() == 2); + std::set s1(n1.begin(), n1.end()); + CHECK(s1.count(0) == 1); + CHECK(s1.count(2) == 1); + + REQUIRE(n2.size() == 2); + std::set s2(n2.begin(), n2.end()); + CHECK(s2.count(1) == 1); + CHECK(s2.count(3) == 1); + + REQUIRE(n3.size() == 1); + CHECK(n3[0] == 2); + } + + SECTION("isolated triangles") + { + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({2, 0, 0}); + mesh.add_vertex({3, 0, 0}); + mesh.add_vertex({2, 1, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(3, 4, 5); + + auto adj = compute_facet_facet_adjacency(mesh); + REQUIRE(adj.get_num_entries() == 2); + REQUIRE(adj.get_neighbors(0).size() == 0); + REQUIRE(adj.get_neighbors(1).size() == 0); + } + + SECTION("non-manifold edge (3 facets sharing an edge)") + { + // Three triangles sharing edge (0,1): + // f0: (0, 1, 2) + // f1: (0, 1, 3) + // f2: (0, 1, 4) + // Each facet should be adjacent to the other two. 
+ SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({0, -1, 0}); + mesh.add_vertex({0, 0, 1}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(0, 1, 3); + mesh.add_triangle(0, 1, 4); + + auto adj = compute_facet_facet_adjacency(mesh); + REQUIRE(adj.get_num_entries() == 3); + + for (Index f = 0; f < 3; ++f) { + auto neighbors = adj.get_neighbors(f); + std::set nset(neighbors.begin(), neighbors.end()); + REQUIRE(nset.size() == 2); + for (Index g = 0; g < 3; ++g) { + if (g != f) { + CHECK(nset.count(g) == 1); + } + } + } + } + + SECTION("degenerate facet incident to same edge twice") + { + // A quad facet {0, 1, 2, 1} visits edge (0,1) twice via two different + // corners. foreach_facet_around_facet() reports the neighbor once per + // shared edge reference, so neighbors are NOT deduplicated. + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); // v0 + mesh.add_vertex({1, 0, 0}); // v1 + mesh.add_vertex({0, 1, 0}); // v2 + mesh.add_vertex({1, 1, 0}); // v3 + mesh.add_quad(0, 1, 2, 1); // degenerate quad (v1 appears twice) + mesh.add_quad(0, 1, 3, 2); // normal quad sharing edge (0,1) + + auto adj = compute_facet_facet_adjacency(mesh); + REQUIRE(adj.get_num_entries() == 2); + + // f0 ({0,1,2,1}) references edge (0,1) from two corners, so f1 appears + // twice in f0's neighbor list. f1 ({0,1,3,2}) references edge (0,1) once, + // but foreach_facet_around_edge reports f0 twice (once per corner of f0 on + // that edge), so f0 also appears twice in f1's neighbor list. + // No self-neighbors should exist. 
+ auto n0 = adj.get_neighbors(0); + auto n1 = adj.get_neighbors(1); + REQUIRE(n0.size() == 2); + CHECK(n0[0] == 1); + CHECK(n0[1] == 1); + REQUIRE(n1.size() == 2); + CHECK(n1[0] == 0); + CHECK(n1[1] == 0); + } + + SECTION("edges already initialized") + { + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0.5, 1, 0}); + mesh.add_vertex({0.5, -1, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(1, 0, 3); + mesh.initialize_edges(); + + auto adj = compute_facet_facet_adjacency(mesh); + auto n0 = adj.get_neighbors(0); + REQUIRE(n0.size() == 1); + REQUIRE(n0[0] == 1); + } +} diff --git a/modules/core/tests/test_compute_normal.cpp b/modules/core/tests/test_compute_normal.cpp index fc04f6c4..24f23578 100644 --- a/modules/core/tests/test_compute_normal.cpp +++ b/modules/core/tests/test_compute_normal.cpp @@ -616,8 +616,10 @@ TEST_CASE("legacy::compute_normal", "[mesh][attribute][normal][legacy]" LA_SLOW_ lagrange::testing::FloatPointBehavior::XcodeGreaterThan14) { // For some reason x.cross(x) is not zero on arm64 Xcode 14+. It's around 1e-17, which // is enough for stableNormalize() to produce a non-zero first row. - REQUIRE(normal_values(Eigen::seq(1, Eigen::last), Eigen::all).isZero(0)); - REQUIRE(triangle_normals(Eigen::seq(1, Eigen::last), Eigen::all).isZero(0)); + REQUIRE(normal_values(Eigen::seq(1, Eigen::indexing::last), Eigen::indexing::all) + .isZero(0)); + REQUIRE(triangle_normals(Eigen::seq(1, Eigen::indexing::last), Eigen::indexing::all) + .isZero(0)); } else { REQUIRE(normal_values.isZero(0)); REQUIRE(triangle_normals.isZero(0)); diff --git a/modules/core/tests/test_mesh_bbox.cpp b/modules/core/tests/test_mesh_bbox.cpp new file mode 100644 index 00000000..a38c66fd --- /dev/null +++ b/modules/core/tests/test_mesh_bbox.cpp @@ -0,0 +1,78 @@ +/* + * Copyright 2026 Adobe. All rights reserved. 
+ * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include +#include + +TEST_CASE("mesh_bbox 3D", "[core][mesh_bbox]") +{ + using Scalar = double; + using Index = uint32_t; + + SECTION("empty mesh") + { + lagrange::SurfaceMesh mesh(3); + auto bbox = lagrange::mesh_bbox<3>(mesh); + REQUIRE(bbox.isEmpty()); + } + + SECTION("single vertex") + { + lagrange::SurfaceMesh mesh; + mesh.add_vertex({1, 2, 3}); + auto bbox = lagrange::mesh_bbox<3>(mesh); + REQUIRE(bbox.min().x() == 1); + REQUIRE(bbox.min().y() == 2); + REQUIRE(bbox.min().z() == 3); + REQUIRE(bbox.max() == bbox.min()); + } + + SECTION("triangle") + { + lagrange::SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_triangle(0, 1, 2); + auto bbox = lagrange::mesh_bbox<3>(mesh); + REQUIRE(bbox.min().x() == 0); + REQUIRE(bbox.min().y() == 0); + REQUIRE(bbox.min().z() == 0); + REQUIRE(bbox.max().x() == 1); + REQUIRE(bbox.max().y() == 1); + REQUIRE(bbox.max().z() == 0); + } +} + +TEST_CASE("mesh_bbox 2D", "[core][mesh_bbox]") +{ + using Scalar = float; + using Index = uint32_t; + + SECTION("empty mesh") + { + lagrange::SurfaceMesh mesh(2); + auto bbox = lagrange::mesh_bbox<2>(mesh); + REQUIRE(bbox.isEmpty()); + } + + SECTION("two vertices") + { + lagrange::SurfaceMesh mesh(2); + mesh.add_vertex({-1, 3}); + mesh.add_vertex({4, -2}); + auto bbox = lagrange::mesh_bbox<2>(mesh); + REQUIRE(bbox.min().x() == -1); + 
REQUIRE(bbox.min().y() == -2); + REQUIRE(bbox.max().x() == 4); + REQUIRE(bbox.max().y() == 3); + } +} diff --git a/modules/core/tests/test_set_invalid_indexed_values.cpp b/modules/core/tests/test_set_invalid_indexed_values.cpp new file mode 100644 index 00000000..b502aedd --- /dev/null +++ b/modules/core/tests/test_set_invalid_indexed_values.cpp @@ -0,0 +1,132 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include + +#include + +TEST_CASE("set_invalid_indexed_values", "[core][attribute][indexed]") +{ + using namespace lagrange; + using Scalar = float; + using Index = uint32_t; + + SECTION("no invalid indices") + { + SurfaceMesh mesh(2); + mesh.add_vertex({0, 0}); + mesh.add_vertex({1, 0}); + mesh.add_vertex({0, 1}); + mesh.add_triangle(0, 1, 2); + + std::array uv_values{0, 0, 1, 0, 0, 1}; + std::array uv_indices{0, 1, 2}; + auto id = mesh.create_attribute( + "uv", + AttributeElement::Indexed, + AttributeUsage::UV, + 2, + uv_values, + uv_indices); + + auto& attr = mesh.ref_indexed_attribute(id); + internal::set_invalid_indexed_values(attr); + + // Nothing should change. 
+ REQUIRE(attr.values().get_num_elements() == 3); + REQUIRE(attr.indices().get(0) == 0); + REQUIRE(attr.indices().get(1) == 1); + REQUIRE(attr.indices().get(2) == 2); + } + + SECTION("some invalid indices") + { + SurfaceMesh mesh(2); + mesh.add_vertex({0, 0}); + mesh.add_vertex({1, 0}); + mesh.add_vertex({0, 1}); + mesh.add_vertex({1, 1}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(2, 1, 3); + + std::array uv_values{0, 0, 1, 0}; + std::array + uv_indices{0, 1, invalid(), invalid(), 1, invalid()}; + auto id = mesh.create_attribute( + "uv", + AttributeElement::Indexed, + AttributeUsage::UV, + 2, + uv_values, + uv_indices); + + auto& attr = mesh.ref_indexed_attribute(id); + internal::set_invalid_indexed_values(attr); + + // 3 invalid indices should create 3 new values (indices 2, 3, 4). + REQUIRE(attr.values().get_num_elements() == 5); + + // Original valid indices unchanged. + CHECK(attr.indices().get(0) == 0); + CHECK(attr.indices().get(1) == 1); + CHECK(attr.indices().get(4) == 1); + + // Each invalid index gets a unique new value index. + Index i2 = attr.indices().get(2); + Index i3 = attr.indices().get(3); + Index i5 = attr.indices().get(5); + CHECK(i2 != invalid()); + CHECK(i3 != invalid()); + CHECK(i5 != invalid()); + CHECK(i2 >= 2); + CHECK(i3 >= 2); + CHECK(i5 >= 2); + // All distinct. + CHECK(i2 != i3); + CHECK(i2 != i5); + CHECK(i3 != i5); + + // New values should be set to invalid(). 
+ for (Index idx : {i2, i3, i5}) { + CHECK(attr.values().get(idx, 0) == invalid()); + CHECK(attr.values().get(idx, 1) == invalid()); + } + } + + SECTION("all invalid indices") + { + SurfaceMesh mesh(2); + mesh.add_vertex({0, 0}); + mesh.add_vertex({1, 0}); + mesh.add_vertex({0, 1}); + mesh.add_triangle(0, 1, 2); + + std::array uv_values{0, 0}; + std::array uv_indices{invalid(), invalid(), invalid()}; + auto id = mesh.create_attribute( + "uv", + AttributeElement::Indexed, + AttributeUsage::UV, + 2, + uv_values, + uv_indices); + + auto& attr = mesh.ref_indexed_attribute(id); + internal::set_invalid_indexed_values(attr); + + // Original 1 value + 3 new. + REQUIRE(attr.values().get_num_elements() == 4); + // Each gets a unique index. + CHECK(attr.indices().get(0) == 1); + CHECK(attr.indices().get(1) == 2); + CHECK(attr.indices().get(2) == 3); + } +} diff --git a/modules/core/tests/test_utils_geometry3d.cpp b/modules/core/tests/test_utils_geometry3d.cpp index f60e1ad4..a70d97bb 100644 --- a/modules/core/tests/test_utils_geometry3d.cpp +++ b/modules/core/tests/test_utils_geometry3d.cpp @@ -13,6 +13,7 @@ #include #include +#include using namespace Eigen; using namespace lagrange; @@ -38,7 +39,9 @@ TEST_CASE("utils-geometry3d") REQUIRE(project_on_plane(Vector3d(1, 1, 1), Vector3d(0, 1, 0)).isApprox(Vector3d(1, 0, 1))); REQUIRE(project_on_plane(Vector3d(2, 2, 2), Vector3d(0, 1, 0)).isApprox(Vector3d(2, 0, 2))); - REQUIRE( - projected_cos_angle_between(Vector3d(1, 1, 1), Vector3d(1, 1, -1), Vector3d(0, 1, 0)) == - 0.0); + Vector3d a(1, 1, 1); + Vector3d b(1, 1, -1); + Vector3d n(0, 1, 0); + + REQUIRE_THAT(projected_cos_angle_between(a, b, n), Catch::Matchers::WithinAbs(0.0, 1e-15)); } diff --git a/modules/core/tests/test_uv_mesh.cpp b/modules/core/tests/test_uv_mesh.cpp new file mode 100644 index 00000000..01f38b8d --- /dev/null +++ b/modules/core/tests/test_uv_mesh.cpp @@ -0,0 +1,258 @@ +/* + * Copyright 2026 Adobe. All rights reserved. 
+ * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include +#include +#include +#include +#include + +TEST_CASE("uv_mesh: indexed attribute", "[core][uv_mesh]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + // Create a mesh with 2 triangles sharing an edge + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({1, 1, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(2, 1, 3); + + // Add indexed UV attribute + std::vector uv_values{0, 0, 1, 0, 0, 1, 1, 1}; + std::vector uv_indices{0, 1, 2, 2, 1, 3}; + mesh.template create_attribute( + "uv", + AttributeElement::Indexed, + AttributeUsage::UV, + 2, + uv_values, + uv_indices); + + SECTION("uv_mesh_view") + { + auto uv = uv_mesh_view(mesh); + REQUIRE(uv.get_num_vertices() == 4); + REQUIRE(uv.get_num_facets() == 2); + REQUIRE(uv.get_num_corners() == 6); + } + + SECTION("uv_mesh_ref") + { + auto uv = uv_mesh_ref(mesh); + REQUIRE(uv.get_num_vertices() == 4); + REQUIRE(uv.get_num_facets() == 2); + REQUIRE(uv.get_num_corners() == 6); + } +} + +TEST_CASE("uv_mesh: vertex attribute", "[core][uv_mesh]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({1, 1, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(2, 1, 3); + + // Add 
vertex UV attribute (UV == position xy) + std::vector uv_values{0, 0, 1, 0, 0, 1, 1, 1}; + mesh.template create_attribute( + "uv", + AttributeElement::Vertex, + AttributeUsage::UV, + 2, + uv_values); + + SECTION("uv_mesh_view") + { + auto uv = uv_mesh_view(mesh); + REQUIRE(uv.get_num_vertices() == 4); + REQUIRE(uv.get_num_facets() == 2); + REQUIRE(uv.get_num_corners() == 6); + } + + SECTION("uv_mesh_ref") + { + auto uv = uv_mesh_ref(mesh); + REQUIRE(uv.get_num_vertices() == 4); + REQUIRE(uv.get_num_facets() == 2); + REQUIRE(uv.get_num_corners() == 6); + } +} + +TEST_CASE("uv_mesh: corner attribute", "[core][uv_mesh]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({1, 1, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_triangle(2, 1, 3); + + // Add corner UV attribute (one UV per corner = 6 values) + std::vector uv_values{0, 0, 1, 0, 0, 1, 0, 1, 1, 0, 1, 1}; + mesh.template create_attribute( + "uv", + AttributeElement::Corner, + AttributeUsage::UV, + 2, + uv_values); + + SECTION("default element_types rejects corner attribute") + { + UVMeshOptions options; + options.uv_attribute_name = "uv"; + LA_REQUIRE_THROWS(uv_mesh_view(mesh, options)); + } + + SECTION("uv_mesh_view with UVMeshOptions::ElementTypes::All") + { + UVMeshOptions options; + options.uv_attribute_name = "uv"; + options.element_types = UVMeshOptions::ElementTypes::All; + auto uv = uv_mesh_view(mesh, options); + REQUIRE(uv.get_num_vertices() == 6); + REQUIRE(uv.get_num_facets() == 2); + REQUIRE(uv.get_num_corners() == 6); + REQUIRE(uv.get_vertex_per_facet() == 3); + + // Verify vertex positions match original corner UV values + for (Index c = 0; c < 6; ++c) { + Index v = uv.get_corner_vertex(c); + REQUIRE(v == c); + auto p = uv.get_position(v); + REQUIRE(p[0] == uv_values[c * 2]); + REQUIRE(p[1] == uv_values[c * 2 + 1]); + } + } + + 
SECTION("uv_mesh_ref with UVMeshOptions::ElementTypes::All") + { + UVMeshOptions options; + options.uv_attribute_name = "uv"; + options.element_types = UVMeshOptions::ElementTypes::All; + auto uv = uv_mesh_ref(mesh, options); + REQUIRE(uv.get_num_vertices() == 6); + REQUIRE(uv.get_num_facets() == 2); + REQUIRE(uv.get_num_corners() == 6); + + // Modify UV mesh vertex and verify it reflects in the original corner attribute + auto uv_pos = matrix_ref(uv.ref_vertex_to_position()); + uv_pos(0, 0) = 42.0; + uv_pos(0, 1) = 43.0; + + auto& corner_attr = mesh.get_attribute("uv"); + auto corner_data = corner_attr.get_all(); + REQUIRE(corner_data[0] == 42.0); + REQUIRE(corner_data[1] == 43.0); + } +} + +TEST_CASE("uv_mesh: corner attribute hybrid mesh", "[core][uv_mesh]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + // Create a hybrid mesh with 1 triangle + 1 quad + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({1, 1, 0}); + mesh.add_vertex({2, 0, 0}); + mesh.add_triangle(0, 1, 2); + mesh.add_quad(1, 4, 3, 2); + + // 3 + 4 = 7 corners + std::vector uv_values{0, 0, 1, 0, 0, 1, 1, 0, 2, 0, 1, 1, 0, 1}; + mesh.template create_attribute( + "uv", + AttributeElement::Corner, + AttributeUsage::UV, + 2, + uv_values); + + UVMeshOptions options; + options.uv_attribute_name = "uv"; + options.element_types = UVMeshOptions::ElementTypes::All; + + SECTION("uv_mesh_view") + { + auto uv = uv_mesh_view(mesh, options); + REQUIRE(uv.get_num_vertices() == 7); + REQUIRE(uv.get_num_facets() == 2); + REQUIRE(uv.get_num_corners() == 7); + REQUIRE(uv.get_facet_size(0) == 3); + REQUIRE(uv.get_facet_size(1) == 4); + + for (Index c = 0; c < 7; ++c) { + Index v = uv.get_corner_vertex(c); + REQUIRE(v == c); + } + } + + SECTION("uv_mesh_ref") + { + auto uv = uv_mesh_ref(mesh, options); + REQUIRE(uv.get_num_vertices() == 7); + REQUIRE(uv.get_num_facets() == 2); + 
REQUIRE(uv.get_num_corners() == 7); + REQUIRE(uv.get_facet_size(0) == 3); + REQUIRE(uv.get_facet_size(1) == 4); + } +} + +TEST_CASE( + "uv_mesh: auto-detect corner attribute with UVMeshOptions::ElementTypes::All", + "[core][uv_mesh]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + SurfaceMesh mesh; + mesh.add_vertex({0, 0, 0}); + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_triangle(0, 1, 2); + + // Only a corner UV attribute (no name specified, auto-detect) + std::vector uv_values{0, 0, 1, 0, 0, 1}; + mesh.template create_attribute( + "uv", + AttributeElement::Corner, + AttributeUsage::UV, + 2, + uv_values); + + UVMeshOptions options; + options.element_types = UVMeshOptions::ElementTypes::All; + auto uv = uv_mesh_view(mesh, options); + REQUIRE(uv.get_num_vertices() == 3); + REQUIRE(uv.get_num_facets() == 1); + REQUIRE(uv.get_num_corners() == 3); +} diff --git a/modules/core/tests/test_weld_indexed_attribute.cpp b/modules/core/tests/test_weld_indexed_attribute.cpp index daa55be4..7cfe6ffc 100644 --- a/modules/core/tests/test_weld_indexed_attribute.cpp +++ b/modules/core/tests/test_weld_indexed_attribute.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #ifdef LAGRANGE_ENABLE_LEGACY_FUNCTIONS @@ -274,6 +275,61 @@ TEST_CASE("weld_indexed_attribute", "[core][attribute][surface]") REQUIRE(attr.values().get_num_elements() == 1); } } + + SECTION("Valid and invalid values do not weld even with large epsilon") + { + const Scalar inv = lagrange::invalid(); + + std::array uv_values{0, 0, inv, inv, 0, 0, 0, 0, 0, 0, 0, 0}; + std::array uv_indices{0, 1, 2, 3, 4, 5}; + auto id = mesh.create_attribute( + "uv", + AttributeElement::Indexed, + AttributeUsage::UV, + 2, + uv_values, + uv_indices); + + WeldOptions options; + options.epsilon_abs = std::numeric_limits::infinity(); + options.epsilon_rel = 1.0f; + weld_indexed_attribute(mesh, id, options); + + const auto& attr = 
mesh.get_indexed_attribute(id); + const auto& welded_values = attr.values(); + const auto& welded_indices = attr.indices(); + + // Valid rows 2 and 3 collapse into one; the rest stays separate. + REQUIRE(welded_values.get_num_elements() == 5); + REQUIRE(welded_indices.get(2) == welded_indices.get(3)); + REQUIRE(welded_indices.get(1) != welded_indices.get(4)); + } + + SECTION("Two identical invalid rows weld together") + { + const Scalar inv = lagrange::invalid(); + + std::array uv_values{0, 0, inv, inv, 0, 0, inv, inv, inv, inv, 0, 0}; + std::array uv_indices{0, 1, 2, 3, 4, 5}; + auto id = mesh.create_attribute( + "uv", + AttributeElement::Indexed, + AttributeUsage::UV, + 2, + uv_values, + uv_indices); + + weld_indexed_attribute(mesh, id); + + const auto& attr = mesh.get_indexed_attribute(id); + const auto& welded_values = attr.values(); + const auto& welded_indices = attr.indices(); + + // Rows 1 and 4 (both invalid {inv,inv}) collapse into one; the rest stays separate. + REQUIRE(welded_values.get_num_elements() == 5); + REQUIRE(welded_indices.get(1) == welded_indices.get(4)); + REQUIRE(welded_indices.get(2) != welded_indices.get(3)); + } } TEST_CASE("weld_indexed_attribute hybrid mesh", "[core][attribute][surface]") diff --git a/modules/filtering/python/tests/test_mesh_smoothing.py b/modules/filtering/python/tests/test_mesh_smoothing.py index 16372fd4..86bc1cbf 100644 --- a/modules/filtering/python/tests/test_mesh_smoothing.py +++ b/modules/filtering/python/tests/test_mesh_smoothing.py @@ -12,9 +12,6 @@ import lagrange -from .assets import cube # noqa: F401 - - class TestMeshSmoothing: def test_cube(self, cube): assert cube.num_vertices == 8 diff --git a/modules/io/CMakeLists.txt b/modules/io/CMakeLists.txt index 25c95b42..a3ba3a19 100644 --- a/modules/io/CMakeLists.txt +++ b/modules/io/CMakeLists.txt @@ -18,7 +18,7 @@ if(LAGRANGE_TOPLEVEL_PROJECT) endif() # 2. 
dependencies -lagrange_include_modules(core fs scene image_io) +lagrange_include_modules(core fs scene image_io serialization2) include(tinyobjloader) include(tinygltf) include(libigl) # TODO: remove libigl later @@ -34,6 +34,7 @@ target_link_libraries(lagrange_io happly::happly igl::core PRIVATE + lagrange::serialization2 lagrange::image_io tinygltf::tinygltf ufbx::ufbx diff --git a/modules/io/examples/mesh_convert.cpp b/modules/io/examples/mesh_convert.cpp index 85b11a0f..11385aab 100644 --- a/modules/io/examples/mesh_convert.cpp +++ b/modules/io/examples/mesh_convert.cpp @@ -20,21 +20,38 @@ #include -#include +#include + +using Index = uint32_t; +constexpr size_t Dimension = 3; + +std::unordered_set input_scene_formats() +{ + return {".gltf", ".glb", ".fbx", ".lgs"}; +} +std::unordered_set output_scene_formats() +{ + return {".gltf", ".glb", ".obj", ".lgs"}; +} template void convert(const lagrange::fs::path& input_filename, const lagrange::fs::path& output_filename) { - using Index = uint32_t; using MeshType = lagrange::SurfaceMesh; - using SceneType = lagrange::scene::SimpleScene; + using SceneType = lagrange::scene::SimpleScene; // Load as scene if extension is .fbx, .gtlf or .glb std::string input_ext = lagrange::to_lower(input_filename.extension().string()); - if (std::set{".fbx", ".gltf", ".glb"}.count(input_ext)) { + if (input_scene_formats().count(input_ext)) { // Load scene lagrange::logger().info("Loading input scene: {}", input_filename.string()); - auto scene = lagrange::io::load_simple_scene(input_filename); + lagrange::io::LoadOptions load_options; + if (input_ext == ".gltf" || input_ext == ".glb") { + // If input scene is glTF, we need to stitch duplicate vertices (glTF doesn't support + // indexed buffers) + load_options.stitch_vertices = true; + } + auto scene = lagrange::io::load_simple_scene(input_filename, load_options); // Display info lagrange::logger().info( @@ -42,19 +59,9 @@ void convert(const lagrange::fs::path& input_filename, const 
lagrange::fs::path& scene.get_num_meshes(), scene.compute_num_instances()); - // If input scene if gtTF, we need to stitch duplicate vertices (glTF doesn't support - // indexed buffers) - if (input_ext == ".gltf" || input_ext == ".glb") { - lagrange::logger().info("Stitching duplicate vertices"); - for (Index i = 0; i < scene.get_num_meshes(); i++) { - auto& mesh = scene.ref_mesh(i); - lagrange::remove_duplicate_vertices(mesh); - } - } - // Save as scene or mesh std::string output_ext = lagrange::to_lower(output_filename.extension().string()); - if (std::set{".gltf", ".glb"}.count(output_ext)) { + if (output_scene_formats().count(output_ext)) { lagrange::logger().info("Saving output scene: {}", output_filename.string()); lagrange::io::save_simple_scene(output_filename, scene); } else { diff --git a/modules/io/python/src/io.cpp b/modules/io/python/src/io.cpp index 710e305a..913f21ee 100644 --- a/modules/io/python/src/io.cpp +++ b/modules/io/python/src/io.cpp @@ -174,7 +174,7 @@ void populate_io_module(nb::module_& m) "selected_attributes"_a = nb::none(), R"(Save mesh to file. -Filename extension determines the file format. Supported formats are: `obj`, `ply`, `msh`, `glb` and `gltf`. +Filename extension determines the file format. Supported formats are: `obj`, `ply`, `msh`, `glb`, `gltf` and `lgm`. :param filename: The output file name. :param mesh: The input mesh. 
diff --git a/modules/io/src/load_gltf.cpp b/modules/io/src/load_gltf.cpp index bb4847de..fe6badf4 100644 --- a/modules/io/src/load_gltf.cpp +++ b/modules/io/src/load_gltf.cpp @@ -557,10 +557,8 @@ Eigen::Transform get_node_transform(const tinygltf::Node& node) Scalar(node.rotation[2])); } if (!node.scale.empty()) { - scale = Eigen::Scaling( - Scalar(node.scale[0]), - Scalar(node.scale[1]), - Scalar(node.scale[2])); + scale = + Eigen::Scaling(Scalar(node.scale[0]), Scalar(node.scale[1]), Scalar(node.scale[2])); } t = translation * rotation * scale; } diff --git a/modules/io/src/load_mesh.cpp b/modules/io/src/load_mesh.cpp index ea2edd05..bd38cf45 100644 --- a/modules/io/src/load_mesh.cpp +++ b/modules/io/src/load_mesh.cpp @@ -21,6 +21,7 @@ #include #include #include +#include #include #include @@ -59,7 +60,18 @@ template < MeshType load_mesh(const fs::path& filename, const LoadOptions& options) { std::string ext = to_lower(filename.extension().string()); - if (ext == ".obj") { + if (ext == ".lgm") { + serialization::DeserializeOptions enc_options; + enc_options.allow_type_cast = true; + enc_options.quiet = options.quiet; + return serialization::load_mesh(filename, enc_options); + } else if (ext == ".lgs") { + serialization::DeserializeOptions enc_options; + enc_options.allow_scene_conversion = true; + enc_options.allow_type_cast = true; + enc_options.quiet = options.quiet; + return serialization::load_mesh(filename, enc_options); + } else if (ext == ".obj") { return load_mesh_obj(filename, options); } else if (ext == ".ply") { return load_mesh_ply(filename, options); diff --git a/modules/io/src/load_obj.cpp b/modules/io/src/load_obj.cpp index a2e5836c..82b01810 100644 --- a/modules/io/src/load_obj.cpp +++ b/modules/io/src/load_obj.cpp @@ -25,6 +25,7 @@ #include #include #include +#include #include #include #include @@ -203,6 +204,7 @@ ObjReaderResult extract_mes logger().trace("[load_mesh_obj] Copy facet indices"); std::partial_sum(facet_counts.begin(), 
facet_counts.end(), facet_counts.begin()); std::atomic_size_t num_invalid_uv = 0; + std::atomic_size_t num_invalid_nrm = 0; tbb::parallel_for(Index(0), Index(shapes.size()), [&](Index i) { const auto& shape = shapes[i]; const Index first_facet = (i == 0 ? 0 : facet_counts[i - 1]); @@ -248,7 +250,12 @@ ObjReaderResult extract_mes } } if (!nrm_indices.empty()) { - nrm_indices[c] = safe_cast(index.normal_index); + if (index.normal_index < 0) { + nrm_indices[c] = invalid(); + ++num_invalid_nrm; + } else { + nrm_indices[c] = safe_cast(index.normal_index); + } } } } @@ -256,12 +263,18 @@ ObjReaderResult extract_mes // TODO: Support smoothing groups + subd tags }); - if (num_invalid_uv) { - // This one is a legit warning, so we do not silence it even in quiet mode. - logger().warn( - "Found {} vertices without UV indices. UV attribute will have invalid values.", - num_invalid_uv.load()); - } + auto handle_invalid_indices = [&](std::atomic_size_t& num_invalid, + std::string_view attr_name, + IndexedAttribute* attr_ptr) { + if (!attr_ptr) return; + if (!num_invalid) return; + if (!options.quiet) { + logger().warn("Found {} corners without {} indices.", num_invalid.load(), attr_name); + } + lagrange::internal::set_invalid_indexed_values(*attr_ptr); + }; + handle_invalid_indices(num_invalid_uv, AttributeName::texcoord, uv_attr); + handle_invalid_indices(num_invalid_nrm, AttributeName::normal, nrm_attr); logger().trace("[load_mesh_obj] Loading complete"); if (options.stitch_vertices) { diff --git a/modules/io/src/load_scene.cpp b/modules/io/src/load_scene.cpp index 34eb7890..396ebaba 100644 --- a/modules/io/src/load_scene.cpp +++ b/modules/io/src/load_scene.cpp @@ -19,6 +19,7 @@ #include #include #include +#include #include #include @@ -34,7 +35,13 @@ template SceneType load_scene(const fs::path& filename, const LoadOptions& options) { std::string ext = to_lower(filename.extension().string()); - if (ext == ".gltf" || ext == ".glb") { + if (ext == ".lgm" || ext == ".lgs") { + 
serialization::DeserializeOptions enc_options; + enc_options.allow_scene_conversion = true; + enc_options.allow_type_cast = true; + enc_options.quiet = options.quiet; + return serialization::load_scene(filename, enc_options); + } else if (ext == ".gltf" || ext == ".glb") { return load_scene_gltf(filename, options); } else if (ext == ".fbx") { return load_scene_fbx(filename, options); diff --git a/modules/io/src/load_simple_scene.cpp b/modules/io/src/load_simple_scene.cpp index 4ee9d49e..1a149182 100644 --- a/modules/io/src/load_simple_scene.cpp +++ b/modules/io/src/load_simple_scene.cpp @@ -18,6 +18,7 @@ #include #include #include +#include #include #include @@ -30,8 +31,15 @@ namespace lagrange::io { template SceneType load_simple_scene(const fs::path& filename, const LoadOptions& options) { + // TODO: load .obj as simple scene std::string ext = to_lower(filename.extension().string()); - if (ext == ".gltf" || ext == ".glb") { + if (ext == ".lgm" || ext == ".lgs") { + serialization::DeserializeOptions enc_options; + enc_options.allow_scene_conversion = true; + enc_options.allow_type_cast = true; + enc_options.quiet = options.quiet; + return serialization::load_simple_scene(filename, enc_options); + } else if (ext == ".gltf" || ext == ".glb") { return load_simple_scene_gltf(filename, options); } else if (ext == ".fbx") { return load_simple_scene_fbx(filename, options); diff --git a/modules/io/src/save_gltf.cpp b/modules/io/src/save_gltf.cpp index 41c361e2..c8c48820 100644 --- a/modules/io/src/save_gltf.cpp +++ b/modules/io/src/save_gltf.cpp @@ -23,12 +23,14 @@ #include #include #include +#include #include #include #include #include #include #include +#include #include #include #include @@ -144,10 +146,20 @@ void save_gltf(const fs::path& filename, const tinygltf::Model& model, const Sav constexpr bool embed_buffers = true; constexpr bool pretty_print = true; + +#if LAGRANGE_TARGET_COMPILER(EMSCRIPTEN) + // On Emscripten, writing external image files via tinygltf may 
silently fail on the virtual + // filesystem, producing a .glb/.gltf with unencoded raw pixel data that STB cannot decode on reload. + // Force embedding images when saving as binary .glb/.gltf to ensure a self-contained file. + bool embed_images = true; +#else + bool embed_images = options.embed_images; +#endif + bool success = loader.WriteGltfSceneToFile( &model, filename.string(), - options.embed_images, + embed_images, embed_buffers, pretty_print, binary); @@ -721,7 +733,8 @@ tinygltf::Model lagrange_scene_to_gltf_model( scene.extensions = convert_extension_map(lscene.extensions, options); } - for (const auto& llight : lscene.lights) { + for (size_t light_idx = 0; light_idx < lscene.lights.size(); ++light_idx) { + const auto& llight = lscene.lights[light_idx]; // note that the gltf support for lights is limited compared to our representation. // Information can be lost. tinygltf::Light light; @@ -731,13 +744,32 @@ tinygltf::Model lagrange_scene_to_gltf_model( llight.color_diffuse.y(), llight.color_diffuse.z()}; light.intensity = 1 / llight.attenuation_constant; + auto light_label = llight.name.empty() + ? 
fmt::format("light[{}]", light_idx) + : fmt::format("'{}' (index {})", llight.name, light_idx); switch (llight.type) { case scene::Light::Type::Directional: light.type = "directional"; break; case scene::Light::Type::Point: light.type = "point"; break; case scene::Light::Type::Spot: + la_runtime_assert( + llight.angle_inner_cone.has_value() && llight.angle_outer_cone.has_value(), + fmt::format("Spot light {} must have inner and outer cone angles.", light_label)); light.type = "spot"; - light.spot.innerConeAngle = llight.angle_inner_cone; - light.spot.outerConeAngle = llight.angle_outer_cone; + light.spot.innerConeAngle = llight.angle_inner_cone.value(); + light.spot.outerConeAngle = llight.angle_outer_cone.value(); + if (!options.quiet) { + constexpr double half_pi = lagrange::internal::pi / 2.0; + if (light.spot.innerConeAngle < 0 || + light.spot.innerConeAngle > light.spot.outerConeAngle || + light.spot.outerConeAngle > half_pi) { + logger().warn( + "Spot light {} has invalid cone angles (inner={}, outer={}). 
" + "glTF requires 0 <= innerConeAngle <= outerConeAngle <= pi/2.", + light_label, + light.spot.innerConeAngle, + light.spot.outerConeAngle); + } + } break; default: if (!options.quiet) { diff --git a/modules/io/src/save_mesh.cpp b/modules/io/src/save_mesh.cpp index 53e5fb40..0fcefa56 100644 --- a/modules/io/src/save_mesh.cpp +++ b/modules/io/src/save_mesh.cpp @@ -13,6 +13,7 @@ #include #include +#include #include #include @@ -49,7 +50,9 @@ void save_mesh( const SaveOptions& options) { std::string ext = to_lower(filename.extension().string()); - if (ext == ".obj") { + if (ext == ".lgm") { + serialization::save_mesh(filename, mesh); + } else if (ext == ".obj") { save_mesh_obj(filename, mesh, options); } else if (ext == ".ply") { save_mesh_ply(filename, mesh, options); diff --git a/modules/io/src/save_scene.cpp b/modules/io/src/save_scene.cpp index 277f666c..1d18787c 100644 --- a/modules/io/src/save_scene.cpp +++ b/modules/io/src/save_scene.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include @@ -30,7 +31,9 @@ void save_scene( { std::string ext = to_lower(filename.extension().string()); - if (ext == ".gltf" || ext == ".glb") { + if (ext == ".lgs") { + serialization::save_scene(filename, scene); + } else if (ext == ".gltf" || ext == ".glb") { save_scene_gltf(filename, scene, options); } else if (ext == ".obj") { save_scene_obj(filename, scene, options); diff --git a/modules/io/src/save_simple_scene.cpp b/modules/io/src/save_simple_scene.cpp index ae176b8f..f6eaecfe 100644 --- a/modules/io/src/save_simple_scene.cpp +++ b/modules/io/src/save_simple_scene.cpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -32,7 +33,9 @@ void save_simple_scene( const SaveOptions& options) { std::string ext = to_lower(filename.extension().string()); - if (ext == ".obj") { + if (ext == ".lgs") { + serialization::save_simple_scene(filename, scene); + } else if (ext == ".obj") { save_simple_scene_obj(filename, scene, options); } else if (ext == 
".ply") { // todo diff --git a/modules/io/tests/test_obj.cpp b/modules/io/tests/test_obj.cpp index 719bf255..1dabb546 100644 --- a/modules/io/tests/test_obj.cpp +++ b/modules/io/tests/test_obj.cpp @@ -260,6 +260,87 @@ TEST_CASE("io/obj scene with materials", "[io][obj]") } } +TEST_CASE("io/obj missing indices", "[io][obj]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + // OBJ with 4 vertices, 3 UV coords, 1 normal, and 3 object groups. + // Each group uses a different face format and includes one face with + // indices and one without, to test missing index handling. + // tinyobjloader requires consistent face formats within a single group, + // so we use separate "o" groups to mix formats. + const std::string obj_data = R"(v 0 0 0 +v 1 0 0 +v 0 1 0 +v 1 1 0 +vt 0.0 0.0 +vt 1.0 0.0 +vt 0.0 1.0 +vn 0 0 1 +o uv_only +f 1/1 2/2 3/3 +f 3 2 4 +o normal_only +f 1//1 2//1 3//1 +f 3 2 4 +o all +f 1/1/1 2/2/1 3/3/1 +f 3 2 4 +)"; + + std::istringstream ss(obj_data); + auto mesh = io::load_mesh_obj>(ss); + REQUIRE(mesh.get_num_facets() == 6); + REQUIRE(mesh.has_attribute("texcoord")); + REQUIRE(mesh.has_attribute("normal")); + + constexpr Index num_uv_values = 3; + constexpr Index num_nrm_values = 1; + + auto& uv_attr = mesh.get_indexed_attribute("texcoord"); + auto uv_indices = uv_attr.indices().get_all(); + auto& nrm_attr = mesh.get_indexed_attribute("normal"); + auto nrm_indices = nrm_attr.indices().get_all(); + + REQUIRE(mesh.get_vertex_per_facet() == 3); + const Index nvpf = 3; + + // Check that all corners of a face have original (non-remapped) indices. + auto check_valid = [&](const auto& indices, Index face_index, Index original_count) { + for (Index c = 0; c < nvpf; ++c) { + CHECK(indices[face_index * nvpf + c] < original_count); + } + }; + + // Check that all corners of a face have been remapped to appended default values. 
+ auto check_remapped = [&](const auto& indices, Index face_index, Index original_count) { + for (Index c = 0; c < nvpf; ++c) { + CHECK(indices[face_index * nvpf + c] != invalid()); + CHECK(indices[face_index * nvpf + c] >= original_count); + } + }; + + // uv_only group: face 0 has UVs, face 1 has neither. + check_valid(uv_indices, 0, num_uv_values); + check_remapped(nrm_indices, 0, num_nrm_values); + check_remapped(uv_indices, 1, num_uv_values); + check_remapped(nrm_indices, 1, num_nrm_values); + + // normal_only group: face 2 has normals, face 3 has neither. + check_remapped(uv_indices, 2, num_uv_values); + check_valid(nrm_indices, 2, num_nrm_values); + check_remapped(uv_indices, 3, num_uv_values); + check_remapped(nrm_indices, 3, num_nrm_values); + + // all group: face 4 has both, face 5 has neither. + check_valid(uv_indices, 4, num_uv_values); + check_valid(nrm_indices, 4, num_nrm_values); + check_remapped(uv_indices, 5, num_uv_values); + check_remapped(nrm_indices, 5, num_nrm_values); +} + TEST_CASE("io/obj 2d mesh", "[io][obj]") { using namespace lagrange; diff --git a/modules/polyddg/CMakeLists.txt b/modules/polyddg/CMakeLists.txt index 16d39d93..f40fd848 100644 --- a/modules/polyddg/CMakeLists.txt +++ b/modules/polyddg/CMakeLists.txt @@ -16,7 +16,11 @@ if(LAGRANGE_TOPLEVEL_PROJECT) endif() # 2. dependencies -target_link_libraries(lagrange_polyddg PUBLIC lagrange::core) +lagrange_include_modules(solver) +target_link_libraries(lagrange_polyddg + PUBLIC lagrange::core + PRIVATE lagrange::solver +) # 3. 
unit tests and examples if(LAGRANGE_UNIT_TESTS) diff --git a/modules/polyddg/include/lagrange/polyddg/DifferentialOperators.h b/modules/polyddg/include/lagrange/polyddg/DifferentialOperators.h index 2490b46a..c4de6ca4 100644 --- a/modules/polyddg/include/lagrange/polyddg/DifferentialOperators.h +++ b/modules/polyddg/include/lagrange/polyddg/DifferentialOperators.h @@ -86,11 +86,28 @@ class DifferentialOperators Eigen::SparseMatrix star0() const; /// - /// Compute the discrete Hodge star operator for 1-forms (diagonal mass matrix, size #E x #E). + /// Compute the discrete Hodge star operator for 1-forms (size #E x #E). /// - /// @return A diagonal sparse matrix of size #E x #E. + /// Following de Goes, Butts and Desbrun (ACM Trans. Graph. 2020, Section 4.4), the 1-form + /// Hodge star is the VEM-stabilized inner product /// - Eigen::SparseMatrix star1() const; + /// @f[ + /// M_1 = \sum_f \text{area}^f U_f^T U_f + \lambda P_f^T P_f + /// @f] + /// + /// where @f$ U_f @f$ is the per-face sharp operator and @f$ P_f = I - V_f U_f @f$ is the projection onto the + /// kernel of @f$ U_f @f$. This matrix is symmetric positive-definite, scale-invariant, and + /// constant-precise for arbitrary polygons (including non-planar faces). + /// + /// This mirrors the convention used for star0() and star2(), which also delegate to their + /// respective inner-product operators. For triangulated meshes, λ=1 is recommended + /// and coincides with the standard cotangent Laplacian construction. + /// + /// @param[in] lambda Stabilization weight for the VEM projection term (default 1). + /// + /// @return A symmetric positive-definite sparse matrix of size #E x #E. + /// + Eigen::SparseMatrix star1(Scalar lambda = 1) const; /// /// Compute the discrete Hodge star operator for 2-forms (diagonal mass matrix, size #F x #F). @@ -186,10 +203,12 @@ class DifferentialOperators Eigen::SparseMatrix curl() const; /// - /// Compute the discrete Laplacian operator. 
+ /// Compute the discrete weak-form Laplacian operator (@f$ \Delta : \Omega^0 \to \tilde{\Omega}^2 @f$). /// - /// The Laplacian operator computes the Laplacian of a 0-form (i.e. scalar field). - /// The discrete Laplacian operator is a matrix of size #V by #V. + /// Maps a primal 0-form (per-vertex scalar) to a dual 2-form (integrated value over each dual + /// cell, also per-vertex). This is the weak-form (Galerkin) Laplacian: @f$ \Delta = d_0^T \cdot M_1 \cdot d_0 @f$, + /// so the right-hand side of @f$ \Delta u = f @f$ must be assembled as a dual 2-form (i.e. @f$ \int f \varphi_v dA @f$ per + /// vertex), not as pointwise vertex values. The matrix is of size #V by #V. /// /// @param[in] lambda Weight of projection term for the 1-form inner product (default: 1). /// @@ -197,6 +216,66 @@ class DifferentialOperators /// Eigen::SparseMatrix laplacian(Scalar lambda = 1) const; + /// + /// Compute the discrete weak-form co-differential operator (@f$ \delta_1 : \Omega^1 \to \tilde{\Omega}^2 @f$). + /// + /// This is a weak-form (Galerkin) operator. It maps a primal 1-form (per-edge scalar, size #E) + /// to a dual 2-form (integrated value over each dual cell, per-vertex, size #V). The output is + /// NOT a primal 0-form; to recover a primal 0-form one would need to apply the inverse vertex + /// mass matrix @f$ M_0^{-1} @f$ (strong form: @f$ M_0^{-1} d_0^T M_1 @f$). + /// + /// Equals divergence(lambda): @f$ \delta_1 = d_0^T \cdot M_1(\lambda) @f$. The matrix is of size #V by #E. + /// + /// @param[in] lambda Weight of projection term for the 1-form inner product (default: 1). + /// + /// @return A sparse matrix representing the weak-form 1-form co-differential operator. + /// + Eigen::SparseMatrix delta1(Scalar lambda = 1) const; + + /// + /// Compute the discrete weak-form co-differential operator (@f$ \delta_2 : \Omega^2 \to \tilde{\Omega}^1 @f$). + /// + /// This is a weak-form (Galerkin) operator. 
It maps a primal 2-form (per-facet scalar, size #F) + /// to a dual 1-form (integrated value over each dual edge, per-edge, size #E). The output is + /// NOT a primal 1-form; to recover a primal 1-form one would need to apply the inverse edge + /// mass matrix @f$ M_1^{-1} @f$ (strong form: @f$ M_1^{-1} d_1^T M_2 @f$). + /// + /// @f$ \delta_2 = d_1^T \cdot M_2 @f$ where @f$ M_2 = @f$ inner_product_2_form(). The matrix is of size #E by #F. + /// + /// @return A sparse matrix representing the weak-form 2-form co-differential operator. + /// + Eigen::SparseMatrix delta2() const; + + /// + /// Compute the discrete weak-form Laplacian operator on 2-forms (@f$ \Delta_2 : \Omega^2 \to \tilde{\Omega}^0 @f$). + /// + /// Maps a primal 2-form (per-facet scalar, size #F) to a dual 0-form (integrated value over + /// each dual vertex, per-facet, size #F). This is a weak-form operator: @f$ \Delta_2 = d_1 \cdot \delta_2 = d_1 \cdot d_1^T \cdot M_2 @f$. + /// The matrix is of size #F by #F. + /// + /// @return A sparse matrix representing the weak-form 2-form Laplacian operator. + /// + Eigen::SparseMatrix laplacian2() const; + + /// + /// Compute the discrete weak-form Hodge Laplacian on 1-forms (@f$ \Delta_1 : \Omega^1 \to \tilde{\Omega}^1 @f$). + /// + /// Maps a primal 1-form (per-edge scalar, size #E) to a dual 1-form (per-edge, size #E). + /// Decomposes into an exact part (@f$ d_0 \delta_1 @f$) and a co-exact part (@f$ \delta_2 d_1 @f$), both weak-form: + /// @f[ + /// \Delta_1 = d_0 \cdot \delta_1(\lambda) + \delta_2 \cdot d_1 + /// @f] + /// + /// This operator is required for Helmholtz-Hodge decomposition of 1-forms and is distinct + /// from connection_laplacian(), which operates on tangent vector fields at vertices. + /// The matrix is of size #E by #E. + /// + /// @param[in] lambda Weight of projection term for the 1-form inner product (default: 1). + /// + /// @return A sparse matrix representing the weak-form Hodge Laplacian on 1-forms. 
+ /// + Eigen::SparseMatrix laplacian1(Scalar lambda = 1) const; + /// /// Compute the coordinate transformation that maps a per-vertex tangent vector field expressed /// in the global 3D coordinate to the local tangent basis at each vertex. @@ -245,7 +324,8 @@ class DifferentialOperators /// setting, both vector fields are defined on the vertices. The output covariant derivative is /// a flattened 2x2 matrix defined on each facet. /// - /// @return A sparse matrix representing the discrete covariant derivative operator. + /// @return A sparse matrix of size (#F * 4) x (#V * 2) representing the discrete covariant + /// derivative operator. /// Eigen::SparseMatrix covariant_derivative() const; diff --git a/modules/polyddg/include/lagrange/polyddg/compute_smooth_direction_field.h b/modules/polyddg/include/lagrange/polyddg/compute_smooth_direction_field.h new file mode 100644 index 00000000..b53b3a65 --- /dev/null +++ b/modules/polyddg/include/lagrange/polyddg/compute_smooth_direction_field.h @@ -0,0 +1,83 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include + +#include +#include + +namespace lagrange::polyddg { + +/// @addtogroup module-polyddg +/// @{ + +/// +/// Options for compute_smooth_direction_field(). +/// +struct SmoothDirectionFieldOptions +{ + /// Symmetry order of the direction field (e.g. 
1 = vector field, 2 = line field, + /// 4 = cross field). + uint8_t nrosy = 4; + + /// Stabilization weight for the VEM projection term in the connection Laplacian. + double lambda = 1.0; + + /// Name of a per-vertex 3-D tangent vector field attribute used as alignment constraints. + /// Each vertex with a non-zero vector is softly constrained to align to that direction. + /// Vertices with a zero vector are unconstrained. If empty (the default), no alignment + /// constraints are applied and the globally smoothest field is computed via inverse power + /// iteration. + std::string_view alignment_attribute = ""; + + /// Scaling factor for the spectral shift in the alignment solve, following the fieldgen + /// formulation (Knöppel et al. 2013). The actual shift is @f$ \alpha = s \cdot + /// \sigma_{\min} @f$, where @f$ s @f$ is this value and @f$ \sigma_{\min} @f$ is the + /// smallest eigenvalue of the connection Laplacian (computed automatically). At the + /// default value of 1.0, the shift equals @f$ \sigma_{\min} @f$, giving maximum + /// alignment. Values in (0, 1) give weaker alignment (more smoothness). + double alignment_weight = 1.0; + + /// Output attribute name for the smooth direction field (3-D vector, per vertex). + std::string_view direction_field_attribute = "@smooth_direction_field"; +}; + +/// +/// Compute the globally smoothest n-direction field on a surface mesh. +/// +/// This function is based on the following paper: +/// +/// Knöppel, Felix, et al. "Globally optimal direction fields." ACM Transactions on Graphics (ToG) +/// 32.4 (2013): 1-10. +/// +/// The solution is stored as a per-vertex 3-D tangent vector attribute in world-space +/// coordinates, obtained by mapping the local 2-D solution through the vertex tangent basis. +/// +/// @param[in,out] mesh Input surface mesh. The output attribute is added or overwritten. +/// @param[in] ops Precomputed differential operators for the mesh. 
+/// @param[in] options Options controlling the rosy order, stabilization weight, +/// optional alignment constraints, and output attribute name. +/// +/// @return Attribute ID of the output direction field attribute. +/// +template +LA_POLYDDG_API AttributeId compute_smooth_direction_field( + SurfaceMesh& mesh, + const DifferentialOperators& ops, + SmoothDirectionFieldOptions options = {}); + +/// @} + +} // namespace lagrange::polyddg diff --git a/modules/polyddg/include/lagrange/polyddg/hodge_decomposition.h b/modules/polyddg/include/lagrange/polyddg/hodge_decomposition.h new file mode 100644 index 00000000..2c45330f --- /dev/null +++ b/modules/polyddg/include/lagrange/polyddg/hodge_decomposition.h @@ -0,0 +1,175 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include + +#include +#include + +namespace lagrange::polyddg { + +/// @addtogroup module-polyddg +/// @{ + +/// +/// Options for Hodge decomposition functions. +/// +/// Depending on which function is passed this argument, the attributes are interpreted differently: +/// - For hodge_decomposition_1_form(): input/output are per-edge scalars (1-forms) +/// - For hodge_decomposition_vector_field(): input/output are per-vertex 3D vectors (vector fields) +/// +struct HodgeDecompositionOptions +{ + /// Stabilization weight @f$ \lambda @f$ for the VEM 1-form inner product. 
+ double lambda = 1.0; + + /// N-rosy symmetry order. Only used by hodge_decomposition_vector_field(). + /// When n = 1 (default) the input is a plain vector field. + /// When n > 1 the input is one representative vector of an n-rosy field. The vector is + /// encoded in the local tangent plane via n-fold angle multiplication before the 1-form + /// decomposition and decoded back afterwards. + uint8_t nrosy = 1; + + /// Input attribute name. + /// - For hodge_decomposition_1_form(): per-edge scalar (1-form) + /// - For hodge_decomposition_vector_field(): per-vertex 3D vector (global coordinates) + std::string_view input_attribute = "@hodge_input"; + + /// Output attribute name for the exact component. + /// - For hodge_decomposition_1_form(): per-edge scalar + /// - For hodge_decomposition_vector_field(): per-vertex 3D vector (global coordinates) + std::string_view exact_attribute = "@hodge_exact"; + + /// Output attribute name for the co-exact component. + /// - For hodge_decomposition_1_form(): per-edge scalar + /// - For hodge_decomposition_vector_field(): per-vertex 3D vector (global coordinates) + std::string_view coexact_attribute = "@hodge_coexact"; + + /// Output attribute name for the harmonic component. + /// - For hodge_decomposition_1_form(): per-edge scalar + /// - For hodge_decomposition_vector_field(): per-vertex 3D vector (global coordinates) + std::string_view harmonic_attribute = "@hodge_harmonic"; +}; + +/// +/// Result of Hodge decomposition functions. +/// +/// Depending on which function returned this result, the attribute IDs refer to different types: +/// - For hodge_decomposition_1_form(): per-edge scalar attributes +/// - For hodge_decomposition_vector_field(): per-vertex 3D vector attributes +/// +struct HodgeDecompositionResult +{ + /// Attribute ID of the exact component. + AttributeId exact_id = invalid_attribute_id(); + + /// Attribute ID of the co-exact component. 
+ AttributeId coexact_id = invalid_attribute_id(); + + /// Attribute ID of the harmonic component. + AttributeId harmonic_id = invalid_attribute_id(); +}; + +// ---- 1-form level ----------------------------------------------------------- + +/// +/// Compute the Helmholtz-Hodge decomposition of a 1-form on a closed surface mesh. +/// +/// Takes a discrete 1-form (scalar per edge) and decomposes it into three orthogonal components: +/// +/// @f[ +/// \omega = \omega_{\text{exact}} + \omega_{\text{coexact}} + \omega_{\text{harmonic}} +/// @f] +/// +/// where: +/// - @f$ \omega_{\text{exact}} = d_0 \alpha @f$ is the **exact** (curl-free) part, +/// - @f$ \omega_{\text{coexact}} @f$ is the **co-exact** (divergence-free) part, +/// - @f$ \omega_{\text{harmonic}} @f$ is the **harmonic** part (zero for genus-0 surfaces). +/// +/// The exact part is obtained by solving the scalar Laplacian @f$ L_0 \alpha = \delta_1 \omega @f$. +/// The co-exact part is obtained by solving a saddle-point system that minimizes the @f$ M_1 @f$-norm +/// subject to the constraint @f$ d_1 \omega_{\text{coexact}} = d_1 \omega @f$. +/// The harmonic part is the residual. +/// +/// @param[in,out] mesh Input surface mesh. Must be closed with a single connected component. +/// The input edge attribute must already exist. +/// @param[in] ops Precomputed differential operators for the mesh. +/// @param[in] options Options. The nrosy field is ignored for 1-form decomposition. +/// +/// @return Attribute IDs of the three output per-edge scalar attributes. +/// +template +LA_POLYDDG_API HodgeDecompositionResult hodge_decomposition_1_form( + SurfaceMesh& mesh, + const DifferentialOperators& ops, + HodgeDecompositionOptions options = {}); + +/// +/// Convenience overload that constructs a DifferentialOperators object internally. 
+/// +template +LA_POLYDDG_API HodgeDecompositionResult hodge_decomposition_1_form( + SurfaceMesh& mesh, + HodgeDecompositionOptions options = {}); + +// ---- Per-vertex vector field level ------------------------------------------ + +/// +/// Compute the Helmholtz-Hodge decomposition of a per-vertex vector field on a surface mesh. +/// +/// Takes a per-vertex vector field in global 3D coordinates and decomposes it into three orthogonal +/// components, each stored as a per-vertex vector field in global coordinates: +/// +/// @f[ +/// V = V_{\text{exact}} + V_{\text{coexact}} + V_{\text{harmonic}} +/// @f] +/// +/// Internally, the per-vertex vector field is converted to a 1-form (scalar per edge) using +/// midpoint integration along edges: @f$ \omega_e = \frac{V_i + V_j}{2} \cdot (x_j - x_i) @f$. +/// Any normal component of the input vectors is automatically annihilated by the dot product +/// with the edge vector, so explicit tangent-plane projection is not needed for n=1. +/// The 1-form is then decomposed using hodge_decomposition_1_form(), and each +/// component is converted back to a per-vertex vector field using the discrete sharp operator +/// followed by area-weighted averaging from faces to vertices. +/// +/// When the n-rosy option is greater than 1, each input vector is first projected into the +/// local vertex tangent plane and its angle is multiplied by n (encoding), converting the +/// n-rosy representative into a regular vector field. After decomposition, each output +/// component is decoded by dividing the tangent-plane angle by n. +/// +/// @param[in,out] mesh Input surface mesh. The input attribute (per-vertex 3D vector) must +/// already exist. Output attributes are created or overwritten. +/// @param[in] ops Precomputed differential operators for the mesh. +/// @param[in] options Options. +/// +/// @return Attribute IDs of the three output per-vertex vector attributes. 
+/// +template +LA_POLYDDG_API HodgeDecompositionResult hodge_decomposition_vector_field( + SurfaceMesh& mesh, + const DifferentialOperators& ops, + HodgeDecompositionOptions options = {}); + +/// +/// Convenience overload that constructs a DifferentialOperators object internally. +/// +template +LA_POLYDDG_API HodgeDecompositionResult hodge_decomposition_vector_field( + SurfaceMesh& mesh, + HodgeDecompositionOptions options = {}); + +/// @} + +} // namespace lagrange::polyddg diff --git a/modules/polyddg/python/examples/hodge_decomposition.py b/modules/polyddg/python/examples/hodge_decomposition.py new file mode 100644 index 00000000..e9f6ddce --- /dev/null +++ b/modules/polyddg/python/examples/hodge_decomposition.py @@ -0,0 +1,195 @@ +#!/usr/bin/env python + +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +# +# /// script +# requires-python = ">=3.9" +# dependencies = [ +# "adobe-lagrange", +# "polyscope", +# "numpy", +# ] +# /// + +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
You may obtain a copy +# of the License at https://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +# + +"""Visualize Helmholtz-Hodge decomposition of a smooth vector field. + +Computes a smooth direction field on a triangle mesh using the polyddg module, +then decomposes it into exact (curl-free), co-exact (divergence-free), and +harmonic components. All four fields are displayed side-by-side with polyscope. + +Usage: + uv run modules/polyddg/python/examples/hodge_decomposition.py input_mesh.obj +""" + +import argparse + +import numpy as np +import polyscope as ps + +import lagrange + + +def get_vertex_vectors(mesh, attr_name): + """Extract a per-vertex 3D vector attribute as an (nv, 3) numpy array.""" + return np.array(mesh.attribute(attr_name).data).reshape(-1, 3) + + +def get_facet_indices(mesh): + """Extract triangle indices as an (nf, 3) numpy array.""" + nf = mesh.num_facets + indices = np.empty((nf, 3), dtype=np.uint32) + for fi in range(nf): + verts = mesh.get_facet_vertices(fi) + indices[fi] = verts + return indices + + +def get_vertex_tangent_bases(ops, nv): + """Extract per-vertex tangent bases as (nv, 3) arrays for basisX and basisY.""" + basisX = np.empty((nv, 3)) + basisY = np.empty((nv, 3)) + for v in range(nv): + B = np.array(ops.vertex_basis(v)) # 3x2 + basisX[v] = B[:, 0] + basisY[v] = B[:, 1] + return basisX, basisY + + +def project_to_tangent(field_3d, basisX, basisY): + """Project 3D vectors to 2D tangent coordinates: (nv, 3) -> (nv, 2).""" + u = np.sum(field_3d * basisX, axis=1) + v = np.sum(field_3d * basisY, axis=1) + return np.column_stack([u, v]) + + +def register_mesh_with_field(name, vertices, faces, field_2d, basisX, basisY, 
offset, nrosy=1): + """Register a polyscope mesh translated by offset, with a tangent vector field.""" + shifted = vertices + offset + ps_mesh = ps.register_surface_mesh(name, shifted, faces, smooth_shade=True) + ps_mesh.add_tangent_vector_quantity( + "field", field_2d, basisX, basisY, n_sym=nrosy, enabled=True + ) + return ps_mesh + + +def main(): + parser = argparse.ArgumentParser( + description="Visualize Helmholtz-Hodge decomposition of a smooth vector field." + ) + parser.add_argument("input_mesh", help="Path to input triangle mesh (e.g. .obj, .ply)") + parser.add_argument( + "--nrosy", + type=int, + default=1, + help="N-rosy symmetry order for the direction field (default: 1)", + ) + args = parser.parse_args() + + # Load mesh. + mesh = lagrange.io.load_mesh(args.input_mesh) + lagrange.triangulate_polygonal_facets(mesh) + mesh.initialize_edges() + print(f"Loaded mesh: {mesh.num_vertices} vertices, {mesh.num_facets} faces") + + # Compute smooth direction field. + ops = lagrange.polyddg.DifferentialOperators(mesh) + field_attr = "@smooth_direction_field" + lagrange.polyddg.compute_smooth_direction_field( + mesh, + ops, + nrosy=args.nrosy, + direction_field_attribute=field_attr, + ) + print("Computed smooth direction field") + + # Hodge decomposition. + exact_attr = "@hodge_exact" + coexact_attr = "@hodge_coexact" + harmonic_attr = "@hodge_harmonic" + lagrange.polyddg.hodge_decomposition_vector_field( + mesh, + ops, + input_attribute=field_attr, + exact_attribute=exact_attr, + coexact_attribute=coexact_attr, + harmonic_attribute=harmonic_attr, + nrosy=args.nrosy, + ) + print("Computed Hodge decomposition") + + # Extract data. 
+ vertices = np.array(mesh.vertices).reshape(-1, 3) + faces = get_facet_indices(mesh) + nv = mesh.num_vertices + basisX, basisY = get_vertex_tangent_bases(ops, nv) + + V_input = get_vertex_vectors(mesh, field_attr) + V_exact = get_vertex_vectors(mesh, exact_attr) + V_coexact = get_vertex_vectors(mesh, coexact_attr) + V_harmonic = get_vertex_vectors(mesh, harmonic_attr) + + # Print norms. + print(f" ||V_input|| = {np.linalg.norm(V_input):.6f}") + print(f" ||V_exact|| = {np.linalg.norm(V_exact):.6f}") + print(f" ||V_coexact|| = {np.linalg.norm(V_coexact):.6f}") + print(f" ||V_harmonic||= {np.linalg.norm(V_harmonic):.6f}") + print(f" ||residual|| = {np.linalg.norm(V_input - V_exact - V_coexact - V_harmonic):.2e}") + + # Project 3D vectors to 2D tangent coordinates for polyscope. + T_input = project_to_tangent(V_input, basisX, basisY) + T_exact = project_to_tangent(V_exact, basisX, basisY) + T_coexact = project_to_tangent(V_coexact, basisX, basisY) + T_harmonic = project_to_tangent(V_harmonic, basisX, basisY) + + # Compute bounding box for side-by-side layout. + bbox_size = vertices.max(axis=0) - vertices.min(axis=0) + spacing = bbox_size[0] * 1.3 + nrosy = args.nrosy + + # Visualize with polyscope. 
+ ps.init() + ps.set_up_dir("z_up") + + register_mesh_with_field("Input", vertices, faces, T_input, basisX, basisY, [0, 0, 0], nrosy) + register_mesh_with_field( + "Exact (curl-free)", vertices, faces, T_exact, basisX, basisY, [spacing, 0, 0] + ) + register_mesh_with_field( + "Co-exact (div-free)", + vertices, + faces, + T_coexact, + basisX, + basisY, + [2 * spacing, 0, 0], + ) + register_mesh_with_field( + "Harmonic", vertices, faces, T_harmonic, basisX, basisY, [3 * spacing, 0, 0] + ) + + ps.show() + + +if __name__ == "__main__": + main() diff --git a/modules/polyddg/python/src/polyddg.cpp b/modules/polyddg/python/src/polyddg.cpp index 92dd3d30..a7d7c85f 100644 --- a/modules/polyddg/python/src/polyddg.cpp +++ b/modules/polyddg/python/src/polyddg.cpp @@ -12,6 +12,8 @@ #include #include +#include +#include #include #include @@ -63,10 +65,26 @@ void populate_polyddg_module(nb::module_& m) :return: A diagonal sparse matrix of size (#V, #V).)") .def( "star1", - [](const polyddg::DifferentialOperators& self) { return self.star1(); }, - R"(Compute the discrete Hodge star operator for 1-forms (diagonal mass matrix, size #E x #E). + [](const polyddg::DifferentialOperators& self, Scalar lambda) { + return self.star1(lambda); + }, + "beta"_a = Scalar(1), + R"(Compute the discrete Hodge star operator for 1-forms (size #E x #E). + +Following de Goes, Butts and Desbrun (ACM Trans. Graph. 2020, Section 4.4), this is the +VEM-stabilized 1-form inner product assembled from per-face Gram matrices:: + + M_f = area_f * U_f^T U_f + beta * P_f^T P_f + +where ``U_f`` is the per-face sharp operator and ``P_f = I - V_f U_f`` projects onto the +kernel of ``U_f``. The result is a symmetric positive-definite sparse matrix (non-diagonal +for polygonal meshes). ``beta = 1`` is recommended and gives the best accuracy. 
-:return: A diagonal sparse matrix of size (#E, #E).)") +This is consistent with :meth:`star0` and :meth:`star2`, which also delegate to their +respective inner-product operators. + +:param beta: Stabilization weight for the VEM projection term (default 1). +:return: A symmetric positive-definite sparse matrix of size (#E, #E).)") .def( "star2", [](const polyddg::DifferentialOperators& self) { return self.star2(); }, @@ -131,6 +149,17 @@ void populate_polyddg_module(nb::module_& m) R"(Compute the discrete polygonal sharp operator. :return: A sparse matrix representing the sharp operator.)") + .def( + "projection", + [](const polyddg::DifferentialOperators& self) { + return self.projection(); + }, + R"(Compute the projection operator. + +The projection operator measures the information loss when extracting the part of the +1-form associated with a vector field. It is a matrix of size #E by #E. + +:return: A sparse matrix representing the projection operator.)") .def( "laplacian", [](const polyddg::DifferentialOperators& self, Scalar beta) { @@ -143,6 +172,62 @@ void populate_polyddg_module(nb::module_& m) :param beta: Weight of projection term for the 1-form inner product (default: 1). :return: A sparse matrix representing the Laplacian operator.)") + .def( + "delta1", + [](const polyddg::DifferentialOperators& self, Scalar beta) { + return self.delta1(beta); + }, + nb::kw_only(), + "beta"_a = 1, + R"(Compute the discrete co-differential operator :math:`\delta_1` in weak form. + +The co-differential is the formal adjoint of ``d0`` with respect to the 1-form inner product. +In this implementation ``delta1`` returns the weak-form operator :math:`d_0^T \cdot M_1`: when applied to a +primal per-edge scalar 1-form it produces a dual 0-form (an integrated scalar per vertex star), +not a pointwise primal per-vertex quantity. To obtain a primal 0-form one must apply the inverse +vertex mass matrix, e.g. 
:math:`M_0^{-1} \cdot \text{delta1}(\text{beta}) \cdot \alpha` for a 1-form :math:`\alpha`. + +Equal to ``divergence(beta)`` in weak form. + +:param beta: Weight of projection term for the 1-form inner product (default: 1). + +:return: A sparse matrix of size (#V, #E) implementing :math:`d_0^T \cdot M_1`.)") + .def( + "delta2", + [](const polyddg::DifferentialOperators& self) { return self.delta2(); }, + R"(Compute the discrete co-differential operator (:math:`\delta_2 : \Omega^2 \to \Omega^1`). + +The co-differential is the formal adjoint of ``d1`` with respect to the 2-form inner product. +It maps a per-facet scalar 2-form to a per-edge scalar 1-form. + +:return: A sparse matrix of size (#E, #F).)") + .def( + "laplacian2", + [](const polyddg::DifferentialOperators& self) { + return self.laplacian2(); + }, + R"(Compute the discrete Laplacian on 2-forms (:math:`\Delta_2 : \Omega^2 \to \Omega^2`). + +Equal to ``d1 · delta2()``. Analogous to ``laplacian()`` but acting on per-facet +scalar 2-forms. Required for computing the co-exact component in Helmholtz-Hodge decomposition. + +:return: A sparse matrix of size (#F, #F).)") + .def( + "laplacian1", + [](const polyddg::DifferentialOperators& self, Scalar beta) { + return self.laplacian1(beta); + }, + nb::kw_only(), + "beta"_a = 1, + R"(Compute the discrete Hodge Laplacian on 1-forms (:math:`\Delta_1 : \Omega^1 \to \Omega^1`). + +Combines the exact part (``d0 · delta1(beta)``) and the co-exact part +(``delta2() · d1()``). Required for full Helmholtz-Hodge decomposition. +Distinct from ``connection_laplacian()``, which acts on tangent vector fields at vertices. + +:param beta: Weight of projection term for the 1-form inner product (default: 1). + +:return: A sparse matrix of size (#E, #E).)") .def( "vertex_tangent_coordinates", [](const polyddg::DifferentialOperators& self) { @@ -580,11 +665,27 @@ determinant gives the Gaussian curvature. 
.def_prop_ro( "vertex_normal_attribute_id", &polyddg::DifferentialOperators::get_vertex_normal_attribute_id, - "Attribute ID of the per-vertex normal attribute."); + "Attribute ID of the per-vertex normal attribute.") + .def( + "vertex_basis", + &polyddg::DifferentialOperators::vertex_basis, + "vid"_a, + R"(Compute the local tangent basis for a single vertex. + +:param vid: Vertex index. +:return: A 3x2 matrix whose columns are orthonormal tangent vectors.)") + .def( + "facet_basis", + &polyddg::DifferentialOperators::facet_basis, + "fid"_a, + R"(Compute the local tangent basis for a single facet. + +:param fid: Facet index. +:return: A 3x2 matrix whose columns are orthonormal tangent vectors.)"); // Default attribute names are taken directly from PrincipalCurvaturesOptions to avoid // duplication if the defaults change. - static const polyddg::PrincipalCurvaturesOptions default_pc_opts{}; + const polyddg::PrincipalCurvaturesOptions default_pc_opts{}; m.def( "compute_principal_curvatures", @@ -676,6 +777,267 @@ are the principal directions. All four quantities are stored as vertex attribute (default: ``"@principal_direction_max"``). 
:return: A tuple ``(kappa_min_id, kappa_max_id, direction_min_id, direction_max_id)`` of attribute IDs.)"); + + // ---- compute_smooth_direction_field ---- + const polyddg::SmoothDirectionFieldOptions default_sdf_opts{}; + + m.def( + "compute_smooth_direction_field", + [](SurfaceMesh& mesh, + const polyddg::DifferentialOperators& ops, + uint8_t nrosy, + double beta, + std::string_view alignment_attribute, + double alignment_weight, + std::string_view direction_field_attribute) { + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = nrosy; + opts.lambda = beta; + opts.alignment_attribute = alignment_attribute; + opts.alignment_weight = alignment_weight; + opts.direction_field_attribute = direction_field_attribute; + return polyddg::compute_smooth_direction_field(mesh, ops, opts); + }, + "mesh"_a, + "ops"_a, + nb::kw_only(), + "nrosy"_a = default_sdf_opts.nrosy, + "beta"_a = default_sdf_opts.lambda, + "alignment_attribute"_a = default_sdf_opts.alignment_attribute, + "alignment_weight"_a = default_sdf_opts.alignment_weight, + "direction_field_attribute"_a = default_sdf_opts.direction_field_attribute, + R"(Compute the globally smoothest n-direction field on a surface mesh. + +Based on: Knöppel et al., "Globally optimal direction fields", ACM ToG 32(4), 2013. + +Without alignment constraints (``alignment_attribute`` is empty), solves the generalized +eigenvalue problem :math:`L u = \sigma M u` for the smallest eigenvector. The result +minimizes the Dirichlet energy of the connection. + +With alignment constraints, reads per-vertex prescribed 3-D tangent vectors from the given +attribute (zero-length vectors are unconstrained) and solves the shifted linear system +:math:`(L - \alpha M) u = M q`, where :math:`q` is the M-normalized prescribed field and +:math:`\alpha = \texttt{alignment\_lambda} \cdot \sigma_{\min}`. + +:param mesh: Input surface mesh (modified in place with the new attribute). +:param ops: Precomputed :class:`DifferentialOperators` for the mesh. 
+:param nrosy: Symmetry order of the direction field (1 = vector field, 2 = line field, + 4 = cross field, default: 4). +:param beta: Stabilization weight for the VEM projection term in the connection Laplacian + (default: 1). +:param alignment_attribute: Name of a per-vertex 3-D alignment vector attribute (zero = + unconstrained). If empty, the unconstrained smoothest field is computed. +:param alignment_weight: Scaling factor for the spectral shift (default: 1). The actual + shift is ``alignment_weight * sigma_min``, where ``sigma_min`` is the smallest eigenvalue + of the connection Laplacian (computed automatically). Values in (0, 1) give weaker + alignment (more smoothness). +:param direction_field_attribute: Output attribute name for the per-vertex 3-D direction + field (default: ``"@smooth_direction_field"``). + +:return: Attribute ID of the output per-vertex direction field.)"); + + // ---- hodge_decomposition_1_form ---- + constexpr polyddg::HodgeDecompositionOptions default_hd_1form_opts{}; + + m.def( + "hodge_decomposition_1_form", + [](SurfaceMesh& mesh, + const polyddg::DifferentialOperators& ops, + std::string_view input_attribute, + std::string_view exact_attribute, + std::string_view coexact_attribute, + std::string_view harmonic_attribute, + Scalar beta) { + polyddg::HodgeDecompositionOptions opts; + opts.input_attribute = input_attribute; + opts.exact_attribute = exact_attribute; + opts.coexact_attribute = coexact_attribute; + opts.harmonic_attribute = harmonic_attribute; + opts.lambda = beta; + auto r = polyddg::hodge_decomposition_1_form(mesh, ops, opts); + return std::make_tuple(r.exact_id, r.coexact_id, r.harmonic_id); + }, + "mesh"_a, + "ops"_a, + nb::kw_only(), + "input_attribute"_a = default_hd_1form_opts.input_attribute, + "exact_attribute"_a = default_hd_1form_opts.exact_attribute, + "coexact_attribute"_a = default_hd_1form_opts.coexact_attribute, + "harmonic_attribute"_a = default_hd_1form_opts.harmonic_attribute, + "beta"_a = Scalar(1), 
+        R"(Compute the Helmholtz-Hodge decomposition of a 1-form on a closed surface mesh.
+
+Takes a discrete 1-form (per-edge scalar) and decomposes it into three orthogonal
+components, each stored as a per-edge scalar attribute:
+
+.. math::
+    \omega = \omega_{\text{exact}} + \omega_{\text{coexact}} + \omega_{\text{harmonic}}
+
+:param mesh: Input surface mesh (modified in place with new attributes). The input attribute
+    (per-edge scalar) must already exist on the mesh.
+:param ops: Precomputed :class:`DifferentialOperators` for the mesh.
+:param input_attribute: Edge attribute name of the input 1-form
+    (default: ``"@hodge_input"``).
+:param exact_attribute: Output edge attribute name for the exact component
+    (default: ``"@hodge_exact"``).
+:param coexact_attribute: Output edge attribute name for the co-exact component
+    (default: ``"@hodge_coexact"``).
+:param harmonic_attribute: Output edge attribute name for the harmonic component
+    (default: ``"@hodge_harmonic"``).
+:param beta: Stabilization weight for the VEM 1-form inner product (default: 1).
+ +:return: A tuple ``(exact_id, coexact_id, harmonic_id)`` of edge attribute IDs.)"); + + m.def( + "hodge_decomposition_1_form", + [](SurfaceMesh& mesh, + std::string_view input_attribute, + std::string_view exact_attribute, + std::string_view coexact_attribute, + std::string_view harmonic_attribute, + Scalar beta) { + polyddg::HodgeDecompositionOptions opts; + opts.input_attribute = input_attribute; + opts.exact_attribute = exact_attribute; + opts.coexact_attribute = coexact_attribute; + opts.harmonic_attribute = harmonic_attribute; + opts.lambda = beta; + auto r = polyddg::hodge_decomposition_1_form(mesh, opts); + return std::make_tuple(r.exact_id, r.coexact_id, r.harmonic_id); + }, + "mesh"_a, + nb::kw_only(), + "input_attribute"_a = default_hd_1form_opts.input_attribute, + "exact_attribute"_a = default_hd_1form_opts.exact_attribute, + "coexact_attribute"_a = default_hd_1form_opts.coexact_attribute, + "harmonic_attribute"_a = default_hd_1form_opts.harmonic_attribute, + "beta"_a = Scalar(1), + R"(Compute the Helmholtz-Hodge decomposition of a 1-form on a closed surface mesh. + +Convenience overload that constructs a :class:`DifferentialOperators` instance internally. + +:param mesh: Input surface mesh (modified in place with new attributes). The input attribute + (per-edge scalar) must already exist on the mesh. +:param input_attribute: Edge attribute name of the input 1-form + (default: ``"@hodge_1form_input"``). +:param exact_attribute: Output edge attribute name for the exact component + (default: ``"@hodge_1form_exact"``). +:param coexact_attribute: Output edge attribute name for the co-exact component + (default: ``"@hodge_1form_coexact"``). +:param harmonic_attribute: Output edge attribute name for the harmonic component + (default: ``"@hodge_1form_harmonic"``). +:param beta: Stabilization weight for the VEM 1-form inner product (default: 1). 
+ +:return: A tuple ``(exact_id, coexact_id, harmonic_id)`` of edge attribute IDs.)"); + + // ---- hodge_decomposition_vector_field ---- + constexpr polyddg::HodgeDecompositionOptions default_hd_vf_opts{}; + + m.def( + "hodge_decomposition_vector_field", + [](SurfaceMesh& mesh, + const polyddg::DifferentialOperators& ops, + std::string_view input_attribute, + std::string_view exact_attribute, + std::string_view coexact_attribute, + std::string_view harmonic_attribute, + Scalar beta, + uint8_t nrosy) { + polyddg::HodgeDecompositionOptions opts; + opts.input_attribute = input_attribute; + opts.exact_attribute = exact_attribute; + opts.coexact_attribute = coexact_attribute; + opts.harmonic_attribute = harmonic_attribute; + opts.lambda = beta; + opts.nrosy = nrosy; + auto r = polyddg::hodge_decomposition_vector_field(mesh, ops, opts); + return std::make_tuple(r.exact_id, r.coexact_id, r.harmonic_id); + }, + "mesh"_a, + "ops"_a, + nb::kw_only(), + "input_attribute"_a = default_hd_vf_opts.input_attribute, + "exact_attribute"_a = default_hd_vf_opts.exact_attribute, + "coexact_attribute"_a = default_hd_vf_opts.coexact_attribute, + "harmonic_attribute"_a = default_hd_vf_opts.harmonic_attribute, + "beta"_a = Scalar(1), + "nrosy"_a = uint8_t(1), + R"(Compute the Helmholtz-Hodge decomposition of a per-vertex vector field on a surface mesh. + +Takes a per-vertex vector field in global 3D coordinates and decomposes it into three +orthogonal components, each stored as a per-vertex 3D vector attribute: + +.. math:: + V = V_{\text{exact}} + V_{\text{coexact}} + V_{\text{harmonic}} + +Internally, the vector field is converted to a 1-form and decomposed via +:func:`hodge_decomposition_1_form`, then each component is converted back. + +When ``nrosy > 1``, the input is treated as one representative vector of an n-rosy field. + +:param mesh: Input surface mesh (modified in place with new attributes). The input attribute + (per-vertex 3D vector) must already exist on the mesh. 
+:param ops: Precomputed :class:`DifferentialOperators` for the mesh. +:param input_attribute: Vertex attribute name of the input vector field + (default: ``"@hodge_input"``). +:param exact_attribute: Output vertex attribute name for the exact component + (default: ``"@hodge_exact"``). +:param coexact_attribute: Output vertex attribute name for the co-exact component + (default: ``"@hodge_coexact"``). +:param harmonic_attribute: Output vertex attribute name for the harmonic component + (default: ``"@hodge_harmonic"``). +:param beta: Stabilization weight for the VEM 1-form inner product (default: 1). +:param nrosy: N-rosy symmetry order (default: 1 for plain vector fields). + +:return: A tuple ``(exact_id, coexact_id, harmonic_id)`` of vertex attribute IDs.)"); + + m.def( + "hodge_decomposition_vector_field", + [](SurfaceMesh& mesh, + std::string_view input_attribute, + std::string_view exact_attribute, + std::string_view coexact_attribute, + std::string_view harmonic_attribute, + Scalar beta, + uint8_t nrosy) { + polyddg::HodgeDecompositionOptions opts; + opts.input_attribute = input_attribute; + opts.exact_attribute = exact_attribute; + opts.coexact_attribute = coexact_attribute; + opts.harmonic_attribute = harmonic_attribute; + opts.lambda = beta; + opts.nrosy = nrosy; + auto r = polyddg::hodge_decomposition_vector_field(mesh, opts); + return std::make_tuple(r.exact_id, r.coexact_id, r.harmonic_id); + }, + "mesh"_a, + nb::kw_only(), + "input_attribute"_a = default_hd_vf_opts.input_attribute, + "exact_attribute"_a = default_hd_vf_opts.exact_attribute, + "coexact_attribute"_a = default_hd_vf_opts.coexact_attribute, + "harmonic_attribute"_a = default_hd_vf_opts.harmonic_attribute, + "beta"_a = Scalar(1), + "nrosy"_a = uint8_t(1), + R"(Compute the Helmholtz-Hodge decomposition of a per-vertex vector field on a surface mesh. + +Convenience overload that constructs a :class:`DifferentialOperators` instance internally. 
+ +When ``nrosy > 1``, the input is treated as one representative vector of an n-rosy field. + +:param mesh: Input surface mesh (modified in place with new attributes). The input attribute + (per-vertex 3D vector) must already exist on the mesh. +:param input_attribute: Vertex attribute name of the input vector field + (default: ``"@hodge_input"``). +:param exact_attribute: Output vertex attribute name for the exact component + (default: ``"@hodge_exact"``). +:param coexact_attribute: Output vertex attribute name for the co-exact component + (default: ``"@hodge_coexact"``). +:param harmonic_attribute: Output vertex attribute name for the harmonic component + (default: ``"@hodge_harmonic"``). +:param beta: Stabilization weight for the VEM 1-form inner product (default: 1). +:param nrosy: N-rosy symmetry order (default: 1 for plain vector fields). + +:return: A tuple ``(exact_id, coexact_id, harmonic_id)`` of vertex attribute IDs.)"); } } // namespace lagrange::python diff --git a/modules/polyddg/python/tests/conftest.py b/modules/polyddg/python/tests/conftest.py new file mode 100644 index 00000000..daeea8ae --- /dev/null +++ b/modules/polyddg/python/tests/conftest.py @@ -0,0 +1,96 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +# + +import lagrange +import numpy as np +import pytest + + +@pytest.fixture +def triangle(): + """A triangle mesh with vertices at identity matrix positions. 
+ + This fixture uses np.eye(3) for vertex positions, which creates a triangle + in 3D space with vertices at [1,0,0], [0,1,0], [0,0,1]. This is useful for + testing differential operators as it creates a non-degenerate triangle. + """ + mesh = lagrange.SurfaceMesh() + mesh.add_vertices(np.eye(3)) + mesh.add_triangle(0, 1, 2) + assert mesh.num_vertices == 3 + assert mesh.num_facets == 1 + return mesh + + +@pytest.fixture +def nonplanar_quad(): + """A non-planar quadrilateral mesh. + + Creates a quad with vertices that are not coplanar, useful for testing + differential operators on non-planar polygonal faces. + """ + mesh = lagrange.SurfaceMesh() + mesh.add_vertex([0, 0, 0]) + mesh.add_vertex([1, 0, 1]) + mesh.add_vertex([1, 1, 0]) + mesh.add_vertex([0, 1, 1]) + mesh.add_quad(0, 1, 2, 3) + return mesh + + +@pytest.fixture +def pyramid(): + """A pyramid mesh (4 triangles + 1 quad base). + + Creates a pyramid with a square base and four triangular faces meeting at an apex. + This is a mixed polygonal mesh useful for testing polyddg operators. + """ + mesh = lagrange.SurfaceMesh() + mesh.add_vertex([0, 0, 0]) + mesh.add_vertex([1, 0, 0]) + mesh.add_vertex([1, 1, 0]) + mesh.add_vertex([0, 1, 0]) + mesh.add_vertex([0.5, 0.5, 1]) + mesh.add_triangle(0, 1, 4) + mesh.add_triangle(1, 2, 4) + mesh.add_triangle(2, 3, 4) + mesh.add_triangle(3, 0, 4) + mesh.add_quad(0, 3, 2, 1) + return mesh + + +@pytest.fixture +def octahedron(): + """An octahedron mesh (genus-0 closed surface). + + Creates a regular octahedron with 6 vertices and 8 triangular faces. + This is a closed manifold mesh with genus 0, useful for testing Hodge decomposition + and other topological operations. The harmonic component of Hodge decomposition + should vanish on this mesh. 
+ """ + mesh = lagrange.SurfaceMesh() + mesh.add_vertex([1, 0, 0]) + mesh.add_vertex([0, 1, 0]) + mesh.add_vertex([-1, 0, 0]) + mesh.add_vertex([0, -1, 0]) + mesh.add_vertex([0, 0, 1]) + mesh.add_vertex([0, 0, -1]) + mesh.add_triangle(0, 1, 4) + mesh.add_triangle(1, 2, 4) + mesh.add_triangle(2, 3, 4) + mesh.add_triangle(3, 0, 4) + mesh.add_triangle(0, 5, 3) + mesh.add_triangle(1, 5, 0) + mesh.add_triangle(2, 5, 1) + mesh.add_triangle(3, 5, 2) + mesh.initialize_edges() + return mesh diff --git a/modules/polyddg/python/tests/test_polyddg.py b/modules/polyddg/python/tests/test_polyddg.py index 79e68a7e..4d7d7cfd 100644 --- a/modules/polyddg/python/tests/test_polyddg.py +++ b/modules/polyddg/python/tests/test_polyddg.py @@ -10,47 +10,10 @@ # governing permissions and limitations under the License. # import lagrange -import pytest +import lagrange.polyddg import numpy as np -@pytest.fixture -def triangle(): - mesh = lagrange.SurfaceMesh() - mesh.add_vertices(np.eye(3)) - mesh.add_triangle(0, 1, 2) - assert mesh.num_vertices == 3 - assert mesh.num_facets == 1 - return mesh - - -@pytest.fixture -def nonplanar_quad(): - mesh = lagrange.SurfaceMesh() - mesh.add_vertex([0, 0, 0]) - mesh.add_vertex([1, 0, 1]) - mesh.add_vertex([1, 1, 0]) - mesh.add_vertex([0, 1, 1]) - mesh.add_quad(0, 1, 2, 3) - return mesh - - -@pytest.fixture -def pyramid(): - mesh = lagrange.SurfaceMesh() - mesh.add_vertex([0, 0, 0]) - mesh.add_vertex([1, 0, 0]) - mesh.add_vertex([1, 1, 0]) - mesh.add_vertex([0, 1, 0]) - mesh.add_vertex([0.5, 0.5, 1]) - mesh.add_triangle(0, 1, 4) - mesh.add_triangle(1, 2, 4) - mesh.add_triangle(2, 3, 4) - mesh.add_triangle(3, 0, 4) - mesh.add_quad(0, 3, 2, 1) - return mesh - - class TestPolyDDG: """Tests for the PolyDDG discrete differential operators based on the lemmas in the PolyDDG paper. 
@@ -150,3 +113,192 @@ def test_pyramid(self, pyramid): self.lemma_5(pyramid) self.lemma_6(pyramid) self.lemma_7(pyramid) + + +class TestHodgeDecomposition: + """Tests for Hodge decomposition functions.""" + + def test_hodge_decomposition_vector_field_basic(self, octahedron): + """Test basic vector field decomposition - reconstruction property.""" + ops = lagrange.polyddg.DifferentialOperators(octahedron) + + # Create random vector field + np.random.seed(42) + nv = octahedron.num_vertices + V_input = np.random.randn(nv, 3) + + # Store as attribute + input_attr = "@test_vf_input" + octahedron.create_attribute( + input_attr, + element=lagrange.AttributeElement.Vertex, + usage=lagrange.AttributeUsage.Vector, + initial_values=V_input, + ) + + # Decompose + exact_id, coexact_id, harmonic_id = lagrange.polyddg.hodge_decomposition_vector_field( + octahedron, + ops, + input_attribute=input_attr, + exact_attribute="@test_exact", + coexact_attribute="@test_coexact", + harmonic_attribute="@test_harmonic", + ) + + # Get results + V_exact = np.array(octahedron.attribute("@test_exact").data).reshape(-1, 3) + V_coexact = np.array(octahedron.attribute("@test_coexact").data).reshape(-1, 3) + V_harmonic = np.array(octahedron.attribute("@test_harmonic").data).reshape(-1, 3) + + # Verify reconstruction: V = V_exact + V_coexact + V_harmonic + residual = V_input - V_exact - V_coexact - V_harmonic + assert np.linalg.norm(residual) < 1e-12 + + def test_hodge_decomposition_vector_field_decompose_and_reconstruct(self, octahedron): + """Test that decomposition components can be accessed.""" + ops = lagrange.polyddg.DifferentialOperators(octahedron) + + np.random.seed(123) + nv = octahedron.num_vertices + V_input = np.random.randn(nv, 3) + + input_attr = "@test_vf_genus0" + octahedron.create_attribute( + input_attr, + element=lagrange.AttributeElement.Vertex, + usage=lagrange.AttributeUsage.Vector, + initial_values=V_input, + ) + + exact_id, coexact_id, harmonic_id = 
lagrange.polyddg.hodge_decomposition_vector_field( + octahedron, + ops, + input_attribute=input_attr, + exact_attribute="@test_exact_g0", + coexact_attribute="@test_coexact_g0", + harmonic_attribute="@test_harmonic_g0", + ) + + # Verify all three components are created + assert octahedron.has_attribute("@test_exact_g0") + assert octahedron.has_attribute("@test_coexact_g0") + assert octahedron.has_attribute("@test_harmonic_g0") + + # Verify they are vertex vector attributes + V_exact = np.array(octahedron.attribute("@test_exact_g0").data).reshape(-1, 3) + V_coexact = np.array(octahedron.attribute("@test_coexact_g0").data).reshape(-1, 3) + V_harmonic = np.array(octahedron.attribute("@test_harmonic_g0").data).reshape(-1, 3) + + assert V_exact.shape == (nv, 3) + assert V_coexact.shape == (nv, 3) + assert V_harmonic.shape == (nv, 3) + + # Verify reconstruction holds + residual = V_input - V_exact - V_coexact - V_harmonic + assert np.linalg.norm(residual) < 1e-12 + + def test_hodge_decomposition_vector_field_nrosy(self, octahedron): + """Test n-rosy decomposition (n=4) creates valid output.""" + ops = lagrange.polyddg.DifferentialOperators(octahedron) + + np.random.seed(456) + nv = octahedron.num_vertices + V_input = np.random.randn(nv, 3) + + input_attr = "@test_vf_4rosy" + octahedron.create_attribute( + input_attr, + element=lagrange.AttributeElement.Vertex, + usage=lagrange.AttributeUsage.Vector, + initial_values=V_input, + ) + + exact_id, coexact_id, harmonic_id = lagrange.polyddg.hodge_decomposition_vector_field( + octahedron, + ops, + input_attribute=input_attr, + exact_attribute="@test_exact_4", + coexact_attribute="@test_coexact_4", + harmonic_attribute="@test_harmonic_4", + nrosy=4, + ) + + # Verify all three components are created + assert octahedron.has_attribute("@test_exact_4") + assert octahedron.has_attribute("@test_coexact_4") + assert octahedron.has_attribute("@test_harmonic_4") + + # Verify they are vertex vector attributes + V_exact = 
np.array(octahedron.attribute("@test_exact_4").data).reshape(-1, 3) + V_coexact = np.array(octahedron.attribute("@test_coexact_4").data).reshape(-1, 3) + V_harmonic = np.array(octahedron.attribute("@test_harmonic_4").data).reshape(-1, 3) + + assert V_exact.shape == (nv, 3) + assert V_coexact.shape == (nv, 3) + assert V_harmonic.shape == (nv, 3) + + def test_hodge_decomposition_1_form_basic(self, octahedron): + """Test basic 1-form decomposition - reconstruction property.""" + ops = lagrange.polyddg.DifferentialOperators(octahedron) + + # Create random 1-form (per-edge scalar) + np.random.seed(789) + ne = octahedron.num_edges + omega_input = np.random.randn(ne) + + # Store as attribute + input_attr = "@test_1form_input" + octahedron.create_attribute( + input_attr, + element=lagrange.AttributeElement.Edge, + usage=lagrange.AttributeUsage.Scalar, + initial_values=omega_input, + ) + + # Decompose + exact_id, coexact_id, harmonic_id = lagrange.polyddg.hodge_decomposition_1_form( + octahedron, + ops, + input_attribute=input_attr, + exact_attribute="@test_1form_exact", + coexact_attribute="@test_1form_coexact", + harmonic_attribute="@test_1form_harmonic", + ) + + # Get results + omega_exact = np.array(octahedron.attribute("@test_1form_exact").data) + omega_coexact = np.array(octahedron.attribute("@test_1form_coexact").data) + omega_harmonic = np.array(octahedron.attribute("@test_1form_harmonic").data) + + # Verify reconstruction: omega = omega_exact + omega_coexact + omega_harmonic + residual = omega_input - omega_exact - omega_coexact - omega_harmonic + assert np.linalg.norm(residual) < 1e-12 + + def test_hodge_decomposition_1_form_harmonic_vanishes_genus_0(self, octahedron): + """Test that harmonic 1-form vanishes on genus-0 surfaces.""" + ops = lagrange.polyddg.DifferentialOperators(octahedron) + + np.random.seed(101) + ne = octahedron.num_edges + omega_input = np.random.randn(ne) + + input_attr = "@test_1form_genus0" + octahedron.create_attribute( + input_attr, + 
element=lagrange.AttributeElement.Edge, + usage=lagrange.AttributeUsage.Scalar, + initial_values=omega_input, + ) + + lagrange.polyddg.hodge_decomposition_1_form( + octahedron, + ops, + input_attribute=input_attr, + exact_attribute="@test_1form_exact_g0", + coexact_attribute="@test_1form_coexact_g0", + harmonic_attribute="@test_1form_harmonic_g0", + ) + + omega_harmonic = np.array(octahedron.attribute("@test_1form_harmonic_g0").data) + assert np.linalg.norm(omega_harmonic) < 1e-10 diff --git a/modules/polyddg/src/DifferentialOperators.cpp b/modules/polyddg/src/DifferentialOperators.cpp index b49a2578..0c39d46b 100644 --- a/modules/polyddg/src/DifferentialOperators.cpp +++ b/modules/polyddg/src/DifferentialOperators.cpp @@ -239,48 +239,11 @@ Eigen::SparseMatrix DifferentialOperators::star0() const return inner_product_0_form(); } -// star1 operator +// star1 operator — delegates to inner_product_1_form, consistent with star0 and star2. template -Eigen::SparseMatrix DifferentialOperators::star1() const +Eigen::SparseMatrix DifferentialOperators::star1(Scalar lambda) const { - const Index num_edges = m_mesh.get_num_edges(); - - auto vertices = vertex_view(m_mesh); - auto facet_centroids = attribute_matrix_view(m_mesh, m_centroid_id); - - std::vector> entries(num_edges); - for (Index eid = 0; eid < num_edges; eid++) { - auto [v0, v1] = m_mesh.get_edge_vertices(eid); - Scalar primal_edge_length = (vertices.row(v1) - vertices.row(v0)).norm(); - - Vector c0, c1; - - auto cid = m_mesh.get_first_corner_around_edge(eid); - la_debug_assert(cid != invalid(), "Invalid corner index for boundary edge."); - auto fid = m_mesh.get_corner_facet(cid); - c0 = facet_centroids.row(fid).template head<3>(); - auto edge_valence = m_mesh.count_num_corners_around_edge(eid); - la_debug_assert(edge_valence > 0, "Edge valence must be positive."); - - if (edge_valence == 1) { - c1 = (vertices.row(v0) + vertices.row(v1)) / 2; - } else if (edge_valence == 2) { - auto cid2 = 
m_mesh.get_next_corner_around_edge(cid); - auto fid2 = m_mesh.get_corner_facet(cid2); - c1 = facet_centroids.row(fid2).template head<3>(); - } else { - throw std::runtime_error("star1 is only implemented for manifold meshes."); - } - Scalar dual_edge_length = (c1 - c0).norm(); - entries[eid] = Eigen::Triplet( - static_cast(eid), - static_cast(eid), - dual_edge_length / primal_edge_length); - } - - Eigen::SparseMatrix M(num_edges, num_edges); - M.setFromTriplets(entries.begin(), entries.end()); - return M; + return inner_product_1_form(lambda); } // star2 operator @@ -542,6 +505,50 @@ Eigen::SparseMatrix DifferentialOperators::laplacian(Scal return D0.transpose() * M * D0; } +// co-differential δ₁ : Ω¹ → Ω⁰ (size #V × #E) +// δ₁ = d0ᵀ · M₁(λ), the adjoint of d0 w.r.t. the 1-form inner product. +// Identical to divergence(lambda). +template +Eigen::SparseMatrix DifferentialOperators::delta1(Scalar lambda) const +{ + return divergence(lambda); +} + +// co-differential δ₂ : Ω² → Ω¹ (size #E × #F) +// δ₂ = d1ᵀ · M₂, the adjoint of d1 w.r.t. the 2-form inner product. +// Consistent with delta1 = d0ᵀ · M₁ (both omit the left-hand mass-matrix inverse). +template +Eigen::SparseMatrix DifferentialOperators::delta2() const +{ + auto D1 = d1(); + auto M2 = inner_product_2_form(); + return D1.transpose() * M2; +} + +// 2-form Laplacian Δ₂ : Ω² → Ω² (size #F × #F) +// Δ₂ = d1 · δ₂ = d1 · d1ᵀ · M₂. +// Analogous to laplacian() = d0ᵀ · M₁ · d0, but for 2-forms. 
+template +Eigen::SparseMatrix DifferentialOperators::laplacian2() const +{ + auto D1 = d1(); + auto Cd = delta2(); + return D1 * Cd; +} + +// Hodge Laplacian on 1-forms Δ₁ : Ω¹ → Ω¹ (size #E × #E) +// Δ₁ = d0 · d0ᵀ · M₁(λ) + d1ᵀ · M₂ · d1 +// ╰── exact / downward ──╯ ╰── co-exact / upward ──╯ +template +Eigen::SparseMatrix DifferentialOperators::laplacian1(Scalar lambda) const +{ + auto D0 = d0(); + auto D1 = d1(); + auto M1 = inner_product_1_form(lambda); + auto M2 = inner_product_2_form(); + return D0 * (D0.transpose() * M1) + D1.transpose() * M2 * D1; +} + // Vertex tangent bases template Eigen::SparseMatrix DifferentialOperators::vertex_tangent_coordinates() const @@ -1029,17 +1036,22 @@ DifferentialOperators::levi_civita_nrosy(Index fid, Index lv, Ind Vector nv = vertex_normal.row(vid); auto Q = Eigen::Quaternion::FromTwoVectors(nv, nf).matrix(); - if (n != 1) { - la_debug_assert(n > 1, "n should be positive."); + // Compute the n=1 connection in the 2D tangent plane, then raise the 2D + // rotation to the n-th power. This correctly multiplies the tangent-plane + // rotation angle by n for the n-rosy connection. + Eigen::Matrix R2d = Tf.transpose() * Q * Tv; - Eigen::Matrix R = Q; + la_runtime_assert(n >= 1, "levi_civita_nrosy: n must be >= 1."); + + if (n != 1) { + Eigen::Matrix R2d_n = R2d; for (Index i = 1; i < n; i++) { - R = R * Q; + R2d_n = R2d_n * R2d; } - Q = R; + return R2d_n; } - return Tf.transpose() * Q * Tv; + return R2d; } // Per-facet Levi-Civita operator @@ -1562,6 +1574,9 @@ Eigen::Matrix DifferentialOperators::vertex_basis(I if (c == invalid()) { // All incident edges are degenerate, pick arbitrary u u = n.unitOrthogonal(); + } else { + // Project u onto the tangent plane so both basis vectors are orthogonal to n. 
+ u = (u - u.dot(n) * n).stableNormalized(); } Vector v = n.cross(u); diff --git a/modules/polyddg/src/compute_principal_curvatures.cpp b/modules/polyddg/src/compute_principal_curvatures.cpp index e89acb43..9cb53793 100644 --- a/modules/polyddg/src/compute_principal_curvatures.cpp +++ b/modules/polyddg/src/compute_principal_curvatures.cpp @@ -11,6 +11,7 @@ */ #include +#include #include #include #include @@ -27,6 +28,7 @@ LA_IGNORE_MAYBE_UNINITIALIZED_START #include LA_IGNORE_MAYBE_UNINITIALIZED_END +#include #include namespace lagrange::polyddg { @@ -81,15 +83,29 @@ PrincipalCurvaturesResult compute_principal_curvatures( Eigen::Matrix S = ops.adjoint_shape_operator(vid); Eigen::SelfAdjointEigenSolver> solver(S); - kappa_min_data(vid, 0) = solver.eigenvalues()(0); - kappa_max_data(vid, 0) = solver.eigenvalues()(1); + if (solver.info() != Eigen::Success) { + // Eigen-decomposition failed (e.g. NaN/Inf in S from degenerate geometry). + // Write sentinel NaNs and skip normalization. + logger().warn( + "compute_principal_curvatures: eigen-decomposition failed for vertex {}", + vid); + kappa_min_data(vid, 0) = std::numeric_limits::quiet_NaN(); + kappa_max_data(vid, 0) = std::numeric_limits::quiet_NaN(); + direction_min_data.row(vid).setConstant(std::numeric_limits::quiet_NaN()); + direction_max_data.row(vid).setConstant(std::numeric_limits::quiet_NaN()); + } else { + kappa_min_data(vid, 0) = solver.eigenvalues()(0); + kappa_max_data(vid, 0) = solver.eigenvalues()(1); - // Map 2-D eigenvectors back to 3-D through the vertex tangent basis. - Eigen::Matrix B = ops.vertex_basis(vid); - LA_IGNORE_ARRAY_BOUNDS_BEGIN - direction_min_data.row(vid) = (B * solver.eigenvectors().col(0)).normalized().transpose(); - direction_max_data.row(vid) = (B * solver.eigenvectors().col(1)).normalized().transpose(); - LA_IGNORE_ARRAY_BOUNDS_END + // Map 2-D eigenvectors back to 3-D through the vertex tangent basis. 
+ Eigen::Matrix B = ops.vertex_basis(vid); + LA_IGNORE_ARRAY_BOUNDS_BEGIN + direction_min_data.row(vid) = + (B * solver.eigenvectors().col(0)).normalized().transpose(); + direction_max_data.row(vid) = + (B * solver.eigenvectors().col(1)).normalized().transpose(); + LA_IGNORE_ARRAY_BOUNDS_END + } }); return {kappa_min_id, kappa_max_id, direction_min_id, direction_max_id}; diff --git a/modules/polyddg/src/compute_smooth_direction_field.cpp b/modules/polyddg/src/compute_smooth_direction_field.cpp new file mode 100644 index 00000000..adde54fe --- /dev/null +++ b/modules/polyddg/src/compute_smooth_direction_field.cpp @@ -0,0 +1,216 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include + +#include "nrosy_utils.h" + +#include +#include +#include +#include +#include + +#include +#include + +#include +#include + +namespace lagrange::polyddg { + +using solver::SolverLDLT; + +template +AttributeId compute_smooth_direction_field( + SurfaceMesh& mesh, + const DifferentialOperators& ops, + SmoothDirectionFieldOptions options) +{ + la_runtime_assert(options.nrosy >= 1, "compute_smooth_direction_field: nrosy must be >= 1."); + + const Index num_vertices = mesh.get_num_vertices(); + const Index n = static_cast(options.nrosy); + const int n_int = static_cast(options.nrosy); + const Scalar lambda = static_cast(options.lambda); + + // Build the connection Laplacian L of size (#V*2) x (#V*2). 
+ auto L = ops.connection_laplacian_nrosy(n, lambda); + + // Build the mass matrix M of size (#V*2) x (#V*2) by expanding star0() (which is + // #V x #V diagonal) into 2x2 identity blocks scaled by the per-vertex area. + auto M0 = ops.star0(); + std::vector> M_triplets; + M_triplets.reserve(num_vertices * 2); + for (Index vid = 0; vid < num_vertices; ++vid) { + const Scalar m = M0.coeff(static_cast(vid), static_cast(vid)); + M_triplets.emplace_back( + static_cast(vid * 2), + static_cast(vid * 2), + m); + M_triplets.emplace_back( + static_cast(vid * 2 + 1), + static_cast(vid * 2 + 1), + m); + } + Eigen::SparseMatrix M( + static_cast(num_vertices * 2), + static_cast(num_vertices * 2)); + M.setFromTriplets(M_triplets.begin(), M_triplets.end()); + + // --- Compute the smallest eigenvector/eigenvalue of L u = σ M u --- + // + // This is needed for both the unconstrained case (the result IS the smoothest field) + // and the constrained case (σ_min sets the spectral shift for alignment). + Eigen::Matrix x(static_cast(num_vertices * 2)); + Scalar sigma_min = Scalar(0); + { + constexpr Scalar eps = Scalar(1e-8); + Eigen::SparseMatrix L_reg = L + eps * M; + bool solved = false; + + { + auto result = solver::generalized_selfadjoint_eigen_smallest(L_reg, M, 1); + if (result.is_successful() && result.num_converged >= 1) { + x = result.eigenvectors.col(0); + sigma_min = result.eigenvalues(0) - eps; + solved = true; + } else { + logger().warn( + "compute_smooth_direction_field: Spectra eigen solver did not converge, " + "falling back to inverse power iteration."); + } + } + + if (!solved) { + // Fallback: inverse power iteration. 
+ SolverLDLT> solver(L_reg); + la_runtime_assert( + solver.info() == Eigen::Success, + "compute_smooth_direction_field: Cholesky factorization of L + eps*M failed"); + + x = Eigen::Matrix::Ones( + static_cast(num_vertices * 2)); + x.normalize(); + + constexpr int max_iter = 20; + for (int iter = 0; iter < max_iter; ++iter) { + x = M * x; + x = solver.solve(x); + x.normalize(); + } + // Estimate σ_min via Rayleigh quotient: σ = (x^T L x) / (x^T M x). + sigma_min = x.dot(L * x) / x.dot(M * x); + } + } + + const bool has_constraints = !options.alignment_attribute.empty(); + + if (has_constraints) { + // --- Constrained solve: (L - α*M + ε*M) u = M*q --- + // + // Following fieldgen (Knöppel et al. 2013), the spectral shift α = σ_min makes + // (L - α*M) singular along the smoothest mode, maximally biasing the solution + // toward the prescribed field q. The small ε*M regularization prevents exact + // singularity. alignment_weight scales the shift (1.0 = full fieldgen shift). + la_runtime_assert( + options.alignment_weight > 0 && options.alignment_weight <= 1.0, + "compute_smooth_direction_field: alignment_weight must be in (0, 1]."); + + const Scalar alpha = + static_cast(options.alignment_weight) * std::max(sigma_min, Scalar(0)); + constexpr Scalar eps = Scalar(1e-8); + + const auto alignment_id = internal::find_attribute( + mesh, + options.alignment_attribute, + AttributeElement::Vertex, + AttributeUsage::Vector, + 3); + la_runtime_assert( + alignment_id != invalid_attribute_id(), + "compute_smooth_direction_field: alignment attribute not found or does not match " + "expected properties (must be a Vector Vertex attribute with 3 channels)."); + + auto align_data = attribute_matrix_view(mesh, alignment_id); + + Eigen::Matrix q(static_cast(num_vertices * 2)); + q.setZero(); + + for (Index vid = 0; vid < num_vertices; ++vid) { + Eigen::Matrix v3 = align_data.row(vid).transpose(); + if (v3.squaredNorm() < Scalar(1e-20)) continue; // unconstrained vertex + + // Project 
into the local 2-D tangent frame (vertex_basis is orthonormal). + Eigen::Matrix B = ops.vertex_basis(vid); + Eigen::Matrix v2 = (B.transpose() * v3).stableNormalized(); + + // Apply n-fold symmetry encoding: [cos(n*theta), sin(n*theta)]. + q.template segment<2>(static_cast(vid * 2)) = nrosy_encode(v2, n_int); + } + + // M-normalize q so ||q||_M = 1 (following fieldgen). + Scalar norm_q = std::sqrt(q.dot(M * q)); + la_runtime_assert( + norm_q > Scalar(1e-10), + "compute_smooth_direction_field: all alignment vectors are zero or near-zero"); + q /= norm_q; + + // Assemble and factor the shifted system matrix: L - α*M + ε*M. + Eigen::SparseMatrix L_shifted = L - (alpha - eps) * M; + SolverLDLT> solver(L_shifted); + la_runtime_assert( + solver.info() == Eigen::Success, + "compute_smooth_direction_field: factorization of L - alpha*M failed."); + + Eigen::Matrix Mq = M * q; + x = solver.solve(Mq); + la_runtime_assert( + solver.info() == Eigen::Success, + "compute_smooth_direction_field: constrained solve failed"); + } + // else: x already holds the unconstrained smallest eigenvector from above. + + // Create or reuse the output attribute (3-D vector per vertex). + const auto direction_field_id = internal::find_or_create_attribute( + mesh, + options.direction_field_attribute, + AttributeElement::Vertex, + AttributeUsage::Vector, + 3, + internal::ResetToDefault::No); + + auto direction_data = attribute_matrix_ref(mesh, direction_field_id); + + // Decode the n-fold representation and map to 3-D world-space tangent vectors. + // The solution x lives in the n-rosy encoded space: each vertex's 2-D component is + // (cos(n*θ), sin(n*θ)) where θ is the actual direction angle. Decode by dividing + // the angle by n to recover one representative direction of the n-rosy field. 
+ for (Index vid = 0; vid < num_vertices; ++vid) { + Eigen::Matrix u2 = x.template segment<2>(static_cast(vid * 2)); + if (n > 1) { + u2 = nrosy_decode(u2, n_int); + } + Eigen::Matrix B = ops.vertex_basis(vid); + direction_data.row(vid) = (B * u2).stableNormalized().transpose(); + } + + return direction_field_id; +} + +#define LA_X_compute_smooth_direction_field(_, Scalar, Index) \ + template LA_POLYDDG_API AttributeId compute_smooth_direction_field( \ + SurfaceMesh&, \ + const DifferentialOperators&, \ + SmoothDirectionFieldOptions); +LA_SURFACE_MESH_X(compute_smooth_direction_field, 0) + +} // namespace lagrange::polyddg diff --git a/modules/polyddg/src/hodge_decomposition.cpp b/modules/polyddg/src/hodge_decomposition.cpp new file mode 100644 index 00000000..87ce7a52 --- /dev/null +++ b/modules/polyddg/src/hodge_decomposition.cpp @@ -0,0 +1,523 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include + +#include "nrosy_utils.h" + +#include +#include +#include +#include +#include +#include + +#include + +#include + +#include +#include +#include + +namespace lagrange::polyddg { + +using solver::SolverLDLT; + +// ============================================================================= +// 1-form level +// ============================================================================= + +template +HodgeDecompositionResult hodge_decomposition_1_form( + SurfaceMesh& mesh, + const DifferentialOperators& ops, + HodgeDecompositionOptions options) +{ + using VectorX = Eigen::Matrix; + using SpMat = Eigen::SparseMatrix; + + const auto lambda = static_cast(options.lambda); + + // Read input 1-form (per-edge scalar). + const auto input_id = internal::find_attribute( + mesh, + options.input_attribute, + AttributeElement::Edge, + AttributeUsage::Scalar, + 1); + la_runtime_assert( + input_id != invalid_attribute_id(), + "hodge_decomposition_1_form: input attribute not found or does not match expected " + "properties (must be a Scalar Edge attribute with 1 channel)."); + + const auto input_view = attribute_matrix_view(mesh, input_id); + const VectorX omega = input_view.col(0); + + la_runtime_assert( + is_closed(mesh), + "hodge_decomposition_1_form: mesh must be closed (no boundary edges)."); + + la_runtime_assert( + compute_components(mesh) == 1, + "hodge_decomposition_1_form: mesh must have a single connected component."); + + // Discrete operators. + const SpMat D0 = ops.d0(); // #E × #V + const SpMat D1 = ops.d1(); // #F × #E + + const Eigen::Index nv = static_cast(mesh.get_num_vertices()); + const Eigen::Index ne = static_cast(mesh.get_num_edges()); + const Eigen::Index nf = static_cast(mesh.get_num_facets()); + + // ---- Step 1: Exact part ---- + // Solve L₀ α = δ₁ ω, L₀ = d0ᵀ M₁ d0 (#V × #V, SPSD) + // ops.laplacian(λ) and ops.delta1(λ) both use M₁ = star1(λ), so the systems are + // consistent with the 1-form inner product. 
+ // + // Uniqueness is enforced by requiring the average of α to be zero. This is + // encoded as a Lagrange multiplier augmenting L₀ to a (nv+1)×(nv+1) system: + // + // [ L₀ 1 ] [ α ] [ δ₁ω ] + // [ 1ᵀ 0 ] [ λ ] = [ 0 ] + // + // The matrix is symmetric indefinite (one negative pivot from the constraint) + // and is solved with SimplicialLDLT, which factors as LDLᵀ and handles + // negative diagonal entries in D. + SpMat L0 = ops.laplacian(lambda); + const VectorX rhs0 = ops.delta1(lambda) * omega; + + SpMat L0_aug(nv + 1, nv + 1); + { + std::vector> triplets; + triplets.reserve(L0.nonZeros() + 2 * nv); + // L0 block (#V × #V) + for (int k = 0; k < L0.outerSize(); ++k) + for (typename SpMat::InnerIterator it(L0, k); it; ++it) + triplets.emplace_back(it.row(), it.col(), it.value()); + // Zero-mean constraint: sum(alpha) = 0 + // Add row and column of 1s + for (Eigen::Index i = 0; i < nv; ++i) { + triplets.emplace_back(nv, i, Scalar(1)); + triplets.emplace_back(i, nv, Scalar(1)); + } + L0_aug.setFromTriplets(triplets.begin(), triplets.end()); + } + VectorX rhs0_aug(nv + 1); + rhs0_aug.head(nv) = rhs0; + rhs0_aug(nv) = Scalar(0); + + SolverLDLT solver0(L0_aug); + la_runtime_assert( + solver0.info() == Eigen::Success, + "hodge_decomposition_1_form: scalar Laplacian factorization failed."); + const VectorX alpha = solver0.solve(rhs0_aug).head(nv); + la_runtime_assert( + solver0.info() == Eigen::Success, + "hodge_decomposition_1_form: scalar Laplacian solve failed."); + + const VectorX omega_exact = D0 * alpha; + + // ---- Step 2: Co-exact part ---- + // M₁ = star1(λ) is SPD but non-diagonal for polygonal meshes, so we cannot + // invert it element-wise. 
Instead solve the symmetric saddle-point system: + // + // [ M₁ D1ᵀ ] [ ω_coexact ] [ 0 ] + // [ D1 0 ] [ ψ ] = [ D1 ω ] + // + // Row 1: M₁ ω_coexact + D1ᵀ ψ = 0 ⟹ ω_coexact = −M₁⁻¹ D1ᵀ ψ + // Row 2: D1 ω_coexact = D1 ω + // + // The matrix is symmetric indefinite (SPD upper-left block, zero lower-right block) + // and is solved with SparseLU (LU with partial pivoting), which handles non-SPD + // sparse systems correctly. Gauge: pin ψ[0] = 0 to remove the constant null space + // of D1ᵀ on closed meshes. This is done by skipping the first column of D1ᵀ and + // the first row of D1 during assembly, then inserting a 1 on the diagonal at (ne, ne). + const SpMat M1 = ops.star1(lambda); + const SpMat D1T = D1.transpose(); + const Eigen::Index total = ne + nf; + + SpMat A(total, total); + { + std::vector> triplets; + triplets.reserve(M1.nonZeros() + 2 * D1.nonZeros() + 1); + // M1 block (#E × #E) + for (int k = 0; k < M1.outerSize(); ++k) + for (typename SpMat::InnerIterator it(M1, k); it; ++it) + triplets.emplace_back(it.row(), it.col(), it.value()); + // D1^T block (#E × #F), skip column 0 (gauge fix: psi[0] = 0; note "lambda" here is the Hodge-star parameter, not the multiplier) + for (int k = 0; k < D1T.outerSize(); ++k) + for (typename SpMat::InnerIterator it(D1T, k); it; ++it) + if (it.col() != 0) triplets.emplace_back(it.row(), ne + it.col(), it.value()); + // D1 block (#F × #E), skip row 0 (corresponding to psi[0]) + for (int k = 0; k < D1.outerSize(); ++k) + for (typename SpMat::InnerIterator it(D1, k); it; ++it) + if (it.row() != 0) triplets.emplace_back(ne + it.row(), it.col(), it.value()); + // Dummy equation at (ne, ne) to fix psi[0] = 0 + triplets.emplace_back(ne, ne, Scalar(1)); + A.setFromTriplets(triplets.begin(), triplets.end()); + } + + VectorX rhs_aug = VectorX::Zero(total); + rhs_aug.tail(nf) = D1 * omega; + rhs_aug(ne) = Scalar(0); // psi[0] = 0 + + Eigen::SparseLU solver1; + solver1.compute(A); + la_runtime_assert( + solver1.info() == Eigen::Success, + "hodge_decomposition_1_form: saddle-point 
factorization failed."); + const VectorX sol = solver1.solve(rhs_aug); + la_runtime_assert( + solver1.info() == Eigen::Success, + "hodge_decomposition_1_form: saddle-point solve failed."); + + const VectorX omega_coexact = sol.head(ne); + + // ---- Step 3: Harmonic part ---- + // The harmonic component is the residual after removing exact and co-exact parts. + // For genus-0 closed surfaces, this should be nearly zero (within numerical precision). + // For higher genus surfaces, this captures the non-trivial harmonic 1-forms. + const VectorX omega_harmonic = omega - omega_exact - omega_coexact; + + // ---- Write output per-edge scalar attributes ---- + auto write_edge_attr = [&](std::string_view name, const VectorX& values) -> AttributeId { + const auto id = internal::find_or_create_attribute( + mesh, + name, + AttributeElement::Edge, + AttributeUsage::Scalar, + 1, + internal::ResetToDefault::No); + auto ref = attribute_matrix_ref(mesh, id); + ref.col(0) = values; + return id; + }; + + HodgeDecompositionResult result; + result.exact_id = write_edge_attr(options.exact_attribute, omega_exact); + result.coexact_id = write_edge_attr(options.coexact_attribute, omega_coexact); + result.harmonic_id = write_edge_attr(options.harmonic_attribute, omega_harmonic); + return result; +} + +template +HodgeDecompositionResult hodge_decomposition_1_form( + SurfaceMesh& mesh, + HodgeDecompositionOptions options) +{ + DifferentialOperators ops(mesh); + return hodge_decomposition_1_form(mesh, ops, std::move(options)); +} + +// ============================================================================= +// Per-vertex vector field level +// ============================================================================= + +template +HodgeDecompositionResult hodge_decomposition_vector_field( + SurfaceMesh& mesh, + const DifferentialOperators& ops, + HodgeDecompositionOptions options) +{ + using VectorX = Eigen::Matrix; + using Vector2 = Eigen::Matrix; + using Vector3 = Eigen::Matrix; + 
using MatrixX3 = Eigen::Matrix; + using SpMat = Eigen::SparseMatrix; + + const int nrosy = static_cast(options.nrosy); + + la_runtime_assert(nrosy >= 1, "hodge_decomposition_vector_field: nrosy must be >= 1."); + + // Read input per-vertex vector field (3D vector per vertex in global coordinates). + const auto input_id = internal::find_attribute( + mesh, + options.input_attribute, + AttributeElement::Vertex, + AttributeUsage::Vector, + 3); + la_runtime_assert( + input_id != invalid_attribute_id(), + "hodge_decomposition_vector_field: input attribute not found or does not match expected " + "properties (must be a Vector Vertex attribute with 3 channels)."); + + const auto input_view = attribute_matrix_view(mesh, input_id); + + // Discrete operators. + const SpMat D0 = ops.d0(); // #E × #V + + const Eigen::Index nv = static_cast(mesh.get_num_vertices()); + const Eigen::Index ne = static_cast(mesh.get_num_edges()); + const Eigen::Index nf = static_cast(mesh.get_num_facets()); + + // Precompute face areas from vector area attribute. + const auto va_id = ops.get_vector_area_attribute_id(); + const auto va_view = attribute_matrix_view(mesh, va_id); + + // Precompute the n-rosy Levi-Civita transport matrix (reused for both forward and + // inverse transport in the n-rosy path). + SpMat LC_n; + if (nrosy > 1) { + LC_n = ops.levi_civita_nrosy(static_cast(nrosy)); + } + + // ---- Convert per-vertex vector field to 1-form ---- + VectorX omega(ne); + + if (nrosy == 1) { + // Simple midpoint rule: ω_e = (V_i + V_j)/2 · (x_j - x_i). + // No tangent-plane projection needed: the normal component vanishes in the + // dot product with the edge vector (which lies on the surface). 
+ MatrixX3 positions(nv, 3); + for (Index v = 0; v < static_cast(nv); ++v) { + auto p = mesh.get_position(v); + for (int d = 0; d < 3; ++d) positions(v, d) = p[d]; + } + MatrixX3 edge_vecs = D0 * positions; // #E × 3 + + for (Index e = 0; e < static_cast(ne); ++e) { + auto ev = mesh.get_edge_vertices(e); + Eigen::Matrix avg_vec = + (input_view.row(ev[0]) + input_view.row(ev[1])) / Scalar(2); + omega(e) = avg_vec.dot(edge_vecs.row(e)); + } + } else { + // N-rosy path: encode in vertex tangent planes, parallel transport to face + // frames via the n-rosy Levi-Civita connection, then apply the flat operator. + + // 1. Encode per-vertex vectors to 2D tangent vectors. + VectorX u_encoded(2 * nv); + for (Index v = 0; v < static_cast(nv); ++v) { + auto B = ops.vertex_basis(v); // 3 × 2 + Vector2 u2 = B.transpose() * Vector3(input_view.row(v).transpose()); + u2 = nrosy_encode(u2, nrosy); + u_encoded.segment(2 * v, 2) = u2; + } + + // 2. Transport vertex tangent vectors to corners using n-rosy connection. + // LC_n is (#C * 2) × (#V * 2). + VectorX u_corners = LC_n * u_encoded; + + // 3. Average corner vectors per face → per-face 2D → per-face 3D. + VectorX face_vecs_3d(3 * nf); + Index corner_offset = 0; + for (Index f = 0; f < static_cast(nf); ++f) { + Index fs = mesh.get_facet_size(f); + Vector2 avg_u = Vector2::Zero(); + for (Index lv = 0; lv < fs; ++lv) { + avg_u += u_corners.segment(2 * (corner_offset + lv), 2); + } + avg_u /= static_cast(fs); + + auto Bf = ops.facet_basis(f); // 3 × 2 + Vector3 v3 = Bf * avg_u; + face_vecs_3d.segment(3 * f, 3) = v3; + + corner_offset += fs; + } + + // 4. Apply global flat operator: per-face 3D vectors → 1-form. 
+ SpMat Flat = ops.flat(); // #E × (#F * 3) + omega = Flat * face_vecs_3d; + } + + // ---- Write 1-form to edge attribute and decompose ---- + { + const auto omega_id = internal::find_or_create_attribute( + mesh, + "@_hodge_vf_omega", + AttributeElement::Edge, + AttributeUsage::Scalar, + 1, + internal::ResetToDefault::No); + attribute_matrix_ref(mesh, omega_id).col(0) = omega; + } + + HodgeDecompositionOptions opts_1form; + opts_1form.lambda = options.lambda; + opts_1form.input_attribute = "@_hodge_vf_omega"; + opts_1form.exact_attribute = "@_hodge_vf_1form_exact"; + opts_1form.coexact_attribute = "@_hodge_vf_1form_coexact"; + opts_1form.harmonic_attribute = "@_hodge_vf_1form_harmonic"; + + auto result_1form = hodge_decomposition_1_form(mesh, ops, opts_1form); + + // ---- Read 1-form results ---- + const VectorX omega_exact = attribute_matrix_view(mesh, result_1form.exact_id).col(0); + const VectorX omega_coexact = + attribute_matrix_view(mesh, result_1form.coexact_id).col(0); + const VectorX omega_harmonic = + attribute_matrix_view(mesh, result_1form.harmonic_id).col(0); + + // ---- Convert 1-form components back to per-vertex vector fields ---- + const SpMat Sharp = ops.sharp(); // #F*3 × #E + + MatrixX3 vertex_exact, vertex_coexact, vertex_harmonic; + + // Precompute face areas (reused across all components). + VectorX face_areas(nf); + for (Index f = 0; f < static_cast(nf); ++f) { + face_areas(f) = va_view.row(f).norm(); + } + + if (nrosy == 1) { + // Simple sharp + area-weighted averaging to vertices. 
+ auto oneform_to_vertex = [&](const VectorX& omega_1form) -> MatrixX3 { + VectorX fv = Sharp * omega_1form; + + MatrixX3 result = MatrixX3::Zero(nv, 3); + VectorX weights = VectorX::Zero(nv); + + for (Index f = 0; f < static_cast(nf); ++f) { + Eigen::Matrix face_vec(fv(3 * f), fv(3 * f + 1), fv(3 * f + 2)); + + Index facet_size = mesh.get_facet_size(f); + for (Index lv = 0; lv < facet_size; ++lv) { + Index v = mesh.get_facet_vertex(f, lv); + result.row(v) += face_areas(f) * face_vec; + weights(v) += face_areas(f); + } + } + + for (Index v = 0; v < static_cast(nv); ++v) { + if (weights(v) > Scalar(0)) result.row(v) /= weights(v); + } + + return result; + }; + + vertex_exact = oneform_to_vertex(omega_exact); + vertex_coexact = oneform_to_vertex(omega_coexact); + // Compute harmonic as residual so that V_exact + V_coexact + V_harmonic = V_input + // exactly. Converting ω_harmonic independently would only recover the roundtripped + // input (sharp ∘ flat ≠ identity on vertex fields). + vertex_harmonic = input_view - vertex_exact - vertex_coexact; + } else { + // N-rosy path: sharp → per-face 2D → inverse transport → decode. + // + // For each 1-form component: + // 1. sharp → per-face 3D vectors + // 2. Project to face 2D tangent, replicate to corners (area-weighted) + // 3. Inverse transport via LC_n^T (adjoint of n-rosy Levi-Civita) + // 4. Normalize by area weights at vertices + // 5. Decode n-rosy and convert to 3D + SpMat LC_nT = LC_n.transpose(); // (#V * 2) × (#C * 2) + + // Precompute per-vertex area sums (reused across all 3 components). + Index total_corners = 0; + VectorX vertex_area = VectorX::Zero(nv); + for (Index f = 0; f < static_cast(nf); ++f) { + Index fs = mesh.get_facet_size(f); + total_corners += fs; + for (Index lv = 0; lv < fs; ++lv) { + Index v = mesh.get_facet_vertex(f, lv); + vertex_area(v) += face_areas(f); + } + } + + auto oneform_to_vertex_nrosy = [&](const VectorX& omega_1form) -> MatrixX3 { + // 1. Per-face 3D vectors via sharp. 
+ VectorX fv = Sharp * omega_1form; + + // 2. Project to face 2D, replicate to corners with area weighting. + VectorX u_corners(2 * total_corners); + Index corner_offset = 0; + for (Index f = 0; f < static_cast(nf); ++f) { + Vector3 fv3(fv(3 * f), fv(3 * f + 1), fv(3 * f + 2)); + auto Bf = ops.facet_basis(f); // 3 × 2 + Vector2 u_f = Bf.transpose() * fv3; + + Index fs = mesh.get_facet_size(f); + for (Index lv = 0; lv < fs; ++lv) { + u_corners.segment(2 * (corner_offset + lv), 2) = face_areas(f) * u_f; + } + corner_offset += fs; + } + + // 3. Inverse transport: LC_n^T sums R_{f,v}^{n,T} * (area_f * u_f) + // at each vertex. + VectorX u_vertices = LC_nT * u_corners; + + // 4. Normalize by total incident face area, decode, convert to 3D. + MatrixX3 result(nv, 3); + for (Index v = 0; v < static_cast(nv); ++v) { + Vector2 u2 = u_vertices.segment(2 * v, 2); + if (vertex_area(v) > Scalar(0)) u2 /= vertex_area(v); + u2 = nrosy_decode(u2, nrosy); + auto B = ops.vertex_basis(v); + result.row(v) = (B * u2).transpose(); + } + + return result; + }; + + vertex_exact = oneform_to_vertex_nrosy(omega_exact); + vertex_coexact = oneform_to_vertex_nrosy(omega_coexact); + vertex_harmonic = oneform_to_vertex_nrosy(omega_harmonic); + } + + // ---- Write output per-vertex vector attributes ---- + auto write_attr = [&](std::string_view name, const MatrixX3& values) -> AttributeId { + const auto id = internal::find_or_create_attribute( + mesh, + name, + AttributeElement::Vertex, + AttributeUsage::Vector, + 3, + internal::ResetToDefault::No); + attribute_matrix_ref(mesh, id) = values; + return id; + }; + + HodgeDecompositionResult result; + result.exact_id = write_attr(options.exact_attribute, vertex_exact); + result.coexact_id = write_attr(options.coexact_attribute, vertex_coexact); + result.harmonic_id = write_attr(options.harmonic_attribute, vertex_harmonic); + return result; +} + +template +HodgeDecompositionResult hodge_decomposition_vector_field( + SurfaceMesh& mesh, + 
HodgeDecompositionOptions options) +{ + DifferentialOperators ops(mesh); + return hodge_decomposition_vector_field(mesh, ops, std::move(options)); +} + +// ============================================================================= +// Explicit template instantiations +// ============================================================================= + +#define LA_X_hodge_decomposition_1_form(_, Scalar, Index) \ + template LA_POLYDDG_API HodgeDecompositionResult hodge_decomposition_1_form( \ + SurfaceMesh&, \ + const DifferentialOperators&, \ + HodgeDecompositionOptions); \ + template LA_POLYDDG_API HodgeDecompositionResult hodge_decomposition_1_form( \ + SurfaceMesh&, \ + HodgeDecompositionOptions); +LA_SURFACE_MESH_X(hodge_decomposition_1_form, 0) + +#define LA_X_hodge_decomposition_vector_field(_, Scalar, Index) \ + template LA_POLYDDG_API HodgeDecompositionResult \ + hodge_decomposition_vector_field( \ + SurfaceMesh&, \ + const DifferentialOperators&, \ + HodgeDecompositionOptions); \ + template LA_POLYDDG_API HodgeDecompositionResult \ + hodge_decomposition_vector_field( \ + SurfaceMesh&, \ + HodgeDecompositionOptions); +LA_SURFACE_MESH_X(hodge_decomposition_vector_field, 0) + +} // namespace lagrange::polyddg diff --git a/modules/polyddg/src/nrosy_utils.h b/modules/polyddg/src/nrosy_utils.h new file mode 100644 index 00000000..f60480b2 --- /dev/null +++ b/modules/polyddg/src/nrosy_utils.h @@ -0,0 +1,57 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. 
See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include + +#include +#include + +namespace lagrange::polyddg { + +/// +/// N-rosy encode: multiply the tangent-plane angle by n while preserving magnitude. +/// Given a 2D tangent vector (r cos θ, r sin θ), produce (r cos nθ, r sin nθ). +/// +template +Eigen::Matrix nrosy_encode(const Eigen::Matrix& v, int n) +{ + Scalar r = v.norm(); + if (r < std::numeric_limits::epsilon()) return v; + + Scalar c = v(0) / r, s = v(1) / r; + Scalar re = Scalar(1), im = Scalar(0); + for (int k = 0; k < n; ++k) { + Scalar new_re = re * c - im * s; + Scalar new_im = re * s + im * c; + re = new_re; + im = new_im; + } + return {r * re, r * im}; +} + +/// +/// N-rosy decode: divide the tangent-plane angle by n while preserving magnitude. +/// Given a 2D tangent vector (r cos φ, r sin φ), produce (r cos(φ/n), r sin(φ/n)). +/// +template +Eigen::Matrix nrosy_decode(const Eigen::Matrix& v, int n) +{ + Scalar r = v.norm(); + if (r < std::numeric_limits::epsilon()) return v; + + Scalar phi = std::atan2(v(1), v(0)); + Scalar theta = phi / static_cast(n); + return {r * std::cos(theta), r * std::sin(theta)}; +} + +} // namespace lagrange::polyddg diff --git a/modules/polyddg/tests/CMakeLists.txt b/modules/polyddg/tests/CMakeLists.txt index 5a095531..d3eeab0f 100644 --- a/modules/polyddg/tests/CMakeLists.txt +++ b/modules/polyddg/tests/CMakeLists.txt @@ -10,3 +10,6 @@ # governing permissions and limitations under the License. # lagrange_add_test() + +lagrange_include_modules(primitive) +target_link_libraries(test_lagrange_polyddg PRIVATE lagrange::primitive) diff --git a/modules/polyddg/tests/test_compute_principal_curvatures.cpp b/modules/polyddg/tests/test_compute_principal_curvatures.cpp new file mode 100644 index 00000000..feb115dd --- /dev/null +++ b/modules/polyddg/tests/test_compute_principal_curvatures.cpp @@ -0,0 +1,160 @@ +/* + * Copyright 2026 Adobe. 
All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#include +#include +#include +#include +#include +#include + +#include + +#include + +TEST_CASE("compute_principal_curvatures - unit sphere", "[polyddg]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + // Create a subdivided icosphere for uniform triangulation without pole singularities. + // Level 4 subdivision gives 10242 vertices with excellent uniformity and smoothness. + // For a unit sphere, both principal curvatures should be 1, and principal directions + // should be tangent and orthogonal. + primitive::IcosahedronOptions ico_opts; + ico_opts.radius = 1.0; + auto base_icosahedron = primitive::generate_icosahedron(ico_opts); + + primitive::SubdividedSphereOptions subdiv_opts; + subdiv_opts.radius = 1.0; + subdiv_opts.subdiv_level = 4; + auto sphere = + primitive::generate_subdivided_sphere(base_icosahedron, subdiv_opts); + + polyddg::DifferentialOperators ops(sphere); + + polyddg::PrincipalCurvaturesOptions curvature_opts; + auto result = polyddg::compute_principal_curvatures(sphere, ops, curvature_opts); + + // Read the computed curvature attributes. 
+ const auto kappa_min_view = attribute_matrix_view(sphere, result.kappa_min_id); + const auto kappa_max_view = attribute_matrix_view(sphere, result.kappa_max_id); + const auto dir_min_view = attribute_matrix_view(sphere, result.direction_min_id); + const auto dir_max_view = attribute_matrix_view(sphere, result.direction_max_id); + + const Index num_vertices = sphere.get_num_vertices(); + + SECTION("principal curvatures near 1 on unit sphere") + { + // For a unit sphere with radius 1, both principal curvatures should be 1. + // Subdivided icosphere has mostly uniform triangulation, though original icosahedron + // vertices may have slightly larger errors. + Scalar min_kmin = 1e10, max_kmin = -1e10, sum_kmin = 0; + Scalar min_kmax = 1e10, max_kmax = -1e10, sum_kmax = 0; + Index good_vertices = 0; + + for (Index vid = 0; vid < num_vertices; ++vid) { + Scalar kappa_min = kappa_min_view(vid, 0); + Scalar kappa_max = kappa_max_view(vid, 0); + min_kmin = std::min(min_kmin, kappa_min); + max_kmin = std::max(max_kmin, kappa_min); + sum_kmin += kappa_min; + min_kmax = std::min(min_kmax, kappa_max); + max_kmax = std::max(max_kmax, kappa_max); + sum_kmax += kappa_max; + + // Count vertices with tight accuracy. + if (std::abs(kappa_min - 1.0) < 0.05 && std::abs(kappa_max - 1.0) < 0.05) { + good_vertices++; + } + } + + INFO( + "κ_min range: [" << min_kmin << ", " << max_kmin + << "], avg: " << sum_kmin / num_vertices); + INFO( + "κ_max range: [" << min_kmax << ", " << max_kmax + << "], avg: " << sum_kmax / num_vertices); + INFO( + "Vertices within ±0.05: " << good_vertices << " / " << num_vertices << " (" + << (100.0 * good_vertices / num_vertices) << "%)"); + + // Check that average curvature is very accurate. + REQUIRE_THAT(sum_kmin / num_vertices, Catch::Matchers::WithinAbs(1.0, 0.01)); + REQUIRE_THAT(sum_kmax / num_vertices, Catch::Matchers::WithinAbs(1.0, 0.01)); + + // Check that vast majority (>99%) of vertices have tight accuracy. 
+ REQUIRE(good_vertices > static_cast(0.99 * num_vertices)); + } + + SECTION("principal directions are unit-length") + { + for (Index vid = 0; vid < num_vertices; ++vid) { + Eigen::Matrix dir_min = dir_min_view.row(vid); + Eigen::Matrix dir_max = dir_max_view.row(vid); + REQUIRE_THAT(dir_min.norm(), Catch::Matchers::WithinAbs(1.0, 1e-6)); + REQUIRE_THAT(dir_max.norm(), Catch::Matchers::WithinAbs(1.0, 1e-6)); + } + } + + SECTION("principal directions are tangent to surface") + { + // Principal directions should be perpendicular to the vertex normal. + const auto normal_id = ops.get_vertex_normal_attribute_id(); + const auto normal_view = attribute_matrix_view(sphere, normal_id); + + Scalar max_dot_min = 0, max_dot_max = 0; + Index worst_vid_min = 0, worst_vid_max = 0; + + for (Index vid = 0; vid < num_vertices; ++vid) { + Eigen::Matrix dir_min = dir_min_view.row(vid); + Eigen::Matrix dir_max = dir_max_view.row(vid); + Eigen::Matrix normal = normal_view.row(vid); + + Scalar dot_min = std::abs(dir_min.dot(normal)); + Scalar dot_max = std::abs(dir_max.dot(normal)); + if (dot_min > max_dot_min) { + max_dot_min = dot_min; + worst_vid_min = vid; + } + if (dot_max > max_dot_max) { + max_dot_max = dot_max; + worst_vid_max = vid; + } + } + + INFO("Max |dot(dir_min, normal)|: " << max_dot_min << " at vertex " << worst_vid_min); + INFO("Max |dot(dir_max, normal)|: " << max_dot_max << " at vertex " << worst_vid_max); + + // Principal directions should be tangent (perpendicular to normal, so dot product ≈ 0). + // Most vertices should have good tangency; allow for a few outliers. + REQUIRE(max_dot_min < 0.1); + REQUIRE(max_dot_max < 0.1); + } + + SECTION("principal directions are orthogonal") + { + // The two principal directions should be perpendicular to each other (dot product ≈ 0). 
+ Scalar max_dot = 0; + for (Index vid = 0; vid < num_vertices; ++vid) { + Eigen::Matrix dir_min = dir_min_view.row(vid); + Eigen::Matrix dir_max = dir_max_view.row(vid); + Scalar dot = std::abs(dir_min.dot(dir_max)); + max_dot = std::max(max_dot, dot); + } + + INFO("Max |dot(dir_min, dir_max)|: " << max_dot); + // Most vertices should have good orthogonality; allow for a few outliers. + REQUIRE(max_dot < 0.05); + } +} diff --git a/modules/polyddg/tests/test_compute_smooth_direction_field.cpp b/modules/polyddg/tests/test_compute_smooth_direction_field.cpp new file mode 100644 index 00000000..89f27ca3 --- /dev/null +++ b/modules/polyddg/tests/test_compute_smooth_direction_field.cpp @@ -0,0 +1,340 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +TEST_CASE("compute_smooth_direction_field", "[polyddg]") +{ + using namespace lagrange; + using Scalar = double; + using Index = uint32_t; + + SECTION("output dimensions and unit length — triangle mesh") + { + SurfaceMesh triangle_mesh; + triangle_mesh.add_vertex({1.0, 0.0, 0.0}); + triangle_mesh.add_vertex({0.0, 1.0, 0.0}); + triangle_mesh.add_vertex({0.0, 0.0, 1.0}); + triangle_mesh.add_triangle(0, 1, 2); + + polyddg::DifferentialOperators ops(triangle_mesh); + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 1; + auto result = polyddg::compute_smooth_direction_field(triangle_mesh, ops, opts); + + REQUIRE(result != invalid_attribute_id()); + auto data = attribute_matrix_view(triangle_mesh, result); + REQUIRE(data.rows() == static_cast(triangle_mesh.get_num_vertices())); + REQUIRE(data.cols() == 3); + for (Index vid = 0; vid < triangle_mesh.get_num_vertices(); ++vid) { + REQUIRE_THAT(data.row(vid).norm(), Catch::Matchers::WithinAbs(1.0, 1e-10)); + } + } + + SECTION("output dimensions and unit length — pyramid mesh n=4") + { + SurfaceMesh pyramid_mesh; + pyramid_mesh.add_vertex({0.0, 0.0, 0.0}); + pyramid_mesh.add_vertex({1.0, 0.0, 0.0}); + pyramid_mesh.add_vertex({1.0, 1.0, 0.0}); + pyramid_mesh.add_vertex({0.0, 1.0, 0.0}); + pyramid_mesh.add_vertex({0.5, 0.5, 1.0}); + pyramid_mesh.add_triangle(0, 1, 4); + pyramid_mesh.add_triangle(1, 2, 4); + pyramid_mesh.add_triangle(2, 3, 4); + pyramid_mesh.add_triangle(3, 0, 4); + pyramid_mesh.add_quad(0, 3, 2, 1); + + polyddg::DifferentialOperators ops(pyramid_mesh); + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 4; + auto result = polyddg::compute_smooth_direction_field(pyramid_mesh, ops, opts); + + REQUIRE(result != invalid_attribute_id()); + auto data = attribute_matrix_view(pyramid_mesh, result); + REQUIRE(data.rows() == 
static_cast(pyramid_mesh.get_num_vertices())); + REQUIRE(data.cols() == 3); + for (Index vid = 0; vid < pyramid_mesh.get_num_vertices(); ++vid) { + REQUIRE_THAT(data.row(vid).norm(), Catch::Matchers::WithinAbs(1.0, 1e-10)); + } + } + + SECTION("result is deterministic") + { + SurfaceMesh pyramid_mesh; + pyramid_mesh.add_vertex({0.0, 0.0, 0.0}); + pyramid_mesh.add_vertex({1.0, 0.0, 0.0}); + pyramid_mesh.add_vertex({1.0, 1.0, 0.0}); + pyramid_mesh.add_vertex({0.0, 1.0, 0.0}); + pyramid_mesh.add_vertex({0.5, 0.5, 1.0}); + pyramid_mesh.add_triangle(0, 1, 4); + pyramid_mesh.add_triangle(1, 2, 4); + pyramid_mesh.add_triangle(2, 3, 4); + pyramid_mesh.add_triangle(3, 0, 4); + pyramid_mesh.add_quad(0, 3, 2, 1); + + polyddg::DifferentialOperators ops(pyramid_mesh); + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 4; + opts.direction_field_attribute = "@sdf_first"; + auto result1 = polyddg::compute_smooth_direction_field(pyramid_mesh, ops, opts); + + opts.direction_field_attribute = "@sdf_second"; + auto result2 = polyddg::compute_smooth_direction_field(pyramid_mesh, ops, opts); + + auto data1 = attribute_matrix_view(pyramid_mesh, result1); + auto data2 = attribute_matrix_view(pyramid_mesh, result2); + + // Compare in the n-fold encoded space to handle n-rosy equivalence: + // for n=4, two fields differing by a rotation of k*π/2 are equivalent. + // Encode each vertex's 2D tangent direction to 4θ and compare. + Scalar max_encoded_diff = 0; + for (Index vid = 0; vid < pyramid_mesh.get_num_vertices(); ++vid) { + auto B = ops.vertex_basis(vid); + Eigen::Matrix u1 = B.transpose() * data1.row(vid).transpose(); + Eigen::Matrix u2 = B.transpose() * data2.row(vid).transpose(); + Scalar a1 = 4.0 * std::atan2(u1(1), u1(0)); + Scalar a2 = 4.0 * std::atan2(u2(1), u2(0)); + // 1 - cos(Δ) measures angular distance in the encoded space. 
+ max_encoded_diff = std::max(max_encoded_diff, 1.0 - std::cos(a1 - a2)); + } + REQUIRE_THAT(max_encoded_diff, Catch::Matchers::WithinAbs(0.0, 1e-8)); + } + + SECTION("sphere: tangent to surface n=4") + { + primitive::IcosahedronOptions ico_opts; + ico_opts.radius = 1.0; + auto base_ico = primitive::generate_icosahedron(ico_opts); + primitive::SubdividedSphereOptions subdiv_opts; + subdiv_opts.radius = 1.0; + subdiv_opts.subdiv_level = 1; + auto sphere = primitive::generate_subdivided_sphere(base_ico, subdiv_opts); + + polyddg::DifferentialOperators ops(sphere); + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 4; + auto result = polyddg::compute_smooth_direction_field(sphere, ops, opts); + + auto data = attribute_matrix_view(sphere, result); + const auto normal_id = ops.get_vertex_normal_attribute_id(); + const auto normal_view = attribute_matrix_view(sphere, normal_id); + + Scalar max_dot = 0; + for (Index vid = 0; vid < sphere.get_num_vertices(); ++vid) { + max_dot = std::max(max_dot, std::abs(data.row(vid).dot(normal_view.row(vid)))); + } + REQUIRE(max_dot < 1e-10); + } + + SECTION("sphere: n=4 decoded angle in principal branch") + { + primitive::IcosahedronOptions ico_opts; + ico_opts.radius = 1.0; + auto base_ico = primitive::generate_icosahedron(ico_opts); + primitive::SubdividedSphereOptions subdiv_opts; + subdiv_opts.radius = 1.0; + subdiv_opts.subdiv_level = 1; + auto sphere = primitive::generate_subdivided_sphere(base_ico, subdiv_opts); + + polyddg::DifferentialOperators ops(sphere); + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 4; + auto result = polyddg::compute_smooth_direction_field(sphere, ops, opts); + + auto data = attribute_matrix_view(sphere, result); + for (Index vid = 0; vid < sphere.get_num_vertices(); ++vid) { + auto B = ops.vertex_basis(vid); + Eigen::Matrix u2 = B.transpose() * data.row(vid).transpose(); + if (u2.norm() < 1e-10) continue; + Scalar theta = std::atan2(u2(1), u2(0)); + REQUIRE(theta > -internal::pi_4 - 
1e-10); + REQUIRE(theta <= internal::pi_4 + 1e-10); + } + } + + SECTION("torus: alignment constraint is respected") + { + primitive::TorusOptions torus_opts; + torus_opts.major_radius = 3.0; + torus_opts.minor_radius = 1.0; + torus_opts.ring_segments = 30; + torus_opts.pipe_segments = 20; + auto torus = primitive::generate_torus(torus_opts); + polyddg::DifferentialOperators ops(torus); + + const Index nv = torus.get_num_vertices(); + const Index constrained_vid = 0; + + // Prescribe the ∂/∂u tangent direction at a single vertex. + auto p = torus.get_position(constrained_vid); + Scalar angle_u = std::atan2(p[1], p[0]); + Eigen::Matrix prescribed_dir(-std::sin(angle_u), std::cos(angle_u), 0); + + auto align_id = internal::find_or_create_attribute( + torus, + "@test_alignment", + AttributeElement::Vertex, + AttributeUsage::Vector, + 3, + internal::ResetToDefault::Yes); + auto align_data = attribute_matrix_ref(torus, align_id); + align_data.setZero(); + align_data.row(constrained_vid) = prescribed_dir.transpose(); + + // Compute constrained n=1 vector field. + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 1; + opts.alignment_attribute = "@test_alignment"; + opts.alignment_weight = 1.0; + auto result = polyddg::compute_smooth_direction_field(torus, ops, opts); + auto data = attribute_matrix_view(torus, result); + + // The output at the constrained vertex should align with the prescribed ∂/∂u + // direction. On a torus, ∂/∂u is a smooth (harmonic) field, so the solver + // should produce a field well-aligned with it. + Eigen::Matrix ref = prescribed_dir.normalized().transpose(); + Scalar dot = data.row(constrained_vid).dot(ref); + REQUIRE(std::abs(dot) > 0.9); + + // All output vectors should be tangent and unit length. 
+ const auto normal_id = ops.get_vertex_normal_attribute_id(); + const auto normal_view = attribute_matrix_view(torus, normal_id); + for (Index vid = 0; vid < nv; ++vid) { + REQUIRE_THAT(data.row(vid).norm(), Catch::Matchers::WithinAbs(1.0, 1e-10)); + Scalar ndot = std::abs(data.row(vid).dot(normal_view.row(vid).normalized())); + REQUIRE_THAT(ndot, Catch::Matchers::WithinAbs(0.0, 1e-8)); + } + } + + SECTION("torus: 4-rosy alignment constraint is respected") + { + primitive::TorusOptions torus_opts; + torus_opts.major_radius = 3.0; + torus_opts.minor_radius = 1.0; + torus_opts.ring_segments = 30; + torus_opts.pipe_segments = 20; + auto torus = primitive::generate_torus(torus_opts); + polyddg::DifferentialOperators ops(torus); + + const Index nv = torus.get_num_vertices(); + const Index constrained_vid = 0; + + // Prescribe the ∂/∂u tangent direction at a single vertex. + auto p = torus.get_position(constrained_vid); + Scalar angle_u = std::atan2(p[1], p[0]); + Eigen::Matrix prescribed_dir(-std::sin(angle_u), std::cos(angle_u), 0); + + auto align_id = internal::find_or_create_attribute( + torus, + "@test_alignment_4rosy", + AttributeElement::Vertex, + AttributeUsage::Vector, + 3, + internal::ResetToDefault::Yes); + auto align_data = attribute_matrix_ref(torus, align_id); + align_data.setZero(); + align_data.row(constrained_vid) = prescribed_dir.transpose(); + + // Compute constrained 4-rosy field. + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 4; + opts.alignment_attribute = "@test_alignment_4rosy"; + opts.alignment_weight = 1.0; + auto result = polyddg::compute_smooth_direction_field(torus, ops, opts); + auto data = attribute_matrix_view(torus, result); + + // The output at the constrained vertex should align with the prescribed ∂/∂u + // direction up to 4-rosy symmetry: cos(4*(θ_out - θ_ref)) should be close to 1. 
+ auto B = ops.vertex_basis(constrained_vid); + Eigen::Matrix out_2d = B.transpose() * data.row(constrained_vid).transpose(); + Eigen::Matrix ref_2d = B.transpose() * prescribed_dir; + Scalar out_angle = std::atan2(out_2d(1), out_2d(0)); + Scalar ref_angle = std::atan2(ref_2d(1), ref_2d(0)); + Scalar cos4_diff = std::cos(4.0 * (out_angle - ref_angle)); + REQUIRE(cos4_diff > 0.9); + + // All output vectors should be tangent and unit length. + const auto normal_id = ops.get_vertex_normal_attribute_id(); + const auto normal_view = attribute_matrix_view(torus, normal_id); + for (Index vid = 0; vid < nv; ++vid) { + REQUIRE_THAT(data.row(vid).norm(), Catch::Matchers::WithinAbs(1.0, 1e-10)); + Scalar ndot = std::abs(data.row(vid).dot(normal_view.row(vid).normalized())); + REQUIRE_THAT(ndot, Catch::Matchers::WithinAbs(0.0, 1e-8)); + } + } + + SECTION("torus: per-face zero-energy condition") + { + primitive::TorusOptions torus_opts; + torus_opts.major_radius = 3.0; + torus_opts.minor_radius = 1.0; + torus_opts.ring_segments = 30; + torus_opts.pipe_segments = 20; + auto torus = primitive::generate_torus(torus_opts); + polyddg::DifferentialOperators ops(torus); + + const Index nf = torus.get_num_facets(); + Eigen::Matrix c(1, 0); + for (Index fid = 0; fid < std::min(nf, Index(10)); ++fid) { + Index fs = torus.get_facet_size(fid); + Eigen::Matrix u_local(2 * fs); + for (Index lv = 0; lv < fs; ++lv) { + auto R4 = ops.levi_civita_nrosy(fid, lv, Index(4)); + u_local.segment(2 * lv, 2) = R4.transpose() * c; + } + auto G_cov = ops.covariant_derivative_nrosy(fid, Index(4)); + REQUIRE_THAT((G_cov * u_local).norm(), Catch::Matchers::WithinAbs(0.0, 1e-10)); + } + } + + SECTION("torus: 4-rosy field is tangent and unit length") + { + primitive::TorusOptions torus_opts; + torus_opts.major_radius = 3.0; + torus_opts.minor_radius = 1.0; + torus_opts.ring_segments = 30; + torus_opts.pipe_segments = 20; + auto torus = primitive::generate_torus(torus_opts); + polyddg::DifferentialOperators 
ops(torus); + + polyddg::SmoothDirectionFieldOptions opts; + opts.nrosy = 4; + auto result = polyddg::compute_smooth_direction_field(torus, ops, opts); + + auto data = attribute_matrix_view(torus, result); + const auto normal_id = ops.get_vertex_normal_attribute_id(); + const auto normal_view = attribute_matrix_view(torus, normal_id); + + for (Index vid = 0; vid < torus.get_num_vertices(); ++vid) { + REQUIRE_THAT(data.row(vid).norm(), Catch::Matchers::WithinAbs(1.0, 1e-10)); + Scalar dot = std::abs(data.row(vid).dot(normal_view.row(vid).normalized())); + REQUIRE_THAT(dot, Catch::Matchers::WithinAbs(0.0, 1e-8)); + } + } +} diff --git a/modules/polyddg/tests/test_differential_operators.cpp b/modules/polyddg/tests/test_differential_operators.cpp index dc8740fe..8db89c41 100644 --- a/modules/polyddg/tests/test_differential_operators.cpp +++ b/modules/polyddg/tests/test_differential_operators.cpp @@ -11,8 +11,10 @@ */ #include +#include #include #include +#include #include #include @@ -21,6 +23,10 @@ #include +#include +#include +#include + TEST_CASE("DifferentialOperators", "[polyddg]") { using namespace lagrange; @@ -688,4 +694,92 @@ TEST_CASE("DifferentialOperators", "[polyddg]") } } } + + SECTION("Levi-Civita edge transport consistency converges on torus") + { + // For edge (v0, v1) shared by faces f0 and f1, the parallel transport + // v0→f0→v1 should equal v0→f1→v1: + // R_{f0,v1}^T * R_{f0,v0} == R_{f1,v1}^T * R_{f1,v0} + // The inconsistency comes from composing shortest-arc rotations through + // different face normals and converges to zero at O(h²) under refinement. 
+ auto compute_max_inconsistency = [](SurfaceMesh& mesh_ref) { + polyddg::DifferentialOperators ops_ref(mesh_ref); + Scalar max_inconsistency = 0; + for (Index eid = 0; eid < mesh_ref.get_num_edges(); ++eid) { + auto ev = mesh_ref.get_edge_vertices(eid); + Index v0 = ev[0], v1 = ev[1]; + + std::vector> Ts; + mesh_ref.foreach_facet_around_edge(eid, [&](Index fid) { + Index f_size = mesh_ref.get_facet_size(fid); + Eigen::Matrix R0, R1; + for (Index lv = 0; lv < f_size; ++lv) { + if (mesh_ref.get_facet_vertex(fid, lv) == v0) + R0 = ops_ref.levi_civita(fid, lv); + else if (mesh_ref.get_facet_vertex(fid, lv) == v1) + R1 = ops_ref.levi_civita(fid, lv); + } + Ts.push_back(R1.transpose() * R0); + }); + + if (Ts.size() == 2) { + max_inconsistency = std::max(max_inconsistency, (Ts[0] - Ts[1]).norm()); + } + } + return max_inconsistency; + }; + + Scalar prev_inc = std::numeric_limits::max(); + for (int res : {20, 40, 80}) { + primitive::TorusOptions fine_opts; + fine_opts.major_radius = 3.0; + fine_opts.minor_radius = 1.0; + fine_opts.ring_segments = static_cast(res * 3 / 2); + fine_opts.pipe_segments = static_cast(res); + + auto torus_quad = primitive::generate_torus(fine_opts); + Scalar inc_quad = compute_max_inconsistency(torus_quad); + + auto tri_mesh = primitive::generate_torus(fine_opts); + triangulate_polygonal_facets(tri_mesh); + Scalar inc_tri = compute_max_inconsistency(tri_mesh); + + Scalar inc = std::max(inc_quad, inc_tri); + INFO("res=" << res << ": quad=" << inc_quad << ", tri=" << inc_tri); + REQUIRE(inc < prev_inc); + prev_inc = inc; + } + } + + SECTION("vertex_basis orthogonal to vertex normal") + { + auto check_orthogonality = [](SurfaceMesh& mesh) { + polyddg::DifferentialOperators diff_ops(mesh); + auto vertex_normals = + attribute_matrix_view(mesh, diff_ops.get_vertex_normal_attribute_id()); + + for (Index vid = 0; vid < mesh.get_num_vertices(); vid++) { + Eigen::Matrix B = diff_ops.vertex_basis(vid); + Eigen::Matrix n = + vertex_normals.row(vid).template 
head<3>().stableNormalized(); + + // Both tangent vectors must be orthogonal to the vertex normal. + REQUIRE_THAT(std::abs(n.dot(B.col(0))), Catch::Matchers::WithinAbs(0.0, 1e-12)); + REQUIRE_THAT(std::abs(n.dot(B.col(1))), Catch::Matchers::WithinAbs(0.0, 1e-12)); + } + }; + + SECTION("triangle") + { + check_orthogonality(triangle_mesh); + } + SECTION("quad") + { + check_orthogonality(quad_mesh); + } + SECTION("pyramid") + { + check_orthogonality(pyramid_mesh); + } + } } diff --git a/modules/polyddg/tests/test_hodge_decomposition.cpp b/modules/polyddg/tests/test_hodge_decomposition.cpp new file mode 100644 index 00000000..0ff6252e --- /dev/null +++ b/modules/polyddg/tests/test_hodge_decomposition.cpp @@ -0,0 +1,471 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#include +#include +#include +#include +#include +#include +#include +#include + +#include +#include + +#include + +#include +#include + +// ---- helpers ---------------------------------------------------------------- + +namespace { + +using Scalar = double; +using Index = uint32_t; +using VectorX = Eigen::Matrix; +using MatrixX3 = Eigen::Matrix; +using SpMat = Eigen::SparseMatrix; + +/// Run hodge_decomposition_vector_field and return (V_exact, V_coexact, V_harmonic). 
+std::tuple run_decomp( + lagrange::SurfaceMesh& mesh, + const lagrange::polyddg::DifferentialOperators& ops, + const MatrixX3& V, + std::string_view name = "@hd_test_input", + uint8_t nrosy = 1) +{ + using namespace lagrange; + + const auto input_id = internal::find_or_create_attribute( + mesh, + name, + AttributeElement::Vertex, + AttributeUsage::Vector, + 3, + internal::ResetToDefault::No); + attribute_matrix_ref(mesh, input_id) = V; + + polyddg::HodgeDecompositionOptions opts; + opts.input_attribute = name; + opts.nrosy = nrosy; + auto r = polyddg::hodge_decomposition_vector_field(mesh, ops, opts); + + auto read = [&](AttributeId id) -> MatrixX3 { return attribute_matrix_view(mesh, id); }; + return {read(r.exact_id), read(r.coexact_id), read(r.harmonic_id)}; +} + +/// Run hodge_decomposition_1_form and return (omega_exact, omega_coexact, omega_harmonic). +std::tuple run_decomp_1form( + lagrange::SurfaceMesh& mesh, + const lagrange::polyddg::DifferentialOperators& ops, + const VectorX& omega, + std::string_view name = "@hd_test_1form_input") +{ + using namespace lagrange; + + const auto input_id = internal::find_or_create_attribute( + mesh, + name, + AttributeElement::Edge, + AttributeUsage::Scalar, + 1, + internal::ResetToDefault::No); + attribute_matrix_ref(mesh, input_id).col(0) = omega; + + polyddg::HodgeDecompositionOptions opts; + opts.input_attribute = name; + auto r = polyddg::hodge_decomposition_1_form(mesh, ops, opts); + + auto read = [&](AttributeId id) -> VectorX { + return attribute_matrix_view(mesh, id).col(0); + }; + return {read(r.exact_id), read(r.coexact_id), read(r.harmonic_id)}; +} + +/// Build a per-vertex gradient vector field from a scalar function f. 
+MatrixX3 make_gradient_field( + lagrange::SurfaceMesh& mesh, + const lagrange::polyddg::DifferentialOperators& ops, + const VectorX& f) +{ + using namespace lagrange; + + const Eigen::Index nv = static_cast(mesh.get_num_vertices()); + const Eigen::Index nf = static_cast(mesh.get_num_facets()); + + SpMat D0 = ops.d0(); + VectorX omega = D0 * f; + + SpMat Sharp = ops.sharp(); + VectorX fv = Sharp * omega; + + auto va_id = ops.get_vector_area_attribute_id(); + auto va_view = attribute_matrix_view(mesh, va_id); + + MatrixX3 result = MatrixX3::Zero(nv, 3); + VectorX weights = VectorX::Zero(nv); + + for (Index fi = 0; fi < static_cast(nf); ++fi) { + Scalar area = va_view.row(fi).norm(); + Eigen::Matrix face_vec(fv(3 * fi), fv(3 * fi + 1), fv(3 * fi + 2)); + + Index facet_size = mesh.get_facet_size(fi); + for (Index lv = 0; lv < facet_size; ++lv) { + Index v = mesh.get_facet_vertex(fi, lv); + result.row(v) += area * face_vec; + weights(v) += area; + } + } + + for (Index v = 0; v < static_cast(nv); ++v) { + if (weights(v) > 0) result.row(v) /= weights(v); + } + + return result; +} + +/// Replicate the internal vertex→1-form conversion for n-rosy fields. 
+VectorX vertex_to_1form_nrosy( + lagrange::SurfaceMesh& mesh, + const lagrange::polyddg::DifferentialOperators& ops, + const MatrixX3& V, + uint8_t nrosy) +{ + using namespace lagrange; + const Eigen::Index nv = static_cast(mesh.get_num_vertices()); + const Eigen::Index nf = static_cast(mesh.get_num_facets()); + const Eigen::Index ne = static_cast(mesh.get_num_edges()); + + if (nrosy == 1) { + SpMat D0 = ops.d0(); + MatrixX3 positions(nv, 3); + for (Index v = 0; v < static_cast(nv); ++v) { + auto p = mesh.get_position(v); + for (int d = 0; d < 3; ++d) positions(v, d) = p[d]; + } + MatrixX3 edge_vecs = D0 * positions; + + VectorX omega(ne); + for (Index e = 0; e < static_cast(ne); ++e) { + auto ev = mesh.get_edge_vertices(e); + Eigen::Matrix avg_vec = (V.row(ev[0]) + V.row(ev[1])) / 2.0; + omega(e) = avg_vec.dot(edge_vecs.row(e)); + } + return omega; + } + + VectorX u_encoded(2 * nv); + for (Index v = 0; v < static_cast(nv); ++v) { + auto B = ops.vertex_basis(v); + Eigen::Matrix u2 = B.transpose() * V.row(v).transpose(); + Scalar r = u2.norm(); + if (r > std::numeric_limits::epsilon()) { + Scalar c = u2(0) / r, s = u2(1) / r; + Scalar re = 1.0, im = 0.0; + for (int k = 0; k < nrosy; ++k) { + Scalar nr = re * c - im * s; + Scalar ni = re * s + im * c; + re = nr; + im = ni; + } + u2 = Eigen::Matrix(r * re, r * im); + } + u_encoded.segment(2 * v, 2) = u2; + } + + SpMat LC_n = ops.levi_civita_nrosy(static_cast(nrosy)); + VectorX u_corners = LC_n * u_encoded; + + VectorX face_vecs_3d(3 * nf); + Index corner_offset = 0; + for (Index f = 0; f < static_cast(nf); ++f) { + Index fs = mesh.get_facet_size(f); + Eigen::Matrix avg_u = Eigen::Matrix::Zero(); + for (Index lv = 0; lv < fs; ++lv) { + avg_u += u_corners.segment(2 * (corner_offset + lv), 2); + } + avg_u /= static_cast(fs); + + auto Bf = ops.facet_basis(f); + Eigen::Matrix v3 = Bf * avg_u; + face_vecs_3d.segment(3 * f, 3) = v3; + + corner_offset += fs; + } + + SpMat Flat = ops.flat(); + return Flat * face_vecs_3d; +} + 
+} // namespace + +// ---- tests ------------------------------------------------------------------ + +TEST_CASE("HodgeDecomposition1Form", "[polyddg]") +{ + using namespace lagrange; + + // Octahedron: 6 vertices, 12 edges, 8 triangles — genus-0 closed surface. + SurfaceMesh mesh; + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({-1, 0, 0}); + mesh.add_vertex({0, -1, 0}); + mesh.add_vertex({0, 0, 1}); + mesh.add_vertex({0, 0, -1}); + mesh.add_triangle(0, 1, 4); + mesh.add_triangle(1, 2, 4); + mesh.add_triangle(2, 3, 4); + mesh.add_triangle(3, 0, 4); + mesh.add_triangle(0, 5, 3); + mesh.add_triangle(1, 5, 0); + mesh.add_triangle(2, 5, 1); + mesh.add_triangle(3, 5, 2); + + polyddg::DifferentialOperators ops(mesh); + const Eigen::Index ne = static_cast(mesh.get_num_edges()); + + SECTION("1-form reconstruction") + { + VectorX omega = VectorX::Random(ne); + const auto [w_exact, w_coexact, w_harmonic] = run_decomp_1form(mesh, ops, omega); + + const VectorX residual = w_exact + w_coexact + w_harmonic - omega; + REQUIRE_THAT(residual.norm(), Catch::Matchers::WithinAbs(0.0, 1e-12)); + } + + SECTION("exact 1-form has vanishing curl") + { + VectorX omega = VectorX::Random(ne); + const auto [w_exact, w_coexact, w_harmonic] = run_decomp_1form(mesh, ops, omega); + + SpMat D1 = ops.d1(); + REQUIRE_THAT((D1 * w_exact).norm(), Catch::Matchers::WithinAbs(0.0, 1e-10)); + } + + SECTION("gradient 1-form has vanishing co-exact part") + { + // Build a gradient 1-form: omega = D0 * f. 
+ const Eigen::Index nv = static_cast(mesh.get_num_vertices()); + VectorX f(nv); + for (Index v = 0; v < static_cast(nv); v++) f(v) = mesh.get_position(v)[0]; + SpMat D0 = ops.d0(); + VectorX omega = D0 * f; + + const auto [w_exact, w_coexact, w_harmonic] = run_decomp_1form(mesh, ops, omega); + + REQUIRE_THAT(w_coexact.norm(), Catch::Matchers::WithinAbs(0.0, 1e-8)); + } + + SECTION("harmonic vanishes on genus-0") + { + VectorX omega = VectorX::Random(ne); + const auto [w_exact, w_coexact, w_harmonic] = run_decomp_1form(mesh, ops, omega); + + REQUIRE_THAT(w_harmonic.norm(), Catch::Matchers::WithinAbs(0.0, 1e-10)); + } +} + +TEST_CASE("HodgeDecompositionVectorField", "[polyddg]") +{ + using namespace lagrange; + + // Octahedron: 6 vertices, 12 edges, 8 triangles — genus-0 closed surface. + SurfaceMesh mesh; + mesh.add_vertex({1, 0, 0}); + mesh.add_vertex({0, 1, 0}); + mesh.add_vertex({-1, 0, 0}); + mesh.add_vertex({0, -1, 0}); + mesh.add_vertex({0, 0, 1}); + mesh.add_vertex({0, 0, -1}); + mesh.add_triangle(0, 1, 4); + mesh.add_triangle(1, 2, 4); + mesh.add_triangle(2, 3, 4); + mesh.add_triangle(3, 0, 4); + mesh.add_triangle(0, 5, 3); + mesh.add_triangle(1, 5, 0); + mesh.add_triangle(2, 5, 1); + mesh.add_triangle(3, 5, 2); + + polyddg::DifferentialOperators ops(mesh); + const Eigen::Index num_vertices = mesh.get_num_vertices(); + + SECTION("reconstruction") + { + MatrixX3 V = MatrixX3::Random(num_vertices, 3); + const auto [V_exact, V_coexact, V_harmonic] = run_decomp(mesh, ops, V); + + const MatrixX3 residual = V_exact + V_coexact + V_harmonic - V; + REQUIRE_THAT(residual.norm(), Catch::Matchers::WithinAbs(0.0, 1e-12)); + } + + SECTION("gradient input has vanishing co-exact part") + { + VectorX f(num_vertices); + for (Index v = 0; v < static_cast(num_vertices); v++) f(v) = mesh.get_position(v)[0]; + MatrixX3 V = make_gradient_field(mesh, ops, f); + + const auto [V_exact, V_coexact, V_harmonic] = run_decomp(mesh, ops, V); + + REQUIRE_THAT(V_coexact.norm(), 
Catch::Matchers::WithinAbs(0.0, 1e-8)); + REQUIRE_THAT( + (V_exact + V_coexact + V_harmonic - V).norm(), + Catch::Matchers::WithinAbs(0.0, 1e-12)); + } + + SECTION("sphere reconstruction") + { + auto sphere = lagrange::testing::create_test_sphere( + lagrange::testing::CreateOptions{false, false}); + polyddg::DifferentialOperators sphere_ops(sphere); + + const MatrixX3 V = MatrixX3::Random(sphere.get_num_vertices(), 3); + const auto [V_exact, V_coexact, V_harmonic] = run_decomp(sphere, sphere_ops, V); + + const MatrixX3 residual = V_exact + V_coexact + V_harmonic - V; + REQUIRE_THAT(residual.norm(), Catch::Matchers::WithinAbs(0.0, 1e-12)); + } + + SECTION("nrosy=4 harmonic vanishes on genus-0") + { + MatrixX3 V = MatrixX3::Random(num_vertices, 3); + const auto [V_exact, V_coexact, V_harmonic] = run_decomp(mesh, ops, V, "@hd_4", 4); + + REQUIRE_THAT(V_harmonic.norm(), Catch::Matchers::WithinAbs(0.0, 1e-6)); + } + + SECTION("nrosy=4 output vectors lie in tangent plane") + { + MatrixX3 V = MatrixX3::Random(num_vertices, 3); + const auto [V_exact, V_coexact, V_harmonic] = run_decomp(mesh, ops, V, "@hd_4tp", 4); + + auto vn_id = ops.get_vertex_normal_attribute_id(); + auto vn_view = attribute_matrix_view(mesh, vn_id); + + for (Index v = 0; v < static_cast(num_vertices); ++v) { + Eigen::Matrix n = vn_view.row(v).normalized(); + if (V_exact.row(v).norm() > 1e-10) + REQUIRE_THAT( + std::abs(V_exact.row(v).dot(n)), + Catch::Matchers::WithinAbs(0.0, 1e-6)); + if (V_coexact.row(v).norm() > 1e-10) + REQUIRE_THAT( + std::abs(V_coexact.row(v).dot(n)), + Catch::Matchers::WithinAbs(0.0, 1e-6)); + } + } + + SECTION("torus 4-rosy: nontrivial harmonic on genus-1") + { + primitive::TorusOptions torus_opts; + torus_opts.major_radius = 3.0; + torus_opts.minor_radius = 1.0; + torus_opts.ring_segments = 60; + torus_opts.pipe_segments = 40; + auto torus = primitive::generate_torus(torus_opts); + polyddg::DifferentialOperators torus_ops(torus); + + const Eigen::Index nv = 
static_cast(torus.get_num_vertices()); + const SpMat D1 = torus_ops.d1(); + + // Use ∂/∂u tangent direction as input. + MatrixX3 V_input(nv, 3); + for (Index vid = 0; vid < static_cast(nv); ++vid) { + auto p = torus.get_position(vid); + Scalar angle_u = std::atan2(p[1], p[0]); + Eigen::Matrix e_u(-std::sin(angle_u), std::cos(angle_u), 0); + auto B = torus_ops.vertex_basis(vid); + Eigen::Matrix u2 = B.transpose() * e_u; + if (u2.norm() < 1e-10) u2 = Eigen::Matrix(1, 0); + V_input.row(vid) = (B * u2.normalized()).transpose(); + } + + const auto [V_exact, V_coexact, V_harmonic] = + run_decomp(torus, torus_ops, V_input, "@hd_torus_du", 4); + + // All three components should be nonzero on genus-1. + REQUIRE(V_exact.norm() > 1e-6); + REQUIRE(V_coexact.norm() > 1e-6); + REQUIRE(V_harmonic.norm() > 1e-6); + + // 1-form properties (approximate due to encode/decode roundtrip). + VectorX omega = vertex_to_1form_nrosy(torus, torus_ops, V_input, 4); + VectorX omega_exact = vertex_to_1form_nrosy(torus, torus_ops, V_exact, 4); + VectorX omega_coexact = vertex_to_1form_nrosy(torus, torus_ops, V_coexact, 4); + VectorX omega_harmonic = vertex_to_1form_nrosy(torus, torus_ops, V_harmonic, 4); + + REQUIRE((omega_exact + omega_coexact + omega_harmonic - omega).norm() / omega.norm() < 0.5); + REQUIRE((D1 * omega_exact).norm() / omega_exact.norm() < 0.5); + REQUIRE((torus_ops.delta1() * omega_coexact).norm() / omega_coexact.norm() < 0.5); + } + + SECTION("torus 4-rosy: two inputs produce independent harmonic parts") + { + primitive::TorusOptions torus_opts; + torus_opts.major_radius = 3.0; + torus_opts.minor_radius = 1.0; + torus_opts.ring_segments = 60; + torus_opts.pipe_segments = 40; + auto torus = primitive::generate_torus(torus_opts); + polyddg::DifferentialOperators torus_ops(torus); + + const Eigen::Index nv = static_cast(torus.get_num_vertices()); + + // Input 1: ∂/∂u tangent. 
+ MatrixX3 V1(nv, 3); + for (Index vid = 0; vid < static_cast(nv); ++vid) { + auto p = torus.get_position(vid); + Scalar angle_u = std::atan2(p[1], p[0]); + Eigen::Matrix e_u(-std::sin(angle_u), std::cos(angle_u), 0); + auto B = torus_ops.vertex_basis(vid); + Eigen::Matrix u2 = B.transpose() * e_u; + if (u2.norm() < 1e-10) u2 = Eigen::Matrix(1, 0); + V1.row(vid) = (B * u2.normalized()).transpose(); + } + + // Input 2: ∂/∂u rotated by π/3. + MatrixX3 V2(nv, 3); + for (Index vid = 0; vid < static_cast(nv); ++vid) { + auto p = torus.get_position(vid); + Scalar angle_u = std::atan2(p[1], p[0]); + Eigen::Matrix e_u(-std::sin(angle_u), std::cos(angle_u), 0); + auto B = torus_ops.vertex_basis(vid); + Eigen::Matrix u2 = B.transpose() * e_u; + Eigen::Matrix u2_rot( + u2(0) * std::cos(internal::pi / 3) - u2(1) * std::sin(internal::pi / 3), + u2(0) * std::sin(internal::pi / 3) + u2(1) * std::cos(internal::pi / 3)); + if (u2_rot.norm() < 1e-10) u2_rot = Eigen::Matrix(1, 0); + V2.row(vid) = (B * u2_rot.normalized()).transpose(); + } + + const auto [exact1, coexact1, harmonic1] = + run_decomp(torus, torus_ops, V1, "@hd_torus_v1", 4); + const auto [exact2, coexact2, harmonic2] = + run_decomp(torus, torus_ops, V2, "@hd_torus_v2", 4); + + REQUIRE(harmonic1.norm() > 1e-6); + REQUIRE(harmonic2.norm() > 1e-6); + + // M₁-linear independence via Gram determinant. 
+ VectorX h1 = vertex_to_1form_nrosy(torus, torus_ops, harmonic1, 4); + VectorX h2 = vertex_to_1form_nrosy(torus, torus_ops, harmonic2, 4); + SpMat M1 = torus_ops.star1(); + Scalar g11 = h1.dot(M1 * h1); + Scalar g12 = h1.dot(M1 * h2); + Scalar g22 = h2.dot(M1 * h2); + Scalar normalized_det = (g11 * g22 - g12 * g12) / (g11 * g22); + INFO("Gram det / (||h1||² ||h2||²) = " << normalized_det); + REQUIRE(normalized_det > 0.01); + } +} diff --git a/modules/polyscope/examples/CMakeLists.txt b/modules/polyscope/examples/CMakeLists.txt index 46e78690..d5c7521f 100644 --- a/modules/polyscope/examples/CMakeLists.txt +++ b/modules/polyscope/examples/CMakeLists.txt @@ -12,4 +12,4 @@ include(polyscope) lagrange_add_example(mesh_viewer mesh_viewer.cpp) -target_link_libraries(mesh_viewer lagrange::polyscope lagrange::io CLI11::CLI11) +target_link_libraries(mesh_viewer lagrange::polyscope lagrange::io lagrange::scene CLI11::CLI11) diff --git a/modules/polyscope/examples/mesh_viewer.cpp b/modules/polyscope/examples/mesh_viewer.cpp index d71bf2ec..44d13ac2 100644 --- a/modules/polyscope/examples/mesh_viewer.cpp +++ b/modules/polyscope/examples/mesh_viewer.cpp @@ -11,9 +11,11 @@ */ #include #include +#include #include #include #include +#include #include #include @@ -27,7 +29,10 @@ #include -using SurfaceMesh = lagrange::SurfaceMesh32d; +using Scalar = double; +using Index = uint32_t; +using SurfaceMesh = lagrange::SurfaceMesh; +using SimpleScene = lagrange::scene::SimpleScene; void prepare_mesh(SurfaceMesh& mesh) { @@ -65,12 +70,14 @@ int main(int argc, char** argv) struct { std::vector inputs; + bool scene = false; int log_level = 2; // normal } args; CLI::App app{argv[0]}; app.add_option("inputs", args.inputs, "Input mesh(es).")->required()->check(CLI::ExistingFile); app.add_option("-l,--level", args.log_level, "Log level (0 = most verbose, 6 = off)."); + app.add_flag("--scene", args.scene, "Load as a scene (instead of a single mesh per file)."); CLI11_PARSE(app, argc, argv) 
spdlog::set_level(static_cast(args.log_level)); @@ -81,11 +88,30 @@ int main(int argc, char** argv) }; polyscope::init(); + lagrange::io::LoadOptions load_options; + load_options.stitch_vertices = true; + for (auto input : args.inputs) { - lagrange::logger().info("Loading input mesh: {}", input.string()); - auto mesh = lagrange::io::load_mesh(input); - prepare_mesh(mesh); - lagrange::polyscope::register_structure(input.stem().string(), std::move(mesh)); + lagrange::logger().info("Loading input: {}", input.string()); + + if (args.scene) { + SimpleScene simple_scene = + lagrange::io::load_simple_scene(input, load_options); + auto meshes = lagrange::scene::simple_scene_to_meshes(simple_scene); + lagrange::logger().info( + "Loaded scene with {} mesh(es) from {}", + meshes.size(), + input.string()); + for (size_t i = 0; i < meshes.size(); ++i) { + prepare_mesh(meshes[i]); + std::string name = input.stem().string() + "_" + std::to_string(i); + lagrange::polyscope::register_structure(name, std::move(meshes[i])); + } + } else { + SurfaceMesh mesh = lagrange::io::load_mesh(input, load_options); + prepare_mesh(mesh); + lagrange::polyscope::register_structure(input.stem().string(), std::move(mesh)); + } } polyscope::show(); diff --git a/modules/polyscope/src/register_attributes.h b/modules/polyscope/src/register_attributes.h index e93004ed..b05616f1 100644 --- a/modules/polyscope/src/register_attributes.h +++ b/modules/polyscope/src/register_attributes.h @@ -14,6 +14,7 @@ #include #include #include +#include // clang-format off #include @@ -95,36 +96,45 @@ auto register_attribute( constexpr bool IsMesh = std::is_same_v; constexpr bool IsEdge = std::is_same_v; + constexpr auto scalar_data_type = std::is_integral_v + ? 
::polyscope::DataType::CATEGORICAL + : ::polyscope::DataType::STANDARD; + switch (attr.get_element_type()) { case lagrange::AttributeElement::Vertex: if (attr.get_usage() == Usage::Scalar) { lagrange::logger().info("Registering scalar vertex attribute: {}", name); if constexpr (IsMesh) { - return ps_struct->addVertexScalarQuantity(name, vector_view(attr)); + return ps_struct->addVertexScalarQuantity( + name, + vector_view(attr), + scalar_data_type); } else if constexpr (IsEdge) { - return ps_struct->addNodeScalarQuantity(name, vector_view(attr)); + return ps_struct->addNodeScalarQuantity(name, vector_view(attr), scalar_data_type); } else { - return ps_struct->addScalarQuantity(name, vector_view(attr)); + return ps_struct->addScalarQuantity(name, vector_view(attr), scalar_data_type); } - } else if (attr.get_num_channels() == 3) { + } else if (attr.get_num_channels() == 3 || attr.get_num_channels() == 4) { if (show_as_vector(attr.get_usage())) { lagrange::logger().info("Registering vector vertex attribute: {}", name); ::polyscope::VectorType vt = vector_type(attr.get_usage()); + auto matrix = matrix_view(attr).template leftCols<3>(); if constexpr (IsMesh) { - return ps_struct->addVertexVectorQuantity(name, matrix_view(attr), vt); + return ps_struct->addVertexVectorQuantity(name, matrix, vt); } else if constexpr (IsEdge) { - return ps_struct->addNodeVectorQuantity(name, matrix_view(attr), vt); + return ps_struct->addNodeVectorQuantity(name, matrix, vt); } else { - return ps_struct->addVectorQuantity(name, matrix_view(attr), vt); + return ps_struct->addVectorQuantity(name, matrix, vt); } } else if (attr.get_usage() == Usage::Color) { lagrange::logger().info("Registering color vertex attribute: {}", name); + auto matrix = as_color_matrix(attr).template leftCols<3>(); if constexpr (IsMesh) { - return ps_struct->addVertexColorQuantity(name, as_color_matrix(attr)); + return ps_struct->addVertexColorQuantity(name, matrix); } else if constexpr (IsEdge) { - return 
ps_struct->addNodeColorQuantity(name, as_color_matrix(attr)); + return ps_struct->addNodeColorQuantity(name, matrix); } else { - return ps_struct->addColorQuantity(name, as_color_matrix(attr)); + return ps_struct->addColorQuantity(name, matrix); } } } else if (attr.get_num_channels() == 2) { @@ -152,25 +162,27 @@ auto register_attribute( if (attr.get_usage() == Usage::Scalar) { lagrange::logger().info("Registering scalar facet attribute: {}", name); if constexpr (IsMesh) { - return ps_struct->addFaceScalarQuantity(name, vector_view(attr)); + return ps_struct->addFaceScalarQuantity(name, vector_view(attr), scalar_data_type); } else if constexpr (IsEdge) { - return ps_struct->addEdgeScalarQuantity(name, vector_view(attr)); + return ps_struct->addEdgeScalarQuantity(name, vector_view(attr), scalar_data_type); } - } else if (attr.get_num_channels() == 3) { + } else if (attr.get_num_channels() == 3 || attr.get_num_channels() == 4) { if (show_as_vector(attr.get_usage())) { lagrange::logger().info("Registering vector facet attribute: {}", name); ::polyscope::VectorType vt = vector_type(attr.get_usage()); + auto matrix = matrix_view(attr).template leftCols<3>(); if constexpr (IsMesh) { - return ps_struct->addFaceVectorQuantity(name, matrix_view(attr), vt); + return ps_struct->addFaceVectorQuantity(name, matrix, vt); } else if constexpr (IsEdge) { - return ps_struct->addEdgeVectorQuantity(name, matrix_view(attr), vt); + return ps_struct->addEdgeVectorQuantity(name, matrix, vt); } } else if (attr.get_usage() == Usage::Color) { lagrange::logger().info("Registering color facet attribute: {}", name); + auto matrix = as_color_matrix(attr).template leftCols<3>(); if constexpr (IsMesh) { - return ps_struct->addFaceColorQuantity(name, as_color_matrix(attr)); + return ps_struct->addFaceColorQuantity(name, matrix); } else if constexpr (IsEdge) { - return ps_struct->addEdgeColorQuantity(name, as_color_matrix(attr)); + return ps_struct->addEdgeColorQuantity(name, matrix); } } } else if 
(attr.get_num_channels() == 2) { @@ -189,7 +201,7 @@ auto register_attribute( if constexpr (IsMesh) { if (attr.get_usage() == Usage::Scalar) { lagrange::logger().info("Registering scalar edge attribute: {}", name); - return ps_struct->addEdgeScalarQuantity(name, vector_view(attr)); + return ps_struct->addEdgeScalarQuantity(name, vector_view(attr), scalar_data_type); } } break; @@ -220,7 +232,13 @@ void register_attributes(PolyscopeStructure* ps_struct, const SurfaceMesh #include +#include #include #include @@ -56,16 +57,16 @@ ::polyscope::CurveNetworkQuantity* register_attribute( return register_attribute(&ps_curve_network, name, attr); } -#define LA_X_register_edge_network(_, Scalar, Index) \ - template ::polyscope::CurveNetwork* register_edge_network( \ - std::string_view name, \ +#define LA_X_register_edge_network(_, Scalar, Index) \ + template LA_POLYSCOPE_API ::polyscope::CurveNetwork* register_edge_network( \ + std::string_view name, \ const SurfaceMesh& mesh); LA_SURFACE_MESH_X(register_edge_network, 0) -#define LA_X_register_attribute(_, ValueType) \ - template ::polyscope::CurveNetworkQuantity* register_attribute( \ - ::polyscope::CurveNetwork & ps_curve_network, \ - std::string_view name, \ +#define LA_X_register_attribute(_, ValueType) \ + template LA_POLYSCOPE_API ::polyscope::CurveNetworkQuantity* register_attribute( \ + ::polyscope::CurveNetwork & ps_curve_network, \ + std::string_view name, \ const lagrange::Attribute& attr); LA_ATTRIBUTE_X(register_attribute, 0) diff --git a/modules/polyscope/src/register_mesh.cpp b/modules/polyscope/src/register_mesh.cpp index c6004e5f..5e818e1c 100644 --- a/modules/polyscope/src/register_mesh.cpp +++ b/modules/polyscope/src/register_mesh.cpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -105,16 +106,16 @@ ::polyscope::SurfaceMeshQuantity* register_attribute( return register_attribute(&ps_mesh, name, attr); } -#define LA_X_register_mesh(_, Scalar, Index) \ - template ::polyscope::SurfaceMesh* 
register_mesh( \ - std::string_view name, \ +#define LA_X_register_mesh(_, Scalar, Index) \ + template LA_POLYSCOPE_API ::polyscope::SurfaceMesh* register_mesh( \ + std::string_view name, \ const SurfaceMesh& mesh); LA_SURFACE_MESH_X(register_mesh, 0) -#define LA_X_register_attribute(_, ValueType) \ - template ::polyscope::SurfaceMeshQuantity* register_attribute( \ - ::polyscope::SurfaceMesh & ps_mesh, \ - std::string_view name, \ +#define LA_X_register_attribute(_, ValueType) \ + template LA_POLYSCOPE_API ::polyscope::SurfaceMeshQuantity* register_attribute( \ + ::polyscope::SurfaceMesh & ps_mesh, \ + std::string_view name, \ const lagrange::Attribute& attr); LA_ATTRIBUTE_X(register_attribute, 0) diff --git a/modules/polyscope/src/register_point_cloud.cpp b/modules/polyscope/src/register_point_cloud.cpp index 6d5af763..dee890de 100644 --- a/modules/polyscope/src/register_point_cloud.cpp +++ b/modules/polyscope/src/register_point_cloud.cpp @@ -15,6 +15,7 @@ #include #include +#include #include #include @@ -53,16 +54,16 @@ ::polyscope::PointCloudQuantity* register_attribute( return register_attribute(&ps_point_cloud, name, attr); } -#define LA_X_register_point_cloud(_, Scalar, Index) \ - template ::polyscope::PointCloud* register_point_cloud( \ - std::string_view name, \ +#define LA_X_register_point_cloud(_, Scalar, Index) \ + template LA_POLYSCOPE_API ::polyscope::PointCloud* register_point_cloud( \ + std::string_view name, \ const SurfaceMesh& mesh); LA_SURFACE_MESH_X(register_point_cloud, 0) -#define LA_X_register_attribute(_, ValueType) \ - template ::polyscope::PointCloudQuantity* register_attribute( \ - ::polyscope::PointCloud & ps_point_cloud, \ - std::string_view name, \ +#define LA_X_register_attribute(_, ValueType) \ + template LA_POLYSCOPE_API ::polyscope::PointCloudQuantity* register_attribute( \ + ::polyscope::PointCloud & ps_point_cloud, \ + std::string_view name, \ const lagrange::Attribute& attr); LA_ATTRIBUTE_X(register_attribute, 0) diff --git 
a/modules/polyscope/src/register_structure.cpp b/modules/polyscope/src/register_structure.cpp index 62535ac9..a3f4ca60 100644 --- a/modules/polyscope/src/register_structure.cpp +++ b/modules/polyscope/src/register_structure.cpp @@ -15,6 +15,7 @@ #include #include +#include #include #include #include @@ -60,16 +61,16 @@ ::polyscope::Quantity* register_attribute( } } -#define LA_X_register_structure(_, Scalar, Index) \ - template ::polyscope::Structure* register_structure( \ - std::string_view name, \ +#define LA_X_register_structure(_, Scalar, Index) \ + template LA_POLYSCOPE_API ::polyscope::Structure* register_structure( \ + std::string_view name, \ const SurfaceMesh& mesh); LA_SURFACE_MESH_X(register_structure, 0) -#define LA_X_register_attribute(_, ValueType) \ - template ::polyscope::Quantity* register_attribute( \ - ::polyscope::Structure & ps_structure, \ - std::string_view name, \ +#define LA_X_register_attribute(_, ValueType) \ + template LA_POLYSCOPE_API ::polyscope::Quantity* register_attribute( \ + ::polyscope::Structure & ps_structure, \ + std::string_view name, \ const lagrange::Attribute& attr); LA_ATTRIBUTE_X(register_attribute, 0) diff --git a/modules/polyscope/tests/CMakeLists.txt b/modules/polyscope/tests/CMakeLists.txt index fec2a425..0caf2ba6 100644 --- a/modules/polyscope/tests/CMakeLists.txt +++ b/modules/polyscope/tests/CMakeLists.txt @@ -9,6 +9,13 @@ # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. 
# + +if(BUILD_SHARED_LIBS AND WIN32) + # See https://github.com/nmwsharp/polyscope/issues/330 + message(WARNING "Skipping polyscope unit tests in shared lib builds on Windows.") + return() +endif() + lagrange_add_test() option(LAGRANGE_POLYSCOPE_MOCK_BACKEND "Use Polyscope's mock backend for unit testing" OFF) diff --git a/modules/polyscope/tests/test_polyscope.cpp b/modules/polyscope/tests/test_polyscope.cpp index f095e9c9..aef2f21c 100644 --- a/modules/polyscope/tests/test_polyscope.cpp +++ b/modules/polyscope/tests/test_polyscope.cpp @@ -219,3 +219,49 @@ TEST_CASE("register_edges_2d", "[polyscope]") auto ps_struct = lagrange::polyscope::register_structure("edge_network_struct", uv_mesh); REQUIRE(dynamic_cast(ps_struct) != nullptr); } + +TEST_CASE("register_4channel_attributes", "[polyscope]") +{ + using Scalar = double; + using Index = uint32_t; + + polyscope::init(g_backend); + auto mesh = lagrange::testing::load_surface_mesh("open/core/simple/cube.obj"); + + // Add 4-channel vertex color attribute (RGBA) + auto vertex_color_id = mesh.template create_attribute( + "vertex_color", + lagrange::AttributeElement::Vertex, + lagrange::AttributeUsage::Color, + 4); + auto& vertex_color = mesh.template ref_attribute(vertex_color_id); + + // Add 4-channel vertex vector attribute + auto vertex_vec_id = mesh.template create_attribute( + "vertex_vec", + lagrange::AttributeElement::Vertex, + lagrange::AttributeUsage::Vector, + 4); + auto& vertex_vec = mesh.template ref_attribute(vertex_vec_id); + + // Add 4-channel facet color attribute (RGBA) + auto facet_color_id = mesh.template create_attribute( + "facet_color", + lagrange::AttributeElement::Facet, + lagrange::AttributeUsage::Color, + 4); + auto& facet_color = mesh.template ref_attribute(facet_color_id); + + // Register mesh and attributes + auto ps_mesh = lagrange::polyscope::register_mesh("mesh_4ch", mesh); + REQUIRE(ps_mesh != nullptr); + + auto attr1 = lagrange::polyscope::register_attribute(*ps_mesh, "vertex_color", 
vertex_color); + REQUIRE(attr1 != nullptr); + + auto attr2 = lagrange::polyscope::register_attribute(*ps_mesh, "vertex_vec", vertex_vec); + REQUIRE(attr2 != nullptr); + + auto attr3 = lagrange::polyscope::register_attribute(*ps_mesh, "facet_color", facet_color); + REQUIRE(attr3 != nullptr); +} diff --git a/modules/python/CMakeLists.txt b/modules/python/CMakeLists.txt index cc1dc675..f4a66423 100644 --- a/modules/python/CMakeLists.txt +++ b/modules/python/CMakeLists.txt @@ -96,8 +96,13 @@ function(lagrange_generate_binding_file) set(bind_lines "") get_target_property(active_modules lagrange_python LAGRANGE_ACTIVE_MODULES) foreach(module_name IN ITEMS ${active_modules}) + # Check for a Python name override (e.g. expose "serialization2" as "serialization") + get_target_property(python_name lagrange_python LAGRANGE_PYTHON_NAME_${module_name}) + if(NOT python_name) + set(python_name ${module_name}) + endif() list(APPEND include_lines "#include ") - list(APPEND bind_lines "nb::module_ m_${module_name} = m.def_submodule(\"${module_name}\", \"${module_name} module\")\\\;") + list(APPEND bind_lines "nb::module_ m_${module_name} = m.def_submodule(\"${python_name}\", \"${python_name} module\")\\\;") list(APPEND bind_lines "lagrange::python::populate_${module_name}_module(m_${module_name})\\\;") endforeach() @@ -132,7 +137,12 @@ function(lagrange_generate_init_file) set(init_lines "") get_target_property(active_modules lagrange_python LAGRANGE_ACTIVE_MODULES) foreach(module_name IN ITEMS ${active_modules}) - list(APPEND init_lines "from .lagrange import ${module_name}") + # Check for a Python name override + get_target_property(python_name lagrange_python LAGRANGE_PYTHON_NAME_${module_name}) + if(NOT python_name) + set(python_name ${module_name}) + endif() + list(APPEND init_lines "from .lagrange import ${python_name}") endforeach() # Determine Python package variant @@ -142,11 +152,20 @@ function(lagrange_generate_init_file) set(python_variant "corp") endif() - # Build the 
import lines + # Build the import lines and sys.modules alias lines set(import_lines "") + set(sysmod_lines "") foreach(line IN ITEMS ${init_lines}) string(APPEND import_lines "${line}\n") endforeach() + foreach(module_name IN ITEMS ${active_modules}) + # Check for a Python name override + get_target_property(python_name lagrange_python LAGRANGE_PYTHON_NAME_${module_name}) + if(NOT python_name) + set(python_name ${module_name}) + endif() + string(APPEND sysmod_lines "sys.modules[__name__ + '.${python_name}'] = ${python_name}\n") + endforeach() # Generate __init__.py to import all modules using file(GENERATE) set(init_file ${SKBUILD_PLATLIB_DIR}/lagrange/__init__.py) @@ -162,6 +181,8 @@ function(lagrange_generate_init_file) # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. # +import sys + from .lagrange.core import * from ._logging import logger from ._version import * @@ -173,7 +194,10 @@ del _logging, lagrange # type: ignore variant: str = \"${python_variant}\" # Import all modules. -${import_lines}") +${import_lines} +# Register submodules in sys.modules so that 'import lagrange.X' and +# 'from lagrange.X import Y' work correctly. 
+${sysmod_lines}") endfunction() function(lagrange_generate_python_binding_module) diff --git a/modules/raycasting/examples/picking_demo.cpp b/modules/raycasting/examples/picking_demo.cpp index 5f49192d..58420e03 100644 --- a/modules/raycasting/examples/picking_demo.cpp +++ b/modules/raycasting/examples/picking_demo.cpp @@ -14,6 +14,7 @@ #include #include #include +#include #include #include #include @@ -296,10 +297,7 @@ int main(int argc, char** argv) lagrange::logger().info("Acceleration structure built"); // Compute scene extent for normal length - Eigen::AlignedBox3f bbox; - for (const auto& p : lagrange::vertex_view(state.mesh).rowwise()) { - bbox.extend(p.transpose()); - } + auto bbox = lagrange::mesh_bbox<3>(state.mesh); state.normal_length = 0.1f * bbox.diagonal().norm(); // Set up user callback diff --git a/modules/raycasting/include/lagrange/raycasting/RayCaster.h b/modules/raycasting/include/lagrange/raycasting/RayCaster.h index deac2cd8..b1bcbf59 100644 --- a/modules/raycasting/include/lagrange/raycasting/RayCaster.h +++ b/modules/raycasting/include/lagrange/raycasting/RayCaster.h @@ -768,8 +768,10 @@ class RayCaster /// @} private: + /// @cond LA_INTERNAL_DOCS struct Impl; value_ptr m_impl; + /// @endcond }; } // namespace lagrange::raycasting diff --git a/modules/raycasting/include/lagrange/raycasting/compute_local_feature_size.h b/modules/raycasting/include/lagrange/raycasting/compute_local_feature_size.h new file mode 100644 index 00000000..19333468 --- /dev/null +++ b/modules/raycasting/include/lagrange/raycasting/compute_local_feature_size.h @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include + +#include +#include + +namespace lagrange::raycasting { + +class RayCaster; + +/// +/// @addtogroup group-raycasting +/// @{ +/// + +/// +/// Ray direction mode for local feature size computation. +/// +enum class RayDirectionMode { + /// Cast rays into the interior of the shape (hemisphere bounded by 1-ring normals, pointing + /// inward). + Interior, + + /// Cast rays to the exterior of the shape (hemisphere bounded by 1-ring normals, pointing + /// outward). + Exterior, + + /// Cast rays in all directions (full sphere). + Both, +}; + +/// +/// Options for compute_local_feature_size(). +/// +struct LocalFeatureSizeOptions +{ + /// Output attribute name for local feature size values. + std::string_view output_attribute_name = "@lfs"; + + /// Input vertex normal attribute name. If empty, vertex normals will be computed internally. + std::string_view vertex_normal_attribute_name = ""; + + /// Ray direction mode (interior, exterior, or both). + RayDirectionMode direction_mode = RayDirectionMode::Interior; + + /// Ray offset along the vertex normal to avoid self-intersection (relative to bounding box + /// diagonal). The actual offset distance is `ray_offset * bbox_diagonal`. + /// Set to 0 to disable offset (not recommended). + float ray_offset = 1e-4f; + + /// Default local feature size value used when raycasting fails to find valid hits. + /// Default: infinity. 
+ float default_lfs = std::numeric_limits::infinity(); + + /// Error tolerance for medial axis binary search convergence (relative to bounding box diagonal). + /// The binary search stops when |distance_to_surface - depth_along_ray| < tolerance * bbox_diagonal. + /// Smaller values produce more accurate results but require more iterations. + /// Default: 1e-4 (0.01% of bounding box diagonal). + float medial_axis_tolerance = 1e-4f; +}; + +/// +/// Compute local feature size for each vertex of a mesh using medial axis approximation. +/// +/// For each vertex, this function: +/// 1. Casts a single ray along the normal direction (inward for Interior, outward for Exterior, +/// both directions for Both mode) to find the opposite surface. +/// 2. Performs binary search along the ray to find the medial axis point - approximated as the +/// largest depth from where the closest point on surface is within 1-ring of the vertex. +/// 3. Returns this distance as the local feature size. +/// +/// @note This method works best for meshes with uniformly sized triangles. +/// +/// @note For `Interior` mode, rays are cast inward (negative normal direction). For +/// `Exterior` mode, rays are cast outward (positive normal direction). For `Both` +/// mode, rays are cast in both directions and the minimum LFS is taken. +/// +/// @note If raycasting fails to find a hit, or if the binary search fails to converge, +/// the `default_lfs` value is used as a fallback. +/// +/// @param[in,out] mesh Mesh to process (must be a triangle mesh). The mesh is modified to +/// add the local feature size attribute. +/// @param[in] options Options for local feature size computation. +/// @param[in] ray_caster If provided, use this ray caster to perform the queries. The mesh +/// must have been added to the ray caster in advance, and the scene +/// must have been committed. If nullptr, a temporary ray caster will +/// be created internally. +/// +/// @tparam Scalar Mesh scalar type. 
+/// @tparam Index Mesh index type. +/// +/// @return The attribute id of the local feature size attribute. +/// +template +LA_RAYCASTING_API AttributeId compute_local_feature_size( + SurfaceMesh& mesh, + const LocalFeatureSizeOptions& options = {}, + const RayCaster* ray_caster = nullptr); + +/// @} + +} // namespace lagrange::raycasting diff --git a/modules/raycasting/python/src/raycasting.cpp b/modules/raycasting/python/src/raycasting.cpp index b8d9dac5..ec941b37 100644 --- a/modules/raycasting/python/src/raycasting.cpp +++ b/modules/raycasting/python/src/raycasting.cpp @@ -16,6 +16,7 @@ #include #include #include +#include #include #include #include @@ -666,6 +667,68 @@ source mesh, vertex positions are set from the hit. :param ray_caster: Optional pre-built :class:`RayCaster` for caching. :return: (N, 3) NumPy array of projected positions (float64).)"); + + // ========================================================================= + // compute_local_feature_size + // ========================================================================= + + m.def( + "compute_local_feature_size", + [](MeshType& mesh, + std::string_view output_attribute_name, + std::string_view direction_mode, + float ray_offset, + float default_lfs, + float medial_axis_tolerance, + const raycasting::RayCaster* ray_caster) -> AttributeId { + raycasting::LocalFeatureSizeOptions opts; + opts.output_attribute_name = output_attribute_name; + + if (direction_mode == "interior") { + opts.direction_mode = raycasting::RayDirectionMode::Interior; + } else if (direction_mode == "exterior") { + opts.direction_mode = raycasting::RayDirectionMode::Exterior; + } else if (direction_mode == "both") { + opts.direction_mode = raycasting::RayDirectionMode::Both; + } else { + throw std::runtime_error( + "Invalid direction_mode. 
Use 'interior', 'exterior', or 'both'."); + } + + opts.ray_offset = ray_offset; + opts.default_lfs = default_lfs; + opts.medial_axis_tolerance = medial_axis_tolerance; + return raycasting::compute_local_feature_size(mesh, opts, ray_caster); + }, + "mesh"_a, + nb::kw_only(), + "output_attribute_name"_a = "@lfs", + "direction_mode"_a = "interior", + "ray_offset"_a = 1e-4f, + "default_lfs"_a = std::numeric_limits::infinity(), + "medial_axis_tolerance"_a = 1e-4f, + "ray_caster"_a = nullptr, + R"(Compute local feature size for each vertex using medial axis approximation. + +The local feature size is stored as a per-vertex attribute on the mesh. + +:param mesh: Triangle mesh (modified in place to add the LFS attribute). +:param output_attribute_name: Name of the output LFS attribute (default: ``"@lfs"``). +:param direction_mode: Ray direction mode -- ``"interior"``, ``"exterior"``, or + ``"both"`` (default: ``"interior"``). +:param ray_offset: Ray offset along the vertex normal to avoid self-intersection + (relative to bounding box diagonal). The actual offset distance + is ``ray_offset * bbox_diagonal`` (default: 1e-4). +:param default_lfs: Default local feature size value used when raycasting fails + to find valid hits (default: infinity). +:param medial_axis_tolerance: Error tolerance for medial axis binary search convergence + (relative to bounding box diagonal). The binary search stops + when ``|distance_to_surface - depth_along_ray| < tolerance * + bbox_diagonal``. Smaller values produce more accurate results + but require more iterations (default: 1e-4). +:param ray_caster: Optional pre-built :class:`RayCaster` for caching. +:return: Attribute id of the newly added LFS attribute. 
+:rtype: int)"); } } // namespace lagrange::python diff --git a/modules/raycasting/python/tests/test_raycasting.py b/modules/raycasting/python/tests/test_raycasting.py index 6fac3952..f96539eb 100644 --- a/modules/raycasting/python/tests/test_raycasting.py +++ b/modules/raycasting/python/tests/test_raycasting.py @@ -491,3 +491,142 @@ def test_raycasting_mode(self, unit_cube): ) assert isinstance(result, np.ndarray) np.testing.assert_allclose(result[0, 2], 1.0, atol=1e-5) + + +# --------------------------------------------------------------------------- +# compute_local_feature_size +# --------------------------------------------------------------------------- + + +class TestComputeLocalFeatureSize: + def test_default_parameters(self, unit_cube): + """Test compute_local_feature_size with default parameters.""" + attr_id = lagrange.raycasting.compute_local_feature_size(unit_cube) + assert unit_cube.has_attribute("@lfs") + lfs_data = unit_cube.attribute(attr_id).data + assert lfs_data.shape == (unit_cube.num_vertices,) + # All LFS values should be finite for a closed cube + assert np.all(np.isfinite(lfs_data)) + + def test_interior_mode(self, unit_cube): + """Test with interior ray casting mode.""" + # Test with string + attr_id = lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_interior", + direction_mode="interior", + ) + assert unit_cube.has_attribute("@lfs_interior") + lfs_data = unit_cube.attribute(attr_id).data + assert np.all(np.isfinite(lfs_data)) + assert np.all(lfs_data > 0) + + # Test with enum + attr_id_enum = lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_interior_enum", + direction_mode="interior", + ) + assert unit_cube.has_attribute("@lfs_interior_enum") + lfs_data_enum = unit_cube.attribute(attr_id_enum).data + assert np.all(np.isfinite(lfs_data_enum)) + + def test_exterior_mode(self, unit_cube): + """Test with exterior ray casting mode using string.""" + 
lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_exterior", + direction_mode="exterior", + ) + assert unit_cube.has_attribute("@lfs_exterior") + # Note: exterior rays may not always hit for a finite mesh + # so we just check that the attribute was created + + def test_both_mode(self, unit_cube): + """Test with both directions ray casting mode using string.""" + attr_id = lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_both", + direction_mode="both", + ) + assert unit_cube.has_attribute("@lfs_both") + lfs_data = unit_cube.attribute(attr_id).data + assert np.all(np.isfinite(lfs_data)) + + def test_with_cached_raycaster(self, unit_cube): + """Test using a pre-built RayCaster.""" + rc = lagrange.raycasting.RayCaster() + rc.add_mesh(unit_cube) + rc.commit_updates() + + attr_id = lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_cached", + direction_mode="interior", + ray_caster=rc, + ) + assert unit_cube.has_attribute("@lfs_cached") + lfs_data = unit_cube.attribute(attr_id).data + assert np.all(np.isfinite(lfs_data)) + + def test_medial_axis_tolerance(self, unit_cube): + """Test the medial axis tolerance parameter.""" + # With loose tolerance + attr_id_loose = lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_loose", + direction_mode="interior", + medial_axis_tolerance=1e-3, + ) + assert unit_cube.has_attribute("@lfs_loose") + lfs_loose = unit_cube.attribute(attr_id_loose).data + assert np.all(np.isfinite(lfs_loose)) + + # With tight tolerance + attr_id_tight = lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_tight", + direction_mode="interior", + medial_axis_tolerance=1e-5, + ) + assert unit_cube.has_attribute("@lfs_tight") + lfs_tight = unit_cube.attribute(attr_id_tight).data + assert np.all(np.isfinite(lfs_tight)) + + # Both should produce valid results + # 
(values may differ slightly due to different convergence) + + def test_default_lfs(self, unit_cube): + """Test the default_lfs parameter as fallback value.""" + attr_id = lagrange.raycasting.compute_local_feature_size( + unit_cube, + output_attribute_name="@lfs_default", + default_lfs=999.0, + ) + assert unit_cube.has_attribute("@lfs_default") + lfs_data = unit_cube.attribute(attr_id).data + # For a closed cube with interior mode, all rays should hit + # so we should get valid computed values (not the default) + assert np.all(lfs_data < 999.0) + assert lfs_data.shape == (unit_cube.num_vertices,) + + def test_invalid_direction_mode(self, unit_cube): + """Test that invalid direction mode string raises an error.""" + with pytest.raises(RuntimeError, match="Invalid direction_mode"): + lagrange.raycasting.compute_local_feature_size( + unit_cube, + direction_mode="invalid_mode", + ) + + def test_keyword_only_arguments(self, unit_cube): + """Test that optional arguments are keyword-only.""" + # This should work + lagrange.raycasting.compute_local_feature_size( + unit_cube, + medial_axis_tolerance=1e-4, + ) + assert unit_cube.has_attribute("@lfs") + + # This should fail because we're passing positional args after mesh + with pytest.raises(TypeError): + lagrange.raycasting.compute_local_feature_size(unit_cube, 1e-4) diff --git a/modules/raycasting/raycasting.md b/modules/raycasting/raycasting.md index 0dd9b458..41a64c43 100644 --- a/modules/raycasting/raycasting.md +++ b/modules/raycasting/raycasting.md @@ -16,3 +16,4 @@ Raycasting Module - [project_attributes_closest_point](@ref lagrange::raycasting::project_attributes_closest_point) - [project_attributes_directional](@ref lagrange::raycasting::project_attributes_directional) - [project_attributes_closest_vertex](@ref lagrange::bvh::project_attributes_closest_vertex) +- [compute_local_feature_size](@ref lagrange::raycasting::compute_local_feature_size) diff --git a/modules/raycasting/src/RayCaster.cpp 
b/modules/raycasting/src/RayCaster.cpp index cda2d67d..1ef0f88f 100644 --- a/modules/raycasting/src/RayCaster.cpp +++ b/modules/raycasting/src/RayCaster.cpp @@ -159,8 +159,8 @@ constexpr RTCBuildQuality to_embree_build_quality(BuildQuality quality) } } -/// Convert a bool-typed Eigen mask to an int32_t array for Embree. -/// Embree expects 0 (inactive) / -1 (active, i.e. 0xFFFFFFFF) as int32_t. +// Convert a bool-typed Eigen mask to an int32_t array for Embree. +// Embree expects 0 (inactive) / -1 (active, i.e. 0xFFFFFFFF) as int32_t. template void mask_to_embree(const Eigen::Matrix& mask, std::array& out) { @@ -169,7 +169,7 @@ void mask_to_embree(const Eigen::Matrix& mask, std::array& o } } -/// Build an Embree int32_t mask from a count: the first `count` lanes are active. +// Build an Embree int32_t mask from a count: the first `count` lanes are active. template void count_to_embree_mask(size_t count, std::array& out) { @@ -178,7 +178,7 @@ void count_to_embree_mask(size_t count, std::array& out) } } -/// Resolve a std::variant into an Embree-ready int32_t mask. +// Resolve a std::variant into an Embree-ready int32_t mask. template void resolve_active_mask( const std::variant, size_t>& active, @@ -191,15 +191,15 @@ void resolve_active_mask( } } -/// Mapping from Embree instance geometry ID to user-facing mesh/instance indices. +// Mapping from Embree instance geometry ID to user-facing mesh/instance indices. struct InstanceIndices { uint32_t mesh_index; uint32_t instance_index; }; -/// User data passed to the Embree point query callback for closest-point queries on an instanced -/// scene. Holds a pointer to the scene so that the callback can retrieve triangle vertices. +// User data passed to the Embree point query callback for closest-point queries on an instanced +// scene. Holds a pointer to the scene so that the callback can retrieve triangle vertices. 
struct ClosestPointUserData { const SimpleScene32f* scene = nullptr; @@ -342,6 +342,7 @@ bool embree_closest_point_callback(RTCPointQueryFunctionArguments* args) // Impl // ============================================================================ +/// @cond LA_INTERNAL_DOCS struct RayCasterImpl { RTCSceneFlags m_scene_flags = to_embree_scene_flags(SceneFlags::None); @@ -446,7 +447,7 @@ struct RayCasterImpl RTC_BUFFER_TYPE_VERTEX, 0, RTC_FORMAT_FLOAT3, - mesh.get_vertex_to_position().get_all_with_padding().data(), + lagrange::internal::get_all_unpoisoned(mesh.get_vertex_to_position()).data(), 0, sizeof(float) * 3, mesh.get_num_vertices()); @@ -463,7 +464,7 @@ struct RayCasterImpl RTC_BUFFER_TYPE_INDEX, 0, RTC_FORMAT_UINT3, - mesh.get_corner_to_vertex().get_all_with_padding().data(), + lagrange::internal::get_all_unpoisoned(mesh.get_corner_to_vertex()).data(), 0, sizeof(uint32_t) * 3, mesh.get_num_facets()); @@ -572,16 +573,17 @@ struct RayCasterImpl inst_idx = m_instance_indices[rtc_inst_id].instance_index; } }; +/// @endcond // ============================================================================ // Embree filter callbacks // ============================================================================ -/// Whether the filter is for intersection or occlusion queries. +// Whether the filter is for intersection or occlusion queries. enum class FilterKind { Intersection, Occlusion }; -/// Context struct that extends RTCRayQueryContext (Embree 4) / RTCIntersectContext (Embree 3) with a -/// pointer back to the RayCasterImpl so that the filter callback can look up user filter functions. +// Context struct that extends RTCRayQueryContext (Embree 4) / RTCIntersectContext (Embree 3) with a +// pointer back to the RayCasterImpl so that the filter callback can look up user filter functions. 
struct FilterContext { #ifdef LAGRANGE_WITH_EMBREE_3 @@ -593,8 +595,8 @@ struct FilterContext FilterKind kind = FilterKind::Intersection; }; -/// Embree filter callback invoked for each potential hit. Looks up the user-defined filter function -/// on the mesh that was hit and rejects the hit (sets valid[i] = 0) if the filter returns false. +// Embree filter callback invoked for each potential hit. Looks up the user-defined filter function +// on the mesh that was hit and rejects the hit (sets valid[i] = 0) if the filter returns false. void embree_filter_callback(const RTCFilterFunctionNArguments* args) { // Recover our extended context from the context pointer. @@ -635,7 +637,7 @@ void embree_filter_callback(const RTCFilterFunctionNArguments* args) } } -/// Returns true if any mesh in the scene has an intersection filter set. +// Returns true if any mesh in the scene has an intersection filter set. bool has_any_intersection_filter(const RayCasterImpl& impl) { for (const auto& m : impl.m_meshes) { @@ -644,7 +646,7 @@ bool has_any_intersection_filter(const RayCasterImpl& impl) return false; } -/// Returns true if any mesh in the scene has an occlusion filter set. +// Returns true if any mesh in the scene has an occlusion filter set. bool has_any_occlusion_filter(const RayCasterImpl& impl) { for (const auto& m : impl.m_meshes) { @@ -653,7 +655,7 @@ bool has_any_occlusion_filter(const RayCasterImpl& impl) return false; } -/// Initialize a FilterContext for intersection queries. +// Initialize a FilterContext for intersection queries. void init_intersection_filter_context(FilterContext& fctx, const RayCasterImpl& impl) { #ifdef LAGRANGE_WITH_EMBREE_3 @@ -666,7 +668,7 @@ void init_intersection_filter_context(FilterContext& fctx, const RayCasterImpl& fctx.kind = FilterKind::Intersection; } -/// Initialize a FilterContext for occlusion queries. +// Initialize a FilterContext for occlusion queries. 
void init_occlusion_filter_context(FilterContext& fctx, const RayCasterImpl& impl) { #ifdef LAGRANGE_WITH_EMBREE_3 diff --git a/modules/raycasting/src/closest_vertex_from_barycentric.h b/modules/raycasting/src/closest_vertex_from_barycentric.h new file mode 100644 index 00000000..2dfd6100 --- /dev/null +++ b/modules/raycasting/src/closest_vertex_from_barycentric.h @@ -0,0 +1,31 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +namespace lagrange::raycasting { + +/// Determine which vertex of a triangle the barycentric coordinates are closest to. +/// Barycentric coordinates (u, v) represent: p = (1-u-v)*v0 + u*v1 + v*v2. +/// Returns 0, 1, or 2 for the three vertices of the triangle. +inline int closest_vertex_from_barycentric(float u, float v) +{ + float w = 1.0f - u - v; + if (w >= u && w >= v) { + return 0; + } else if (u >= v) { + return 1; + } else { + return 2; + } +} + +} // namespace lagrange::raycasting diff --git a/modules/raycasting/src/compute_local_feature_size.cpp b/modules/raycasting/src/compute_local_feature_size.cpp new file mode 100644 index 00000000..a0848f09 --- /dev/null +++ b/modules/raycasting/src/compute_local_feature_size.cpp @@ -0,0 +1,269 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#include + +#include "closest_vertex_from_barycentric.h" +#include "prepare_ray_caster.h" + +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include +#include + +// clang-format off +#include +#include +#include +#include +// clang-format on + +#include +#include +#include +#include + +namespace lagrange::raycasting { + +namespace { + +// Maximum number of vertices in a one-ring neighborhood (stack-allocated). +constexpr size_t kMaxOneRingSize = 64; + +// Collect the one-ring vertex neighborhood of a vertex into a StackSet. +template +StackSet collect_one_ring( + const SurfaceMesh& mesh, + Index vertex_index) +{ + StackSet one_ring; + one_ring.insert(vertex_index); + mesh.foreach_edge_around_vertex_with_duplicates(vertex_index, [&](Index edge_id) { + auto v0 = mesh.get_edge_vertices(edge_id); + Index neighbor = (v0[0] == vertex_index) ? v0[1] : v0[0]; + one_ring.insert(neighbor); + }); + return one_ring; +} + +// Binary search for the medial axis point along a ray direction. +// Returns the maximum distance along the ray where the closest point on the surface +// still corresponds to the same vertex (vertex_index). +// If search fails or no valid medial axis point exists, returns max_depth. 
+template +float find_medial_axis_distance( + const RayCaster* ray_caster, + const StackSet& one_ring, + const FacetView& facets, + const Eigen::Vector3f& start_point, + const Eigen::Vector3f& direction, + float max_depth, + float tolerance) +{ + // Binary search in range [0, max_depth] + float min_d = 0.0f; + float max_d = max_depth; + + while (max_d - min_d > tolerance) { + float mid_d = (min_d + max_d) * 0.5f; + + // Query point at this depth + Eigen::Vector3f query_point = start_point + mid_d * direction; + + // Find closest point on surface + auto hit = ray_caster->closest_point(query_point); + la_runtime_assert( + hit.has_value(), + "Ray caster should always return a hit for closed meshes"); + + // Get the closest vertex of the hit triangle + int local_vertex_idx = + closest_vertex_from_barycentric(hit->barycentric_coord[0], hit->barycentric_coord[1]); + + // Get the global vertex index + auto facet = facets.row(static_cast(hit->facet_index)); + Index closest_vertex = facet[local_vertex_idx]; + + // Check if the closest vertex is within one ring of the original vertex + if (one_ring.contains(closest_vertex)) { + min_d = mid_d; + } else { + max_d = mid_d; + } + } + + // Return the maximum depth where we're still closest to the original vertex + return min_d; +} + +} // namespace + +template +AttributeId compute_local_feature_size( + SurfaceMesh& mesh, + const LocalFeatureSizeOptions& options, + const RayCaster* ray_caster) +{ + mesh.initialize_edges(); + la_runtime_assert(mesh.is_triangle_mesh(), "Only triangle meshes are supported"); + la_runtime_assert(mesh.get_dimension() == 3, "Only 3D meshes are supported"); + + const Index num_vertices = mesh.get_num_vertices(); + + // Create output attribute + AttributeId lfs_id = internal::find_or_create_attribute( + mesh, + options.output_attribute_name, + Vertex, + AttributeUsage::Scalar, + 1, + internal::ResetToDefault::Yes); + + if (num_vertices == 0) { + // Nothing to do for empty mesh + logger().warn("Mesh has no 
vertices. Returning empty local feature size."); + return lfs_id; + } + + auto lfs_values = mesh.template ref_attribute(lfs_id).ref_all(); + + // Initialize with default_lfs (or infinity if no upper bound is set) + std::fill(lfs_values.begin(), lfs_values.end(), static_cast(options.default_lfs)); + + // Build a temporary ray caster if one is not provided + auto engine = prepare_ray_caster(mesh, ray_caster); + if (engine) { + ray_caster = engine.get(); + } + + auto vertices = vertex_view(mesh); + + // Compute bounding box diagonal for scaling tolerance + Eigen::Vector3f bbox_min = vertices.colwise().minCoeff().template cast(); + Eigen::Vector3f bbox_max = vertices.colwise().maxCoeff().template cast(); + float bbox_diagonal = (bbox_max - bbox_min).norm(); + + // Scale relative parameters by bounding box diagonal + float absolute_tolerance = options.medial_axis_tolerance * bbox_diagonal; + float absolute_ray_offset = options.ray_offset * bbox_diagonal; + logger().debug("Absolute tolerance: {}", absolute_tolerance); + + // Precompute vertex normals for all vertices + AttributeId normal_id = invalid(); + bool owns_normal_attribute = false; + if (options.vertex_normal_attribute_name.empty()) { + VertexNormalOptions normal_options; + normal_options.output_attribute_name = "@vertex_normal_lfs_tmp"; + normal_options.weight_type = NormalWeightingType::Angle; + normal_id = compute_vertex_normal(mesh, normal_options); + owns_normal_attribute = true; + } else { + la_runtime_assert( + mesh.has_attribute(options.vertex_normal_attribute_name), + "Specified vertex normal attribute does not exist"); + if (!mesh.template is_attribute_type(options.vertex_normal_attribute_name)) { + logger().info( + "Casting vertex normal attribute {} to Scalar type", + options.vertex_normal_attribute_name); + normal_id = cast_attribute( + mesh, + options.vertex_normal_attribute_name, + "@vertex_normal_lfs_tmp"); + owns_normal_attribute = true; + } else { + normal_id = 
mesh.get_attribute_id(options.vertex_normal_attribute_name); + } + } + auto vertex_normals = attribute_matrix_view(mesh, normal_id); + auto facets = facet_view(mesh); + + // Process each vertex + tbb::parallel_for(Index(0), num_vertices, [&](Index vi) { + Eigen::Vector3f vertex_pos = vertices.row(vi).template cast().transpose(); + + // Get precomputed vertex normal + Eigen::Vector3f vertex_normal = vertex_normals.row(vi).template cast().transpose(); + + float lfs = options.default_lfs; + + // Precompute one-ring neighborhood for this vertex + auto one_ring = collect_one_ring(mesh, vi); + + // Function to process a single ray direction + auto process_direction = [&](const Eigen::Vector3f& dir, const Eigen::Vector3f& origin) { + // Cast ray to find opposite surface + auto hit = ray_caster->cast(origin + dir * absolute_ray_offset, dir); + + if (hit.has_value() && hit->ray_depth > 0.0f) { + float hit_depth = hit->ray_depth + absolute_ray_offset; + + // Binary search for medial axis point + float medial_distance = find_medial_axis_distance( + ray_caster, + one_ring, + facets, + origin, + dir, + hit_depth, + absolute_tolerance); + + return medial_distance; + } + + // No hit found, return default_lfs + return options.default_lfs; + }; + + if (options.direction_mode == RayDirectionMode::Interior) { + // Shoot inward + Eigen::Vector3f ray_direction = -vertex_normal; + lfs = process_direction(ray_direction, vertex_pos); + } else if (options.direction_mode == RayDirectionMode::Exterior) { + // Shoot outward + Eigen::Vector3f ray_direction = vertex_normal; + lfs = process_direction(ray_direction, vertex_pos); + } else if (options.direction_mode == RayDirectionMode::Both) { + // Shoot in both directions and take minimum + Eigen::Vector3f ray_direction_out = vertex_normal; + Eigen::Vector3f ray_direction_in = -vertex_normal; + float lfs_positive = process_direction(ray_direction_out, vertex_pos); + float lfs_negative = process_direction(ray_direction_in, vertex_pos); + lfs = 
std::min(lfs_positive, lfs_negative); + } + + // Store result (already clamped by initialization and process_direction fallback) + lfs_values[vi] = static_cast(lfs); + }); + + // Clean up temporary vertex normal attribute + if (owns_normal_attribute) { + mesh.delete_attribute(normal_id); + } + + return lfs_id; +} + +#define LA_X_compute_local_feature_size(_, Scalar, Index) \ + template LA_RAYCASTING_API AttributeId compute_local_feature_size( \ + SurfaceMesh&, \ + const LocalFeatureSizeOptions&, \ + const RayCaster*); +LA_SURFACE_MESH_X(compute_local_feature_size, 0) + +} // namespace lagrange::raycasting diff --git a/modules/raycasting/src/project_closest_vertex.cpp b/modules/raycasting/src/project_closest_vertex.cpp index 6c617cbe..8384f1b8 100644 --- a/modules/raycasting/src/project_closest_vertex.cpp +++ b/modules/raycasting/src/project_closest_vertex.cpp @@ -12,6 +12,7 @@ #include +#include "closest_vertex_from_barycentric.h" #include "prepare_attribute_ids.h" #include "prepare_ray_caster.h" @@ -129,18 +130,10 @@ void project_closest_vertex( // largest barycentric weight. float u = result.barycentric_coords(0, static_cast(b)); float v = result.barycentric_coords(1, static_cast(b)); - float w = 1.0f - u - v; auto face = source_facets.row(result.facet_indices(static_cast(b))); - - Index closest_vi; - if (w >= u && w >= v) { - closest_vi = face[0]; - } else if (u >= v) { - closest_vi = face[1]; - } else { - closest_vi = face[2]; - } + int local_vi = closest_vertex_from_barycentric(u, v); + Index closest_vi = face[local_vi]; // Copy attribute values from the closest source vertex. for (const auto& info : attrs) { diff --git a/modules/raycasting/tests/test_compute_local_feature_size.cpp b/modules/raycasting/tests/test_compute_local_feature_size.cpp new file mode 100644 index 00000000..537265b5 --- /dev/null +++ b/modules/raycasting/tests/test_compute_local_feature_size.cpp @@ -0,0 +1,179 @@ +/* + * Copyright 2026 Adobe. All rights reserved. 
+ * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#include +#include + +#include +#include +#include +#include +#include + +TEST_CASE("compute_local_feature_size", "[raycasting][lfs]") +{ + using namespace lagrange; + using Scalar = float; + using Index = uint32_t; + + SECTION("empty mesh") + { + SurfaceMesh mesh; + + raycasting::LocalFeatureSizeOptions options; + options.output_attribute_name = "@lfs"; + options.direction_mode = raycasting::RayDirectionMode::Interior; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + const auto& lfs_attr = mesh.get_attribute(lfs_id); + REQUIRE(lfs_attr.get_num_elements() == 0); + } + + SECTION("blub") + { + auto mesh = lagrange::testing::load_surface_mesh("open/core/blub/blub.obj"); + + raycasting::LocalFeatureSizeOptions options; + options.output_attribute_name = "@lfs"; + options.direction_mode = raycasting::RayDirectionMode::Interior; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + const auto& lfs_attr = mesh.get_attribute(lfs_id); + REQUIRE(lfs_attr.get_num_elements() == mesh.get_num_vertices()); + + auto lfs_values = lagrange::attribute_vector_view(mesh, lfs_id); + REQUIRE(lfs_values.array().isFinite().all()); + REQUIRE((lfs_values.array() > 0.0f).all()); + } + + SECTION("Bunny mesh") + { + auto mesh = + 
lagrange::testing::load_surface_mesh("open/core/bunny_simple.obj"); + + raycasting::LocalFeatureSizeOptions options; + options.output_attribute_name = "@lfs"; + options.direction_mode = raycasting::RayDirectionMode::Interior; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + const auto& lfs_attr = mesh.get_attribute(lfs_id); + REQUIRE(lfs_attr.get_num_elements() == mesh.get_num_vertices()); + + auto lfs_values = lagrange::attribute_vector_view(mesh, lfs_id); + REQUIRE(lfs_values.array().isFinite().all()); + REQUIRE((lfs_values.array() > 0.0f).all()); + } + + SECTION("Different direction modes") + { + auto mesh = lagrange::testing::load_surface_mesh( + "open/core/spot/spot_triangulated.obj"); + + SECTION("Interior") + { + raycasting::LocalFeatureSizeOptions options; + options.direction_mode = raycasting::RayDirectionMode::Interior; + options.output_attribute_name = "@lfs_interior"; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + + const auto& lfs_attr = mesh.get_attribute(lfs_id); + auto lfs_values = lfs_attr.get_all(); + + bool all_finite = true; + for (auto val : lfs_values) { + if (!std::isfinite(val)) { + all_finite = false; + break; + } + } + // With no filtering, all vertices should have finite values + REQUIRE(all_finite); + } + + SECTION("Exterior") + { + raycasting::LocalFeatureSizeOptions options; + options.direction_mode = raycasting::RayDirectionMode::Exterior; + options.output_attribute_name = "@lfs_exterior"; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + + // For Exterior mode on a closed sphere, rays shoot outward into empty space. + // We don't expect finite values since there's nothing outside to hit. + // Just verify the function runs successfully. 
+ const auto& lfs_attr = mesh.get_attribute(lfs_id); + REQUIRE(lfs_attr.get_num_elements() == mesh.get_num_vertices()); + } + + SECTION("Both") + { + raycasting::LocalFeatureSizeOptions options; + options.direction_mode = raycasting::RayDirectionMode::Both; + options.output_attribute_name = "@lfs_both"; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + + auto lfs_values = lagrange::attribute_vector_view(mesh, lfs_id); + REQUIRE(lfs_values.array().isFinite().all()); + REQUIRE((lfs_values.array() > 0.0f).all()); + } + } + + SECTION("Medial axis tolerance") + { + auto mesh = lagrange::testing::load_surface_mesh( + "open/core/spot/spot_triangulated.obj"); + + SECTION("Loose tolerance") + { + raycasting::LocalFeatureSizeOptions options; + options.output_attribute_name = "@lfs_loose"; + options.medial_axis_tolerance = 1e-3f; + options.direction_mode = raycasting::RayDirectionMode::Both; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + + const auto& lfs_attr = mesh.get_attribute(lfs_id); + REQUIRE(lfs_attr.get_num_elements() == mesh.get_num_vertices()); + + auto lfs_values = lagrange::attribute_vector_view(mesh, lfs_id); + REQUIRE(lfs_values.array().isFinite().all()); + REQUIRE((lfs_values.array() > 0.0f).all()); + } + + SECTION("Tight tolerance") + { + raycasting::LocalFeatureSizeOptions options; + options.output_attribute_name = "@lfs_tight"; + options.medial_axis_tolerance = 1e-5f; + options.direction_mode = raycasting::RayDirectionMode::Both; + + auto lfs_id = raycasting::compute_local_feature_size(mesh, options); + REQUIRE(mesh.has_attribute(options.output_attribute_name)); + + auto lfs_values = lagrange::attribute_vector_view(mesh, lfs_id); + REQUIRE(lfs_values.array().isFinite().all()); + REQUIRE((lfs_values.array() > 0.0f).all()); + } + } +} diff --git a/modules/remeshing_im/src/remesh.cpp 
b/modules/remeshing_im/src/remesh.cpp index c60cdb01..edb3ed7c 100644 --- a/modules/remeshing_im/src/remesh.cpp +++ b/modules/remeshing_im/src/remesh.cpp @@ -18,9 +18,9 @@ #include #include #include +#include #include #include -#include // clang-format off #include diff --git a/modules/scene/CMakeLists.txt b/modules/scene/CMakeLists.txt index 5f6d6eaf..72a12e9b 100644 --- a/modules/scene/CMakeLists.txt +++ b/modules/scene/CMakeLists.txt @@ -17,13 +17,14 @@ if(LAGRANGE_TOPLEVEL_PROJECT) set_target_properties(lagrange_scene PROPERTIES COMPILE_WARNING_AS_ERROR ON) endif() -lagrange_include_modules(fs) +lagrange_include_modules(fs image) # 2. dependencies target_link_libraries(lagrange_scene PUBLIC lagrange::core lagrange::fs + lagrange::image ) # 3. unit tests and examples diff --git a/modules/scene/include/lagrange/scene/Scene.h b/modules/scene/include/lagrange/scene/Scene.h index a787fc3f..26bc62ec 100644 --- a/modules/scene/include/lagrange/scene/Scene.h +++ b/modules/scene/include/lagrange/scene/Scene.h @@ -261,8 +261,8 @@ struct LA_SCENE_API Light // inner and outer angle of a spot light's light cone // they are both 2PI for point lights, and undefined for directional lights. - float angle_inner_cone; - float angle_outer_cone; + std::optional angle_inner_cone; + std::optional angle_outer_cone; // size of area light source Eigen::Vector2f size = Eigen::Vector2f::Zero(); diff --git a/modules/scene/include/lagrange/scene/internal/shared_utils.h b/modules/scene/include/lagrange/scene/internal/shared_utils.h new file mode 100644 index 00000000..d00edc61 --- /dev/null +++ b/modules/scene/include/lagrange/scene/internal/shared_utils.h @@ -0,0 +1,213 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include +#include +#include +#include +#include + +#include + +namespace lagrange::scene::internal { + +using Array3Df = image::experimental::Array3D; +using View3Df = image::experimental::View3D; +using ConstView3Df = image::experimental::View3D; + +// FIXME this strips non-color channel, other variants of this function don't. +inline Array3Df convert_from(const ImageBufferExperimental& image) +{ + size_t nc = std::min(image.num_channels, size_t(3)); + auto result = image::experimental::create_image(image.width, image.height, nc); + + auto copy_buffer = [&](auto scalar) { + using T = std::decay_t; + constexpr bool IsChar = std::is_integral_v && sizeof(T) == 1; + la_runtime_assert(sizeof(T) * 8 == image.get_bits_per_element()); + auto rawbuf = reinterpret_cast(image.data.data()); + for (size_t y = 0, i = 0; y < image.height; ++y) { + for (size_t x = 0; x < image.width; ++x) { + for (size_t c = 0; c < image.num_channels; ++c) { + if (c >= nc) { + ++i; + continue; + } + if constexpr (IsChar) { + result(x, y, c) = static_cast(rawbuf[i++]) / 255.f; + } else { + result(x, y, c) = rawbuf[i++]; + } + } + } + } + }; + + switch (image.element_type) { + case AttributeValueType::e_uint8_t: copy_buffer(uint8_t()); break; + case AttributeValueType::e_int8_t: copy_buffer(int8_t()); break; + case AttributeValueType::e_uint32_t: copy_buffer(uint32_t()); break; + case AttributeValueType::e_int32_t: copy_buffer(int32_t()); break; + case AttributeValueType::e_float: copy_buffer(float()); break; + case AttributeValueType::e_double: 
copy_buffer(double()); break; + default: throw std::runtime_error("Unsupported image scalar type"); + } + + return result; +} + +// Convert a float Array3D image to an ImageBufferExperimental (uint8, row-major y,x,c order). +// Note: convert_from() truncates to 3 channels, so this is only a true round-trip inverse for +// images with <= 3 channels. +inline ImageBufferExperimental convert_to(const ConstView3Df& image) +{ + ImageBufferExperimental result; + result.width = image.extent(0); + result.height = image.extent(1); + const size_t num_channels = image.extent(2); + la_runtime_assert( + num_channels == 1 || num_channels == 3 || num_channels == 4, + "ImageBufferExperimental requires 1, 3, or 4 channels"); + result.num_channels = num_channels; + result.element_type = AttributeValueType::e_uint8_t; + result.data.resize(result.width * result.height * result.num_channels); + for (size_t y = 0, i = 0; y < result.height; ++y) { + for (size_t x = 0; x < result.width; ++x) { + for (size_t c = 0; c < result.num_channels; ++c) { + result.data[i++] = + static_cast(std::clamp(image(x, y, c), 0.0f, 1.0f) * 255.0f); + } + } + } + return result; +} + +struct SingleMeshToSceneOptions +{ + MaterialExperimental::AlphaMode alpha_mode = MaterialExperimental::AlphaMode::Opaque; + float alpha_cutoff = 0.5f; +}; + +// Create a scene containing a single mesh with a base color texture. +// This is the inverse of single_mesh_from_scene(). 
+template +Scene single_mesh_to_scene( + SurfaceMesh mesh, + const ConstView3Df& image, + const SingleMeshToSceneOptions& options = {}) +{ + Scene scene; + + auto mesh_id = scene.add(std::move(mesh)); + + ImageExperimental scene_image; + scene_image.name = "base_color"; + scene_image.image = convert_to(image); + auto image_id = scene.add(std::move(scene_image)); + + Texture texture; + texture.name = "base_color"; + texture.image = image_id; + auto texture_id = scene.add(std::move(texture)); + + MaterialExperimental material; + material.name = "material"; + material.alpha_mode = options.alpha_mode; + material.alpha_cutoff = options.alpha_cutoff; + material.base_color_texture.index = texture_id; + material.base_color_texture.texcoord = 0; + auto material_id = scene.add(std::move(material)); + + Node node; + node.name = "mesh"; + SceneMeshInstance instance; + instance.mesh = mesh_id; + instance.materials.push_back(material_id); + node.meshes.push_back(std::move(instance)); + auto node_id = scene.add(std::move(node)); + scene.root_nodes.push_back(node_id); + + return scene; +} + +// Extract a single uv unwrapped mesh and optionally its base color tensor from a scene. +template +std::tuple, std::optional> single_mesh_from_scene( + const Scene& scene) +{ + using ElementId = scene::ElementId; + + // Find mesh nodes in the scene + std::vector mesh_node_ids; + for (ElementId node_id = 0; node_id < scene.nodes.size(); ++node_id) { + const auto& node = scene.nodes[node_id]; + if (!node.meshes.empty()) { + mesh_node_ids.push_back(node_id); + } + } + + if (mesh_node_ids.size() != 1) { + throw std::runtime_error( + fmt::format( + "Input scene contains {} mesh nodes. Expected exactly 1 mesh node.", + mesh_node_ids.size())); + } + const auto& mesh_node = scene.nodes[mesh_node_ids.front()]; + + if (mesh_node.meshes.size() != 1) { + throw std::runtime_error( + fmt::format( + "Input scene has a mesh node with {} instance per node. 
Expected " + "exactly 1 instance per node", + mesh_node.meshes.size())); + } + const auto& mesh_instance = mesh_node.meshes.front(); + + [[maybe_unused]] const auto mesh_id = mesh_instance.mesh; + la_debug_assert(mesh_id < scene.meshes.size()); + SurfaceMesh mesh = scene.meshes[mesh_instance.mesh]; + { + // Apply node local->world transform + auto world_from_mesh = utils::compute_global_node_transform(scene, mesh_node_ids.front()) + .template cast(); + transform_mesh(mesh, world_from_mesh); + } + + // Find base texture if available + if (auto num_mats = mesh_instance.materials.size(); num_mats != 1) { + logger().warn( + "Mesh node has {} materials. Expected exactly 1 material. Ignoring materials.", + num_mats); + return {mesh, std::nullopt}; + } + const auto& material = scene.materials[mesh_instance.materials.front()]; + if (material.base_color_texture.texcoord != 0) { + logger().warn( + "Mesh node material texcoord is {} != 0. Expected 0. Ignoring texcoord.", + material.base_color_texture.texcoord); + } + const auto texture_id = material.base_color_texture.index; + la_debug_assert(texture_id < scene.textures.size()); + const auto& texture = scene.textures[texture_id]; + + const auto image_id = texture.image; + la_debug_assert(image_id < scene.images.size()); + const auto& image_ = scene.images[image_id].image; + Array3Df image = convert_from(image_); + + return {mesh, image}; +} + +} // namespace lagrange::scene::internal diff --git a/modules/scene/python/tests/test_scene.py b/modules/scene/python/tests/test_scene.py index 1c49c82d..8545edb8 100644 --- a/modules/scene/python/tests/test_scene.py +++ b/modules/scene/python/tests/test_scene.py @@ -14,8 +14,6 @@ import lagrange import numpy as np -from .assets import single_triangle # noqa: F401 - class TestScene: def test_empty_scene(self): diff --git a/modules/scene/python/tests/test_simple_scene.py b/modules/scene/python/tests/test_simple_scene.py index 8370dd51..eb31b3ae 100644 --- 
a/modules/scene/python/tests/test_simple_scene.py +++ b/modules/scene/python/tests/test_simple_scene.py @@ -12,8 +12,6 @@ import lagrange import numpy as np -from .assets import single_triangle # noqa: F401 - class TestSimpleScene: def test_empty_scene(self): diff --git a/modules/scene/src/internal/scene_string_utils.cpp b/modules/scene/src/internal/scene_string_utils.cpp index e5ae0129..15774353 100644 --- a/modules/scene/src/internal/scene_string_utils.cpp +++ b/modules/scene/src/internal/scene_string_utils.cpp @@ -24,6 +24,20 @@ namespace lagrange::scene::internal { +namespace { + +template +std::string fmt_optional(const std::optional& value) +{ + if (value.has_value()) { + return fmt::format("{}", value.value()); + } else { + return ""; + } +} + +} // namespace + std::string to_string(const std::vector& ids) { return fmt::format("[{}]", fmt::join(ids, ", ")); @@ -327,8 +341,16 @@ std::string to_string(const Light& light, size_t indent) light.color_ambient[0], light.color_ambient[1], light.color_ambient[2]) + - fmt::format("{:{}s}angle_inner_cone: {}\n", "", indent, light.angle_inner_cone) + - fmt::format("{:{}s}angle_outer_cone: {}\n", "", indent, light.angle_outer_cone) + + fmt::format( + "{:{}s}angle_inner_cone: {}\n", + "", + indent, + fmt_optional(light.angle_inner_cone)) + + fmt::format( + "{:{}s}angle_outer_cone: {}\n", + "", + indent, + fmt_optional(light.angle_outer_cone)) + fmt::format("{:{}s}size: [{}, {}]\n", "", indent, light.size[0], light.size[1]); if (!light.extensions.empty()) { r += fmt::format( diff --git a/modules/serialization2/CMakeLists.txt b/modules/serialization2/CMakeLists.txt new file mode 100644 index 00000000..ec1911c3 --- /dev/null +++ b/modules/serialization2/CMakeLists.txt @@ -0,0 +1,38 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. 
You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +# + +# 1. define module +lagrange_add_module() + +# 2. dependencies +include(cista) +lagrange_find_package(zstd CONFIG REQUIRED GLOBAL) +lagrange_include_modules(fs scene) +target_link_libraries(lagrange_serialization2 + PUBLIC + lagrange::core + lagrange::fs + lagrange::scene + PRIVATE + cista::cista + zstd::libzstd +) + +# 3. python binding +if(LAGRANGE_MODULE_PYTHON) + add_subdirectory(python) +endif() + +# 4. unit tests +if(LAGRANGE_UNIT_TESTS) + add_subdirectory(tests) +endif() diff --git a/modules/serialization2/include/lagrange/serialization/api.h b/modules/serialization2/include/lagrange/serialization/api.h new file mode 100644 index 00000000..be48fba1 --- /dev/null +++ b/modules/serialization2/include/lagrange/serialization/api.h @@ -0,0 +1,34 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#pragma once + +#ifdef LA_SERIALIZATION2_STATIC_DEFINE + #define LA_SERIALIZATION2_API +#else + #ifndef LA_SERIALIZATION2_API + #ifdef lagrange_serialization2_EXPORTS + // We are building this library + #if defined(_WIN32) || defined(_WIN64) + #define LA_SERIALIZATION2_API __declspec(dllexport) + #else + #define LA_SERIALIZATION2_API __attribute__((visibility("default"))) + #endif + #else + // We are using this library + #if defined(_WIN32) || defined(_WIN64) + #define LA_SERIALIZATION2_API __declspec(dllimport) + #else + #define LA_SERIALIZATION2_API __attribute__((visibility("default"))) + #endif + #endif + #endif +#endif diff --git a/modules/serialization2/include/lagrange/serialization/serialize_mesh.h b/modules/serialization2/include/lagrange/serialization/serialize_mesh.h new file mode 100644 index 00000000..332545ce --- /dev/null +++ b/modules/serialization2/include/lagrange/serialization/serialize_mesh.h @@ -0,0 +1,119 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace lagrange::serialization { + +/// +/// @defgroup group-serialization2 Serialization +/// @ingroup module-serialization2 Binary serialization of SurfaceMesh, SimpleScene and Scene objects. 
+/// +/// The suggested file extension for serialized meshes is `.lgm`, and `.lgs` for serialized scenes +/// (both SimpleScene and Scene). +/// +/// @{ + +/// +/// Current mesh serialization format version. +/// +/// @return The current mesh serialization format version. +/// +constexpr uint32_t mesh_format_version() +{ + return 1; +} + +/// +/// Serialize a SurfaceMesh to a byte buffer. +/// +/// @param[in] mesh The mesh to serialize. +/// @param[in] options Serialization options (compression, etc.). +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +/// @return A byte buffer containing the serialized mesh. +/// +template <typename Scalar, typename Index> +LA_SERIALIZATION2_API std::vector<uint8_t> serialize_mesh( + const SurfaceMesh<Scalar, Index>& mesh, + const SerializeOptions& options = {}); + +/// +/// Deserialize a SurfaceMesh from a byte buffer. +/// +/// The function auto-detects whether the buffer is compressed. If the buffer contains a SimpleScene +/// or Scene instead of a SurfaceMesh, it can be automatically converted to a mesh when +/// DeserializeOptions::allow_scene_conversion is enabled. If the buffer was serialized with +/// different Scalar/Index types, type casting can be enabled via +/// DeserializeOptions::allow_type_cast. +/// +/// @param[in] buffer A byte buffer containing the serialized data. +/// @param[in] options Deserialization options. +/// +/// @tparam MeshType Mesh type (e.g. SurfaceMesh<double, uint32_t>). +/// +/// @return The deserialized mesh. +/// +template <typename MeshType> +LA_SERIALIZATION2_API MeshType +deserialize_mesh(span<const uint8_t> buffer, const DeserializeOptions& options = {}); + +/// +/// Save a SurfaceMesh to a file. +/// +/// The suggested file extension is `.lgm`. +/// +/// @param[in] filename Output file path. +/// @param[in] mesh The mesh to save. +/// @param[in] options Serialization options (compression, etc.). +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. 
+/// +template <typename Scalar, typename Index> +LA_SERIALIZATION2_API void save_mesh( + const fs::path& filename, + const SurfaceMesh<Scalar, Index>& mesh, + const SerializeOptions& options = {}); + +/// +/// Load a SurfaceMesh from a file. +/// +/// Reads the file into memory and calls deserialize_mesh(). The function auto-detects whether the +/// file contents are compressed. +/// +/// @param[in] filename Input file path. +/// @param[in] options Deserialization options. +/// +/// @tparam MeshType Mesh type (e.g. SurfaceMesh<double, uint32_t>). +/// +/// @return The loaded mesh. +/// +/// @see deserialize_mesh() for details on scene conversion and type casting options. +/// +template <typename MeshType> +LA_SERIALIZATION2_API MeshType +load_mesh(const fs::path& filename, const DeserializeOptions& options = {}); + +/// @} + +} // namespace lagrange::serialization diff --git a/modules/serialization2/include/lagrange/serialization/serialize_scene.h b/modules/serialization2/include/lagrange/serialization/serialize_scene.h new file mode 100644 index 00000000..b552fe31 --- /dev/null +++ b/modules/serialization2/include/lagrange/serialization/serialize_scene.h @@ -0,0 +1,113 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace lagrange::serialization { + +/// @addtogroup group-serialization2 +/// @{ + +/// +/// Current scene serialization format version. 
+/// +/// @return The current scene serialization format version. +/// +constexpr uint32_t scene_format_version() +{ + return 1; +} + +/// +/// Serialize a Scene to a byte buffer. +/// +/// @param[in] scene The scene to serialize. +/// @param[in] options Serialization options (compression, etc.). +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +/// @return A byte buffer containing the serialized scene. +/// +template +LA_SERIALIZATION2_API std::vector serialize_scene( + const scene::Scene& scene, + const SerializeOptions& options = {}); + +/// +/// Deserialize a Scene from a byte buffer. +/// +/// The function auto-detects whether the buffer is compressed. If the buffer contains a SurfaceMesh +/// or SimpleScene instead of a Scene, it can be automatically converted when +/// DeserializeOptions::allow_scene_conversion is enabled. If the buffer was serialized with +/// different Scalar/Index types, type casting can be enabled via +/// DeserializeOptions::allow_type_cast. +/// +/// @param[in] buffer A byte buffer containing the serialized data. +/// @param[in] options Deserialization options. +/// +/// @tparam SceneType Scene type (e.g. scene::Scene). +/// +/// @return The deserialized scene. +/// +template +LA_SERIALIZATION2_API SceneType +deserialize_scene(span buffer, const DeserializeOptions& options = {}); + +/// +/// Save a Scene to a file. +/// +/// The suggested file extension is `.lgs`. +/// +/// @param[in] filename Output file path. +/// @param[in] scene The scene to save. +/// @param[in] options Serialization options (compression, etc.). +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +template +LA_SERIALIZATION2_API void save_scene( + const fs::path& filename, + const scene::Scene& scene, + const SerializeOptions& options = {}); + +/// +/// Load a Scene from a file. +/// +/// Reads the file into memory and calls deserialize_scene(). 
The function auto-detects whether the +/// file contents are compressed. +/// +/// @param[in] filename Input file path. +/// @param[in] options Deserialization options. +/// +/// @tparam SceneType Scene type (e.g. scene::Scene). +/// +/// @return The loaded scene. +/// +/// @see deserialize_scene() for details on scene conversion and type casting options. +/// +template +LA_SERIALIZATION2_API SceneType +load_scene(const fs::path& filename, const DeserializeOptions& options = {}); + +/// @} + +} // namespace lagrange::serialization diff --git a/modules/serialization2/include/lagrange/serialization/serialize_simple_scene.h b/modules/serialization2/include/lagrange/serialization/serialize_simple_scene.h new file mode 100644 index 00000000..e8547ab2 --- /dev/null +++ b/modules/serialization2/include/lagrange/serialization/serialize_simple_scene.h @@ -0,0 +1,115 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include +#include +#include +#include + +#include +#include + +namespace lagrange::serialization { + +/// @addtogroup group-serialization2 +/// @{ + +/// +/// Current simple scene serialization format version. +/// +/// @return The current simple scene serialization format version. +/// +constexpr uint32_t simple_scene_format_version() +{ + return 1; +} + +/// +/// Serialize a SimpleScene to a byte buffer. +/// +/// @param[in] scene The scene to serialize. 
+/// @param[in] options Serialization options (compression, etc.). +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// @tparam Dimension Scene dimension (2 or 3). +/// +/// @return A byte buffer containing the serialized scene. +/// +template +LA_SERIALIZATION2_API std::vector serialize_simple_scene( + const scene::SimpleScene& scene, + const SerializeOptions& options = {}); + +/// +/// Deserialize a SimpleScene from a byte buffer. +/// +/// The function auto-detects whether the buffer is compressed. If the buffer contains a SurfaceMesh +/// or Scene instead of a SimpleScene, it can be automatically converted when +/// DeserializeOptions::allow_scene_conversion is enabled. If the buffer was serialized with +/// different Scalar/Index types, type casting can be enabled via +/// DeserializeOptions::allow_type_cast. +/// +/// @param[in] buffer A byte buffer containing the serialized data. +/// @param[in] options Deserialization options. +/// +/// @tparam SceneType SimpleScene type (e.g. scene::SimpleScene). +/// +/// @return The deserialized scene. +/// +template +LA_SERIALIZATION2_API SceneType +deserialize_simple_scene(span buffer, const DeserializeOptions& options = {}); + +/// +/// Save a SimpleScene to a file. +/// +/// The suggested file extension is `.lgs`. +/// +/// @param[in] filename Output file path. +/// @param[in] scene The scene to save. +/// @param[in] options Serialization options (compression, etc.). +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// @tparam Dimension Scene dimension (2 or 3). +/// +template +LA_SERIALIZATION2_API void save_simple_scene( + const fs::path& filename, + const scene::SimpleScene& scene, + const SerializeOptions& options = {}); + +/// +/// Load a SimpleScene from a file. +/// +/// Reads the file into memory and calls deserialize_simple_scene(). The function auto-detects +/// whether the file contents are compressed. 
+/// +/// @param[in] filename Input file path. +/// @param[in] options Deserialization options. +/// +/// @tparam SceneType SimpleScene type (e.g. scene::SimpleScene). +/// +/// @return The loaded scene. +/// +/// @see deserialize_simple_scene() for details on scene conversion and type casting options. +/// +template +LA_SERIALIZATION2_API SceneType +load_simple_scene(const fs::path& filename, const DeserializeOptions& options = {}); + +/// @} + +} // namespace lagrange::serialization diff --git a/modules/serialization2/include/lagrange/serialization/types.h b/modules/serialization2/include/lagrange/serialization/types.h new file mode 100644 index 00000000..d873ef87 --- /dev/null +++ b/modules/serialization2/include/lagrange/serialization/types.h @@ -0,0 +1,80 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include + +namespace lagrange::serialization { + +/// +/// Options for serialization (save/serialize functions). +/// +struct SerializeOptions +{ + /// Enable zstd compression. + bool compress = true; + + /// Zstd compression level (1-22). Lower is faster, higher is better compression. Levels >= 20 + /// require more memory and should be used with caution. + int compression_level = 3; + + /// Number of zstd compression threads. 0 = automatic, 1 = single-threaded. >1 = passed to + /// ZSTD_c_nbWorkers. On Emscripten, this setting is ignored (always single-threaded). 
+ unsigned num_threads = 0; +}; + +/// +/// Options for deserialization (load/deserialize functions). +/// +struct DeserializeOptions +{ + /// + /// Allow converting between meshes and scenes during deserialization. + /// + /// When enabled, deserializing a buffer that contains a different type than requested will + /// attempt automatic conversion. For example, calling deserialize_mesh() on a buffer containing + /// a Scene will convert the scene to a single mesh using scene_to_mesh(). + /// + /// When disabled (the default), a type mismatch will throw an exception. + /// + /// @note A warning is logged when a conversion is performed, unless @ref quiet is set. + /// + bool allow_scene_conversion = false; + + /// + /// Allow casting scalar and index types during deserialization. + /// + /// When enabled, deserializing a buffer serialized with different Scalar/Index types than the + /// requested template parameters will cast the data accordingly. For example, deserializing a + /// mesh saved as `SurfaceMesh<float, uint32_t>` into a `SurfaceMesh<double, uint32_t>`. + /// + /// This applies to all contained meshes, including those inside Scene and SimpleScene objects. + /// For SimpleScene, instance transforms are also cast to the target scalar type. + /// + /// When disabled (the default), a scalar/index type mismatch will throw an exception. + /// + /// @note A warning is logged when a type mismatch is detected and casting is performed, + /// unless @ref quiet is set. If the stored types already match the requested types, + /// no casting or warning occurs regardless of this setting. + /// + bool allow_type_cast = false; + + /// + /// Suppress warnings during deserialization. + /// + /// When set to true, suppresses the warnings that are normally logged when scene conversion + /// (via @ref allow_scene_conversion) or type casting (via @ref allow_type_cast) is performed. 
+ /// + bool quiet = false; +}; + +} // namespace lagrange::serialization diff --git a/modules/serialization2/python/CMakeLists.txt b/modules/serialization2/python/CMakeLists.txt new file mode 100644 index 00000000..762903eb --- /dev/null +++ b/modules/serialization2/python/CMakeLists.txt @@ -0,0 +1,12 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. See the License for the specific language +# governing permissions and limitations under the License. +# +lagrange_add_python_binding(PYTHON_NAME serialization) diff --git a/modules/serialization2/python/include/lagrange/python/serialization2.h b/modules/serialization2/python/include/lagrange/python/serialization2.h new file mode 100644 index 00000000..254589f2 --- /dev/null +++ b/modules/serialization2/python/include/lagrange/python/serialization2.h @@ -0,0 +1,18 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#pragma once + +#include + +namespace lagrange::python { +void populate_serialization2_module(nanobind::module_& m); +} diff --git a/modules/serialization2/python/src/serialization2.cpp b/modules/serialization2/python/src/serialization2.cpp new file mode 100644 index 00000000..2362d792 --- /dev/null +++ b/modules/serialization2/python/src/serialization2.cpp @@ -0,0 +1,375 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +namespace nb = nanobind; +using namespace nb::literals; + +namespace lagrange::python { + +void populate_serialization2_module(nb::module_& m) +{ + using Scalar = double; + using Index = uint32_t; + using MeshType = SurfaceMesh; + using SimpleSceneType = scene::SimpleScene; + using SceneType = scene::Scene; + + // Format version constants + m.def( + "mesh_format_version", + &serialization::mesh_format_version, + "Return the current mesh serialization format version."); + m.def( + "simple_scene_format_version", + &serialization::simple_scene_format_version, + "Return the current simple scene serialization format version."); + m.def( + "scene_format_version", + &serialization::scene_format_version, + "Return the current scene serialization format version."); + + // ----------------------------------------------------------------------- + // Mesh serialization + // ----------------------------------------------------------------------- + + m.def( + "serialize_mesh", + [](const MeshType& mesh, bool compress, int compression_level, unsigned num_threads) { + serialization::SerializeOptions opts; + opts.compress = compress; + opts.compression_level = compression_level; + opts.num_threads = num_threads; + auto buf = serialization::serialize_mesh(mesh, opts); + return nb::bytes(reinterpret_cast(buf.data()), buf.size()); + }, + "mesh"_a, + "compress"_a = serialization::SerializeOptions().compress, + "compression_level"_a = serialization::SerializeOptions().compression_level, + "num_threads"_a = serialization::SerializeOptions().num_threads, + R"(Serialize a mesh to a bytes buffer. + +:param mesh: The mesh to serialize. +:param compress: Enable zstd compression. Defaults to True. +:param compression_level: Zstd compression level (1-22). Defaults to 3. +:param num_threads: Number of compression threads. 0 = automatic, 1 = single-threaded. Defaults to 0. 
+ +:return bytes: A bytes object containing the serialized mesh.)"); + + m.def( + "deserialize_mesh", + [](nb::bytes data, bool allow_scene_conversion, bool allow_type_cast, bool quiet) { + serialization::DeserializeOptions opts; + opts.allow_scene_conversion = allow_scene_conversion; + opts.allow_type_cast = allow_type_cast; + opts.quiet = quiet; + span buf(reinterpret_cast(data.c_str()), data.size()); + return serialization::deserialize_mesh(buf, opts); + }, + "data"_a, + "allow_scene_conversion"_a = serialization::DeserializeOptions().allow_scene_conversion, + "allow_type_cast"_a = serialization::DeserializeOptions().allow_type_cast, + "quiet"_a = serialization::DeserializeOptions().quiet, + R"(Deserialize a mesh from a bytes buffer. + +Auto-detects compression. If the buffer contains a SimpleScene or Scene, it can be converted +when allow_scene_conversion is enabled. Type casting can be enabled via allow_type_cast. + +:param data: A bytes object containing the serialized data. +:param allow_scene_conversion: Allow converting between meshes and scenes. Defaults to False. +:param allow_type_cast: Allow casting scalar and index types. Defaults to False. +:param quiet: Suppress warnings. Defaults to False. + +:return SurfaceMesh: The deserialized mesh.)"); + + m.def( + "save_mesh", + [](const fs::path& filename, + const MeshType& mesh, + bool compress, + int compression_level, + unsigned num_threads) { + serialization::SerializeOptions opts; + opts.compress = compress; + opts.compression_level = compression_level; + opts.num_threads = num_threads; + serialization::save_mesh(filename, mesh, opts); + }, + "filename"_a, + "mesh"_a, + "compress"_a = serialization::SerializeOptions().compress, + "compression_level"_a = serialization::SerializeOptions().compression_level, + "num_threads"_a = serialization::SerializeOptions().num_threads, + R"(Save a mesh to a binary file. + +:param filename: Output file path. +:param mesh: The mesh to save. 
+:param compress: Enable zstd compression. Defaults to True. +:param compression_level: Zstd compression level (1-22). Defaults to 3. +:param num_threads: Number of compression threads. 0 = automatic, 1 = single-threaded. Defaults to 0.)"); + + m.def( + "load_mesh", + [](const fs::path& filename, + bool allow_scene_conversion, + bool allow_type_cast, + bool quiet) { + serialization::DeserializeOptions opts; + opts.allow_scene_conversion = allow_scene_conversion; + opts.allow_type_cast = allow_type_cast; + opts.quiet = quiet; + return serialization::load_mesh(filename, opts); + }, + "filename"_a, + "allow_scene_conversion"_a = serialization::DeserializeOptions().allow_scene_conversion, + "allow_type_cast"_a = serialization::DeserializeOptions().allow_type_cast, + "quiet"_a = serialization::DeserializeOptions().quiet, + R"(Load a mesh from a binary file. + +Auto-detects compression. If the file contains a SimpleScene or Scene, it can be converted +when allow_scene_conversion is enabled. Type casting can be enabled via allow_type_cast. + +:param filename: Input file path. +:param allow_scene_conversion: Allow converting between meshes and scenes. Defaults to False. +:param allow_type_cast: Allow casting scalar and index types. Defaults to False. +:param quiet: Suppress warnings. Defaults to False. 
+ +:return SurfaceMesh: The loaded mesh.)"); + + // ----------------------------------------------------------------------- + // SimpleScene serialization + // ----------------------------------------------------------------------- + + m.def( + "serialize_simple_scene", + [](const SimpleSceneType& scene, + bool compress, + int compression_level, + unsigned num_threads) { + serialization::SerializeOptions opts; + opts.compress = compress; + opts.compression_level = compression_level; + opts.num_threads = num_threads; + auto buf = serialization::serialize_simple_scene(scene, opts); + return nb::bytes(reinterpret_cast(buf.data()), buf.size()); + }, + "scene"_a, + "compress"_a = serialization::SerializeOptions().compress, + "compression_level"_a = serialization::SerializeOptions().compression_level, + "num_threads"_a = serialization::SerializeOptions().num_threads, + R"(Serialize a simple scene to a bytes buffer. + +:param scene: The simple scene to serialize. +:param compress: Enable zstd compression. Defaults to True. +:param compression_level: Zstd compression level (1-22). Defaults to 3. +:param num_threads: Number of compression threads. 0 = automatic, 1 = single-threaded. Defaults to 0. + +:return bytes: A bytes object containing the serialized simple scene.)"); + + m.def( + "deserialize_simple_scene", + [](nb::bytes data, bool allow_scene_conversion, bool allow_type_cast, bool quiet) { + serialization::DeserializeOptions opts; + opts.allow_scene_conversion = allow_scene_conversion; + opts.allow_type_cast = allow_type_cast; + opts.quiet = quiet; + span buf(reinterpret_cast(data.c_str()), data.size()); + return serialization::deserialize_simple_scene(buf, opts); + }, + "data"_a, + "allow_scene_conversion"_a = serialization::DeserializeOptions().allow_scene_conversion, + "allow_type_cast"_a = serialization::DeserializeOptions().allow_type_cast, + "quiet"_a = serialization::DeserializeOptions().quiet, + R"(Deserialize a simple scene from a bytes buffer. 
+ +Auto-detects compression. If the buffer contains a SurfaceMesh or Scene, it can be converted +when allow_scene_conversion is enabled. Type casting can be enabled via allow_type_cast. + +:param data: A bytes object containing the serialized data. +:param allow_scene_conversion: Allow converting between meshes and scenes. Defaults to False. +:param allow_type_cast: Allow casting scalar and index types. Defaults to False. +:param quiet: Suppress warnings. Defaults to False. + +:return SimpleScene: The deserialized simple scene.)"); + + m.def( + "save_simple_scene", + [](const fs::path& filename, + const SimpleSceneType& scene, + bool compress, + int compression_level, + unsigned num_threads) { + serialization::SerializeOptions opts; + opts.compress = compress; + opts.compression_level = compression_level; + opts.num_threads = num_threads; + serialization::save_simple_scene(filename, scene, opts); + }, + "filename"_a, + "scene"_a, + "compress"_a = serialization::SerializeOptions().compress, + "compression_level"_a = serialization::SerializeOptions().compression_level, + "num_threads"_a = serialization::SerializeOptions().num_threads, + R"(Save a simple scene to a binary file. + +:param filename: Output file path. +:param scene: The simple scene to save. +:param compress: Enable zstd compression. Defaults to True. +:param compression_level: Zstd compression level (1-22). Defaults to 3. +:param num_threads: Number of compression threads. 0 = automatic, 1 = single-threaded. 
Defaults to 0.)"); + + m.def( + "load_simple_scene", + [](const fs::path& filename, + bool allow_scene_conversion, + bool allow_type_cast, + bool quiet) { + serialization::DeserializeOptions opts; + opts.allow_scene_conversion = allow_scene_conversion; + opts.allow_type_cast = allow_type_cast; + opts.quiet = quiet; + return serialization::load_simple_scene(filename, opts); + }, + "filename"_a, + "allow_scene_conversion"_a = serialization::DeserializeOptions().allow_scene_conversion, + "allow_type_cast"_a = serialization::DeserializeOptions().allow_type_cast, + "quiet"_a = serialization::DeserializeOptions().quiet, + R"(Load a simple scene from a binary file. + +Auto-detects compression. If the file contains a SurfaceMesh or Scene, it can be converted +when allow_scene_conversion is enabled. Type casting can be enabled via allow_type_cast. + +:param filename: Input file path. +:param allow_scene_conversion: Allow converting between meshes and scenes. Defaults to False. +:param allow_type_cast: Allow casting scalar and index types. Defaults to False. +:param quiet: Suppress warnings. Defaults to False. 
+ +:return SimpleScene: The loaded simple scene.)"); + + // ----------------------------------------------------------------------- + // Scene serialization + // ----------------------------------------------------------------------- + + m.def( + "serialize_scene", + [](const SceneType& scene, bool compress, int compression_level, unsigned num_threads) { + serialization::SerializeOptions opts; + opts.compress = compress; + opts.compression_level = compression_level; + opts.num_threads = num_threads; + auto buf = serialization::serialize_scene(scene, opts); + return nb::bytes(reinterpret_cast(buf.data()), buf.size()); + }, + "scene"_a, + "compress"_a = serialization::SerializeOptions().compress, + "compression_level"_a = serialization::SerializeOptions().compression_level, + "num_threads"_a = serialization::SerializeOptions().num_threads, + R"(Serialize a scene to a bytes buffer. + +:param scene: The scene to serialize. +:param compress: Enable zstd compression. Defaults to True. +:param compression_level: Zstd compression level (1-22). Defaults to 3. +:param num_threads: Number of compression threads. 0 = automatic, 1 = single-threaded. Defaults to 0. + +:return bytes: A bytes object containing the serialized scene.)"); + + m.def( + "deserialize_scene", + [](nb::bytes data, bool allow_scene_conversion, bool allow_type_cast, bool quiet) { + serialization::DeserializeOptions opts; + opts.allow_scene_conversion = allow_scene_conversion; + opts.allow_type_cast = allow_type_cast; + opts.quiet = quiet; + span buf(reinterpret_cast(data.c_str()), data.size()); + return serialization::deserialize_scene(buf, opts); + }, + "data"_a, + "allow_scene_conversion"_a = serialization::DeserializeOptions().allow_scene_conversion, + "allow_type_cast"_a = serialization::DeserializeOptions().allow_type_cast, + "quiet"_a = serialization::DeserializeOptions().quiet, + R"(Deserialize a scene from a bytes buffer. + +Auto-detects compression. 
If the buffer contains a SurfaceMesh or SimpleScene, it can be converted +when allow_scene_conversion is enabled. Type casting can be enabled via allow_type_cast. + +:param data: A bytes object containing the serialized data. +:param allow_scene_conversion: Allow converting between meshes and scenes. Defaults to False. +:param allow_type_cast: Allow casting scalar and index types. Defaults to False. +:param quiet: Suppress warnings. Defaults to False. + +:return Scene: The deserialized scene.)"); + + m.def( + "save_scene", + [](const fs::path& filename, + const SceneType& scene, + bool compress, + int compression_level, + unsigned num_threads) { + serialization::SerializeOptions opts; + opts.compress = compress; + opts.compression_level = compression_level; + opts.num_threads = num_threads; + serialization::save_scene(filename, scene, opts); + }, + "filename"_a, + "scene"_a, + "compress"_a = serialization::SerializeOptions().compress, + "compression_level"_a = serialization::SerializeOptions().compression_level, + "num_threads"_a = serialization::SerializeOptions().num_threads, + R"(Save a scene to a binary file. + +:param filename: Output file path. +:param scene: The scene to save. +:param compress: Enable zstd compression. Defaults to True. +:param compression_level: Zstd compression level (1-22). Defaults to 3. +:param num_threads: Number of compression threads. 0 = automatic, 1 = single-threaded. 
Defaults to 0.)"); + + m.def( + "load_scene", + [](const fs::path& filename, + bool allow_scene_conversion, + bool allow_type_cast, + bool quiet) { + serialization::DeserializeOptions opts; + opts.allow_scene_conversion = allow_scene_conversion; + opts.allow_type_cast = allow_type_cast; + opts.quiet = quiet; + return serialization::load_scene(filename, opts); + }, + "filename"_a, + "allow_scene_conversion"_a = serialization::DeserializeOptions().allow_scene_conversion, + "allow_type_cast"_a = serialization::DeserializeOptions().allow_type_cast, + "quiet"_a = serialization::DeserializeOptions().quiet, + R"(Load a scene from a binary file. + +Auto-detects compression. If the file contains a SurfaceMesh or SimpleScene, it can be converted +when allow_scene_conversion is enabled. Type casting can be enabled via allow_type_cast. + +:param filename: Input file path. +:param allow_scene_conversion: Allow converting between meshes and scenes. Defaults to False. +:param allow_type_cast: Allow casting scalar and index types. Defaults to False. +:param quiet: Suppress warnings. Defaults to False. + +:return Scene: The loaded scene.)"); +} + +} // namespace lagrange::python diff --git a/modules/serialization2/python/tests/test_serialization2.py b/modules/serialization2/python/tests/test_serialization2.py new file mode 100644 index 00000000..33220b11 --- /dev/null +++ b/modules/serialization2/python/tests/test_serialization2.py @@ -0,0 +1,240 @@ +# +# Copyright 2026 Adobe. All rights reserved. +# This file is licensed to you under the Apache License, Version 2.0 (the "License"); +# you may not use this file except in compliance with the License. You may obtain a copy +# of the License at http://www.apache.org/licenses/LICENSE-2.0 +# +# Unless required by applicable law or agreed to in writing, software distributed under +# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS +# OF ANY KIND, either express or implied. 
See the License for the specific language +# governing permissions and limitations under the License. +# +import pathlib +import tempfile + +import lagrange +import numpy as np +import pytest + + +def assert_same_mesh(mesh, mesh2): + assert mesh.num_vertices == mesh2.num_vertices + assert mesh.num_facets == mesh2.num_facets + assert np.all(mesh.vertices.ravel() == mesh2.vertices.ravel()) + assert np.all(mesh.facets.ravel() == mesh2.facets.ravel()) + + # Check attributes + ids1 = mesh.get_matching_attribute_ids() + ids2 = mesh2.get_matching_attribute_ids() + names1 = sorted(mesh.get_attribute_name(i) for i in ids1) + names2 = sorted(mesh2.get_attribute_name(i) for i in ids2) + assert names1 == names2, f"Attribute mismatch: {names1} != {names2}" + for name in names1: + attr1 = mesh.attribute(name) + attr2 = mesh2.attribute(name) + assert attr1.element_type == attr2.element_type + assert attr1.usage == attr2.usage + assert attr1.num_channels == attr2.num_channels + np.testing.assert_array_equal(attr1.data, attr2.data) + + +class TestImportName: + def test_available_as_serialization(self): + assert hasattr(lagrange, "serialization") + + def test_hidden_as_serialization2(self): + assert not hasattr(lagrange, "serialization2") + + +class TestFormatVersion: + def test_mesh_format_version(self): + v = lagrange.serialization.mesh_format_version() + assert isinstance(v, int) + assert v >= 1 + + def test_simple_scene_format_version(self): + v = lagrange.serialization.simple_scene_format_version() + assert isinstance(v, int) + assert v >= 1 + + def test_scene_format_version(self): + v = lagrange.serialization.scene_format_version() + assert isinstance(v, int) + assert v >= 1 + + +class TestMeshSerialization: + def test_serialize_deserialize(self, triangle): + data = lagrange.serialization.serialize_mesh(triangle) + assert isinstance(data, bytes) + assert len(data) > 0 + + mesh2 = lagrange.serialization.deserialize_mesh(data) + assert_same_mesh(triangle, mesh2) + + def 
test_serialize_uncompressed(self, triangle): + data_compressed = lagrange.serialization.serialize_mesh(triangle, compress=True) + data_uncompressed = lagrange.serialization.serialize_mesh(triangle, compress=False) + assert isinstance(data_uncompressed, bytes) + assert len(data_uncompressed) > 0 + + # Uncompressed should generally be larger (or at least different) + assert data_compressed != data_uncompressed + + mesh2 = lagrange.serialization.deserialize_mesh(data_uncompressed) + assert_same_mesh(triangle, mesh2) + + def test_serialize_compression_level(self, triangle): + data_low = lagrange.serialization.serialize_mesh(triangle, compression_level=1) + data_high = lagrange.serialization.serialize_mesh(triangle, compression_level=19) + assert isinstance(data_low, bytes) + assert isinstance(data_high, bytes) + + mesh2 = lagrange.serialization.deserialize_mesh(data_low) + assert_same_mesh(triangle, mesh2) + + mesh3 = lagrange.serialization.deserialize_mesh(data_high) + assert_same_mesh(triangle, mesh3) + + def test_serialize_with_attribute(self, triangle_with_attribute): + data = lagrange.serialization.serialize_mesh(triangle_with_attribute) + mesh2 = lagrange.serialization.deserialize_mesh(data) + assert_same_mesh(triangle_with_attribute, mesh2) + assert mesh2.has_attribute("temperature") + + def test_save_load_file(self, triangle): + with tempfile.TemporaryDirectory() as tmp_dir: + filepath = pathlib.Path(tmp_dir) / "test.lgm" + lagrange.serialization.save_mesh(filepath, triangle) + assert filepath.exists() + assert filepath.stat().st_size > 0 + + mesh2 = lagrange.serialization.load_mesh(filepath) + assert_same_mesh(triangle, mesh2) + + def test_save_load_uncompressed(self, triangle): + with tempfile.TemporaryDirectory() as tmp_dir: + filepath = pathlib.Path(tmp_dir) / "test.lgm" + lagrange.serialization.save_mesh(filepath, triangle, compress=False) + mesh2 = lagrange.serialization.load_mesh(filepath) + assert_same_mesh(triangle, mesh2) + + def test_quad_mesh(self, 
quad): + data = lagrange.serialization.serialize_mesh(quad) + mesh2 = lagrange.serialization.deserialize_mesh(data) + assert_same_mesh(quad, mesh2) + + def test_empty_mesh(self): + mesh = lagrange.SurfaceMesh() + data = lagrange.serialization.serialize_mesh(mesh) + mesh2 = lagrange.serialization.deserialize_mesh(data) + assert mesh2.num_vertices == 0 + assert mesh2.num_facets == 0 + + def test_point_cloud(self): + mesh = lagrange.SurfaceMesh() + mesh.add_vertices(np.eye(3)) + data = lagrange.serialization.serialize_mesh(mesh) + mesh2 = lagrange.serialization.deserialize_mesh(data) + assert mesh2.num_vertices == 3 + assert mesh2.num_facets == 0 + assert mesh2.vertices == pytest.approx(mesh.vertices) + + +class TestSimpleSceneSerialization: + @pytest.fixture + def simple_scene(self, triangle): + return lagrange.scene.mesh_to_simple_scene(triangle) + + def test_serialize_deserialize(self, simple_scene): + data = lagrange.serialization.serialize_simple_scene(simple_scene) + assert isinstance(data, bytes) + assert len(data) > 0 + + scene2 = lagrange.serialization.deserialize_simple_scene(data) + assert scene2.num_meshes == simple_scene.num_meshes + for i in range(simple_scene.num_meshes): + assert scene2.num_instances(i) == simple_scene.num_instances(i) + + def test_save_load_file(self, simple_scene): + with tempfile.TemporaryDirectory() as tmp_dir: + filepath = pathlib.Path(tmp_dir) / "test.lgs" + lagrange.serialization.save_simple_scene(filepath, simple_scene) + assert filepath.exists() + + scene2 = lagrange.serialization.load_simple_scene(filepath) + assert scene2.num_meshes == simple_scene.num_meshes + for i in range(simple_scene.num_meshes): + assert scene2.num_instances(i) == simple_scene.num_instances(i) + + def test_roundtrip_mesh_data(self, simple_scene, triangle): + data = lagrange.serialization.serialize_simple_scene(simple_scene) + scene2 = lagrange.serialization.deserialize_simple_scene(data) + + mesh2 = lagrange.scene.simple_scene_to_mesh(scene2) + 
assert_same_mesh(triangle, mesh2) + + +class TestSceneSerialization: + @pytest.fixture + def scene(self, triangle): + s = lagrange.scene.Scene() + s.add(triangle) + node = lagrange.scene.Node() + instance = lagrange.scene.SceneMeshInstance() + instance.mesh = 0 + node.meshes.append(instance) + s.add(node) + return s + + def test_serialize_deserialize(self, scene): + data = lagrange.serialization.serialize_scene(scene) + assert isinstance(data, bytes) + assert len(data) > 0 + + scene2 = lagrange.serialization.deserialize_scene(data) + assert len(scene2.meshes) == len(scene.meshes) + assert len(scene2.nodes) == len(scene.nodes) + + def test_save_load_file(self, scene): + with tempfile.TemporaryDirectory() as tmp_dir: + filepath = pathlib.Path(tmp_dir) / "test.lgs" + lagrange.serialization.save_scene(filepath, scene) + assert filepath.exists() + + scene2 = lagrange.serialization.load_scene(filepath) + assert len(scene2.meshes) == len(scene.meshes) + assert len(scene2.nodes) == len(scene.nodes) + + +class TestSceneConversion: + def test_mesh_from_simple_scene_buffer(self, triangle): + simple_scene = lagrange.scene.mesh_to_simple_scene(triangle) + data = lagrange.serialization.serialize_simple_scene(simple_scene) + + # Without allow_scene_conversion, should fail + with pytest.raises(RuntimeError): + lagrange.serialization.deserialize_mesh(data) + + # With allow_scene_conversion, should succeed + mesh2 = lagrange.serialization.deserialize_mesh(data, allow_scene_conversion=True) + assert_same_mesh(triangle, mesh2) + + def test_simple_scene_from_mesh_buffer(self, triangle): + data = lagrange.serialization.serialize_mesh(triangle) + + with pytest.raises(RuntimeError): + lagrange.serialization.deserialize_simple_scene(data) + + scene2 = lagrange.serialization.deserialize_simple_scene(data, allow_scene_conversion=True) + assert scene2.num_meshes == 1 + + def test_quiet_suppresses_warnings(self, triangle): + simple_scene = lagrange.scene.mesh_to_simple_scene(triangle) + data = 
lagrange.serialization.serialize_simple_scene(simple_scene) + + # Should not raise, quiet just suppresses log warnings + mesh2 = lagrange.serialization.deserialize_mesh( + data, allow_scene_conversion=True, quiet=True + ) + assert_same_mesh(triangle, mesh2) diff --git a/modules/serialization2/src/CistaMesh.h b/modules/serialization2/src/CistaMesh.h new file mode 100644 index 00000000..4605d361 --- /dev/null +++ b/modules/serialization2/src/CistaMesh.h @@ -0,0 +1,69 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include + +#include +#include + +#include +#include + +namespace lagrange::serialization::internal { + +namespace data = cista::offset; + +/// Metadata and raw data for a single serialized attribute. 
+struct CistaAttributeInfo +{ + data::string name; + AttributeId attribute_id = 0; + std::underlying_type_t value_type = 0; + std::underlying_type_t element_type = 0; + std::underlying_type_t usage = 0; + uint64_t num_channels = 0; + uint64_t num_elements = 0; + bool is_indexed = false; + + // Non-indexed attribute: raw data bytes (num_elements * num_channels * sizeof(ValueType)) + data::vector data_bytes; + + // Indexed attribute: values and indices stored separately + data::vector values_bytes; + uint64_t values_num_elements = 0; + uint64_t values_num_channels = 0; + data::vector indices_bytes; + uint64_t indices_num_elements = 0; + uint8_t index_type_size = 0; // sizeof(Index) of the mesh (4 or 8) +}; + +/// Complete serialized mesh representation. +struct CistaMesh +{ + uint32_t version = 1; // Format version for forward/backward compatibility + + uint8_t scalar_type_size = 0; // sizeof(Scalar): 4 for float, 8 for double + uint8_t index_type_size = 0; // sizeof(Index): 4 for uint32_t, 8 for uint64_t + + uint64_t num_vertices = 0; + uint64_t num_facets = 0; + uint64_t num_corners = 0; + uint64_t num_edges = 0; + uint64_t dimension = 0; + uint64_t vertex_per_facet = 0; + + data::vector attributes; +}; + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/CistaScene.h b/modules/serialization2/src/CistaScene.h new file mode 100644 index 00000000..f42d3ec1 --- /dev/null +++ b/modules/serialization2/src/CistaScene.h @@ -0,0 +1,175 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include "CistaMesh.h" +#include "CistaValue.h" + +#include +#include +#include + +#include +#include + +namespace lagrange::serialization::internal { + +namespace data = cista::offset; + +constexpr uint64_t k_invalid_element = ~uint64_t(0); + +struct CistaTextureInfo +{ + uint64_t index = k_invalid_element; + int32_t texcoord = 0; +}; + +struct CistaSceneMeshInstance +{ + uint64_t mesh = k_invalid_element; + data::vector materials; +}; + +struct CistaNode +{ + data::string name; + std::array transform{}; // Affine3f = 4x4 float matrix + uint64_t parent = k_invalid_element; + data::vector children; + data::vector meshes; + data::vector cameras; + data::vector lights; + CistaExtensions extensions; +}; + +struct CistaImageBuffer +{ + uint64_t width = 0; + uint64_t height = 0; + uint64_t num_channels = 0; + uint8_t element_type = 0; // std::underlying_type_t + data::vector data_bytes; +}; + +struct CistaImage +{ + data::string name; + CistaImageBuffer image; + data::string uri; + CistaExtensions extensions; +}; + +struct CistaTexture +{ + data::string name; + uint64_t image = k_invalid_element; + int32_t mag_filter = 0; + int32_t min_filter = 0; + uint8_t wrap_u = 0; + uint8_t wrap_v = 0; + std::array scale{}; + std::array offset{}; + float rotation = 0.f; + CistaExtensions extensions; +}; + +struct CistaMaterial +{ + data::string name; + std::array base_color_value{}; + std::array emissive_value{}; + float metallic_value = 0.f; + float roughness_value = 0.f; + float alpha_cutoff = 0.f; + float normal_scale = 0.f; + float 
occlusion_strength = 0.f; + uint8_t alpha_mode = 0; + bool double_sided = false; + CistaTextureInfo base_color_texture; + CistaTextureInfo emissive_texture; + CistaTextureInfo metallic_roughness_texture; + CistaTextureInfo normal_texture; + CistaTextureInfo occlusion_texture; + CistaExtensions extensions; +}; + +struct CistaLight +{ + data::string name; + uint8_t type = 0; + std::array position{}; + std::array direction{}; + std::array up{}; + float intensity = 0.f; + float attenuation_constant = 0.f; + float attenuation_linear = 0.f; + float attenuation_quadratic = 0.f; + float attenuation_cubic = 0.f; + float range = 0.f; + std::array color_diffuse{}; + std::array color_specular{}; + std::array color_ambient{}; + cista::optional angle_inner_cone; + cista::optional angle_outer_cone; + std::array size{}; + CistaExtensions extensions; +}; + +struct CistaCamera +{ + data::string name; + uint8_t type = 0; + std::array position{}; + std::array up{}; + std::array look_at{}; + float near_plane = 0.f; + cista::optional far_plane; + float orthographic_width = 0.f; + float aspect_ratio = 0.f; + float horizontal_fov = 0.f; + CistaExtensions extensions; +}; + +struct CistaSkeleton +{ + data::vector meshes; + CistaExtensions extensions; +}; + +struct CistaAnimation +{ + data::string name; + CistaExtensions extensions; +}; + +struct CistaScene +{ + uint32_t version = 1; + uint8_t scalar_type_size = 0; + uint8_t index_type_size = 0; + + data::string name; + + data::vector nodes; + data::vector root_nodes; + data::vector meshes; + data::vector images; + data::vector textures; + data::vector materials; + data::vector lights; + data::vector cameras; + data::vector skeletons; + data::vector animations; + CistaExtensions extensions; +}; + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/CistaSimpleScene.h b/modules/serialization2/src/CistaSimpleScene.h new file mode 100644 index 00000000..a982a127 --- /dev/null +++ 
b/modules/serialization2/src/CistaSimpleScene.h @@ -0,0 +1,54 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include "CistaMesh.h" + +#include + +#include + +namespace lagrange::serialization::internal { + +namespace data = cista::offset; + +/// Cista-compatible representation of a single MeshInstance. +struct CistaInstance +{ + uint64_t mesh_index = 0; + + /// Raw bytes of the Eigen AffineTransform matrix. + /// Size = (Dimension+1)^2 * sizeof(Scalar). + data::vector transform_bytes; + + // MeshInstance::user_data (std::any) is NOT serialized. +}; + +/// Cista-compatible representation of SimpleScene. +struct CistaSimpleScene +{ + uint32_t version = 1; + + uint8_t scalar_type_size = 0; // sizeof(Scalar): 4 for float, 8 for double + uint8_t index_type_size = 0; // sizeof(Index): 4 for uint32_t, 8 for uint64_t + uint8_t dimension = 0; // 2 or 3 + + data::vector meshes; + + /// Number of instances per mesh (used to reconstruct the nested vector structure). + data::vector instances_per_mesh; + + /// Flattened list of all instances across all meshes. + data::vector instances; +}; + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/CistaValue.h b/modules/serialization2/src/CistaValue.h new file mode 100644 index 00000000..e8270f7c --- /dev/null +++ b/modules/serialization2/src/CistaValue.h @@ -0,0 +1,60 @@ +/* + * Copyright 2026 Adobe. 
All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include + +#include + +namespace lagrange::serialization::internal { + +namespace data = cista::offset; + +/// Type tag matching scene::Value variant indices. +enum class CistaValueType : uint8_t { + Bool = 0, + Int = 1, + Double = 2, + String = 3, + Buffer = 4, + Array = 5, + Object = 6 +}; + +/// Cista-compatible representation of scene::Value. +/// Only one field is populated based on `type`. +struct CistaValue +{ + CistaValueType type = CistaValueType::Bool; + + bool bool_val = false; + int32_t int_val = 0; + double double_val = 0.0; + data::string string_val; + data::vector buffer_val; + data::vector array_val; + + // Object stored as parallel key/value vectors (cista has no map). + data::vector object_keys; + data::vector object_values; +}; + +/// Cista-compatible representation of scene::Extensions. +/// Only the `data` map is serialized; `user_data` (std::any) is skipped. +struct CistaExtensions +{ + data::vector keys; + data::vector values; +}; + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/compress.cpp b/modules/serialization2/src/compress.cpp new file mode 100644 index 00000000..7d122d37 --- /dev/null +++ b/modules/serialization2/src/compress.cpp @@ -0,0 +1,135 @@ +/* + * Copyright 2026 Adobe. All rights reserved. 
+ * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include "compress.h" + +#include +#include +#include +#include + +#include + +#include +#include + +namespace lagrange::serialization::internal { + +std::vector +compress_buffer(const std::vector& input, int compression_level, unsigned num_threads) +{ + const size_t max_compressed_size = ZSTD_compressBound(input.size()); + std::vector output(k_header_size + max_compressed_size); + + // Write magic header + std::memcpy(output.data(), k_magic, 4); + + // Write uncompressed size (little-endian uint64_t) + uint64_t uncompressed_size = input.size(); + std::memcpy(output.data() + 4, &uncompressed_size, 8); + + // Compress using the advanced API to support multithreading + ZSTD_CCtx* cctx = ZSTD_createCCtx(); + la_runtime_assert(cctx != nullptr, "Failed to create zstd compression context"); + auto cctx_guard = make_scope_guard([&]() noexcept { ZSTD_freeCCtx(cctx); }); + + { + size_t rc = ZSTD_CCtx_setParameter(cctx, ZSTD_c_compressionLevel, compression_level); + la_runtime_assert( + !ZSTD_isError(rc), + "Failed to set zstd compression level: " + std::string(ZSTD_getErrorName(rc))); + } + +#if LAGRANGE_TARGET_COMPILER(EMSCRIPTEN) + // On Emscripten with PROXY_TO_PTHREAD, multi-threaded zstd compression spawns additional worker + // threads that can exhaust the fixed PTHREAD_POOL_SIZE under load, causing intermittent + // "memory access out of bounds" crashes in the emscripten threading proxy layer 
(a_cas / + // pthread_cond_signal / emscripten_proxy_finish). Force single-threaded compression. + if (num_threads > 1) { + logger().warn( + "Ignoring multithreaded compression request on Emscripten. Falling back to " + "single-threaded compression."); + } +#else + if (num_threads != 1) { + unsigned workers = num_threads; + if (workers == 0) { + workers = std::thread::hardware_concurrency(); + if (workers == 0) { + logger().warn( + "Failed to detect hardware concurrency, defaulting to 4 threads for " + "compression"); + workers = 4; + } + } + size_t rc = ZSTD_CCtx_setParameter(cctx, ZSTD_c_nbWorkers, static_cast(workers)); + la_runtime_assert( + !ZSTD_isError(rc), + "Failed to set zstd worker count: " + std::string(ZSTD_getErrorName(rc))); + } +#endif + + const size_t compressed_size = ZSTD_compress2( + cctx, + output.data() + k_header_size, + max_compressed_size, + input.data(), + input.size()); + + la_runtime_assert(!ZSTD_isError(compressed_size), "Zstd compression failed"); + + output.resize(k_header_size + compressed_size); + return output; +} + +std::vector decompress_buffer(span input) +{ + la_runtime_assert(input.size() >= k_header_size, "Buffer too small for compressed header"); + + // Read uncompressed size from our header + uint64_t uncompressed_size = 0; + std::memcpy(&uncompressed_size, input.data() + 4, 8); + + // Cross-check against zstd's frame content size to detect corrupted headers + const uint8_t* zstd_data = input.data() + k_header_size; + const size_t zstd_size = input.size() - k_header_size; + const unsigned long long frame_size = ZSTD_getFrameContentSize(zstd_data, zstd_size); + la_runtime_assert( + frame_size != ZSTD_CONTENTSIZE_ERROR, + "Invalid zstd frame in compressed buffer"); + if (frame_size != ZSTD_CONTENTSIZE_UNKNOWN) { + la_runtime_assert( + uncompressed_size == frame_size, + "Header uncompressed size (" + std::to_string(uncompressed_size) + + ") does not match zstd frame content size (" + std::to_string(frame_size) + ")"); + } + + 
std::vector output(uncompressed_size); + + const size_t result = ZSTD_decompress( + output.data(), + uncompressed_size, + input.data() + k_header_size, + input.size() - k_header_size); + + la_runtime_assert(!ZSTD_isError(result), "Zstd decompression failed"); + la_runtime_assert(result == uncompressed_size, "Zstd decompressed size mismatch"); + + return output; +} + +bool is_compressed(span buffer) +{ + if (buffer.size() < k_header_size) return false; + return std::memcmp(buffer.data(), k_magic, 4) == 0; +} + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/compress.h b/modules/serialization2/src/compress.h new file mode 100644 index 00000000..7bad396c --- /dev/null +++ b/modules/serialization2/src/compress.h @@ -0,0 +1,47 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include + +#include + +#include +#include + +namespace lagrange::serialization::internal { + +/// Cista serialization mode: type hash for version checking + integrity hash for corruption +/// detection. +constexpr auto k_cista_mode = cista::mode::WITH_VERSION | cista::mode::WITH_INTEGRITY; + +/// Magic header for compressed buffers: "LENC" (Lagrange ENCoding). +constexpr uint8_t k_magic[4] = {'L', 'E', 'N', 'C'}; + +/// Header size: magic (4 bytes) + uncompressed size (8 bytes). 
+constexpr size_t k_header_size = 4 + 8; + +/// Compress a buffer using zstd, prepending the LENC magic header. +/// +/// @param[in] input The uncompressed data. +/// @param[in] compression_level Zstd compression level (1-22). +/// @param[in] num_threads Number of compression threads. 0 = automatic, 1 = single-threaded. +std::vector +compress_buffer(const std::vector& input, int compression_level, unsigned num_threads); + +/// Decompress a buffer that was compressed with compress_buffer(). +std::vector decompress_buffer(span input); + +/// Check if a buffer starts with the LENC magic header. +bool is_compressed(span buffer); + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/detect_type.cpp b/modules/serialization2/src/detect_type.cpp new file mode 100644 index 00000000..581fb8d1 --- /dev/null +++ b/modules/serialization2/src/detect_type.cpp @@ -0,0 +1,68 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include "detect_type.h" + +#include "CistaMesh.h" +#include "CistaScene.h" +#include "CistaSimpleScene.h" +#include "compress.h" + +#include + +#include + +#include + +namespace lagrange::serialization::internal { + +std::vector read_file_to_buffer(const fs::path& filename) +{ + fs::ifstream ifs(filename, std::ios::binary | std::ios::ate); + la_runtime_assert(ifs.good(), "Failed to open file for reading: " + filename.string()); + auto size = ifs.tellg(); + ifs.seekg(0, std::ios::beg); + std::vector buf(static_cast(size)); + ifs.read(reinterpret_cast(buf.data()), static_cast(size)); + la_runtime_assert(ifs.good(), "Failed to read from file: " + filename.string()); + return buf; +} + +std::vector ensure_decompressed(span buffer) +{ + if (is_compressed(buffer)) { + return decompress_buffer(buffer); + } + return std::vector(buffer.begin(), buffer.end()); +} + +EncodedType detect_encoded_type(span buffer) +{ + // With WITH_VERSION mode, cista stores the type hash in the first 8 bytes of the buffer. + if (buffer.size() < sizeof(cista::hash_t)) { + return EncodedType::Unknown; + } + + cista::hash_t stored_hash = 0; + std::memcpy(&stored_hash, buffer.data(), sizeof(cista::hash_t)); + + static const cista::hash_t mesh_hash = cista::type_hash(); + static const cista::hash_t simple_scene_hash = cista::type_hash(); + static const cista::hash_t scene_hash = cista::type_hash(); + + if (stored_hash == mesh_hash) return EncodedType::Mesh; + if (stored_hash == simple_scene_hash) return EncodedType::SimpleScene; + if (stored_hash == scene_hash) return EncodedType::Scene; + + return EncodedType::Unknown; +} + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/detect_type.h b/modules/serialization2/src/detect_type.h new file mode 100644 index 00000000..7cdb4981 --- /dev/null +++ b/modules/serialization2/src/detect_type.h @@ -0,0 +1,40 @@ +/* + * Copyright 2026 Adobe. All rights reserved. 
+ * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include + +#include +#include + +namespace lagrange::serialization::internal { + +/// Type of data stored in an encoded buffer. +enum class EncodedType { + Mesh, + SimpleScene, + Scene, + Unknown, +}; + +/// Read a file into a byte buffer. +std::vector read_file_to_buffer(const fs::path& filename); + +/// Decompress an encoded buffer if compressed. Returns the original buffer if not compressed. +std::vector ensure_decompressed(span buffer); + +/// Detect the type of data stored in an encoded (and already decompressed) buffer. +/// The buffer must already be decompressed. +EncodedType detect_encoded_type(span buffer); + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/mesh_convert.h b/modules/serialization2/src/mesh_convert.h new file mode 100644 index 00000000..75f142fe --- /dev/null +++ b/modules/serialization2/src/mesh_convert.h @@ -0,0 +1,28 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include "CistaMesh.h" + +#include + +namespace lagrange::serialization::internal { + +/// Convert a SurfaceMesh to a CistaMesh intermediate representation. +template +CistaMesh to_cista_mesh(const SurfaceMesh& mesh); + +/// Convert a CistaMesh intermediate representation back to a SurfaceMesh. +template +SurfaceMesh from_cista_mesh(const CistaMesh& cmesh); + +} // namespace lagrange::serialization::internal diff --git a/modules/serialization2/src/serialize_mesh.cpp b/modules/serialization2/src/serialize_mesh.cpp new file mode 100644 index 00000000..69a22f46 --- /dev/null +++ b/modules/serialization2/src/serialize_mesh.cpp @@ -0,0 +1,306 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "CistaMesh.h" +#include "compress.h" +#include "detect_type.h" + + +namespace lagrange::serialization { + +using internal::compress_buffer; +using internal::decompress_buffer; +using internal::is_compressed; +using internal::k_cista_mode; + +namespace internal { + +namespace { + +/// Set up a data::vector as a non-owning view over a span's data. +/// The span's data must outlive the vector. The vector will not free the memory on destruction. +void set_non_owning(data::vector& dest, lagrange::span src) +{ + if (!src.empty()) { + dest.el_ = const_cast(src.data()); + dest.used_size_ = src.size(); + dest.allocated_size_ = src.size(); + dest.self_allocated_ = false; + } +} + +} // namespace + +template +CistaMesh to_cista_mesh(const SurfaceMesh& mesh) +{ + auto info = lagrange::internal::from_surface_mesh(mesh); + + CistaMesh cmesh; + cmesh.scalar_type_size = info.scalar_type_size; + cmesh.index_type_size = info.index_type_size; + cmesh.num_vertices = info.num_vertices; + cmesh.num_facets = info.num_facets; + cmesh.num_corners = info.num_corners; + cmesh.num_edges = info.num_edges; + cmesh.dimension = info.dimension; + cmesh.vertex_per_facet = info.vertex_per_facet; + + for (const auto& ai : info.attributes) { + CistaAttributeInfo cai; + cai.name = data::string(ai.name.data(), ai.name.size()); + cai.attribute_id = ai.attribute_id; + cai.value_type = ai.value_type; + cai.element_type = ai.element_type; + cai.usage = ai.usage; + cai.num_channels = ai.num_channels; + cai.num_elements = ai.num_elements; + cai.is_indexed = ai.is_indexed; + + if (ai.is_indexed) { + set_non_owning(cai.values_bytes, ai.values_bytes); + cai.values_num_elements = ai.values_num_elements; + cai.values_num_channels = ai.values_num_channels; + set_non_owning(cai.indices_bytes, ai.indices_bytes); + cai.indices_num_elements = ai.indices_num_elements; + cai.index_type_size = 
ai.index_type_size; + } else { + set_non_owning(cai.data_bytes, ai.data_bytes); + } + + cmesh.attributes.emplace_back(std::move(cai)); + } + + return cmesh; +} + +template +SurfaceMesh from_cista_mesh(const CistaMesh& cmesh) +{ + la_runtime_assert( + cmesh.version == mesh_format_version(), + "Unsupported encoding format version: expected " + std::to_string(mesh_format_version()) + + ", got " + std::to_string(cmesh.version)); + + lagrange::internal::SurfaceMeshInfo info; + info.scalar_type_size = cmesh.scalar_type_size; + info.index_type_size = cmesh.index_type_size; + info.num_vertices = cmesh.num_vertices; + info.num_facets = cmesh.num_facets; + info.num_corners = cmesh.num_corners; + info.num_edges = cmesh.num_edges; + info.dimension = cmesh.dimension; + info.vertex_per_facet = cmesh.vertex_per_facet; + + for (const auto& cai : cmesh.attributes) { + lagrange::internal::AttributeInfo ai; + ai.name = std::string_view(cai.name.data(), cai.name.size()); + ai.attribute_id = cai.attribute_id; + ai.value_type = cai.value_type; + ai.element_type = cai.element_type; + ai.usage = cai.usage; + ai.num_channels = cai.num_channels; + ai.num_elements = cai.num_elements; + ai.is_indexed = cai.is_indexed; + + if (cai.is_indexed) { + ai.values_bytes = + lagrange::span(cai.values_bytes.data(), cai.values_bytes.size()); + ai.values_num_elements = cai.values_num_elements; + ai.values_num_channels = cai.values_num_channels; + ai.indices_bytes = + lagrange::span(cai.indices_bytes.data(), cai.indices_bytes.size()); + ai.indices_num_elements = cai.indices_num_elements; + ai.index_type_size = cai.index_type_size; + } else { + ai.data_bytes = + lagrange::span(cai.data_bytes.data(), cai.data_bytes.size()); + } + + info.attributes.push_back(std::move(ai)); + } + + return lagrange::internal::to_surface_mesh(info); +} + +/// Deserialize a CistaMesh buffer with runtime dispatch on the stored Scalar/Index types, then +/// cast to the requested types. 
+template +SurfaceMesh deserialize_mesh_with_cast(span buffer) +{ + const auto* cmesh = + cista::deserialize(buffer.data(), buffer.data() + buffer.size()); + const uint8_t ss = cmesh->scalar_type_size; + const uint8_t is = cmesh->index_type_size; + + // Runtime dispatch on stored (scalar_size, index_size) -> load with native types -> cast + if (ss == sizeof(float) && is == sizeof(uint32_t)) { + auto mesh = from_cista_mesh(*cmesh); + return lagrange::cast(mesh); + } else if (ss == sizeof(double) && is == sizeof(uint32_t)) { + auto mesh = from_cista_mesh(*cmesh); + return lagrange::cast(mesh); + } else if (ss == sizeof(float) && is == sizeof(uint64_t)) { + auto mesh = from_cista_mesh(*cmesh); + return lagrange::cast(mesh); + } else if (ss == sizeof(double) && is == sizeof(uint64_t)) { + auto mesh = from_cista_mesh(*cmesh); + return lagrange::cast(mesh); + } else { + throw std::runtime_error( + "Unsupported scalar/index type sizes: scalar=" + std::to_string(ss) + + " index=" + std::to_string(is)); + } +} + +} // namespace internal + +template +std::vector serialize_mesh( + const SurfaceMesh& mesh, + const SerializeOptions& options) +{ + auto cmesh = internal::to_cista_mesh(mesh); + + cista::buf> buf; + cista::serialize(buf, cmesh); + + if (options.compress) { + return compress_buffer(buf.buf_, options.compression_level, options.num_threads); + } + + return std::move(buf.buf_); +} + +template +MeshType deserialize_mesh(span buffer, const DeserializeOptions& options) +{ + using Scalar = typename MeshType::Scalar; + using Index = typename MeshType::Index; + + // Decompress if needed + std::vector decompressed_storage; + span data = buffer; + if (is_compressed(buffer)) { + decompressed_storage = decompress_buffer(buffer); + data = span(decompressed_storage); + } + + auto load_native_mesh = [&]() -> MeshType { + const auto* cmesh = cista::deserialize( + data.data(), + data.data() + data.size()); + if (cmesh->scalar_type_size != sizeof(Scalar) || cmesh->index_type_size != 
sizeof(Index)) { + if (!options.allow_type_cast) { + throw std::runtime_error( + "Scalar/Index type mismatch: buffer has scalar_size=" + + std::to_string(cmesh->scalar_type_size) + + " index_size=" + std::to_string(cmesh->index_type_size) + + ", expected scalar_size=" + std::to_string(sizeof(Scalar)) + + " index_size=" + std::to_string(sizeof(Index))); + } + if (!options.quiet) { + logger().warn( + "Casting mesh types: buffer has scalar_size={} index_size={}, " + "requested scalar_size={} index_size={}", + cmesh->scalar_type_size, + cmesh->index_type_size, + sizeof(Scalar), + sizeof(Index)); + } + return internal::deserialize_mesh_with_cast(data); + } + return internal::from_cista_mesh(*cmesh); + }; + + if (!options.allow_scene_conversion) { + return load_native_mesh(); + } + + auto type = internal::detect_encoded_type(data); + switch (type) { + case internal::EncodedType::Mesh: return load_native_mesh(); + case internal::EncodedType::SimpleScene: { + if (!options.quiet) { + logger().warn("Buffer contains a SimpleScene, converting to Mesh"); + } + DeserializeOptions native_opts; + native_opts.allow_scene_conversion = false; + auto scene = + deserialize_simple_scene>(data, native_opts); + return scene::simple_scene_to_mesh(scene); + } + case internal::EncodedType::Scene: { + if (!options.quiet) { + logger().warn("Buffer contains a Scene, converting to Mesh"); + } + DeserializeOptions native_opts; + native_opts.allow_scene_conversion = false; + auto scene = deserialize_scene>(data, native_opts); + return scene::scene_to_mesh(scene); + } + default: throw std::runtime_error("Unknown encoded data type in buffer"); + } +} + +template +void save_mesh( + const fs::path& filename, + const SurfaceMesh& mesh, + const SerializeOptions& options) +{ + // TODO: serialize/deserialize directly to a mmap file + auto buf = serialize_mesh(mesh, options); + fs::ofstream ofs(filename, std::ios::binary); + la_runtime_assert(ofs.good(), "Failed to open file for writing: " + 
filename.string()); + ofs.write(reinterpret_cast(buf.data()), static_cast(buf.size())); + la_runtime_assert(ofs.good(), "Failed to write to file: " + filename.string()); +} + +template +MeshType load_mesh(const fs::path& filename, const DeserializeOptions& options) +{ + auto buf = internal::read_file_to_buffer(filename); + if (is_compressed(buf)) { + buf = decompress_buffer(buf); + } + return deserialize_mesh(lagrange::span(buf), options); +} + +// Explicit template instantiations +#define LA_X_serialization2(_, Scalar, Index) \ + template LA_SERIALIZATION2_API std::vector serialize_mesh( \ + const SurfaceMesh&, \ + const SerializeOptions&); \ + template LA_SERIALIZATION2_API SurfaceMesh \ + deserialize_mesh>(span, const DeserializeOptions&); \ + template LA_SERIALIZATION2_API void save_mesh( \ + const fs::path&, \ + const SurfaceMesh&, \ + const SerializeOptions&); \ + template LA_SERIALIZATION2_API SurfaceMesh \ + load_mesh>(const fs::path&, const DeserializeOptions&); +LA_SURFACE_MESH_X(serialization2, 0) +#undef LA_X_serialization2 + +} // namespace lagrange::serialization diff --git a/modules/serialization2/src/serialize_scene.cpp b/modules/serialization2/src/serialize_scene.cpp new file mode 100644 index 00000000..b48716ef --- /dev/null +++ b/modules/serialization2/src/serialize_scene.cpp @@ -0,0 +1,878 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include +#include + +#include "CistaScene.h" +#include "compress.h" +#include "detect_type.h" +#include "mesh_convert.h" + +#include + +namespace lagrange::serialization { + +using internal::compress_buffer; +using internal::decompress_buffer; +using internal::is_compressed; +using internal::k_cista_mode; +using internal::k_invalid_element; + +namespace internal { + +// --------------------------------------------------------------------------- +// Value / Extensions conversion +// --------------------------------------------------------------------------- + +CistaValue to_cista_value(const scene::Value& value) +{ + CistaValue cv; + if (value.is_bool()) { + cv.type = CistaValueType::Bool; + cv.bool_val = value.get_bool(); + } else if (value.is_int()) { + cv.type = CistaValueType::Int; + cv.int_val = value.get_int(); + } else if (value.is_real()) { + cv.type = CistaValueType::Double; + cv.double_val = value.get_real(); + } else if (value.is_string()) { + cv.type = CistaValueType::String; + const auto& s = value.get_string(); + cv.string_val = data::string(s.data(), s.size()); + } else if (value.is_buffer()) { + cv.type = CistaValueType::Buffer; + const auto& buf = value.get_buffer(); + cv.buffer_val.resize(buf.size()); + if (!buf.empty()) { + std::memcpy(cv.buffer_val.data(), buf.data(), buf.size()); + } + } else if (value.is_array()) { + cv.type = CistaValueType::Array; + const auto& arr = value.get_array(); + cv.array_val.reserve(arr.size()); + for (const auto& elem : arr) { + cv.array_val.emplace_back(to_cista_value(elem)); + } + } else if (value.is_object()) { + cv.type = CistaValueType::Object; + const auto& obj = value.get_object(); + cv.object_keys.reserve(obj.size()); + cv.object_values.reserve(obj.size()); + for (const auto& [key, val] : obj) { + cv.object_keys.emplace_back(data::string(key.data(), key.size())); + cv.object_values.emplace_back(to_cista_value(val)); + } 
+ } + return cv; +} + +scene::Value from_cista_value(const CistaValue& cv) +{ + switch (cv.type) { + case CistaValueType::Bool: return scene::Value(cv.bool_val); + case CistaValueType::Int: return scene::Value(cv.int_val); + case CistaValueType::Double: return scene::Value(cv.double_val); + case CistaValueType::String: + return scene::Value(std::string(cv.string_val.data(), cv.string_val.size())); + case CistaValueType::Buffer: { + scene::Value::Buffer buf(cv.buffer_val.size()); + if (!cv.buffer_val.empty()) { + std::memcpy(buf.data(), cv.buffer_val.data(), cv.buffer_val.size()); + } + return scene::Value(std::move(buf)); + } + case CistaValueType::Array: { + scene::Value::Array arr; + arr.reserve(cv.array_val.size()); + for (const auto& elem : cv.array_val) { + arr.emplace_back(from_cista_value(elem)); + } + return scene::Value(std::move(arr)); + } + case CistaValueType::Object: { + scene::Value::Object obj; + for (size_t i = 0; i < cv.object_keys.size(); ++i) { + std::string key(cv.object_keys[i].data(), cv.object_keys[i].size()); + obj.emplace(std::move(key), from_cista_value(cv.object_values[i])); + } + return scene::Value(std::move(obj)); + } + } + return scene::Value(false); // unreachable +} + +CistaExtensions to_cista_extensions(const scene::Extensions& ext) +{ + CistaExtensions cext; + cext.keys.reserve(ext.data.size()); + cext.values.reserve(ext.data.size()); + for (const auto& [key, val] : ext.data) { + cext.keys.emplace_back(data::string(key.data(), key.size())); + cext.values.emplace_back(to_cista_value(val)); + } + return cext; +} + +scene::Extensions from_cista_extensions(const CistaExtensions& cext) +{ + scene::Extensions ext; + for (size_t i = 0; i < cext.keys.size(); ++i) { + std::string key(cext.keys[i].data(), cext.keys[i].size()); + ext.data.emplace(std::move(key), from_cista_value(cext.values[i])); + } + return ext; +} + +// --------------------------------------------------------------------------- +// TextureInfo conversion +// 
--------------------------------------------------------------------------- + +CistaTextureInfo to_cista_texture_info(const scene::TextureInfo& ti) +{ + CistaTextureInfo cti; + cti.index = (ti.index == scene::invalid_element) ? k_invalid_element : ti.index; + cti.texcoord = ti.texcoord; + return cti; +} + +scene::TextureInfo from_cista_texture_info(const CistaTextureInfo& cti) +{ + scene::TextureInfo ti; + ti.index = (cti.index == k_invalid_element) ? scene::invalid_element : cti.index; + ti.texcoord = cti.texcoord; + return ti; +} + +// --------------------------------------------------------------------------- +// Node conversion +// --------------------------------------------------------------------------- + +CistaNode to_cista_node(const scene::Node& node) +{ + CistaNode cn; + cn.name = data::string(node.name.data(), node.name.size()); + + // Affine3f = 4x4 float = 16 floats + const float* tf = node.transform.matrix().data(); + std::copy(tf, tf + 16, cn.transform.begin()); + + cn.parent = (node.parent == scene::invalid_element) ? k_invalid_element : node.parent; + + cn.children.reserve(node.children.size()); + for (size_t i = 0; i < node.children.size(); ++i) { + cn.children.emplace_back(node.children[i]); + } + + cn.meshes.reserve(node.meshes.size()); + for (size_t i = 0; i < node.meshes.size(); ++i) { + CistaSceneMeshInstance cmi; + cmi.mesh = (node.meshes[i].mesh == scene::invalid_element) ? 
k_invalid_element + : node.meshes[i].mesh; + cmi.materials.reserve(node.meshes[i].materials.size()); + for (size_t j = 0; j < node.meshes[i].materials.size(); ++j) { + cmi.materials.emplace_back(node.meshes[i].materials[j]); + } + cn.meshes.emplace_back(std::move(cmi)); + } + + cn.cameras.reserve(node.cameras.size()); + for (size_t i = 0; i < node.cameras.size(); ++i) { + cn.cameras.emplace_back(node.cameras[i]); + } + + cn.lights.reserve(node.lights.size()); + for (size_t i = 0; i < node.lights.size(); ++i) { + cn.lights.emplace_back(node.lights[i]); + } + + cn.extensions = to_cista_extensions(node.extensions); + return cn; +} + +scene::Node from_cista_node(const CistaNode& cn) +{ + scene::Node node; + node.name = std::string(cn.name.data(), cn.name.size()); + + std::copy(cn.transform.begin(), cn.transform.end(), node.transform.matrix().data()); + + node.parent = (cn.parent == k_invalid_element) ? scene::invalid_element : cn.parent; + + for (const auto& child : cn.children) { + node.children.push_back(child); + } + + for (const auto& cmi : cn.meshes) { + scene::SceneMeshInstance smi; + smi.mesh = (cmi.mesh == k_invalid_element) ? 
scene::invalid_element : cmi.mesh; + for (const auto& mat : cmi.materials) { + smi.materials.push_back(mat); + } + node.meshes.push_back(std::move(smi)); + } + + for (const auto& cam : cn.cameras) { + node.cameras.push_back(cam); + } + + for (const auto& light : cn.lights) { + node.lights.push_back(light); + } + + node.extensions = from_cista_extensions(cn.extensions); + return node; +} + +// --------------------------------------------------------------------------- +// Image conversion +// --------------------------------------------------------------------------- + +CistaImage to_cista_image(const scene::ImageExperimental& img) +{ + CistaImage ci; + ci.name = data::string(img.name.data(), img.name.size()); + + ci.image.width = img.image.width; + ci.image.height = img.image.height; + ci.image.num_channels = img.image.num_channels; + ci.image.element_type = static_cast(img.image.element_type); + ci.image.data_bytes.resize(img.image.data.size()); + if (!img.image.data.empty()) { + std::memcpy(ci.image.data_bytes.data(), img.image.data.data(), img.image.data.size()); + } + + auto uri_str = img.uri.string(); + ci.uri = data::string(uri_str.data(), uri_str.size()); + + ci.extensions = to_cista_extensions(img.extensions); + return ci; +} + +scene::ImageExperimental from_cista_image(const CistaImage& ci) +{ + scene::ImageExperimental img; + img.name = std::string(ci.name.data(), ci.name.size()); + + img.image.width = ci.image.width; + img.image.height = ci.image.height; + img.image.num_channels = ci.image.num_channels; + img.image.element_type = static_cast(ci.image.element_type); + img.image.data.resize(ci.image.data_bytes.size()); + if (!ci.image.data_bytes.empty()) { + std::memcpy(img.image.data.data(), ci.image.data_bytes.data(), ci.image.data_bytes.size()); + } + + img.uri = fs::path(std::string(ci.uri.data(), ci.uri.size())); + + img.extensions = from_cista_extensions(ci.extensions); + return img; +} + +// 
--------------------------------------------------------------------------- +// Texture conversion +// --------------------------------------------------------------------------- + +CistaTexture to_cista_texture(const scene::Texture& tex) +{ + CistaTexture ct; + ct.name = data::string(tex.name.data(), tex.name.size()); + ct.image = (tex.image == scene::invalid_element) ? k_invalid_element : tex.image; + ct.mag_filter = static_cast(tex.mag_filter); + ct.min_filter = static_cast(tex.min_filter); + ct.wrap_u = static_cast(tex.wrap_u); + ct.wrap_v = static_cast(tex.wrap_v); + + ct.scale = {tex.scale.x(), tex.scale.y()}; + ct.offset = {tex.offset.x(), tex.offset.y()}; + ct.rotation = tex.rotation; + + ct.extensions = to_cista_extensions(tex.extensions); + return ct; +} + +scene::Texture from_cista_texture(const CistaTexture& ct) +{ + scene::Texture tex; + tex.name = std::string(ct.name.data(), ct.name.size()); + tex.image = (ct.image == k_invalid_element) ? scene::invalid_element : ct.image; + tex.mag_filter = static_cast(ct.mag_filter); + tex.min_filter = static_cast(ct.min_filter); + tex.wrap_u = static_cast(ct.wrap_u); + tex.wrap_v = static_cast(ct.wrap_v); + + tex.scale = Eigen::Vector2f(ct.scale[0], ct.scale[1]); + tex.offset = Eigen::Vector2f(ct.offset[0], ct.offset[1]); + tex.rotation = ct.rotation; + + tex.extensions = from_cista_extensions(ct.extensions); + return tex; +} + +// --------------------------------------------------------------------------- +// Material conversion +// --------------------------------------------------------------------------- + +CistaMaterial to_cista_material(const scene::MaterialExperimental& mat) +{ + CistaMaterial cm; + cm.name = data::string(mat.name.data(), mat.name.size()); + + cm.base_color_value = { + mat.base_color_value.x(), + mat.base_color_value.y(), + mat.base_color_value.z(), + mat.base_color_value.w()}; + cm.emissive_value = {mat.emissive_value.x(), mat.emissive_value.y(), mat.emissive_value.z()}; + cm.metallic_value 
= mat.metallic_value; + cm.roughness_value = mat.roughness_value; + cm.alpha_cutoff = mat.alpha_cutoff; + cm.normal_scale = mat.normal_scale; + cm.occlusion_strength = mat.occlusion_strength; + + cm.alpha_mode = static_cast(mat.alpha_mode); + cm.double_sided = mat.double_sided; + cm.base_color_texture = to_cista_texture_info(mat.base_color_texture); + cm.emissive_texture = to_cista_texture_info(mat.emissive_texture); + cm.metallic_roughness_texture = to_cista_texture_info(mat.metallic_roughness_texture); + cm.normal_texture = to_cista_texture_info(mat.normal_texture); + cm.occlusion_texture = to_cista_texture_info(mat.occlusion_texture); + + cm.extensions = to_cista_extensions(mat.extensions); + return cm; +} + +scene::MaterialExperimental from_cista_material(const CistaMaterial& cm) +{ + scene::MaterialExperimental mat; + mat.name = std::string(cm.name.data(), cm.name.size()); + + mat.base_color_value = Eigen::Vector4f( + cm.base_color_value[0], + cm.base_color_value[1], + cm.base_color_value[2], + cm.base_color_value[3]); + mat.emissive_value = + Eigen::Vector3f(cm.emissive_value[0], cm.emissive_value[1], cm.emissive_value[2]); + mat.metallic_value = cm.metallic_value; + mat.roughness_value = cm.roughness_value; + mat.alpha_cutoff = cm.alpha_cutoff; + mat.normal_scale = cm.normal_scale; + mat.occlusion_strength = cm.occlusion_strength; + + mat.alpha_mode = static_cast(cm.alpha_mode); + mat.double_sided = cm.double_sided; + mat.base_color_texture = from_cista_texture_info(cm.base_color_texture); + mat.emissive_texture = from_cista_texture_info(cm.emissive_texture); + mat.metallic_roughness_texture = from_cista_texture_info(cm.metallic_roughness_texture); + mat.normal_texture = from_cista_texture_info(cm.normal_texture); + mat.occlusion_texture = from_cista_texture_info(cm.occlusion_texture); + + mat.extensions = from_cista_extensions(cm.extensions); + return mat; +} + +// --------------------------------------------------------------------------- +// Light 
conversion +// --------------------------------------------------------------------------- + +CistaLight to_cista_light(const scene::Light& light) +{ + CistaLight cl; + cl.name = data::string(light.name.data(), light.name.size()); + cl.type = static_cast(light.type); + + cl.position = {light.position.x(), light.position.y(), light.position.z()}; + cl.direction = {light.direction.x(), light.direction.y(), light.direction.z()}; + cl.up = {light.up.x(), light.up.y(), light.up.z()}; + cl.intensity = light.intensity; + cl.attenuation_constant = light.attenuation_constant; + cl.attenuation_linear = light.attenuation_linear; + cl.attenuation_quadratic = light.attenuation_quadratic; + cl.attenuation_cubic = light.attenuation_cubic; + cl.range = light.range; + cl.color_diffuse = {light.color_diffuse.x(), light.color_diffuse.y(), light.color_diffuse.z()}; + cl.color_specular = { + light.color_specular.x(), + light.color_specular.y(), + light.color_specular.z()}; + cl.color_ambient = {light.color_ambient.x(), light.color_ambient.y(), light.color_ambient.z()}; + if (light.angle_inner_cone) cl.angle_inner_cone = *light.angle_inner_cone; + if (light.angle_outer_cone) cl.angle_outer_cone = *light.angle_outer_cone; + cl.size = {light.size.x(), light.size.y()}; + + cl.extensions = to_cista_extensions(light.extensions); + return cl; +} + +scene::Light from_cista_light(const CistaLight& cl) +{ + scene::Light light; + light.name = std::string(cl.name.data(), cl.name.size()); + light.type = static_cast(cl.type); + + light.position = Eigen::Vector3f(cl.position[0], cl.position[1], cl.position[2]); + light.direction = Eigen::Vector3f(cl.direction[0], cl.direction[1], cl.direction[2]); + light.up = Eigen::Vector3f(cl.up[0], cl.up[1], cl.up[2]); + light.intensity = cl.intensity; + light.attenuation_constant = cl.attenuation_constant; + light.attenuation_linear = cl.attenuation_linear; + light.attenuation_quadratic = cl.attenuation_quadratic; + light.attenuation_cubic = 
cl.attenuation_cubic; + light.range = cl.range; + light.color_diffuse = + Eigen::Vector3f(cl.color_diffuse[0], cl.color_diffuse[1], cl.color_diffuse[2]); + light.color_specular = + Eigen::Vector3f(cl.color_specular[0], cl.color_specular[1], cl.color_specular[2]); + light.color_ambient = + Eigen::Vector3f(cl.color_ambient[0], cl.color_ambient[1], cl.color_ambient[2]); + light.angle_inner_cone = + cl.angle_inner_cone.has_value() ? std::optional(*cl.angle_inner_cone) : std::nullopt; + light.angle_outer_cone = + cl.angle_outer_cone.has_value() ? std::optional(*cl.angle_outer_cone) : std::nullopt; + light.size = Eigen::Vector2f(cl.size[0], cl.size[1]); + + light.extensions = from_cista_extensions(cl.extensions); + return light; +} + +// --------------------------------------------------------------------------- +// Camera conversion +// --------------------------------------------------------------------------- + +CistaCamera to_cista_camera(const scene::Camera& cam) +{ + CistaCamera cc; + cc.name = data::string(cam.name.data(), cam.name.size()); + cc.type = static_cast(cam.type); + + cc.position = {cam.position.x(), cam.position.y(), cam.position.z()}; + cc.up = {cam.up.x(), cam.up.y(), cam.up.z()}; + cc.look_at = {cam.look_at.x(), cam.look_at.y(), cam.look_at.z()}; + cc.near_plane = cam.near_plane; + if (cam.far_plane) cc.far_plane = *cam.far_plane; + cc.orthographic_width = cam.orthographic_width; + cc.aspect_ratio = cam.aspect_ratio; + cc.horizontal_fov = cam.horizontal_fov; + + cc.extensions = to_cista_extensions(cam.extensions); + return cc; +} + +scene::Camera from_cista_camera(const CistaCamera& cc) +{ + scene::Camera cam; + cam.name = std::string(cc.name.data(), cc.name.size()); + cam.type = static_cast(cc.type); + + cam.position = Eigen::Vector3f(cc.position[0], cc.position[1], cc.position[2]); + cam.up = Eigen::Vector3f(cc.up[0], cc.up[1], cc.up[2]); + cam.look_at = Eigen::Vector3f(cc.look_at[0], cc.look_at[1], cc.look_at[2]); + cam.near_plane = 
cc.near_plane; + cam.far_plane = cc.far_plane.has_value() ? std::optional(*cc.far_plane) : std::nullopt; + cam.orthographic_width = cc.orthographic_width; + cam.aspect_ratio = cc.aspect_ratio; + cam.horizontal_fov = cc.horizontal_fov; + + cam.extensions = from_cista_extensions(cc.extensions); + return cam; +} + +// --------------------------------------------------------------------------- +// Skeleton / Animation conversion +// --------------------------------------------------------------------------- + +CistaSkeleton to_cista_skeleton(const scene::Skeleton& skel) +{ + CistaSkeleton cs; + cs.meshes.reserve(skel.meshes.size()); + for (size_t i = 0; i < skel.meshes.size(); ++i) { + cs.meshes.emplace_back(skel.meshes[i]); + } + cs.extensions = to_cista_extensions(skel.extensions); + return cs; +} + +scene::Skeleton from_cista_skeleton(const CistaSkeleton& cs) +{ + scene::Skeleton skel; + for (const auto& m : cs.meshes) { + skel.meshes.push_back(m); + } + skel.extensions = from_cista_extensions(cs.extensions); + return skel; +} + +CistaAnimation to_cista_animation(const scene::Animation& anim) +{ + CistaAnimation ca; + ca.name = data::string(anim.name.data(), anim.name.size()); + ca.extensions = to_cista_extensions(anim.extensions); + return ca; +} + +scene::Animation from_cista_animation(const CistaAnimation& ca) +{ + scene::Animation anim; + anim.name = std::string(ca.name.data(), ca.name.size()); + anim.extensions = from_cista_extensions(ca.extensions); + return anim; +} + +// --------------------------------------------------------------------------- +// Scene conversion +// --------------------------------------------------------------------------- + +template +CistaScene to_cista_scene(const scene::Scene& scene) +{ + CistaScene cs; + cs.scalar_type_size = sizeof(Scalar); + cs.index_type_size = sizeof(Index); + cs.name = data::string(scene.name.data(), scene.name.size()); + + // Nodes + cs.nodes.reserve(scene.nodes.size()); + for (size_t i = 0; i < 
scene.nodes.size(); ++i) { + cs.nodes.emplace_back(to_cista_node(scene.nodes[i])); + } + + // Root nodes + cs.root_nodes.reserve(scene.root_nodes.size()); + for (size_t i = 0; i < scene.root_nodes.size(); ++i) { + cs.root_nodes.emplace_back(scene.root_nodes[i]); + } + + // Meshes + cs.meshes.reserve(scene.meshes.size()); + for (size_t i = 0; i < scene.meshes.size(); ++i) { + cs.meshes.emplace_back(to_cista_mesh(scene.meshes[i])); + } + + // Images + cs.images.reserve(scene.images.size()); + for (size_t i = 0; i < scene.images.size(); ++i) { + cs.images.emplace_back(to_cista_image(scene.images[i])); + } + + // Textures + cs.textures.reserve(scene.textures.size()); + for (size_t i = 0; i < scene.textures.size(); ++i) { + cs.textures.emplace_back(to_cista_texture(scene.textures[i])); + } + + // Materials + cs.materials.reserve(scene.materials.size()); + for (size_t i = 0; i < scene.materials.size(); ++i) { + cs.materials.emplace_back(to_cista_material(scene.materials[i])); + } + + // Lights + cs.lights.reserve(scene.lights.size()); + for (size_t i = 0; i < scene.lights.size(); ++i) { + cs.lights.emplace_back(to_cista_light(scene.lights[i])); + } + + // Cameras + cs.cameras.reserve(scene.cameras.size()); + for (size_t i = 0; i < scene.cameras.size(); ++i) { + cs.cameras.emplace_back(to_cista_camera(scene.cameras[i])); + } + + // Skeletons + cs.skeletons.reserve(scene.skeletons.size()); + for (size_t i = 0; i < scene.skeletons.size(); ++i) { + cs.skeletons.emplace_back(to_cista_skeleton(scene.skeletons[i])); + } + + // Animations + cs.animations.reserve(scene.animations.size()); + for (size_t i = 0; i < scene.animations.size(); ++i) { + cs.animations.emplace_back(to_cista_animation(scene.animations[i])); + } + + // Scene-level extensions + cs.extensions = to_cista_extensions(scene.extensions); + + return cs; +} + +template +scene::Scene from_cista_scene(const CistaScene& cs) +{ + la_runtime_assert( + cs.version == scene_format_version(), + "Unsupported encoding format 
version: expected " + std::to_string(scene_format_version()) + + ", got " + std::to_string(cs.version)); + la_runtime_assert( + cs.scalar_type_size == sizeof(Scalar), + "Scalar type size mismatch: expected " + std::to_string(sizeof(Scalar)) + ", got " + + std::to_string(cs.scalar_type_size)); + la_runtime_assert( + cs.index_type_size == sizeof(Index), + "Index type size mismatch: expected " + std::to_string(sizeof(Index)) + ", got " + + std::to_string(cs.index_type_size)); + + scene::Scene sc; + sc.name = std::string(cs.name.data(), cs.name.size()); + + // Nodes + for (const auto& cn : cs.nodes) { + sc.nodes.push_back(from_cista_node(cn)); + } + + // Root nodes + for (const auto& rn : cs.root_nodes) { + sc.root_nodes.push_back(rn); + } + + // Meshes + for (const auto& cm : cs.meshes) { + sc.meshes.push_back(from_cista_mesh(cm)); + } + + // Images + for (const auto& ci : cs.images) { + sc.images.push_back(from_cista_image(ci)); + } + + // Textures + for (const auto& ct : cs.textures) { + sc.textures.push_back(from_cista_texture(ct)); + } + + // Materials + for (const auto& cm : cs.materials) { + sc.materials.push_back(from_cista_material(cm)); + } + + // Lights + for (const auto& cl : cs.lights) { + sc.lights.push_back(from_cista_light(cl)); + } + + // Cameras + for (const auto& cc : cs.cameras) { + sc.cameras.push_back(from_cista_camera(cc)); + } + + // Skeletons + for (const auto& cskel : cs.skeletons) { + sc.skeletons.push_back(from_cista_skeleton(cskel)); + } + + // Animations + for (const auto& ca : cs.animations) { + sc.animations.push_back(from_cista_animation(ca)); + } + + // Scene-level extensions + sc.extensions = from_cista_extensions(cs.extensions); + + return sc; +} + +/// Deserialize a CistaScene with native Scalar/Index types, then cast meshes to target types. 
+template +scene::Scene cast_cista_scene(const CistaScene& cs) +{ + auto native = from_cista_scene(cs); + scene::Scene result; + result.name = std::move(native.name); + result.nodes = std::move(native.nodes); + result.root_nodes = std::move(native.root_nodes); + result.meshes.reserve(native.meshes.size()); + for (auto& mesh : native.meshes) { + result.meshes.push_back(lagrange::cast(mesh)); + } + result.images = std::move(native.images); + result.textures = std::move(native.textures); + result.materials = std::move(native.materials); + result.lights = std::move(native.lights); + result.cameras = std::move(native.cameras); + result.skeletons = std::move(native.skeletons); + result.animations = std::move(native.animations); + result.extensions = std::move(native.extensions); + return result; +} + +/// Deserialize a CistaScene buffer with runtime dispatch on stored Scalar/Index types, then +/// cast to the requested types. +template +scene::Scene deserialize_scene_with_cast(span buffer) +{ + const auto* cs = + cista::deserialize(buffer.data(), buffer.data() + buffer.size()); + const uint8_t ss = cs->scalar_type_size; + const uint8_t is = cs->index_type_size; + + if (ss == sizeof(float) && is == sizeof(uint32_t)) { + return cast_cista_scene(*cs); + } else if (ss == sizeof(double) && is == sizeof(uint32_t)) { + return cast_cista_scene(*cs); + } else if (ss == sizeof(float) && is == sizeof(uint64_t)) { + return cast_cista_scene(*cs); + } else if (ss == sizeof(double) && is == sizeof(uint64_t)) { + return cast_cista_scene(*cs); + } else { + throw std::runtime_error( + "Unsupported scalar/index type sizes: scalar=" + std::to_string(ss) + + " index=" + std::to_string(is)); + } +} + +} // namespace internal + +// --------------------------------------------------------------------------- +// Public API +// --------------------------------------------------------------------------- + +template +std::vector serialize_scene( + const scene::Scene& scene, + const 
SerializeOptions& options) +{ + auto cscene = internal::to_cista_scene(scene); + + cista::buf> buf; + cista::serialize(buf, cscene); + + if (options.compress) { + return compress_buffer(buf.buf_, options.compression_level, options.num_threads); + } + + return std::move(buf.buf_); +} + +template +SceneType deserialize_scene(span buffer, const DeserializeOptions& options) +{ + using Scalar = typename SceneType::MeshType::Scalar; + using Index = typename SceneType::MeshType::Index; + + // Decompress if needed + std::vector decompressed_storage; + span data = buffer; + if (is_compressed(buffer)) { + decompressed_storage = decompress_buffer(buffer); + data = span(decompressed_storage); + } + + auto load_native_scene = [&]() -> SceneType { + const auto* cscene = cista::deserialize( + data.data(), + data.data() + data.size()); + if (cscene->scalar_type_size != sizeof(Scalar) || + cscene->index_type_size != sizeof(Index)) { + if (!options.allow_type_cast) { + throw std::runtime_error( + "Scalar/Index type mismatch: buffer has scalar_size=" + + std::to_string(cscene->scalar_type_size) + + " index_size=" + std::to_string(cscene->index_type_size) + + ", expected scalar_size=" + std::to_string(sizeof(Scalar)) + + " index_size=" + std::to_string(sizeof(Index))); + } + if (!options.quiet) { + logger().warn( + "Casting Scene types: buffer has scalar_size={} index_size={}, " + "requested scalar_size={} index_size={}", + cscene->scalar_type_size, + cscene->index_type_size, + sizeof(Scalar), + sizeof(Index)); + } + return internal::deserialize_scene_with_cast(data); + } + return internal::from_cista_scene(*cscene); + }; + + if (!options.allow_scene_conversion) { + return load_native_scene(); + } + + auto type = internal::detect_encoded_type(data); + switch (type) { + case internal::EncodedType::Scene: return load_native_scene(); + case internal::EncodedType::Mesh: { + if (!options.quiet) { + logger().warn("Buffer contains a Mesh, converting to Scene"); + } + DeserializeOptions 
native_opts; + native_opts.allow_scene_conversion = false; + auto mesh = deserialize_mesh>(data, native_opts); + return scene::mesh_to_scene(std::move(mesh)); + } + case internal::EncodedType::SimpleScene: { + if (!options.quiet) { + logger().warn("Buffer contains a SimpleScene, converting to Scene"); + } + DeserializeOptions native_opts; + native_opts.allow_scene_conversion = false; + auto simple_scene = + deserialize_simple_scene>(data, native_opts); + return scene::simple_scene_to_scene(simple_scene); + } + default: throw std::runtime_error("Unknown encoded data type in buffer"); + } +} + +template +void save_scene( + const fs::path& filename, + const scene::Scene& scene, + const SerializeOptions& options) +{ + auto buf = serialize_scene(scene, options); + fs::ofstream ofs(filename, std::ios::binary); + la_runtime_assert(ofs.good(), "Failed to open file for writing: " + filename.string()); + ofs.write(reinterpret_cast(buf.data()), static_cast(buf.size())); + la_runtime_assert(ofs.good(), "Failed to write to file: " + filename.string()); +} + +template +SceneType load_scene(const fs::path& filename, const DeserializeOptions& options) +{ + auto buf = internal::read_file_to_buffer(filename); + if (is_compressed(buf)) { + buf = decompress_buffer(buf); + } + return deserialize_scene(lagrange::span(buf), options); +} + +// Explicit template instantiations +#define LA_X_serialization2_sc(_, Scalar, Index) \ + template LA_SERIALIZATION2_API std::vector serialize_scene( \ + const scene::Scene&, \ + const SerializeOptions&); \ + template LA_SERIALIZATION2_API scene::Scene \ + deserialize_scene>( \ + span, \ + const DeserializeOptions&); \ + template LA_SERIALIZATION2_API void save_scene( \ + const fs::path&, \ + const scene::Scene&, \ + const SerializeOptions&); \ + template LA_SERIALIZATION2_API scene::Scene \ + load_scene>(const fs::path&, const DeserializeOptions&); +LA_SCENE_X(serialization2_sc, 0) +#undef LA_X_serialization2_sc + +} // namespace 
lagrange::serialization diff --git a/modules/serialization2/src/serialize_simple_scene.cpp b/modules/serialization2/src/serialize_simple_scene.cpp new file mode 100644 index 00000000..2ca694c3 --- /dev/null +++ b/modules/serialization2/src/serialize_simple_scene.cpp @@ -0,0 +1,425 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include + +#include +#include +#include +#include +#include +#include +#include +#include + +#include "CistaSimpleScene.h" +#include "compress.h" +#include "detect_type.h" +#include "mesh_convert.h" + +#include + +namespace lagrange::serialization { + +using internal::compress_buffer; +using internal::decompress_buffer; +using internal::is_compressed; +using internal::k_cista_mode; + +namespace internal { + +template +CistaSimpleScene to_cista_simple_scene(const scene::SimpleScene& scene) +{ + CistaSimpleScene cscene; + cscene.scalar_type_size = sizeof(Scalar); + cscene.index_type_size = sizeof(Index); + cscene.dimension = static_cast(Dimension); + + const Index num_meshes = scene.get_num_meshes(); + + // Serialize meshes + cscene.meshes.resize(num_meshes); + for (Index i = 0; i < num_meshes; ++i) { + cscene.meshes[i] = to_cista_mesh(scene.get_mesh(i)); + } + + // Serialize instances (flattened with per-mesh counts) + cscene.instances_per_mesh.resize(num_meshes); + for (Index i = 0; i < num_meshes; ++i) { + const Index num_instances = scene.get_num_instances(i); + 
cscene.instances_per_mesh[i] = num_instances; + + for (Index j = 0; j < num_instances; ++j) { + const auto& inst = scene.get_instance(i, j); + + CistaInstance cinst; + cinst.mesh_index = inst.mesh_index; + + // Store transform as raw bytes + constexpr size_t matrix_size = (Dimension + 1) * (Dimension + 1); + constexpr size_t byte_size = matrix_size * sizeof(Scalar); + cinst.transform_bytes.resize(byte_size); + std::memcpy(cinst.transform_bytes.data(), inst.transform.matrix().data(), byte_size); + + cscene.instances.emplace_back(std::move(cinst)); + } + } + + return cscene; +} + +template +scene::SimpleScene from_cista_simple_scene(const CistaSimpleScene& cscene) +{ + la_runtime_assert( + cscene.version == simple_scene_format_version(), + fmt::format( + "Unsupported encoding format version: expected {}, got {}", + simple_scene_format_version(), + cscene.version)); + la_runtime_assert( + cscene.scalar_type_size == sizeof(Scalar), + fmt::format( + "Scalar type size mismatch: expected {}, got {}", + sizeof(Scalar), + cscene.scalar_type_size)); + la_runtime_assert( + cscene.index_type_size == sizeof(Index), + fmt::format( + "Index type size mismatch: expected {}, got {}", + sizeof(Index), + cscene.index_type_size)); + la_runtime_assert( + cscene.dimension == Dimension, + fmt::format("Dimension mismatch: expected {}, got {}", Dimension, cscene.dimension)); + + scene::SimpleScene scene; + + const size_t num_meshes = cscene.meshes.size(); + la_runtime_assert( + cscene.instances_per_mesh.size() == num_meshes, + fmt::format( + "instances_per_mesh size mismatch: expected {}, got {}", + num_meshes, + cscene.instances_per_mesh.size())); + scene.reserve_meshes(static_cast(num_meshes)); + + for (size_t i = 0; i < num_meshes; ++i) { + scene.add_mesh(from_cista_mesh(cscene.meshes[i])); + } + + // Reconstruct instances from flattened data + size_t instance_offset = 0; + for (size_t i = 0; i < num_meshes; ++i) { + const size_t num_instances = 
static_cast(cscene.instances_per_mesh[i]); + la_runtime_assert( + instance_offset + num_instances <= cscene.instances.size(), + fmt::format( + "Instance offset out of bounds: offset={} + count={} > total={}", + instance_offset, + num_instances, + cscene.instances.size())); + scene.reserve_instances(static_cast(i), static_cast(num_instances)); + + for (size_t j = 0; j < num_instances; ++j) { + const auto& cinst = cscene.instances[instance_offset + j]; + + using InstanceType = scene::MeshInstance; + InstanceType inst; + inst.mesh_index = static_cast(cinst.mesh_index); + + // Restore transform from raw bytes + constexpr size_t matrix_size = (Dimension + 1) * (Dimension + 1); + constexpr size_t byte_size = matrix_size * sizeof(Scalar); + la_runtime_assert( + cinst.transform_bytes.size() == byte_size, + fmt::format( + "Transform data size mismatch: expected {}, got {}", + byte_size, + cinst.transform_bytes.size())); + std::memcpy(inst.transform.matrix().data(), cinst.transform_bytes.data(), byte_size); + + scene.add_instance(std::move(inst)); + } + instance_offset += num_instances; + } + la_runtime_assert( + instance_offset == cscene.instances.size(), + fmt::format( + "Total instance count mismatch: expected {}, got {}", + cscene.instances.size(), + instance_offset)); + + return scene; +} + +/// Deserialize a CistaSimpleScene buffer with runtime dispatch on stored Scalar/Index types, then +/// cast meshes and transforms to the requested types. 
+template +scene::SimpleScene deserialize_simple_scene_with_cast( + span buffer) +{ + const auto* cscene = cista::deserialize( + buffer.data(), + buffer.data() + buffer.size()); + + la_runtime_assert( + cscene->version == simple_scene_format_version(), + fmt::format( + "Unsupported encoding format version: expected {}, got {}", + simple_scene_format_version(), + cscene->version)); + la_runtime_assert( + cscene->dimension == Dimension, + fmt::format("Dimension mismatch: expected {}, got {}", Dimension, cscene->dimension)); + + const uint8_t ss = cscene->scalar_type_size; + const uint8_t is = cscene->index_type_size; + + // Helper to deserialize and cast a single mesh + auto cast_one_mesh = [&](const CistaMesh& cm) -> SurfaceMesh { + if (ss == sizeof(float) && is == sizeof(uint32_t)) { + auto m = from_cista_mesh(cm); + return lagrange::cast(m); + } else if (ss == sizeof(double) && is == sizeof(uint32_t)) { + auto m = from_cista_mesh(cm); + return lagrange::cast(m); + } else if (ss == sizeof(float) && is == sizeof(uint64_t)) { + auto m = from_cista_mesh(cm); + return lagrange::cast(m); + } else if (ss == sizeof(double) && is == sizeof(uint64_t)) { + auto m = from_cista_mesh(cm); + return lagrange::cast(m); + } else { + throw std::runtime_error( + fmt::format("Unsupported scalar/index type sizes: scalar={} index={}", ss, is)); + } + }; + + scene::SimpleScene result; + + const size_t num_meshes = cscene->meshes.size(); + la_runtime_assert( + cscene->instances_per_mesh.size() == num_meshes, + fmt::format( + "instances_per_mesh size mismatch: expected {}, got {}", + num_meshes, + cscene->instances_per_mesh.size())); + result.reserve_meshes(static_cast(num_meshes)); + + for (size_t i = 0; i < num_meshes; ++i) { + result.add_mesh(cast_one_mesh(cscene->meshes[i])); + } + + // Reconstruct instances with transform casting + constexpr size_t matrix_size = (Dimension + 1) * (Dimension + 1); + size_t instance_offset = 0; + for (size_t i = 0; i < num_meshes; ++i) { + const size_t 
num_instances = static_cast(cscene->instances_per_mesh[i]); + la_runtime_assert( + instance_offset + num_instances <= cscene->instances.size(), + fmt::format( + "Instance offset out of bounds: offset={} + count={} > total={}", + instance_offset, + num_instances, + cscene->instances.size())); + result.reserve_instances(static_cast(i), static_cast(num_instances)); + + for (size_t j = 0; j < num_instances; ++j) { + const auto& cinst = cscene->instances[instance_offset + j]; + + using InstanceType = scene::MeshInstance; + InstanceType inst; + inst.mesh_index = static_cast(cinst.mesh_index); + + // Cast transform from native scalar type + const size_t expected_byte_size = matrix_size * ss; + la_runtime_assert( + cinst.transform_bytes.size() == expected_byte_size, + fmt::format( + "Transform data size mismatch: expected {}, got {}", + expected_byte_size, + cinst.transform_bytes.size())); + if (ss == sizeof(float)) { + float native[matrix_size]; + std::memcpy(native, cinst.transform_bytes.data(), matrix_size * sizeof(float)); + for (size_t k = 0; k < matrix_size; ++k) { + inst.transform.matrix().data()[k] = static_cast(native[k]); + } + } else if (ss == sizeof(double)) { + double native[matrix_size]; + std::memcpy(native, cinst.transform_bytes.data(), matrix_size * sizeof(double)); + for (size_t k = 0; k < matrix_size; ++k) { + inst.transform.matrix().data()[k] = static_cast(native[k]); + } + } else { + throw std::runtime_error(fmt::format("Unsupported scalar type size: {}", ss)); + } + + result.add_instance(std::move(inst)); + } + instance_offset += num_instances; + } + la_runtime_assert( + instance_offset == cscene->instances.size(), + fmt::format( + "Total instance count mismatch: expected {}, got {}", + cscene->instances.size(), + instance_offset)); + + return result; +} + +} // namespace internal + +template +std::vector serialize_simple_scene( + const scene::SimpleScene& scene, + const SerializeOptions& options) +{ + auto cscene = 
internal::to_cista_simple_scene(scene); + + cista::buf> buf; + cista::serialize(buf, cscene); + + if (options.compress) { + return compress_buffer(buf.buf_, options.compression_level, options.num_threads); + } + + return std::move(buf.buf_); +} + +template +SceneType deserialize_simple_scene(span buffer, const DeserializeOptions& options) +{ + using Scalar = typename SceneType::MeshType::Scalar; + using Index = typename SceneType::MeshType::Index; + constexpr size_t Dimension = SceneType::Dim; + + // Decompress if needed + std::vector decompressed_storage; + span data = buffer; + if (is_compressed(buffer)) { + decompressed_storage = decompress_buffer(buffer); + data = span(decompressed_storage); + } + + auto load_native_simple_scene = [&]() -> SceneType { + const auto* cscene = cista::deserialize( + data.data(), + data.data() + data.size()); + if (cscene->scalar_type_size != sizeof(Scalar) || + cscene->index_type_size != sizeof(Index)) { + if (!options.allow_type_cast) { + throw std::runtime_error( + fmt::format( + "Scalar/Index type mismatch: buffer has scalar_size={} index_size={}, " + "expected scalar_size={} index_size={}", + cscene->scalar_type_size, + cscene->index_type_size, + sizeof(Scalar), + sizeof(Index))); + } + if (!options.quiet) { + logger().warn( + "Casting SimpleScene types: buffer has scalar_size={} index_size={}, " + "requested scalar_size={} index_size={}", + cscene->scalar_type_size, + cscene->index_type_size, + sizeof(Scalar), + sizeof(Index)); + } + return internal::deserialize_simple_scene_with_cast( + data); + } + return internal::from_cista_simple_scene(*cscene); + }; + + if (!options.allow_scene_conversion) { + return load_native_simple_scene(); + } + + auto type = internal::detect_encoded_type(data); + switch (type) { + case internal::EncodedType::SimpleScene: return load_native_simple_scene(); + case internal::EncodedType::Mesh: { + if (!options.quiet) { + logger().warn("Buffer contains a Mesh, converting to SimpleScene"); + } + 
DeserializeOptions native_opts; + native_opts.allow_scene_conversion = false; + auto mesh = deserialize_mesh>(data, native_opts); + return scene::mesh_to_simple_scene(std::move(mesh)); + } + case internal::EncodedType::Scene: { + if constexpr (Dimension == 3) { + if (!options.quiet) { + logger().warn("Buffer contains a Scene, converting to SimpleScene"); + } + DeserializeOptions native_opts; + native_opts.allow_scene_conversion = false; + auto scene = deserialize_scene>(data, native_opts); + return scene::scene_to_simple_scene(scene); + } else { + throw std::runtime_error( + "Cannot convert an encoded Scene to a SimpleScene with Dimension != 3"); + } + } + default: throw std::runtime_error("Unknown encoded data type in buffer"); + } +} + +template +void save_simple_scene( + const fs::path& filename, + const scene::SimpleScene& scene, + const SerializeOptions& options) +{ + auto buf = serialize_simple_scene(scene, options); + fs::ofstream ofs(filename, std::ios::binary); + la_runtime_assert(ofs.good(), "Failed to open file for writing: " + filename.string()); + ofs.write(reinterpret_cast(buf.data()), static_cast(buf.size())); + la_runtime_assert(ofs.good(), "Failed to write to file: " + filename.string()); +} + +template +SceneType load_simple_scene(const fs::path& filename, const DeserializeOptions& options) +{ + auto buf = internal::read_file_to_buffer(filename); + if (is_compressed(buf)) { + buf = decompress_buffer(buf); + } + return deserialize_simple_scene(lagrange::span(buf), options); +} + +// Explicit template instantiations +#define LA_X_serialization2_ss(_, Scalar, Index, Dim) \ + template LA_SERIALIZATION2_API std::vector \ + serialize_simple_scene( \ + const scene::SimpleScene&, \ + const SerializeOptions&); \ + template LA_SERIALIZATION2_API scene::SimpleScene \ + deserialize_simple_scene>( \ + span, \ + const DeserializeOptions&); \ + template LA_SERIALIZATION2_API void save_simple_scene( \ + const fs::path&, \ + const scene::SimpleScene&, \ + const 
SerializeOptions&); \ + template LA_SERIALIZATION2_API scene::SimpleScene \ + load_simple_scene>( \ + const fs::path&, \ + const DeserializeOptions&); +LA_SIMPLE_SCENE_X(serialization2_ss, 0) +#undef LA_X_serialization2_ss + +} // namespace lagrange::serialization diff --git a/modules/scene/python/tests/assets.py b/modules/serialization2/tests/CMakeLists.txt similarity index 69% rename from modules/scene/python/tests/assets.py rename to modules/serialization2/tests/CMakeLists.txt index e02c99a3..a817bf72 100644 --- a/modules/scene/python/tests/assets.py +++ b/modules/serialization2/tests/CMakeLists.txt @@ -9,16 +9,8 @@ # OF ANY KIND, either express or implied. See the License for the specific language # governing permissions and limitations under the License. # -import lagrange -import numpy as np -import pytest +lagrange_add_test() - -@pytest.fixture -def single_triangle(): - mesh = lagrange.SurfaceMesh() - mesh.add_vertices(np.eye(3)) - mesh.add_triangle(0, 1, 2) - assert mesh.num_vertices == 3 - assert mesh.num_facets == 1 - return mesh +lagrange_include_modules(primitive) +include(cista) +target_link_libraries(test_lagrange_serialization2 PRIVATE lagrange::primitive cista::cista) diff --git a/modules/serialization2/tests/test_serialization2_benchmark.cpp b/modules/serialization2/tests/test_serialization2_benchmark.cpp new file mode 100644 index 00000000..0265ed56 --- /dev/null +++ b/modules/serialization2/tests/test_serialization2_benchmark.cpp @@ -0,0 +1,87 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#include +#include +#include + +#include +#include + +TEST_CASE("serialization2: dragon benchmark", "[serialization2][!benchmark]") +{ + using Scalar = double; + using Index = uint32_t; + + auto mesh = lagrange::testing::load_surface_mesh("open/core/dragon.obj"); + INFO( + "Dragon mesh: " << mesh.get_num_vertices() << " vertices, " << mesh.get_num_facets() + << " facets"); + + // Pre-serialize buffers for deserialization benchmarks + lagrange::serialization::SerializeOptions opts_compressed; + + lagrange::serialization::SerializeOptions opts_uncompressed; + opts_uncompressed.compress = false; + + auto buf_compressed = lagrange::serialization::serialize_mesh(mesh, opts_compressed); + auto buf_uncompressed = lagrange::serialization::serialize_mesh(mesh, opts_uncompressed); + + BENCHMARK("serialize (uncompressed)") + { + return lagrange::serialization::serialize_mesh(mesh, opts_uncompressed); + }; + + BENCHMARK("serialize (compressed, level 3)") + { + return lagrange::serialization::serialize_mesh(mesh, opts_compressed); + }; + + BENCHMARK("deserialize (uncompressed)") + { + return lagrange::serialization::deserialize_mesh>( + buf_uncompressed); + }; + + BENCHMARK("deserialize (compressed)") + { + return lagrange::serialization::deserialize_mesh>( + buf_compressed); + }; + + // Also benchmark with edge topology initialized + mesh.initialize_edges(); + + auto buf_edges_compressed = lagrange::serialization::serialize_mesh(mesh, opts_compressed); + auto buf_edges_uncompressed = lagrange::serialization::serialize_mesh(mesh, opts_uncompressed); + + 
BENCHMARK("serialize with edges (uncompressed)") + { + return lagrange::serialization::serialize_mesh(mesh, opts_uncompressed); + }; + + BENCHMARK("serialize with edges (compressed)") + { + return lagrange::serialization::serialize_mesh(mesh, opts_compressed); + }; + + BENCHMARK("deserialize with edges (uncompressed)") + { + return lagrange::serialization::deserialize_mesh>( + buf_edges_uncompressed); + }; + + BENCHMARK("deserialize with edges (compressed)") + { + return lagrange::serialization::deserialize_mesh>( + buf_edges_compressed); + }; +} diff --git a/modules/serialization2/tests/test_serialize_mesh.cpp b/modules/serialization2/tests/test_serialize_mesh.cpp new file mode 100644 index 00000000..58d311ae --- /dev/null +++ b/modules/serialization2/tests/test_serialize_mesh.cpp @@ -0,0 +1,395 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include +#include +#include +#include +#include +#include +#include + +#include + +#include + +namespace { + +template +lagrange::SurfaceMesh make_test_sphere() +{ + lagrange::primitive::SphereOptions opts; + opts.num_longitude_sections = 8; + opts.num_latitude_sections = 8; + return lagrange::primitive::generate_sphere(opts); +} + +template +lagrange::SurfaceMesh make_large_test_sphere() +{ + lagrange::primitive::SphereOptions opts; + opts.num_longitude_sections = 32; + opts.num_latitude_sections = 32; + return lagrange::primitive::generate_sphere(opts); +} + +} // namespace + +TEST_CASE("serialization2: empty mesh", "[serialization2]") +{ + using Scalar = double; + using Index = uint32_t; + lagrange::SurfaceMesh mesh; + + SECTION("uncompressed") + { + lagrange::serialization::SerializeOptions opts; + opts.compress = false; + auto buf = lagrange::serialization::serialize_mesh(mesh, opts); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } + + SECTION("compressed") + { + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } +} + +TEST_CASE("serialization2: triangle mesh round-trip", "[serialization2]") +{ + using Scalar = double; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + + SECTION("uncompressed") + { + lagrange::serialization::SerializeOptions opts; + opts.compress = false; + auto buf = lagrange::serialization::serialize_mesh(mesh, opts); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } + + SECTION("compressed") + { + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } +} + +TEST_CASE("serialization2: user attributes", 
"[serialization2]") +{ + using Scalar = float; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + + // Add per-vertex color (4 channels) + { + std::vector colors(mesh.get_num_vertices() * 4); + for (size_t i = 0; i < colors.size(); ++i) { + colors[i] = static_cast(i) / static_cast(colors.size()); + } + mesh.template create_attribute( + "color", + lagrange::AttributeElement::Vertex, + lagrange::AttributeUsage::Color, + 4, + lagrange::span(colors.data(), colors.size())); + } + + // Add per-facet label (1 channel, integer) + { + std::vector labels(mesh.get_num_facets()); + for (size_t i = 0; i < labels.size(); ++i) { + labels[i] = static_cast(i % 5); + } + mesh.template create_attribute( + "label", + lagrange::AttributeElement::Facet, + lagrange::AttributeUsage::Scalar, + 1, + lagrange::span(labels.data(), labels.size())); + } + + // Add per-corner weight (1 channel) + { + std::vector weights(mesh.get_num_corners()); + for (size_t i = 0; i < weights.size(); ++i) { + weights[i] = static_cast(i) * Scalar(0.01); + } + mesh.template create_attribute( + "weight", + lagrange::AttributeElement::Corner, + lagrange::AttributeUsage::Scalar, + 1, + lagrange::span(weights.data(), weights.size())); + } + + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); +} + +TEST_CASE("serialization2: indexed attributes", "[serialization2]") +{ + using Scalar = double; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + + // Add an indexed UV attribute + { + size_t num_uv_values = 10; + std::vector uv_values(num_uv_values * 2); + for (size_t i = 0; i < uv_values.size(); ++i) { + uv_values[i] = static_cast(i) / static_cast(uv_values.size()); + } + std::vector uv_indices(mesh.get_num_corners()); + for (size_t i = 0; i < uv_indices.size(); ++i) { + uv_indices[i] = static_cast(i % num_uv_values); + } + mesh.template create_attribute( + "uv", + 
lagrange::AttributeElement::Indexed, + lagrange::AttributeUsage::UV, + 2, + lagrange::span(uv_values.data(), uv_values.size()), + lagrange::span(uv_indices.data(), uv_indices.size())); + } + + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); +} + +TEST_CASE("serialization2: hybrid mesh", "[serialization2]") +{ + using Scalar = double; + using Index = uint32_t; + + lagrange::SurfaceMesh mesh(3); + + // Create a mesh with mixed polygon sizes + mesh.add_vertices( + 6, + {0.0, 0.0, 0.0, 1.0, 0.0, 0.0, 1.0, 1.0, 0.0, 0.0, 1.0, 0.0, 2.0, 0.0, 0.0, 2.0, 1.0, 0.0}); + + // Add a triangle and a quad + mesh.add_polygon({0, 1, 2}); + mesh.add_polygon({0, 2, 3}); + mesh.add_polygon({1, 4, 5, 2}); + + REQUIRE(mesh.is_hybrid()); + + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); +} + +TEST_CASE("serialization2: edge topology", "[serialization2]") +{ + using Scalar = float; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + mesh.initialize_edges(); + REQUIRE(mesh.get_num_edges() > 0); + + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + + REQUIRE(result.get_num_edges() == mesh.get_num_edges()); + lagrange::testing::check_meshes_equal(mesh, result); +} + +TEST_CASE("serialization2: compression reduces size", "[serialization2]") +{ + using Scalar = double; + using Index = uint32_t; + + auto mesh = make_large_test_sphere(); + + lagrange::serialization::SerializeOptions raw_opts; + raw_opts.compress = false; + auto buf_raw = lagrange::serialization::serialize_mesh(mesh, raw_opts); + + auto buf_compressed = lagrange::serialization::serialize_mesh(mesh); + + REQUIRE(buf_compressed.size() < buf_raw.size()); + + auto result = 
lagrange::serialization::deserialize_mesh>( + buf_compressed); + lagrange::testing::check_meshes_equal(mesh, result); +} + +TEST_CASE("serialization2: compression levels", "[serialization2]") +{ + using Scalar = double; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + + for (int level : {1, 10, 22}) { + lagrange::serialization::SerializeOptions opts; + opts.compression_level = level; + auto buf = lagrange::serialization::serialize_mesh(mesh, opts); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } +} + +TEST_CASE("serialization2: all mesh type instantiations", "[serialization2]") +{ + SECTION("float, uint32_t") + { + auto mesh = make_test_sphere(); + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } + + SECTION("double, uint32_t") + { + auto mesh = make_test_sphere(); + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } + + SECTION("float, uint64_t") + { + auto mesh = make_test_sphere(); + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } + + SECTION("double, uint64_t") + { + auto mesh = make_test_sphere(); + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + lagrange::testing::check_meshes_equal(mesh, result); + } +} + +TEST_CASE("serialization2: type mismatch detection", "[serialization2]") +{ + using Scalar = float; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + auto buf = lagrange::serialization::serialize_mesh(mesh); + + LA_REQUIRE_THROWS( + 
lagrange::serialization::deserialize_mesh>(buf)); + LA_REQUIRE_THROWS( + lagrange::serialization::deserialize_mesh>(buf)); +} + +TEST_CASE("serialization2: integrity check detects corruption", "[serialization2]") +{ + using Scalar = float; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + auto buf = lagrange::serialization::serialize_mesh(mesh); + + // Corrupt a byte in the middle of the buffer + REQUIRE(buf.size() > 100); + buf[buf.size() / 2] ^= 0xFF; + + LA_REQUIRE_THROWS( + lagrange::serialization::deserialize_mesh>(buf)); +} + +TEST_CASE("serialization2: format version is accessible", "[serialization2]") +{ + REQUIRE(lagrange::serialization::mesh_format_version() >= 1); +} + +TEST_CASE("serialization2: user attribute ids are preserved", "[serialization2]") +{ + using Scalar = float; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + + // Record original user attribute ids + std::vector> original_ids; + mesh.seq_foreach_attribute_id([&](std::string_view name, lagrange::AttributeId id) { + if (!lagrange::SurfaceMesh::attr_name_is_reserved(name)) { + original_ids.emplace_back(std::string(name), id); + } + }); + REQUIRE(!original_ids.empty()); + + auto buf = lagrange::serialization::serialize_mesh(mesh); + auto result = + lagrange::serialization::deserialize_mesh>(buf); + + for (const auto& [name, expected_id] : original_ids) { + INFO("Checking attribute id for: " << name); + REQUIRE(result.has_attribute(name)); + REQUIRE(result.get_attribute_id(name) == expected_id); + } +} + +TEST_CASE("serialization2: file round-trip", "[serialization2]") +{ + using Scalar = double; + using Index = uint32_t; + + auto mesh = make_test_sphere(); + + auto tmp_dir = lagrange::fs::temp_directory_path(); + + SECTION("compressed") + { + auto path = tmp_dir / "test_mesh_compressed.lmesh"; + lagrange::serialization::save_mesh(path, mesh); + auto result = + lagrange::serialization::load_mesh>(path); + lagrange::testing::check_meshes_equal(mesh, result); + 
lagrange::fs::remove(path); + } + + SECTION("uncompressed") + { + auto path = tmp_dir / "test_mesh_uncompressed.lmesh"; + lagrange::serialization::SerializeOptions opts; + opts.compress = false; + lagrange::serialization::save_mesh(path, mesh, opts); + auto result = + lagrange::serialization::load_mesh>(path); + lagrange::testing::check_meshes_equal(mesh, result); + lagrange::fs::remove(path); + } +} diff --git a/modules/serialization2/tests/test_serialize_scene.cpp b/modules/serialization2/tests/test_serialize_scene.cpp new file mode 100644 index 00000000..9d175a0f --- /dev/null +++ b/modules/serialization2/tests/test_serialize_scene.cpp @@ -0,0 +1,541 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include +#include +#include +#include +#include + +#include + +#include + +#include + +#include + +using namespace lagrange::scene; + +TEST_CASE("serialization2: empty scene", "[serialization2][scene]") +{ + using Scalar = double; + using Index = uint32_t; + Scene scene; + scene.name = "empty_scene"; + + // Need at least one root node + Node root; + root.name = "root"; + scene.nodes.push_back(root); + scene.root_nodes.push_back(0); + + SECTION("uncompressed") + { + lagrange::serialization::SerializeOptions opts; + opts.compress = false; + auto buf = lagrange::serialization::serialize_scene(scene, opts); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + } + + SECTION("compressed") + { + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + } +} + +TEST_CASE("serialization2: scene with hierarchy", "[serialization2][scene]") +{ + using Scalar = float; + using Index = uint32_t; + Scene scene; + scene.name = "hierarchy_test"; + + // Create a mesh + lagrange::primitive::SphereOptions sphere_opts; + sphere_opts.num_longitude_sections = 8; + sphere_opts.num_latitude_sections = 8; + scene.meshes.push_back(lagrange::primitive::generate_sphere(sphere_opts)); + + // Build hierarchy: root -> child1 -> grandchild, root -> child2 + Node root; + root.name = "root"; + root.transform = Eigen::Affine3f::Identity(); + root.parent = invalid_element; + root.children.push_back(1); + root.children.push_back(2); + + Node child1; + child1.name = "child1"; + child1.transform = Eigen::Affine3f::Identity(); + child1.transform.translate(Eigen::Vector3f(1.f, 0.f, 0.f)); + child1.parent = 0; + child1.children.push_back(3); + { + SceneMeshInstance smi; + smi.mesh = 0; + child1.meshes.push_back(std::move(smi)); + } + + Node child2; + child2.name = "child2"; + child2.transform = 
Eigen::Affine3f::Identity(); + child2.transform.rotate(Eigen::AngleAxisf(0.5f, Eigen::Vector3f::UnitY())); + child2.parent = 0; + + Node grandchild; + grandchild.name = "grandchild"; + grandchild.transform = Eigen::Affine3f::Identity(); + grandchild.transform.scale(Eigen::Vector3f(2.f, 2.f, 2.f)); + grandchild.parent = 1; + { + SceneMeshInstance smi; + smi.mesh = 0; + grandchild.meshes.push_back(std::move(smi)); + } + + scene.nodes.push_back(std::move(root)); + scene.nodes.push_back(std::move(child1)); + scene.nodes.push_back(std::move(child2)); + scene.nodes.push_back(std::move(grandchild)); + scene.root_nodes.push_back(0); + + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); +} + +TEST_CASE("serialization2: scene with materials and textures", "[serialization2][scene]") +{ + using Scalar = float; + using Index = uint32_t; + Scene scene; + + // Image: 2x2 RGBA + ImageExperimental img; + img.name = "test_image"; + img.image.width = 2; + img.image.height = 2; + img.image.num_channels = 4; + img.image.element_type = lagrange::AttributeValueType::e_uint8_t; + img.image.data.resize(2 * 2 * 4, 128); + img.uri = lagrange::fs::path("textures/test.png"); + scene.images.push_back(std::move(img)); + + // Texture + Texture tex; + tex.name = "base_color_tex"; + tex.image = 0; + tex.mag_filter = Texture::TextureFilter::Linear; + tex.min_filter = Texture::TextureFilter::LinearMipmapLinear; + tex.wrap_u = Texture::WrapMode::Clamp; + tex.wrap_v = Texture::WrapMode::Mirror; + tex.scale = Eigen::Vector2f(2.f, 3.f); + tex.offset = Eigen::Vector2f(0.5f, 0.25f); + tex.rotation = 0.1f; + scene.textures.push_back(std::move(tex)); + + // Material + MaterialExperimental mat; + mat.name = "test_material"; + mat.base_color_value = Eigen::Vector4f(0.8f, 0.2f, 0.1f, 1.0f); + mat.emissive_value = Eigen::Vector3f(0.1f, 0.05f, 0.0f); + mat.metallic_value = 0.0f; + 
mat.roughness_value = 0.7f; + mat.alpha_mode = MaterialExperimental::AlphaMode::Blend; + mat.alpha_cutoff = 0.3f; + mat.normal_scale = 1.5f; + mat.occlusion_strength = 0.8f; + mat.double_sided = true; + mat.base_color_texture.index = 0; + mat.base_color_texture.texcoord = 0; + scene.materials.push_back(std::move(mat)); + + // Node referencing mesh + material + lagrange::primitive::SphereOptions sphere_opts; + sphere_opts.num_longitude_sections = 4; + sphere_opts.num_latitude_sections = 4; + scene.meshes.push_back(lagrange::primitive::generate_sphere(sphere_opts)); + + Node root; + root.name = "root"; + SceneMeshInstance smi; + smi.mesh = 0; + smi.materials.push_back(0); + root.meshes.push_back(std::move(smi)); + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); +} + +TEST_CASE("serialization2: scene with lights", "[serialization2][scene]") +{ + using Scalar = double; + using Index = uint32_t; + Scene scene; + + // Point light + Light point_light; + point_light.name = "point_light"; + point_light.type = Light::Type::Point; + point_light.position = Eigen::Vector3f(1.f, 2.f, 3.f); + point_light.intensity = 100.f; + point_light.attenuation_constant = 1.f; + point_light.attenuation_linear = 0.09f; + point_light.attenuation_quadratic = 0.032f; + point_light.color_diffuse = Eigen::Vector3f(1.f, 0.9f, 0.8f); + scene.lights.push_back(std::move(point_light)); + + // Spot light + Light spot_light; + spot_light.name = "spot_light"; + spot_light.type = Light::Type::Spot; + spot_light.position = Eigen::Vector3f(0.f, 5.f, 0.f); + spot_light.direction = Eigen::Vector3f(0.f, -1.f, 0.f); + spot_light.intensity = 50.f; + spot_light.angle_inner_cone = 0.3f; + spot_light.angle_outer_cone = 0.5f; + spot_light.color_diffuse = Eigen::Vector3f(1.f, 1.f, 1.f); + 
scene.lights.push_back(std::move(spot_light)); + + // Directional light + Light dir_light; + dir_light.name = "dir_light"; + dir_light.type = Light::Type::Directional; + dir_light.direction = Eigen::Vector3f(0.f, -1.f, -1.f).normalized(); + dir_light.color_diffuse = Eigen::Vector3f(0.5f, 0.5f, 0.5f); + scene.lights.push_back(std::move(dir_light)); + + Node root; + root.name = "root"; + root.lights.push_back(0); + root.lights.push_back(1); + root.lights.push_back(2); + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); +} + +TEST_CASE("serialization2: scene with cameras", "[serialization2][scene]") +{ + using Scalar = float; + using Index = uint32_t; + Scene scene; + + // Perspective camera with far plane + Camera persp; + persp.name = "perspective_cam"; + persp.type = Camera::Type::Perspective; + persp.position = Eigen::Vector3f(0.f, 1.f, 5.f); + persp.up = Eigen::Vector3f(0.f, 1.f, 0.f); + persp.look_at = Eigen::Vector3f(0.f, 0.f, 0.f); + persp.near_plane = 0.1f; + persp.far_plane = 1000.f; + persp.aspect_ratio = 16.f / 9.f; + persp.horizontal_fov = static_cast(M_PI) / 3.f; + scene.cameras.push_back(std::move(persp)); + + // Perspective camera without far plane + Camera persp_no_far; + persp_no_far.name = "infinite_cam"; + persp_no_far.type = Camera::Type::Perspective; + persp_no_far.near_plane = 0.01f; + persp_no_far.far_plane = std::nullopt; + scene.cameras.push_back(std::move(persp_no_far)); + + // Orthographic camera + Camera ortho; + ortho.name = "ortho_cam"; + ortho.type = Camera::Type::Orthographic; + ortho.position = Eigen::Vector3f(0.f, 10.f, 0.f); + ortho.look_at = Eigen::Vector3f(0.f, 0.f, 0.f); + ortho.near_plane = 0.1f; + ortho.far_plane = 100.f; + ortho.orthographic_width = 20.f; + ortho.aspect_ratio = 1.f; + 
scene.cameras.push_back(std::move(ortho)); + + Node root; + root.name = "root"; + root.cameras.push_back(0); + root.cameras.push_back(1); + root.cameras.push_back(2); + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + + // Verify optional far plane handling + REQUIRE(result.cameras[0].far_plane.has_value()); + REQUIRE(result.cameras[0].far_plane.value() == 1000.f); + REQUIRE_FALSE(result.cameras[1].far_plane.has_value()); + REQUIRE(result.cameras[2].far_plane.has_value()); +} + +TEST_CASE("serialization2: scene with extensions", "[serialization2][scene]") +{ + using Scalar = double; + using Index = uint32_t; + Scene scene; + + Node root; + root.name = "root"; + + // Test all Value types in extensions + root.extensions.data["bool_val"] = Value(true); + root.extensions.data["int_val"] = Value(42); + root.extensions.data["double_val"] = Value(3.14); + root.extensions.data["string_val"] = Value(std::string("hello world")); + + Value::Buffer buf_data = {0x01, 0x02, 0x03, 0x04}; + root.extensions.data["buffer_val"] = Value(buf_data); + + // Array + Value::Array arr; + arr.push_back(Value(1)); + arr.push_back(Value(std::string("two"))); + arr.push_back(Value(3.0)); + root.extensions.data["array_val"] = Value(std::move(arr)); + + // Nested object + Value::Object obj; + obj["nested_bool"] = Value(false); + obj["nested_int"] = Value(-7); + Value::Array inner_arr; + inner_arr.push_back(Value(10)); + inner_arr.push_back(Value(20)); + obj["nested_array"] = Value(std::move(inner_arr)); + root.extensions.data["object_val"] = Value(std::move(obj)); + + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + // Also add scene-level extensions + scene.extensions.data["scene_version"] = Value(std::string("1.0")); + + auto buf = 
lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + + // Check node extensions + const auto& ext = result.nodes[0].extensions; + REQUIRE(ext.data.at("bool_val").get_bool() == true); + REQUIRE(ext.data.at("int_val").get_int() == 42); + REQUIRE(ext.data.at("double_val").get_real() == 3.14); + REQUIRE(ext.data.at("string_val").get_string() == "hello world"); + + const auto& result_buf = ext.data.at("buffer_val").get_buffer(); + REQUIRE(result_buf.size() == 4); + REQUIRE(result_buf[0] == 0x01); + REQUIRE(result_buf[3] == 0x04); + + const auto& result_arr = ext.data.at("array_val").get_array(); + REQUIRE(result_arr.size() == 3); + REQUIRE(result_arr[0].get_int() == 1); + REQUIRE(result_arr[1].get_string() == "two"); + REQUIRE(result_arr[2].get_real() == 3.0); + + const auto& result_obj = ext.data.at("object_val").get_object(); + REQUIRE(result_obj.at("nested_bool").get_bool() == false); + REQUIRE(result_obj.at("nested_int").get_int() == -7); + const auto& inner = result_obj.at("nested_array").get_array(); + REQUIRE(inner.size() == 2); + REQUIRE(inner[0].get_int() == 10); + REQUIRE(inner[1].get_int() == 20); + + // Check scene-level extensions + REQUIRE(result.extensions.data.at("scene_version").get_string() == "1.0"); +} + +TEST_CASE("serialization2: scene with skeletons and animations", "[serialization2][scene]") +{ + using Scalar = float; + using Index = uint32_t; + Scene scene; + + lagrange::primitive::SphereOptions sphere_opts; + sphere_opts.num_longitude_sections = 4; + sphere_opts.num_latitude_sections = 4; + scene.meshes.push_back(lagrange::primitive::generate_sphere(sphere_opts)); + + Skeleton skel; + skel.meshes.push_back(0); + skel.extensions.data["joint_count"] = Value(10); + scene.skeletons.push_back(std::move(skel)); + + Animation anim; + anim.name = "walk_cycle"; + anim.extensions.data["duration"] = Value(2.5); + scene.animations.push_back(std::move(anim)); + + Node root; + root.name = 
"root"; + SceneMeshInstance smi; + smi.mesh = 0; + root.meshes.push_back(std::move(smi)); + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + + REQUIRE(result.skeletons[0].meshes.size() == 1); + REQUIRE(result.skeletons[0].meshes[0] == 0); + REQUIRE(result.skeletons[0].extensions.data.at("joint_count").get_int() == 10); + REQUIRE(result.animations[0].name == "walk_cycle"); + REQUIRE(result.animations[0].extensions.data.at("duration").get_real() == 2.5); +} + +TEST_CASE("serialization2: scene all type instantiations", "[serialization2][scene]") +{ + SECTION("float, uint32_t") + { + Scene scene; + Node root; + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + } + + SECTION("double, uint32_t") + { + Scene scene; + Node root; + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + } + + SECTION("float, uint64_t") + { + Scene scene; + Node root; + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + } + + SECTION("double, uint64_t") + { + Scene scene; + Node root; + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = 
lagrange::serialization::deserialize_scene>(buf); + lagrange::testing::check_scenes_equal(scene, result); + } +} + +TEST_CASE("serialization2: scene type mismatch", "[serialization2][scene]") +{ + using Scalar = float; + using Index = uint32_t; + Scene scene; + + lagrange::primitive::SphereOptions sphere_opts; + sphere_opts.num_longitude_sections = 4; + sphere_opts.num_latitude_sections = 4; + scene.meshes.push_back(lagrange::primitive::generate_sphere(sphere_opts)); + + Node root; + root.name = "root"; + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + auto buf = lagrange::serialization::serialize_scene(scene); + + LA_REQUIRE_THROWS((lagrange::serialization::deserialize_scene>(buf))); + LA_REQUIRE_THROWS((lagrange::serialization::deserialize_scene>(buf))); +} + +TEST_CASE("serialization2: scene file round-trip", "[serialization2][scene]") +{ + using Scalar = double; + using Index = uint32_t; + Scene scene; + scene.name = "file_test"; + + lagrange::primitive::SphereOptions sphere_opts; + sphere_opts.num_longitude_sections = 8; + sphere_opts.num_latitude_sections = 8; + scene.meshes.push_back(lagrange::primitive::generate_sphere(sphere_opts)); + + Node root; + root.name = "root"; + SceneMeshInstance smi; + smi.mesh = 0; + root.meshes.push_back(std::move(smi)); + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + auto tmp_dir = lagrange::fs::temp_directory_path(); + + SECTION("compressed") + { + auto path = tmp_dir / "test_scene_compressed.lscene"; + lagrange::serialization::save_scene(path, scene); + auto result = lagrange::serialization::load_scene>(path); + lagrange::testing::check_scenes_equal(scene, result); + lagrange::fs::remove(path); + } + + SECTION("uncompressed") + { + auto path = tmp_dir / "test_scene_uncompressed.lscene"; + lagrange::serialization::SerializeOptions opts; + opts.compress = false; + lagrange::serialization::save_scene(path, scene, opts); + auto result = 
lagrange::serialization::load_scene>(path); + lagrange::testing::check_scenes_equal(scene, result); + lagrange::fs::remove(path); + } +} + +TEST_CASE("serialization2: scene user_data gracefully skipped", "[serialization2][scene]") +{ + using Scalar = float; + using Index = uint32_t; + Scene scene; + + Node root; + root.name = "root"; + root.extensions.data["preserved"] = Value(std::string("keep me")); + root.extensions.user_data["transient"] = std::any(42); + scene.nodes.push_back(std::move(root)); + scene.root_nodes.push_back(0); + + auto buf = lagrange::serialization::serialize_scene(scene); + auto result = lagrange::serialization::deserialize_scene>(buf); + + // data should be preserved + REQUIRE(result.nodes[0].extensions.data.at("preserved").get_string() == "keep me"); + + // user_data should be empty after round-trip (std::any is not serializable) + REQUIRE(result.nodes[0].extensions.user_data.empty()); +} diff --git a/modules/serialization2/tests/test_serialize_simple_scene.cpp b/modules/serialization2/tests/test_serialize_simple_scene.cpp new file mode 100644 index 00000000..2b521133 --- /dev/null +++ b/modules/serialization2/tests/test_serialize_simple_scene.cpp @@ -0,0 +1,353 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. 
+ */ +#include +#include +#include +#include + +#include + +// Internal headers for corruption tests +#include "../src/CistaSimpleScene.h" +#include "../src/compress.h" + +#include + +#include + +namespace { + +template +lagrange::SurfaceMesh make_test_sphere() +{ + lagrange::primitive::SphereOptions opts; + opts.num_longitude_sections = 8; + opts.num_latitude_sections = 8; + return lagrange::primitive::generate_sphere(opts); +} + +} // namespace + +TEST_CASE("serialization2: empty simple scene", "[serialization2][simple_scene]") +{ + using Scalar = double; + using Index = uint32_t; + lagrange::scene::SimpleScene scene; + + SECTION("uncompressed") + { + lagrange::serialization::SerializeOptions opts; + opts.compress = false; + auto buf = lagrange::serialization::serialize_simple_scene(scene, opts); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); + } + + SECTION("compressed") + { + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); + } +} + +TEST_CASE( + "serialization2: simple scene with meshes and instances", + "[serialization2][simple_scene]") +{ + using Scalar = double; + using Index = uint32_t; + lagrange::scene::SimpleScene scene; + + auto mesh = make_test_sphere(); + Index mesh_idx = scene.add_mesh(std::move(mesh)); + + // Add instances with different transforms + using Transform = Eigen::Transform; + { + lagrange::scene::MeshInstance inst; + inst.mesh_index = mesh_idx; + inst.transform = Transform::Identity(); + scene.add_instance(std::move(inst)); + } + { + lagrange::scene::MeshInstance inst; + inst.mesh_index = mesh_idx; + inst.transform = Transform::Identity(); + inst.transform.translate(Eigen::Matrix(1.0, 2.0, 3.0)); + scene.add_instance(std::move(inst)); 
+ } + { + lagrange::scene::MeshInstance inst; + inst.mesh_index = mesh_idx; + inst.transform = Transform::Identity(); + inst.transform.rotate( + Eigen::AngleAxis(Scalar(0.5), Eigen::Matrix::UnitZ())); + scene.add_instance(std::move(inst)); + } + + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); +} + +TEST_CASE("serialization2: simple scene multiple meshes", "[serialization2][simple_scene]") +{ + using Scalar = float; + using Index = uint32_t; + lagrange::scene::SimpleScene scene; + + for (int k = 0; k < 3; ++k) { + auto mesh = make_test_sphere(); + Index mesh_idx = scene.add_mesh(std::move(mesh)); + + lagrange::scene::MeshInstance inst; + inst.mesh_index = mesh_idx; + inst.transform = Eigen::Transform::Identity(); + inst.transform.translate(Eigen::Matrix(static_cast(k), 0.f, 0.f)); + scene.add_instance(std::move(inst)); + } + + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); +} + +TEST_CASE("serialization2: simple scene 2D", "[serialization2][simple_scene]") +{ + using Scalar = float; + using Index = uint32_t; + constexpr size_t Dim = 2; + lagrange::scene::SimpleScene scene; + + lagrange::SurfaceMesh mesh(2); + mesh.add_vertices(3, {0.f, 0.f, 1.f, 0.f, 0.f, 1.f}); + mesh.add_polygon({0, 1, 2}); + scene.add_mesh(std::move(mesh)); + + lagrange::scene::MeshInstance inst; + inst.mesh_index = 0; + inst.transform = Eigen::Transform::Identity(); + scene.add_instance(std::move(inst)); + + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + 
lagrange::testing::check_simple_scenes_equal(scene, result); +} + +TEST_CASE("serialization2: simple scene all type instantiations", "[serialization2][simple_scene]") +{ + SECTION("float, uint32_t, 3") + { + lagrange::scene::SimpleScene scene; + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); + } + + SECTION("double, uint32_t, 3") + { + lagrange::scene::SimpleScene scene; + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); + } + + SECTION("float, uint64_t, 3") + { + lagrange::scene::SimpleScene scene; + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); + } + + SECTION("double, uint64_t, 3") + { + lagrange::scene::SimpleScene scene; + auto buf = lagrange::serialization::serialize_simple_scene(scene); + auto result = lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf); + lagrange::testing::check_simple_scenes_equal(scene, result); + } +} + +TEST_CASE("serialization2: simple scene type mismatch", "[serialization2][simple_scene]") +{ + using Scalar = float; + using Index = uint32_t; + lagrange::scene::SimpleScene scene; + auto mesh = make_test_sphere(); + scene.add_mesh(std::move(mesh)); + + auto buf = lagrange::serialization::serialize_simple_scene(scene); + + LA_REQUIRE_THROWS((lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf))); + LA_REQUIRE_THROWS((lagrange::serialization::deserialize_simple_scene< + lagrange::scene::SimpleScene>(buf))); +} + 
+namespace { + +/// Helper to serialize a CistaSimpleScene directly (bypassing the public API) so we can construct +/// intentionally malformed buffers for negative testing. +std::vector serialize_raw_cista_simple_scene( + const lagrange::serialization::internal::CistaSimpleScene& cscene) +{ + using namespace lagrange::serialization::internal; + cista::buf> buf; + cista::serialize(buf, cscene); + return std::move(buf.buf_); +} + +} // namespace + +TEST_CASE("serialization2: simple scene corrupted buffer", "[serialization2][simple_scene]") +{ + using namespace lagrange::serialization::internal; + using SceneType = lagrange::scene::SimpleScene; + + // Build a valid CistaSimpleScene with 1 mesh placeholder and 1 instance + CistaSimpleScene cscene; + cscene.version = 1; + cscene.scalar_type_size = sizeof(float); + cscene.index_type_size = sizeof(uint32_t); + cscene.dimension = 3; + + // Add a minimal mesh (empty, but valid enough for the struct) + cscene.meshes.emplace_back(); + + // One mesh with one instance + cscene.instances_per_mesh.push_back(1); + + CistaInstance cinst; + cinst.mesh_index = 0; + constexpr size_t byte_size = 4 * 4 * sizeof(float); // (3+1)^2 * sizeof(float) + cinst.transform_bytes.resize(byte_size); + std::memset(cinst.transform_bytes.data(), 0, byte_size); + cscene.instances.emplace_back(std::move(cinst)); + + SECTION("instances_per_mesh size mismatch") + { + // Add an extra entry to instances_per_mesh so it doesn't match meshes.size() + cscene.instances_per_mesh.push_back(0); + auto buf = serialize_raw_cista_simple_scene(cscene); + LA_REQUIRE_THROWS(lagrange::serialization::deserialize_simple_scene(buf)); + } + + SECTION("instance offset out of bounds") + { + // Claim more instances than actually exist + cscene.instances_per_mesh[0] = 99; + auto buf = serialize_raw_cista_simple_scene(cscene); + LA_REQUIRE_THROWS(lagrange::serialization::deserialize_simple_scene(buf)); + } + + SECTION("total instance count mismatch (trailing instances)") + { + // 
Add extra instances that are not accounted for by instances_per_mesh + CistaInstance extra; + extra.mesh_index = 0; + extra.transform_bytes.resize(byte_size); + std::memset(extra.transform_bytes.data(), 0, byte_size); + cscene.instances.emplace_back(std::move(extra)); + auto buf = serialize_raw_cista_simple_scene(cscene); + LA_REQUIRE_THROWS(lagrange::serialization::deserialize_simple_scene(buf)); + } + + SECTION("transform data size mismatch") + { + // Truncate the transform bytes + cscene.instances[0].transform_bytes.resize(byte_size / 2); + auto buf = serialize_raw_cista_simple_scene(cscene); + LA_REQUIRE_THROWS(lagrange::serialization::deserialize_simple_scene(buf)); + } + + SECTION("cast path: instance offset out of bounds") + { + // Serialize as double/uint32_t, deserialize as float/uint32_t with casting + cscene.scalar_type_size = sizeof(double); + constexpr size_t double_byte_size = 4 * 4 * sizeof(double); + cscene.instances[0].transform_bytes.resize(double_byte_size); + std::memset(cscene.instances[0].transform_bytes.data(), 0, double_byte_size); + cscene.instances_per_mesh[0] = 99; // More instances than exist + auto buf = serialize_raw_cista_simple_scene(cscene); + lagrange::serialization::DeserializeOptions opts; + opts.allow_type_cast = true; + LA_REQUIRE_THROWS(lagrange::serialization::deserialize_simple_scene(buf, opts)); + } + + SECTION("cast path: total instance count mismatch") + { + // Serialize as double/uint32_t with trailing instances + cscene.scalar_type_size = sizeof(double); + constexpr size_t double_byte_size = 4 * 4 * sizeof(double); + cscene.instances[0].transform_bytes.resize(double_byte_size); + std::memset(cscene.instances[0].transform_bytes.data(), 0, double_byte_size); + CistaInstance extra; + extra.mesh_index = 0; + extra.transform_bytes.resize(double_byte_size); + std::memset(extra.transform_bytes.data(), 0, double_byte_size); + cscene.instances.emplace_back(std::move(extra)); + auto buf = serialize_raw_cista_simple_scene(cscene); 
+ lagrange::serialization::DeserializeOptions opts; + opts.allow_type_cast = true; + LA_REQUIRE_THROWS(lagrange::serialization::deserialize_simple_scene(buf, opts)); + } +} + +TEST_CASE("serialization2: simple scene file round-trip", "[serialization2][simple_scene]") +{ + using Scalar = double; + using Index = uint32_t; + lagrange::scene::SimpleScene scene; + + auto mesh = make_test_sphere(); + scene.add_mesh(std::move(mesh)); + + lagrange::scene::MeshInstance inst; + inst.mesh_index = 0; + inst.transform = Eigen::Transform::Identity(); + scene.add_instance(std::move(inst)); + + auto tmp_dir = lagrange::fs::temp_directory_path(); + + SECTION("compressed") + { + auto path = tmp_dir / "test_simple_scene_compressed.lscene"; + lagrange::serialization::save_simple_scene(path, scene); + auto result = lagrange::serialization::load_simple_scene< + lagrange::scene::SimpleScene>(path); + lagrange::testing::check_simple_scenes_equal(scene, result); + lagrange::fs::remove(path); + } + + SECTION("uncompressed") + { + auto path = tmp_dir / "test_simple_scene_uncompressed.lscene"; + lagrange::serialization::SerializeOptions opts; + opts.compress = false; + lagrange::serialization::save_simple_scene(path, scene, opts); + auto result = lagrange::serialization::load_simple_scene< + lagrange::scene::SimpleScene>(path); + lagrange::testing::check_simple_scenes_equal(scene, result); + lagrange::fs::remove(path); + } +} diff --git a/modules/subdivision/examples/mesh_subdivision.cpp b/modules/subdivision/examples/mesh_subdivision.cpp index 22897740..85b143c6 100644 --- a/modules/subdivision/examples/mesh_subdivision.cpp +++ b/modules/subdivision/examples/mesh_subdivision.cpp @@ -71,14 +71,31 @@ int main(int argc, char** argv) "Project vertex attributes to the limit surface"); app.add_option("--refinement", subdivision_options.refinement, "Mesh refinement method") ->transform(CLI::CheckedTransformer(map, CLI::ignore_case)); - app.add_option( + auto edge_length_option = app.add_option( 
"--edge-length", subdivision_options.max_edge_length, "Max edge length target for adaptive refinement"); + auto chordal_deviation_option = app.add_option( + "--chordal-deviation", + subdivision_options.max_chordal_deviation, + "Max chordal deviation for adaptive refinement"); + edge_length_option->excludes(chordal_deviation_option); + chordal_deviation_option->excludes(edge_length_option); app.add_flag("--normal", args.output_btn, "Compute limit normal as a vertex attribute"); app.add_option("-l,--level", args.log_level, "Log level (0 = most verbose, 6 = off)."); CLI11_PARSE(app, argc, argv); + if ((*edge_length_option || *chordal_deviation_option) && + subdivision_options.refinement != lagrange::subdivision::RefinementType::EdgeAdaptive) { + lagrange::logger().error( + "--edge-length and --chordal-deviation require --refinement EdgeAdaptive: {} (Uniform: " + "{}, EdgeAdaptive: {})", + static_cast(subdivision_options.refinement), + static_cast(lagrange::subdivision::RefinementType::Uniform), + static_cast(lagrange::subdivision::RefinementType::EdgeAdaptive)); + return 1; + } + args.log_level = std::max(0, std::min(6, args.log_level)); spdlog::set_level(static_cast(args.log_level)); diff --git a/modules/subdivision/include/lagrange/subdivision/mesh_subdivision.h b/modules/subdivision/include/lagrange/subdivision/mesh_subdivision.h index 229676cb..19988112 100644 --- a/modules/subdivision/include/lagrange/subdivision/mesh_subdivision.h +++ b/modules/subdivision/include/lagrange/subdivision/mesh_subdivision.h @@ -128,7 +128,7 @@ class InterpolatedAttributes /// cannot be interpolated (because of an incompatible value type or element type). /// /// @param[in] smooth Per-vertex or indexed attribute ids to smoothly interpolate. - /// @param[in] linear Per-vertex attribute ids to smoothly interpolate. + /// @param[in] linear Per-vertex attribute ids to linearly interpolate. /// /// @return The interpolated attributes configuration. 
/// @@ -150,10 +150,10 @@ class InterpolatedAttributes void set_none() { *this = none(); } /// - /// Set selection to a specific list of attribte ids. + /// Set selection to a specific list of attribute ids. /// /// @param[in] smooth Per-vertex or indexed attribute ids to smoothly interpolate. - /// @param[in] linear Per-vertex attribute ids to smoothly interpolate. + /// @param[in] linear Per-vertex attribute ids to linearly interpolate. /// void set_selected(std::vector smooth, std::vector linear = {}) { @@ -199,10 +199,29 @@ struct SubdivisionOptions /// @name Adaptive tessellation options /// @{ + /// /// Maximum edge length for adaptive tessellation. If not specified, it is set to the longest /// edge length divided by num_levels. + /// + /// @note Mutually exclusive with max_chordal_deviation. + /// std::optional max_edge_length; + /// + /// Maximum chordal deviation for adaptive tessellation. This controls the maximum distance + /// between the limit surface and its piecewise-linear approximation. For each edge, the peak + /// deviation is found using Newton's method on the squared-distance function, and the + /// tessellation rate is chosen so that all sub-segment deviations stay below this threshold. + /// Rates are defined intrinsically per edge (shared consistently across adjacent faces). + /// + /// @note This option is only supported for meshes with 3D vertex positions; using it with + /// other dimensionalities will trigger runtime assertions. The value must be + /// strictly positive (greater than zero). + /// + /// @note Mutually exclusive with max_edge_length. 
+ /// + std::optional max_chordal_deviation; + /// @} /// @name Interpolation Rules /// @{ diff --git a/modules/subdivision/python/src/subdivision.cpp b/modules/subdivision/python/src/subdivision.cpp index 8c52c4f7..9e6ff499 100644 --- a/modules/subdivision/python/src/subdivision.cpp +++ b/modules/subdivision/python/src/subdivision.cpp @@ -143,6 +143,7 @@ void populate_subdivision_module(nb::module_& m) std::optional scheme, bool adaptive, std::optional max_edge_length, + std::optional max_chordal_deviation, lagrange::subdivision::VertexBoundaryInterpolation vertex_boundary_interpolation, lagrange::subdivision::FaceVaryingInterpolation face_varying_interpolation, bool use_limit_surface, @@ -163,6 +164,9 @@ void populate_subdivision_module(nb::module_& m) if (max_edge_length.has_value()) { options.max_edge_length = max_edge_length.value(); } + if (max_chordal_deviation.has_value()) { + options.max_chordal_deviation = max_chordal_deviation.value(); + } } options.num_levels = num_levels; options.vertex_boundary_interpolation = vertex_boundary_interpolation; @@ -196,6 +200,7 @@ void populate_subdivision_module(nb::module_& m) "scheme"_a = nb::none(), "adaptive"_a = false, "max_edge_length"_a = nb::none(), + "max_chordal_deviation"_a = nb::none(), "vertex_boundary_interpolation"_a = SubdivOptions{}.vertex_boundary_interpolation, "face_varying_interpolation"_a = SubdivOptions{}.face_varying_interpolation, "use_limit_surface"_a = SubdivOptions{}.use_limit_surface, @@ -215,7 +220,8 @@ void populate_subdivision_module(nb::module_& m) :param num_levels: The number of levels of subdivision to apply. :param scheme: Subdivision scheme. If None, uses Loop for triangle meshes and CatmullClark for quad-dominant meshes. :param adaptive: Whether to use edge-adaptive refinement. -:param max_edge_length: Maximum edge length for adaptive refinement. If None, uses longest edge / num_levels. Ignored when adaptive is False. 
+:param max_edge_length: Maximum edge length for adaptive refinement. If None, uses longest edge / num_levels. Ignored when adaptive is False. Mutually exclusive with max_chordal_deviation. +:param max_chordal_deviation: Maximum chordal deviation for adaptive refinement. This controls the maximum distance between the limit surface and its piecewise-linear approximation. For each edge, the peak deviation is found using Newton's method on the squared-distance function, and the tessellation rate is chosen so that all sub-segment deviations stay below this threshold. Only supported for meshes with 3D vertex positions. The value must be strictly positive. Ignored when adaptive is False. Mutually exclusive with max_edge_length. :param vertex_boundary_interpolation: Vertex boundary interpolation rule. :param face_varying_interpolation: Face-varying interpolation rule. :param use_limit_surface: Interpolate all data to the limit surface. diff --git a/modules/subdivision/python/tests/test_mesh_subdivision.py b/modules/subdivision/python/tests/test_mesh_subdivision.py index 00d70092..02773a2f 100644 --- a/modules/subdivision/python/tests/test_mesh_subdivision.py +++ b/modules/subdivision/python/tests/test_mesh_subdivision.py @@ -10,48 +10,69 @@ # governing permissions and limitations under the License. 
# import lagrange - -import pytest import numpy as np -@pytest.fixture -def cube(): - vertices = np.array( - [ - [0, 0, 0], - [1, 0, 0], - [1, 1, 0], - [0, 1, 0], - [0, 0, 1], - [1, 0, 1], - [1, 1, 1], - [0, 1, 1], - ], - dtype=float, - ) - facets = np.array( - [ - [0, 3, 2, 1], - [4, 5, 6, 7], - [1, 2, 6, 5], - [4, 7, 3, 0], - [2, 3, 7, 6], - [0, 1, 5, 4], - ], - dtype=np.uint32, - ) - mesh = lagrange.SurfaceMesh() - mesh.vertices = vertices - mesh.facets = facets - return mesh +class TestMeshSubdivision: + def test_cube_passthrough(self, cube): + mesh = lagrange.subdivision.subdivide_mesh( + cube, + num_levels=0, + ) + all_zeros = (np.abs(mesh.vertices) < 1e-6).all() + assert not all_zeros + def test_cube_passthrough_limit(self, cube): + mesh = lagrange.subdivision.subdivide_mesh( + cube, + num_levels=0, + use_limit_surface=True, + ) + all_zeros = (np.abs(mesh.vertices) < 1e-6).all() + assert not all_zeros -class TestMeshSubdivision: - def test_basic(self, cube): + def test_house_passthrough(self, house): + assert house.is_attribute_indexed("texcoord_0") + mesh = lagrange.subdivision.subdivide_mesh( + house, + num_levels=0, + ) + all_zeros = (np.abs(mesh.vertices) < 1e-6).all() + assert not all_zeros + assert mesh.is_attribute_indexed("texcoord_0") + + def test_house_passthrough_limit(self, house): + assert house.is_attribute_indexed("texcoord_0") + mesh = lagrange.subdivision.subdivide_mesh( + house, + num_levels=0, + use_limit_surface=True, + ) + all_zeros = (np.abs(mesh.vertices) < 1e-6).all() + assert not all_zeros + assert mesh.is_attribute_indexed("texcoord_0") + + def test_cube_twice(self, cube): num_levels = 2 vert_id, edge_id, normal_id = lagrange.subdivision.compute_sharpness(cube) mesh = lagrange.subdivision.subdivide_mesh( - cube, num_levels=num_levels, vertex_sharpness_attr=vert_id, edge_sharpness_attr=edge_id + cube, + num_levels=num_levels, + vertex_sharpness_attr=vert_id, + edge_sharpness_attr=edge_id, ) assert mesh.num_facets == cube.num_corners 
* 4 ** (num_levels - 1) + + def test_house_twice(self, house): + assert house.is_attribute_indexed("texcoord_0") + mesh = lagrange.subdivision.subdivide_mesh( + house, + num_levels=2, + ) + all_zeros = (np.abs(mesh.vertices) < 1e-6).all() + assert not all_zeros + texcoord_id = mesh.get_attribute_id("texcoord_0") + assert mesh.is_attribute_indexed(texcoord_id) + texcoord_attr = mesh.indexed_attribute(texcoord_id) + assert np.abs(texcoord_attr.indices.data - mesh.facets.flatten()).max() == 0 + assert np.abs(texcoord_attr.values.data - mesh.vertices[:, :2]).max() < 1e-6 diff --git a/modules/subdivision/src/TopologyRefinerFactory.h b/modules/subdivision/src/TopologyRefinerFactory.h index 2a572e44..3b8d15ae 100644 --- a/modules/subdivision/src/TopologyRefinerFactory.h +++ b/modules/subdivision/src/TopologyRefinerFactory.h @@ -169,7 +169,7 @@ bool TopologyRefinerFactory::assignComponentTags( auto values = attr.get_all(); for (int f = 0; f < static_cast(values.size()); ++f) { if (values[f] != ValueType(0)) { - lagrange::logger().warn("Setting facet f{} as a hole", f); + lagrange::logger().debug("Setting facet f{} as a hole", f); setBaseFaceHole(refiner, f, true); } } diff --git a/modules/subdivision/src/subdivide_adaptive.cpp b/modules/subdivision/src/subdivide_adaptive.cpp index 5d4441d9..ee72c936 100644 --- a/modules/subdivision/src/subdivide_adaptive.cpp +++ b/modules/subdivision/src/subdivide_adaptive.cpp @@ -553,6 +553,236 @@ void eval_patch_btn( } } +/// +/// Compute per-edge tessellation rates for a face based on chordal deviation (3D only). +/// +/// For each edge, the algorithm proceeds in three steps: +/// +/// 1. **Peak detection (Newton's method):** Finds the parameter t that maximizes the squared +/// deviation g(t) = ||L(t) - S(t)||² between the linear interpolation L(t) of the corner limit +/// positions and the limit surface S(t). 
Newton iterations solve g'(t) = 0 using the gradient +/// and Hessian of g, computed from 1st and 2nd derivatives of the surface via the chain rule. +/// The peak squared deviation from Newton is compared against the squared midpoint deviation (t +/// = 0.5), and the maximum is taken as the edge's peak squared deviation. +/// +/// 2. **Initial rate estimate:** An initial tessellation rate is computed from the peak deviation +/// using the 1/n² scaling model: rate = ceil(sqrt(peak_deviation / max_deviation)). +/// +/// 3. **Verification:** The rate is verified by evaluating the limit surface at sub-segment +/// midpoints. For rate n, sub-segment k spans [k/n, (k+1)/n]; the deviation at each midpoint is +/// measured against the linear interpolation of the limit surface at the sub-segment endpoints +/// (not the corner positions). The S(t_hi) evaluation from segment k is reused as S(t_lo) for +/// segment k+1 to reduce the number of surface evaluations. If any sub-segment exceeds the +/// tolerance, the rate is incremented and re-verified. +/// +/// @note Per-edge rates depend only on the edge itself (not on the face), because two +/// adjacent faces sharing an edge must agree on its tessellation rate. Edge deviations +/// are intrinsically consistent across adjacent faces since the limit surface is +/// continuous. 
+/// +template +void get_facet_tess_rates_chordal( + OpenSubdiv::Bfr::Surface& facet_surface, + span patch_values_in, + span corner_positions, + Scalar max_deviation, + int tess_rate_max, + int* edge_rates) +{ + la_debug_assert(max_deviation > Scalar(0), "max_chordal_deviation must be positive"); + + constexpr int dimension = 3; + using Vector3s = Eigen::Vector3; + using ConstMap3s = Eigen::Map; + + int N = facet_surface.GetFaceSize(); + OpenSubdiv::Bfr::Parameterization face_param = facet_surface.GetParameterization(); + + Vector3s S; // limit surface position + Vector3s dSdu; // dS/du + Vector3s dSdv; // dS/dv + Vector3s d2Sdu2; // d²S/du² + Vector3s d2Sduv; // d²S/dudv + Vector3s d2Sdv2; // d²S/dv² + + // Compute the edge UV direction: d(uv)/dt is constant along each edge since GetEdgeCoord + // is (piecewise) linear. For QUAD_SUBFACES parameterizations the mapping is piecewise linear + // with breaks at sub-face boundaries, so we identify which sub-face t lies in and compute the + // derivative within that linear segment. + auto get_edge_uv_dir = [&](int edge_idx, Scalar t) { + Scalar uv0[2], uv1[2]; + if (!face_param.HasSubFaces()) { + // For QUAD and TRI, GetEdgeCoord is linear: d(uv)/dt is constant. + face_param.GetEdgeCoord(edge_idx, Scalar(0), uv0); + face_param.GetEdgeCoord(edge_idx, Scalar(1), uv1); + return std::make_pair(uv1[0] - uv0[0], uv1[1] - uv0[1]); + } + // For QUAD_SUBFACES, the edge parameterization is piecewise-linear across sub-faces. + // Use GetSubFace to determine which sub-face t lies in, then compute d(uv)/dt from + // two sample points guaranteed to be within the same linear segment. + Scalar uv_t[2]; + face_param.GetEdgeCoord(edge_idx, t, uv_t); + int sub_face_t = face_param.GetSubFace(uv_t); + + face_param.GetEdgeCoord(edge_idx, Scalar(0), uv0); + if (face_param.GetSubFace(uv0) == sub_face_t) { + // t is in the first sub-face segment (same as edge start). 
+ face_param.GetEdgeCoord(edge_idx, Scalar(0.25), uv1); + return std::make_pair((uv1[0] - uv0[0]) * Scalar(4), (uv1[1] - uv0[1]) * Scalar(4)); + } else { + // t is in the second sub-face segment (same as edge end). + face_param.GetEdgeCoord(edge_idx, Scalar(0.75), uv0); + face_param.GetEdgeCoord(edge_idx, Scalar(1), uv1); + return std::make_pair((uv1[0] - uv0[0]) * Scalar(4), (uv1[1] - uv0[1]) * Scalar(4)); + } + }; + + // Evaluate the squared deviation g(t) = ||L(t) - S(t)||^2 and its derivatives g'(t), g''(t). + // L(t) is the linear interpolation of corner positions, S(t) is the limit surface. + // e(t) = L(t) - S(t), so: + // g'(t) = 2 * e · e' + // g''(t) = 2 * (e' · e' + e · e'') + // where e'(t) = dL/dt - dS/dt and e''(t) = -d²S/dt². + auto eval_sq_deviation = + [&](int edge_idx, int next_idx, Scalar t, Scalar& g, Scalar& gp, Scalar& gpp) { + Scalar uv[2]; + face_param.GetEdgeCoord(edge_idx, t, uv); + facet_surface.Evaluate( + uv, + patch_values_in.data(), + dimension, + S.data(), + dSdu.data(), + dSdv.data(), + d2Sdu2.data(), + d2Sduv.data(), + d2Sdv2.data()); + + auto [du_dt, dv_dt] = get_edge_uv_dir(edge_idx, t); + + ConstMap3s P0(&corner_positions[edge_idx * dimension]); + ConstMap3s P1(&corner_positions[next_idx * dimension]); + Vector3s L_vec = P0 * (Scalar(1) - t) + P1 * t; + Vector3s dL_dt = P1 - P0; + + Vector3s dS_dt = dSdu * du_dt + dSdv * dv_dt; + Vector3s d2S_dt2 = d2Sdu2 * (du_dt * du_dt) + d2Sduv * (Scalar(2) * du_dt * dv_dt) + + d2Sdv2 * (dv_dt * dv_dt); + + Vector3s e = L_vec - S; + Vector3s ep = dL_dt - dS_dt; + Vector3s epp = -d2S_dt2; + + g = e.squaredNorm(); + gp = Scalar(2) * e.dot(ep); + gpp = Scalar(2) * (ep.squaredNorm() + e.dot(epp)); + }; + + // Evaluate the squared chordal deviation at parameter t (position-only, no derivatives needed). 
+ auto sq_deviation_at = [&](int edge_idx, int next_idx, Scalar t) -> Scalar { + Scalar uv[2]; + face_param.GetEdgeCoord(edge_idx, t, uv); + facet_surface.Evaluate(uv, patch_values_in.data(), dimension, S.data()); + ConstMap3s P0(&corner_positions[edge_idx * dimension]); + ConstMap3s P1(&corner_positions[next_idx * dimension]); + return (S - (P0 * (Scalar(1) - t) + P1 * t)).squaredNorm(); + }; + + // Check if all sub-segments at the given rate have deviation <= max_deviation. + // For rate n, sub-segment k spans [k/n, (k+1)/n]. The sub-segment's "linear approximation" + // interpolates the limit surface at its endpoints. We check the midpoint of each sub-segment. + auto verify_rate = [&](int edge_idx, int rate) -> bool { + Scalar max_dev_sq = max_deviation * max_deviation; + Vector3s S_mid; + Vector3s S_lo; + Vector3s S_hi; + + // Evaluate S at t=0 for the first segment + Scalar uv_lo[2]; + face_param.GetEdgeCoord(edge_idx, Scalar(0), uv_lo); + facet_surface.Evaluate(uv_lo, patch_values_in.data(), dimension, S_lo.data()); + + for (int k = 0; k < rate; ++k) { + Scalar t_hi = Scalar(k + 1) / Scalar(rate); + Scalar t_mid = (Scalar(k) / Scalar(rate) + t_hi) * Scalar(0.5); + + Scalar uv_hi[2], uv_mid[2]; + face_param.GetEdgeCoord(edge_idx, t_hi, uv_hi); + face_param.GetEdgeCoord(edge_idx, t_mid, uv_mid); + + facet_surface.Evaluate(uv_hi, patch_values_in.data(), dimension, S_hi.data()); + facet_surface.Evaluate(uv_mid, patch_values_in.data(), dimension, S_mid.data()); + + // Sub-segment midpoint deviation: distance from S(t_mid) to linear interp of + // S(t_lo) and S(t_hi) + Scalar dev_sq = (S_mid - (S_lo + S_hi) * Scalar(0.5)).squaredNorm(); + if (dev_sq > max_dev_sq) return false; + + std::swap(S_lo, S_hi); + } + return true; + }; + + constexpr int num_newton_iters = 4; + + // Run Newton's method on [t_lo, t_hi] to find peak squared deviation g(t). + // Returns the maximum g value encountered during the iterations. 
+ auto newton_peak = [&](int edge_idx, int next_idx, Scalar t_lo, Scalar t_hi) { + Scalar g_best = Scalar(0); + Scalar t = (t_lo + t_hi) * Scalar(0.5); + for (int iter = 0; iter < num_newton_iters; ++iter) { + Scalar g, gp, gpp; + eval_sq_deviation(edge_idx, next_idx, t, g, gp, gpp); + g_best = std::max(g_best, g); + + if (std::abs(gpp) > std::abs(gp) * Scalar(1e-10)) { + t -= gp / gpp; + } + t = std::max(t_lo, std::min(t_hi, t)); + } + return g_best; + }; + + bool has_sub_faces = face_param.HasSubFaces(); + + for (int i = 0; i < N; ++i) { + int j = (i + 1) % N; + + // Step 1: Find the peak deviation along the coarse edge using Newton's method. + // Maximize g(t) = ||L(t) - S(t)||^2 by solving g'(t) = 0. + // + // For QUAD_SUBFACES, the UV parameterization has a discontinuity at t=0.5 (the + // 3D surface is continuous but derivatives are not). Run Newton separately on each + // half to avoid crossing the discontinuity, and also sample at the quarter-points. + Scalar peak_sq_dev; + if (has_sub_faces) { + Scalar g1 = newton_peak(i, j, Scalar(0), Scalar(0.5)); + Scalar g2 = newton_peak(i, j, Scalar(0.5), Scalar(1)); + Scalar sq_q1 = sq_deviation_at(i, j, Scalar(0.25)); + Scalar sq_q2 = sq_deviation_at(i, j, Scalar(0.75)); + peak_sq_dev = std::max({g1, g2, sq_q1, sq_q2}); + } else { + Scalar g_newton = newton_peak(i, j, Scalar(0), Scalar(1)); + Scalar sq_dev_mid = sq_deviation_at(i, j, Scalar(0.5)); + peak_sq_dev = std::max(g_newton, sq_dev_mid); + } + Scalar edge_deviation = std::sqrt(peak_sq_dev); + + // Step 2: Compute initial rate estimate from 1/n² scaling model. + int rate = 1; + if (edge_deviation > max_deviation) { + rate = static_cast(std::ceil(std::sqrt(edge_deviation / max_deviation))); + } + + // Step 3: Verify by checking sub-segment midpoint deviations. Increment if needed. 
+ while (rate < tess_rate_max && !verify_rate(i, rate)) { + ++rate; + } + + edge_rates[i] = std::min(rate, tess_rate_max); + } +} + template void compute_facet_tess_rates( const OpenSubdiv::Far::TopologyRefiner& mesh_topology, @@ -565,6 +795,7 @@ void compute_facet_tess_rates( bool use_limit_positions, Scalar tess_interval, int tess_rate_max, + std::optional max_chordal_deviation, std::vector& facet_tess_rates) { // @@ -598,17 +829,9 @@ void compute_facet_tess_rates( patch_values_out.resize(N * dimension); - if (!use_limit_positions) { - OpenSubdiv::Far::ConstIndexArray verts = - mesh_topology.GetLevel(0).GetFaceVertices(face_index); - - for (int i = 0, j = 0; i < N; ++i, j += dimension) { - const Scalar* v_pos = &mesh_vertex_positions[verts[i] * dimension]; - patch_values_out[j] = v_pos[0]; - patch_values_out[j + 1] = v_pos[1]; - patch_values_out[j + 2] = v_pos[2]; - } - } else { + // Evaluate limit positions at corners (needed for chordal deviation mode, and + // optionally for edge-length mode when use_limit_positions is true). + if (use_limit_positions || max_chordal_deviation.has_value()) { OpenSubdiv::Bfr::Parameterization face_param = facet_surface.GetParameterization(); for (int i = 0, j = 0; i < N; ++i, j += dimension) { @@ -619,12 +842,38 @@ void compute_facet_tess_rates( } facet_tess_rates.resize(N); - get_edge_tess_rates( - patch_values_out, - dimension, - tess_interval, - tess_rate_max, - facet_tess_rates.data()); + + if (max_chordal_deviation.has_value()) { + // Chordal deviation mode: rates based on max distance between limit surface and + // piecewise-linear approximation, measured at edge midpoints. 
+ get_facet_tess_rates_chordal( + facet_surface, + patch_values_in, + patch_values_out, + max_chordal_deviation.value(), + tess_rate_max, + facet_tess_rates.data()); + } else { + // Edge-length mode: optionally use control hull positions instead of limit positions + if (!use_limit_positions) { + OpenSubdiv::Far::ConstIndexArray verts = + mesh_topology.GetLevel(0).GetFaceVertices(face_index); + + for (int i = 0, j = 0; i < N; ++i, j += dimension) { + const Scalar* v_pos = &mesh_vertex_positions[verts[i] * dimension]; + patch_values_out[j] = v_pos[0]; + patch_values_out[j + 1] = v_pos[1]; + patch_values_out[j + 2] = v_pos[2]; + } + } + + get_edge_tess_rates( + patch_values_out, + dimension, + tess_interval, + tess_rate_max, + facet_tess_rates.data()); + } } using FVarId = OpenSubdiv::Bfr::SurfaceFactory::FVarID; @@ -710,6 +959,7 @@ void interpolate_attributes( bool use_limit_positions, Scalar tess_interval, int tess_rate_max, + std::optional max_chordal_deviation, bool preserve_shared_indices) { const bool need_limit_btn = @@ -1033,6 +1283,7 @@ void interpolate_attributes( use_limit_positions, tess_interval, tess_rate_max, + max_chordal_deviation, tmp.facet_tess_rates); // Interpolate all attributes. 
The first attribute in this list is the vertex position, and @@ -1109,6 +1360,7 @@ SurfaceMesh extract_adaptive_mesh_topology( bool use_limit_positions, Scalar tess_interval, int tess_rate_max, + std::optional max_chordal_deviation, bool preserve_shared_indices) { // @@ -1190,6 +1442,7 @@ SurfaceMesh extract_adaptive_mesh_topology( use_limit_positions, tess_interval, tess_rate_max, + max_chordal_deviation, preserve_shared_indices); return tessellated_mesh; @@ -1212,28 +1465,46 @@ SurfaceMesh subdivide_edge_adaptive( "warning, please set 'use_limit_surface' to 'true' in your subdivision options."); } + la_runtime_assert( + !(options.max_edge_length.has_value() && options.max_chordal_deviation.has_value()), + "max_edge_length and max_chordal_deviation are mutually exclusive"); + la_runtime_assert( + !options.max_chordal_deviation.has_value() || options.max_chordal_deviation.value() > 0, + "max_chordal_deviation must be positive"); + la_runtime_assert( + !options.max_chordal_deviation.has_value() || input_mesh.get_dimension() == 3, + "Chordal deviation only supported for 3D meshes"); + // Extract mesh facet topology bool output_quads = !input_mesh.is_triangle_mesh(); bool use_limit_positions = true; - Scalar tess_interval; + Scalar tess_interval = 1; // Default; unused in chordal deviation mode + + // Chordal deviation mode + std::optional max_chordal_deviation; + if (options.max_chordal_deviation.has_value()) { + max_chordal_deviation = static_cast(options.max_chordal_deviation.value()); + } // Only limit max edge tessellation if no target edge length is specified int tess_rate_max = - (options.max_edge_length.has_value() ? std::numeric_limits::max() - : std::max(1u, options.num_levels)); + ((options.max_edge_length.has_value() || max_chordal_deviation.has_value()) + ? std::numeric_limits::max() + : std::max(1u, options.num_levels)); logger().debug("Output quads? 
{}", output_quads); if (options.max_edge_length.has_value()) { tess_interval = options.max_edge_length.value(); - } else { + } else if (!max_chordal_deviation.has_value()) { auto [min_len, max_len, avg_len] = find_min_max_avg_edges( topology_refiner, input_mesh.get_vertex_to_position().get_all(), input_mesh.get_dimension()); tess_interval = max_len / static_cast(tess_rate_max); logger().info( - "Adaptive tessellation.\n\t- Max edge len: {},\n\t- Min edge len: {},\n\t- Avg edge " + "Adaptive tessellation.\n\t- Max edge len: {},\n\t- Min edge len: {},\n\t- Avg " + "edge " "len: {},\n\t- Max rate: {},\n\t- Tess interval: {}", max_len, min_len, @@ -1252,6 +1523,7 @@ SurfaceMesh subdivide_edge_adaptive( use_limit_positions, tess_interval, tess_rate_max, + max_chordal_deviation, options.preserve_shared_indices); return output_mesh; diff --git a/modules/subdivision/src/subdivide_uniform.cpp b/modules/subdivision/src/subdivide_uniform.cpp index fa6d774e..7e62eb77 100644 --- a/modules/subdivision/src/subdivide_uniform.cpp +++ b/modules/subdivision/src/subdivide_uniform.cpp @@ -178,6 +178,15 @@ void interpolate_vertex_attribute( } src = dst; } + if (!need_limit && num_refined_levels == 1) { + // No refinement and no limit projection: copy source directly to output. 
+ for (size_t i = 0; i < target_attr.get_num_elements(); ++i) { + const auto& s = source_attr.get_row(i); + auto t = target_attr.ref_row(i); + std::copy(s.begin(), s.end(), t.begin()); + } + return; + } if (need_limit_btn) { // Project the vertex positions to the limit surface and compute derivatives const auto& last_level = topology_refiner.GetLevel(num_refined_levels - 1); @@ -271,10 +280,20 @@ void interpolate_indexed_attribute_values( src = dst; } if (limit) { - // Project the last level interpolated data to the limit surface - Vertex* dst = - src + topology_refiner.GetLevel(num_refined_levels - 1).GetNumFVarValues(fvar_index); - primvar_refiner.LimitFaceVarying(src, dst, fvar_index); + // Project the last level interpolated data to the limit surface. + // Note: LimitFaceVarying requires at least one level of refinement. + if (num_refined_levels > 1) { + Vertex* dst = + src + + topology_refiner.GetLevel(num_refined_levels - 1).GetNumFVarValues(fvar_index); + primvar_refiner.LimitFaceVarying(src, dst, fvar_index); + } else { + // No refinement: copy the source values directly to the output. + Vertex* dst = src + topology_refiner.GetLevel(0).GetNumFVarValues(fvar_index); + for (int i = 0; i < topology_refiner.GetLevel(0).GetNumFVarValues(fvar_index); ++i) { + std::copy(src[i].values.begin(), src[i].values.end(), dst[i].values.begin()); + } + } } } @@ -305,8 +324,10 @@ SurfaceMesh subdivide_uniform( topology_refiner.RefineUniform(uniform_options); } - // Adaptive refinement may result in fewer levels than the max specified. 
+ // Check number of refinement levels int num_refined_levels = topology_refiner.GetNumLevels(); + la_debug_assert(num_refined_levels == options.num_levels + 1); + la_debug_assert(num_refined_levels >= 1); // Extract mesh facet topology SurfaceMesh output_mesh = extract_uniform_mesh_topology( diff --git a/modules/subdivision/tests/CMakeLists.txt b/modules/subdivision/tests/CMakeLists.txt index 568dde1d..1bbf77a6 100644 --- a/modules/subdivision/tests/CMakeLists.txt +++ b/modules/subdivision/tests/CMakeLists.txt @@ -10,3 +10,5 @@ # governing permissions and limitations under the License. # lagrange_add_test() +lagrange_include_modules(bvh) +target_link_libraries(test_lagrange_subdivision PRIVATE lagrange::bvh) diff --git a/modules/subdivision/tests/test_mesh_subdivision.cpp b/modules/subdivision/tests/test_mesh_subdivision.cpp index e51c7252..4430314b 100644 --- a/modules/subdivision/tests/test_mesh_subdivision.cpp +++ b/modules/subdivision/tests/test_mesh_subdivision.cpp @@ -22,15 +22,20 @@ #include #include #include +#include #include #include #include #include #include +#include #include + +#include #include #include #include +#include #include @@ -163,6 +168,9 @@ TEST_CASE("mesh_subdivision", "[mesh][subdivision]" LA_SLOW_DEBUG_FLAG) options.refinement = refinement; options.num_levels = level; options.validate_topology = true; + if (refinement == lagrange::subdivision::RefinementType::EdgeAdaptive) { + options.use_limit_surface = true; + } auto mesh = lagrange::testing::load_surface_mesh(filename); if (scheme == lagrange::subdivision::SchemeType::Loop && @@ -296,7 +304,6 @@ TEST_CASE("mesh_subdivision_limit_uniform", "[mesh][subdivision]" LA_SLOW_DEBUG_ options.output_limit_tangents = "tangent"; options.output_limit_bitangents = "bitangent"; auto limit_mesh = lagrange::subdivision::subdivide_mesh(mesh, options); - lagrange::io::save_mesh("limit_uniform.obj", limit_mesh); // Check limit positions auto V_refined = vertex_view(refined_mesh); @@ -557,6 +564,74 @@ 
TEST_CASE("mesh_subdivision_empty", "[mesh][subdivision]") } } +TEST_CASE("mesh_subdivision_zero_levels", "[mesh][subdivision]") +{ + using Scalar = double; + using Index = uint32_t; + auto mesh = lagrange::testing::load_surface_mesh("open/subdivision/cube.obj"); + + lagrange::subdivision::SubdivisionOptions options; + options.num_levels = 0; + + SECTION("without limit surface") + { + auto result = lagrange::subdivision::subdivide_mesh(mesh, options); + REQUIRE(result.get_num_vertices() == mesh.get_num_vertices()); + REQUIRE(result.get_num_facets() == mesh.get_num_facets()); + + // Verify vertex positions are preserved (not all zeros) + auto V_input = vertex_view(mesh); + auto V_result = vertex_view(result); + REQUIRE(V_input.isApprox(V_result)); + } + + SECTION("with limit surface") + { + options.use_limit_surface = true; + auto result = lagrange::subdivision::subdivide_mesh(mesh, options); + REQUIRE(result.get_num_vertices() == mesh.get_num_vertices()); + REQUIRE(result.get_num_facets() == mesh.get_num_facets()); + + // Limit positions should differ from the original (shrinkage towards limit surface) + auto V_input = vertex_view(mesh); + auto V_result = vertex_view(result); + REQUIRE(!V_result.isZero()); + bool any_different = false; + for (Index v = 0; v < mesh.get_num_vertices(); ++v) { + if ((V_input.row(v) - V_result.row(v)).norm() > 1e-10) { + any_different = true; + break; + } + } + REQUIRE(any_different); + } + + SECTION("with limit surface and face-varying attributes") + { + // This tests the LimitFaceVarying workaround at level 0 + options.use_limit_surface = true; + auto nrm_id = lagrange::compute_normal(mesh, lagrange::internal::pi * 0.5); + std::string nrm_name(mesh.get_attribute_name(nrm_id)); + auto result = lagrange::subdivision::subdivide_mesh(mesh, options); + REQUIRE(result.has_attribute(nrm_name)); + REQUIRE(result.get_num_vertices() == mesh.get_num_vertices()); + REQUIRE(result.get_num_facets() == mesh.get_num_facets()); + } + + SECTION("with 
limit normals") + { + // This tests the LimitFaceVarying workaround at level 0 + options.use_limit_surface = true; + options.output_limit_normals = "@limit_normal"; + auto nrm_id = lagrange::compute_normal(mesh, lagrange::internal::pi * 0.5); + std::string nrm_name(mesh.get_attribute_name(nrm_id)); + auto result = lagrange::subdivision::subdivide_mesh(mesh, options); + REQUIRE(result.has_attribute(nrm_name)); + REQUIRE(result.get_num_vertices() == mesh.get_num_vertices()); + REQUIRE(result.get_num_facets() == mesh.get_num_facets()); + } +} + TEST_CASE("compute_sharpness", "[mesh][subdivision][sharpness]") { using Scalar = double; @@ -769,3 +844,303 @@ TEST_CASE("compute_sharpness", "[mesh][subdivision][sharpness]") REQUIRE(vertex_sharpness.size() == mesh.get_num_vertices()); } } + +namespace { + +template +lagrange::subdivision::SubdivisionOptions prepare_subdiv_mesh( + lagrange::SurfaceMesh& mesh) +{ + // Compute sharpness based on input normals + lagrange::AttributeMatcher matcher; + matcher.element_types = lagrange::AttributeElement::Indexed; + matcher.usages = lagrange::AttributeUsage::Normal; + lagrange::AttributeId nrm_id = lagrange::find_matching_attribute(mesh, matcher).value(); + lagrange::WeldOptions weld_options; + weld_options.epsilon_abs = 1e-3; + weld_options.epsilon_rel = 1e-3; + lagrange::weld_indexed_attribute(mesh, nrm_id, weld_options); + + lagrange::subdivision::SharpnessOptions sharpness_options; + sharpness_options.normal_attribute_name = mesh.get_attribute_name(nrm_id); + auto sharpness_results = lagrange::subdivision::compute_sharpness(mesh, sharpness_options); + REQUIRE(sharpness_results.normal_attr.has_value()); + mesh.delete_attribute(sharpness_results.normal_attr.value()); + + lagrange::subdivision::SubdivisionOptions subdiv_options; + subdiv_options.refinement = lagrange::subdivision::RefinementType::EdgeAdaptive; + subdiv_options.use_limit_surface = true; + + REQUIRE(sharpness_results.edge_sharpness_attr.has_value()); + 
subdiv_options.edge_sharpness_attr = sharpness_results.edge_sharpness_attr; + + REQUIRE(sharpness_results.vertex_sharpness_attr.has_value()); + subdiv_options.vertex_sharpness_attr = sharpness_results.vertex_sharpness_attr; + + return subdiv_options; +} + +} // namespace + + +TEST_CASE("mesh_subdivision_chordal_deviation closed", "[mesh][subdivision]" LA_CORP_FLAG) +{ + using Scalar = double; + using Index = uint32_t; + + // Load a mesh with curved features + lagrange::io::LoadOptions load_options; + load_options.stitch_vertices = true; + auto mesh = lagrange::io::load_mesh>( + lagrange::testing::get_data_path("corp/subdivision/cgt_ratchet_wrench_001.fbx"), + load_options); + REQUIRE(mesh.get_num_vertices() == 506); + REQUIRE(mesh.get_num_facets() == 500); + + lagrange::SeparateByComponentsOptions split_options; + split_options.map_attributes = true; + auto meshes = lagrange::separate_by_components(mesh, split_options); + REQUIRE(meshes.size() == 2); + if (meshes[0].get_num_vertices() < meshes[1].get_num_vertices()) { + std::swap(meshes[0], meshes[1]); + } + // Use the largest component, which we know to be closed. 
+ mesh = std::move(meshes[0]); + + auto subdiv_options = prepare_subdiv_mesh(mesh); + + const float voxel_size = 1e-3f; + const int euler_input = lagrange::compute_euler(mesh); + REQUIRE(is_closed(mesh)); + REQUIRE(is_manifold(mesh)); + + SECTION("edge length") + { + // Set up adaptive subdivision with max edge length + subdiv_options.max_edge_length = + voxel_size * std::sqrt(3.f) / 2.f; // Half diagonal of a voxel + + auto subdivided_mesh = lagrange::subdivision::subdivide_mesh(mesh, subdiv_options); + REQUIRE(subdivided_mesh.get_num_vertices() > mesh.get_num_vertices()); + REQUIRE(subdivided_mesh.get_num_facets() > mesh.get_num_facets()); + + REQUIRE(compute_euler(subdivided_mesh) == euler_input); + REQUIRE(is_closed(subdivided_mesh)); + REQUIRE(is_manifold(subdivided_mesh)); + } + + SECTION("chordal deviation") + { + // Set up adaptive subdivision with chordal deviation + subdiv_options.max_chordal_deviation = 0.5f * voxel_size; // Half voxel size + + auto subdivided_mesh = lagrange::subdivision::subdivide_mesh(mesh, subdiv_options); + REQUIRE(subdivided_mesh.get_num_vertices() > mesh.get_num_vertices()); + REQUIRE(subdivided_mesh.get_num_facets() > mesh.get_num_facets()); + + REQUIRE(compute_euler(subdivided_mesh) == euler_input); + REQUIRE(is_closed(subdivided_mesh)); + REQUIRE(is_manifold(subdivided_mesh)); + + // Verify that tighter tolerance produces more tessellation + lagrange::subdivision::SubdivisionOptions options_tight = subdiv_options; + options_tight.max_chordal_deviation = subdiv_options.max_chordal_deviation.value() * 0.1f; + auto subdivided_tight = lagrange::subdivision::subdivide_mesh(mesh, options_tight); + REQUIRE(subdivided_tight.get_num_facets() >= subdivided_mesh.get_num_facets()); + } +} + +TEST_CASE("mesh_subdivision_chordal_deviation open", "[mesh][subdivision]" LA_CORP_FLAG) +{ + using Scalar = double; + using Index = uint32_t; + + // Load a mesh with curved features + lagrange::io::LoadOptions load_options; + 
load_options.stitch_vertices = true; + auto mesh = lagrange::io::load_mesh>( + lagrange::testing::get_data_path("corp/subdivision/cgt_ratchet_wrench_001.fbx"), + load_options); + REQUIRE(mesh.get_num_vertices() == 506); + REQUIRE(mesh.get_num_facets() == 500); + + auto subdiv_options = prepare_subdiv_mesh(mesh); + + const float voxel_size = 1e-3f; + const int euler_input = lagrange::compute_euler(mesh); + REQUIRE(!is_closed(mesh)); + REQUIRE(is_manifold(mesh)); + + SECTION("edge length") + { + // Set up adaptive subdivision with max edge length + subdiv_options.max_edge_length = + voxel_size * std::sqrt(3.f) / 2.f; // Half diagonal of a voxel + + auto subdivided_mesh = lagrange::subdivision::subdivide_mesh(mesh, subdiv_options); + // Uncomment to inspect the result: + // lagrange::io::save_mesh("subdivided_mesh_edge_length.obj", subdivided_mesh); + REQUIRE(subdivided_mesh.get_num_vertices() > mesh.get_num_vertices()); + REQUIRE(subdivided_mesh.get_num_facets() > mesh.get_num_facets()); + + REQUIRE(compute_euler(subdivided_mesh) == euler_input); + REQUIRE(is_manifold(subdivided_mesh)); + } + + SECTION("chordal deviation") + { + // Set up adaptive subdivision with chordal deviation + subdiv_options.max_chordal_deviation = 0.1f * voxel_size; // 10% of voxel size + + auto subdivided_mesh = lagrange::subdivision::subdivide_mesh(mesh, subdiv_options); + // Uncomment to inspect the result: + // lagrange::io::save_mesh("subdivided_mesh_chordal_deviation.obj", subdivided_mesh); + REQUIRE(subdivided_mesh.get_num_vertices() > mesh.get_num_vertices()); + REQUIRE(subdivided_mesh.get_num_facets() > mesh.get_num_facets()); + + REQUIRE(compute_euler(subdivided_mesh) == euler_input); + REQUIRE(is_manifold(subdivided_mesh)); + + // Verify that tighter tolerance produces more tessellation + lagrange::subdivision::SubdivisionOptions options_tight = subdiv_options; + options_tight.max_chordal_deviation = subdiv_options.max_chordal_deviation.value() * 0.1f; + auto subdivided_tight = 
lagrange::subdivision::subdivide_mesh(mesh, options_tight); + REQUIRE(subdivided_tight.get_num_facets() >= subdivided_mesh.get_num_facets()); + } + + SECTION("both") + { + // Verify mutual exclusion of max_edge_length and max_chordal_deviation + subdiv_options.max_edge_length = 1.0f; + subdiv_options.max_chordal_deviation = 1e-4f; + LA_REQUIRE_THROWS(lagrange::subdivision::subdivide_mesh(mesh, subdiv_options)); + } +} + +TEST_CASE("mesh_subdivision_chordal_deviation_accuracy", "[mesh][subdivision]" LA_CORP_FLAG) +{ + using Scalar = double; + using Index = uint32_t; + + // Load test mesh + lagrange::io::LoadOptions load_options; + load_options.stitch_vertices = true; + auto mesh = lagrange::io::load_mesh>( + lagrange::testing::get_data_path("corp/subdivision/cgt_ratchet_wrench_001.fbx"), + load_options); + REQUIRE(mesh.get_num_vertices() == 506); + REQUIRE(mesh.get_num_facets() == 500); + + auto subdiv_options = prepare_subdiv_mesh(mesh); + const float voxel_size = 1e-3f; + const float max_chordal_deviation = 0.1f * voxel_size; + subdiv_options.max_chordal_deviation = max_chordal_deviation; + + // Create a face-varying attribute encoding the local parametric position of each corner within + // its face. For a quad face with OpenSubdiv's parameterization, the corners are at + // (0,0), (1,0), (1,1), (0,1). After linear FV interpolation through subdivision, boundary + // sub-edges (lying on coarse mesh edges) can be identified: they have one parametric coordinate + // pinned to 0 or 1 at both endpoints. + mesh.initialize_edges(); + const Index num_corners = mesh.get_num_corners(); + + // Build the per-corner UV values. Each corner gets the OpenSubdiv parametric coordinate of + // its vertex within the face. For quads: vertex 0→(0,0), 1→(1,0), 2→(1,1), 3→(0,1). 
+ std::vector corner_uv_values(num_corners * 2); + for (Index f = 0; f < mesh.get_num_facets(); ++f) { + Index N = mesh.get_facet_size(f); + Index c0 = mesh.get_facet_corner_begin(f); + REQUIRE((N == 3 || N == 4)); // Test mesh should only have triangles and quads + for (Index lv = 0; lv < N; ++lv) { + Scalar u, v; + if (N == 4) { + // QUAD parameterization + u = static_cast((lv != 0) && (lv < 3)); + v = static_cast(lv > 1); + } else { + // TRI parameterization + u = static_cast(lv == 1); + v = static_cast(lv == 2); + } + corner_uv_values[(c0 + lv) * 2 + 0] = u; + corner_uv_values[(c0 + lv) * 2 + 1] = v; + } + } + // Each corner has its own unique value (no sharing between corners) + std::vector corner_uv_indices(num_corners); + std::iota(corner_uv_indices.begin(), corner_uv_indices.end(), Index(0)); + + mesh.template create_attribute( + "face_uv", + lagrange::AttributeElement::Indexed, + lagrange::AttributeUsage::UV, + 2, + corner_uv_values, + corner_uv_indices); + + // Subdivide with chordal deviation and linear face-varying interpolation so the parametric + // UV attribute is linearly interpolated. + subdiv_options.face_varying_interpolation = + lagrange::subdivision::FaceVaryingInterpolation::All; + subdiv_options.interpolated_attributes.set_all(); + auto subdivided_mesh = lagrange::subdivision::subdivide_mesh(mesh, subdiv_options); + + // Identify output face-edges lying on coarse mesh edges using the face-varying UV attribute. + // For quads, a face-edge is on a coarse boundary if one UV coordinate is pinned to 0 or 1 + // at both corners. 
+ const auto& uv_attr = subdivided_mesh.template get_indexed_attribute("face_uv"); + auto uv_values = uv_attr.values().get_all(); + auto uv_indices = uv_attr.indices().get_all(); + auto V = vertex_view(subdivided_mesh); + constexpr Scalar eps = 1e-8; + + lagrange::SurfaceMesh edge_midpoints; + for (Index f = 0; f < subdivided_mesh.get_num_facets(); ++f) { + Index fsize = subdivided_mesh.get_facet_size(f); + Index c0 = subdivided_mesh.get_facet_corner_begin(f); + for (Index lv = 0; lv < fsize; ++lv) { + Index ca = c0 + lv; + Index cb = c0 + (lv + 1) % fsize; + Index ia = uv_indices[ca]; + Index ib = uv_indices[cb]; + Scalar ua = uv_values[ia * 2 + 0], va = uv_values[ia * 2 + 1]; + Scalar ub = uv_values[ib * 2 + 0], vb = uv_values[ib * 2 + 1]; + + // Check if one parametric coordinate is pinned to 0 or 1 at both endpoints. + bool on_boundary = false; + for (int ch = 0; ch < 2; ++ch) { + Scalar a = (ch == 0) ? ua : va; + Scalar b = (ch == 0) ? ub : vb; + if ((std::abs(a) < eps && std::abs(b) < eps) || + (std::abs(a - 1.0) < eps && std::abs(b - 1.0) < eps)) { + on_boundary = true; + break; + } + } + if (!on_boundary) continue; + + // This edge lies on a coarse mesh edge. Compute its midpoint. + Index va_idx = subdivided_mesh.get_corner_vertex(ca); + Index vb_idx = subdivided_mesh.get_corner_vertex(cb); + Eigen::RowVector3d p = (V.row(va_idx) + V.row(vb_idx)) * 0.5; + edge_midpoints.add_vertex({p[0], p[1], p[2]}); + } + } + REQUIRE(edge_midpoints.get_num_vertices() > 0); + INFO("Number of boundary sub-edge midpoints: " << edge_midpoints.get_num_vertices()); + + // Build a dense reference mesh by subdividing with a much tighter tolerance. 
+ lagrange::subdivision::SubdivisionOptions tight_options = subdiv_options; + tight_options.max_chordal_deviation = max_chordal_deviation * 0.1f; + tight_options.interpolated_attributes = lagrange::subdivision::InterpolatedAttributes::none(); + auto reference = lagrange::subdivision::subdivide_mesh(mesh, tight_options); + lagrange::triangulate_polygonal_facets(reference); + + // Measure one-sided Hausdorff distance from coarse-edge sub-segment midpoints to the reference. + auto dist_id = lagrange::bvh::compute_mesh_distances(edge_midpoints, reference); + auto distances = lagrange::attribute_vector_view(edge_midpoints, dist_id); + Scalar max_dist = *std::max_element(distances.begin(), distances.end()); + INFO("Max distance: " << max_dist << " vs threshold: " << max_chordal_deviation); + CHECK(max_dist <= static_cast(max_chordal_deviation)); + CHECK(2. * max_dist >= static_cast(max_chordal_deviation)); +} diff --git a/modules/testing/CMakeLists.txt b/modules/testing/CMakeLists.txt index 976942fe..2b9e2841 100644 --- a/modules/testing/CMakeLists.txt +++ b/modules/testing/CMakeLists.txt @@ -16,11 +16,12 @@ set_target_properties(lagrange_testing PROPERTIES FOLDER "${LAGRANGE_IDE_PREFIX} # 2. dependencies include(catch2) -lagrange_include_modules(io) +lagrange_include_modules(io scene) target_link_libraries(lagrange_testing PUBLIC Catch2::Catch2 Threads::Threads lagrange::io + lagrange::scene ) # 3. test-specific properties diff --git a/modules/testing/include/lagrange/testing/check_meshes_equal.h b/modules/testing/include/lagrange/testing/check_meshes_equal.h new file mode 100644 index 00000000..b6a0c688 --- /dev/null +++ b/modules/testing/include/lagrange/testing/check_meshes_equal.h @@ -0,0 +1,126 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#pragma once + +#include +#include +#include +#include + +// clang-format off +#include +#include +#include +// clang-format on + +#include +#include + +namespace lagrange::testing { + +/// +/// Check that two meshes are bitwise identical. +/// +/// Verifies topology counts, all attribute names, attribute IDs for user attributes, +/// and bitwise equality of all attribute data (both reserved and non-reserved). +/// This is suitable for verifying lossless serialization round-trips. +/// +/// @param[in] a First mesh. +/// @param[in] b Second mesh. +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. 
+/// +template +void check_meshes_equal(const SurfaceMesh& a, const SurfaceMesh& b) +{ + // Check topology counts + REQUIRE(a.get_num_vertices() == b.get_num_vertices()); + REQUIRE(a.get_num_facets() == b.get_num_facets()); + REQUIRE(a.get_num_corners() == b.get_num_corners()); + REQUIRE(a.get_num_edges() == b.get_num_edges()); + REQUIRE(a.get_dimension() == b.get_dimension()); + + if (a.is_regular()) { + REQUIRE(b.is_regular()); + REQUIRE(a.get_vertex_per_facet() == b.get_vertex_per_facet()); + } else { + REQUIRE(b.is_hybrid()); + } + + // Collect all attribute names from both meshes + std::vector> names_a, names_b; + a.seq_foreach_attribute_id([&](std::string_view name, AttributeId id) { + names_a.emplace_back(std::string(name), id); + }); + b.seq_foreach_attribute_id([&](std::string_view name, AttributeId id) { + names_b.emplace_back(std::string(name), id); + }); + + // Check that all attributes in a exist in b + for (const auto& [name, id] : names_a) { + INFO("Attribute in a missing from b: " << name); + REQUIRE(b.has_attribute(name)); + } + + // Check that all attributes in b exist in a + for (const auto& [name, id] : names_b) { + INFO("Attribute in b missing from a: " << name); + REQUIRE(a.has_attribute(name)); + } + + // Check that user attribute IDs match + for (const auto& [name, id_a] : names_a) { + if (SurfaceMesh::attr_name_is_reserved(name)) continue; + INFO("Checking attribute id for: " << name); + REQUIRE(b.get_attribute_id(name) == id_a); + } + + // Check all attribute data (reserved and non-reserved) + seq_foreach_named_attribute_read(a, [&](std::string_view name, auto&& attr_a) { + using AttrType = std::decay_t; + + INFO("Checking attribute data: " << std::string(name)); + REQUIRE(b.has_attribute(name)); + + if constexpr (AttrType::IsIndexed) { + using ValueType = typename AttrType::ValueType; + const auto& attr_b = b.template get_indexed_attribute(name); + auto vals_a = attr_a.values().get_all(); + auto vals_b = attr_b.values().get_all(); + 
REQUIRE(vals_a.size() == vals_b.size()); + for (size_t i = 0; i < vals_a.size(); ++i) { + REQUIRE(vals_a[i] == vals_b[i]); + } + auto idx_a = attr_a.indices().get_all(); + auto idx_b = attr_b.indices().get_all(); + REQUIRE(idx_a.size() == idx_b.size()); + for (size_t i = 0; i < idx_a.size(); ++i) { + REQUIRE(idx_a[i] == idx_b[i]); + } + } else { + using ValueType = typename AttrType::ValueType; + const auto& attr_b = b.template get_attribute(name); + REQUIRE(attr_a.get_num_elements() == attr_b.get_num_elements()); + REQUIRE(attr_a.get_num_channels() == attr_b.get_num_channels()); + auto span_a = attr_a.get_all(); + auto span_b = attr_b.get_all(); + REQUIRE(span_a.size() == span_b.size()); + for (size_t i = 0; i < span_a.size(); ++i) { + REQUIRE(span_a[i] == span_b[i]); + } + } + }); +} + +} // namespace lagrange::testing diff --git a/modules/testing/include/lagrange/testing/check_scenes_equal.h b/modules/testing/include/lagrange/testing/check_scenes_equal.h new file mode 100644 index 00000000..fe95e4e6 --- /dev/null +++ b/modules/testing/include/lagrange/testing/check_scenes_equal.h @@ -0,0 +1,200 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#pragma once + +#include +#include + +// clang-format off +#include +#include +#include +// clang-format on + +namespace lagrange::testing { + +/// +/// Check that two scenes are bitwise identical. 
+/// +/// Verifies scene name, meshes, nodes (hierarchy, transforms, mesh/camera/light references), +/// images, textures, materials, lights, cameras (including optional far_plane), +/// skeletons, and animations. +/// This is suitable for verifying lossless serialization round-trips. +/// +/// @note Extensions and user_data are NOT compared by this function. +/// +/// @param[in] a First scene. +/// @param[in] b Second scene. +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// +template +void check_scenes_equal(const scene::Scene& a, const scene::Scene& b) +{ + REQUIRE(a.name == b.name); + + // Meshes + REQUIRE(a.meshes.size() == b.meshes.size()); + for (size_t i = 0; i < a.meshes.size(); ++i) { + check_meshes_equal(a.meshes[i], b.meshes[i]); + } + + // Root nodes + REQUIRE(a.root_nodes.size() == b.root_nodes.size()); + for (size_t i = 0; i < a.root_nodes.size(); ++i) { + REQUIRE(a.root_nodes[i] == b.root_nodes[i]); + } + + // Nodes + REQUIRE(a.nodes.size() == b.nodes.size()); + for (size_t i = 0; i < a.nodes.size(); ++i) { + const auto& na = a.nodes[i]; + const auto& nb = b.nodes[i]; + REQUIRE(na.name == nb.name); + REQUIRE(na.transform.matrix().isApprox(nb.transform.matrix(), 0.f)); + REQUIRE(na.parent == nb.parent); + REQUIRE(na.children.size() == nb.children.size()); + for (size_t j = 0; j < na.children.size(); ++j) { + REQUIRE(na.children[j] == nb.children[j]); + } + REQUIRE(na.meshes.size() == nb.meshes.size()); + for (size_t j = 0; j < na.meshes.size(); ++j) { + REQUIRE(na.meshes[j].mesh == nb.meshes[j].mesh); + REQUIRE(na.meshes[j].materials.size() == nb.meshes[j].materials.size()); + for (size_t k = 0; k < na.meshes[j].materials.size(); ++k) { + REQUIRE(na.meshes[j].materials[k] == nb.meshes[j].materials[k]); + } + } + REQUIRE(na.cameras.size() == nb.cameras.size()); + for (size_t j = 0; j < na.cameras.size(); ++j) { + REQUIRE(na.cameras[j] == nb.cameras[j]); + } + REQUIRE(na.lights.size() == nb.lights.size()); + for (size_t 
j = 0; j < na.lights.size(); ++j) { + REQUIRE(na.lights[j] == nb.lights[j]); + } + } + + // Images + REQUIRE(a.images.size() == b.images.size()); + for (size_t i = 0; i < a.images.size(); ++i) { + REQUIRE(a.images[i].name == b.images[i].name); + REQUIRE(a.images[i].image.width == b.images[i].image.width); + REQUIRE(a.images[i].image.height == b.images[i].image.height); + REQUIRE(a.images[i].image.num_channels == b.images[i].image.num_channels); + REQUIRE(a.images[i].image.element_type == b.images[i].image.element_type); + REQUIRE(a.images[i].image.data == b.images[i].image.data); + REQUIRE(a.images[i].uri == b.images[i].uri); + } + + // Textures + REQUIRE(a.textures.size() == b.textures.size()); + for (size_t i = 0; i < a.textures.size(); ++i) { + const auto& ta = a.textures[i]; + const auto& tb = b.textures[i]; + REQUIRE(ta.name == tb.name); + REQUIRE(ta.image == tb.image); + REQUIRE(ta.mag_filter == tb.mag_filter); + REQUIRE(ta.min_filter == tb.min_filter); + REQUIRE(ta.wrap_u == tb.wrap_u); + REQUIRE(ta.wrap_v == tb.wrap_v); + REQUIRE(ta.scale.isApprox(tb.scale, 0.f)); + REQUIRE(ta.offset.isApprox(tb.offset, 0.f)); + REQUIRE(ta.rotation == tb.rotation); + } + + // Materials + REQUIRE(a.materials.size() == b.materials.size()); + for (size_t i = 0; i < a.materials.size(); ++i) { + const auto& ma = a.materials[i]; + const auto& mb = b.materials[i]; + REQUIRE(ma.name == mb.name); + REQUIRE(ma.base_color_value.isApprox(mb.base_color_value, 0.f)); + REQUIRE(ma.emissive_value.isApprox(mb.emissive_value, 0.f)); + REQUIRE(ma.metallic_value == mb.metallic_value); + REQUIRE(ma.roughness_value == mb.roughness_value); + REQUIRE(ma.alpha_mode == mb.alpha_mode); + REQUIRE(ma.alpha_cutoff == mb.alpha_cutoff); + REQUIRE(ma.normal_scale == mb.normal_scale); + REQUIRE(ma.occlusion_strength == mb.occlusion_strength); + REQUIRE(ma.double_sided == mb.double_sided); + REQUIRE(ma.base_color_texture.index == mb.base_color_texture.index); + REQUIRE(ma.base_color_texture.texcoord == 
mb.base_color_texture.texcoord); + REQUIRE(ma.emissive_texture.index == mb.emissive_texture.index); + REQUIRE(ma.metallic_roughness_texture.index == mb.metallic_roughness_texture.index); + REQUIRE(ma.normal_texture.index == mb.normal_texture.index); + REQUIRE(ma.occlusion_texture.index == mb.occlusion_texture.index); + } + + // Lights + REQUIRE(a.lights.size() == b.lights.size()); + for (size_t i = 0; i < a.lights.size(); ++i) { + const auto& la = a.lights[i]; + const auto& lb = b.lights[i]; + REQUIRE(la.name == lb.name); + REQUIRE(la.type == lb.type); + REQUIRE(la.position.isApprox(lb.position, 0.f)); + REQUIRE(la.direction.isApprox(lb.direction, 0.f)); + REQUIRE(la.up.isApprox(lb.up, 0.f)); + REQUIRE(la.intensity == lb.intensity); + REQUIRE(la.attenuation_constant == lb.attenuation_constant); + REQUIRE(la.attenuation_linear == lb.attenuation_linear); + REQUIRE(la.attenuation_quadratic == lb.attenuation_quadratic); + REQUIRE(la.attenuation_cubic == lb.attenuation_cubic); + REQUIRE(la.range == lb.range); + REQUIRE(la.color_diffuse.isApprox(lb.color_diffuse, 0.f)); + REQUIRE(la.color_specular.isApprox(lb.color_specular, 0.f)); + REQUIRE(la.color_ambient.isApprox(lb.color_ambient, 0.f)); + REQUIRE(la.angle_inner_cone == lb.angle_inner_cone); + REQUIRE(la.angle_outer_cone == lb.angle_outer_cone); + REQUIRE(la.size.isApprox(lb.size, 0.f)); + } + + // Cameras + REQUIRE(a.cameras.size() == b.cameras.size()); + for (size_t i = 0; i < a.cameras.size(); ++i) { + const auto& ca = a.cameras[i]; + const auto& cb = b.cameras[i]; + REQUIRE(ca.name == cb.name); + REQUIRE(ca.type == cb.type); + REQUIRE(ca.position.isApprox(cb.position, 0.f)); + REQUIRE(ca.up.isApprox(cb.up, 0.f)); + REQUIRE(ca.look_at.isApprox(cb.look_at, 0.f)); + REQUIRE(ca.near_plane == cb.near_plane); + REQUIRE(ca.far_plane.has_value() == cb.far_plane.has_value()); + if (ca.far_plane.has_value()) { + REQUIRE(ca.far_plane.value() == cb.far_plane.value()); + } + REQUIRE(ca.orthographic_width == 
cb.orthographic_width); + REQUIRE(ca.aspect_ratio == cb.aspect_ratio); + REQUIRE(ca.horizontal_fov == cb.horizontal_fov); + } + + // Skeletons + REQUIRE(a.skeletons.size() == b.skeletons.size()); + for (size_t i = 0; i < a.skeletons.size(); ++i) { + REQUIRE(a.skeletons[i].meshes.size() == b.skeletons[i].meshes.size()); + for (size_t j = 0; j < a.skeletons[i].meshes.size(); ++j) { + REQUIRE(a.skeletons[i].meshes[j] == b.skeletons[i].meshes[j]); + } + } + + // Animations + REQUIRE(a.animations.size() == b.animations.size()); + for (size_t i = 0; i < a.animations.size(); ++i) { + REQUIRE(a.animations[i].name == b.animations[i].name); + } +} + +} // namespace lagrange::testing diff --git a/modules/testing/include/lagrange/testing/check_simple_scenes_equal.h b/modules/testing/include/lagrange/testing/check_simple_scenes_equal.h new file mode 100644 index 00000000..ac8b882e --- /dev/null +++ b/modules/testing/include/lagrange/testing/check_simple_scenes_equal.h @@ -0,0 +1,60 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#pragma once + +#include +#include + +// clang-format off +#include +#include +#include +// clang-format on + +namespace lagrange::testing { + +/// +/// Check that two simple scenes are bitwise identical. +/// +/// Verifies mesh counts, mesh data (via check_meshes_equal), instance counts, +/// mesh indices, and transform matrices (exact match). 
+/// This is suitable for verifying lossless serialization round-trips. +/// +/// @param[in] a First simple scene. +/// @param[in] b Second simple scene. +/// +/// @tparam Scalar Mesh scalar type. +/// @tparam Index Mesh index type. +/// @tparam Dimension Spatial dimension. +/// +template +void check_simple_scenes_equal( + const scene::SimpleScene& a, + const scene::SimpleScene& b) +{ + REQUIRE(a.get_num_meshes() == b.get_num_meshes()); + + for (Index i = 0; i < a.get_num_meshes(); ++i) { + check_meshes_equal(a.get_mesh(i), b.get_mesh(i)); + REQUIRE(a.get_num_instances(i) == b.get_num_instances(i)); + + for (Index j = 0; j < a.get_num_instances(i); ++j) { + const auto& inst_a = a.get_instance(i, j); + const auto& inst_b = b.get_instance(i, j); + REQUIRE(inst_a.mesh_index == inst_b.mesh_index); + REQUIRE(inst_a.transform.matrix().isApprox(inst_b.transform.matrix(), Scalar(0))); + } + } +} + +} // namespace lagrange::testing diff --git a/modules/texproc/examples/extract_mesh_with_alpha_mask.cpp b/modules/texproc/examples/extract_mesh_with_alpha_mask.cpp index bb0b8d33..58544bf8 100644 --- a/modules/texproc/examples/extract_mesh_with_alpha_mask.cpp +++ b/modules/texproc/examples/extract_mesh_with_alpha_mask.cpp @@ -12,23 +12,29 @@ #include "../tests/image_helpers.h" -#include - #include #include #include +#include #include #include #include -#include +#include #include +#include #include +#include #include +#include #include #include +// Suggested meshes to try it with: +// - fish bowl: objaverse1/000-010/5a39e8c683f946b1aeb848dd2e88deb8.glb +// - helmet: objaverse1/000-138/f85b8c2149c24a4a8b86c7b7df7be254.glb + namespace fs = lagrange::fs; using Scene = lagrange::scene::Scene; using SurfaceMesh = Scene::MeshType; @@ -38,7 +44,7 @@ int main(int argc, char** argv) struct { fs::path input_path; - fs::path output_path; + fs::path output_path = "output.obj"; bool split_grids = false; size_t log_level = 2; } args; @@ -46,7 +52,7 @@ int main(int argc, char** argv) CLI::App 
app{argv[0]}; app.option_defaults()->always_capture_default(); app.add_option("input", args.input_path, "Input scene.")->required()->check(CLI::ExistingFile); - app.add_option("output", args.output_path, "Output scene."); + app.add_option("output", args.output_path, "Output mesh."); app.add_option("-l,--level", args.log_level, "Log level."); CLI11_PARSE(app, argc, argv) @@ -97,47 +103,45 @@ int main(int argc, char** argv) } logger.info("found {} compatible materials", material_to_payloads.size()); - lagrange::scene::MaterialExperimental material_; - const auto material_id_ = scene.add(material_); - size_t num_extracted = 0; - for (auto& node : scene.nodes) { - la_runtime_assert(!node.name.empty()); - for (auto& instance : node.meshes) { - if (instance.materials.size() != 1) { - throw std::runtime_error("multi-material instance not supported"); - continue; - } - la_runtime_assert(instance.materials.size() == 1); - const auto iter = material_to_payloads.find(instance.materials.front()); - if (iter == material_to_payloads.end()) continue; - const auto& payload = iter->second; - const auto image = - test::scene_image_to_image_array(scene.images.at(payload.image_id).image); - auto mesh = scene.meshes.at(instance.mesh); - la_runtime_assert(image.extent(2) == 4, "must have alpha channel"); - const auto texcoord_id = - mesh.get_attribute_id(fmt::format("texcoord_{}", payload.texcoord_id)); - if (texcoord_id == lagrange::invalid_attribute_id()) continue; - if (!mesh.is_attribute_indexed(texcoord_id)) continue; - lagrange::texproc::ExtractMeshWithAlphaMaskOptions extract_options; - extract_options.texcoord_id = texcoord_id; - extract_options.alpha_threshold = payload.alpha_threshold; - auto mesh_ = lagrange::texproc::extract_mesh_with_alpha_mask( - mesh, - image.to_mdspan(), - extract_options); - const auto mesh_id_ = scene.add(mesh_); - instance.mesh = mesh_id_; - instance.materials.clear(); - instance.materials.emplace_back(material_id_); - num_extracted += 1; + const auto 
[meshes, material_ids] = lagrange::scene::scene_to_meshes_and_materials(scene); + + std::vector extracted_meshes; + la_runtime_assert(meshes.size() == material_ids.size()); + for (const auto kk : lagrange::range(meshes.size())) { + const auto& mesh = meshes[kk]; + const auto& material_id = material_ids[kk]; + la_runtime_assert(material_id.size() == 1); + const auto iter = material_to_payloads.find(material_id.front()); + if (iter == material_to_payloads.end()) { + extracted_meshes.emplace_back(mesh); + continue; } + + const auto& payload = iter->second; + const auto image = + test::scene_image_to_image_array(scene.images.at(payload.image_id).image); + la_runtime_assert(image.extent(2) == 4, "must have alpha channel"); + const auto texcoord_id = + mesh.get_attribute_id(fmt::format("texcoord_{}", payload.texcoord_id)); + if (texcoord_id == lagrange::invalid_attribute_id()) continue; + if (!mesh.is_attribute_indexed(texcoord_id)) continue; + lagrange::texproc::ExtractMeshWithAlphaMaskOptions extract_options; + extract_options.texcoord_id = texcoord_id; + extract_options.alpha_threshold = payload.alpha_threshold; + auto extracted = lagrange::texproc::extract_mesh_with_alpha_mask( + mesh, + image.to_mdspan(), + extract_options); + extracted_meshes.emplace_back(extracted); } - logger.info("extracted {} meshes", num_extracted); + + logger.info("combining {} meshes", extracted_meshes.size()); + auto combined = + lagrange::combine_meshes(extracted_meshes); if (!args.output_path.empty()) { - logger.info("saving scene \"{}\"", args.output_path.string()); - lagrange::io::save_scene(args.output_path, scene); + logger.info("saving mesh \"{}\"", args.output_path.string()); + lagrange::io::save_mesh(args.output_path, combined); } return 0; diff --git a/modules/texproc/examples/io_helpers.h b/modules/texproc/examples/io_helpers.h index e3f8921b..41498d72 100644 --- a/modules/texproc/examples/io_helpers.h +++ b/modules/texproc/examples/io_helpers.h @@ -23,6 +23,7 @@ using Array3Df = 
lagrange::image::experimental::Array3D; using View3Df = lagrange::image::experimental::View3D; +using ConstView3Df = lagrange::image::experimental::View3D; template Array3Df convert_from(const lagrange::image_io::LoadImageResult& img) diff --git a/modules/texproc/examples/texture_rasterization.cpp b/modules/texproc/examples/texture_rasterization.cpp index 87fff816..83a19221 100644 --- a/modules/texproc/examples/texture_rasterization.cpp +++ b/modules/texproc/examples/texture_rasterization.cpp @@ -113,7 +113,7 @@ int main(int argc, char** argv) // Load rendered images to unproject lagrange::logger().info("Loading input {} renders", args.input_renders.size()); std::vector renders; - std::vector views; + std::vector views; for (const auto& render : args.input_renders) { renders.push_back(load_image(render)); views.push_back(renders.back().to_mdspan()); diff --git a/modules/texproc/include/lagrange/texproc/extract_mesh_with_alpha_mask.h b/modules/texproc/include/lagrange/texproc/extract_mesh_with_alpha_mask.h index 960015d7..8b2c48d3 100644 --- a/modules/texproc/include/lagrange/texproc/extract_mesh_with_alpha_mask.h +++ b/modules/texproc/include/lagrange/texproc/extract_mesh_with_alpha_mask.h @@ -26,7 +26,7 @@ struct ExtractMeshWithAlphaMaskOptions /// Must be a valid attribute of the input mesh. AttributeId texcoord_id = invalid_attribute_id(); - /// Opaque mask theshold. + /// Opaque mask threshold. float alpha_threshold = 0.5f; }; @@ -40,7 +40,7 @@ struct ExtractMeshWithAlphaMaskOptions /// @tparam Scalar Mesh scalar type. /// @tparam Index Mesh index type. /// -/// @return Tesselated mesh. +/// @return Tessellated triangle mesh, quad mesh or quad-dominant mesh. 
/// template auto extract_mesh_with_alpha_mask( diff --git a/modules/texproc/python/src/texproc.cpp b/modules/texproc/python/src/texproc.cpp index 3f51bdf0..2dded73c 100644 --- a/modules/texproc/python/src/texproc.cpp +++ b/modules/texproc/python/src/texproc.cpp @@ -287,7 +287,7 @@ void populate_texproc_module(nb::module_& m) const std::optional height, const float low_confidence_ratio, const std::optional base_confidence) { - std::vector views; + std::vector views; for (const auto& render : renders) { views.push_back(tensor_to_image_view(render)); } @@ -336,14 +336,10 @@ void populate_texproc_module(nb::module_& m) const std::optional texcoord_id, const float alpha_threshold) -> SurfaceMesh32d { const auto image = tensor_to_image_view(image_); - tp::ExtractMeshWithAlphaMaskOptions options; if (texcoord_id) options.texcoord_id = *texcoord_id; options.alpha_threshold = alpha_threshold; - - auto mesh_ = tp::extract_mesh_with_alpha_mask(mesh, image, options); - - return mesh_; + return tp::extract_mesh_with_alpha_mask(mesh, image, options); }, "mesh"_a, "image"_a, @@ -354,9 +350,9 @@ void populate_texproc_module(nb::module_& m) :param mesh: Input mesh. :param image: RGBA non-opaque texture. :param texcoord_id: Indexed UV attribute id. -:param alpha_threshold: Opaque mask theshold. +:param alpha_threshold: Opaque mask threshold. 
-:returns: Tesselated mesh.)"); +:returns: Tessellated triangle mesh, quad mesh or quad-dominant mesh.)"); } } // namespace lagrange::python diff --git a/modules/texproc/python/tests/assets.py b/modules/texproc/python/tests/conftest.py similarity index 100% rename from modules/texproc/python/tests/assets.py rename to modules/texproc/python/tests/conftest.py diff --git a/modules/texproc/python/tests/test_mesh_with_alpha_mask.py b/modules/texproc/python/tests/test_mesh_with_alpha_mask.py index f7447c0f..02babeb4 100644 --- a/modules/texproc/python/tests/test_mesh_with_alpha_mask.py +++ b/modules/texproc/python/tests/test_mesh_with_alpha_mask.py @@ -49,29 +49,19 @@ def load_alpha_data(scene_path: Path): return mesh, texcoord_id, image, material.alpha_cutoff, node_transform -all_alpha_datas = ( - [] - if lagrange.variant == "open" - else list( - map( - lambda pp: load_alpha_data(Path("data/corp/texproc") / pp), - [Path("alpha_cube_numbers.glb"), Path("alpha_cube_letters.glb")], - ) - ) -) +alpha_scene_files = ["alpha_cube_numbers.glb", "alpha_cube_letters.glb"] @pytest.mark.skipif( lagrange.variant == "open", reason="Test requires corp data", ) -@pytest.mark.parametrize( - "alpha_data", - all_alpha_datas, -) +@pytest.mark.parametrize("scene_file", alpha_scene_files) class TestMeshWithAlphaMask: - def test_extract_alpha_cube(self, alpha_data): - mesh, texcoord_id, image, alpha_threshold, transform = alpha_data + def test_extract_alpha_cube(self, scene_file): + mesh, texcoord_id, image, alpha_threshold, transform = load_alpha_data( + Path("data/corp/texproc") / scene_file + ) # extract tessellated mesh mesh_ = lagrange.texproc.extract_mesh_with_alpha_mask( @@ -82,7 +72,9 @@ def test_extract_alpha_cube(self, alpha_data): ) assert mesh_.num_facets > 0 - def test_transform(self, alpha_data): - mesh, texcoord_id, image, alpha_threshold, transform = alpha_data + def test_transform(self, scene_file): + mesh, texcoord_id, image, alpha_threshold, transform = load_alpha_data( + 
Path("data/corp/texproc") / scene_file + ) assert np.abs(transform - np.eye(4)).max() < 1e-7 diff --git a/modules/texproc/python/tests/test_texproc.py b/modules/texproc/python/tests/test_texproc.py index 65111420..4492fcab 100644 --- a/modules/texproc/python/tests/test_texproc.py +++ b/modules/texproc/python/tests/test_texproc.py @@ -13,8 +13,6 @@ import numpy as np -from .assets import quad_scene, quad_mesh, quad_tex, cube_with_uv # noqa: F401 - class TestTextureProcessing: def test_filtering(self, quad_mesh, quad_tex): diff --git a/modules/texproc/shared/shared_utils.h b/modules/texproc/shared/shared_utils.h index dd075d65..e2227045 100644 --- a/modules/texproc/shared/shared_utils.h +++ b/modules/texproc/shared/shared_utils.h @@ -11,155 +11,41 @@ */ #pragma once -#include -#include -#include -#include -#include -#include -#include -#include - -#include - -// NOTE: These shared utils are used in our cli examples and Python bindings. They depend on the +// NOTE: These shared utils are used in our CLI examples and Python bindings. They depend on the // lagrange::scene module. But we do not want to create a strong dependency between // lagrange::texproc and lagrange::scene, so this file is included directly via relative path in the // examples and Python bindings C++ files. To avoid confusion with internal src/ files, we place -// this file is a separate "shared/" folder. +// this file in a separate "shared/" folder. -namespace lagrange::texproc { - -using Array3Df = image::experimental::Array3D; -using View3Df = image::experimental::View3D; - -// FIXME this strips non-color channel, other variants of this function don't. 
-Array3Df convert_from(const scene::ImageBufferExperimental& image) -{ - size_t nc = std::min(image.num_channels, size_t(3)); - auto result = image::experimental::create_image(image.width, image.height, nc); - - auto copy_buffer = [&](auto scalar) { - using T = std::decay_t; - constexpr bool IsChar = std::is_integral_v && sizeof(T) == 1; - la_runtime_assert(sizeof(T) * 8 == image.get_bits_per_element()); - auto rawbuf = reinterpret_cast(image.data.data()); - for (size_t y = 0, i = 0; y < image.height; ++y) { - for (size_t x = 0; x < image.width; ++x) { - for (size_t c = 0; c < image.num_channels; ++c) { - if (c >= nc) { - ++i; - continue; - } - if constexpr (IsChar) { - result(x, y, c) = static_cast(rawbuf[i++]) / 255.f; - } else { - result(x, y, c) = rawbuf[i++]; - } - } - } - } - }; - - switch (image.element_type) { - case AttributeValueType::e_uint8_t: copy_buffer(uint8_t()); break; - case AttributeValueType::e_int8_t: copy_buffer(int8_t()); break; - case AttributeValueType::e_uint32_t: copy_buffer(uint32_t()); break; - case AttributeValueType::e_int32_t: copy_buffer(int32_t()); break; - case AttributeValueType::e_float: copy_buffer(float()); break; - case AttributeValueType::e_double: copy_buffer(double()); break; - default: throw std::runtime_error("Unsupported image scalar type"); - } - - return result; -} - -// Extract a single uv unwrapped mesh and optionally its base color tensor from a scene. -template -std::tuple, std::optional> single_mesh_from_scene( - const scene::Scene& scene) -{ - using ElementId = scene::ElementId; - - // Find mesh nodes in the scene - std::vector mesh_node_ids; - for (ElementId node_id = 0; node_id < scene.nodes.size(); ++node_id) { - const auto& node = scene.nodes[node_id]; - if (!node.meshes.empty()) { - mesh_node_ids.push_back(node_id); - } - } - - if (mesh_node_ids.size() != 1) { - throw std::runtime_error( - fmt::format( - "Input scene contains {} mesh nodes. 
Expected exactly 1 mesh node.", - mesh_node_ids.size())); - } - const auto& mesh_node = scene.nodes[mesh_node_ids.front()]; - - if (mesh_node.meshes.size() != 1) { - throw std::runtime_error( - fmt::format( - "Input scene has a mesh node with {} instance per node. Expected " - "exactly 1 instance per node", - mesh_node.meshes.size())); - } - const auto& mesh_instance = mesh_node.meshes.front(); - - [[maybe_unused]] const auto mesh_id = mesh_instance.mesh; - la_debug_assert(mesh_id < scene.meshes.size()); - SurfaceMesh mesh = scene.meshes[mesh_instance.mesh]; - { - // Apply node local->world transform - auto world_from_mesh = - scene::utils::compute_global_node_transform(scene, mesh_node_ids.front()) - .template cast(); - transform_mesh(mesh, world_from_mesh); - } +#include +#include +#include - // Find base texture if available - if (auto num_mats = mesh_instance.materials.size(); num_mats != 1) { - logger().warn( - "Mesh node has {} materials. Expected exactly 1 material. Ignoring materials.", - num_mats); - return {mesh, std::nullopt}; - } - const auto& material = scene.materials[mesh_instance.materials.front()]; - if (material.base_color_texture.texcoord != 0) { - logger().warn( - "Mesh node material texcoord is {} != 0. Expected 0. 
Ignoring texcoord.", - material.base_color_texture.texcoord); - } - const auto texture_id = material.base_color_texture.index; - la_debug_assert(texture_id < scene.textures.size()); - const auto& texture = scene.textures[texture_id]; +#include - const auto image_id = texture.image; - la_debug_assert(image_id < scene.images.size()); - const auto& image_ = scene.images[image_id].image; - Array3Df image = convert_from(image_); +namespace lagrange::texproc { - return {mesh, image}; -} +using scene::internal::Array3Df; +using scene::internal::ConstView3Df; +using scene::internal::View3Df; template std::vector cameras_from_scene(const scene::Scene& scene) { using ElementId = scene::ElementId; - // Find cameras in the scene std::vector cameras; for (ElementId node_id = 0; node_id < scene.nodes.size(); ++node_id) { - using namespace scene::utils; const auto& node = scene.nodes[node_id]; if (!node.cameras.empty()) { - auto world_from_node = compute_global_node_transform(scene, node_id); + auto world_from_node = scene::utils::compute_global_node_transform(scene, node_id); for (auto camera_id : node.cameras) { const auto& scene_camera = scene.cameras[camera_id]; CameraOptions camera; - camera.view_transform = camera_view_transform(scene_camera, world_from_node); - camera.projection_transform = camera_projection_transform(scene_camera); + camera.view_transform = + scene::utils::camera_view_transform(scene_camera, world_from_node); + camera.projection_transform = + scene::utils::camera_projection_transform(scene_camera); cameras.push_back(camera); } } @@ -172,14 +58,14 @@ template std::vector> rasterize_textures_from_renders( const lagrange::scene::Scene& scene, std::optional base_texture_in, - const std::vector& renders, + const std::vector& renders, const std::optional tex_width, const std::optional tex_height, const float low_confidence_ratio, const std::optional base_confidence) { // Load mesh, base texture and cameras from input scene - auto [mesh, base_texture] = 
single_mesh_from_scene(scene); + auto [mesh, base_texture] = scene::internal::single_mesh_from_scene(scene); auto cameras = cameras_from_scene(scene); lagrange::logger().info("Found {} cameras in the input scene", cameras.size()); diff --git a/modules/texproc/src/clip_triangle_by_bbox.cpp b/modules/texproc/src/clip_triangle_by_bbox.cpp new file mode 100644 index 00000000..1e7ae6d6 --- /dev/null +++ b/modules/texproc/src/clip_triangle_by_bbox.cpp @@ -0,0 +1,164 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ + +#include "clip_triangle_by_bbox.h" + +#include +#include + +namespace lagrange::texproc::internal { + +namespace { + +enum class Sign { + Negative = -1, + Zero = 0, + Positive = 1, +}; + +template +struct AlignedHalfPlane +{ + using Scalar = ScalarT; + using RowVector2s = Eigen::Matrix; + static constexpr int axis = Axis; + static constexpr bool invert = Invert; + Scalar coord = 0; +}; + +template +inline Sign point_is_in_aligned_half_plane( + const typename HalfPlaneType::RowVector2s& p, + HalfPlaneType half_plane) +{ + if (p[half_plane.axis] == half_plane.coord) { + return Sign::Zero; + } else if (p[half_plane.axis] > half_plane.coord) { + return half_plane.invert ? Sign::Negative : Sign::Positive; + } else { + return half_plane.invert ? 
Sign::Positive : Sign::Negative; + } +} + +template +inline bool intersect_line_half_plane( + const typename HalfPlaneType::RowVector2s& p1, + const typename HalfPlaneType::RowVector2s& p2, + HalfPlaneType half_plane, + typename HalfPlaneType::RowVector2s& result) +{ + constexpr int axis = half_plane.axis; + + if (p1[axis] == p2[axis]) { + return false; + } + + using Scalar = typename HalfPlaneType::Scalar; + + // Sort endpoints by the clipped axis to ensure the interpolation produces + // bit-identical results regardless of which side of the half-plane is being clipped. + auto [lo, hi] = + std::minmax(p1, p2, [&](const auto& a, const auto& b) { return a[axis] < b[axis]; }); + const Scalar t = (half_plane.coord - lo[axis]) / (hi[axis] - lo[axis]); + result = (Scalar(1) - t) * lo + t * hi; + + // Snap the axis coordinate to the exact half-plane value. + result[axis] = half_plane.coord; + + return true; +} + +template +SmallPolygon2 clip_small_poly_by_aligned_half_plane( + const SmallPolygon2& poly, + HalfPlaneType half_plane) +{ + using Scalar = typename HalfPlaneType::Scalar; + using RowVector2s = typename HalfPlaneType::RowVector2s; + + SmallPolygon2 result(0, 2); + + auto push_back = [&](const RowVector2s& p) { + la_debug_assert(result.rows() != 7); + int idx = static_cast(result.rows()); + result.conservativeResize(idx + 1, Eigen::NoChange); + result.row(idx) = p; + }; + + if (poly.rows() == 0) { + return result; + } + + if (poly.rows() == 1) { + if (point_is_in_aligned_half_plane(poly.row(0), half_plane) != Sign::Zero) { + push_back(poly.row(0)); + } + return result; + } + + RowVector2s prev_p = poly.row(poly.rows() - 1); + Sign prev_status = point_is_in_aligned_half_plane(prev_p, half_plane); + + for (Eigen::Index i = 0; i < poly.rows(); ++i) { + const RowVector2s p = poly.row(i); + const Sign status = point_is_in_aligned_half_plane(p, half_plane); + if (status != prev_status && status != Sign::Zero && prev_status != Sign::Zero) { + RowVector2s intersect; + if 
(intersect_line_half_plane(prev_p, p, half_plane, intersect)) { + push_back(intersect); + } + } + + switch (status) { + case Sign::Negative: break; + case Sign::Zero: [[fallthrough]]; + case Sign::Positive: push_back(p); break; + default: break; + } + + prev_p = p; + prev_status = status; + } + + return result; +} + +} // anonymous namespace + +// ----------------------------------------------------------------------------- + +template +SmallPolygon2 clip_triangle_by_bbox( + const SmallPolygon2& triangle, + const Eigen::AlignedBox& bbox) +{ + const AlignedHalfPlane h0{bbox.min().x()}; + const AlignedHalfPlane h1{bbox.max().x()}; + const AlignedHalfPlane h2{bbox.min().y()}; + const AlignedHalfPlane h3{bbox.max().y()}; + + SmallPolygon2 result = triangle; + result = clip_small_poly_by_aligned_half_plane(result, h0); + result = clip_small_poly_by_aligned_half_plane(result, h1); + result = clip_small_poly_by_aligned_half_plane(result, h2); + result = clip_small_poly_by_aligned_half_plane(result, h3); + + return result; +} + +#define LA_X_clip_triangle_by_bbox(ValueType, Scalar) \ + template SmallPolygon2 clip_triangle_by_bbox( \ + const SmallPolygon2& triangle, \ + const Eigen::AlignedBox& bbox); +LA_ATTRIBUTE_SCALAR_X(clip_triangle_by_bbox, 0) + +} // namespace lagrange::texproc::internal diff --git a/modules/texproc/src/clip_triangle_by_bbox.h b/modules/texproc/src/clip_triangle_by_bbox.h new file mode 100644 index 00000000..77e76a00 --- /dev/null +++ b/modules/texproc/src/clip_triangle_by_bbox.h @@ -0,0 +1,44 @@ +/* + * Copyright 2026 Adobe. All rights reserved. + * This file is licensed to you under the Apache License, Version 2.0 (the "License"); + * you may not use this file except in compliance with the License. 
You may obtain a copy + * of the License at http://www.apache.org/licenses/LICENSE-2.0 + * + * Unless required by applicable law or agreed to in writing, software distributed under + * the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS + * OF ANY KIND, either express or implied. See the License for the specific language + * governing permissions and limitations under the License. + */ +#pragma once + +#include +#include + +namespace lagrange::texproc::internal { + +/// +/// Stack-allocated matrix for storing vertices of a 2D polygon when its maximum size is known +/// in advance. +/// +/// @tparam Scalar The scalar type of vertices. +/// @tparam Size The maximum number of vertices. +/// +template +using SmallPolygon2 = Eigen::Matrix; + +/// +/// Clip a triangle by an axis-aligned box. +/// +/// @param[in] triangle Triangle to clip. +/// @param[in] bbox Axis-aligned bbox to clip with. +/// +/// @tparam Scalar The scalar type of vertices. +/// +/// @return Clipped (convex) polygon. 
+/// +template +SmallPolygon2 clip_triangle_by_bbox( + const SmallPolygon2& triangle, + const Eigen::AlignedBox& bbox); + +} // namespace lagrange::texproc::internal diff --git a/modules/texproc/src/extract_mesh_with_alpha_mask.cpp b/modules/texproc/src/extract_mesh_with_alpha_mask.cpp index 832904aa..71281466 100644 --- a/modules/texproc/src/extract_mesh_with_alpha_mask.cpp +++ b/modules/texproc/src/extract_mesh_with_alpha_mask.cpp @@ -11,6 +11,8 @@ */ #include +#include "clip_triangle_by_bbox.h" + #include #include #include @@ -22,73 +24,100 @@ #include -#include +#include #include +#include +#include + namespace lagrange::texproc { +namespace { + +template +auto uv_to_coordinate(const Eigen::Vector& uv, const size_t width, const size_t height) + -> Eigen::Vector +{ + auto ii = static_cast(uv(0) * static_cast(width)); + auto jj = static_cast(uv(1) * static_cast(height)); + ii = std::min(ii, width - 1); + jj = std::min(jj, height - 1); + la_debug_assert(ii < width); + la_debug_assert(jj < height); + return {ii, jj}; +} + +template +auto uv_from_coordinate(const Eigen::Vector& ij, const size_t width, const size_t height) + -> Eigen::Vector +{ + la_debug_assert(ij(0) < width); + la_debug_assert(ij(1) < height); + const auto uu = (static_cast(ij(0)) + 0.5) / static_cast(width); + const auto vv = (static_cast(ij(1)) + 0.5) / static_cast(height); + return {uu, vv}; +} + +template +auto wedge(const Eigen::Vector& xx, const Eigen::Vector& yy) -> Scalar +{ + return xx(0) * yy(1) - xx(1) * yy(0); +} + +template +auto barycentric_weights( + const Eigen::Matrix& texcoords, + const Eigen::Vector& uv) -> Eigen::Vector +{ + const Eigen::Vector q0 = texcoords.row(0).transpose(); + const Eigen::Vector q1 = texcoords.row(1).transpose(); + const Eigen::Vector q2 = texcoords.row(2).transpose(); + [[maybe_unused]] const Scalar area = wedge(q1 - q0, q2 - q0); + la_debug_assert(std::fabs(area) > 1e-7); + la_debug_assert(uv(0) >= 0.0); + la_debug_assert(uv(1) >= 0.0); + 
la_debug_assert(uv(0) <= 1.0); + la_debug_assert(uv(1) <= 1.0); + Scalar w0 = wedge(q1 - uv, q2 - uv); + Scalar w1 = wedge(q2 - uv, q0 - uv); + Scalar w2 = wedge(q0 - uv, q1 - uv); + const Scalar area_sum = w0 + w1 + w2; + la_debug_assert(std::fabs(area_sum - area) < 1e-5); + w0 /= area_sum; + w1 /= area_sum; + w2 /= area_sum; + const auto ww = Eigen::Vector(w0, w1, w2); + la_debug_assert(std::fabs(ww.sum() - 1.0) < 1e-5); + return ww; +} + template void rasterize_triangle_data( - const Eigen::Matrix& texcoords, + const Eigen::Matrix& texcoords, const size_t width, const size_t height, Callback callback) { - const auto to_coordinate = [&](Eigen::Vector uv) -> Eigen::Vector { - auto ii = static_cast(uv(0) * static_cast(width)); - auto jj = static_cast(uv(1) * static_cast(height)); - ii = std::min(ii, width - 1); - jj = std::min(jj, height - 1); - la_debug_assert(ii < width); - la_debug_assert(jj < height); - return {ii, jj}; - }; - - const auto from_coordinate = - [&](const Eigen::Vector& ij) -> Eigen::Vector { - la_debug_assert(ij(0) < width); - la_debug_assert(ij(1) < height); - const auto uu = (static_cast(ij(0)) + 0.5) / static_cast(width); - const auto vv = (static_cast(ij(1)) + 0.5) / static_cast(height); - return {uu, vv}; - }; - - const auto wedge = [](const Eigen::Vector& xx, const Eigen::Vector& yy) { - return xx(0) * yy(1) - xx(1) * yy(0); - }; - - Eigen::AlignedBox bbox_uv; + Eigen::AlignedBox bbox_triangle; for (auto uv : texcoords.rowwise()) { - bbox_uv.extend(uv.transpose()); + bbox_triangle.extend(uv.transpose()); } - const auto coordinate_min = to_coordinate(bbox_uv.min()); - const auto coordinate_max = to_coordinate(bbox_uv.max()); + auto coordinate_min = uv_to_coordinate(bbox_triangle.min(), width, height); + auto coordinate_max = uv_to_coordinate(bbox_triangle.max(), width, height); - const Eigen::Vector q0 = texcoords.row(0).transpose(); - const Eigen::Vector q1 = texcoords.row(1).transpose(); - const Eigen::Vector q2 = 
texcoords.row(2).transpose(); - const Scalar area = wedge(q1 - q0, q2 - q0); - if (std::fabs(area) <= 1e-7) return; - - const auto barycentric_weights = - [&](const Eigen::Vector& ij) -> Eigen::Vector { - const auto uv = from_coordinate(ij); - la_debug_assert(uv(0) >= 0.0); - la_debug_assert(uv(1) >= 0.0); - la_debug_assert(uv(0) < 1.0); - la_debug_assert(uv(1) < 1.0); - Scalar w0 = wedge(q1 - uv, q2 - uv); - Scalar w1 = wedge(q2 - uv, q0 - uv); - Scalar w2 = wedge(q0 - uv, q1 - uv); - const Scalar area_ = w0 + w1 + w2; - la_debug_assert(std::fabs(area_ - area) < 1e-6); - w0 /= area_; - w1 /= area_; - w2 /= area_; - [[maybe_unused]] const auto ww = Eigen::Vector(w0, w1, w2); - la_debug_assert(std::fabs(ww.sum() - 1.0) < 1e-6); - return {w0, w1, w2}; - }; + // Extend bbox to cover uv triangle. + if (coordinate_min(0) > 0) coordinate_min(0) -= 1; + if (coordinate_min(1) > 0) coordinate_min(1) -= 1; + if (coordinate_max(0) < width - 1) coordinate_max(0) += 1; + if (coordinate_max(1) < height - 1) coordinate_max(1) += 1; + + { + const Eigen::Vector q0 = texcoords.row(0).transpose(); + const Eigen::Vector q1 = texcoords.row(1).transpose(); + const Eigen::Vector q2 = texcoords.row(2).transpose(); + const Scalar area = wedge(q1 - q0, q2 - q0); + if (std::fabs(area) <= 1e-7) return; + } for (const auto jj : lagrange::range(coordinate_min(1), coordinate_max(1))) { la_debug_assert(jj < height - 1); @@ -98,27 +127,188 @@ void rasterize_triangle_data( const auto ij_bottom_right = Eigen::Vector(ii + 1, jj); const auto ij_top_left = Eigen::Vector(ii, jj + 1); const auto ij_top_right = Eigen::Vector(ii + 1, jj + 1); - const auto ww_bottom_left = barycentric_weights(ij_bottom_left); - const auto ww_bottom_right = barycentric_weights(ij_bottom_right); - const auto ww_top_left = barycentric_weights(ij_top_left); - const auto ww_top_right = barycentric_weights(ij_top_right); + // Negative orientation because v is flipped. 
const auto ijs = std::array, 4>{ ij_bottom_left, - ij_bottom_right, - ij_top_right, ij_top_left, + ij_top_right, + ij_bottom_right, }; - const auto wws = std::array, 4>{ - ww_bottom_left, - ww_bottom_right, - ww_top_right, - ww_top_left, - }; - callback(ijs, wws); + callback(ijs); + } + } +} + +template +Scalar sample_alpha( + const Eigen::Vector& uv, + const image::experimental::View3D& image) +{ + const auto w = static_cast(image.extent(0)); + const auto h = static_cast(image.extent(1)); + + // Convert UV to continuous pixel coordinates. Texel centers are at integer indices, + // so uv_from_coordinate maps texel (i,j) to UV ((i+0.5)/w, (j+0.5)/h). + // The inverse is: px = u*w - 0.5, py = v*h - 0.5. + const Scalar px = uv(0) * w - Scalar(0.5); + const Scalar py = uv(1) * h - Scalar(0.5); + + const int ix = static_cast(std::floor(px)); + const int iy = static_cast(std::floor(py)); + const int iw = static_cast(image.extent(0)); + const int ih = static_cast(image.extent(1)); + + const auto x0 = static_cast(std::clamp(ix, 0, iw - 1)); + const auto x1 = static_cast(std::clamp(ix + 1, 0, iw - 1)); + const auto y0 = static_cast(std::clamp(iy, 0, ih - 1)); + const auto y1 = static_cast(std::clamp(iy + 1, 0, ih - 1)); + + const Scalar fx = std::clamp(px - static_cast(ix), Scalar(0), Scalar(1)); + const Scalar fy = std::clamp(py - static_cast(iy), Scalar(0), Scalar(1)); + + // Read 4 alpha values. Flip v because (0,0) is the bottom-left corner in UV space. + const auto yflip = [&](size_t y) { return image.extent(1) - 1 - y; }; + const Scalar a00 = static_cast(image(x0, yflip(y0), 3)); + const Scalar a10 = static_cast(image(x1, yflip(y0), 3)); + const Scalar a01 = static_cast(image(x0, yflip(y1), 3)); + const Scalar a11 = static_cast(image(x1, yflip(y1), 3)); + + // Bilinear interpolation. 
+ return (Scalar(1) - fy) * ((Scalar(1) - fx) * a00 + fx * a10) + + fy * ((Scalar(1) - fx) * a01 + fx * a11); +} + +template +Eigen::Vector lift_uv_to_position( + const Eigen::Vector& uv, + const Eigen::Matrix& triangle_texcoords, + const Eigen::Matrix& triangle_vertices) +{ + const auto ww = barycentric_weights(triangle_texcoords, uv); + return triangle_vertices.transpose() * ww; +} + +template +bool predicate_impl( + EmitPolygon emit_polygon, + const image::experimental::View3D& image, + const Scalar& alpha_threshold, + const Eigen::Matrix& triangle_texcoords, + const Eigen::Matrix& triangle_vertices, + const std::array, 4>& ijs) +{ + la_debug_assert(ijs.size() == 4); + + // Clip uv triangle with current 2-cell. + // Ijs are axis aligned by construction, so are uvs. + Eigen::AlignedBox bbox_cell; + for (const auto& ij : ijs) { + la_debug_assert(ij(0) < image.extent(0)); + la_debug_assert(ij(1) < image.extent(1)); + const auto uv = uv_from_coordinate(ij, image.extent(0), image.extent(1)); + la_debug_assert(ij == uv_to_coordinate(uv, image.extent(0), image.extent(1))); + bbox_cell.extend(uv); + } + const internal::SmallPolygon2 triangle_poly = triangle_texcoords; + const auto uvs_cell = internal::clip_triangle_by_bbox(triangle_poly, bbox_cell); + la_debug_assert(uvs_cell.rows() <= 7); + + if (uvs_cell.rows() == 0) return true; + + // Sample alpha at each clipped polygon vertex. + const auto n = static_cast(uvs_cell.rows()); + StackVector alphas; + StackVector, 7> uvs; + for (Eigen::Index i = 0; i < uvs_cell.rows(); ++i) { + const Eigen::Vector uv = uvs_cell.row(i).transpose(); + uvs.emplace_back(uv); + alphas.emplace_back(sample_alpha(uv, image)); + } + + // Count opaque-transparent crossings to detect the ambiguous saddle case. 
+ size_t num_crossings = 0; + bool all_opaque = true; + for (size_t i = 0; i < n; ++i) { + if (!(alphas[i] > alpha_threshold)) all_opaque = false; + const size_t j = (i + 1) % n; + if ((alphas[i] > alpha_threshold) != (alphas[j] > alpha_threshold)) { + num_crossings++; } } + + // Helper: interpolate position at the alpha threshold crossing between vertices i and j. + auto make_crossing = [&](size_t i, size_t j) { + const Scalar ai = alphas[i]; + const Scalar aj = alphas[j]; + const Scalar t = std::clamp((alpha_threshold - ai) / (aj - ai), Scalar(0), Scalar(1)); + const Eigen::Vector uv_crossing = (Scalar(1) - t) * uvs[i] + t * uvs[j]; + return lift_uv_to_position(uv_crossing, triangle_texcoords, triangle_vertices); + }; + + // Determine whether ambiguous saddle runs should be merged or kept separate. + // Sample alpha at the cell center using bilinear interpolation. + bool split_runs = false; + if (num_crossings >= 4) { + const Eigen::Vector center_uv = bbox_cell.center(); + const Scalar center_alpha = sample_alpha(center_uv, image); + split_runs = !(center_alpha > alpha_threshold); + } + + if (!split_runs) { + // Simple case (0 or 2 crossings), or connected saddle: emit a single polygon. + StackVector, 14> pps; + for (size_t i = 0; i < n; ++i) { + const size_t j = (i + 1) % n; + if (alphas[i] > alpha_threshold) { + pps.emplace_back( + lift_uv_to_position(uvs[i], triangle_texcoords, triangle_vertices)); + } + if ((alphas[i] > alpha_threshold) != (alphas[j] > alpha_threshold)) { + pps.emplace_back(make_crossing(i, j)); + } + } + if (!pps.empty()) emit_polygon(pps); + } else { + // Disconnected saddle: emit separate polygons for each opaque run. + // Start walking from a transparent vertex so runs are cleanly delimited. 
+ size_t start = 0; + for (size_t i = 0; i < n; ++i) { + if (!(alphas[i] > alpha_threshold)) { + start = i; + break; + } + } + + StackVector, 14> pps; + for (size_t k = 0; k < n; ++k) { + const size_t i = (start + k) % n; + const size_t j = (i + 1) % n; + const bool i_opaque = alphas[i] > alpha_threshold; + const bool j_opaque = alphas[j] > alpha_threshold; + + if (i_opaque) { + pps.emplace_back( + lift_uv_to_position(uvs[i], triangle_texcoords, triangle_vertices)); + } + + if (i_opaque != j_opaque) { + pps.emplace_back(make_crossing(i, j)); + if (i_opaque && !j_opaque) { + // Leaving opaque region: finish this run. + emit_polygon(pps); + pps.clear(); + } + } + } + if (!pps.empty()) { + emit_polygon(pps); + } + } + + return all_opaque; } +} // namespace template auto extract_mesh_with_alpha_mask( @@ -140,10 +330,8 @@ auto extract_mesh_with_alpha_mask( la_runtime_assert(mesh.is_triangle_mesh()); la_runtime_assert(mesh.is_attribute_indexed(texcoord_id)); - const auto width = image.extent(0); - const auto height = image.extent(1); - la_runtime_assert(width > 0); - la_runtime_assert(height > 0); + la_runtime_assert(image.extent(0) > 0); + la_runtime_assert(image.extent(1) > 0); la_runtime_assert(image.extent(2) == 4, "expected rgba image"); logger().debug( @@ -153,15 +341,17 @@ auto extract_mesh_with_alpha_mask( mesh.get_num_facets(), mesh.get_attribute_name(texcoord_id)); logger().debug("texture {}x{}x{}", image.extent(0), image.extent(1), image.extent(2)); + logger().debug("alpha_threshold {}", options.alpha_threshold); const auto& texcoord_attr = mesh.template get_indexed_attribute(texcoord_id); const auto texcoord_indices = reshaped_view(texcoord_attr.indices(), 3); const auto texcoord_values = matrix_view(texcoord_attr.values()); const auto vertices = vertex_view(mesh); const auto facets = facet_view(mesh); + const auto get_triangle_data = [&](Index ff) { - Eigen::Matrix triangle_texcoords; - Eigen::Matrix triangle_vertices; + Eigen::Matrix triangle_texcoords; + 
Eigen::Matrix triangle_vertices; for (const Eigen::Index ii : lagrange::range(3)) { triangle_texcoords.row(ii) = texcoord_values.row(texcoord_indices(ff, ii)); triangle_vertices.row(ii) = vertices.row(facets(ff, ii)); @@ -171,90 +361,123 @@ auto extract_mesh_with_alpha_mask( return std::make_pair(triangle_texcoords, triangle_vertices); }; - tbb::concurrent_vector> triangles; - tbb::concurrent_vector> quads; + using TriMatrix = Eigen::Matrix; + using QuadMatrix = Eigen::Matrix; + + auto make_tri = [](const auto& p0, const auto& p1, const auto& p2) { + TriMatrix tri; + tri.row(0) = p0.transpose(); + tri.row(1) = p1.transpose(); + tri.row(2) = p2.transpose(); + return tri; + }; + + auto make_quad = [](const auto& p0, const auto& p1, const auto& p2, const auto& p3) { + QuadMatrix quad; + quad.row(0) = p0.transpose(); + quad.row(1) = p1.transpose(); + quad.row(2) = p2.transpose(); + quad.row(3) = p3.transpose(); + return quad; + }; + + struct Payload + { + std::vector triangles; + std::vector quads; + std::vector committed_triangles; + std::vector committed_quads; + }; + tbb::enumerable_thread_specific payloads; const auto loop = [&](const Index ff) { const auto triangle_data = get_triangle_data(ff); const auto& triangle_texcoords = std::get<0>(triangle_data); const auto& triangle_vertices = std::get<1>(triangle_data); + auto& payload = payloads.local(); + la_debug_assert(payload.triangles.empty()); + la_debug_assert(payload.quads.empty()); + bool all_opaque = true; rasterize_triangle_data( triangle_texcoords, - width, - height, - [&](const std::array, 4>& ijs, - const std::array, 4>& wws) { - la_debug_assert(ijs.size() == 4); - la_debug_assert(wws.size() == 4); - Eigen::Vector mean_wws = Eigen::Vector::Zero(); - for (const auto& ww : wws) mean_wws += ww; - mean_wws.array() /= static_cast(wws.size()); - const bool center_outside_triangle = mean_wws.minCoeff() < 0.0; - StackVector, 4> pps; - for (const auto kk : lagrange::range(4)) { - const auto& ww = wws[kk]; - if 
(center_outside_triangle && ww.minCoeff() < 0.0) continue; - const auto& ij = ijs[kk]; - la_debug_assert(ij(0) < width); - la_debug_assert(ij(1) < height); - // Flip v because (0, 0) is assumed to be the bottom-left corner. - const bool is_opaque = - image(ij(0), height - 1 - ij(1), 3) > options.alpha_threshold; - if (!is_opaque) continue; - const Eigen::Vector pp = triangle_vertices.transpose() * ww; - pps.emplace_back(pp); - } - switch (pps.size()) { - case 0: - case 1: - case 2: break; - case 3: { - Eigen::Matrix triangle; - triangle.row(0) = pps[0].transpose(); - triangle.row(1) = pps[1].transpose(); - triangle.row(2) = pps[2].transpose(); - triangles.emplace_back(triangle); - } break; - case 4: { - Eigen::Matrix quad; - quad.row(0) = pps[0].transpose(); - quad.row(1) = pps[1].transpose(); - quad.row(2) = pps[2].transpose(); - quad.row(3) = pps[3].transpose(); - quads.emplace_back(quad); - } break; - default: la_runtime_assert(false); break; - } + image.extent(0), + image.extent(1), + [&](const std::array, 4>& ijs) { + auto emit_polygon = [&](const auto& pps) { + // Fan-triangulate the polygon (vertices are in convex order). + if (pps.size() == 3) { + payload.triangles.emplace_back(make_tri(pps[0], pps[1], pps[2])); + } else if (pps.size() == 4) { + payload.quads.emplace_back(make_quad(pps[0], pps[1], pps[2], pps[3])); + } else if (pps.size() >= 5) { + for (size_t k = 1; k + 1 < pps.size(); ++k) { + payload.triangles.emplace_back(make_tri(pps[0], pps[k], pps[k + 1])); + } + } + }; + all_opaque &= predicate_impl( + emit_polygon, + image, + static_cast(options.alpha_threshold), + triangle_texcoords, + triangle_vertices, + ijs); }); + if (all_opaque) { + // Bypass tessellation if all texels are opaque. 
+ payload.committed_triangles.emplace_back(triangle_vertices); + } else { + payload.committed_triangles.insert( + payload.committed_triangles.end(), + payload.triangles.begin(), + payload.triangles.end()); + payload.committed_quads.insert( + payload.committed_quads.end(), + payload.quads.begin(), + payload.quads.end()); + } + payload.triangles.clear(); + payload.quads.clear(); }; tbb::parallel_for(static_cast(0), mesh.get_num_facets(), loop); - logger().debug("num_triangles {}", triangles.size()); - logger().debug("num_quads {}", quads.size()); - - SurfaceMesh mesh_; - for (const auto& triangle : triangles) { - const auto ii = mesh_.get_num_vertices(); - mesh_.add_vertices(3, [&](const Index vv, span pp) { - la_debug_assert(vv < 3); - la_debug_assert(pp.size() == 3); - const auto pp_ = triangle.row(vv); - std::copy(pp_.data(), pp_.data() + pp.size(), pp.begin()); - }); - la_debug_assert(mesh_.get_num_vertices() == ii + 3); - mesh_.add_triangle(ii + 2, ii + 1, ii + 0); + + if (logger().should_log(spdlog::level::debug)) { + size_t num_triangles = 0; + size_t num_quads = 0; + for (const auto& payload : payloads) { + num_triangles += payload.committed_triangles.size(); + num_quads += payload.committed_quads.size(); + } + logger().debug("num_committed_triangles {}", num_triangles); + logger().debug("num_committed_quads {}", num_quads); } - for (const auto& quad : quads) { - const auto ii = mesh_.get_num_vertices(); - mesh_.add_vertices(4, [&](const Index vv, span pp) { - la_debug_assert(vv < 4); - la_debug_assert(pp.size() == 3); - const auto pp_ = quad.row(vv); - std::copy(pp_.data(), pp_.data() + pp.size(), pp.begin()); - }); - la_debug_assert(mesh_.get_num_vertices() == ii + 4); - mesh_.add_quad(ii + 3, ii + 2, ii + 1, ii + 0); + + SurfaceMesh result; + for (const auto& payload : payloads) { + for (const auto& triangle : payload.committed_triangles) { + const auto ii = result.get_num_vertices(); + result.add_vertices(3, [&](const Index vv, span pp) { + 
la_debug_assert(vv < 3); + la_debug_assert(pp.size() == 3); + const auto row = triangle.row(vv); + std::copy(row.data(), row.data() + pp.size(), pp.begin()); + }); + la_debug_assert(result.get_num_vertices() == ii + 3); + result.add_triangle(ii + 0, ii + 1, ii + 2); + } + for (const auto& quad : payload.committed_quads) { + const auto ii = result.get_num_vertices(); + result.add_vertices(4, [&](const Index vv, span pp) { + la_debug_assert(vv < 4); + la_debug_assert(pp.size() == 3); + const auto row = quad.row(vv); + std::copy(row.data(), row.data() + pp.size(), pp.begin()); + }); + la_debug_assert(result.get_num_vertices() == ii + 4); + result.add_quad(ii + 0, ii + 1, ii + 2, ii + 3); + } } - return mesh_; + return result; } #define LA_X_extract_mesh_with_alpha_mask(_, Scalar, Index) \ diff --git a/modules/texproc/tests/test_mesh_with_alpha_mask.cpp b/modules/texproc/tests/test_mesh_with_alpha_mask.cpp index 1e0f0a5b..a5182834 100644 --- a/modules/texproc/tests/test_mesh_with_alpha_mask.cpp +++ b/modules/texproc/tests/test_mesh_with_alpha_mask.cpp @@ -10,12 +10,15 @@ * governing permissions and limitations under the License. */ +#include "../shared/shared_utils.h" #include "image_helpers.h" #include #include #include +#include +#include #include #include #include @@ -61,7 +64,6 @@ void run_mesh_with_alpha_mask(const lagrange::fs::path& path) SECTION("extract mesh") { - // extract mesh from mesh and alpha mask lagrange::texproc::ExtractMeshWithAlphaMaskOptions extract_options; extract_options.texcoord_id = texcoord_id; extract_options.alpha_threshold = material.alpha_cutoff; @@ -73,6 +75,98 @@ void run_mesh_with_alpha_mask(const lagrange::fs::path& path) } } +TEST_CASE("ambiguous marching squares saddle case", "[texproc][alpha_mask]") +{ + using Scalar = float; + using Index = uint32_t; + + // Create a single triangle mesh covering most of [0,1]². 
+ lagrange::SurfaceMesh mesh(3); + mesh.add_vertex({0.0f, 0.0f, 0.0f}); + mesh.add_vertex({1.0f, 0.0f, 0.0f}); + mesh.add_vertex({0.5f, 1.0f, 0.0f}); + mesh.add_triangle(0, 1, 2); + + // Add indexed UV attribute matching the vertex XY positions. + const Scalar uv_data[] = {0.0f, 0.0f, 1.0f, 0.0f, 0.5f, 1.0f}; + const Index uv_indices[] = {0, 1, 2}; + auto texcoord_id = mesh.create_attribute( + "texcoord_0", + lagrange::AttributeElement::Indexed, + 2, + lagrange::AttributeUsage::UV, + {uv_data, 6}, + {uv_indices, 3}); + + // Create 2x2 RGBA image with saddle pattern. + // sample_alpha(uv) reads image(ij(0), height-1-ij(1), 3). + // BL texel (0,0) -> image(0,1,3) opaque + // BR texel (1,0) -> image(1,1,3) transparent + // TL texel (0,1) -> image(0,0,3) transparent + // TR texel (1,1) -> image(1,0,3) opaque + auto image = lagrange::image::experimental::create_image(2, 2, 4); + for (size_t x = 0; x < 2; ++x) { + for (size_t y = 0; y < 2; ++y) { + for (size_t c = 0; c < 4; ++c) { + image(x, y, c) = 0.0f; + } + } + } + image(0, 1, 3) = 1.0f; // BL opaque + image(1, 0, 3) = 1.0f; // TR opaque + + // Save the saddle input as a glTF scene with embedded texture (enable for debugging) + if (0) { + lagrange::scene::internal::SingleMeshToSceneOptions scene_options; + scene_options.alpha_mode = lagrange::scene::MaterialExperimental::AlphaMode::Blend; + scene_options.alpha_cutoff = 0.5f; + auto scene = lagrange::scene::internal::single_mesh_to_scene( + lagrange::SurfaceMesh(mesh), + image.to_mdspan(), + scene_options); + lagrange::io::save_scene_gltf("saddle_input.glb", scene); + } + + SECTION("disconnected saddle produces separate polygons") + { + // center_alpha = (1+0+0+1)/4 = 0.5, threshold = 0.5 -> disconnected (two polygons) + lagrange::texproc::ExtractMeshWithAlphaMaskOptions options; + options.texcoord_id = texcoord_id; + options.alpha_threshold = 0.5f; + const auto result = + lagrange::texproc::extract_mesh_with_alpha_mask(mesh, image.to_mdspan(), options); + // 
lagrange::io::save_mesh("saddle_disconnected.obj", result); + + // Should produce at least 2 facets (one per opaque corner). + REQUIRE(result.get_num_facets() >= 2); + + // All output facets must have positive signed area (no degenerate/inverted triangles). + const auto verts = lagrange::vertex_view(result); + for (Index f = 0; f < result.get_num_facets(); ++f) { + const auto facet = result.get_facet_vertices(f); + if (facet.size() >= 3) { + Eigen::Vector3f v0 = verts.row(facet[0]).transpose(); + Eigen::Vector3f v1 = verts.row(facet[1]).transpose(); + Eigen::Vector3f v2 = verts.row(facet[2]).transpose(); + float area = (v1 - v0).cross(v2 - v0).z() * 0.5f; + REQUIRE(area > 1e-6f); + } + } + } + + SECTION("connected saddle produces merged polygon") + { + // center_alpha = 0.5 > 0.4 -> connected (single merged polygon) + lagrange::texproc::ExtractMeshWithAlphaMaskOptions options; + options.texcoord_id = texcoord_id; + options.alpha_threshold = 0.4f; + const auto result = + lagrange::texproc::extract_mesh_with_alpha_mask(mesh, image.to_mdspan(), options); + // lagrange::io::save_mesh("saddle_connected.obj", result); + REQUIRE(result.get_num_facets() >= 1); + } +} + TEST_CASE("extract cube with transparent numbers", "[texproc][alpha_mask]" LA_CORP_FLAG) { const auto path = lagrange::testing::get_data_dir() / "corp/texproc/alpha_cube_numbers.glb"; diff --git a/modules/texproc/tests/test_texture_processing.cpp b/modules/texproc/tests/test_texture_processing.cpp index 565056aa..1fee47bb 100644 --- a/modules/texproc/tests/test_texture_processing.cpp +++ b/modules/texproc/tests/test_texture_processing.cpp @@ -92,7 +92,7 @@ TEST_CASE("Grid bounds", "[texproc]" LA_SLOW_DEBUG_FLAG LA_CORP_FLAG) lagrange::testing::get_data_path("corp/texproc/segfault/scene_with_cameras.glb"), scene_options); - const auto& [mesh, _] = lagrange::texproc::single_mesh_from_scene(scene); + const auto& [mesh, _] = lagrange::scene::internal::single_mesh_from_scene(scene); const auto cameras = 
lagrange::texproc::cameras_from_scene(scene); REQUIRE(cameras.size() == 16); @@ -125,7 +125,7 @@ TEST_CASE("Pumpkin pipeline", "[texproc]" LA_SLOW_DEBUG_FLAG LA_CORP_FLAG) lagrange::testing::get_data_path("corp/texproc/prepared/pumpkin.glb"), scene_options); - const auto& [mesh, _] = lagrange::texproc::single_mesh_from_scene(scene); + const auto& [mesh, _] = lagrange::scene::internal::single_mesh_from_scene(scene); const auto cameras = lagrange::texproc::cameras_from_scene(scene); REQUIRE(cameras.size() == 16); @@ -166,7 +166,7 @@ TEST_CASE("Check benchmark", "[texproc][!benchmark]" LA_CORP_FLAG) lagrange::testing::get_data_path("corp/texproc/prepared/pumpkin.glb"), scene_options); - const auto mesh = std::get<0>(lagrange::texproc::single_mesh_from_scene(scene)); + const auto mesh = std::get<0>(lagrange::scene::internal::single_mesh_from_scene(scene)); const auto cameras = lagrange::texproc::cameras_from_scene(scene); REQUIRE(cameras.size() == 16); diff --git a/modules/ui/src/utils/colormap.cpp b/modules/ui/src/utils/colormap.cpp index 30db5b80..461cd612 100644 --- a/modules/ui/src/utils/colormap.cpp +++ b/modules/ui/src/utils/colormap.cpp @@ -12,6 +12,8 @@ #include #include +#include + namespace { #include #include diff --git a/modules/volume/include/lagrange/volume/mesh_to_volume.h b/modules/volume/include/lagrange/volume/mesh_to_volume.h index 2262eff7..d9b91ede 100644 --- a/modules/volume/include/lagrange/volume/mesh_to_volume.h +++ b/modules/volume/include/lagrange/volume/mesh_to_volume.h @@ -46,7 +46,7 @@ struct MeshToVolumeOptions /// /// Converts a triangle mesh to a OpenVDB sparse voxel grid. /// -/// @param[in] mesh Input mesh. Must be a triangle mesh, a quad-mesh, or a quad-dominant +/// @param[in] mesh Input mesh. Must be a triangle mesh, a quad mesh, or a quad-dominant /// mesh. /// @param[in] options Conversion options. 
/// diff --git a/modules/volume/python/CMakeLists.txt b/modules/volume/python/CMakeLists.txt index bda6e5a0..adc3331a 100644 --- a/modules/volume/python/CMakeLists.txt +++ b/modules/volume/python/CMakeLists.txt @@ -11,10 +11,17 @@ # lagrange_add_python_binding() -target_link_libraries(lagrange_python PRIVATE OpenVDB::nanovdb) +if(TARGET OpenVDB::nanovdb) + target_link_libraries(lagrange_python PRIVATE OpenVDB::nanovdb) + target_compile_definitions(lagrange_python PRIVATE NANOVDB_ENABLED) +endif() if(SKBUILD) - foreach(vdb_target IN ITEMS OpenVDB::openvdb OpenVDB::nanovdb) + set(_vdb_targets OpenVDB::openvdb) + if(TARGET OpenVDB::nanovdb) + list(APPEND _vdb_targets OpenVDB::nanovdb) + endif() + foreach(vdb_target IN LISTS _vdb_targets) get_target_property(_aliased ${vdb_target} ALIASED_TARGET) if(_aliased) diff --git a/modules/volume/python/src/volume.cpp b/modules/volume/python/src/volume.cpp index 97082efd..f7547a32 100644 --- a/modules/volume/python/src/volume.cpp +++ b/modules/volume/python/src/volume.cpp @@ -24,9 +24,11 @@ #include #include #include +#ifdef NANOVDB_ENABLED #include #include #include +#endif #include #include // clang-format on @@ -126,6 +128,7 @@ uint32_t to_vdb_compression(Compression compression) } } +#ifdef NANOVDB_ENABLED nanovdb::io::Codec to_nanovdb_compression(Compression compression) { switch (compression) { @@ -135,6 +138,7 @@ nanovdb::io::Codec to_nanovdb_compression(Compression compression) default: throw Error("Unknown compression type."); } } +#endif struct GridWrapper { @@ -180,9 +184,11 @@ openvdb::GridBase::Ptr grid_from_data(std::variant input_pa grids && grids->size() == 1, "Input VDB must contain exactly one grid."); return (*grids)[0]; +#ifdef NANOVDB_ENABLED } else if (input_path->extension() == ".nvdb") { auto handle = nanovdb::io::readGrid(input_path->string()); return nanovdb::tools::nanoToOpenVDB(handle); +#endif } else { throw Error("Unsupported input file extension: " + input_path->extension().string()); } @@ -202,6 
+208,7 @@ openvdb::GridBase::Ptr grid_from_data(std::variant input_pa "Input VDB must contain exactly one grid."); return (*grids)[0]; } catch (const openvdb::IoError&) { +#ifdef NANOVDB_ENABLED // Not a VDB grid, try NanoVDB LA_IGNORE_DEPRECATION_WARNING_BEGIN // TODO: Switch to std::ispanstream() when we can use C++23 (probably when I @@ -217,6 +224,9 @@ openvdb::GridBase::Ptr grid_from_data(std::variant input_pa } catch (const std::runtime_error&) { throw Error("Invalid input grid: not a valid VDB or NanoVDB grid."); } +#else + throw Error("Invalid input grid: not a valid VDB grid."); +#endif } } else { throw Error("Invalid input grid: not a path or buffer."); @@ -234,9 +244,11 @@ void grid_to_file( file.setCompression(to_vdb_compression(compression)); file.write({grid}); file.close(); +#ifdef NANOVDB_ENABLED } else if (output_path.extension() == ".nvdb") { auto handle = nanovdb::tools::createNanoGrid(*grid); nanovdb::io::writeGrid(output_path.string(), handle, to_nanovdb_compression(compression)); +#endif } else { throw Error("Unsupported output file extension: " + output_path.string()); } @@ -265,9 +277,11 @@ std::string grid_to_buffer( openvdb::io::Stream oss(output_stream); oss.setCompression(to_vdb_compression(compression)); oss.write({grid}); +#ifdef NANOVDB_ENABLED } else if (ext == "nvdb") { auto handle = nanovdb::tools::createNanoGrid(*grid); nanovdb::io::writeGrid(output_stream, handle, to_nanovdb_compression(compression)); +#endif } else { throw Error("Unsupported grid extension: " + ext); } @@ -316,6 +330,7 @@ sample_trilinear_index_space(const GridWrapper& self, IndexArray& idx, VecType) void populate_volume_module(nb::module_& m) { + openvdb::initialize(); using Scalar = double; using Index = uint32_t; diff --git a/modules/volume/python/tests/assets.py b/modules/volume/python/tests/assets.py deleted file mode 100644 index f1666094..00000000 --- a/modules/volume/python/tests/assets.py +++ /dev/null @@ -1,47 +0,0 @@ -# -# Copyright 2025 Adobe. 
All rights reserved. -# This file is licensed to you under the Apache License, Version 2.0 (the "License"); -# you may not use this file except in compliance with the License. You may obtain a copy -# of the License at http://www.apache.org/licenses/LICENSE-2.0 -# -# Unless required by applicable law or agreed to in writing, software distributed under -# the License is distributed on an "AS IS" BASIS, WITHOUT WARRANTIES OR REPRESENTATIONS -# OF ANY KIND, either express or implied. See the License for the specific language -# governing permissions and limitations under the License. -# -import lagrange - -import numpy as np -import pytest - - -@pytest.fixture -def cube(): - vertices = np.array( - [ - [0, 0, 0], - [1, 0, 0], - [1, 1, 0], - [0, 1, 0], - [0, 0, 1], - [1, 0, 1], - [1, 1, 1], - [0, 1, 1], - ], - dtype=float, - ) - facets = np.array( - [ - [0, 3, 2, 1], - [4, 5, 6, 7], - [1, 2, 6, 5], - [4, 7, 3, 0], - [2, 3, 7, 6], - [0, 1, 5, 4], - ], - dtype=np.uint32, - ) - mesh = lagrange.SurfaceMesh() - mesh.vertices = vertices - mesh.facets = facets - return mesh diff --git a/modules/volume/python/tests/test_volume.py b/modules/volume/python/tests/test_volume.py index b56f562b..557d53f9 100644 --- a/modules/volume/python/tests/test_volume.py +++ b/modules/volume/python/tests/test_volume.py @@ -10,13 +10,11 @@ # governing permissions and limitations under the License. # import lagrange -from lagrange.lagrange.volume import Grid # why?? 
+from lagrange.volume import Grid import numpy as np import tempfile import pathlib -from .assets import cube # noqa: F401 - class TestMeshToVolume: def test_bbox(self, cube): diff --git a/modules/volume/src/mesh_to_volume.cpp b/modules/volume/src/mesh_to_volume.cpp index e49255dd..3d830d12 100644 --- a/modules/volume/src/mesh_to_volume.cpp +++ b/modules/volume/src/mesh_to_volume.cpp @@ -12,6 +12,7 @@ #include #include +#include #include #include #include @@ -96,14 +97,6 @@ auto mesh_to_volume(const SurfaceMesh& mesh_, const MeshToVolumeO auto mesh = SurfaceMesh::stripped_copy(mesh_); la_runtime_assert(mesh.get_dimension() == 3, "Input mesh must be 3D"); - if (mesh.is_hybrid()) { - for (Index f = 0; f < mesh.get_num_facets(); ++f) { - if (auto nv = mesh.get_facet_size(f); nv < 3 || nv > 4) { - throw Error( - fmt::format("Facet size should be 3 or 4, but f{} has #{} vertices", f, nv)); - } - } - } // Winding number requires triangle meshes. To ensure consistent discretization, we triangulate // before letting OpenVDB compute the unsigned distance field. @@ -111,6 +104,27 @@ auto mesh_to_volume(const SurfaceMesh& mesh_, const MeshToVolumeO if (!mesh.is_triangle_mesh()) { triangulate_polygonal_facets(mesh); } + } else { + if (mesh.is_hybrid() || mesh.get_vertex_per_facet() > 4) { + // Check that the maximum facet size is <= 4. If not, we need to triangulate. 
+ for (Index f = 0; f < mesh.get_num_facets(); ++f) { + Index nv = mesh.get_facet_size(f); + if (nv > 4) { + logger().debug("Triangulating mesh because of facets with > 4 vertices"); + triangulate_polygonal_facets(mesh); + break; + } + } + } + } + + if (mesh.is_hybrid()) { + for (Index f = 0; f < mesh.get_num_facets(); ++f) { + if (auto nv = mesh.get_facet_size(f); nv < 3 || nv > 4) { + throw Error( + fmt::format("Facet size should be 3 or 4, but f{} has #{} vertices", f, nv)); + } + } } openvdb::initialize(); @@ -118,10 +132,7 @@ auto mesh_to_volume(const SurfaceMesh& mesh_, const MeshToVolumeO auto voxel_size = options.voxel_size; if (voxel_size < 0) { // Compute bbox - Eigen::AlignedBox bbox; - for (auto p : vertex_view(mesh).rowwise()) { - bbox.extend(p.transpose()); - } + auto bbox = mesh_bbox<3>(mesh); const Scalar diag = bbox.diagonal().norm(); voxel_size = std::abs(voxel_size); diff --git a/modules/volume/tests/test_voxelization.cpp b/modules/volume/tests/test_voxelization.cpp index a4194444..0b169de5 100644 --- a/modules/volume/tests/test_voxelization.cpp +++ b/modules/volume/tests/test_voxelization.cpp @@ -10,12 +10,15 @@ * governing permissions and limitations under the License. 
*/ #include +#include #include #include #include #include #include +#include + #ifdef LAGRANGE_ENABLE_LEGACY_FUNCTIONS TEST_CASE("voxelization: reproducibility (legacy)", "[volume]") { @@ -66,3 +69,38 @@ TEST_CASE("voxelization: winding number", "[volume]") REQUIRE(mesh3.get_num_vertices() > mesh2.get_num_vertices()); REQUIRE(mesh3.get_num_facets() > mesh2.get_num_facets()); } + +TEST_CASE("mesh_to_volume: polygonal mesh", "[volume]") +{ + using Scalar = float; + using Index = uint32_t; + + lagrange::volume::MeshToVolumeOptions m2v_opt; + m2v_opt.signing_method = lagrange::volume::MeshToVolumeOptions::Sign::FloodFill; + m2v_opt.voxel_size = 0.1; + + SECTION("hybrid mesh") + { + auto mesh = lagrange::testing::load_surface_mesh( + "open/core/poly/mixedFaringPart.obj"); + REQUIRE(mesh.is_hybrid()); + auto grid = lagrange::volume::mesh_to_volume(mesh, m2v_opt); + REQUIRE(grid->activeVoxelCount() > 0); + } + + SECTION("poly mesh") + { + lagrange::SurfaceMesh mesh; + mesh.add_vertices(7); + auto vertices = vertex_ref(mesh); + for (Index i = 0; i < 7; ++i) { + vertices.row(i) << std::cos(2 * lagrange::internal::pi * i / 7), + std::sin(2 * lagrange::internal::pi * i / 7), 0; + } + mesh.add_polygon({0, 1, 2, 3, 4, 5, 6}); + REQUIRE(mesh.is_regular()); + REQUIRE(mesh.get_vertex_per_facet() == 7); + auto grid = lagrange::volume::mesh_to_volume(mesh, m2v_opt); + REQUIRE(grid->activeVoxelCount() > 0); + } +} diff --git a/modules/winding/examples/sample_points_in_mesh.cpp b/modules/winding/examples/sample_points_in_mesh.cpp index da84b732..9402219f 100644 --- a/modules/winding/examples/sample_points_in_mesh.cpp +++ b/modules/winding/examples/sample_points_in_mesh.cpp @@ -11,6 +11,7 @@ */ #include #include +#include #include #include @@ -56,10 +57,7 @@ int main(int argc, char** argv) auto mesh = lagrange::io::load_mesh(args.input); // Compute bbox - Eigen::AlignedBox bbox; - for (auto p : vertex_view(mesh).rowwise()) { - bbox.extend(p.transpose()); - } + auto bbox = 
lagrange::mesh_bbox<3>(mesh); std::uniform_real_distribution px(bbox.min().x(), bbox.max().x()); std::uniform_real_distribution py(bbox.min().y(), bbox.max().y()); std::uniform_real_distribution pz(bbox.min().z(), bbox.max().z()); diff --git a/modules/winding/tests/test_fast_winding_number.cpp b/modules/winding/tests/test_fast_winding_number.cpp index 2fbdacfb..adb0f8cb 100644 --- a/modules/winding/tests/test_fast_winding_number.cpp +++ b/modules/winding/tests/test_fast_winding_number.cpp @@ -10,6 +10,7 @@ * governing permissions and limitations under the License. */ #include +#include #include #include #include @@ -88,10 +89,7 @@ TEST_CASE("fast winding number", "[winding][!benchmark]") auto mesh = lagrange::testing::load_surface_mesh("open/core/dragon.obj"); - Eigen::AlignedBox bbox; - for (auto p : vertex_view(mesh).rowwise()) { - bbox.extend(p.transpose()); - } + auto bbox = lagrange::mesh_bbox<3>(mesh); std::uniform_real_distribution px(bbox.min().x(), bbox.max().x()); std::uniform_real_distribution py(bbox.min().y(), bbox.max().y()); std::uniform_real_distribution pz(bbox.min().z(), bbox.max().z()); diff --git a/pyproject.toml b/pyproject.toml index 7f0270bd..0f8d44f9 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -102,10 +102,11 @@ LAGRANGE_MODULE_PRIMITIVE = true LAGRANGE_MODULE_PYTHON = true LAGRANGE_MODULE_RAYCASTING = true LAGRANGE_MODULE_SCENE = true +LAGRANGE_MODULE_SERIALIZATION2 = true LAGRANGE_MODULE_SOLVER = true LAGRANGE_MODULE_SUBDIVISION = true -LAGRANGE_MODULE_VOLUME = true LAGRANGE_MODULE_TEXPROC = true +LAGRANGE_MODULE_VOLUME = true LAGRANGE_UNIT_TESTS = false LAGRANGE_WITH_ASSIMP = true TBB_PREFER_STATIC = false @@ -118,7 +119,6 @@ line_length = 100 [tool.ruff] line-length = 100 -lint.extend-per-file-ignores."modules/**/python/tests/test_*.py" = [ "F811" ] [tool.pyproject-fmt] indent = 4 From b0afe812145ec785026a09e45c6d5c352821157a Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81mie=20Dumas?= Date: Mon, 30 Mar 2026 11:43:46 
-0700 Subject: [PATCH 2/5] Increase swap space on Linux. --- .github/workflows/continuous.yaml | 10 +++++++++- 1 file changed, 9 insertions(+), 1 deletion(-) diff --git a/.github/workflows/continuous.yaml b/.github/workflows/continuous.yaml index 131a7b7a..58f8eff3 100644 --- a/.github/workflows/continuous.yaml +++ b/.github/workflows/continuous.yaml @@ -58,10 +58,18 @@ jobs: docker-images: true swap-storage: true - - name: Show disk space + - name: Set swap space + if: runner.os == 'Linux' && matrix.compiler == 'gcc' && matrix.config == 'RelWithDebInfo' + uses: pierotofy/set-swap-space@fc79b3f + with: + swap-size-gb: 16 + + - name: Show disk and memory space run: | echo "disk usage:" df -h + echo "memory:" + free -h - name: Select build dir (Linux) if: runner.os == 'Linux' From ca5d116dc8468635a808fe69aa3c98c0b64ba3e1 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81mie=20Dumas?= Date: Mon, 30 Mar 2026 11:54:29 -0700 Subject: [PATCH 3/5] Use full sha1. --- .github/workflows/continuous.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous.yaml b/.github/workflows/continuous.yaml index 58f8eff3..42a1fbff 100644 --- a/.github/workflows/continuous.yaml +++ b/.github/workflows/continuous.yaml @@ -60,7 +60,7 @@ jobs: - name: Set swap space if: runner.os == 'Linux' && matrix.compiler == 'gcc' && matrix.config == 'RelWithDebInfo' - uses: pierotofy/set-swap-space@fc79b3f + uses: pierotofy/set-swap-space@fc79b3f67fa8a838184ce84a674ca12238d2c761 with: swap-size-gb: 16 From a97537bdc0158c5fa5e5471e0ebf5dfdd0f9b95d Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81mie=20Dumas?= Date: Mon, 30 Mar 2026 12:12:02 -0700 Subject: [PATCH 4/5] Also use mold. 
--- .github/workflows/continuous.yaml | 3 +++ 1 file changed, 3 insertions(+) diff --git a/.github/workflows/continuous.yaml b/.github/workflows/continuous.yaml index 42a1fbff..dfffc6e3 100644 --- a/.github/workflows/continuous.yaml +++ b/.github/workflows/continuous.yaml @@ -104,6 +104,9 @@ jobs: - name: Install Ninja uses: seanmiddleditch/gha-setup-ninja@master + - name: Install mold + uses: rui314/setup-mold@v1 + - name: Dependencies (Linux) if: runner.os == 'Linux' run: | From 55d6b7f83661cce8737661889e73fd51f3784e67 Mon Sep 17 00:00:00 2001 From: =?UTF-8?q?Je=CC=81re=CC=81mie=20Dumas?= Date: Mon, 30 Mar 2026 12:58:15 -0700 Subject: [PATCH 5/5] Fix macos ci. --- .github/workflows/continuous.yaml | 2 +- 1 file changed, 1 insertion(+), 1 deletion(-) diff --git a/.github/workflows/continuous.yaml b/.github/workflows/continuous.yaml index dfffc6e3..dad633ee 100644 --- a/.github/workflows/continuous.yaml +++ b/.github/workflows/continuous.yaml @@ -69,7 +69,7 @@ jobs: echo "disk usage:" df -h echo "memory:" - free -h + if command -v free &>/dev/null; then free -h; else sysctl hw.memsize; fi - name: Select build dir (Linux) if: runner.os == 'Linux'