commit 413a3e5cee577a09efe984774bd581551cc9d2ab Author: Tzu-Mao Li Date: Thu Sep 3 22:30:30 2020 -0400 initial commit diff --git a/.gitignore b/.gitignore new file mode 100644 index 0000000..bb2ce51 --- /dev/null +++ b/.gitignore @@ -0,0 +1,10 @@ +build +apps/results +apps/files +apps/__pycache__ +compile_commands.json +.vimrc +diffvg.egg-info +dist +__pycache__ +.DS_Store diff --git a/.gitmodules b/.gitmodules new file mode 100644 index 0000000..30869ff --- /dev/null +++ b/.gitmodules @@ -0,0 +1,6 @@ +[submodule "pybind11"] + path = pybind11 + url = https://github.com/pybind/pybind11 +[submodule "thrust"] + path = thrust + url = https://github.com/thrust/thrust.git diff --git a/CMakeLists.txt b/CMakeLists.txt new file mode 100644 index 0000000..233e4be --- /dev/null +++ b/CMakeLists.txt @@ -0,0 +1,140 @@ +cmake_minimum_required(VERSION 3.12) + +project(diffvg VERSION 0.0.1 DESCRIPTION "Differentiable Vector Graphics") + +set(CMAKE_MODULE_PATH ${CMAKE_MODULE_PATH} "${CMAKE_SOURCE_DIR}/cmake/") +set(CMAKE_EXPORT_COMPILE_COMMANDS ON) + +if(WIN32) + find_package(Python 3.6 COMPONENTS Development REQUIRED) +else() + find_package(Python 3.7 COMPONENTS Development REQUIRED) +endif() +add_subdirectory(pybind11) + +option(DIFFVG_CUDA "Build diffvg with GPU code path?" ON) + +if(DIFFVG_CUDA) + message(STATUS "Build with CUDA support") + find_package(CUDA 10 REQUIRED) + set(CMAKE_CUDA_STANDARD 11) + if(NOT WIN32) + # Hack: for some reason the line above doesn't work on some Linux systems. + set(CUDA_NVCC_FLAGS "${CUDA_NVCC_FLAGS} -std=c++11") + #set(CUDA_NVCC_FLAGS_DEBUG "-g -G") + endif() +else() + message(STATUS "Build without CUDA support") + find_package(Thrust REQUIRED) +endif() + +# include_directories(${CMAKE_SOURCE_DIR}/pybind11/include) +include_directories(${PYTHON_INCLUDE_PATH}) +find_package(PythonLibs REQUIRED) +include_directories(${PYTHON_INCLUDE_PATH}) +include_directories(${PYTHON_INCLUDE_DIRS}) +include_directories(pybind11/include) +if(DIFFVG_CUDA) + link_directories(${CUDA_LIBRARIES}) +else() + include_directories(${THRUST_INCLUDE_DIR}) +endif() + +if(NOT MSVC) + # These compile definitions are not meaningful for MSVC + add_compile_options(-Wall -g -O3 -fvisibility=hidden -Wno-unknown-pragmas) +else() + add_compile_options(/Wall /Zi) + add_link_options(/DEBUG) +endif() + +if(NOT DIFFVG_CUDA) + add_compile_options("-DTHRUST_DEVICE_SYSTEM=THRUST_DEVICE_SYSTEM_CPP") +endif() + +set(SRCS atomic.h + color.h + cdf.h + cuda_utils.h + diffvg.h + edge_query.h + filter.h + matrix.h + parallel.h + pcg.h + ptr.h + sample_boundary.h + scene.h + shape.h + solve.h + vector.h + within_distance.h + winding_number.h + atomic.cpp + color.cpp + diffvg.cpp + parallel.cpp + scene.cpp + shape.cpp) + +if(DIFFVG_CUDA) + add_compile_definitions(COMPILE_WITH_CUDA) + set_source_files_properties( + diffvg.cpp + scene.cpp + PROPERTIES CUDA_SOURCE_PROPERTY_FORMAT OBJ) + + cuda_add_library(diffvg MODULE ${SRCS}) +else() + add_library(diffvg MODULE ${SRCS}) +endif() + +if(APPLE) + # The "-undefined dynamic_lookup" is a hack for systems with + # multiple Python installed. If we link a particular Python version + # here, and we import it with a different Python version later. + # likely a segmentation fault. + # The solution for Linux Mac OS machines, as mentioned in + # https://github.com/pybind/pybind11/blob/master/tools/pybind11Tools.cmake + # is to not link against Python library at all and resolve the symbols + # at compile time. 
+ set(DYNAMIC_LOOKUP "-undefined dynamic_lookup") +endif() + +target_link_libraries(diffvg ${DYNAMIC_LOOKUP}) + +if(WIN32) + # See: https://pybind11.readthedocs.io/en/master/compiling.html#advanced-interface-library-target + target_link_libraries(diffvg pybind11::module) + set_target_properties(diffvg PROPERTIES PREFIX "${PYTHON_MODULE_PREFIX}" + SUFFIX "${PYTHON_MODULE_EXTENSION}") +endif() + +set_target_properties(diffvg PROPERTIES SKIP_BUILD_RPATH FALSE) +set_target_properties(diffvg PROPERTIES BUILD_WITH_INSTALL_RPATH TRUE) +if(UNIX AND NOT APPLE) + set_target_properties(diffvg PROPERTIES INSTALL_RPATH "$ORIGIN") +elseif(APPLE) + set_target_properties(diffvg PROPERTIES INSTALL_RPATH "@loader_path") +endif() + +set_property(TARGET diffvg PROPERTY CXX_STANDARD 11) +set_target_properties(diffvg PROPERTIES PREFIX "") +# Still enable assertion in release mode +string( REPLACE "/DNDEBUG" "" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "-DNDEBUG" "" CMAKE_CXX_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "/DNDEBUG" "" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +string( REPLACE "-DNDEBUG" "" CMAKE_CXX_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +string( REPLACE "/DNDEBUG" "" CMAKE_C_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "-DNDEBUG" "" CMAKE_C_FLAGS_RELEASE "${CMAKE_CXX_FLAGS_RELEASE}") +string( REPLACE "/DNDEBUG" "" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") +string( REPLACE "-DNDEBUG" "" CMAKE_C_FLAGS_RELWITHDEBINFO "${CMAKE_CXX_FLAGS_RELWITHDEBINFO}") + +if(NOT WIN32) + find_package(TensorFlow) + if(TensorFlow_FOUND) + add_subdirectory(pydiffvg_tensorflow/custom_ops) + else() + message(INFO " Building without TensorFlow support (not found)") + endif() +endif() diff --git a/README.md b/README.md new file mode 100644 index 0000000..1cbe3e9 --- /dev/null +++ b/README.md @@ -0,0 +1,115 @@ +# diffvg +Differentiable Rasterizer for Vector Graphics +https://people.csail.mit.edu/tzumao/diffvg + +diffvg is a differentiable rasterizer for 2D vector graphics. See the webpage for more info. 
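+
+A minimal sketch of the Python API, mirroring the calls used in `apps/gaussian_blur.py` and `apps/finite_difference_comp.py` (the SVG path below is illustrative):
+
+```
+import pydiffvg
+
+# Load an SVG into differentiable scene parameters.
+canvas_width, canvas_height, shapes, shape_groups = \
+    pydiffvg.svg_to_scene('imgs/tiger.svg')
+scene_args = pydiffvg.RenderFunction.serialize_scene(
+    canvas_width, canvas_height, shapes, shape_groups)
+
+render = pydiffvg.RenderFunction.apply
+img = render(canvas_width,   # width
+             canvas_height,  # height
+             2,              # num_samples_x
+             2,              # num_samples_y
+             0,              # seed
+             None,           # background_image
+             *scene_args)
+
+# img is a PyTorch tensor; any scalar loss on it backpropagates to the
+# shape points, stroke widths and colors held in `shapes`/`shape_groups`.
+pydiffvg.imwrite(img.cpu(), 'out.png', gamma=2.2)
+```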
+ +![teaser](https://user-images.githubusercontent.com/951021/92184822-2a0bc500-ee20-11ea-81a6-f26af2d120f4.jpg) + +![circle](https://user-images.githubusercontent.com/951021/63556018-0b2ddf80-c4f8-11e9-849c-b4ecfcb9a865.gif) +![ellipse](https://user-images.githubusercontent.com/951021/63556021-0ec16680-c4f8-11e9-8fc6-8b34de45b8be.gif) +![rect](https://user-images.githubusercontent.com/951021/63556028-12ed8400-c4f8-11e9-8072-81702c9193e1.gif) +![polygon](https://user-images.githubusercontent.com/951021/63980999-1e99f700-ca72-11e9-9786-1cba14d2d862.gif) +![curve](https://user-images.githubusercontent.com/951021/64042667-3d9e9480-cb17-11e9-88d8-2f7b9da8b8ab.gif) +![path](https://user-images.githubusercontent.com/951021/64070625-7a52b480-cc19-11e9-9380-eac02f56f693.gif) +![gradient](https://user-images.githubusercontent.com/951021/64898668-da475300-d63c-11e9-917a-825b94be0710.gif) +![circle_outline](https://user-images.githubusercontent.com/951021/65125594-84f7a280-d9aa-11e9-8bc4-669fd2eff2f4.gif) +![ellipse_transform](https://user-images.githubusercontent.com/951021/67149013-06b54700-f25b-11e9-91eb-a61171c6d4a4.gif) + +# Install +``` +git submodule update --init --recursive +conda install pytorch torchvision -c pytorch +conda install numpy +conda install scikit-image +pip install svgwrite +pip install svgpathtools +pip install cssutils +pip install numba +pip install torch-tools +pip install visdom +python setup.py install +``` + +# Building in debug mode + +``` +python setup.py build --debug install +``` + +# Run +``` +cd apps +``` + +Optimizing a single circle to a target. +``` +python single_circle.py +``` + +Finite difference comparison. +``` +finite_difference_comp.py [-h] [--size_scale SIZE_SCALE] + [--clamping_factor CLAMPING_FACTOR] + [--use_prefiltering USE_PREFILTERING] + svg_file +``` +e.g., +``` +python finite_difference_comp.py imgs/tiger.svg +``` + +Interactive editor +``` +python svg_brush.py +``` + +Painterly rendering +``` +painterly_rendering.py [-h] [--num_paths NUM_PATHS] + [--max_width MAX_WIDTH] [--use_lpips_loss] + [--num_iter NUM_ITER] [--use_blob] + target +``` +e.g., +``` +python painterly_rendering.py imgs/fallingwater.jpg --num_paths 2048 --max_width 4.0 --use_lpips_loss +``` + +Image vectorization +``` +python refine_svg.py [-h] [--use_lpips_loss] [--num_iter NUM_ITER] svg target +``` +e.g., +``` +python refine_svg.py imgs/flower.svg imgs/flower.jpg +``` + +Seam carving +``` +python seam_carving.py [-h] [--svg SVG] [--optim_steps OPTIM_STEPS] +``` +e.g., +``` +python seam_carving.py imgs/hokusai.svg +``` + +Vector variational autoencoder & vector GAN: + +For the GAN models, see `apps/generative_models/train_gan.py`. Generate samples from a pretrained using `apps/generative_models/eval_gan.py`. + +For the VAE models, see `apps/generative_models/mnist_vae.py`. + +If you use diffvg in your academic work, please cite + +``` +@article{Li:2020:DVG, + title = {Differentiable Vector Graphics Rasterization for Editing and Learning}, + author = {Li, Tzu-Mao and Luk\'{a}\v{c}, Michal and Gharbi Micha\"{e}l and Jonathan Ragan-Kelley}, + journal = {ACM Trans. Graph. (Proc. 
SIGGRAPH Asia)}, + volume = {39}, + number = {6}, + pages = {193:1--193:15}, + year = {2020} +} +``` diff --git a/aabb.h b/aabb.h new file mode 100644 index 0000000..c35968e --- /dev/null +++ b/aabb.h @@ -0,0 +1,67 @@ +#pragma once + +#include "diffvg.h" +#include "cuda_utils.h" +#include "vector.h" +#include "matrix.h" + +struct AABB { + DEVICE + inline AABB(const Vector2f &p_min = Vector2f{infinity(), infinity()}, + const Vector2f &p_max = Vector2f{-infinity(), -infinity()}) + : p_min(p_min), p_max(p_max) {} + Vector2f p_min, p_max; +}; + +DEVICE +inline +AABB merge(const AABB &box, const Vector2f &p) { + return AABB{Vector2f{min(p.x, box.p_min.x), min(p.y, box.p_min.y)}, + Vector2f{max(p.x, box.p_max.x), max(p.y, box.p_max.y)}}; +} + +DEVICE +inline +AABB merge(const AABB &box0, const AABB &box1) { + return AABB{Vector2f{min(box0.p_min.x, box1.p_min.x), min(box0.p_min.y, box1.p_min.y)}, + Vector2f{max(box0.p_max.x, box1.p_max.x), max(box0.p_max.y, box1.p_max.y)}}; +} + +DEVICE +inline +bool inside(const AABB &box, const Vector2f &p) { + return p.x >= box.p_min.x && p.x <= box.p_max.x && + p.y >= box.p_min.y && p.y <= box.p_max.y; +} + +DEVICE +inline +bool inside(const AABB &box, const Vector2f &p, float radius) { + return p.x >= box.p_min.x - radius && p.x <= box.p_max.x + radius && + p.y >= box.p_min.y - radius && p.y <= box.p_max.y + radius; +} + +DEVICE +inline +AABB enlarge(const AABB &box, float width) { + return AABB{Vector2f{box.p_min.x - width, box.p_min.y - width}, + Vector2f{box.p_max.x + width, box.p_max.y + width}}; +} + +DEVICE +inline +AABB transform(const Matrix3x3f &xform, const AABB &box) { + auto ret = AABB(); + ret = merge(ret, xform_pt(xform, Vector2f{box.p_min.x, box.p_min.y})); + ret = merge(ret, xform_pt(xform, Vector2f{box.p_min.x, box.p_max.y})); + ret = merge(ret, xform_pt(xform, Vector2f{box.p_max.x, box.p_min.y})); + ret = merge(ret, xform_pt(xform, Vector2f{box.p_max.x, box.p_max.y})); + return ret; +} + +DEVICE +inline +bool within_distance(const AABB &box, const Vector2f &pt, float r) { + return pt.x >= box.p_min.x - r && pt.x <= box.p_max.x + r && + pt.y >= box.p_min.y - r && pt.y <= box.p_max.y + r; +} diff --git a/apps/.gitignore b/apps/.gitignore new file mode 100644 index 0000000..fe18dd3 --- /dev/null +++ b/apps/.gitignore @@ -0,0 +1,3 @@ +mnist +data/sketchrnn_cat.npz +data diff --git a/apps/Makefile b/apps/Makefile new file mode 100644 index 0000000..ecc25e4 --- /dev/null +++ b/apps/Makefile @@ -0,0 +1,10 @@ +SEAM_IMAGES=seaside2 sunset2 hokusai cat ice_cream +SEAM_OUT=results/seam_carving +SEAM_RESULTS=$(addsuffix /out.mp4,$(addprefix $(SEAM_OUT)/,$(SEAM_IMAGES))) + +all: $(SEAM_RESULTS) + echo $(SEAM_RESULTS) + +$(SEAM_OUT)/%/out.mp4: imgs/seamcarving/%.svg + python seam_carving.py --svg $^ + diff --git a/apps/curve_subdivision.py b/apps/curve_subdivision.py new file mode 100644 index 0000000..7f03df1 --- /dev/null +++ b/apps/curve_subdivision.py @@ -0,0 +1,85 @@ +import svgpathtools +import numpy as np +import math + +def split_cubic(c, t): + c0, c1 = svgpathtools.split_bezier(c, t) + return svgpathtools.CubicBezier(c0[0], c0[1], c0[2], c0[3]), svgpathtools.CubicBezier(c1[0], c1[1], c1[2], c1[3]) + +def cubic_to_quadratic(curve): + # Best L2 approximation + m = (-curve.start + 3 * curve.control1 + 3 * curve.control2 - curve.end) / 4.0 + return svgpathtools.QuadraticBezier(curve.start, m, curve.end) + +def convert_and_write_svg(cubic, filename): + cubic_path = svgpathtools.Path(cubic) + cubic_ctrl = 
svgpathtools.Path(svgpathtools.Line(cubic.start, cubic.control1), + svgpathtools.Line(cubic.control1, cubic.control2), + svgpathtools.Line(cubic.control2, cubic.end)) + cubic_color = (50, 50, 200) + cubic_ctrl_color = (150, 150, 150) + + r = 4.0 + + paths = [cubic_path, cubic_ctrl] + colors = [cubic_color, cubic_ctrl_color] + dots = [cubic_path[0].start, cubic_path[0].control1, cubic_path[0].control2, cubic_path[0].end] + ncols = ['green', 'green', 'green', 'green'] + nradii = [r, r, r, r] + stroke_widths = [3.0, 1.5] + + def add_quadratic(q): + paths.append(q) + q_ctrl = svgpathtools.Path(svgpathtools.Line(q.start, q.control), + svgpathtools.Line(q.control, q.end)) + paths.append(q_ctrl) + colors.append((200, 50, 50)) # q_color + colors.append((150, 150, 150)) # q_ctrl_color + dots.append(q.start) + dots.append(q.control) + dots.append(q.end) + ncols.append('purple') + ncols.append('purple') + ncols.append('purple') + nradii.append(r) + nradii.append(r) + nradii.append(r) + stroke_widths.append(3.0) + stroke_widths.append(1.5) + + prec = 1.0 + queue = [cubic] + num_quadratics = 0 + while len(queue) > 0: + c = queue[-1] + queue = queue[:-1] + + # Criteria for conversion + # http://caffeineowl.com/graphics/2d/vectorial/cubic2quad01.html + p = c.end - 3 * c.control2 + 3 * c.control1 - c.start + d = math.sqrt(p.real * p.real + p.imag * p.imag) * math.sqrt(3.0) / 36 + t = math.pow(1.0 / d, 1.0 / 3.0) + + if t < 1.0: + c0, c1 = split_cubic(c, 0.5) + queue.append(c0) + queue.append(c1) + else: + quadratic = cubic_to_quadratic(c) + print(quadratic) + add_quadratic(quadratic) + num_quadratics += 1 + print('num_quadratics:', num_quadratics) + + svgpathtools.wsvg(paths, + colors = colors, + stroke_widths = stroke_widths, + nodes = dots, + node_colors = ncols, + node_radii = nradii, + filename = filename) + +convert_and_write_svg(svgpathtools.CubicBezier(100+200j, 426+50j, 50+50j, 300+200j), + 'results/curve_subdivision/subdiv_curve0.svg') +convert_and_write_svg(svgpathtools.CubicBezier(100+200j, 427+50j, 50+50j, 300+200j), + 'results/curve_subdivision/subdiv_curve1.svg') diff --git a/apps/finite_difference_comp.py b/apps/finite_difference_comp.py new file mode 100644 index 0000000..331f6d4 --- /dev/null +++ b/apps/finite_difference_comp.py @@ -0,0 +1,197 @@ +# python finite_difference_comp.py imgs/tiger.svg +# python finite_difference_comp.py --use_prefiltering True imgs/tiger.svg +# python finite_difference_comp.py imgs/boston.svg +# python finite_difference_comp.py --use_prefiltering True imgs/boston.svg +# python finite_difference_comp.py imgs/contour.svg +# python finite_difference_comp.py --use_prefiltering True imgs/contour.svg +# python finite_difference_comp.py --size_scale 0.5 --clamping_factor 0.05 imgs/hawaii.svg +# python finite_difference_comp.py --size_scale 0.5 --clamping_factor 0.05 --use_prefiltering True imgs/hawaii.svg +# python finite_difference_comp.py imgs/mcseem2.svg +# python finite_difference_comp.py --use_prefiltering True imgs/mcseem2.svg +# python finite_difference_comp.py imgs/reschart.svg +# python finite_difference_comp.py --use_prefiltering True imgs/reschart.svg + +import pydiffvg +import diffvg +from matplotlib import cm +import matplotlib.pyplot as plt +import argparse +import torch + +pydiffvg.set_print_timing(True) +#pydiffvg.set_use_gpu(False) + +def normalize(x, min_, max_): + range = max(abs(min_), abs(max_)) + return (x + range) / (2 * range) + +def main(args): + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg_file) + + 
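+
+    # This script compares the renderer's analytic gradients against central
+    # finite differences: the scene is perturbed by +epsilon and then -epsilon
+    # along each axis (x first, then y), re-rendered, and the per-pixel central
+    # difference (img0 - img1) / (2 * epsilon) is summed over channels.
+    # The analytic maps come from RenderFunction.render_grad; both are
+    # normalized, colormapped with viridis, and written to
+    # results/finite_difference_comp/.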
w = int(canvas_width * args.size_scale) + h = int(canvas_height * args.size_scale) + + print(w, h) + curve_counts = 0 + for s in shapes: + if isinstance(s, pydiffvg.Circle): + curve_counts += 1 + elif isinstance(s, pydiffvg.Ellipse): + curve_counts += 1 + elif isinstance(s, pydiffvg.Path): + curve_counts += len(s.num_control_points) + elif isinstance(s, pydiffvg.Polygon): + curve_counts += len(s.points) - 1 + if s.is_closed: + curve_counts += 1 + elif isinstance(s, pydiffvg.Rect): + curve_counts += 1 + print('curve_counts:', curve_counts) + + pfilter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, + radius = torch.tensor(0.5)) + + use_prefiltering = args.use_prefiltering + print('use_prefiltering:', use_prefiltering) + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + + num_samples_x = args.num_spp + num_samples_y = args.num_spp + if (use_prefiltering): + num_samples_x = 1 + num_samples_y = 1 + + render = pydiffvg.RenderFunction.apply + img = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + pydiffvg.imwrite(img.cpu(), 'results/finite_difference_comp/img.png', gamma=1.0) + + epsilon = 0.1 + def perturb_scene(axis, epsilon): + for s in shapes: + if isinstance(s, pydiffvg.Circle): + s.center[axis] += epsilon + elif isinstance(s, pydiffvg.Ellipse): + s.center[axis] += epsilon + elif isinstance(s, pydiffvg.Path): + s.points[:, axis] += epsilon + elif isinstance(s, pydiffvg.Polygon): + s.points[:, axis] += epsilon + elif isinstance(s, pydiffvg.Rect): + s.p_min[axis] += epsilon + s.p_max[axis] += epsilon + for s in shape_groups: + if isinstance(s.fill_color, pydiffvg.LinearGradient): + s.fill_color.begin[axis] += epsilon + s.fill_color.end[axis] += epsilon + + perturb_scene(0, epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render = pydiffvg.RenderFunction.apply + img0 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + + perturb_scene(0, -2 * epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + img1 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + x_diff = (img0 - img1) / (2 * epsilon) + x_diff = x_diff.sum(axis = 2) + x_diff_max = x_diff.max() * args.clamping_factor + x_diff_min = x_diff.min() * args.clamping_factor + print(x_diff.max()) + print(x_diff.min()) + x_diff = cm.viridis(normalize(x_diff, x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(x_diff, 'results/finite_difference_comp/finite_x_diff.png', gamma=1.0) + + perturb_scene(0, epsilon) + + perturb_scene(1, epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render = pydiffvg.RenderFunction.apply + img0 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + + perturb_scene(1, -2 * epsilon) + scene_args = 
pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + img1 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + y_diff = (img0 - img1) / (2 * epsilon) + y_diff = y_diff.sum(axis = 2) + y_diff_max = y_diff.max() * args.clamping_factor + y_diff_min = y_diff.min() * args.clamping_factor + y_diff = cm.viridis(normalize(y_diff, y_diff_min, y_diff_max).cpu().numpy()) + pydiffvg.imwrite(y_diff, 'results/finite_difference_comp/finite_y_diff.png', gamma=1.0) + perturb_scene(1, epsilon) + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render_grad = pydiffvg.RenderFunction.render_grad + img_grad = render_grad(torch.ones(h, w, 4, device = pydiffvg.get_device()), + w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, # background_image + *scene_args) + print(img_grad[:, :, 0].max()) + print(img_grad[:, :, 0].min()) + x_diff = cm.viridis(normalize(img_grad[:, :, 0], x_diff_min, x_diff_max).cpu().numpy()) + y_diff = cm.viridis(normalize(img_grad[:, :, 1], y_diff_min, y_diff_max).cpu().numpy()) + pydiffvg.imwrite(x_diff, 'results/finite_difference_comp/ours_x_diff.png', gamma=1.0) + pydiffvg.imwrite(y_diff, 'results/finite_difference_comp/ours_y_diff.png', gamma=1.0) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg_file", help="source SVG path") + parser.add_argument("--size_scale", type=float, default=1.0) + parser.add_argument("--clamping_factor", type=float, default=0.1) + parser.add_argument("--num_spp", type=int, default=4) + parser.add_argument("--use_prefiltering", type=bool, default=False) + args = parser.parse_args() + main(args) diff --git a/apps/gaussian_blur.py b/apps/gaussian_blur.py new file mode 100644 index 0000000..8d14802 --- /dev/null +++ b/apps/gaussian_blur.py @@ -0,0 +1,93 @@ +""" +""" +import os +import pydiffvg +import torch as th +import scipy.ndimage.filters as F + + +def render(canvas_width, canvas_height, shapes, shape_groups): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + return img + + +def main(): + pydiffvg.set_device(th.device('cuda:1')) + + # Load SVG + svg = os.path.join("imgs", "peppers.svg") + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(svg) + + # Save initial state + ref = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(ref.cpu(), 'results/gaussian_blur/init.png', gamma=2.2) + + target = F.gaussian_filter(ref.cpu().numpy(), [10, 10, 0]) + target = th.from_numpy(target).to(ref.device) + pydiffvg.imwrite(target.cpu(), 'results/gaussian_blur/target.png', gamma=2.2) + + # Collect variables to optimize + points_vars = [] + width_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + path.stroke_width.requires_grad = True + width_vars.append(path.stroke_width) + color_vars = [] + for group in shape_groups: + # do not optimize alpha + group.fill_color[..., :3].requires_grad = True + 
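+        # collect the full RGBA tensor; fill colors get their own Adam
+        # optimizer with a smaller learning rate (0.01) below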
color_vars.append(group.fill_color) + + # Optimize + points_optim = th.optim.Adam(points_vars, lr=1.0) + width_optim = th.optim.Adam(width_vars, lr=1.0) + color_optim = th.optim.Adam(color_vars, lr=0.01) + + for t in range(20): + print('\niteration:', t) + points_optim.zero_grad() + width_optim.zero_grad() + color_optim.zero_grad() + # Forward pass: render the image. + img = render(canvas_width, canvas_height, shapes, shape_groups) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/gaussian_blur/iter_{}.png'.format(t), gamma=2.2) + loss = (img - target)[..., :3].pow(2).mean() + + print('alpha:', img[..., 3].mean().item()) + print('render loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. + points_optim.step() + width_optim.step() + color_optim.step() + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + + # Final render + img = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(img.cpu(), 'results/gaussian_blur/final.png', gamma=2.2) + + # Convert the intermediate renderings to a video. + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/gaussian_blur/iter_%d.png", "-vb", "20M", + "results/gaussian_blur/out.mp4"]) + +if __name__ == "__main__": + main() diff --git a/apps/generative_models/.gitignore b/apps/generative_models/.gitignore new file mode 100644 index 0000000..78fb1fc --- /dev/null +++ b/apps/generative_models/.gitignore @@ -0,0 +1 @@ +.gdb_history diff --git a/apps/generative_models/README.md b/apps/generative_models/README.md new file mode 100644 index 0000000..6b987ee --- /dev/null +++ b/apps/generative_models/README.md @@ -0,0 +1,5 @@ +# Usage + +For the GAN models, see `train_gan.py`. Generate samples from a pretrained using `eval_gan.py` + +For the VAE models, see `mnist_vae.py`. diff --git a/apps/generative_models/__init__.py b/apps/generative_models/__init__.py new file mode 100644 index 0000000..e69de29 diff --git a/apps/generative_models/data.py b/apps/generative_models/data.py new file mode 100644 index 0000000..bdbac65 --- /dev/null +++ b/apps/generative_models/data.py @@ -0,0 +1,229 @@ +import os +import time +import torch as th +import numpy as np +import torchvision.datasets as dset +import torchvision.transforms as transforms +import imageio + +import ttools +import rendering + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +DATA = os.path.join(BASE_DIR, "data") + +LOG = ttools.get_logger(__name__) + + +class QuickDrawImageDataset(th.utils.data.Dataset): + BASE_DATA_URL = \ + "https://console.cloud.google.com/storage/browser/_details/quickdraw_dataset/full/numpy_bitmap/cat.npy" + """ + Args: + spatial_limit(int): maximum spatial extent in pixels. 
+ """ + def __init__(self, imsize, train=True): + super(QuickDrawImageDataset, self).__init__() + file = os.path.join(DATA, "cat.npy") + + self.imsize = imsize + + if not os.path.exists(file): + msg = "Dataset file %s does not exist, please download" + " it from %s" % (file, QuickDrawImageDataset.BASE_DATA_URL) + LOG.error(msg) + raise RuntimeError(msg) + + self.data = np.load(file, allow_pickle=True, encoding="latin1") + + def __len__(self): + return self.data.shape[0] + + def __getitem__(self, idx): + im = np.reshape(self.data[idx], (1, 1, 28, 28)) + im = th.from_numpy(im).float() / 255.0 + im = th.nn.functional.interpolate(im, size=(self.imsize, self.imsize)) + + # Bring it to [-1, 1] + im = th.clamp(im, 0, 1) + im -= 0.5 + im /= 0.5 + + return im.squeeze(0) + + +class QuickDrawDataset(th.utils.data.Dataset): + BASE_DATA_URL = \ + "https://storage.cloud.google.com/quickdraw_dataset/sketchrnn" + + """ + Args: + spatial_limit(int): maximum spatial extent in pixels. + """ + def __init__(self, dataset, mode="train", + max_seq_length=250, + spatial_limit=1000): + super(QuickDrawDataset, self).__init__() + file = os.path.join(DATA, "sketchrnn_"+dataset) + remote = os.path.join(QuickDrawDataset.BASE_DATA_URL, dataset) + + self.max_seq_length = max_seq_length + self.spatial_limit = spatial_limit + + if mode not in ["train", "test", "valid"]: + return ValueError("Only allowed data mode are 'train' and 'test'," + " 'valid'.") + + if not os.path.exists(file): + msg = "Dataset file %s does not exist, please download" + " it from %s" % (file, remote) + LOG.error(msg) + raise RuntimeError(msg) + + data = np.load(file, allow_pickle=True, encoding="latin1")[mode] + data = self.purify(data) + data = self.normalize(data) + + # Length of longest sequence in the dataset + self.nmax = max([len(seq) for seq in data]) + self.sketches = data + + def __repr__(self): + return "Dataset with %d sequences of max length %d" % \ + (len(self.sketches), self.nmax) + + def __len__(self): + return len(self.sketches) + + def __getitem__(self, idx): + """Return the idx-th stroke in 5-D format, padded to length (Nmax+2). + + The first and last element of the sequence are fixed to "start-" and + "end-of-sequence" token. 
+ + dx, dy, + 3 numbers for one-hot encoding of state: + 1 0 0: pen touching paper till next point + 0 1 0: pen lifted from paper after current point + 0 0 1: drawing has ended, next points (including current will not be + drawn) + """ + sample_data = self.sketches[idx] + + # Allow two extra slots for start/end of sequence tokens + sample = np.zeros((self.nmax+2, 5), dtype=np.float32) + + n = sample_data.shape[0] + + # normalize dx, dy + deltas = sample_data[:, :2] + # Absolute coordinates + positions = deltas[..., :2].cumsum(0) + maxi = np.abs(positions).max() + 1e-8 + deltas = deltas / (1.1 * maxi) # leave some margin on edges + + # fill in dx, dy coordinates + sample[1:n+1, :2] = deltas + + # on paper indicator: 0 means touching paper in the 3d format, flip it + sample[1:n+1, 2] = 1 - sample_data[:, 2] + + # off-paper indicator, complement of previous flag + sample[1:n+1, 3] = 1 - sample[1:n+1, 2] + + # fill with end of sequence tokens for the remainder + sample[n+1:, 4] = 1 + + # Start of sequence token + sample[0] = [0, 0, 1, 0, 0] + + return sample + + def purify(self, strokes): + """removes to small or too long sequences + removes large gaps""" + data = [] + for seq in strokes: + if seq.shape[0] <= self.max_seq_length: + # and seq.shape[0] > 10: + + # Limit large spatial gaps + seq = np.minimum(seq, self.spatial_limit) + seq = np.maximum(seq, -self.spatial_limit) + seq = np.array(seq, dtype=np.float32) + data.append(seq) + return data + + def calculate_normalizing_scale_factor(self, strokes): + """Calculate the normalizing factor explained in appendix of + sketch-rnn.""" + data = [] + for i, stroke_i in enumerate(strokes): + for j, pt in enumerate(strokes[i]): + data.append(pt[0]) + data.append(pt[1]) + data = np.array(data) + return np.std(data) + + def normalize(self, strokes): + """Normalize entire dataset (delta_x, delta_y) by the scaling + factor.""" + data = [] + scale_factor = self.calculate_normalizing_scale_factor(strokes) + for seq in strokes: + seq[:, 0:2] /= scale_factor + data.append(seq) + return data + + +class FixedLengthQuickDrawDataset(QuickDrawDataset): + """A variant of the QuickDraw dataset where the strokes are represented as + a fixed-length sequence of triplets (dx, dy, opacity), where opacity = 0, 1. 
+ """ + def __init__(self, *args, canvas_size=64, **kwargs): + super(FixedLengthQuickDrawDataset, self).__init__(*args, **kwargs) + self.canvas_size = canvas_size + + def __getitem__(self, idx): + sample = super(FixedLengthQuickDrawDataset, self).__getitem__(idx) + + # We construct a stroke opacity variable from the pen down state, dx, dy remain unchanged + strokes = sample[:, :3] + + im = np.zeros((1, 1)) + + # render image + # start = time.time() + im = rendering.opacityStroke2diffvg( + th.from_numpy(strokes).unsqueeze(0), canvas_size=self.canvas_size, + relative=True, debug=False) + im = im.squeeze(0).numpy() + # elapsed = (time.time() - start)*1000 + # print("item %d pipeline gt rendering took %.2fms" % (idx, elapsed)) + + return strokes, im + + +class MNISTDataset(th.utils.data.Dataset): + def __init__(self, imsize, train=True): + super(MNISTDataset, self).__init__() + self.mnist = dset.MNIST(root=os.path.join(DATA, "mnist"), + train=train, + download=True, + transform=transforms.Compose([ + transforms.Resize((imsize, imsize)), + transforms.ToTensor(), + ])) + + def __len__(self): + return len(self.mnist) + + def __getitem__(self, idx): + im, label = self.mnist[idx] + + # make sure data uses [0, 1] range + im -= im.min() + im /= im.max() + 1e-8 + + # Bring it to [-1, 1] + im -= 0.5 + im /= 0.5 + return im diff --git a/apps/generative_models/eval_gan.py b/apps/generative_models/eval_gan.py new file mode 100644 index 0000000..f415a51 --- /dev/null +++ b/apps/generative_models/eval_gan.py @@ -0,0 +1,182 @@ +"""Evaluate a pretrained GAN model. +Usage: + +`python eval_gan.py `, e.g. +`../results/quickdraw_gan_vector_bezier_fc_wgan`. + +""" +import os +import argparse +import torch as th +import numpy as np +import ttools +import imageio +from subprocess import call + +import pydiffvg + +import models + + +LOG = ttools.get_logger(__name__) + + +def postprocess(im, invert=False): + im = th.clamp((im + 1.0) / 2.0, 0, 1) + if invert: + im = (1.0 - im) + im = ttools.tensor2image(im) + return im + + +def imsave(im, path): + os.makedirs(os.path.dirname(path), exist_ok=True) + imageio.imwrite(path, im) + + +def save_scene(scn, path): + os.makedirs(os.path.dirname(path), exist_ok=True) + pydiffvg.save_svg(path, *scn, use_gamma=False) + + +def run(args): + th.manual_seed(0) + np.random.seed(0) + + meta = ttools.Checkpointer.load_meta(args.model, "vect_g_") + + if meta is None: + LOG.warning("Could not load metadata at %s, aborting.", args.model) + return + + LOG.info("Loaded model %s with metadata:\n %s", args.model, meta) + + if args.output_dir is None: + outdir = os.path.join(args.model, "eval") + else: + outdir = args.output_dir + os.makedirs(outdir, exist_ok=True) + + model_params = meta["model_params"] + if args.imsize is not None: + LOG.info("Overriding output image size to: %dx%d", args.imsize, + args.imsize) + old_size = model_params["imsize"] + scale = args.imsize * 1.0 / old_size + model_params["imsize"] = args.imsize + model_params["stroke_width"] = [w*scale for w in + model_params["stroke_width"]] + LOG.info("Overriding width to: %s", model_params["stroke_width"]) + + # task = meta["task"] + generator = meta["generator"] + if generator == "fc": + model = models.VectorGenerator(**model_params) + elif generator == "bezier_fc": + model = models.BezierVectorGenerator(**model_params) + elif generator in ["rnn"]: + model = models.RNNVectorGenerator(**model_params) + elif generator in ["chain_rnn"]: + model = models.ChainRNNVectorGenerator(**model_params) + else: + raise NotImplementedError() + 
model.eval() + + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + + model.to(device) + + checkpointer = ttools.Checkpointer( + args.model, model, meta=meta, prefix="vect_g_") + checkpointer.load_latest() + + LOG.info("Computing latent space interpolation") + for i in range(args.nsamples): + z0 = model.sample_z(1) + z1 = model.sample_z(1) + + # interpolation + alpha = th.linspace(0, 1, args.nsteps).view(args.nsteps, 1).to(device) + alpha_video = th.linspace(0, 1, args.nframes).view(args.nframes, 1) + alpha_video = alpha_video.to(device) + + length = [args.nsteps, args.nframes] + for idx, a in enumerate([alpha, alpha_video]): + _z0 = z0.repeat(length[idx], 1).to(device) + _z1 = z1.repeat(length[idx], 1).to(device) + batch = _z0*(1-a) + _z1*a + out = model(batch) + if idx == 0: # image viz + n, c, h, w = out.shape + out = out.permute(1, 2, 0, 3) + out = out.contiguous().view(1, c, h, w*n) + out = postprocess(out, invert=args.invert) + imsave(out, os.path.join(outdir, + "latent_interp", "%03d.png" % i)) + + scenes = model.get_vector(batch) + for scn_idx, scn in enumerate(scenes): + save_scene(scn, os.path.join(outdir, "latent_interp_svg", + "%03d" % i, "%03d.svg" % + scn_idx)) + else: # video viz + anim_root = os.path.join(outdir, + "latent_interp_video", "%03d" % i) + LOG.info("Rendering animation %d", i) + for frame_idx, frame in enumerate(out): + LOG.info("frame %d", frame_idx) + frame = frame.unsqueeze(0) + frame = postprocess(frame, invert=args.invert) + imsave(frame, os.path.join(anim_root, + "frame%04d.png" % frame_idx)) + call(["ffmpeg", "-framerate", "30", "-i", + os.path.join(anim_root, "frame%04d.png"), "-vb", "20M", + os.path.join(outdir, + "latent_interp_video", "%03d.mp4" % i)]) + LOG.info(" saved %d", i) + + LOG.info("Sampling latent space") + + for i in range(args.nsamples): + n = 8 + bs = n*n + z = model.sample_z(bs).to(device) + out = model(z) + _, c, h, w = out.shape + out = out.view(n, n, c, h, w).permute(2, 0, 3, 1, 4) + out = out.contiguous().view(1, c, h*n, w*n) + out = postprocess(out) + imsave(out, os.path.join(outdir, "samples_%03d.png" % i)) + LOG.info(" saved %d", i) + + LOG.info("output images saved to %s", outdir) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + + parser.add_argument("model") + parser.add_argument("--output_dir", help="output directory for " + " the samples. 
Defaults to the model's path") + parser.add_argument("--nsamples", default=16, type=int, + help="number of output to compute") + parser.add_argument("--imsize", type=int, + help="if provided, override the raster output " + "resolution") + parser.add_argument("--nsteps", default=9, type=int, help="number of " + "interpolation steps for the interpolation") + parser.add_argument("--nframes", default=120, type=int, help="number of " + "frames for the interpolation video") + parser.add_argument("--invert", default=False, action="store_true", + help="if True, render black on white rather than the" + " opposite") + + args = parser.parse_args() + + pydiffvg.set_use_gpu(False) + + ttools.set_logger(False) + + run(args) diff --git a/apps/generative_models/losses.py b/apps/generative_models/losses.py new file mode 100644 index 0000000..8bebb7c --- /dev/null +++ b/apps/generative_models/losses.py @@ -0,0 +1,99 @@ +"""Losses for the generative models and baselines.""" +import torch as th +import numpy as np + +import ttools.modules.image_operators as imops + + +class KLDivergence(th.nn.Module): + """ + Args: + min_value(float): the loss is clipped so that value below this + number don't affect the optimization. + """ + def __init__(self, min_value=0.2): + super(KLDivergence, self).__init__() + self.min_value = min_value + + def forward(self, mu, log_sigma): + loss = -0.5 * (1.0 + log_sigma - mu.pow(2) - log_sigma.exp()) + loss = loss.mean() + loss = th.max(loss, self.min_value*th.ones_like(loss)) + return loss + + +class MultiscaleMSELoss(th.nn.Module): + def __init__(self, channels=3): + super(MultiscaleMSELoss, self).__init__() + self.blur = imops.GaussianBlur(1, channels=channels) + + def forward(self, im, target): + bs, c, h, w = im.shape + num_levels = max(int(np.ceil(np.log2(h))) - 2, 1) + + losses = [] + for lvl in range(num_levels): + loss = th.nn.functional.mse_loss(im, target) + losses.append(loss) + im = th.nn.functional.interpolate(self.blur(im), + scale_factor=0.5, + mode="nearest") + target = th.nn.functional.interpolate(self.blur(target), + scale_factor=0.5, + mode="nearest") + + losses = th.stack(losses) + return losses.sum() + + +def gaussian_pdfs(dx, dy, params): + """Returns the pdf at (dx, dy) for each Gaussian in the mixture. 
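+
+    Each mixture component is a bivariate normal parameterized in `params` by
+    (mu_x, mu_y, log sigma_x, log sigma_y, arctanh(rho_xy)):
+
+        Z   = ((dx - mu_x) / sigma_x)^2 + ((dy - mu_y) / sigma_y)^2
+              - 2 rho_xy (dx - mu_x) (dy - mu_y) / (sigma_x sigma_y)
+        pdf = exp(-Z / (2 (1 - rho_xy^2)))
+              / (2 pi sigma_x sigma_y sqrt(1 - rho_xy^2))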
+ """ + dx = dx.unsqueeze(-1) # replicate dx, dy to evaluate all pdfs at once + dy = dy.unsqueeze(-1) + + mu_x = params[..., 0] + mu_y = params[..., 1] + sigma_x = params[..., 2].exp() + sigma_y = params[..., 3].exp() + rho_xy = th.tanh(params[..., 4]) + + x = ((dx-mu_x) / sigma_x).pow(2) + y = ((dy-mu_y) / sigma_y).pow(2) + + xy = (dx-mu_x)*(dy-mu_y) / (sigma_x * sigma_y) + arg = x + y - 2.0*rho_xy*xy + pdf = th.exp(-arg / (2*(1.0 - rho_xy.pow(2)))) + norm = 2.0 * np.pi * sigma_x * sigma_y * (1.0 - rho_xy.pow(2)).sqrt() + + return pdf / norm + + +class GaussianMixtureReconstructionLoss(th.nn.Module): + """ + Args: + """ + def __init__(self, eps=1e-5): + super(GaussianMixtureReconstructionLoss, self).__init__() + self.eps = eps + + def forward(self, pen_logits, mixture_logits, gaussian_params, targets): + dx = targets[..., 0] + dy = targets[..., 1] + pen_state = targets[..., 2:].argmax(-1) # target index + + # Likelihood loss on the stroke position + # No need to predict accurate pen position for end-of-sequence tokens + valid_stroke = (targets[..., -1] != 1.0).float() + mixture_weights = th.nn.functional.softmax(mixture_logits, -1) + pdfs = gaussian_pdfs(dx, dy, gaussian_params) + position_loss = - th.log(self.eps + (pdfs * mixture_weights).sum(-1)) + + # by actual non-empty count + position_loss = (position_loss*valid_stroke).sum() / valid_stroke.sum() + + # Classification loss for the stroke mode + pen_loss = th.nn.functional.cross_entropy(pen_logits.view(-1, 3), + pen_state.view(-1)) + + return position_loss + pen_loss diff --git a/apps/generative_models/mnist_vae.py b/apps/generative_models/mnist_vae.py new file mode 100644 index 0000000..884c147 --- /dev/null +++ b/apps/generative_models/mnist_vae.py @@ -0,0 +1,1026 @@ +#!/bin/env python +"""Train a VAE MNIST generator. 
+ +Usage: + +* Train a model: + +`python mnist_vae.py train` + +* Generate samples from a trained model: + +`python mnist_vae.py sample` + +* Generate latent space interpolations from a trained model: + +`python mnist_vae.py interpolate` +""" +import argparse +import os + +import numpy as np +import torch as th +from torch.utils.data import DataLoader +import torchvision.datasets as dset +import torchvision.transforms as transforms + +import ttools +import ttools.interfaces +from ttools.modules import networks + +from modules import Flatten + +import pydiffvg + +LOG = ttools.get_logger(__name__) + + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +VAE_OUTPUT = os.path.join(BASE_DIR, "results", "mnist_vae") +AE_OUTPUT = os.path.join(BASE_DIR, "results", "mnist_ae") + + +def _onehot(label): + bs = label.shape[0] + label_onehot = label.new(bs, 10) + label_onehot = label_onehot.zero_() + label_onehot.scatter_(1, label.unsqueeze(1), 1) + return label_onehot.float() + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + samples, # num_samples_x + samples, # num_samples_y + 0, # seed + None, # background + *scene_args) + return img + + +class MNISTCallback(ttools.callbacks.ImageDisplayCallback): + """Simple callback that visualize generated images during training.""" + def visualized_image(self, batch, step_data, is_val=False): + im = step_data["rendering"].detach().cpu() + im = 0.5 + 0.5*im + ref = batch[0].cpu() + + vizdata = [im, ref] + + # tensor to visualize, concatenate images + viz = th.clamp(th.cat(vizdata, 2), 0, 1) + return viz + + def caption(self, batch, step_data, is_val=False): + return "fake, real" + + +class VAEInterface(ttools.ModelInterface): + def __init__(self, model, lr=1e-4, cuda=True, max_grad_norm=10, + variational=True, w_kld=1.0): + super(VAEInterface, self).__init__() + + self.max_grad_norm = max_grad_norm + + self.model = model + + self.w_kld = w_kld + + self.variational = variational + + self.device = "cpu" + if cuda: + self.device = "cuda" + + self.model.to(self.device) + + self.opt = th.optim.Adam( + self.model.parameters(), lr=lr, betas=(0.5, 0.5), eps=1e-12) + + def training_step(self, batch): + im, label = batch[0], batch[1] + im = im.to(self.device) + label = label.to(self.device) + rendering, auxdata = self.model(im, label) + + im = batch[0] + im = im.to(self.device) + + logvar = auxdata["logvar"] + mu = auxdata["mu"] + + data_loss = th.nn.functional.mse_loss(rendering, im) + + ret = {} + if self.variational: # VAE mode + kld = -0.5 * th.sum(1 + logvar - mu.pow(2) - logvar.exp(), 1) + kld = kld.mean() + loss = data_loss + kld*self.w_kld + ret["kld"] = kld.item() + else: # Regular autoencoder + loss = data_loss + + # optimize + self.opt.zero_grad() + loss.backward() + + # Clip large gradients if needed + if self.max_grad_norm is not None: + nrm = th.nn.utils.clip_grad_norm_( + self.model.parameters(), self.max_grad_norm) + if nrm > self.max_grad_norm: + LOG.warning("Clipping generator gradients. 
norm = %.3f > %.3f", + nrm, self.max_grad_norm) + + self.opt.step() + + ret["loss"] = loss.item() + ret["data_loss"] = data_loss.item() + ret["auxdata"] = auxdata + ret["rendering"] = rendering + + return ret + + # def init_validation(self): + # return {"count": 0, "loss": 0} + # + # def update_validation(self, batch, fwd, running_data): + # with th.no_grad(): + # ref = batch[1].to(self.device) + # loss = th.nn.functional.mse_loss(fwd, ref) + # n = ref.shape[0] + # + # return { + # "loss": running_data["loss"] + loss.item()*n, + # "count": running_data["count"] + n + # } + # + # def finalize_validation(self, running_data): + # return { + # "loss": running_data["loss"] / running_data["count"] + # } + + +class MNISTGenerator(th.nn.Module): + def __init__(self, imsize=28): + super(MNISTGenerator, self).__init__() + if imsize != 28: + raise NotImplementedError() + + mul = 2 + self.convnet = th.nn.Sequential( + # 4x4 + th.nn.ConvTranspose2d(16 + 1, mul*32, 4, padding=1, stride=2), + th.nn.LeakyReLU(inplace=True), + th.nn.Conv2d(mul*32, mul*32, 3, padding=1), + th.nn.LeakyReLU(inplace=True), + + # 8x8 + th.nn.ConvTranspose2d(mul*32, mul*64, 4, padding=1, stride=2), + th.nn.LeakyReLU(inplace=True), + th.nn.Conv2d(mul*64, mul*64, 3, padding=1), + th.nn.LeakyReLU(inplace=True), + + # 16x16 + th.nn.ConvTranspose2d(mul*64, mul*128, 4, padding=1, stride=2), + th.nn.LeakyReLU(inplace=True), + th.nn.Conv2d(mul*128, mul*128, 3, padding=1), + th.nn.LeakyReLU(inplace=True), + + # 32x32 + th.nn.Conv2d(mul*128, mul*128, 3, padding=1), + th.nn.LeakyReLU(inplace=True), + + th.nn.Conv2d(mul*128, mul*128, 3, padding=1), + th.nn.LeakyReLU(inplace=True), + + th.nn.Conv2d(mul*128, 1, 1), + # th.nn.Tanh(), + ) + + def forward(self, im, label): + bs = im.shape[0] + + # sample a hidden vector + z = th.randn(bs, 16, 4, 4).to(im.device) + + # make the model conditional + in_ = th.cat([z, label.float().view(bs, 1, 1, 1).repeat(1, 1, 4, 4)], 1) + + out = self.convnet(in_) + return out, None + + +class VectorMNISTVAE(th.nn.Module): + def __init__(self, imsize=28, paths=4, segments=5, samples=2, zdim=128, + conditional=False, variational=True, raster=False, fc=False): + super(VectorMNISTVAE, self).__init__() + + # if imsize != 28: + # raise NotImplementedError() + + self.samples = samples + self.imsize = imsize + self.paths = paths + self.segments = segments + self.zdim = zdim + self.conditional = conditional + self.variational = variational + + ncond = 0 + if self.conditional: # one hot encoded input for conditional model + ncond = 10 + + self.fc = fc + mult = 1 + nc = 1024 + + if not self.fc: # conv model + self.encoder = th.nn.Sequential( + # 32x32 + th.nn.Conv2d(1 + ncond, mult*64, 4, padding=0, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + + # 16x16 + th.nn.Conv2d(mult*64, mult*128, 4, padding=0, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + + # 8x8 + th.nn.Conv2d(mult*128, mult*256, 4, padding=0, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + Flatten(), + ) + else: + self.encoder = th.nn.Sequential( + # 32x32 + Flatten(), + th.nn.Linear(28*28 + ncond, mult*256), + th.nn.LeakyReLU(0.2, inplace=True), + + # 8x8 + th.nn.Linear(mult*256, mult*256, 4), + th.nn.LeakyReLU(0.2, inplace=True), + ) + + self.mu_predictor = th.nn.Linear(256*1*1, zdim) + if self.variational: + self.logvar_predictor = th.nn.Linear(256*1*1, zdim) + + self.decoder = th.nn.Sequential( + th.nn.Linear(zdim + ncond, nc), + th.nn.SELU(inplace=True), + + th.nn.Linear(nc, nc), + th.nn.SELU(inplace=True), + ) + + + self.raster = raster + if 
self.raster: + self.raster_decoder = th.nn.Sequential( + th.nn.Linear(nc, imsize*imsize), + ) + else: + # 4 points bezier with n_segments -> 3*n_segments + 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(nc, 2*self.paths*(self.segments*3+1)), + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(nc, self.paths), + th.nn.Tanh() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(nc, self.paths), + th.nn.Tanh() + ) + + self._reset_weights() + + def _reset_weights(self): + for n, p in self.encoder.named_parameters(): + if 'bias' in n: + p.data.zero_() + elif 'weight' in n: + th.nn.init.kaiming_normal_(p.data, nonlinearity="leaky_relu") + + th.nn.init.kaiming_normal_(self.mu_predictor.weight.data, nonlinearity="linear") + if self.variational: + th.nn.init.kaiming_normal_(self.logvar_predictor.weight.data, nonlinearity="linear") + + for n, p in self.decoder.named_parameters(): + if 'bias' in n: + p.data.zero_() + elif 'weight' in n: + th.nn.init.kaiming_normal_(p, nonlinearity="linear") + + if not self.raster: + for n, p in self.point_predictor.named_parameters(): + pass + # if 'bias' in n: + # p.data.zero_() + # if 'weight' in n: + # th.nn.init.orthogonal_(p) + + for n, p in self.width_predictor.named_parameters(): + if 'bias' in n: + p.data.zero_() + elif 'weight' in n: + th.nn.init.orthogonal_(p) + + for n, p in self.alpha_predictor.named_parameters(): + if 'bias' in n: + p.data.zero_() + elif 'weight' in n: + th.nn.init.orthogonal_(p) + + def encode(self, im, label): + bs, _, h, w = im.shape + if self.conditional: + label_onehot = _onehot(label) + if not self.fc: + label_onehot = label_onehot.view(bs, 10, 1, 1).repeat(1, 1, h, w) + out = self.encoder(th.cat([im, label_onehot], 1)) + else: + out = self.encoder(th.cat([im.view(bs, -1), label_onehot], 1)) + else: + out = self.encoder(im) + mu = self.mu_predictor(out) + if self.variational: + logvar = self.logvar_predictor(out) + return mu, logvar + else: + return mu + + def reparameterize(self, mu, logvar): + std = th.exp(0.5*logvar) + eps = th.randn_like(logvar) + return mu + std*eps + + def _decode_features(self, z, label): + if label is not None: + assert self.conditional, "decoding with an input label requires a conditional AE" + label_onehot = _onehot(label) + z = th.cat([z, label_onehot], 1) + + decoded = self.decoder(z) + return decoded + + def decode(self, z, label=None): + bs = z.shape[0] + + feats = self._decode_features(z, label) + + if self.raster: + out = self.raster_decoder(feats).view(bs, 1, self.imsize, self.imsize) + return out, {} + + all_points = self.point_predictor(feats) + all_points = all_points.view(bs, self.paths, -1, 2) + + all_points = all_points*(self.imsize//2-2) + self.imsize//2 + + if False: + all_widths = th.ones(bs, self.paths) * 0.5 + else: + all_widths = self.width_predictor(feats) * 1.5 + .25 + + if False: + all_alphas = th.ones(bs, self.paths) + else: + all_alphas = self.alpha_predictor(feats) + + # Process the batch sequentially + outputs = [] + scenes = [] + for k in range(bs): + # Get point parameters from network + shapes = [] + shape_groups = [] + for p in range(self.paths): + points = all_points[k, p].contiguous().cpu() + width = all_widths[k, p].cpu() + alpha = all_alphas[k, p].cpu() + + color = th.cat([th.ones(3), alpha.view(1,)]) + num_ctrl_pts = th.zeros(self.segments, dtype=th.int32) + 2 + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + + 
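+                # each path is an open stroke made of `segments` cubic Bezier
+                # segments: num_control_points is 2 per segment, so `points`
+                # holds 3 * segments + 1 two-D control points (matching the
+                # output size of point_predictor above)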
shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + scenes.append( + [shapes, shape_groups, (self.imsize, self.imsize)]) + + # Rasterize + out = render(self.imsize, self.imsize, shapes, shape_groups, samples=self.samples) + + # Torch format, discard alpha, make gray + out = out.permute(2, 0, 1).view(4, self.imsize, self.imsize)[:3].mean(0, keepdim=True) + + outputs.append(out) + + output = th.stack(outputs).to(z.device) + + auxdata = { + "points": all_points, + "scenes": scenes, + } + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, auxdata + + def forward(self, im, label): + bs = im.shape[0] + + if self.variational: + mu, logvar = self.encode(im, label) + z = self.reparameterize(mu, logvar) + else: + mu = self.encode(im, label) + z = mu + logvar = None + + if self.conditional: + output, aux = self.decode(z, label=label) + else: + output, aux = self.decode(z) + + aux["logvar"] = logvar + aux["mu"] = mu + + return output, aux + + +class VectorMNISTGenerator(th.nn.Module): + def __init__(self, imsize=28, paths=4, segments=5, samples=2, conditional=False, + zdim=20, fc=False): + super(VectorMNISTGenerator, self).__init__() + if imsize != 28: + raise NotImplementedError() + + self.samples = samples + self.imsize = imsize + self.paths = paths + self.segments = segments + self.conditional = conditional + self.zdim = zdim + self.fc = fc + + ncond = 0 + if self.conditional: # one hot encoded input for conditional model + ncond = 10 + + nc = 1024 + self.trunk = th.nn.Sequential( + th.nn.Linear(zdim + ncond, nc), # noise + one-hot + th.nn.SELU(inplace=True), + + # th.nn.Linear(nc, nc), + # th.nn.SELU(inplace=True), + + th.nn.Linear(nc, nc), + th.nn.SELU(inplace=True), + + # th.nn.Linear(nc, nc), + # th.nn.SELU(inplace=True), + ) + + # 4 points bezier so n_segments -> 3*n_segments + 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(nc, 2*self.paths*(self.segments*3+1)), + # th.nn.Linear(nc, 2*self.paths*(self.segments*1+1)), + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(nc, self.paths), + th.nn.Tanh() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(nc, self.paths), + th.nn.Tanh() + ) + + # self.postprocessor = th.nn.Sequential( + # th.nn.Conv2d(1, 32, 3, padding=1), + # th.nn.LeakyReLU(inplace=True), + # th.nn.Conv2d(32, 1, 1), + # ) + self._reset_weights() + + def _reset_weights(self): + for n, p in self.trunk.named_parameters(): + if 'bias' in n: + p.data.zero_() + elif 'weight' in n: + th.nn.init.kaiming_normal_(p) + p.data.mul_(0.7) + # th.nn.init.kaiming_normal_(p, nonlinearity="leaky_relu") + + for n, p in self.point_predictor.named_parameters(): + # if 'bias' in n: + # p.data.zero_() + if 'weight' in n: + th.nn.init.orthogonal_(p) + # th.nn.init.kaiming_normal_(p, nonlinearity="tanh") + + for n, p in self.width_predictor.named_parameters(): + if 'bias' in n: + p.data.zero_() + elif 'weight' in n: + # th.nn.init.orthogonal_(p) + th.nn.init.kaiming_normal_(p, nonlinearity="tanh") + + for n, p in self.alpha_predictor.named_parameters(): + if 'bias' in n: + p.data.zero_() + elif 'weight' in n: + th.nn.init.kaiming_normal_(p, nonlinearity="tanh") + # th.nn.init.orthogonal_(p) + + def sample_z(self, bs): + return th.randn(bs, self.zdim) + + def gen_sample(self, z, label=None): + bs = z.shape[0] + if self.conditional: + if label is None: + raise ValueError("GAN is 
conditional, please provide a label") + + # One-hot encoding of the image label + label_onehot = _onehot(label) + + # get some embedding + in_ = th.cat([z, label_onehot.float()], 1) + else: + in_ = z + + feats = self.trunk(in_) + + all_points = self.point_predictor(feats) + all_points = all_points.view(bs, self.paths, -1, 2) + + if False: + all_alphas = th.ones(bs, self.paths) + else: + all_alphas = self.alpha_predictor(feats) + + # stroke size between 0.5 and 3.5 px + if False: + all_widths = th.ones(bs, self.paths) * 1 + else: + all_widths = self.width_predictor(feats) + all_widths = 1.5*all_widths + 0.5 + + all_points = all_points*(self.imsize//2) + self.imsize//2 + + # Process the batch sequentially + outputs = [] + for k in range(bs): + # Get point parameters from network + shapes = [] + shape_groups = [] + for p in range(self.paths): + points = all_points[k, p].contiguous().cpu() + # num_ctrl_pts = th.zeros(self.segments, dtype=th.int32)+0 + num_ctrl_pts = th.zeros(self.segments, dtype=th.int32)+2 + width = all_widths[k, p].cpu() + alpha = all_alphas[k, p].cpu() + color = th.cat([th.ones(3), alpha.view(1,)]) + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize + out = render(self.imsize, self.imsize, shapes, shape_groups, samples=self.samples) + + # Torch format, discard alpha, make gray + out = out.permute(2, 0, 1).view(4, self.imsize, self.imsize)[:3].mean(0, keepdim=True) + + outputs.append(out) + + output = th.stack(outputs).to(z.device) + aux_data = { + "points": all_points, + "raw_vector": output, + } + + # output = self.postprocessor(output) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, aux_data + + def forward(self, im, label): + bs = label.shape[0] + + # sample a hidden vector (same dim as the raster version) + z = self.sample_z(bs).to(im.device) + if args.conditional: + return self.gen_sample(z, label=label) + else: + return self.gen_sample(z) + + + +class Discriminator(th.nn.Module): + def __init__(self, conditional=False, fc=False): + super(Discriminator, self).__init__() + + self.conditional = conditional + + ncond = 0 + if self.conditional: # one hot encoded input for conditional model + ncond = 10 + + sn = th.nn.utils.spectral_norm + # sn = lambda x: x + + self.fc = fc + + mult = 2 + if self.fc: + self.net = th.nn.Sequential( + Flatten(), + th.nn.Linear(28*28 + ncond, mult*256), + th.nn.LeakyReLU(0.2, inplace=True), + + # th.nn.Linear(mult*256, mult*256, 4), + # th.nn.LeakyReLU(0.2, inplace=True), + # th.nn.Dropout(0.5), + + th.nn.Linear(mult*256, mult*256, 4), + th.nn.LeakyReLU(0.2, inplace=True), + + th.nn.Linear(mult*256*1*1, 1), + ) + else: + self.net = th.nn.Sequential( + th.nn.Conv2d(1 + ncond, mult*64, 4, padding=0, stride=2), + + th.nn.LeakyReLU(0.2, inplace=True), + # 16x16 + + sn(th.nn.Conv2d(mult*64, mult*128, 4, padding=0, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 8x8 + + sn(th.nn.Conv2d(mult*128, mult*256, 4, padding=0, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 4x4 + + Flatten(), + + th.nn.Linear(mult*256*1*1, 1), + ) + + self._reset_weights() + + def _reset_weights(self): + for n, p in self.net.named_parameters(): + if 'bias' in n: + p.data.zero_() + if 'weight' in n: + th.nn.init.kaiming_normal_(p, nonlinearity="leaky_relu") + + def forward(self, x): + out = 
self.net(x) + return out + +class Dataset(th.utils.data.Dataset): + def __init__(self, data_dir, imsize): + super(Dataset, self).__init__() + self.mnist = dset.MNIST(root=data_dir, download=True, + transform=transforms.Compose([ + transforms.ToTensor(), + ])) + + def __len__(self): + return len(self.mnist) + + def __getitem__(self, idx): + im, label = self.mnist[idx] + + # make sure data uses [0, 1] range + im -= im.min() + im /= im.max() + 1e-8 + im -= 0.5 + im /= 0.5 + return im, label + + +def train(args): + th.manual_seed(0) + np.random.seed(0) + + pydiffvg.set_use_gpu(args.cuda) + + # Initialize datasets + imsize = 28 + dataset = Dataset(args.data_dir, imsize) + dataloader = DataLoader(dataset, batch_size=args.bs, + num_workers=4, shuffle=True) + + if args.generator in ["vae", "ae"]: + LOG.info("Vector config:\n samples %d\n" + " paths: %d\n segments: %d\n" + " zdim: %d\n" + " conditional: %d\n" + " fc: %d\n", + args.samples, args.paths, args.segments, + args.zdim, args.conditional, args.fc) + + model_params = dict(samples=args.samples, paths=args.paths, + segments=args.segments, conditional=args.conditional, + zdim=args.zdim, fc=args.fc) + + if args.generator == "vae": + model = VectorMNISTVAE(variational=True, **model_params) + chkpt = VAE_OUTPUT + name = "mnist_vae" + elif args.generator == "ae": + model = VectorMNISTVAE(variational=False, **model_params) + chkpt = AE_OUTPUT + name = "mnist_ae" + else: + raise ValueError("unknown generator") + + if args.conditional: + name += "_conditional" + chkpt += "_conditional" + + if args.fc: + name += "_fc" + chkpt += "_fc" + + # Resume from checkpoint, if any + checkpointer = ttools.Checkpointer( + chkpt, model, meta=model_params, prefix="g_") + extras, meta = checkpointer.load_latest() + + if meta is not None and meta != model_params: + LOG.info("Checkpoint's metaparams differ from CLI, aborting: %s and %s", + meta, model_params) + + # Hook interface + if args.generator in ["vae", "ae"]: + variational = args.generator == "vae" + if variational: + LOG.info("Using a VAE") + else: + LOG.info("Using an AE") + interface = VAEInterface(model, lr=args.lr, cuda=args.cuda, + variational=variational, w_kld=args.kld_weight) + + trainer = ttools.Trainer(interface) + + # Add callbacks + keys = ["loss_g", "loss_d"] + if args.generator == "vae": + keys = ["kld", "data_loss", "loss"] + elif args.generator == "ae": + keys = ["data_loss", "loss"] + port = 8097 + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=keys, val_keys=keys)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=keys, val_keys=keys, env=name, port=port)) + trainer.add_callback(MNISTCallback( + env=name, win="samples", port=port, frequency=args.freq)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer, max_files=2, interval=600, max_epochs=50)) + + # Start training + trainer.train(dataloader, num_epochs=args.num_epochs) + + +def generate_samples(args): + chkpt = VAE_OUTPUT + if args.conditional: + chkpt += "_conditional" + if args.fc: + chkpt += "_fc" + + meta = ttools.Checkpointer.load_meta(chkpt, prefix="g_") + if meta is None: + LOG.info("No metadata in checkpoint (or no checkpoint), aborting.") + return + + model = VectorMNISTVAE(**meta) + checkpointer = ttools.Checkpointer(chkpt, model, prefix="g_") + checkpointer.load_latest() + model.eval() + + # Sample some latent vectors + n = 8 + bs = n*n + z = th.randn(bs, model.zdim) + + imsize = 28 + dataset = Dataset(args.data_dir, imsize) + dataloader = DataLoader(dataset, 
batch_size=bs, + num_workers=1, shuffle=True) + + for batch in dataloader: + ref, label = batch + break + + autoencode = True + if autoencode: + LOG.info("Sampling with auto-encoder code") + if not args.conditional: + label = None + mu, logvar = model.encode(ref, label) + z = model.reparameterize(mu, logvar) + else: + label = None + if args.conditional: + label = th.clamp(th.rand(bs)*10, 0, 9).long() + if args.digit is not None: + label[:] = args.digit + + with th.no_grad(): + images, aux = model.decode(z, label=label) + scenes = aux["scenes"] + images += 1.0 + images /= 2.0 + + h = w = model.imsize + + images = images.view(n, n, h, w).permute(0, 2, 1, 3) + images = images.contiguous().view(n*h, n*w) + images = th.clamp(images, 0, 1).cpu().numpy() + path = os.path.join(chkpt, "samples.png") + pydiffvg.imwrite(images, path, gamma=2.2) + + if autoencode: + ref += 1.0 + ref /= 2.0 + ref = ref.view(n, n, h, w).permute(0, 2, 1, 3) + ref = ref.contiguous().view(n*h, n*w) + ref = th.clamp(ref, 0, 1).cpu().numpy() + path = os.path.join(chkpt, "ref.png") + pydiffvg.imwrite(ref, path, gamma=2.2) + + # merge scenes + all_shapes = [] + all_shape_groups = [] + cur_id = 0 + for idx, s in enumerate(scenes): + shapes, shape_groups, _ = s + # width, height = sizes + + # Shift digit on canvas + center_x = idx % n + center_y = idx // n + for shape in shapes: + shape.points[:, 0] += center_x * model.imsize + shape.points[:, 1] += center_y * model.imsize + all_shapes.append(shape) + for grp in shape_groups: + grp.shape_ids[:] = cur_id + cur_id += 1 + all_shape_groups.append(grp) + + LOG.info("Generated %d shapes", len(all_shapes)) + + fname = os.path.join(chkpt, "digits.svg") + pydiffvg.save_svg(fname, n*model.imsize, n*model.imsize, all_shapes, + all_shape_groups, use_gamma=False) + + LOG.info("Results saved to %s", chkpt) + + +def interpolate(args): + chkpt = VAE_OUTPUT + if args.conditional: + chkpt += "_conditional" + if args.fc: + chkpt += "_fc" + + meta = ttools.Checkpointer.load_meta(chkpt, prefix="g_") + if meta is None: + LOG.info("No metadata in checkpoint (or no checkpoint), aborting.") + return + + model = VectorMNISTVAE(imsize=128, **meta) + checkpointer = ttools.Checkpointer(chkpt, model, prefix="g_") + checkpointer.load_latest() + model.eval() + + # Sample some latent vectors + bs = 10 + z = th.randn(bs, model.zdim) + + label = None + label = th.arange(0, 10) + + animation = [] + nframes = 60 + with th.no_grad(): + for idx, _z in enumerate(z): + if idx == 0: # skip first + continue + _z0 = z[idx-1].unsqueeze(0).repeat(nframes, 1) + _z = _z.unsqueeze(0).repeat(nframes, 1) + if args.conditional: + _label = label[idx].unsqueeze(0).repeat(nframes) + else: + _label = None + + # interp weights + alpha = th.linspace(0, 1, nframes).view(nframes, 1) + batch = alpha*_z + (1.0 - alpha)*_z0 + images, aux = model.decode(batch, label=_label) + images += 1.0 + images /= 2.0 + animation.append(images) + + anim_dir = os.path.join(chkpt, "interpolation") + os.makedirs(anim_dir, exist_ok=True) + animation = th.cat(animation, 0) + for idx, frame in enumerate(animation): + frame = frame.squeeze() + frame = th.clamp(frame, 0, 1).cpu().numpy() + path = os.path.join(anim_dir, "frame%03d.png" % idx) + pydiffvg.imwrite(frame, path, gamma=2.2) + + LOG.info("Results saved to %s", anim_dir) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + subs = parser.add_subparsers() + + parser.add_argument("--cpu", dest="cuda", action="store_false", + default=th.cuda.is_available(), + help="if true, use CPU instead 
of GPU.") + parser.add_argument("--conditional", action="store_true", default=False) + parser.add_argument("--fc", action="store_true", default=False) + parser.add_argument("--data_dir", default="mnist", + help="path to download and store the data.") + + # -- Train ---------------------------------------------------------------- + parser_train = subs.add_parser("train") + parser_train.add_argument("--generator", choices=["vae", "ae"], + default="vae", + help="choice of regular or variational " + "autoencoder") + parser_train.add_argument("--freq", type=int, default=100, + help="number of steps between visualizations") + parser_train.add_argument("--lr", type=float, default=1e-4, + help="learning rate") + parser_train.add_argument("--kld_weight", type=float, default=1.0, + help="scalar weight for the KL divergence term.") + parser_train.add_argument("--bs", type=int, default=8, help="batch size") + parser_train.add_argument("--num_epochs", type=int, + help="max number of epochs") + # Vector configs + parser_train.add_argument("--paths", type=int, default=1, + help="number of unique vector paths to generate.") + parser_train.add_argument("--segments", type=int, default=3, + help="number of segments per vector path") + parser_train.add_argument("--samples", type=int, default=2, + help="number of samples in the MC rasterizer") + parser_train.add_argument("--zdim", type=int, default=20, + help="dimension of the latent space") + parser_train.set_defaults(func=train) + + # -- Eval ----------------------------------------------------------------- + parser_sample = subs.add_parser("sample") + parser_sample.add_argument("--digit", type=int, choices=list(range(10)), + help="digits to synthesize, " + "random if not specified") + parser_sample.set_defaults(func=generate_samples) + + parser_interpolate = subs.add_parser("interpolate") + parser_interpolate.set_defaults(func=interpolate) + + args = parser.parse_args() + + ttools.set_logger(True) + args.func(args) diff --git a/apps/generative_models/models.py b/apps/generative_models/models.py new file mode 100644 index 0000000..06d4b1c --- /dev/null +++ b/apps/generative_models/models.py @@ -0,0 +1,484 @@ +"""Collection of generative models.""" + +import torch as th +import ttools + +import rendering +import modules + +LOG = ttools.get_logger(__name__) + + +class BaseModel(th.nn.Module): + def sample_z(self, bs, device="cpu"): + return th.randn(bs, self.zdim).to(device) + + +class BaseVectorModel(BaseModel): + def get_vector(self, z): + _, scenes = self._forward(z) + return scenes + + def _forward(self, x): + raise NotImplementedError() + + def forward(self, z): + # Only return the raster + return self._forward(z)[0] + + +class BezierVectorGenerator(BaseVectorModel): + NUM_SEGMENTS = 2 + def __init__(self, num_strokes=4, + zdim=128, width=32, imsize=32, + color_output=False, + stroke_width=None): + super(BezierVectorGenerator, self).__init__() + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + self.imsize = imsize + self.num_strokes = num_strokes + self.zdim = zdim + + self.trunk = th.nn.Sequential( + th.nn.Linear(zdim, width), + th.nn.SELU(inplace=True), + + th.nn.Linear(width, 2*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(2*width, 4*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(4*width, 8*width), + th.nn.SELU(inplace=True), + ) + + # 4 points bezier with n_segments -> 3*n_segments + 1 points + self.point_predictor 
= th.nn.Sequential( + th.nn.Linear(8*width, + 2*self.num_strokes*( + BezierVectorGenerator.NUM_SEGMENTS*3 + 1)), + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.color_predictor = None + if color_output: + self.color_predictor = th.nn.Sequential( + th.nn.Linear(8*width, 3*self.num_strokes), + th.nn.Sigmoid() + ) + + def _forward(self, z): + bs = z.shape[0] + + feats = self.trunk(z) + all_points = self.point_predictor(feats) + all_alphas = self.alpha_predictor(feats) + + if self.color_predictor: + all_colors = self.color_predictor(feats) + all_colors = all_colors.view(bs, self.num_strokes, 3) + else: + all_colors = None + + all_widths = self.width_predictor(feats) + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + all_points = all_points.view( + bs, self.num_strokes, BezierVectorGenerator.NUM_SEGMENTS*3+1, 2) + + output, scenes = rendering.bezier_render(all_points, all_widths, all_alphas, + colors=all_colors, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class VectorGenerator(BaseVectorModel): + def __init__(self, num_strokes=4, + zdim=128, width=32, imsize=32, + color_output=False, + stroke_width=None): + super(VectorGenerator, self).__init__() + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + self.imsize = imsize + self.num_strokes = num_strokes + self.zdim = zdim + + self.trunk = th.nn.Sequential( + th.nn.Linear(zdim, width), + th.nn.SELU(inplace=True), + + th.nn.Linear(width, 2*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(2*width, 4*width), + th.nn.SELU(inplace=True), + + th.nn.Linear(4*width, 8*width), + th.nn.SELU(inplace=True), + ) + + # straight lines so n_segments -> n_segments - 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(8*width, 2*(self.num_strokes*2)), + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(8*width, self.num_strokes), + th.nn.Sigmoid() + ) + + self.color_predictor = None + if color_output: + self.color_predictor = th.nn.Sequential( + th.nn.Linear(8*width, 3*self.num_strokes), + th.nn.Sigmoid() + ) + + def _forward(self, z): + bs = z.shape[0] + + feats = self.trunk(z) + + all_points = self.point_predictor(feats) + + all_alphas = self.alpha_predictor(feats) + + if self.color_predictor: + all_colors = self.color_predictor(feats) + all_colors = all_colors.view(bs, self.num_strokes, 3) + else: + all_colors = None + + all_widths = self.width_predictor(feats) + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + all_points = all_points.view(bs, self.num_strokes, 2, 2) + output, scenes = rendering.line_render(all_points, all_widths, all_alphas, + colors=all_colors, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class RNNVectorGenerator(BaseVectorModel): + def __init__(self, num_strokes=64, + zdim=128, width=32, imsize=32, + hidden_size=512, dropout=0.9, + 
color_output=False, + num_layers=3, stroke_width=None): + super(RNNVectorGenerator, self).__init__() + + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + self.num_layers = num_layers + self.imsize = imsize + self.num_strokes = num_strokes + self.hidden_size = hidden_size + self.zdim = zdim + + self.hidden_cell_predictor = th.nn.Linear( + zdim, 2*hidden_size*num_layers) + + self.lstm = th.nn.LSTM( + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + # straight lines so n_segments -> n_segments - 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 2*2), # 2 points, (x,y) + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + def _forward(self, z, hidden_and_cell=None): + steps = self.num_strokes + + # z is passed at each step, duplicate it + bs = z.shape[0] + expanded_z = z.unsqueeze(1).repeat(1, steps, 1) + + # First step in the RNN + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = self.hidden_cell_predictor(th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size*self.num_layers] + hidden = hidden.view(-1, self.num_layers, self.hidden_size) + hidden = hidden.permute(1, 0, 2).contiguous() + cell = hidden_and_cell[:, self.hidden_size*self.num_layers:] + cell = cell.view(-1, self.num_layers, self.hidden_size) + cell = cell.permute(1, 0, 2).contiguous() + hidden_and_cell = (hidden, cell) + + feats, hidden_and_cell = self.lstm(expanded_z, hidden_and_cell) + hidden, cell = hidden_and_cell + + feats = feats.reshape(bs*steps, self.hidden_size) + + all_points = self.point_predictor(feats).view(bs, steps, 2, 2) + all_alphas = self.alpha_predictor(feats).view(bs, steps) + all_widths = self.width_predictor(feats).view(bs, steps) + + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + output, scenes = rendering.line_render(all_points, all_widths, all_alphas, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class ChainRNNVectorGenerator(BaseVectorModel): + """Strokes form a single long chain.""" + def __init__(self, num_strokes=64, + zdim=128, width=32, imsize=32, + hidden_size=512, dropout=0.9, + color_output=False, + num_layers=3, stroke_width=None): + super(ChainRNNVectorGenerator, self).__init__() + + if stroke_width is None: + self.stroke_width = (0.5, 3.0) + LOG.warning("Setting default stroke with %s", self.stroke_width) + else: + self.stroke_width = stroke_width + + self.num_layers = num_layers + self.imsize = imsize + self.num_strokes = num_strokes + self.hidden_size = hidden_size + self.zdim = zdim + + self.hidden_cell_predictor = th.nn.Linear( + zdim, 2*hidden_size*num_layers) + + self.lstm = th.nn.LSTM( + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + # straight lines so n_segments -> n_segments - 1 points + self.point_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 2), # 1 point, (x,y) + th.nn.Tanh() # bound spatial extent + ) + + self.width_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + self.alpha_predictor = th.nn.Sequential( + 
th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid() + ) + + def _forward(self, z, hidden_and_cell=None): + steps = self.num_strokes + + # z is passed at each step, duplicate it + bs = z.shape[0] + expanded_z = z.unsqueeze(1).repeat(1, steps, 1) + + # First step in the RNN + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = self.hidden_cell_predictor(th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size*self.num_layers] + hidden = hidden.view(-1, self.num_layers, self.hidden_size) + hidden = hidden.permute(1, 0, 2).contiguous() + cell = hidden_and_cell[:, self.hidden_size*self.num_layers:] + cell = cell.view(-1, self.num_layers, self.hidden_size) + cell = cell.permute(1, 0, 2).contiguous() + hidden_and_cell = (hidden, cell) + + feats, hidden_and_cell = self.lstm(expanded_z, hidden_and_cell) + hidden, cell = hidden_and_cell + + feats = feats.reshape(bs*steps, self.hidden_size) + + # Construct the chain + end_points = self.point_predictor(feats).view(bs, steps, 1, 2) + start_points = th.cat([ + # first point is canvas center + th.zeros(bs, 1, 1, 2, device=feats.device), + end_points[:, 1:, :, :]], 1) + all_points = th.cat([start_points, end_points], 2) + + all_alphas = self.alpha_predictor(feats).view(bs, steps) + all_widths = self.width_predictor(feats).view(bs, steps) + + min_width = self.stroke_width[0] + max_width = self.stroke_width[1] + all_widths = (max_width - min_width) * all_widths + min_width + + output, scenes = rendering.line_render(all_points, all_widths, all_alphas, + canvas_size=self.imsize) + + # map to [-1, 1] + output = output*2.0 - 1.0 + + return output, scenes + + +class Generator(BaseModel): + def __init__(self, width=64, imsize=32, zdim=128, + stroke_width=None, + color_output=False, + num_strokes=4): + super(Generator, self).__init__() + assert imsize == 32 + + self.imsize = imsize + self.zdim = zdim + + num_in_chans = self.zdim // (2*2) + num_out_chans = 3 if color_output else 1 + + self.net = th.nn.Sequential( + th.nn.ConvTranspose2d(num_in_chans, width*8, 4, padding=1, + stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width*8, width*8, 3, padding=1), + th.nn.BatchNorm2d(width*8), + th.nn.LeakyReLU(0.2, inplace=True), + # 4x4 + + th.nn.ConvTranspose2d(8*width, 4*width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(4*width, 4*width, 3, padding=1), + th.nn.BatchNorm2d(width*4), + th.nn.LeakyReLU(0.2, inplace=True), + # 8x8 + + th.nn.ConvTranspose2d(4*width, 2*width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(2*width, 2*width, 3, padding=1), + th.nn.BatchNorm2d(width*2), + th.nn.LeakyReLU(0.2, inplace=True), + # 16x16 + + th.nn.ConvTranspose2d(2*width, width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, width, 3, padding=1), + th.nn.BatchNorm2d(width), + th.nn.LeakyReLU(0.2, inplace=True), + # 32x32 + + th.nn.Conv2d(width, width, 3, padding=1), + th.nn.BatchNorm2d(width), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, width, 3, padding=1), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, num_out_chans, 1), + + th.nn.Tanh(), + ) + + def forward(self, z): + bs = z.shape[0] + num_in_chans = self.zdim // (2*2) + raster = self.net(z.view(bs, num_in_chans, 2, 2)) + return raster + + +class Discriminator(th.nn.Module): + def __init__(self, conditional=False, width=64, color_output=False): + super(Discriminator, self).__init__() + + self.conditional = conditional + + sn = th.nn.utils.spectral_norm + 
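ChainRNNVectorGenerator._forward above builds each line segment from a predicted endpoint plus a start point taken from an earlier step, with the first stroke starting at the canvas center. A minimal sketch of that pairing, assuming the intent is for stroke k to start where stroke k-1 ends; note it shifts with end_points[:, :-1], whereas the code above slices end_points[:, 1:], which pairs each stroke with its own endpoint. The helper name is mine, not part of the codebase:

import torch as th

def chain_points(end_points):
    """Pair each predicted endpoint with the previous stroke's endpoint.

    end_points: (bs, steps, 1, 2) in [-1, 1]. Stroke 0 starts at the canvas
    center (0, 0); stroke k starts where stroke k-1 ended.
    """
    bs = end_points.shape[0]
    start_points = th.cat([
        th.zeros(bs, 1, 1, 2, device=end_points.device),  # canvas center
        end_points[:, :-1]], 1)                            # previous endpoints
    return th.cat([start_points, end_points], 2)           # (bs, steps, 2, 2)

# e.g. chain_points(th.randn(4, 64, 1, 2)).shape == (4, 64, 2, 2)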
+ num_chan_in = 3 if color_output else 1 + + self.net = th.nn.Sequential( + th.nn.Conv2d(num_chan_in, width, 3, padding=1), + th.nn.LeakyReLU(0.2, inplace=True), + th.nn.Conv2d(width, 2*width, 4, padding=1, stride=2), + th.nn.LeakyReLU(0.2, inplace=True), + # 16x16 + + sn(th.nn.Conv2d(2*width, 2*width, 3, padding=1)), + th.nn.LeakyReLU(0.2, inplace=True), + sn(th.nn.Conv2d(2*width, 4*width, 4, padding=1, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 8x8 + + sn(th.nn.Conv2d(4*width, 4*width, 3, padding=1)), + th.nn.LeakyReLU(0.2, inplace=True), + sn(th.nn.Conv2d(4*width, width*4, 4, padding=1, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 4x4 + + sn(th.nn.Conv2d(4*width, 4*width, 3, padding=1)), + th.nn.LeakyReLU(0.2, inplace=True), + sn(th.nn.Conv2d(4*width, width*4, 4, padding=1, stride=2)), + th.nn.LeakyReLU(0.2, inplace=True), + # 2x2 + + modules.Flatten(), + th.nn.Linear(width*4*2*2, 1), + ) + + def forward(self, x): + out = self.net(x) + return out diff --git a/apps/generative_models/modules.py b/apps/generative_models/modules.py new file mode 100644 index 0000000..e8589ae --- /dev/null +++ b/apps/generative_models/modules.py @@ -0,0 +1,11 @@ +"""Helper modules to build our networks.""" +import torch as th + + +class Flatten(th.nn.Module): + def __init__(self): + super(Flatten, self).__init__() + + def forward(self, x): + bs = x.shape[0] + return x.view(bs, -1) diff --git a/apps/generative_models/rendering.py b/apps/generative_models/rendering.py new file mode 100644 index 0000000..4ef475e --- /dev/null +++ b/apps/generative_models/rendering.py @@ -0,0 +1,307 @@ +import os +import torch as th +import torch.multiprocessing as mp +import threading as mt +import numpy as np +import random + +import ttools + +import pydiffvg +import time + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2, + seed=None): + if seed is None: + seed = random.randint(0, 1000000) + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene( + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, canvas_height, samples, samples, + seed, # seed + None, # background image + *scene_args) + return img + + +def opacityStroke2diffvg(strokes, canvas_size=128, debug=False, relative=True, + force_cpu=True): + + dev = strokes.device + if force_cpu: + strokes = strokes.to("cpu") + + + # pydiffvg.set_use_gpu(False) + # if strokes.is_cuda: + # pydiffvg.set_use_gpu(True) + + """Rasterize strokes given in (dx, dy, opacity) sequence format.""" + bs, nsegs, dims = strokes.shape + out = [] + + start = time.time() + for batch_idx, stroke in enumerate(strokes): + + if relative: # Absolute coordinates + all_points = stroke[..., :2].cumsum(0) + else: + all_points = stroke[..., :2] + + all_opacities = stroke[..., 2] + + # Transform from [-1, 1] to canvas coordinates + # Make sure points are in canvas + all_points = 0.5*(all_points + 1.0) * canvas_size + # all_points = th.clamp(0.5*(all_points + 1.0), 0, 1) * canvas_size + + # Avoid overlapping points + eps = 1e-4 + all_points = all_points + eps*th.randn_like(all_points) + + shapes = [] + shape_groups = [] + + for start_idx in range(0, nsegs-1): + points = all_points[start_idx:start_idx+2].contiguous().float() + opacity = all_opacities[start_idx] + + num_ctrl_pts = th.zeros(points.shape[0] - 1, dtype=th.int32) + width = th.ones(1) + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + + shapes.append(path) + + color = 
th.cat([th.ones(3, device=opacity.device), + opacity.unsqueeze(0)], 0) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize only if there are shapes + if shapes: + inner_start = time.time() + out.append(render(canvas_size, canvas_size, shapes, shape_groups, + samples=4)) + if debug: + inner_elapsed = time.time() - inner_start + print("diffvg call took %.2fms" % inner_elapsed) + else: + out.append(th.zeros(canvas_size, canvas_size, 4, + device=strokes.device)) + + if debug: + elapsed = (time.time() - start)*1000 + print("rendering took %.2fms" % elapsed) + images = th.stack(out, 0).permute(0, 3, 1, 2).contiguous() + + # Return data on the same device as input + return images.to(dev) + + +def stroke2diffvg(strokes, canvas_size=128): + """Rasterize strokes given some sequential data.""" + bs, nsegs, dims = strokes.shape + out = [] + for stroke_idx, stroke in enumerate(strokes): + end_of_stroke = stroke[:, 4] == 1 + last = end_of_stroke.cpu().numpy().argmax() + stroke = stroke[:last+1, :] + # stroke = stroke[~end_of_stroke] + # TODO: stop at the first end of stroke + # import ipdb; ipdb.set_trace() + split_idx = stroke[:, 3].nonzero().squeeze(1) + + # Absolute coordinates + all_points = stroke[..., :2].cumsum(0) + + # Transform to canvas coordinates + all_points[..., 0] += 0.5 + all_points[..., 0] *= canvas_size + all_points[..., 1] += 0.5 + all_points[..., 1] *= canvas_size + + # Make sure points are in canvas + all_points[..., :2] = th.clamp(all_points[..., :2], 0, canvas_size) + + shape_groups = [] + shapes = [] + start_idx = 0 + + for count, end_idx in enumerate(split_idx): + points = all_points[start_idx:end_idx+1].contiguous().float() + + if points.shape[0] <= 2: # we need at least 2 points for a line + continue + + num_ctrl_pts = th.zeros(points.shape[0] - 1, dtype=th.int32) + width = th.ones(1) + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + + start_idx = end_idx+1 + shapes.append(path) + + color = th.ones(4, 1) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize + if shapes: + # draw only if there are shapes + out.append(render(canvas_size, canvas_size, shapes, shape_groups, samples=2)) + else: + out.append(th.zeros(canvas_size, canvas_size, 4, + device=strokes.device)) + + return th.stack(out, 0).permute(0, 3, 1, 2)[:, :3].contiguous() + + +def line_render(all_points, all_widths, all_alphas, force_cpu=True, + canvas_size=32, colors=None): + dev = all_points.device + if force_cpu: + all_points = all_points.to("cpu") + all_widths = all_widths.to("cpu") + all_alphas = all_alphas.to("cpu") + + if colors is not None: + colors = colors.to("cpu") + + all_points = 0.5*(all_points + 1.0) * canvas_size + + eps = 1e-4 + all_points = all_points + eps*th.randn_like(all_points) + + bs, num_segments, _, _ = all_points.shape + n_out = 3 if colors is not None else 1 + output = th.zeros(bs, n_out, canvas_size, canvas_size, + device=all_points.device) + + scenes = [] + for k in range(bs): + shapes = [] + shape_groups = [] + for p in range(num_segments): + points = all_points[k, p].contiguous().cpu() + num_ctrl_pts = th.zeros(1, dtype=th.int32) + width = all_widths[k, p].cpu() + alpha = all_alphas[k, p].cpu() + if colors is not None: + color = colors[k, p] + else: + color = th.ones(3, device=alpha.device) + + 
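# Each stroke is drawn with a constant RGB color (white unless a color
# predictor is used) and a learned per-stroke alpha appended as the fourth
# channel of the stroke color; after rasterization the image is multiplied
# by its alpha channel, i.e. composited over a black background.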
color = th.cat([color, alpha.view(1,)]) + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize + scenes.append((canvas_size, canvas_size, shapes, shape_groups)) + raster = render(canvas_size, canvas_size, shapes, shape_groups, + samples=2) + raster = raster.permute(2, 0, 1).view(4, canvas_size, canvas_size) + + alpha = raster[3:4] + if colors is not None: # color output + image = raster[:3] + alpha = alpha.repeat(3, 1, 1) + else: + image = raster[:1] + + # alpha compositing + image = image*alpha + output[k] = image + + output = output.to(dev) + + return output, scenes + + +def bezier_render(all_points, all_widths, all_alphas, force_cpu=True, + canvas_size=32, colors=None): + dev = all_points.device + if force_cpu: + all_points = all_points.to("cpu") + all_widths = all_widths.to("cpu") + all_alphas = all_alphas.to("cpu") + + if colors is not None: + colors = colors.to("cpu") + + all_points = 0.5*(all_points + 1.0) * canvas_size + + eps = 1e-4 + all_points = all_points + eps*th.randn_like(all_points) + + bs, num_strokes, num_pts, _ = all_points.shape + num_segments = (num_pts - 1) // 3 + n_out = 3 if colors is not None else 1 + output = th.zeros(bs, n_out, canvas_size, canvas_size, + device=all_points.device) + + scenes = [] + for k in range(bs): + shapes = [] + shape_groups = [] + for p in range(num_strokes): + points = all_points[k, p].contiguous().cpu() + # bezier + num_ctrl_pts = th.zeros(num_segments, dtype=th.int32) + 2 + width = all_widths[k, p].cpu() + alpha = all_alphas[k, p].cpu() + if colors is not None: + color = colors[k, p] + else: + color = th.ones(3, device=alpha.device) + + color = th.cat([color, alpha.view(1,)]) + + path = pydiffvg.Path( + num_control_points=num_ctrl_pts, points=points, + stroke_width=width, is_closed=False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup( + shape_ids=th.tensor([len(shapes) - 1]), + fill_color=None, + stroke_color=color) + shape_groups.append(path_group) + + # Rasterize + scenes.append((canvas_size, canvas_size, shapes, shape_groups)) + raster = render(canvas_size, canvas_size, shapes, shape_groups, + samples=2) + raster = raster.permute(2, 0, 1).view(4, canvas_size, canvas_size) + + alpha = raster[3:4] + if colors is not None: # color output + image = raster[:3] + alpha = alpha.repeat(3, 1, 1) + else: + image = raster[:1] + + # alpha compositing + image = image*alpha + output[k] = image + + output = output.to(dev) + + return output, scenes diff --git a/apps/generative_models/sketch_rnn.py b/apps/generative_models/sketch_rnn.py new file mode 100755 index 0000000..2b88767 --- /dev/null +++ b/apps/generative_models/sketch_rnn.py @@ -0,0 +1,461 @@ +#!/bin/env python +"""Train a Sketch-RNN.""" +import argparse +from enum import Enum +import os +import wget + +import numpy as np +import torch as th +from torch.utils.data import DataLoader +import torchvision.datasets as dset +import torchvision.transforms as transforms + +import ttools +import ttools.interfaces +from ttools.modules import networks + +import pydiffvg + +import rendering +import losses +import data + +LOG = ttools.get_logger(__name__) + + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +OUTPUT = os.path.join(BASE_DIR, "results", "sketch_rnn_diffvg") +OUTPUT_BASELINE = os.path.join(BASE_DIR, 
"results", "sketch_rnn") + + +class SketchRNN(th.nn.Module): + class Encoder(th.nn.Module): + def __init__(self, hidden_size=512, dropout=0.9, zdim=128, + num_layers=1): + super(SketchRNN.Encoder, self).__init__() + self.hidden_size = hidden_size + self.num_layers = num_layers + self.zdim = zdim + + self.lstm = th.nn.LSTM(5, hidden_size, num_layers=self.num_layers, + dropout=dropout, bidirectional=True, + batch_first=True) + + # bidirectional model -> *2 + self.mu_predictor = th.nn.Linear(2*hidden_size, zdim) + self.sigma_predictor = th.nn.Linear(2*hidden_size, zdim) + + def forward(self, sequences, hidden_and_cell=None): + bs = sequences.shape[0] + if hidden_and_cell is None: + hidden = th.zeros(self.num_layers*2, bs, self.hidden_size).to( + sequences.device) + cell = th.zeros(self.num_layers*2, bs, self.hidden_size).to( + sequences.device) + hidden_and_cell = (hidden, cell) + + out, hidden_and_cell = self.lstm(sequences, hidden_and_cell) + hidden = hidden_and_cell[0] + + # Concat the forward/backward states + fc_input = th.cat([hidden[0], hidden[1]], 1) + + # VAE params + mu = self.mu_predictor(fc_input) + log_sigma = self.sigma_predictor(fc_input) + + # Sample a latent vector + sigma = th.exp(log_sigma/2.0) + z0 = th.randn(self.zdim, device=mu.device) + z = mu + sigma*z0 + + # KL divergence needs mu/sigma + return z, mu, log_sigma + + class Decoder(th.nn.Module): + """ + The decoder outputs a sequence where each time step models (dx, dy) as + a mixture of `num_gaussians` 2D Gaussians and the state triplet is a + categorical distribution. + + The model outputs at each time step: + - 5 parameters for each Gaussian: mu_x, mu_y, sigma_x, sigma_y, + rho_xy + - 1 logit for each Gaussian (the mixture weight) + - 3 logits for the state triplet probabilities + """ + def __init__(self, hidden_size=512, dropout=0.9, zdim=128, + num_layers=1, num_gaussians=20): + super(SketchRNN.Decoder, self).__init__() + self.hidden_size = hidden_size + self.num_layers = num_layers + self.zdim = zdim + self.num_gaussians = num_gaussians + + # Maps the latent vector to an initial cell/hidden vector + self.hidden_cell_predictor = th.nn.Linear(zdim, 2*hidden_size) + + self.lstm = th.nn.LSTM( + 5 + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + self.parameters_predictor = th.nn.Linear( + hidden_size, num_gaussians + 5*num_gaussians + 3) + + def forward(self, inputs, z, hidden_and_cell=None): + # Every step in the sequence takes the latent vector as input so we + # replicate it here + expanded_z = z.unsqueeze(1).repeat(1, inputs.shape[1], 1) + inputs = th.cat([inputs, expanded_z], 2) + + bs, steps = inputs.shape[:2] + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = self.hidden_cell_predictor(th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size] + hidden = hidden.unsqueeze(0).contiguous() + cell = hidden_and_cell[:, self.hidden_size:] + cell = cell.unsqueeze(0).contiguous() + hidden_and_cell = (hidden, cell) + + outputs, hidden_and_cell = self.lstm(inputs, hidden_and_cell) + hidden, cell = hidden_and_cell + + # if self.training: + # At train time we want parameters for each time step + outputs = outputs.reshape(bs*steps, self.hidden_size) + params = self.parameters_predictor(outputs).view(bs, steps, -1) + + pen_logits = params[..., -3:] + gaussian_params = params[..., :-3] + mixture_logits = gaussian_params[..., :self.num_gaussians] + gaussian_params = gaussian_params[..., self.num_gaussians:].view( + bs, steps, self.num_gaussians, -1) 
+ + return pen_logits, mixture_logits, gaussian_params, hidden_and_cell + + def __init__(self, zdim=128, num_gaussians=20, encoder_dim=256, + decoder_dim=512): + super(SketchRNN, self).__init__() + self.encoder = SketchRNN.Encoder(zdim=zdim, hidden_size=encoder_dim) + self.decoder = SketchRNN.Decoder(zdim=zdim, hidden_size=decoder_dim, + num_gaussians=num_gaussians) + + def forward(self, sequences): + # Encode the sequences as latent vectors + # We skip the first time step since it is the same for all sequences: + # (0, 0, 1, 0, 0) + z, mu, log_sigma = self.encoder(sequences[:, 1:]) + + # Decode the latent vector into a model sequence + # Do not process the last time step (it is an end-of-sequence token) + pen_logits, mixture_logits, gaussian_params, hidden_and_cell = \ + self.decoder(sequences[:, :-1], z) + + return { + "pen_logits": pen_logits, + "mixture_logits": mixture_logits, + "gaussian_params": gaussian_params, + "z": z, + "mu": mu, + "log_sigma": log_sigma, + "hidden_and_cell": hidden_and_cell, + } + + def sample(self, sequences, temperature=1.0): + # Compute a latent vector conditionned based on a real sequence + z, _, _ = self.encoder(sequences[:, 1:]) + + start_of_seq = sequences[:, :1] + + max_steps = sequences.shape[1] - 1 # last step is an end-of-seq token + + output_sequences = th.zeros_like(sequences) + output_sequences[:, 0] = start_of_seq.squeeze(1) + + current_input = start_of_seq + hidden_and_cell = None + for step in range(max_steps): + pen_logits, mixture_logits, gaussian_params, hidden_and_cell = \ + self.decoder(current_input, z, hidden_and_cell=hidden_and_cell) + + # Pen and displacement state for the next step + next_state = th.zeros_like(current_input) + + # Adjust temperature to control randomness + mixture_logits = mixture_logits*temperature + pen_logits = pen_logits*temperature + + # Select one of 3 pen states + pen_distrib = \ + th.distributions.categorical.Categorical(logits=pen_logits) + pen_state = pen_distrib.sample() + + # One-hot encoding of the state + next_state[:, :, 2:].scatter_(2, pen_state.unsqueeze(-1), + th.ones_like(next_state[:, :, 2:])) + + # Select one of the Gaussians from the mixture + mixture_distrib = \ + th.distributions.categorical.Categorical(logits=mixture_logits) + mixture_idx = mixture_distrib.sample() + + # select the Gaussian parameter + mixture_idx = mixture_idx.unsqueeze(-1).unsqueeze(-1) + mixture_idx = mixture_idx.repeat(1, 1, 1, 5) + params = th.gather(gaussian_params, 2, mixture_idx).squeeze(2) + + # Sample a Gaussian from the corresponding Gaussian + mu = params[..., :2] + sigma_x = params[..., 2].exp() + sigma_y = params[..., 3].exp() + rho_xy = th.tanh(params[..., 4]) + cov = th.zeros(params.shape[0], params.shape[1], 2, 2, + device=params.device) + cov[..., 0, 0] = sigma_x.pow(2)*temperature + cov[..., 1, 1] = sigma_x.pow(2)*temperature + cov[..., 1, 0] = sigma_x*sigma_y*rho_xy*temperature + point_distrib = \ + th.distributions.multivariate_normal.MultivariateNormal( + mu, scale_tril=cov) + point = point_distrib.sample() + next_state[:, :, :2] = point + + # Commit step to output + output_sequences[:, step + 1] = next_state.squeeze(1) + + # Prepare next recurrent step + current_input = next_state + + return output_sequences + + +class SketchRNNCallback(ttools.callbacks.ImageDisplayCallback): + """Simple callback that visualize images.""" + def visualized_image(self, batch, step_data, is_val=False): + if not is_val: + # No need to render training data + return None + + with th.no_grad(): + # only display the first n 
drawings + n = 8 + batch = batch[:n] + + out_im = rendering.stroke2diffvg(step_data["sample"][:n]) + im = rendering.stroke2diffvg(batch) + im = th.cat([im, out_im], 2) + + return im + + def caption(self, batch, step_data, is_val=False): + if is_val: + return "top: truth, bottom: sample" + else: + return "top: truth, bottom: sample" + + +class Interface(ttools.ModelInterface): + def __init__(self, model, lr=1e-3, lr_decay=0.9999, + kl_weight=0.5, kl_min_weight=0.01, kl_decay=0.99995, + device="cpu", grad_clip=1.0, sampling_temperature=0.4): + super(Interface, self).__init__() + self.grad_clip = grad_clip + self.sampling_temperature = sampling_temperature + + self.model = model + self.device = device + self.model.to(self.device) + self.enc_opt = th.optim.Adam(self.model.encoder.parameters(), lr=lr) + self.dec_opt = th.optim.Adam(self.model.decoder.parameters(), lr=lr) + + self.kl_weight = kl_weight + self.kl_min_weight = kl_min_weight + self.kl_decay = kl_decay + self.kl_loss = losses.KLDivergence() + + self.schedulers = [ + th.optim.lr_scheduler.ExponentialLR(self.enc_opt, lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.dec_opt, lr_decay), + ] + + self.reconstruction_loss = losses.GaussianMixtureReconstructionLoss() + + def optimizers(self): + return [self.enc_opt, self.dec_opt] + + def training_step(self, batch): + batch = batch.to(self.device) + out = self.model(batch) + + kl_loss = self.kl_loss( + out["mu"], out["log_sigma"]) + + # The target to predict is the next sequence step + targets = batch[:, 1:].to(self.device) + + # Scale the KL divergence weight + try: + state = self.enc_opt.state_dict()["param_groups"][0]["params"][0] + optim_step = self.enc_opt.state_dict()["state"][state]["step"] + except KeyError: + optim_step = 0 # no step taken yet + kl_scaling = 1.0 - (1.0 - + self.kl_min_weight)*(self.kl_decay**optim_step) + kl_weight = self.kl_weight * kl_scaling + + reconstruction_loss = self.reconstruction_loss( + out["pen_logits"], out["mixture_logits"], + out["gaussian_params"], targets) + loss = kl_loss*self.kl_weight + reconstruction_loss + + self.enc_opt.zero_grad() + self.dec_opt.zero_grad() + loss.backward() + + # clip gradients + enc_nrm = th.nn.utils.clip_grad_norm_( + self.model.encoder.parameters(), self.grad_clip) + dec_nrm = th.nn.utils.clip_grad_norm_( + self.model.decoder.parameters(), self.grad_clip) + + if enc_nrm > self.grad_clip: + LOG.debug("Clipped encoder gradient (%.5f) to %.2f", + enc_nrm, self.grad_clip) + + if dec_nrm > self.grad_clip: + LOG.debug("Clipped decoder gradient (%.5f) to %.2f", + dec_nrm, self.grad_clip) + + self.enc_opt.step() + self.dec_opt.step() + + return { + "loss": loss.item(), + "kl_loss": kl_loss.item(), + "kl_weight": kl_weight, + "recons_loss": reconstruction_loss.item(), + "lr": self.enc_opt.param_groups[0]["lr"], + } + + def init_validation(self): + return dict(sample=None) + + def validation_step(self, batch, running_data): + # Switch to eval mode for dropout, batchnorm, etc + self.model.eval() + with th.no_grad(): + sample = self.model.sample( + batch.to(self.device), temperature=self.sampling_temperature) + running_data["sample"] = sample + self.model.train() + return running_data + + +def train(args): + th.manual_seed(0) + np.random.seed(0) + + dataset = data.QuickDrawDataset(args.dataset) + dataloader = DataLoader( + dataset, batch_size=args.bs, num_workers=4, shuffle=True, + pin_memory=False) + + val_dataset = [s for idx, s in enumerate(dataset) if idx < 8] + val_dataloader = DataLoader( + val_dataset, batch_size=8, 
num_workers=4, shuffle=False, + pin_memory=False) + + model_params = { + "zdim": args.zdim, + "num_gaussians": args.num_gaussians, + "encoder_dim": args.encoder_dim, + "decoder_dim": args.decoder_dim, + } + model = SketchRNN(**model_params) + model.train() + + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + LOG.info("Using CUDA") + + interface = Interface(model, lr=args.lr, lr_decay=args.lr_decay, + kl_decay=args.kl_decay, kl_weight=args.kl_weight, + sampling_temperature=args.sampling_temperature, + device=device) + + chkpt = OUTPUT_BASELINE + env_name = "sketch_rnn" + + # Resume from checkpoint, if any + checkpointer = ttools.Checkpointer( + chkpt, model, meta=model_params, + optimizers=interface.optimizers(), + schedulers=interface.schedulers) + extras, meta = checkpointer.load_latest() + epoch = extras["epoch"] if extras and "epoch" in extras.keys() else 0 + + if meta is not None and meta != model_params: + LOG.info("Checkpoint's metaparams differ " + "from CLI, aborting: %s and %s", meta, model_params) + + trainer = ttools.Trainer(interface) + + # Add callbacks + losses = ["loss", "kl_loss", "recons_loss"] + training_debug = ["lr", "kl_weight"] + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=losses, val_keys=None)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=losses, val_keys=None, env=env_name, port=args.port)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=training_debug, smoothing=0, val_keys=None, env=env_name, + port=args.port)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer, max_files=2, interval=600, max_epochs=10)) + trainer.add_callback( + ttools.callbacks.LRSchedulerCallback(interface.schedulers)) + + trainer.add_callback(SketchRNNCallback( + env=env_name, win="samples", port=args.port, frequency=args.freq)) + + # Start training + trainer.train(dataloader, starting_epoch=epoch, + val_dataloader=val_dataloader, + num_epochs=args.num_epochs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--dataset", default="cat.npz") + + # Training params + parser.add_argument("--bs", type=int, default=100) + parser.add_argument("--num_epochs", type=int, default=10000) + parser.add_argument("--lr", type=float, default=1e-4) + parser.add_argument("--lr_decay", type=float, default=0.9999) + parser.add_argument("--kl_weight", type=float, default=0.5) + parser.add_argument("--kl_decay", type=float, default=0.99995) + + # Model configuration + parser.add_argument("--zdim", type=int, default=128) + parser.add_argument("--num_gaussians", type=int, default=20) + parser.add_argument("--encoder_dim", type=int, default=256) + parser.add_argument("--decoder_dim", type=int, default=512) + + parser.add_argument("--sampling_temperature", type=float, default=0.4, + help="controls sampling randomness. 
" + "0.0: deterministic, 1.0: unchanged") + + # Viz params + parser.add_argument("--freq", type=int, default=100) + parser.add_argument("--port", type=int, default=5000) + + args = parser.parse_args() + + pydiffvg.set_use_gpu(th.cuda.is_available()) + + train(args) diff --git a/apps/generative_models/sketch_vae.py b/apps/generative_models/sketch_vae.py new file mode 100755 index 0000000..797c3e5 --- /dev/null +++ b/apps/generative_models/sketch_vae.py @@ -0,0 +1,524 @@ +#!/bin/env python +"""Train a Sketch-VAE.""" +import argparse +from enum import Enum +import os +import wget +import time + +import numpy as np +import torch as th +from torch.utils.data import DataLoader +import torchvision.datasets as dset +import torchvision.transforms as transforms + +import ttools +import ttools.interfaces +from ttools.modules import networks + +import rendering +import losses +import modules +import data + +import pydiffvg + +LOG = ttools.get_logger(__name__) + + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +OUTPUT = os.path.join(BASE_DIR, "results") + + +class SketchVAE(th.nn.Module): + class ImageEncoder(th.nn.Module): + def __init__(self, image_size=64, width=64, zdim=128): + super(SketchVAE.ImageEncoder, self).__init__() + self.zdim = zdim + + self.net = th.nn.Sequential( + th.nn.Conv2d(4, width, 5, padding=2), + th.nn.InstanceNorm2d(width), + th.nn.ReLU(inplace=True), + # 64x64 + + th.nn.Conv2d(width, width, 5, padding=2), + th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 64x64 + + th.nn.Conv2d(width, 2*width, 5, stride=1, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 32x32 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 8x8 + + th.nn.Conv2d(2*width, 2*width, 5, stride=2, padding=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 4x4 + + modules.Flatten(), + th.nn.Linear(4*4*2*width, 2*zdim) + ) + + def forward(self, images): + features = self.net(images) + + # VAE params + mu = features[:, :self.zdim] + log_sigma = features[:, self.zdim:] + + # Sample a latent vector + sigma = th.exp(log_sigma/2.0) + z0 = th.randn(self.zdim, device=mu.device) + z = mu + sigma*z0 + + # KL divergence needs mu/sigma + return z, mu, log_sigma + + class ImageDecoder(th.nn.Module): + """""" + def __init__(self, zdim=128, image_size=64, width=64): + super(SketchVAE.ImageDecoder, self).__init__() + self.zdim = zdim + self.width = width + + self.embedding = th.nn.Linear(zdim, 4*4*2*width) + + self.net = th.nn.Sequential( + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 8x8 + + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.Conv2d(2*width, 2*width, 5, padding=2, stride=1), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( inplace=True), + # 16x16 + + th.nn.ConvTranspose2d(2*width, 2*width, 4, padding=1, stride=2), + th.nn.InstanceNorm2d(2*width), + th.nn.ReLU( 
inplace=True), + # 32x32 + + th.nn.Conv2d(2*width, width, 5, padding=2, stride=1), + th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 32x32 + + th.nn.ConvTranspose2d(width, width, 5, padding=2, stride=1), + th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 64x64 + + th.nn.Conv2d(width, width, 5, padding=2, stride=1), + th.nn.InstanceNorm2d(width), + th.nn.ReLU( inplace=True), + # 64x64 + + th.nn.Conv2d(width, 4, 5, padding=2, stride=1), + ) + + def forward(self, z): + bs = z.shape[0] + im = self.embedding(z).view(bs, 2*self.width, 4, 4) + out = self.net(im) + return out + + class SketchDecoder(th.nn.Module): + """ + The decoder outputs a sequence where each time step models (dx, dy, + opacity). + """ + def __init__(self, sequence_length, hidden_size=512, dropout=0.9, + zdim=128, num_layers=3): + super(SketchVAE.SketchDecoder, self).__init__() + self.sequence_length = sequence_length + self.hidden_size = hidden_size + self.num_layers = num_layers + self.zdim = zdim + + # Maps the latent vector to an initial cell/hidden vector + self.hidden_cell_predictor = th.nn.Linear(zdim, 2*hidden_size*num_layers) + + self.lstm = th.nn.LSTM( + zdim, hidden_size, + num_layers=self.num_layers, dropout=dropout, + batch_first=True) + + self.dxdy_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 2), + th.nn.Tanh(), + ) + self.opacity_predictor = th.nn.Sequential( + th.nn.Linear(hidden_size, 1), + th.nn.Sigmoid(), + ) + + def forward(self, z, hidden_and_cell=None): + # Every step in the sequence takes the latent vector as input so we + # replicate it here + bs = z.shape[0] + steps = self.sequence_length - 1 # no need to predict the start of sequence + expanded_z = z.unsqueeze(1).repeat(1, steps, 1) + + if hidden_and_cell is None: + # Initialize from latent vector + hidden_and_cell = self.hidden_cell_predictor( + th.tanh(z)) + hidden = hidden_and_cell[:, :self.hidden_size*self.num_layers] + hidden = hidden.view(-1, self.num_layers, self.hidden_size) + hidden = hidden.permute(1, 0, 2).contiguous() + # hidden = hidden.unsqueeze(1).contiguous() + cell = hidden_and_cell[:, self.hidden_size*self.num_layers:] + cell = cell.view(-1, self.num_layers, self.hidden_size) + cell = cell.permute(1, 0, 2).contiguous() + # cell = cell.unsqueeze(1).contiguous() + hidden_and_cell = (hidden, cell) + + outputs, hidden_and_cell = self.lstm(expanded_z, hidden_and_cell) + hidden, cell = hidden_and_cell + + dxdy = self.dxdy_predictor( + outputs.reshape(bs*steps, self.hidden_size)).view(bs, steps, -1) + + opacity = self.opacity_predictor( + outputs.reshape(bs*steps, self.hidden_size)).view(bs, steps, -1) + + strokes = th.cat([dxdy, opacity], -1) + + return strokes + + def __init__(self, sequence_length, zdim=128, image_size=64): + super(SketchVAE, self).__init__() + self.im_encoder = SketchVAE.ImageEncoder( + zdim=zdim, image_size=image_size) + self.im_decoder = SketchVAE.ImageDecoder( + zdim=zdim, image_size=image_size) + self.sketch_decoder = SketchVAE.SketchDecoder( + sequence_length, zdim=zdim) + + def forward(self, images): + # Encode the images as latent vectors + z, mu, log_sigma = self.im_encoder(images) + decoded_im = self.im_decoder(z) + decoded_sketch = self.sketch_decoder(z) + + return { + "decoded_im": decoded_im, + "decoded_sketch": decoded_sketch, + "z": z, + "mu": mu, + "log_sigma": log_sigma, + } + + +class SketchVAECallback(ttools.callbacks.ImageDisplayCallback): + """Simple callback that visualize images.""" + def visualized_image(self, batch, step_data, is_val=False): + if is_val: + 
return None + + # only display the first n drawings + n = 8 + gt = step_data["gt_image"][:n].detach() + vae_im = step_data["vae_image"][:n].detach() + sketch_im = step_data["sketch_image"][:n].detach() + + rendering = th.cat([gt, vae_im, sketch_im], 2) + rendering = th.clamp(rendering, 0, 1) + alpha = rendering[:, 3:4] + rendering = rendering[:, :3] * alpha + + return rendering + + def caption(self, batch, step_data, is_val=False): + if is_val: + return "" + else: + return "top: truth, middle: vae sample, output: rnn-output" + + + + +class Interface(ttools.ModelInterface): + def __init__(self, model, lr=1e-4, lr_decay=0.9999, + kl_weight=0.5, kl_min_weight=0.01, kl_decay=0.99995, + raster_resolution=64, absolute_coords=False, + device="cpu", grad_clip=1.0): + super(Interface, self).__init__() + + self.grad_clip = grad_clip + self.raster_resolution = raster_resolution + self.absolute_coords = absolute_coords + + self.model = model + self.device = device + self.model.to(self.device) + self.im_enc_opt = th.optim.Adam( + self.model.im_encoder.parameters(), lr=lr) + self.im_dec_opt = th.optim.Adam( + self.model.im_decoder.parameters(), lr=lr) + self.sketch_dec_opt = th.optim.Adam( + self.model.sketch_decoder.parameters(), lr=lr) + + self.kl_weight = kl_weight + self.kl_min_weight = kl_min_weight + self.kl_decay = kl_decay + self.kl_loss = losses.KLDivergence() + + self.schedulers = [ + th.optim.lr_scheduler.ExponentialLR(self.im_enc_opt, lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.im_dec_opt, lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.sketch_dec_opt, lr_decay), + ] + + # include loss on alpha + self.im_loss = losses.MultiscaleMSELoss(channels=4).to(self.device) + + def optimizers(self): + return [self.im_enc_opt, self.im_dec_opt, self.sketch_dec_opt] + + def kl_scaling(self): + # Scale the KL divergence weight + try: + state = self.im_enc_opt.state_dict()["param_groups"][0]["params"][0] + optim_step = self.im_enc_opt.state_dict()["state"][state]["step"] + except KeyError: + optim_step = 0 # no step taken yet + kl_scaling = 1.0 - (1.0 - + self.kl_min_weight)*(self.kl_decay**optim_step) + return kl_scaling + + def training_step(self, batch): + gt_strokes, gt_im = batch + gt_strokes = gt_strokes.to(self.device) + gt_im = gt_im.to(self.device) + + out = self.model(gt_im) + + kl_loss = self.kl_loss( + out["mu"], out["log_sigma"]) + kl_weight = self.kl_weight * self.kl_scaling() + + # add start of sequence + sos = gt_strokes[:, :1] + sketch = th.cat([sos, out["decoded_sketch"]], 1) + + vae_im = out["decoded_im"] + + # start = time.time() + sketch_im = rendering.opacityStroke2diffvg( + sketch, canvas_size=self.raster_resolution, debug=False, + force_cpu=True, relative=not self.absolute_coords) + # elapsed = (time.time() - start)*1000 + # print("out rendering took %.2fms" % elapsed) + + vae_im_loss = self.im_loss(vae_im, gt_im) + sketch_im_loss = self.im_loss(sketch_im, gt_im) + + # vae_im_loss = th.nn.functional.mse_loss(vae_im, gt_im) + # sketch_im_loss = th.nn.functional.mse_loss(sketch_im, gt_im) + + loss = vae_im_loss + kl_loss*kl_weight + sketch_im_loss + + self.im_enc_opt.zero_grad() + self.im_dec_opt.zero_grad() + self.sketch_dec_opt.zero_grad() + loss.backward() + + # clip gradients + enc_nrm = th.nn.utils.clip_grad_norm_( + self.model.im_encoder.parameters(), self.grad_clip) + dec_nrm = th.nn.utils.clip_grad_norm_( + self.model.im_decoder.parameters(), self.grad_clip) + sketch_dec_nrm = th.nn.utils.clip_grad_norm_( + self.model.sketch_decoder.parameters(), self.grad_clip) 
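# clip_grad_norm_ rescales gradients in place and returns the total norm
# measured before clipping, so the checks below log whenever a clip
# actually took place.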
+ + if enc_nrm > self.grad_clip: + LOG.debug("Clipped encoder gradient (%.5f) to %.2f", + enc_nrm, self.grad_clip) + + if dec_nrm > self.grad_clip: + LOG.debug("Clipped decoder gradient (%.5f) to %.2f", + dec_nrm, self.grad_clip) + + if sketch_dec_nrm > self.grad_clip: + LOG.debug("Clipped sketch decoder gradient (%.5f) to %.2f", + sketch_dec_nrm, self.grad_clip) + + self.im_enc_opt.step() + self.im_dec_opt.step() + self.sketch_dec_opt.step() + + return { + "vae_image": vae_im, + "sketch_image": sketch_im, + "gt_image": gt_im, + "loss": loss.item(), + "vae_im_loss": vae_im_loss.item(), + "sketch_im_loss": sketch_im_loss.item(), + "kl_loss": kl_loss.item(), + "kl_weight": kl_weight, + "lr": self.im_enc_opt.param_groups[0]["lr"], + } + + def init_validation(self): + return dict(sample=None) + + def validation_step(self, batch, running_data): + # Switch to eval mode for dropout, batchnorm, etc + # self.model.eval() + # with th.no_grad(): + # # sample = self.model.sample( + # # batch.to(self.device), temperature=self.sampling_temperature) + # # running_data["sample"] = sample + # self.model.train() + return running_data + + +def train(args): + th.manual_seed(0) + np.random.seed(0) + + dataset = data.FixedLengthQuickDrawDataset( + args.dataset, max_seq_length=args.sequence_length, + canvas_size=args.raster_resolution) + dataloader = DataLoader( + dataset, batch_size=args.bs, num_workers=args.workers, shuffle=True) + + # val_dataset = [s for idx, s in enumerate(dataset) if idx < 8] + # val_dataloader = DataLoader( + # val_dataset, batch_size=8, num_workers=4, shuffle=False) + + val_dataloader = None + + model_params = { + "zdim": args.zdim, + "sequence_length": args.sequence_length, + "image_size": args.raster_resolution, + # "encoder_dim": args.encoder_dim, + # "decoder_dim": args.decoder_dim, + } + model = SketchVAE(**model_params) + model.train() + + LOG.info("Model parameters:\n%s", model_params) + + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + LOG.info("Using CUDA") + + interface = Interface(model, raster_resolution=args.raster_resolution, + lr=args.lr, lr_decay=args.lr_decay, + kl_decay=args.kl_decay, kl_weight=args.kl_weight, + absolute_coords=args.absolute_coordinates, + device=device) + + env_name = "sketch_vae" + if args.custom_name is not None: + env_name += "_" + args.custom_name + + if args.absolute_coordinates: + env_name += "_abs_coords" + + chkpt = os.path.join(OUTPUT, env_name) + + # Resume from checkpoint, if any + checkpointer = ttools.Checkpointer( + chkpt, model, meta=model_params, + optimizers=interface.optimizers(), + schedulers=interface.schedulers) + extras, meta = checkpointer.load_latest() + epoch = extras["epoch"] if extras and "epoch" in extras.keys() else 0 + + if meta is not None and meta != model_params: + LOG.info("Checkpoint's metaparams differ " + "from CLI, aborting: %s and %s", meta, model_params) + + trainer = ttools.Trainer(interface) + + # Add callbacks + losses = ["loss", "kl_loss", "vae_im_loss", "sketch_im_loss"] + training_debug = ["lr", "kl_weight"] + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=losses, val_keys=None)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=losses, val_keys=None, env=env_name, port=args.port)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=training_debug, smoothing=0, val_keys=None, env=env_name, + port=args.port)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer, max_files=2, interval=600, max_epochs=10)) + 
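Interface.kl_scaling above anneals the weight of the KL term from kl_min_weight up toward 1.0 as training progresses. A minimal standalone sketch of the resulting schedule with this script's default hyper-parameters (the helper name is mine, not part of the codebase):

def effective_kl_weight(step, kl_weight=0.5, kl_min_weight=0.01,
                        kl_decay=0.99995):
    """Effective weight of the KL term after `step` optimizer updates
    (same formula as Interface.kl_scaling)."""
    scaling = 1.0 - (1.0 - kl_min_weight) * (kl_decay ** step)
    return kl_weight * scaling

# Ramps from kl_weight * kl_min_weight (= 0.005) toward kl_weight (= 0.5);
# the remaining gap halves roughly every 14k steps for kl_decay = 0.99995.
print([round(effective_kl_weight(s), 3) for s in (0, 10_000, 100_000)])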
trainer.add_callback( + ttools.callbacks.LRSchedulerCallback(interface.schedulers)) + + trainer.add_callback(SketchVAECallback( + env=env_name, win="samples", port=args.port, frequency=args.freq)) + + # Start training + trainer.train(dataloader, starting_epoch=epoch, + val_dataloader=val_dataloader, + num_epochs=args.num_epochs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--dataset", default="cat.npz") + + parser.add_argument("--absolute_coordinates", action="store_true", + default=False) + + parser.add_argument("--custom_name") + + # Training params + parser.add_argument("--bs", type=int, default=1) + parser.add_argument("--workers", type=int, default=0) + parser.add_argument("--num_epochs", type=int, default=10000) + parser.add_argument("--lr", type=float, default=1e-4) + parser.add_argument("--lr_decay", type=float, default=0.9999) + parser.add_argument("--kl_weight", type=float, default=0.5) + parser.add_argument("--kl_decay", type=float, default=0.99995) + + # Model configuration + parser.add_argument("--zdim", type=int, default=128) + parser.add_argument("--sequence_length", type=int, default=50) + parser.add_argument("--raster_resolution", type=int, default=64) + # parser.add_argument("--encoder_dim", type=int, default=256) + # parser.add_argument("--decoder_dim", type=int, default=512) + + # Viz params + parser.add_argument("--freq", type=int, default=10) + parser.add_argument("--port", type=int, default=5000) + + args = parser.parse_args() + + pydiffvg.set_use_gpu(False) + + train(args) diff --git a/apps/generative_models/train_gan.py b/apps/generative_models/train_gan.py new file mode 100755 index 0000000..9b5eaa1 --- /dev/null +++ b/apps/generative_models/train_gan.py @@ -0,0 +1,489 @@ +#!/bin/env python +"""Train a GAN. 
+ +Usage: + +* Train a MNIST model: + +`python train_gan.py` + +* Train a Quickdraw model: + +`python train_gan.py --task quickdraw` + +""" +import argparse +import os + +import numpy as np +import torch as th +from torch.utils.data import DataLoader + +import ttools +import ttools.interfaces + +import losses +import data +import models + +import pydiffvg + +LOG = ttools.get_logger(__name__) + + +BASE_DIR = os.path.join(os.path.dirname(os.path.abspath(__file__)), os.pardir) +OUTPUT = os.path.join(BASE_DIR, "results") + + +class Callback(ttools.callbacks.ImageDisplayCallback): + """Simple callback that visualize images.""" + def visualized_image(self, batch, step_data, is_val=False): + if is_val: + return + + gen = step_data["gen_image"][:16].detach() + ref = step_data["gt_image"][:16].detach() + + # tensor to visualize, concatenate images + vizdata = th.cat([ref, gen], 2) + + vector = step_data["vector_image"] + if vector is not None: + vector = vector[:16].detach() + vizdata = th.cat([vizdata, vector], 2) + + vizdata = (vizdata + 1.0 ) * 0.5 + viz = th.clamp(vizdata, 0, 1) + return viz + + def caption(self, batch, step_data, is_val=False): + if step_data["vector_image"] is not None: + s = "top: real, middle: raster, bottom: vector" + else: + s = "top: real, bottom: fake" + return s + + +class Interface(ttools.ModelInterface): + def __init__(self, generator, vect_generator, + discriminator, vect_discriminator, + lr=1e-4, lr_decay=0.9999, + gradient_penalty=10, + wgan_gp=False, + raster_resolution=32, device="cpu", grad_clip=1.0): + super(Interface, self).__init__() + + self.wgan_gp = wgan_gp + self.w_gradient_penalty = gradient_penalty + + self.n_critic = 1 + if self.wgan_gp: + self.n_critic = 5 + + self.grad_clip = grad_clip + self.raster_resolution = raster_resolution + + self.gen = generator + self.vect_gen = vect_generator + self.discrim = discriminator + self.vect_discrim = vect_discriminator + + self.device = device + self.gen.to(self.device) + self.discrim.to(self.device) + + beta1 = 0.5 + beta2 = 0.9 + + self.gen_opt = th.optim.Adam( + self.gen.parameters(), lr=lr, betas=(beta1, beta2)) + self.discrim_opt = th.optim.Adam( + self.discrim.parameters(), lr=lr, betas=(beta1, beta2)) + + self.schedulers = [ + th.optim.lr_scheduler.ExponentialLR(self.gen_opt, lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.discrim_opt, lr_decay), + ] + + self.optimizers = [self.gen_opt, self.discrim_opt] + + if self.vect_gen is not None: + assert self.vect_discrim is not None + + self.vect_gen.to(self.device) + self.vect_discrim.to(self.device) + + self.vect_gen_opt = th.optim.Adam( + self.vect_gen.parameters(), lr=lr, betas=(beta1, beta2)) + self.vect_discrim_opt = th.optim.Adam( + self.vect_discrim.parameters(), lr=lr, betas=(beta1, beta2)) + + self.schedulers += [ + th.optim.lr_scheduler.ExponentialLR(self.vect_gen_opt, + lr_decay), + th.optim.lr_scheduler.ExponentialLR(self.vect_discrim_opt, + lr_decay), + ] + + self.optimizers += [self.vect_gen_opt, self.vect_discrim_opt] + + # include loss on alpha + self.im_loss = losses.MultiscaleMSELoss(channels=4).to(self.device) + + self.iter = 0 + + self.cross_entropy = th.nn.BCEWithLogitsLoss() + self.mse = th.nn.MSELoss() + + def _gradient_penalty(self, discrim, fake, real): + bs = real.size(0) + epsilon = th.rand(bs, 1, 1, 1, device=real.device) + epsilon = epsilon.expand_as(real) + + interpolation = epsilon * real.data + (1 - epsilon) * fake.data + interpolation = th.autograd.Variable(interpolation, requires_grad=True) + + interpolation_logits = 
discrim(interpolation) + grad_outputs = th.ones(interpolation_logits.size(), device=real.device) + + gradients = th.autograd.grad(outputs=interpolation_logits, + inputs=interpolation, + grad_outputs=grad_outputs, + create_graph=True, retain_graph=True)[0] + + gradients = gradients.view(bs, -1) + gradients_norm = th.sqrt(th.sum(gradients ** 2, dim=1) + 1e-12) + + # [Tanh-Tung 2019] https://openreview.net/pdf?id=ByxPYjC5KQ + return self.w_gradient_penalty * ((gradients_norm - 0) ** 2).mean() + + # return self.w_gradient_penalty * ((gradients_norm - 1) ** 2).mean() + + def _discriminator_step(self, discrim, opt, fake, real): + """Try to classify fake as 0 and real as 1.""" + + opt.zero_grad() + + # no backprop to gen + fake = fake.detach() + + fake_pred = discrim(fake) + real_pred = discrim(real) + + if self.wgan_gp: + gradient_penalty = self._gradient_penalty(discrim, fake, real) + loss_d = fake_pred.mean() - real_pred.mean() + gradient_penalty + gradient_penalty = gradient_penalty.item() + else: + fake_loss = self.cross_entropy(fake_pred, th.zeros_like(fake_pred)) + real_loss = self.cross_entropy(real_pred, th.ones_like(real_pred)) + # fake_loss = self.mse(fake_pred, th.zeros_like(fake_pred)) + # real_loss = self.mse(real_pred, th.ones_like(real_pred)) + loss_d = 0.5*(fake_loss + real_loss) + gradient_penalty = None + + loss_d.backward() + nrm = th.nn.utils.clip_grad_norm_( + discrim.parameters(), self.grad_clip) + if nrm > self.grad_clip: + LOG.debug("Clipped discriminator gradient (%.5f) to %.2f", + nrm, self.grad_clip) + + opt.step() + + return loss_d.item(), gradient_penalty + + def _generator_step(self, gen, discrim, opt, fake): + """Try to classify fake as 1.""" + + opt.zero_grad() + + fake_pred = discrim(fake) + + if self.wgan_gp: + loss_g = -fake_pred.mean() + else: + loss_g = self.cross_entropy(fake_pred, th.ones_like(fake_pred)) + # loss_g = self.mse(fake_pred, th.ones_like(fake_pred)) + + loss_g.backward() + + # clip gradients + nrm = th.nn.utils.clip_grad_norm_( + gen.parameters(), self.grad_clip) + if nrm > self.grad_clip: + LOG.debug("Clipped generator gradient (%.5f) to %.2f", + nrm, self.grad_clip) + + opt.step() + + return loss_g.item() + + def training_step(self, batch): + im = batch + im = im.to(self.device) + + z = self.gen.sample_z(im.shape[0], device=self.device) + + generated = self.gen(z) + + vect_generated = None + if self.vect_gen is not None: + vect_generated = self.vect_gen(z) + + loss_g = None + loss_d = None + loss_g_vect = None + loss_d_vect = None + + gp = None + gp_vect = None + + if self.iter < self.n_critic: # Discriminator update + self.iter += 1 + + loss_d, gp = self._discriminator_step( + self.discrim, self.discrim_opt, generated, im) + + if vect_generated is not None: + loss_d_vect, gp_vect = self._discriminator_step( + self.vect_discrim, self.vect_discrim_opt, vect_generated, im) + + else: # Generator update + self.iter = 0 + + loss_g = self._generator_step( + self.gen, self.discrim, self.gen_opt, generated) + + if vect_generated is not None: + loss_g_vect = self._generator_step( + self.vect_gen, self.vect_discrim, self.vect_gen_opt, vect_generated) + + return { + "loss_g": loss_g, + "loss_d": loss_d, + "loss_g_vect": loss_g_vect, + "loss_d_vect": loss_d_vect, + "gp": gp, + "gp_vect": gp_vect, + "gt_image": im, + "gen_image": generated, + "vector_image": vect_generated, + "lr": self.gen_opt.param_groups[0]["lr"], + } + + def init_validation(self): + return dict(sample=None) + + def validation_step(self, batch, running_data): + # Switch to eval mode 
for dropout, batchnorm, etc + self.model.eval() + return running_data + + +def train(args): + th.manual_seed(0) + np.random.seed(0) + + color_output = False + if args.task == "mnist": + dataset = data.MNISTDataset(args.raster_resolution, train=True) + elif args.task == "quickdraw": + dataset = data.QuickDrawImageDataset( + args.raster_resolution, train=True) + else: + raise NotImplementedError() + + dataloader = DataLoader( + dataset, batch_size=args.bs, num_workers=args.workers, shuffle=True) + + val_dataloader = None + + model_params = { + "zdim": args.zdim, + "num_strokes": args.num_strokes, + "imsize": args.raster_resolution, + "stroke_width": args.stroke_width, + "color_output": color_output, + } + gen = models.Generator(**model_params) + gen.train() + + discrim = models.Discriminator(color_output=color_output) + discrim.train() + + if args.raster_only: + vect_gen = None + vect_discrim = None + else: + if args.generator == "fc": + vect_gen = models.VectorGenerator(**model_params) + elif args.generator == "bezier_fc": + vect_gen = models.BezierVectorGenerator(**model_params) + elif args.generator in ["rnn"]: + vect_gen = models.RNNVectorGenerator(**model_params) + elif args.generator in ["chain_rnn"]: + vect_gen = models.ChainRNNVectorGenerator(**model_params) + else: + raise NotImplementedError() + vect_gen.train() + + vect_discrim = models.Discriminator(color_output=color_output) + vect_discrim.train() + + LOG.info("Model parameters:\n%s", model_params) + + device = "cpu" + if th.cuda.is_available(): + device = "cuda" + LOG.info("Using CUDA") + + interface = Interface(gen, vect_gen, discrim, vect_discrim, + raster_resolution=args.raster_resolution, lr=args.lr, + wgan_gp=args.wgan_gp, + lr_decay=args.lr_decay, device=device) + + env_name = args.task + "_gan" + + if args.raster_only: + env_name += "_raster" + else: + env_name += "_vector" + + env_name += "_" + args.generator + + if args.wgan_gp: + env_name += "_wgan" + + chkpt = os.path.join(OUTPUT, env_name) + + meta = { + "model_params": model_params, + "task": args.task, + "generator": args.generator, + } + checkpointer = ttools.Checkpointer( + chkpt, gen, meta=meta, + optimizers=interface.optimizers, + schedulers=interface.schedulers, + prefix="g_") + checkpointer_d = ttools.Checkpointer( + chkpt, discrim, + prefix="d_") + + # Resume from checkpoint, if any + extras, _ = checkpointer.load_latest() + checkpointer_d.load_latest() + + if not args.raster_only: + checkpointer_vect = ttools.Checkpointer( + chkpt, vect_gen, meta=meta, + optimizers=interface.optimizers, + schedulers=interface.schedulers, + prefix="vect_g_") + checkpointer_d_vect = ttools.Checkpointer( + chkpt, vect_discrim, + prefix="vect_d_") + extras, _ = checkpointer_vect.load_latest() + checkpointer_d_vect.load_latest() + + epoch = extras["epoch"] if extras and "epoch" in extras.keys() else 0 + + # if meta is not None and meta["model_parameters"] != model_params: + # LOG.info("Checkpoint's metaparams differ " + # "from CLI, aborting: %s and %s", meta, model_params) + + trainer = ttools.Trainer(interface) + + # Add callbacks + losses = ["loss_g", "loss_d", "loss_g_vect", "loss_d_vect", "gp", + "gp_vect"] + training_debug = ["lr"] + + trainer.add_callback(Callback( + env=env_name, win="samples", port=args.port, frequency=args.freq)) + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=losses, val_keys=None)) + trainer.add_callback(ttools.callbacks.MultiPlotCallback( + keys=losses, val_keys=None, env=env_name, port=args.port, + server=args.server, 
base_url=args.base_url, + win="losses", frequency=args.freq)) + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=training_debug, smoothing=0, val_keys=None, env=env_name, + server=args.server, base_url=args.base_url, + port=args.port)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer, max_files=2, interval=600, max_epochs=10)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer_d, max_files=2, interval=600, max_epochs=10)) + + if not args.raster_only: + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer_vect, max_files=2, interval=600, max_epochs=10)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback( + checkpointer_d_vect, max_files=2, interval=600, max_epochs=10)) + + trainer.add_callback( + ttools.callbacks.LRSchedulerCallback(interface.schedulers)) + + # Start training + trainer.train(dataloader, starting_epoch=epoch, + val_dataloader=val_dataloader, + num_epochs=args.num_epochs) + + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("--task", + default="mnist", + choices=["mnist", "quickdraw"]) + parser.add_argument("--generator", + default="bezier_fc", + choices=["bezier_fc", "fc", "rnn", "chain_rnn"], + help="model to use as generator") + + parser.add_argument("--raster_only", action="store_true", default=False, + help="if true only train the raster baseline") + + parser.add_argument("--standard_gan", dest="wgan_gp", action="store_false", + default=True, + help="if true, use regular GAN instead of WGAN") + + # Training params + parser.add_argument("--bs", type=int, default=4, help="batch size") + parser.add_argument("--workers", type=int, default=4, + help="number of dataloader threads") + parser.add_argument("--num_epochs", type=int, default=200, + help="number of epochs to train for") + parser.add_argument("--lr", type=float, default=1e-4, + help="learning rate") + parser.add_argument("--lr_decay", type=float, default=0.9999, + help="exponential learning rate decay rate") + + # Model configuration + parser.add_argument("--zdim", type=int, default=32, + help="latent space dimension") + parser.add_argument("--stroke_width", type=float, nargs=2, + default=(0.5, 1.5), + help="min and max stroke width") + parser.add_argument("--num_strokes", type=int, default=16, + help="number of strokes to generate") + parser.add_argument("--raster_resolution", type=int, default=32, + help="raster canvas resolution on each side") + + # Viz params + parser.add_argument("--freq", type=int, default=10, + help="visualization frequency") + parser.add_argument("--port", type=int, default=8097, + help="visdom port") + parser.add_argument("--server", default=None, + help="visdom server if not local.") + parser.add_argument("--base_url", default="", help="visdom entrypoint URL") + + args = parser.parse_args() + + pydiffvg.set_use_gpu(False) + + ttools.set_logger(False) + + train(args) diff --git a/apps/geometry.py b/apps/geometry.py new file mode 100644 index 0000000..59e00db --- /dev/null +++ b/apps/geometry.py @@ -0,0 +1,226 @@ +import math +import torch + +class GeometryLoss: + def __init__(self, pathObj, xyalign=True, parallel=True, smooth_node=True): + self.pathObj=pathObj + self.pathId=pathObj.id + self.get_segments(pathObj) + if xyalign: + self.make_hor_ver_constraints(pathObj) + + self.xyalign=xyalign + self.parallel=parallel + self.smooth_node=smooth_node + + if parallel: + self.make_parallel_constraints(pathObj) + + if smooth_node: + 
self.make_smoothness_constraints(pathObj) + + def make_smoothness_constraints(self,pathObj): + self.smooth_nodes=[] + for idx, node in enumerate(self.iterate_nodes()): + sm, t0, t1=self.node_smoothness(node,pathObj) + if abs(sm)<1e-2: + self.smooth_nodes.append((node,((t0.norm()/self.segment_approx_length(node[0],pathObj)).item(),(t1.norm()/self.segment_approx_length(node[1],pathObj)).item()))) + #print("Node {} is smooth (smoothness {})".format(idx,sm)) + else: + #print("Node {} is not smooth (smoothness {})".format(idx, sm)) + pass + + def node_smoothness(self,node,pathObj): + t0=self.tangent_out(node[0],pathObj) + t1=self.tangent_in(node[1],pathObj) + t1rot=torch.stack((-t1[1],t1[0])) + smoothness=t0.dot(t1rot)/(t0.norm()*t1.norm()) + + return smoothness, t0, t1 + + def segment_approx_length(self,segment,pathObj): + if segment[0]==0: + #line + idxs=self.segList[segment[0]][segment[1]] + #should have a pair of indices now + length=(pathObj.points[idxs[1],:]-pathObj.points[idxs[0],:]).norm() + return length + elif segment[0]==1: + #quadric + idxs = self.segList[segment[0]][segment[1]] + # should have a pair of indices now + length = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:]).norm()+(pathObj.points[idxs[2],:] - pathObj.points[idxs[1],:]).norm() + return length + elif segment[0]==2: + #cubic + idxs = self.segList[segment[0]][segment[1]] + # should have a pair of indices now + length = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:]).norm()+(pathObj.points[idxs[2],:] - pathObj.points[idxs[1],:]).norm()+(pathObj.points[idxs[3],:] - pathObj.points[idxs[2],:]).norm() + return length + + def tangent_in(self, segment,pathObj): + if segment[0]==0: + #line + idxs=self.segList[segment[0]][segment[1]] + #should have a pair of indices now + tangent=(pathObj.points[idxs[1],:]-pathObj.points[idxs[0],:])/2 + return tangent + elif segment[0]==1: + #quadric + idxs = self.segList[segment[0]][segment[1]] + # should have a pair of indices now + tangent = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:]) + return tangent + elif segment[0]==2: + #cubic + idxs = self.segList[segment[0]][segment[1]] + # should have a pair of indices now + tangent = (pathObj.points[idxs[1],:] - pathObj.points[idxs[0],:]) + return tangent + + assert(False) + + def tangent_out(self, segment, pathObj): + if segment[0] == 0: + # line + idxs = self.segList[segment[0]][segment[1]] + # should have a pair of indices now + tangent = (pathObj.points[idxs[0],:] - pathObj.points[idxs[1],:]) / 2 + return tangent + elif segment[0] == 1: + # quadric + idxs = self.segList[segment[0]][segment[1]] + # should have a pair of indices now + tangent = (pathObj.points[idxs[1],:] - pathObj.points[idxs[2],:]) + return tangent + elif segment[0] == 2: + # cubic + idxs = self.segList[segment[0]][segment[1]] + # should have a pair of indices now + tangent = (pathObj.points[idxs[2],:] - pathObj.points[idxs[3],:]) + return tangent + + assert (False) + + def get_segments(self, pathObj): + self.segments=[] + self.lines = [] + self.quadrics=[] + self.cubics=[] + self.segList =(self.lines,self.quadrics,self.cubics) + idx=0 + total_points=pathObj.points.shape[0] + for ncp in pathObj.num_control_points.numpy(): + if ncp==0: + self.segments.append((0,len(self.lines))) + self.lines.append((idx, (idx + 1) % total_points)) + idx+=1 + elif ncp==1: + self.segments.append((1, len(self.quadrics))) + self.quadrics.append((idx, (idx + 1), (idx+2) % total_points)) + idx+=ncp+1 + elif ncp==2: + self.segments.append((2, len(self.cubics))) + 
self.cubics.append((idx, (idx + 1), (idx+2), (idx + 3) % total_points)) + idx += ncp + 1 + + def iterate_nodes(self): + for prev, next in zip([self.segments[-1]]+self.segments[:-1],self.segments): + yield (prev, next) + + def make_hor_ver_constraints(self, pathObj): + self.horizontals=[] + self.verticals=[] + for idx, line in enumerate(self.lines): + startPt=pathObj.points[line[0],:] + endPt=pathObj.points[line[1],:] + + dif=endPt-startPt + + if abs(dif[0])<1e-6: + #is horizontal + self.horizontals.append(idx) + + if abs(dif[1])<1e-6: + #is vertical + self.verticals.append(idx) + + def make_parallel_constraints(self,pathObj): + slopes=[] + for lidx, line in enumerate(self.lines): + startPt = pathObj.points[line[0], :] + endPt = pathObj.points[line[1], :] + + dif = endPt - startPt + + slope=math.atan2(dif[1],dif[0]) + if slope<0: + slope+=math.pi + + minidx=-1 + for idx, s in enumerate(slopes): + if abs(s[0]-slope)<1e-3: + minidx=idx + break + + if minidx>=0: + slopes[minidx][1].append(lidx) + else: + slopes.append((slope,[lidx])) + + self.parallel_groups=[sgroup[1] for sgroup in slopes if len(sgroup[1])>1 and (not self.xyalign or (sgroup[0]>1e-3 and abs(sgroup[0]-(math.pi/2))>1e-3))] + + def make_line_diff(self,pathObj,lidx): + line = self.lines[lidx] + startPt = pathObj.points[line[0], :] + endPt = pathObj.points[line[1], :] + + dif = endPt - startPt + return dif + + def calc_hor_ver_loss(self,loss,pathObj): + for lidx in self.horizontals: + dif = self.make_line_diff(pathObj,lidx) + loss+=dif[0].pow(2) + + for lidx in self.verticals: + dif = self.make_line_diff(pathObj,lidx) + loss += dif[1].pow(2) + + def calc_parallel_loss(self,loss,pathObj): + for group in self.parallel_groups: + diffs=[self.make_line_diff(pathObj,lidx) for lidx in group] + difmat=torch.stack(diffs,1) + lengths=difmat.pow(2).sum(dim=0).sqrt() + difmat=difmat/lengths + difmat=torch.cat((difmat,torch.zeros(1,difmat.shape[1]))) + rotmat=difmat[:,list(range(1,difmat.shape[1]))+[0]] + cross=difmat.cross(rotmat) + ploss=cross.pow(2).sum()*lengths.sum()*10 + loss+=ploss + + def calc_smoothness_loss(self,loss,pathObj): + for node, tlengths in self.smooth_nodes: + sl,t0,t1=self.node_smoothness(node,pathObj) + #add smoothness loss + loss+=sl.pow(2)*t0.norm().sqrt()*t1.norm().sqrt() + tl=((t0.norm()/self.segment_approx_length(node[0],pathObj))-tlengths[0]).pow(2)+((t1.norm()/self.segment_approx_length(node[1],pathObj))-tlengths[1]).pow(2) + loss+=tl*10 + + def compute(self, pathObj): + if pathObj.id != self.pathId: + raise ValueError("Path ID {} does not match construction-time ID {}".format(pathObj.id,self.pathId)) + + loss=torch.tensor(0.) 
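+        # The calc_* helpers below modify `loss` in place, so every enabled
+        # constraint term is accumulated into the tensor created above before
+        # it is returned.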
+        if self.xyalign:
+            self.calc_hor_ver_loss(loss,pathObj)
+
+        if self.parallel:
+            self.calc_parallel_loss(loss, pathObj)
+
+        if self.smooth_node:
+            self.calc_smoothness_loss(loss,pathObj)
+
+        #print(loss.item())
+
+        return loss
diff --git a/apps/image_compare.py b/apps/image_compare.py
new file mode 100644
index 0000000..fc55aa2
--- /dev/null
+++ b/apps/image_compare.py
@@ -0,0 +1,45 @@
+import argparse
+import skimage.io
+import numpy as np
+from matplotlib import cm
+import math
+from skimage.metrics import structural_similarity as ssim
+
+def normalize(x, min_, max_):
+    return (x - min_) / (max_ - min_)
+
+def main(args):
+    img1 = skimage.img_as_float(skimage.io.imread(args.img1)).astype(np.float32)
+    img2 = skimage.img_as_float(skimage.io.imread(args.img2)).astype(np.float32)
+    ref = skimage.img_as_float(skimage.io.imread(args.ref)).astype(np.float32)
+    img1 = img1[:, :, :3]
+    img2 = img2[:, :, :3]
+    ref = ref[:, :, :3]
+
+    diff1 = np.sum(np.abs(img1 - ref), axis = 2)
+    diff2 = np.sum(np.abs(img2 - ref), axis = 2)
+    min_ = min(np.min(diff1), np.min(diff2))
+    max_ = max(np.max(diff1), np.max(diff2)) * 0.5
+    diff1 = cm.viridis(normalize(diff1, min_, max_))
+    diff2 = cm.viridis(normalize(diff2, min_, max_))
+
+    # MSE
+    print('MSE img1:', np.mean(np.power(img1 - ref, 2.0)))
+    print('MSE img2:', np.mean(np.power(img2 - ref, 2.0)))
+    # PSNR
+    print('PSNR img1:', 20 * math.log10(1.0 / math.sqrt(np.mean(np.power(img1 - ref, 2.0)))))
+    print('PSNR img2:', 20 * math.log10(1.0 / math.sqrt(np.mean(np.power(img2 - ref, 2.0)))))
+    # SSIM
+    print('SSIM img1:', ssim(img1, ref, multichannel=True))
+    print('SSIM img2:', ssim(img2, ref, multichannel=True))
+
+    skimage.io.imsave('diff1.png', (diff1 * 255).astype(np.uint8))
+    skimage.io.imsave('diff2.png', (diff2 * 255).astype(np.uint8))
+
+if __name__ == "__main__":
+    parser = argparse.ArgumentParser()
+    parser.add_argument("img1", help="img1")
+    parser.add_argument("img2", help="img2")
+    parser.add_argument("ref", help="ref")
+    args = parser.parse_args()
+    main(args)
diff --git a/apps/imgs/baboon.png b/apps/imgs/baboon.png
new file mode 100644
index 0000000..2b1499a
Binary files /dev/null and b/apps/imgs/baboon.png differ
diff --git a/apps/imgs/baboon.svg b/apps/imgs/baboon.svg
new file mode 100644
index 0000000..a50bdc9
--- /dev/null
+++ b/apps/imgs/baboon.svg
@@ -0,0 +1,8694 @@
[8,694 added lines of SVG markup elided: the tag contents were stripped during extraction, leaving only "+" markers.]
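A quick aside on the GeometryLoss class introduced in apps/geometry.py above: it records the axis-aligned, parallel, and smooth-joint structure of a path at construction time and then penalizes deviations from that structure while the control points are optimized. Below is a minimal usage sketch that is not part of this commit; the Path construction, the `id` assignment, and the bare optimization loop are illustrative assumptions, and it assumes it is run from the apps/ directory so that `geometry` is importable.

import torch
import pydiffvg
from geometry import GeometryLoss

# A closed square made of four line segments (0 control points per segment).
path = pydiffvg.Path(
    num_control_points=torch.zeros(4, dtype=torch.int32),
    points=torch.tensor([[10.0, 10.0], [90.0, 10.0],
                         [90.0, 90.0], [10.0, 90.0]], requires_grad=True),
    is_closed=True)
path.id = 0  # GeometryLoss.compute() checks that the id matches

geom_loss = GeometryLoss(path)  # snapshots the constraints of the initial shape
optim = torch.optim.Adam([path.points], lr=0.5)

for step in range(100):
    optim.zero_grad()
    loss = geom_loss.compute(path)  # typically added to an image/rendering loss
    loss.backward()
    optim.step()

In practice this regularizer would be summed with a rendering loss so the shape can move toward a target image without losing its horizontal/vertical edges, parallel edge groups, or smooth joints.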
diff --git a/apps/imgs/boston.svg b/apps/imgs/boston.svg
new file mode 100644
index 0000000..006718a
--- /dev/null
+++ b/apps/imgs/boston.svg
@@ -0,0 +1,1936 @@
[1,936 added lines of SVG markup elided: tag contents stripped during extraction.]
diff --git a/apps/imgs/circle.svg b/apps/imgs/circle.svg
new file mode 100644
index 0000000..dde6e72
--- /dev/null
+++ b/apps/imgs/circle.svg
@@ -0,0 +1,12 @@
[12 added lines of SVG markup elided: tag contents stripped during extraction.]
diff --git a/apps/imgs/contour.svg b/apps/imgs/contour.svg
new file mode 100644
index 0000000..d173fd9
--- /dev/null
+++ b/apps/imgs/contour.svg
@@ -0,0 +1,53256 @@
[53,256 added lines of SVG markup elided: tag contents stripped during extraction.]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
diff --git a/apps/imgs/eleven_below_single.svg b/apps/imgs/eleven_below_single.svg new file mode 100644 index 0000000..f28c3c5 --- /dev/null +++ b/apps/imgs/eleven_below_single.svg @@ -0,0 +1,247 @@
[247 lines of SVG markup omitted]
diff --git a/apps/imgs/fallingwater.jpg b/apps/imgs/fallingwater.jpg new file mode 100644 index 0000000..9abc664 Binary files /dev/null and b/apps/imgs/fallingwater.jpg differ
diff --git a/apps/imgs/fallingwater.svg b/apps/imgs/fallingwater.svg new file mode 100644 index 0000000..6276b0f --- /dev/null +++ b/apps/imgs/fallingwater.svg @@ -0,0 +1,3457 @@
[3457 lines of SVG markup omitted]
diff --git a/apps/imgs/flower.jpg b/apps/imgs/flower.jpg new file mode 100644 index 0000000..6801f62 Binary files /dev/null and b/apps/imgs/flower.jpg differ
diff --git a/apps/imgs/flower.svg b/apps/imgs/flower.svg new file mode 100644 index 0000000..0eb9d97 --- /dev/null +++ b/apps/imgs/flower.svg @@ -0,0 +1,3771 @@
[3771 lines of SVG markup omitted]
diff --git a/apps/imgs/hawaii.svg b/apps/imgs/hawaii.svg new file mode 100644 index 0000000..c867448 --- /dev/null +++ b/apps/imgs/hawaii.svg @@ -0,0 +1,1151 @@
[1151 lines of SVG markup omitted]
diff --git a/apps/imgs/hokusai.png b/apps/imgs/hokusai.png new file mode 100644 index 0000000..7556f71 Binary files /dev/null and b/apps/imgs/hokusai.png differ
diff --git a/apps/imgs/johnny_automatic_flower_pot.svg b/apps/imgs/johnny_automatic_flower_pot.svg new file mode 100644 index 0000000..19a9350 --- /dev/null +++ b/apps/imgs/johnny_automatic_flower_pot.svg @@ -0,0 +1,376 @@
[376 lines of SVG markup omitted; embedded Openclipart metadata: "flower pot" by johnny_automatic, 2006-10-07, "a drawing of a flower pot", public domain, http://openclipart.org/detail/421/flower-pot-by-johnny_automatic]
diff --git a/apps/imgs/kitty.jpg b/apps/imgs/kitty.jpg new file mode 100644 index 0000000..df4fa08 Binary files /dev/null and b/apps/imgs/kitty.jpg differ
diff --git a/apps/imgs/kitty.svg b/apps/imgs/kitty.svg new file mode 100644 index 0000000..f2813a4 --- /dev/null +++ b/apps/imgs/kitty.svg @@ -0,0 +1,3676 @@
[3676 lines of SVG markup omitted]
diff --git a/apps/imgs/linux.svg b/apps/imgs/linux.svg new file mode 100644 index 0000000..2c52c93 --- /dev/null +++ b/apps/imgs/linux.svg @@ -0,0 +1,711 @@
[711 lines of SVG markup omitted]
diff --git a/apps/imgs/male-face1.svg b/apps/imgs/male-face1.svg new file mode 100644 index 0000000..5e6113c --- /dev/null +++ b/apps/imgs/male-face1.svg @@ -0,0 +1,997 @@
[997 lines of SVG markup omitted]
diff --git a/apps/imgs/mcseem2.svg b/apps/imgs/mcseem2.svg new file mode 100644 index 0000000..14b091e --- /dev/null +++ b/apps/imgs/mcseem2.svg @@ -0,0 +1,2191 @@
[2191 lines of SVG markup omitted]
diff --git a/apps/imgs/note_small.svg b/apps/imgs/note_small.svg new file mode 100644 index 0000000..56a1902 --- /dev/null +++ b/apps/imgs/note_small.svg @@ -0,0 +1,25 @@
[25 lines of SVG markup omitted]
diff --git a/apps/imgs/peppers.svg b/apps/imgs/peppers.svg new file mode 100644 index 0000000..5db63c9 --- /dev/null +++ b/apps/imgs/peppers.svg @@ -0,0 +1 @@
+peppers
\ No newline at end of file
diff --git a/apps/imgs/peppers.tiff b/apps/imgs/peppers.tiff new file mode 100644 index 0000000..8c956f8 Binary files /dev/null and b/apps/imgs/peppers.tiff differ
diff --git a/apps/imgs/reschart.svg b/apps/imgs/reschart.svg new file mode 100644 index 0000000..2c9fa9c --- /dev/null +++ b/apps/imgs/reschart.svg @@ -0,0 +1,761 @@
[761 lines of SVG markup omitted]
diff --git a/apps/imgs/scream.jpg b/apps/imgs/scream.jpg new file mode 100644 index 0000000..c975bac Binary files /dev/null and b/apps/imgs/scream.jpg differ
diff --git a/apps/imgs/seamcarving/cat.svg b/apps/imgs/seamcarving/cat.svg new file mode 100644 index 0000000..efd2d3d --- /dev/null +++ b/apps/imgs/seamcarving/cat.svg @@ -0,0 +1,355 @@
[355 lines of SVG markup omitted]
diff --git a/apps/imgs/seamcarving/hokusai.svg b/apps/imgs/seamcarving/hokusai.svg new file mode 100644 index 0000000..7f800c2 --- /dev/null +++ b/apps/imgs/seamcarving/hokusai.svg @@ -0,0 +1,9840 @@
[9840 lines of SVG markup omitted]
diff --git a/apps/imgs/seamcarving/ice_cream.svg b/apps/imgs/seamcarving/ice_cream.svg new file mode 100644 index 0000000..18b4399 --- /dev/null +++ b/apps/imgs/seamcarving/ice_cream.svg @@ -0,0 +1,353 @@
[353 lines of SVG markup omitted]
diff --git a/apps/imgs/seamcarving/license.txt b/apps/imgs/seamcarving/license.txt new file mode 100644 index 0000000..6d41acb --- /dev/null +++ b/apps/imgs/seamcarving/license.txt @@ -0,0 +1,7 @@
+https://www.vecteezy.com/vector-art/192818-vector-landscape-illustration
+https://www.vecteezy.com/vector-art/217221-vector-nature-landscape-illustration
+https://www.vecteezy.com/vector-art/538989-a-panorama-view-od-urban-city
+https://www.vecteezy.com/vector-art/419761-mushroom-house-in-the-dark-forest
+https://www.vecteezy.com/vector-art/376425-brown-cat-looking-at-little-mouse
+https://freesvg.org/great-wave-off-kanagawa
+https://www.vecteezy.com/vector-art/298600-friendly-cat-and-dog-on-white-background
diff --git a/apps/imgs/seamcarving/seaside2.svg b/apps/imgs/seamcarving/seaside2.svg new file mode 100644 index 0000000..7f4dca4 --- /dev/null +++ b/apps/imgs/seamcarving/seaside2.svg @@ -0,0 +1,224 @@
[224 lines of SVG markup omitted]
diff --git a/apps/imgs/seamcarving/sunset2.svg b/apps/imgs/seamcarving/sunset2.svg new file mode 100644 index 0000000..eae3a0a --- /dev/null +++ b/apps/imgs/seamcarving/sunset2.svg @@ -0,0 +1,899 @@
[899 lines of SVG markup omitted]
diff --git a/apps/imgs/seurat.jpg b/apps/imgs/seurat.jpg new file mode 100644 index 0000000..0243a64 Binary files /dev/null and b/apps/imgs/seurat.jpg differ
diff --git a/apps/imgs/shared_edge.svg b/apps/imgs/shared_edge.svg new file mode 100644 index 0000000..fb9afd2 --- /dev/null +++ b/apps/imgs/shared_edge.svg @@ -0,0 +1,13 @@
[13 lines of SVG markup omitted]
diff --git a/apps/imgs/starry_night.jpg b/apps/imgs/starry_night.jpg new file mode 100644 index 0000000..6339ac1 Binary files /dev/null and b/apps/imgs/starry_night.jpg differ
diff --git a/apps/imgs/tiger.svg b/apps/imgs/tiger.svg new file mode 100644 index 0000000..366de41 --- /dev/null +++ b/apps/imgs/tiger.svg @@ -0,0 +1,317 @@
[317 lines of SVG markup omitted]
diff --git a/apps/optimize_pixel_filter.py b/apps/optimize_pixel_filter.py new file mode 100644 index 0000000..f5380fc --- /dev/null +++ b/apps/optimize_pixel_filter.py @@ -0,0 +1,115 @@
+import diffvg
+import pydiffvg
+import torch
+import skimage
+import numpy as np
+
+# Use GPU if available
+pydiffvg.set_use_gpu(torch.cuda.is_available())
+
+canvas_width = 256
+canvas_height = 256
+circle = pydiffvg.Circle(radius = torch.tensor(40.0),
+                         center = torch.tensor([128.0, 128.0]))
+shapes = [circle]
+circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]),
+                                   fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]))
+shape_groups = [circle_group]
+scene_args = pydiffvg.RenderFunction.serialize_scene(\
+    canvas_width=canvas_width,
+    canvas_height=canvas_height,
+    shapes=shapes,
+    shape_groups=shape_groups,
+    filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann,
+                                radius = torch.tensor(8.0)))
+
+render = pydiffvg.RenderFunction.apply
+img = render(256, # width
+             256, # height
+             2,   # num_samples_x
+             2,   # num_samples_y
+             0,   # seed
+             None,
+             *scene_args)
+# The output image is in linear RGB space. Do gamma correction before saving the image.
+pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/target.png', gamma=2.2)
+target = img.clone()
+
+# Change the pixel filter radius
+radius = torch.tensor(1.0, requires_grad = True)
+scene_args = pydiffvg.RenderFunction.serialize_scene(\
+    canvas_width=canvas_width,
+    canvas_height=canvas_height,
+    shapes=shapes,
+    shape_groups=shape_groups,
+    filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann,
+                                radius = radius))
+img = render(256, # width
+             256, # height
+             2,   # num_samples_x
+             2,   # num_samples_y
+             1,   # seed
+             None,
+             *scene_args)
+pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/init.png', gamma=2.2)
+
+# Optimize for the filter radius
+optimizer = torch.optim.Adam([radius], lr=1.0)
+# Run 100 Adam iterations.
+for t in range(100):
+    print('iteration:', t)
+    optimizer.zero_grad()
+    # Forward pass: render the image.
+    scene_args = pydiffvg.RenderFunction.serialize_scene(\
+        canvas_width=canvas_width,
+        canvas_height=canvas_height,
+        shapes=shapes,
+        shape_groups=shape_groups,
+        filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann,
+                                    radius = radius))
+    img = render(256, # width
+                 256, # height
+                 2,   # num_samples_x
+                 2,   # num_samples_y
+                 t+1, # seed
+                 None,
+                 *scene_args)
+    # Save the intermediate render.
+    pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/iter_{}.png'.format(t), gamma=2.2)
+    # Compute the loss function. Here it is L2.
+    loss = (img - target).pow(2).sum()
+    print('loss:', loss.item())
+
+    # Backpropagate the gradients.
+    loss.backward()
+    # Print the gradients.
+    print('radius.grad:', radius.grad)
+
+    # Take a gradient descent step.
+    optimizer.step()
+    # Print the current params.
+    print('radius:', radius)
+
+# Render the final result.
+scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width=canvas_width, + canvas_height=canvas_height, + shapes=shapes, + shape_groups=shape_groups, + filter=pydiffvg.PixelFilter(type = diffvg.FilterType.hann, + radius = radius)) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/optimize_pixel_filter/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/optimize_pixel_filter/iter_%d.png", "-vb", "20M", + "results/optimize_pixel_filter/out.mp4"]) diff --git a/apps/painterly_rendering.py b/apps/painterly_rendering.py new file mode 100644 index 0000000..2b61f36 --- /dev/null +++ b/apps/painterly_rendering.py @@ -0,0 +1,223 @@ +""" +Scream: python painterly_rendering.py imgs/scream.jpg --num_paths 2048 --max_width 4.0 +Fallingwater: python painterly_rendering.py imgs/fallingwater.jpg --num_paths 2048 --max_width 4.0 +Fallingwater: python painterly_rendering.py imgs/fallingwater.jpg --num_paths 2048 --max_width 4.0 --use_lpips_loss +Baboon: python painterly_rendering.py imgs/baboon.png --num_paths 1024 --max_width 4.0 --num_iter 250 +Baboon Lpips: python painterly_rendering.py imgs/baboon.png --num_paths 1024 --max_width 4.0 --num_iter 500 --use_lpips_loss +Kitty: python painterly_rendering.py imgs/kitty.jpg --num_paths 1024 --use_blob +""" +import pydiffvg +import torch +import skimage +import skimage.io +import random +import ttools.modules +import argparse +import math + +pydiffvg.set_print_timing(True) + +gamma = 1.0 + +def main(args): + # Use GPU if available + pydiffvg.set_use_gpu(torch.cuda.is_available()) + + perception_loss = ttools.modules.LPIPS().to(pydiffvg.get_device()) + + #target = torch.from_numpy(skimage.io.imread('imgs/lena.png')).to(torch.float32) / 255.0 + target = torch.from_numpy(skimage.io.imread(args.target)).to(torch.float32) / 255.0 + target = target.pow(gamma) + target = target.to(pydiffvg.get_device()) + target = target.unsqueeze(0) + target = target.permute(0, 3, 1, 2) # NHWC -> NCHW + #target = torch.nn.functional.interpolate(target, size = [256, 256], mode = 'area') + canvas_width, canvas_height = target.shape[3], target.shape[2] + num_paths = args.num_paths + max_width = args.max_width + + random.seed(1234) + torch.manual_seed(1234) + + shapes = [] + shape_groups = [] + if args.use_blob: + for i in range(num_paths): + num_segments = random.randint(3, 5) + num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + for j in range(num_segments): + radius = 0.05 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + if j < num_segments - 1: + points.append(p3) + p0 = p3 + points = torch.tensor(points) + points[:, 0] *= canvas_width + points[:, 1] *= canvas_height + path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = torch.tensor(1.0), + is_closed = True) + shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]), + fill_color = torch.tensor([random.random(), + random.random(), + 
random.random(), + random.random()])) + shape_groups.append(path_group) + else: + for i in range(num_paths): + num_segments = random.randint(1, 3) + num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2 + points = [] + p0 = (random.random(), random.random()) + points.append(p0) + for j in range(num_segments): + radius = 0.05 + p1 = (p0[0] + radius * (random.random() - 0.5), p0[1] + radius * (random.random() - 0.5)) + p2 = (p1[0] + radius * (random.random() - 0.5), p1[1] + radius * (random.random() - 0.5)) + p3 = (p2[0] + radius * (random.random() - 0.5), p2[1] + radius * (random.random() - 0.5)) + points.append(p1) + points.append(p2) + points.append(p3) + p0 = p3 + points = torch.tensor(points) + points[:, 0] *= canvas_width + points[:, 1] *= canvas_height + #points = torch.rand(3 * num_segments + 1, 2) * min(canvas_width, canvas_height) + path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = torch.tensor(1.0), + is_closed = False) + shapes.append(path) + path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]), + fill_color = None, + stroke_color = torch.tensor([random.random(), + random.random(), + random.random(), + random.random()])) + shape_groups.append(path_group) + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + pydiffvg.imwrite(img.cpu(), 'results/painterly_rendering/init.png', gamma=gamma) + + points_vars = [] + stroke_width_vars = [] + color_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + if not args.use_blob: + for path in shapes: + path.stroke_width.requires_grad = True + stroke_width_vars.append(path.stroke_width) + if args.use_blob: + for group in shape_groups: + group.fill_color.requires_grad = True + color_vars.append(group.fill_color) + else: + for group in shape_groups: + group.stroke_color.requires_grad = True + color_vars.append(group.stroke_color) + + # Optimize + points_optim = torch.optim.Adam(points_vars, lr=1.0) + if len(stroke_width_vars) > 0: + width_optim = torch.optim.Adam(stroke_width_vars, lr=0.1) + color_optim = torch.optim.Adam(color_vars, lr=0.01) + # Adam iterations. + for t in range(args.num_iter): + print('iteration:', t) + points_optim.zero_grad() + if len(stroke_width_vars) > 0: + width_optim.zero_grad() + color_optim.zero_grad() + # Forward pass: render the image. + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + t, # seed + None, + *scene_args) + # Compose img with white background + img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4]) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/painterly_rendering/iter_{}.png'.format(t), gamma=gamma) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + if args.use_lpips_loss: + loss = perception_loss(img, target) + (img.mean() - target.mean()).pow(2) + else: + loss = (img - target).pow(2).mean() + print('render loss:', loss.item()) + + # Backpropagate the gradients. 
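# Illustrative sketch (not part of the original script): the "Compose img with
# white background" line above is the standard alpha-over operator,
#     out = alpha * foreground_rgb + (1 - alpha) * background_rgb,
# applied with a constant white background. A one-pixel sanity check:
import torch

rgba = torch.tensor([[[1.0, 0.0, 0.0, 0.5]]])     # 1x1 image, half-transparent red
white = torch.ones(1, 1, 3)
alpha = rgba[:, :, 3:4]
out = alpha * rgba[:, :, :3] + (1.0 - alpha) * white
# out == [[[1.0, 0.5, 0.5]]]: equal parts red and white, as expected.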
+ loss.backward() + + # Take a gradient descent step. + points_optim.step() + if len(stroke_width_vars) > 0: + width_optim.step() + color_optim.step() + if len(stroke_width_vars) > 0: + for path in shapes: + path.stroke_width.data.clamp_(1.0, max_width) + if args.use_blob: + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + else: + for group in shape_groups: + group.stroke_color.data.clamp_(0.0, 1.0) + + if t % 10 == 0 or t == args.num_iter - 1: + pydiffvg.save_svg('results/painterly_rendering/iter_{}.svg'.format(t), + canvas_width, canvas_height, shapes, shape_groups) + + # Render the final result. + img = render(target.shape[1], # width + target.shape[0], # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/painterly_rendering/final.png'.format(t), gamma=gamma) + # Convert the intermediate renderings to a video. + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/painterly_rendering/iter_%d.png", "-vb", "20M", + "results/painterly_rendering/out.mp4"]) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("target", help="target image path") + parser.add_argument("--num_paths", type=int, default=512) + parser.add_argument("--max_width", type=float, default=2.0) + parser.add_argument("--use_lpips_loss", dest='use_lpips_loss', action='store_true') + parser.add_argument("--num_iter", type=int, default=500) + parser.add_argument("--use_blob", dest='use_blob', action='store_true') + args = parser.parse_args() + main(args) diff --git a/apps/quadratic_distance_approx.py b/apps/quadratic_distance_approx.py new file mode 100644 index 0000000..10e2a34 --- /dev/null +++ b/apps/quadratic_distance_approx.py @@ -0,0 +1,76 @@ +import pydiffvg +import torch +import skimage +import numpy as np +import matplotlib.pyplot as plt + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([1]) +points = torch.tensor([[ 50.0, 30.0], # base + [125.0, 400.0], # control point + [170.0, 30.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = torch.tensor([30.0]), + is_closed = False, + use_distance_approx = False) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.5, 0.5, 0.5, 0.5])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +img /= 256.0 +cm = plt.get_cmap('viridis') +img = cm(img.squeeze()) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/ref_sdf.png') + +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/ref_color.png') + +shapes[0].use_distance_approx = True +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = 
render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +img /= 256.0 +img = cm(img.squeeze()) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/approx_sdf.png') + +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # background_image + *scene_args) +pydiffvg.imwrite(img, 'results/quadratic_distance_approx/approx_color.png') \ No newline at end of file diff --git a/apps/refine_svg.py b/apps/refine_svg.py new file mode 100644 index 0000000..7e324fb --- /dev/null +++ b/apps/refine_svg.py @@ -0,0 +1,115 @@ +import pydiffvg +import argparse +import ttools.modules +import torch +import skimage.io + +gamma = 1.0 + +def main(args): + perception_loss = ttools.modules.LPIPS().to(pydiffvg.get_device()) + + target = torch.from_numpy(skimage.io.imread(args.target)).to(torch.float32) / 255.0 + target = target.pow(gamma) + target = target.to(pydiffvg.get_device()) + target = target.unsqueeze(0) + target = target.permute(0, 3, 1, 2) # NHWC -> NCHW + + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # bg + *scene_args) + # The output image is in linear RGB space. Do Gamma correction before saving the image. + pydiffvg.imwrite(img.cpu(), 'results/refine_svg/init.png', gamma=gamma) + + points_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + color_vars = {} + for group in shape_groups: + group.fill_color.requires_grad = True + color_vars[group.fill_color.data_ptr()] = group.fill_color + color_vars = list(color_vars.values()) + + # Optimize + points_optim = torch.optim.Adam(points_vars, lr=1.0) + color_optim = torch.optim.Adam(color_vars, lr=0.01) + + # Adam iterations. + for t in range(args.num_iter): + print('iteration:', t) + points_optim.zero_grad() + color_optim.zero_grad() + # Forward pass: render the image. + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # bg + *scene_args) + # Compose img with white background + img = img[:, :, 3:4] * img[:, :, :3] + torch.ones(img.shape[0], img.shape[1], 3, device = pydiffvg.get_device()) * (1 - img[:, :, 3:4]) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/refine_svg/iter_{}.png'.format(t), gamma=gamma) + img = img[:, :, :3] + # Convert img from HWC to NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + if args.use_lpips_loss: + loss = perception_loss(img, target) + else: + loss = (img - target).pow(2).mean() + print('render loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. 
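# Illustrative sketch (not part of the original script): color_vars above is a
# dict keyed by data_ptr() so that shape groups which share a single fill-color
# tensor contribute that tensor to the optimizer only once. The same pattern
# with plain tensors (the names below are hypothetical):
import torch

shared = torch.tensor([0.2, 0.4, 0.6, 1.0], requires_grad=True)
fill_colors = [shared, shared, torch.tensor([0.9, 0.1, 0.1, 1.0], requires_grad=True)]
unique = {c.data_ptr(): c for c in fill_colors}
params = list(unique.values())              # 2 distinct tensors, not 3
color_optim_sketch = torch.optim.Adam(params, lr=0.01)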
+ points_optim.step() + color_optim.step() + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + + if t % 10 == 0 or t == args.num_iter - 1: + pydiffvg.save_svg('results/refine_svg/iter_{}.svg'.format(t), + canvas_width, canvas_height, shapes, shape_groups) + + # Render the final result. + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, # bg + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/refine_svg/final.png'.format(t), gamma=gamma) + # Convert the intermediate renderings to a video. + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/refine_svg/iter_%d.png", "-vb", "20M", + "results/refine_svg/out.mp4"]) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg", help="source SVG path") + parser.add_argument("target", help="target image path") + parser.add_argument("--use_lpips_loss", dest='use_lpips_loss', action='store_true') + parser.add_argument("--num_iter", type=int, default=250) + args = parser.parse_args() + main(args) diff --git a/apps/render_svg.py b/apps/render_svg.py new file mode 100644 index 0000000..0aa9273 --- /dev/null +++ b/apps/render_svg.py @@ -0,0 +1,41 @@ +""" +Simple utility to render an .svg to a .png +""" +import os +import argparse +import pydiffvg +import torch as th + + +def render(canvas_width, canvas_height, shapes, shape_groups): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + return img + + +def main(args): + pydiffvg.set_device(th.device('cuda:1')) + + # Load SVG + svg = os.path.join(args.svg) + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(svg) + + # Save initial state + ref = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(ref.cpu(), args.out, gamma=2.2) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg", help="source SVG path") + parser.add_argument("out", help="output image path") + args = parser.parse_args() + main(args) diff --git a/apps/seam_carving.py b/apps/seam_carving.py new file mode 100644 index 0000000..aa0176e --- /dev/null +++ b/apps/seam_carving.py @@ -0,0 +1,284 @@ +"""Retargets an .svg using image-domain seam carving to shrink it.""" +import os +import pydiffvg +import argparse +import torch as th +import scipy.ndimage.filters as filters +import numba +import numpy as np +import skimage.io + + +def energy(im): + """Compute image energy. + + Args: + im(np.ndarray) with shape [h, w, 3]: input image. + + Returns: + (np.ndarray) with shape [h, w]: energy map. + """ + f_dx = np.array([ + [-1, 0, 1 ], + [-2, 0, 2 ], + [-1, 0, 1 ], + ]) + f_dy = f_dx.T + dx = filters.convolve(im.mean(2), f_dx) + dy = filters.convolve(im.mean(2), f_dy) + + return np.abs(dx) + np.abs(dy) + + +@numba.jit(nopython=True) +def min_seam(e): + """Finds the seam with minimal cost in an energy map. + + Args: + e(np.ndarray) with shape [h, w]: energy map. + + Returns: + min_e(np.ndarray) with shape [h, w]: for all (y,x) min_e[y, x] + is the cost of the minimal seam from 0 to y (top to bottom). 
+ The minimal seam can be found by looking at the last row of min_e. + This is computed by dynamic programming. + argmin_e(np.ndarray) with shape [h, w]: for all (y,x) argmin_e[y, x] + contains the x coordinate corresponding to this seam in the + previous row (y-1). We use this for backtracking. + """ + # initialize to local energy + min_e = e.copy() + argmin_e = np.zeros_like(e, dtype=np.int64) + + h, w = e.shape + + # propagate vertically + for y in range(1, h): + for x in range(w): + if x == 0: + idx = np.argmin(e[y-1, x:x+2]) + argmin_e[y, x] = idx + x + mini = e[y-1, x + idx] + elif x == w-1: + idx = np.argmin(e[y-1, x-1:x+1]) + argmin_e[y, x] = idx + x - 1 + mini = e[y-1, x + idx - 1] + else: + idx = np.argmin(e[y-1, x-1:x+2]) + argmin_e[y, x] = idx + x - 1 + mini = e[y-1, x + idx - 1] + + min_e[y, x] = min_e[y, x] + mini + + return min_e, argmin_e + + +def carve_seam(im): + """Carves a vertical seam in an image, reducing it's horizontal size by 1. + + Args: + im(np.ndarray) with shape [h, w, 3]: input image. + + Returns: + (np.ndarray) with shape [h, w-1, 1]: the image with one seam removed. + """ + + e = energy(im) + min_e, argmin_e = min_seam(e) + h, w = im.shape[:2] + + # boolean flags for the pixels to preserve + to_keep = np.ones((h, w), dtype=np.bool) + + # get lowest energy (from last row) + x = np.argmin(min_e[-1]) + print("carving seam", x, "with energy", min_e[-1, x]) + + # backtract to identify the seam + for y in range(h-1, -1, -1): + # remove seam pixel + to_keep[y, x] = False + x = argmin_e[y, x] + + # replicate mask over color channels + to_keep = np.stack(3*[to_keep], axis=2) + new_im = im[to_keep].reshape((h, w-1, 3)) + return new_im + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + + img = _render(canvas_width, # width + canvas_height, # height + samples, # num_samples_x + samples, # num_samples_y + 0, # seed + None, + *scene_args) + return img + + +def vector_rescale(shapes, scale_x=1.00, scale_y=1.00): + new_shapes = [] + for path in shapes: + path.points[..., 0] *= scale_x + path.points[..., 1] *= scale_y + + +def main(): + parser = argparse.ArgumentParser() + parser.add_argument("--svg", default=os.path.join("imgs", "hokusai.svg")) + parser.add_argument("--optim_steps", default=10, type=int) + parser.add_argument("--lr", default=1e-1, type=int) + args = parser.parse_args() + + name = os.path.splitext(os.path.basename(args.svg))[0] + root = os.path.join("results", "seam_carving", name) + svg_root = os.path.join(root, "svg") + os.makedirs(root, exist_ok=True) + os.makedirs(os.path.join(root, "svg"), exist_ok=True) + + pydiffvg.set_use_gpu(False) + # pydiffvg.set_device(th.device('cuda')) + + # Load SVG + print("loading svg %s" % args.svg) + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg) + print("done loading") + + max_size = 512 + scale_factor = max_size / max(canvas_width, canvas_height) + print("rescaling from %dx%d with scale %f" % (canvas_width, canvas_height, scale_factor)) + canvas_width = int(canvas_width*scale_factor) + canvas_height = int(canvas_height*scale_factor) + print("new shape %dx%d" % (canvas_width, canvas_height)) + vector_rescale(shapes, scale_x=scale_factor, scale_y=scale_factor) + + # Shrink image by 33 % + # num_seams_to_remove = 2 + num_seams_to_remove = canvas_width // 3 + new_canvas_width = canvas_width - 
num_seams_to_remove + scaling = new_canvas_width * 1.0 / canvas_width + + # Naive scaling baseline + print("rendering naive rescaling...") + vector_rescale(shapes, scale_x=scaling) + resized = render(new_canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(resized.cpu(), os.path.join(root, 'uniform_scaling.png'), gamma=2.2) + pydiffvg.save_svg(os.path.join(svg_root, 'uniform_scaling.svg') , canvas_width, + canvas_height, shapes, shape_groups, use_gamma=False) + vector_rescale(shapes, scale_x=1.0/scaling) # bring back original coordinates + print("saved naiving scaling") + + # Save initial state + print("rendering initial state...") + im = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(im.cpu(), os.path.join(root, 'init.png'), gamma=2.2) + pydiffvg.save_svg(os.path.join(svg_root, 'init.svg'), canvas_width, + canvas_height, shapes, shape_groups, use_gamma=False) + print("saved initial state") + + # Optimize + # color_optim = th.optim.Adam(color_vars, lr=0.01) + + retargeted = im[..., :3].cpu().numpy() + previous_width = canvas_width + print("carving seams") + for seam_idx in range(num_seams_to_remove): + print('\nseam', seam_idx+1, 'of', num_seams_to_remove) + + # Remove a seam + retargeted = carve_seam(retargeted) + + current_width = canvas_width - seam_idx - 1 + scale_factor = current_width * 1.0 / previous_width + previous_width = current_width + + padded = np.zeros((canvas_height, canvas_width, 4)) + padded[:, :-seam_idx-1, :3] = retargeted + padded[:, :-seam_idx-1, -1] = 1.0 # alpha + padded = th.from_numpy(padded).to(im.device) + + # Remap points to the smaller canvas and + # collect variables to optimize + points_vars = [] + # width_vars = [] + mini, maxi = canvas_width, 0 + for path in shapes: + path.points.requires_grad = False + x = path.points[..., 0] + y = path.points[..., 1] + # rescale + + x = x * scale_factor + + # clip to canvas + path.points[..., 0] = th.clamp(x, 0, current_width) + path.points[..., 1] = th.clamp(y, 0, canvas_height) + + path.points.requires_grad = True + points_vars.append(path.points) + path.stroke_width.requires_grad = True + # width_vars.append(path.stroke_width) + + mini = min(mini, path.points.min().item()) + maxi = max(maxi, path.points.max().item()) + print("points", mini, maxi, "scale", scale_factor) + + # recreate an optimizer so we don't carry over the previous update + # (momentum)? + geom_optim = th.optim.Adam(points_vars, lr=args.lr) + + for step in range(args.optim_steps): + geom_optim.zero_grad() + + img = render(canvas_width, canvas_height, shapes, shape_groups, + samples=2) + + pydiffvg.imwrite( + img.cpu(), + os.path.join(root, "seam_%03d_iter_%02d.png" % (seam_idx, step)), gamma=2.2) + + # NO alpha + loss = (img - padded)[..., :3].pow(2).mean() + # loss = (img - padded).pow(2).mean() + print('render loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. 
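# Illustrative sketch (not part of the original script): recreating geom_optim
# for every seam, as done above, deliberately discards Adam's running moment
# estimates from the previous seam; torch keeps those buffers per parameter in
# optimizer.state, so a freshly constructed optimizer starts with none.
import torch

p = torch.zeros(3, requires_grad=True)
opt = torch.optim.Adam([p], lr=0.1)
p.sum().backward()
opt.step()
print(len(opt.state[p]) > 0)     # True: exp_avg / exp_avg_sq buffers now exist

opt = torch.optim.Adam([p], lr=0.1)
print(len(opt.state))            # 0: no momentum carried over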
+ geom_optim.step() + pydiffvg.save_svg(os.path.join(svg_root, "seam%03d.svg" % seam_idx), + canvas_width-seam_idx, canvas_height, shapes, + shape_groups, use_gamma=False) + + for path in shapes: + mini = min(mini, path.points.min().item()) + maxi = max(maxi, path.points.max().item()) + print("points", mini, maxi) + + img = render(canvas_width, canvas_height, shapes, shape_groups) + img = img[:, :-num_seams_to_remove] + + pydiffvg.imwrite(img.cpu(), os.path.join(root, 'final.png'), + gamma=2.2) + pydiffvg.imwrite(retargeted, os.path.join(root, 'ref.png'), + gamma=2.2) + + pydiffvg.save_svg(os.path.join(svg_root, 'final.svg'), + canvas_width-seam_idx, canvas_height, shapes, + shape_groups, use_gamma=False) + + # Convert the intermediate renderings to a video. + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", os.path.join(root, "seam_%03d_iter_00.png"), "-vb", "20M", + os.path.join(root, "out.mp4")]) + + +if __name__ == "__main__": + main() diff --git a/apps/shared_edge_compare.py b/apps/shared_edge_compare.py new file mode 100644 index 0000000..e7a5aef --- /dev/null +++ b/apps/shared_edge_compare.py @@ -0,0 +1,127 @@ +import pydiffvg +import diffvg +from matplotlib import cm +import matplotlib.pyplot as plt +import argparse +import torch + +def normalize(x, min_, max_): + range = max(abs(min_), abs(max_)) + return (x + range) / (2 * range) + +def main(args): + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg_file) + + w = int(canvas_width * args.size_scale) + h = int(canvas_height * args.size_scale) + + pfilter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, + radius = torch.tensor(0.5)) + + use_prefiltering = False + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + + num_samples_x = 16 + num_samples_y = 16 + render = pydiffvg.RenderFunction.apply + img = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, + *scene_args) + pydiffvg.imwrite(img.cpu(), 'results/finite_difference_comp/img.png', gamma=1.0) + + epsilon = 0.1 + def perturb_scene(axis, epsilon): + shapes[2].points[:, axis] += epsilon + # for s in shapes: + # if isinstance(s, pydiffvg.Circle): + # s.center[axis] += epsilon + # elif isinstance(s, pydiffvg.Ellipse): + # s.center[axis] += epsilon + # elif isinstance(s, pydiffvg.Path): + # s.points[:, axis] += epsilon + # elif isinstance(s, pydiffvg.Polygon): + # s.points[:, axis] += epsilon + # elif isinstance(s, pydiffvg.Rect): + # s.p_min[axis] += epsilon + # s.p_max[axis] += epsilon + # for s in shape_groups: + # if isinstance(s.fill_color, pydiffvg.LinearGradient): + # s.fill_color.begin[axis] += epsilon + # s.fill_color.end[axis] += epsilon + + perturb_scene(0, epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render = pydiffvg.RenderFunction.apply + img0 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, + *scene_args) + + forward_diff = (img0 - img) / (epsilon) + forward_diff = forward_diff.sum(axis = 2) + x_diff_max = 1.5 + x_diff_min = -1.5 + print(forward_diff.max()) + print(forward_diff.min()) + forward_diff = cm.viridis(normalize(forward_diff, x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(forward_diff, 
'results/finite_difference_comp/shared_edge_forward_diff.png', gamma=1.0) + + perturb_scene(0, -2 * epsilon) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + img1 = render(w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + None, + *scene_args) + backward_diff = (img - img1) / (epsilon) + backward_diff = backward_diff.sum(axis = 2) + print(backward_diff.max()) + print(backward_diff.min()) + backward_diff = cm.viridis(normalize(backward_diff, x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(backward_diff, 'results/finite_difference_comp/shared_edge_backward_diff.png', gamma=1.0) + perturb_scene(0, epsilon) + + num_samples_x = 4 + num_samples_y = 4 + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + filter = pfilter, + use_prefiltering = use_prefiltering) + render_grad = pydiffvg.RenderFunction.render_grad + img_grad = render_grad(torch.ones(h, w, 4), + w, # width + h, # height + num_samples_x, # num_samples_x + num_samples_y, # num_samples_y + 0, # seed + *scene_args) + print(img_grad[:, :, 0].max()) + print(img_grad[:, :, 0].min()) + x_diff = cm.viridis(normalize(img_grad[:, :, 0], x_diff_min, x_diff_max).cpu().numpy()) + pydiffvg.imwrite(x_diff, 'results/finite_difference_comp/ours_x_diff.png', gamma=1.0) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("svg_file", help="source SVG path") + parser.add_argument("--size_scale", type=float, default=1.0) + args = parser.parse_args() + main(args) diff --git a/apps/simple_transform_svg.py b/apps/simple_transform_svg.py new file mode 100644 index 0000000..3faec31 --- /dev/null +++ b/apps/simple_transform_svg.py @@ -0,0 +1,237 @@ +import pydiffvg +import torch +import torchvision +from PIL import Image +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +def inv_exp(a,x,xpow=1): + return pow(a,pow(1.-x,xpow)) + +import math +import numbers +import torch +from torch import nn +from torch.nn import functional as F + +import visdom + +class GaussianSmoothing(nn.Module): + """ + Apply gaussian smoothing on a + 1d, 2d or 3d tensor. Filtering is performed seperately for each channel + in the input using a depthwise convolution. + Arguments: + channels (int, sequence): Number of channels of the input tensors. Output will + have this number of channels as well. + kernel_size (int, sequence): Size of the gaussian kernel. + sigma (float, sequence): Standard deviation of the gaussian kernel. + dim (int, optional): The number of dimensions of the data. + Default value is 2 (spatial). + """ + def __init__(self, channels, kernel_size, sigma, dim=2): + super(GaussianSmoothing, self).__init__() + if isinstance(kernel_size, numbers.Number): + kernel_size = [kernel_size] * dim + if isinstance(sigma, numbers.Number): + sigma = [sigma] * dim + + # The gaussian kernel is the product of the + # gaussian function of each dimension. + kernel = 1 + meshgrids = torch.meshgrid( + [ + torch.arange(size, dtype=torch.float32) + for size in kernel_size + ] + ) + for size, std, mgrid in zip(kernel_size, sigma, meshgrids): + mean = (size - 1) / 2 + kernel *= 1 / (std * math.sqrt(2 * math.pi)) * \ + torch.exp(-((mgrid - mean) / std) ** 2 / 2) + + # Make sure sum of values in gaussian kernel equals 1. 
+ kernel = kernel / torch.sum(kernel) + + # Reshape to depthwise convolutional weight + kernel = kernel.view(1, 1, *kernel.size()) + kernel = kernel.repeat(channels, *[1] * (kernel.dim() - 1)) + + self.register_buffer('weight', kernel) + self.groups = channels + + if dim == 1: + self.conv = F.conv1d + elif dim == 2: + self.conv = F.conv2d + elif dim == 3: + self.conv = F.conv3d + else: + raise RuntimeError( + 'Only 1, 2 and 3 dimensions are supported. Received {}.'.format(dim) + ) + + def forward(self, input): + """ + Apply gaussian filter to input. + Arguments: + input (torch.Tensor): Input to apply gaussian filter on. + Returns: + filtered (torch.Tensor): Filtered output. + """ + return self.conv(input, weight=self.weight, groups=self.groups) + +vis=visdom.Visdom(port=8080) + +smoothing = GaussianSmoothing(4, 5, 1) + +settings=pydiffvg.SvgOptimizationSettings() +settings.global_override(["optimize_color"],False) +settings.global_override(["optimize_alpha"],False) +settings.global_override(["gradients","optimize_color"],False) +settings.global_override(["gradients","optimize_alpha"],False) +settings.global_override(["gradients","optimize_stops"],False) +settings.global_override(["gradients","optimize_location"],False) +settings.global_override(["optimizer"],"Adam") +settings.global_override(["paths","optimize_points"],False) +settings.global_override(["transforms","transform_lr"],1e-2) +settings.undefault("linearGradient3152") +settings.retrieve("linearGradient3152")[0]["transforms"]["optimize_transforms"]=False + +#optim=pydiffvg.OptimizableSvg("note_small.svg",settings,verbose=True) +optim=pydiffvg.OptimizableSvg("heart_green.svg",settings,verbose=True) + +#img=torchvision.transforms.ToTensor()(Image.open("note_transformed.png")).permute(1,2,0) +img=torchvision.transforms.ToTensor()(Image.open("heart_green_90.png")).permute(1,2,0) + +name="heart_green_90" + +pydiffvg.imwrite(img.cpu(), 'results/simple_transform_svg/target.png') +target = img.clone().detach().requires_grad_(False) + +img=optim.render() +pydiffvg.imwrite(img.cpu(), 'results/simple_transform_svg/init.png') + +def smooth(input, kernel): + input=torch.nn.functional.pad(input.permute(2,0,1).unsqueeze(0), (2, 2, 2, 2), mode='reflect') + output=kernel(input) + return output + +def printimg(optim): + img=optim.render() + comp = img.clone().detach() + bg = torch.tensor([[[1., 1., 1.]]]) + comprgb = comp[:, :, 0:3] + compalpha = comp[:, :, 3].unsqueeze(2) + comp = comprgb * compalpha \ + + bg * (1 - compalpha) + return comp + +def comp_loss_and_grad(img, tgt, it, sz): + dif=img-tgt + + loss=dif.pow(2).mean() + + dif=dif.detach() + + cdif=dif.clone().abs() + cdif[:,:,3]=1. 
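# Illustrative sketch (not part of the original script): the GaussianSmoothing
# module defined further above applies one small kernel per channel by passing
# groups=channels to conv2d, and smooth() reflect-pads by 2 on each side so the
# 5-tap kernel leaves the spatial size unchanged (a valid convolution with
# kernel size k shrinks each dimension by k - 1). Shape check with a box kernel:
import torch
import torch.nn.functional as F

x = torch.rand(1, 4, 32, 32)                  # N, C, H, W (RGBA-like)
w = torch.ones(4, 1, 5, 5) / 25.0             # one 5x5 kernel per channel
y = F.conv2d(F.pad(x, (2, 2, 2, 2), mode='reflect'), w, groups=4)
print(y.shape)                                # torch.Size([1, 4, 32, 32])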
+ + resdif=torch.nn.functional.interpolate(cdif.permute(2,0,1).unsqueeze(0),sz,mode='bilinear').squeeze().permute(1,2,0).abs() + pydiffvg.imwrite(resdif[:,:,0:4], 'results/simple_transform_svg/dif_{:04}.png'.format(it)) + + dif=dif.numpy() + padded=np.pad(dif,[(1,1),(1,1),(0,0)],mode='edge') + #print(padded[:-2,:,:].shape) + grad_x=(padded[:-2,:,:]-padded[2:,:,:])[:,1:-1,:] + grad_y=(padded[:,:-2,:]-padded[:,2:,:])[1:-1,:,:] + + resshape=dif.shape + resshape=(resshape[0],resshape[1],2) + res=np.zeros(resshape) + + for x in range(resshape[0]): + for y in range(resshape[1]): + A=np.concatenate((grad_x[x,y,:][:,np.newaxis],grad_y[x,y,:][:,np.newaxis]),axis=1) + b=-dif[x,y,:] + v=np.linalg.lstsq(np.dot(A.T,A),np.dot(A.T,b)) + res[x,y,:]=v[0] + + return loss, res + +import colorsys +def print_gradimg(gradimg,it,shape=None): + out=torch.zeros((gradimg.shape[0],gradimg.shape[1],3),requires_grad=False,dtype=torch.float32) + for x in range(gradimg.shape[0]): + for y in range(gradimg.shape[1]): + h=math.atan2(gradimg[x,y,1],gradimg[x,y,0]) + s=math.tanh(np.linalg.norm(gradimg[x,y,:])) + v=1. + vec=(gradimg[x,y,:].clip(min=-1,max=1)/2)+.5 + #out[x,y,:]=torch.tensor(colorsys.hsv_to_rgb(h,s,v),dtype=torch.float32) + out[x,y,:]=torch.tensor([vec[0],vec[1],0]) + + if shape is not None: + out=torch.nn.functional.interpolate(out.permute(2,0,1).unsqueeze(0),shape,mode='bilinear').squeeze().permute(1,2,0) + pydiffvg.imwrite(out.cpu(), 'results/simple_transform_svg/grad_{:04}.png'.format(it)) + +# Run 150 Adam iterations. +for t in range(1000): + print('iteration:', t) + optim.zero_grad() + with open('results/simple_transform_svg/viter_{:04}.svg'.format(t),"w") as f: + f.write(optim.write_xml()) + scale=inv_exp(1/16,math.pow(t/1000,1),0.5) + #print(scale) + #img = optim.render(seed=t+1,scale=scale) + img = optim.render(seed=t + 1, scale=None) + vis.line(torch.tensor([img.shape[0]]), X=torch.tensor([t]), win=name + " size", update="append", + opts={"title": name + " size"}) + #print(img.shape) + #img = optim.render(seed=t + 1) + + ptgt=target.permute(2,0,1).unsqueeze(0) + sz=img.shape[0:2] + restgt=torch.nn.functional.interpolate(ptgt,size=sz,mode='bilinear').squeeze().permute(1,2,0) + + # Compute the loss function. Here it is L2. + #loss = (smooth(img,smoothing) - smooth(restgt,smoothing)).pow(2).mean() + #loss = (img - restgt).pow(2).mean() + #loss=(img-target).pow(2).mean() + loss,gradimg=comp_loss_and_grad(img, restgt,t,target.shape[0:2]) + print_gradimg(gradimg,t,target.shape[0:2]) + print('loss:', loss.item()) + vis.line(loss.unsqueeze(0), X=torch.tensor([t]), win=name+" loss", update="append", + opts={"title": name + " loss"}) + + # Backpropagate the gradients. + loss.backward() + + # Take a gradient descent step. + optim.step() + + # Save the intermediate render. + comp=printimg(optim) + pydiffvg.imwrite(comp.cpu(), 'results/simple_transform_svg/iter_{:04}.png'.format(t)) + + +# Render the final result. + +img = optim.render() +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/simple_transform_svg/final.png') +with open('results/simple_transform_svg/final.svg', "w") as f: + f.write(optim.write_xml()) + +# Convert the intermediate renderings to a video. 
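# Illustrative sketch (not part of the original script): for every pixel,
# comp_loss_and_grad above solves a tiny least-squares problem
#     min_v || A v + dif ||^2,
# where the two columns of A hold the x and y finite-difference gradients of
# the RGBA residual and dif is the residual itself; the 2-vector v is the local
# displacement that would best cancel the error, and it is what print_gradimg
# visualizes. The same computation for a single pixel:
import numpy as np

A = np.array([[0.5, 0.0],        # one row per RGBA channel,
              [0.0, 0.5],        # columns = residual gradient in x and y
              [0.2, 0.1],
              [0.0, 0.0]])
dif = np.array([0.25, -0.10, 0.05, 0.0])
v, *_ = np.linalg.lstsq(A.T @ A, A.T @ (-dif), rcond=None)
print(v)                         # per-pixel displacement estimate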
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/simple_transform_svg/iter_%04d.png", "-vb", "20M", + "results/simple_transform_svg/out.mp4"]) + +call(["ffmpeg", "-framerate", "24", "-i", + "results/simple_transform_svg/grad_%04d.png", "-vb", "20M", + "results/simple_transform_svg/out_grad.mp4"]) + diff --git a/apps/single_circle.py b/apps/single_circle.py new file mode 100644 index 0000000..0f4070d --- /dev/null +++ b/apps/single_circle.py @@ -0,0 +1,106 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_circle/target.png', gamma=2.2) +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_circle/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_circle/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('color:', circle_group.fill_color) + +# Render the final result. 
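# Illustrative sketch (not part of the original script): radius and center are
# optimized in normalized units and multiplied by 256 when handed to the
# renderer so that a single Adam learning rate suits both geometry and color;
# with lr = 1e-2 the early geometry steps are roughly 0.01 * 256, i.e. a few
# pixels per iteration. The pattern, with a hypothetical helper name:
import torch

def normalized_param(value_px, scale=256.0):
    # Store the value in roughly [0, 1] units; rescale by `scale` at use sites.
    return torch.tensor(value_px / scale, requires_grad=True)

radius_sketch = normalized_param(20.0)
radius_px = radius_sketch * 256          # what the renderer would consume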
+scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_circle/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_circle/iter_%d.png", "-vb", "20M", + "results/single_circle/out.mp4"]) diff --git a/apps/single_circle_outline.py b/apps/single_circle_outline.py new file mode 100644 index 0000000..78952df --- /dev/null +++ b/apps/single_circle_outline.py @@ -0,0 +1,118 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0]), + stroke_width = torch.tensor(5.0)) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]), + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/target.png', gamma=2.2) +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +fill_color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle.stroke_width = stroke_width_n * 100 +circle_group.fill_color = fill_color +circle_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, fill_color, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle.stroke_width = stroke_width_n * 100 + circle_group.fill_color = fill_color + circle_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. 
+ loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('fill_color.grad:', fill_color.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('stroke_width:', circle.stroke_width) + print('fill_color:', circle_group.fill_color) + print('stroke_color:', circle_group.stroke_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + None, + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_circle_outline/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_circle_outline/iter_%d.png", "-vb", "20M", + "results/single_circle_outline/out.mp4"]) diff --git a/apps/single_circle_sdf.py b/apps/single_circle_sdf.py new file mode 100644 index 0000000..60c10cd --- /dev/null +++ b/apps/single_circle_sdf.py @@ -0,0 +1,114 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/target.png') +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/init.png') + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. 
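# Illustrative sketch (not part of the original script): with
# output_type = pydiffvg.OutputType.sdf the render returns a distance-style map
# instead of colors, which is why the script divides by 256 purely to bring the
# values into a displayable range. For a circle, the analytic signed distance
# is |p - center| - radius; a standalone 256x256 version for comparison:
import torch

xs = torch.arange(256.0).view(1, -1)     # 1 x W
ys = torch.arange(256.0).view(-1, 1)     # H x 1
sdf = torch.sqrt((xs - 128.0) ** 2 + (ys - 128.0) ** 2) - 40.0
# sdf < 0 inside the circle, 0 on the boundary, > 0 outside.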
+ circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + img = img / 256 # Normalize SDF to [0, 1] + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('color:', circle_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_circle_sdf/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_circle_sdf/iter_%d.png", "-vb", "20M", + "results/single_circle_sdf/out.mp4"]) \ No newline at end of file diff --git a/apps/single_circle_tf.py b/apps/single_circle_tf.py new file mode 100644 index 0000000..d77285d --- /dev/null +++ b/apps/single_circle_tf.py @@ -0,0 +1,94 @@ +import pydiffvg_tensorflow as pydiffvg +import tensorflow as tf +import skimage +import numpy as np + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = tf.constant(40.0), + center = tf.constant([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = tf.constant([0], dtype = tf.int32), + fill_color = tf.constant([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.render +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(0), # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
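# Illustrative sketch (not part of the original script): "gamma correction"
# here means encoding the renderer's linear-light values for display before
# writing the PNG; the gamma=2.2 argument presumably applies something close to
# the simple power curve below (the exact sRGB transfer function is piecewise,
# but a 1/2.2 power is the usual approximation):
import numpy as np

def to_display(linear_img, gamma=2.2):
    # Clamp to [0, 1], then apply the 1/gamma encoding power.
    return np.clip(linear_img, 0.0, 1.0) ** (1.0 / gamma)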
+pydiffvg.imwrite(img, 'results/single_circle_tf/target.png', gamma=2.2) +target = tf.identity(img) + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = tf.Variable(20.0 / 256.0) +center_n = tf.Variable([108.0 / 256.0, 138.0 / 256.0]) +color = tf.Variable([0.3, 0.2, 0.8, 1.0]) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(1), # seed + *scene_args) +pydiffvg.imwrite(img, 'results/single_circle_tf/init.png', gamma=2.2) + +optimizer = tf.compat.v1.train.AdamOptimizer(1e-2) + +for t in range(100): + print('iteration:', t) + + with tf.GradientTape() as tape: + # Forward pass: render the image. + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + # Important to use a different seed every iteration, otherwise the result + # would be biased. + scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(t+1), # seed, + *scene_args) + loss_value = tf.reduce_sum(tf.square(img - target)) + + print(f"loss_value: {loss_value}") + pydiffvg.imwrite(img, 'results/single_circle_tf/iter_{}.png'.format(t)) + + grads = tape.gradient(loss_value, [radius_n, center_n, color]) + print(grads) + optimizer.apply_gradients(zip(grads, [radius_n, center_n, color])) + +# Render the final result. +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(tf.constant(256), # width + tf.constant(256), # height + tf.constant(2), # num_samples_x + tf.constant(2), # num_samples_y + tf.constant(101), # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_circle_tf/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_circle_tf/iter_%d.png", "-vb", "20M", + "results/single_circle_tf/out.mp4"]) diff --git a/apps/single_curve.py b/apps/single_curve.py new file mode 100644 index 0000000..cd64f47 --- /dev/null +++ b/apps/single_curve.py @@ -0,0 +1,121 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2, 2, 2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0], # base + [ 90.0, 180.0], # control point + [200.0, 65.0], # control point + [210.0, 98.0], # base + [220.0, 70.0], # control point + [130.0, 55.0]]) # control point +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = True) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_curve/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0], # base + [100.0/256.0, 200.0/256.0], # control point + [170.0/256.0, 55.0/256.0], # control point + [220.0/256.0, 100.0/256.0], # base + [210.0/256.0, 80.0/256.0], # control point + [140.0/256.0, 60.0/256.0]], # control point + requires_grad = True) +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +path.points = points_n * 256 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + None, + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_curve/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + None, + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_curve/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. 
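# Illustrative sketch (not part of the original script): as the base/control
# comments above indicate, each entry of num_control_points is the number of
# control points between two consecutive base points (2 per segment = cubic
# curves), and with is_closed = True the last segment wraps back to points[0],
# which is why 3 segments need exactly 9 points. Evaluating one cubic segment
# in Bernstein form makes the layout concrete:
import torch

def cubic_bezier(p0, p1, p2, p3, t):
    # p0, p3 are base points; p1, p2 are the in-between control points.
    return ((1 - t) ** 3 * p0 + 3 * (1 - t) ** 2 * t * p1
            + 3 * (1 - t) * t ** 2 * p2 + t ** 3 * p3)

seg = torch.tensor([[120.0, 30.0], [150.0, 60.0], [90.0, 198.0], [60.0, 218.0]])
print(cubic_bezier(seg[0], seg[1], seg[2], seg[3], 0.5))   # midpoint of the first segment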
+ print('points:', path.points) + print('color:', path_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + None, + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_curve/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_curve/iter_%d.png", "-vb", "20M", + "results/single_curve/out.mp4"]) diff --git a/apps/single_curve_outline.py b/apps/single_curve_outline.py new file mode 100644 index 0000000..476da66 --- /dev/null +++ b/apps/single_curve_outline.py @@ -0,0 +1,132 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2, 2, 2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0], # base + [ 90.0, 180.0], # control point + [200.0, 65.0], # control point + [210.0, 98.0], # base + [220.0, 70.0], # control point + [130.0, 55.0]]) # control point +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = True, + stroke_width = torch.tensor(5.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]), + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0], # base + [100.0/256.0, 200.0/256.0], # control point + [170.0/256.0, 55.0/256.0], # control point + [220.0/256.0, 100.0/256.0], # base + [210.0/256.0, 80.0/256.0], # control point + [140.0/256.0, 60.0/256.0]], # control point + requires_grad = True) +fill_color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.fill_color = fill_color +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, fill_color, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. 
+for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path.stroke_width = stroke_width_n * 100 + path_group.fill_color = fill_color + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('fill_color.grad:', fill_color.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('fill_color:', path_group.fill_color) + print('stroke_color:', path_group.stroke_color) + print('stroke_width:', path.stroke_width) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.fill_color = fill_color +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_curve_outline/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_curve_outline/iter_%d.png", "-vb", "20M", + "results/single_curve_outline/out.mp4"]) diff --git a/apps/single_curve_sdf.py b/apps/single_curve_sdf.py new file mode 100644 index 0000000..182cdad --- /dev/null +++ b/apps/single_curve_sdf.py @@ -0,0 +1,125 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2, 2, 2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0], # base + [ 90.0, 180.0], # control point + [200.0, 65.0], # control point + [210.0, 98.0], # base + [220.0, 70.0], # control point + [130.0, 55.0]]) # control point +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = True) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + *scene_args) +img /= 256.0 +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/target.png', gamma=1.0) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0], # base + [100.0/256.0, 200.0/256.0], # control point + [170.0/256.0, 55.0/256.0], # control point + [220.0/256.0, 100.0/256.0], # base + [210.0/256.0, 80.0/256.0], # control point + [140.0/256.0, 60.0/256.0]], # control point + requires_grad = True) +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +path.points = points_n * 256 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 1, # seed + *scene_args) +img /= 256.0 +pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/init.png', gamma=1.0) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + t+1, # seed + *scene_args) + img /= 256.0 + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/iter_{}.png'.format(t), gamma=1.0) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('color:', path_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 102, # seed + *scene_args) +img /= 256.0 +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/final.png', gamma=1.0) + +# Convert the intermediate renderings to a video. 
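+# (The conversion below assumes the ffmpeg binary is available on PATH; a hypothetical guard:)
+import shutil
+if shutil.which("ffmpeg") is None:
+    print("Warning: ffmpeg not found; the video export below will fail.")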
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_curve_sdf/iter_%d.png", "-vb", "20M", + "results/single_curve_sdf/out.mp4"]) diff --git a/apps/single_curve_sdf_trans.py b/apps/single_curve_sdf_trans.py new file mode 100644 index 0000000..f7f5141 --- /dev/null +++ b/apps/single_curve_sdf_trans.py @@ -0,0 +1,172 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +# points = torch.tensor([[120.0, 30.0], # base +# [150.0, 60.0], # control point +# [ 90.0, 198.0], # control point +# [ 60.0, 218.0], # base +# [ 90.0, 180.0], # control point +# [200.0, 65.0], # control point +# [210.0, 98.0], # base +# [220.0, 70.0], # control point +# [130.0, 55.0]]) # control point +points = torch.tensor([[ 20.0, 128.0], # base + [ 50.0, 128.0], # control point + [170.0, 128.0], # control point + [200.0, 128.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + stroke_width = torch.tensor(10.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + *scene_args) + +path.points[:, 1] += 1e-3 +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img2 = render(256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + *scene_args) + +# diff = img2 - img +# diff = diff[:, :, 0] / 1e-3 +# import matplotlib.pyplot as plt +# plt.imshow(diff) +# plt.show() + +# # The output image is in linear RGB space. Do Gamma correction before saving the image. 
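+# (The two renders above differ only by a 1e-3 vertical shift of the control points, so a
+# forward difference approximates the derivative of the SDF with respect to that shift and
+# can be compared against the analytic image from render_grad below; this assumes the SDF
+# output is single-channel, matching the [:, :, 0] indexing used elsewhere in this script.)
+fd_grad = (img2 - img)[:, :, 0] / 1e-3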
+# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/target.png', gamma=1.0) +# target = img.clone() + +render_grad = pydiffvg.RenderFunction.render_grad +img = render_grad(torch.ones(256, 256, 1), # grad_img + 256, # width + 256, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + *scene_args) +img = img[:, :, 0] +import matplotlib.pyplot as plt +plt.imshow(img) +plt.show() + +# # Move the path to produce initial guess +# # normalize points for easier learning rate +# # points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base +# # [155.0/256.0, 65.0/256.0], # control point +# # [100.0/256.0, 180.0/256.0], # control point +# # [ 65.0/256.0, 238.0/256.0], # base +# # [100.0/256.0, 200.0/256.0], # control point +# # [170.0/256.0, 55.0/256.0], # control point +# # [220.0/256.0, 100.0/256.0], # base +# # [210.0/256.0, 80.0/256.0], # control point +# # [140.0/256.0, 60.0/256.0]], # control point +# # requires_grad = True) +# points_n = torch.tensor([[118.4274/256.0, 32.0159/256.0], +# [174.9657/256.0, 28.1877/256.0], +# [ 87.6629/256.0, 175.1049/256.0], +# [ 57.8093/256.0, 232.8987/256.0], +# [ 80.1829/256.0, 165.4280/256.0], +# [197.3640/256.0, 83.4058/256.0], +# [209.3676/256.0, 97.9176/256.0], +# [219.1048/256.0, 72.0000/256.0], +# [143.1226/256.0, 57.0636/256.0]], +# requires_grad = True) +# color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +# path.points = points_n * 256 +# path_group.fill_color = color +# scene_args = pydiffvg.RenderFunction.serialize_scene(\ +# canvas_width, canvas_height, shapes, shape_groups, +# output_type = pydiffvg.OutputType.sdf) +# img = render(256, # width +# 256, # height +# 1, # num_samples_x +# 1, # num_samples_y +# 1, # seed +# *scene_args) +# img /= 256.0 +# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/init.png', gamma=1.0) + +# # Optimize +# optimizer = torch.optim.Adam([points_n, color], lr=1e-3) +# # Run 100 Adam iterations. +# for t in range(2): +# print('iteration:', t) +# optimizer.zero_grad() +# # Forward pass: render the image. +# path.points = points_n * 256 +# path_group.fill_color = color +# scene_args = pydiffvg.RenderFunction.serialize_scene(\ +# canvas_width, canvas_height, shapes, shape_groups, +# output_type = pydiffvg.OutputType.sdf) +# img = render(256, # width +# 256, # height +# 1, # num_samples_x +# 1, # num_samples_y +# t+1, # seed +# *scene_args) +# img /= 256.0 +# # Save the intermediate render. +# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/iter_{}.png'.format(t), gamma=1.0) +# # Compute the loss function. Here it is L2. +# loss = (img - target).pow(2).sum() +# print('loss:', loss.item()) + +# # Backpropagate the gradients. +# loss.backward() +# # Print the gradients +# print('points_n.grad:', points_n.grad) +# print('color.grad:', color.grad) + +# # Take a gradient descent step. +# optimizer.step() +# # Print the current params. +# print('points:', path.points) +# print('color:', path_group.fill_color) +# exit() + +# # Render the final result. +# scene_args = pydiffvg.RenderFunction.serialize_scene(\ +# canvas_width, canvas_height, shapes, shape_groups, +# output_type = pydiffvg.OutputType.sdf) +# img = render(256, # width +# 256, # height +# 1, # num_samples_x +# 1, # num_samples_y +# 102, # seed +# *scene_args) +# img /= 256.0 +# # Save the images and differences. +# pydiffvg.imwrite(img.cpu(), 'results/single_curve_sdf/final.png', gamma=1.0) + +# # Convert the intermediate renderings to a video. 
+# from subprocess import call +# call(["ffmpeg", "-framerate", "24", "-i", +# "results/single_curve_sdf/iter_%d.png", "-vb", "20M", +# "results/single_curve_sdf/out.mp4"]) diff --git a/apps/single_ellipse.py b/apps/single_ellipse.py new file mode 100644 index 0000000..9d116e6 --- /dev/null +++ b/apps/single_ellipse.py @@ -0,0 +1,105 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +ellipse = pydiffvg.Ellipse(radius = torch.tensor([60.0, 30.0]), + center = torch.tensor([128.0, 128.0])) +shapes = [ellipse] +ellipse_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [ellipse_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/target.png', gamma=2.2) +target = img.clone() + +# Move the ellipse to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor([20.0 / 256.0, 40.0 / 256.0], requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +ellipse.radius = radius_n * 256 +ellipse.center = center_n * 256 +ellipse_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 50 Adam iterations. +for t in range(50): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + ellipse.radius = radius_n * 256 + ellipse.center = center_n * 256 + ellipse_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', ellipse.radius) + print('center:', ellipse.center) + print('color:', ellipse_group.fill_color) + +# Render the final result. +ellipse.radius = radius_n * 256 +ellipse.center = center_n * 256 +ellipse_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 52, # seed + *scene_args) +# Save the images and differences. 
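+# (A hypothetical extra output: the absolute difference between the final render and the
+# target, which the comment above alludes to but the script does not otherwise write.)
+pydiffvg.imwrite((img - target).abs().cpu(), 'results/single_ellipse/diff.png')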
+pydiffvg.imwrite(img.cpu(), 'results/single_ellipse/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_ellipse/iter_%d.png", "-vb", "20M", + "results/single_ellipse/out.mp4"]) diff --git a/apps/single_ellipse_transform.py b/apps/single_ellipse_transform.py new file mode 100644 index 0000000..0c11cac --- /dev/null +++ b/apps/single_ellipse_transform.py @@ -0,0 +1,108 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +ellipse = pydiffvg.Ellipse(radius = torch.tensor([60.0, 30.0]), + center = torch.tensor([128.0, 128.0])) +shapes = [ellipse] +ellipse_group = pydiffvg.ShapeGroup(\ + shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]), + shape_to_canvas = torch.eye(3, 3)) +shape_groups = [ellipse_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/target.png', gamma=2.2) +target = img.clone() + +# Affine transform the ellipse to produce initial guess +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +affine = torch.zeros(2, 3) +affine[0, 0] = 1.3 +affine[0, 1] = 0.2 +affine[0, 2] = 0.1 +affine[1, 0] = 0.2 +affine[1, 1] = 0.6 +affine[1, 2] = 0.3 +affine.requires_grad = True +shape_to_canvas = torch.cat((affine, torch.tensor([[0.0, 0.0, 1.0]])), axis=0) +ellipse_group.fill_color = color +ellipse_group.shape_to_canvas = shape_to_canvas +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([color, affine], lr=1e-2) +# Run 150 Adam iterations. +for t in range(150): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + ellipse_group.fill_color = color + ellipse_group.shape_to_canvas = torch.cat((affine, torch.tensor([[0.0, 0.0, 1.0]])), axis=0) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('color.grad:', color.grad) + print('affine.grad:', affine.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('color:', ellipse_group.fill_color) + print('affine:', affine) + +# Render the final result. 
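+# (The optimized 2x3 affine is promoted to a full 3x3 homogeneous matrix before each render;
+# a minimal sketch equivalent to the torch.cat calls used in this script:)
+def to_homogeneous(affine_2x3):
+    # Append the fixed projective row [0, 0, 1] so shape_to_canvas is a 3x3 transform.
+    return torch.cat((affine_2x3, torch.tensor([[0.0, 0.0, 1.0]])), dim=0)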
+ellipse_group.fill_color = color +ellipse_group.shape_to_canvas = torch.cat((affine, torch.tensor([[0.0, 0.0, 1.0]])), axis=0) +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 52, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_ellipse_transform/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_ellipse_transform/iter_%d.png", "-vb", "20M", + "results/single_ellipse_transform/out.mp4"]) diff --git a/apps/single_gradient.py b/apps/single_gradient.py new file mode 100644 index 0000000..289ff8a --- /dev/null +++ b/apps/single_gradient.py @@ -0,0 +1,127 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +color = pydiffvg.LinearGradient(\ + begin = torch.tensor([50.0, 50.0]), + end = torch.tensor([200.0, 200.0]), + offsets = torch.tensor([0.0, 1.0]), + stop_colors = torch.tensor([[0.2, 0.5, 0.7, 1.0], + [0.7, 0.2, 0.5, 1.0]])) +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), fill_color = color) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_gradient/target.png', gamma=2.2) +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +begin_n = torch.tensor([100.0 / 256.0, 100.0 / 256.0], requires_grad=True) +end_n = torch.tensor([150.0 / 256.0, 150.0 / 256.0], requires_grad=True) +stop_colors = torch.tensor([[0.1, 0.9, 0.2, 1.0], + [0.5, 0.3, 0.6, 1.0]], requires_grad=True) +color.begin = begin_n * 256 +color.end = end_n * 256 +color.stop_colors = stop_colors +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +shapes = [circle] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_gradient/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, begin_n, end_n, stop_colors], lr=1e-2) +# Run 50 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. 
+ color.begin = begin_n * 256 + color.end = end_n * 256 + color.stop_colors = stop_colors + circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_gradient/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('begin.grad:', begin_n.grad) + print('end.grad:', end_n.grad) + print('stop_colors.grad:', stop_colors.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('begin:', begin_n) + print('end:', end_n) + print('stop_colors:', stop_colors) + +# Render the final result. +color.begin = begin_n * 256 +color.end = end_n * 256 +color.stop_colors = stop_colors +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 52, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_gradient/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_gradient/iter_%d.png", "-vb", "20M", + "results/single_gradient/out.mp4"]) diff --git a/apps/single_open_curve.py b/apps/single_open_curve.py new file mode 100644 index 0000000..3f6b5d8 --- /dev/null +++ b/apps/single_open_curve.py @@ -0,0 +1,117 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + thickness = None, + is_closed = False, + stroke_width = torch.tensor(5.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
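+# (A minimal sketch of the linear-to-display conversion the gamma argument of
+# pydiffvg.imwrite is assumed to perform before quantizing to 8 bits:)
+def apply_gamma(linear_img, gamma=2.2):
+    # Clamp to [0, 1] and raise to 1/gamma, approximating a display transfer curve.
+    return linear_img.clamp(0.0, 1.0) ** (1.0 / gamma)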
+pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0]], # base + requires_grad = True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path.stroke_width = stroke_width_n * 100 + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', path.points) + print('stroke_color:', path_group.stroke_color) + print('stroke_width:', path.stroke_width) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_open_curve/iter_%d.png", "-vb", "20M", + "results/single_open_curve/out.mp4"]) diff --git a/apps/single_open_curve_thickness.py b/apps/single_open_curve_thickness.py new file mode 100644 index 0000000..877d835 --- /dev/null +++ b/apps/single_open_curve_thickness.py @@ -0,0 +1,120 @@ +import pydiffvg +import torch +import skimage + +pydiffvg.set_print_timing(True) + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0]]) # base +thickness = torch.tensor([10.0, 5.0, 4.0, 20.0]) +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + stroke_width = thickness) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = None, + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0]], # base + requires_grad = True) +thickness_n = torch.tensor([10.0 / 100.0, 10.0 / 100.0, 10.0 / 100.0, 10.0 / 100.0], + requires_grad = True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +path.points = points_n * 256 +path.stroke_width = thickness_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, thickness_n, stroke_color], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path.stroke_width = thickness_n * 100 + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('thickness_n.grad:', thickness_n.grad) + print('stroke_color.grad:', stroke_color.grad) + + # Take a gradient descent step. 
+ optimizer.step() + # Print the current params. + print('points:', path.points) + print('thickness:', path.stroke_width) + print('stroke_color:', path_group.stroke_color) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = thickness_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_open_curve_thickness/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_open_curve_thickness/iter_%d.png", "-vb", "20M", + "results/single_open_curve_thickness/out.mp4"]) diff --git a/apps/single_path.py b/apps/single_path.py new file mode 100644 index 0000000..5261cfb --- /dev/null +++ b/apps/single_path.py @@ -0,0 +1,99 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 510, 510 +# https://www.flaticon.com/free-icon/black-plane_61212#term=airplane&page=1&position=8 +shapes = pydiffvg.from_svg_path('M510,255c0-20.4-17.85-38.25-38.25-38.25H331.5L204,12.75h-51l63.75,204H76.5l-38.25-51H0L25.5,255L0,344.25h38.25l38.25-51h140.25l-63.75,204h51l127.5-204h140.25C492.15,293.25,510,275.4,510,255z') +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_path/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +noise = torch.FloatTensor(shapes[0].points.shape).uniform_(0.0, 1.0) +points_n = (shapes[0].points.clone() + (noise * 60 - 30)) / 510.0 +points_n.requires_grad = True +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_path/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + shapes[0].points = points_n * 510 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_path/iter_{:02}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. 
+ loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', shapes[0].points) + print('color:', path_group.fill_color) + +# Render the final result. +shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(510, # width + 510, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_path/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "20", "-i", + "results/single_path/iter_%02d.png", "-vb", "20M", + "results/single_path/out.mp4"]) diff --git a/apps/single_path_sdf.py b/apps/single_path_sdf.py new file mode 100644 index 0000000..b225f5a --- /dev/null +++ b/apps/single_path_sdf.py @@ -0,0 +1,105 @@ +import pydiffvg +import torch +import skimage + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 510, 510 +# https://www.flaticon.com/free-icon/black-plane_61212#term=airplane&page=1&position=8 +shapes = pydiffvg.from_svg_path('M510,255c0-20.4-17.85-38.25-38.25-38.25H331.5L204,12.75h-51l63.75,204H76.5l-38.25-51H0L25.5,255L0,344.25h38.25l38.25-51h140.25l-63.75,204h51l127.5-204h140.25C492.15,293.25,510,275.4,510,255z') +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + 0, # seed + *scene_args) +img = img / 510 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/target.png', gamma=1.0) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +noise = torch.FloatTensor(shapes[0].points.shape).uniform_(0.0, 1.0) +points_n = (shapes[0].points.clone() + (noise * 60 - 30)) / 510.0 +points_n.requires_grad = True +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + 1, # seed + *scene_args) +img = img / 510 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/init.png', gamma=1.0) + +# Optimize +optimizer = torch.optim.Adam([points_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. 
+ shapes[0].points = points_n * 510 + path_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + t+1, # seed + *scene_args) + img = img / 510 # Normalize SDF to [0, 1] + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/iter_{}.png'.format(t), gamma=1.0) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('points:', shapes[0].points) + print('color:', path_group.fill_color) + +# Render the final result. +shapes[0].points = points_n * 510 +path_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(510, # width + 510, # height + 1, # num_samples_x + 1, # num_samples_y + 102, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_path_sdf/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_path_sdf/iter_%d.png", "-vb", "20M", + "results/single_path_sdf/out.mp4"]) diff --git a/apps/single_polygon.py b/apps/single_polygon.py new file mode 100644 index 0000000..88a7c15 --- /dev/null +++ b/apps/single_polygon.py @@ -0,0 +1,108 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +# https://www.w3schools.com/graphics/svg_polygon.asp +points = torch.tensor([[120.0, 30.0], + [ 60.0, 218.0], + [210.0, 98.0], + [ 30.0, 98.0], + [180.0, 218.0]]) +polygon = pydiffvg.Polygon(points = points, is_closed = True) +shapes = [polygon] +polygon_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [polygon_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. 
+pydiffvg.imwrite(img.cpu(), 'results/single_polygon/target.png', gamma=2.2)
+target = img.clone()
+
+# Move the polygon to produce initial guess
+# normalize points for easier learning rate
+points_n = torch.tensor([[140.0 / 256.0,  20.0 / 256.0],
+                         [ 65.0 / 256.0, 228.0 / 256.0],
+                         [215.0 / 256.0, 100.0 / 256.0],
+                         [ 35.0 / 256.0,  90.0 / 256.0],
+                         [160.0 / 256.0, 208.0 / 256.0]], requires_grad=True)
+color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True)
+polygon.points = points_n * 256
+polygon_group.fill_color = color
+scene_args = pydiffvg.RenderFunction.serialize_scene(\
+    canvas_width, canvas_height, shapes, shape_groups)
+img = render(256, # width
+             256, # height
+             2, # num_samples_x
+             2, # num_samples_y
+             1, # seed
+             *scene_args)
+pydiffvg.imwrite(img.cpu(), 'results/single_polygon/init.png', gamma=2.2)
+
+# Optimize for the polygon points & fill color
+optimizer = torch.optim.Adam([points_n, color], lr=1e-2)
+# Run 100 Adam iterations.
+for t in range(100):
+    print('iteration:', t)
+    optimizer.zero_grad()
+    # Forward pass: render the image.
+    polygon.points = points_n * 256
+    polygon_group.fill_color = color
+    scene_args = pydiffvg.RenderFunction.serialize_scene(\
+        canvas_width, canvas_height, shapes, shape_groups)
+    img = render(256, # width
+                 256, # height
+                 2, # num_samples_x
+                 2, # num_samples_y
+                 t+1, # seed
+                 *scene_args)
+    # Save the intermediate render.
+    pydiffvg.imwrite(img.cpu(), 'results/single_polygon/iter_{}.png'.format(t), gamma=2.2)
+    # Compute the loss function. Here it is L2.
+    loss = (img - target).pow(2).sum()
+    print('loss:', loss.item())
+
+    # Backpropagate the gradients.
+    loss.backward()
+    # Print the gradients
+    print('points_n.grad:', points_n.grad)
+    print('color.grad:', color.grad)
+
+    # Take a gradient descent step.
+    optimizer.step()
+    # Print the current params.
+    print('points:', polygon.points)
+    print('color:', polygon_group.fill_color)
+
+# Render the final result.
+polygon.points = points_n * 256
+polygon_group.fill_color = color
+scene_args = pydiffvg.RenderFunction.serialize_scene(\
+    canvas_width, canvas_height, shapes, shape_groups)
+img = render(256, # width
+             256, # height
+             2, # num_samples_x
+             2, # num_samples_y
+             102, # seed
+             *scene_args)
+# Save the images and differences.
+pydiffvg.imwrite(img.cpu(), 'results/single_polygon/final.png')
+
+# Convert the intermediate renderings to a video.
+from subprocess import call
+call(["ffmpeg", "-framerate", "24", "-i",
+    "results/single_polygon/iter_%d.png", "-vb", "20M",
+    "results/single_polygon/out.mp4"])
diff --git a/apps/single_rect.py b/apps/single_rect.py
new file mode 100644
index 0000000..4fc51e3
--- /dev/null
+++ b/apps/single_rect.py
@@ -0,0 +1,102 @@
+import pydiffvg
+import torch
+import skimage
+import numpy as np
+
+# Use GPU if available
+pydiffvg.set_use_gpu(torch.cuda.is_available())
+
+canvas_width, canvas_height = 256, 256
+rect = pydiffvg.Rect(p_min = torch.tensor([40.0, 40.0]),
+                     p_max = torch.tensor([160.0, 160.0]))
+shapes = [rect]
+rect_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]),
+                                 fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0]))
+shape_groups = [rect_group]
+scene_args = pydiffvg.RenderFunction.serialize_scene(\
+    canvas_width, canvas_height, shapes, shape_groups)
+
+render = pydiffvg.RenderFunction.apply
+img = render(256, # width
+             256, # height
+             2, # num_samples_x
+             2, # num_samples_y
+             0, # seed
+             *scene_args)
+# The output image is in linear RGB space. Do Gamma correction before saving the image.
+pydiffvg.imwrite(img.cpu(), 'results/single_rect/target.png', gamma=2.2) +target = img.clone() + +# Move the rect to produce initial guess +# normalize p_min & p_max for easier learning rate +p_min_n = torch.tensor([80.0 / 256.0, 20.0 / 256.0], requires_grad=True) +p_max_n = torch.tensor([100.0 / 256.0, 60.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.5, 1.0], requires_grad=True) +rect.p_min = p_min_n * 256 +rect.p_max = p_max_n * 256 +rect_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_rect/init.png', gamma=2.2) + +# Optimize for radius & center +optimizer = torch.optim.Adam([p_min_n, p_max_n, color], lr=1e-2) +# Run 100 Adam iterations. +for t in range(100): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + rect.p_min = p_min_n * 256 + rect.p_max = p_max_n * 256 + rect_group.fill_color = color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_rect/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('p_min.grad:', p_min_n.grad) + print('p_max.grad:', p_max_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('p_min:', rect.p_min) + print('p_max:', rect.p_max) + print('color:', rect_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_rect/final.png') + +# Convert the intermediate renderings to a video. 
+from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_rect/iter_%d.png", "-vb", "20M", + "results/single_rect/out.mp4"]) diff --git a/apps/single_stroke.py b/apps/single_stroke.py new file mode 100644 index 0000000..256f79e --- /dev/null +++ b/apps/single_stroke.py @@ -0,0 +1,117 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width, canvas_height = 256, 256 +num_control_points = torch.tensor([2]) +points = torch.tensor([[120.0, 30.0], # base + [150.0, 60.0], # control point + [ 90.0, 198.0], # control point + [ 60.0, 218.0]]) # base +path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + is_closed = False, + stroke_width = torch.tensor(5.0)) +shapes = [path] +path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.0, 0.0, 0.0, 0.0]), + stroke_color = torch.tensor([0.6, 0.3, 0.6, 0.8])) +shape_groups = [path_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'results/single_stroke/target.png', gamma=2.2) +target = img.clone() + +# Move the path to produce initial guess +# normalize points for easier learning rate +points_n = torch.tensor([[100.0/256.0, 40.0/256.0], # base + [155.0/256.0, 65.0/256.0], # control point + [100.0/256.0, 180.0/256.0], # control point + [ 65.0/256.0, 238.0/256.0]], # base + requires_grad = True) +stroke_color = torch.tensor([0.4, 0.7, 0.5, 0.5], requires_grad=True) +stroke_width_n = torch.tensor(10.0 / 100.0, requires_grad=True) +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +pydiffvg.imwrite(img.cpu(), 'results/single_stroke/init.png', gamma=2.2) + +# Optimize +optimizer = torch.optim.Adam([points_n, stroke_color, stroke_width_n], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. + path.points = points_n * 256 + path.stroke_width = stroke_width_n * 100 + path_group.stroke_color = stroke_color + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + t+1, # seed + *scene_args) + # Save the intermediate render. + pydiffvg.imwrite(img.cpu(), 'results/single_stroke/iter_{}.png'.format(t), gamma=2.2) + # Compute the loss function. Here it is L2. + loss = (img - target).pow(2).sum() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('points_n.grad:', points_n.grad) + print('stroke_color.grad:', stroke_color.grad) + print('stroke_width.grad:', stroke_width_n.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. 
+ print('points:', path.points) + print('stroke_color:', path_group.stroke_color) + print('stroke_width:', path.stroke_width) + +# Render the final result. +path.points = points_n * 256 +path.stroke_width = stroke_width_n * 100 +path_group.stroke_color = stroke_color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 202, # seed + *scene_args) +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/single_stroke/final.png') + +# Convert the intermediate renderings to a video. +from subprocess import call +call(["ffmpeg", "-framerate", "24", "-i", + "results/single_stroke/iter_%d.png", "-vb", "20M", + "results/single_stroke/out.mp4"]) diff --git a/apps/sketch_gan.py b/apps/sketch_gan.py new file mode 100644 index 0000000..6f3da59 --- /dev/null +++ b/apps/sketch_gan.py @@ -0,0 +1,213 @@ +"""A simple training interface using ttools.""" +import argparse +import os +import logging +import random + +import numpy as np +import torch +from torchvision.datasets import MNIST +import torchvision.transforms as xforms +from torch.utils.data import DataLoader + +import ttools +import ttools.interfaces + +import pydiffvg + +LOG = ttools.get_logger(__name__) + +pydiffvg.render_pytorch.print_timing = False + +torch.manual_seed(123) +np.random.seed(123) +torch.backends.cudnn.deterministic = True + +latent_dim = 100 +img_size = 32 +num_paths = 8 +num_segments = 8 + +def weights_init_normal(m): + classname = m.__class__.__name__ + if classname.find("Conv") != -1: + torch.nn.init.normal_(m.weight.data, 0.0, 0.02) + elif classname.find("BatchNorm2d") != -1: + torch.nn.init.normal_(m.weight.data, 1.0, 0.02) + torch.nn.init.constant_(m.bias.data, 0.0) + +class VisdomImageCallback(ttools.callbacks.ImageDisplayCallback): + def visualized_image(self, batch, fwd_result): + return torch.cat([batch[0], fwd_result.cpu()], dim = 2) + +# From https://github.com/eriklindernoren/PyTorch-GAN/blob/master/implementations/dcgan/dcgan.py +class Generator(torch.nn.Module): + def __init__(self): + super(Generator, self).__init__() + + self.fc = torch.nn.Sequential( + torch.nn.Linear(latent_dim, 128), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(128, 256), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(256, 512), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(512, 1024), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Linear(1024, 2 * num_paths * (num_segments + 1) + num_paths + num_paths), + torch.nn.Sigmoid() + ) + + def forward(self, z): + out = self.fc(z) + # construct paths + imgs = [] + for b in range(out.shape[0]): + index = 0 + shapes = [] + shape_groups = [] + for i in range(num_paths): + points = img_size * out[b, index: index + 2 * (num_segments + 1)].view(-1, 2).cpu() + index += 2 * (num_segments + 1) + stroke_width = img_size * out[b, index].view(1).cpu() + index += 1 + + num_control_points = torch.zeros(num_segments, dtype = torch.int32) + 2 + path = pydiffvg.Path(num_control_points = num_control_points, + points = points, + stroke_width = stroke_width, + is_closed = False) + shapes.append(path) + + stroke_color = out[b, index].view(1).cpu() + index += 1 + stroke_color = torch.cat([stroke_color, torch.tensor([0.0, 0.0, 1.0])]) + path_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([len(shapes) - 1]), + fill_color = None, + stroke_color = stroke_color) + shape_groups.append(path_group) + scene_args = 
pydiffvg.RenderFunction.serialize_scene(img_size, img_size, shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(img_size, # width + img_size, # height + 2, # num_samples_x + 2, # num_samples_y + random.randint(0, 1048576), # seed + None, + *scene_args) + img = img[:, :, :1] + # HWC -> NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + imgs.append(img) + img = torch.cat(imgs, dim = 0) + return img + +class Discriminator(torch.nn.Module): + def __init__(self): + super(Discriminator, self).__init__() + + def discriminator_block(in_filters, out_filters, bn=True): + block = [torch.nn.Conv2d(in_filters, out_filters, 3, 2, 1), + torch.nn.LeakyReLU(0.2, inplace=True), + torch.nn.Dropout2d(0.25)] + if bn: + block.append(torch.nn.BatchNorm2d(out_filters, 0.8)) + return block + + self.model = torch.nn.Sequential( + *discriminator_block(1, 16, bn=False), + *discriminator_block(16, 32), + *discriminator_block(32, 64), + *discriminator_block(64, 128), + ) + + # The height and width of downsampled image + ds_size = img_size // 2 ** 4 + self.adv_layer = torch.nn.Sequential( + torch.nn.Linear(128 * ds_size ** 2, 1), + torch.nn.Sigmoid()) + + def forward(self, img): + out = self.model(img) + out = out.view(out.shape[0], -1) + validity = self.adv_layer(out) + + return validity + +class MNISTInterface(ttools.interfaces.SGANInterface): + """An adapter to run or train a model.""" + + def __init__(self, gen, discrim, lr=2e-4): + super(MNISTInterface, self).__init__(gen, discrim, lr, opt = 'adam') + + def forward(self, batch): + return self.gen(torch.zeros([batch[0].shape[0], latent_dim], device = self.device).normal_()) + + def _discriminator_input(self, batch, fwd_data, fake=False): + if fake: + return fwd_data + else: + return batch[0].to(self.device) + +def train(args): + """Train a MNIST classifier.""" + + # Setup train and val data + _xform = xforms.Compose([xforms.Resize([32, 32]), xforms.ToTensor()]) + data = MNIST("data/mnist", train=True, download=True, transform=_xform) + + # Initialize asynchronous dataloaders + loader = DataLoader(data, batch_size=args.bs, num_workers=2) + + # Instantiate the models + gen = Generator() + discrim = Discriminator() + + gen.apply(weights_init_normal) + discrim.apply(weights_init_normal) + + # Checkpointer to save/recall model parameters + checkpointer_gen = ttools.Checkpointer(os.path.join(args.out, "checkpoints"), model=gen, prefix="gen_") + checkpointer_discrim = ttools.Checkpointer(os.path.join(args.out, "checkpoints"), model=discrim, prefix="discrim_") + + # resume from a previous checkpoint, if any + checkpointer_gen.load_latest() + checkpointer_discrim.load_latest() + + # Setup a training interface for the model + interface = MNISTInterface(gen, discrim, lr=args.lr) + + # Create a training looper with the interface we defined + trainer = ttools.Trainer(interface) + + # Adds several callbacks, that will be called by the trainer -------------- + # A periodic checkpointing operation + trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_gen)) + trainer.add_callback(ttools.callbacks.CheckpointingCallback(checkpointer_discrim)) + # A simple progress bar + trainer.add_callback(ttools.callbacks.ProgressBarCallback( + keys=["loss_g", "loss_d", "loss"])) + # A volatile logging using visdom + trainer.add_callback(ttools.callbacks.VisdomLoggingCallback( + keys=["loss_g", "loss_d", "loss"], + port=8080, env="mnist_demo")) + # Image + trainer.add_callback(VisdomImageCallback(port=8080, env="mnist_demo")) 
+ # ------------------------------------------------------------------------- + + # Start the training + LOG.info("Training started, press Ctrl-C to interrupt.") + trainer.train(loader, num_epochs=args.epochs) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + # TODO: subparsers + parser.add_argument("data", help="directory where we download and store the MNIST dataset.") + parser.add_argument("out", help="directory where we write the checkpoints and visualizations.") + parser.add_argument("--lr", type=float, default=1e-4, help="learning rate for the optimizer.") + parser.add_argument("--epochs", type=int, default=500, help="number of epochs to train for.") + parser.add_argument("--bs", type=int, default=64, help="number of elements per batch.") + args = parser.parse_args() + ttools.set_logger(True) # activate debug prints + train(args) diff --git a/apps/style_transfer.py b/apps/style_transfer.py new file mode 100644 index 0000000..43ba38c --- /dev/null +++ b/apps/style_transfer.py @@ -0,0 +1,291 @@ +import torch +import torch.nn as nn +import torch.nn.functional as F +import torch.optim as optim +import torchvision.transforms as transforms +import torchvision.models as models +from PIL import Image +import copy +import pydiffvg +import argparse + +def main(args): + pydiffvg.set_use_gpu(torch.cuda.is_available()) + + canvas_width, canvas_height, shapes, shape_groups = pydiffvg.svg_to_scene(args.content_file) + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # Transform to gamma space + pydiffvg.imwrite(img.cpu(), 'results/style_transfer/init.png', gamma=1.0) + # HWC -> NCHW + img = img.unsqueeze(0) + img = img.permute(0, 3, 1, 2) # NHWC -> NCHW + + loader = transforms.Compose([ + transforms.ToTensor()]) # transform it into a torch tensor + + def image_loader(image_name): + image = Image.open(image_name) + # fake batch dimension required to fit network's input dimensions + image = loader(image).unsqueeze(0) + return image.to(pydiffvg.get_device(), torch.float) + + style_img = image_loader(args.style_img) + # alpha blend content with a gray background + content_img = img[:, :3, :, :] * img[:, 3, :, :] + \ + 0.5 * torch.ones([1, 3, img.shape[2], img.shape[3]]) * \ + (1 - img[:, 3, :, :]) + + assert style_img.size() == content_img.size(), \ + "we need to import style and content images of the same size" + + unloader = transforms.ToPILImage() # reconvert into PIL image + + class ContentLoss(nn.Module): + def __init__(self, target,): + super(ContentLoss, self).__init__() + # we 'detach' the target content from the tree used + # to dynamically compute the gradient: this is a stated value, + # not a variable. Otherwise the forward method of the criterion + # will throw an error. + self.target = target.detach() + + def forward(self, input): + self.loss = F.mse_loss(input, self.target) + return input + + def gram_matrix(input): + a, b, c, d = input.size() # a=batch size(=1) + # b=number of feature maps + # (c,d)=dimensions of a f. map (N=c*d) + + features = input.view(a * b, c * d) # resise F_XL into \hat F_XL + + G = torch.mm(features, features.t()) # compute the gram product + + # we 'normalize' the values of the gram matrix + # by dividing by the number of element in each feature maps. 
+ return G.div(a * b * c * d) + + class StyleLoss(nn.Module): + + def __init__(self, target_feature): + super(StyleLoss, self).__init__() + self.target = gram_matrix(target_feature).detach() + + def forward(self, input): + G = gram_matrix(input) + self.loss = F.mse_loss(G, self.target) + return input + + device = pydiffvg.get_device() + cnn = models.vgg19(pretrained=True).features.to(device).eval() + + cnn_normalization_mean = torch.tensor([0.485, 0.456, 0.406]).to(device) + cnn_normalization_std = torch.tensor([0.229, 0.224, 0.225]).to(device) + + # create a module to normalize input image so we can easily put it in a + # nn.Sequential + class Normalization(nn.Module): + def __init__(self, mean, std): + super(Normalization, self).__init__() + # .view the mean and std to make them [C x 1 x 1] so that they can + # directly work with image Tensor of shape [B x C x H x W]. + # B is batch size. C is number of channels. H is height and W is width. + self.mean = mean.clone().view(-1, 1, 1) + self.std = std.clone().view(-1, 1, 1) + + def forward(self, img): + # normalize img + return (img - self.mean) / self.std + + # desired depth layers to compute style/content losses : + content_layers_default = ['conv_4'] + style_layers_default = ['conv_1', 'conv_2', 'conv_3', 'conv_4', 'conv_5'] + + def get_style_model_and_losses(cnn, normalization_mean, normalization_std, + style_img, content_img, + content_layers=content_layers_default, + style_layers=style_layers_default): + cnn = copy.deepcopy(cnn) + + # normalization module + normalization = Normalization(normalization_mean, normalization_std).to(device) + + # just in order to have an iterable access to or list of content/syle + # losses + content_losses = [] + style_losses = [] + + # assuming that cnn is a nn.Sequential, so we make a new nn.Sequential + # to put in modules that are supposed to be activated sequentially + model = nn.Sequential(normalization) + + i = 0 # increment every time we see a conv + for layer in cnn.children(): + if isinstance(layer, nn.Conv2d): + i += 1 + name = 'conv_{}'.format(i) + elif isinstance(layer, nn.ReLU): + name = 'relu_{}'.format(i) + # The in-place version doesn't play very nicely with the ContentLoss + # and StyleLoss we insert below. So we replace with out-of-place + # ones here. 
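+                # (an in-place ReLU can overwrite activations that the inserted loss modules still
+                # need during backward, tripping autograd's in-place modification checks)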
+ layer = nn.ReLU(inplace=False) + elif isinstance(layer, nn.MaxPool2d): + name = 'pool_{}'.format(i) + elif isinstance(layer, nn.BatchNorm2d): + name = 'bn_{}'.format(i) + else: + raise RuntimeError('Unrecognized layer: {}'.format(layer.__class__.__name__)) + + model.add_module(name, layer) + + if name in content_layers: + # add content loss: + target = model(content_img).detach() + content_loss = ContentLoss(target) + model.add_module("content_loss_{}".format(i), content_loss) + content_losses.append(content_loss) + + if name in style_layers: + # add style loss: + target_feature = model(style_img).detach() + style_loss = StyleLoss(target_feature) + model.add_module("style_loss_{}".format(i), style_loss) + style_losses.append(style_loss) + + # now we trim off the layers after the last content and style losses + for i in range(len(model) - 1, -1, -1): + if isinstance(model[i], ContentLoss) or isinstance(model[i], StyleLoss): + break + + model = model[:(i + 1)] + + return model, style_losses, content_losses + + def run_style_transfer(cnn, normalization_mean, normalization_std, + content_img, style_img, + canvas_width, canvas_height, + shapes, shape_groups, + num_steps=500, style_weight=5000, content_weight=1): + """Run the style transfer.""" + print('Building the style transfer model..') + model, style_losses, content_losses = get_style_model_and_losses(cnn, + normalization_mean, normalization_std, style_img, content_img) + point_params = [] + color_params = [] + stroke_width_params = [] + for shape in shapes: + if isinstance(shape, pydiffvg.Path): + point_params.append(shape.points.requires_grad_()) + stroke_width_params.append(shape.stroke_width.requires_grad_()) + for shape_group in shape_groups: + if isinstance(shape_group.fill_color, torch.Tensor): + color_params.append(shape_group.fill_color.requires_grad_()) + elif isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + point_params.append(shape_group.fill_color.begin.requires_grad_()) + point_params.append(shape_group.fill_color.end.requires_grad_()) + color_params.append(shape_group.fill_color.stop_colors.requires_grad_()) + if isinstance(shape_group.stroke_color, torch.Tensor): + color_params.append(shape_group.stroke_color.requires_grad_()) + elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + point_params.append(shape_group.stroke_color.begin.requires_grad_()) + point_params.append(shape_group.stroke_color.end.requires_grad_()) + color_params.append(shape_group.stroke_color.stop_colors.requires_grad_()) + + point_optimizer = optim.Adam(point_params, lr=1.0) + color_optimizer = optim.Adam(color_params, lr=0.01) + stroke_width_optimizers = optim.Adam(stroke_width_params, lr=0.1) + print('Optimizing..') + run = [0] + while run[0] <= num_steps: + point_optimizer.zero_grad() + color_optimizer.zero_grad() + stroke_width_optimizers.zero_grad() + + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # alpha blend img with a gray background + img = img[:, :, :3] * img[:, :, 3:4] + \ + 0.5 * torch.ones([img.shape[0], img.shape[1], 3]) * \ + (1 - img[:, :, 3:4]) + + pydiffvg.imwrite(img.cpu(), + 'results/style_transfer/step_{}.png'.format(run[0]), + gamma=1.0) + + # HWC to NCHW + img = img.permute([2, 0, 1]).unsqueeze(0) + model(img) + style_score = 0 + content_score = 0 + + for 
sl in style_losses: + style_score += sl.loss + for cl in content_losses: + content_score += cl.loss + + style_score *= style_weight + content_score *= content_weight + + loss = style_score + content_score + loss.backward() + + run[0] += 1 + if run[0] % 1 == 0: + print("run {}:".format(run)) + print('Style Loss : {:4f} Content Loss: {:4f}'.format( + style_score.item(), content_score.item())) + print() + + point_optimizer.step() + color_optimizer.step() + stroke_width_optimizers.step() + + for color in color_params: + color.data.clamp_(0, 1) + for w in stroke_width_params: + w.data.clamp_(0.5, 4.0) + + return shapes, shape_groups + + shapes, shape_groups = run_style_transfer(\ + cnn, cnn_normalization_mean, cnn_normalization_std, + content_img, style_img, + canvas_width, canvas_height, shapes, shape_groups) + + scene_args = pydiffvg.RenderFunction.serialize_scene(shapes, shape_groups) + render = pydiffvg.RenderFunction.apply + img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + # Transform to gamma space + pydiffvg.imwrite(img.cpu(), 'results/style_transfer/output.png', gamma=1.0) + +if __name__ == "__main__": + parser = argparse.ArgumentParser() + parser.add_argument("content_file", help="source SVG path") + parser.add_argument("style_img", help="target image path") + args = parser.parse_args() + main(args) diff --git a/apps/svg_brush.py b/apps/svg_brush.py new file mode 100644 index 0000000..de54e48 --- /dev/null +++ b/apps/svg_brush.py @@ -0,0 +1,167 @@ +import sys +sys.path.append("../svg") +from geometry import GeometryLoss +import numpy as np +import pygame as pg +import torch +import pydiffvg +import tkinter as tk +from tkinter import filedialog + +def box_kernel(val): + return np.heaviside(-val+1,0) + +def cone_kernel(val): + return np.maximum(0,1-val) + +def nptosurf(arr): + if arr.shape[2]==1: + #greyscale + shape=arr.shape + shape=(shape[0],shape[1],3) + arr=np.broadcast_to(arr,shape) + return pg.surfarray.make_surface(arr*255) + +def brush_tensor(screen_size,coords,radius,kernel): + coordarr=np.stack(np.meshgrid(np.linspace(0,screen_size[0]-1,screen_size[0]),np.linspace(0,screen_size[1]-1,screen_size[1]),indexing='ij'),axis=2) + ctrarr = np.reshape(np.array(coords), [1, 1, 2]) + distarr=np.sqrt(np.sum(np.power(coordarr-ctrarr,2),axis=2)) + valarr=kernel(distarr/radius) + return torch.tensor(valarr,requires_grad=False,dtype=torch.float32) + +def checkerboard(shape, square_size=2): + xv,yv=np.meshgrid(np.floor(np.linspace(0,shape[1]-1,shape[1])/square_size),np.floor(np.linspace(0,shape[0]-1,shape[0])/square_size)) + bin=np.expand_dims(((xv+yv)%2),axis=2) + res=bin*np.array([[[1., 1., 1.,]]])+(1-bin)*np.array([[[.75, .75, .75,]]]) + return torch.tensor(res,requires_grad=False,dtype=torch.float32) + +def render(optim, viewport): + scene_args = pydiffvg.RenderFunction.serialize_scene(*optim.build_scene()) + render = pydiffvg.RenderFunction.apply + img = render(viewport[0], # width + viewport[1], # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + None, + *scene_args) + return img + +def optimize(optim, viewport, brush_kernel, increase=True, strength=0.1): + optim.zero_grad() + + geomLoss=torch.tensor(0.) 
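+    # accumulate the per-shape geometry regularizers; `geometryLosses` is the global list built in
+    # main(), one GeometryLoss per shape returned by optim.build_scene()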
+ + for shape, gloss in zip(optim.scene[2],geometryLosses): + geomLoss+=gloss.compute(shape) + + img=render(optim,viewport) + + imalpha=img[:,:,3] + + multiplied=imalpha*brush_kernel + + loss=((1-multiplied).mean() if increase else multiplied.mean())*strength + + loss+=geomLoss + + loss.backward() + + optim.step() + + return render(optim,viewport) + +def get_infile(): + pydiffvg.set_use_gpu(False) + root = tk.Tk() + #root.withdraw() + + file_path = filedialog.askopenfilename(initialdir = ".",title = "Select graphic to optimize",filetypes = (("SVG files","*.svg"),("all files","*.*"))) + + root.destroy() + + return file_path + +def compositebg(img): + bg=checkerboard(img.shape,2) + color=img[:,:,0:3] + alpha=img[:,:,3] + composite=alpha.unsqueeze(2)*color+(1-alpha).unsqueeze(2)*bg + + return composite + +def main(): + infile=get_infile() + + settings=pydiffvg.SvgOptimizationSettings() + settings.global_override(["optimize_color"],False) + settings.global_override(["transforms","optimize_transforms"], False) + settings.global_override(["optimizer"], "SGD") + settings.global_override(["paths","shape_lr"], 1e-1) + + optim=pydiffvg.OptimizableSvg(infile,settings) + + global geometryLosses + geometryLosses = [] + + for shape in optim.build_scene()[2]: + geometryLosses.append(GeometryLoss(shape)) + + scaling=1 + brush_radius=100 + graphic_size=optim.canvas + screen_size=(graphic_size[1]*scaling, graphic_size[0]*scaling) + + pg.init() + + screen=pg.display.set_mode(screen_size) + screen.fill((255,255,255)) + + img=render(optim,graphic_size) + print(img.max()) + + npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size) + + screen.blit(npsurf,(0,0)) + + pg.display.update() + clock=pg.time.Clock() + + z=0 + btn=0 + + while True: + clock.tick(60) + for event in pg.event.get(): + if event.type==pg.QUIT: + pg.quit() + sys.exit() + + y, x = pg.mouse.get_pos() + if event.type == pg.MOUSEBUTTONDOWN: + if event.button in [1,3]: + z=1 + btn=event.button + elif event.button == 4: + brush_radius*=1.1 + elif event.button == 5: + brush_radius/=1.1 + brush_radius=max(brush_radius,5) + elif event.type == pg.MOUSEBUTTONUP: + if event.button in [1,3]: + z=0 + + if z==1: + brush=brush_tensor((graphic_size[0],graphic_size[1]), (x/scaling, y/scaling), brush_radius, box_kernel) + img=optimize(optim,graphic_size,brush,btn==1) + npsurf = pg.transform.scale(nptosurf(compositebg(img).detach().permute(1,0,2).numpy()), screen_size) + + + screen.blit(npsurf,(0,0)) + pg.draw.circle(screen, (255,255,255), (y,x), int(brush_radius*scaling), 1) + pg.display.update() + + +if __name__ == '__main__': + main() + diff --git a/apps/svg_parse_test.py b/apps/svg_parse_test.py new file mode 100644 index 0000000..f6fb5ea --- /dev/null +++ b/apps/svg_parse_test.py @@ -0,0 +1,63 @@ +import pydiffvg +import sys +import numpy as np +import torch +sys.path.append("../pydiffvg") + +from optimize_svg import OptimizableSvg + +pydiffvg.set_use_gpu(False) + +""" +for x in range(100000): + inmat=np.eye(3) + inmat[0:2,:]=(np.random.rand(2,3)-0.5)*2 + decomp=OptimizableSvg.TransformTools.decompose(inmat) + outmat=OptimizableSvg.TransformTools.recompose(torch.tensor(decomp[0],dtype=torch.float32),torch.tensor(decomp[1],dtype=torch.float32),torch.tensor(decomp[2],dtype=torch.float32),torch.tensor(decomp[3],dtype=torch.float32)).numpy() + dif=np.linalg.norm(inmat-outmat) + if dif > 1e-3: + print(dif) + print(inmat) + print(outmat) + print(decomp)""" + +#infile='../../data/test_data/linear_grad_alpha_aspaths.svg' 
+#infile='../../data/note_small.svg' +infile='linux.svg' + +canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(infile) +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) +render = pydiffvg.RenderFunction.apply +img = render(canvas_width, # width + canvas_height, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'test_old.png', gamma=1.0) + +#optim=OptimizableSvg('linux.svg',verbose=True) +optim=OptimizableSvg(infile,verbose=True) + +scene=optim.build_scene() +scene_args = pydiffvg.RenderFunction.serialize_scene(*scene) +render = pydiffvg.RenderFunction.apply +img = render(scene[0], # width + scene[1], # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) + + + +with open("resaved.svg","w") as f: + f.write(optim.write_xml()) + +# The output image is in linear RGB space. Do Gamma correction before saving the image. +pydiffvg.imwrite(img.cpu(), 'test_new.png', gamma=1.0) + +print("Done!") \ No newline at end of file diff --git a/apps/test_eval_positions.py b/apps/test_eval_positions.py new file mode 100644 index 0000000..c79e3a8 --- /dev/null +++ b/apps/test_eval_positions.py @@ -0,0 +1,109 @@ +import pydiffvg +import torch +import skimage +import numpy as np + +# Use GPU if available +pydiffvg.set_use_gpu(torch.cuda.is_available()) + +canvas_width = 256 +canvas_height = 256 +circle = pydiffvg.Circle(radius = torch.tensor(40.0), + center = torch.tensor([128.0, 128.0])) +shapes = [circle] +circle_group = pydiffvg.ShapeGroup(shape_ids = torch.tensor([0]), + fill_color = torch.tensor([0.3, 0.6, 0.3, 1.0])) +shape_groups = [circle_group] +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) + +render = pydiffvg.RenderFunction.apply +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 0, # seed + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/test_eval_positions/target.png') +target = img.clone() + +# Move the circle to produce initial guess +# normalize radius & center for easier learning rate +radius_n = torch.tensor(20.0 / 256.0, requires_grad=True) +center_n = torch.tensor([108.0 / 256.0, 138.0 / 256.0], requires_grad=True) +color = torch.tensor([0.3, 0.2, 0.8, 1.0], requires_grad=True) +circle.radius = radius_n * 256 +circle.center = center_n * 256 +circle_group.fill_color = color +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 1, # seed + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +pydiffvg.imwrite(img.cpu(), 'results/test_eval_positions/init.png') + +# Optimize for radius & center +optimizer = torch.optim.Adam([radius_n, center_n, color], lr=1e-2) +# Run 200 Adam iterations. +for t in range(200): + print('iteration:', t) + optimizer.zero_grad() + # Forward pass: render the image. 
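+    # (each iteration re-serializes the scene so the updated radius/center/color are captured,
+    # and draws fresh random eval_positions so the SDF is compared at new sample points every step)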
+ circle.radius = radius_n * 256 + circle.center = center_n * 256 + circle_group.fill_color = color + # Evaluate 1000 positions + eval_positions = torch.rand(1000, 2) * 256 + # for grid_sample() + grid_eval_positions = (eval_positions / 256.0) * 2.0 - 1.0 + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf, + eval_positions = eval_positions) + samples = render(256, # width + 256, # height + 0, # num_samples_x + 0, # num_samples_y + t+1, # seed + *scene_args) + samples = samples / 256 # Normalize SDF to [0, 1] + target_sampled = torch.nn.functional.grid_sample(\ + target.view(1, 1, target.shape[0], target.shape[1]), + grid_eval_positions.view(1, -1, 1, 2), mode='nearest') + loss = (samples - target_sampled).pow(2).mean() + print('loss:', loss.item()) + + # Backpropagate the gradients. + loss.backward() + # Print the gradients + print('radius.grad:', radius_n.grad) + print('center.grad:', center_n.grad) + print('color.grad:', color.grad) + + # Take a gradient descent step. + optimizer.step() + # Print the current params. + print('radius:', circle.radius) + print('center:', circle.center) + print('color:', circle_group.fill_color) + +# Render the final result. +scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups, + output_type = pydiffvg.OutputType.sdf) +img = render(256, # width + 256, # height + 2, # num_samples_x + 2, # num_samples_y + 102, # seed + *scene_args) +img = img / 256 # Normalize SDF to [0, 1] +# Save the images and differences. +pydiffvg.imwrite(img.cpu(), 'results/test_eval_positions/final.png') diff --git a/apps/textureSyn/1.jpg b/apps/textureSyn/1.jpg new file mode 100755 index 0000000..0e35118 Binary files /dev/null and b/apps/textureSyn/1.jpg differ diff --git a/apps/textureSyn/2.jpg b/apps/textureSyn/2.jpg new file mode 100755 index 0000000..c2f6ea1 Binary files /dev/null and b/apps/textureSyn/2.jpg differ diff --git a/apps/textureSyn/3.jpg b/apps/textureSyn/3.jpg new file mode 100755 index 0000000..a3270d6 Binary files /dev/null and b/apps/textureSyn/3.jpg differ diff --git a/apps/textureSyn/make_gif.py b/apps/textureSyn/make_gif.py new file mode 100644 index 0000000..906c0ee --- /dev/null +++ b/apps/textureSyn/make_gif.py @@ -0,0 +1,26 @@ +# for gif making +import imageio +import numpy as np +import os +from PIL import Image +from math import floor + +def make_gif(savePath, outputPath, frame_every_X_steps=15, repeat_ending=15, total_iter=200): + number_files = len(os.listdir(savePath)) - 2 + frame_every_X_steps = frame_every_X_steps + repeat_ending = repeat_ending + steps = np.arange(floor(number_files / frame_every_X_steps)) * frame_every_X_steps + steps = steps + (number_files - np.max(steps)) + + images = [] + for f in range(total_iter-1): + # for f in steps: + filename = savePath + 'iter_' + str(f+1) + '.png' + images.append(imageio.imread(filename)) + + # repeat ending + for _ in range(repeat_ending): + filename = savePath + 'final.png' + # filename = savePath + 'iter_' + str(number_files) + '.png' + images.append(imageio.imread(filename)) + imageio.mimsave(outputPath, images) \ No newline at end of file diff --git a/apps/textureSyn/patchBasedTextureSynthesis.py b/apps/textureSyn/patchBasedTextureSynthesis.py new file mode 100755 index 0000000..91ce585 --- /dev/null +++ b/apps/textureSyn/patchBasedTextureSynthesis.py @@ -0,0 +1,388 @@ +#imports +import numpy as np +import matplotlib.pyplot as plt +import os + +from 
math import floor, ceil +from random import randint + +from sklearn.neighbors import KDTree +from skimage.util.shape import view_as_windows +from skimage import io + +from PIL import Image, ImageDraw +from IPython.display import clear_output + +class patchBasedTextureSynthesis: + + def __init__(self, exampleMapPath, in_outputPath, in_outputSize, in_patchSize, in_overlapSize, in_windowStep = 5, in_mirror_hor = True, in_mirror_vert = True, in_shapshots = True): + self.exampleMap = self.loadExampleMap(exampleMapPath) + self.snapshots = in_shapshots + self.outputPath = in_outputPath + self.outputSize = in_outputSize + self.patchSize = in_patchSize + self.overlapSize = in_overlapSize + self.mirror_hor = in_mirror_hor + self.mirror_vert = in_mirror_vert + self.total_patches_count = 0 #excluding mirrored versions + self.windowStep = 5 + self.iter = 0 + + self.checkIfDirectoryExists() #check if output directory exists + self.examplePatches = self.prepareExamplePatches() + self.canvas, self.filledMap, self.idMap = self.initCanvas() + self.initFirstPatch() #place random block to start with + self.kdtree_topOverlap, self.kdtree_leftOverlap, self.kdtree_combined = self.initKDtrees() + + self.PARM_truncation = 0.8 + self.PARM_attenuation = 2 + + def checkIfDirectoryExists(self): + if not os.path.exists(self.outputPath): + os.makedirs(self.outputPath) + + def resolveAll(self): + self.saveParams() + #resolve all unresolved patches + for i in range(np.sum(1-self.filledMap).astype(int)): + self.resolveNext() + + if not self.snapshots: + img = Image.fromarray(np.uint8(self.canvas*255)) + img = img.resize((self.outputSize[0], self.outputSize[1]), resample=0, box=None) + img.save(self.outputPath + 'out.jpg') + # else: + # self.visualize([0,0], [], [], showCandidates=False) + return img + def saveParams(self): + #write + text_file = open(self.outputPath + 'params.txt', "w") + text_file.write("PatchSize: %d \nOverlapSize: %d \nMirror Vert: %d \nMirror Hor: %d" % (self.patchSize, self.overlapSize, self.mirror_vert, self.mirror_hor)) + text_file.close() + + def resolveNext(self): + #coordinate of the next one to resolve + coord = self.idCoordTo2DCoord(np.sum(self.filledMap), np.shape(self.filledMap)) #get 2D coordinate of next to resolve patch + #get overlap areas of the patch we want to resolve + overlapArea_Top = self.getOverlapAreaTop(coord) + overlapArea_Left = self.getOverlapAreaLeft(coord) + #find most similar patch from the examples + dist, ind = self.findMostSimilarPatches(overlapArea_Top, overlapArea_Left, coord) + + if self.mirror_hor or self.mirror_vert: + #check that top and left neighbours are not mirrors + dist, ind = self.checkForMirrors(dist, ind, coord) + + #choose random valid patch + probabilities = self.distances2probability(dist, self.PARM_truncation, self.PARM_attenuation) + chosenPatchId = np.random.choice(ind, 1, p=probabilities) + + #update canvas + blend_top = (overlapArea_Top is not None) + blend_left = (overlapArea_Left is not None) + self.updateCanvas(chosenPatchId, coord[0], coord[1], blend_top, blend_left) + + #update filledMap and id map ;) + self.filledMap[coord[0], coord[1]] = 1 + self.idMap[coord[0], coord[1]] = chosenPatchId + + #visualize + # self.visualize(coord, chosenPatchId, ind) + + self.iter += 1 + + def visualize(self, coord, chosenPatchId, nonchosenPatchId, showCandidates = True): + #full visualization includes both example and generated img + canvasSize = np.shape(self.canvas) + #insert generated image + vis = np.zeros((canvasSize[0], canvasSize[1] * 2, 3)) + 0.2 + 
vis[:, 0:canvasSize[1]] = self.canvas + #insert example + exampleHighlited = np.copy(self.exampleMap) + if showCandidates: + exampleHighlited = self.hightlightPatchCandidates(chosenPatchId, nonchosenPatchId) + h = floor(canvasSize[0] / 2) + w = floor(canvasSize[1] / 2) + exampleResized = self.resize(exampleHighlited, [h, w]) + offset_h = floor((canvasSize[0] - h) / 2) + offset_w = floor((canvasSize[1] - w) / 2) + + vis[offset_h:offset_h+h, canvasSize[1]+offset_w:canvasSize[1]+offset_w+w] = exampleResized + + #show live update + plt.imshow(vis) + clear_output(wait=True) + display(plt.show()) + + if self.snapshots: + img = Image.fromarray(np.uint8(vis*255)) + img = img.resize((self.outputSize[0]*2, self.outputSize[1]), resample=0, box=None) + img.save(self.outputPath + 'out' + str(self.iter) + '.jpg') + + def hightlightPatchCandidates(self, chosenPatchId, nonchosenPatchId): + + result = np.copy(self.exampleMap) + + #mod patch ID + chosenPatchId = chosenPatchId[0] % self.total_patches_count + if len(nonchosenPatchId)>0: + nonchosenPatchId = nonchosenPatchId % self.total_patches_count + #exlcude chosen from nonchosen + nonchosenPatchId = np.delete(nonchosenPatchId, np.where(nonchosenPatchId == chosenPatchId)) + #highlight non chosen candidates + c = [0.25, 0.9 ,0.45] + self.highlightPatches(result, nonchosenPatchId, color=c, highlight_width = 4, alpha = 0.5) + + #hightlight chosen + c = [1.0, 0.25, 0.15] + self.highlightPatches(result, [chosenPatchId], color=c, highlight_width = 4, alpha = 1) + + return result + + def highlightPatches(self, writeResult, patchesIDs, color, highlight_width = 2, solid = False, alpha = 0.1): + + searchWindow = self.patchSize + 2*self.overlapSize + + #number of possible steps + row_steps = floor((np.shape(writeResult)[0] - searchWindow) / self.windowStep) + 1 + col_steps = floor((np.shape(writeResult)[1] - searchWindow) / self.windowStep) + 1 + + for i in range(len(patchesIDs)): + + chosenPatchId = patchesIDs[i] + + #patch Id to step + patch_row = floor(chosenPatchId / col_steps) + patch_col = chosenPatchId - patch_row * col_steps + + #highlight chosen patch (below are boundaries of the example patch) + row_start = self.windowStep* patch_row + row_end = self.windowStep * patch_row + searchWindow + col_start = self.windowStep * patch_col + col_end = self.windowStep * patch_col + searchWindow + + if not solid: + w = highlight_width + overlap = np.copy(writeResult[row_start:row_start+w, col_start:col_end]) + writeResult[row_start:row_start+w, col_start:col_end] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #top + overlap = np.copy(writeResult[row_end-w:row_end, col_start:col_end]) + writeResult[row_end-w:row_end, col_start:col_end] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #bot + overlap = np.copy( writeResult[row_start:row_end, col_start:col_start+w]) + writeResult[row_start:row_end, col_start:col_start+w] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #left + overlap = np.copy(writeResult[row_start:row_end, col_end-w:col_end]) + writeResult[row_start:row_end, col_end-w:col_end] = overlap * (1-alpha) + (np.zeros(np.shape(overlap))+color) * alpha #end + else: + a = alpha + writeResult[row_start:row_end, col_start:col_end] = writeResult[row_start:row_end, col_start:col_end] * (1-a) + (np.zeros(np.shape(writeResult[row_start:row_end, col_start:col_end]))+color) * a + + + def resize(self, imgArray, targetSize): + img = Image.fromarray(np.uint8(imgArray*255)) + img = img.resize((targetSize[0], 
targetSize[1]), resample=0, box=None) + return np.array(img)/255 + + def findMostSimilarPatches(self, overlapArea_Top, overlapArea_Left, coord, in_k=5): + + #check which KD tree we need to use + if (overlapArea_Top is not None) and (overlapArea_Left is not None): + combined = self.getCombinedOverlap(overlapArea_Top.reshape(-1), overlapArea_Left.reshape(-1)) + dist, ind = self.kdtree_combined.query([combined], k=in_k) + elif overlapArea_Top is not None: + dist, ind = self.kdtree_topOverlap.query([overlapArea_Top.reshape(-1)], k=in_k) + elif overlapArea_Left is not None: + dist, ind = self.kdtree_leftOverlap.query([overlapArea_Left.reshape(-1)], k=in_k) + else: + raise Exception("ERROR: no valid overlap area is passed to -findMostSimilarPatch-") + dist = dist[0] + ind = ind[0] + + return dist, ind + + #disallow visually similar blocks to be placed next to each other + def checkForMirrors(self, dist, ind, coord, thres = 3): + remove_i = [] + #do I have a top or left neighbour + if coord[0]-1>-1: + top_neigh = int(self.idMap[coord[0]-1, coord[1]]) + for i in range(len(ind)): + if (abs(ind[i]%self.total_patches_count - top_neigh%self.total_patches_count) < thres): + remove_i.append(i) + if coord[1]-1>-1: + left_neigh = int(self.idMap[coord[0], coord[1]-1]) + for i in range(len(ind)): + if (abs(ind[i]%self.total_patches_count - left_neigh%self.total_patches_count) < thres): + remove_i.append(i) + + dist = np.delete(dist, remove_i) + ind = np.delete(ind, remove_i) + + return dist, ind + + + def distances2probability(self, distances, PARM_truncation, PARM_attenuation): + + probabilities = 1 - distances / np.max(distances) + probabilities *= (probabilities > PARM_truncation) + probabilities = pow(probabilities, PARM_attenuation) #attenuate the values + #check if we didn't truncate everything! 
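+        # e.g. distances [5.0, 5.5, 6.0] normalize to [0.17, 0.08, 0.0]; with PARM_truncation = 0.8
+        # the mask above zeroes every entry, so fall back to a relative threshold below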
+ if np.sum(probabilities) == 0: + #then just revert it + probabilities = 1 - distances / np.max(distances) + probabilities *= (probabilities > PARM_truncation*np.max(probabilities)) # truncate the values (we want top truncate%) + probabilities = pow(probabilities, PARM_attenuation) + probabilities /= np.sum(probabilities) #normalize so they add up to one + + return probabilities + + def getOverlapAreaTop(self, coord): + #do I have a top neighbour + if coord[0]-1>-1: + canvasPatch = self.patchCoord2canvasPatch(coord) + return canvasPatch[0:self.overlapSize, :, :] + else: + return None + + def getOverlapAreaLeft(self, coord): + #do I have a left neighbour + if coord[1]-1>-1: + canvasPatch = self.patchCoord2canvasPatch(coord) + return canvasPatch[:, 0:self.overlapSize, :] + else: + return None + + def initKDtrees(self): + #prepate overlap patches + topOverlap = self.examplePatches[:, 0:self.overlapSize, :, :] + leftOverlap = self.examplePatches[:, :, 0:self.overlapSize, :] + shape_top = np.shape(topOverlap) + shape_left = np.shape(leftOverlap) + + flatten_top = topOverlap.reshape(shape_top[0], -1) + flatten_left = leftOverlap.reshape(shape_left[0], -1) + flatten_combined = self.getCombinedOverlap(flatten_top, flatten_left) + + tree_top = KDTree(flatten_top) + tree_left = KDTree(flatten_left) + tree_combined = KDTree(flatten_combined) + + return tree_top, tree_left, tree_combined + + #the corner of 2 overlaps is counted double + def getCombinedOverlap(self, top, left): + shape = np.shape(top) + if len(shape) > 1: + combined = np.zeros((shape[0], shape[1]*2)) + combined[0:shape[0], 0:shape[1]] = top + combined[0:shape[0], shape[1]:shape[1]*2] = left + else: + combined = np.zeros((shape[0]*2)) + combined[0:shape[0]] = top + combined[shape[0]:shape[0]*2] = left + return combined + + def initFirstPatch(self): + #grab a random block + patchId = randint(0, np.shape(self.examplePatches)[0]) + #mark out fill map + self.filledMap[0, 0] = 1 + self.idMap[0, 0] = patchId % self.total_patches_count + #update canvas + self.updateCanvas(patchId, 0, 0, False, False) + #visualize + # self.visualize([0,0], [patchId], []) + + + def prepareExamplePatches(self): + + searchKernelSize = self.patchSize + 2 * self.overlapSize + + result = view_as_windows(self.exampleMap, [searchKernelSize, searchKernelSize, 3] , self.windowStep) + shape = np.shape(result) + result = result.reshape(shape[0]*shape[1], searchKernelSize, searchKernelSize, 3) + + self.total_patches_count = shape[0]*shape[1] + + if self.mirror_hor: + #flip along horizonal axis + hor_result = np.zeros(np.shape(result)) + + for i in range(self.total_patches_count): + hor_result[i] = result[i][::-1, :, :] + + result = np.concatenate((result, hor_result)) + if self.mirror_vert: + vert_result = np.zeros((shape[0]*shape[1], searchKernelSize, searchKernelSize, 3)) + + for i in range(self.total_patches_count): + vert_result[i] = result[i][:, ::-1, :] + + result = np.concatenate((result, vert_result)) + + return result + + def initCanvas(self): + + #check whether the outputSize adheres to patch+overlap size + num_patches_X = ceil((self.outputSize[0]-self.overlapSize)/(self.patchSize+self.overlapSize)) + num_patches_Y = ceil((self.outputSize[1]-self.overlapSize)/(self.patchSize+self.overlapSize)) + #calc needed output image size + required_size_X = num_patches_X*self.patchSize + (num_patches_X+1)*self.overlapSize + required_size_Y = num_patches_Y*self.patchSize + (num_patches_X+1)*self.overlapSize + + #create empty canvas + canvas = np.zeros((required_size_X, 
required_size_Y, 3)) + filledMap = np.zeros((num_patches_X, num_patches_Y)) #map showing which patches have been resolved + idMap = np.zeros((num_patches_X, num_patches_Y)) - 1 #stores patches id + + print("modified output size: ", np.shape(canvas)) + print("number of patches: ", np.shape(filledMap)[0]) + + return canvas, filledMap, idMap + + def idCoordTo2DCoord(self, idCoord, imgSize): + row = int(floor(idCoord / imgSize[0])) + col = int(idCoord - row * imgSize[1]) + return [row, col] + + def updateCanvas(self, inputPatchId, coord_X, coord_Y, blendTop = False, blendLeft = False): + #translate Patch coordinate into Canvas coordinate + x_range = self.patchCoord2canvasCoord(coord_X) + y_range = self.patchCoord2canvasCoord(coord_Y) + examplePatch = self.examplePatches[inputPatchId] + if blendLeft: + canvasOverlap = self.canvas[x_range[0]:x_range[1], y_range[0]:y_range[0]+self.overlapSize] + examplePatchOverlap = np.copy(examplePatch[0][:, 0:self.overlapSize]) + examplePatch[0][:, 0:self.overlapSize] = self.linearBlendOverlaps(canvasOverlap, examplePatchOverlap, 'left') + if blendTop: + canvasOverlap = self.canvas[x_range[0]:x_range[0]+self.overlapSize, y_range[0]:y_range[1]] + examplePatchOverlap = np.copy(examplePatch[0][0:self.overlapSize, :]) + examplePatch[0][0:self.overlapSize, :] = self.linearBlendOverlaps(canvasOverlap, examplePatchOverlap, 'top') + self.canvas[x_range[0]:x_range[1], y_range[0]:y_range[1]] = examplePatch + + def linearBlendOverlaps(self, canvasOverlap, examplePatchOverlap, mode): + if mode == 'left': + mask = np.repeat(np.arange(self.overlapSize)[np.newaxis, :], np.shape(canvasOverlap)[0], axis=0) / self.overlapSize + elif mode == 'top': + mask = np.repeat(np.arange(self.overlapSize)[:, np.newaxis], np.shape(canvasOverlap)[1], axis=1) / self.overlapSize + mask = np.repeat(mask[:, :, np.newaxis], 3, axis=2) #cast to 3d array + return canvasOverlap * (1 - mask) + examplePatchOverlap * mask + + #def minimumBoundaryError(self, canvasOverlap, examplePatchOverlap, mode) + + def patchCoord2canvasCoord(self, coord): + return [(self.patchSize+self.overlapSize)*coord, (self.patchSize+self.overlapSize)*(coord+1) + self.overlapSize] + + def patchCoord2canvasPatch(self, coord): + x_range = self.patchCoord2canvasCoord(coord[0]) + y_range = self.patchCoord2canvasCoord(coord[1]) + return np.copy(self.canvas[x_range[0]:x_range[1], y_range[0]:y_range[1]]) + + def loadExampleMap(self, exampleMapPath): + exampleMap = io.imread(exampleMapPath) #returns an MxNx3 array + exampleMap = exampleMap / 255.0 #normalize + #make sure it is 3channel RGB + if (np.shape(exampleMap)[-1] > 3): + exampleMap = exampleMap[:,:,:3] #remove Alpha Channel + elif (len(np.shape(exampleMap)) == 2): + exampleMap = np.repeat(exampleMap[np.newaxis, :, :], 3, axis=0) #convert from Grayscale to RGB + return exampleMap diff --git a/apps/textureSyn/traced_1.png b/apps/textureSyn/traced_1.png new file mode 100644 index 0000000..f3af50e Binary files /dev/null and b/apps/textureSyn/traced_1.png differ diff --git a/apps/textureSyn/traced_1.svg b/apps/textureSyn/traced_1.svg new file mode 100644 index 0000000..d811c65 --- /dev/null +++ b/apps/textureSyn/traced_1.svg @@ -0,0 +1,1969 @@ + + + + +traced_1 + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + 
+ [traced_1.svg: vector path markup not preserved in this dump]
diff --git a/apps/textureSyn/traced_2.png b/apps/textureSyn/traced_2.png new file mode 100644 index 0000000..614fed6 Binary files /dev/null and b/apps/textureSyn/traced_2.png differ diff --git a/apps/textureSyn/traced_2.svg b/apps/textureSyn/traced_2.svg new file mode 100644 index 0000000..478292e --- /dev/null +++ b/apps/textureSyn/traced_2.svg @@ -0,0 +1,8204 @@
+ [traced_2.svg: vector path markup not preserved in this dump]
diff --git a/apps/textureSyn/traced_3.png b/apps/textureSyn/traced_3.png new file mode 100644 index 0000000..f0fe2f2 Binary files /dev/null and b/apps/textureSyn/traced_3.png differ diff --git a/apps/textureSyn/traced_3.svg b/apps/textureSyn/traced_3.svg new file mode 100644 index 0000000..0403cdc --- /dev/null +++ b/apps/textureSyn/traced_3.svg @@ -0,0 +1,13302 @@
+ [traced_3.svg: vector path markup not preserved in this dump]
+ + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + + diff --git a/apps/texture_synthesis.py b/apps/texture_synthesis.py new file mode 100644 index 0000000..3a7ccce --- /dev/null +++ b/apps/texture_synthesis.py @@ -0,0 +1,180 @@ +import os, sys +import pydiffvg +import argparse +import torch +# import torch as th +import scipy.ndimage.filters as filters +# import numba +import numpy as np +from skimage import io +sys.path.append('./textureSyn') +from patchBasedTextureSynthesis import * +from make_gif import make_gif +import random +import ttools.modules + +from svgpathtools import svg2paths2, Path, is_path_segment +""" +python texture_synthesis.py textureSyn/traced_1.png --svg-path textureSyn/traced_1.svg --case 1 +""" + +def texture_syn(img_path): + ## get the width and height first + # input_img = io.imread(img_path) # returns an MxNx3 array + # output_size = [input_img.shape[1], input_img.shape[0]] + # output_path = "textureSyn/1/" + output_path = "results/texture_synthesis/%d"%(args.case) + patch_size = 40 # size of the patch 
(without the overlap) + overlap_size = 10 # the width of the overlap region + output_size = [300, 300] + pbts = patchBasedTextureSynthesis(img_path, output_path, output_size, patch_size, overlap_size, in_windowStep=5, + in_mirror_hor=True, in_mirror_vert=True, in_shapshots=False) + target_img = pbts.resolveAll() + return np.array(target_img) + + +def render(canvas_width, canvas_height, shapes, shape_groups, samples=2): + _render = pydiffvg.RenderFunction.apply + scene_args = pydiffvg.RenderFunction.serialize_scene(\ + canvas_width, canvas_height, shapes, shape_groups) + img = _render(canvas_width, # width + canvas_height, # height + samples, # num_samples_x + samples, # num_samples_y + 0, # seed + None, + *scene_args) + return img + +def big_bounding_box(paths_n_stuff): + """Finds a BB containing a collection of paths, Bezier path segments, and + points (given as complex numbers).""" + bbs = [] + for thing in paths_n_stuff: + if is_path_segment(thing) or isinstance(thing, Path): + bbs.append(thing.bbox()) + elif isinstance(thing, complex): + bbs.append((thing.real, thing.real, thing.imag, thing.imag)) + else: + try: + complexthing = complex(thing) + bbs.append((complexthing.real, complexthing.real, + complexthing.imag, complexthing.imag)) + except ValueError: + raise TypeError( + "paths_n_stuff can only contains Path, CubicBezier, " + "QuadraticBezier, Line, and complex objects.") + xmins, xmaxs, ymins, ymaxs = list(zip(*bbs)) + xmin = min(xmins) + xmax = max(xmaxs) + ymin = min(ymins) + ymax = max(ymaxs) + return xmin, xmax, ymin, ymax + + +def main(args): + ## set device -> use cpu now since I haven't solved the nvcc issue + pydiffvg.set_use_gpu(False) + # pydiffvg.set_device(torch.device('cuda:1')) + ## use L2 for now + # perception_loss = ttools.modules.LPIPS().to(pydiffvg.get_device()) + + ## generate a texture synthesized + target_img = texture_syn(args.target) + tar_h, tar_w = target_img.shape[1], target_img.shape[0] + canvas_width, canvas_height, shapes, shape_groups = \ + pydiffvg.svg_to_scene(args.svg_path) + + + ## svgpathtools for checking the bounding box + # paths, _, _ = svg2paths2(args.svg_path) + # print(len(paths)) + # xmin, xmax, ymin, ymax = big_bounding_box(paths) + # print(xmin, xmax, ymin, ymax) + # input("check") + + + print('tar h : %d tar w : %d'%(tar_h, tar_w)) + print('canvas h : %d canvas w : %d' % (canvas_height, canvas_width)) + scale_ratio = tar_h / canvas_height + print("scale ratio : ", scale_ratio) + # input("check") + for path in shapes: + path.points[..., 0] = path.points[..., 0] * scale_ratio + path.points[..., 1] = path.points[..., 1] * scale_ratio + + init_img = render(tar_w, tar_h, shapes, shape_groups) + pydiffvg.imwrite(init_img.cpu(), 'results/texture_synthesis/%d/init.png'%(args.case), gamma=2.2) + # input("check") + random.seed(1234) + torch.manual_seed(1234) + + points_vars = [] + for path in shapes: + path.points.requires_grad = True + points_vars.append(path.points) + color_vars = [] + for group in shape_groups: + group.fill_color.requires_grad = True + color_vars.append(group.fill_color) + # Optimize + points_optim = torch.optim.Adam(points_vars, lr=1.0) + color_optim = torch.optim.Adam(color_vars, lr=0.01) + + target = torch.from_numpy(target_img).to(torch.float32) / 255.0 + target = target.pow(2.2) + target = target.to(pydiffvg.get_device()) + target = target.unsqueeze(0) + target = target.permute(0, 3, 1, 2) # NHWC -> NCHW + canvas_width, canvas_height = target.shape[3], target.shape[2] + # print('canvas h : %d canvas w : %d' % 
(canvas_height, canvas_width)) + # input("check") + + for t in range(args.max_iter): + print('iteration:', t) + points_optim.zero_grad() + color_optim.zero_grad() + cur_img = render(canvas_width, canvas_height, shapes, shape_groups) + pydiffvg.imwrite(cur_img.cpu(), 'results/texture_synthesis/%d/iter_%d.png'%(args.case, t), gamma=2.2) + cur_img = cur_img[:, :, :3] + cur_img = cur_img.unsqueeze(0) + cur_img = cur_img.permute(0, 3, 1, 2) # NHWC -> NCHW + + ## perceptual loss + # loss = perception_loss(cur_img, target) + ## l2 loss + loss = (cur_img - target).pow(2).mean() + print('render loss:', loss.item()) + loss.backward() + + points_optim.step() + color_optim.step() + + for group in shape_groups: + group.fill_color.data.clamp_(0.0, 1.0) + ## write svg + if t % 10 == 0 or t == args.max_iter - 1: + pydiffvg.save_svg('results/texture_synthesis/%d/iter_%d.svg'%(args.case, t), + canvas_width, canvas_height, shapes, shape_groups) + + ## render final result + final_img = render(tar_h, tar_w, shapes, shape_groups) + pydiffvg.imwrite(final_img.cpu(), 'results/texture_synthesis/%d/final.png'%(args.case), gamma=2.2) + + + from subprocess import call + call(["ffmpeg", "-framerate", "24", "-i", + "results/texture_synthesis/%d/iter_%d.png"%(args.case), "-vb", "20M", + "results/texture_synthesis/%d/out.mp4"%(args.case)]) + ## make gif + make_gif("results/texture_synthesis/%d"%(args.case), "results/texture_synthesis/%d/out.gif"%(args.case), frame_every_X_steps=1, repeat_ending=3, total_iter=args.max_iter) + +if __name__ == '__main__': + parser = argparse.ArgumentParser() + ## target image path + parser.add_argument("target", help="target image path") + parser.add_argument("--svg-path", type=str, help="the corresponding svg file path") + parser.add_argument("--max-iter", type=int, default=500, help="the max optimization iterations") + parser.add_argument("--case", type=int, default=1, help="just the case id for a separate result folder") + args = parser.parse_args() + main(args) \ No newline at end of file diff --git a/atomic.cpp b/atomic.cpp new file mode 100644 index 0000000..9c642b9 --- /dev/null +++ b/atomic.cpp @@ -0,0 +1,27 @@ +//A hacky solution to get around the Ellipse include + +#ifdef WIN32 +#include +#include + +float win_atomic_add(float &target, float source) { + union { int i; float f; } old_val; + union { int i; float f; } new_val; + do { + old_val.f = target; + new_val.f = old_val.f + (float)source; + } while (InterlockedCompareExchange((LONG*)&target, (LONG)new_val.i, (LONG)old_val.i) != old_val.i); + return old_val.f; +} + +double win_atomic_add(double &target, double source) { + union { int64_t i; double f; } old_val; + union { int64_t i; double f; } new_val; + do { + old_val.f = target; + new_val.f = old_val.f + (double)source; + } while (InterlockedCompareExchange64((LONG64*)&target, (LONG64)new_val.i, (LONG64)old_val.i) != old_val.i); + return old_val.f; +} + +#endif \ No newline at end of file diff --git a/atomic.h b/atomic.h new file mode 100644 index 0000000..c721722 --- /dev/null +++ b/atomic.h @@ -0,0 +1,139 @@ +#pragma once + +#include "diffvg.h" +#include "vector.h" +#include "matrix.h" + +// https://stackoverflow.com/questions/39274472/error-function-atomicadddouble-double-has-already-been-defined +#if !defined(__CUDA_ARCH__) || __CUDA_ARCH__ >= 600 +#else +static inline DEVICE double atomicAdd(double *address, double val) { + unsigned long long int* address_as_ull = (unsigned long long int*)address; + unsigned long long int old = *address_as_ull, assumed; + if (val == 0.0) 
+ return __longlong_as_double(old); + do { + assumed = old; + old = atomicCAS(address_as_ull, assumed, __double_as_longlong(val +__longlong_as_double(assumed))); + } while (assumed != old); + return __longlong_as_double(old); +} +#endif + +#ifndef WIN32 + template + DEVICE + inline T0 atomic_add_(T0 &target, T1 source) { + #ifdef __CUDA_ARCH__ + return atomicAdd(&target, (T0)source); + #else + T0 old_val; + T0 new_val; + do { + old_val = target; + new_val = old_val + source; + } while (!__atomic_compare_exchange(&target, &old_val, &new_val, true, + std::memory_order::memory_order_seq_cst, + std::memory_order::memory_order_seq_cst)); + return old_val; + #endif + } + + DEVICE + inline + float atomic_add(float &target, float source) { + return atomic_add_(target, source); + } + DEVICE + inline + double atomic_add(double &target, double source) { + return atomic_add_(target, source); + } +#else + float win_atomic_add(float &target, float source); + double win_atomic_add(double &target, double source); + DEVICE + static float atomic_add(float &target, float source) { + #ifdef __CUDA_ARCH__ + return atomicAdd(&target, source); + #else + return win_atomic_add(target, source); + #endif + } + DEVICE + static double atomic_add(double &target, double source) { + #ifdef __CUDA_ARCH__ + return atomicAdd(&target, (double)source); + #else + return win_atomic_add(target, source); + #endif + } +#endif + +template +DEVICE +inline T0 atomic_add(T0 *target, T1 source) { + return atomic_add(*target, (T0)source); +} + +template +DEVICE +inline TVector2 atomic_add(TVector2 &target, const TVector2 &source) { + atomic_add(target[0], source[0]); + atomic_add(target[1], source[1]); + return target; +} + +template +DEVICE +inline void atomic_add(T0 *target, const TVector2 &source) { + atomic_add(target[0], (T0)source[0]); + atomic_add(target[1], (T0)source[1]); +} + +template +DEVICE +inline TVector3 atomic_add(TVector3 &target, const TVector3 &source) { + atomic_add(target[0], source[0]); + atomic_add(target[1], source[1]); + atomic_add(target[2], source[2]); + return target; +} + +template +DEVICE +inline void atomic_add(T0 *target, const TVector3 &source) { + atomic_add(target[0], (T0)source[0]); + atomic_add(target[1], (T0)source[1]); + atomic_add(target[2], (T0)source[2]); +} + +template +DEVICE +inline TVector4 atomic_add(TVector4 &target, const TVector4 &source) { + atomic_add(target[0], source[0]); + atomic_add(target[1], source[1]); + atomic_add(target[2], source[2]); + atomic_add(target[3], source[3]); + return target; +} + +template +DEVICE +inline void atomic_add(T0 *target, const TVector4 &source) { + atomic_add(target[0], (T0)source[0]); + atomic_add(target[1], (T0)source[1]); + atomic_add(target[2], (T0)source[2]); + atomic_add(target[3], (T0)source[3]); +} + +template +DEVICE +inline void atomic_add(T0 *target, const TMatrix3x3 &source) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + atomic_add(target[3 * i + j], (T0)source(i, j)); + } + } +} + diff --git a/cdf.h b/cdf.h new file mode 100644 index 0000000..48a64f8 --- /dev/null +++ b/cdf.h @@ -0,0 +1,29 @@ +#pragma once + +#include "diffvg.h" + +DEVICE int sample(const float *cdf, int num_entries, float u, float *updated_u = nullptr) { + // Binary search the cdf + auto lb = 0; + auto len = num_entries - 1 - lb; + while (len > 0) { + auto half_len = len / 2; + auto mid = lb + half_len; + assert(mid >= 0 && mid < num_entries); + if (u < cdf[mid]) { + len = half_len; + } else { + lb = mid + 1; + len = len - half_len - 1; + } + } + lb 
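+    // At this point lb is the smallest index with u < cdf[lb], assuming a
+    // non-decreasing cdf whose final entry is 1; the clamp below is only a
+    // defensive guard against floating-point round-off near u = 1.
+    // Illustrative example (not part of this commit): with cdf = {0.25f, 0.75f, 1.0f}
+    // and u = 0.5f, the search returns lb = 1 and rescales *updated_u to
+    // (0.5 - 0.25) / (0.75 - 0.25) = 0.5, ready for reuse in a nested sample.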
= clamp(lb, 0, num_entries - 1); + if (updated_u != nullptr) { + if (lb > 0) { + *updated_u = (u - cdf[lb - 1]) / (cdf[lb] - cdf[lb - 1]); + } else { + *updated_u = u / cdf[lb]; + } + } + return lb; +} diff --git a/cmake/FindTensorFlow.cmake b/cmake/FindTensorFlow.cmake new file mode 100644 index 0000000..b251b10 --- /dev/null +++ b/cmake/FindTensorFlow.cmake @@ -0,0 +1,34 @@ +# https://github.com/PatWie/tensorflow-cmake/blob/master/cmake/modules/FindTensorFlow.cmake + +execute_process( + COMMAND python -c "exec(\"try:\\n import tensorflow as tf; print(tf.__version__); print(tf.__cxx11_abi_flag__);print(tf.sysconfig.get_include()); print(tf.sysconfig.get_lib())\\nexcept ImportError:\\n exit(1)\")" + OUTPUT_VARIABLE TF_INFORMATION_STRING + OUTPUT_STRIP_TRAILING_WHITESPACE + RESULT_VARIABLE retcode) + +if("${retcode}" STREQUAL "0") + string(REPLACE "\n" ";" TF_INFORMATION_LIST ${TF_INFORMATION_STRING}) + list(GET TF_INFORMATION_LIST 0 TF_DETECTED_VERSION) + list(GET TF_INFORMATION_LIST 1 TF_DETECTED_ABI) + list(GET TF_INFORMATION_LIST 2 TF_DETECTED_INCLUDE_DIR) + list(GET TF_INFORMATION_LIST 3 TF_DETECTED_LIBRARY_DIR) + if(WIN32) + find_library(TF_DETECTED_LIBRARY NAMES _pywrap_tensorflow_internal PATHS + ${TF_DETECTED_LIBRARY_DIR}/python) + else() + # For some reason my tensorflow doesn't have a .so file + list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.1) + list(APPEND CMAKE_FIND_LIBRARY_SUFFIXES .so.2) + find_library(TF_DETECTED_LIBRARY NAMES tensorflow_framework PATHS + ${TF_DETECTED_LIBRARY_DIR}) + endif() + set(TensorFlow_VERSION ${TF_DETECTED_VERSION}) + set(TensorFlow_ABI ${TF_DETECTED_ABI}) + set(TensorFlow_INCLUDE_DIR ${TF_DETECTED_INCLUDE_DIR}) + set(TensorFlow_LIBRARY ${TF_DETECTED_LIBRARY}) + if(TensorFlow_LIBRARY AND TensorFlow_INCLUDE_DIR) + set(TensorFlow_FOUND TRUE) + else() + set(TensorFlow_FOUND FALSE) + endif() +endif() diff --git a/cmake/FindThrust.cmake b/cmake/FindThrust.cmake new file mode 100644 index 0000000..61eef29 --- /dev/null +++ b/cmake/FindThrust.cmake @@ -0,0 +1,40 @@ +##============================================================================= +## +## Copyright (c) Kitware, Inc. +## All rights reserved. +## See LICENSE.txt for details. +## +## This software is distributed WITHOUT ANY WARRANTY; without even +## the implied warranty of MERCHANTABILITY or FITNESS FOR A PARTICULAR +## PURPOSE. See the above copyright notice for more information. +## +## Copyright 2012 Sandia Corporation. +## Under the terms of Contract DE-AC04-94AL85000 with Sandia Corporation, +## the U.S. Government retains certain rights in this software. +## +##============================================================================= + +# +# FindThrust +# +# This module finds the Thrust header files and extrats their version. It +# sets the following variables. +# +# THRUST_INCLUDE_DIR - Include directory for thrust header files. (All header +# files will actually be in the thrust subdirectory.) +# THRUST_VERSION - Version of thrust in the form "major.minor.patch". 
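+# Note: as vendored here the module below only locates thrust/version.h and sets
+# THRUST_INCLUDE_DIR / THRUST_FOUND; THRUST_VERSION itself is not parsed out of
+# the header, so callers should not rely on it.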
+# + +find_path(THRUST_INCLUDE_DIR + HINTS /usr/include/cuda + /usr/local/include + /usr/local/cuda/include + ${CUDA_INCLUDE_DIRS} + ./thrust + ../thrust + NAMES thrust/version.h +) + +if (THRUST_INCLUDE_DIR) + set(THRUST_FOUND TRUE) +endif () \ No newline at end of file diff --git a/color.cpp b/color.cpp new file mode 100644 index 0000000..2a2e8ab --- /dev/null +++ b/color.cpp @@ -0,0 +1,25 @@ +#include "color.h" + +void LinearGradient::copy_to(ptr stop_offsets, + ptr stop_colors) const { + float *o = stop_offsets.get(); + float *c = stop_colors.get(); + for (int i = 0; i < num_stops; i++) { + o[i] = this->stop_offsets[i]; + } + for (int i = 0; i < 4 * num_stops; i++) { + c[i] = this->stop_colors[i]; + } +} + +void RadialGradient::copy_to(ptr stop_offsets, + ptr stop_colors) const { + float *o = stop_offsets.get(); + float *c = stop_colors.get(); + for (int i = 0; i < num_stops; i++) { + o[i] = this->stop_offsets[i]; + } + for (int i = 0; i < 4 * num_stops; i++) { + c[i] = this->stop_colors[i]; + } +} diff --git a/color.h b/color.h new file mode 100644 index 0000000..c787105 --- /dev/null +++ b/color.h @@ -0,0 +1,63 @@ +#pragma once + +#include "diffvg.h" +#include "vector.h" +#include "ptr.h" + +enum class ColorType { + Constant, + LinearGradient, + RadialGradient +}; + +struct Constant { + Vector4f color; + + ptr get_ptr() { + return ptr(this); + } +}; + +struct LinearGradient { + LinearGradient(const Vector2f &begin, + const Vector2f &end, + int num_stops, + ptr stop_offsets, + ptr stop_colors) + : begin(begin), end(end), num_stops(num_stops), + stop_offsets(stop_offsets.get()), stop_colors(stop_colors.get()) {} + + ptr get_ptr() { + return ptr(this); + } + + void copy_to(ptr stop_offset, + ptr stop_colors) const; + + Vector2f begin, end; + int num_stops; + float *stop_offsets; + float *stop_colors; // rgba +}; + +struct RadialGradient { + RadialGradient(const Vector2f ¢er, + const Vector2f &radius, + int num_stops, + ptr stop_offsets, + ptr stop_colors) + : center(center), radius(radius), num_stops(num_stops), + stop_offsets(stop_offsets.get()), stop_colors(stop_colors.get()) {} + + ptr get_ptr() { + return ptr(this); + } + + void copy_to(ptr stop_offset, + ptr stop_colors) const; + + Vector2f center, radius; + int num_stops; + float *stop_offsets; + float *stop_colors; // rgba +}; diff --git a/compute_distance.h b/compute_distance.h new file mode 100644 index 0000000..c125641 --- /dev/null +++ b/compute_distance.h @@ -0,0 +1,949 @@ +#pragma once + +#include "diffvg.h" +#include "edge_query.h" +#include "scene.h" +#include "shape.h" +#include "solve.h" +#include "vector.h" + +#include + +struct ClosestPointPathInfo { + int base_point_id; + int point_id; + float t_root; +}; + +DEVICE +inline +bool closest_point(const Circle &circle, const Vector2f &pt, + Vector2f *result) { + *result = circle.center + circle.radius * normalize(pt - circle.center); + return false; +} + +DEVICE +inline +bool closest_point(const Path &path, const BVHNode *bvh_nodes, const Vector2f &pt, float max_radius, + ClosestPointPathInfo *path_info, + Vector2f *result) { + auto min_dist = max_radius; + auto ret_pt = Vector2f{0, 0}; + auto found = false; + auto num_segments = path.num_base_points; + constexpr auto max_bvh_size = 128; + int bvh_stack[max_bvh_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * num_segments - 2; + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto base_point_id = node.child0; + auto point_id = - 
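+            // Leaf encoding: a negative child1 marks a leaf, child0 stores the id of
+            // the segment (base point), and child1 holds -(first point id) - 1, so the
+            // negation below recovers the index of the segment's first point.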
node.child1 - 1; + assert(base_point_id < num_segments); + assert(point_id < path.num_points); + auto dist = 0.f; + auto closest_pt = Vector2f{0, 0}; + auto t_root = 0.f; + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + dist = distance(p0, pt); + closest_pt = p0; + t_root = 0; + } else if (t > 1) { + dist = distance(p1, pt); + closest_pt = p1; + t_root = 1; + } else { + dist = distance(p0 + t * (p1 - p0), pt); + closest_pt = p0 + t * (p1 - p0); + t_root = t; + } + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + if (path.use_distance_approx) { + closest_pt = quadratic_closest_pt_approx(p0, p1, p2, pt, &t_root); + dist = distance(closest_pt, pt); + } else { + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + }; + auto pt0 = eval(0); + auto pt1 = eval(1); + auto dist0 = distance(pt0, pt); + auto dist1 = distance(pt1, pt); + { + dist = dist0; + closest_pt = pt0; + t_root = 0; + } + if (dist1 < dist) { + dist = dist1; + closest_pt = pt1; + t_root = 1; + } + // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2 + // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 = q + // Want to solve (q - pt) dot q' = 0 + // q' = (p0-2p1+p2)t + (-p0+p1) + // Expanding (p0-2p1+p2)^2 t^3 + + // 3(p0-2p1+p2)(-p0+p1) t^2 + + // (2(-p0+p1)^2+(p0-2p1+p2)(p0-pt))t + + // (-p0+p1)(p0-pt) = 0 + auto A = sum((p0-2*p1+p2)*(p0-2*p1+p2)); + auto B = sum(3*(p0-2*p1+p2)*(-p0+p1)); + auto C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)); + auto D = sum((-p0+p1)*(p0-pt)); + float t[3]; + int num_sol = solve_cubic(A, B, C, D, t); + for (int j = 0; j < num_sol; j++) { + if (t[j] >= 0 && t[j] <= 1) { + auto p = eval(t[j]); + auto distp = distance(p, pt); + if (distp < dist) { + dist = distp; + closest_pt = p; + t_root = t[j]; + } + } + } + } + } else if (path.num_control_points[base_point_id] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + }; + auto pt0 = eval(0); + auto pt1 = eval(1); + auto dist0 = distance(pt0, pt); + auto dist1 = distance(pt1, pt); + { + dist = dist0; + closest_pt = pt0; + t_root = 0; + } + if (dist1 < dist) { + dist = dist1; + closest_pt = pt1; + t_root = 1; + } + // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3 + // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 + // Want to solve (q - pt) dot q' = 0 + // q' = 3*(-p0+3p1-3p2+p3)t^2 + 2*(3p0-6p1+3p2)t + (-3p0+3p1) + // Expanding + 
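+                // (the stationarity condition above is a degree-5 polynomial in t;
+                //  its coefficients are listed term by term below, and its real roots
+                //  in [0, 1] are the interior candidates for the closest point)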
// 3*(-p0+3p1-3p2+p3)^2 t^5 + // 5*(-p0+3p1-3p2+p3)(3p0-6p1+3p2) t^4 + // 4*(-p0+3p1-3p2+p3)(-3p0+3p1) + 2*(3p0-6p1+3p2)^2 t^3 + // 3*(3p0-6p1+3p2)(-3p0+3p1) + 3*(-p0+3p1-3p2+p3)(p0-pt) t^2 + // (-3p0+3p1)^2+2(p0-pt)(3p0-6p1+3p2) t + // (p0-pt)(-3p0+3p1) + double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)); + double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); + double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); + double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))); + double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); + double F = sum((p0-pt)*(-3*p0+3*p1)); + // normalize the polynomial + B /= A; + C /= A; + D /= A; + E /= A; + F /= A; + // Isolator Polynomials: + // https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.2233&rep=rep1&type=pdf + // x/5 + B/25 + // /----------------------------------------------------- + // 5x^4 + 4B x^3 + 3C x^2 + 2D x + E / x^5 + B x^4 + C x^3 + D x^2 + E x + F + // x^5 + 4B/5 x^4 + 3C/5 x^3 + 2D/5 x^2 + E/5 x + // ---------------------------------------------------- + // B/5 x^4 + 2C/5 x^3 + 3D/5 x^2 + 4E/5 x + F + // B/5 x^4 + 4B^2/25 x^3 + 3BC/25 x^2 + 2BD/25 x + BE/25 + // ---------------------------------------------------- + // (2C/5 - 4B^2/25)x^3 + (3D/5-3BC/25)x^2 + (4E/5-2BD/25) + (F-BE/25) + auto p1A = ((2 / 5.f) * C - (4 / 25.f) * B * B); + auto p1B = ((3 / 5.f) * D - (3 / 25.f) * B * C); + auto p1C = ((4 / 5.f) * E - (2 / 25.f) * B * D); + auto p1D = F - B * E / 25.f; + // auto q1A = 1 / 5.f; + // auto q1B = B / 25.f; + // x/5 + B/25 = 0 + // x = -B/5 + auto q_root = -B/5.f; + double p_roots[3]; + int num_sol = solve_cubic(p1A, p1B, p1C, p1D, p_roots); + float intervals[4]; + if (q_root >= 0 && q_root <= 1) { + intervals[0] = q_root; + } + for (int j = 0; j < num_sol; j++) { + intervals[j + 1] = p_roots[j]; + } + auto num_intervals = 1 + num_sol; + // sort intervals + for (int j = 1; j < num_intervals; j++) { + for (int k = j; k > 0 && intervals[k - 1] > intervals[k]; k--) { + auto tmp = intervals[k]; + intervals[k] = intervals[k - 1]; + intervals[k - 1] = tmp; + } + } + auto eval_polynomial = [&] (double t) { + return t*t*t*t*t+ + B*t*t*t*t+ + C*t*t*t+ + D*t*t+ + E*t+ + F; + }; + auto eval_polynomial_deriv = [&] (double t) { + return 5*t*t*t*t+ + 4*B*t*t*t+ + 3*C*t*t+ + 2*D*t+ + E; + }; + auto lower_bound = 0.f; + for (int j = 0; j < num_intervals + 1; j++) { + if (j < num_intervals && intervals[j] < 0.f) { + continue; + } + auto upper_bound = j < num_intervals ? 
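+                    // Each pass treats one interval between consecutive isolator roots
+                    // computed above; such an interval is intended to bracket at most one
+                    // sign change of the quintic. Brackets without a sign change are skipped,
+                    // and inside a valid bracket a Newton iteration safeguarded by bisection
+                    // refines the root before the distance comparison.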
+ min(intervals[j], 1.f) : 1.f; + auto lb = lower_bound; + auto ub = upper_bound; + auto lb_eval = eval_polynomial(lb); + auto ub_eval = eval_polynomial(ub); + if (lb_eval * ub_eval > 0) { + // Doesn't have root + continue; + } + if (lb_eval > ub_eval) { + swap_(lb, ub); + } + auto t = 0.5f * (lb + ub); + auto num_iter = 20; + for (int it = 0; it < num_iter; it++) { + if (!(t >= lb && t <= ub)) { + t = 0.5f * (lb + ub); + } + auto value = eval_polynomial(t); + if (fabs(value) < 1e-5f || it == num_iter - 1) { + break; + } + // The derivative may not be entirely accurate, + // but the bisection is going to handle this + if (value > 0.f) { + ub = t; + } else { + lb = t; + } + auto derivative = eval_polynomial_deriv(t); + t -= value / derivative; + } + auto p = eval(t); + auto distp = distance(p, pt); + if (distp < dist) { + dist = distp; + closest_pt = p; + t_root = t; + } + if (upper_bound >= 1.f) { + break; + } + lower_bound = upper_bound; + } + } else { + assert(false); + } + if (dist < min_dist) { + min_dist = dist; + ret_pt = closest_pt; + path_info->base_point_id = base_point_id; + path_info->point_id = point_id; + path_info->t_root = t_root; + found = true; + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (within_distance(b0, pt, min_dist)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (within_distance(b1, pt, min_dist)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_size); + } + } + if (found) { + assert(path_info->base_point_id < num_segments); + } + *result = ret_pt; + return found; +} + +DEVICE +inline +bool closest_point(const Rect &rect, const Vector2f &pt, + Vector2f *result) { + auto min_dist = 0.f; + auto closest_pt = Vector2f{0, 0}; + auto update = [&](const Vector2f &p0, const Vector2f &p1, bool first) { + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + auto d = distance(p0, pt); + if (first || d < min_dist) { + min_dist = d; + closest_pt = p0; + } + } else if (t > 1) { + auto d = distance(p1, pt); + if (first || d < min_dist) { + min_dist = d; + closest_pt = p1; + } + } else { + auto p = p0 + t * (p1 - p0); + auto d = distance(p, pt); + if (first || d < min_dist) { + min_dist = d; + closest_pt = p0; + } + } + }; + auto left_top = rect.p_min; + auto right_top = Vector2f{rect.p_max.x, rect.p_min.y}; + auto left_bottom = Vector2f{rect.p_min.x, rect.p_max.y}; + auto right_bottom = rect.p_max; + update(left_top, left_bottom, true); + update(left_top, right_top, false); + update(right_top, right_bottom, false); + update(left_bottom, right_bottom, false); + *result = closest_pt; + return true; +} + +DEVICE +inline +bool closest_point(const Shape &shape, const BVHNode *bvh_nodes, const Vector2f &pt, float max_radius, + ClosestPointPathInfo *path_info, + Vector2f *result) { + switch (shape.type) { + case ShapeType::Circle: + return closest_point(*(const Circle *)shape.ptr, pt, result); + case ShapeType::Ellipse: + // https://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf + assert(false); + return false; + case ShapeType::Path: + return closest_point(*(const Path *)shape.ptr, bvh_nodes, pt, max_radius, path_info, result); + case ShapeType::Rect: + return closest_point(*(const Rect *)shape.ptr, pt, result); + } + assert(false); + return false; +} + +DEVICE +inline +bool compute_distance(const SceneData &scene, + int shape_group_id, + const Vector2f &pt, + float 
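+                       // max_radius bounds the query: it seeds the running minimum distance
+                       // and culls BVH nodes farther away; when nothing in the group lies
+                       // within it, *result stays at max_radius and the function returns false.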
max_radius, + int *min_shape_id, + Vector2f *closest_pt_, + ClosestPointPathInfo *path_info, + float *result) { + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + // pt is in canvas space, transform it to shape's local space + auto local_pt = xform_pt(shape_group.canvas_to_shape, pt); + + constexpr auto max_bvh_stack_size = 64; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; + const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id]; + + auto min_dist = max_radius; + auto found = false; + + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto shape_id = node.child0; + const auto &shape = scene.shapes[shape_id]; + ClosestPointPathInfo local_path_info{-1, -1}; + auto local_closest_pt = Vector2f{0, 0}; + if (closest_point(shape, scene.path_bvhs[shape_id], local_pt, max_radius, &local_path_info, &local_closest_pt)) { + auto closest_pt = xform_pt(shape_group.shape_to_canvas, local_closest_pt); + auto dist = distance(closest_pt, pt); + if (!found || dist < min_dist) { + found = true; + min_dist = dist; + if (min_shape_id != nullptr) { + *min_shape_id = shape_id; + } + if (closest_pt_ != nullptr) { + *closest_pt_ = closest_pt; + } + if (path_info != nullptr) { + *path_info = local_path_info; + } + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (inside(b0, local_pt, max_radius)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (inside(b1, local_pt, max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + + *result = min_dist; + return found; +} + + +DEVICE +inline +void d_closest_point(const Circle &circle, + const Vector2f &pt, + const Vector2f &d_closest_pt, + Circle &d_circle, + Vector2f &d_pt) { + // return circle.center + circle.radius * normalize(pt - circle.center); + auto d_center = d_closest_pt * + (1 + d_normalize(pt - circle.center, circle.radius * d_closest_pt)); + atomic_add(&d_circle.center.x, d_center); + atomic_add(&d_circle.radius, dot(d_closest_pt, normalize(pt - circle.center))); +} + +DEVICE +inline +void d_closest_point(const Path &path, + const Vector2f &pt, + const Vector2f &d_closest_pt, + const ClosestPointPathInfo &path_info, + Path &d_path, + Vector2f &d_pt) { + auto base_point_id = path_info.base_point_id; + auto point_id = path_info.point_id; + auto min_t_root = path_info.t_root; + + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + auto d_p0 = Vector2f{0, 0}; + auto d_p1 = Vector2f{0, 0}; + if (t < 0) { + d_p0 += d_closest_pt; + } else if (t > 1) { + d_p1 += d_closest_pt; + } else { + auto d_p = d_closest_pt; + // p = p0 + t * (p1 - p0) + d_p0 += d_p * (1 - t); + d_p1 += d_p * t; + } + atomic_add(d_path.points + 2 * i0, d_p0); + atomic_add(d_path.points + 2 * i1, d_p1); + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + 
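+        // For the interior case below, t is the root of the cubic
+        // g(t) = A t^3 + B t^2 + C t + D found by the primal closest-point search,
+        // so the implicit function theorem gives dt/dA = -t^3 / g'(t),
+        // dt/dB = -t^2 / g'(t), dt/dC = -t / g'(t), dt/dD = -1 / g'(t)
+        // with g'(t) = 3A t^2 + 2B t + C; that is how d_t reaches the control points.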
auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + // auto eval = [&](float t) -> Vector2f { + // auto tt = 1 - t; + // return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + // }; + // auto dist0 = distance(eval(0), pt); + // auto dist1 = distance(eval(1), pt); + auto d_p0 = Vector2f{0, 0}; + auto d_p1 = Vector2f{0, 0}; + auto d_p2 = Vector2f{0, 0}; + auto t = min_t_root; + if (t == 0) { + d_p0 += d_closest_pt; + } else if (t == 1) { + d_p2 += d_closest_pt; + } else { + // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2 + // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 = q + // Want to solve (q - pt) dot q' = 0 + // q' = (p0-2p1+p2)t + (-p0+p1) + // Expanding (p0-2p1+p2)^2 t^3 + + // 3(p0-2p1+p2)(-p0+p1) t^2 + + // (2(-p0+p1)^2+(p0-2p1+p2)(p0-pt))t + + // (-p0+p1)(p0-pt) = 0 + auto A = sum((p0-2*p1+p2)*(p0-2*p1+p2)); + auto B = sum(3*(p0-2*p1+p2)*(-p0+p1)); + auto C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)); + // auto D = sum((-p0+p1)*(p0-pt)); + auto d_p = d_closest_pt; + // p = eval(t) + auto tt = 1 - t; + // (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2 + auto d_tt = 2 * tt * dot(d_p, p0) + 2 * t * dot(d_p, p1); + auto d_t = -d_tt + 2 * tt * dot(d_p, p1) + 2 * t * dot(d_p, p2); + auto d_p0 = d_p * tt * tt; + auto d_p1 = 2 * d_p * tt * t; + auto d_p2 = d_p * t * t; + // implicit function theorem: dt/dA = -1/(p'(t)) * dp/dA + auto poly_deriv_t = 3 * A * t * t + 2 * B * t + C; + if (fabs(poly_deriv_t) > 1e-6f) { + auto d_A = - (d_t / poly_deriv_t) * t * t * t; + auto d_B = - (d_t / poly_deriv_t) * t * t; + auto d_C = - (d_t / poly_deriv_t) * t; + auto d_D = - (d_t / poly_deriv_t); + // A = sum((p0-2*p1+p2)*(p0-2*p1+p2)) + // B = sum(3*(p0-2*p1+p2)*(-p0+p1)) + // C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)) + // D = sum((-p0+p1)*(p0-pt)) + d_p0 += 2*d_A*(p0-2*p1+p2)+ + 3*d_B*((-p0+p1)-(p0-2*p1+p2))+ + 2*d_C*(-2*(-p0+p1))+ + d_C*((p0-pt)+(p0-2*p1+p2))+ + 2*d_D*(-(p0-pt)+(-p0+p1)); + d_p1 += (-2)*2*d_A*(p0-2*p1+p2)+ + 3*d_B*(-2*(-p0+p1)+(p0-2*p1+p2))+ + 2*d_C*(2*(-p0+p1))+ + d_C*((-2)*(p0-pt))+ + d_D*(p0-pt); + d_p2 += 2*d_A*(p0-2*p1+p2)+ + 3*d_B*(-p0+p1)+ + d_C*(p0-pt); + d_pt += d_C*(-(p0-2*p1+p2))+ + d_D*(-(-p0+p1)); + } + } + atomic_add(d_path.points + 2 * i0, d_p0); + atomic_add(d_path.points + 2 * i1, d_p1); + atomic_add(d_path.points + 2 * i2, d_p2); + } else if (path.num_control_points[base_point_id] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + // auto eval = [&](float t) -> Vector2f { + // auto tt = 1 - t; + // return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + // }; + auto d_p0 = Vector2f{0, 0}; + auto d_p1 = Vector2f{0, 0}; + auto d_p2 = Vector2f{0, 0}; + auto d_p3 = Vector2f{0, 0}; + auto t = min_t_root; + if (t == 0) { + // closest_pt = p0 + d_p0 += d_closest_pt; + } else if (t == 1) { + // closest_pt = p1 + d_p3 += d_closest_pt; + } else { + // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3 + // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 + // Want to solve (q - pt) dot q' = 0 + // q' = 3*(-p0+3p1-3p2+p3)t^2 + 2*(3p0-6p1+3p2)t + (-3p0+3p1) + // Expanding + // 3*(-p0+3p1-3p2+p3)^2 
t^5 + // 5*(-p0+3p1-3p2+p3)(3p0-6p1+3p2) t^4 + // 4*(-p0+3p1-3p2+p3)(-3p0+3p1) + 2*(3p0-6p1+3p2)^2 t^3 + // 3*(3p0-6p1+3p2)(-3p0+3p1) + 3*(-p0+3p1-3p2+p3)(p0-pt) t^2 + // (-3p0+3p1)^2+2(p0-pt)(3p0-6p1+3p2) t + // (p0-pt)(-3p0+3p1) + double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)); + double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); + double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); + double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))); + double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); + double F = sum((p0-pt)*(-3*p0+3*p1)); + B /= A; + C /= A; + D /= A; + E /= A; + F /= A; + // auto eval_polynomial = [&] (double t) { + // return t*t*t*t*t+ + // B*t*t*t*t+ + // C*t*t*t+ + // D*t*t+ + // E*t+ + // F; + // }; + auto eval_polynomial_deriv = [&] (double t) { + return 5*t*t*t*t+ + 4*B*t*t*t+ + 3*C*t*t+ + 2*D*t+ + E; + }; + + // auto p = eval(t); + auto d_p = d_closest_pt; + // (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3 + auto tt = 1 - t; + auto d_tt = 3 * tt * tt * dot(d_p, p0) + + 6 * tt * t * dot(d_p, p1) + + 3 * t * t * dot(d_p, p2); + auto d_t = -d_tt + + 3 * tt * tt * dot(d_p, p1) + + 6 * tt * t * dot(d_p, p2) + + 3 * t * t * dot(d_p, p3); + d_p0 += d_p * (tt * tt * tt); + d_p1 += d_p * (3 * tt * tt * t); + d_p2 += d_p * (3 * tt * t * t); + d_p3 += d_p * (t * t * t); + // implicit function theorem: dt/dA = -1/(p'(t)) * dp/dA + auto poly_deriv_t = eval_polynomial_deriv(t); + if (fabs(poly_deriv_t) > 1e-10f) { + auto d_B = -(d_t / poly_deriv_t) * t * t * t * t; + auto d_C = -(d_t / poly_deriv_t) * t * t * t; + auto d_D = -(d_t / poly_deriv_t) * t * t; + auto d_E = -(d_t / poly_deriv_t) * t; + auto d_F = -(d_t / poly_deriv_t); + // B = B' / A + // C = C' / A + // D = D' / A + // E = E' / A + // F = F' / A + auto d_A = -d_B * B / A + -d_C * C / A + -d_D * D / A + -d_E * E / A + -d_F * F / A; + d_B /= A; + d_C /= A; + d_D /= A; + d_E /= A; + d_F /= A; + { + double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)) + 1e-3; + double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); + double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); + double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))); + double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); + double F = sum((p0-pt)*(-3*p0+3*p1)); + B /= A; + C /= A; + D /= A; + E /= A; + F /= A; + auto eval_polynomial = [&] (double t) { + return t*t*t*t*t+ + B*t*t*t*t+ + C*t*t*t+ + D*t*t+ + E*t+ + F; + }; + auto eval_polynomial_deriv = [&] (double t) { + return 5*t*t*t*t+ + 4*B*t*t*t+ + 3*C*t*t+ + 2*D*t+ + E; + }; + auto lb = t - 1e-2f; + auto ub = t + 1e-2f; + auto lb_eval = eval_polynomial(lb); + auto ub_eval = eval_polynomial(ub); + if (lb_eval > ub_eval) { + swap_(lb, ub); + } + auto t_ = 0.5f * (lb + ub); + auto num_iter = 20; + for (int it = 0; it < num_iter; it++) { + if (!(t_ >= lb && t_ <= ub)) { + t_ = 0.5f * (lb + ub); + } + auto value = eval_polynomial(t_); + if (fabs(value) < 1e-5f || it == num_iter - 1) { + break; + } + // The derivative may not be entirely accurate, + // but the bisection is going to handle this + if (value > 0.f) { + ub = t_; + } else { + lb = t_; + } + auto derivative = eval_polynomial_deriv(t); + t_ -= value / derivative; + } + } + // A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)) + d_p0 += d_A * 3 * (-1) * 2 * (-p0+3*p1-3*p2+p3); + d_p1 += d_A * 3 * 3 * 2 * (-p0+3*p1-3*p2+p3); + d_p2 += d_A * 3 * 
(-3) * 2 * (-p0+3*p1-3*p2+p3); + d_p3 += d_A * 3 * 1 * 2 * (-p0+3*p1-3*p2+p3); + // B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)) + d_p0 += d_B * 5 * ((-1) * (3*p0-6*p1+3*p2) + 3 * (-p0+3*p1-3*p2+p3)); + d_p1 += d_B * 5 * (3 * (3*p0-6*p1+3*p2) + (-6) * (-p0+3*p1-3*p2+p3)); + d_p2 += d_B * 5 * ((-3) * (3*p0-6*p1+3*p2) + 3 * (-p0+3*p1-3*p2+p3)); + d_p3 += d_B * 5 * (3*p0-6*p1+3*p2); + // C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)) + d_p0 += d_C * 4 * ((-1) * (-3*p0+3*p1) + (-3) * (-p0+3*p1-3*p2+p3)) + + d_C * 2 * (3 * 2 * (3*p0-6*p1+3*p2)); + d_p1 += d_C * 4 * (3 * (-3*p0+3*p1) + 3 * (-p0+3*p1-3*p2+p3)) + + d_C * 2 * ((-6) * 2 * (3*p0-6*p1+3*p2)); + d_p2 += d_C * 4 * ((-3) * (-3*p0+3*p1)) + + d_C * 2 * (3 * 2 * (3*p0-6*p1+3*p2)); + d_p3 += d_C * 4 * (-3*p0+3*p1); + // D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))) + d_p0 += d_D * 3 * (3 * (-3*p0+3*p1) + (-3) * (3*p0-6*p1+3*p2)) + + d_D * 3 * ((-1) * (p0-pt) + 1 * (-p0+3*p1-3*p2+p3)); + d_p1 += d_D * 3 * ((-6) * (-3*p0+3*p1) + (3) * (3*p0-6*p1+3*p2)) + + d_D * 3 * (3 * (p0-pt)); + d_p2 += d_D * 3 * (3 * (-3*p0+3*p1)) + + d_D * 3 * ((-3) * (p0-pt)); + d_pt += d_D * 3 * ((-1) * (-p0+3*p1-3*p2+p3)); + // E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)) + d_p0 += d_E * ((-3) * 2 * (-3*p0+3*p1)) + + d_E * 2 * (1 * (3*p0-6*p1+3*p2) + 3 * (p0-pt)); + d_p1 += d_E * ( 3 * 2 * (-3*p0+3*p1)) + + d_E * 2 * ((-6) * (p0-pt)); + d_p2 += d_E * 2 * ( 3 * (p0-pt)); + d_pt += d_E * 2 * ((-1) * (3*p0-6*p1+3*p2)); + // F = sum((p0-pt)*(-3*p0+3*p1)) + d_p0 += d_F * (1 * (-3*p0+3*p1)) + + d_F * ((-3) * (p0-pt)); + d_p1 += d_F * (3 * (p0-pt)); + d_pt += d_F * ((-1) * (-3*p0+3*p1)); + } + } + atomic_add(d_path.points + 2 * i0, d_p0); + atomic_add(d_path.points + 2 * i1, d_p1); + atomic_add(d_path.points + 2 * i2, d_p2); + atomic_add(d_path.points + 2 * i3, d_p3); + } else { + assert(false); + } +} + +DEVICE +inline +void d_closest_point(const Rect &rect, + const Vector2f &pt, + const Vector2f &d_closest_pt, + Rect &d_rect, + Vector2f &d_pt) { + auto dist = [&](const Vector2f &p0, const Vector2f &p1) -> float { + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + return distance(p0, pt); + } else if (t > 1) { + return distance(p1, pt); + } else { + return distance(p0 + t * (p1 - p0), pt); + } + // return 0; + }; + auto left_top = rect.p_min; + auto right_top = Vector2f{rect.p_max.x, rect.p_min.y}; + auto left_bottom = Vector2f{rect.p_min.x, rect.p_max.y}; + auto right_bottom = rect.p_max; + auto left_dist = dist(left_top, left_bottom); + auto top_dist = dist(left_top, right_top); + auto right_dist = dist(right_top, right_bottom); + auto bottom_dist = dist(left_bottom, right_bottom); + int min_id = 0; + auto min_dist = left_dist; + if (top_dist < min_dist) { min_dist = top_dist; min_id = 1; } + if (right_dist < min_dist) { min_dist = right_dist; min_id = 2; } + if (bottom_dist < min_dist) { min_dist = bottom_dist; min_id = 3; } + + auto d_update = [&](const Vector2f &p0, const Vector2f &p1, + const Vector2f &d_closest_pt, + Vector2f &d_p0, Vector2f &d_p1) { + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + d_p0 += d_closest_pt; + } else if (t > 1) { + d_p1 += d_closest_pt; + } else { + // p = p0 + t * (p1 - p0) + auto d_p = d_closest_pt; + d_p0 += d_p * (1 - t); + d_p1 += d_p * t; + auto d_t = sum(d_p * (p1 - p0)); + // t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0) + auto 
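+            // Quotient rule for t = num / den: d_num = d_t / den and
+            // d_den = -d_t * num / den^2 = -d_t * t / den, matching the two lines below.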
d_numerator = d_t / dot(p1 - p0, p1 - p0); + auto d_denominator = d_t * (-t) / dot(p1 - p0, p1 - p0); + // numerator = dot(pt - p0, p1 - p0) + d_pt += (p1 - p0) * d_numerator; + d_p1 += (pt - p0) * d_numerator; + d_p0 += ((p0 - p1) + (p0 - pt)) * d_numerator; + // denominator = dot(p1 - p0, p1 - p0) + d_p1 += 2 * (p1 - p0) * d_denominator; + d_p0 += 2 * (p0 - p1) * d_denominator; + } + }; + auto d_left_top = Vector2f{0, 0}; + auto d_right_top = Vector2f{0, 0}; + auto d_left_bottom = Vector2f{0, 0}; + auto d_right_bottom = Vector2f{0, 0}; + if (min_id == 0) { + d_update(left_top, left_bottom, d_closest_pt, d_left_top, d_left_bottom); + } else if (min_id == 1) { + d_update(left_top, right_top, d_closest_pt, d_left_top, d_right_top); + } else if (min_id == 2) { + d_update(right_top, right_bottom, d_closest_pt, d_right_top, d_right_bottom); + } else { + assert(min_id == 3); + d_update(left_bottom, right_bottom, d_closest_pt, d_left_bottom, d_right_bottom); + } + auto d_p_min = Vector2f{0, 0}; + auto d_p_max = Vector2f{0, 0}; + // left_top = rect.p_min + // right_top = Vector2f{rect.p_max.x, rect.p_min.y} + // left_bottom = Vector2f{rect.p_min.x, rect.p_max.y} + // right_bottom = rect.p_max + d_p_min += d_left_top; + d_p_max.x += d_right_top.x; + d_p_min.y += d_right_top.y; + d_p_min.x += d_left_bottom.x; + d_p_max.y += d_left_bottom.y; + d_p_max += d_right_bottom; + atomic_add(d_rect.p_min, d_p_min); + atomic_add(d_rect.p_max, d_p_max); +} + +DEVICE +inline +void d_closest_point(const Shape &shape, + const Vector2f &pt, + const Vector2f &d_closest_pt, + const ClosestPointPathInfo &path_info, + Shape &d_shape, + Vector2f &d_pt) { + switch (shape.type) { + case ShapeType::Circle: + d_closest_point(*(const Circle *)shape.ptr, + pt, + d_closest_pt, + *(Circle *)d_shape.ptr, + d_pt); + break; + case ShapeType::Ellipse: + // https://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf + assert(false); + break; + case ShapeType::Path: + d_closest_point(*(const Path *)shape.ptr, + pt, + d_closest_pt, + path_info, + *(Path *)d_shape.ptr, + d_pt); + break; + case ShapeType::Rect: + d_closest_point(*(const Rect *)shape.ptr, + pt, + d_closest_pt, + *(Rect *)d_shape.ptr, + d_pt); + break; + } +} + +DEVICE +inline +void d_compute_distance(const Matrix3x3f &canvas_to_shape, + const Matrix3x3f &shape_to_canvas, + const Shape &shape, + const Vector2f &pt, + const Vector2f &closest_pt, + const ClosestPointPathInfo &path_info, + float d_dist, + Matrix3x3f &d_shape_to_canvas, + Shape &d_shape, + float *d_translation) { + if (distance_squared(pt, closest_pt) < 1e-10f) { + // The derivative at distance=0 is undefined + return; + } + assert(isfinite(d_dist)); + // pt is in canvas space, transform it to shape's local space + auto local_pt = xform_pt(canvas_to_shape, pt); + auto local_closest_pt = xform_pt(canvas_to_shape, closest_pt); + // auto local_closest_pt = closest_point(shape, local_pt); + // auto closest_pt = xform_pt(shape_group.shape_to_canvas, local_closest_pt); + // auto dist = distance(closest_pt, pt); + auto d_pt = Vector2f{0, 0}; + auto d_closest_pt = Vector2f{0, 0}; + d_distance(closest_pt, pt, d_dist, d_closest_pt, d_pt); + assert(isfinite(d_pt)); + assert(isfinite(d_closest_pt)); + // auto closest_pt = xform_pt(shape_group.shape_to_canvas, local_closest_pt); + auto d_local_closest_pt = Vector2f{0, 0}; + auto d_shape_to_canvas_ = Matrix3x3f(); + d_xform_pt(shape_to_canvas, local_closest_pt, d_closest_pt, + d_shape_to_canvas_, d_local_closest_pt); + 
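+    // The distance adjoint has now been propagated to the closest point in canvas
+    // space and split between the shape_to_canvas transform and the closest point in
+    // shape-local space; d_closest_point below pushes the local part onto the shape
+    // parameters, and d_xform_pt afterwards handles the canvas_to_shape side.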
assert(isfinite(d_local_closest_pt)); + auto d_local_pt = Vector2f{0, 0}; + d_closest_point(shape, local_pt, d_local_closest_pt, path_info, d_shape, d_local_pt); + assert(isfinite(d_local_pt)); + auto d_canvas_to_shape = Matrix3x3f(); + d_xform_pt(canvas_to_shape, + pt, + d_local_pt, + d_canvas_to_shape, + d_pt); + // http://jack.valmadre.net/notes/2016/09/04/back-prop-differentials/#back-propagation-using-differentials + auto tc2s = transpose(canvas_to_shape); + d_shape_to_canvas_ += -tc2s * d_canvas_to_shape * tc2s; + atomic_add(&d_shape_to_canvas(0, 0), d_shape_to_canvas_); + if (d_translation != nullptr) { + atomic_add(d_translation, -d_pt); + } +} diff --git a/cuda_utils.h b/cuda_utils.h new file mode 100644 index 0000000..1e4609b --- /dev/null +++ b/cuda_utils.h @@ -0,0 +1,53 @@ +#pragma once + +#ifdef __CUDACC__ + #include + #include +#endif +#include +#include +#include + +#ifdef __CUDACC__ +#define checkCuda(x) do { if((x)!=cudaSuccess) { \ + printf("CUDA Runtime Error: %s at %s:%d\n",\ + cudaGetErrorString(x),__FILE__,__LINE__);\ + exit(1);}} while(0) +#endif + +template +DEVICE +inline T infinity() { +#ifdef __CUDA_ARCH__ + const unsigned long long ieee754inf = 0x7ff0000000000000; + return __longlong_as_double(ieee754inf); +#else + return std::numeric_limits::infinity(); +#endif +} + +template <> +DEVICE +inline double infinity() { +#ifdef __CUDA_ARCH__ + return __longlong_as_double(0x7ff0000000000000ULL); +#else + return std::numeric_limits::infinity(); +#endif +} + +template <> +DEVICE +inline float infinity() { +#ifdef __CUDA_ARCH__ + return __int_as_float(0x7f800000); +#else + return std::numeric_limits::infinity(); +#endif +} + +inline void cuda_synchronize() { +#ifdef __CUDACC__ + checkCuda(cudaDeviceSynchronize()); +#endif +} diff --git a/diffvg.cpp b/diffvg.cpp new file mode 100644 index 0000000..7346d24 --- /dev/null +++ b/diffvg.cpp @@ -0,0 +1,1792 @@ +#include "diffvg.h" +#include "aabb.h" +#include "shape.h" +#include "sample_boundary.h" +#include "atomic.h" +#include "cdf.h" +#include "compute_distance.h" +#include "cuda_utils.h" +#include "edge_query.h" +#include "filter.h" +#include "matrix.h" +#include "parallel.h" +#include "pcg.h" +#include "ptr.h" +#include "scene.h" +#include "vector.h" +#include "winding_number.h" +#include "within_distance.h" +#include +#include +#include +#include +#include + +namespace py = pybind11; + +struct Command { + int shape_group_id; + int shape_id; + int point_id; // Only used by path +}; + +DEVICE +bool is_inside(const SceneData &scene_data, + int shape_group_id, + const Vector2f &pt, + EdgeQuery *edge_query) { + const ShapeGroup &shape_group = scene_data.shape_groups[shape_group_id]; + // pt is in canvas space, transform it to shape's local space + auto local_pt = xform_pt(shape_group.canvas_to_shape, pt); + const auto &bvh_nodes = scene_data.shape_groups_bvh_nodes[shape_group_id]; + const AABB &bbox = bvh_nodes[2 * shape_group.num_shapes - 2].box; + if (!inside(bbox, local_pt)) { + return false; + } + auto winding_number = 0; + // Traverse the shape group BVH + constexpr auto max_bvh_stack_size = 64; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto shape_id = node.child0; + auto w = compute_winding_number( + scene_data.shapes[shape_id], scene_data.path_bvhs[shape_id], local_pt); + winding_number += w; + if (edge_query != nullptr) 
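+            // When the caller passes an edge query aimed at this (shape group, shape)
+            // pair, record whether this shape's own winding contribution already
+            // satisfies the group's fill rule at the query point.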
{ + if (edge_query->shape_group_id == shape_group_id && + edge_query->shape_id == shape_id) { + if ((shape_group.use_even_odd_rule && abs(w) % 2 == 1) || + (!shape_group.use_even_odd_rule && w != 0)) { + edge_query->hit = true; + } + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (inside(b0, local_pt)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (inside(b1, local_pt)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + if (shape_group.use_even_odd_rule) { + return abs(winding_number) % 2 == 1; + } else { + return winding_number != 0; + } +} + +DEVICE void accumulate_boundary_gradient(const Shape &shape, + float contrib, + float t, + const Vector2f &normal, + const BoundaryData &boundary_data, + Shape &d_shape, + const Matrix3x3f &shape_to_canvas, + const Vector2f &local_boundary_pt, + Matrix3x3f &d_shape_to_canvas) { + assert(isfinite(contrib)); + assert(isfinite(normal)); + // According to Reynold transport theorem, + // the Jacobian of the boundary integral is dot(velocity, normal), + // where the velocity depends on the variable being differentiated with. + if (boundary_data.is_stroke) { + auto has_path_thickness = false; + if (shape.type == ShapeType::Path) { + const Path &path = *(const Path *)shape.ptr; + has_path_thickness = path.thickness != nullptr; + } + // differentiate stroke width: velocity is the same as normal + if (has_path_thickness) { + Path *d_p = (Path*)d_shape.ptr; + auto base_point_id = boundary_data.path.base_point_id; + auto point_id = boundary_data.path.point_id; + auto t = boundary_data.path.t; + const Path &path = *(const Path *)shape.ptr; + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + // r = r0 + t * (r1 - r0) + atomic_add(&d_p->thickness[i0], (1 - t) * contrib); + atomic_add(&d_p->thickness[i1], ( t) * contrib); + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + // r = (1-t)^2r0 + 2(1-t)t r1 + t^2 r2 + atomic_add(&d_p->thickness[i0], square(1 - t) * contrib); + atomic_add(&d_p->thickness[i1], (2*(1-t)*t) * contrib); + atomic_add(&d_p->thickness[i2], (t*t) * contrib); + } else if (path.num_control_points[base_point_id] == 2) { + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + // r = (1-t)^3r0 + 3*(1-t)^2tr1 + 3*(1-t)t^2r2 + t^3r3 + atomic_add(&d_p->thickness[i0], cubic(1 - t) * contrib); + atomic_add(&d_p->thickness[i1], 3 * square(1 - t) * t * contrib); + atomic_add(&d_p->thickness[i2], 3 * (1 - t) * t * t * contrib); + atomic_add(&d_p->thickness[i3], t * t * t * contrib); + } else { + assert(false); + } + } else { + atomic_add(&d_shape.stroke_width, contrib); + } + } + switch (shape.type) { + case ShapeType::Circle: { + Circle *d_p = (Circle*)d_shape.ptr; + // velocity for the center is (1, 0) for x and (0, 1) for y + atomic_add(&d_p->center[0], normal * contrib); + // velocity for the radius is the same as the normal + atomic_add(&d_p->radius, contrib); + break; + } case ShapeType::Ellipse: { + Ellipse *d_p = (Ellipse*)d_shape.ptr; + // velocity for the center is (1, 0) for x and (0, 1) for y + atomic_add(&d_p->center[0], normal * contrib); + // velocity for the radius: + // x = center.x + r.x 
* cos(2pi * t) + // y = center.y + r.y * sin(2pi * t) + // for r.x: (cos(2pi * t), 0) + // for r.y: (0, sin(2pi * t)) + atomic_add(&d_p->radius.x, cos(2 * float(M_PI) * t) * normal.x * contrib); + atomic_add(&d_p->radius.y, sin(2 * float(M_PI) * t) * normal.y * contrib); + break; + } case ShapeType::Path: { + Path *d_p = (Path*)d_shape.ptr; + auto base_point_id = boundary_data.path.base_point_id; + auto point_id = boundary_data.path.point_id; + auto t = boundary_data.path.t; + const Path &path = *(const Path *)shape.ptr; + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + // pt = p0 + t * (p1 - p0) + // velocity for p0.x: (1 - t, 0) + // p0.y: ( 0, 1 - t) + // p1.x: ( t, 0) + // p1.y: ( 0, t) + atomic_add(&d_p->points[2 * i0 + 0], (1 - t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i0 + 1], (1 - t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i1 + 0], ( t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i1 + 1], ( t) * normal.y * contrib); + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + // pt = (1-t)^2p0 + 2(1-t)t p1 + t^2 p2 + // velocity for p0.x: ((1-t)^2, 0) + // p0.y: ( 0, (1-t)^2) + // p1.x: (2(1-t)t, 0) + // p1.y: ( 0, 2(1-t)t) + // p1.x: ( t^2, 0) + // p1.y: ( 0, t^2) + atomic_add(&d_p->points[2 * i0 + 0], square(1 - t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i0 + 1], square(1 - t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i1 + 0], (2*(1-t)*t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i1 + 1], (2*(1-t)*t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i2 + 0], (t*t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i2 + 1], (t*t) * normal.y * contrib); + } else if (path.num_control_points[base_point_id] == 2) { + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + // pt = (1-t)^3p0 + 3*(1-t)^2tp1 + 3*(1-t)t^2p2 + t^3p3 + // velocity for p0.x: ( (1-t)^3, 0) + // p0.y: ( 0, (1-t)^3) + // p1.x: (3*(1-t)^2t, 0) + // p1.y: ( 0, 3*(1-t)^2t) + // p2.x: (3*(1-t)t^2, 0) + // p2.y: ( 0, 3*(1-t)t^2) + // p2.x: ( t^3, 0) + // p2.y: ( 0, t^3) + atomic_add(&d_p->points[2 * i0 + 0], cubic(1 - t) * normal.x * contrib); + atomic_add(&d_p->points[2 * i0 + 1], cubic(1 - t) * normal.y * contrib); + atomic_add(&d_p->points[2 * i1 + 0], 3 * square(1 - t) * t * normal.x * contrib); + atomic_add(&d_p->points[2 * i1 + 1], 3 * square(1 - t) * t * normal.y * contrib); + atomic_add(&d_p->points[2 * i2 + 0], 3 * (1 - t) * t * t * normal.x * contrib); + atomic_add(&d_p->points[2 * i2 + 1], 3 * (1 - t) * t * t * normal.y * contrib); + atomic_add(&d_p->points[2 * i3 + 0], t * t * t * normal.x * contrib); + atomic_add(&d_p->points[2 * i3 + 1], t * t * t * normal.y * contrib); + } else { + assert(false); + } + break; + } case ShapeType::Rect: { + Rect *d_p = (Rect*)d_shape.ptr; + // The velocity depends on the position of the boundary + if (normal == Vector2f{-1, 0}) { + // left + // velocity for p_min is (1, 0) for x and (0, 0) for y + atomic_add(&d_p->p_min.x, -contrib); + } else if (normal == Vector2f{1, 0}) { + // right + // velocity for p_max is (1, 0) for x and (0, 0) for y + atomic_add(&d_p->p_max.x, contrib); + } else if (normal == Vector2f{0, -1}) { + // top + // velocity for p_min is (0, 0) for x and (0, 1) for y + atomic_add(&d_p->p_min.y, -contrib); 
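+                // As with the other shapes, each parameter receives dot(velocity, normal) * contrib.
+                // The p_min edges face the negative axes, so the dot product is -1 and the
+                // contribution enters negated; the p_max edges below pick up +contrib.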
+ } else if (normal == Vector2f{0, 1}) { + // bottom + // velocity for p_max is (0, 0) for x and (0, 1) for y + atomic_add(&d_p->p_max.y, contrib); + } else { + // incorrect normal assignment? + assert(false); + } + break; + } default: { + assert(false); + break; + } + } + // for shape_to_canvas we have the following relationship: + // boundary_pt = xform_pt(shape_to_canvas, local_pt) + // the velocity is the derivative of boundary_pt with respect to shape_to_canvas + // we can use reverse-mode AD to compute the dot product of the velocity and the Jacobian + // by passing the normal in d_xform_pt + auto d_shape_to_canvas_ = Matrix3x3f(); + auto d_local_boundary_pt = Vector2f{0, 0}; + d_xform_pt(shape_to_canvas, + local_boundary_pt, + normal * contrib, + d_shape_to_canvas_, + d_local_boundary_pt); + atomic_add(&d_shape_to_canvas(0, 0), d_shape_to_canvas_); +} + +DEVICE +Vector4f sample_color(const ColorType &color_type, + void *color, + const Vector2f &pt) { + switch (color_type) { + case ColorType::Constant: { + auto c = (const Constant*)color; + assert(isfinite(c->color)); + return c->color; + } case ColorType::LinearGradient: { + auto c = (const LinearGradient*)color; + // Project pt to (c->begin, c->end) + auto beg = c->begin; + auto end = c->end; + auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-3f); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + return Vector4f{c->stop_colors[0], + c->stop_colors[1], + c->stop_colors[2], + c->stop_colors[3]}; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + assert(isfinite(tt)); + assert(isfinite(color_curr)); + assert(isfinite(color_next)); + return color_curr * (1 - tt) + color_next * tt; + } + } + return Vector4f{c->stop_colors[4 * (c->num_stops - 1) + 0], + c->stop_colors[4 * (c->num_stops - 1) + 1], + c->stop_colors[4 * (c->num_stops - 1) + 2], + c->stop_colors[4 * (c->num_stops - 1) + 3]}; + } case ColorType::RadialGradient: { + auto c = (const RadialGradient*)color; + // Distance from pt to center + auto offset = pt - c->center; + auto normalized_offset = offset / c->radius; + auto t = length(normalized_offset); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + return Vector4f{c->stop_colors[0], + c->stop_colors[1], + c->stop_colors[2], + c->stop_colors[3]}; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + assert(isfinite(tt)); + assert(isfinite(color_curr)); + assert(isfinite(color_next)); + return 
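+                    // Linearly interpolate between the two bracketing stops; stop colors are
+                    // stored as flat RGBA quadruples, so the blend applies to all four channels.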
color_curr * (1 - tt) + color_next * tt; + } + } + return Vector4f{c->stop_colors[4 * (c->num_stops - 1) + 0], + c->stop_colors[4 * (c->num_stops - 1) + 1], + c->stop_colors[4 * (c->num_stops - 1) + 2], + c->stop_colors[4 * (c->num_stops - 1) + 3]}; + } default: { + assert(false); + } + } + return Vector4f{}; +} + +DEVICE +void d_sample_color(const ColorType &color_type, + void *color_ptr, + const Vector2f &pt, + const Vector4f &d_color, + void *d_color_ptr, + float *d_translation) { + switch (color_type) { + case ColorType::Constant: { + auto d_c = (Constant*)d_color_ptr; + atomic_add(&d_c->color[0], d_color); + return; + } case ColorType::LinearGradient: { + auto c = (const LinearGradient*)color_ptr; + auto d_c = (LinearGradient*)d_color_ptr; + // Project pt to (c->begin, c->end) + auto beg = c->begin; + auto end = c->end; + auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-3f); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + atomic_add(&d_c->stop_colors[0], d_color); + return; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + // return color_curr * (1 - tt) + color_next * tt; + auto d_color_curr = d_color * (1 - tt); + auto d_color_next = d_color * tt; + auto d_tt = sum(d_color * (color_next - color_curr)); + auto d_offset_next = -d_tt * tt / (offset_next - offset_curr); + auto d_offset_curr = d_tt * ((tt - 1.f) / (offset_next - offset_curr)); + auto d_t = d_tt / (offset_next - offset_curr); + assert(isfinite(d_tt)); + atomic_add(&d_c->stop_colors[4 * i], d_color_curr); + atomic_add(&d_c->stop_colors[4 * (i + 1)], d_color_next); + atomic_add(&d_c->stop_offsets[i], d_offset_curr); + atomic_add(&d_c->stop_offsets[i + 1], d_offset_next); + // auto t = dot(pt - beg, end - beg) / max(dot(end - beg, end - beg), 1e-6f); + // l = max(dot(end - beg, end - beg), 1e-3f) + // t = dot(pt - beg, end - beg) / l; + auto l = max(dot(end - beg, end - beg), 1e-3f); + auto d_beg = d_t * (-(pt - beg)-(end - beg)) / l; + auto d_end = d_t * (pt - beg) / l; + auto d_l = -d_t * t / l; + if (dot(end - beg, end - beg) > 1e-3f) { + d_beg += 2 * d_l * (beg - end); + d_end += 2 * d_l * (end - beg); + } + atomic_add(&d_c->begin[0], d_beg); + atomic_add(&d_c->end[0], d_end); + if (d_translation != nullptr) { + atomic_add(d_translation, (d_beg + d_end)); + } + return; + } + } + atomic_add(&d_c->stop_colors[4 * (c->num_stops - 1)], d_color); + return; + } case ColorType::RadialGradient: { + auto c = (const RadialGradient*)color_ptr; + auto d_c = (RadialGradient*)d_color_ptr; + // Distance from pt to center + auto offset = pt - c->center; + auto normalized_offset = offset / c->radius; + auto t = length(normalized_offset); + // Find the correponding stop: + if (t < c->stop_offsets[0]) { + atomic_add(&d_c->stop_colors[0], d_color); + return; + } + for (int i = 0; i < c->num_stops - 1; i++) { + auto offset_curr = c->stop_offsets[i]; + auto offset_next = c->stop_offsets[i + 1]; + assert(offset_next > offset_curr); + if (t >= offset_curr && t < 
offset_next) { + auto color_curr = Vector4f{ + c->stop_colors[4 * i + 0], + c->stop_colors[4 * i + 1], + c->stop_colors[4 * i + 2], + c->stop_colors[4 * i + 3]}; + auto color_next = Vector4f{ + c->stop_colors[4 * (i + 1) + 0], + c->stop_colors[4 * (i + 1) + 1], + c->stop_colors[4 * (i + 1) + 2], + c->stop_colors[4 * (i + 1) + 3]}; + auto tt = (t - offset_curr) / (offset_next - offset_curr); + assert(isfinite(tt)); + // return color_curr * (1 - tt) + color_next * tt; + auto d_color_curr = d_color * (1 - tt); + auto d_color_next = d_color * tt; + auto d_tt = sum(d_color * (color_next - color_curr)); + auto d_offset_next = -d_tt * tt / (offset_next - offset_curr); + auto d_offset_curr = d_tt * ((tt - 1.f) / (offset_next - offset_curr)); + auto d_t = d_tt / (offset_next - offset_curr); + assert(isfinite(d_t)); + atomic_add(&d_c->stop_colors[4 * i], d_color_curr); + atomic_add(&d_c->stop_colors[4 * (i + 1)], d_color_next); + atomic_add(&d_c->stop_offsets[i], d_offset_curr); + atomic_add(&d_c->stop_offsets[i + 1], d_offset_next); + // offset = pt - c->center + // normalized_offset = offset / c->radius + // t = length(normalized_offset) + auto d_normalized_offset = d_length(normalized_offset, d_t); + auto d_offset = d_normalized_offset / c->radius; + auto d_radius = -d_normalized_offset * offset / (c->radius * c->radius); + auto d_center = -d_offset; + atomic_add(&d_c->center[0], d_center); + atomic_add(&d_c->radius[0], d_radius); + if (d_translation != nullptr) { + atomic_add(d_translation, d_center); + } + } + } + atomic_add(&d_c->stop_colors[4 * (c->num_stops - 1)], d_color); + return; + } default: { + assert(false); + } + } +} + +struct Fragment { + Vector3f color; + float alpha; + int group_id; + bool is_stroke; +}; + +struct PrefilterFragment { + Vector3f color; + float alpha; + int group_id; + bool is_stroke; + int shape_id; + float distance; + Vector2f closest_pt; + ClosestPointPathInfo path_info; + bool within_distance; +}; + +DEVICE +Vector4f sample_color(const SceneData &scene, + const Vector4f *background_color, + const Vector2f &screen_pt, + const Vector4f *d_color = nullptr, + EdgeQuery *edge_query = nullptr, + Vector4f *d_background_color = nullptr, + float *d_translation = nullptr) { + if (edge_query != nullptr) { + edge_query->hit = false; + } + + // screen_pt is in screen space ([0, 1), [0, 1)), + // need to transform to canvas space + auto pt = screen_pt; + pt.x *= scene.canvas_width; + pt.y *= scene.canvas_height; + constexpr auto max_hit_shapes = 256; + constexpr auto max_bvh_stack_size = 64; + Fragment fragments[max_hit_shapes]; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + auto num_fragments = 0; + bvh_stack[stack_size++] = 2 * scene.num_shape_groups - 2; + while (stack_size > 0) { + const BVHNode &node = scene.bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto group_id = node.child0; + const ShapeGroup &shape_group = scene.shape_groups[group_id]; + if (shape_group.stroke_color != nullptr) { + if (within_distance(scene, group_id, pt, edge_query)) { + auto color_alpha = sample_color(shape_group.stroke_color_type, + shape_group.stroke_color, + pt); + Fragment f; + f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]}; + f.alpha = color_alpha[3]; + f.group_id = group_id; + f.is_stroke = true; + assert(num_fragments < max_hit_shapes); + fragments[num_fragments++] = f; + } + } + if (shape_group.fill_color != nullptr) { + if (is_inside(scene, group_id, pt, edge_query)) { + auto color_alpha = 
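+                    // Fill coverage is gated by is_inside (winding-number / even-odd test),
+                    // whereas the stroke branch above tests within_distance to the outline.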
sample_color(shape_group.fill_color_type, + shape_group.fill_color, + pt); + Fragment f; + f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]}; + f.alpha = color_alpha[3]; + f.group_id = group_id; + f.is_stroke = false; + assert(num_fragments < max_hit_shapes); + fragments[num_fragments++] = f; + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = scene.bvh_nodes[node.child0].box; + if (inside(b0, pt, scene.bvh_nodes[node.child0].max_radius)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = scene.bvh_nodes[node.child1].box; + if (inside(b1, pt, scene.bvh_nodes[node.child1].max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + if (num_fragments <= 0) { + if (background_color != nullptr) { + if (d_background_color != nullptr) { + *d_background_color = *d_color; + } + return *background_color; + } + return Vector4f{0, 0, 0, 0}; + } + // Sort the fragments from back to front (i.e. increasing order of group id) + // https://github.com/frigaut/yorick-imutil/blob/master/insort.c#L37 + for (int i = 1; i < num_fragments; i++) { + auto j = i; + auto temp = fragments[j]; + while (j > 0 && fragments[j - 1].group_id > temp.group_id) { + fragments[j] = fragments[j - 1]; + j--; + } + fragments[j] = temp; + } + // Blend the color + Vector3f accum_color[max_hit_shapes]; + float accum_alpha[max_hit_shapes]; + // auto hit_opaque = false; + auto first_alpha = 0.f; + auto first_color = Vector3f{0, 0, 0}; + if (background_color != nullptr) { + first_alpha = background_color->w; + first_color = Vector3f{background_color->x, + background_color->y, + background_color->z}; + } + for (int i = 0; i < num_fragments; i++) { + const Fragment &fragment = fragments[i]; + auto new_color = fragment.color; + auto new_alpha = fragment.alpha; + auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha; + auto prev_color = i > 0 ? accum_color[i - 1] : first_color; + if (edge_query != nullptr) { + // Do we hit the target shape? + if (new_alpha >= 1.f && edge_query->hit) { + // A fully opaque shape in front of the target occludes it + edge_query->hit = false; + } + if (edge_query->shape_group_id == fragment.group_id) { + edge_query->hit = true; + } + } + // prev_color is alpha premultiplied, don't need to multiply with + // prev_alpha + accum_color[i] = prev_color * (1 - new_alpha) + new_alpha * new_color; + accum_alpha[i] = prev_alpha * (1 - new_alpha) + new_alpha; + } + auto final_color = accum_color[num_fragments - 1]; + auto final_alpha = accum_alpha[num_fragments - 1]; + if (final_alpha > 1e-6f) { + final_color /= final_alpha; + } + assert(isfinite(final_color)); + assert(isfinite(final_alpha)); + if (d_color != nullptr) { + // Backward pass + auto d_final_color = Vector3f{(*d_color)[0], (*d_color)[1], (*d_color)[2]}; + auto d_final_alpha = (*d_color)[3]; + auto d_curr_color = d_final_color; + auto d_curr_alpha = d_final_alpha; + if (final_alpha > 1e-6f) { + // final_color = curr_color / final_alpha + d_curr_color = d_final_color / final_alpha; + d_curr_alpha -= sum(d_final_color * final_color) / final_alpha; + } + assert(isfinite(*d_color)); + assert(isfinite(d_curr_color)); + assert(isfinite(d_curr_alpha)); + for (int i = num_fragments - 1; i >= 0; i--) { + // color[n] = prev_color * (1 - new_alpha) + new_alpha * new_color; + // alpha[n] = prev_alpha * (1 - new_alpha) + new_alpha; + auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha; + auto prev_color = i > 0 ? 
accum_color[i - 1] : first_color; + auto d_prev_alpha = d_curr_alpha * (1.f - fragments[i].alpha); + auto d_alpha_i = d_curr_alpha * (1.f - prev_alpha); + d_alpha_i += sum(d_curr_color * (fragments[i].color - prev_color)); + auto d_prev_color = d_curr_color * (1 - fragments[i].alpha); + auto d_color_i = d_curr_color * fragments[i].alpha; + auto group_id = fragments[i].group_id; + if (fragments[i].is_stroke) { + d_sample_color(scene.shape_groups[group_id].stroke_color_type, + scene.shape_groups[group_id].stroke_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].stroke_color, + d_translation); + } else { + d_sample_color(scene.shape_groups[group_id].fill_color_type, + scene.shape_groups[group_id].fill_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].fill_color, + d_translation); + } + d_curr_color = d_prev_color; + d_curr_alpha = d_prev_alpha; + } + if (d_background_color != nullptr) { + d_background_color->x += d_curr_color.x; + d_background_color->y += d_curr_color.y; + d_background_color->z += d_curr_color.z; + d_background_color->w += d_curr_alpha; + } + } + return Vector4f{final_color[0], final_color[1], final_color[2], final_alpha}; +} + +DEVICE +float sample_distance(const SceneData &scene, + const Vector2f &screen_pt, + float weight, + const float *d_dist = nullptr, + float *d_translation = nullptr) { + // screen_pt is in screen space ([0, 1), [0, 1)), + // need to transform to canvas space + auto pt = screen_pt; + pt.x *= scene.canvas_width; + pt.y *= scene.canvas_height; + // for each shape + auto min_group_id = -1; + auto min_distance = 0.f; + auto min_shape_id = -1; + auto closest_pt = Vector2f{0, 0}; + auto min_path_info = ClosestPointPathInfo{-1, -1, 0}; + for (int group_id = scene.num_shape_groups - 1; group_id >= 0; group_id--) { + auto s = -1; + auto p = Vector2f{0, 0}; + ClosestPointPathInfo local_path_info; + auto d = infinity(); + if (compute_distance(scene, group_id, pt, infinity(), &s, &p, &local_path_info, &d)) { + if (min_group_id == -1 || d < min_distance) { + min_distance = d; + min_group_id = group_id; + min_shape_id = s; + closest_pt = p; + min_path_info = local_path_info; + } + } + } + if (min_group_id == -1) { + return min_distance; + } + min_distance *= weight; + auto inside = false; + const ShapeGroup &shape_group = scene.shape_groups[min_group_id]; + if (shape_group.fill_color != nullptr) { + inside = is_inside(scene, + min_group_id, + pt, + nullptr); + if (inside) { + min_distance = -min_distance; + } + } + assert((min_group_id >= 0 && min_shape_id >= 0) || scene.num_shape_groups == 0); + if (d_dist != nullptr) { + auto d_abs_dist = inside ? -(*d_dist) : (*d_dist); + const ShapeGroup &shape_group = scene.shape_groups[min_group_id]; + const Shape &shape = scene.shapes[min_shape_id]; + ShapeGroup &d_shape_group = scene.d_shape_groups[min_group_id]; + Shape &d_shape = scene.d_shapes[min_shape_id]; + d_compute_distance(shape_group.canvas_to_shape, + shape_group.shape_to_canvas, + shape, + pt, + closest_pt, + min_path_info, + d_abs_dist, + d_shape_group.shape_to_canvas, + d_shape, + d_translation); + } + return min_distance; +} + +// Gather d_color from d_image inside the filter kernel, normalize by +// weight_image. 
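+// In other words, the forward pass splats pixel[p] = sum_s w(s, p) * color(s) / weight_image[p]
+// with weight_image[p] = sum_s w(s, p), so the adjoint of a sample's color is
+// d_color(s) = sum_p (w(s, p) / weight_image[p]) * d_pixel[p], which is what the
+// loop below accumulates. The derivative with respect to the filter weights
+// themselves is handled separately in render_kernel.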
+DEVICE +Vector4f gather_d_color(const Filter &filter, + const float *d_color_image, + const float *weight_image, + int width, + int height, + const Vector2f &pt) { + auto x = int(pt.x); + auto y = int(pt.y); + auto radius = filter.radius; + assert(radius > 0); + auto ri = (int)ceil(radius); + auto d_color = Vector4f{0, 0, 0, 0}; + for (int dy = -ri; dy <= ri; dy++) { + for (int dx = -ri; dx <= ri; dx++) { + auto xx = x + dx; + auto yy = y + dy; + if (xx >= 0 && xx < width && yy >= 0 && yy < height) { + auto xc = xx + 0.5f; + auto yc = yy + 0.5f; + auto filter_weight = + compute_filter_weight(filter, xc - pt.x, yc - pt.y); + // pixel = \sum weight * color / \sum weight + auto weight_sum = weight_image[yy * width + xx]; + if (weight_sum > 0) { + d_color += (filter_weight / weight_sum) * Vector4f{ + d_color_image[4 * (yy * width + xx) + 0], + d_color_image[4 * (yy * width + xx) + 1], + d_color_image[4 * (yy * width + xx) + 2], + d_color_image[4 * (yy * width + xx) + 3], + }; + } + } + } + } + return d_color; +} + +DEVICE +float smoothstep(float d) { + auto t = clamp((d + 1.f) / 2.f, 0.f, 1.f); + return t * t * (3 - 2 * t); +} + +DEVICE +float d_smoothstep(float d, float d_ret) { + if (d < -1.f || d > 1.f) { + return 0.f; + } + auto t = (d + 1.f) / 2.f; + // ret = t * t * (3 - 2 * t) + // = 3 * t * t - 2 * t * t * t + auto d_t = d_ret * (6 * t - 6 * t * t); + return d_t / 2.f; +} + +DEVICE +Vector4f sample_color_prefiltered(const SceneData &scene, + const Vector4f *background_color, + const Vector2f &screen_pt, + const Vector4f *d_color = nullptr, + Vector4f *d_background_color = nullptr, + float *d_translation = nullptr) { + // screen_pt is in screen space ([0, 1), [0, 1)), + // need to transform to canvas space + auto pt = screen_pt; + pt.x *= scene.canvas_width; + pt.y *= scene.canvas_height; + constexpr auto max_hit_shapes = 64; + constexpr auto max_bvh_stack_size = 64; + PrefilterFragment fragments[max_hit_shapes]; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + auto num_fragments = 0; + bvh_stack[stack_size++] = 2 * scene.num_shape_groups - 2; + while (stack_size > 0) { + const BVHNode &node = scene.bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto group_id = node.child0; + const ShapeGroup &shape_group = scene.shape_groups[group_id]; + if (shape_group.stroke_color != nullptr) { + auto min_shape_id = -1; + auto closest_pt = Vector2f{0, 0}; + auto local_path_info = ClosestPointPathInfo{-1, -1, 0}; + auto d = infinity(); + compute_distance(scene, group_id, pt, infinity(), + &min_shape_id, &closest_pt, &local_path_info, &d); + assert(min_shape_id != -1); + const auto &shape = scene.shapes[min_shape_id]; + auto w = smoothstep(fabs(d) + shape.stroke_width) - + smoothstep(fabs(d) - shape.stroke_width); + if (w > 0) { + auto color_alpha = sample_color(shape_group.stroke_color_type, + shape_group.stroke_color, + pt); + color_alpha[3] *= w; + + PrefilterFragment f; + f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]}; + f.alpha = color_alpha[3]; + f.group_id = group_id; + f.shape_id = min_shape_id; + f.distance = d; + f.closest_pt = closest_pt; + f.is_stroke = true; + f.path_info = local_path_info; + f.within_distance = true; + assert(num_fragments < max_hit_shapes); + fragments[num_fragments++] = f; + } + } + if (shape_group.fill_color != nullptr) { + auto min_shape_id = -1; + auto closest_pt = Vector2f{0, 0}; + auto local_path_info = ClosestPointPathInfo{-1, -1, 0}; + auto d = infinity(); + auto found = compute_distance(scene, + 
group_id, + pt, + 1.f, + &min_shape_id, + &closest_pt, + &local_path_info, + &d); + auto inside = is_inside(scene, group_id, pt, nullptr); + if (found || inside) { + if (!inside) { + d = -d; + } + auto w = smoothstep(d); + if (w > 0) { + auto color_alpha = sample_color(shape_group.fill_color_type, + shape_group.fill_color, + pt); + color_alpha[3] *= w; + + PrefilterFragment f; + f.color = Vector3f{color_alpha[0], color_alpha[1], color_alpha[2]}; + f.alpha = color_alpha[3]; + f.group_id = group_id; + f.shape_id = min_shape_id; + f.distance = d; + f.closest_pt = closest_pt; + f.is_stroke = false; + f.path_info = local_path_info; + f.within_distance = found; + assert(num_fragments < max_hit_shapes); + fragments[num_fragments++] = f; + } + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = scene.bvh_nodes[node.child0].box; + if (inside(b0, pt, scene.bvh_nodes[node.child0].max_radius)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = scene.bvh_nodes[node.child1].box; + if (inside(b1, pt, scene.bvh_nodes[node.child1].max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + if (num_fragments <= 0) { + if (background_color != nullptr) { + if (d_background_color != nullptr) { + *d_background_color = *d_color; + } + return *background_color; + } + return Vector4f{0, 0, 0, 0}; + } + // Sort the fragments from back to front (i.e. increasing order of group id) + // https://github.com/frigaut/yorick-imutil/blob/master/insort.c#L37 + for (int i = 1; i < num_fragments; i++) { + auto j = i; + auto temp = fragments[j]; + while (j > 0 && fragments[j - 1].group_id > temp.group_id) { + fragments[j] = fragments[j - 1]; + j--; + } + fragments[j] = temp; + } + // Blend the color + Vector3f accum_color[max_hit_shapes]; + float accum_alpha[max_hit_shapes]; + auto first_alpha = 0.f; + auto first_color = Vector3f{0, 0, 0}; + if (background_color != nullptr) { + first_alpha = background_color->w; + first_color = Vector3f{background_color->x, + background_color->y, + background_color->z}; + } + for (int i = 0; i < num_fragments; i++) { + const PrefilterFragment &fragment = fragments[i]; + auto new_color = fragment.color; + auto new_alpha = fragment.alpha; + auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha; + auto prev_color = i > 0 ? 
accum_color[i - 1] : first_color; + // prev_color is alpha premultiplied, don't need to multiply with + // prev_alpha + accum_color[i] = prev_color * (1 - new_alpha) + new_alpha * new_color; + accum_alpha[i] = prev_alpha * (1 - new_alpha) + new_alpha; + } + auto final_color = accum_color[num_fragments - 1]; + auto final_alpha = accum_alpha[num_fragments - 1]; + if (final_alpha > 1e-6f) { + final_color /= final_alpha; + } + assert(isfinite(final_color)); + assert(isfinite(final_alpha)); + if (d_color != nullptr) { + // Backward pass + auto d_final_color = Vector3f{(*d_color)[0], (*d_color)[1], (*d_color)[2]}; + auto d_final_alpha = (*d_color)[3]; + auto d_curr_color = d_final_color; + auto d_curr_alpha = d_final_alpha; + if (final_alpha > 1e-6f) { + // final_color = curr_color / final_alpha + d_curr_color = d_final_color / final_alpha; + d_curr_alpha -= sum(d_final_color * final_color) / final_alpha; + } + assert(isfinite(*d_color)); + assert(isfinite(d_curr_color)); + assert(isfinite(d_curr_alpha)); + for (int i = num_fragments - 1; i >= 0; i--) { + // color[n] = prev_color * (1 - new_alpha) + new_alpha * new_color; + // alpha[n] = prev_alpha * (1 - new_alpha) + new_alpha; + auto prev_alpha = i > 0 ? accum_alpha[i - 1] : first_alpha; + auto prev_color = i > 0 ? accum_color[i - 1] : first_color; + auto d_prev_alpha = d_curr_alpha * (1.f - fragments[i].alpha); + auto d_alpha_i = d_curr_alpha * (1.f - prev_alpha); + d_alpha_i += sum(d_curr_color * (fragments[i].color - prev_color)); + auto d_prev_color = d_curr_color * (1 - fragments[i].alpha); + auto d_color_i = d_curr_color * fragments[i].alpha; + auto group_id = fragments[i].group_id; + if (fragments[i].is_stroke) { + const auto &shape = scene.shapes[fragments[i].shape_id]; + auto d = fragments[i].distance; + auto abs_d_plus_width = fabs(d) + shape.stroke_width; + auto abs_d_minus_width = fabs(d) - shape.stroke_width; + auto w = smoothstep(abs_d_plus_width) - + smoothstep(abs_d_minus_width); + if (w != 0) { + auto d_w = w > 0 ? (fragments[i].alpha / w) * d_alpha_i : 0.f; + d_alpha_i *= w; + + // Backprop to color + d_sample_color(scene.shape_groups[group_id].stroke_color_type, + scene.shape_groups[group_id].stroke_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].stroke_color, + d_translation); + + auto d_abs_d_plus_width = d_smoothstep(abs_d_plus_width, d_w); + auto d_abs_d_minus_width = -d_smoothstep(abs_d_minus_width, d_w); + + auto d_d = d_abs_d_plus_width + d_abs_d_minus_width; + if (d < 0) { + d_d = -d_d; + } + auto d_stroke_width = d_abs_d_plus_width - d_abs_d_minus_width; + + const auto &shape_group = scene.shape_groups[group_id]; + ShapeGroup &d_shape_group = scene.d_shape_groups[group_id]; + Shape &d_shape = scene.d_shapes[fragments[i].shape_id]; + if (fabs(d_d) > 1e-10f) { + d_compute_distance(shape_group.canvas_to_shape, + shape_group.shape_to_canvas, + shape, + pt, + fragments[i].closest_pt, + fragments[i].path_info, + d_d, + d_shape_group.shape_to_canvas, + d_shape, + d_translation); + } + atomic_add(&d_shape.stroke_width, d_stroke_width); + } + } else { + const auto &shape = scene.shapes[fragments[i].shape_id]; + auto d = fragments[i].distance; + auto w = smoothstep(d); + if (w != 0) { + // color_alpha[3] = color_alpha[3] * w; + auto d_w = w > 0 ? 
(fragments[i].alpha / w) * d_alpha_i : 0.f; + d_alpha_i *= w; + + d_sample_color(scene.shape_groups[group_id].fill_color_type, + scene.shape_groups[group_id].fill_color, + pt, + Vector4f{d_color_i[0], d_color_i[1], d_color_i[2], d_alpha_i}, + scene.d_shape_groups[group_id].fill_color, + d_translation); + + // w = smoothstep(d) + auto d_d = d_smoothstep(d, d_w); + if (d < 0) { + d_d = -d_d; + } + + const auto &shape_group = scene.shape_groups[group_id]; + ShapeGroup &d_shape_group = scene.d_shape_groups[group_id]; + Shape &d_shape = scene.d_shapes[fragments[i].shape_id]; + if (fabs(d_d) > 1e-10f && fragments[i].within_distance) { + d_compute_distance(shape_group.canvas_to_shape, + shape_group.shape_to_canvas, + shape, + pt, + fragments[i].closest_pt, + fragments[i].path_info, + d_d, + d_shape_group.shape_to_canvas, + d_shape, + d_translation); + } + } + } + d_curr_color = d_prev_color; + d_curr_alpha = d_prev_alpha; + } + if (d_background_color != nullptr) { + d_background_color->x += d_curr_color.x; + d_background_color->y += d_curr_color.y; + d_background_color->z += d_curr_color.z; + d_background_color->w += d_curr_alpha; + } + } + return Vector4f{final_color[0], final_color[1], final_color[2], final_alpha}; +} + +struct weight_kernel { + DEVICE void operator()(int idx) { + auto rng_state = init_pcg32(idx, seed); + // height * width * num_samples_y * num_samples_x + auto sx = idx % num_samples_x; + auto sy = (idx / num_samples_x) % num_samples_y; + auto x = (idx / (num_samples_x * num_samples_y)) % width; + auto y = (idx / (num_samples_x * num_samples_y * width)); + assert(y < height); + auto rx = next_pcg32_float(&rng_state); + auto ry = next_pcg32_float(&rng_state); + if (use_prefiltering) { + rx = ry = 0.5f; + } + auto pt = Vector2f{x + ((float)sx + rx) / num_samples_x, + y + ((float)sy + ry) / num_samples_y}; + auto radius = scene.filter->radius; + assert(radius >= 0); + auto ri = (int)ceil(radius); + for (int dy = -ri; dy <= ri; dy++) { + for (int dx = -ri; dx <= ri; dx++) { + auto xx = x + dx; + auto yy = y + dy; + if (xx >= 0 && xx < width && yy >= 0 && yy < height) { + auto xc = xx + 0.5f; + auto yc = yy + 0.5f; + auto filter_weight = compute_filter_weight(*scene.filter, + xc - pt.x, + yc - pt.y); + atomic_add(weight_image[yy * width + xx], filter_weight); + } + } + } + } + + SceneData scene; + float *weight_image; + int width; + int height; + int num_samples_x; + int num_samples_y; + uint64_t seed; + bool use_prefiltering; +}; + +// We use a "mega kernel" for rendering +struct render_kernel { + DEVICE void operator()(int idx) { + // height * width * num_samples_y * num_samples_x + auto pt = Vector2f{0, 0}; + auto x = 0; + auto y = 0; + if (eval_positions == nullptr) { + auto rng_state = init_pcg32(idx, seed); + auto sx = idx % num_samples_x; + auto sy = (idx / num_samples_x) % num_samples_y; + x = (idx / (num_samples_x * num_samples_y)) % width; + y = (idx / (num_samples_x * num_samples_y * width)); + assert(x < width && y < height); + auto rx = next_pcg32_float(&rng_state); + auto ry = next_pcg32_float(&rng_state); + if (use_prefiltering) { + rx = ry = 0.5f; + } + pt = Vector2f{x + ((float)sx + rx) / num_samples_x, + y + ((float)sy + ry) / num_samples_y}; + } else { + pt = Vector2f{eval_positions[2 * idx], + eval_positions[2 * idx + 1]}; + x = int(pt.x); + y = int(pt.y); + } + + // normalize pt to [0, 1] + auto npt = pt; + npt.x /= width; + npt.y /= height; + auto num_samples = num_samples_x * num_samples_y; + if (render_image != nullptr || d_render_image != nullptr) { + 
Vector4f d_color = Vector4f{0, 0, 0, 0}; + if (d_render_image != nullptr) { + // Gather d_color from d_render_image inside the filter kernel + // normalize using weight_image + d_color = gather_d_color(*scene.filter, + d_render_image, + weight_image, + width, + height, + pt); + } + auto color = Vector4f{0, 0, 0, 0}; + if (use_prefiltering) { + color = sample_color_prefiltered(scene, + background_image != nullptr ? (const Vector4f*)&background_image[4 * ((y * width) + x)] : nullptr, + npt, + d_render_image != nullptr ? &d_color : nullptr, + d_background_image != nullptr ? (Vector4f*)&d_background_image[4 * ((y * width) + x)] : nullptr, + d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr); + } else { + color = sample_color(scene, + background_image != nullptr ? (const Vector4f*)&background_image[4 * ((y * width) + x)] : nullptr, + npt, + d_render_image != nullptr ? &d_color : nullptr, + nullptr, + d_background_image != nullptr ? (Vector4f*)&d_background_image[4 * ((y * width) + x)] : nullptr, + d_translation != nullptr ? &d_translation[2 * (y * width + x)] : nullptr); + } + assert(isfinite(color)); + // Splat color onto render_image + auto radius = scene.filter->radius; + assert(radius >= 0); + auto ri = (int)ceil(radius); + for (int dy = -ri; dy <= ri; dy++) { + for (int dx = -ri; dx <= ri; dx++) { + auto xx = x + dx; + auto yy = y + dy; + if (xx >= 0 && xx < width && yy >= 0 && yy < height && + weight_image[yy * width + xx] > 0) { + auto weight_sum = weight_image[yy * width + xx]; + auto xc = xx + 0.5f; + auto yc = yy + 0.5f; + auto filter_weight = compute_filter_weight(*scene.filter, + xc - pt.x, + yc - pt.y); + auto weighted_color = filter_weight * color / weight_sum; + if (render_image != nullptr) { + atomic_add(render_image[4 * (yy * width + xx) + 0], + weighted_color[0]); + atomic_add(render_image[4 * (yy * width + xx) + 1], + weighted_color[1]); + atomic_add(render_image[4 * (yy * width + xx) + 2], + weighted_color[2]); + atomic_add(render_image[4 * (yy * width + xx) + 3], + weighted_color[3]); + } + if (d_render_image != nullptr) { + // Backprop to filter_weight + // pixel = \sum weight * color / \sum weight + auto d_pixel = Vector4f{ + d_render_image[4 * (yy * width + xx) + 0], + d_render_image[4 * (yy * width + xx) + 1], + d_render_image[4 * (yy * width + xx) + 2], + d_render_image[4 * (yy * width + xx) + 3], + }; + auto d_weight = + (dot(d_pixel, color) * weight_sum - + filter_weight * dot(d_pixel, color) * (weight_sum - filter_weight)) / + square(weight_sum); + d_compute_filter_weight(*scene.filter, + xc - pt.x, + yc - pt.y, + d_weight, + scene.d_filter); + } + } + } + } + } + if (sdf_image != nullptr || d_sdf_image != nullptr) { + float d_dist = 0.f; + if (d_sdf_image != nullptr) { + if (eval_positions == nullptr) { + d_dist = d_sdf_image[y * width + x]; + } else { + d_dist = d_sdf_image[idx]; + } + } + auto weight = eval_positions == nullptr ? 1.f / num_samples : 1.f; + auto dist = sample_distance(scene, npt, weight, + d_sdf_image != nullptr ? &d_dist : nullptr, + d_translation != nullptr ? 
&d_translation[2 * (y * width + x)] : nullptr); + if (sdf_image != nullptr) { + if (eval_positions == nullptr) { + atomic_add(sdf_image[y * width + x], dist); + } else { + atomic_add(sdf_image[idx], dist); + } + } + } + } + + SceneData scene; + float *background_image; + float *render_image; + float *weight_image; + float *sdf_image; + float *d_background_image; + float *d_render_image; + float *d_sdf_image; + float *d_translation; + int width; + int height; + int num_samples_x; + int num_samples_y; + uint64_t seed; + bool use_prefiltering; + float *eval_positions; +}; + +struct BoundarySample { + Vector2f pt; + Vector2f local_pt; + Vector2f normal; + int shape_group_id; + int shape_id; + float t; + BoundaryData data; + float pdf; +}; + +struct sample_boundary_kernel { + DEVICE void operator()(int idx) { + boundary_samples[idx].pt = Vector2f{0, 0}; + boundary_samples[idx].shape_id = -1; + boundary_ids[idx] = idx; + morton_codes[idx] = 0; + + auto rng_state = init_pcg32(idx, seed); + auto u = next_pcg32_float(&rng_state); + // Sample a shape + auto sample_id = sample(scene.sample_shapes_cdf, + scene.num_total_shapes, + u); + assert(sample_id >= 0 && sample_id < scene.num_total_shapes); + auto shape_id = scene.sample_shape_id[sample_id]; + assert(shape_id >= 0 && shape_id < scene.num_shapes); + auto shape_group_id = scene.sample_group_id[sample_id]; + assert(shape_group_id >= 0 && shape_group_id < scene.num_shape_groups); + auto shape_pmf = scene.sample_shapes_pmf[shape_id]; + if (shape_pmf <= 0) { + return; + } + // Sample a point on the boundary of the shape + auto boundary_pdf = 0.f; + auto normal = Vector2f{0, 0}; + auto t = next_pcg32_float(&rng_state); + BoundaryData boundary_data; + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + auto local_boundary_pt = sample_boundary( + scene, shape_group_id, shape_id, + t, normal, boundary_pdf, boundary_data); + if (boundary_pdf <= 0) { + return; + } + + // local_boundary_pt & normal are in shape's local space, + // transform them to canvas space + auto boundary_pt = xform_pt(shape_group.shape_to_canvas, local_boundary_pt); + normal = xform_normal(shape_group.canvas_to_shape, normal); + // Normalize boundary_pt to [0, 1) + boundary_pt.x /= scene.canvas_width; + boundary_pt.y /= scene.canvas_height; + + boundary_samples[idx].pt = boundary_pt; + boundary_samples[idx].local_pt = local_boundary_pt; + boundary_samples[idx].normal = normal; + boundary_samples[idx].shape_group_id = shape_group_id; + boundary_samples[idx].shape_id = shape_id; + boundary_samples[idx].t = t; + boundary_samples[idx].data = boundary_data; + boundary_samples[idx].pdf = shape_pmf * boundary_pdf; + TVector2 p_i{boundary_pt.x * 1023, boundary_pt.y * 1023}; + morton_codes[idx] = (expand_bits(p_i.x) << 1u) | + (expand_bits(p_i.y) << 0u); + } + + SceneData scene; + uint64_t seed; + BoundarySample *boundary_samples; + int *boundary_ids; + uint32_t *morton_codes; +}; + +struct render_edge_kernel { + DEVICE void operator()(int idx) { + auto bid = boundary_ids[idx]; + if (boundary_samples[bid].shape_id == -1) { + return; + } + auto boundary_pt = boundary_samples[bid].pt; + auto local_boundary_pt = boundary_samples[bid].local_pt; + auto normal = boundary_samples[bid].normal; + auto shape_group_id = boundary_samples[bid].shape_group_id; + auto shape_id = boundary_samples[bid].shape_id; + auto t = boundary_samples[bid].t; + auto boundary_data = boundary_samples[bid].data; + auto pdf = boundary_samples[bid].pdf; + + const ShapeGroup &shape_group = 
scene.shape_groups[shape_group_id]; + + auto bx = int(boundary_pt.x * width); + auto by = int(boundary_pt.y * height); + if (bx < 0 || bx >= width || by < 0 || by >= height) { + return; + } + + // Sample the two sides of the boundary + auto inside_query = EdgeQuery{shape_group_id, shape_id, false}; + auto outside_query = EdgeQuery{shape_group_id, shape_id, false}; + auto color_inside = sample_color(scene, + background_image != nullptr ? (const Vector4f *)&background_image[4 * ((by * width) + bx)] : nullptr, + boundary_pt - 1e-4f * normal, + nullptr, &inside_query); + auto color_outside = sample_color(scene, + background_image != nullptr ? (const Vector4f *)&background_image[4 * ((by * width) + bx)] : nullptr, + boundary_pt + 1e-4f * normal, + nullptr, &outside_query); + if (!inside_query.hit && !outside_query.hit) { + // occluded + return; + } + if (!inside_query.hit) { + normal = -normal; + swap_(inside_query, outside_query); + swap_(color_inside, color_outside); + } + // Boundary point in screen space + auto sboundary_pt = boundary_pt; + sboundary_pt.x *= width; + sboundary_pt.y *= height; + auto d_color = gather_d_color(*scene.filter, + d_render_image, + weight_image, + width, + height, + sboundary_pt); + // Normalization factor + d_color /= float(scene.canvas_width * scene.canvas_height); + + assert(isfinite(d_color)); + assert(isfinite(pdf) && pdf > 0); + auto contrib = dot(color_inside - color_outside, d_color) / pdf; + ShapeGroup &d_shape_group = scene.d_shape_groups[shape_group_id]; + accumulate_boundary_gradient(scene.shapes[shape_id], + contrib, t, normal, boundary_data, scene.d_shapes[shape_id], + shape_group.shape_to_canvas, local_boundary_pt, d_shape_group.shape_to_canvas); + // Don't need to backprop to filter weights: + // \int f'(x) g(x) dx doesn't contain discontinuities + // if f is continuous, even if g is discontinuous + if (d_translation != nullptr) { + // According to Reynold transport theorem, + // the Jacobian of the boundary integral is dot(velocity, normal) + // The velocity of the object translating x is (1, 0) + // The velocity of the object translating y is (0, 1) + atomic_add(&d_translation[2 * (by * width + bx) + 0], normal.x * contrib); + atomic_add(&d_translation[2 * (by * width + bx) + 1], normal.y * contrib); + } + } + + SceneData scene; + const float *background_image; + const BoundarySample *boundary_samples; + const int *boundary_ids; + float *weight_image; + float *d_render_image; + float *d_translation; + int width; + int height; + int num_samples_x; + int num_samples_y; +}; + +void render(std::shared_ptr scene, + ptr background_image, + ptr render_image, + ptr render_sdf, + int width, + int height, + int num_samples_x, + int num_samples_y, + uint64_t seed, + ptr d_background_image, + ptr d_render_image, + ptr d_render_sdf, + ptr d_translation, + bool use_prefiltering, + ptr eval_positions, + int num_eval_positions) { +#ifdef __NVCC__ + int old_device_id = -1; + if (scene->use_gpu) { + checkCuda(cudaGetDevice(&old_device_id)); + if (scene->gpu_index != -1) { + checkCuda(cudaSetDevice(scene->gpu_index)); + } + } +#endif + parallel_init(); + + float *weight_image = nullptr; + // Allocate and zero the weight image + if (scene->use_gpu) { +#ifdef __CUDACC__ + if (eval_positions.get() == nullptr) { + checkCuda(cudaMallocManaged(&weight_image, width * height * sizeof(float))); + cudaMemset(weight_image, 0, width * height * sizeof(float)); + } +#else + assert(false); +#endif + } else { + if (eval_positions.get() == nullptr) { + weight_image = 
(float*)malloc(width * height * sizeof(float)); + memset(weight_image, 0, width * height * sizeof(float)); + } + } + + if (render_image.get() != nullptr || d_render_image.get() != nullptr || + render_sdf.get() != nullptr || d_render_sdf.get() != nullptr) { + if (weight_image != nullptr) { + parallel_for(weight_kernel{ + get_scene_data(*scene.get()), + weight_image, + width, + height, + num_samples_x, + num_samples_y, + seed + }, width * height * num_samples_x * num_samples_y, scene->use_gpu); + } + + auto num_samples = eval_positions.get() == nullptr ? + width * height * num_samples_x * num_samples_y : num_eval_positions; + parallel_for(render_kernel{ + get_scene_data(*scene.get()), + background_image.get(), + render_image.get(), + weight_image, + render_sdf.get(), + d_background_image.get(), + d_render_image.get(), + d_render_sdf.get(), + d_translation.get(), + width, + height, + num_samples_x, + num_samples_y, + seed, + use_prefiltering, + eval_positions.get() + }, num_samples, scene->use_gpu); + } + + // Boundary sampling + if (!use_prefiltering && d_render_image.get() != nullptr) { + auto num_samples = width * height * num_samples_x * num_samples_y; + BoundarySample *boundary_samples = nullptr; + int *boundary_ids = nullptr; // for sorting + uint32_t *morton_codes = nullptr; // for sorting + // Allocate boundary samples + if (scene->use_gpu) { +#ifdef __CUDACC__ + checkCuda(cudaMallocManaged(&boundary_samples, + num_samples * sizeof(BoundarySample))); + checkCuda(cudaMallocManaged(&boundary_ids, + num_samples * sizeof(int))); + checkCuda(cudaMallocManaged(&morton_codes, + num_samples * sizeof(uint32_t))); +#else + assert(false); + #endif + } else { + boundary_samples = (BoundarySample*)malloc( + num_samples * sizeof(BoundarySample)); + boundary_ids = (int*)malloc( + num_samples * sizeof(int)); + morton_codes = (uint32_t*)malloc( + num_samples * sizeof(uint32_t)); + } + + // Edge sampling + // We sort the boundary samples for better thread coherency + parallel_for(sample_boundary_kernel{ + get_scene_data(*scene.get()), + seed, + boundary_samples, + boundary_ids, + morton_codes + }, num_samples, scene->use_gpu); + if (scene->use_gpu) { + thrust::sort_by_key(thrust::device, morton_codes, morton_codes + num_samples, boundary_ids); + } else { + // Don't need to sort for CPU, we are not using SIMD hardware anyway. 
+ // thrust::sort_by_key(thrust::host, morton_codes, morton_codes + num_samples, boundary_ids); + } + parallel_for(render_edge_kernel{ + get_scene_data(*scene.get()), + background_image.get(), + boundary_samples, + boundary_ids, + weight_image, + d_render_image.get(), + d_translation.get(), + width, + height, + num_samples_x, + num_samples_y + }, num_samples, scene->use_gpu); + if (scene->use_gpu) { +#ifdef __CUDACC__ + checkCuda(cudaFree(boundary_samples)); + checkCuda(cudaFree(boundary_ids)); + checkCuda(cudaFree(morton_codes)); +#else + assert(false); +#endif + } else { + free(boundary_samples); + free(boundary_ids); + free(morton_codes); + } + } + + // Clean up weight image + if (scene->use_gpu) { +#ifdef __CUDACC__ + checkCuda(cudaFree(weight_image)); +#else + assert(false); +#endif + } else { + free(weight_image); + } + + if (scene->use_gpu) { + cuda_synchronize(); + } + + parallel_cleanup(); +#ifdef __NVCC__ + if (old_device_id != -1) { + checkCuda(cudaSetDevice(old_device_id)); + } +#endif +} + +PYBIND11_MODULE(diffvg, m) { + m.doc() = "Differential Vector Graphics"; + + py::class_>(m, "void_ptr") + .def(py::init()) + .def("as_size_t", &ptr::as_size_t); + py::class_>(m, "float_ptr") + .def(py::init()); + py::class_>(m, "int_ptr") + .def(py::init()); + + py::class_(m, "Vector2f") + .def(py::init()) + .def_readwrite("x", &Vector2f::x) + .def_readwrite("y", &Vector2f::y); + + py::class_(m, "Vector3f") + .def(py::init()) + .def_readwrite("x", &Vector3f::x) + .def_readwrite("y", &Vector3f::y) + .def_readwrite("z", &Vector3f::z); + + py::class_(m, "Vector4f") + .def(py::init()) + .def_readwrite("x", &Vector4f::x) + .def_readwrite("y", &Vector4f::y) + .def_readwrite("z", &Vector4f::z) + .def_readwrite("w", &Vector4f::w); + + py::enum_(m, "ShapeType") + .value("circle", ShapeType::Circle) + .value("ellipse", ShapeType::Ellipse) + .value("path", ShapeType::Path) + .value("rect", ShapeType::Rect); + + py::class_(m, "Circle") + .def(py::init()) + .def("get_ptr", &Circle::get_ptr) + .def_readonly("radius", &Circle::radius) + .def_readonly("center", &Circle::center); + + py::class_(m, "Ellipse") + .def(py::init()) + .def("get_ptr", &Ellipse::get_ptr) + .def_readonly("radius", &Ellipse::radius) + .def_readonly("center", &Ellipse::center); + + py::class_(m, "Path") + .def(py::init, ptr, ptr, int, int, bool, bool>()) + .def("get_ptr", &Path::get_ptr) + .def("has_thickness", &Path::has_thickness) + .def("copy_to", &Path::copy_to) + .def_readonly("num_points", &Path::num_points); + + py::class_(m, "Rect") + .def(py::init()) + .def("get_ptr", &Rect::get_ptr) + .def_readonly("p_min", &Rect::p_min) + .def_readonly("p_max", &Rect::p_max); + + py::enum_(m, "ColorType") + .value("constant", ColorType::Constant) + .value("linear_gradient", ColorType::LinearGradient) + .value("radial_gradient", ColorType::RadialGradient); + + py::class_(m, "Constant") + .def(py::init()) + .def("get_ptr", &Constant::get_ptr) + .def_readonly("color", &Constant::color); + + py::class_(m, "LinearGradient") + .def(py::init, ptr>()) + .def("get_ptr", &LinearGradient::get_ptr) + .def("copy_to", &LinearGradient::copy_to) + .def_readonly("begin", &LinearGradient::begin) + .def_readonly("end", &LinearGradient::end) + .def_readonly("num_stops", &LinearGradient::num_stops); + + py::class_(m, "RadialGradient") + .def(py::init, ptr>()) + .def("get_ptr", &RadialGradient::get_ptr) + .def("copy_to", &RadialGradient::copy_to) + .def_readonly("center", &RadialGradient::center) + .def_readonly("radius", &RadialGradient::radius) + 
.def_readonly("num_stops", &RadialGradient::num_stops); + + py::class_(m, "Shape") + .def(py::init, float>()) + .def("as_circle", &Shape::as_circle) + .def("as_ellipse", &Shape::as_ellipse) + .def("as_path", &Shape::as_path) + .def("as_rect", &Shape::as_rect) + .def_readonly("type", &Shape::type) + .def_readonly("stroke_width", &Shape::stroke_width); + + py::class_(m, "ShapeGroup") + .def(py::init, + int, + ColorType, + ptr, + ColorType, + ptr, + bool, + ptr>()) + .def("fill_color_as_constant", &ShapeGroup::fill_color_as_constant) + .def("fill_color_as_linear_gradient", &ShapeGroup::fill_color_as_linear_gradient) + .def("fill_color_as_radial_gradient", &ShapeGroup::fill_color_as_radial_gradient) + .def("stroke_color_as_constant", &ShapeGroup::stroke_color_as_constant) + .def("stroke_color_as_linear_gradient", &ShapeGroup::stroke_color_as_linear_gradient) + .def("stroke_color_as_radial_gradient", &ShapeGroup::fill_color_as_radial_gradient) + .def("has_fill_color", &ShapeGroup::has_fill_color) + .def("has_stroke_color", &ShapeGroup::has_stroke_color) + .def("copy_to", &ShapeGroup::copy_to) + .def_readonly("fill_color_type", &ShapeGroup::fill_color_type) + .def_readonly("stroke_color_type", &ShapeGroup::stroke_color_type); + + py::enum_(m, "FilterType") + .value("box", FilterType::Box) + .value("tent", FilterType::Tent) + .value("parabolic", FilterType::RadialParabolic) + .value("hann", FilterType::Hann); + + py::class_(m, "Filter") + .def(py::init()); + + py::class_>(m, "Scene") + .def(py::init &, + const std::vector &, + const Filter &, + bool, + int>()) + .def("get_d_shape", &Scene::get_d_shape) + .def("get_d_shape_group", &Scene::get_d_shape_group) + .def("get_d_filter_radius", &Scene::get_d_filter_radius) + .def_readonly("num_shapes", &Scene::num_shapes) + .def_readonly("num_shape_groups", &Scene::num_shape_groups); + + m.def("render", &render, ""); +} diff --git a/diffvg.h b/diffvg.h new file mode 100644 index 0000000..400e4dc --- /dev/null +++ b/diffvg.h @@ -0,0 +1,156 @@ +#pragma once + +#ifdef __NVCC__ + #define DEVICE __device__ __host__ +#else + #define DEVICE +#endif + +#ifndef __NVCC__ + #include + namespace { + inline float fmodf(float a, float b) { + return std::fmod(a, b); + } + inline double fmod(double a, double b) { + return std::fmod(a, b); + } + } + using std::isfinite; +#endif + +#ifndef M_PI +#define M_PI 3.14159265358979323846 +#endif + +#include +#include + +// We use Real for most of the internal computation. +// However, for PyTorch interfaces, Optix Prime and Embree queries +// we use float +using Real = float; + +template +DEVICE +inline T square(const T &x) { + return x * x; +} + +template +DEVICE +inline T cubic(const T &x) { + return x * x * x; +} + +template +DEVICE +inline T clamp(const T &v, const T &lo, const T &hi) { + if (v < lo) return lo; + else if (v > hi) return hi; + else return v; +} + +DEVICE +inline int modulo(int a, int b) { + auto r = a % b; + return (r < 0) ? r+b : r; +} + +DEVICE +inline float modulo(float a, float b) { + float r = ::fmodf(a, b); + return (r < 0.0f) ? r+b : r; +} + +DEVICE +inline double modulo(double a, double b) { + double r = ::fmod(a, b); + return (r < 0.0) ? r+b : r; +} + +template +DEVICE +inline T max(const T &a, const T &b) { + return a > b ? a : b; +} + +template +DEVICE +inline T min(const T &a, const T &b) { + return a < b ? 
a : b;
+}
+
+/// Return ceil(x/y) for integers x and y
+inline int idiv_ceil(int x, int y) {
+    return (x + y-1) / y;
+}
+
+template <typename T>
+DEVICE
+inline void swap_(T &a, T &b) {
+    T tmp = a;
+    a = b;
+    b = tmp;
+}
+
+inline double log2(double x) {
+    return log(x) / log(Real(2));
+}
+
+template <typename T>
+DEVICE
+inline T safe_acos(const T &x) {
+    if (x >= 1) return T(0);
+    else if(x <= -1) return T(M_PI);
+    return acos(x);
+}
+
+// For Morton code computation. This can be made faster.
+DEVICE
+inline uint32_t expand_bits(uint32_t x) {
+    // Insert one zero after every bit given a 10-bit integer
+    constexpr uint64_t mask = 0x1u;
+    // We start from the LSB (bit 0)
+    auto result = (x & (mask << 0u));
+    result |= ((x & (mask << 1u)) << 1u);
+    result |= ((x & (mask << 2u)) << 2u);
+    result |= ((x & (mask << 3u)) << 3u);
+    result |= ((x & (mask << 4u)) << 4u);
+    result |= ((x & (mask << 5u)) << 5u);
+    result |= ((x & (mask << 6u)) << 6u);
+    result |= ((x & (mask << 7u)) << 7u);
+    result |= ((x & (mask << 8u)) << 8u);
+    result |= ((x & (mask << 9u)) << 9u);
+    return result;
+}
+
+// DEVICE
+// inline int clz(uint64_t x) {
+// #ifdef __CUDA_ARCH__
+//     return __clzll(x);
+// #else
+//     // TODO: use _BitScanReverse in windows
+//     return x == 0 ? 64 : __builtin_clzll(x);
+// #endif
+// }
+
+// DEVICE
+// inline int ffs(uint8_t x) {
+// #ifdef __CUDA_ARCH__
+//     return __ffs(x);
+// #else
+//     // TODO: use _BitScanReverse in windows
+//     return __builtin_ffs(x);
+// #endif
+// }
+
+// DEVICE
+// inline int popc(uint8_t x) {
+// #ifdef __CUDA_ARCH__
+//     return __popc(x);
+// #else
+//     // TODO: use _popcnt in windows
+//     return __builtin_popcount(x);
+// #endif
+// }
diff --git a/edge_query.h b/edge_query.h
new file mode 100644
index 0000000..57f233a
--- /dev/null
+++ b/edge_query.h
@@ -0,0 +1,7 @@
+#pragma once
+
+struct EdgeQuery {
+    int shape_group_id;
+    int shape_id;
+    bool hit; // Do we hit the specified shape_group_id & shape_id?
+};
diff --git a/filter.h b/filter.h
new file mode 100644
index 0000000..2dd0b62
--- /dev/null
+++ b/filter.h
@@ -0,0 +1,106 @@
+#pragma once
+
+#include "diffvg.h"
+#include "atomic.h"
+
+enum class FilterType {
+    Box,
+    Tent,
+    RadialParabolic, // 4/3(1 - (d/r))
+    Hann // https://en.wikipedia.org/wiki/Window_function#Hann_and_Hamming_windows
+};
+
+struct Filter {
+    FilterType type;
+    float radius;
+};
+
+struct DFilter {
+    float radius;
+};
+
+DEVICE
+inline
+float compute_filter_weight(const Filter &filter,
+                            float dx,
+                            float dy) {
+    if (fabs(dx) > filter.radius || fabs(dy) > filter.radius) {
+        return 0;
+    }
+    if (filter.type == FilterType::Box) {
+        return 1.f / square(2 * filter.radius);
+    } else if (filter.type == FilterType::Tent) {
+        return (filter.radius - fabs(dx)) * (filter.radius - fabs(dy)) /
+               square(square(filter.radius));
+    } else if (filter.type == FilterType::RadialParabolic) {
+        return (4.f / 3.f) * (1 - square(dx / filter.radius)) *
+               (4.f / 3.f) * (1 - square(dy / filter.radius));
+    } else {
+        assert(filter.type == FilterType::Hann);
+        // normalize dx, dy to [0, 1]
+        auto ndx = (dx / (2*filter.radius)) + 0.5f;
+        auto ndy = (dy / (2*filter.radius)) + 0.5f;
+        // the normalization factor is R^2
+        return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) *
+               0.5f * (1.f - cos(float(2 * M_PI) * ndy)) /
+               square(filter.radius);
+    }
+}
+
+DEVICE
+inline
+void d_compute_filter_weight(const Filter &filter,
+                             float dx,
+                             float dy,
+                             float d_return,
+                             DFilter *d_filter) {
+    if (filter.type == FilterType::Box) {
+        // return 1.f / square(2 * filter.radius);
+        atomic_add(d_filter->radius,
+            d_return * (-2) * 2 * filter.radius / cubic(2 * filter.radius));
+    } else if (filter.type == FilterType::Tent) {
+        // return (filter.radius - fabs(dx)) * (filter.radius - fabs(dy)) /
+        //        square(square(filter.radius));
+        auto fx = filter.radius - fabs(dx);
+        auto fy = filter.radius - fabs(dy);
+        auto norm = 1 / square(filter.radius);
+        auto d_fx = d_return * fy * norm;
+        auto d_fy = d_return * fx * norm;
+        auto d_norm = d_return * fx * fy;
+        atomic_add(d_filter->radius,
+            d_fx + d_fy + (-4) * d_norm / pow(filter.radius, 5));
+    } else if (filter.type == FilterType::RadialParabolic) {
+        // return (4.f / 3.f) * (1 - square(dx / filter.radius)) *
+        //        (4.f / 3.f) * (1 - square(dy / filter.radius));
+        // auto d_square_x = d_return * (-4.f / 3.f);
+        // auto d_square_y = d_return * (-4.f / 3.f);
+        auto r3 = filter.radius * filter.radius * filter.radius;
+        auto d_radius = -(2 * square(dx) + 2 * square(dy)) / r3;
+        atomic_add(d_filter->radius, d_radius);
+    } else {
+        assert(filter.type == FilterType::Hann);
+        // // normalize dx, dy to [0, 1]
+        // auto ndx = (dx / (2*filter.radius)) + 0.5f;
+        // auto ndy = (dy / (2*filter.radius)) + 0.5f;
+        // // the normalization factor is R^2
+        // return 0.5f * (1.f - cos(float(2 * M_PI) * ndx)) *
+        //        0.5f * (1.f - cos(float(2 * M_PI) * ndy)) /
+        //        square(filter.radius);
+
+        // normalize dx, dy to [0, 1]
+        auto ndx = (dx / (2*filter.radius)) + 0.5f;
+        auto ndy = (dy / (2*filter.radius)) + 0.5f;
+        auto fx = 0.5f * (1.f - cos(float(2*M_PI) * ndx));
+        auto fy = 0.5f * (1.f - cos(float(2*M_PI) * ndy));
+        auto norm = 1 / square(filter.radius);
+        auto d_fx = d_return * fy * norm;
+        auto d_fy = d_return * fx * norm;
+        auto d_norm = d_return * fx * fy;
+        auto d_ndx = d_fx * 0.5f * sin(float(2*M_PI) * ndx) * float(2*M_PI);
+        auto d_ndy = d_fy * 0.5f * sin(float(2*M_PI) * ndy) * float(2*M_PI);
+        atomic_add(d_filter->radius,
+            d_ndx * (-2*dx / square(2*filter.radius)) +
+            d_ndy * (-2*dy /
square(2*filter.radius)) + + (-2) * d_norm / cubic(filter.radius)); + } +} diff --git a/matrix.h b/matrix.h new file mode 100644 index 0000000..b53f484 --- /dev/null +++ b/matrix.h @@ -0,0 +1,544 @@ +#pragma once + +#include "diffvg.h" +#include "vector.h" +#include + +template +struct TMatrix3x3 { + DEVICE + TMatrix3x3() { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + data[i][j] = T(0); + } + } + } + + template + DEVICE + TMatrix3x3(T2 *arr) { + data[0][0] = arr[0]; + data[0][1] = arr[1]; + data[0][2] = arr[2]; + data[1][0] = arr[3]; + data[1][1] = arr[4]; + data[1][2] = arr[5]; + data[2][0] = arr[6]; + data[2][1] = arr[7]; + data[2][2] = arr[8]; + } + DEVICE + TMatrix3x3(T v00, T v01, T v02, + T v10, T v11, T v12, + T v20, T v21, T v22) { + data[0][0] = v00; + data[0][1] = v01; + data[0][2] = v02; + data[1][0] = v10; + data[1][1] = v11; + data[1][2] = v12; + data[2][0] = v20; + data[2][1] = v21; + data[2][2] = v22; + } + + DEVICE + const T& operator()(int i, int j) const { + return data[i][j]; + } + DEVICE + T& operator()(int i, int j) { + return data[i][j]; + } + DEVICE + static TMatrix3x3 identity() { + TMatrix3x3 m(1, 0, 0, + 0, 1, 0, + 0, 0, 1); + return m; + } + + T data[3][3]; +}; + +using Matrix3x3 = TMatrix3x3; +using Matrix3x3f = TMatrix3x3; + +template +struct TMatrix4x4 { + DEVICE TMatrix4x4() { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + data[i][j] = T(0); + } + } + } + + template + DEVICE TMatrix4x4(const T2 *arr) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + data[i][j] = (T)arr[i * 4 + j]; + } + } + } + + template + DEVICE TMatrix4x4(const TMatrix4x4 &m) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + data[i][j] = T(m.data[i][j]); + } + } + } + + template + DEVICE TMatrix4x4(T2 v00, T2 v01, T2 v02, T2 v03, + T2 v10, T2 v11, T2 v12, T2 v13, + T2 v20, T2 v21, T2 v22, T2 v23, + T2 v30, T2 v31, T2 v32, T2 v33) { + data[0][0] = (T)v00; + data[0][1] = (T)v01; + data[0][2] = (T)v02; + data[0][3] = (T)v03; + data[1][0] = (T)v10; + data[1][1] = (T)v11; + data[1][2] = (T)v12; + data[1][3] = (T)v13; + data[2][0] = (T)v20; + data[2][1] = (T)v21; + data[2][2] = (T)v22; + data[2][3] = (T)v23; + data[3][0] = (T)v30; + data[3][1] = (T)v31; + data[3][2] = (T)v32; + data[3][3] = (T)v33; + } + + DEVICE + const T& operator()(int i, int j) const { + return data[i][j]; + } + + DEVICE + T& operator()(int i, int j) { + return data[i][j]; + } + + DEVICE + static TMatrix4x4 identity() { + TMatrix4x4 m(1, 0, 0, 0, + 0, 1, 0, 0, + 0, 0, 1, 0, + 0, 0, 0, 1); + return m; + } + + T data[4][4]; +}; + +using Matrix4x4 = TMatrix4x4; +using Matrix4x4f = TMatrix4x4; + +template +DEVICE +inline auto operator+(const TMatrix3x3 &m0, const TMatrix3x3 &m1) -> TMatrix3x3 { + TMatrix3x3 m; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m(i, j) = m0(i, j) + m1(i, j); + } + } + return m; +} + +template +DEVICE +inline auto operator-(const TMatrix3x3 &m0, const TMatrix3x3 &m1) -> TMatrix3x3 { + TMatrix3x3 m; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m(i, j) = m0(i, j) - m1(i, j); + } + } + return m; +} + +template +DEVICE +inline auto operator*(const TMatrix3x3 &m0, const TMatrix3x3 &m1) -> TMatrix3x3 { + TMatrix3x3 ret; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + ret(i, j) = T(0); + for (int k = 0; k < 3; k++) { + ret(i, j) += m0(i, k) * m1(k, j); + } + } + } + return ret; +} + +template +DEVICE +inline auto operator*(const TVector3 &v, const TMatrix3x3 &m) -> TVector3 { 
+ TVector3 ret; + for (int i = 0; i < 3; i++) { + ret[i] = T(0); + for (int j = 0; j < 3; j++) { + ret[i] += v[j] * m(j, i); + } + } + return ret; +} + +template +DEVICE +inline auto operator*(const TMatrix3x3 &m, const TVector3 &v) -> TVector3 { + TVector3 ret; + for (int i = 0; i < 3; i++) { + ret[i] = 0.f; + for (int j = 0; j < 3; j++) { + ret[i] += m(i, j) * v[j]; + } + } + return ret; +} + +template +DEVICE +inline auto inverse(const TMatrix3x3 &m) -> TMatrix3x3 { + // computes the inverse of a matrix m + auto det = m(0, 0) * (m(1, 1) * m(2, 2) - m(2, 1) * m(1, 2)) - + m(0, 1) * (m(1, 0) * m(2, 2) - m(1, 2) * m(2, 0)) + + m(0, 2) * (m(1, 0) * m(2, 1) - m(1, 1) * m(2, 0)); + + auto invdet = 1 / det; + + auto m_inv = TMatrix3x3{}; + m_inv(0, 0) = (m(1, 1) * m(2, 2) - m(2, 1) * m(1, 2)) * invdet; + m_inv(0, 1) = (m(0, 2) * m(2, 1) - m(0, 1) * m(2, 2)) * invdet; + m_inv(0, 2) = (m(0, 1) * m(1, 2) - m(0, 2) * m(1, 1)) * invdet; + m_inv(1, 0) = (m(1, 2) * m(2, 0) - m(1, 0) * m(2, 2)) * invdet; + m_inv(1, 1) = (m(0, 0) * m(2, 2) - m(0, 2) * m(2, 0)) * invdet; + m_inv(1, 2) = (m(1, 0) * m(0, 2) - m(0, 0) * m(1, 2)) * invdet; + m_inv(2, 0) = (m(1, 0) * m(2, 1) - m(2, 0) * m(1, 1)) * invdet; + m_inv(2, 1) = (m(2, 0) * m(0, 1) - m(0, 0) * m(2, 1)) * invdet; + m_inv(2, 2) = (m(0, 0) * m(1, 1) - m(1, 0) * m(0, 1)) * invdet; + return m_inv; +} + +template +DEVICE +inline auto operator+(const TMatrix4x4 &m0, const TMatrix4x4 &m1) -> TMatrix4x4 { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m(i, j) = m0(i, j) + m1(i, j); + } + } + return m; +} + +template +DEVICE +TMatrix3x3 transpose(const TMatrix3x3 &m) { + return TMatrix3x3(m(0, 0), m(1, 0), m(2, 0), + m(0, 1), m(1, 1), m(2, 1), + m(0, 2), m(1, 2), m(2, 2)); +} + +template +DEVICE +TMatrix4x4 transpose(const TMatrix4x4 &m) { + return TMatrix4x4(m(0, 0), m(1, 0), m(2, 0), m(3, 0), + m(0, 1), m(1, 1), m(2, 1), m(3, 1), + m(0, 2), m(1, 2), m(2, 2), m(3, 2), + m(0, 3), m(1, 3), m(2, 3), m(3, 3)); +} + +template +DEVICE +inline TMatrix3x3 operator-(const TMatrix3x3 &m0) { + TMatrix3x3 m; + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m(i, j) = -m0(i, j); + } + } + return m; +} + +template +DEVICE +inline TMatrix4x4 operator-(const TMatrix4x4 &m0) { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m(i, j) = -m0(i, j); + } + } + return m; +} + +template +DEVICE +inline TMatrix4x4 operator-(const TMatrix4x4 &m0, const TMatrix4x4 &m1) { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m(i, j) = m0(i, j) - m1(i, j); + } + } + return m; +} + +template +DEVICE +inline TMatrix3x3& operator+=(TMatrix3x3 &m0, const TMatrix3x3 &m1) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + m0(i, j) += m1(i, j); + } + } + return m0; +} + +template +DEVICE +inline TMatrix4x4& operator+=(TMatrix4x4 &m0, const TMatrix4x4 &m1) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m0(i, j) += m1(i, j); + } + } + return m0; +} + +template +DEVICE +inline TMatrix4x4& operator-=(TMatrix4x4 &m0, const TMatrix4x4 &m1) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + m0(i, j) -= m1(i, j); + } + } + return m0; +} + +template +DEVICE +inline TMatrix4x4 operator*(const TMatrix4x4 &m0, const TMatrix4x4 &m1) { + TMatrix4x4 m; + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + for (int k = 0; k < 4; k++) { + m(i, j) += m0(i, k) * m1(k, j); + } + } + } + return m; +} + +template +DEVICE +TMatrix4x4 
inverse(const TMatrix4x4 &m) { + // https://stackoverflow.com/questions/1148309/inverting-a-4x4-matrix + TMatrix4x4 inv; + + inv(0, 0) = m(1, 1) * m(2, 2) * m(3, 3) - + m(1, 1) * m(2, 3) * m(3, 2) - + m(2, 1) * m(1, 2) * m(3, 3) + + m(2, 1) * m(1, 3) * m(3, 2) + + m(3, 1) * m(1, 2) * m(2, 3) - + m(3, 1) * m(1, 3) * m(2, 2); + + inv(1, 0) = -m(1, 0) * m(2, 2) * m(3, 3) + + m(1, 0) * m(2, 3) * m(3, 2) + + m(2, 0) * m(1, 2) * m(3, 3) - + m(2, 0) * m(1, 3) * m(3, 2) - + m(3, 0) * m(1, 2) * m(2, 3) + + m(3, 0) * m(1, 3) * m(2, 2); + + inv(2, 0) = m(1, 0) * m(2, 1) * m(3, 3) - + m(1, 0) * m(2, 3) * m(3, 1) - + m(2, 0) * m(1, 1) * m(3, 3) + + m(2, 0) * m(1, 3) * m(3, 1) + + m(3, 0) * m(1, 1) * m(2, 3) - + m(3, 0) * m(1, 3) * m(2, 1); + + inv(3, 0) = -m(1, 0) * m(2, 1) * m(3, 2) + + m(1, 0) * m(2, 2) * m(3, 1) + + m(2, 0) * m(1, 1) * m(3, 2) - + m(2, 0) * m(1, 2) * m(3, 1) - + m(3, 0) * m(1, 1) * m(2, 2) + + m(3, 0) * m(1, 2) * m(2, 1); + + inv(0, 1) = -m(0, 1) * m(2, 2) * m(3, 3) + + m(0, 1) * m(2, 3) * m(3, 2) + + m(2, 1) * m(0, 2) * m(3, 3) - + m(2, 1) * m(0, 3) * m(3, 2) - + m(3, 1) * m(0, 2) * m(2, 3) + + m(3, 1) * m(0, 3) * m(2, 2); + + inv(1, 1) = m(0, 0) * m(2, 2) * m(3, 3) - + m(0, 0) * m(2, 3) * m(3, 2) - + m(2, 0) * m(0, 2) * m(3, 3) + + m(2, 0) * m(0, 3) * m(3, 2) + + m(3, 0) * m(0, 2) * m(2, 3) - + m(3, 0) * m(0, 3) * m(2, 2); + + inv(2, 1) = -m(0, 0) * m(2, 1) * m(3, 3) + + m(0, 0) * m(2, 3) * m(3, 1) + + m(2, 0) * m(0, 1) * m(3, 3) - + m(2, 0) * m(0, 3) * m(3, 1) - + m(3, 0) * m(0, 1) * m(2, 3) + + m(3, 0) * m(0, 3) * m(2, 1); + + inv(3, 1) = m(0, 0) * m(2, 1) * m(3, 2) - + m(0, 0) * m(2, 2) * m(3, 1) - + m(2, 0) * m(0, 1) * m(3, 2) + + m(2, 0) * m(0, 2) * m(3, 1) + + m(3, 0) * m(0, 1) * m(2, 2) - + m(3, 0) * m(0, 2) * m(2, 1); + + inv(0, 2) = m(0, 1) * m(1, 2) * m(3, 3) - + m(0, 1) * m(1, 3) * m(3, 2) - + m(1, 1) * m(0, 2) * m(3, 3) + + m(1, 1) * m(0, 3) * m(3, 2) + + m(3, 1) * m(0, 2) * m(1, 3) - + m(3, 1) * m(0, 3) * m(1, 2); + + inv(1, 2) = -m(0, 0) * m(1, 2) * m(3, 3) + + m(0, 0) * m(1, 3) * m(3, 2) + + m(1, 0) * m(0, 2) * m(3, 3) - + m(1, 0) * m(0, 3) * m(3, 2) - + m(3, 0) * m(0, 2) * m(1, 3) + + m(3, 0) * m(0, 3) * m(1, 2); + + inv(2, 2) = m(0, 0) * m(1, 1) * m(3, 3) - + m(0, 0) * m(1, 3) * m(3, 1) - + m(1, 0) * m(0, 1) * m(3, 3) + + m(1, 0) * m(0, 3) * m(3, 1) + + m(3, 0) * m(0, 1) * m(1, 3) - + m(3, 0) * m(0, 3) * m(1, 1); + + inv(3, 2) = -m(0, 0) * m(1, 1) * m(3, 2) + + m(0, 0) * m(1, 2) * m(3, 1) + + m(1, 0) * m(0, 1) * m(3, 2) - + m(1, 0) * m(0, 2) * m(3, 1) - + m(3, 0) * m(0, 1) * m(1, 2) + + m(3, 0) * m(0, 2) * m(1, 1); + + inv(0, 3) = -m(0, 1) * m(1, 2) * m(2, 3) + + m(0, 1) * m(1, 3) * m(2, 2) + + m(1, 1) * m(0, 2) * m(2, 3) - + m(1, 1) * m(0, 3) * m(2, 2) - + m(2, 1) * m(0, 2) * m(1, 3) + + m(2, 1) * m(0, 3) * m(1, 2); + + inv(1, 3) = m(0, 0) * m(1, 2) * m(2, 3) - + m(0, 0) * m(1, 3) * m(2, 2) - + m(1, 0) * m(0, 2) * m(2, 3) + + m(1, 0) * m(0, 3) * m(2, 2) + + m(2, 0) * m(0, 2) * m(1, 3) - + m(2, 0) * m(0, 3) * m(1, 2); + + inv(2, 3) = -m(0, 0) * m(1, 1) * m(2, 3) + + m(0, 0) * m(1, 3) * m(2, 1) + + m(1, 0) * m(0, 1) * m(2, 3) - + m(1, 0) * m(0, 3) * m(2, 1) - + m(2, 0) * m(0, 1) * m(1, 3) + + m(2, 0) * m(0, 3) * m(1, 1); + + inv(3, 3) = m(0, 0) * m(1, 1) * m(2, 2) - + m(0, 0) * m(1, 2) * m(2, 1) - + m(1, 0) * m(0, 1) * m(2, 2) + + m(1, 0) * m(0, 2) * m(2, 1) + + m(2, 0) * m(0, 1) * m(1, 2) - + m(2, 0) * m(0, 2) * m(1, 1); + + auto det = m(0, 0) * inv(0, 0) + + m(0, 1) * inv(1, 0) + + m(0, 2) * inv(2, 0) + + m(0, 3) * inv(3, 0); + + if (det == 0) { + return 
TMatrix4x4{}; + } + + auto inv_det = 1.0 / det; + + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + inv(i, j) *= inv_det; + } + } + + return inv; +} + +template +inline std::ostream& operator<<(std::ostream &os, const TMatrix3x3 &m) { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + os << m(i, j) << " "; + } + os << std::endl; + } + return os; +} + +template +inline std::ostream& operator<<(std::ostream &os, const TMatrix4x4 &m) { + for (int i = 0; i < 4; i++) { + for (int j = 0; j < 4; j++) { + os << m(i, j) << " "; + } + os << std::endl; + } + return os; +} + +template +DEVICE +TVector2 xform_pt(const TMatrix3x3 &m, const TVector2 &pt) { + TVector3 t{m(0, 0) * pt[0] + m(0, 1) * pt[1] + m(0, 2), + m(1, 0) * pt[0] + m(1, 1) * pt[1] + m(1, 2), + m(2, 0) * pt[0] + m(2, 1) * pt[1] + m(2, 2)}; + return TVector2{t[0] / t[2], t[1] / t[2]}; +} + +template +DEVICE +void d_xform_pt(const TMatrix3x3 &m, const TVector2 &pt, + const TVector2 &d_out, + TMatrix3x3 &d_m, + TVector2 &d_pt) { + TVector3 t{m(0, 0) * pt[0] + m(0, 1) * pt[1] + m(0, 2), + m(1, 0) * pt[0] + m(1, 1) * pt[1] + m(1, 2), + m(2, 0) * pt[0] + m(2, 1) * pt[1] + m(2, 2)}; + auto out = TVector2{t[0] / t[2], t[1] / t[2]}; + TVector3 d_t{d_out[0] / t[2], + d_out[1] / t[2], + -(d_out[0] * out[0] + d_out[1] * out[1]) / t[2]}; + d_m(0, 0) += d_t[0] * pt[0]; + d_m(0, 1) += d_t[0] * pt[1]; + d_m(0, 2) += d_t[0]; + d_m(1, 0) += d_t[1] * pt[0]; + d_m(1, 1) += d_t[1] * pt[1]; + d_m(1, 2) += d_t[1]; + d_m(2, 0) += d_t[2] * pt[0]; + d_m(2, 1) += d_t[2] * pt[1]; + d_m(2, 2) += d_t[2]; + d_pt[0] += d_t[0] * m(0, 0) + d_t[1] * m(1, 0) + d_t[2] * m(2, 0); + d_pt[1] += d_t[0] * m(0, 1) + d_t[1] * m(1, 1) + d_t[2] * m(2, 1); +} + +template +DEVICE +TVector2 xform_normal(const TMatrix3x3 &m_inv, const TVector2 &n) { + return normalize(TVector2{m_inv(0, 0) * n[0] + m_inv(1, 0) * n[1], + m_inv(0, 1) * n[0] + m_inv(1, 1) * n[1]}); +} diff --git a/parallel.cpp b/parallel.cpp new file mode 100644 index 0000000..365fc5b --- /dev/null +++ b/parallel.cpp @@ -0,0 +1,273 @@ +#include "parallel.h" +#include +#include +#include +#include +#include + +// From https://github.com/mmp/pbrt-v3/blob/master/src/core/parallel.cpp + +static std::vector threads; +static bool shutdownThreads = false; +struct ParallelForLoop; +static ParallelForLoop *workList = nullptr; +static std::mutex workListMutex; + +struct ParallelForLoop { + ParallelForLoop(std::function func1D, int64_t maxIndex, int chunkSize) + : func1D(std::move(func1D)), maxIndex(maxIndex), chunkSize(chunkSize) { + } + ParallelForLoop(const std::function &f, const Vector2i count) + : func2D(f), maxIndex(count[0] * count[1]), chunkSize(1) { + nX = count[0]; + } + + std::function func1D; + std::function func2D; + const int64_t maxIndex; + const int chunkSize; + int64_t nextIndex = 0; + int activeWorkers = 0; + ParallelForLoop *next = nullptr; + int nX = -1; + + bool Finished() const { + return nextIndex >= maxIndex && activeWorkers == 0; + } +}; + +void Barrier::Wait() { + std::unique_lock lock(mutex); + assert(count > 0); + if (--count == 0) { + // This is the last thread to reach the barrier; wake up all of the + // other ones before exiting. + cv.notify_all(); + } else { + // Otherwise there are still threads that haven't reached it. Give + // up the lock and wait to be notified. 
+ cv.wait(lock, [this] { return count == 0; }); + } +} + +static std::condition_variable workListCondition; + +static void worker_thread_func(const int tIndex, std::shared_ptr barrier) { + ThreadIndex = tIndex; + + // The main thread sets up a barrier so that it can be sure that all + // workers have called ProfilerWorkerThreadInit() before it continues + // (and actually starts the profiling system). + barrier->Wait(); + + // Release our reference to the Barrier so that it's freed once all of + // the threads have cleared it. + barrier.reset(); + + std::unique_lock lock(workListMutex); + while (!shutdownThreads) { + if (!workList) { + // Sleep until there are more tasks to run + workListCondition.wait(lock); + } else { + // Get work from _workList_ and run loop iterations + ParallelForLoop &loop = *workList; + + // Run a chunk of loop iterations for _loop_ + + // Find the set of loop iterations to run next + int64_t indexStart = loop.nextIndex; + int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex); + + // Update _loop_ to reflect iterations this thread will run + loop.nextIndex = indexEnd; + if (loop.nextIndex == loop.maxIndex) + workList = loop.next; + loop.activeWorkers++; + + // Run loop indices in _[indexStart, indexEnd)_ + lock.unlock(); + for (int64_t index = indexStart; index < indexEnd; ++index) { + if (loop.func1D) { + loop.func1D(index); + } + // Handle other types of loops + else { + assert(loop.func2D != nullptr); + loop.func2D(Vector2i{int(index % loop.nX), + int(index / loop.nX)}); + } + } + lock.lock(); + + // Update _loop_ to reflect completion of iterations + loop.activeWorkers--; + if (loop.Finished()) { + workListCondition.notify_all(); + } + } + } +} + +void parallel_for_host(const std::function &func, + int64_t count, + int chunkSize) { + // Run iterations immediately if not using threads or if _count_ is small + if (threads.empty() || count < chunkSize) { + for (int64_t i = 0; i < count; ++i) { + func(i); + } + return; + } + + // Create and enqueue _ParallelForLoop_ for this loop + ParallelForLoop loop(func, count, chunkSize); + workListMutex.lock(); + loop.next = workList; + workList = &loop; + workListMutex.unlock(); + + // Notify worker threads of work to be done + std::unique_lock lock(workListMutex); + workListCondition.notify_all(); + + // Help out with parallel loop iterations in the current thread + while (!loop.Finished()) { + // Run a chunk of loop iterations for _loop_ + + // Find the set of loop iterations to run next + int64_t indexStart = loop.nextIndex; + int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex); + + // Update _loop_ to reflect iterations this thread will run + loop.nextIndex = indexEnd; + if (loop.nextIndex == loop.maxIndex) { + workList = loop.next; + } + loop.activeWorkers++; + + // Run loop indices in _[indexStart, indexEnd)_ + lock.unlock(); + for (int64_t index = indexStart; index < indexEnd; ++index) { + if (loop.func1D) { + loop.func1D(index); + } + // Handle other types of loops + else { + assert(loop.func2D != nullptr); + loop.func2D(Vector2i{int(index % loop.nX), + int(index / loop.nX)}); + } + } + lock.lock(); + + // Update _loop_ to reflect completion of iterations + loop.activeWorkers--; + } +} + +thread_local int ThreadIndex; + +void parallel_for_host( + std::function func, const Vector2i count) { + // Launch worker threads if needed + if (threads.empty() || count.x * count.y <= 1) { + for (int y = 0; y < count.y; ++y) { + for (int x = 0; x < count.x; ++x) { + func(Vector2i{x, y}); + } + 
} + return; + } + + ParallelForLoop loop(std::move(func), count); + { + std::lock_guard lock(workListMutex); + loop.next = workList; + workList = &loop; + } + + std::unique_lock lock(workListMutex); + workListCondition.notify_all(); + + // Help out with parallel loop iterations in the current thread + while (!loop.Finished()) { + // Run a chunk of loop iterations for _loop_ + + // Find the set of loop iterations to run next + int64_t indexStart = loop.nextIndex; + int64_t indexEnd = std::min(indexStart + loop.chunkSize, loop.maxIndex); + + // Update _loop_ to reflect iterations this thread will run + loop.nextIndex = indexEnd; + if (loop.nextIndex == loop.maxIndex) { + workList = loop.next; + } + loop.activeWorkers++; + + // Run loop indices in _[indexStart, indexEnd)_ + lock.unlock(); + for (int64_t index = indexStart; index < indexEnd; ++index) { + if (loop.func1D) { + loop.func1D(index); + } + // Handle other types of loops + else { + assert(loop.func2D != nullptr); + loop.func2D(Vector2i{int(index % loop.nX), + int(index / loop.nX)}); + } + } + lock.lock(); + + // Update _loop_ to reflect completion of iterations + loop.activeWorkers--; + } +} + +int num_system_cores() { + // return 1; + int ret = std::thread::hardware_concurrency(); + if (ret == 0) { + return 16; + } + return ret; +} + +void parallel_init() { + assert(threads.size() == 0); + int nThreads = num_system_cores(); + ThreadIndex = 0; + + // Create a barrier so that we can be sure all worker threads get past + // their call to ProfilerWorkerThreadInit() before we return from this + // function. In turn, we can be sure that the profiling system isn't + // started until after all worker threads have done that. + std::shared_ptr barrier = std::make_shared(nThreads); + + // Launch one fewer worker thread than the total number we want doing + // work, since the main thread helps out, too. 
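+    // Workers get ThreadIndex i + 1; the main thread keeps ThreadIndex 0 and
+    // also executes loop chunks itself inside parallel_for_host.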
+ for (int i = 0; i < nThreads - 1; ++i) { + threads.push_back(std::thread(worker_thread_func, i + 1, barrier)); + } + + barrier->Wait(); +} + +void parallel_cleanup() { + if (threads.empty()) { + return; + } + + { + std::lock_guard lock(workListMutex); + shutdownThreads = true; + workListCondition.notify_all(); + } + + for (std::thread &thread : threads) { + thread.join(); + } + threads.erase(threads.begin(), threads.end()); + shutdownThreads = false; +} diff --git a/parallel.h b/parallel.h new file mode 100644 index 0000000..b7f9c71 --- /dev/null +++ b/parallel.h @@ -0,0 +1,91 @@ +#pragma once + +#include "vector.h" + +#include +#include +#include +#include +#include +#include +#include +// From https://github.com/mmp/pbrt-v3/blob/master/src/core/parallel.h + +class Barrier { + public: + Barrier(int count) : count(count) { assert(count > 0); } + ~Barrier() { assert(count == 0); } + void Wait(); + + private: + std::mutex mutex; + std::condition_variable cv; + int count; +}; + +void parallel_for_host(const std::function &func, + int64_t count, + int chunkSize = 1); +extern thread_local int ThreadIndex; +void parallel_for_host( + std::function func, const Vector2i count); +int num_system_cores(); + +void parallel_init(); +void parallel_cleanup(); + +#ifdef __CUDACC__ +template +__global__ void parallel_for_device_kernel(T functor, int count) { + auto idx = threadIdx.x + blockIdx.x * blockDim.x; + if (idx >= count) { + return; + } + functor(idx); +} +template +inline void parallel_for_device(T functor, + int count, + int work_per_thread = 256) { + if (count <= 0) { + return; + } + auto block_size = work_per_thread; + auto block_count = idiv_ceil(count, block_size); + parallel_for_device_kernel<<>>(functor, count); +} +#endif + +template +inline void parallel_for(T functor, + int count, + bool use_gpu, + int work_per_thread = -1) { + if (work_per_thread == -1) { + work_per_thread = use_gpu ? 
64 : 256; + } + if (count <= 0) { + return; + } + if (use_gpu) { +#ifdef __CUDACC__ + auto block_size = work_per_thread; + auto block_count = idiv_ceil(count, block_size); + parallel_for_device_kernel<<>>(functor, count); +#else + throw std::runtime_error("diffvg not compiled with GPU"); + assert(false); +#endif + } else { + auto num_threads = idiv_ceil(count, work_per_thread); + parallel_for_host([&](int thread_index) { + auto id_offset = work_per_thread * thread_index; + auto work_end = std::min(id_offset + work_per_thread, count); + for (int work_id = id_offset; work_id < work_end; work_id++) { + auto idx = work_id; + assert(idx < count); + functor(idx); + } + }, num_threads); + } +} diff --git a/pcg.h b/pcg.h new file mode 100644 index 0000000..55859a1 --- /dev/null +++ b/pcg.h @@ -0,0 +1,40 @@ +#pragma once + +#include "diffvg.h" + +// http://www.pcg-random.org/download.html +struct pcg32_state { + uint64_t state; + uint64_t inc; +}; + +DEVICE inline uint32_t next_pcg32(pcg32_state *rng) { + uint64_t oldstate = rng->state; + // Advance internal state + rng->state = oldstate * 6364136223846793005ULL + (rng->inc|1); + // Calculate output function (XSH RR), uses old state for max ILP + uint32_t xorshifted = ((oldstate >> 18u) ^ oldstate) >> 27u; + uint32_t rot = oldstate >> 59u; + return (xorshifted >> rot) | (xorshifted << ((-rot) & 31)); +} + +// https://github.com/wjakob/pcg32/blob/master/pcg32.h +DEVICE inline float next_pcg32_float(pcg32_state *rng) { + union { + uint32_t u; + float f; + } x; + x.u = (next_pcg32(rng) >> 9) | 0x3f800000u; + return x.f - 1.0f; +} + +// Initialize each pixel with a PCG rng with a different stream +DEVICE inline pcg32_state init_pcg32(int idx, uint64_t seed) { + pcg32_state state; + state.state = 0U; + state.inc = (((uint64_t)idx + 1) << 1u) | 1u; + next_pcg32(&state); + state.state += (0x853c49e6748fea9bULL + seed); + next_pcg32(&state); + return state; +} diff --git a/ptr.h b/ptr.h new file mode 100644 index 0000000..f3f8e43 --- /dev/null +++ b/ptr.h @@ -0,0 +1,23 @@ +#pragma once + +#include + +/** + * Python doesn't have a pointer type, therefore we create a pointer wrapper + * see https://stackoverflow.com/questions/48982143/returning-and-passing-around-raw-pod-pointers-arrays-with-python-c-and-pyb?rq=1 + */ +template +class ptr { +public: + ptr() : p(nullptr) {} + ptr(T* p) : p(p) {} + ptr(std::size_t p) : p((T*)p) {} + ptr(const ptr& other) : ptr(other.p) {} + T* operator->() const { return p; } + T* get() const { return p; } + void destroy() { delete p; } + bool is_null() const { return p == nullptr; } + size_t as_size_t() const {return (size_t)p;} +private: + T* p; +}; diff --git a/pydiffvg/__init__.py b/pydiffvg/__init__.py new file mode 100644 index 0000000..24f3dd1 --- /dev/null +++ b/pydiffvg/__init__.py @@ -0,0 +1,9 @@ +from .device import * +from .shape import * +from .pixel_filter import * +from .render_pytorch import * +from .image import * +from .parse_svg import * +from .color import * +from .optimize_svg import * +from .save_svg import * \ No newline at end of file diff --git a/pydiffvg/color.py b/pydiffvg/color.py new file mode 100644 index 0000000..68c360f --- /dev/null +++ b/pydiffvg/color.py @@ -0,0 +1,24 @@ +import pydiffvg +import torch + +class LinearGradient: + def __init__(self, + begin = torch.tensor([0.0, 0.0]), + end = torch.tensor([0.0, 0.0]), + offsets = torch.tensor([0.0]), + stop_colors = torch.tensor([0.0, 0.0, 0.0, 0.0])): + self.begin = begin + self.end = end + self.offsets = offsets + self.stop_colors = 
stop_colors + +class RadialGradient: + def __init__(self, + center = torch.tensor([0.0, 0.0]), + radius = torch.tensor([0.0, 0.0]), + offsets = torch.tensor([0.0]), + stop_colors = torch.tensor([0.0, 0.0, 0.0, 0.0])): + self.center = center + self.radius = radius + self.offsets = offsets + self.stop_colors = stop_colors diff --git a/pydiffvg/device.py b/pydiffvg/device.py new file mode 100644 index 0000000..420883d --- /dev/null +++ b/pydiffvg/device.py @@ -0,0 +1,25 @@ +import torch + +use_gpu = torch.cuda.is_available() +device = torch.device('cuda') if use_gpu else torch.device('cpu') + +def set_use_gpu(v): + global use_gpu + global device + use_gpu = v + if not use_gpu: + device = torch.device('cpu') + +def get_use_gpu(): + global use_gpu + return use_gpu + +def set_device(d): + global device + global use_gpu + device = d + use_gpu = device.type == 'cuda' + +def get_device(): + global device + return device diff --git a/pydiffvg/image.py b/pydiffvg/image.py new file mode 100644 index 0000000..f83fea2 --- /dev/null +++ b/pydiffvg/image.py @@ -0,0 +1,22 @@ +import numpy as np +import skimage +import skimage.io +import os + +def imwrite(img, filename, gamma = 2.2, normalize = False): + directory = os.path.dirname(filename) + if directory != '' and not os.path.exists(directory): + os.makedirs(directory) + + if not isinstance(img, np.ndarray): + img = img.data.numpy() + if normalize: + img_rng = np.max(img) - np.min(img) + if img_rng > 0: + img = (img - np.min(img)) / img_rng + img = np.clip(img, 0.0, 1.0) + if img.ndim==2: + #repeat along the third dimension + img=np.expand_dims(img,2) + img[:, :, :3] = np.power(img[:, :, :3], 1.0/gamma) + skimage.io.imsave(filename, (img * 255).astype(np.uint8)) \ No newline at end of file diff --git a/pydiffvg/optimize_svg.py b/pydiffvg/optimize_svg.py new file mode 100644 index 0000000..a3a58ab --- /dev/null +++ b/pydiffvg/optimize_svg.py @@ -0,0 +1,1606 @@ +import json +import copy +import xml.etree.ElementTree as etree +from xml.dom import minidom +import warnings +import torch +import numpy as np +import re +import sys +import pydiffvg +import math +from collections import namedtuple +import cssutils + +class SvgOptimizationSettings: + + default_params = { + "optimize_color": True, + "color_lr": 2e-3, + "optimize_alpha": False, + "alpha_lr": 2e-3, + "optimizer": "Adam", + "transforms": { + "optimize_transforms":True, + "transform_mode":"rigid", + "translation_mult":1e-3, + "transform_lr":2e-3 + }, + "circles": { + "optimize_center": True, + "optimize_radius": True, + "shape_lr": 2e-1 + }, + "paths": { + "optimize_points": True, + "shape_lr": 2e-1 + }, + "gradients": { + "optimize_stops": True, + "stop_lr": 2e-3, + "optimize_color": True, + "color_lr": 2e-3, + "optimize_alpha": False, + "alpha_lr": 2e-3, + "optimize_location": True, + "location_lr": 2e-1 + } + } + + optims = { + "Adam": torch.optim.Adam, + "SGD": torch.optim.SGD, + "ASGD": torch.optim.ASGD, + } + + #region methods + def __init__(self, f=None): + self.store = {} + if f is None: + self.store["default"] = copy.deepcopy(SvgOptimizationSettings.default_params) + else: + self.store = json.load(f) + + # create default alias for root + def default_name(self, dname): + self.dname = dname + if dname not in self.store: + self.store[dname] = self.store["default"] + + def retrieve(self, node_id): + if node_id not in self.store: + return (self.store["default"], False) + else: + return (self.store[node_id], True) + + def reset_to_defaults(self, node_id): + if node_id in self.store: + del 
self.store[node_id] + + return self.store["default"] + + def undefault(self, node_id): + if node_id not in self.store: + self.store[node_id] = copy.deepcopy(self.store["default"]) + + return self.store[node_id] + + def override_optimizer(self, optimizer): + if optimizer is not None: + for v in self.store.values(): + v["optimizer"] = optimizer + + def global_override(self, path, value): + for store in self.store.values(): + d = store + for key in path[:-1]: + d = d[key] + + d[path[-1]] = value + + def save(self, file): + self.store["default"] = self.store[self.dname] + json.dump(self.store, file, indent="\t") + #endregion + +class OptimizableSvg: + + class TransformTools: + @staticmethod + def parse_matrix(vals): + assert(len(vals)==6) + return np.array([[vals[0],vals[2],vals[4]],[vals[1], vals[3], vals[5]],[0,0,1]]) + + @staticmethod + def parse_translate(vals): + assert(len(vals)>=1 and len(vals)<=2) + mat=np.eye(3) + mat[0,2]=vals[0] + if len(vals)>1: + mat[1,2]=vals[1] + return mat + + @staticmethod + def parse_rotate(vals): + assert (len(vals) == 1 or len(vals) == 3) + mat = np.eye(3) + rads=math.radians(vals[0]) + sint=math.sin(rads) + cost=math.cos(rads) + mat[0:2, 0:2] = np.array([[cost,-sint],[sint,cost]]) + if len(vals) > 1: + tr1=parse_translate(vals[1:3]) + tr2=parse_translate([-vals[1],-vals[2]]) + mat=tr1 @ mat @ tr2 + return mat + + @staticmethod + def parse_scale(vals): + assert (len(vals) >= 1 and len(vals) <= 2) + d=np.array([vals[0], vals[1] if len(vals)>1 else vals[0],1]) + return np.diag(d) + + @staticmethod + def parse_skewx(vals): + assert(len(vals)==1) + m=np.eye(3) + m[0,1]=vals[0] + return m + + @staticmethod + def parse_skewy(vals): + assert (len(vals) == 1) + m = np.eye(3) + m[1, 0] = vals[0] + return m + + @staticmethod + def transformPoints(pointsTensor, transform): + assert(transform is not None) + one=torch.ones((pointsTensor.shape[0],1),device=pointsTensor.device) + homo_points = torch.cat([pointsTensor, one], dim=1) + mult = transform.mm(homo_points.permute(1,0)).permute(1,0) + tfpoints=mult[:, 0:2].contiguous() + #print(torch.norm(mult[:,2]-one)) + assert(pointsTensor.shape == tfpoints.shape) + return tfpoints + + @staticmethod + def promote_numpy(M): + ret = np.eye(3) + ret[0:2, 0:2] = M + return ret + + @staticmethod + def recompose_numpy(Theta,ScaleXY,ShearX,TXY): + cost=math.cos(Theta) + sint=math.sin(Theta) + Rot=np.array([[cost, -sint],[sint, cost]]) + Scale=np.diag(ScaleXY) + Shear=np.eye(2) + Shear[0,1]=ShearX + + Translate=np.eye(3) + Translate[0:2,2]=TXY + + M=OptimizableSvg.TransformTools.promote_numpy(Rot @ Scale @ Shear) @ Translate + return M + + @staticmethod + def promote(m): + M=torch.eye(3).to(m.device) + M[0:2,0:2]=m + return M + + @staticmethod + def make_rot(Theta): + sint=Theta.sin().squeeze() + cost=Theta.cos().squeeze() + #m=torch.tensor([[cost, -sint],[sint, cost]]) + Rot=torch.stack((torch.stack((cost,-sint)),torch.stack((sint,cost)))) + return Rot + + @staticmethod + def make_scale(ScaleXY): + if ScaleXY.squeeze().dim()==0: + ScaleXY=ScaleXY.squeeze() + #uniform scale + return torch.diag(torch.stack([ScaleXY,ScaleXY])).to(ScaleXY.device) + else: + return torch.diag(ScaleXY).to(ScaleXY.device) + + @staticmethod + def make_shear(ShearX): + m=torch.eye(2).to(ShearX.device) + m[0,1]=ShearX + return m + + @staticmethod + def make_translate(TXY): + m=torch.eye(3).to(TXY.device) + m[0:2,2]=TXY + return m + + @staticmethod + def recompose(Theta,ScaleXY,ShearX,TXY): + Rot=OptimizableSvg.TransformTools.make_rot(Theta) + 
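+            # Torch counterpart of recompose_numpy above: the final matrix is
+            # promote(Rot @ Scale @ Shear) @ Translate.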
Scale=OptimizableSvg.TransformTools.make_scale(ScaleXY) + Shear=OptimizableSvg.TransformTools.make_shear(ShearX) + Translate=OptimizableSvg.TransformTools.make_translate(TXY) + + return OptimizableSvg.TransformTools.promote(Rot.mm(Scale).mm(Shear)).mm(Translate) + + TransformDecomposition=namedtuple("TransformDecomposition","theta scale shear translate") + TransformProperties=namedtuple("TransformProperties", "has_rotation has_scale has_mirror scale_uniform has_shear has_translation") + + @staticmethod + def make_named(decomp): + if not isinstance(decomp,OptimizableSvg.TransformTools.TransformDecomposition): + decomp=OptimizableSvg.TransformTools.TransformDecomposition(theta=decomp[0],scale=decomp[1],shear=decomp[2],translate=decomp[3]) + return decomp + + @staticmethod + def analyze_transform(decomp): + decomp=OptimizableSvg.TransformTools.make_named(decomp) + epsilon=1e-3 + has_rotation=abs(decomp.theta)>epsilon + has_scale=abs((abs(decomp.scale)-1)).max()>epsilon + scale_len=decomp.scale.squeeze().ndim>0 if isinstance(decomp.scale,np.ndarray) else decomp.scale.squeeze().dim() > 0 + has_mirror=scale_len and decomp.scale[0]*decomp.scale[1] < 0 + scale_uniform=not scale_len or abs(abs(decomp.scale[0])-abs(decomp.scale[1]))epsilon + has_translate=max(abs(decomp.translate[0]),abs(decomp.translate[1]))>epsilon + + return OptimizableSvg.TransformTools.TransformProperties(has_rotation=has_rotation,has_scale=has_scale,has_mirror=has_mirror,scale_uniform=scale_uniform,has_shear=has_shear,has_translation=has_translate) + + @staticmethod + def check_and_decomp(M): + decomp=OptimizableSvg.TransformTools.decompose(M) if M is not None else OptimizableSvg.TransformTools.TransformDecomposition(theta=0,scale=(1,1),shear=0,translate=(0,0)) + props=OptimizableSvg.TransformTools.analyze_transform(decomp) + return (decomp, props) + + @staticmethod + def tf_to_string(M): + tfstring = "matrix({} {} {} {} {} {})".format(M[0, 0], M[1, 0], M[0, 1], M[1, 1], M[0, 2], M[1, 2]) + return tfstring + + @staticmethod + def decomp_to_string(decomp): + decomp = OptimizableSvg.TransformTools.make_named(decomp) + ret="" + props=OptimizableSvg.TransformTools.analyze_transform(decomp) + if props.has_rotation: + ret+="rotate({}) ".format(math.degrees(decomp.theta.item())) + if props.has_scale: + if decomp.scale.dim()==0: + ret += "scale({}) ".format(decomp.scale.item()) + else: + ret+="scale({} {}) ".format(decomp.scale[0], decomp.scale[1]) + if props.has_shear: + ret+="skewX({}) ".format(decomp.shear.item()) + if props.has_translation: + ret+="translate({} {}) ".format(decomp.translate[0],decomp.translate[1]) + + return ret + + @staticmethod + def decompose(M): + m = M[0:2, 0:2] + t0=M[0:2, 2] + #get translation so that we can post-multiply with it + TXY=np.linalg.solve(m,t0) + + T=np.eye(3) + T[0:2,2]=TXY + + q, r = np.linalg.qr(m) + + ref = np.array([[1, 0], [0, np.sign(np.linalg.det(q))]]) + + Rot = np.dot(q, ref) + + ref2 = np.array([[1, 0], [0, np.sign(np.linalg.det(r))]]) + + r2 = np.dot(ref2, r) + + Ref = np.dot(ref, ref2) + + sc = np.diag(r2) + Scale = np.diagflat(sc) + + Shear = np.eye(2) + Shear[0, 1] = r2[0, 1] / sc[0] + #the actual shear coefficient + ShearX=r2[0, 1] / sc[0] + + if np.sum(sc) < 0: + # both scales are negative, flip this and add a 180 rotation + Rot = np.dot(Rot, -np.eye(2)) + Scale = -Scale + + Theta = math.atan2(Rot[1, 0], Rot[0, 0]) + ScaleXY = np.array([Scale[0,0],Scale[1,1]*Ref[1,1]]) + + return OptimizableSvg.TransformTools.TransformDecomposition(theta=Theta, scale=ScaleXY, shear=ShearX, 
translate=TXY) + + #region suboptimizers + + #optimizes color, but really any tensor that needs to stay between 0 and 1 per-entry + class ColorOptimizer: + def __init__(self,tensor,optim_type,lr): + self.tensor=tensor + self.optim=optim_type([tensor],lr=lr) + + def zero_grad(self): + self.optim.zero_grad() + + def step(self): + self.optim.step() + self.tensor.data.clamp_(min=1e-4,max=1.) + + #optimizes gradient stop positions + class StopOptimizer: + def __init__(self,stops,optim_type,lr): + self.stops=stops + self.optim=optim_type([stops],lr=lr) + + def zero_grad(self): + self.optim.zero_grad() + + def step(self): + self.optim.step() + self.stops.data.clamp_(min=0., max=1.) + self.stops.data, _ = self.stops.sort() + self.stops.data[0] = 0. + self.stops.data[-1]=1. + + #optimizes gradient: stop, positions, colors+opacities, locations + class GradientOptimizer: + def __init__(self, begin, end, offsets, stops, optim_params): + self.begin=begin.clone().detach() if begin is not None else None + self.end=end.clone().detach() if end is not None else None + self.offsets=offsets.clone().detach() if offsets is not None else None + self.stop_colors=stops[:,0:3].clone().detach() if stops is not None else None + self.stop_alphas=stops[:,3].clone().detach() if stops is not None else None + self.optimizers=[] + + if optim_params["gradients"]["optimize_stops"] and self.offsets is not None: + self.offsets.requires_grad_(True) + self.optimizers.append(OptimizableSvg.StopOptimizer(self.offsets,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["stop_lr"])) + if optim_params["gradients"]["optimize_color"] and self.stop_colors is not None: + self.stop_colors.requires_grad_(True) + self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_colors,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["color_lr"])) + if optim_params["gradients"]["optimize_alpha"] and self.stop_alphas is not None: + self.stop_alphas.requires_grad_(True) + self.optimizers.append(OptimizableSvg.ColorOptimizer(self.stop_alphas,SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["gradients"]["alpha_lr"])) + if optim_params["gradients"]["optimize_location"] and self.begin is not None and self.end is not None: + self.begin.requires_grad_(True) + self.end.requires_grad_(True) + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.begin,self.end],lr=optim_params["gradients"]["location_lr"])) + + + def get_vals(self): + return self.begin, self.end, self.offsets, torch.cat((self.stop_colors,self.stop_alphas.unsqueeze(1)),1) if self.stop_colors is not None and self.stop_alphas is not None else None + + def zero_grad(self): + for optim in self.optimizers: + optim.zero_grad() + + def step(self): + for optim in self.optimizers: + optim.step() + + class TransformOptimizer: + def __init__(self,transform,optim_params): + self.transform=transform + self.optimizes=optim_params["transforms"]["optimize_transforms"] and transform is not None + self.params=copy.deepcopy(optim_params) + self.transform_mode=optim_params["transforms"]["transform_mode"] + + if self.optimizes: + optimvars=[] + self.residual=None + lr=optim_params["transforms"]["transform_lr"] + tmult=optim_params["transforms"]["translation_mult"] + decomp,props=OptimizableSvg.TransformTools.check_and_decomp(transform.cpu().numpy()) + if self.transform_mode=="move": + #only translation and rotation should be set + if props.has_scale or props.has_shear or props.has_mirror: + 
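+                    # In "move" mode only rotation and translation are optimized; any leftover
+                    # scale/shear/mirror is frozen into self.residual below and never updated.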
print("Warning: set to optimize move only, but input transform has residual scale or shear") + self.residual=self.transform.clone().detach().requires_grad_(False) + self.Theta=torch.tensor(0,dtype=torch.float32,requires_grad=True,device=transform.device) + self.translation=torch.tensor([0, 0],dtype=torch.float32,requires_grad=True,device=transform.device) + else: + self.residual=None + self.Theta=torch.tensor(decomp.theta,dtype=torch.float32,requires_grad=True,device=transform.device) + self.translation=torch.tensor(decomp.translate,dtype=torch.float32,requires_grad=True,device=transform.device) + optimvars+=[{'params':x,'lr':lr} for x in [self.Theta]]+[{'params':self.translation,'lr':lr*tmult}] + elif self.transform_mode=="rigid": + #only translation, rotation, and uniform scale should be set + if props.has_shear or props.has_mirror or not props.scale_uniform: + print("Warning: set to optimize rigid transform only, but input transform has residual shear, mirror or non-uniform scale") + self.residual = self.transform.clone().detach().requires_grad_(False) + self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device) + self.translation = torch.tensor([0, 0], dtype=torch.float32, requires_grad=True,device=transform.device) + self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device) + else: + self.residual = None + self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device) + self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device) + self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device) + optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}] + elif self.transform_mode=="similarity": + if props.has_shear or not props.scale_uniform: + print("Warning: set to optimize rigid transform only, but input transform has residual shear or non-uniform scale") + self.residual = self.transform.clone().detach().requires_grad_(False) + self.Theta = torch.tensor(0, dtype=torch.float32, requires_grad=True,device=transform.device) + self.translation = torch.tensor([0, 0], dtype=torch.float32, requires_grad=True,device=transform.device) + self.scale=torch.tensor(1, dtype=torch.float32, requires_grad=True,device=transform.device) + self.scale_sign=torch.tensor(1,dtype=torch.float32,requires_grad=False,device=transform.device) + else: + self.residual = None + self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device) + self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device) + self.scale = torch.tensor(decomp.scale[0], dtype=torch.float32, requires_grad=True,device=transform.device) + self.scale_sign = torch.tensor(np.sign(decomp.scale[0]*decomp.scale[1]), dtype=torch.float32, requires_grad=False,device=transform.device) + optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale]]+[{'params':self.translation,'lr':lr*tmult}] + elif self.transform_mode=="affine": + self.Theta = torch.tensor(decomp.theta, dtype=torch.float32, requires_grad=True,device=transform.device) + self.translation = torch.tensor(decomp.translate, dtype=torch.float32, requires_grad=True,device=transform.device) + self.scale = torch.tensor(decomp.scale, dtype=torch.float32, requires_grad=True,device=transform.device) + self.shear = 
torch.tensor(decomp.shear, dtype=torch.float32, requires_grad=True,device=transform.device) + optimvars += [{'params':x,'lr':lr} for x in [self.Theta, self.scale, self.shear]]+[{'params':self.translation,'lr':lr*tmult}] + else: + raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode)) + self.optimizer=SvgOptimizationSettings.optims[optim_params["optimizer"]](optimvars) + + def get_transform(self): + if not self.optimizes: + return self.transform + else: + if self.transform_mode == "move": + composed=OptimizableSvg.TransformTools.recompose(self.Theta,torch.tensor([1.],device=self.Theta.device),torch.tensor(0.,device=self.Theta.device),self.translation) + return self.residual.mm(composed) if self.residual is not None else composed + elif self.transform_mode == "rigid": + composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, torch.tensor(0.,device=self.Theta.device), + self.translation) + return self.residual.mm(composed) if self.residual is not None else composed + elif self.transform_mode == "similarity": + composed=OptimizableSvg.TransformTools.recompose(self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.,device=self.Theta.device),self.translation) + return self.residual.mm(composed) if self.residual is not None else composed + elif self.transform_mode == "affine": + composed = OptimizableSvg.TransformTools.recompose(self.Theta, self.scale, self.shear, self.translation) + return composed + else: + raise ValueError("Unrecognized transform mode '{}'".format(self.transform_mode)) + + def tfToString(self): + if self.transform is None: + return None + elif not self.optimizes: + return OptimizableSvg.TransformTools.tf_to_string(self.transform) + else: + if self.transform_mode == "move": + str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta,torch.tensor([1.]),torch.tensor(0.),self.translation)) + return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str + elif self.transform_mode == "rigid": + str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, torch.tensor(0.), + self.translation)) + return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str + elif self.transform_mode == "similarity": + str=OptimizableSvg.TransformTools.decomp_to_string((self.Theta, torch.cat((self.scale,self.scale*self.scale_sign)),torch.tensor(0.),self.translation)) + return (OptimizableSvg.TransformTools.tf_to_string(self.residual) if self.residual is not None else "")+" "+str + elif self.transform_mode == "affine": + str = OptimizableSvg.TransformTools.decomp_to_string((self.Theta, self.scale, self.shear, self.translation)) + return composed + + def zero_grad(self): + if self.optimizes: + self.optimizer.zero_grad() + + def step(self): + if self.optimizes: + self.optimizer.step() + + #endregion + + #region Nodes + class SvgNode: + def __init__(self,id,transform,appearance,settings): + self.id=id + self.children=[] + self.optimizers=[] + self.device = settings.device + self.transform=torch.tensor(transform,dtype=torch.float32,device=self.device) if transform is not None else None + self.transform_optim=OptimizableSvg.TransformOptimizer(self.transform,settings.retrieve(self.id)[0]) + self.optimizers.append(self.transform_optim) + self.proc_appearance(appearance,settings.retrieve(self.id)[0]) + + def tftostring(self): + return self.transform_optim.tfToString() + + def appearanceToString(self): + appstring="" + for 
key,value in self.appearance.items(): + if key in ["fill", "stroke"]: + #a paint-type value + if value[0] == "none": + appstring+="{}:none;".format(key) + elif value[0] == "solid": + appstring += "{}:{};".format(key,OptimizableSvg.rgb_to_string(value[1])) + elif value[0] == "url": + appstring += "{}:url(#{});".format(key,value[1].id) + #appstring += "{}:{};".format(key,"#ff00ff") + elif key in ["opacity", "fill-opacity", "stroke-opacity", "stroke-width", "fill-rule"]: + appstring+="{}:{};".format(key,value) + else: + raise ValueError("Don't know how to write appearance parameter '{}'".format(key)) + return appstring + + + def write_xml_common_attrib(self,node,tfname="transform"): + if self.transform is not None: + node.set(tfname,self.tftostring()) + if len(self.appearance)>0: + node.set('style',self.appearanceToString()) + if self.id is not None: + node.set('id',self.id) + + + def proc_appearance(self,appearance,optim_params): + self.appearance=appearance + for key, value in appearance.items(): + if key == "fill" or key == "stroke": + if optim_params["optimize_color"] and value[0]=="solid": + value[1].requires_grad_(True) + self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1],SvgOptimizationSettings.optims[optim_params["optimizer"]],optim_params["color_lr"])) + elif key == "fill-opacity" or key == "stroke-opacity" or key == "opacity": + if optim_params["optimize_alpha"]: + value[1].requires_grad_(True) + self.optimizers.append(OptimizableSvg.ColorOptimizer(value[1], optim_params["optimizer"], + optim_params["alpha_lr"])) + elif key == "fill-rule" or key == "stroke-width": + pass + else: + raise RuntimeError("Unrecognized appearance key '{}'".format(key)) + + def prop_transform(self,intform): + return intform.matmul(self.transform_optim.get_transform()) if self.transform is not None else intform + + def prop_appearance(self,inappearance): + outappearance=copy.copy(inappearance) + for key,value in self.appearance.items(): + if key == "fill": + #gets replaced + outappearance[key]=value + elif key == "fill-opacity": + #gets multiplied + outappearance[key] = outappearance[key]*value + elif key == "fill-rule": + #gets replaced + outappearance[key] = value + elif key =="opacity": + # gets multiplied + outappearance[key] = outappearance[key]*value + elif key == "stroke": + # gets replaced + outappearance[key] = value + elif key == "stroke-opacity": + # gets multiplied + outappearance[key] = outappearance[key]*value + elif key =="stroke-width": + # gets replaced + outappearance[key] = value + else: + raise RuntimeError("Unrecognized appearance key '{}'".format(key)) + return outappearance + + def zero_grad(self): + for optim in self.optimizers: + optim.zero_grad() + for child in self.children: + child.zero_grad() + + def step(self): + for optim in self.optimizers: + optim.step() + for child in self.children: + child.step() + + def get_type(self): + return "Generic node" + + def is_shape(self): + return False + + def build_scene(self,shapes,shape_groups,transform,appearance): + raise NotImplementedError("Abstract SvgNode cannot recurse") + + class GroupNode(SvgNode): + def __init__(self, id, transform, appearance,settings): + super().__init__(id, transform, appearance,settings) + + def get_type(self): + return "Group node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + outtf=self.prop_transform(transform) + outapp=self.prop_appearance(appearance) + for child in self.children: + child.build_scene(shapes,shape_groups,outtf,outapp) + + def write_xml(self, parent): + 
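+            # Serialize this group as a <g> element carrying its transform/style/id,
+            # then recurse so children are written as nested elements.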
elm=etree.SubElement(parent,"g") + self.write_xml_common_attrib(elm) + + for child in self.children: + child.write_xml(elm) + + class RootNode(SvgNode): + def __init__(self, id, transform, appearance,settings): + super().__init__(id, transform, appearance,settings) + + def write_xml(self,document): + elm=etree.Element('svg') + self.write_xml_common_attrib(elm) + elm.set("version","2.0") + elm.set("width",str(document.canvas[0])) + elm.set("height", str(document.canvas[1])) + elm.set("xmlns","http://www.w3.org/2000/svg") + elm.set("xmlns:xlink","http://www.w3.org/1999/xlink") + #write definitions before we write any children + document.write_defs(elm) + + #write the children + for child in self.children: + child.write_xml(elm) + + return elm + + def get_type(self): + return "Root node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + outtf = self.prop_transform(transform).to(self.device) + for child in self.children: + child.build_scene(shapes,shape_groups,outtf,appearance) + + @staticmethod + def get_default_appearance(device): + default_appearance = {"fill": ("solid", torch.tensor([0., 0., 0.],device=device)), + "fill-opacity": torch.tensor([1.],device=device), + "fill-rule": "nonzero", + "opacity": torch.tensor([1.],device=device), + "stroke": ("none", None), + "stroke-opacity": torch.tensor([1.],device=device), + "stroke-width": torch.tensor([0.],device=device)} + return default_appearance + + @staticmethod + def get_default_transform(): + return torch.eye(3) + + + + class ShapeNode(SvgNode): + def __init__(self, id, transform, appearance,settings): + super().__init__(id, transform, appearance,settings) + + def get_type(self): + return "Generic shape node" + + def is_shape(self): + return True + + def construct_paint(self,value,combined_opacity,transform): + if value[0] == "none": + return None + elif value[0] == "solid": + return torch.cat([value[1],combined_opacity]).to(self.device) + elif value[0] == "url": + #get the gradient object from this node + return value[1].getGrad(combined_opacity,transform) + else: + raise ValueError("Unknown paint value type '{}'".format(value[0])) + + def make_shape_group(self,appearance,transform,num_shapes,num_subobjects): + fill=self.construct_paint(appearance["fill"],appearance["opacity"]*appearance["fill-opacity"],transform) + stroke=self.construct_paint(appearance["stroke"],appearance["opacity"]*appearance["stroke-opacity"],transform) + sg = pydiffvg.ShapeGroup(shape_ids=torch.tensor(range(num_shapes, num_shapes + num_subobjects)), + fill_color=fill, + use_even_odd_rule=appearance["fill-rule"]=="evenodd", + stroke_color=stroke, + shape_to_canvas=transform, + id=self.id) + return sg + + class PathNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, paths): + super().__init__(id, transform, appearance,settings) + self.proc_paths(paths,settings.retrieve(self.id)[0]) + + def proc_paths(self,paths,optim_params): + self.paths=paths + if optim_params["paths"]["optimize_points"]: + ptlist=[] + for path in paths: + ptlist.append(path.points.requires_grad_(True)) + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]](ptlist,lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Path node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),len(self.paths)) + for path in self.paths: + 
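+                # One pydiffvg.Path per sub-path; all of them are covered by the single
+                # shape group built above (its shape_ids span len(self.paths) consecutive indices).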
disp_path=pydiffvg.Path(path.num_control_points,path.points,path.is_closed,applyapp["stroke-width"],path.id) + shapes.append(disp_path) + shape_groups.append(sg) + + def path_to_string(self,path): + path_string = "M {},{} ".format(path.points[0][0].item(), path.points[0][1].item()) + idx = 1 + numpoints = path.points.shape[0] + for type in path.num_control_points: + toproc = type + 1 + if type == 0: + # add line + path_string += "L " + elif type == 1: + # add quadric + path_string += "Q " + elif type == 2: + # add cubic + path_string += "C " + while toproc > 0: + path_string += "{},{} ".format(path.points[idx % numpoints][0].item(), + path.points[idx % numpoints][1].item()) + idx += 1 + toproc -= 1 + if path.is_closed: + path_string += "Z " + + return path_string + + def paths_string(self): + pstr="" + for path in self.paths: + pstr+=self.path_to_string(path) + return pstr + + def write_xml(self, parent): + elm = etree.SubElement(parent, "path") + self.write_xml_common_attrib(elm) + elm.set("d",self.paths_string()) + + for child in self.children: + child.write_xml(elm) + + class RectNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, rect): + super().__init__(id, transform, appearance,settings) + self.rect=torch.tensor(rect,dtype=torch.float,device=settings.device) + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.rect],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Rect node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Rect(self.rect[0:2],self.rect[0:2]+self.rect[2:4],applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def write_xml(self, parent): + elm = etree.SubElement(parent, "rect") + self.write_xml_common_attrib(elm) + elm.set("x",str(self.rect[0])) + elm.set("y", str(self.rect[1])) + elm.set("width", str(self.rect[2])) + elm.set("height", str(self.rect[3])) + + for child in self.children: + child.write_xml(elm) + + class CircleNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, rect): + super().__init__(id, transform, appearance,settings) + self.circle=torch.tensor(rect,dtype=torch.float,device=settings.device) + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.circle],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Circle node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Circle(self.circle[2],self.circle[0:2],applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def write_xml(self, parent): + elm = etree.SubElement(parent, "circle") + self.write_xml_common_attrib(elm) + elm.set("cx",str(self.circle[0])) + elm.set("cy", str(self.circle[1])) + elm.set("r", str(self.circle[2])) + + for child in self.children: + child.write_xml(elm) + + + class EllipseNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, ellipse): + super().__init__(id, 
transform, appearance,settings) + self.ellipse=torch.tensor(ellipse,dtype=torch.float,device=settings.device) + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.ellipse],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Ellipse node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Ellipse(self.ellipse[2:4],self.ellipse[0:2],applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def write_xml(self, parent): + elm = etree.SubElement(parent, "ellipse") + self.write_xml_common_attrib(elm) + elm.set("cx", str(self.ellipse[0])) + elm.set("cy", str(self.ellipse[1])) + elm.set("rx", str(self.ellipse[2])) + elm.set("ry", str(self.ellipse[3])) + + for child in self.children: + child.write_xml(elm) + + class PolygonNode(ShapeNode): + def __init__(self, id, transform, appearance,settings, points): + super().__init__(id, transform, appearance,settings) + self.points=points + optim_params=settings.retrieve(self.id)[0] + #borrowing path settings for this + if optim_params["paths"]["optimize_points"]: + self.optimizers.append(SvgOptimizationSettings.optims[optim_params["optimizer"]]([self.points],lr=optim_params["paths"]["shape_lr"])) + + def get_type(self): + return "Polygon node" + + def build_scene(self,shapes,shape_groups,transform,appearance): + applytf=self.prop_transform(transform) + applyapp = self.prop_appearance(appearance) + sg=self.make_shape_group(applyapp,applytf,len(shapes),1) + shapes.append(pydiffvg.Polygon(self.points,True,applyapp["stroke-width"],self.id)) + shape_groups.append(sg) + + def point_string(self): + ret="" + for i in range(self.points.shape[0]): + pt=self.points[i,:] + #assert pt.shape == (1,2) + ret+= str(pt[0])+","+str(pt[1])+" " + return ret + + def write_xml(self, parent): + elm = etree.SubElement(parent, "polygon") + self.write_xml_common_attrib(elm) + elm.set("points",self.point_string()) + + for child in self.children: + child.write_xml(elm) + + class GradientNode(SvgNode): + def __init__(self, id, transform,settings,begin,end,offsets,stops,href): + super().__init__(id, transform, {},settings) + self.optim=OptimizableSvg.GradientOptimizer(begin, end, offsets, stops, settings.retrieve(id)[0]) + self.optimizers.append(self.optim) + self.href=href + + def is_ref(self): + return self.href is not None + + def get_type(self): + return "Gradient node" + + def get_stops(self): + _, _, offsets, stops=self.optim.get_vals() + return offsets, stops + + def get_points(self): + begin, end, _, _ =self.optim.get_vals() + return begin, end + + def write_xml(self, parent): + elm = etree.SubElement(parent, "linearGradient") + self.write_xml_common_attrib(elm,tfname="gradientTransform") + + begin, end, offsets, stops = self.optim.get_vals() + + if self.href is None: + #we have stops + for idx, offset in enumerate(offsets): + stop=etree.SubElement(elm,"stop") + stop.set("offset",str(offset.item())) + stop.set("stop-color",OptimizableSvg.rgb_to_string(stops[idx,0:3])) + stop.set("stop-opacity",str(stops[idx,3].item())) + else: + elm.set('xlink:href', "#{}".format(self.href.id)) + + if begin is not None and end is not None: + #no stops + elm.set('x1', str(begin[0].item())) + elm.set('y1', 
str(begin[1].item())) + elm.set('x2', str(end[0].item())) + elm.set('y2', str(end[1].item())) + + # magic value to make this work + elm.set("gradientUnits", "userSpaceOnUse") + + for child in self.children: + child.write_xml(elm) + + def getGrad(self,combined_opacity,transform): + if self.is_ref(): + offsets, stops=self.href.get_stops() + else: + offsets, stops=self.get_stops() + + stops=stops.clone() + stops[:,3]*=combined_opacity + + begin,end = self.get_points() + + applytf=self.prop_transform(transform) + begin=OptimizableSvg.TransformTools.transformPoints(begin.unsqueeze(0),applytf).squeeze() + end = OptimizableSvg.TransformTools.transformPoints(end.unsqueeze(0), applytf).squeeze() + + return pydiffvg.LinearGradient(begin, end, offsets, stops) + #endregion + + def __init__(self, filename, settings=SvgOptimizationSettings(),optimize_background=False, verbose=False, device=torch.device("cpu")): + self.settings=settings + self.verbose=verbose + self.device=device + self.settings.device=device + + tree = etree.parse(filename) + root = tree.getroot() + + #in case we need global optimization + self.optimizers=[] + self.background=torch.tensor([1.,1.,1.],dtype=torch.float32,requires_grad=optimize_background,device=self.device) + + if optimize_background: + p=settings.retrieve("default")[0] + self.optimizers.append(OptimizableSvg.ColorOptimizer(self.background,SvgOptimizationSettings.optims[p["optimizer"]],p["color_lr"])) + + self.defs={} + + self.depth=0 + + self.dirty=True + self.scene=None + + self.parseRoot(root) + + recognised_shapes=["path","circle","rect","ellipse","polygon"] + + #region core functionality + def build_scene(self): + if self.dirty: + shape_groups=[] + shapes=[] + self.root.build_scene(shapes,shape_groups,OptimizableSvg.RootNode.get_default_transform().to(self.device),OptimizableSvg.RootNode.get_default_appearance(self.device)) + self.scene=(self.canvas[0],self.canvas[1],shapes,shape_groups) + self.dirty=False + return self.scene + + def zero_grad(self): + self.root.zero_grad() + for optim in self.optimizers: + optim.zero_grad() + for item in self.defs.values(): + if issubclass(item.__class__,OptimizableSvg.SvgNode): + item.zero_grad() + + def render(self,scale=None,seed=0): + #render at native resolution + scene = self.build_scene() + scene_args = pydiffvg.RenderFunction.serialize_scene(*scene) + render = pydiffvg.RenderFunction.apply + out_size=(scene[0],scene[1]) if scale is None else (int(scene[0]*scale),int(scene[1]*scale)) + img = render(out_size[0], # width + out_size[1], # height + 2, # num_samples_x + 2, # num_samples_y + seed, # seed + *scene_args) + return img + + def step(self): + self.dirty=True + self.root.step() + for optim in self.optimizers: + optim.step() + for item in self.defs.values(): + if issubclass(item.__class__, OptimizableSvg.SvgNode): + item.step() + #endregion + + #region reporting + + def offset_str(self,s): + return ("\t"*self.depth)+s + + def reportSkippedAttribs(self, node, non_skipped=[]): + skipped=set([k for k in node.attrib.keys() if not OptimizableSvg.is_namespace(k)])-set(non_skipped) + if len(skipped)>0: + tag=OptimizableSvg.remove_namespace(node.tag) if "id" not in node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag),node.attrib["id"]) + print(self.offset_str("Warning: Skipping the following attributes of node '{}': {}".format(tag,", ".join(["'{}'".format(atr) for atr in skipped])))) + + def reportSkippedChildren(self,node,skipped): + skipped_names=["{}#{}".format(elm.tag,elm.attrib["id"]) if "id" in 
elm.attrib else elm.tag for elm in skipped] + if len(skipped)>0: + tag = OptimizableSvg.remove_namespace(node.tag) if "id" not in node.attrib else "{}#{}".format(OptimizableSvg.remove_namespace(node.tag), + node.attrib["id"]) + print(self.offset_str("Warning: Skipping the following children of node '{}': {}".format(tag,", ".join(["'{}'".format(name) for name in skipped_names])))) + + #endregion + + #region parsing + @staticmethod + def remove_namespace(s): + """ + {...} ... -> ... + """ + return re.sub('{.*}', '', s) + + @staticmethod + def is_namespace(s): + return re.match('{.*}', s) is not None + + @staticmethod + def parseTransform(node): + if "transform" not in node.attrib and "gradientTransform" not in node.attrib: + return None + + tf_string=node.attrib["transform"] if "transform" in node.attrib else node.attrib["gradientTransform"] + tforms=tf_string.split(")")[:-1] + mat=np.eye(3) + for tform in tforms: + type = tform.split("(")[0] + args = [float(val) for val in re.split("[, ]+",tform.split("(")[1])] + if type == "matrix": + mat=mat @ OptimizableSvg.TransformTools.parse_matrix(args) + elif type == "translate": + mat = mat @ OptimizableSvg.TransformTools.parse_translate(args) + elif type == "rotate": + mat = mat @ OptimizableSvg.TransformTools.parse_rotate(args) + elif type == "scale": + mat = mat @ OptimizableSvg.TransformTools.parse_scale(args) + elif type == "skewX": + mat = mat @ OptimizableSvg.TransformTools.parse_skewx(args) + elif type == "skewY": + mat = mat @ OptimizableSvg.TransformTools.parse_skewy(args) + else: + raise ValueError("Unknown transform type '{}'".format(type)) + return mat + + #dictionary that defines what constant do we need to multiply different units to get the value in pixels + #gleaned from the CSS definition + unit_dict = {"px":1, + "mm":4, + "cm":40, + "in":25.4*4, + "pt":25.4*4/72, + "pc":25.4*4/6 + } + + @staticmethod + def parseLength(s): + #length is a number followed possibly by a unit definition + #we assume that default unit is the pixel (px) equal to 0.25mm + #last two characters might be unit + val=None + for i in range(len(s)): + try: + val=float(s[:len(s)-i]) + unit=s[len(s)-i:] + break + except ValueError: + continue + if len(unit)>0 and unit not in OptimizableSvg.unit_dict: + raise ValueError("Unknown or unsupported unit '{}' encountered while parsing".format(unit)) + if unit != "": + val*=OptimizableSvg.unit_dict[unit] + return val + + @staticmethod + def parseOpacity(s): + is_percent=s.endswith("%") + s=s.rstrip("%") + val=float(s) + if is_percent: + val=val/100 + return np.clip(val,0.,1.) 
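+    # Illustrative examples (not part of the original source), using unit_dict
+    # above where 1 px == 0.25 mm:
+    #   parseLength("2.5cm") -> 2.5 * 40 = 100.0 px
+    #   parseLength("12pt")  -> 12 * 25.4*4/72 ~ 16.93 px
+    #   parseOpacity("50%")  -> 0.5 after percent conversion and clipping to [0, 1]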
+ + @staticmethod + def parse_color(s): + """ + Hex to tuple + """ + if s[0] != '#': + raise ValueError("Color argument `{}` not supported".format(s)) + s = s.lstrip('#') + if len(s)==6: + rgb = tuple(int(s[i:i + 2], 16) for i in (0, 2, 4)) + return torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]) + elif len(s)==3: + rgb = tuple((int(s[i:i + 1], 16)) for i in (0, 1, 2)) + return torch.tensor([rgb[0] / 15.0, rgb[1] / 15.0, rgb[2] / 15.0]) + else: + raise ValueError("Color argument `{}` not supported".format(s)) + # sRGB to RGB + # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2) + + + @staticmethod + def rgb_to_string(val): + byte_rgb=(val.clone().detach()*255).type(torch.int) + byte_rgb.clamp_(min=0,max=255) + s="#{:02x}{:02x}{:02x}".format(*byte_rgb) + return s + + #parses a "paint" string for use in fill and stroke definitions + @staticmethod + def parsePaint(paintStr,defs,device): + paintStr=paintStr.strip() + if paintStr=="none": + return ("none", None) + elif paintStr[0]=="#": + return ("solid",OptimizableSvg.parse_color(paintStr).to(device)) + elif paintStr.startswith("url"): + url=paintStr.lstrip("url(").rstrip(")").strip("\'\"").lstrip("#") + if url not in defs: + raise ValueError("Paint-type attribute referencing an unknown object with ID '#{}'".format(url)) + return ("url",defs[url]) + else: + raise ValueError("Unrecognized paint string: '{}'".format(paintStr)) + + appearance_keys=["fill","fill-opacity","fill-rule","opacity","stroke","stroke-opacity","stroke-width"] + + @staticmethod + def parseAppearance(node, defs, device): + ret={} + parse_keys = OptimizableSvg.appearance_keys + local_dict={key:value for key,value in node.attrib.items() if key in parse_keys} + css_dict={} + style_dict={} + appearance_dict={} + if "class" in node.attrib: + cls=node.attrib["class"] + if "."+cls in defs: + css_string=defs["."+cls] + css_dict={item.split(":")[0]:item.split(":")[1] for item in css_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys} + if "style" in node.attrib: + style_string=node.attrib["style"] + style_dict={item.split(":")[0]:item.split(":")[1] for item in style_string.split(";") if len(item)>0 and item.split(":")[0] in parse_keys} + appearance_dict.update(css_dict) + appearance_dict.update(style_dict) + appearance_dict.update(local_dict) + for key,value in appearance_dict.items(): + if key=="fill": + ret[key]=OptimizableSvg.parsePaint(value,defs,device) + elif key == "fill-opacity": + ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) + elif key == "fill-rule": + ret[key]=value + elif key == "opacity": + ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) + elif key == "stroke": + ret[key]=OptimizableSvg.parsePaint(value,defs,device) + elif key == "stroke-opacity": + ret[key]=torch.tensor(OptimizableSvg.parseOpacity(value),device=device) + elif key == "stroke-width": + ret[key]=torch.tensor(OptimizableSvg.parseLength(value),device=device) + else: + raise ValueError("Error while parsing appearance attributes: key '{}' should not be here".format(key)) + + return ret + + def parseRoot(self,root): + if self.verbose: + print(self.offset_str("Parsing root")) + self.depth += 1 + + # get document canvas dimensions + self.parseViewport(root) + canvmax=np.max(self.canvas) + self.settings.global_override(["transforms","translation_mult"],canvmax) + id=root.attrib["id"] if "id" in root.attrib else None + + transform=OptimizableSvg.parseTransform(root) + 
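+        # The root's transform and style parsed here are stored on the RootNode and
+        # propagated to every child later, in build_scene.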
appearance=OptimizableSvg.parseAppearance(root,self.defs,self.device) + + version=root.attrib["version"] if "version" in root.attrib else "" + if version != "2.0": + print(self.offset_str("Warning: Version {} is not 2.0, strange things may happen".format(version))) + + self.root=OptimizableSvg.RootNode(id,transform,appearance,self.settings) + + if self.verbose: + self.reportSkippedAttribs(root, ["width", "height", "id", "transform","version", "style"]+OptimizableSvg.appearance_keys) + + #go through the root children and parse them appropriately + skipped=[] + for child in root: + if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: + self.parseShape(child,self.root) + elif OptimizableSvg.remove_namespace(child.tag) == "defs": + self.parseDefs(child) + elif OptimizableSvg.remove_namespace(child.tag) == "style": + self.parseStyle(child) + elif OptimizableSvg.remove_namespace(child.tag) == "g": + self.parseGroup(child,self.root) + else: + skipped.append(child) + + if self.verbose: + self.reportSkippedChildren(root,skipped) + + self.depth-=1 + + def parseShape(self,shape,parent): + tag=OptimizableSvg.remove_namespace(shape.tag) + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag,shape.attrib["id"] if "id" in shape.attrib else ""))) + + self.depth+=1 + if tag == "path": + self.parsePath(shape,parent) + elif tag == "circle": + self.parseCircle(shape,parent) + elif tag == "rect": + self.parseRect(shape,parent) + elif tag == "ellipse": + self.parseEllipse(shape,parent) + elif tag == "polygon": + self.parsePolygon(shape,parent) + else: + raise ValueError("Encountered unknown shape type '{}'".format(tag)) + self.depth -= 1 + + def parsePath(self,shape,parent): + path_string=shape.attrib['d'] + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + paths = pydiffvg.from_svg_path(path_string) + for idx, path in enumerate(paths): + path.stroke_width = torch.tensor([0.],device=self.device) + path.num_control_points=path.num_control_points.to(self.device) + path.points=path.points.to(self.device) + path.source_id = name + path.id = "{}-{}".format(name,idx) if len(paths)>1 else name + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape,self.defs,self.device) + node=OptimizableSvg.PathNode(name,transform,appearance,self.settings,paths) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id","d","transform","style"]+OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape,list(shape)) + + def parseEllipse(self, shape, parent): + cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0. + cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0. 
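+        # cx/cy fall back to 0 when absent (the SVG default); rx/ry are required.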
+ rx = float(shape.attrib["rx"]) + ry = float(shape.attrib["ry"]) + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = OptimizableSvg.EllipseNode(name, transform, appearance, self.settings, (cx, cy, rx, ry)) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform", + "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parsePolygon(self, shape, parent): + points_string = shape.attrib['points'] + name = '' + points=[] + for point_string in points_string.split(" "): + if len(point_string) == 0: + continue + coord_strings=point_string.split(",") + assert len(coord_strings)==2 + points.append([float(coord_strings[0]),float(coord_strings[1])]) + points=torch.tensor(points,dtype=torch.float,device=self.device) + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = OptimizableSvg.PolygonNode(name, transform, appearance, self.settings, points) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "points", "transform", "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parseCircle(self,shape,parent): + cx = float(shape.attrib["cx"]) if "cx" in shape.attrib else 0. + cy = float(shape.attrib["cy"]) if "cy" in shape.attrib else 0. + r = float(shape.attrib["r"]) + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = OptimizableSvg.CircleNode(name, transform, appearance, self.settings, (cx, cy, r)) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "x", "y", "r", "transform", + "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parseRect(self,shape,parent): + x = float(shape.attrib["x"]) if "x" in shape.attrib else 0. + y = float(shape.attrib["y"]) if "y" in shape.attrib else 0. 
+ width = float(shape.attrib["width"]) + height = float(shape.attrib["height"]) + name = '' + if 'id' in shape.attrib: + name = shape.attrib['id'] + transform = OptimizableSvg.parseTransform(shape) + appearance = OptimizableSvg.parseAppearance(shape, self.defs, self.device) + node = OptimizableSvg.RectNode(name, transform, appearance, self.settings, (x,y,width,height)) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(shape, ["id", "x", "y", "width", "height", "transform", "style"] + OptimizableSvg.appearance_keys) + self.reportSkippedChildren(shape, list(shape)) + + def parseGroup(self,group,parent): + tag = OptimizableSvg.remove_namespace(group.tag) + id = group.attrib["id"] if "id" in group.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) + + self.depth+=1 + + transform=self.parseTransform(group) + + #todo process more attributes + appearance=OptimizableSvg.parseAppearance(group,self.defs,self.device) + node=OptimizableSvg.GroupNode(id,transform,appearance,self.settings) + parent.children.append(node) + + if self.verbose: + self.reportSkippedAttribs(group,["id","transform","style"]+OptimizableSvg.appearance_keys) + + skipped_children=[] + for child in group: + if OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: + self.parseShape(child,node) + elif OptimizableSvg.remove_namespace(child.tag) == "defs": + self.parseDefs(child) + elif OptimizableSvg.remove_namespace(child.tag) == "style": + self.parseStyle(child) + elif OptimizableSvg.remove_namespace(child.tag) == "g": + self.parseGroup(child,node) + else: + skipped_children.append(child) + + if self.verbose: + self.reportSkippedChildren(group,skipped_children) + + self.depth-=1 + + def parseStyle(self,style_node): + tag = OptimizableSvg.remove_namespace(style_node.tag) + id = style_node.attrib["id"] if "id" in style_node.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) + + if style_node.attrib["type"] != "text/css": + raise ValueError("Only text/css style recognized, got {}".format(style_node.attrib["type"])) + + self.depth += 1 + + # creating only a dummy node + node = OptimizableSvg.SvgNode(id, None, {}, self.settings) + + if self.verbose: + self.reportSkippedAttribs(def_node, ["id"]) + + if len(style_node)>0: + raise ValueError("Style node should not have children (has {})".format(len(style_node))) + + # collect CSS classes + sheet = cssutils.parseString(style_node.text) + for rule in sheet: + if hasattr(rule, 'selectorText') and hasattr(rule, 'style'): + name = rule.selectorText + if len(name) >= 2 and name[0] == '.': + self.defs[name] = rule.style.getCssText().replace("\n","") + else: + raise ValueError("Unrecognized CSS selector {}".format(name)) + else: + raise ValueError("No style or selector text in CSS rule") + + if self.verbose: + self.reportSkippedChildren(def_node, skipped_children) + + self.depth -= 1 + + def parseDefs(self,def_node): + #only linear gradients are currently supported + tag = OptimizableSvg.remove_namespace(def_node.tag) + id = def_node.attrib["id"] if "id" in def_node.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) + + self.depth += 1 + + + # creating only a dummy node + node = OptimizableSvg.SvgNode(id, None, {},self.settings) + + if self.verbose: + self.reportSkippedAttribs(def_node, ["id"]) + + skipped_children = [] + for child in def_node: + if OptimizableSvg.remove_namespace(child.tag) == "linearGradient": + 
self.parseGradient(child,node) + elif OptimizableSvg.remove_namespace(child.tag) in OptimizableSvg.recognised_shapes: + raise NotImplementedError("Definition/instantiation of shapes not supported") + elif OptimizableSvg.remove_namespace(child.tag) == "defs": + raise NotImplementedError("Definition within definition not supported") + elif OptimizableSvg.remove_namespace(child.tag) == "g": + raise NotImplementedError("Groups within definition not supported") + else: + skipped_children.append(child) + + if len(node.children)>0: + #take this node out and enter it into defs + self.defs[node.children[0].id]=node.children[0] + node.children.pop() + + + if self.verbose: + self.reportSkippedChildren(def_node, skipped_children) + + self.depth -= 1 + + def parseGradientStop(self,stop): + param_dict={key:value for key,value in stop.attrib.items() if key in ["id","offset","stop-color","stop-opacity"]} + style_dict={} + if "style" in stop.attrib: + style_dict={item.split(":")[0]:item.split(":")[1] for item in stop.attrib["style"].split(";") if len(item)>0} + param_dict.update(style_dict) + + offset=OptimizableSvg.parseOpacity(param_dict["offset"]) + color=OptimizableSvg.parse_color(param_dict["stop-color"]) + opacity=OptimizableSvg.parseOpacity(param_dict["stop-opacity"]) if "stop-opacity" in param_dict else 1. + + return offset, color, opacity + + def parseGradient(self, gradient_node, parent): + tag = OptimizableSvg.remove_namespace(gradient_node.tag) + id = gradient_node.attrib["id"] if "id" in gradient_node.attrib else "" + if self.verbose: + print(self.offset_str("Parsing {}#{}".format(tag, id))) + + self.depth += 1 + if "stop" not in [OptimizableSvg.remove_namespace(child.tag) for child in gradient_node]\ + and "href" not in [OptimizableSvg.remove_namespace(key) for key in gradient_node.attrib.keys()]: + raise ValueError("Gradient {} has neither stops nor a href link to them".format(id)) + + transform=self.parseTransform(gradient_node) + begin=None + end = None + offsets=[] + stops=[] + href=None + + if "x1" in gradient_node.attrib or "y1" in gradient_node.attrib: + begin=np.array([0.,0.]) + if "x1" in gradient_node.attrib: + begin[0] = float(gradient_node.attrib["x1"]) + if "y1" in gradient_node.attrib: + begin[1] = float(gradient_node.attrib["y1"]) + begin = torch.tensor(begin.transpose(),dtype=torch.float32) + + if "x2" in gradient_node.attrib or "y2" in gradient_node.attrib: + end=np.array([0.,0.]) + if "x2" in gradient_node.attrib: + end[0] = float(gradient_node.attrib["x2"]) + if "y2" in gradient_node.attrib: + end[1] = float(gradient_node.attrib["y2"]) + end=torch.tensor(end.transpose(),dtype=torch.float32) + + stop_nodes=[node for node in list(gradient_node) if OptimizableSvg.remove_namespace(node.tag)=="stop"] + if len(stop_nodes)>0: + stop_nodes=sorted(stop_nodes,key=lambda n: float(n.attrib["offset"])) + + for stop in stop_nodes: + offset, color, opacity = self.parseGradientStop(stop) + offsets.append(offset) + stops.append(np.concatenate((color,np.array([opacity])))) + + hkey=next((value for key,value in gradient_node.attrib.items() if OptimizableSvg.remove_namespace(key)=="href"),None) + if hkey is not None: + href=self.defs[hkey.lstrip("#")] + + parent.children.append(OptimizableSvg.GradientNode(id,transform,self.settings,begin.to(self.device) if begin is not None else begin,end.to(self.device) if end is not None else end,torch.tensor(offsets,dtype=torch.float32,device=self.device) if len(offsets)>0 else None,torch.tensor(np.array(stops),dtype=torch.float32,device=self.device) if 
len(stops)>0 else None,href)) + + self.depth -= 1 + + def parseViewport(self, root): + if "width" in root.attrib and "height" in root.attrib: + self.canvas = np.array([int(math.ceil(float(root.attrib["width"]))), int(math.ceil(float(root.attrib["height"])))]) + elif "viewBox" in root.attrib: + s=root.attrib["viewBox"].split(" ") + w=s[2] + h=s[3] + self.canvas = np.array( + [int(math.ceil(float(w))), int(math.ceil(float(h)))]) + else: + raise ValueError("Size information is missing from document definition") + #endregion + + #region writing + def write_xml(self): + tree=self.root.write_xml(self) + + return minidom.parseString(etree.tostring(tree, 'utf-8')).toprettyxml(indent=" ") + + def write_defs(self,root): + if len(self.defs)==0: + return + + defnode = etree.SubElement(root, 'defs') + stylenode = etree.SubElement(root,'style') + stylenode.set('type','text/css') + stylenode.text="" + + defcpy=copy.copy(self.defs) + while len(defcpy)>0: + torem=[] + for key,value in defcpy.items(): + if issubclass(value.__class__,OptimizableSvg.SvgNode): + if value.href is None or value.href not in defcpy: + value.write_xml(defnode) + torem.append(key) + else: + continue + else: + #this is a string, and hence a CSS attribute + stylenode.text+=key+" {"+value+"}\n" + torem.append(key) + + for key in torem: + del defcpy[key] + #endregion + + diff --git a/pydiffvg/parse_svg.py b/pydiffvg/parse_svg.py new file mode 100644 index 0000000..17d91b3 --- /dev/null +++ b/pydiffvg/parse_svg.py @@ -0,0 +1,578 @@ +import torch +import xml.etree.ElementTree as etree +import numpy as np +import diffvg +import os +import pydiffvg +import svgpathtools +import svgpathtools.parser +import re +import warnings +import cssutils +import logging +cssutils.log.setLevel(logging.ERROR) + +def remove_namespaces(s): + """ + {...} ... -> ... 
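+    e.g. '{http://www.w3.org/2000/svg}path' -> 'path'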
+ """ + return re.sub('{.*}', '', s) + +def parse_style(s, defs): + style_dict = {} + for e in s.split(';'): + key_value = e.split(':') + if len(key_value) == 2: + key = key_value[0].strip() + value = key_value[1].strip() + if key == 'fill' or key == 'stroke': + # Special case: convert colors into tensor in definitions so + # that different shapes can share the same color + value = parse_color(value, defs) + style_dict[key] = value + return style_dict + +def parse_hex(s): + """ + Hex to tuple + """ + s = s.lstrip('#') + if len(s) == 3: + s = s[0] + s[0] + s[1] + s[1] + s[2] + s[2] + rgb = tuple(int(s[i:i+2], 16) for i in (0, 2, 4)) + # sRGB to RGB + # return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 2.2) + return torch.pow(torch.tensor([rgb[0] / 255.0, rgb[1] / 255.0, rgb[2] / 255.0]), 1.0) + +def parse_int(s): + """ + trim alphabets + """ + return int(float(''.join(i for i in s if (not i.isalpha())))) + +def parse_color(s, defs): + if s is None: + return None + if isinstance(s, torch.Tensor): + return s + s = s.lstrip(' ') + color = torch.tensor([0.0, 0.0, 0.0, 1.0]) + if s[0] == '#': + color[:3] = parse_hex(s) + elif s[:3] == 'url': + # url(#id) + color = defs[s[4:-1].lstrip('#')] + elif s == 'none': + color = None + elif s[:4] == 'rgb(': + rgb = s[4:-1].split(',') + color = torch.tensor([int(rgb[0]) / 255.0, int(rgb[1]) / 255.0, int(rgb[2]) / 255.0, 1.0]) + elif s == 'none': + return None + else: + warnings.warn('Unknown color command ' + s) + return color + +# https://github.com/mathandy/svgpathtools/blob/7ebc56a831357379ff22216bec07e2c12e8c5bc6/svgpathtools/parser.py +def _parse_transform_substr(transform_substr): + type_str, value_str = transform_substr.split('(') + value_str = value_str.replace(',', ' ') + values = list(map(float, filter(None, value_str.split(' ')))) + + transform = np.identity(3) + if 'matrix' in type_str: + transform[0:2, 0:3] = np.array([values[0:6:2], values[1:6:2]]) + elif 'translate' in transform_substr: + transform[0, 2] = values[0] + if len(values) > 1: + transform[1, 2] = values[1] + elif 'scale' in transform_substr: + x_scale = values[0] + y_scale = values[1] if (len(values) > 1) else x_scale + transform[0, 0] = x_scale + transform[1, 1] = y_scale + elif 'rotate' in transform_substr: + angle = values[0] * np.pi / 180.0 + if len(values) == 3: + offset = values[1:3] + else: + offset = (0, 0) + tf_offset = np.identity(3) + tf_offset[0:2, 2:3] = np.array([[offset[0]], [offset[1]]]) + tf_rotate = np.identity(3) + tf_rotate[0:2, 0:2] = np.array([[np.cos(angle), -np.sin(angle)], [np.sin(angle), np.cos(angle)]]) + tf_offset_neg = np.identity(3) + tf_offset_neg[0:2, 2:3] = np.array([[-offset[0]], [-offset[1]]]) + + transform = tf_offset.dot(tf_rotate).dot(tf_offset_neg) + elif 'skewX' in transform_substr: + transform[0, 1] = np.tan(values[0] * np.pi / 180.0) + elif 'skewY' in transform_substr: + transform[1, 0] = np.tan(values[0] * np.pi / 180.0) + else: + # Return an identity matrix if the type of transform is unknown, and warn the user + warnings.warn('Unknown SVG transform type: {0}'.format(type_str)) + return transform + +def parse_transform(transform_str): + """ + Converts a valid SVG transformation string into a 3x3 matrix. 
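+    For example, 'translate(10 20) scale(2)' composes left to right into
+    [[2, 0, 10], [0, 2, 20], [0, 0, 1]], returned as a torch.float32 tensor.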
+ If the string is empty or null, this returns a 3x3 identity matrix + """ + if not transform_str: + return np.identity(3) + elif not isinstance(transform_str, str): + raise TypeError('Must provide a string to parse') + + total_transform = np.identity(3) + transform_substrs = transform_str.split(')')[:-1] # Skip the last element, because it should be empty + for substr in transform_substrs: + total_transform = total_transform.dot(_parse_transform_substr(substr)) + + return torch.from_numpy(total_transform).type(torch.float32) + +def parse_linear_gradient(node, transform, defs): + begin = torch.tensor([0.0, 0.0]) + end = torch.tensor([0.0, 0.0]) + offsets = [] + stop_colors = [] + # Inherit from parent + for key in node.attrib: + if remove_namespaces(key) == 'href': + value = node.attrib[key] + parent = defs[value.lstrip('#')] + begin = parent.begin + end = parent.end + offsets = parent.offsets + stop_colors = parent.stop_colors + + for attrib in node.attrib: + attrib = remove_namespaces(attrib) + if attrib == 'x1': + begin[0] = float(node.attrib['x1']) + elif attrib == 'y1': + begin[1] = float(node.attrib['y1']) + elif attrib == 'x2': + end[0] = float(node.attrib['x2']) + elif attrib == 'y2': + end[1] = float(node.attrib['y2']) + elif attrib == 'gradientTransform': + transform = transform @ parse_transform(node.attrib['gradientTransform']) + + begin = transform @ torch.cat((begin, torch.ones([1]))) + begin = begin / begin[2] + begin = begin[:2] + end = transform @ torch.cat((end, torch.ones([1]))) + end = end / end[2] + end = end[:2] + + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'stop': + offset = float(child.attrib['offset']) + color = [0.0, 0.0, 0.0, 1.0] + if 'stop-color' in child.attrib: + c = parse_color(child.attrib['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in child.attrib: + color[3] = float(child.attrib['stop-opacity']) + if 'style' in child.attrib: + style = parse_style(child.attrib['style'], defs) + if 'stop-color' in style: + c = parse_color(style['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in style: + color[3] = float(style['stop-opacity']) + offsets.append(offset) + stop_colors.append(color) + if isinstance(offsets, list): + offsets = torch.tensor(offsets) + if isinstance(stop_colors, list): + stop_colors = torch.tensor(stop_colors) + + return pydiffvg.LinearGradient(begin, end, offsets, stop_colors) + + +def parse_radial_gradient(node, transform, defs): + begin = torch.tensor([0.0, 0.0]) + end = torch.tensor([0.0, 0.0]) + center = torch.tensor([0.0, 0.0]) + radius = torch.tensor([0.0, 0.0]) + offsets = [] + stop_colors = [] + # Inherit from parent + for key in node.attrib: + if remove_namespaces(key) == 'href': + value = node.attrib[key] + parent = defs[value.lstrip('#')] + begin = parent.begin + end = parent.end + offsets = parent.offsets + stop_colors = parent.stop_colors + + for attrib in node.attrib: + attrib = remove_namespaces(attrib) + if attrib == 'cx': + center[0] = float(node.attrib['cx']) + elif attrib == 'cy': + center[1] = float(node.attrib['cy']) + elif attrib == 'fx': + radius[0] = float(node.attrib['fx']) + elif attrib == 'fy': + radius[1] = float(node.attrib['fy']) + elif attrib == 'fr': + radius[0] = float(node.attrib['fr']) + radius[1] = float(node.attrib['fr']) + elif attrib == 'gradientTransform': + transform = transform @ parse_transform(node.attrib['gradientTransform']) + + # TODO: this is incorrect + center = transform @ torch.cat((center, torch.ones([1]))) + center 
= center / center[2] + center = center[:2] + + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'stop': + offset = float(child.attrib['offset']) + color = [0.0, 0.0, 0.0, 1.0] + if 'stop-color' in child.attrib: + c = parse_color(child.attrib['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in child.attrib: + color[3] = float(child.attrib['stop-opacity']) + if 'style' in child.attrib: + style = parse_style(child.attrib['style'], defs) + if 'stop-color' in style: + c = parse_color(style['stop-color'], defs) + color[:3] = [c[0], c[1], c[2]] + if 'stop-opacity' in style: + color[3] = float(style['stop-opacity']) + offsets.append(offset) + stop_colors.append(color) + if isinstance(offsets, list): + offsets = torch.tensor(offsets) + if isinstance(stop_colors, list): + stop_colors = torch.tensor(stop_colors) + + return pydiffvg.RadialGradient(begin, end, offsets, stop_colors) + +def parse_stylesheet(node, transform, defs): + # collect CSS classes + sheet = cssutils.parseString(node.text) + for rule in sheet: + if hasattr(rule, 'selectorText') and hasattr(rule, 'style'): + name = rule.selectorText + if len(name) >= 2 and name[0] == '.': + defs[name[1:]] = parse_style(rule.style.getCssText(), defs) + return defs + +def parse_defs(node, transform, defs): + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'linearGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) + elif tag == 'radialGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) + elif tag == 'style': + defs = parse_stylesheet(child, transform, defs) + return defs + +def parse_common_attrib(node, transform, fill_color, defs): + attribs = {} + if 'class' in node.attrib: + attribs.update(defs[node.attrib['class']]) + attribs.update(node.attrib) + + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + + stroke_color = None + stroke_width = torch.tensor(0.5) + use_even_odd_rule = False + + new_transform = transform + if 'transform' in attribs: + new_transform = transform @ parse_transform(attribs['transform']) + if 'fill' in attribs: + fill_color = parse_color(attribs['fill'], defs) + fill_opacity = 1.0 + if 'fill-opacity' in attribs: + fill_opacity *= float(attribs['fill-opacity']) + if 'opacity' in attribs: + fill_opacity *= float(attribs['opacity']) + # Ignore opacity if the color is a gradient + if isinstance(fill_color, torch.Tensor): + fill_color[3] = fill_opacity + + if 'fill-rule' in attribs: + if attribs['fill-rule'] == "evenodd": + use_even_odd_rule = True + elif attribs['fill-rule'] == "nonzero": + use_even_odd_rule = False + else: + warnings.warn('Unknown fill-rule: {}'.format(attribs['fill-rule'])) + + if 'stroke' in attribs: + stroke_color = parse_color(attribs['stroke'], defs) + + if 'stroke-width' in attribs: + stroke_width = attribs['stroke-width'] + if stroke_width[-2:] == 'px': + stroke_width = stroke_width[:-2] + stroke_width = torch.tensor(float(stroke_width) / 2.0) + + if 'style' in attribs: + style = parse_style(attribs['style'], defs) + if 'fill' in style: + fill_color = parse_color(style['fill'], defs) + fill_opacity = 1.0 + if 'fill-opacity' in style: + fill_opacity *= float(style['fill-opacity']) + if 'opacity' in style: + fill_opacity *= float(style['opacity']) + if 'fill-rule' in style: + if style['fill-rule'] == "evenodd": + use_even_odd_rule = True + elif style['fill-rule'] == "nonzero": + use_even_odd_rule = False + else: + 
warnings.warn('Unknown fill-rule: {}'.format(style['fill-rule'])) + # Ignore opacity if the color is a gradient + if isinstance(fill_color, torch.Tensor): + fill_color[3] = fill_opacity + if 'stroke' in style: + if style['stroke'] != 'none': + stroke_color = parse_color(style['stroke'], defs) + # Ignore opacity if the color is a gradient + if isinstance(stroke_color, torch.Tensor): + if 'stroke-opacity' in style: + stroke_color[3] = float(style['stroke-opacity']) + if 'opacity' in style: + stroke_color[3] *= float(style['opacity']) + if 'stroke-width' in style: + stroke_width = style['stroke-width'] + if stroke_width[-2:] == 'px': + stroke_width = stroke_width[:-2] + stroke_width = torch.tensor(float(stroke_width) / 2.0) + + if isinstance(fill_color, pydiffvg.LinearGradient): + fill_color.begin = new_transform @ torch.cat((fill_color.begin, torch.ones([1]))) + fill_color.begin = fill_color.begin / fill_color.begin[2] + fill_color.begin = fill_color.begin[:2] + fill_color.end = new_transform @ torch.cat((fill_color.end, torch.ones([1]))) + fill_color.end = fill_color.end / fill_color.end[2] + fill_color.end = fill_color.end[:2] + if isinstance(stroke_color, pydiffvg.LinearGradient): + stroke_color.begin = new_transform @ torch.cat((stroke_color.begin, torch.ones([1]))) + stroke_color.begin = stroke_color.begin / stroke_color.begin[2] + stroke_color.begin = stroke_color.begin[:2] + stroke_color.end = new_transform @ torch.cat((stroke_color.end, torch.ones([1]))) + stroke_color.end = stroke_color.end / stroke_color.end[2] + stroke_color.end = stroke_color.end[:2] + if 'filter' in style: + print('*** WARNING ***: Ignoring filter for path with id "{}"'.format(name)) + + return new_transform, fill_color, stroke_color, stroke_width, use_even_odd_rule + +def is_shape(tag): + return tag == 'path' or tag == 'polygon' or tag == 'line' or tag == 'circle' or tag == 'rect' + +def parse_shape(node, transform, fill_color, shapes, shape_groups, defs): + tag = remove_namespaces(node.tag) + new_transform, new_fill_color, stroke_color, stroke_width, use_even_odd_rule = \ + parse_common_attrib(node, transform, fill_color, defs) + if tag == 'path': + d = node.attrib['d'] + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + force_closing = new_fill_color is not None + paths = pydiffvg.from_svg_path(d, new_transform, force_closing) + for idx, path in enumerate(paths): + assert(path.points.shape[1] == 2) + path.stroke_width = stroke_width + path.source_id = name + path.id = "{}-{}".format(name,idx) if len(paths)>1 else name + prev_shapes_size = len(shapes) + shapes = shapes + paths + shape_ids = torch.tensor(list(range(prev_shapes_size, len(shapes)))) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = use_even_odd_rule, + id = name)) + elif tag == 'polygon': + name = '' + if 'id' in node.attrib: + name = node.attrib['id'] + force_closing = new_fill_color is not None + pts = node.attrib['points'].strip() + pts = pts.split(' ') + # import ipdb; ipdb.set_trace() + pts = [[float(y) for y in re.split(',| ', x)] for x in pts if x] + pts = torch.tensor(pts, dtype=torch.float32).view(-1, 2) + polygon = pydiffvg.Polygon(pts, force_closing) + polygon.stroke_width = stroke_width + shape_ids = torch.tensor([len(shapes)]) + shapes.append(polygon) + shape_groups.append(pydiffvg.ShapeGroup(\ + shape_ids = shape_ids, + fill_color = new_fill_color, + stroke_color = stroke_color, + use_even_odd_rule = 
use_even_odd_rule,
+            shape_to_canvas = new_transform,
+            id = name))
+    elif tag == 'line':
+        x1 = float(node.attrib['x1'])
+        y1 = float(node.attrib['y1'])
+        x2 = float(node.attrib['x2'])
+        y2 = float(node.attrib['y2'])
+        p1 = torch.tensor([x1, y1])
+        p2 = torch.tensor([x2, y2])
+        points = torch.stack((p1, p2))
+        # a line is represented as an open two-point polygon
+        line = pydiffvg.Polygon(points, False)
+        line.stroke_width = stroke_width
+        shape_ids = torch.tensor([len(shapes)])
+        shapes.append(line)
+        shape_groups.append(pydiffvg.ShapeGroup(\
+            shape_ids = shape_ids,
+            fill_color = new_fill_color,
+            stroke_color = stroke_color,
+            use_even_odd_rule = use_even_odd_rule,
+            shape_to_canvas = new_transform))
+    elif tag == 'circle':
+        radius = float(node.attrib['r'])
+        cx = float(node.attrib['cx'])
+        cy = float(node.attrib['cy'])
+        name = ''
+        if 'id' in node.attrib:
+            name = node.attrib['id']
+        center = torch.tensor([cx, cy])
+        circle = pydiffvg.Circle(radius = torch.tensor(radius),
+                                 center = center)
+        circle.stroke_width = stroke_width
+        shape_ids = torch.tensor([len(shapes)])
+        shapes.append(circle)
+        shape_groups.append(pydiffvg.ShapeGroup(\
+            shape_ids = shape_ids,
+            fill_color = new_fill_color,
+            stroke_color = stroke_color,
+            use_even_odd_rule = use_even_odd_rule,
+            shape_to_canvas = new_transform))
+    elif tag == 'ellipse':
+        rx = float(node.attrib['rx'])
+        ry = float(node.attrib['ry'])
+        cx = float(node.attrib['cx'])
+        cy = float(node.attrib['cy'])
+        name = ''
+        if 'id' in node.attrib:
+            name = node.attrib['id']
+        center = torch.tensor([cx, cy])
+        # an ellipse has separate rx/ry radii, so use pydiffvg.Ellipse
+        ellipse = pydiffvg.Ellipse(radius = torch.tensor([rx, ry]),
+                                   center = center)
+        ellipse.stroke_width = stroke_width
+        shape_ids = torch.tensor([len(shapes)])
+        shapes.append(ellipse)
+        shape_groups.append(pydiffvg.ShapeGroup(\
+            shape_ids = shape_ids,
+            fill_color = new_fill_color,
+            stroke_color = stroke_color,
+            use_even_odd_rule = use_even_odd_rule,
+            shape_to_canvas = new_transform))
+    elif tag == 'rect':
+        x = 0.0
+        y = 0.0
+        if 'x' in node.attrib:
+            x = float(node.attrib['x'])
+        if 'y' in node.attrib:
+            y = float(node.attrib['y'])
+        w = float(node.attrib['width'])
+        h = float(node.attrib['height'])
+        p_min = torch.tensor([x, y])
+        p_max = torch.tensor([x + w, y + h])
+        rect = pydiffvg.Rect(p_min = p_min, p_max = p_max)
+        rect.stroke_width = stroke_width
+        shape_ids = torch.tensor([len(shapes)])
+        shapes.append(rect)
+        shape_groups.append(pydiffvg.ShapeGroup(\
+            shape_ids = shape_ids,
+            fill_color = new_fill_color,
+            stroke_color = stroke_color,
+            use_even_odd_rule = use_even_odd_rule,
+            shape_to_canvas = new_transform))
+    return shapes, shape_groups
+
+def parse_group(node, transform, fill_color, shapes, shape_groups, defs):
+    if 'transform' in node.attrib:
+        transform = transform @ parse_transform(node.attrib['transform'])
+    if 'fill' in node.attrib:
+        fill_color = parse_color(node.attrib['fill'], defs)
+    for child in node:
+        tag = remove_namespaces(child.tag)
+        if is_shape(tag):
+            shapes, shape_groups = parse_shape(\
+                child, transform, fill_color, shapes, shape_groups, defs)
+        elif tag == 'g':
+            shapes, shape_groups = parse_group(\
+                child, transform, fill_color, shapes, shape_groups, defs)
+    return shapes, shape_groups
+
+def parse_scene(node):
+    canvas_width = -1
+    canvas_height = -1
+    defs = {}
+    shapes = []
+    shape_groups = []
+    fill_color = torch.tensor([0.0, 0.0, 0.0, 1.0])
+    transform = torch.eye(3)
+    if 'viewBox' in node.attrib:
+        view_box_array = node.attrib['viewBox'].split()
+        canvas_width = parse_int(view_box_array[2])
+        canvas_height = parse_int(view_box_array[3])
+    else:
+        if 
'width' in node.attrib: + canvas_width = parse_int(node.attrib['width']) + else: + print('Warning: Can\'t find canvas width.') + if 'height' in node.attrib: + canvas_height = parse_int(node.attrib['height']) + else: + print('Warning: Can\'t find canvas height.') + for child in node: + tag = remove_namespaces(child.tag) + if tag == 'defs': + defs = parse_defs(child, transform, defs) + elif tag == 'style': + defs = parse_stylesheet(child, transform, defs) + elif tag == 'linearGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_linear_gradient(child, transform, defs) + elif tag == 'radialGradient': + if 'id' in child.attrib: + defs[child.attrib['id']] = parse_radial_gradient(child, transform, defs) + elif is_shape(tag): + shapes, shape_groups = parse_shape(\ + child, transform, fill_color, shapes, shape_groups, defs) + elif tag == 'g': + shapes, shape_groups = parse_group(\ + child, transform, fill_color, shapes, shape_groups, defs) + return canvas_width, canvas_height, shapes, shape_groups + +def svg_to_scene(filename): + """ + Load from a SVG file and convert to PyTorch tensors. + """ + + tree = etree.parse(filename) + root = tree.getroot() + cwd = os.getcwd() + if (os.path.dirname(filename) != ''): + os.chdir(os.path.dirname(filename)) + ret = parse_scene(root) + os.chdir(cwd) + return ret diff --git a/pydiffvg/pixel_filter.py b/pydiffvg/pixel_filter.py new file mode 100644 index 0000000..9b0ff22 --- /dev/null +++ b/pydiffvg/pixel_filter.py @@ -0,0 +1,9 @@ +import torch +import pydiffvg + +class PixelFilter: + def __init__(self, + type, + radius = torch.tensor(0.5)): + self.type = type + self.radius = radius diff --git a/pydiffvg/render_pytorch.py b/pydiffvg/render_pytorch.py new file mode 100644 index 0000000..b776ce6 --- /dev/null +++ b/pydiffvg/render_pytorch.py @@ -0,0 +1,870 @@ +import torch +import diffvg +import pydiffvg +import time +from enum import IntEnum +import warnings + +print_timing = False + +def set_print_timing(val): + global print_timing + print_timing=val + +class OutputType(IntEnum): + color = 1 + sdf = 2 + +class RenderFunction(torch.autograd.Function): + """ + The PyTorch interface of diffvg. + """ + @staticmethod + def serialize_scene(canvas_width, + canvas_height, + shapes, + shape_groups, + filter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, + radius = torch.tensor(0.5)), + output_type = OutputType.color, + use_prefiltering = False, + eval_positions = torch.tensor([])): + """ + Given a list of shapes, convert them to a linear list of argument, + so that we can use it in PyTorch. 
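+        The flat layout is: canvas size, shape and shape-group counts, output type,
+        prefiltering flag and eval positions first; then one block per shape (a
+        diffvg.ShapeType tag followed by its geometry tensors on the CPU and the
+        stroke width); then one block per shape group (shape ids, fill and stroke
+        color specifications, the even-odd flag and shape_to_canvas); and finally
+        the pixel filter type and radius. forward() unpacks it in the same order.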
+ """ + num_shapes = len(shapes) + num_shape_groups = len(shape_groups) + args = [] + args.append(canvas_width) + args.append(canvas_height) + args.append(num_shapes) + args.append(num_shape_groups) + args.append(output_type) + args.append(use_prefiltering) + args.append(eval_positions.to(pydiffvg.get_device())) + for shape in shapes: + use_thickness = False + if isinstance(shape, pydiffvg.Circle): + assert(shape.center.is_contiguous()) + args.append(diffvg.ShapeType.circle) + args.append(shape.radius.cpu()) + args.append(shape.center.cpu()) + elif isinstance(shape, pydiffvg.Ellipse): + assert(shape.radius.is_contiguous()) + assert(shape.center.is_contiguous()) + args.append(diffvg.ShapeType.ellipse) + args.append(shape.radius.cpu()) + args.append(shape.center.cpu()) + elif isinstance(shape, pydiffvg.Path): + assert(shape.num_control_points.is_contiguous()) + assert(shape.points.is_contiguous()) + assert(shape.points.shape[1] == 2) + assert(torch.isfinite(shape.points).all()) + args.append(diffvg.ShapeType.path) + args.append(shape.num_control_points.to(torch.int32).cpu()) + args.append(shape.points.cpu()) + if len(shape.stroke_width.shape) > 0 and shape.stroke_width.shape[0] > 1: + assert(torch.isfinite(shape.stroke_width).all()) + use_thickness = True + args.append(shape.stroke_width.cpu()) + else: + args.append(None) + args.append(shape.is_closed) + args.append(shape.use_distance_approx) + elif isinstance(shape, pydiffvg.Polygon): + assert(shape.points.is_contiguous()) + assert(shape.points.shape[1] == 2) + args.append(diffvg.ShapeType.path) + if shape.is_closed: + args.append(torch.zeros(shape.points.shape[0], dtype = torch.int32)) + else: + args.append(torch.zeros(shape.points.shape[0] - 1, dtype = torch.int32)) + args.append(shape.points.cpu()) + args.append(None) + args.append(shape.is_closed) + args.append(False) # use_distance_approx + elif isinstance(shape, pydiffvg.Rect): + assert(shape.p_min.is_contiguous()) + assert(shape.p_max.is_contiguous()) + args.append(diffvg.ShapeType.rect) + args.append(shape.p_min.cpu()) + args.append(shape.p_max.cpu()) + else: + assert(False) + if use_thickness: + args.append(torch.tensor(0.0)) + else: + args.append(shape.stroke_width.cpu()) + + for shape_group in shape_groups: + assert(shape_group.shape_ids.is_contiguous()) + args.append(shape_group.shape_ids.to(torch.int32).cpu()) + # Fill color + if shape_group.fill_color is None: + args.append(None) + elif isinstance(shape_group.fill_color, torch.Tensor): + assert(shape_group.fill_color.is_contiguous()) + args.append(diffvg.ColorType.constant) + args.append(shape_group.fill_color.cpu()) + elif isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + assert(shape_group.fill_color.begin.is_contiguous()) + assert(shape_group.fill_color.end.is_contiguous()) + assert(shape_group.fill_color.offsets.is_contiguous()) + assert(shape_group.fill_color.stop_colors.is_contiguous()) + args.append(diffvg.ColorType.linear_gradient) + args.append(shape_group.fill_color.begin.cpu()) + args.append(shape_group.fill_color.end.cpu()) + args.append(shape_group.fill_color.offsets.cpu()) + args.append(shape_group.fill_color.stop_colors.cpu()) + elif isinstance(shape_group.fill_color, pydiffvg.RadialGradient): + assert(shape_group.fill_color.center.is_contiguous()) + assert(shape_group.fill_color.radius.is_contiguous()) + assert(shape_group.fill_color.offsets.is_contiguous()) + assert(shape_group.fill_color.stop_colors.is_contiguous()) + args.append(diffvg.ColorType.radial_gradient) + 
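+                # a radial gradient is serialized as center, radius, stop offsets and
+                # stop colors, all moved to the CPU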
args.append(shape_group.fill_color.center.cpu()) + args.append(shape_group.fill_color.radius.cpu()) + args.append(shape_group.fill_color.offsets.cpu()) + args.append(shape_group.fill_color.stop_colors.cpu()) + + if shape_group.fill_color is not None: + # go through the underlying shapes and check if they are all closed + for shape_id in shape_group.shape_ids: + if isinstance(shapes[shape_id], pydiffvg.Path): + if not shapes[shape_id].is_closed: + warnings.warn("Detected non-closed paths with fill color. This might causes unexpected results.", Warning) + + # Stroke color + if shape_group.stroke_color is None: + args.append(None) + elif isinstance(shape_group.stroke_color, torch.Tensor): + assert(shape_group.stroke_color.is_contiguous()) + args.append(diffvg.ColorType.constant) + args.append(shape_group.stroke_color.cpu()) + elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + assert(shape_group.stroke_color.begin.is_contiguous()) + assert(shape_group.stroke_color.end.is_contiguous()) + assert(shape_group.stroke_color.offsets.is_contiguous()) + assert(shape_group.stroke_color.stop_colors.is_contiguous()) + assert(torch.isfinite(shape_group.stroke_color.stop_colors).all()) + args.append(diffvg.ColorType.linear_gradient) + args.append(shape_group.stroke_color.begin.cpu()) + args.append(shape_group.stroke_color.end.cpu()) + args.append(shape_group.stroke_color.offsets.cpu()) + args.append(shape_group.stroke_color.stop_colors.cpu()) + elif isinstance(shape_group.stroke_color, pydiffvg.RadialGradient): + assert(shape_group.stroke_color.center.is_contiguous()) + assert(shape_group.stroke_color.radius.is_contiguous()) + assert(shape_group.stroke_color.offsets.is_contiguous()) + assert(shape_group.stroke_color.stop_colors.is_contiguous()) + assert(torch.isfinite(shape_group.stroke_color.stop_colors).all()) + args.append(diffvg.ColorType.radial_gradient) + args.append(shape_group.stroke_color.center.cpu()) + args.append(shape_group.stroke_color.radius.cpu()) + args.append(shape_group.stroke_color.offsets.cpu()) + args.append(shape_group.stroke_color.stop_colors.cpu()) + args.append(shape_group.use_even_odd_rule) + # Transformation + args.append(shape_group.shape_to_canvas.contiguous().cpu()) + args.append(filter.type) + args.append(filter.radius.cpu()) + return args + + @staticmethod + def forward(ctx, + width, + height, + num_samples_x, + num_samples_y, + seed, + background_image, + *args): + """ + Forward rendering pass. 
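+        Unpacks the flat argument list produced by serialize_scene (in the same order),
+        rebuilds the diffvg scene, and returns an H x W x 4 RGBA image for
+        OutputType.color, or a one-channel SDF image (or per-position values when
+        eval_positions is non-empty) for OutputType.sdf. A typical call goes through
+        the autograd wrapper, e.g. (a sketch):
+            scene_args = RenderFunction.serialize_scene(w, h, shapes, shape_groups)
+            img = RenderFunction.apply(w, h, 2, 2, 0, None, *scene_args)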
+ """ + # Unpack arguments + current_index = 0 + canvas_width = args[current_index] + current_index += 1 + canvas_height = args[current_index] + current_index += 1 + num_shapes = args[current_index] + current_index += 1 + num_shape_groups = args[current_index] + current_index += 1 + output_type = args[current_index] + current_index += 1 + use_prefiltering = args[current_index] + current_index += 1 + eval_positions = args[current_index] + current_index += 1 + shapes = [] + shape_groups = [] + shape_contents = [] # Important to avoid GC deleting the shapes + color_contents = [] # Same as above + for shape_id in range(num_shapes): + shape_type = args[current_index] + current_index += 1 + if shape_type == diffvg.ShapeType.circle: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Circle(radius, diffvg.Vector2f(center[0], center[1])) + elif shape_type == diffvg.ShapeType.ellipse: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Ellipse(diffvg.Vector2f(radius[0], radius[1]), + diffvg.Vector2f(center[0], center[1])) + elif shape_type == diffvg.ShapeType.path: + num_control_points = args[current_index] + current_index += 1 + points = args[current_index] + current_index += 1 + thickness = args[current_index] + current_index += 1 + is_closed = args[current_index] + current_index += 1 + use_distance_approx = args[current_index] + current_index += 1 + shape = diffvg.Path(diffvg.int_ptr(num_control_points.data_ptr()), + diffvg.float_ptr(points.data_ptr()), + diffvg.float_ptr(thickness.data_ptr() if thickness is not None else 0), + num_control_points.shape[0], + points.shape[0], + is_closed, + use_distance_approx) + elif shape_type == diffvg.ShapeType.rect: + p_min = args[current_index] + current_index += 1 + p_max = args[current_index] + current_index += 1 + shape = diffvg.Rect(diffvg.Vector2f(p_min[0], p_min[1]), + diffvg.Vector2f(p_max[0], p_max[1])) + else: + assert(False) + stroke_width = args[current_index] + current_index += 1 + shapes.append(diffvg.Shape(\ + shape_type, shape.get_ptr(), stroke_width.item())) + shape_contents.append(shape) + + for shape_group_id in range(num_shape_groups): + shape_ids = args[current_index] + current_index += 1 + fill_color_type = args[current_index] + current_index += 1 + if fill_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + fill_color = diffvg.Constant(\ + diffvg.Vector4f(color[0], color[1], color[2], color[3])) + elif fill_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), + diffvg.Vector2f(end[0], end[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif fill_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), + diffvg.Vector2f(radius[0], radius[1]), + 
offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif fill_color_type is None: + fill_color = None + else: + assert(False) + stroke_color_type = args[current_index] + current_index += 1 + if stroke_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + stroke_color = diffvg.Constant(\ + diffvg.Vector4f(color[0], color[1], color[2], color[3])) + elif stroke_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), + diffvg.Vector2f(end[0], end[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif stroke_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), + diffvg.Vector2f(radius[0], radius[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif stroke_color_type is None: + stroke_color = None + else: + assert(False) + use_even_odd_rule = args[current_index] + current_index += 1 + shape_to_canvas = args[current_index] + current_index += 1 + + if fill_color is not None: + color_contents.append(fill_color) + if stroke_color is not None: + color_contents.append(stroke_color) + shape_groups.append(diffvg.ShapeGroup(\ + diffvg.int_ptr(shape_ids.data_ptr()), + shape_ids.shape[0], + diffvg.ColorType.constant if fill_color_type is None else fill_color_type, + diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(), + diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type, + diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(), + use_even_odd_rule, + diffvg.float_ptr(shape_to_canvas.data_ptr()))) + + filter_type = args[current_index] + current_index += 1 + filter_radius = args[current_index] + current_index += 1 + filt = diffvg.Filter(filter_type, filter_radius) + + start = time.time() + scene = diffvg.Scene(canvas_width, canvas_height, + shapes, shape_groups, filt, pydiffvg.get_use_gpu(), + pydiffvg.get_device().index if pydiffvg.get_device().index is not None else -1) + time_elapsed = time.time() - start + global print_timing + if print_timing: + print('Scene construction, time: %.5f s' % time_elapsed) + + if output_type == OutputType.color: + assert(eval_positions.shape[0] == 0) + rendered_image = torch.zeros(height, width, 4, device = pydiffvg.get_device()) + else: + assert(output_type == OutputType.sdf) + if eval_positions.shape[0] == 0: + rendered_image = torch.zeros(height, width, 1, device = pydiffvg.get_device()) + else: + rendered_image = torch.zeros(eval_positions.shape[0], 1, device = pydiffvg.get_device()) + + if background_image is not None: + background_image = background_image.to(pydiffvg.get_device()) + if background_image.shape[2] == 3: + background_image = torch.cat((\ + background_image, torch.ones(background_image.shape[0], background_image.shape[1], 1, + device = 
background_image.device)), dim = 2) + background_image = background_image.contiguous() + assert(background_image.shape[0] == rendered_image.shape[0]) + assert(background_image.shape[1] == rendered_image.shape[1]) + assert(background_image.shape[2] == 4) + + start = time.time() + diffvg.render(scene, + diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.color else 0), + diffvg.float_ptr(rendered_image.data_ptr() if output_type == OutputType.sdf else 0), + width, + height, + num_samples_x, + num_samples_y, + seed, + diffvg.float_ptr(0), # d_background_image + diffvg.float_ptr(0), # d_render_image + diffvg.float_ptr(0), # d_render_sdf + diffvg.float_ptr(0), # d_translation + use_prefiltering, + diffvg.float_ptr(eval_positions.data_ptr()), + eval_positions.shape[0]) + assert(torch.isfinite(rendered_image).all()) + time_elapsed = time.time() - start + if print_timing: + print('Forward pass, time: %.5f s' % time_elapsed) + + ctx.scene = scene + ctx.background_image = background_image + ctx.shape_contents = shape_contents + ctx.color_contents = color_contents + ctx.filter = filt + ctx.width = width + ctx.height = height + ctx.num_samples_x = num_samples_x + ctx.num_samples_y = num_samples_y + ctx.seed = seed + ctx.output_type = output_type + ctx.use_prefiltering = use_prefiltering + ctx.eval_positions = eval_positions + return rendered_image + + @staticmethod + def render_grad(grad_img, + width, + height, + num_samples_x, + num_samples_y, + seed, + background_image, + *args): + if not grad_img.is_contiguous(): + grad_img = grad_img.contiguous() + assert(torch.isfinite(grad_img).all()) + + # Unpack arguments + current_index = 0 + canvas_width = args[current_index] + current_index += 1 + canvas_height = args[current_index] + current_index += 1 + num_shapes = args[current_index] + current_index += 1 + num_shape_groups = args[current_index] + current_index += 1 + output_type = args[current_index] + current_index += 1 + use_prefiltering = args[current_index] + current_index += 1 + eval_positions = args[current_index] + current_index += 1 + shapes = [] + shape_groups = [] + shape_contents = [] # Important to avoid GC deleting the shapes + color_contents = [] # Same as above + for shape_id in range(num_shapes): + shape_type = args[current_index] + current_index += 1 + if shape_type == diffvg.ShapeType.circle: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Circle(radius, diffvg.Vector2f(center[0], center[1])) + elif shape_type == diffvg.ShapeType.ellipse: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Ellipse(diffvg.Vector2f(radius[0], radius[1]), + diffvg.Vector2f(center[0], center[1])) + elif shape_type == diffvg.ShapeType.path: + num_control_points = args[current_index] + current_index += 1 + points = args[current_index] + current_index += 1 + thickness = args[current_index] + current_index += 1 + is_closed = args[current_index] + current_index += 1 + use_distance_approx = args[current_index] + current_index += 1 + shape = diffvg.Path(diffvg.int_ptr(num_control_points.data_ptr()), + diffvg.float_ptr(points.data_ptr()), + diffvg.float_ptr(thickness.data_ptr() if thickness is not None else 0), + num_control_points.shape[0], + points.shape[0], + is_closed, + use_distance_approx) + elif shape_type == diffvg.ShapeType.rect: + p_min = args[current_index] + 
current_index += 1 + p_max = args[current_index] + current_index += 1 + shape = diffvg.Rect(diffvg.Vector2f(p_min[0], p_min[1]), + diffvg.Vector2f(p_max[0], p_max[1])) + else: + assert(False) + stroke_width = args[current_index] + current_index += 1 + shapes.append(diffvg.Shape(\ + shape_type, shape.get_ptr(), stroke_width.item())) + shape_contents.append(shape) + + for shape_group_id in range(num_shape_groups): + shape_ids = args[current_index] + current_index += 1 + fill_color_type = args[current_index] + current_index += 1 + if fill_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + fill_color = diffvg.Constant(\ + diffvg.Vector4f(color[0], color[1], color[2], color[3])) + elif fill_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), + diffvg.Vector2f(end[0], end[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif fill_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), + diffvg.Vector2f(radius[0], radius[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif fill_color_type is None: + fill_color = None + else: + assert(False) + stroke_color_type = args[current_index] + current_index += 1 + if stroke_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + stroke_color = diffvg.Constant(\ + diffvg.Vector4f(color[0], color[1], color[2], color[3])) + elif stroke_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.LinearGradient(diffvg.Vector2f(beg[0], beg[1]), + diffvg.Vector2f(end[0], end[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif stroke_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.RadialGradient(diffvg.Vector2f(center[0], center[1]), + diffvg.Vector2f(radius[0], radius[1]), + offsets.shape[0], + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + elif stroke_color_type is None: + stroke_color = None + else: + assert(False) + use_even_odd_rule = args[current_index] + current_index += 1 + shape_to_canvas = args[current_index] + current_index += 1 + + if fill_color is not None: + color_contents.append(fill_color) + if stroke_color is not None: + 
color_contents.append(stroke_color) + shape_groups.append(diffvg.ShapeGroup(\ + diffvg.int_ptr(shape_ids.data_ptr()), + shape_ids.shape[0], + diffvg.ColorType.constant if fill_color_type is None else fill_color_type, + diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(), + diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type, + diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(), + use_even_odd_rule, + diffvg.float_ptr(shape_to_canvas.data_ptr()))) + + filter_type = args[current_index] + current_index += 1 + filter_radius = args[current_index] + current_index += 1 + filt = diffvg.Filter(filter_type, filter_radius) + + scene = diffvg.Scene(canvas_width, canvas_height, + shapes, shape_groups, filt, pydiffvg.get_use_gpu(), + pydiffvg.get_device().index if pydiffvg.get_device().index is not None else -1) + + if output_type == OutputType.color: + assert(grad_img.shape[2] == 4) + else: + assert(grad_img.shape[2] == 1) + + if background_image is not None: + background_image = background_image.to(pydiffvg.get_device()) + if background_image.shape[2] == 3: + background_image = torch.cat((\ + background_image, torch.ones(background_image.shape[0], background_image.shape[1], 1, + device = background_image.device)), dim = 2) + background_image = background_image.contiguous() + assert(background_image.shape[0] == rendered_image.shape[0]) + assert(background_image.shape[1] == rendered_image.shape[1]) + assert(background_image.shape[2] == 4) + + translation_grad_image = \ + torch.zeros(height, width, 2, device = pydiffvg.get_device()) + start = time.time() + diffvg.render(scene, + diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(0), # render_image + diffvg.float_ptr(0), # render_sdf + width, + height, + num_samples_x, + num_samples_y, + seed, + diffvg.float_ptr(0), # d_background_image + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.color else 0), + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.sdf else 0), + diffvg.float_ptr(translation_grad_image.data_ptr()), + use_prefiltering, + diffvg.float_ptr(eval_positions.data_ptr()), + eval_positions.shape[0]) + time_elapsed = time.time() - start + if print_timing: + print('Gradient pass, time: %.5f s' % time_elapsed) + assert(torch.isfinite(translation_grad_image).all()) + + return translation_grad_image + + @staticmethod + def backward(ctx, + grad_img): + if not grad_img.is_contiguous(): + grad_img = grad_img.contiguous() + assert(torch.isfinite(grad_img).all()) + + scene = ctx.scene + width = ctx.width + height = ctx.height + num_samples_x = ctx.num_samples_x + num_samples_y = ctx.num_samples_y + seed = ctx.seed + output_type = ctx.output_type + use_prefiltering = ctx.use_prefiltering + eval_positions = ctx.eval_positions + background_image = ctx.background_image + + if background_image is not None: + d_background_image = torch.zeros_like(background_image) + else: + d_background_image = None + + start = time.time() + diffvg.render(scene, + diffvg.float_ptr(background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(0), # render_image + diffvg.float_ptr(0), # render_sdf + width, + height, + num_samples_x, + num_samples_y, + seed, + diffvg.float_ptr(d_background_image.data_ptr() if background_image is not None else 0), + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.color else 0), + diffvg.float_ptr(grad_img.data_ptr() if output_type == OutputType.sdf else 0), 
+ diffvg.float_ptr(0), # d_translation + use_prefiltering, + diffvg.float_ptr(eval_positions.data_ptr()), + eval_positions.shape[0]) + time_elapsed = time.time() - start + global print_timing + if print_timing: + print('Backward pass, time: %.5f s' % time_elapsed) + + d_args = [] + d_args.append(None) # width + d_args.append(None) # height + d_args.append(None) # num_samples_x + d_args.append(None) # num_samples_y + d_args.append(None) # seed + d_args.append(d_background_image) + d_args.append(None) # canvas_width + d_args.append(None) # canvas_height + d_args.append(None) # num_shapes + d_args.append(None) # num_shape_groups + d_args.append(None) # output_type + d_args.append(None) # use_prefiltering + d_args.append(None) # eval_positions + for shape_id in range(scene.num_shapes): + d_args.append(None) # type + d_shape = scene.get_d_shape(shape_id) + use_thickness = False + if d_shape.type == diffvg.ShapeType.circle: + d_circle = d_shape.as_circle() + radius = torch.tensor(d_circle.radius) + assert(torch.isfinite(radius).all()) + d_args.append(radius) + c = d_circle.center + c = torch.tensor((c.x, c.y)) + assert(torch.isfinite(c).all()) + d_args.append(c) + elif d_shape.type == diffvg.ShapeType.ellipse: + d_ellipse = d_shape.as_ellipse() + r = d_ellipse.radius + r = torch.tensor((d_ellipse.radius.x, d_ellipse.radius.y)) + assert(torch.isfinite(r).all()) + d_args.append(r) + c = d_ellipse.center + c = torch.tensor((c.x, c.y)) + assert(torch.isfinite(c).all()) + d_args.append(c) + elif d_shape.type == diffvg.ShapeType.path: + d_path = d_shape.as_path() + points = torch.zeros((d_path.num_points, 2)) + thickness = None + if d_path.has_thickness(): + use_thickness = True + thickness = torch.zeros(d_path.num_points) + d_path.copy_to(diffvg.float_ptr(points.data_ptr()), diffvg.float_ptr(thickness.data_ptr())) + else: + d_path.copy_to(diffvg.float_ptr(points.data_ptr()), diffvg.float_ptr(0)) + assert(torch.isfinite(points).all()) + if thickness is not None: + assert(torch.isfinite(thickness).all()) + d_args.append(None) # num_control_points + d_args.append(points) + d_args.append(thickness) + d_args.append(None) # is_closed + d_args.append(None) # use_distance_approx + elif d_shape.type == diffvg.ShapeType.rect: + d_rect = d_shape.as_rect() + p_min = torch.tensor((d_rect.p_min.x, d_rect.p_min.y)) + p_max = torch.tensor((d_rect.p_max.x, d_rect.p_max.y)) + assert(torch.isfinite(p_min).all()) + assert(torch.isfinite(p_max).all()) + d_args.append(p_min) + d_args.append(p_max) + else: + assert(False) + if use_thickness: + d_args.append(None) + else: + w = torch.tensor((d_shape.stroke_width)) + assert(torch.isfinite(w).all()) + d_args.append(w) + + for group_id in range(scene.num_shape_groups): + d_shape_group = scene.get_d_shape_group(group_id) + d_args.append(None) # shape_ids + d_args.append(None) # fill_color_type + if d_shape_group.has_fill_color(): + if d_shape_group.fill_color_type == diffvg.ColorType.constant: + d_constant = d_shape_group.fill_color_as_constant() + c = d_constant.color + d_args.append(torch.tensor((c.x, c.y, c.z, c.w))) + elif d_shape_group.fill_color_type == diffvg.ColorType.linear_gradient: + d_linear_gradient = d_shape_group.fill_color_as_linear_gradient() + beg = d_linear_gradient.begin + d_args.append(torch.tensor((beg.x, beg.y))) + end = d_linear_gradient.end + d_args.append(torch.tensor((end.x, end.y))) + offsets = torch.zeros((d_linear_gradient.num_stops)) + stop_colors = torch.zeros((d_linear_gradient.num_stops, 4)) + d_linear_gradient.copy_to(\ + 
diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: + d_radial_gradient = d_shape_group.fill_color_as_radial_gradient() + center = d_radial_gradient.center + d_args.append(torch.tensor((center.x, center.y))) + radius = d_radial_gradient.radius + d_args.append(torch.tensor((radius.x, radius.y))) + offsets = torch.zeros((d_radial_gradient.num_stops)) + stop_colors = torch.zeros((d_radial_gradient.num_stops, 4)) + d_radial_gradient.copy_to(\ + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + else: + assert(False) + d_args.append(None) # stroke_color_type + if d_shape_group.has_stroke_color(): + if d_shape_group.stroke_color_type == diffvg.ColorType.constant: + d_constant = d_shape_group.stroke_color_as_constant() + c = d_constant.color + d_args.append(torch.tensor((c.x, c.y, c.z, c.w))) + elif d_shape_group.stroke_color_type == diffvg.ColorType.linear_gradient: + d_linear_gradient = d_shape_group.stroke_color_as_linear_gradient() + beg = d_linear_gradient.begin + d_args.append(torch.tensor((beg.x, beg.y))) + end = d_linear_gradient.end + d_args.append(torch.tensor((end.x, end.y))) + offsets = torch.zeros((d_linear_gradient.num_stops)) + stop_colors = torch.zeros((d_linear_gradient.num_stops, 4)) + d_linear_gradient.copy_to(\ + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: + d_radial_gradient = d_shape_group.stroke_color_as_radial_gradient() + center = d_radial_gradient.center + d_args.append(torch.tensor((center.x, center.y))) + radius = d_radial_gradient.radius + d_args.append(torch.tensor((radius.x, radius.y))) + offsets = torch.zeros((d_radial_gradient.num_stops)) + stop_colors = torch.zeros((d_radial_gradient.num_stops, 4)) + d_radial_gradient.copy_to(\ + diffvg.float_ptr(offsets.data_ptr()), + diffvg.float_ptr(stop_colors.data_ptr())) + assert(torch.isfinite(stop_colors).all()) + d_args.append(offsets) + d_args.append(stop_colors) + else: + assert(False) + d_args.append(None) # use_even_odd_rule + d_shape_to_canvas = torch.zeros((3, 3)) + d_shape_group.copy_to(diffvg.float_ptr(d_shape_to_canvas.data_ptr())) + assert(torch.isfinite(d_shape_to_canvas).all()) + d_args.append(d_shape_to_canvas) + d_args.append(None) # filter_type + d_args.append(torch.tensor(scene.get_d_filter_radius())) + + return tuple(d_args) diff --git a/pydiffvg/save_svg.py b/pydiffvg/save_svg.py new file mode 100644 index 0000000..15dd370 --- /dev/null +++ b/pydiffvg/save_svg.py @@ -0,0 +1,150 @@ +import torch +import pydiffvg +import xml.etree.ElementTree as etree +from xml.dom import minidom + +def prettify(elem): + """Return a pretty-printed XML string for the Element. 
+ """ + rough_string = etree.tostring(elem, 'utf-8') + reparsed = minidom.parseString(rough_string) + return reparsed.toprettyxml(indent=" ") + +def save_svg(filename, width, height, shapes, shape_groups, use_gamma = False): + root = etree.Element('svg') + root.set('version', '1.1') + root.set('xmlns', 'http://www.w3.org/2000/svg') + root.set('width', str(width)) + root.set('height', str(height)) + defs = etree.SubElement(root, 'defs') + g = etree.SubElement(root, 'g') + if use_gamma: + f = etree.SubElement(defs, 'filter') + f.set('id', 'gamma') + f.set('x', '0') + f.set('y', '0') + f.set('width', '100%') + f.set('height', '100%') + gamma = etree.SubElement(f, 'feComponentTransfer') + gamma.set('color-interpolation-filters', 'sRGB') + feFuncR = etree.SubElement(gamma, 'feFuncR') + feFuncR.set('type', 'gamma') + feFuncR.set('amplitude', str(1)) + feFuncR.set('exponent', str(1/2.2)) + feFuncG = etree.SubElement(gamma, 'feFuncG') + feFuncG.set('type', 'gamma') + feFuncG.set('amplitude', str(1)) + feFuncG.set('exponent', str(1/2.2)) + feFuncB = etree.SubElement(gamma, 'feFuncB') + feFuncB.set('type', 'gamma') + feFuncB.set('amplitude', str(1)) + feFuncB.set('exponent', str(1/2.2)) + feFuncA = etree.SubElement(gamma, 'feFuncA') + feFuncA.set('type', 'gamma') + feFuncA.set('amplitude', str(1)) + feFuncA.set('exponent', str(1/2.2)) + g.set('style', 'filter:url(#gamma)') + + # Store color + for i, shape_group in enumerate(shape_groups): + def add_color(shape_color, name): + if isinstance(shape_color, pydiffvg.LinearGradient): + lg = shape_color + color = etree.SubElement(defs, 'linearGradient') + color.set('id', name) + color.set('x1', str(lg.begin[0].item())) + color.set('y1', str(lg.begin[1].item())) + color.set('x2', str(lg.end[0].item())) + color.set('y2', str(lg.end[1].item())) + offsets = lg.offsets.data.cpu().numpy() + stop_colors = lg.stop_colors.data.cpu().numpy() + for j in range(offsets.shape[0]): + stop = etree.SubElement(color, 'stop') + stop.set('offset', offsets[j]) + c = lg.stop_colors[j, :] + stop.set('stop-color', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + stop.set('stop-opacity', '{}'.format(c[3])) + + if shape_group.fill_color is not None: + add_color(shape_group.fill_color, 'shape_{}_fill'.format(i)) + if shape_group.stroke_color is not None: + add_color(shape_group.stroke_color, 'shape_{}_stroke'.format(i)) + + for i, shape_group in enumerate(shape_groups): + shape = shapes[shape_group.shape_ids[0]] + if isinstance(shape, pydiffvg.Circle): + shape_node = etree.SubElement(g, 'circle') + shape_node.set('r', shape.radius.item()) + shape_node.set('cx', shape.center[0].item()) + shape_node.set('cy', shape.center[1].item()) + elif isinstance(shape, pydiffvg.Polygon): + shape_node = etree.SubElement(g, 'polygon') + points = shape.points.data.cpu().numpy() + path_str = '' + for j in range(0, shape.points.shape[0]): + path_str += '{} {}'.format(points[j, 0], points[j, 1]) + if j != shape.points.shape[0] - 1: + path_str += ' ' + shape_node.set('points', path_str) + elif isinstance(shape, pydiffvg.Path): + shape_node = etree.SubElement(g, 'path') + num_segments = shape.num_control_points.shape[0] + num_control_points = shape.num_control_points.data.cpu().numpy() + points = shape.points.data.cpu().numpy() + num_points = shape.points.shape[0] + path_str = 'M {} {}'.format(points[0, 0], points[0, 1]) + point_id = 1 + for j in range(0, num_segments): + if num_control_points[j] == 0: + p = point_id % num_points + path_str += ' L {} {}'.format(\ + 
points[p, 0], points[p, 1]) + point_id += 1 + elif num_control_points[j] == 1: + p1 = (point_id + 1) % num_points + path_str += ' Q {} {} {} {}'.format(\ + points[point_id, 0], points[point_id, 1], + points[p1, 0], points[p1, 1]) + point_id += 2 + elif num_control_points[j] == 2: + p2 = (point_id + 2) % num_points + path_str += ' C {} {} {} {} {} {}'.format(\ + points[point_id, 0], points[point_id, 1], + points[point_id + 1, 0], points[point_id + 1, 1], + points[p2, 0], points[p2, 1]) + point_id += 3 + shape_node.set('d', path_str) + elif isinstance(shape, pydiffvg.Rect): + shape_node = etree.SubElement(g, 'rect') + shape_node.set('x', shape.p_min[0].item()) + shape_node.set('y', shape.p_min[1].item()) + shape_node.set('width', shape.p_max[0].item() - shape.p_min[0].item()) + shape_node.set('height', shape.p_max[1].item() - shape.p_min[1].item()) + else: + assert(False) + + shape_node.set('stroke-width', str(2 * shape.stroke_width.data.cpu().item())) + if shape_group.fill_color is not None: + if isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + shape_node.set('fill', 'url(#shape_{}_fill)'.format(i)) + else: + c = shape_group.fill_color.data.cpu().numpy() + shape_node.set('fill', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + shape_node.set('opacity', str(c[3])) + else: + shape_node.set('fill', 'none') + if shape_group.stroke_color is not None: + if isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + shape_node.set('stroke', 'url(#shape_{}_stroke)'.format(i)) + else: + c = shape_group.stroke_color.data.cpu().numpy() + shape_node.set('stroke', 'rgb({}, {}, {})'.format(\ + int(255 * c[0]), int(255 * c[1]), int(255 * c[2]))) + shape_node.set('stroke-opacity', str(c[3])) + shape_node.set('stroke-linecap', 'round') + shape_node.set('stroke-linejoin', 'round') + + with open(filename, "w") as f: + f.write(prettify(root)) diff --git a/pydiffvg/shape.py b/pydiffvg/shape.py new file mode 100644 index 0000000..a87e9e5 --- /dev/null +++ b/pydiffvg/shape.py @@ -0,0 +1,172 @@ +import torch +import svgpathtools +import math + +class Circle: + def __init__(self, radius, center, stroke_width = torch.tensor(1.0), id = ''): + self.radius = radius + self.center = center + self.stroke_width = stroke_width + self.id = id + +class Ellipse: + def __init__(self, radius, center, stroke_width = torch.tensor(1.0), id = ''): + self.radius = radius + self.center = center + self.stroke_width = stroke_width + self.id = id + +class Path: + def __init__(self, + num_control_points, + points, + is_closed, + stroke_width = torch.tensor(1.0), + id = '', + use_distance_approx = False): + self.num_control_points = num_control_points + self.points = points + self.is_closed = is_closed + self.stroke_width = stroke_width + self.id = id + self.use_distance_approx = use_distance_approx + +class Polygon: + def __init__(self, points, is_closed, stroke_width = torch.tensor(1.0), id = ''): + self.points = points + self.is_closed = is_closed + self.stroke_width = stroke_width + self.id = id + +class Rect: + def __init__(self, p_min, p_max, stroke_width = torch.tensor(1.0), id = ''): + self.p_min = p_min + self.p_max = p_max + self.stroke_width = stroke_width + self.id = id + +class ShapeGroup: + def __init__(self, + shape_ids, + fill_color, + use_even_odd_rule = True, + stroke_color = None, + shape_to_canvas = torch.eye(3), + id = ''): + self.shape_ids = shape_ids + self.fill_color = fill_color + self.use_even_odd_rule = use_even_odd_rule + self.stroke_color = stroke_color + 
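+ # shape_to_canvas is a 3x3 homogeneous transform mapping the group's shapes
+ # into canvas coordinates (identity by default).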
self.shape_to_canvas = shape_to_canvas + self.id = id + +def from_svg_path(path_str, shape_to_canvas = torch.eye(3), force_close = False): + path = svgpathtools.parse_path(path_str) + if len(path) == 0: + return [] + ret_paths = [] + subpaths = path.continuous_subpaths() + for subpath in subpaths: + if subpath.isclosed(): + if len(subpath) > 1 and isinstance(subpath[-1], svgpathtools.Line) and subpath[-1].length() < 1e-5: + subpath.remove(subpath[-1]) + subpath[-1].end = subpath[0].start # Force closing the path + subpath.end = subpath[-1].end + assert(subpath.isclosed()) + else: + beg = subpath[0].start + end = subpath[-1].end + if abs(end - beg) < 1e-5: + subpath[-1].end = beg # Force closing the path + subpath.end = subpath[-1].end + assert(subpath.isclosed()) + elif force_close: + subpath.append(svgpathtools.Line(end, beg)) + subpath.end = subpath[-1].end + assert(subpath.isclosed()) + + num_control_points = [] + points = [] + + for i, e in enumerate(subpath): + if i == 0: + points.append((e.start.real, e.start.imag)) + else: + # Must begin from the end of previous segment + assert(e.start.real == points[-1][0]) + assert(e.start.imag == points[-1][1]) + if isinstance(e, svgpathtools.Line): + num_control_points.append(0) + elif isinstance(e, svgpathtools.QuadraticBezier): + num_control_points.append(1) + points.append((e.control.real, e.control.imag)) + elif isinstance(e, svgpathtools.CubicBezier): + num_control_points.append(2) + points.append((e.control1.real, e.control1.imag)) + points.append((e.control2.real, e.control2.imag)) + elif isinstance(e, svgpathtools.Arc): + # Convert to Cubic curves + # https://www.joecridge.me/content/pdf/bezier-arcs.pdf + start = e.theta * math.pi / 180.0 + stop = (e.theta + e.delta) * math.pi / 180.0 + + sign = 1.0 + if stop < start: + sign = -1.0 + + epsilon = 0.00001 + debug = abs(e.delta) >= 90.0 + while (sign * (stop - start) > epsilon): + arc_to_draw = stop - start + if arc_to_draw > 0.0: + arc_to_draw = min(arc_to_draw, 0.5 * math.pi) + else: + arc_to_draw = max(arc_to_draw, -0.5 * math.pi) + alpha = arc_to_draw / 2.0 + cos_alpha = math.cos(alpha) + sin_alpha = math.sin(alpha) + cot_alpha = 1.0 / math.tan(alpha) + phi = start + alpha + cos_phi = math.cos(phi) + sin_phi = math.sin(phi) + lambda_ = (4.0 - cos_alpha) / 3.0 + mu = sin_alpha + (cos_alpha - lambda_) * cot_alpha + last = sign * (stop - (start + arc_to_draw)) <= epsilon + num_control_points.append(2) + rx = e.radius.real + ry = e.radius.imag + cx = e.center.real + cy = e.center.imag + rot = e.phi * math.pi / 180.0 + cos_rot = math.cos(rot) + sin_rot = math.sin(rot) + x = lambda_ * cos_phi + mu * sin_phi + y = lambda_ * sin_phi - mu * cos_phi + xx = x * cos_rot - y * sin_rot + yy = x * sin_rot + y * cos_rot + points.append((cx + rx * xx, cy + ry * yy)) + x = lambda_ * cos_phi - mu * sin_phi + y = lambda_ * sin_phi + mu * cos_phi + xx = x * cos_rot - y * sin_rot + yy = x * sin_rot + y * cos_rot + points.append((cx + rx * xx, cy + ry * yy)) + if not last: + points.append((cx + rx * math.cos(rot + start + arc_to_draw), + cy + ry * math.sin(rot + start + arc_to_draw))) + start += arc_to_draw + first = False + if i != len(subpath) - 1: + points.append((e.end.real, e.end.imag)) + else: + if subpath.isclosed(): + # Must end at the beginning of first segment + assert(e.end.real == points[0][0]) + assert(e.end.imag == points[0][1]) + else: + points.append((e.end.real, e.end.imag)) + points = torch.tensor(points) + points = torch.cat((points, torch.ones([points.shape[0], 1])), dim = 1) @ 
torch.transpose(shape_to_canvas, 0, 1) + points = points / points[:, 2:3] + points = points[:, :2].contiguous() + ret_paths.append(Path(torch.tensor(num_control_points), points, subpath.isclosed())) + return ret_paths diff --git a/pydiffvg_tensorflow/__init__.py b/pydiffvg_tensorflow/__init__.py new file mode 100644 index 0000000..2686524 --- /dev/null +++ b/pydiffvg_tensorflow/__init__.py @@ -0,0 +1,24 @@ +import tensorflow as tf +try: + import diffvg +except ImportError: + print("Warning: diffvg is not installed when you import pydiffvg_tensorflow.") +from .device import * +from .shape import * +from .pixel_filter import * +from .render_tensorflow import * +from .image import * +from .color import * +import os.path + +print(os.path.dirname(diffvg.__file__)) + +if tf.__cxx11_abi_flag__ == 0: + __data_ptr_module = tf.load_op_library(os.path.join(os.path.dirname(diffvg.__file__), 'libdiffvg_tf_data_ptr_no_cxx11_abi.so')) +else: + assert(tf.__cxx11_abi_flag__ == 1) + __data_ptr_module = tf.load_op_library(os.path.join(os.path.dirname(diffvg.__file__), 'libdiffvg_tf_data_ptr_cxx11_abi.so')) + +def data_ptr(tensor): + addr_as_uint64 = __data_ptr_module.data_ptr(tensor) + return int(addr_as_uint64) diff --git a/pydiffvg_tensorflow/color.py b/pydiffvg_tensorflow/color.py new file mode 100644 index 0000000..e0db612 --- /dev/null +++ b/pydiffvg_tensorflow/color.py @@ -0,0 +1,23 @@ +import tensorflow as tf + +class LinearGradient: + def __init__(self, + begin = tf.constant([0.0, 0.0]), + end = tf.constant([0.0, 0.0]), + offsets = tf.constant([0.0]), + stop_colors = tf.constant([0.0, 0.0, 0.0, 0.0])): + self.begin = begin + self.end = end + self.offsets = offsets + self.stop_colors = stop_colors + +class RadialGradient: + def __init__(self, + center = tf.constant([0.0, 0.0]), + radius = tf.constant([0.0, 0.0]), + offsets = tf.constant([0.0]), + stop_colors = tf.constant([0.0, 0.0, 0.0, 0.0])): + self.center = center + self.radius = radius + self.offsets = offsets + self.stop_colors = stop_colors diff --git a/pydiffvg_tensorflow/custom_ops/CMakeLists.txt b/pydiffvg_tensorflow/custom_ops/CMakeLists.txt new file mode 100644 index 0000000..e15c953 --- /dev/null +++ b/pydiffvg_tensorflow/custom_ops/CMakeLists.txt @@ -0,0 +1,29 @@ +cmake_minimum_required(VERSION 3.12) + +set(CMAKE_CXX_STANDARD 11) +set(CMAKE_CXX_STANDARD_REQUIRED ON) + +project(diffvgTFCustomOp) + +set(CMAKE_POSITION_INDEPENDENT_CODE ON) + +include_directories(SYSTEM ${TensorFlow_INCLUDE_DIR}) + +# Compile two versions of the library +add_library(diffvg_tf_data_ptr_cxx11_abi SHARED data_ptr.cc) +set_target_properties(diffvg_tf_data_ptr_cxx11_abi PROPERTIES COMPILE_FLAGS -D_GLIBCXX_USE_CXX11_ABI=1) +set_target_properties(diffvg_tf_data_ptr_cxx11_abi PROPERTIES LINK_FLAGS -D_GLIBCXX_USE_CXX11_ABI=1) +if(APPLE) + # .so instead of .dylib + set_target_properties(diffvg_tf_data_ptr_cxx11_abi PROPERTIES SUFFIX .so) +endif() +target_link_libraries(diffvg_tf_data_ptr_cxx11_abi ${TensorFlow_LIBRARY}) + +add_library(diffvg_tf_data_ptr_no_cxx11_abi SHARED data_ptr.cc) +set_target_properties(diffvg_tf_data_ptr_no_cxx11_abi PROPERTIES COMPILE_FLAGS -D_GLIBCXX_USE_CXX11_ABI=0) +set_target_properties(diffvg_tf_data_ptr_no_cxx11_abi PROPERTIES LINK_FLAGS -D_GLIBCXX_USE_CXX11_ABI=0) +if(APPLE) + # .so instead of .dylib + set_target_properties(diffvg_tf_data_ptr_no_cxx11_abi PROPERTIES SUFFIX .so) +endif() +target_link_libraries(diffvg_tf_data_ptr_no_cxx11_abi ${TensorFlow_LIBRARY}) diff --git a/pydiffvg_tensorflow/custom_ops/data_ptr.cc 
b/pydiffvg_tensorflow/custom_ops/data_ptr.cc new file mode 100644 index 0000000..cb3caff --- /dev/null +++ b/pydiffvg_tensorflow/custom_ops/data_ptr.cc @@ -0,0 +1,88 @@ +// TODO: add back acknowledgement to the original author when release. + +#pragma warning(disable : 4003 4061 4100 4127 4242 4244 4267 4355 4365 4388 4464 4514 4574 4623 4625 4626 4647 4668 4710 4820 4946 5026 5027 5031 5039) + +// For windows +#define NOMINMAX + +#include "tensorflow/core/framework/op.h" +#include "tensorflow/core/framework/shape_inference.h" +#include "tensorflow/core/framework/op_kernel.h" +#include +#include + +using namespace tensorflow; + +/* Tensorflow custom ops does not allow parameter types of list of + various data types. Therefore, we can't pass a list but we have + to pass each objects individually. + + Consult Tensorflow source code: /tensorflow/core/framework/tensor.h + for what is supported by Tensorflow +*/ + +REGISTER_OP("DataPtr") + .Attr("T: {float, int32} = DT_INT32") // To preserve backwards compatibility, you should specify a default value when adding an attr to an existing op: + .Input("input: T") // Tensor + .Output("output: uint64") // scalar + .SetShapeFn([](::tensorflow::shape_inference::InferenceContext* c) { + c->set_output(0, {}); // scalar + return Status::OK(); + }); + +template +class DataPtrOp : public OpKernel { + public: + explicit DataPtrOp(OpKernelConstruction* context) : OpKernel(context) {} + + void Compute(OpKernelContext* context) override { + // Grab the input tensor + const Tensor& input_tensor = context->input(0); + const T *tensor = input_tensor.flat().data(); + + // Create an output tensor + // NOTE: The output datatype must match the Ops definition!!!. + Tensor* output_tensor = NULL; + // Always allocate on CPU + AllocatorAttributes alloc_attr; + alloc_attr.set_on_host(true); + OP_REQUIRES_OK(context, + context->allocate_output(0, {}, // Initialize a one-element scalar + &output_tensor, + alloc_attr) + ); + auto output_flat = output_tensor->flat(); + + // Cast pointer to unsigned long int + uintptr_t addr = (uintptr_t)tensor; + + // Cast unsigned long int -> unsigned int64 + uint64 addr_converted = addr; + + output_flat(0) = addr_converted; + } +}; + +// Polymorphism: https://www.tensorflow.org/guide/extend/op#polymorphism +REGISTER_KERNEL_BUILDER( + Name("DataPtr") + .Device(DEVICE_CPU) + .TypeConstraint("T"), + DataPtrOp); +REGISTER_KERNEL_BUILDER( + Name("DataPtr") + .Device(DEVICE_CPU) + .TypeConstraint("T"), + DataPtrOp); +REGISTER_KERNEL_BUILDER( + Name("DataPtr") + .Device(DEVICE_GPU) + .TypeConstraint("T") + .HostMemory("output"), + DataPtrOp); +REGISTER_KERNEL_BUILDER( + Name("DataPtr") + .Device(DEVICE_GPU) + .TypeConstraint("T") + .HostMemory("output"), + DataPtrOp); diff --git a/pydiffvg_tensorflow/device.py b/pydiffvg_tensorflow/device.py new file mode 100644 index 0000000..271b6bd --- /dev/null +++ b/pydiffvg_tensorflow/device.py @@ -0,0 +1,59 @@ +import tensorflow as tf + +use_gpu = tf.test.is_gpu_available( + cuda_only=True, + min_cuda_compute_capability=None +) +cpu_device_id = 0 +gpu_device_id = 0 + +def get_device_name(): + """ + Get the current tensorflow device name we are using. + """ + global use_gpu + global cpu_device_id + global gpu_device_id + return '/device:gpu:' + str(gpu_device_id) if use_gpu else '/device:cpu:' + str(cpu_device_id) + +def set_use_gpu(v: bool): + """ + Set whether to use CUDA or not. + """ + global use_gpu + use_gpu = v + +def get_use_gpu(): + """ + Get whether we are using CUDA or not. 
+ """ + global use_gpu + return use_gpu + +def set_cpu_device_id(did: int): + """ + Set the cpu device id we are using. + """ + global cpu_device_id + cpu_device_id = did + +def get_cpu_device_id(): + """ + Get the cpu device id we are using. + """ + global cpu_device_id + return cpu_device_id + +def set_gpu_device_id(did: int): + """ + Set the gpu device id we are using. + """ + global gpu_device_id + gpu_device_id = did + +def get_gpu_device_id(): + """ + Get the gpu device id we are using. + """ + global gpu_device_id + return gpu_device_id diff --git a/pydiffvg_tensorflow/image.py b/pydiffvg_tensorflow/image.py new file mode 100644 index 0000000..18eb1e6 --- /dev/null +++ b/pydiffvg_tensorflow/image.py @@ -0,0 +1,22 @@ +import numpy as np +import skimage +import skimage.io +import os + +def imwrite(img, filename, gamma = 2.2, normalize = False): + directory = os.path.dirname(filename) + if directory != '' and not os.path.exists(directory): + os.makedirs(directory) + + if not isinstance(img, np.ndarray): + img = img.numpy() + if normalize: + img_rng = np.max(img) - np.min(img) + if img_rng > 0: + img = (img - np.min(img)) / img_rng + img = np.clip(img, 0.0, 1.0) + if img.ndim==2: + #repeat along the third dimension + img=np.expand_dims(img,2) + img[:, :, :3] = np.power(img[:, :, :3], 1.0/gamma) + skimage.io.imsave(filename, (img * 255).astype(np.uint8)) \ No newline at end of file diff --git a/pydiffvg_tensorflow/pixel_filter.py b/pydiffvg_tensorflow/pixel_filter.py new file mode 100644 index 0000000..0eff017 --- /dev/null +++ b/pydiffvg_tensorflow/pixel_filter.py @@ -0,0 +1,8 @@ +import tensorflow as tf + +class PixelFilter: + def __init__(self, + type, + radius = tf.constant(0.5)): + self.type = type + self.radius = radius diff --git a/pydiffvg_tensorflow/render_tensorflow.py b/pydiffvg_tensorflow/render_tensorflow.py new file mode 100644 index 0000000..6a7875b --- /dev/null +++ b/pydiffvg_tensorflow/render_tensorflow.py @@ -0,0 +1,649 @@ +import tensorflow as tf +import diffvg +import pydiffvg_tensorflow as pydiffvg +import time +from enum import IntEnum +import warnings + +print_timing = False +__EMPTY_TENSOR = tf.constant([]) + +def is_empty_tensor(tensor): + return tf.equal(tf.size(tensor), 0) + +def set_print_timing(val): + global print_timing + print_timing=val + +class OutputType(IntEnum): + color = 1 + sdf = 2 + +class ShapeType: + __shapetypes = [ + diffvg.ShapeType.circle, + diffvg.ShapeType.ellipse, + diffvg.ShapeType.path, + diffvg.ShapeType.rect + ] + + @staticmethod + def asTensor(type): + for i in range(len(ShapeType.__shapetypes)): + if ShapeType.__shapetypes[i] == type: + return tf.constant(i) + + @staticmethod + def asShapeType(index: tf.Tensor): + if is_empty_tensor(index): + return None + try: + type = ShapeType.__shapetypes[index] + except IndexError: + print(f'{index} is out of range: [0, {len(ShapeType.__shapetypes)})') + import sys + sys.exit() + else: + return type + +class ColorType: + __colortypes = [ + diffvg.ColorType.constant, + diffvg.ColorType.linear_gradient, + diffvg.ColorType.radial_gradient + ] + + @staticmethod + def asTensor(type): + for i in range(len(ColorType.__colortypes)): + if ColorType.__colortypes[i] == type: + return tf.constant(i) + + @staticmethod + def asColorType(index: tf.Tensor): + if is_empty_tensor(index): + return None + try: + type = ColorType.__colortypes[index] + except IndexError: + print(f'{index} is out of range: [0, {len(ColorType.__colortypes)})') + import sys + sys.exit() + else: + return type + +class FilterType: + 
__filtertypes = [ + diffvg.FilterType.box, + diffvg.FilterType.tent, + diffvg.FilterType.hann + ] + + @staticmethod + def asTensor(type): + for i in range(len(FilterType.__filtertypes)): + if FilterType.__filtertypes[i] == type: + return tf.constant(i) + + @staticmethod + def asFilterType(index: tf.Tensor): + if is_empty_tensor(index): + return None + try: + type = FilterType.__filtertypes[index] + except IndexError: + print(f'{index} is out of range: [0, {len(FilterType.__filtertypes)})') + import sys + sys.exit() + else: + return type + +def serialize_scene(canvas_width, + canvas_height, + shapes, + shape_groups, + filter = pydiffvg.PixelFilter(type = diffvg.FilterType.box, + radius = tf.constant(0.5)), + output_type = OutputType.color, + use_prefiltering = False): + """ + Given a list of shapes, convert them to a linear list of argument, + so that we can use it in TF. + """ + with tf.device('/device:cpu:' + str(pydiffvg.get_cpu_device_id())): + num_shapes = len(shapes) + num_shape_groups = len(shape_groups) + args = [] + args.append(tf.constant(canvas_width)) + args.append(tf.constant(canvas_height)) + args.append(tf.constant(num_shapes)) + args.append(tf.constant(num_shape_groups)) + args.append(tf.constant(output_type)) + args.append(tf.constant(use_prefiltering)) + for shape in shapes: + if isinstance(shape, pydiffvg.Circle): + args.append(ShapeType.asTensor(diffvg.ShapeType.circle)) + args.append(tf.identity(shape.radius)) + args.append(tf.identity(shape.center)) + elif isinstance(shape, pydiffvg.Ellipse): + args.append(ShapeType.asTensor(diffvg.ShapeType.ellipse)) + args.append(tf.identity(shape.radius)) + args.append(tf.identity(shape.center)) + elif isinstance(shape, pydiffvg.Path): + assert(shape.points.shape[1] == 2) + args.append(ShapeType.asTensor(diffvg.ShapeType.path)) + args.append(tf.identity(shape.num_control_points, type=tf.int32)) + args.append(tf.identity(shape.points)) + args.append(tf.constant(shape.is_closed)) + elif isinstance(shape, pydiffvg.Polygon): + assert(shape.points.shape[1] == 2) + args.append(ShapeType.asTensor(diffvg.ShapeType.path)) + if shape.is_closed: + args.append(tf.zeros(shape.points.shape[0], dtype = tf.int32)) + else: + args.append(tf.zeros(shape.points.shape[0] - 1, dtype = tf.int32)) + args.append(tf.identity(shape.points)) + args.append(tf.constant(shape.is_closed)) + elif isinstance(shape, pydiffvg.Rect): + args.append(ShapeType.asTensor(diffvg.ShapeType.rect)) + args.append(tf.identity(shape.p_min)) + args.append(tf.identity(shape.p_max)) + else: + assert(False) + args.append(tf.identity(shape.stroke_width)) + + for shape_group in shape_groups: + args.append(tf.identity(shape_group.shape_ids)) + # Fill color + if shape_group.fill_color is None: + args.append(__EMPTY_TENSOR) + elif tf.is_tensor(shape_group.fill_color): + args.append(ColorType.asTensor(diffvg.ColorType.constant)) + args.append(tf.identity(shape_group.fill_color)) + elif isinstance(shape_group.fill_color, pydiffvg.LinearGradient): + args.append(ColorType.asTensor(diffvg.ColorType.linear_gradient)) + args.append(tf.identity(shape_group.fill_color.begin)) + args.append(tf.identity(shape_group.fill_color.end)) + args.append(tf.identity(shape_group.fill_color.offsets)) + args.append(tf.identity(shape_group.fill_color.stop_colors)) + elif isinstance(shape_group.fill_color, pydiffvg.RadialGradient): + args.append(ColorType.asTensor(diffvg.ColorType.radial_gradient)) + args.append(tf.identity(shape_group.fill_color.center)) + args.append(tf.identity(shape_group.fill_color.radius)) + 
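+ # A radial gradient is serialized as center, radius, offsets, stop_colors;
+ # forward() below unpacks the arguments in the same order.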
args.append(tf.identity(shape_group.fill_color.offsets)) + args.append(tf.identity(shape_group.fill_color.stop_colors)) + + if shape_group.fill_color is not None: + # go through the underlying shapes and check if they are all closed + for shape_id in shape_group.shape_ids: + if isinstance(shapes[shape_id], pydiffvg.Path): + if not shapes[shape_id].is_closed: + warnings.warn("Detected non-closed paths with fill color. This might causes unexpected results.", Warning) + + # Stroke color + if shape_group.stroke_color is None: + args.append(__EMPTY_TENSOR) + elif tf.is_tensor(shape_group.stroke_color): + args.append(tf.constant(0)) + args.append(tf.identity(shape_group.stroke_color)) + elif isinstance(shape_group.stroke_color, pydiffvg.LinearGradient): + args.append(ColorType.asTensor(diffvg.ColorType.linear_gradient)) + args.append(tf.identity(shape_group.stroke_color.begin)) + args.append(tf.identity(shape_group.stroke_color.end)) + args.append(tf.identity(shape_group.stroke_color.offsets)) + args.append(tf.identity(shape_group.stroke_color.stop_colors)) + elif isinstance(shape_group.stroke_color, pydiffvg.RadialGradient): + args.append(ColorType.asTensor(diffvg.ColorType.radial_gradient)) + args.append(tf.identity(shape_group.stroke_color.center)) + args.append(tf.identity(shape_group.stroke_color.radius)) + args.append(tf.identity(shape_group.stroke_color.offsets)) + args.append(tf.identity(shape_group.stroke_color.stop_colors)) + args.append(tf.constant(shape_group.use_even_odd_rule)) + # Transformation + args.append(tf.identity(shape_group.shape_to_canvas)) + args.append(FilterType.asTensor(filter.type)) + args.append(tf.constant(filter.radius)) + return args + +class Context: pass + +def forward(width, + height, + num_samples_x, + num_samples_y, + seed, + *args): + """ + Forward rendering pass: given a serialized scene and output an image. 
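+    The positional arguments must appear in exactly the order produced by
+    serialize_scene above.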
+ """ + # Unpack arguments + with tf.device('/device:cpu:' + str(pydiffvg.get_cpu_device_id())): + current_index = 0 + canvas_width = int(args[current_index]) + current_index += 1 + canvas_height = int(args[current_index]) + current_index += 1 + num_shapes = int(args[current_index]) + current_index += 1 + num_shape_groups = int(args[current_index]) + current_index += 1 + output_type = OutputType(int(args[current_index])) + current_index += 1 + use_prefiltering = bool(args[current_index]) + current_index += 1 + shapes = [] + shape_groups = [] + shape_contents = [] # Important to avoid GC deleting the shapes + color_contents = [] # Same as above + for shape_id in range(num_shapes): + shape_type = ShapeType.asShapeType(args[current_index]) + current_index += 1 + if shape_type == diffvg.ShapeType.circle: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Circle(float(radius), + diffvg.Vector2f(float(center[0]), float(center[1]))) + elif shape_type == diffvg.ShapeType.ellipse: + radius = args[current_index] + current_index += 1 + center = args[current_index] + current_index += 1 + shape = diffvg.Ellipse(diffvg.Vector2f(float(radius[0]), float(radius[1])), + diffvg.Vector2f(float(center[0]), float(center[1]))) + elif shape_type == diffvg.ShapeType.path: + num_control_points = args[current_index] + current_index += 1 + points = args[current_index] + current_index += 1 + is_closed = args[current_index] + current_index += 1 + shape = diffvg.Path(diffvg.int_ptr(pydiffvg.data_ptr(num_control_points)), + diffvg.float_ptr(pydiffvg.data_ptr(points)), + num_control_points.shape[0], + points.shape[0], + is_closed) + elif shape_type == diffvg.ShapeType.rect: + p_min = args[current_index] + current_index += 1 + p_max = args[current_index] + current_index += 1 + shape = diffvg.Rect(diffvg.Vector2f(float(p_min[0]), float(p_min[1])), + diffvg.Vector2f(float(p_max[0]), float(p_max[1]))) + else: + assert(False) + stroke_width = args[current_index] + current_index += 1 + shapes.append(diffvg.Shape(\ + shape_type, shape.get_ptr(), float(stroke_width))) + shape_contents.append(shape) + + for shape_group_id in range(num_shape_groups): + shape_ids = args[current_index] + current_index += 1 + fill_color_type = ColorType.asColorType(args[current_index]) + current_index += 1 + if fill_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + fill_color = diffvg.Constant(\ + diffvg.Vector4f(color[0], color[1], color[2], color[3])) + elif fill_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.LinearGradient(diffvg.Vector2f(float(beg[0]), float(beg[1])), + diffvg.Vector2f(float(end[0]), float(end[1])), + offsets.shape[0], + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) + elif fill_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + fill_color = diffvg.RadialGradient(diffvg.Vector2f(float(center[0]), float(center[1])), + 
diffvg.Vector2f(float(radius[0]), float(radius[1])), + offsets.shape[0], + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) + elif fill_color_type is None: + fill_color = None + else: + assert(False) + + stroke_color_type = ColorType.asColorType(args[current_index]) + current_index += 1 + if stroke_color_type == diffvg.ColorType.constant: + color = args[current_index] + current_index += 1 + stroke_color = diffvg.Constant(\ + diffvg.Vector4f(float(color[0]), + float(color[1]), + float(color[2]), + float(color[3]))) + elif stroke_color_type == diffvg.ColorType.linear_gradient: + beg = args[current_index] + current_index += 1 + end = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.LinearGradient(\ + diffvg.Vector2f(float(beg[0]), float(beg[1])), + diffvg.Vector2f(float(end[0]), float(end[1])), + offsets.shape[0], + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(stop_colors.data_ptr())) + elif stroke_color_type == diffvg.ColorType.radial_gradient: + center = args[current_index] + current_index += 1 + radius = args[current_index] + current_index += 1 + offsets = args[current_index] + current_index += 1 + stop_colors = args[current_index] + current_index += 1 + assert(offsets.shape[0] == stop_colors.shape[0]) + stroke_color = diffvg.RadialGradient(\ + diffvg.Vector2f(float(center[0]), float(center[1])), + diffvg.Vector2f(float(radius[0]), float(radius[1])), + offsets.shape[0], + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) + elif stroke_color_type is None: + stroke_color = None + else: + assert(False) + use_even_odd_rule = bool(args[current_index]) + current_index += 1 + shape_to_canvas = args[current_index] + current_index += 1 + + if fill_color is not None: + color_contents.append(fill_color) + if stroke_color is not None: + color_contents.append(stroke_color) + shape_groups.append(diffvg.ShapeGroup(\ + diffvg.int_ptr(pydiffvg.data_ptr(shape_ids)), + shape_ids.shape[0], + diffvg.ColorType.constant if fill_color_type is None else fill_color_type, + diffvg.void_ptr(0) if fill_color is None else fill_color.get_ptr(), + diffvg.ColorType.constant if stroke_color_type is None else stroke_color_type, + diffvg.void_ptr(0) if stroke_color is None else stroke_color.get_ptr(), + use_even_odd_rule, + diffvg.float_ptr(pydiffvg.data_ptr(shape_to_canvas)))) + + filter_type = FilterType.asFilterType(args[current_index]) + current_index += 1 + filter_radius = args[current_index] + current_index += 1 + filt = diffvg.Filter(filter_type, filter_radius) + + device_name = pydiffvg.get_device_name() + device_spec = tf.DeviceSpec.from_string(device_name) + use_gpu = device_spec.device_type == 'GPU' + gpu_index = device_spec.device_index if device_spec.device_index is not None else 0 + + start = time.time() + scene = diffvg.Scene(canvas_width, + canvas_height, + shapes, + shape_groups, + filt, + use_gpu, + gpu_index) + time_elapsed = time.time() - start + global print_timing + if print_timing: + print('Scene construction, time: %.5f s' % time_elapsed) + + with tf.device(device_name): + if output_type == OutputType.color: + rendered_image = tf.zeros((int(height), int(width), 4), dtype = tf.float32) + else: + assert(output_type == OutputType.sdf) + rendered_image = tf.zeros((int(height), int(width), 1), dtype = tf.float32) + + 
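+ # The C++ renderer writes into rendered_image in place through its raw
+ # address (obtained via the data_ptr custom op), so the tensor must already
+ # live on the device selected above.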
start = time.time() + diffvg.render(scene, + diffvg.float_ptr(pydiffvg.data_ptr(rendered_image) if output_type == OutputType.color else 0), + diffvg.float_ptr(pydiffvg.data_ptr(rendered_image) if output_type == OutputType.sdf else 0), + width, + height, + int(num_samples_x), + int(num_samples_y), + seed, + diffvg.float_ptr(0), # d_render_image + diffvg.float_ptr(0), # d_render_sdf + diffvg.float_ptr(0), # d_translation + use_prefiltering) + time_elapsed = time.time() - start + if print_timing: + print('Forward pass, time: %.5f s' % time_elapsed) + + ctx = Context() + ctx.scene = scene + ctx.shape_contents = shape_contents + ctx.color_contents = color_contents + ctx.filter = filt + ctx.width = width + ctx.height = height + ctx.num_samples_x = num_samples_x + ctx.num_samples_y = num_samples_y + ctx.seed = seed + ctx.output_type = output_type + ctx.use_prefiltering = use_prefiltering + return rendered_image, ctx + +@tf.custom_gradient +def render(*x): + """ + The main TensorFlow interface of C++ diffvg. + """ + assert(tf.executing_eagerly()) + if pydiffvg.get_use_gpu() and os.environ['TF_FORCE_GPU_ALLOW_GROWTH'] != 'true': + print('******************** WARNING ********************') + print('Tensorflow by default allocates all GPU memory,') + print('causing huge amount of page faults when rendering.') + print('Please set the environment variable TF_FORCE_GPU_ALLOW_GROWTH to true,') + print('so that Tensorflow allocates memory on demand.') + print('*************************************************') + + width = x[0] + height = x[1] + num_samples_x = x[2] + num_samples_y = x[3] + seed = x[4] + args = x[5:] + img, ctx = forward(width, height, num_samples_x, num_samples_y, seed, *args) + + def backward(grad_img): + scene = ctx.scene + width = ctx.width + height = ctx.height + num_samples_x = ctx.num_samples_x + num_samples_y = ctx.num_samples_y + seed = ctx.seed + output_type = ctx.output_type + use_prefiltering = ctx.use_prefiltering + + start = time.time() + with tf.device(pydiffvg.get_device_name()): + diffvg.render(scene, + diffvg.float_ptr(0), # render_image + diffvg.float_ptr(0), # render_sdf + width, + height, + num_samples_x, + num_samples_y, + seed, + diffvg.float_ptr(pydiffvg.data_ptr(grad_img) if output_type == OutputType.color else 0), + diffvg.float_ptr(pydiffvg.data_ptr(grad_img) if output_type == OutputType.sdf else 0), + diffvg.float_ptr(0), # d_translation + use_prefiltering) + time_elapsed = time.time() - start + global print_timing + if print_timing: + print('Backward pass, time: %.5f s' % time_elapsed) + + with tf.device('/device:cpu:' + str(pydiffvg.get_cpu_device_id())): + d_args = [] + d_args.append(None) # width + d_args.append(None) # height + d_args.append(None) # num_samples_x + d_args.append(None) # num_samples_y + d_args.append(None) # seed + d_args.append(None) # canvas_width + d_args.append(None) # canvas_height + d_args.append(None) # num_shapes + d_args.append(None) # num_shape_groups + d_args.append(None) # output_type + d_args.append(None) # use_prefiltering + for shape_id in range(scene.num_shapes): + d_args.append(None) # type + d_shape = scene.get_d_shape(shape_id) + if d_shape.type == diffvg.ShapeType.circle: + d_circle = d_shape.as_circle() + radius = tf.constant(d_circle.radius) + d_args.append(radius) + c = d_circle.center + c = tf.constant((c.x, c.y)) + d_args.append(c) + elif d_shape.type == diffvg.ShapeType.ellipse: + d_ellipse = d_shape.as_ellipse() + r = d_ellipse.radius + r = tf.constant((d_ellipse.radius.x, d_ellipse.radius.y)) + d_args.append(r) + 
c = d_ellipse.center + c = tf.constant((c.x, c.y)) + d_args.append(c) + elif d_shape.type == diffvg.ShapeType.path: + d_path = d_shape.as_path() + points = tf.zeros((d_path.num_points, 2), dtype=tf.float32) + d_path.copy_to(diffvg.float_ptr(points.data_ptr())) + d_args.append(None) # num_control_points + d_args.append(points) + d_args.append(None) # is_closed + elif d_shape.type == diffvg.ShapeType.rect: + d_rect = d_shape.as_rect() + p_min = tf.constant((d_rect.p_min.x, d_rect.p_min.y)) + p_max = tf.constant((d_rect.p_max.x, d_rect.p_max.y)) + d_args.append(p_min) + d_args.append(p_max) + else: + assert(False) + w = tf.constant((d_shape.stroke_width)) + d_args.append(w) + + for group_id in range(scene.num_shape_groups): + d_shape_group = scene.get_d_shape_group(group_id) + d_args.append(None) # shape_ids + d_args.append(None) # fill_color_type + if d_shape_group.has_fill_color(): + if d_shape_group.fill_color_type == diffvg.ColorType.constant: + d_constant = d_shape_group.fill_color_as_constant() + c = d_constant.color + d_args.append(tf.constant((c.x, c.y, c.z, c.w))) + elif d_shape_group.fill_color_type == diffvg.ColorType.linear_gradient: + d_linear_gradient = d_shape_group.fill_color_as_linear_gradient() + beg = d_linear_gradient.begin + d_args.append(tf.constant((beg.x, beg.y))) + end = d_linear_gradient.end + d_args.append(tf.constant((end.x, end.y))) + offsets = tf.zeros((d_linear_gradient.num_stops), dtype=tf.float32) + stop_colors = tf.zeros((d_linear_gradient.num_stops, 4), dtype=tf.float32) + # HACK: tensorflow's eager mode uses a cache to store scalar + # constants to avoid memory copy. If we pass scalar tensors + # into the C++ code and modify them, we would corrupt the + # cache, causing incorrect result in future scalar constant + # creations. Thus we force tensorflow to copy by plusing a zero. 
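+ # Adding zero yields a tensor backed by fresh memory, so the copy_to()
+ # call below writes into that copy instead of the cached constant.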
+ # (also see https://github.com/tensorflow/tensorflow/issues/11186 + # for more discussion regarding copying tensors) + if offsets.shape.num_elements() == 1: + offsets = offsets + 0 + d_linear_gradient.copy_to(\ + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) + d_args.append(offsets) + d_args.append(stop_colors) + elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: + d_radial_gradient = d_shape_group.fill_color_as_radial_gradient() + center = d_radial_gradient.center + d_args.append(tf.constant((center.x, center.y))) + radius = d_radial_gradient.radius + d_args.append(tf.constant((radius.x, radius.y))) + offsets = tf.zeros((d_radial_gradient.num_stops)) + if offsets.shape.num_elements() == 1: + offsets = offsets + 0 + stop_colors = tf.zeros((d_radial_gradient.num_stops, 4)) + d_radial_gradient.copy_to(\ + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) + d_args.append(offsets) + d_args.append(stop_colors) + else: + assert(False) + d_args.append(None) # stroke_color_type + if d_shape_group.has_stroke_color(): + if d_shape_group.stroke_color_type == diffvg.ColorType.constant: + d_constant = d_shape_group.stroke_color_as_constant() + c = d_constant.color + d_args.append(tf.constant((c.x, c.y, c.z, c.w))) + elif d_shape_group.stroke_color_type == diffvg.ColorType.linear_gradient: + d_linear_gradient = d_shape_group.stroke_color_as_linear_gradient() + beg = d_linear_gradient.begin + d_args.append(tf.constant((beg.x, beg.y))) + end = d_linear_gradient.end + d_args.append(tf.constant((end.x, end.y))) + offsets = tf.zeros((d_linear_gradient.num_stops)) + stop_colors = tf.zeros((d_linear_gradient.num_stops, 4)) + if offsets.shape.num_elements() == 1: + offsets = offsets + 0 + d_linear_gradient.copy_to(\ + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) + d_args.append(offsets) + d_args.append(stop_colors) + elif d_shape_group.fill_color_type == diffvg.ColorType.radial_gradient: + d_radial_gradient = d_shape_group.stroke_color_as_radial_gradient() + center = d_radial_gradient.center + d_args.append(tf.constant((center.x, center.y))) + radius = d_radial_gradient.radius + d_args.append(tf.constant((radius.x, radius.y))) + offsets = tf.zeros((d_radial_gradient.num_stops)) + stop_colors = tf.zeros((d_radial_gradient.num_stops, 4)) + if offsets.shape.num_elements() == 1: + offsets = offsets + 0 + d_radial_gradient.copy_to(\ + diffvg.float_ptr(pydiffvg.data_ptr(offsets)), + diffvg.float_ptr(pydiffvg.data_ptr(stop_colors))) + d_args.append(offsets) + d_args.append(stop_colors) + else: + assert(False) + d_args.append(None) # use_even_odd_rule + d_shape_to_canvas = tf.zeros((3, 3), dtype = tf.float32) + d_shape_group.copy_to(diffvg.float_ptr(pydiffvg.data_ptr(d_shape_to_canvas))) + d_args.append(d_shape_to_canvas) + d_args.append(None) # filter_type + d_args.append(tf.constant(scene.get_d_filter_radius())) + + return d_args + + return img, backward \ No newline at end of file diff --git a/pydiffvg_tensorflow/shape.py b/pydiffvg_tensorflow/shape.py new file mode 100644 index 0000000..f71c2c7 --- /dev/null +++ b/pydiffvg_tensorflow/shape.py @@ -0,0 +1,53 @@ +import tensorflow as tf +import math + +class Circle: + def __init__(self, radius, center, stroke_width = tf.constant(1.0), id = ''): + self.radius = radius + self.center = center + self.stroke_width = stroke_width + self.id = id + +class Ellipse: + def __init__(self, radius, center, 
stroke_width = tf.constant(1.0), id = ''): + self.radius = radius + self.center = center + self.stroke_width = stroke_width + self.id = id + +class Path: + def __init__(self, num_control_points, points, is_closed, stroke_width = tf.constant(1.0), id = ''): + self.num_control_points = num_control_points + self.points = points + self.is_closed = is_closed + self.stroke_width = stroke_width + self.id = id + +class Polygon: + def __init__(self, points, is_closed, stroke_width = tf.constant(1.0), id = ''): + self.points = points + self.is_closed = is_closed + self.stroke_width = stroke_width + self.id = id + +class Rect: + def __init__(self, p_min, p_max, stroke_width = tf.constant(1.0), id = ''): + self.p_min = p_min + self.p_max = p_max + self.stroke_width = stroke_width + self.id = id + +class ShapeGroup: + def __init__(self, + shape_ids, + fill_color, + use_even_odd_rule = True, + stroke_color = None, + shape_to_canvas = tf.eye(3), + id = ''): + self.shape_ids = shape_ids + self.fill_color = fill_color + self.use_even_odd_rule = use_even_odd_rule + self.stroke_color = stroke_color + self.shape_to_canvas = shape_to_canvas + self.id = id diff --git a/sample_boundary.h b/sample_boundary.h new file mode 100644 index 0000000..28af129 --- /dev/null +++ b/sample_boundary.h @@ -0,0 +1,454 @@ +#pragma once + +#include "diffvg.h" +#include "shape.h" +#include "scene.h" +#include "vector.h" +#include "cdf.h" + +struct PathBoundaryData { + int base_point_id; + int point_id; + float t; +}; + +struct BoundaryData { + PathBoundaryData path; + bool is_stroke; +}; + +DEVICE +Vector2f sample_boundary(const Circle &circle, + float t, + Vector2f &normal, + float &pdf, + BoundaryData &, + float stroke_perturb_direction, + float stroke_radius) { + // Parametric form of a circle (t in [0, 1)): + // x = center.x + r * cos(2pi * t) + // y = center.y + r * sin(2pi * t) + auto offset = Vector2f{ + circle.radius * cos(2 * float(M_PI) * t), + circle.radius * sin(2 * float(M_PI) * t) + }; + normal = normalize(offset); + pdf /= (2 * float(M_PI) * circle.radius); + auto ret = circle.center + offset; + if (stroke_perturb_direction != 0.f) { + ret += stroke_perturb_direction * stroke_radius * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; +} + +DEVICE +Vector2f sample_boundary(const Ellipse &ellipse, + float t, + Vector2f &normal, + float &pdf, + BoundaryData &, + float stroke_perturb_direction, + float stroke_radius) { + // Parametric form of a ellipse (t in [0, 1)): + // x = center.x + r.x * cos(2pi * t) + // y = center.y + r.y * sin(2pi * t) + const auto &r = ellipse.radius; + auto offset = Vector2f{ + r.x * cos(2 * float(M_PI) * t), + r.y * sin(2 * float(M_PI) * t) + }; + auto dxdt = -r.x * sin(2 * float(M_PI) * t) * 2 * float(M_PI); + auto dydt = r.y * cos(2 * float(M_PI) * t) * 2 * float(M_PI); + // tangent is normalize(dxdt, dydt) + normal = normalize(Vector2f{dydt, -dxdt}); + pdf /= sqrt(square(dxdt) + square(dydt)); + auto ret = ellipse.center + offset; + if (stroke_perturb_direction != 0.f) { + ret += stroke_perturb_direction * stroke_radius * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; +} + +DEVICE +Vector2f sample_boundary(const Path &path, + const float *path_length_cdf, + const float *path_length_pmf, + const int *point_id_map, + float path_length, + float t, + Vector2f &normal, + float &pdf, + BoundaryData &data, + 
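+ // stroke_perturb_direction is -1, 0 or +1: a nonzero value offsets the
+ // sample by the stroke radius along (or against) the boundary normal,
+ // while 0 samples the fill boundary itself.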
float stroke_perturb_direction, + float stroke_radius) { + if (stroke_perturb_direction != 0.f && !path.is_closed) { + // We need to samples the "caps" of the path + // length of a cap is pi * abs(stroke_perturb_direction) + // there are two caps + auto cap_length = 0.f; + if (path.thickness != nullptr) { + auto r0 = path.thickness[0]; + auto r1 = path.thickness[path.num_points - 1]; + cap_length = float(M_PI) * (r0 + r1); + } else { + cap_length = 2 * float(M_PI) * stroke_radius; + } + auto cap_prob = cap_length / (cap_length + path_length); + if (t < cap_prob) { + t = t / cap_prob; + pdf *= cap_prob; + auto r0 = stroke_radius; + auto r1 = stroke_radius; + if (path.thickness != nullptr) { + r0 = path.thickness[0]; + r1 = path.thickness[path.num_points - 1]; + } + // HACK: in theory we want to compute the tangent and + // sample the hemi-circle, but here we just sample the + // full circle since it's less typing + if (stroke_perturb_direction < 0) { + // Sample the cap at the beginning + auto p0 = Vector2f{path.points[0], path.points[1]}; + auto offset = Vector2f{ + r0 * cos(2 * float(M_PI) * t), + r0 * sin(2 * float(M_PI) * t) + }; + normal = normalize(offset); + pdf /= (2 * float(M_PI) * r0); + data.path.base_point_id = 0; + data.path.point_id = 0; + data.path.t = 0; + return p0 + offset; + } else { + // Sample the cap at the end + auto p0 = Vector2f{path.points[2 * (path.num_points - 1)], + path.points[2 * (path.num_points - 1) + 1]}; + auto offset = Vector2f{ + r1 * cos(2 * float(M_PI) * t), + r1 * sin(2 * float(M_PI) * t) + }; + normal = normalize(offset); + pdf /= (2 * float(M_PI) * r1); + data.path.base_point_id = path.num_base_points - 1; + data.path.point_id = path.num_points - 2 - + path.num_control_points[data.path.base_point_id]; + data.path.t = 1; + return p0 + offset; + } + } else { + t = (t - cap_prob) / (1 - cap_prob); + pdf *= (1 - cap_prob); + } + } + // Binary search on path_length_cdf + auto sample_id = sample(path_length_cdf, + path.num_base_points, + t, + &t); + assert(sample_id >= 0 && sample_id < path.num_base_points); + auto point_id = point_id_map[sample_id]; + if (path.num_control_points[sample_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (i0 + 1) % path.num_points; + assert(i0 < path.num_points); + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + data.path.base_point_id = sample_id; + data.path.point_id = point_id; + data.path.t = t; + if (t < -1e-3f || t > 1+1e-3f) { + // return invalid sample + pdf = 0; + return Vector2f{0, 0}; + } + auto tangent = (p1 - p0); + auto tan_len = length(tangent); + if (tan_len == 0) { + // return invalid sample + pdf = 0; + return Vector2f{0, 0}; + } + normal = Vector2f{-tangent.y, tangent.x} / tan_len; + // length of tangent is the Jacobian of the sampling transformation + pdf *= path_length_pmf[sample_id] / tan_len; + auto ret = p0 + t * (p1 - p0); + if (stroke_perturb_direction != 0.f) { + auto r0 = stroke_radius; + auto r1 = stroke_radius; + if (path.thickness != nullptr) { + r0 = path.thickness[i0]; + r1 = path.thickness[i1]; + } + auto r = r0 + t * (r1 - r0); + ret += stroke_perturb_direction * r * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; + } else if (path.num_control_points[sample_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = i0 + 1; + auto i2 = (i0 + 2) % path.num_points; + auto p0 = 
Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + }; + data.path.base_point_id = sample_id; + data.path.point_id = point_id; + data.path.t = t; + if (t < -1e-3f || t > 1+1e-3f) { + // return invalid sample + pdf = 0; + return Vector2f{0, 0}; + } + auto tangent = 2 * (1 - t) * (p1 - p0) + 2 * t * (p2 - p1); + auto tan_len = length(tangent); + if (tan_len == 0) { + // return invalid sample + pdf = 0; + return Vector2f{0, 0}; + } + normal = Vector2f{-tangent.y, tangent.x} / tan_len; + // length of tangent is the Jacobian of the sampling transformation + pdf *= path_length_pmf[sample_id] / tan_len; + auto ret = eval(t); + if (stroke_perturb_direction != 0.f) { + auto r0 = stroke_radius; + auto r1 = stroke_radius; + auto r2 = stroke_radius; + if (path.thickness != nullptr) { + r0 = path.thickness[i0]; + r1 = path.thickness[i1]; + r2 = path.thickness[i2]; + } + auto tt = 1 - t; + auto r = (tt*tt)*r0 + (2*tt*t)*r1 + (t*t)*r2; + ret += stroke_perturb_direction * r * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; + } else if (path.num_control_points[sample_id] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + assert(i0 >= 0 && i2 < path.num_points); + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + }; + data.path.base_point_id = sample_id; + data.path.point_id = point_id; + data.path.t = t; + if (t < -1e-3f || t > 1+1e-3f) { + // return invalid sample + pdf = 0; + return Vector2f{0, 0}; + } + auto tangent = 3 * square(1 - t) * (p1 - p0) + 6 * (1 - t) * t * (p2 - p1) + 3 * t * t * (p3 - p2); + auto tan_len = length(tangent); + if (tan_len == 0) { + // return invalid sample + pdf = 0; + return Vector2f{0, 0}; + } + normal = Vector2f{-tangent.y, tangent.x} / tan_len; + // length of tangent is the Jacobian of the sampling transformation + pdf *= path_length_pmf[sample_id] / tan_len; + auto ret = eval(t); + if (stroke_perturb_direction != 0.f) { + auto r0 = stroke_radius; + auto r1 = stroke_radius; + auto r2 = stroke_radius; + auto r3 = stroke_radius; + if (path.thickness != nullptr) { + r0 = path.thickness[i0]; + r1 = path.thickness[i1]; + r2 = path.thickness[i2]; + r3 = path.thickness[i3]; + } + auto tt = 1 - t; + auto r = (tt*tt*tt)*r0 + (3*tt*tt*t)*r1 + (3*tt*t*t)*r2 + (t*t*t)*r3; + ret += stroke_perturb_direction * r * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; + } else { + assert(false); + } + assert(false); + return Vector2f{0, 0}; +} + +DEVICE +Vector2f sample_boundary(const Rect &rect, + float t, Vector2f &normal, + float &pdf, + BoundaryData &, + float stroke_perturb_direction, + float stroke_radius) { + // Roll a dice to decide whether to sample width or height + auto w = rect.p_max.x - rect.p_min.x; + 
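+ // Each of the four edges is sampled with probability proportional to its
+ // length; the pdf is divided by the perimeter 2 * (w + h) below.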
auto h = rect.p_max.y - rect.p_min.y; + pdf /= (2 * (w +h)); + if (t <= w / (w + h)) { + // Sample width + // reuse t for the next dice + t *= (w + h) / w; + // Roll a dice to decide whether to sample upper width or lower width + if (t < 0.5f) { + // Sample upper width + normal = Vector2f{0, -1}; + auto ret = rect.p_min + 2 * t * Vector2f{rect.p_max.x - rect.p_min.x, 0.f}; + if (stroke_perturb_direction != 0.f) { + ret += stroke_perturb_direction * stroke_radius * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; + } else { + // Sample lower width + normal = Vector2f{0, 1}; + auto ret = Vector2f{rect.p_min.x, rect.p_max.y} + + 2 * (t - 0.5f) * Vector2f{rect.p_max.x - rect.p_min.x, 0.f}; + if (stroke_perturb_direction != 0.f) { + ret += stroke_perturb_direction * stroke_radius * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; + } + } else { + // Sample height + // reuse t for the next dice + assert(h > 0); + t = (t - w / (w + h)) * (w + h) / h; + // Roll a dice to decide whether to sample left height or right height + if (t < 0.5f) { + // Sample left height + normal = Vector2f{-1, 0}; + auto ret = rect.p_min + 2 * t * Vector2f{0.f, rect.p_max.y - rect.p_min.y}; + if (stroke_perturb_direction != 0.f) { + ret += stroke_perturb_direction * stroke_radius * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; + } else { + // Sample right height + normal = Vector2f{1, 0}; + auto ret = Vector2f{rect.p_max.x, rect.p_min.y} + + 2 * (t - 0.5f) * Vector2f{0.f, rect.p_max.y - rect.p_min.y}; + if (stroke_perturb_direction != 0.f) { + ret += stroke_perturb_direction * stroke_radius * normal; + if (stroke_perturb_direction < 0) { + // normal should point towards the perturb direction + normal = -normal; + } + } + return ret; + } + } +} + +DEVICE +Vector2f sample_boundary(const SceneData &scene, + int shape_group_id, + int shape_id, + float t, + Vector2f &normal, + float &pdf, + BoundaryData &data) { + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + const Shape &shape = scene.shapes[shape_id]; + pdf = 1; + // Choose which one to sample: stroke discontinuities or fill discontinuities. 
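+ // When both a fill and a stroke are present, t in [0, 1) is split evenly
+ // between the two cases and the pdf is scaled by 0.5; a stroke sample is
+ // then split again between the inner (-1) and outer (+1) perturbation directions.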
+ // TODO: we don't need to sample fill discontinuities when stroke alpha is 1 and both + // fill and stroke color exists + auto stroke_perturb = false; + if (shape_group.fill_color != nullptr && shape_group.stroke_color != nullptr) { + if (t < 0.5f) { + stroke_perturb = false; + t = 2 * t; + pdf = 0.5f; + } else { + stroke_perturb = true; + t = 2 * (t - 0.5f); + pdf = 0.5f; + } + } else if (shape_group.stroke_color != nullptr) { + stroke_perturb = true; + } + data.is_stroke = stroke_perturb; + auto stroke_perturb_direction = 0.f; + if (stroke_perturb) { + if (t < 0.5f) { + stroke_perturb_direction = -1.f; + t = 2 * t; + pdf *= 0.5f; + } else { + stroke_perturb_direction = 1.f; + t = 2 * (t - 0.5f); + pdf *= 0.5f; + } + } + switch (shape.type) { + case ShapeType::Circle: + return sample_boundary( + *(const Circle *)shape.ptr, t, normal, pdf, data, stroke_perturb_direction, shape.stroke_width); + case ShapeType::Ellipse: + return sample_boundary( + *(const Ellipse *)shape.ptr, t, normal, pdf, data, stroke_perturb_direction, shape.stroke_width); + case ShapeType::Path: + return sample_boundary( + *(const Path *)shape.ptr, + scene.path_length_cdf[shape_id], + scene.path_length_pmf[shape_id], + scene.path_point_id_map[shape_id], + scene.shapes_length[shape_id], + t, + normal, + pdf, + data, + stroke_perturb_direction, + shape.stroke_width); + case ShapeType::Rect: + return sample_boundary( + *(const Rect *)shape.ptr, t, normal, pdf, data, stroke_perturb_direction, shape.stroke_width); + } + assert(false); + return Vector2f{}; +} + diff --git a/scene.cpp b/scene.cpp new file mode 100644 index 0000000..e024488 --- /dev/null +++ b/scene.cpp @@ -0,0 +1,1035 @@ +#include "scene.h" +#include "aabb.h" +#include "cuda_utils.h" +#include "filter.h" +#include "shape.h" +#include +#include +#include +#include +#include + +size_t align(size_t s) { + auto a = alignof(std::max_align_t); + return ((s + a - 1) / a) * a; +} + +template +void allocate(bool use_gpu, T **p) { + if (use_gpu) { +#ifdef __NVCC__ + checkCuda(cudaMallocManaged(p, sizeof(T))); +#else + throw std::runtime_error("diffvg not compiled with GPU"); + assert(false); +#endif + } else { + *p = (T*)malloc(sizeof(T)); + } +} + +template +void allocate(bool use_gpu, size_t size, T **p) { + if (use_gpu) { +#ifdef __NVCC__ + checkCuda(cudaMallocManaged(p, size * sizeof(T))); +#else + throw std::runtime_error("diffvg not compiled with GPU"); + assert(false); +#endif + } else { + *p = (T*)malloc(size * sizeof(T)); + } +} + +void copy_and_init_shapes(Scene &scene, + const std::vector &shape_list) { + for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) { + switch (shape_list[shape_id]->type) { + case ShapeType::Circle: { + Circle *p = (Circle *)scene.shapes[shape_id].ptr; + const Circle *p_ = (const Circle*)(shape_list[shape_id]->ptr); + *p = *p_; + Circle *d_p = (Circle *)scene.d_shapes[shape_id].ptr; + d_p->radius = 0; + d_p->center = Vector2f{0, 0}; + break; + } case ShapeType::Ellipse: { + Ellipse *p = (Ellipse *)scene.shapes[shape_id].ptr; + const Ellipse *p_ = (const Ellipse*)(shape_list[shape_id]->ptr); + *p = *p_; + Ellipse *d_p = (Ellipse *)scene.d_shapes[shape_id].ptr; + d_p->radius = Vector2f{0, 0}; + d_p->center = Vector2f{0, 0}; + break; + } case ShapeType::Path: { + Path *p = (Path *)scene.shapes[shape_id].ptr; + const Path *p_ = (const Path*)(shape_list[shape_id]->ptr); + p->num_points = p_->num_points; + p->num_base_points = p_->num_base_points; + for (int i = 0; i < p_->num_base_points; i++) { + p->num_control_points[i] = 
p_->num_control_points[i]; + } + for (int i = 0; i < 2 * p_->num_points; i++) { + p->points[i] = p_->points[i]; + } + p->is_closed = p_->is_closed; + p->use_distance_approx = p_->use_distance_approx; + Path *d_p = (Path *)scene.d_shapes[shape_id].ptr; + d_p->num_points = p_->num_points; + d_p->num_base_points = p_->num_base_points; + for (int i = 0; i < 2 * p_->num_points; i++) { + d_p->points[i] = 0; + } + d_p->is_closed = p_->is_closed; + if (p_->thickness != nullptr) { + for (int i = 0; i < p_->num_points; i++) { + p->thickness[i] = p_->thickness[i]; + d_p->thickness[i] = 0; + } + } + d_p->use_distance_approx = p_->use_distance_approx; + break; + } case ShapeType::Rect: { + Rect *p = (Rect *)scene.shapes[shape_id].ptr; + const Rect *p_ = (const Rect*)(shape_list[shape_id]->ptr); + *p = *p_; + Rect *d_p = (Rect *)scene.d_shapes[shape_id].ptr; + d_p->p_min = Vector2f{0, 0}; + d_p->p_max = Vector2f{0, 0}; + break; + } default: { + assert(false); + break; + } + } + scene.shapes[shape_id].type = shape_list[shape_id]->type; + scene.shapes[shape_id].stroke_width = shape_list[shape_id]->stroke_width; + scene.d_shapes[shape_id].type = shape_list[shape_id]->type; + scene.d_shapes[shape_id].stroke_width = 0; + } +} + +std::vector +compute_shape_length(const std::vector &shape_list) { + int num_shapes = (int)shape_list.size(); + std::vector shape_length_list(num_shapes, 0.f); + for (int shape_id = 0; shape_id < num_shapes; shape_id++) { + auto shape_length = 0.f; + switch (shape_list[shape_id]->type) { + case ShapeType::Circle: { + const Circle *p_ = (const Circle*)(shape_list[shape_id]->ptr); + shape_length += float(2.f * M_PI) * p_->radius; + break; + } case ShapeType::Ellipse: { + const Ellipse *p_ = (const Ellipse*)(shape_list[shape_id]->ptr); + // https://en.wikipedia.org/wiki/Ellipse#Circumference + // Ramanujan's ellipse circumference approximation + auto a = p_->radius.x; + auto b = p_->radius.y; + shape_length += float(M_PI) * (3 * (a + b) - sqrt((3 * a + b) * (a + 3 * b))); + break; + } case ShapeType::Path: { + const Path *p_ = (const Path*)(shape_list[shape_id]->ptr); + auto length = 0.f; + auto point_id = 0; + for (int i = 0; i < p_->num_base_points; i++) { + if (p_->num_control_points[i] == 0) { + // Straight line + auto i0 = point_id; + assert(i0 < p_->num_points); + auto i1 = (i0 + 1) % p_->num_points; + point_id += 1; + auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]}; + auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]}; + length += distance(p1, p0); + } else if (p_->num_control_points[i] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = i0 + 1; + auto i2 = (i0 + 2) % p_->num_points; + point_id += 2; + auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]}; + auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]}; + auto p2 = Vector2f{p_->points[2 * i2], p_->points[2 * i2 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + }; + // We use 3-point samples to approximate the length + auto v0 = p0; + auto v1 = eval(0.5f); + auto v2 = p2; + length += distance(v1, v0) + distance(v1, v2); + } else if (p_->num_control_points[i] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = i0 + 1; + auto i2 = i0 + 2; + auto i3 = (i0 + 3) % p_->num_points; + point_id += 3; + auto p0 = Vector2f{p_->points[2 * i0], p_->points[2 * i0 + 1]}; + auto p1 = Vector2f{p_->points[2 * i1], p_->points[2 * i1 + 1]}; + auto p2 = Vector2f{p_->points[2 * i2], p_->points[2 * i2 + 
1]}; + auto p3 = Vector2f{p_->points[2 * i3], p_->points[2 * i3 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + }; + // We use 4-point samples to approximate the length + auto v0 = p0; + auto v1 = eval(1.f/3.f); + auto v2 = eval(2.f/3.f); + auto v3 = p3; + length += distance(v1, v0) + distance(v1, v2) + distance(v2, v3); + } else { + assert(false); + } + } + assert(isfinite(length)); + shape_length += length; + break; + } case ShapeType::Rect: { + const Rect *p_ = (const Rect*)(shape_list[shape_id]->ptr); + shape_length += 2 * (p_->p_max.x - p_->p_min.x + p_->p_max.y - p_->p_min.y); + break; + } default: { + assert(false); + break; + } + } + assert(isfinite(shape_length)); + shape_length_list[shape_id] = shape_length; + } + return shape_length_list; +} + +void build_shape_cdfs(Scene &scene, + const std::vector &shape_group_list, + const std::vector &shape_length_list) { + int sample_id = 0; + for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) { + const ShapeGroup *shape_group = shape_group_list[shape_group_id]; + for (int i = 0; i < shape_group->num_shapes; i++) { + int shape_id = shape_group->shape_ids[i]; + float length = shape_length_list[shape_id]; + scene.sample_shape_id[sample_id] = shape_id; + if (sample_id == 0) { + scene.sample_shapes_cdf[sample_id] = length; + } else { + scene.sample_shapes_cdf[sample_id] = length + + scene.sample_shapes_cdf[sample_id - 1]; + } + assert(isfinite(length)); + scene.sample_shapes_pmf[sample_id] = length; + scene.sample_group_id[sample_id] = shape_group_id; + sample_id++; + } + } + assert(sample_id == scene.num_total_shapes); + auto normalization = scene.sample_shapes_cdf[scene.num_total_shapes - 1]; + if (normalization <= 0) { + char buf[256]; + sprintf(buf, "The total length of the shape boundaries in the scene is equal or less than 0. Length = %f", normalization); + throw std::runtime_error(buf); + } + if (!isfinite(normalization)) { + char buf[256]; + sprintf(buf, "The total length of the shape boundaries in the scene is not a number. 
Length = %f", normalization); + throw std::runtime_error(buf); + } + assert(normalization > 0); + for (int sample_id = 0; sample_id < scene.num_total_shapes; sample_id++) { + scene.sample_shapes_cdf[sample_id] /= normalization; + scene.sample_shapes_pmf[sample_id] /= normalization; + } +} + +void build_path_cdfs(Scene &scene, + const std::vector &shape_list, + const std::vector &shape_length_list) { + for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) { + if (shape_list[shape_id]->type == ShapeType::Path) { + const Path &path = shape_list[shape_id]->as_path(); + float *pmf = scene.path_length_pmf[shape_id]; + float *cdf = scene.path_length_cdf[shape_id]; + int *point_id_map = scene.path_point_id_map[shape_id]; + auto path_length = shape_length_list[shape_id]; + auto inv_length = 1.f / path_length; + auto point_id = 0; + for (int i = 0; i < path.num_base_points; i++) { + point_id_map[i] = point_id; + if (path.num_control_points[i] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (i0 + 1) % path.num_points; + point_id += 1; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto d = distance(p0, p1) * inv_length; + pmf[i] = d; + if (i == 0) { + cdf[i] = d; + } else { + cdf[i] = d + cdf[i - 1]; + } + } else if (path.num_control_points[i] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = i0 + 1; + auto i2 = (i0 + 2) % path.num_points; + point_id += 2; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + }; + // We use 3-point samples to approximate the length + auto v0 = p0; + auto v1 = eval(0.5f); + auto v2 = p2; + auto d = (distance(v0, v1) + distance(v1, v2)) * inv_length; + pmf[i] = d; + if (i == 0) { + cdf[i] = d; + } else { + cdf[i] = d + cdf[i - 1]; + } + } else if (path.num_control_points[i] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + point_id += 3; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + }; + // We use 4-point samples to approximate the length + auto v0 = p0; + auto v1 = eval(1.f/3.f); + auto v2 = eval(2.f/3.f); + auto v3 = p3; + auto d = (distance(v1, v0) + distance(v1, v2) + distance(v2, v3)) * inv_length; + pmf[i] = d; + if (i == 0) { + cdf[i] = d; + } else { + cdf[i] = d + cdf[i - 1]; + } + } else { + assert(false); + } + } + } + } +} + +void copy_and_init_shape_groups(Scene &scene, + const std::vector &shape_group_list) { + for (int group_id = 0; group_id < scene.num_shape_groups; group_id++) { + const ShapeGroup *shape_group = shape_group_list[group_id]; + auto copy_and_init_color = [&](const ColorType &color_type, void *color_ptr, void *target_ptr, void *d_target_ptr) { + switch (color_type) { + case ColorType::Constant: { + Constant *c = (Constant*)target_ptr; + Constant *d_c = (Constant*)d_target_ptr; + const Constant *c_ = 
(const Constant*)color_ptr; + *c = *c_; + d_c->color = Vector4{0, 0, 0, 0}; + break; + } case ColorType::LinearGradient: { + LinearGradient *c = (LinearGradient*)target_ptr; + LinearGradient *d_c = (LinearGradient*)d_target_ptr; + const LinearGradient *c_ = (const LinearGradient*)color_ptr; + c->begin = c_->begin; + c->end = c_->end; + c->num_stops = c_->num_stops; + for (int i = 0; i < c_->num_stops; i++) { + c->stop_offsets[i] = c_->stop_offsets[i]; + } + for (int i = 0; i < 4 * c_->num_stops; i++) { + c->stop_colors[i] = c_->stop_colors[i]; + } + d_c->begin = Vector2f{0, 0}; + d_c->end = Vector2f{0, 0}; + d_c->num_stops = c_->num_stops; + for (int i = 0; i < c_->num_stops; i++) { + d_c->stop_offsets[i] = 0; + } + for (int i = 0; i < 4 * c_->num_stops; i++) { + d_c->stop_colors[i] = 0; + } + break; + } case ColorType::RadialGradient: { + RadialGradient *c = (RadialGradient*)target_ptr; + RadialGradient *d_c = (RadialGradient*)d_target_ptr; + const RadialGradient *c_ = (const RadialGradient*)color_ptr; + c->center = c_->center; + c->radius = c_->radius; + c->num_stops = c_->num_stops; + for (int i = 0; i < c_->num_stops; i++) { + c->stop_offsets[i] = c_->stop_offsets[i]; + } + for (int i = 0; i < 4 * c_->num_stops; i++) { + c->stop_colors[i] = c_->stop_colors[i]; + } + d_c->center = Vector2f{0, 0}; + d_c->radius = Vector2f{0, 0}; + d_c->num_stops = c_->num_stops; + for (int i = 0; i < c_->num_stops; i++) { + d_c->stop_offsets[i] = 0; + } + for (int i = 0; i < 4 * c_->num_stops; i++) { + d_c->stop_colors[i] = 0; + } + break; + } default: { + assert(false); + } + } + }; + for (int i = 0; i < shape_group->num_shapes; i++) { + scene.shape_groups[group_id].shape_ids[i] = shape_group->shape_ids[i]; + } + scene.shape_groups[group_id].num_shapes = shape_group->num_shapes; + scene.shape_groups[group_id].use_even_odd_rule = shape_group->use_even_odd_rule; + scene.shape_groups[group_id].canvas_to_shape = shape_group->canvas_to_shape; + scene.shape_groups[group_id].shape_to_canvas = shape_group->shape_to_canvas; + scene.d_shape_groups[group_id].shape_ids = nullptr; + scene.d_shape_groups[group_id].num_shapes = shape_group->num_shapes; + scene.d_shape_groups[group_id].use_even_odd_rule = shape_group->use_even_odd_rule; + scene.d_shape_groups[group_id].canvas_to_shape = Matrix3x3f{}; + scene.d_shape_groups[group_id].shape_to_canvas = Matrix3x3f{}; + + scene.shape_groups[group_id].fill_color_type = shape_group->fill_color_type; + scene.d_shape_groups[group_id].fill_color_type = shape_group->fill_color_type; + if (shape_group->fill_color != nullptr) { + copy_and_init_color(shape_group->fill_color_type, + shape_group->fill_color, + scene.shape_groups[group_id].fill_color, + scene.d_shape_groups[group_id].fill_color); + } + scene.shape_groups[group_id].stroke_color_type = shape_group->stroke_color_type; + scene.d_shape_groups[group_id].stroke_color_type = shape_group->stroke_color_type; + if (shape_group->stroke_color != nullptr) { + copy_and_init_color(shape_group->stroke_color_type, + shape_group->stroke_color, + scene.shape_groups[group_id].stroke_color, + scene.d_shape_groups[group_id].stroke_color); + } + } +} + +DEVICE uint32_t morton2D(const Vector2f &p, int canvas_width, int canvas_height) { + auto scene_bounds = Vector2f{canvas_width, canvas_height}; + auto pp = p / scene_bounds; + TVector2 pp_i{pp.x * 1023, pp.y * 1023}; + return (expand_bits(pp_i.x) << 1u) | + (expand_bits(pp_i.y) << 0u); +} + +template +void build_bvh(const Scene &scene, BVHNode *nodes, int num_primitives) { + auto bvh_size = 2 
* num_primitives - 1; + if (bvh_size > 1) { + if (sort) { + // Sort by Morton code + std::sort(nodes, nodes + num_primitives, + [&] (const BVHNode &n0, const BVHNode &n1) { + auto p0 = 0.5f * (n0.box.p_min + n0.box.p_max); + auto p1 = 0.5f * (n1.box.p_min + n1.box.p_max); + auto m0 = morton2D(p0, scene.canvas_width, scene.canvas_height); + auto m1 = morton2D(p1, scene.canvas_width, scene.canvas_height); + return m0 < m1; + }); + } + for (int i = num_primitives; i < bvh_size; i++) { + nodes[i] = BVHNode{-1, -1, AABB{}, 0.f}; + } + int prev_beg = 0; + int prev_end = num_primitives; + // For handling odd number of nodes at a level + int leftover = prev_end % 2 == 0 ? -1 : prev_end - 1; + while (prev_end - prev_beg >= 1 || leftover != -1) { + int length = (prev_end - prev_beg) / 2; + if ((prev_end - prev_beg) % 2 == 1 && leftover != -1 && + leftover != prev_end - 1) { + length += 1; + } + for (int i = 0; i < length; i++) { + BVHNode node; + node.child0 = prev_beg + 2 * i; + node.child1 = prev_beg + 2 * i + 1; + if (node.child1 >= prev_end) { + assert(leftover != -1); + node.child1 = leftover; + leftover = -1; + } + AABB child0_box = nodes[node.child0].box; + AABB child1_box = nodes[node.child1].box; + node.box = merge(child0_box, child1_box); + node.max_radius = std::max(nodes[node.child0].max_radius, + nodes[node.child1].max_radius); + nodes[prev_end + i] = node; + } + if (length == 1 && leftover == -1) { + break; + } + prev_beg = prev_end; + prev_end = prev_beg + length; + if (length % 2 == 1 && leftover == -1) { + leftover = prev_end - 1; + } + } + } + assert(nodes[2 * num_primitives - 2].child0 != -1); +} + +void compute_bounding_boxes(Scene &scene, + const std::vector &shape_list, + const std::vector &shape_group_list) { + for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) { + switch (shape_list[shape_id]->type) { + case ShapeType::Circle: { + const Circle *p = (const Circle*)(shape_list[shape_id]->ptr); + scene.shapes_bbox[shape_id] = AABB{p->center - p->radius, + p->center + p->radius}; + break; + } case ShapeType::Ellipse: { + const Ellipse *p = (const Ellipse*)(shape_list[shape_id]->ptr); + scene.shapes_bbox[shape_id] = AABB{p->center - p->radius, + p->center + p->radius}; + break; + } case ShapeType::Path: { + const Path *p = (const Path*)(shape_list[shape_id]->ptr); + AABB box; + if (p->num_points > 0) { + box = AABB{Vector2f{p->points[0], p->points[1]}, + Vector2f{p->points[0], p->points[1]}}; + } + for (int i = 1; i < p->num_points; i++) { + box = merge(box, Vector2f{p->points[2 * i], p->points[2 * i + 1]}); + } + scene.shapes_bbox[shape_id] = box; + std::vector boxes(p->num_base_points); + std::vector thickness(p->num_base_points); + std::vector first_point_id(p->num_base_points); + auto r = shape_list[shape_id]->stroke_width; + auto point_id = 0; + for (int i = 0; i < p->num_base_points; i++) { + first_point_id[i] = point_id; + if (p->num_control_points[i] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (i0 + 1) % p->num_points; + point_id += 1; + auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]}; + auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]}; + boxes[i] = AABB(); + boxes[i] = merge(boxes[i], p0); + boxes[i] = merge(boxes[i], p1); + auto r0 = r; + auto r1 = r; + // override radius if path has thickness + if (p->thickness != nullptr) { + r0 = p->thickness[i0]; + r1 = p->thickness[i1]; + } + thickness[i] = max(r0, r1); + } else if (p->num_control_points[i] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = i0 
+ 1; + auto i2 = (i0 + 2) % p->num_points; + point_id += 2; + auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]}; + auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]}; + auto p2 = Vector2f{p->points[2 * i2], p->points[2 * i2 + 1]}; + boxes[i] = AABB(); + boxes[i] = merge(boxes[i], p0); + boxes[i] = merge(boxes[i], p1); + boxes[i] = merge(boxes[i], p2); + auto r0 = r; + auto r1 = r; + auto r2 = r; + // override radius if path has thickness + if (p->thickness != nullptr) { + r0 = p->thickness[i0]; + r1 = p->thickness[i1]; + r2 = p->thickness[i2]; + } + thickness[i] = max(max(r0, r1), r2); + } else if (p->num_control_points[i] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = i0 + 1; + auto i2 = i0 + 2; + auto i3 = (i0 + 3) % p->num_points; + point_id += 3; + auto p0 = Vector2f{p->points[2 * i0], p->points[2 * i0 + 1]}; + auto p1 = Vector2f{p->points[2 * i1], p->points[2 * i1 + 1]}; + auto p2 = Vector2f{p->points[2 * i2], p->points[2 * i2 + 1]}; + auto p3 = Vector2f{p->points[2 * i3], p->points[2 * i3 + 1]}; + boxes[i] = AABB(); + boxes[i] = merge(boxes[i], p0); + boxes[i] = merge(boxes[i], p1); + boxes[i] = merge(boxes[i], p2); + boxes[i] = merge(boxes[i], p3); + auto r0 = r; + auto r1 = r; + auto r2 = r; + auto r3 = r; + // override radius if path has thickness + if (p->thickness != nullptr) { + r0 = p->thickness[i0]; + r1 = p->thickness[i1]; + r2 = p->thickness[i2]; + r3 = p->thickness[i3]; + } + thickness[i] = max(max(max(r0, r1), r2), r3); + } else { + assert(false); + } + } + // Sort the boxes by y + std::vector idx(boxes.size()); + std::iota(idx.begin(), idx.end(), 0); + std::sort(idx.begin(), idx.end(), [&](int i0, int i1) { + const AABB &b0 = boxes[i0]; + const AABB &b1 = boxes[i1]; + auto b0y = 0.5f * (b0.p_min.y + b0.p_max.y); + auto b1y = 0.5f * (b1.p_min.y + b1.p_max.y); + return b0y < b1y; + }); + BVHNode *nodes = scene.path_bvhs[shape_id]; + for (int i = 0; i < (int)idx.size(); i++) { + nodes[i] = BVHNode{idx[i], + -(first_point_id[idx[i]]+1), + boxes[idx[i]], + thickness[idx[i]]}; + } + build_bvh(scene, nodes, boxes.size()); + break; + } case ShapeType::Rect: { + const Rect *p = (const Rect*)(shape_list[shape_id]->ptr); + scene.shapes_bbox[shape_id] = AABB{p->p_min, p->p_max}; + break; + } default: { + assert(false); + break; + } + } + } + + for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) { + const ShapeGroup *shape_group = shape_group_list[shape_group_id]; + // Build a BVH for each shape group + BVHNode *nodes = scene.shape_groups_bvh_nodes[shape_group_id]; + for (int i = 0; i < shape_group->num_shapes; i++) { + auto shape_id = shape_group->shape_ids[i]; + auto r = shape_group->stroke_color == nullptr ? 
0 : shape_list[shape_id]->stroke_width; + nodes[i] = BVHNode{shape_id, + -1, + scene.shapes_bbox[shape_id], + r}; + } + build_bvh(scene, nodes, shape_group->num_shapes); + } + + BVHNode *nodes = scene.bvh_nodes; + for (int shape_group_id = 0; shape_group_id < (int)shape_group_list.size(); shape_group_id++) { + const ShapeGroup *shape_group = shape_group_list[shape_group_id]; + auto max_radius = shape_list[shape_group->shape_ids[0]]->stroke_width; + if (shape_list[shape_group->shape_ids[0]]->type == ShapeType::Path) { + const Path *p = (const Path*)(shape_list[shape_group->shape_ids[0]]->ptr); + if (p->thickness != nullptr) { + const BVHNode *nodes = scene.path_bvhs[shape_group->shape_ids[0]]; + max_radius = nodes[0].max_radius; + } + } + for (int i = 1; i < shape_group->num_shapes; i++) { + auto shape_id = shape_group->shape_ids[i]; + auto shape = shape_list[shape_id]; + auto r = shape->stroke_width; + if (shape->type == ShapeType::Path) { + const Path *p = (const Path*)(shape_list[shape_id]->ptr); + if (p->thickness != nullptr) { + const BVHNode *nodes = scene.path_bvhs[shape_id]; + r = nodes[0].max_radius; + } + } + max_radius = std::max(max_radius, r); + } + // Fetch group bbox from BVH + auto bbox = scene.shape_groups_bvh_nodes[shape_group_id][2 * shape_group->num_shapes - 2].box; + // Transform box from local to world space + nodes[shape_group_id].child0 = shape_group_id; + nodes[shape_group_id].child1 = -1; + nodes[shape_group_id].box = transform(shape_group->shape_to_canvas, bbox); + if (shape_group->stroke_color == nullptr) { + nodes[shape_group_id].max_radius = 0; + } else { + nodes[shape_group_id].max_radius = max_radius; + } + } + build_bvh(scene, nodes, shape_group_list.size()); +} + +template +size_t allocate_buffers(Scene &scene, + const std::vector &shape_list, + const std::vector &shape_group_list) { + auto num_shapes = shape_list.size(); + auto num_shape_groups = shape_group_list.size(); + + size_t buffer_size = 0; + if (alloc_mode) scene.shapes = (Shape*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Shape) * num_shapes); + if (alloc_mode) scene.d_shapes = (Shape*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Shape) * num_shapes); + if (alloc_mode) scene.shape_groups = (ShapeGroup*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(ShapeGroup) * num_shape_groups); + if (alloc_mode) scene.d_shape_groups = (ShapeGroup*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(ShapeGroup) * num_shape_groups); + if (alloc_mode) scene.sample_shapes_cdf = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * scene.num_total_shapes); + if (alloc_mode) scene.sample_shapes_pmf = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * scene.num_total_shapes); + if (alloc_mode) scene.sample_shape_id = (int*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(int) * scene.num_total_shapes); + if (alloc_mode) scene.sample_group_id = (int*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(int) * scene.num_total_shapes); + if (alloc_mode) scene.shapes_length = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * num_shapes); + if (alloc_mode) scene.path_length_cdf = (float**)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float*) * num_shapes); + if (alloc_mode) scene.path_length_pmf = (float**)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float*) * num_shapes); + if (alloc_mode) scene.path_point_id_map = (int**)&scene.buffer[buffer_size]; + buffer_size += 
align(sizeof(int*) * num_shapes); + if (alloc_mode) scene.filter = (Filter*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Filter)); + if (alloc_mode) scene.d_filter = (DFilter*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(DFilter)); + if (alloc_mode) scene.shapes_bbox = (AABB*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(AABB) * num_shapes); + if (alloc_mode) scene.path_bvhs = (BVHNode**)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(BVHNode*) * num_shapes); + if (alloc_mode) scene.shape_groups_bvh_nodes = (BVHNode**)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(BVHNode*) * num_shape_groups); + if (alloc_mode) scene.bvh_nodes = (BVHNode*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(BVHNode) * (2 * num_shape_groups - 1)); + + if (alloc_mode) { + for (int i = 0; i < num_shapes; i++) { + scene.path_length_cdf[i] = nullptr; + scene.path_length_pmf[i] = nullptr; + scene.path_point_id_map[i] = nullptr; + scene.path_bvhs[i] = nullptr; + } + } + + for (int shape_id = 0; shape_id < scene.num_shapes; shape_id++) { + switch (shape_list[shape_id]->type) { + case ShapeType::Circle: { + if (alloc_mode) scene.shapes[shape_id].ptr = (Circle*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Circle)); // scene.shapes[shape_id].ptr + if (alloc_mode) scene.d_shapes[shape_id].ptr = (Circle*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Circle)); // scene.d_shapes[shape_id].ptr + break; + } case ShapeType::Ellipse: { + if (alloc_mode) scene.shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Ellipse)); // scene.shapes[shape_id].ptr + if (alloc_mode) scene.d_shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Ellipse)); // scene.d_shapes[shape_id].ptr + break; + } case ShapeType::Path: { + if (alloc_mode) scene.shapes[shape_id].ptr = (Path*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Path)); // scene.shapes[shape_id].ptr + if (alloc_mode) scene.d_shapes[shape_id].ptr = (Path*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Path)); // scene.d_shapes[shape_id].ptr + + const Path *p_ = (const Path*)(shape_list[shape_id]->ptr); + Path *p = nullptr, *d_p = nullptr; + if (alloc_mode) p = (Path*)scene.shapes[shape_id].ptr; + if (alloc_mode) d_p = (Path*)scene.d_shapes[shape_id].ptr; + if (alloc_mode) p->num_control_points = (int*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(int) * p_->num_base_points); // p->num_control_points + if (alloc_mode) p->points = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * (2 * p_->num_points)); // p->points + if (alloc_mode) d_p->points = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * (2 * p_->num_points)); // d_p->points + if (p_->thickness != nullptr) { + if (alloc_mode) p->thickness = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * p_->num_points); // p->thickness + if (alloc_mode) d_p->thickness = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * p_->num_points); // d_p->thickness + } else { + if (alloc_mode) p->thickness = nullptr; + if (alloc_mode) d_p->thickness = nullptr; + } + if (alloc_mode) scene.path_length_pmf[shape_id] = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * p_->num_base_points); // scene.path_length_pmf + if (alloc_mode) scene.path_length_cdf[shape_id] = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) 
* p_->num_base_points); // scene.path_length_cdf + if (alloc_mode) scene.path_point_id_map[shape_id] = (int*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(int) * p_->num_base_points); // scene.path_point_id_map + if (alloc_mode) scene.path_bvhs[shape_id] = (BVHNode*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(BVHNode) * (2 * p_->num_base_points - 1)); + break; + } case ShapeType::Rect: { + if (alloc_mode) scene.shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Rect)); // scene.shapes[shape_id].ptr + if (alloc_mode) scene.d_shapes[shape_id].ptr = (Ellipse*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Rect)); // scene.d_shapes[shape_id].ptr + break; + } default: { + assert(false); + break; + } + } + } + + for (int group_id = 0; group_id < scene.num_shape_groups; group_id++) { + const ShapeGroup *shape_group = shape_group_list[group_id]; + if (shape_group->fill_color != nullptr) { + switch (shape_group->fill_color_type) { + case ColorType::Constant: { + if (alloc_mode) scene.shape_groups[group_id].fill_color = (Constant*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Constant)); // color + if (alloc_mode) scene.d_shape_groups[group_id].fill_color = (Constant*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Constant)); // d_color + break; + } case ColorType::LinearGradient: { + if (alloc_mode) scene.shape_groups[group_id].fill_color = (LinearGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(LinearGradient)); // color + if (alloc_mode) scene.shape_groups[group_id].fill_color = (LinearGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(LinearGradient)); // d_color + + const LinearGradient *c_ = (const LinearGradient *)shape_group->fill_color; + LinearGradient *c = nullptr, *d_c = nullptr; + if (alloc_mode) c = (LinearGradient *)scene.shape_groups[group_id].fill_color; + if (alloc_mode) d_c = (LinearGradient *)scene.d_shape_groups[group_id].fill_color; + if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets + if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors + if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets + if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors + break; + } case ColorType::RadialGradient: { + if (alloc_mode) scene.shape_groups[group_id].fill_color = (RadialGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(RadialGradient)); // color + if (alloc_mode) scene.shape_groups[group_id].fill_color = (RadialGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(RadialGradient)); // d_color + + const RadialGradient *c_ = (const RadialGradient *)shape_group->fill_color; + RadialGradient *c = nullptr, *d_c = nullptr; + if (alloc_mode) c = (RadialGradient *)scene.shape_groups[group_id].fill_color; + if (alloc_mode) d_c = (RadialGradient *)scene.d_shape_groups[group_id].fill_color; + if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets + if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // 
c->stop_colors + if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets + if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors + break; + } default: { + assert(false); + } + } + } else { + if (alloc_mode) scene.shape_groups[group_id].fill_color = nullptr; + if (alloc_mode) scene.d_shape_groups[group_id].fill_color = nullptr; + } + if (shape_group->stroke_color != nullptr) { + switch (shape_group->stroke_color_type) { + case ColorType::Constant: { + if (alloc_mode) scene.shape_groups[group_id].stroke_color = (Constant*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Constant)); // color + if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = (Constant*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(Constant)); // d_color + break; + } case ColorType::LinearGradient: { + if (alloc_mode) scene.shape_groups[group_id].stroke_color = (LinearGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(LinearGradient)); // color + if (alloc_mode) scene.shape_groups[group_id].stroke_color = (LinearGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(LinearGradient)); // d_color + + const LinearGradient *c_ = (const LinearGradient *)shape_group->stroke_color; + LinearGradient *c = nullptr, *d_c = nullptr; + if (alloc_mode) c = (LinearGradient *)scene.shape_groups[group_id].stroke_color; + if (alloc_mode) d_c = (LinearGradient *)scene.d_shape_groups[group_id].stroke_color; + if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets + if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors + if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets + if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors + break; + } case ColorType::RadialGradient: { + if (alloc_mode) scene.shape_groups[group_id].stroke_color = (RadialGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(RadialGradient)); // color + if (alloc_mode) scene.shape_groups[group_id].stroke_color = (RadialGradient*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(RadialGradient)); // d_color + + const RadialGradient *c_ = (const RadialGradient *)shape_group->stroke_color; + RadialGradient *c = nullptr, *d_c = nullptr; + if (alloc_mode) c = (RadialGradient *)scene.shape_groups[group_id].stroke_color; + if (alloc_mode) d_c = (RadialGradient *)scene.d_shape_groups[group_id].stroke_color; + if (alloc_mode) c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // c->stop_offsets + if (alloc_mode) c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // c->stop_colors + if (alloc_mode) d_c->stop_offsets = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * c_->num_stops); // d_c->stop_offsets + if (alloc_mode) d_c->stop_colors = (float*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(float) * 4 * c_->num_stops); // d_c->stop_colors + break; + } default: { + assert(false); + } + } + } else { + 
if (alloc_mode) scene.shape_groups[group_id].stroke_color = nullptr; + if (alloc_mode) scene.d_shape_groups[group_id].stroke_color = nullptr; + } + if (alloc_mode) scene.shape_groups[group_id].shape_ids = (int*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(int) * shape_group->num_shapes); // shape_group->shape_ids + if (alloc_mode) scene.shape_groups_bvh_nodes[group_id] = (BVHNode*)&scene.buffer[buffer_size]; + buffer_size += align(sizeof(BVHNode) * (2 * shape_group->num_shapes - 1)); // scene.shape_groups_bvh_nodes[group_id] + } + return buffer_size; +} + +Scene::Scene(int canvas_width, + int canvas_height, + const std::vector &shape_list, + const std::vector &shape_group_list, + const Filter &filter, + bool use_gpu, + int gpu_index) + : canvas_width(canvas_width), + canvas_height(canvas_height), + num_shapes(shape_list.size()), + num_shape_groups(shape_group_list.size()), + use_gpu(use_gpu), + gpu_index(gpu_index) { + if (num_shapes == 0) { + return; + } + // Shape group may reuse some of the shapes, + // record the total number of shapes. + int num_total_shapes = 0; + for (const ShapeGroup *sg : shape_group_list) { + num_total_shapes += sg->num_shapes; + } + this->num_total_shapes = num_total_shapes; + + // Memory initialization +#ifdef __NVCC__ + int old_device_id = -1; +#endif + if (use_gpu) { +#ifdef __NVCC__ + checkCuda(cudaGetDevice(&old_device_id)); + if (gpu_index != -1) { + checkCuda(cudaSetDevice(gpu_index)); + } +#else + throw std::runtime_error("diffvg not compiled with GPU"); + assert(false); +#endif + } + + size_t buffer_size = allocate_buffers(*this, shape_list, shape_group_list); + // Allocate a huge buffer for everything + allocate(use_gpu, buffer_size, &buffer); + // memset(buffer, 111, buffer_size); + // Actually distribute the buffer + allocate_buffers(*this, shape_list, shape_group_list); + copy_and_init_shapes(*this, shape_list); + copy_and_init_shape_groups(*this, shape_group_list); + + std::vector shape_length_list = compute_shape_length(shape_list); + // Copy shape_length + if (use_gpu) { +#ifdef __NVCC__ + checkCuda(cudaMemcpy(this->shapes_length, &shape_length_list[0], num_shapes * sizeof(float), cudaMemcpyHostToDevice)); +#else + throw std::runtime_error("diffvg not compiled with GPU"); + assert(false); +#endif + } else { + memcpy(this->shapes_length, &shape_length_list[0], num_shapes * sizeof(float)); + } + build_shape_cdfs(*this, shape_group_list, shape_length_list); + build_path_cdfs(*this, shape_list, shape_length_list); + compute_bounding_boxes(*this, shape_list, shape_group_list); + + // Filter initialization + *(this->filter) = filter; + this->d_filter->radius = 0; + + if (use_gpu) { +#ifdef __NVCC__ + if (old_device_id != -1) { + checkCuda(cudaSetDevice(old_device_id)); + } +#else + throw std::runtime_error("diffvg not compiled with GPU"); + assert(false); +#endif + } +} + +Scene::~Scene() { + if (num_shapes == 0) { + return; + } + if (use_gpu) { +#ifdef __NVCC__ + int old_device_id = -1; + checkCuda(cudaGetDevice(&old_device_id)); + if (gpu_index != -1) { + checkCuda(cudaSetDevice(gpu_index)); + } + + checkCuda(cudaFree(buffer)); + + checkCuda(cudaSetDevice(old_device_id)); +#else + // Don't throw because C++ don't want a destructor to throw. 
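Scene's constructor above calls allocate_buffers twice: a sizing pass with alloc_mode off only accumulates buffer_size through align(), then a single buffer of that size is allocated (cudaMallocManaged on the GPU path, malloc otherwise), and a second pass with alloc_mode on points every field at its offset inside that one buffer. A stripped-down sketch of this measure-then-bind arena pattern; Arena, take and align_up are illustrative names, not part of the diff.

#include <cstddef>
#include <cstdint>
#include <cstdlib>

// Round every sub-allocation up to max_align_t, as scene.cpp's align() does,
// so an object of any type can live at the returned offset.
size_t align_up(size_t s) {
    size_t a = alignof(std::max_align_t);
    return ((s + a - 1) / a) * a;
}

struct Arena {
    uint8_t *buffer = nullptr;  // stays null during the sizing pass
    size_t offset = 0;

    // Sizing pass: buffer == nullptr, only advances the offset.
    // Binding pass: hands out a pointer into the one big allocation.
    template <typename T>
    T *take(size_t count) {
        T *p = buffer ? reinterpret_cast<T *>(buffer + offset) : nullptr;
        offset += align_up(sizeof(T) * count);
        return p;
    }
};

int main() {
    // Pass 1: measure.
    Arena sizing;
    sizing.take<float>(16);
    sizing.take<int>(7);
    size_t total = sizing.offset;

    // One allocation for everything, then pass 2: bind pointers.
    Arena arena;
    arena.buffer = static_cast<uint8_t *>(std::malloc(total));
    float *floats = arena.take<float>(16);
    int *ints = arena.take<int>(7);

    floats[0] = 2.f;  // both arrays live inside the same buffer
    ints[0] = 1;
    std::free(arena.buffer);
    return 0;
}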
+ std::cerr << "diffvg not compiled with GPU"; + exit(1); +#endif + } else { + free(buffer); + } +} + +Shape Scene::get_d_shape(int shape_id) const { + return d_shapes[shape_id]; +} + +ShapeGroup Scene::get_d_shape_group(int group_id) const { + return d_shape_groups[group_id]; +} + +float Scene::get_d_filter_radius() const { + return d_filter->radius; +} diff --git a/scene.h b/scene.h new file mode 100644 index 0000000..e2f452d --- /dev/null +++ b/scene.h @@ -0,0 +1,120 @@ +#pragma once + +#include "diffvg.h" +#include "aabb.h" +#include + +struct Shape; +struct ShapeGroup; +struct Filter; +struct DFilter; + +struct BVHNode { + int child0, child1; // child1 is negative if it is a leaf + AABB box; + float max_radius; +}; + +struct Scene { + Scene(int canvas_width, + int canvas_height, + const std::vector &shape_list, + const std::vector &shape_group_list, + const Filter &filter, + bool use_gpu, + int gpu_index); + + ~Scene(); + + int canvas_width; + int canvas_height; + + uint8_t *buffer; + + Shape *shapes; + Shape *d_shapes; + ShapeGroup *shape_groups; + ShapeGroup *d_shape_groups; + Filter *filter; + DFilter *d_filter; + // For accelerating intersection + AABB *shapes_bbox; + BVHNode **path_bvhs; // Only for Path + BVHNode **shape_groups_bvh_nodes; // One BVH for each shape group + BVHNode *bvh_nodes; + + int num_shapes; + int num_shape_groups; + // shape_groups reuse shape, so the total number of shapes + // doesn't equal to num_shapes + int num_total_shapes; + bool use_gpu; + int gpu_index; + + // For edge sampling + float *shapes_length; + float *sample_shapes_cdf; + float *sample_shapes_pmf; + int *sample_shape_id; + int *sample_group_id; + float **path_length_cdf; + float **path_length_pmf; + int **path_point_id_map; + + ShapeGroup get_d_shape_group(int group_id) const; + Shape get_d_shape(int shape_id) const; + float get_d_filter_radius() const; +}; + +struct SceneData { + int canvas_width; + int canvas_height; + Shape *shapes; + Shape *d_shapes; + ShapeGroup *shape_groups; + ShapeGroup *d_shape_groups; + Filter *filter; + DFilter *d_filter; + AABB *shapes_bbox; + BVHNode **path_bvhs; // Only for Path + BVHNode **shape_groups_bvh_nodes; + BVHNode *bvh_nodes; + int num_shapes; + int num_shape_groups; + int num_total_shapes; + // For edge sampling + float *shapes_length; + float *sample_shapes_cdf; + float *sample_shapes_pmf; + int *sample_shape_id; + int *sample_group_id; + float **path_length_cdf; + float **path_length_pmf; + int **path_point_id_map; +}; + +inline SceneData get_scene_data(const Scene &scene) { + return SceneData{scene.canvas_width, + scene.canvas_height, + scene.shapes, + scene.d_shapes, + scene.shape_groups, + scene.d_shape_groups, + scene.filter, + scene.d_filter, + scene.shapes_bbox, + scene.path_bvhs, + scene.shape_groups_bvh_nodes, + scene.bvh_nodes, + scene.num_shapes, + scene.num_shape_groups, + scene.num_total_shapes, + scene.shapes_length, + scene.sample_shapes_cdf, + scene.sample_shapes_pmf, + scene.sample_shape_id, + scene.sample_group_id, + scene.path_length_cdf, + scene.path_length_pmf, + scene.path_point_id_map}; +} diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..e01f5a2 --- /dev/null +++ b/setup.py @@ -0,0 +1,96 @@ +# Adapted from https://github.com/pybind/cmake_example/blob/master/setup.py +import os +import re +import sys +import platform +import subprocess +import importlib +from sysconfig import get_paths + +import importlib +from setuptools import setup, Extension +from setuptools.command.build_ext import build_ext +from 
setuptools.command.install import install +from distutils.sysconfig import get_config_var +from distutils.version import LooseVersion + +class CMakeExtension(Extension): + def __init__(self, name, sourcedir, build_with_cuda): + Extension.__init__(self, name, sources=[]) + self.sourcedir = os.path.abspath(sourcedir) + self.build_with_cuda = build_with_cuda + +class Build(build_ext): + def run(self): + try: + out = subprocess.check_output(['cmake', '--version']) + except OSError: + raise RuntimeError("CMake must be installed to build the following extensions: " + + ", ".join(e.name for e in self.extensions)) + + super().run() + + def build_extension(self, ext): + if isinstance(ext, CMakeExtension): + extdir = os.path.abspath(os.path.dirname(self.get_ext_fullpath(ext.name))) + info = get_paths() + include_path = info['include'] + cmake_args = ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY=' + extdir, + '-DPYTHON_INCLUDE_PATH=' + include_path] + + cfg = 'Debug' if self.debug else 'Release' + build_args = ['--config', cfg] + + if platform.system() == "Windows": + cmake_args += ['-DCMAKE_LIBRARY_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir), + '-DCMAKE_RUNTIME_OUTPUT_DIRECTORY_{}={}'.format(cfg.upper(), extdir)] + if sys.maxsize > 2**32: + cmake_args += ['-A', 'x64'] + build_args += ['--', '/m'] + else: + cmake_args += ['-DCMAKE_BUILD_TYPE=' + cfg] + build_args += ['--', '-j8'] + + if ext.build_with_cuda: + cmake_args += ['-DDIFFVG_CUDA=1'] + + env = os.environ.copy() + env['CXXFLAGS'] = '{} -DVERSION_INFO=\\"{}\\"'.format(env.get('CXXFLAGS', ''), + self.distribution.get_version()) + if not os.path.exists(self.build_temp): + os.makedirs(self.build_temp) + subprocess.check_call(['cmake', ext.sourcedir] + cmake_args, cwd=self.build_temp, env=env) + subprocess.check_call(['cmake', '--build', '.'] + build_args, cwd=self.build_temp) + else: + super().build_extension(ext) + +torch_spec = importlib.util.find_spec("torch") +tf_spec = importlib.util.find_spec("tensorflow") +packages = [] +build_with_cuda = False +if torch_spec is not None: + packages.append('pydiffvg') + import torch + if torch.cuda.is_available(): + build_with_cuda = True +if tf_spec is not None and sys.platform != 'win32': + packages.append('pydiffvg_tensorflow') + if not build_with_cuda: + import tensorflow as tf + if tf.test.is_gpu_available(cuda_only=True, min_cuda_compute_capability=None): + build_with_cuda = True +if len(packages) == 0: + print('Error: PyTorch or Tensorflow must be installed. 
For Windows platform only PyTorch is supported.') + exit() +# Override build_with_cuda with environment variable +if 'DIFFVG_CUDA' in os.environ: + build_with_cuda = os.environ['DIFFVG_CUDA'] == '1' + +setup(name = 'diffvg', + version = '0.0.1', + install_requires = ["svgpathtools"], + description = 'Differentiable Vector Graphics', + ext_modules = [CMakeExtension('diffvg', '', build_with_cuda)], + cmdclass = dict(build_ext=Build, install=install), + packages = packages, + zip_safe = False) diff --git a/shape.cpp b/shape.cpp new file mode 100644 index 0000000..19a3096 --- /dev/null +++ b/shape.cpp @@ -0,0 +1,22 @@ +#include "shape.h" + +void Path::copy_to(ptr points, ptr thickness) const { + float *p = points.get(); + for (int i = 0; i < 2 * num_points; i++) { + p[i] = this->points[i]; + } + if (this->thickness != nullptr) { + float *t = thickness.get(); + for (int i = 0; i < num_points; i++) { + t[i] = this->thickness[i]; + } + } +} + +void ShapeGroup::copy_to(ptr shape_to_canvas) const { + for (int i = 0; i < 3; i++) { + for (int j = 0; j < 3; j++) { + shape_to_canvas.get()[i * 3 + j] = this->shape_to_canvas(i, j); + } + } +} diff --git a/shape.h b/shape.h new file mode 100644 index 0000000..b549f31 --- /dev/null +++ b/shape.h @@ -0,0 +1,169 @@ +#pragma once + +#include "diffvg.h" +#include "color.h" +#include "ptr.h" +#include "vector.h" +#include "matrix.h" + +enum class ShapeType { + Circle, + Ellipse, + Path, + Rect +}; + +struct Circle { + float radius; + Vector2f center; + + ptr get_ptr() { + return ptr(this); + } +}; + +struct Ellipse { + Vector2f radius; + Vector2f center; + + ptr get_ptr() { + return ptr(this); + } +}; + +struct Path { + Path(ptr num_control_points, + ptr points, + ptr thickness, + int num_base_points, + int num_points, + bool is_closed, + bool use_distance_approx) : + num_control_points(num_control_points.get()), + points(points.get()), + thickness(thickness.get()), + num_base_points(num_base_points), + num_points(num_points), + is_closed(is_closed), + use_distance_approx(use_distance_approx) {} + + int *num_control_points; + float *points; + float *thickness; + int num_base_points; + int num_points; + bool is_closed; + bool use_distance_approx; + + bool has_thickness() const { + return thickness != nullptr; + } + void copy_to(ptr points, ptr thickness) const; + + ptr get_ptr() { + return ptr(this); + } +}; + +struct Rect { + Vector2f p_min; + Vector2f p_max; + + ptr get_ptr() { + return ptr(this); + } +}; + +struct Shape { + Shape() {} + Shape(const ShapeType &type, + ptr shape_ptr, + float stroke_width) + : type(type), ptr(shape_ptr.get()), stroke_width(stroke_width) {} + + Circle as_circle() const { + return *(Circle*)ptr; + } + + Ellipse as_ellipse() const { + return *(Ellipse*)ptr; + } + + Path as_path() const { + return *(Path*)ptr; + } + + Rect as_rect() const { + return *(Rect*)ptr; + } + + ShapeType type; + void *ptr; + float stroke_width; +}; + +struct ShapeGroup { + ShapeGroup() {} + ShapeGroup(ptr shape_ids, + int num_shapes, + const ColorType &fill_color_type, + ptr fill_color, + const ColorType &stroke_color_type, + ptr stroke_color, + bool use_even_odd_rule, + ptr shape_to_canvas) + : shape_ids(shape_ids.get()), + num_shapes(num_shapes), + fill_color_type(fill_color_type), + fill_color(fill_color.get()), + stroke_color_type(stroke_color_type), + stroke_color(stroke_color.get()), + use_even_odd_rule(use_even_odd_rule), + shape_to_canvas(shape_to_canvas.get()) { + canvas_to_shape = inverse(this->shape_to_canvas); + } + + bool has_fill_color() const 
{ + return fill_color != nullptr; + } + + Constant fill_color_as_constant() const { + return *(Constant*)fill_color; + } + + LinearGradient fill_color_as_linear_gradient() const { + return *(LinearGradient*)fill_color; + } + + RadialGradient fill_color_as_radial_gradient() const { + return *(RadialGradient*)fill_color; + } + + bool has_stroke_color() const { + return stroke_color != nullptr; + } + + Constant stroke_color_as_constant() const { + return *(Constant*)stroke_color; + } + + LinearGradient stroke_color_as_linear_gradient() const { + return *(LinearGradient*)stroke_color; + } + + RadialGradient stroke_color_as_radial_gradient() const { + return *(RadialGradient*)stroke_color; + } + + void copy_to(ptr shape_to_canvas) const; + + int *shape_ids; + int num_shapes; + ColorType fill_color_type; + void *fill_color; + ColorType stroke_color_type; + void *stroke_color; + bool use_even_odd_rule; + Matrix3x3f canvas_to_shape; + Matrix3x3f shape_to_canvas; +}; diff --git a/solve.h b/solve.h new file mode 100644 index 0000000..99f730d --- /dev/null +++ b/solve.h @@ -0,0 +1,59 @@ +#pragma once + +#include "diffvg.h" + +template +DEVICE +inline bool solve_quadratic(T a, T b, T c, T *t0, T *t1) { + // From https://github.com/mmp/pbrt-v3/blob/master/src/core/pbrt.h#L419 + T discrim = square(b) - 4 * a * c; + if (discrim < 0) { + return false; + } + T root_discrim = sqrt(discrim); + + T q; + if (b < 0) { + q = -0.5f * (b - root_discrim); + } else { + q = -0.5f * (b + root_discrim); + } + *t0 = q / a; + *t1 = c / q; + if (*t0 > *t1) { + swap_(*t0, *t1); + } + return true; +} + +template +DEVICE +inline int solve_cubic(T a, T b, T c, T d, T t[3]) { + if (fabs(a) < 1e-6f) { + if (solve_quadratic(b, c, d, &t[0], &t[1])) { + return 2; + } else { + return 0; + } + } + // normalize cubic equation + b /= a; + c /= a; + d /= a; + T Q = (b * b - 3 * c) / 9.f; + T R = (2 * b * b * b - 9 * b * c + 27 * d) / 54.f; + if (R * R < Q * Q * Q) { + // 3 real roots + T theta = acos(R / sqrt(Q * Q * Q)); + t[0] = -2.f * sqrt(Q) * cos(theta / 3.f) - b / 3.f; + t[1] = -2.f * sqrt(Q) * cos((theta + 2.f * T(M_PI)) / 3.f) - b / 3.f; + t[2] = -2.f * sqrt(Q) * cos((theta - 2.f * T(M_PI)) / 3.f) - b / 3.f; + return 3; + } else { + T A = R > 0 ? -pow(R + sqrt(R * R - Q * Q * Q), T(1./3.)): + pow(-R + sqrt(R * R - Q * Q * Q), T(1./3.)); + T B = fabs(A) > 1e-6f ? 
Q / A : T(0); + t[0] = (A + B) - b / T(3); + return 1; + } +} diff --git a/vector.h b/vector.h new file mode 100644 index 0000000..3575b26 --- /dev/null +++ b/vector.h @@ -0,0 +1,817 @@ +#pragma once + +#include "diffvg.h" +#include +#include + +template +struct TVector2 { + DEVICE TVector2() {} + + template + DEVICE + TVector2(T2 x, T2 y) : x(T(x)), y(T(y)) {} + + template + DEVICE + TVector2(const TVector2 &v) : x(T(v.x)), y(T(v.y)) {} + + DEVICE T& operator[](int i) { + return *(&x + i); + } + + DEVICE T operator[](int i) const { + return *(&x + i); + } + + T x, y; +}; + +template +struct TVector3 { + DEVICE TVector3() {} + + template + DEVICE + TVector3(T2 x, T2 y, T2 z) : x(T(x)), y(T(y)), z(T(z)) {} + + template + DEVICE + TVector3(const TVector3 &v) : x(T(v.x)), y(T(v.y)), z(T(v.z)) {} + + DEVICE T& operator[](int i) { + return *(&x + i); + } + + DEVICE T operator[](int i) const { + return *(&x + i); + } + + T x, y, z; +}; + +template +struct TVector4 { + DEVICE TVector4() {} + + template + DEVICE + TVector4(T2 x, T2 y, T2 z, T2 w) : x(T(x)), y(T(y)), z(T(z)), w(T(w)) {} + + template + DEVICE + TVector4(const TVector4 &v) : x(T(v.x)), y(T(v.y)), z(T(v.z)), w(T(v.w)) {} + + + DEVICE T& operator[](int i) { + return *(&x + i); + } + + DEVICE T operator[](int i) const { + return *(&x + i); + } + + T x, y, z, w; +}; + +using Vector2f = TVector2; +using Vector2d = TVector2; +using Vector2i = TVector2; +using Vector2 = TVector2; +using Vector3i = TVector3; +using Vector3f = TVector3; +using Vector3d = TVector3; +using Vector3 = TVector3; +using Vector4f = TVector4; +using Vector4d = TVector4; +using Vector4 = TVector4; + +template +DEVICE +inline auto operator+(const TVector2 &v0, + const TVector2 &v1) -> TVector2 { + return TVector2{ + v0[0] + v1[0], v0[1] + v1[1]}; +} + +template +DEVICE +inline auto operator+(const T0 &v0, + const TVector2 &v1) -> TVector2 { + return TVector2{v0 + v1[0], v0 + v1[1]}; +} + +template +DEVICE +inline auto operator+(const T0 &v0, + const TVector3 &v1) -> TVector3 { + return TVector3{ + v0 + v1[0], v0 + v1[1], v0 + v1[2]}; +} + +template +DEVICE +inline auto operator+(const TVector2 &v0, + const T1 &v1) -> TVector2 { + return TVector2{ + v0[0] + v1, v0[1] + v1}; +} + +template +DEVICE +inline auto operator+(const TVector3 &v0, + const T1 &v1) -> TVector3 { + return TVector3{ + v0[0] + v1, v0[1] + v1, v0[2] + v1}; +} + +template +DEVICE +inline auto operator+(const TVector3 &v0, + const TVector3 &v1) -> TVector3 { + return TVector3{ + v0[0] + v1[0], v0[1] + v1[1], v0[2] + v1[2]}; +} + +template +DEVICE +inline auto operator+(const TVector4 &v0, + const TVector4 &v1) -> TVector4 { + return TVector4{ + v0[0] + v1[0], v0[1] + v1[1], v0[2] + v1[2], v0[3] + v1[3]}; +} + +template +DEVICE +inline auto operator+=(TVector2 &v0, + const TVector2 &v1) -> TVector2& { + v0[0] += v1[0]; + v0[1] += v1[1]; + return v0; +} + +template +DEVICE +inline auto operator+=(TVector3 &v0, + const TVector3 &v1) -> TVector3& { + v0[0] += v1[0]; + v0[1] += v1[1]; + v0[2] += v1[2]; + return v0; +} + +template +DEVICE +inline auto operator+=(TVector3 &v0, + const T1 &v1) -> TVector3& { + v0[0] += v1; + v0[1] += v1; + v0[2] += v1; + return v0; +} + +template +DEVICE +inline auto operator+=(TVector4 &v0, + const TVector4 &v1) -> TVector4& { + v0[0] += v1[0]; + v0[1] += v1[1]; + v0[2] += v1[2]; + v0[3] += v1[3]; + return v0; +} + +template +DEVICE +inline auto operator+=(TVector4 &v0, + const T1 &v1) -> TVector4& { + v0[0] += v1; + v0[1] += v1; + v0[2] += v1; + v0[3] += v1; + return v0; +} + 
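solve_quadratic above uses the cancellation-avoiding form from pbrt cited in its comment: compute q = -(b + sign(b) * sqrt(discrim)) / 2 and return q / a and c / q, so that neither root is formed by subtracting two nearly equal numbers. A standalone double-precision illustration; stable_quadratic is our name for the sketch, not an API from the diff.

#include <cmath>
#include <cstdio>
#include <utility>

bool stable_quadratic(double a, double b, double c, double *t0, double *t1) {
    double discrim = b * b - 4 * a * c;
    if (discrim < 0) {
        return false;
    }
    double root_discrim = std::sqrt(discrim);
    // Pick the sign that adds magnitudes instead of cancelling them.
    double q = (b < 0) ? -0.5 * (b - root_discrim) : -0.5 * (b + root_discrim);
    *t0 = q / a;
    *t1 = c / q;
    if (*t0 > *t1) {
        std::swap(*t0, *t1);
    }
    return true;
}

int main() {
    // x^2 + 1e9 x + 1 = 0: the computed discriminant rounds back to 1e18, so
    // sqrt gives exactly 1e9 and the naive (-b + sqrt(discrim)) / (2a) form
    // collapses the small root to 0; the q form recovers it as roughly -1e-9.
    double t0, t1;
    if (stable_quadratic(1.0, 1e9, 1.0, &t0, &t1)) {
        std::printf("%.12g %.12g\n", t0, t1);
    }
    return 0;
}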
+template +DEVICE +inline auto operator-(const T0 &v0, + const TVector2 &v1) -> TVector2 { + return TVector2{v0 - v1[0], v0 - v1[1]}; +} + +template +DEVICE +inline auto operator-(const T0 &v0, + const TVector3 &v1) -> TVector2 { + return TVector3{v0 - v1[0], v0 - v1[1], v0 - v1[2]}; +} + +template +DEVICE +inline auto operator-(const TVector2 &v0, + const T1 &v1) -> TVector2 { + return TVector2{v0[0] - v1, v0[1] - v1}; +} + +template +DEVICE +inline auto operator-(const TVector3 &v0, + const T1 &v1) -> TVector3 { + return TVector3{v0[0] - v1, v0[1] - v1, v0[2] - v1}; +} + +template +DEVICE +inline auto operator-(const TVector2 &v0, + const TVector2 &v1) -> TVector2 { + return TVector2{ + v0[0] - v1[0], v0[1] - v1[1]}; +} + +template +DEVICE +inline auto operator-(const TVector2 &v) -> TVector2 { + return TVector2{-v[0], -v[1]}; +} + +template +DEVICE +inline auto operator-(const TVector3 &v) -> TVector3 { + return TVector3{-v[0], -v[1], -v[2]}; +} + +template +DEVICE +inline auto operator-(const TVector3 &v0, + const TVector3 &v1) -> TVector3 { + return TVector3{ + v0[0] - v1[0], v0[1] - v1[1], v0[2] - v1[2]}; +} + +template +DEVICE +inline auto operator-(const TVector4 &v0, + const TVector4 &v1) -> TVector4 { + return TVector4{ + v0[0] - v1[0], v0[1] - v1[1], v0[2] - v1[2], v0[3] - v1[3]}; +} + +template +DEVICE +inline auto operator-=(TVector2 &v0, + const TVector2 &v1) -> TVector2& { + v0[0] -= v1[0]; + v0[1] -= v1[1]; + return v0; +} + +template +DEVICE +inline auto operator-=(TVector3 &v0, + const TVector3 &v1) -> TVector3& { + v0[0] -= v1[0]; + v0[1] -= v1[1]; + v0[2] -= v1[2]; + return v0; +} + +template +DEVICE +inline auto operator*(const TVector2 &v0, + const TVector2 &v1) -> TVector2 { + return TVector2{ + v0[0] * v1[0], v0[1] * v1[1]}; +} + +template +DEVICE +inline auto operator*(const TVector2 &v0, + const T1 &s) -> TVector2 { + return TVector2{ + v0[0] * s, v0[1] * s}; +} + +template +DEVICE +inline auto operator*(const T0 &s, + const TVector2 &v0) -> TVector2 { + return TVector2{s * v0[0], s * v0[1]}; +} + +template +DEVICE +inline auto operator*=(TVector2 &v0, + const T1 &s) -> TVector2& { + v0[0] *= s; + v0[1] *= s; + return v0; +} + +template +DEVICE +inline auto operator*(const TVector3 &v0, + const T1 &s) -> TVector3 { + return TVector3{ + v0[0] * s, v0[1] * s, v0[2] * s}; +} + +template +DEVICE +inline auto operator*(const T0 &s, + const TVector3 &v0) -> TVector3 { + return TVector3{ + s * v0[0], s * v0[1], s * v0[2]}; +} + +template +DEVICE +inline auto operator*=(TVector3 &v0, + const T1 &s) -> TVector3& { + v0[0] *= s; + v0[1] *= s; + v0[2] *= s; + return v0; +} + +template +DEVICE +inline auto operator*=(TVector4 &v0, + const T1 &s) -> TVector4& { + v0[0] *= s; + v0[1] *= s; + v0[2] *= s; + v0[3] *= s; + return v0; +} + +template +DEVICE +inline auto operator*(const TVector3 &v0, + const TVector3 &v1) -> TVector3 { + return TVector3{ + v0[0] * v1[0], v0[1] * v1[1], v0[2] * v1[2]}; +} + +template +DEVICE +inline auto operator*(const TVector4 &v0, + const T1 &s) -> TVector4 { + return TVector4{ + v0[0] * s, v0[1] * s, v0[2] * s, v0[3] * s}; +} + +template +DEVICE +inline auto operator*(const T0 &s, + const TVector4 &v0) -> TVector4 { + return TVector4{ + s * v0[0], s * v0[1], s * v0[2], s * v0[3]}; +} + +template +DEVICE +inline auto operator*(const TVector4 &v0, + const TVector4 &v1) -> TVector4 { + return TVector4{ + v0[0] * v1[0], v0[1] * v1[1], v0[2] * v1[2], v0[3] * v1[3]}; +} + +template +DEVICE +inline auto operator/(const TVector2 &v0, + const T1 &s) -> 
TVector2 { + auto inv_s = 1.f / s; + return v0 * inv_s; +} + +template +DEVICE +inline auto operator/(const TVector3 &v0, + const T1 &s) -> TVector3 { + auto inv_s = 1.f / s; + return v0 * inv_s; +} + +template +DEVICE +inline auto operator/(const TVector4 &v0, + const T1 &s) -> TVector4 { + auto inv_s = 1.f / s; + return v0 * inv_s; +} + +template +DEVICE +inline auto operator/(const T0 &s, + const TVector3 &v1) -> TVector3 { + return TVector3{ + s / v1[0], s / v1[2], s / v1[2]}; +} + +template +DEVICE +inline auto operator/(const TVector3 &v0, + const TVector3 &v1) -> TVector3 { + return TVector3{ + v0[0] / v1[0], v0[1] / v1[2], v0[2] / v1[2]}; +} + +template +DEVICE +inline auto operator/(const TVector2 &v0, + const TVector2 &v1) -> TVector2 { + return TVector2{ + v0[0] / v1[0], v0[1] / v1[1]}; +} + +template +DEVICE +inline auto operator/=(TVector3 &v0, + const T1 &s) -> TVector3& { + auto inv_s = 1.f / s; + v0[0] *= inv_s; + v0[1] *= inv_s; + v0[2] *= inv_s; + return v0; +} + +template +DEVICE +inline auto operator/=(TVector4 &v0, + const T1 &s) -> TVector4& { + auto inv_s = 1.f / s; + v0[0] *= inv_s; + v0[1] *= inv_s; + v0[2] *= inv_s; + v0[3] *= inv_s; + return v0; +} + +template +DEVICE +inline bool operator==(const TVector2 &v0, + const TVector2 &v1) { + return v0.x == v1.x && v0.y == v1.y; +} + +template +DEVICE +inline bool operator==(const TVector3 &v0, + const TVector3 &v1) { + return v0.x == v1.x && v0.y == v1.y && v0.z == v1.z; +} + +template +DEVICE +inline bool operator!=(const TVector3 &v0, + const TVector3 &v1) { + return v0.x != v1.x || v0.y != v1.y || v0.z != v1.z; +} + +template +DEVICE +inline TVector2 get_normal(const TVector2 &v) { + return TVector2{v.y, -v.x}; +} + +template +DEVICE +inline T length_squared(const TVector2 &v0) { + return square(v0[0]) + square(v0[1]); +} + +template +DEVICE +inline TVector2 d_length_squared(const TVector2 &v0, const T &d_l_sq) { + //l_sq = square(v0[0]) + square(v0[1]) + return 2 * d_l_sq * v0; +} + +template +DEVICE +inline T length(const TVector2 &v0) { + return sqrt(length_squared(v0)); +} + +template +DEVICE +inline TVector2 d_length(const TVector2 &v0, const T &d_l) { + auto l_sq = length_squared(v0); + auto l = sqrt(l_sq); + auto d_l_sq = 0.5f * d_l / l; + return d_length_squared(v0, T(d_l_sq)); +} + +template +DEVICE +inline T length_squared(const TVector3 &v0) { + return square(v0[0]) + square(v0[1]) + square(v0[2]); +} + +template +DEVICE +inline TVector3 d_length_squared(const TVector3 &v0, const T &d_l_sq) { + //l_sq = square(v0[0]) + square(v0[1]) + square(v0[2]) + return 2 * d_l_sq * v0; +} + +template +DEVICE +inline T length(const TVector3 &v0) { + return sqrt(length_squared(v0)); +} + +template +DEVICE +inline TVector3 d_length(const TVector3 &v0, const T &d_l) { + auto l_sq = length_squared(v0); + auto l = sqrt(l_sq); + auto d_l_sq = 0.5f * d_l / l; + return d_length_squared(v0, d_l_sq); +} + +template +DEVICE +inline auto distance_squared(const TVector2 &v0, + const TVector2 &v1) -> decltype(length_squared(v1 - v0)) { + return length_squared(v1 - v0); +} + +template +DEVICE +inline auto distance_squared(const TVector3 &v0, + const TVector3 &v1) -> decltype(length_squared(v1 - v0)) { + return length_squared(v1 - v0); +} + +template +DEVICE +inline auto distance(const TVector2 &v0, + const TVector2 &v1) -> decltype(length(v1 - v0)) { + return length(v1 - v0); +} + +template +DEVICE +inline void d_distance(const TVector2 &v0, + const TVector2 &v1, + const T &d_output, + TVector2 &d_v0, + TVector2 &d_v1) { + auto 
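// Functions prefixed with d_ are reverse-mode adjoints: d_f(inputs, d_output)
// maps the adjoint of the output back onto the adjoint(s) of the inputs. For
// example, length_squared(v) = v.x^2 + v.y^2 gives d_v = 2 * d_l_sq * v, and
// d_length chains through d_l_sq = 0.5 * d_l / length(v). d_distance below
// pushes the adjoint of |v1 - v0| onto both endpoints with opposite signs.
// (Note: the scalar/vector and vector/vector operator/ overloads for
// 3-vectors above appear to use v1[2] in the middle component where v1[1]
// would be expected; this looks like a copy-paste slip rather than intent.)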
d_v1_v0 = d_length(v1 - v0, d_output); + d_v0 -= d_v1_v0; + d_v1 += d_v1_v0; +} + +template +DEVICE +inline auto distance(const TVector3 &v0, + const TVector3 &v1) -> decltype(length(v1 - v0)) { + return length(v1 - v0); +} + +template +DEVICE +inline void d_distance(const TVector3 &v0, + const TVector3 &v1, + const T &d_output, + TVector3 &d_v0, + TVector3 &d_v1) { + auto d_v1_v0 = d_length(v1 - v0, d_output); + d_v0 -= d_v1_v0; + d_v1 += d_v1_v0; +} + +template +DEVICE +inline TVector2 normalize(const TVector2 &v0) { + return v0 / length(v0); +} + +template +DEVICE +inline TVector2 d_normalize(const TVector2 &v0, const TVector2 &d_n) { + auto l = length(v0); + auto n = v0 / l; + auto d_v0 = d_n / l; + auto d_l = -dot(d_n, n) / l; + // l = length(v0) + d_v0 += d_length(v0, d_l); + return d_v0; +} + +template +DEVICE +inline TVector3 normalize(const TVector3 &v0) { + return v0 / length(v0); +} + +template +DEVICE +inline TVector3 d_normalize(const TVector3 &v0, const TVector3 &d_n) { + auto l = length(v0); + auto n = v0 / l; + auto d_v0 = d_n / l; + auto d_l = -dot(d_n, n) / l; + // l = length(v0) + d_v0 += d_length(v0, d_l); + return d_v0; +} + +template +DEVICE +inline auto dot(const TVector2 &v0, const TVector2 &v1) -> decltype(v0[0] * v1[0]) { + return v0[0] * v1[0] + + v0[1] * v1[1]; +} + +template +DEVICE +inline auto dot(const TVector3 &v0, const TVector3 &v1) -> decltype(v0[0] * v1[0]) { + return v0[0] * v1[0] + + v0[1] * v1[1] + + v0[2] * v1[2]; +} + +template +DEVICE +inline auto dot(const TVector4 &v0, const TVector4 &v1) -> decltype(v0[0] * v1[0]) { + return v0[0] * v1[0] + + v0[1] * v1[1] + + v0[2] * v1[2] + + v0[3] * v1[3]; +} + +template +DEVICE +inline auto cross(const TVector3 &v0, const TVector3 &v1) -> TVector3 { + return TVector3{ + v0[1] * v1[2] - v0[2] * v1[1], + v0[2] * v1[0] - v0[0] * v1[2], + v0[0] * v1[1] - v0[1] * v1[0]}; +} + +template +DEVICE +inline void d_cross(const TVector3 &v0, const TVector3 &v1, const TVector3 &d_output, + TVector3 &d_v0, TVector3 &d_v1) { + d_v0 += cross(v1, d_output); + d_v1 += cross(d_output, v0); +} + +template +DEVICE +inline T luminance(const TVector3 &v) { + return 0.212671f * v[0] + + 0.715160f * v[1] + + 0.072169f * v[2]; +} + +template +DEVICE +inline T sum(const T &v) { + return v; +} + +template +DEVICE +inline T sum(const TVector2 &v) { + return v[0] + v[1]; +} + +template +DEVICE +inline T sum(const TVector3 &v) { + return v[0] + v[1] + v[2]; +} + +template +DEVICE +inline T sum(const TVector4 &v) { + return v[0] + v[1] + v[2] + v[3]; +} + +template +DEVICE +void coordinate_system(const TVector3 &n, TVector3 &x, TVector3 &y) { + if (n[2] < -1.f + 1e-6f) { + x = TVector3{T(0), T(-1), T(0)}; + y = TVector3{T(-1), T(0), T(0)}; + } else { + auto a = 1.f / (1.f + n[2]); + auto b = -n[0] * n[1] * a; + x = TVector3{1.f - square(n[0]) * a, b, -n[0]}; + y = TVector3{b, 1.f - square(n[1]) * a, -n[1]}; + } +} + +template +DEVICE +void d_coordinate_system(const TVector3 &n, const TVector3 &d_x, const TVector3 &d_y, + TVector3 &d_n) { + if (n[2] < -1.f + 1e-6f) { + //x = TVector3{T(0), T(-1), T(0)}; + //y = TVector3{T(-1), T(0), T(0)}; + // don't need to do anything + } else { + auto a = 1.f / (1.f + n[2]); + // auto b = -n[0] * n[1] * a; + // x = TVector3{1.f - square(n[0]) * a, b, -n[0]} + d_n[0] -= 2.f * n[0] * d_x[0] * a; + auto d_a = -square(n[0]) * d_x[0]; + auto d_b = d_x[1]; + d_n[0] -= d_x[2]; + // y = TVector3{b, 1.f - square(n[1]) * a, -n[1]} + d_b += d_y[0]; + d_n[1] -= 2.f * d_y[1] * n[1] * a; + d_a -= d_y[1] * 
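// d_normalize applies the quotient rule in adjoint form: with n = v / l and
// l = length(v), the direct contribution is d_n / l and the contribution
// through the length is d_l = -dot(d_n, n) / l, which is then propagated by
// d_length. coordinate_system builds a tangent frame (x, y) orthogonal to n,
// branching when n.z is near -1 to avoid the 1 / (1 + n.z) singularity, and
// d_coordinate_system (this function) accumulates the matching adjoints into
// d_n, collecting the contributions through a = 1 / (1 + n.z) in d_a before
// the final d_n[2] -= d_a * a / (1 + n[2]) step.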
square(n[1]); + d_n[1] -= d_y[2]; + // b = -n[0] * n[1] * a + d_n[0] -= d_b * n[1] * a; + d_n[1] -= d_b * n[0] * a; + d_a -= d_b * n[0] * n[1]; + // a = 1 / (1 + n[2]) + d_n[2] -= d_a * a / (1 + n[2]); + } +} + +DEVICE +inline bool isfinite(const Vector2 &v) { + return isfinite(v.x) && + isfinite(v.y); +} + +DEVICE +inline bool isfinite(const Vector3 &v) { + return isfinite(v.x) && + isfinite(v.y) && + isfinite(v.z); +} + +DEVICE +inline bool isfinite(const Vector4 &v) { + return isfinite(v.x) && + isfinite(v.y) && + isfinite(v.z) && + isfinite(v.w); +} + +DEVICE +inline bool is_zero(const Vector3 &v) { + return v.x == 0 && v.y == 0 && v.z == 0; +} + +template +inline std::ostream& operator<<(std::ostream &os, const TVector2 &v) { + return os << "(" << v[0] << ", " << v[1] << ")"; +} + +template +inline std::ostream& operator<<(std::ostream &os, const TVector3 &v) { + return os << "(" << v[0] << ", " << v[1] << ", " << v[2] << ")"; +} + +template +inline std::ostream& operator<<(std::ostream &os, const TVector4 &v) { + return os << "(" << v[0] << ", " << v[1] << ", " << v[2] << ", " << v[3] << ")"; +} + +DEVICE +inline +float det(const Vector2f &a, const Vector2f &b) { + return a.x*b.y-b.x*a.y; +} + +DEVICE +inline +Vector2f quadratic_closest_pt_approx(const Vector2f &b0, + const Vector2f &b1, + const Vector2f &b2, + float *t_ = nullptr) { + // From http://w3.impa.br/~diego/publications/NehHop08.pdf + float a=det(b0,b2), b=2*det(b1,b0), d=2*det(b2,b1); + float f=b*d-a*a; + Vector2f d21=b2-b1, d10=b1-b0, d20=b2-b0; + Vector2f gf=2*(b*d21+d*d10+a*d20); + gf=Vector2f(gf.y,-gf.x); + Vector2f pp=-f*gf/dot(gf,gf); + Vector2f d0p=b0-pp; + float ap=det(d0p,d20), bp=2*det(d10,d0p); + float t=clamp((ap+bp)/(2*a+b+d),0.f,1.f); + float tt = 1 - t; + if (t_ != nullptr) { + *t_ = t; + } + return (tt*tt)*b0 + (2*tt*t)*b1 + (t*t)*b2; +} + +DEVICE +inline +Vector2f quadratic_closest_pt_approx(const Vector2f &b0, + const Vector2f &b1, + const Vector2f &b2, + const Vector2f &pt, + float *t = nullptr) { + // Approximate closest point to a quadratic curve + return quadratic_closest_pt_approx(b0 - pt, b1 - pt, b2 - pt, t) + pt; +} diff --git a/winding_number.h b/winding_number.h new file mode 100644 index 0000000..8791a4c --- /dev/null +++ b/winding_number.h @@ -0,0 +1,202 @@ +#pragma once + +#include "diffvg.h" +#include "scene.h" +#include "shape.h" +#include "solve.h" +#include "vector.h" + +DEVICE +int compute_winding_number(const Circle &circle, const Vector2f &pt) { + const auto &c = circle.center; + auto r = circle.radius; + // inside the circle: return 1, outside the circle: return 0 + if (distance_squared(c, pt) < r * r) { + return 1; + } else { + return 0; + } +} + +DEVICE +int compute_winding_number(const Ellipse &ellipse, const Vector2f &pt) { + const auto &c = ellipse.center; + const auto &r = ellipse.radius; + // inside the ellipse: return 1, outside the ellipse: return 0 + if (square(c.x - pt.x) / square(r.x) + square(c.y - pt.y) / square(r.y) < 1) { + return 1; + } else { + return 0; + } +} + +DEVICE +bool intersect(const AABB &box, const Vector2f &pt) { + if (pt.y < box.p_min.y || pt.y > box.p_max.y) { + return false; + } + if (pt.x > box.p_max.x) { + return false; + } + return true; +} + +DEVICE +int compute_winding_number(const Path &path, const BVHNode *bvh_nodes, const Vector2f &pt) { + // Shoot a horizontal ray from pt to right, intersect with all curves of the path, + // count intersection + auto num_segments = path.num_base_points; + constexpr auto max_bvh_size = 128; + int 
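// quadratic_closest_pt_approx at the end of vector.h follows the reference
// cited in its comment (Nehab and Hoppe 2008) and is reused by the stroking
// code when use_distance_approx is set. Winding numbers for paths are
// evaluated by casting a horizontal ray from pt in the +x direction and
// summing signed crossings: +1 when a segment crosses the ray with increasing
// y, -1 when decreasing. The path's BVH is walked with an explicit stack; for
// a path with n segments the root node sits at index 2n - 2, and a leaf
// stores the segment index in child0 and encodes the first point index as
// -(child1) - 1. intersect(box, pt) above prunes boxes that lie entirely
// above, below, or strictly to the left of the ray origin.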
bvh_stack[max_bvh_size]; + auto stack_size = 0; + auto winding_number = 0; + bvh_stack[stack_size++] = 2 * num_segments - 2; + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto base_point_id = node.child0; + auto point_id = - node.child1 - 1; + assert(base_point_id < num_segments); + assert(point_id < path.num_points); + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + // intersect p0 + t * (p1 - p0) with pt + t' * (1, 0) + // solve: + // pt.x + t' = v0.x + t * (v1.x - v0.x) + // pt.y = v0.y + t * (v1.y - v0.y) + if (p1.y != p0.y) { + auto t = (pt.y - p0.y) / (p1.y - p0.y); + if (t >= 0 && t <= 1) { + auto tp = p0.x - pt.x + t * (p1.x - p0.x); + if (tp >= 0) { + if (p1.y - p0.y > 0) { + winding_number += 1; + } else { + winding_number -= 1; + } + } + } + } + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2 + // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 + // intersect with pt + t' * (1 0) + // solve + // pt.y = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 + float t[2]; + if (solve_quadratic(p0.y-2*p1.y+p2.y, + -2*p0.y+2*p1.y, + p0.y-pt.y, + &t[0], &t[1])) { + for (int j = 0; j < 2; j++) { + if (t[j] >= 0 && t[j] <= 1) { + auto tp = (p0.x-2*p1.x+p2.x)*t[j]*t[j] + + (-2*p0.x+2*p1.x)*t[j] + + p0.x-pt.x; + if (tp >= 0) { + if (2*(p0.y-2*p1.y+p2.y)*t[j]+(-2*p0.y+2*p1.y) > 0) { + winding_number += 1; + } else { + winding_number -= 1; + } + } + } + } + } + } else if (path.num_control_points[base_point_id] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3 + // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 + // intersect with pt + t' * (1 0) + // solve: + // pt.y = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 + double t[3]; + int num_sol = solve_cubic(double(-p0.y+3*p1.y-3*p2.y+p3.y), + double(3*p0.y-6*p1.y+3*p2.y), + double(-3*p0.y+3*p1.y), + double(p0.y-pt.y), + t); + for (int j = 0; j < num_sol; j++) { + if (t[j] >= 0 && t[j] <= 1) { + // t' = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 - pt.x + auto tp = (-p0.x+3*p1.x-3*p2.x+p3.x)*t[j]*t[j]*t[j]+ + (3*p0.x-6*p1.x+3*p2.x)*t[j]*t[j]+ + (-3*p0.x+3*p1.x)*t[j]+ + p0.x-pt.x; + if (tp > 0) { + if (3*(-p0.y+3*p1.y-3*p2.y+p3.y)*t[j]*t[j]+ + 2*(3*p0.y-6*p1.y+3*p2.y)*t[j]+ + (-3*p0.y+3*p1.y) > 0) { + winding_number += 1; + } else { + winding_number -= 1; + } + } + } + } + } else { + assert(false); + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if 
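// For each leaf the ray test reduces to a 1-D root find in the curve
// parameter t: solve curve_y(t) = pt.y (linear, quadratic, or cubic in t
// depending on the number of control points), keep roots with t in [0, 1]
// whose intersection point lies to the right of (or at) pt, and take the
// crossing sign from the sign of dy/dt at the root. Internal nodes simply
// push the children whose boxes the ray can still reach.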
(intersect(b0, pt)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (intersect(b1, pt)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_size); + } + } + return winding_number; +} + +DEVICE +int compute_winding_number(const Rect &rect, const Vector2f &pt) { + const auto &p_min = rect.p_min; + const auto &p_max = rect.p_max; + // inside the rectangle: return 1, outside the rectangle: return 0 + if (pt.x > p_min.x && pt.x < p_max.x && pt.y > p_min.y && pt.y < p_max.y) { + return 1; + } else { + return 0; + } +} + +DEVICE +int compute_winding_number(const Shape &shape, const BVHNode *bvh_nodes, const Vector2f &pt) { + switch (shape.type) { + case ShapeType::Circle: + return compute_winding_number(*(const Circle *)shape.ptr, pt); + case ShapeType::Ellipse: + return compute_winding_number(*(const Ellipse *)shape.ptr, pt); + case ShapeType::Path: + return compute_winding_number(*(const Path *)shape.ptr, bvh_nodes, pt); + case ShapeType::Rect: + return compute_winding_number(*(const Rect *)shape.ptr, pt); + } + assert(false); + return 0; +} diff --git a/within_distance.h b/within_distance.h new file mode 100644 index 0000000..e815377 --- /dev/null +++ b/within_distance.h @@ -0,0 +1,446 @@ +#pragma once + +#include "diffvg.h" +#include "edge_query.h" +#include "shape.h" +#include "vector.h" + +DEVICE +inline +bool within_distance(const Circle &circle, const Vector2f &pt, float r) { + auto dist_to_center = distance(circle.center, pt); + if (fabs(dist_to_center - circle.radius) < r) { + return true; + } + return false; +} + +DEVICE +inline +bool within_distance(const Path &path, const BVHNode *bvh_nodes, const Vector2f &pt, float r) { + auto num_segments = path.num_base_points; + constexpr auto max_bvh_size = 128; + int bvh_stack[max_bvh_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * num_segments - 2; + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto base_point_id = node.child0; + auto point_id = - node.child1 - 1; + assert(base_point_id < num_segments); + assert(point_id < path.num_points); + if (path.num_control_points[base_point_id] == 0) { + // Straight line + auto i0 = point_id; + auto i1 = (point_id + 1) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + auto r0 = r; + auto r1 = r; + // override radius if path has thickness + if (path.thickness != nullptr) { + r0 = path.thickness[i0]; + r1 = path.thickness[i1]; + } + if (t < 0) { + if (distance_squared(p0, pt) < r0 * r0) { + return true; + } + } else if (t > 1) { + if (distance_squared(p1, pt) < r1 * r1) { + return true; + } + } else { + auto r = r0 + t * (r1 - r0); + if (distance_squared(p0 + t * (p1 - p0), pt) < r * r) { + return true; + } + } + } else if (path.num_control_points[base_point_id] == 1) { + // Quadratic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = (point_id + 2) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + if (path.use_distance_approx) { + auto cp = quadratic_closest_pt_approx(p0, p1, p2, pt); + return distance_squared(cp, pt) < r * r; + } + auto eval = [&](float t) -> 
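// within_distance(path, ...) answers "is pt within stroke radius r of the
// outline?". For straight segments the point is projected onto the segment
// (with the t < 0 and t > 1 cases falling back to the endpoints) and compared
// against the squared radius; when the path carries a per-vertex thickness
// array, r is replaced by the thickness interpolated along the segment or
// curve. For quadratic segments with use_distance_approx set, the approximate
// closest point from vector.h is used directly; otherwise the exact
// stationarity condition is solved below.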
Vector2f { + auto tt = 1 - t; + return (tt*tt)*p0 + (2*tt*t)*p1 + (t*t)*p2; + }; + auto r0 = r; + auto r1 = r; + auto r2 = r; + // override radius if path has thickness + if (path.thickness != nullptr) { + r0 = path.thickness[i0]; + r1 = path.thickness[i1]; + r2 = path.thickness[i2]; + } + if (distance_squared(eval(0), pt) < r0 * r0) { + return true; + } + if (distance_squared(eval(1), pt) < r2 * r2) { + return true; + } + + // The curve is (1-t)^2p0 + 2(1-t)tp1 + t^2p2 + // = (p0-2p1+p2)t^2+(-2p0+2p1)t+p0 = q + // Want to solve (q - pt) dot q' = 0 + // q' = (p0-2p1+p2)t + (-p0+p1) + // Expanding (p0-2p1+p2)^2 t^3 + + // 3(p0-2p1+p2)(-p0+p1) t^2 + + // (2(-p0+p1)^2+(p0-2p1+p2)(p0-pt))t + + // (-p0+p1)(p0-pt) = 0 + auto A = sum((p0-2*p1+p2)*(p0-2*p1+p2)); + auto B = sum(3*(p0-2*p1+p2)*(-p0+p1)); + auto C = sum(2*(-p0+p1)*(-p0+p1)+(p0-2*p1+p2)*(p0-pt)); + auto D = sum((-p0+p1)*(p0-pt)); + float t[3]; + int num_sol = solve_cubic(A, B, C, D, t); + for (int j = 0; j < num_sol; j++) { + if (t[j] >= 0 && t[j] <= 1) { + auto tt = 1 - t[j]; + auto r = (tt*tt)*r0 + (2*tt*t[j])*r1 + (t[j]*t[j])*r2; + auto p = eval(t[j]); + if (distance_squared(p, pt) < r*r) { + return true; + } + } + } + } else if (path.num_control_points[base_point_id] == 2) { + // Cubic Bezier curve + auto i0 = point_id; + auto i1 = point_id + 1; + auto i2 = point_id + 2; + auto i3 = (point_id + 3) % path.num_points; + auto p0 = Vector2f{path.points[2 * i0], path.points[2 * i0 + 1]}; + auto p1 = Vector2f{path.points[2 * i1], path.points[2 * i1 + 1]}; + auto p2 = Vector2f{path.points[2 * i2], path.points[2 * i2 + 1]}; + auto p3 = Vector2f{path.points[2 * i3], path.points[2 * i3 + 1]}; + auto eval = [&](float t) -> Vector2f { + auto tt = 1 - t; + return (tt*tt*tt)*p0 + (3*tt*tt*t)*p1 + (3*tt*t*t)*p2 + (t*t*t)*p3; + }; + auto r0 = r; + auto r1 = r; + auto r2 = r; + auto r3 = r; + // override radius if path has thickness + if (path.thickness != nullptr) { + r0 = path.thickness[i0]; + r1 = path.thickness[i1]; + r2 = path.thickness[i2]; + r3 = path.thickness[i3]; + } + if (distance_squared(eval(0), pt) < r0*r0) { + return true; + } + if (distance_squared(eval(1), pt) < r3*r3) { + return true; + } + // The curve is (1 - t)^3 p0 + 3 * (1 - t)^2 t p1 + 3 * (1 - t) t^2 p2 + t^3 p3 + // = (-p0+3p1-3p2+p3) t^3 + (3p0-6p1+3p2) t^2 + (-3p0+3p1) t + p0 + // Want to solve (q - pt) dot q' = 0 + // q' = 3*(-p0+3p1-3p2+p3)t^2 + 2*(3p0-6p1+3p2)t + (-3p0+3p1) + // Expanding + // 3*(-p0+3p1-3p2+p3)^2 t^5 + // 5*(-p0+3p1-3p2+p3)(3p0-6p1+3p2) t^4 + // 4*(-p0+3p1-3p2+p3)(-3p0+3p1) + 2*(3p0-6p1+3p2)^2 t^3 + // 3*(3p0-6p1+3p2)(-3p0+3p1) + 3*(-p0+3p1-3p2+p3)(p0-pt) t^2 + // (-3p0+3p1)^2+2(p0-pt)(3p0-6p1+3p2) t + // (p0-pt)(-3p0+3p1) + double A = 3*sum((-p0+3*p1-3*p2+p3)*(-p0+3*p1-3*p2+p3)); + double B = 5*sum((-p0+3*p1-3*p2+p3)*(3*p0-6*p1+3*p2)); + double C = 4*sum((-p0+3*p1-3*p2+p3)*(-3*p0+3*p1)) + 2*sum((3*p0-6*p1+3*p2)*(3*p0-6*p1+3*p2)); + double D = 3*(sum((3*p0-6*p1+3*p2)*(-3*p0+3*p1)) + sum((-p0+3*p1-3*p2+p3)*(p0-pt))); + double E = sum((-3*p0+3*p1)*(-3*p0+3*p1)) + 2*sum((p0-pt)*(3*p0-6*p1+3*p2)); + double F = sum((p0-pt)*(-3*p0+3*p1)); + // normalize the polynomial + B /= A; + C /= A; + D /= A; + E /= A; + F /= A; + // Isolator Polynomials: + // https://citeseerx.ist.psu.edu/viewdoc/download?doi=10.1.1.133.2233&rep=rep1&type=pdf + // x/5 + B/25 + // /----------------------------------------------------- + // 5x^4 + 4B x^3 + 3C x^2 + 2D x + E / x^5 + B x^4 + C x^3 + D x^2 + E x + F + // x^5 + 4B/5 x^4 + 3C/5 x^3 + 2D/5 x^2 + E/5 x + // 
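// For a cubic segment the stationarity condition (q(t) - pt) . q'(t) = 0 is a
// degree-5 polynomial; after dividing by the leading coefficient it reads
// t^5 + B t^4 + C t^3 + D t^2 + E t + F. The long division sketched in this
// comment produces an "isolator" cubic (together with the linear factor
// x/5 + B/25) whose real roots split [0, 1] into sub-intervals, each of which
// brackets at most one root of the quintic and can be searched independently.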
---------------------------------------------------- + // B/5 x^4 + 2C/5 x^3 + 3D/5 x^2 + 4E/5 x + F + // B/5 x^4 + 4B^2/25 x^3 + 3BC/25 x^2 + 2BD/25 x + BE/25 + // ---------------------------------------------------- + // (2C/5 - 4B^2/25)x^3 + (3D/5-3BC/25)x^2 + (4E/5-2BD/25) + (F-BE/25) + auto p1A = ((2 / 5.f) * C - (4 / 25.f) * B * B); + auto p1B = ((3 / 5.f) * D - (3 / 25.f) * B * C); + auto p1C = ((4 / 5.f) * E - (2 / 25.f) * B * D); + auto p1D = F - B * E / 25.f; + // auto q1A = 1 / 5.f; + // auto q1B = B / 25.f; + // x/5 + B/25 = 0 + // x = -B/5 + auto q_root = -B/5.f; + double p_roots[3]; + int num_sol = solve_cubic(p1A, p1B, p1C, p1D, p_roots); + float intervals[4]; + if (q_root >= 0 && q_root <= 1) { + intervals[0] = q_root; + } + for (int j = 0; j < num_sol; j++) { + intervals[j + 1] = p_roots[j]; + } + auto num_intervals = 1 + num_sol; + // sort intervals + for (int j = 1; j < num_intervals; j++) { + for (int k = j; k > 0 && intervals[k - 1] > intervals[k]; k--) { + auto tmp = intervals[k]; + intervals[k] = intervals[k - 1]; + intervals[k - 1] = tmp; + } + } + auto eval_polynomial = [&] (double t) { + return t*t*t*t*t+ + B*t*t*t*t+ + C*t*t*t+ + D*t*t+ + E*t+ + F; + }; + auto eval_polynomial_deriv = [&] (double t) { + return 5*t*t*t*t+ + 4*B*t*t*t+ + 3*C*t*t+ + 2*D*t+ + E; + }; + auto lower_bound = 0.f; + for (int j = 0; j < num_intervals + 1; j++) { + if (j < num_intervals && intervals[j] < 0.f) { + continue; + } + auto upper_bound = j < num_intervals ? + min(intervals[j], 1.f) : 1.f; + auto lb = lower_bound; + auto ub = upper_bound; + auto lb_eval = eval_polynomial(lb); + auto ub_eval = eval_polynomial(ub); + if (lb_eval * ub_eval > 0) { + // Doesn't have root + continue; + } + if (lb_eval > ub_eval) { + swap_(lb, ub); + } + auto t = 0.5f * (lb + ub); + for (int it = 0; it < 20; it++) { + if (!(t >= lb && t <= ub)) { + t = 0.5f * (lb + ub); + } + auto value = eval_polynomial(t); + if (fabs(value) < 1e-5f || it == 19) { + break; + } + // The derivative may not be entirely accurate, + // but the bisection is going to handle this + if (value > 0.f) { + ub = t; + } else { + lb = t; + } + auto derivative = eval_polynomial_deriv(t); + t -= value / derivative; + } + auto tt = 1 - t; + auto r = (tt*tt*tt)*r0 + (3*tt*tt*t)*r1 + (3*tt*t*t)*r2 + (t*t*t)*r3; + if (distance_squared(eval(t), pt) < r * r) { + return true; + } + if (upper_bound >= 1.f) { + break; + } + lower_bound = upper_bound; + } + } else { + assert(false); + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (within_distance(b0, pt, bvh_nodes[node.child0].max_radius)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (within_distance(b1, pt, bvh_nodes[node.child1].max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_size); + } + } + return false; +} + +DEVICE +inline +int within_distance(const Rect &rect, const Vector2f &pt, float r) { + auto test = [&](const Vector2f &p0, const Vector2f &p1) { + // project pt to line + auto t = dot(pt - p0, p1 - p0) / dot(p1 - p0, p1 - p0); + if (t < 0) { + if (distance_squared(p0, pt) < r * r) { + return true; + } + } else if (t > 1) { + if (distance_squared(p1, pt) < r * r) { + return true; + } + } else { + if (distance_squared(p0 + t * (p1 - p0), pt) < r * r) { + return true; + } + } + return false; + }; + auto left_top = rect.p_min; + auto right_top = Vector2f{rect.p_max.x, rect.p_min.y}; + auto left_bottom = Vector2f{rect.p_min.x, 
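// Each candidate interval above is refined with a safeguarded Newton
// iteration: the polynomial is bracketed by a sign change, Newton steps are
// taken starting from the midpoint, and any step that leaves the bracket
// falls back to bisection; the search stops after at most 20 iterations or
// once the residual drops below 1e-5. The rectangle test here simply measures
// the point-to-segment distance against r for each of the four edges in turn.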
rect.p_max.y}; + auto right_bottom = rect.p_max; + // left + if (test(left_top, left_bottom)) { + return true; + } + // top + if (test(left_top, right_top)) { + return true; + } + // right + if (test(right_top, right_bottom)) { + return true; + } + // bottom + if (test(left_bottom, right_bottom)) { + return true; + } + return false; +} + +DEVICE +inline +bool within_distance(const Shape &shape, const BVHNode *bvh_nodes, const Vector2f &pt, float r) { + switch (shape.type) { + case ShapeType::Circle: + return within_distance(*(const Circle *)shape.ptr, pt, r); + case ShapeType::Ellipse: + // https://www.geometrictools.com/Documentation/DistancePointEllipseEllipsoid.pdf + assert(false); + return false; + case ShapeType::Path: + return within_distance(*(const Path *)shape.ptr, bvh_nodes, pt, r); + case ShapeType::Rect: + return within_distance(*(const Rect *)shape.ptr, pt, r); + } + assert(false); + return false; +} + +DEVICE +inline +bool within_distance(const SceneData &scene, + int shape_group_id, + const Vector2f &pt) { + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + // pt is in canvas space, transform it to shape's local space + auto local_pt = xform_pt(shape_group.canvas_to_shape, pt); + + constexpr auto max_bvh_stack_size = 64; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; + const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id]; + + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto shape_id = node.child0; + const auto &shape = scene.shapes[shape_id]; + if (within_distance(shape, scene.path_bvhs[shape_id], + local_pt, shape.stroke_width)) { + return true; + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (inside(b0, local_pt, bvh_nodes[node.child0].max_radius)) { + bvh_stack[stack_size++] = node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (inside(b1, local_pt, bvh_nodes[node.child1].max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + + return false; +} + +DEVICE +inline +bool within_distance(const SceneData &scene, + int shape_group_id, + const Vector2f &pt, + EdgeQuery *edge_query) { + if (edge_query == nullptr || shape_group_id != edge_query->shape_group_id) { + // Specialized version + return within_distance(scene, shape_group_id, pt); + } + const ShapeGroup &shape_group = scene.shape_groups[shape_group_id]; + // pt is in canvas space, transform it to shape's local space + auto local_pt = xform_pt(shape_group.canvas_to_shape, pt); + + constexpr auto max_bvh_stack_size = 64; + int bvh_stack[max_bvh_stack_size]; + auto stack_size = 0; + bvh_stack[stack_size++] = 2 * shape_group.num_shapes - 2; + const auto &bvh_nodes = scene.shape_groups_bvh_nodes[shape_group_id]; + + auto ret = false; + while (stack_size > 0) { + const BVHNode &node = bvh_nodes[bvh_stack[--stack_size]]; + if (node.child1 < 0) { + // leaf + auto shape_id = node.child0; + const auto &shape = scene.shapes[shape_id]; + if (within_distance(shape, scene.path_bvhs[shape_id], + local_pt, shape.stroke_width)) { + ret = true; + if (shape_id == edge_query->shape_id) { + edge_query->hit = true; + } + } + } else { + assert(node.child0 >= 0 && node.child1 >= 0); + const AABB &b0 = bvh_nodes[node.child0].box; + if (inside(b0, local_pt, bvh_nodes[node.child0].max_radius)) { + bvh_stack[stack_size++] = 
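// The scene-level overloads transform pt into the shape group's local space,
// then walk the per-group BVH and call the per-shape within_distance with
// shape.stroke_width as the radius; boxes are pruned with
// inside(box, pt, max_radius), which pads the test by the node's max_radius.
// This variant differs from the one above only in that it keeps traversing
// after a hit so it can also record, in edge_query->hit, whether the specific
// shape_id being queried was among the shapes covering pt.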
node.child0; + } + const AABB &b1 = bvh_nodes[node.child1].box; + if (inside(b1, local_pt, bvh_nodes[node.child1].max_radius)) { + bvh_stack[stack_size++] = node.child1; + } + assert(stack_size <= max_bvh_stack_size); + } + } + + return ret; +}
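Taken together, winding_number.h answers fill coverage and within_distance.h answers stroke coverage for a sample point. A minimal sketch of how the two queries compose (a hypothetical helper, not part of this commit; it assumes the non-zero fill rule and reuses the per-shape entry points defined above):

DEVICE
inline bool covers(const Shape &shape,
                   const BVHNode *path_bvh,
                   const Vector2f &pt,
                   bool fill,
                   bool stroke) {
    // Fill coverage: a non-zero winding number means pt is inside the outline.
    if (fill && compute_winding_number(shape, path_bvh, pt) != 0) {
        return true;
    }
    // Stroke coverage: pt lies within the stroke radius of the outline.
    if (stroke && within_distance(shape, path_bvh, pt, shape.stroke_width)) {
        return true;
    }
    return false;
}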