diff --git a/.idea/fluency.iml b/.idea/fluency.iml
index b10ce10..aa0e86d 100644
--- a/.idea/fluency.iml
+++ b/.idea/fluency.iml
@@ -5,7 +5,7 @@
-
+
\ No newline at end of file
diff --git a/.idea/misc.xml b/.idea/misc.xml
index 3064ac6..62bb7a8 100644
--- a/.idea/misc.xml
+++ b/.idea/misc.xml
@@ -3,7 +3,7 @@
-
+
diff --git a/.idea/vcs.xml b/.idea/vcs.xml
index 57f2ca5..94a25f7 100644
--- a/.idea/vcs.xml
+++ b/.idea/vcs.xml
@@ -2,6 +2,5 @@
-
\ No newline at end of file
diff --git a/.idea/workspace.xml b/.idea/workspace.xml
index 13a4842..3cccce2 100644
--- a/.idea/workspace.xml
+++ b/.idea/workspace.xml
@@ -4,23 +4,14 @@
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
-
+
+
+
+
+
+
+
+
@@ -35,6 +26,11 @@
+
@@ -48,7 +44,6 @@
"associatedIndex": 6
}
-
@@ -57,17 +52,23 @@
"keyToString": {
"Python.2dtest.executor": "Run",
"Python.3d_windows.executor": "Run",
+ "Python.Unnamed.executor": "Run",
+ "Python.draw_widget2d.executor": "Run",
+ "Python.draw_widget_solve.executor": "Run",
"Python.fluency.executor": "Run",
"Python.fluencyb.executor": "Run",
"Python.gl_widget.executor": "Run",
"Python.main.executor": "Run",
"Python.meshtest.executor": "Run",
"Python.side_fluency.executor": "Run",
+ "Python.simple_mesh.executor": "Run",
+ "Python.vtk_widget.executor": "Run",
"Python.vulkan.executor": "Run",
"RunOnceActivity.OpenProjectViewOnStart": "true",
"RunOnceActivity.ShowReadmeOnStart": "true",
+ "RunOnceActivity.git.unshallow": "true",
"git-widget-placeholder": "master",
- "last_opened_file_path": "/Volumes/Data_drive/Programming/fluency/modules",
+ "last_opened_file_path": "/Volumes/Data_drive/Programming/fluency",
"settings.editor.selected.configurable": "project.propVCSSupport.DirectoryMappings"
}
}]]>
@@ -78,6 +79,8 @@
+
+
@@ -87,7 +90,7 @@
-
+
@@ -108,7 +111,143 @@
1703951701948
-
+
+
+ 1729958532384
+
+
+
+ 1729958532384
+
+
+
+ 1735563255455
+
+
+
+ 1735563255455
+
+
+
+ 1735585968733
+
+
+
+ 1735585968733
+
+
+
+ 1735601610504
+
+
+
+ 1735601610504
+
+
+
+ 1735601786207
+
+
+
+ 1735601786207
+
+
+
+ 1735652081552
+
+
+
+ 1735652081552
+
+
+
+ 1735662119176
+
+
+
+ 1735662119176
+
+
+
+ 1735685300102
+
+
+
+ 1735685300102
+
+
+
+ 1735763743346
+
+
+
+ 1735763743346
+
+
+
+ 1735825176611
+
+
+
+ 1735825176611
+
+
+
+ 1735842523870
+
+
+
+ 1735842523870
+
+
+
+ 1739739664763
+
+
+
+ 1739739664763
+
+
+
+ 1743193041868
+
+
+
+ 1743193041868
+
+
+
+ 1743284173326
+
+
+
+ 1743284173326
+
+
+
+ 1744555255868
+
+
+
+ 1744555255868
+
+
+
+ 1748764814845
+
+
+
+ 1748764814845
+
+
+
+ 1748765318267
+
+
+
+ 1748765318267
+
+
@@ -127,6 +266,21 @@
-
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
+
\ No newline at end of file
diff --git a/main.py b/main.py
index edc4842..fb03d04 100644
--- a/main.py
+++ b/main.py
@@ -9,7 +9,8 @@ from PySide6.QtCore import Qt, QPoint, Signal, QSize
from PySide6.QtWidgets import QApplication, QMainWindow, QSizePolicy, QInputDialog, QDialog, QVBoxLayout, QHBoxLayout, QLabel, QDoubleSpinBox, QCheckBox, QPushButton, QButtonGroup
from Gui import Ui_fluencyCAD # Import the generated GUI module
from drawing_modules.vtk_widget import VTKWidget
-from drawing_modules.vysta_widget import PyVistaWidget
+import numpy as np
+
from drawing_modules.draw_widget_solve import SketchWidget
from sdf import *
from python_solvespace import SolverSystem, ResultFlag
diff --git a/mesh_modules/simple_mesh.py b/mesh_modules/simple_mesh.py
index 48fc829..2f4f027 100644
--- a/mesh_modules/simple_mesh.py
+++ b/mesh_modules/simple_mesh.py
@@ -1,6 +1,6 @@
import numpy as np
from scipy.spatial import Delaunay, ConvexHull
-from shapely.geometry import Polygon, Point
+#from shapely.geometry import Polygon, Point
def alpha_shape(points, alpha):
diff --git a/requirements.txt b/requirements.txt
index dfbcfbb..959becb 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -1,60 +1,61 @@
-certifi==2025.1.31
-cffi==1.17.1
-charset-normalizer==3.4.1
-contourpy==1.3.1
+asttokens==3.0.0
+attrs==25.3.0
+black==24.10.0
+click==8.2.1
+contourpy==1.3.2
cycler==0.12.1
+decorator==5.2.1
+executing==2.2.0
flexcache==0.3
flexparser==0.4
-fonttools==4.56.0
-freetype-py==2.5.1
-hsluv==5.0.4
-idna==3.10
+fonttools==4.58.1
+h5py==3.13.0
imageio==2.37.0
+ipython==9.3.0
+ipython_pygments_lexers==1.1.1
+jedi==0.19.2
kiwisolver==1.4.8
lazy_loader==0.4
markdown-it-py==3.0.0
-matplotlib==3.10.1
+matplotlib==3.10.3
+matplotlib-inline==0.1.7
mdurl==0.1.2
meshio==5.3.5
+mypy_extensions==1.1.0
names==0.3.0
-networkx==3.4.2
-Nuitka==2.6.9
-numpy==2.2.4
-numpy-stl==3.2.0
+networkx==3.5
+Nuitka==2.7.10
+numpy==2.2.6
ordered-set==4.1.0
-packaging==24.2
-panda3d-gltf==1.3.0
-panda3d-simplepbr==0.13.0
-pillow==11.1.0
+packaging==25.0
+parso==0.8.4
+pathspec==0.12.1
+pexpect==4.9.0
+pillow==11.2.1
Pint==0.24.4
-platformdirs==4.3.7
-pooch==1.8.2
-pycparser==2.22
-pygame==2.6.1
+platformdirs==4.3.8
+prompt_toolkit==3.0.51
+ptyprocess==0.7.0
+pure_eval==0.2.3
Pygments==2.19.1
pyparsing==3.2.3
-PySide6_Essentials==6.8.3
+PySide6==6.9.0
+PySide6_Addons==6.9.0
+PySide6_Essentials==6.9.0
python-dateutil==2.9.0.post0
-python-solvespace==3.0.8
-python-utils==3.9.1
-pyvista==0.44.2
-pyvistaqt==0.11.2
-QtPy==2.4.3
-requests==2.32.3
+python_solvespace==3.0.8
rich==13.9.4
scikit-image==0.25.2
-scipy==1.15.2
-scooby==0.10.0
-sdfcad @ git+https://gitlab.com/nobodyinperson/sdfCAD@9bd4e9021c6ee7e685ee28e8a3a5d2d2c028190c
-shapely==2.1.0rc1
-shiboken6==6.8.3
+scipy==1.15.3
+sdfcad @ git+https://gitlab.com/nobodyinperson/sdfCAD@42505b5181c88dda2fd66ac9d387533fbe4145f3
+shiboken6==6.9.0
six==1.17.0
-tifffile==2025.3.13
-trimesh==4.6.5
-tripy==1.0.0
-typing_extensions==4.13.0
-urllib3==2.3.0
-vispy==0.14.3
-vtk==9.4.1
-vulkan==1.3.275.1
+stack-data==0.6.3
+tifffile==2025.5.26
+tokenize_rt==6.2.0
+traitlets==5.14.3
+typing_extensions==4.13.2
+vtk==9.4.2
+wcwidth==0.2.13
+xlrd==2.0.2
zstandard==0.23.0
diff --git a/sdf/__init__.py b/sdf/__init__.py
new file mode 100644
index 0000000..9a544f5
--- /dev/null
+++ b/sdf/__init__.py
@@ -0,0 +1,26 @@
+from . import d2, d3, ease
+
+from .util import *
+from .units import units
+
+from .d2 import *
+
+from .d3 import *
+
+from .text import (
+ measure_image,
+ measure_text,
+ image,
+ text,
+)
+
+from .mesh import (
+ generate,
+ save,
+ sample_slice,
+ show_slice,
+)
+
+from .stl import (
+ write_binary_stl,
+)
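+
+# Minimal usage sketch (illustrative only; shape sizes and file name are made up):
+#
+#   from sdf import *
+#   f = sphere(radius=10) - box(15)
+#   f.save("example.stl", samples=2**16, plot=False)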
diff --git a/sdf/d2.py b/sdf/d2.py
new file mode 100644
index 0000000..283b7c9
--- /dev/null
+++ b/sdf/d2.py
@@ -0,0 +1,390 @@
+import functools
+import numpy as np
+import operator
+import copy
+
+from . import dn, d3, ease
+
+# Constants
+
+ORIGIN = np.array((0, 0))
+
+X = np.array((1, 0))
+Y = np.array((0, 1))
+
+UP = Y
+
+# SDF Class
+
+_ops = {}
+
+
+class SDF2:
+ def __init__(self, f):
+ self.f = f
+
+ def __call__(self, p):
+ return self.f(p).reshape((-1, 1))
+
+ def __getattr__(self, name):
+ if name in _ops:
+ f = _ops[name]
+ return functools.partial(f, self)
+ raise AttributeError
+
+ def __or__(self, other):
+ return union(self, other)
+
+ def __and__(self, other):
+ return intersection(self, other)
+
+ def __sub__(self, other):
+ return difference(self, other)
+
+ def fillet(self, r):
+ newSelf = copy.deepcopy(self)
+ newSelf._r = r
+ return newSelf
+
+ def radius(self, *args, **kwargs):
+ return self.fillet(*args, **kwargs)
+
+ def k(self, *args, **kwargs):
+ return self.fillet(*args, **kwargs)
+
+ def r(self, *args, **kwargs):
+ return self.fillet(*args, **kwargs)
+
+ def chamfer(self, c):
+ newSelf = copy.deepcopy(self)
+ newSelf._c = c
+ return newSelf
+
+ def c(self, *args, **kwargs):
+ return self.chamfer(*args, **kwargs)
+
+
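+# SDF2 usage sketch (illustrative): shapes combine with | & - (union,
+# intersection, difference), and .r(radius) on an operand smooths the next
+# boolean it takes part in, e.g.
+#
+#   plate = rectangle((40, 20)) - circle(radius=4).r(2)
+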
+def sdf2(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return SDF2(f(*args, **kwargs))
+
+ return wrapper
+
+
+def op2(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return SDF2(f(*args, **kwargs))
+
+ _ops[f.__name__] = wrapper
+ return wrapper
+
+
+def op23(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return d3.SDF3(f(*args, **kwargs))
+
+ _ops[f.__name__] = wrapper
+ return wrapper
+
+
+# Helpers
+
+
+def _length(a):
+ return np.linalg.norm(a, axis=1)
+
+
+def _normalize(a):
+ return a / np.linalg.norm(a)
+
+
+def _dot(a, b):
+ return np.sum(a * b, axis=1)
+
+
+def _vec(*arrs):
+ return np.stack(arrs, axis=-1)
+
+
+_min = np.minimum
+_max = np.maximum
+
+# Primitives
+
+
+@sdf2
+def circle(radius=None, diameter=None, center=ORIGIN):
+ if (radius is not None) == (diameter is not None):
+ raise ValueError(f"Specify either radius or diameter")
+ if radius is None:
+ radius = diameter / 2
+
+ def f(p):
+ return _length(p - center) - radius
+
+ return f
+
+
+@sdf2
+def line(normal=UP, point=ORIGIN):
+ normal = _normalize(normal)
+
+ def f(p):
+ return np.dot(point - p, normal)
+
+ return f
+
+
+@sdf2
+def slab(x0=None, y0=None, x1=None, y1=None, r=None):
+ fs = []
+ if x0 is not None:
+ fs.append(line(X, (x0, 0)))
+ if x1 is not None:
+ fs.append(line(-X, (x1, 0)))
+ if y0 is not None:
+ fs.append(line(Y, (0, y0)))
+ if y1 is not None:
+ fs.append(line(-Y, (0, y1)))
+ return intersection(*fs, r=r)
+
+
+@sdf2
+def rectangle(size=1, center=ORIGIN, a=None, b=None):
+ if a is not None and b is not None:
+ a = np.array(a)
+ b = np.array(b)
+ size = b - a
+ center = a + size / 2
+ return rectangle(size, center)
+ size = np.array(size)
+
+ def f(p):
+ q = np.abs(p - center) - size / 2
+ return _length(_max(q, 0)) + _min(np.amax(q, axis=1), 0)
+
+ return f
+
+
+@sdf2
+def rounded_rectangle(size, radius, center=ORIGIN):
+ try:
+ r0, r1, r2, r3 = radius
+ except TypeError:
+ r0 = r1 = r2 = r3 = radius
+
+ def f(p):
+ x = p[:, 0]
+ y = p[:, 1]
+ r = np.zeros(len(p)).reshape((-1, 1))
+ r[np.logical_and(x > 0, y > 0)] = r0
+ r[np.logical_and(x > 0, y <= 0)] = r1
+ r[np.logical_and(x <= 0, y <= 0)] = r2
+ r[np.logical_and(x <= 0, y > 0)] = r3
+ q = np.abs(p) - size / 2 + r
+ return (
+ _min(_max(q[:, 0], q[:, 1]), 0).reshape((-1, 1))
+ + _length(_max(q, 0)).reshape((-1, 1))
+ - r
+ )
+
+ return f
+
+
+@sdf2
+def equilateral_triangle():
+ def f(p):
+ k = 3**0.5
+ p = _vec(np.abs(p[:, 0]) - 1, p[:, 1] + 1 / k)
+ w = p[:, 0] + k * p[:, 1] > 0
+ q = _vec(p[:, 0] - k * p[:, 1], -k * p[:, 0] - p[:, 1]) / 2
+ p = np.where(w.reshape((-1, 1)), q, p)
+ p = _vec(p[:, 0] - np.clip(p[:, 0], -2, 0), p[:, 1])
+ return -_length(p) * np.sign(p[:, 1])
+
+ return f
+
+
+@sdf2
+def hexagon(radius=None, diameter=None):
+ if (radius is not None) == (diameter is not None):
+ raise ValueError(f"Specify either radius or diameter")
+ if radius is None:
+ radius = diameter / 2
+ radius *= 3**0.5 / 2
+
+ def f(p):
+ k = np.array((3**0.5 / -2, 0.5, np.tan(np.pi / 6)))
+ p = np.abs(p)
+ p -= 2 * k[:2] * _min(_dot(k[:2], p), 0).reshape((-1, 1))
+ p -= _vec(
+ np.clip(p[:, 0], -k[2] * radius, k[2] * radius), np.zeros(len(p)) + radius
+ )
+ return _length(p) * np.sign(p[:, 1])
+
+ return f
+
+
+@sdf2
+def rounded_x(w, r):
+ def f(p):
+ p = np.abs(p)
+ q = (_min(p[:, 0] + p[:, 1], w) * 0.5).reshape((-1, 1))
+ return _length(p - q) - r
+
+ return f
+
+
+def RegularPolygon(n, r=1):
+ ri = r * np.cos(np.pi / n)
+ return intersection(
+ *[slab(y0=-ri).rotate(a) for a in np.arange(0, 2 * np.pi, 2 * np.pi / n)]
+ )
+
+
+@sdf2
+def polygon(points):
+ points = [np.array(p) for p in points]
+
+ def f(p):
+ n = len(points)
+ d = _dot(p - points[0], p - points[0])
+ s = np.ones(len(p))
+ for i in range(n):
+ j = (i + n - 1) % n
+ vi = points[i]
+ vj = points[j]
+ e = vj - vi
+ w = p - vi
+ b = w - e * np.clip(np.dot(w, e) / np.dot(e, e), 0, 1).reshape((-1, 1))
+ d = _min(d, _dot(b, b))
+ c1 = p[:, 1] >= vi[1]
+ c2 = p[:, 1] < vj[1]
+ c3 = e[0] * w[:, 1] > e[1] * w[:, 0]
+ c = _vec(c1, c2, c3)
+ s = np.where(np.all(c, axis=1) | np.all(~c, axis=1), -s, s)
+ return s * np.sqrt(d)
+
+ return f
+
+
+# Positioning
+
+
+@op2
+def translate(other, offset):
+ def f(p):
+ return other(p - offset)
+
+ return f
+
+
+@op2
+def scale(other, factor):
+ try:
+ x, y = factor
+ except TypeError:
+ x = y = factor
+ s = (x, y)
+ m = min(x, y)
+
+ def f(p):
+ return other(p / s) * m
+
+ return f
+
+
+@op2
+def rotate(other, angle):
+ s = np.sin(angle)
+ c = np.cos(angle)
+ m = 1 - c
+ matrix = np.array(
+ [
+ [c, -s],
+ [s, c],
+ ]
+ ).T
+
+ def f(p):
+ return other(np.dot(p, matrix))
+
+ return f
+
+
+@op2
+def circular_array(other, count):
+ angles = [i / count * 2 * np.pi for i in range(count)]
+ return union(*[other.rotate(a) for a in angles])
+
+
+# Alterations
+
+
+@op2
+def elongate(other, size):
+ def f(p):
+ q = np.abs(p) - size
+ x = q[:, 0].reshape((-1, 1))
+ y = q[:, 1].reshape((-1, 1))
+ w = _min(_max(x, y), 0)
+ return other(_max(q, 0)) + w
+
+ return f
+
+
+# 2D => 3D Operations
+
+
+@op23
+def extrude(other, h=np.inf):
+ def f(p):
+ d = other(p[:, [0, 1]])
+ w = _vec(d.reshape(-1), np.abs(p[:, 2]) - h / 2)
+ return _min(_max(w[:, 0], w[:, 1]), 0) + _length(_max(w, 0))
+
+ return f
+
+
+@op23
+def extrude_to(a, b, h, e=ease.linear):
+ def f(p):
+ d1 = a(p[:, [0, 1]])
+ d2 = b(p[:, [0, 1]])
+ t = e(np.clip(p[:, 2] / h, -0.5, 0.5) + 0.5)
+ d = d1 + (d2 - d1) * t.reshape((-1, 1))
+ w = _vec(d.reshape(-1), np.abs(p[:, 2]) - h / 2)
+ return _min(_max(w[:, 0], w[:, 1]), 0) + _length(_max(w, 0))
+
+ return f
+
+
+@op23
+def revolve(other, offset=0):
+ def f(p):
+ xy = p[:, [0, 1]]
+ # use horizontal distance to Z axis as X coordinate in 2D shape
+ # use Z coordinate as Y coordinate in 2D shape
+ q = _vec(_length(xy) - offset, p[:, 2])
+ return other(q)
+
+ return f
+
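+# Sketch (dimensions illustrative): the operations above turn 2D profiles into
+# 3D solids, e.g.
+#
+#   washer = (circle(diameter=20) - circle(diameter=8)).extrude(3)
+#   ring = circle(radius=3).revolve(offset=12)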
+
+# Common
+
+union = op2(dn.union)
+difference = op2(dn.difference)
+intersection = op2(dn.intersection)
+blend = op2(dn.blend)
+negate = op2(dn.negate)
+dilate = op2(dn.dilate)
+erode = op2(dn.erode)
+shell = op2(dn.shell)
+repeat = op2(dn.repeat)
+mirror = op2(dn.mirror)
+modulate_between = op2(dn.modulate_between)
+stretch = op2(dn.stretch)
diff --git a/sdf/d3.py b/sdf/d3.py
new file mode 100644
index 0000000..2f4bca7
--- /dev/null
+++ b/sdf/d3.py
@@ -0,0 +1,1666 @@
+import os
+import time
+import logging
+import functools
+import numpy as np
+import operator
+import warnings
+import copy
+
+# internal modules
+from . import dn, d2, ease, mesh, errors, util
+from .units import units
+
+# external modules
+import scipy.optimize
+from scipy.linalg import LinAlgWarning
+import rich
+import matplotlib.pyplot as plt
+
+# Constants
+
+logger = logging.getLogger(__name__)
+
+ORIGIN = np.array((0, 0, 0))
+
+X = np.array((1, 0, 0))
+Y = np.array((0, 1, 0))
+Z = np.array((0, 0, 1))
+
+UP = Z
+DOWN = -Z
+RIGHT = X
+LEFT = -X
+BACK = Y
+FRONT = -Y
+
+# SDF Class
+
+_ops = {}
+
+
+class SDF3:
+ def __init__(self, f):
+ self.f = f
+
+ def __call__(self, p):
+ return self.f(p).reshape((-1, 1))
+
+ def __getattr__(self, name):
+ if name in _ops:
+ f = _ops[name]
+ return functools.partial(f, self)
+ raise AttributeError
+
+ def __or__(self, other):
+ return union(self, other)
+
+ def __and__(self, other):
+ return intersection(self, other)
+
+ def __sub__(self, other):
+ return difference(self, other)
+
+ def fillet(self, r):
+ newSelf = copy.deepcopy(self)
+ newSelf._r = r
+ return newSelf
+
+ def radius(self, *args, **kwargs):
+ return self.fillet(*args, **kwargs)
+
+ def k(self, *args, **kwargs):
+ return self.fillet(*args, **kwargs)
+
+ def r(self, *args, **kwargs):
+ return self.fillet(*args, **kwargs)
+
+ def chamfer(self, c):
+ newSelf = copy.deepcopy(self)
+ newSelf._c = c
+ return newSelf
+
+ def c(self, *args, **kwargs):
+ return self.chamfer(*args, **kwargs)
+
+ def generate(self, *args, **kwargs):
+ return mesh.generate(self, *args, **kwargs)
+
+ @errors.alpha_quality
+ def closest_surface_point(self, point):
+ def distance(p):
+ # root() wants same input/output dims (yeah...)
+ return np.repeat(self.f(np.expand_dims(p, axis=0)).ravel()[0], 3)
+
+ dist = self.f(np.expand_dims(point, axis=0)).ravel()[0]
+ optima = dict()
+ with warnings.catch_warnings():
+ warnings.simplefilter(
+ "ignore", (LinAlgWarning, RuntimeWarning, UserWarning)
+ )
+ for method in (
+ # loosely sorted by speed
+ "lm",
+ "broyden2",
+ "df-sane",
+ "hybr",
+ "broyden1",
+ "anderson",
+ "linearmixing",
+ "diagbroyden",
+ "excitingmixing",
+ "krylov",
+ ):
+ try:
+ optima[method] = (
+ opt := scipy.optimize.root(
+ distance, x0=np.array(point), method=method
+ )
+ )
+ except Exception as e:
+                    continue
+ opt.zero_error = abs(opt.fun[0])
+ opt.zero_error_rel = opt.zero_error / dist
+ opt.dist_error = np.linalg.norm(opt.x - point) - dist
+ opt.dist_error_rel = opt.dist_error / dist
+ logger.debug(f"{method = }, {opt = }")
+ # shortcut if fit is good
+ if (
+ np.allclose(opt.fun, 0)
+ and abs(opt.dist_error / dist - 1) < 0.01
+ and opt.dist_error < 0.01
+ ):
+ break
+
+ def cost(m):
+ penalty = (
+                # unsuccessfulness is penalized
+ (not optima[m].success)
+ # a higher status normally means something bad
+ + abs(getattr(optima[m], "status", 1))
+                # the farther we are from zero, the worse it is
+                # „1mm away from the boundary is as bad as one status or success step”
+ + optima[m].zero_error
+ # the distance error can be quite large e.g. for non-uniform scaling,
+ # and methods often find weird points, it makes sense to compare to the SDF
+ + optima[m].dist_error
+ )
+ logger.debug(f"{m = :20s}: {penalty = }")
+ return penalty
+
+ best_root = optima[best_method := min(optima, key=cost)]
+ closest_point = best_root.x
+ if (
+ best_root.zero_error > 1
+ or best_root.zero_error_rel > 0.01
+ or best_root.dist_error > 1
+ or best_root.dist_error_rel > 0.01
+ ):
+ warnings.warn(
+ f"Closest surface point to {point} acc. to method {best_method!r} seems to be {closest_point}. "
+ f"The SDF there is {best_root.fun[0]} (should be 0, that's {best_root.zero_error} or {best_root.zero_error_rel*100:.2f}% off).\n"
+ f"Distance between {closest_point} and {point} is {np.linalg.norm(point - closest_point)}, "
+                f"SDF says it should be {dist} (that's {best_root.dist_error} or {best_root.dist_error_rel*100:.2f}% off).\n"
+                f"The root finding algorithms seem to have a problem with your SDF; "
+                f"this might be caused by operations that break the metric, like non-uniform scaling.",
+ errors.SDFCADWarning,
+ )
+ return closest_point
+
+ @errors.alpha_quality
+ def surface_intersection(self, start, direction=None):
+ """
+        Start at ``start``, move (back or forth) along a line following
+        ``direction``, and return the surface intersection coordinates.
+
+ .. note::
+
+ In case there is no intersection, the result *might* (not sure
+            about that) be the point on the line that's closest to the
+ surface 🤔.
+
+ Args:
+ start (3d vector): starting point
+ direction (3d vector or None): direction to move into, defaults to
+ ``-start`` (”move to origin”).
+
+ Returns:
+ 3d vector: the optimized surface intersection
+ """
+ if direction is None:
+ direction = -start
+
+ def transform(t):
+ return start + t * direction
+
+ def distance(t):
+ # root() wants same input/output dims (yeah...)
+ return np.repeat(self.f(np.expand_dims(transform(t), axis=0)).ravel()[0], 3)
+
+ dist = self.f(np.expand_dims(start, axis=0)).ravel()[0]
+ optima = dict()
+ with warnings.catch_warnings():
+ warnings.simplefilter(
+ "ignore", (LinAlgWarning, RuntimeWarning, UserWarning)
+ )
+ for method in (
+ # loosely sorted by speed
+ "lm",
+ "broyden2",
+ "df-sane",
+ "hybr",
+ "broyden1",
+ "anderson",
+ "linearmixing",
+ "diagbroyden",
+ "excitingmixing",
+ "krylov",
+ ):
+ try:
+ optima[method] = (
+ opt := scipy.optimize.root(distance, x0=[0], method=method)
+ )
+ except Exception as e:
+                    continue
+ opt.zero_error = abs(opt.fun[0])
+ opt.zero_error_rel = opt.zero_error / dist
+ opt.point = transform(opt.x[0])
+ logger.debug(f"{method = }, {opt = }")
+ # shortcut if fit is good
+ if np.allclose(opt.fun, 0):
+ break
+
+ def cost(m):
+ penalty = (
+                # unsuccessfulness is penalized
+ (not optima[m].success)
+ # a higher status normally means something bad
+ + abs(getattr(optima[m], "status", 1))
+                # the farther we are from zero, the worse it is
+                # „1mm away from the boundary is as bad as one status or success step”
+ + optima[m].zero_error
+ )
+ logger.debug(f"{m = :20s}: {penalty = }")
+ return penalty
+
+ best_root = optima[best_method := min(optima, key=cost)]
+ closest_point = transform(best_root.x[0])
+ if best_root.zero_error > 1 or best_root.zero_error_rel > 0.01:
+ warnings.warn(
+ f"Surface intersection point from {start = } to {direction = }, acc. to method {best_method!r} seems to be {closest_point}. "
+ f"The SDF there is {best_root.fun[0]} (should be 0, that's {best_root.zero_error} or {best_root.zero_error_rel*100:.2f}% off).\n"
+ f"The root finding algorithms seem to have a problem with your SDF, "
+                f"this might be caused by operations that break the metric (like non-uniform scaling) "
+ f"or just because there is no intersection...",
+ errors.SDFCADWarning,
+ )
+ return closest_point
+
+ @errors.alpha_quality
+ def minimum_sdf_on_plane(self, origin, normal, return_point=False):
+ """
+        Find the minimum SDF distance (not necessarily the real distance if you
+        have non-uniform scaling!) on the plane through ``origin`` whose normal
+        vector is ``normal``.
+
+ Args:
+ origin (3d vector): a point on the plane
+ normal (3d vector): normal vector of the plane
+ return_point (bool): whether to also return the closest point (on
+ the plane!)
+
+ Returns:
+ float: the (minimum) distance to the plane
+ float, 3d vector : distance and closest point (on the plane!) if
+ ``return_point=True``
+ """
+ basemat = np.array(
+ [(e1 := _perpendicular(normal)), (e2 := np.cross(normal, e1))]
+ ).T
+
+ def transform(t):
+ return origin + basemat @ t
+
+ def distance(t):
+ return self.f(np.expand_dims(transform(t), axis=0)).ravel()[0]
+
+ optima = dict()
+ with warnings.catch_warnings():
+ warnings.simplefilter(
+ "ignore", (LinAlgWarning, RuntimeWarning, UserWarning)
+ )
+ for method in (
+ "Nelder-Mead",
+ "Powell",
+ "CG",
+ "BFGS",
+ "L-BFGS-B",
+ "TNC",
+ "COBYLA",
+ "SLSQP",
+ "trust-constr",
+ ):
+ try:
+ optima[method] = (
+ opt := scipy.optimize.minimize(
+ distance, x0=[0, 0], method=method
+ )
+ )
+ opt.point = transform(opt.x)
+ logger.debug(f"{method = }, {opt = }")
+ except Exception as e:
+ logger.error(f"{method = } error {e!r}")
+
+ best_min = optima[min(optima, key=lambda m: optima[m].fun)]
+ if return_point:
+ return best_min.fun, best_min.point
+ else:
+ return best_min.fun
+
+ @errors.alpha_quality
+ def extent_in(self, direction):
+ """
+ Determine the largest distance from the origin in a given ``direction``
+ that's still within the object.
+
+ Args:
+ direction (3d vector): the direction to check
+
+ Returns:
+ float: distance from origin
+ """
+ # create probing points along direction to check where object ends roughly
+        # object ends when the SDF is only increasing (not the case for e.g. infinite repetitions)
+ probing_points = np.expand_dims(np.logspace(0, 9, 30), axis=1) * direction
+ d = self.f(probing_points) # get SDF value at probing points
+ n_trailing_ascending = util.n_trailing_ascending_positive(d)
+ if not n_trailing_ascending:
+ return np.inf
+ if (ratio := n_trailing_ascending / d.size) < 0.5:
+ warnings.warn(
+ f"extent_in({direction = !r}): "
+ f"Only {n_trailing_ascending}/{d.size} ({ratio*100:.1f}%) of probed points in "
+ f"{direction = } have ascending positive SDF distance values. "
+ f"This can be caused by infinite objects. "
+            f"The result might be wrong. ",
+ errors.SDFCADWarning,
+ )
+ faraway = probing_points[
+ -n_trailing_ascending + 1
+ ] # choose first point after which SDF only increases
+ closest_surface_point = self.closest_surface_point_to_plane(
+ origin=faraway, normal=direction
+ )
+ extent = np.linalg.norm(closest_surface_point)
+ return extent
+
+ @property
+ @errors.alpha_quality
+ def bounds(self):
+ """
+ Return X Y Z bounds based on :any:`extent_in`.
+
+ Return:
+ 6-sequence: lower X, upper X, lower Y, upper Y, lower Z, upper Z
+ """
+ return tuple(
+ np.sign(d.sum()) * self.extent_in(d) for d in [-X, X, -Y, Y, -Z, Z]
+ )
+
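+    # Rough sanity sketch (alpha quality, as the decorators above note): for
+    # sphere(radius=5), extent_in(X) should come out near 5 and bounds near
+    # (-5, 5, -5, 5, -5, 5).
+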
+ @errors.alpha_quality
+ def closest_surface_point_to_plane(self, origin, normal):
+ """
+        Find the surface point closest to the plane through ``origin`` whose
+        normal vector is ``normal``.
+
+ Args:
+ origin (3d vector): a point on the plane
+ normal (3d vector): normal vector of the plane
+
+ Returns:
+ 3d vector : closest surface point
+ """
+ distance, plane_point = self.minimum_sdf_on_plane(
+ origin=origin, normal=normal, return_point=True
+ )
+ return self.surface_intersection(start=plane_point, direction=normal)
+
+ @errors.alpha_quality
+ def move_to_positive(self, direction=Z):
+ return self.translate(self.extent_in(-direction) * direction)
+
+ def cut(self, direction=UP, point=ORIGIN, at=None, return_cutouts=False, **kwargs):
+ """
+ Split an object along a direction and return resulting parts.
+
+ Args:
+ direction (3d vector): direction to cut into
+ point (3d vector): point to perform the cut at
+ at (sequence of float): where to perform the cuts along the
+ ``direction`` starting at ``point``. Defaults to ``[0]``,
+ meaning it only splits at ``point``.
+ k (float): passed to :any:`intersection`
+ return_cutouts (bool): whether to return the used cutout masks as
+ second value
+
+
+ Returns:
+ sequence of SDFs: the parts
+ two sequences of SDFs: the parts and cutouts if ``return_cutouts=True``
+ """
+ direction = np.array(direction).reshape(3)
+ direction = _normalize(direction)
+ point = np.array(point).reshape(3)
+ if at is None:
+ at = [0]
+ at = [-np.inf] + list(at) + [np.inf]
+ parts = []
+ cutouts = []
+ for start, end in zip(at[:-1], at[1:]):
+ cuts = []
+ if np.isfinite(start):
+ cuts.append(plane(normal=direction, point=point + start * direction))
+ if np.isfinite(end):
+ cuts.append(plane(normal=-direction, point=point + end * direction))
+ if len(cuts) > 1:
+ cutout = intersection(*cuts)
+ elif len(cuts) == 1:
+ cutout = cuts[0]
+ else:
+ cutout = self
+ cutouts.append(cutout)
+ parts.append(intersection(self, cutout, **kwargs))
+ if return_cutouts:
+ return parts, cutouts
+ else:
+ return parts
+
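+    # Usage sketch (sizes illustrative): split a 60-unit rod into three pieces
+    # by cutting at z = -10 and z = 10:
+    #
+    #   rod = capped_cylinder(-30 * Z, 30 * Z, radius=5)
+    #   bottom, middle, top = rod.cut(direction=Z, at=[-10, 10])
+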
+ def chamfercut(self, size, at=ORIGIN, direction=Z, e=ease.linear):
+ """
+ Chamfer (and then cut) an object along a plane
+
+ Args:
+ size (float): size to chamfer along ``direction``.
+ at (3d point): A point on the plane where to chamfer at. Defaults
+ to ORIGIN.
+ direction (3d vector): direction to chamfer to. Defaults to Z.
+ e (ease.Easing): the easing to use. Will be scaled with ``size``.
+ """
+ direction = direction / np.linalg.norm(direction)
+ result = self.stretch(at, at - size * direction)
+ result = result.modulate_between(at + size * direction, at, e=-size * e)
+ result &= plane(direction, point=at)
+ return result
+
+ def volume(
+ self, bounds=None, step=None, timeout=3, convergence_precision=0.01, plot=False
+ ):
+ """
+ Approximate the volume by sampling the SDF and counting the inside
+ hits.
+
+ Args:
+ bounds (6-sequence, optional): bounds as returned by `extent_in()`
+ step (float, optional): The (single) step size in all dimensions to use.
+ The default is an iterative process gradually refining the grid until
+ it looks like it converges.
+ timeout (float, optional): stop iterating after this time
+ convergence_precision (float, optional): relative volume variance
+ threshold to allow stopping iteration. For example `0.05` means that if the
+ last couple iterations don't vary more than 5% of the current
+ volume estimate, stop.
+ plot (bool, optional): whether to plot the process after completion
+
+ Returns:
+ float : the volume approximation
+ """
+ vols = []
+ ns = []
+ bounds = bounds or self.bounds
+ if not np.all(np.isfinite(bounds) & ~np.isnan(bounds)):
+ raise errors.SDFCADInfiniteObjectError(
+ f"Object has {bounds = }, can't approximate volume with that. "
+ f"You can specify bounds=(x,x,y,y,z,z) manually."
+ )
+ xl, xu, yl, yu, zl, zu = bounds
+ # expand bounds a bit outwards
+ xl -= (dx := abs(xu - xl)) * 0.05
+ xu += dx * 0.05
+ yl -= (dy := abs(yu - yl)) * 0.05
+ yu += dy * 0.05
+ zl -= (dz := abs(zu - zl)) * 0.05
+ zu += dz * 0.05
+ steps = (
+ np.linspace(min(dx, dy, dz) / 10, min(dx, dy, dz) / 1000, 100)
+ if step is None
+ else [step]
+ )
+ _steps = []
+ _samples = []
+ _hits = []
+ _vols = []
+ _times = []
+ _variances = []
+ _relvariances = []
+ time_start = time.perf_counter()
+ for step in steps:
+ if time.perf_counter() - time_start > timeout:
+ logger.warning(f"volume(): timeout of {timeout}s hit")
+ if not _vols:
+ _vols.append(np.nan)
+ break
+ time_before = time.perf_counter()
+ _steps.append(step)
+ grid = np.stack(
+ np.meshgrid(
+ np.arange(bounds[0], bounds[1] + step, step),
+ np.arange(bounds[2], bounds[3] + step, step),
+ np.arange(bounds[4], bounds[5] + step, step),
+ ),
+ axis=-1,
+ )
+ d = self.f(grid.reshape(-1, 3)).reshape(grid.shape[:-1])
+ _samples.append(samples := d.size)
+ _hits.append(hits := np.sum(d < 0))
+ _vols.append(vol := hits * (cellvol := step**3))
+ _times.append(time.perf_counter() - time_before)
+ # _variances.append(np.var(_vols[-5:]))
+ _variances.append(abs(max(_vols[-5:]) - min(_vols[-5:])))
+ _relvariances.append(_variances[-1] / _vols[-1])
+ if len(_variances) > 5 and _relvariances[-1] < convergence_precision:
+ logger.info(f"volume(): convergence_precision hit")
+ break
+ if plot:
+ fig, axes = plt.subplots(nrows=5, sharex=True)
+ axes[-1].set_xlabel("cell side size")
+ axes[0].scatter(_steps, _vols)
+ axes[0].set_ylabel("est. volume")
+ axes[1].scatter(_steps, np.array(_hits) / np.array(_samples))
+ axes[1].set_ylabel("hit ratio")
+ axes[2].scatter(_steps, np.array(_times))
+ axes[2].set_ylabel("time estimation took")
+ axes[3].scatter(_steps, _variances)
+ axes[3].set_ylabel("volume variance of last couple steps bigger than...")
+ axes[4].scatter(_steps, _relvariances)
+ axes[4].set_ylabel("rel variance of last couple steps bigger than...")
+ plt.show()
+ return _vols[-1]
+
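+    # Sketch: box(10).volume() should converge towards 1000 (a 10x10x10 cube),
+    # subject to the timeout/convergence settings documented above.
+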
+ def save(
+ self,
+ path="out.stl",
+ screenshot=False,
+ add_text=None,
+ openscad=False,
+ plot=True,
+ antialiasing=True,
+ plot_kwargs=None,
+ **kwargs,
+ ):
+ mesh.save(path, self, **{**dict(samples=2**18), **kwargs})
+ print(f"💾 Saved mesh to {path!r} ({os.stat(path).st_size} bytes)")
+ if openscad:
+ with open((p := f"{path}.scad"), "w") as fh:
+ fh.write(
+ f"""
+ import("{path}");
+ """.strip()
+ )
+ print(f"💾 Saved OpenSCAD viewer script to {p!r}")
+ if plot or screenshot:
+ try:
+ import pyvista as pv
+ except ImportError as e:
+ print(
+ f"To use the plotting functionality, "
+ f"install pyvista and trame (pip install pyvista trame). "
+                    f"While trying to import them, the following error occurred:\n{e!r}"
+ )
+ return
+ # pv.set_jupyter_backend("client")
+ plotter = pv.Plotter()
+ # axes = pv.Axes(show_actor=True, actor_scale=2, line_width=5)
+ # plotter.camera = pv.Camera()
+ # plotter.camera.position = (-1, -1, 1)
+ # plotter.add_actor(axes.actor)
+ plotter.enable_parallel_projection()
+ if antialiasing:
+ plotter.enable_anti_aliasing(aa_type="msaa")
+ m = pv.read(path)
+ plotter.add_mesh(m)
+ # xl, xu, yl, yu, zl, zu = m.bounds
+ # plotter.add_ruler(
+ # pointa=[0, 0, 0],
+ # pointb=[max(0, xu), 0, 0],
+ # number_minor_ticks=10,
+ # title="X",
+ # )
+ # plotter.add_ruler(
+ # pointa=[0, 0, 0],
+ # pointb=[0, max(0, yu), 0],
+ # number_minor_ticks=10,
+ # title="Y",
+ # )
+ # plotter.add_ruler(
+ # pointa=[0, 0, 0],
+ # pointb=[0, 0, max(0, zu)],
+ # number_minor_ticks=10,
+ # title="Z",
+ # )
+ plotter.add_axes()
+ if add_text:
+ if isinstance(add_text, str):
+ add_text = dict(text=add_text)
+ plotter.add_text(**add_text)
+ with warnings.catch_warnings():
+ warnings.simplefilter(action="ignore", category=UserWarning)
+ if screenshot:
+ if not isinstance(screenshot, str):
+ screenshot = f"{path}.png"
+ plotter.screenshot(screenshot)
+ print(f"🖼️ Saved screenshot to {screenshot!r}")
+ if plot:
+ try:
+ plotter.show(**(plot_kwargs or {}))
+ except TypeError as e:
+ logger.error(f"While showing the plotter: {e!r}, trying again")
+ plotter.show(**(plot_kwargs or {}))
+ else:
+ plotter.close()
+
+ def show_slice(self, *args, **kwargs):
+ return mesh.show_slice(self, *args, **kwargs)
+
+
+def sdf3(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return SDF3(f(*args, **kwargs))
+
+ return wrapper
+
+
+def op3(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return SDF3(f(*args, **kwargs))
+
+ _ops[f.__name__] = wrapper
+ return wrapper
+
+
+def op32(f):
+ @functools.wraps(f)
+ def wrapper(*args, **kwargs):
+ return d2.SDF2(f(*args, **kwargs))
+
+ _ops[f.__name__] = wrapper
+ return wrapper
+
+
+# Helpers
+
+
+def _length(a):
+ return np.linalg.norm(a, axis=1)
+
+
+def _normalize(a):
+ return a / np.linalg.norm(a)
+
+
+def _dot(a, b):
+ return np.sum(a * b, axis=1)
+
+
+def _vec(*arrs):
+ return np.stack(arrs, axis=-1)
+
+
+def _perpendicular(v):
+ if v[1] == 0 and v[2] == 0:
+ if v[0] == 0:
+ raise ValueError("zero vector")
+ else:
+ return np.cross(v, [0, 1, 0])
+ return np.cross(v, [1, 0, 0])
+
+
+_min = np.minimum
+_max = np.maximum
+
+# Primitives
+
+
+@sdf3
+def sphere(radius=0, diameter=0, r=0, d=0, center=ORIGIN):
+ if bool(radius := max(radius, r)) == bool(diameter := max(diameter, d)):
+ raise ValueError(f"Specify either radius or diameter")
+ if not radius:
+ radius = diameter / 2
+
+ def f(p):
+ return _length(p - center) - radius
+
+ return f
+
+
+@sdf3
+def plane(normal=UP, point=ORIGIN):
+ normal = _normalize(normal)
+
+ def f(p):
+ return np.dot(point - p, normal)
+
+ return f
+
+
+@sdf3
+def slab(
+ x0=None,
+ y0=None,
+ z0=None,
+ x1=None,
+ y1=None,
+ z1=None,
+ dx=None,
+ dy=None,
+ dz=None,
+ **kwargs,
+):
+ # How to improve this if/None madness?
+ if dx is not None:
+ if x0 is None:
+ x0 = -dx / 2
+ if x1 is None:
+ x1 = dx / 2
+ if dy is not None:
+ if y0 is None:
+ y0 = -dy / 2
+ if y1 is None:
+ y1 = dy / 2
+ if dz is not None:
+ if z0 is None:
+ z0 = -dz / 2
+ if z1 is None:
+ z1 = dz / 2
+ fs = []
+ if x0 is not None:
+ fs.append(plane(X, (x0, 0, 0)))
+ if x1 is not None:
+ fs.append(plane(-X, (x1, 0, 0)))
+ if y0 is not None:
+ fs.append(plane(Y, (0, y0, 0)))
+ if y1 is not None:
+ fs.append(plane(-Y, (0, y1, 0)))
+ if z0 is not None:
+ fs.append(plane(Z, (0, 0, z0)))
+ if z1 is not None:
+ fs.append(plane(-Z, (0, 0, z1)))
+ return intersection(*fs, **kwargs)
+
+
+@sdf3
+def box(size=1, center=ORIGIN, a=None, b=None):
+ if a is not None and b is not None:
+ a = np.array(a)
+ b = np.array(b)
+ size = b - a
+ center = a + size / 2
+ return box(size, center)
+ size = np.array(size)
+
+ def f(p):
+ q = np.abs(p - center) - size / 2
+ return _length(_max(q, 0)) + _min(np.amax(q, axis=1), 0)
+
+ return f
+
+
+@sdf3
+def rounded_box(size, radius):
+ size = np.array(size)
+
+ def f(p):
+ q = np.abs(p) - size / 2 + radius
+ return _length(_max(q, 0)) + _min(np.amax(q, axis=1), 0) - radius
+
+ return f
+
+
+@sdf3
+def wireframe_box(size, thickness):
+ size = np.array(size)
+
+ def g(a, b, c):
+ return _length(_max(_vec(a, b, c), 0)) + _min(_max(a, _max(b, c)), 0)
+
+ def f(p):
+ p = np.abs(p) - size / 2 - thickness / 2
+ q = np.abs(p + thickness / 2) - thickness / 2
+ px, py, pz = p[:, 0], p[:, 1], p[:, 2]
+ qx, qy, qz = q[:, 0], q[:, 1], q[:, 2]
+ return _min(_min(g(px, qy, qz), g(qx, py, qz)), g(qx, qy, pz))
+
+ return f
+
+
+@sdf3
+def torus(r1, r2):
+ def f(p):
+ xy = p[:, [0, 1]]
+ z = p[:, 2]
+ a = _length(xy) - r1
+ b = _length(_vec(a, z)) - r2
+ return b
+
+ return f
+
+
+@sdf3
+def capsule(a, b, radius=None, diameter=None):
+ if (radius is not None) == (diameter is not None):
+ raise ValueError(f"Specify either radius or diameter")
+ if radius is None:
+ radius = diameter / 2
+ a = np.array(a)
+ b = np.array(b)
+ ba = b - a
+ babadot = np.dot(ba, ba)
+
+ r = radius if hasattr(radius, "__call__") else lambda h: radius
+
+ def f(p):
+ pa = p - a
+ h = np.clip(np.dot(pa, ba) / babadot, 0, 1).reshape((-1, 1))
+ return _length(pa - np.multiply(ba, h)) - r(h.reshape(-1))
+
+ return f
+
+
+def pieslice(angle, centered=False):
+ """
+    Make a pie slice starting at the X axis, rotated by ``angle`` in the
+    mathematically positive direction. Infinite in the Z direction.
+
+    Args:
+        angle: the opening angle of the slice
+        centered: center the slice symmetrically on the X axis
+ """
+ angle = angle % units("360°")
+ if angle <= units("180°"):
+ s = plane(Y) & plane(-Y).rotate(angle)
+ else:
+ s = plane(Y) | plane(-Y).rotate(angle)
+ if centered:
+ s = s.rotate(-angle / 2)
+ return s
+
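+# Illustrative sketch, assuming the pint-based units() helper used above:
+#
+#   wedge = cylinder(radius=10) & pieslice(units("90°")) & slab(z0=0, z1=5)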
+
+@sdf3
+def cylinder(radius=0, r=0, diameter=0, d=0):
+ if bool(radius := max(radius, r)) == bool(diameter := max(diameter, d)):
+ raise ValueError(f"Specify either radius or diameter")
+ if not radius:
+ radius = diameter / 2
+
+ def f(p):
+ return _length(p[:, [0, 1]]) - radius
+
+ return f
+
+
+@sdf3
+def capped_cylinder(a, b, radius=None, diameter=None):
+ if (radius is not None) == (diameter is not None):
+ raise ValueError(f"Specify either radius or diameter")
+ if radius is None:
+ radius = diameter / 2
+
+ a = np.array(a)
+ b = np.array(b)
+
+ def f(p):
+ ba = b - a
+ pa = p - a
+ baba = np.dot(ba, ba)
+ paba = np.dot(pa, ba).reshape((-1, 1))
+ x = _length(pa * baba - ba * paba) - radius * baba
+ y = np.abs(paba - baba * 0.5) - baba * 0.5
+ x = x.reshape((-1, 1))
+ y = y.reshape((-1, 1))
+ x2 = x * x
+ y2 = y * y * baba
+ d = np.where(
+ _max(x, y) < 0,
+ -_min(x2, y2),
+ np.where(x > 0, x2, 0) + np.where(y > 0, y2, 0),
+ )
+ return np.sign(d) * np.sqrt(np.abs(d)) / baba
+
+ return f
+
+
+@sdf3
+def rounded_cylinder(ra, rb, h):
+ def f(p):
+ d = _vec(_length(p[:, [0, 1]]) - ra + rb, np.abs(p[:, 2]) - h / 2 + rb)
+ return _min(_max(d[:, 0], d[:, 1]), 0) + _length(_max(d, 0)) - rb
+
+ return f
+
+
+@sdf3
+def capped_cone(a, b, ra, rb):
+ a = np.array(a)
+ b = np.array(b)
+
+ def f(p):
+ rba = rb - ra
+ baba = np.dot(b - a, b - a)
+ papa = _dot(p - a, p - a)
+ paba = np.dot(p - a, b - a) / baba
+ x = np.sqrt(papa - paba * paba * baba)
+ cax = _max(0, x - np.where(paba < 0.5, ra, rb))
+ cay = np.abs(paba - 0.5) - 0.5
+ k = rba * rba + baba
+ f = np.clip((rba * (x - ra) + paba * baba) / k, 0, 1)
+ cbx = x - ra - f * rba
+ cby = paba - f
+ s = np.where(np.logical_and(cbx < 0, cay < 0), -1, 1)
+ return s * np.sqrt(
+ _min(cax * cax + cay * cay * baba, cbx * cbx + cby * cby * baba)
+ )
+
+ return f
+
+
+@sdf3
+def rounded_cone(r1, r2, h):
+ def f(p):
+ q = _vec(_length(p[:, [0, 1]]), p[:, 2])
+ b = (r1 - r2) / h
+ a = np.sqrt(1 - b * b)
+ k = np.dot(q, _vec(-b, a))
+ c1 = _length(q) - r1
+ c2 = _length(q - _vec(0, h)) - r2
+ c3 = np.dot(q, _vec(a, b)) - r1
+ return np.where(k < 0, c1, np.where(k > a * h, c2, c3))
+
+ return f
+
+
+@sdf3
+def ellipsoid(size):
+ size = np.array(size)
+
+ def f(p):
+ k0 = _length(p / size)
+ k1 = _length(p / (size * size))
+ return k0 * (k0 - 1) / k1
+
+ return f
+
+
+@sdf3
+def pyramid(h):
+ def f(p):
+ a = np.abs(p[:, [0, 1]]) - 0.5
+ w = a[:, 1] > a[:, 0]
+ a[w] = a[:, [1, 0]][w]
+ px = a[:, 0]
+ py = p[:, 2]
+ pz = a[:, 1]
+ m2 = h * h + 0.25
+ qx = pz
+ qy = h * py - 0.5 * px
+ qz = h * px + 0.5 * py
+ s = _max(-qx, 0)
+ t = np.clip((qy - 0.5 * pz) / (m2 + 0.25), 0, 1)
+ a = m2 * (qx + s) ** 2 + qy * qy
+ b = m2 * (qx + 0.5 * t) ** 2 + (qy - m2 * t) ** 2
+ d2 = np.where(_min(qy, -qx * m2 - qy * 0.5) > 0, 0, _min(a, b))
+ return np.sqrt((d2 + qz * qz) / m2) * np.sign(_max(qz, -py))
+
+ return f
+
+
+# Platonic Solids
+
+
+@sdf3
+def tetrahedron(r):
+ def f(p):
+ x = p[:, 0]
+ y = p[:, 1]
+ z = p[:, 2]
+ return (_max(np.abs(x + y) - z, np.abs(x - y) + z) - r) / np.sqrt(3)
+
+ return f
+
+
+@sdf3
+def octahedron(r):
+ def f(p):
+ return (np.sum(np.abs(p), axis=1) - r) * np.tan(np.radians(30))
+
+ return f
+
+
+@sdf3
+def dodecahedron(r):
+ x, y, z = _normalize(((1 + np.sqrt(5)) / 2, 1, 0))
+
+ def f(p):
+ p = np.abs(p / r)
+ a = np.dot(p, (x, y, z))
+ b = np.dot(p, (z, x, y))
+ c = np.dot(p, (y, z, x))
+ q = (_max(_max(a, b), c) - x) * r
+ return q
+
+ return f
+
+
+@sdf3
+def icosahedron(r):
+ r *= 0.8506507174597755
+ x, y, z = _normalize(((np.sqrt(5) + 3) / 2, 1, 0))
+ w = np.sqrt(3) / 3
+
+ def f(p):
+ p = np.abs(p / r)
+ a = np.dot(p, (x, y, z))
+ b = np.dot(p, (z, x, y))
+ c = np.dot(p, (y, z, x))
+ d = np.dot(p, (w, w, w)) - x
+ return _max(_max(_max(a, b), c) - x, d) * r
+
+ return f
+
+
+# Shapes
+
+
+def lerp(x1, x2, t):
+ return (1 - t) * x1 + t * x2
+
+
+def bezier_via_lerp(p1, p2, p3, p4, t):
+ t = np.array(t).reshape(-1, 1)
+ p12 = lerp(p1, p2, t)
+ p23 = lerp(p2, p3, t)
+ p34 = lerp(p3, p4, t)
+ p1223 = lerp(p12, p23, t)
+ p2334 = lerp(p23, p34, t)
+ return lerp(p1223, p2334, t)
+
+
+@sdf3
+def bezier(
+ p1=ORIGIN,
+ p2=10 * X,
+ p3=10 * Y,
+ p4=10 * Z,
+ radius=None,
+ diameter=None,
+ steps=20,
+ **kwargs,
+):
+ """
+    Generate a :any:`capsule_chain` along a single bezier curve defined by four
+    control points, with a fixed or variable thickness.
+
+ Args:
+ p1, p2, p3, p4 (point vectors): the control points. Segment will start at p1 and end at p4.
+ radius,diameter (float or callable): either a fixed number as radius/diameter or a
+ callable taking a number within [0;1] and returning a radius/diameter, e.g.
+ the easing function :any:`ease.linear` or
+ ``radius=ease.linear.between(10,2)`` for a linear transition
+ between radii/diameters.
+ **kwargs: handed to :any:`capsule_chain`
+ """
+ if (radius is not None) == (diameter is not None):
+ raise ValueError(f"Specify either radius or diameter")
+ if radius is None:
+ radius = diameter / 2
+ if isinstance(radius, (float, int)):
+ radius = ease.constant(radius)
+ points = bezier_via_lerp(p1, p2, p3, p4, (t := np.linspace(0, 1, steps)))
+ lengths = np.linalg.norm(np.diff(points, axis=0), axis=1)
+
+ # TODO: better steps taking curvature and changing radius into account
+ t_eq = np.interp(
+ np.arange(0, lengths.sum() + radius.mean / 4, radius.mean / 4),
+ # np.linspace(0, lengths.sum(), steps),
+ np.hstack([0, np.cumsum(lengths)]),
+ t,
+ )
+ points = bezier_via_lerp(p1, p2, p3, p4, t_eq)
+ return capsule_chain(points, radius=radius, **kwargs)
+
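+# Sketch (control points and radii made up): a tapering tube along a bezier
+# curve, using the radius-easing idea from the docstring above:
+#
+#   tube = bezier(ORIGIN, 20 * X, 20 * (X + Y), 20 * Y, radius=ease.linear.between(4, 1))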
+
+def capsule_chain(points, radius=None, diameter=None, **kwargs):
+ if (radius is not None) == (diameter is not None):
+ raise ValueError(f"Specify either radius or diameter")
+ if radius is None:
+ radius = diameter / 2
+ lengths = np.linalg.norm(np.diff(points, axis=0), axis=1)
+ cumlengths = np.hstack([0, np.cumsum(lengths)])
+ relcumlengths = cumlengths / lengths.sum()
+ return union(
+ *[
+ capsule(
+ p1,
+ p2,
+ radius=(radius[a:b] if isinstance(radius, ease.Easing) else radius),
+ )
+ for p1, p2, a, b in zip(
+ points[0:-1], points[1:], relcumlengths[0:-1], relcumlengths[1:]
+ )
+ ],
+ **kwargs,
+ )
+
+
+def Thread(
+ pitch=5,
+ diameter=20,
+ offset=1,
+ left=False,
+):
+ """
+ An infinite thread
+ """
+ angleperz = -(2 * np.pi) / pitch
+ if left:
+ angleperz *= -1
+ thread = (
+ cylinder(swipediameter := diameter / 2 - offset)
+ .translate(X * offset)
+ .twist(angleperz)
+ )
+ thread.diameter = diameter
+ thread.pitch = pitch
+ thread.offset = offset
+ thread.left = left
+ return thread
+
+
+def Screw(
+ length=40,
+ head_shape=None,
+ head_height=10,
+ k_tip=10,
+ k_head=0,
+ **threadkwargs,
+):
+ if head_shape is None:
+ head_shape = d2.hexagon(30)
+ if not (thread := threadkwargs.pop("thread", None)):
+ thread = Thread(**threadkwargs)
+ k_tip = np.clip(k_tip, 0, min(thread.diameter, length)) or None
+ k_head = np.clip(k_head, 0, thread.diameter) or None
+ head = head_shape.extrude(head_height).translate(-Z * head_height / 2)
+ return head | (thread & slab(z0=0) & slab(z1=length).k(k_tip)).k(k_head)
+
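+# Usage sketch (all dimensions made up):
+#
+#   bolt = Screw(length=30, head_height=8, pitch=4, diameter=12)
+#   bolt.save("bolt.stl")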
+
+def RegularPolygonColumn(n, r=1):
+ ri = r * np.cos(np.pi / n)
+ return intersection(
+ *[slab(y0=-ri).rotate(a, Z) for a in np.arange(0, 2 * np.pi, 2 * np.pi / n)]
+ )
+
+
+# Positioning
+
+
+@op3
+def translate(other, offset):
+ def f(p):
+ return other(p - offset)
+
+ return f
+
+
+@op3
+def scale(other, factor):
+ try:
+ x, y, z = factor
+ except TypeError:
+ x = y = z = factor
+ s = (x, y, z)
+ m = min(x, min(y, z))
+
+ def f(p):
+ return other(p / s) * m
+
+ return f
+
+
+def rotation_matrix(angle, axis=Z):
+ """
+    Rodrigues' rotation formula for arbitrary-axis rotation:
+
+ https://en.m.wikipedia.org/wiki/Rodrigues%27_rotation_formula
+ """
+ try:
+ angle = angle.to("radians").m
+ except AttributeError:
+ pass
+ x, y, z = _normalize(axis)
+ s = np.sin(angle)
+ c = np.cos(angle)
+ m = 1 - c
+ # code moved from mfogleman's rotate() below
+ return np.array(
+ [
+ [m * x * x + c, m * x * y + z * s, m * z * x - y * s],
+ [m * x * y - z * s, m * y * y + c, m * y * z + x * s],
+ [m * z * x + y * s, m * y * z - x * s, m * z * z + c],
+ ]
+ ).T
+ # Alternative matrix construction (slightly slower than the above):
+ # kx, ky, kz = _normalize(axis)
+ # K = np.array([[0, -kz, ky], [kz, 0, -kx], [-ky, kx, 0]])
+ # return np.identity(3) + np.sin(angle) * K + (1 - np.cos(angle)) * (K @ K)
+ # Using np.linalg.matrix_power(K,2) is not faster
+
+
+@op3
+def rotate(other, angle, vector=Z):
+ matrix = rotation_matrix(axis=vector, angle=angle)
+
+ def f(p):
+ # np.dot(p, matrix) actually rotates *backwards*,
+ # (matrix @ p) or np.dot(matrix,p) would rotate
+ # forwards but that doesn't work with the shapes here.
+ # In this case, rotating backwards is actually what we want:
+ # we want to inverse the rotation to look up the SDF there
+ return other(np.dot(p, matrix))
+
+ return f
+
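+# Sketch: rotate() accepts plain radians or pint quantities (see rotation_matrix
+# above), e.g.
+#
+#   box((20, 10, 5)).rotate(np.pi / 4)        # 45° about Z
+#   box((20, 10, 5)).rotate(units("30°"), X)  # 30° about X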
+
+@op3
+def rotate_to(other, a, b):
+ a = _normalize(np.array(a))
+ b = _normalize(np.array(b))
+ dot = np.dot(b, a)
+ if dot == 1:
+ return other
+ if dot == -1:
+ return rotate(other, np.pi, _perpendicular(a))
+ angle = np.arccos(dot)
+ v = _normalize(np.cross(b, a))
+ return rotate(other, angle, v)
+
+
+@op3
+def orient(other, axis):
+    # quick fix for what is probably a problem in rotate_to()
+ axis = axis * np.array([-1, -1, 1])
+ return rotate_to(other, UP, axis)
+
+
+@op3
+def circular_array(other, count, offset=0):
+ other = other.translate(X * offset)
+ da = 2 * np.pi / count
+
+ def f(p):
+ x = p[:, 0]
+ y = p[:, 1]
+ z = p[:, 2]
+ d = np.hypot(x, y)
+ a = np.arctan2(y, x) % da
+ d1 = other(_vec(np.cos(a - da) * d, np.sin(a - da) * d, z))
+ d2 = other(_vec(np.cos(a) * d, np.sin(a) * d, z))
+ return _min(d1, d2)
+
+ return f
+
+
+# Alterations
+
+
+@op3
+def rotate_stretch(
+ sdf,
+ start=X,
+ stop=None,
+ axis=None,
+ angle=None,
+ stretch=ease.linear,
+ shear=None,
+ fling=None,
+ thicken=None,
+ transition=None,
+ transition_ease=ease.linear,
+ symmetric=False,
+ verbose=False,
+):
+ """
+    Grab the object at point ``start``, rotate it around an ``axis`` and stretch
+    it to point ``stop``, optionally modulating the thickness, shearing along the
+    rotation axis or flinging in the direction of 'centrifugal force' along the
+    way.
+
+ Args:
+ start (point vector): the starting control point. Defaults to ``X``.
+        stop (point vector): the end control point. Must not be collinear with ``start``.
+        axis (vector): the rotation axis, auto-determined from ``start`` and ``stop``
+            if not given. Specify this if ``start`` and ``stop`` are collinear
+            (e.g. opposite) or if you don't like the default direction of rotation.
+            Defaults to Z.
+ angle (float or unit): how far to rotate. Defaults to a full rotation
+ (``2*np.pi`` or ``units("360°")``) if ``stop`` is not given. More
+ than a full rotation is not possible.
+ symmetric (bool, optional): perform the stretch symmetrically in both
+ directions.
+ stretch (Easing): easing to apply in the stretching direction. Using an
+ easing that doesn't start at (0,0) and end at (1,1) does not make much
+ sense here.
+ shear (Easing): easing to apply for shearing in the rotation axis direction
+ fling (Easing): easing to apply for moving in the outwards direction
+ thicken (Easing): easing to apply to modify the thickness while stretching
+ transition (SDF): transition to another SDF along the way
+ transition_ease (Easing): easing for transition
+
+ Examples
+ --------
+
+ .. code-block:: python
+
+ from sdf import *
+
+ # make a quarter capsule sweep
+ # (”grab X, stretch it around origin to Y”)
+ capsule(ORIGIN,30*X,10).rotate_stretch(X,Y).save()
+
+ # make one edge of a box round
+ # (”grab X, stretch it around origin to Y”)
+ box(10).rotate_stretch(X,Y).save()
+
+ # full revolve of a capsule
+ # (”grab X, rotate it around Z axis to -X, symmetrically”)
+ capsule(ORIGIN, 30*X,10).rotate_stretch(X,-X,Z,symmetric=True).save()
+
+        # make a saddle-like thing by using an easing to make the stretch look 'stretchy'
+        capsule(ORIGIN,30*X,10).rotate_stretch(X,-X+Y,symmetric=True,stretch=ease.out_sine).save()
+
+ """
+ start = _normalize(start)
+ if stop is None:
+ axis = Z if axis is None else axis
+ angle = angle or 2 * np.pi
+ else: # so: stop is not None
+ if axis is not None or angle is not None:
+ raise errors.SDFCADError(
+ f"rotate_stretch(): Too many constraints. Check arguments."
+ )
+ stop = _normalize(stop)
+ if axis is None:
+ axis = np.cross(start, stop) # rotation axis
+ if np.allclose(L := np.linalg.norm(axis), 0):
+ raise errors.SDFCADError(
+ f"rotate_stretch(): "
+ f"Your stretch points {start = } and {stop = } are collinear "
+ f"(cross product length is {L}). "
+ f"Either fix this or specify a rotation axis= manually. "
+ )
+ else:
+ axis = axis / L
+
+ # matrix projecting a 3D p onto 2D rotation plane coordinates
+ basemat = np.array([start, _normalize(np.cross(axis, start))])
+
+ def angle_on_plane(p):
+ p_on_plane = (basemat @ p.T).T
+ if verbose:
+ # rich.print(f"angle_on_plane(): {p = }")
+ rich.print(f"angle_on_plane(): {p_on_plane = }")
+ angle = np.arctan2(p_on_plane[:, 1], p_on_plane[:, 0])
+ if not symmetric:
+ angle += 2 * np.pi
+ angle %= 2 * np.pi
+ return angle
+
+ if angle is None and stop is not None:
+ angle = angle_on_plane(np.array([stop]))[0]
+
+ if hasattr(angle, "to"):
+ angle = angle.to("radians").m
+
+ if verbose:
+ rich.print(f"{start,stop = }")
+ rich.print(f"{angle = }, {np.rad2deg(angle) = }")
+ rich.print(f"{axis = }")
+ rich.print(f"{basemat = }")
+
+ if symmetric: # mirror
+ # stretch = stretch.mirror(x=0, copy=True) # not this one 🤷
+        shear = shear.mirror(x=0, copy=True) if shear else shear
+        fling = fling.mirror(x=0, copy=True) if fling else fling
+        thicken = thicken.mirror(x=0, copy=True) if thicken else thicken
+
+ def f(p):
+ if verbose:
+ rich.print(f"{p = }")
+ p_on_plane = (basemat @ p.T).T
+ if verbose:
+ rich.print(f"{p_on_plane = }")
+ # project p onto rotation plane
+ p_angle_on_plane = angle_on_plane(p)
+ if verbose:
+ rich.print(f"{p_angle_on_plane = }")
+ p_angle_fraction = np.clip(p_angle_on_plane / angle, -1 if symmetric else 0, 1)
+ if verbose:
+ rich.print(f"{p_angle_fraction = }")
+ back_angle = stretch(p_angle_fraction) * angle
+ verbose and rich.print(f"{back_angle = }")
+ # the coordinates at which we'll look for the SDF
+ plook = p # start with original points
+ # ↔️ FLINGING outwards
+ # get 3D vector of p projected onto plane
+ if fling:
+ outwards = p_on_plane @ basemat
+ verbose and rich.print(f"{outwards = }")
+ verbose and rich.print(f"{np.linalg.norm(outwards, axis=1) = }")
+ outwards = outwards / np.linalg.norm(outwards, axis=1)[:, np.newaxis]
+ if verbose:
+ rich.print(f"{p_on_plane @ basemat = }")
+ rich.print(f"{outwards = }")
+ rich.print(f"{fling(p_angle_fraction) = }")
+ flinging = (fling(p_angle_fraction) * outwards.T).T
+ verbose and rich.print(f"{flinging = }")
+ plook = plook - flinging
+ # ↕️ SHEARING in rotation axis direction
+ if shear:
+ plook = plook - (shear(p_angle_fraction) * np.expand_dims(axis, axis=1)).T
+ verbose and rich.print(f"after shear: {plook = }")
+ # 🔄 ROTATION around rotation axis
+ # create many rotation matrices (along 3rd dim) for all angles
+ rotmatrix = rotation_matrix(axis=axis, angle=-back_angle)
+        # (this is a slow Python loop, I wonder how to optimize this 🤔)
+ plook = np.array([m @ p for p, m in zip(plook, rotmatrix)])
+ if verbose:
+ rich.print(f"after rotation: {plook = }")
+ rich.print(f"{thicken(p_angle_fraction).T = }")
+ rich.print(f"{plook = }")
+ rich.print(f"{sdf(plook) = }")
+ # get SDF at (reverse) flinged -> sheared -> rotated position
+ distance = sdf(plook)
+ verbose and rich.print(f"{distance = }")
+ # ➕ THICKENING
+ if thicken:
+ thickening = np.expand_dims(thicken(p_angle_fraction), axis=1)
+ verbose and rich.print(f"{thickening = }")
+ distance = distance - thickening
+ if transition is not None:
+ t = transition_ease(p_angle_fraction)[:, None]
+ verbose and rich.print(f"{t = }")
+ verbose and rich.print(f"{transition(plook) = }")
+ distance = t * transition(plook) + (1 - t) * distance
+ verbose and rich.print(f"{distance = }")
+ return distance
+
+ return f
+
+
+@op3
+def elongate(other, size):
+ def f(p):
+ q = np.abs(p) - size
+ x = q[:, 0].reshape((-1, 1))
+ y = q[:, 1].reshape((-1, 1))
+ z = q[:, 2].reshape((-1, 1))
+ w = _min(_max(x, _max(y, z)), 0)
+ return other(_max(q, 0)) + w
+
+ return f
+
+
+@op3
+def twist(other, k):
+ def f(p):
+ x = p[:, 0]
+ y = p[:, 1]
+ z = p[:, 2]
+ c = np.cos(k * z)
+ s = np.sin(k * z)
+ x2 = c * x - s * y
+ y2 = s * x + c * y
+ z2 = z
+ return other(_vec(x2, y2, z2))
+
+ return f
+
+
+@op3
+def twist_between(sdf, a, b, e=2 * np.pi * ease.in_out_cubic):
+ """
+ Twist an object between two control points
+
+ Args:
+ a, b (vectors): the two control points
+ e (scalar function): the angle to rotate, will be called with
+ values between 0 (at control point ``a``) and 1 (at control point
+ ``b``). Its result will be used as rotation angle in radians.
+ """
+
+ # unit vector from control point a to b
+ ab = (ab := b - a) / (L := np.linalg.norm(ab))
+
+ def f(p):
+ # project current point onto control direction, clip and apply easing
+ angle = e(np.clip((p - a) @ ab / L, 0, 1))
+ # move to origin ”-a”, then rotate, then move back ”+a”
+ # create many rotation matrices (along 3rd dim) for all angles
+ matrix = rotation_matrix(axis=ab, angle=-angle)
+ # apply rotation matrix to points moved back to origin
+        # (this is a slow Python loop, I wonder how to optimize this 🤔)
+ rotated = np.array([m @ p for p, m in zip((p - a), matrix)])
+ # move rotated points back to where they were
+ return sdf(rotated + a)
+
+ return f
+
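+# Sketch (values illustrative): twist a column by 90° between z = 0 and z = 30,
+# scaling the easing as in the default argument above:
+#
+#   twisted = box((10, 10, 60)).twist_between(ORIGIN, 30 * Z, e=(np.pi / 2) * ease.in_out_cubic)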
+
+@op3
+def bend(other, k):
+ def f(p):
+ x = p[:, 0]
+ y = p[:, 1]
+ z = p[:, 2]
+ c = np.cos(k * x)
+ s = np.sin(k * x)
+ x2 = c * x - s * y
+ y2 = s * x + c * y
+ z2 = z
+ return other(_vec(x2, y2, z2))
+
+ return f
+
+
+@op3
+def bend_linear(other, p0, p1, v, e=ease.linear):
+ p0 = np.array(p0)
+ p1 = np.array(p1)
+ v = -np.array(v)
+ ab = p1 - p0
+
+ def f(p):
+ t = np.clip(np.dot(p - p0, ab) / np.dot(ab, ab), 0, 1)
+ t = e(t).reshape((-1, 1))
+ return other(p + t * v)
+
+ return f
+
+
+@op3
+def bend_radial(other, r0, r1, dz, e=ease.linear):
+ def f(p):
+ x = p[:, 0]
+ y = p[:, 1]
+ z = p[:, 2]
+ r = np.hypot(x, y)
+ t = np.clip((r - r0) / (r1 - r0), 0, 1)
+ z = z - dz * e(t)
+ return other(_vec(x, y, z))
+
+ return f
+
+
+@op3
+def transition_linear(f0, f1, p0=-Z, p1=Z, e=ease.linear):
+ p0 = np.array(p0)
+ p1 = np.array(p1)
+ ab = p1 - p0
+
+ def f(p):
+ d1 = f0(p)
+ d2 = f1(p)
+ t = np.clip(np.dot(p - p0, ab) / np.dot(ab, ab), 0, 1)
+ t = e(t).reshape((-1, 1))
+ return t * d2 + (1 - t) * d1
+
+ return f
+
+
+@op3
+def transition_radial(f0, f1, r0=0, r1=1, e=ease.linear):
+ def f(p):
+ d1 = f0(p)
+ d2 = f1(p)
+ r = np.hypot(p[:, 0], p[:, 1])
+ t = np.clip((r - r0) / (r1 - r0), 0, 1)
+ t = e(t).reshape((-1, 1))
+ return t * d2 + (1 - t) * d1
+
+ return f
+
+
+@op3
+def wrap_around(other, x0, x1, r=None, e=ease.linear):
+ p0 = X * x0
+ p1 = X * x1
+ v = -Y
+ if r is None:
+ r = np.linalg.norm(p1 - p0) / (2 * np.pi)
+
+ def f(p):
+ x = p[:, 0]
+ y = p[:, 1]
+ z = p[:, 2]
+ d = np.hypot(x, y) - r
+ d = d.reshape((-1, 1))
+ a = np.arctan2(y, x)
+ t = (a + np.pi) / (2 * np.pi)
+ t = e(t).reshape((-1, 1))
+ q = p0 + (p1 - p0) * t + v * d
+ q[:, 2] = z
+ return other(q)
+
+ return f
+
+
+# 3D => 2D Operations
+
+
+@op32
+def slice(other):
+ # TODO: support specifying a slice plane
+ # TODO: probably a better way to do this
+ s = slab(z0=-1e-9, z1=1e-9)
+ a = other & s
+ b = other.negate() & s
+
+ def f(p):
+ p = _vec(p[:, 0], p[:, 1], np.zeros(len(p)))
+ A = a(p).reshape(-1)
+ B = -b(p).reshape(-1)
+ w = A <= 0
+ A[w] = B[w]
+ return A
+
+ return f
+
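+# Sketch: slice() returns the z = 0 cross-section of a 3D object as an SDF2, e.g.
+#
+#   footprint = box((20, 10, 5)).slice()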
+
+# Common
+
+union = op3(dn.union)
+union_legacy = op3(dn.union_legacy)
+difference = op3(dn.difference)
+difference_legacy = op3(dn.difference_legacy)
+intersection = op3(dn.intersection)
+intersection_legacy = op3(dn.intersection_legacy)
+blend = op3(dn.blend)
+negate = op3(dn.negate)
+dilate = op3(dn.dilate)
+erode = op3(dn.erode)
+shell = op3(dn.shell)
+repeat = op3(dn.repeat)
+mirror = op3(dn.mirror)
+modulate_between = op3(dn.modulate_between)
+stretch = op3(dn.stretch)
+shear = op3(dn.shear)
diff --git a/sdf/dn.py b/sdf/dn.py
new file mode 100644
index 0000000..fef7b39
--- /dev/null
+++ b/sdf/dn.py
@@ -0,0 +1,366 @@
+import itertools
+from functools import reduce, partial
+import warnings
+
+from . import ease
+
+import numpy as np
+
+_min = np.minimum
+_max = np.maximum
+
+
+def distance_to_plane(p, origin, normal):
+ """
+ Calculate the distance of a point ``p`` to the plane around ``origin`` with
+ normal ``normal``. This is dimension-independent, so e.g. the z-coordinate
+ can be omitted.
+
+ Args:
+ p (array): either [x,y,z] or [[x,y,z],[x,y,z],...]
+ origin (vector): a point on the plane
+ normal (vector): normal vector of the plane
+
+ Returns:
+        float or ndarray: distance(s) to the plane
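+
+    Example:
+        ``distance_to_plane(np.array([[0, 0, 2], [1, 1, -3]]), np.zeros(3), np.array([0, 0, 1]))``
+        returns ``array([2., 3.])``.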
+ """
+ normal = normal / np.linalg.norm(normal)
+ return abs((p - origin) @ normal)
+
+
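+# Note: minimum() and maximum() below are the polynomial smooth minimum/maximum
+# ("smin"/"smax") commonly used for filleted CSG: for r > 0 the hard edge where
+# the two distance fields meet is replaced by a blend of radius ~r, for r == 0
+# they fall back to the exact numpy minimum/maximum.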
+def minimum(a, b, r=0):
+ if r:
+ Δ = b - a
+ h = np.clip(0.5 + 0.5 * Δ / r, 0, 1)
+ return b - Δ * h - r * h * (1 - h)
+ else:
+ return np.minimum(a, b)
+
+
+def maximum(a, b, r=0):
+ if r:
+ Δ = b - a
+ h = np.clip(0.5 - 0.5 * Δ / r, 0, 1)
+ return b - Δ * h + r * h * (1 - h)
+ else:
+ return np.maximum(a, b)
+
+
+def union(*sdfs, chamfer=0, c=0, radius=0, r=0, fillet=0, f=0):
+ c = max(chamfer, c)
+ r = max(radius, r, fillet, f)
+ sqrt05 = np.sqrt(0.5)
+
+ def f(p):
+ sdfs_ = iter(sdfs)
+ d1 = next(sdfs_)(p)
+ for sdf in sdfs_:
+ d2 = sdf(p)
+ R = r or getattr(sdf, "_r", 0)
+ C = c or getattr(sdf, "_c", 0)
+ parts = (d1, d2)
+ if C:
+ parts = (minimum(d1, d2), (d1 + d2 - C) * sqrt05)
+ d1 = minimum(*parts, R)
+
+ return d1
+
+ return f
+
+
+def intersection(*sdfs, chamfer=0, c=0, radius=0, r=0, fillet=0, f=0):
+ c = max(chamfer, c)
+ r = max(radius, r, fillet, f)
+ sqrt05 = np.sqrt(0.5)
+
+ def f(p):
+ sdfs_ = iter(sdfs)
+ d1 = next(sdfs_)(p)
+ for sdf in sdfs_:
+ d2 = sdf(p)
+ R = r or getattr(sdf, "_r", 0)
+ C = c or getattr(sdf, "_c", 0)
+ parts = (d1, d2)
+ if C:
+ parts = (maximum(d1, d2), (d1 + d2 + C) * sqrt05)
+ d1 = maximum(*parts, R)
+
+ return d1
+
+ return f
+
+
+def difference(*sdfs, chamfer=0, c=0, radius=0, r=0, fillet=0, f=0):
+ c = max(chamfer, c)
+ r = max(radius, r, fillet, f)
+ sqrt05 = np.sqrt(0.5)
+
+ def f(p):
+ sdfs_ = iter(sdfs)
+ d1 = next(sdfs_)(p)
+ for sdf in sdfs_:
+ d2 = sdf(p)
+ R = r or getattr(sdf, "_r", 0)
+ C = c or getattr(sdf, "_c", 0)
+ parts = (d1, -d2)
+ if C:
+ parts = (maximum(d1, -d2), (d1 - d2 + C) * sqrt05)
+ d1 = maximum(*parts, R)
+
+ return d1
+
+ return f
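+
+# Illustrative usage sketch (via the op3-wrapped union/difference exported by
+# the package, assuming the box()/sphere() primitives are in scope):
+#
+#     f = union(box(30), sphere(20), fillet=5)        # rounded union
+#     g = difference(box(30), sphere(20), chamfer=2)  # chamfered cut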
+
+
+def union_legacy(a, *bs, r=None):
+ def f(p):
+ d1 = a(p)
+ for b in bs:
+ d2 = b(p)
+            K = r or getattr(b, "_r", None)
+ if K is None:
+ d1 = _min(d1, d2)
+ else:
+ h = np.clip(0.5 + 0.5 * (d2 - d1) / K, 0, 1)
+ m = d2 + (d1 - d2) * h
+ d1 = m - K * h * (1 - h)
+ return d1
+
+ return f
+
+
+def difference_legacy(a, *bs, r=None):
+ def f(p):
+ d1 = a(p)
+ for b in bs:
+ d2 = b(p)
+            K = r or getattr(b, "_r", None)
+ if K is None:
+ d1 = _max(d1, -d2)
+ else:
+ h = np.clip(0.5 - 0.5 * (d2 + d1) / K, 0, 1)
+ m = d1 + (-d2 - d1) * h
+ d1 = m + K * h * (1 - h)
+ return d1
+
+ return f
+
+
+def intersection_legacy(a, *bs, r=None):
+ def f(p):
+ d1 = a(p)
+ for b in bs:
+ d2 = b(p)
+            K = r or getattr(b, "_r", None)
+ if K is None:
+ d1 = _max(d1, d2)
+ else:
+ h = np.clip(0.5 - 0.5 * (d2 - d1) / K, 0, 1)
+ m = d2 + (d1 - d2) * h
+ d1 = m + K * h * (1 - h)
+ return d1
+
+ return f
+
+
+def blend(a, *bs, r=0.5):
+ def f(p):
+ d1 = a(p)
+ for b in bs:
+ d2 = b(p)
+            K = r or getattr(b, "_r", None)
+ d1 = K * d2 + (1 - K) * d1
+ return d1
+
+ return f
+
+
+def negate(other):
+ def f(p):
+ return -other(p)
+
+ return f
+
+
+def dilate(other, r):
+ def f(p):
+ return other(p) - r
+
+ return f
+
+
+def erode(other, r):
+ def f(p):
+ return other(p) + r
+
+ return f
+
+
+def shell(other, thickness=1, type="center"):
+ """
+ Keep only a margin of a given thickness around the object's boundary.
+
+ Args:
+ thickness (float): the resulting thickness
+ type (str): what kind of shell to generate.
+
+ ``"center"`` (default)
+ shell is spaced symmetrically around boundary
+ ``"outer"``
+ the resulting shell will be ``thickness`` larger than before
+ ``"inner"``
+ the resulting shell will be as large as before
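+
+    Examples
+    ========
+
+    .. code-block:: python
+
+        # illustrative sketch: a hollow sphere with a 1 unit thick wall
+        sphere(10).shell(1, type="center")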
+ """
+ return dict(
+ center=lambda p: np.abs(other(p)) - thickness / 2,
+ inner=other - other.erode(thickness),
+ outer=other.dilate(thickness) - other,
+ )[type]
+
+
+def modulate_between(sdf, a, b, e=ease.in_out_cubic):
+ """
+ Apply a distance offset transition between two control points
+ (e.g. make a rod thicker or thinner at some point or add a bump)
+
+ Args:
+ a, b (vectors): the two control points
+ e (scalar function): the distance offset function, will be called with
+ values between 0 (at control point ``a``) and 1 (at control point
+ ``b``). Its result will be subtracted from the given SDF, thus
+ enlarging the object by that value.
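+
+    Examples
+    ========
+
+    .. code-block:: python
+
+        # illustrative sketch: a rod with a smooth bump between -5*Z and 5*Z
+        capsule(-20*Z, 20*Z, 2).modulate_between(-5*Z, 5*Z, e=ease.in_out_sine.symmetric)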
+ """
+
+ # unit vector from control point a to b
+ ab = (ab := b - a) / (L := np.linalg.norm(ab))
+
+ def f(p):
+ # project current point onto control direction, clip and apply easing
+ offset = e(np.clip((p - a) @ ab / L, 0, 1))
+ return (dist := sdf(p)) - offset.reshape(dist.shape)
+
+ return f
+
+
+def stretch(sdf, a, b, symmetric=False, e=ease.linear):
+ """
+ Grab the object at point ``a`` and stretch the entire plane to ``b``.
+
+ Args:
+ a, b (point vectors): the control points
+ symmetric (bool): also stretch the same into the other direction.
+ e (Easing): easing to apply
+
+ Examples
+ ========
+
+ .. code-block:: python
+
+ # make a capsule
+ sphere(5).stretch(ORIGIN, 10*Z).save() # same as capsule(ORIGIN, 10*Z, 5)
+ # make an egg
+ sphere(5).stretch(ORIGIN, 10*Z, e=ease.smoothstep[:0.44]).save()
+ """
+ ab = (ab := b - a) / (L := np.linalg.norm(ab))
+
+ def f(p):
+ # s = ”how far are we between a and b as fraction?”
+ # if symmetric=True this also goes into the negative direction
+ s = np.clip((p - a) @ ab / L, -1 if symmetric else 0, 1)
+ # we return the sdf at a point 'behind' (p minus ...)
+ # the current point, but we go only as far back as the stretch distance
+ # at max
+ return sdf(p - (np.sign(s) * e(abs(s)) * L * ab[:, np.newaxis]).T)
+
+ return f
+
+
+def shear(sdf, fix, grab, move, e=ease.linear):
+ """
+ Grab the object at point ``grab`` and shear the entire plane in direction
+ ``move``, keeping point ``fix`` in place. If ``move`` is orthogonal to the
+ direction ``fix``->``grab``, then this operation is a shear.
+
+ Args:
+ fix, grab (point vectors): the control points
+ move (point vector): direction to shear to
+ e (Easing): easing to apply
+
+ Examples
+ ========
+
+ .. code-block:: python
+
+        # shear a tall box sideways
+ box([20,10,50]).shear(fix=-15*Z, grab=15*Z, move=-5*X, e=ease.smoothstep)
+ """
+ ab = (ab := grab - fix) / (L := np.linalg.norm(ab))
+
+ def f(p):
+ # s = ”how far are we between a and b as fraction?”
+ s = (p - fix) @ ab / L
+ return sdf(p - move * np.expand_dims(e(np.clip(s, 0, 1)), axis=1))
+
+ return f
+
+
+def mirror(other, direction, at=0):
+ """
+    Mirror around the plane defined by the ``at`` reference point and the
+    ``direction`` normal vector.
+
+ Args:
+ direction (vector): direction to mirror to (e.g. :any:`X` to mirror along X axis)
+ at (3D vector): point to mirror at. Default is the origin.
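+
+    Examples
+    ========
+
+    .. code-block:: python
+
+        # illustrative sketch (assuming the usual translate() operation):
+        # a sphere moved to +10*X ends up mirrored to -10*X
+        sphere(5).translate(10*X).mirror(X)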
+ """
+ direction = direction / np.linalg.norm(direction)
+
+ def f(p):
+ projdir = np.expand_dims((p - at) @ direction, axis=1) * direction
+ # mirrored point:
+ # - project 'p' onto 'direction' (result goes into 'projdir' direction)
+ # - projected point is at 'at + projdir'
+ # - remember direction from projected point to the original point (p - (at + projdir))
+ # - from origin 'at' go backwards the projected direction (at - projdir)
+ # - from that target, move along the remembered direction (p - (at + projdir))
+ # - pmirr = at - projdir + (p - (at + projdir))
+ # - the 'at' cancels out, the projdir is subtracted twice from the point
+ return other(p - 2 * projdir)
+
+ return f
+
+
+def repeat(other, spacing, count=None, padding=0):
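+    """
+    Repeat the object on a regular grid.
+
+    Args:
+        spacing (vector): grid period per axis; a component of 0 disables
+            repetition along that axis
+        count (vector, optional): if given, limits the repetition to ``count``
+            instances in each direction along every axis
+        padding (int or vector): how many neighboring grid cells to evaluate
+            as well; needed when instances are large enough to overlap their
+            neighbors
+    """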
+ count = np.array(count) if count is not None else None
+ spacing = np.array(spacing)
+
+ def neighbors(dim, padding, spacing):
+ try:
+ padding = [padding[i] for i in range(dim)]
+ except Exception:
+ padding = [padding] * dim
+ try:
+ spacing = [spacing[i] for i in range(dim)]
+ except Exception:
+ spacing = [spacing] * dim
+ for i, s in enumerate(spacing):
+ if s == 0:
+ padding[i] = 0
+ axes = [list(range(-p, p + 1)) for p in padding]
+ return list(itertools.product(*axes))
+
+ def f(p):
+ q = np.divide(p, spacing, out=np.zeros_like(p), where=spacing != 0)
+ if count is None:
+ index = np.round(q)
+ else:
+ index = np.clip(np.round(q), -count, count)
+
+ indexes = [index + n for n in neighbors(p.shape[-1], padding, spacing)]
+ A = [other(p - spacing * i) for i in indexes]
+ a = A[0]
+ for b in A[1:]:
+ a = _min(a, b)
+ return a
+
+ return f
diff --git a/sdf/ease.py b/sdf/ease.py
new file mode 100644
index 0000000..743562a
--- /dev/null
+++ b/sdf/ease.py
@@ -0,0 +1,637 @@
+# system modules
+from dataclasses import dataclass
+from typing import Callable
+import itertools
+import functools
+import warnings
+
+# external modules
+import numpy as np
+import scipy.optimize
+
+
+@dataclass
+@functools.total_ordering
+class Extremum:
+ """
+ Container for min and max in Easing
+ """
+
+ pos: float
+ value: float
+
+ def __eq__(self, other):
+ return self.value == other.value
+
+ def __lt__(self, other):
+ return self.value < other.value
+
+
+@dataclass
+@functools.total_ordering
+class Easing:
+ """
+ A function defined on the interval [0;1]
+ """
+
+    f: Callable[[float], float]
+ name: str
+
+ def modifier(decorated_fun):
+ @functools.wraps(decorated_fun)
+ def wrapper(self, *args, **kwargs):
+ newfun = decorated_fun(self, *args, **kwargs)
+ arglist = ",".join(
+ itertools.chain(map(str, args), (f"{k}={v}" for k, v in kwargs.items()))
+ )
+ newfun.__name__ = f"{self.f.__name__}.{decorated_fun.__name__}({arglist})"
+ return type(self)(f=newfun, name=newfun.__name__)
+
+ return wrapper
+
+ def __repr__(self):
+ return self.name
+
+ def __str__(self):
+ return self.name
+
+ @functools.cached_property
+ def is_ascending(self):
+ return np.all(np.diff(self.f(np.linspace(0, 1, 100))) >= 0)
+
+ @functools.cached_property
+ def is_symmetric(self):
+ t = np.linspace(0, 0.5, 100)
+ return np.allclose(self.f(t), self.f(1 - t))
+
+ @property
+ @modifier
+ def reverse(self):
+ """
+ Revert the function so it goes the other way round (starts at the end)
+ """
+ return lambda t: self.f(1 - t)
+
+ @property
+ @modifier
+ def symmetric(self):
+ """
+ Mirror and squash function to make it symmetric
+ """
+ return lambda t: self.f(-2 * (np.abs(t - 0.5) - 0.5))
+
+ @modifier
+ def mirror(self, x=None, y=None, copy=False):
+ """
+ Mirror function around an x and/or y value.
+
+ Args:
+ x (float): x value to mirror around
+ y (float): y value to mirror around
+ copy (bool): when mirroring around x, do copy-mirror
+ """
+ if (x, y) == (None, None):
+ x = 0.5
+
+ def mirrored(t):
+ if x is not None:
+ t = 2 * x - t
+ if copy:
+ t = np.abs(-t)
+ if y is None:
+ return self.f(t)
+ else:
+ return y - self.f(t)
+
+ return mirrored
+
+ @modifier
+ def clip(self, min=None, max=None):
+ """
+ Clip function at low and/or high values
+ """
+ if min is None and max is None:
+ min = 0
+ max = 1
+ return lambda t: np.clip(self.f(t), min, max)
+
+ @modifier
+ def clip_input(self, min=None, max=None):
+ """
+ Clip input parameter, i.e. extrapolate constantly outside the interval.
+ """
+ if min is None and max is None:
+ min = 0
+ max = 1
+ return lambda t: self.f(np.clip(t, min, max))
+
+ @property
+ @modifier
+ def clipped(self):
+ """
+ Clipped parameter and result to [0;1]
+ """
+ return lambda t: np.clip(self(np.clip(t, 0, 1)), 0, 1)
+
+ @modifier
+ def append(self, other, e=None):
+ """
+ Append another easing function and squish both into the [0;1] interval
+ """
+ if e is None:
+ e = in_out_square
+
+ def f(t):
+ mix = e(t)
+ return self.f(t * 2) * (1 - mix) + other((t - 0.5) * 2) * mix
+
+ return f
+
+ @modifier
+ def prepend(self, other, e=None):
+ """
+ Prepend another easing function and squish both into the [0;1] interval
+ """
+ if e is None:
+ e = in_out_square
+
+ def f(t):
+ mix = e(t)
+ return other(t * 2) * (1 - mix) + self.f((t - 0.5) * 2) * mix
+
+ return f
+
+ @modifier
+ def shift(self, offset):
+ """
+ Shift function on x-axis into positive direction by ``offset``.
+ """
+ return lambda t: self.f(t - offset)
+
+ @modifier
+ def repeat(self, n=2):
+ """
+ Repeat the function a total of n times in the interval [0;1].
+ """
+ return lambda t: self.f(t % (1 / n) * n)
+
+ @modifier
+ def multiply(self, factor):
+ """
+ Scale function by ``factor``
+ """
+ if isinstance(factor, Easing):
+ return lambda t: self(t) * factor(t)
+ else:
+ return lambda t: factor * self.f(t)
+
+ @modifier
+ def add(self, offset):
+ """
+ Add ``offset`` to function
+ """
+ if isinstance(offset, Easing):
+ return lambda t: self(t) + offset(t)
+ else:
+ return lambda t: self.f(t) + offset
+
+ def __add__(self, offset):
+ return self.add(offset)
+
+ def __radd__(self, offset):
+ return self.add(offset)
+
+ def __sub__(self, offset):
+ return self.add(-offset)
+
+ def __rsub__(self, offset):
+        return self.multiply(-1).add(offset)
+
+ def __mul__(self, factor):
+ return self.multiply(factor)
+
+ def __rmul__(self, factor):
+ return self.multiply(factor)
+
+ def __neg__(self):
+ return self.multiply(-1)
+
+ def __truediv__(self, factor):
+ return self.multiply(1 / factor)
+
+ def __or__(self, other):
+ return self.transition(other)
+
+ def __rshift__(self, offset):
+ return self.shift(offset)
+
+ def __lshift__(self, offset):
+ return self.shift(-offset)
+
+ def __getitem__(self, index):
+ if isinstance(index, Easing):
+ return self.chain(index)
+ if isinstance(index, slice):
+ return self.zoom(
+ 0 if index.start is None else index.start,
+ 1 if index.stop is None else index.stop,
+ )
+ else:
+ raise ValueError(
+ f"{index = } has to be slice of floats or an easing function"
+ )
+
+ @modifier
+ def chain(self, f=None):
+ """
+ Feed parameter through the given function before evaluating this function.
+ """
+ if f is None:
+ f = self.f
+ return lambda t: self.f(f(t))
+
+ @modifier
+ def zoom(self, left, right=None):
+ """
+ Arrange so that the interval [left;right] is moved into [0;1]
+ If only one argument is given, zoom in/out by moving edges that far.
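+
+        e.g. ``smoothstep[0.25:0.75]`` (the slice syntax maps to ``zoom``)
+        evaluates only the inner part of ``smoothstep`` and rescales it back
+        onto [0;1].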
+ """
+ if left is not None and right is None:
+ if left >= 0.5:
+ raise ValueError(
+ f"{left = } is > 0.5 which doesn't make sense (bounds would cross)"
+ )
+ left = left
+ right = 1 - left
+ if left >= right:
+ raise ValueError(f"{right = } bound must be greater than {left = }")
+ return self.chain(linear.between(left, right)).f
+
+ @modifier
+ def between(self, left=0, right=1, e=None):
+ """
+        Arrange so that ``f(0)==left`` and ``f(1)==right``.
+ """
+ f0, f1 = self.f(np.array([0, 1]))
+ la = f0 - left
+ lb = f1 - right
+ if e is None: # linear is defined later
+ e = (
+ self # use ourself as transition when we're ascending within [0;1]
+ if (self.is_ascending and np.allclose(self.f(np.array([0, 1])), [0, 1]))
+ else linear
+ )
+
+ def f(t):
+ t_ = e(t)
+ return self.f(t_) - (la * (1 - t_)) - lb * t_
+
+ return f
+
+ @modifier
+ def transition(self, other, e=None):
+ """
+        Transition from one easing to another.
+ """
+ if e is None:
+ e = linear
+
+ def f(t):
+ t_ = e(t)
+ return self.f(t) * (1 - t_) + other(t) * t_
+
+ return f
+
+ @classmethod
+ def function(cls, decorated_fun):
+ return cls(f=decorated_fun, name=decorated_fun.__name__)
+
+ def plot(self, *others, xlim=(0, 1), ax=None):
+ import matplotlib.pyplot as plt # lazy import for speed
+ from cycler import cycler
+
+ if ax is None:
+ fig, ax_ = plt.subplots()
+ else:
+ ax_ = ax
+
+ try:
+ ax_.set_prop_cycle(
+ cycler(linestyle=["solid", "dashed", "dotted"], linewidth=[1, 1, 2])
+ * plt.rcParams["axes.prop_cycle"]
+ )
+ except ValueError as e:
+ pass
+
+ t = np.linspace(*xlim, 1000)
+ funs = list(others or [])
+ if isinstance(self, Easing):
+ funs.insert(0, self)
+ for f in funs:
+ ax_.plot(t, f(t), label=getattr(f, "name", getattr(f, "__name__", str(f))))
+ ax_.legend(ncol=int(np.ceil(len(ax_.get_lines()) / 10)))
+ if ax is None:
+ plt.show()
+ return ax_
+
+ @functools.cached_property
+ def min(self):
+ v = self.f(t := np.linspace(0, 1, 1000))
+ approxmin = Extremum(pos=t[i := np.argmin(v)], value=v[i])
+ opt = scipy.optimize.minimize(self, x0=[approxmin.pos], bounds=[(0, 1)])
+ optmin = Extremum(pos=opt.x[0], value=opt.fun)
+ return min(approxmin, optmin)
+
+ @functools.cached_property
+ def max(self):
+ """
+ Determine the maximum value
+ """
+ v = self.f(t := np.linspace(0, 1, 1000))
+ approxmax = Extremum(pos=t[i := np.argmax(v)], value=v[i])
+ opt = scipy.optimize.minimize(-self, x0=[approxmax.pos], bounds=[(0, 1)])
+ optmax = Extremum(pos=opt.x[0], value=-opt.fun)
+ return max(approxmax, optmax)
+
+ @functools.cached_property
+ def mean(self):
+ return np.mean(self.f(np.linspace(0, 1, 1000)))
+
+ def __lt__(self, e):
+ return np.all(self.f(t := np.linspace(0, 1, 50)) < e.f(t))
+
+ def __eq__(self, e):
+ return np.allclose(self.f(t := np.linspace(0, 1, 50)), e.f(t))
+
+ def __call__(self, t):
+ return self.f(t)
+
+
+@Easing.function
+def linear(t):
+ return t
+
+
+@Easing.function
+def in_quad(t):
+ return t * t
+
+
+@Easing.function
+def out_quad(t):
+ return -t * (t - 2)
+
+
+@Easing.function
+def in_out_quad(t):
+ u = 2 * t - 1
+ a = 2 * t * t
+ b = -0.5 * (u * (u - 2) - 1)
+ return np.where(t < 0.5, a, b)
+
+
+@Easing.function
+def in_cubic(t):
+ return t * t * t
+
+
+@Easing.function
+def out_cubic(t):
+ u = t - 1
+ return u * u * u + 1
+
+
+@Easing.function
+def in_out_cubic(t):
+ u = t * 2
+ v = u - 2
+ a = 0.5 * u * u * u
+ b = 0.5 * (v * v * v + 2)
+ return np.where(u < 1, a, b)
+
+
+@Easing.function
+def in_quart(t):
+ return t * t * t * t
+
+
+@Easing.function
+def out_quart(t):
+ u = t - 1
+ return -(u * u * u * u - 1)
+
+
+@Easing.function
+def in_out_quart(t):
+ u = t * 2
+ v = u - 2
+ a = 0.5 * u * u * u * u
+ b = -0.5 * (v * v * v * v - 2)
+ return np.where(u < 1, a, b)
+
+
+@Easing.function
+def in_quint(t):
+ return t * t * t * t * t
+
+
+@Easing.function
+def out_quint(t):
+ u = t - 1
+ return u * u * u * u * u + 1
+
+
+@Easing.function
+def in_out_quint(t):
+ u = t * 2
+ v = u - 2
+ a = 0.5 * u * u * u * u * u
+ b = 0.5 * (v * v * v * v * v + 2)
+ return np.where(u < 1, a, b)
+
+
+@Easing.function
+def in_sine(t):
+ return -np.cos(t * np.pi / 2) + 1
+
+
+@Easing.function
+def out_sine(t):
+ return np.sin(t * np.pi / 2)
+
+
+@Easing.function
+def in_out_sine(t):
+ return -0.5 * (np.cos(np.pi * t) - 1)
+
+
+@Easing.function
+def in_expo(t):
+ a = np.zeros(len(t))
+ b = 2 ** (10 * (t - 1))
+ return np.where(t == 0, a, b)
+
+
+@Easing.function
+def out_expo(t):
+ a = np.zeros(len(t)) + 1
+ b = 1 - 2 ** (-10 * t)
+ return np.where(t == 1, a, b)
+
+
+@Easing.function
+def in_out_expo(t):
+ zero = np.zeros(len(t))
+ one = zero + 1
+ a = 0.5 * 2 ** (20 * t - 10)
+ b = 1 - 0.5 * 2 ** (-20 * t + 10)
+ return np.where(t == 0, zero, np.where(t == 1, one, np.where(t < 0.5, a, b)))
+
+
+@Easing.function
+def in_circ(t):
+ return -1 * (np.sqrt(1 - t * t) - 1)
+
+
+@Easing.function
+def out_circ(t):
+ u = t - 1
+ return np.sqrt(1 - u * u)
+
+
+@Easing.function
+def in_out_circ(t):
+ u = t * 2
+ v = u - 2
+ a = -0.5 * (np.sqrt(1 - u * u) - 1)
+ b = 0.5 * (np.sqrt(1 - v * v) + 1)
+ return np.where(u < 1, a, b)
+
+
+@Easing.function
+def in_elastic(t, k=0.5):
+ u = t - 1
+ return -1 * (2 ** (10.0 * u) * np.sin((u - k / 4) * (2 * np.pi) / k))
+
+
+@Easing.function
+def out_elastic(t, k=0.5):
+ return 2 ** (-10.0 * t) * np.sin((t - k / 4) * (2 * np.pi / k)) + 1
+
+
+@Easing.function
+def in_out_elastic(t, k=0.5):
+ u = t * 2
+ v = u - 1
+ a = -0.5 * (2 ** (10 * v) * np.sin((v - k / 4) * 2 * np.pi / k))
+ b = 2 ** (-10 * v) * np.sin((v - k / 4) * 2 * np.pi / k) * 0.5 + 1
+ return np.where(u < 1, a, b)
+
+
+@Easing.function
+def in_back(t):
+ k = 1.70158
+ return t * t * ((k + 1) * t - k)
+
+
+@Easing.function
+def out_back(t):
+ k = 1.70158
+ u = t - 1
+ return u * u * ((k + 1) * u + k) + 1
+
+
+@Easing.function
+def in_out_back(t):
+ k = 1.70158 * 1.525
+ u = t * 2
+ v = u - 2
+ a = 0.5 * (u * u * ((k + 1) * u - k))
+ b = 0.5 * (v * v * ((k + 1) * v + k) + 2)
+ return np.where(u < 1, a, b)
+
+
+@Easing.function
+def in_bounce(t):
+ return 1 - out_bounce(1 - t)
+
+
+@Easing.function
+def out_bounce(t):
+ a = (121 * t * t) / 16
+ b = (363 / 40 * t * t) - (99 / 10 * t) + 17 / 5
+ c = (4356 / 361 * t * t) - (35442 / 1805 * t) + 16061 / 1805
+ d = (54 / 5 * t * t) - (513 / 25 * t) + 268 / 25
+ return np.where(t < 4 / 11, a, np.where(t < 8 / 11, b, np.where(t < 9 / 10, c, d)))
+
+
+@Easing.function
+def in_out_bounce(t):
+ a = in_bounce(2 * t) * 0.5
+ b = out_bounce(2 * t - 1) * 0.5 + 0.5
+ return np.where(t < 0.5, a, b)
+
+
+@Easing.function
+def in_square(t):
+ return np.heaviside(t - 1, 0)
+
+
+@Easing.function
+def out_square(t):
+ return np.heaviside(t + 1, 0)
+
+
+@Easing.function
+def in_out_square(t):
+ return np.heaviside(t - 0.5, 0)
+
+
+def constant(x):
+ return Easing(f=lambda t: np.full_like(t, x), name=f"constant({x})")
+
+
+zero = constant(0)
+one = constant(1)
+
+
+@Easing.function
+def smoothstep(t):
+ t = np.clip(t, 0, 1)
+ return 3 * t * t - 2 * t * t * t
+
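+# Illustrative sketch of the modifier API defined on Easing above:
+#
+#     bump = in_out_sine.symmetric        # rises to 1 at t=0.5, back to 0 at t=1
+#     shifted = out_bounce >> 0.25        # same curve, shifted right by 0.25
+#     inner = smoothstep[0.25:0.75]       # zoom into the inner part of smoothstep
+#     scaled = linear.multiply(2) - 1     # maps [0;1] onto [-1;1]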
+
+def _main():
+ import matplotlib.pyplot as plt
+ from cycler import cycler
+
+ plt.rcParams["axes.prop_cycle"] *= cycler(
+ linestyle=["solid", "dashed", "dotted"], linewidth=[1, 2, 3]
+ )
+ plt.rcParams["figure.autolayout"] = True
+ plt.rcParams["axes.grid"] = True
+ plt.rcParams["axes.axisbelow"] = True
+ plt.rcParams["legend.fontsize"] = "small"
+ LOCALS = globals()
+ print(f"{LOCALS = }")
+ fig, axes = plt.subplots(nrows=2)
+ Easing.plot(
+ *sorted((obj for n, obj in LOCALS.items() if isinstance(obj, Easing)), key=str),
+ ax=axes[0],
+ )
+ Easing.plot(
+ in_sine.symmetric,
+ in_out_sine.symmetric.multiply(-0.6),
+ linear.symmetric.multiply(-0.7),
+ in_out_sine.multiply(-0.6).symmetric,
+ out_sine.multiply(-0.6).reverse.symmetric.multiply(2),
+ out_bounce.add(-0.5),
+ ax=axes[1],
+ )
+ axes[0].set_title("Standard")
+ axes[1].set_title("Derived")
+ plt.show()
+
+
+if __name__ == "__main__":
+ _main()
diff --git a/sdf/errors.py b/sdf/errors.py
new file mode 100644
index 0000000..52f642e
--- /dev/null
+++ b/sdf/errors.py
@@ -0,0 +1,42 @@
+import warnings
+import functools
+
+
+class SDFCADError(Exception):
+ pass
+
+
+class SDFCADInfiniteObjectError(Exception):
+ """
+ Error raised when an infinite object is encountered where not suitable.
+ """
+
+ pass
+
+
+class SDFCADWarning(Warning):
+ pass
+
+
+class SDFCADAlphaQualityWarning(SDFCADWarning):
+ show = True
+
+
+def alpha_quality(decorated_fun):
+ @functools.wraps(decorated_fun)
+ def wrapper(*args, **kwargs):
+ if SDFCADAlphaQualityWarning.show:
+ warnings.warn(
+ f"{decorated_fun.__name__}() is alpha quality "
+ f"and might give wrong results. Use with care. "
+ f"Hide this warning by setting sdf.errors.SDFCADAlphaQualityWarning.show=False.",
+ SDFCADAlphaQualityWarning,
+ )
+ with warnings.catch_warnings():
+ # Don't reissue nested alpha quality warnings
+ warnings.simplefilter("ignore", SDFCADAlphaQualityWarning)
+ return decorated_fun(*args, **kwargs)
+ else:
+ return decorated_fun(*args, **kwargs)
+
+ return wrapper
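+
+
+# Usage sketch: decorate an operation that is still experimental, e.g.
+#
+#     @alpha_quality
+#     def my_experimental_op(...):   # hypothetical name, for illustration only
+#         ...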
diff --git a/sdf/mesh.py b/sdf/mesh.py
new file mode 100644
index 0000000..c3892cd
--- /dev/null
+++ b/sdf/mesh.py
@@ -0,0 +1,282 @@
+from functools import partial
+from multiprocessing.pool import ThreadPool
+from skimage import measure
+
+import multiprocessing
+import itertools
+import numpy as np
+import time
+
+from . import progress, stl
+
+WORKERS = multiprocessing.cpu_count()
+SAMPLES = 2**18
+BATCH_SIZE = 32
+
+
+def _marching_cubes(volume, level=0):
+ verts, faces, _, _ = measure.marching_cubes(volume, level)
+ return verts[faces].reshape((-1, 3))
+
+
+def _cartesian_product(*arrays):
+ la = len(arrays)
+ dtype = np.result_type(*arrays)
+ arr = np.empty([len(a) for a in arrays] + [la], dtype=dtype)
+ for i, a in enumerate(np.ix_(*arrays)):
+ arr[..., i] = a
+ return arr.reshape(-1, la)
+
+
+def _skip(sdf, job):
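+    # Decide whether a whole batch can be skipped: if |sdf| at the batch center
+    # exceeds the center-to-corner distance, the surface cannot cross the batch
+    # (for a well-behaved SDF); as an extra safeguard the batch is only skipped
+    # when all eight corner samples also share the same sign.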
+ X, Y, Z = job
+ x0, x1 = X[0], X[-1]
+ y0, y1 = Y[0], Y[-1]
+ z0, z1 = Z[0], Z[-1]
+ x = (x0 + x1) / 2
+ y = (y0 + y1) / 2
+ z = (z0 + z1) / 2
+ r = abs(sdf(np.array([(x, y, z)])).reshape(-1)[0])
+ d = np.linalg.norm(np.array((x - x0, y - y0, z - z0)))
+ if r <= d:
+ return False
+ corners = np.array(list(itertools.product((x0, x1), (y0, y1), (z0, z1))))
+ values = sdf(corners).reshape(-1)
+ same = np.all(values > 0) if values[0] > 0 else np.all(values < 0)
+ return same
+
+
+def _worker(sdf, job, sparse):
+ X, Y, Z = job
+ if sparse and _skip(sdf, job):
+ return None
+ # return _debug_triangles(X, Y, Z)
+ P = _cartesian_product(X, Y, Z)
+ volume = sdf(P).reshape((len(X), len(Y), len(Z)))
+ try:
+ points = _marching_cubes(volume)
+ except Exception:
+ return []
+ # return _debug_triangles(X, Y, Z)
+ scale = np.array([X[1] - X[0], Y[1] - Y[0], Z[1] - Z[0]])
+ offset = np.array([X[0], Y[0], Z[0]])
+ return points * scale + offset
+
+
+def _estimate_bounds(sdf):
+ # TODO: raise exception if bound estimation fails
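+    # Strategy: start from a huge cube, sample a coarse 16x16x16 grid, keep the
+    # cells whose |sdf| is within half a cell diagonal of the surface, shrink
+    # the bounds around those cells and repeat until the cell size converges.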
+ s = 16
+ x0 = y0 = z0 = -1e9
+ x1 = y1 = z1 = 1e9
+ prev = None
+ for i in range(32):
+ X = np.linspace(x0, x1, s)
+ Y = np.linspace(y0, y1, s)
+ Z = np.linspace(z0, z1, s)
+ d = np.array([X[1] - X[0], Y[1] - Y[0], Z[1] - Z[0]])
+ threshold = np.linalg.norm(d) / 2
+ if threshold == prev:
+ break
+ prev = threshold
+ P = _cartesian_product(X, Y, Z)
+ volume = sdf(P).reshape((len(X), len(Y), len(Z)))
+ where = np.argwhere(np.abs(volume) <= threshold)
+ x1, y1, z1 = (x0, y0, z0) + where.max(axis=0) * d + d / 2
+ x0, y0, z0 = (x0, y0, z0) + where.min(axis=0) * d - d / 2
+ return ((x0, y0, z0), (x1, y1, z1))
+
+
+def generate(
+ sdf,
+ step=None,
+ bounds=None,
+ samples=SAMPLES,
+ workers=WORKERS,
+ batch_size=BATCH_SIZE,
+ verbose=True,
+ sparse=True,
+):
+ start = time.time()
+
+ if bounds is None:
+ bounds = _estimate_bounds(sdf)
+ (x0, y0, z0), (x1, y1, z1) = bounds
+
+ if step is None and samples is not None:
+ volume = (x1 - x0) * (y1 - y0) * (z1 - z0)
+ step = (volume / samples) ** (1 / 3)
+
+ try:
+ dx, dy, dz = step
+ except TypeError:
+ dx = dy = dz = step
+
+ if verbose:
+ print("min %g, %g, %g" % (x0, y0, z0))
+ print("max %g, %g, %g" % (x1, y1, z1))
+ print("step %g, %g, %g" % (dx, dy, dz))
+
+ X = np.arange(x0, x1, dx)
+ Y = np.arange(y0, y1, dy)
+ Z = np.arange(z0, z1, dz)
+
+ s = batch_size
+ Xs = [X[i : i + s + 1] for i in range(0, len(X), s)]
+ Ys = [Y[i : i + s + 1] for i in range(0, len(Y), s)]
+ Zs = [Z[i : i + s + 1] for i in range(0, len(Z), s)]
+
+ batches = list(itertools.product(Xs, Ys, Zs))
+ num_batches = len(batches)
+ num_samples = sum(len(xs) * len(ys) * len(zs) for xs, ys, zs in batches)
+
+ if verbose:
+ print(
+ "%d samples in %d batches with %d workers"
+ % (num_samples, num_batches, workers)
+ )
+
+ points = []
+ skipped = empty = nonempty = 0
+ bar = progress.Bar(num_batches, enabled=verbose)
+ f = partial(_worker, sdf, sparse=sparse)
+ with ThreadPool(workers) as pool:
+ for result in pool.imap(f, batches):
+ bar.increment(1)
+ if result is None:
+ skipped += 1
+ elif len(result) == 0:
+ empty += 1
+ else:
+ nonempty += 1
+ points.extend(result)
+ bar.done()
+
+ if verbose:
+ print("%d skipped, %d empty, %d nonempty" % (skipped, empty, nonempty))
+ triangles = len(points) // 3
+ seconds = time.time() - start
+ print("%d triangles in %g seconds" % (triangles, seconds))
+
+ return points
+
+
+def save(path, *args, **kwargs):
+ points = generate(*args, **kwargs)
+ if str(path).lower().endswith(".stl"):
+ stl.write_binary_stl(path, points)
+ else:
+ mesh = _mesh(points)
+ mesh.write(path)
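+
+# Usage sketch (illustrative): generate()/save() are normally reached through
+# the SDF object API, e.g.
+#
+#     f = sphere(1) & box(1.5)
+#     f.save("out.stl", samples=2**22)
+#
+# but save("out.stl", f, samples=2**22) works directly as well.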
+
+
+def _mesh(points):
+ import meshio
+
+ points, cells = np.unique(points, axis=0, return_inverse=True)
+ cells = [("triangle", cells.reshape((-1, 3)))]
+ return meshio.Mesh(points, cells)
+
+
+def _debug_triangles(X, Y, Z):
+ x0, x1 = X[0], X[-1]
+ y0, y1 = Y[0], Y[-1]
+ z0, z1 = Z[0], Z[-1]
+
+ p = 0.25
+ x0, x1 = x0 + (x1 - x0) * p, x1 - (x1 - x0) * p
+ y0, y1 = y0 + (y1 - y0) * p, y1 - (y1 - y0) * p
+ z0, z1 = z0 + (z1 - z0) * p, z1 - (z1 - z0) * p
+
+ v = [
+ (x0, y0, z0),
+ (x0, y0, z1),
+ (x0, y1, z0),
+ (x0, y1, z1),
+ (x1, y0, z0),
+ (x1, y0, z1),
+ (x1, y1, z0),
+ (x1, y1, z1),
+ ]
+
+ return [
+ v[3],
+ v[5],
+ v[7],
+ v[5],
+ v[3],
+ v[1],
+ v[0],
+ v[6],
+ v[4],
+ v[6],
+ v[0],
+ v[2],
+ v[0],
+ v[5],
+ v[1],
+ v[5],
+ v[0],
+ v[4],
+ v[5],
+ v[6],
+ v[7],
+ v[6],
+ v[5],
+ v[4],
+ v[6],
+ v[3],
+ v[7],
+ v[3],
+ v[6],
+ v[2],
+ v[0],
+ v[3],
+ v[2],
+ v[3],
+ v[0],
+ v[1],
+ ]
+
+
+def sample_slice(sdf, w=1024, h=1024, x=None, y=None, z=None, bounds=None):
+ if bounds is None:
+ bounds = _estimate_bounds(sdf)
+ (x0, y0, z0), (x1, y1, z1) = bounds
+
+ if x is not None:
+ X = np.array([x])
+ Y = np.linspace(y0, y1, w)
+ Z = np.linspace(z0, z1, h)
+ extent = (Z[0], Z[-1], Y[0], Y[-1])
+ axes = "ZY"
+ elif y is not None:
+ Y = np.array([y])
+ X = np.linspace(x0, x1, w)
+ Z = np.linspace(z0, z1, h)
+ extent = (Z[0], Z[-1], X[0], X[-1])
+ axes = "ZX"
+ elif z is not None:
+ Z = np.array([z])
+ X = np.linspace(x0, x1, w)
+ Y = np.linspace(y0, y1, h)
+ extent = (Y[0], Y[-1], X[0], X[-1])
+ axes = "YX"
+ else:
+ raise Exception("x, y, or z position must be specified")
+
+ P = _cartesian_product(X, Y, Z)
+ return sdf(P).reshape((w, h)), extent, axes
+
+
+def show_slice(*args, **kwargs):
+ import matplotlib.pyplot as plt
+
+ show_abs = kwargs.pop("abs", False)
+ a, extent, axes = sample_slice(*args, **kwargs)
+ if show_abs:
+ a = np.abs(a)
+ im = plt.imshow(a, extent=extent, origin="lower")
+ plt.xlabel(axes[0])
+ plt.ylabel(axes[1])
+ plt.colorbar(im)
+ plt.show()
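+
+# Usage sketch (illustrative): visualize a cross section of an SDF f through
+# the plane z=0, showing absolute distances:
+#
+#     show_slice(f, z=0, abs=True)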
diff --git a/sdf/progress.py b/sdf/progress.py
new file mode 100644
index 0000000..0cbbc9c
--- /dev/null
+++ b/sdf/progress.py
@@ -0,0 +1,83 @@
+import sys
+import time
+
+
+def pretty_time(seconds):
+ seconds = int(round(seconds))
+ s = seconds % 60
+ m = (seconds // 60) % 60
+ h = seconds // 3600
+ return "%d:%02d:%02d" % (h, m, s)
+
+
+class Bar(object):
+ def __init__(self, max_value=100, min_value=0, enabled=True):
+ self.min_value = min_value
+ self.max_value = max_value
+ self.value = min_value
+ self.start_time = time.time()
+ self.enabled = enabled
+
+ @property
+ def percent_complete(self):
+ t = (self.value - self.min_value) / (self.max_value - self.min_value)
+ return t * 100
+
+ @property
+ def elapsed_time(self):
+ return time.time() - self.start_time
+
+ @property
+ def eta(self):
+ t = self.percent_complete / 100
+ if t == 0:
+ return 0
+ return (1 - t) * self.elapsed_time / t
+
+ def increment(self, delta):
+ self.update(self.value + delta)
+
+ def update(self, value):
+ self.value = value
+ if self.enabled:
+ sys.stdout.write(" %s \r" % self.render())
+ sys.stdout.flush()
+
+ def done(self):
+ self.update(self.max_value)
+ self.stop()
+
+ def stop(self):
+ if self.enabled:
+ sys.stdout.write("\n")
+ sys.stdout.flush()
+
+ def render(self):
+ items = [
+ self.render_percent_complete(),
+ self.render_value(),
+ self.render_bar(),
+ self.render_elapsed_time(),
+ self.render_eta(),
+ ]
+ return " ".join(items)
+
+ def render_percent_complete(self):
+ return "%3.0f%%" % self.percent_complete
+
+ def render_value(self):
+ if self.min_value == 0:
+ return "(%g of %g)" % (self.value, self.max_value)
+ else:
+ return "(%g)" % (self.value)
+
+ def render_bar(self, size=30):
+ a = int(round(self.percent_complete / 100.0 * size))
+ b = size - a
+ return "[" + "#" * a + "-" * b + "]"
+
+ def render_elapsed_time(self):
+ return pretty_time(self.elapsed_time)
+
+ def render_eta(self):
+ return pretty_time(self.eta)
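+
+
+# Minimal usage sketch:
+#
+#     bar = Bar(100)
+#     for _ in range(100):
+#         ...              # do one unit of work
+#         bar.increment(1)
+#     bar.done()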
diff --git a/sdf/stl.py b/sdf/stl.py
new file mode 100644
index 0000000..6f263f5
--- /dev/null
+++ b/sdf/stl.py
@@ -0,0 +1,27 @@
+import numpy as np
+import struct
+
+
+def write_binary_stl(path, points):
+ n = len(points) // 3
+
+ points = np.array(points, dtype="float32").reshape((-1, 3, 3))
+ normals = np.cross(points[:, 1] - points[:, 0], points[:, 2] - points[:, 0])
+ normals /= np.linalg.norm(normals, axis=1).reshape((-1, 1))
+
+ dtype = np.dtype(
+ [
+ ("normal", ("= tw - 1) | (j < 0) | (j >= th - 1)
+ d[outside] = q[outside]
+ return d
+
+ return f
+
+
+def _bilinear_interpolate(a, x, y):
+ x0 = np.floor(x).astype(int)
+ x1 = x0 + 1
+ y0 = np.floor(y).astype(int)
+ y1 = y0 + 1
+
+ x0 = np.clip(x0, 0, a.shape[1] - 1)
+ x1 = np.clip(x1, 0, a.shape[1] - 1)
+ y0 = np.clip(y0, 0, a.shape[0] - 1)
+ y1 = np.clip(y1, 0, a.shape[0] - 1)
+
+ pa = a[y0, x0]
+ pb = a[y1, x0]
+ pc = a[y0, x1]
+ pd = a[y1, x1]
+
+ wa = (x1 - x) * (y1 - y)
+ wb = (x1 - x) * (y - y0)
+ wc = (x - x0) * (y1 - y)
+ wd = (x - x0) * (y - y0)
+
+ return wa * pa + wb * pb + wc * pc + wd * pd
diff --git a/sdf/units.py b/sdf/units.py
new file mode 100644
index 0000000..8e9e8da
--- /dev/null
+++ b/sdf/units.py
@@ -0,0 +1,3 @@
+import pint
+
+units = pint.UnitRegistry()
diff --git a/sdf/util.py b/sdf/util.py
new file mode 100644
index 0000000..285e710
--- /dev/null
+++ b/sdf/util.py
@@ -0,0 +1,32 @@
+import math
+import functools
+import inspect
+import numpy as np
+
+pi = math.pi
+
+degrees = math.degrees
+radians = math.radians
+
+
+def n_trailing_ascending_positive(d):
+ """
+ Determine how many elements in a given sequence are positive and ascending.
+
+ Args:
+ d (sequence of numbers): the sequence to check
+
+ Returns:
+ int : the amount of trailing ascending positive elements
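+
+    Example:
+        ``n_trailing_ascending_positive([3, 1, 2, 5])`` returns ``3``, because
+        the trailing ``1, 2, 5`` are positive and ascending.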
+ """
+ d = np.array(d).flatten()
+ # is the next element larger than previous and positive?
+ order = (d[1:] > d[:-1]) & (d[:-1] > 0)
+ # TODO: Not happy at all with this if/else mess. Is there no easier way to find the
+ # index in a numpy array after which the values are only ascending? 🤔
+ if np.all(order): # all ascending
+ return d.size
+ elif np.all(~order): # none ascending
+ return 0
+ else: # count from end how many are ascending
+ return np.argmin(order[::-1]) + 1