Added preliminary 3d animation abilities with (as of now poorly shaded) parametric curves

Grant Sanderson
2018-08-15 16:23:29 -07:00
parent 148469486c
commit f926611d34
9 changed files with 371 additions and 225 deletions

View File

@@ -264,7 +264,7 @@ class Camera(object):
(VMobject, self.display_multiple_vectorized_mobjects),
(PMobject, self.display_multiple_point_cloud_mobjects),
(AbstractImageMobject, self.display_multiple_image_mobjects),
(Mobject, lambda batch: batch), # Do nothing
(Mobject, lambda batch, pa: batch), # Do nothing
]
def get_mobject_type(mobject):
@@ -370,10 +370,11 @@ class Camera(object):
)
else:
points = vmobject.get_gradient_start_and_end_points()
points = self.transform_points_pre_display(points)
pat = cairo.LinearGradient(*it.chain(*[
point[:2] for point in points
]))
offsets = np.linspace(1, 0, len(rgbas))
offsets = np.linspace(0, 1, len(rgbas))
for rgba, offset in zip(rgbas, offsets):
pat.add_color_stop_rgba(
offset, *rgba[2::-1], rgba[3]
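For reference, a minimal standalone pycairo sketch (not manim code) of how the corrected offsets behave: stops spaced with np.linspace(0, 1, n) run from the gradient's start point (offset 0) to its end point (offset 1). The surface size, colors, and output file name below are invented for illustration.

# Minimal pycairo sketch: color stops spaced with np.linspace(0, 1, n)
# run from the gradient's start point to its end point.
import cairo
import numpy as np

surface = cairo.ImageSurface(cairo.FORMAT_ARGB32, 200, 50)
ctx = cairo.Context(surface)

rgbas = np.array([
    [1.0, 0.0, 0.0, 1.0],   # red at offset 0.0 (gradient start)
    [0.0, 1.0, 0.0, 1.0],   # green at offset 0.5
    [0.0, 0.0, 1.0, 1.0],   # blue at offset 1.0 (gradient end)
])
pat = cairo.LinearGradient(0, 0, 200, 0)   # start point (0, 0), end point (200, 0)
for rgba, offset in zip(rgbas, np.linspace(0, 1, len(rgbas))):
    pat.add_color_stop_rgba(offset, *rgba)

ctx.set_source(pat)
ctx.rectangle(0, 0, 200, 50)
ctx.fill()
surface.write_to_png("gradient_demo.png")   # illustrative output file name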

View File

@@ -4,60 +4,73 @@ import numpy as np
from constants import *
from camera.moving_camera import MovingCamera
from mobject.types.vectorized_mobject import VectorizedPoint
from mobject.three_dimensions import should_shade_in_3d
from camera.camera import Camera
from mobject.types.point_cloud_mobject import Point
from mobject.three_dimensions import ThreeDVMobject
from mobject.value_tracker import ValueTracker
from utils.bezier import interpolate
from utils.color import get_shaded_rgb
from utils.space_ops import rotation_about_z
from utils.space_ops import rotation_matrix
# TODO: Make sure this plays well with latest camera updates
# class CameraWithPerspective(Camera):
# CONFIG = {
# "camera_distance": 20,
# }
# def points_to_pixel_coords(self, points):
# distance_ratios = np.divide(
# self.camera_distance,
# self.camera_distance - points[:, 2]
# )
# scale_factors = interpolate(0, 1, distance_ratios)
# adjusted_points = np.array(points)
# for i in 0, 1:
# adjusted_points[:, i] *= scale_factors
# return Camera.points_to_pixel_coords(self, adjusted_points)
class ThreeDCamera(MovingCamera):
class ThreeDCamera(Camera):
CONFIG = {
"sun_vect": 5 * UP + LEFT,
"shading_factor": 0.2,
"distance": 5.0,
"distance": 20.0,
"default_distance": 5.0,
"phi": 0, # Angle off z axis
"theta": -TAU / 4, # Rotation about z axis
"theta": -90 * DEGREES, # Rotation about z axis
"gamma": 0, # Rotation about normal vector to camera
"light_source_start_point": 10 * DOWN + 7 * LEFT + 5 * OUT,
"frame_center": ORIGIN,
}
def __init__(self, *args, **kwargs):
MovingCamera.__init__(self, *args, **kwargs)
self.unit_sun_vect = self.sun_vect / np.linalg.norm(self.sun_vect)
# rotation_mobject lives in the phi-theta-distance space
# TODO, use ValueTracker for this instead
self.rotation_mobject = VectorizedPoint()
# Moving_center lives in the x-y-z space
# It represents the center of rotation
self.moving_center = VectorizedPoint(self.frame_center)
self.set_position(self.phi, self.theta, self.distance)
Camera.__init__(self, *args, **kwargs)
self.phi_tracker = ValueTracker(self.phi)
self.theta_tracker = ValueTracker(self.theta)
self.distance_tracker = ValueTracker(self.distance)
self.gamma_tracker = ValueTracker(self.gamma)
self.light_source = Point(self.light_source_start_point)
self.frame_center = Point(self.frame_center)
self.reset_rotation_matrix()
def capture_mobjects(self, mobjects, **kwargs):
self.reset_rotation_matrix()
Camera.capture_mobjects(self, mobjects, **kwargs)
def get_value_trackers(self):
return [
self.phi_tracker,
self.theta_tracker,
self.distance_tracker,
self.gamma_tracker,
]
def modified_rgbas(self, vmobject, rgbas):
if should_shade_in_3d(vmobject):
return self.get_shaded_rgbas(rgbas, self.get_unit_normal_vect(vmobject))
is_3d = isinstance(vmobject, ThreeDVMobject)
has_points = (vmobject.get_num_points() > 0)
if is_3d and has_points:
light_source_point = self.light_source.points[0]
if len(rgbas) < 2:
shaded_rgbas = rgbas.repeat(2, axis=0)
else:
shaded_rgbas = np.array(rgbas[:2])
shaded_rgbas[0, :3] = get_shaded_rgb(
shaded_rgbas[0, :3],
vmobject.get_start_corner(),
vmobject.get_start_corner_unit_normal(),
light_source_point,
)
shaded_rgbas[1, :3] = get_shaded_rgb(
shaded_rgbas[1, :3],
vmobject.get_end_corner(),
vmobject.get_end_corner_unit_normal(),
light_source_point,
)
return shaded_rgbas
return rgbas
def get_stroke_rgbas(self, vmobject, background=False):
@@ -70,111 +83,81 @@ class ThreeDCamera(MovingCamera):
vmobject, vmobject.get_fill_rgbas()
)
def get_shaded_rgbas(self, rgbas, normal_vect):
brightness = np.dot(normal_vect, self.unit_sun_vect)**2
target = np.ones(rgbas.shape)
target[:, 3] = rgbas[:, 3]
if brightness > 0:
alpha = self.shading_factor * brightness
return interpolate(rgbas, target, alpha)
else:
target[:, :3] = 0
alpha = -self.shading_factor * brightness
return interpolate(rgbas, target, alpha)
def get_unit_normal_vect(self, vmobject):
anchors = vmobject.get_anchors()
if len(anchors) < 3:
return OUT
normal = np.cross(anchors[1] - anchors[0], anchors[2] - anchors[1])
if normal[2] < 0:
normal = -normal
length = np.linalg.norm(normal)
if length == 0:
return OUT
return normal / length
def display_multiple_vectorized_mobjects(self, vmobjects, pixel_array):
# camera_point = self.spherical_coords_to_point(
# *self.get_spherical_coords()
# )
rot_matrix = self.get_rotation_matrix()
def z_key(vmob):
# Assign a number to a three dimensional mobject
# based on how close it is to the camera
three_d_status = should_shade_in_3d(vmob)
has_points = vmob.get_num_points() > 0
if three_d_status and has_points:
return vmob.get_center()[2]
if isinstance(vmob, ThreeDVMobject):
return np.dot(
vmob.get_center(),
rot_matrix.T
)[2]
else:
return 0
MovingCamera.display_multiple_vectorized_mobjects(
return np.inf
Camera.display_multiple_vectorized_mobjects(
self, sorted(vmobjects, key=z_key), pixel_array
)
def get_spherical_coords(self, phi=None, theta=None, distance=None):
curr_phi, curr_theta, curr_d = self.rotation_mobject.points[0]
if phi is None:
phi = curr_phi
if theta is None:
theta = curr_theta
if distance is None:
distance = curr_d
return np.array([phi, theta, distance])
def get_cartesian_coords(self, phi=None, theta=None, distance=None):
spherical_coords_array = self.get_spherical_coords(
phi, theta, distance)
phi2 = spherical_coords_array[0]
theta2 = spherical_coords_array[1]
d2 = spherical_coords_array[2]
return self.spherical_coords_to_point(phi2, theta2, d2)
def get_phi(self):
return self.get_spherical_coords()[0]
return self.phi_tracker.get_value()
def get_theta(self):
return self.get_spherical_coords()[1]
return self.theta_tracker.get_value()
def get_distance(self):
return self.get_spherical_coords()[2]
return self.distance_tracker.get_value()
def spherical_coords_to_point(self, phi, theta, distance):
return distance * np.array([
np.sin(phi) * np.cos(theta),
np.sin(phi) * np.sin(theta),
np.cos(phi)
])
def get_center_of_rotation(self, x=None, y=None, z=None):
curr_x, curr_y, curr_z = self.moving_center.points[0]
if x is None:
x = curr_x
if y is None:
y = curr_y
if z is None:
z = curr_z
return np.array([x, y, z])
def set_position(self, phi=None, theta=None, distance=None,
center_x=None, center_y=None, center_z=None):
point = self.get_spherical_coords(phi, theta, distance)
self.rotation_mobject.move_to(point)
self.phi, self.theta, self.distance = point
center_of_rotation = self.get_center_of_rotation(
center_x, center_y, center_z)
self.moving_center.move_to(center_of_rotation)
self.frame_center = self.moving_center.points[0]
def get_view_transformation_matrix(self):
return (self.default_distance / self.get_distance()) * np.dot(
rotation_matrix(self.get_phi(), LEFT),
rotation_about_z(-self.get_theta() - np.pi / 2),
)
def transform_points_pre_display(self, points):
matrix = self.get_view_transformation_matrix()
return np.dot(points, matrix.T)
def get_gamma(self):
return self.gamma_tracker.get_value()
def get_frame_center(self):
return self.moving_center.points[0]
return self.frame_center.points[0]
def set_phi(self, value):
self.phi_tracker.set_value(value)
def set_theta(self, value):
self.theta_tracker.set_value(value)
def set_distance(self, value):
self.distance_tracker.set_value(value)
def set_gamma(self, value):
self.gamma_tracker.set_value(value)
def set_frame_center(self, point):
self.frame_center.move_to(point)
def reset_rotation_matrix(self):
self.rotation_matrix = self.generate_rotation_matrix()
def get_rotation_matrix(self):
return self.rotation_matrix
def generate_rotation_matrix(self):
phi = self.get_phi()
theta = self.get_theta()
gamma = self.get_gamma()
matrices = [
rotation_about_z(-theta - 90 * DEGREES),
rotation_matrix(-phi, RIGHT),
rotation_about_z(gamma),
]
result = np.identity(3)
for matrix in matrices:
result = np.dot(matrix, result)
return result
def transform_points_pre_display(self, points):
fc = self.get_frame_center()
distance = self.get_distance()
points -= fc
rot_matrix = self.get_rotation_matrix()
points = np.dot(points, rot_matrix.T)
zs = points[:, 2]
points[:, 0] *= (distance + zs) / distance
points[:, 1] *= (distance + zs) / distance
points += fc
return points
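For reference, a standalone numpy sketch of the new pre-display pipeline: recenter on the frame center, rotate into camera coordinates, apply the z-dependent (distance + z) / distance scale to x and y, then shift back. It assumes rot_matrix is whatever generate_rotation_matrix produces; the local rotation helper and sample points are illustrative, not the repo's versions.

# Standalone numpy sketch of transform_points_pre_display's math.
import numpy as np

def rotation_about_z(angle):
    # local helper, same idea as utils.space_ops.rotation_about_z
    c, s = np.cos(angle), np.sin(angle)
    return np.array([[c, -s, 0], [s, c, 0], [0, 0, 1]])

def transform_points_pre_display(points, frame_center, rot_matrix, distance):
    points = np.array(points, dtype=float) - frame_center
    points = np.dot(points, rot_matrix.T)
    zs = points[:, 2]
    scale = (distance + zs) / distance   # crude perspective: z-dependent scale on x and y
    points[:, 0] *= scale
    points[:, 1] *= scale
    return points + frame_center

pts = np.array([[1.0, 0.0, 0.0], [0.0, 1.0, 2.0]])
print(transform_points_pre_display(pts, np.zeros(3), rotation_about_z(0.3), distance=20.0))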

View File

@@ -681,12 +681,13 @@ class Mobject(Container):
]
def get_merged_array(self, array_attr):
result = None
for mob in self.family_members_with_points():
if result is None:
result = getattr(mob, array_attr)
else:
result = np.append(result, getattr(mob, array_attr), 0)
result = getattr(self, array_attr)
for submob in self.submobjects:
result = np.append(
result, submob.get_merged_array(array_attr),
axis=0
)
submob.get_merged_array(array_attr)
return result
def get_all_points(self):
@@ -816,11 +817,11 @@ class Mobject(Container):
if n_rows is not None:
v1 = RIGHT
v2 = DOWN
n = len(submobs) / n_rows
n = len(submobs) // n_rows
elif n_cols is not None:
v1 = DOWN
v2 = RIGHT
n = len(submobs) / n_cols
n = len(submobs) // n_cols
Group(*[
Group(*submobs[i:i + n]).arrange_submobjects(v1, **kwargs)
for i in range(0, len(submobs), n)
@@ -829,7 +830,7 @@ class Mobject(Container):
def sort_submobjects(self, point_to_num_func=lambda p: p[0]):
self.submobjects.sort(
key=lambda m: point_to_num_func(mob.get_center())
key=lambda m: point_to_num_func(m.get_center())
)
return self
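A quick plain-Python illustration of why the switch to integer division matters in the grid arrangement: with 12 submobjects and n_rows=3, n must be the int 4 for the slice bounds and the range step to work. Lists stand in for submobjects below.

# Plain-list sketch of the row slicing in arrange_submobjects_in_grid.
submobs = list("abcdefghijkl")        # stand-ins for 12 submobjects
n_rows = 3
n = len(submobs) // n_rows            # 4 per row; "/" would give the float 4.0
rows = [submobs[i:i + n] for i in range(0, len(submobs), n)]
print(rows)                           # three rows of four items each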

View File

@@ -3,34 +3,165 @@
from constants import *
from mobject.types.vectorized_mobject import VMobject
from mobject.types.vectorized_mobject import VGroup
from mobject.geometry import Square
from utils.config_ops import digest_config
from utils.space_ops import z_to_vector
from utils.space_ops import get_unit_normal
##############
def should_shade_in_3d(mobject):
return hasattr(mobject, "shade_in_3d") and mobject.shade_in_3d
class ThreeDVMobject(VMobject):
CONFIG = {}
def __init__(self, vmobject=None, **kwargs):
VMobject.__init__(self, **kwargs)
if vmobject is not None:
self.points = np.array(vmobject.points)
self.match_style(vmobject)
self.submobjects = map(
ThreeDVMobject, vmobject.submobjects
)
def get_gradient_start_and_end_points(self):
return self.get_start_corner(), self.get_end_corner()
def get_start_corner_index(self):
return 0
def get_end_corner_index(self):
return ((len(self.points) - 1) // 6) * 3
# return ((len(self.points) - 1) // 12) * 3
def get_start_corner(self):
if self.get_num_points() == 0:
return np.array(ORIGIN)
return self.points[self.get_start_corner_index()]
def get_end_corner(self):
if self.get_num_points() == 0:
return np.array(ORIGIN)
return self.points[self.get_end_corner_index()]
def get_unit_normal(self, point_index):
n_points = self.get_num_points()
if self.get_num_points() == 0:
return np.array(ORIGIN)
i = point_index
im1 = i - 1 if i > 0 else (n_points - 2)
ip1 = i + 1 if i < (n_points - 1) else 1
return get_unit_normal(
self.points[ip1] - self.points[i],
self.points[im1] - self.points[i],
)
def get_start_corner_unit_normal(self):
return self.get_unit_normal(
self.get_start_corner_index()
)
def get_end_corner_unit_normal(self):
return self.get_unit_normal(
self.get_end_corner_index()
)
def shade_in_3d(mobject):
for submob in mobject.submobject_family():
submob.shade_in_3d = True
class ParametricSurface(VGroup):
CONFIG = {
"u_min": 0,
"u_max": 1,
"v_min": 0,
"v_max": 1,
"resolution": 10,
"u_resolution": None,
"v_resolution": None,
"surface_piece_config": {},
"fill_color": BLUE_D,
"fill_opacity": 1.0,
"stroke_color": LIGHT_GREY,
"stroke_width": 0.5,
"should_make_jagged": False,
}
def __init__(self, func, **kwargs):
VGroup.__init__(self, **kwargs)
self.setup_in_uv_space()
self.apply_function(lambda p: func(p[0], p[1]))
if self.should_make_jagged:
self.make_jagged()
def setup_in_uv_space(self):
u_min = self.u_min
u_max = self.u_max
u_res = self.u_resolution or self.resolution
v_min = self.v_min
v_max = self.v_max
v_res = self.v_resolution or self.resolution
u_values = np.linspace(u_min, u_max, u_res + 1)
v_values = np.linspace(v_min, v_max, v_res + 1)
faces = VGroup()
for u1, u2 in zip(u_values[:-1], u_values[1:]):
for v1, v2 in zip(v_values[:-1], v_values[1:]):
piece = ThreeDVMobject()
piece.set_points_as_corners([
[u1, v1, 0],
[u2, v1, 0],
[u2, v2, 0],
[u1, v2, 0],
[u1, v1, 0],
])
faces.add(piece)
faces.set_stroke(width=0)
faces.set_fill(
color=self.fill_color,
opacity=self.fill_opacity
)
# TODO
mesh = VGroup()
mesh.set_stroke(
color=self.stroke_color,
width=self.stroke_width,
opacity=self.stroke_opacity,
)
self.faces = faces
self.mesh = mesh
self.add(self.faces, self.mesh)
def turn_off_3d_shading(mobject):
for submob in mobject.submobject_family():
submob.shade_in_3d = False
# Specific shapes
class ThreeDMobject(VMobject):
def __init__(self, *args, **kwargs):
VMobject.__init__(self, *args, **kwargs)
shade_in_3d(self)
class Sphere(ParametricSurface):
CONFIG = {
"resolution": 15,
"radius": 3,
"u_min": 0.001,
}
def __init__(self, **kwargs):
digest_config(self, kwargs)
kwargs["u_resolution"] = self.u_resolution or self.resolution
kwargs["v_resolution"] = self.u_resolution or 2 * self.resolution
ParametricSurface.__init__(
self, self.func, **kwargs
)
self.scale(self.radius)
def func(self, u, v):
return np.array([
np.cos(TAU * v) * np.sin(PI * u),
np.sin(TAU * v) * np.sin(PI * u),
np.cos(PI * u)
])
class Cube(ThreeDMobject):
class Cube(VGroup):
CONFIG = {
"fill_opacity": 0.75,
"fill_color": BLUE,
@@ -41,9 +172,13 @@ class Cube(ThreeDMobject):
def generate_points(self):
for vect in IN, OUT, LEFT, RIGHT, UP, DOWN:
face = Square(side_length=self.side_length)
face = ThreeDVMobject(
Square(side_length=self.side_length)
)
face.make_jagged()
face.flip()
face.shift(self.side_length * OUT / 2.0)
face.apply_function(lambda p: np.dot(p, z_to_vector(vect).T))
face.apply_matrix(z_to_vector(vect))
self.add(face)
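A hedged usage sketch of the new ParametricSurface and Sphere: the constructor takes a func(u, v) returning a 3D point and subdivides the (u, v) rectangle into quadrilateral faces. It assumes this commit's module layout; the torus function and numbers are invented for the example.

# Hedged usage sketch (assumes this commit's module layout).
import numpy as np
from mobject.three_dimensions import ParametricSurface, Sphere

TAU = 2 * np.pi   # same value as constants.TAU

def torus(u, v, R=3.0, r=1.0):
    # u, v in [0, 1]; mapped to the two angles of a torus
    phi, theta = TAU * u, TAU * v
    return np.array([
        (R + r * np.cos(theta)) * np.cos(phi),
        (R + r * np.cos(theta)) * np.sin(phi),
        r * np.sin(theta),
    ])

surface = ParametricSurface(torus, resolution=20, fill_opacity=0.8)
sphere = Sphere()   # ParametricSurface subclass added here; defaults: radius 3, resolution 15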

View File

@@ -74,7 +74,7 @@ class VMobject(Mobject):
)
return self
def get_rgbas_array(self, color, opacity):
def generate_rgbas_array(self, color, opacity):
"""
First arg can be either a color, or a tuple/list of colors.
Likewise, opacity can either be a float, or a tuple of floats.
@@ -100,7 +100,7 @@ class VMobject(Mobject):
def update_rgbas_array(self, array_name, color=None, opacity=None):
passed_color = color or BLACK
passed_opacity = opacity or 0
rgbas = self.get_rgbas_array(passed_color, passed_opacity)
rgbas = self.generate_rgbas_array(passed_color, passed_opacity)
if not hasattr(self, array_name):
setattr(self, array_name, rgbas)
return self
@@ -113,7 +113,7 @@ class VMobject(Mobject):
)
setattr(self, array_name, curr_rgbas)
elif len(rgbas) < len(curr_rgbas):
rgbas = stretch_array_to_length(len(curr_rgbas))
rgbas = stretch_array_to_length(rgbas, len(curr_rgbas))
# Only update rgb if color was not None, and only
# update alpha channel if opacity was passed in
if color is not None:
@@ -266,6 +266,7 @@ class VMobject(Mobject):
# already be handled above
self.set_sheen_direction(direction, family=False)
# Reset color to put sheen into effect
if factor != 0:
self.set_stroke(self.get_stroke_color(), family=family)
self.set_fill(self.get_fill_color(), family=family)
return self
@@ -284,7 +285,7 @@ class VMobject(Mobject):
for vect in [RIGHT, UP, OUT]
]).transpose()
offset = np.dot(bases, direction)
return (c + offset, c - offset)
return (c - offset, c + offset)
def color_using_background_image(self, background_image_file):
self.background_image_file = background_image_file
@@ -366,7 +367,7 @@ class VMobject(Mobject):
def change_anchor_mode(self, mode):
for submob in self.family_members_with_points():
anchors, h1, h2 = submob.get_anchors_and_handles()
anchors = submob.get_anchors()
submob.set_anchor_points(anchors, mode=mode)
return self
@@ -426,15 +427,14 @@ class VMobject(Mobject):
handles closer to their anchors, apply the function then push them out
again.
"""
if self.get_num_points() == 0:
return
anchors, handles1, handles2 = self.get_anchors_and_handles()
for submob in self.family_members_with_points():
anchors, handles1, handles2 = submob.get_anchors_and_handles()
# print len(anchors), len(handles1), len(handles2)
a_to_h1 = handles1 - anchors[:-1]
a_to_h2 = handles2 - anchors[1:]
handles1 = anchors[:-1] + factor * a_to_h1
handles2 = anchors[1:] + factor * a_to_h2
self.set_anchors_and_handles(anchors, handles1, handles2)
submob.set_anchors_and_handles(anchors, handles1, handles2)
# Information about line
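For reference, a small numpy sketch of the endpoint math affected by the (c - offset, c + offset) swap: the bounding-box half-extents projected onto the sheen direction give an offset, and the gradient now runs from center - offset to center + offset. All numbers below are made up.

# Numpy sketch of the gradient-endpoint math in get_gradient_start_and_end_points.
import numpy as np

c = np.array([1.0, 2.0, 0.0])                        # mobject center
bases = np.diag([2.0, 1.0, 0.0])                     # half-extents along x, y, z
direction = np.array([1.0, 1.0, 0.0]) / np.sqrt(2)   # sheen direction

offset = np.dot(bases, direction)
start, end = c - offset, c + offset                  # gradient runs start -> end along the sheen
print(start, end)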

View File

@@ -1,15 +1,11 @@
import numpy as np
from constants import *
from mobject.types.vectorized_mobject import VectorizedPoint
# TODO: Rather than using VectorizedPoint, there should be some UndisplayedPointSet type
from mobject.mobject import Mobject
from utils.bezier import interpolate
class ValueTracker(VectorizedPoint):
class ValueTracker(Mobject):
"""
Not meant to be displayed. Instead the position encodes some
number, often one which another animation or continual_animation
@@ -18,14 +14,15 @@ class ValueTracker(VectorizedPoint):
"""
def __init__(self, value=0, **kwargs):
VectorizedPoint.__init__(self, **kwargs)
Mobject.__init__(self, **kwargs)
self.points = np.zeros((1, 3))
self.set_value(value)
def get_value(self):
return self.get_center()[0]
return self.points[0, 0]
def set_value(self, value):
self.move_to(value * RIGHT)
self.points[0, 0] = value
return self
def increment_value(self, d_value):
@@ -40,8 +37,7 @@ class ExponentialValueTracker(ValueTracker):
"""
def get_value(self):
return np.exp(self.get_center()[0])
return np.exp(ValueTracker.get_value(self))
def set_value(self, value):
self.move_to(np.log(value) * RIGHT)
return self
return ValueTracker.set_value(self, np.log(value))
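A standalone mini re-implementation (not the manim classes) of the tracker idea after this change: the value lives in points[0, 0], and the exponential variant stores its log so that linear interpolation of the stored number is geometric in the exposed value.

# Mini stand-ins for ValueTracker / ExponentialValueTracker.
import numpy as np

class MiniValueTracker:
    def __init__(self, value=0):
        self.points = np.zeros((1, 3))
        self.set_value(value)
    def get_value(self):
        return self.points[0, 0]
    def set_value(self, value):
        self.points[0, 0] = value
        return self

class MiniExponentialValueTracker(MiniValueTracker):
    def get_value(self):
        return np.exp(MiniValueTracker.get_value(self))
    def set_value(self, value):
        return MiniValueTracker.set_value(self, np.log(value))

t = MiniExponentialValueTracker(1.0)
t.set_value(100.0)
print(t.get_value())    # ~100.0
print(t.points[0, 0])   # log(100) is what is actually stored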

View File

@@ -1,14 +1,10 @@
from constants import *
from continual_animation.continual_animation import ContinualMovement
from continual_animation.update import ContinualGrowValue
from animation.transform import ApplyMethod
from camera.three_d_camera import ThreeDCamera
from scene.scene import Scene
from utils.iterables import list_update
class ThreeDScene(Scene):
CONFIG = {
@@ -16,17 +12,19 @@ class ThreeDScene(Scene):
"ambient_camera_rotation": None,
}
def set_camera_position(self, phi=None, theta=None, distance=None,
center_x=None, center_y=None, center_z=None):
self.camera.set_position(
phi, theta, distance,
center_x, center_y, center_z
)
def set_camera_orientation(self, phi=None, theta=None, distance=None, gamma=None):
if phi is not None:
self.camera.set_phi(phi)
if theta is not None:
self.camera.set_theta(theta)
if distance is not None:
self.camera.set_distance(distance)
if gamma is not None:
self.camera.set_gamma(gamma)
def begin_ambient_camera_rotation(self, rate=0.01):
self.ambient_camera_rotation = ContinualMovement(
self.camera.rotation_mobject,
direction=UP,
def begin_ambient_camera_rotation(self, rate=0.1):
self.ambient_camera_rotation = ContinualGrowValue(
self.camera.theta_tracker,
rate=rate
)
self.add(self.ambient_camera_rotation)
@@ -36,36 +34,33 @@
self.remove(self.ambient_camera_rotation)
self.ambient_camera_rotation = None
def move_camera(
self,
phi=None, theta=None, distance=None,
center_x=None, center_y=None, center_z=None,
def move_camera(self,
phi=None, theta=None,
distance=None, gamma=None,
added_anims=[],
**kwargs
):
target_point = self.camera.get_spherical_coords(phi, theta, distance)
movement = ApplyMethod(
self.camera.rotation_mobject.move_to,
target_point,
**kwargs
)
target_center = self.camera.get_center_of_rotation(
center_x, center_y, center_z)
movement_center = ApplyMethod(
self.camera.moving_center.move_to,
target_center,
**kwargs
**kwargs):
anims = []
value_tracker_pairs = [
(phi, self.camera.phi_tracker),
(theta, self.camera.theta_tracker),
(distance, self.camera.distance_tracker),
(gamma, self.camera.gamma_tracker),
]
for value, tracker in value_tracker_pairs:
if value is not None:
anims.append(
ApplyMethod(tracker.set_value, value, **kwargs)
)
is_camera_rotating = self.ambient_camera_rotation in self.continual_animations
if is_camera_rotating:
self.remove(self.ambient_camera_rotation)
self.play(movement, movement_center, *added_anims)
target_point = self.camera.get_spherical_coords(phi, theta, distance)
self.play(*anims + added_anims)
if is_camera_rotating:
self.add(self.ambient_camera_rotation)
def get_moving_mobjects(self, *animations):
moving_mobjects = Scene.get_moving_mobjects(self, *animations)
if self.camera.rotation_mobject in moving_mobjects:
return list_update(self.mobjects, moving_mobjects)
camera_mobjects = self.camera.get_value_trackers()
if any([cm in moving_mobjects for cm in camera_mobjects]):
return self.mobjects
return moving_mobjects
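A hedged usage sketch of the updated ThreeDScene camera API, assuming this commit's module layout and a scene that can render the new Sphere; the angles, rate, and run_time values are illustrative.

# Illustrative scene using the new per-angle camera controls.
from scene.three_d_scene import ThreeDScene
from mobject.three_dimensions import Sphere
from constants import DEGREES

class CameraDemo(ThreeDScene):
    def construct(self):
        self.add(Sphere())
        # Set each angle directly on the camera's value trackers
        self.set_camera_orientation(phi=70 * DEGREES, theta=-45 * DEGREES)
        # Continually grow theta via the camera's theta_tracker
        self.begin_ambient_camera_rotation(rate=0.1)
        self.wait(2)
        # Animate the trackers themselves, optionally alongside other animations
        self.move_camera(phi=45 * DEGREES, theta=30 * DEGREES, run_time=3)
        self.wait()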

View File

@@ -6,6 +6,7 @@ from constants import WHITE
from constants import PALETTE
from utils.bezier import interpolate
from utils.space_ops import get_norm
def color_to_rgb(color):
@@ -82,3 +83,12 @@ def random_bright_color():
def random_color():
return random.choice(PALETTE)
def get_shaded_rgb(rgb, point, unit_normal_vect, light_source):
to_sun = light_source - point
to_sun /= get_norm(to_sun)
factor = 0.5 * np.dot(unit_normal_vect, to_sun)**3
if factor < 0:
factor *= 0.5
return np.clip(rgb + factor, 0, 1)
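A standalone numpy check of the new shading rule (same math as above, with np.linalg.norm standing in for the repo's get_norm): brightness varies with the cube of the cosine between the surface normal and the direction to the light, and negative contributions are halved.

# Standalone check of the shading rule.
import numpy as np

def shaded_rgb(rgb, point, unit_normal_vect, light_source):
    to_sun = light_source - point
    to_sun = to_sun / np.linalg.norm(to_sun)
    factor = 0.5 * np.dot(unit_normal_vect, to_sun) ** 3
    if factor < 0:
        factor *= 0.5   # faces pointing away are only mildly darkened
    return np.clip(rgb + factor, 0, 1)

rgb = np.array([0.2, 0.4, 0.8])
light = np.array([-7.0, -10.0, 5.0])   # the camera's default light_source_start_point
print(shaded_rgb(rgb, np.zeros(3), np.array([0.0, 0.0, 1.0]), light))    # brightened slightly
print(shaded_rgb(rgb, np.zeros(3), np.array([0.0, 0.0, -1.0]), light))   # darkened slightly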

View File

@@ -92,6 +92,31 @@ def project_along_vector(point, vector):
matrix = np.identity(3) - np.outer(vector, vector)
return np.dot(point, matrix.T)
def get_norm(vect):
return sum([x**2 for x in vect])**0.5
def normalize(vect):
norm = get_norm(vect)
if norm > 0:
return vect / norm
else:
return np.zeros(len(vect))
def cross(v1, v2):
return np.array([
v1[1] * v2[2] - v1[2] * v2[1],
v1[2] * v2[0] - v1[0] * v2[2],
v1[0] * v2[1] - v1[1] * v2[0]
])
def get_unit_normal(v1, v2):
return normalize(cross(v1, v2))
###
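A quick sanity check of the new vector helpers, with np.cross standing in for the hand-rolled cross above: two edges lying in the xy-plane give a unit normal along +z or -z depending on their order.

# Sanity check for get_norm / normalize / get_unit_normal.
import numpy as np

def get_norm(vect):
    return sum([x ** 2 for x in vect]) ** 0.5

def normalize(vect):
    norm = get_norm(vect)
    return vect / norm if norm > 0 else np.zeros(len(vect))

def get_unit_normal(v1, v2):
    return normalize(np.cross(v1, v2))

print(get_unit_normal(np.array([1.0, 0, 0]), np.array([0, 1.0, 0])))   # +z axis
print(get_unit_normal(np.array([0, 1.0, 0]), np.array([1.0, 0, 0])))   # -z axis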