Working initial visualization of a CNN.

Alec Helbling
2022-12-29 14:09:16 -05:00
parent 330ba170a0
commit 8cee86e884
18 changed files with 384 additions and 236 deletions


@ -1,36 +1,16 @@
from manim import *
import numpy as np
class CornersRectangle(Rectangle):
"""Rectangle with functionality for getting the corner coordinates"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
self.corners = VGroup(
*[Dot(corner_point) for corner_point in self.get_vertices()]
)
self.corners.set_fill(opacity=0.0)
self.add(self.corners)
def get_corners_dict(self):
"""Returns a dictionary of the corners"""
return {
"top_left": self.corners[3],
"top_right": self.corners[0],
"bottom_left": self.corners[2],
"bottom_right": self.corners[1],
}
class GriddedRectangle(VGroup):
"""Rectangle object with grid lines"""
def __init__(self, center, color=WHITE, height=2.0, width=4.0,
def __init__(self, color=ORANGE, height=2.0, width=4.0,
mark_paths_closed=True, close_new_points=True,
grid_xstep=None, grid_ystep=None, grid_stroke_width=0.0, #DEFAULT_STROKE_WIDTH/2,
grid_stroke_color=None, grid_stroke_opacity=None, **kwargs):
grid_stroke_color=None, grid_stroke_opacity=None,
stroke_width=2.0, fill_opacity=0.2, **kwargs):
super().__init__()
# Fields
self.center = center
self.mark_paths_closed = mark_paths_closed
self.close_new_points = close_new_points
self.grid_xstep = grid_xstep
@ -38,75 +18,28 @@ class GriddedRectangle(VGroup):
self.grid_stroke_width = grid_stroke_width
self.grid_stroke_color = grid_stroke_color
self.grid_stroke_opacity = grid_stroke_opacity
self.stroke_width = stroke_width
self.rotation_angles = [0, 0, 0]
# Make inner_rectangle
self.inner_rectangle = Rectangle(
# Make rectangle
self.rectangle = Rectangle(
width=width,
height=height,
stroke_opacity=0.0,
stroke_width=0.0
)
print(self.inner_rectangle.get_vertices())
# self.inner_rectangle = Polygon(
"""
points = [
self.center + np.array([width / 2, height / 2, 0]),
self.center + np.array([width / 2, -1*(height / 2), 0]),
self.center + np.array([-1 * (width / 2), -1 * (height / 2), 0]),
self.center + np.array([-1 * (width / 2), height / 2, 0]),
]
self.inner_rectangle = Polygram(
points,
stroke_opacity=0.0,
stroke_width=0.0
)
"""
self.add(self.inner_rectangle)
# Make outline rectangle
self.outline_rectangle = SurroundingRectangle(
self.inner_rectangle,
color=color,
buff=0.0,
**kwargs
stroke_width=stroke_width,
fill_color=color,
fill_opacity=fill_opacity
)
self.add(self.outline_rectangle)
# Move to center
self.move_to(self.center)
# Setup Object
# TODO re-implement gridded rectangle
# self.grid_lines = self.make_grid_lines()
# self.add(self.grid_lines)
# Make dots for the corners
# Make outer corner dots
self.outer_corners = VGroup(
*[Dot(corner_point) for corner_point in self.outline_rectangle.get_vertices()]
)
self.outer_corners.set_fill(opacity=0.0)
self.add(self.outer_corners)
# Make inner corner dots
self.inner_corners = VGroup(
*[Dot(corner_point) for corner_point in self.inner_rectangle.get_vertices()]
)
self.inner_corners.set_fill(opacity=0.0)
self.add(self.inner_corners)
def get_corners_dict(self, inner_rectangle=False):
self.add(self.rectangle)
def get_corners_dict(self):
"""Returns a dictionary of the corners"""
if inner_rectangle:
return {
"top_left": self.inner_corners[3],
"top_right": self.inner_corners[0],
"bottom_left": self.inner_corners[2],
"bottom_right": self.inner_corners[1],
}
else:
return {
"top_left": self.outer_corners[3],
"top_right": self.outer_corners[0],
"bottom_left": self.outer_corners[2],
"bottom_right": self.outer_corners[1],
}
# Return Dot mobjects at the rectangle's four corners
return {
"top_right": Dot(self.rectangle.get_corner([1, 1, 0])),
"top_left": Dot(self.rectangle.get_corner([-1, 1, 0])),
"bottom_left": Dot(self.rectangle.get_corner([-1, -1, 0])),
"bottom_right": Dot(self.rectangle.get_corner([1, -1, 0])),
}
def make_grid_lines(self):
"""Make grid lines in rectangle"""
@ -149,49 +82,5 @@ class GriddedRectangle(VGroup):
return grid_lines
def rotate_about_origin(self, angle, axis=OUT, axes=[]):
self.rotation_angles[np.nonzero(axis)[0][0]] = angle
return super().rotate_about_origin(angle, axis, axes)
def get_normal_vector(self):
"""Gets the vector normal to main rectangle face"""
# Get three corner points
corner_1 = self.rectangle.get_top()
corner_2 = self.rectangle.get_left()
corner_3 = self.rectangle.get_right()
# Make vectors from them
a = corner_1 - corner_3
b = corner_1 - corner_2
# Compute cross product
normal_vector = np.cross(b, a)
normal_vector /= np.linalg.norm(normal_vector)
return normal_vector
def get_rotation_axis_and_angle(self):
"""Gets the angle of rotation necessary to rotate something from the default z-axis to the rectangle"""
def unit_vector(vector):
""" Returns the unit vector of the vector. """
return vector / np.linalg.norm(vector)
def angle_between(v1, v2):
""" Returns the angle in radians between vectors 'v1' and 'v2'::
>>> angle_between((1, 0, 0), (0, 1, 0))
1.5707963267948966
>>> angle_between((1, 0, 0), (1, 0, 0))
0.0
>>> angle_between((1, 0, 0), (-1, 0, 0))
3.141592653589793
"""
v1_u = unit_vector(v1)
v2_u = unit_vector(v2)
return np.arccos(np.clip(np.dot(v1_u, v2_u), -1.0, 1.0))
normal_vector = self.get_normal_vector()
z_axis = Z_AXIS
# Get angle between normal vector and z axis
axis = np.cross(normal_vector, z_axis)
angle = angle_between(normal_vector, z_axis)
return axis, angle
def get_center(self):
return self.rectangle.get_center()
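A minimal sketch, assuming the refactored GriddedRectangle above is importable as manim_ml.gridded_rectangle.GriddedRectangle (the module path follows the imports elsewhere in this diff), of how the corner dictionary and normal vector might be consumed:

from manim import *
from manim_ml.gridded_rectangle import GriddedRectangle

# Single filled Rectangle; corner Dots are now built on demand rather than stored.
rect = GriddedRectangle(color=ORANGE, height=2.0, width=4.0)

corners = rect.get_corners_dict()
# Each value is a Dot placed via Rectangle.get_corner() with a unit direction,
# e.g. top_left sits at roughly [-2, 1, 0] while the rectangle is at the origin.
print(corners["top_left"].get_center())

# Unit vector normal to the rectangle face (useful once the layer is rotated in 3D).
print(rect.get_normal_vector())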


@ -70,4 +70,14 @@ class ListGroup(Mobject):
def set_z_index(self, z_index_value, family=True):
"""Sets z index of all values in ListGroup"""
for item in self.items:
item.set_z_index(z_index_value, family=True)
item.set_z_index(z_index_value, family=True)
def __iter__(self):
self.current_index = -1
return self
def __next__(self): # Python 2: def next(self)
self.current_index += 1
if self.current_index < len(self.items):
return self.items[self.current_index]
raise StopIteration
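A short usage sketch of the iterator protocol added above; the ListGroup import path is an assumption, not confirmed by this diff:

from manim import Circle, Square
from manim_ml.list_group import ListGroup  # hypothetical import path

group = ListGroup(Square(), Circle())
# __iter__/__next__ let a ListGroup be consumed directly by a for-loop:
for item in group:
    print(type(item).__name__)  # Square, then Circle

Because the cursor (current_index) lives on the instance, two simultaneous loops over the same ListGroup would interfere with each other; a generator-based __iter__ would avoid that.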


@ -1,3 +1,4 @@
from manim_ml.neural_network.layers.image_to_convolutional3d import ImageToConvolutional3DLayer
from .convolutional3d_to_convolutional3d import Convolutional3DToConvolutional3D
from .convolutional2d_to_convolutional2d import Convolutional2DToConvolutional2D
from .convolutional3d import Convolutional3DLayer
@ -30,4 +31,5 @@ connective_layers_list = (
FeedForwardToVector,
Convolutional3DToConvolutional3D,
Convolutional2DToConvolutional2D,
ImageToConvolutional3DLayer,
)


@ -9,7 +9,7 @@ class Convolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
def __init__(self, num_feature_maps, feature_map_width, feature_map_height,
filter_width, filter_height, cell_width=0.2, filter_spacing=0.1, color=BLUE,
pulse_color=ORANGE, filter_color=ORANGE, stride=1, stroke_width=2.0, **kwargs):
super(VGroupNeuralNetworkLayer, self).__init__(**kwargs)
super().__init__(**kwargs)
self.num_feature_maps = num_feature_maps
self.feature_map_height = feature_map_height
self.filter_color = filter_color
@ -25,6 +25,17 @@ class Convolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
# Make the feature maps
self.feature_maps = self.construct_feature_maps()
self.add(self.feature_maps)
# Rotate stuff properly
self.rotate(
ThreeDLayer.three_d_x_rotation,
about_point=self.get_center(),
axis=[1, 0, 0]
)
self.rotate(
ThreeDLayer.three_d_y_rotation,
about_point=self.get_center(),
axis=[0, 1, 0]
)
def construct_feature_maps(self):
"""Creates the neural network layer"""
@ -32,7 +43,6 @@ class Convolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
feature_maps = VGroup()
for filter_index in range(self.num_feature_maps):
rectangle = GriddedRectangle(
center=[0, 0, filter_index * self.filter_spacing], # Center coordinate
color=self.color,
height=self.feature_map_height * self.cell_width,
width=self.feature_map_width * self.cell_width,
@ -44,11 +54,16 @@ class Convolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
# grid_ystep=self.cell_width,
# grid_stroke_width=DEFAULT_STROKE_WIDTH/2
)
rectangle.move_to(
[0, 0, filter_index * self.filter_spacing]
)
# Rotate about z axis
"""
rectangle.rotate_about_origin(
90 * DEGREES,
np.array([0, 1, 0])
)
"""
feature_maps.add(rectangle)
return feature_maps
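As a rough standalone illustration (a sketch in which plain Manim Rectangles stand in for GriddedRectangle), the placement logic above amounts to stacking one rectangle per feature map along z and then tilting the whole stack by the static ThreeDLayer angles:

import numpy as np
from manim import *

num_feature_maps, feature_map_size, cell_width, filter_spacing = 3, 5, 0.2, 0.1

feature_maps = VGroup()
for filter_index in range(num_feature_maps):
    rectangle = Rectangle(
        width=feature_map_size * cell_width,
        height=feature_map_size * cell_width,
        fill_opacity=0.2,
        stroke_width=2.0,
    )
    # Stack the maps along z, one filter_spacing apart.
    rectangle.move_to([0, 0, filter_index * filter_spacing])
    feature_maps.add(rectangle)

# Tilt the stack as a whole, mirroring the rotate() calls above
# (three_d_x_rotation is 0 degrees in this commit, so only the y rotation matters here).
feature_maps.rotate(75 * DEGREES, axis=np.array([0, 1, 0]), about_point=feature_maps.get_center())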


@ -1,12 +1,20 @@
from manim import *
from manim_ml.neural_network.layers.convolutional3d import Convolutional3DLayer
from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer, ThreeDLayer
from manim_ml.gridded_rectangle import GriddedRectangle, CornersRectangle
from manim_ml.gridded_rectangle import GriddedRectangle
from manim.utils.space_ops import rotation_matrix
class Filters(VGroup):
"""Group for showing a collection of filters connecting two layers"""
def __init__(self, input_layer, output_layer, line_color=ORANGE, stroke_width=2.0):
def __init__(
self,
input_layer,
output_layer,
line_color=ORANGE,
stroke_width=2.0,
):
super().__init__()
self.input_layer = input_layer
self.output_layer = output_layer
@ -29,7 +37,6 @@ class Filters(VGroup):
for index, feature_map in enumerate(self.input_layer.feature_maps):
rectangle = GriddedRectangle(
center=feature_map.get_center(),
width=rectangle_width,
height=rectangle_height,
fill_color=filter_color,
@ -38,21 +45,20 @@ class Filters(VGroup):
z_index=2,
stroke_width=self.stroke_width,
)
# Center on feature map
# rectangle.move_to(feature_map.get_center())
# Rotate so it is in the yz plane
rectangle.rotate(
90 * DEGREES,
ThreeDLayer.three_d_x_rotation,
about_point=rectangle.get_center(),
axis=[1, 0, 0]
)
rectangle.rotate(
ThreeDLayer.three_d_y_rotation,
about_point=rectangle.get_center(),
axis=[0, 1, 0]
)
# Get the feature map top left corner
feature_map_top_left = feature_map.get_corners_dict(inner_rectangle=True)["top_left"]
rectangle_top_left = rectangle.get_corners_dict()["top_left"]
# Move the rectangle to the corner location
rectangle.next_to(
feature_map_top_left,
submobject_to_align=rectangle_top_left,
buff=0.0
# Move the rectangle to the corner of the feature map
rectangle.move_to(
feature_map,
aligned_edge=np.array([-1, 1, 0])
)
rectangles.append(rectangle)
@ -70,7 +76,6 @@ class Filters(VGroup):
for index, feature_map in enumerate(self.output_layer.feature_maps):
rectangle = GriddedRectangle(
center=feature_map.get_center(),
width=rectangle_width,
height=rectangle_height,
fill_color=filter_color,
@ -81,21 +86,22 @@ class Filters(VGroup):
)
# Center on feature map
# rectangle.move_to(feature_map.get_center())
# Rotate so it is in the yz plane
# Rotate the rectangle
rectangle.rotate(
90 * DEGREES,
ThreeDLayer.three_d_x_rotation,
about_point=rectangle.get_center(),
axis=[1, 0, 0]
)
rectangle.rotate(
ThreeDLayer.three_d_y_rotation,
about_point=rectangle.get_center(),
axis=[0, 1, 0]
)
# Get the feature map top left corner
feature_map_top_left = feature_map.get_corners_dict(inner_rectangle=True)["top_left"]
rectangle_top_left = rectangle.get_corners_dict()["top_left"]
# Move the rectangle to the corner location
rectangle.next_to(
feature_map_top_left,
submobject_to_align=rectangle_top_left,
buff=0.0
rectangle.move_to(
feature_map,
aligned_edge=np.array([-1, 1, 0])
)
rectangles.append(rectangle)
feature_map_rectangles = VGroup(*rectangles)
@ -202,6 +208,9 @@ class Convolutional3DToConvolutional3D(ConnectiveLayer, ThreeDLayer):
self.filter_opacity = filter_opacity
self.line_color = line_color
self.pulse_color = pulse_color
# Make filters
self.filters = Filters(self.input_layer, self.output_layer)
self.add(self.filters)
def make_filter_propagation_animation(self):
"""Make filter propagation animation"""
@ -219,32 +228,71 @@ class Convolutional3DToConvolutional3D(ConnectiveLayer, ThreeDLayer):
return animation_group
def get_rotated_shift_vectors(self):
"""
Rotates the shift vectors
"""
x_rot_mat = rotation_matrix(
ThreeDLayer.three_d_x_rotation,
[1, 0, 0]
)
y_rot_mat = rotation_matrix(
ThreeDLayer.three_d_y_rotation,
[0, 1, 0]
)
# Make base shift vectors
right_shift = np.array([self.input_layer.cell_width, 0, 0])
down_shift = np.array([0, -self.input_layer.cell_width, 0])
# Rotate the vectors
right_shift = np.dot(right_shift, x_rot_mat.T)
right_shift = np.dot(right_shift, y_rot_mat.T)
down_shift = np.dot(down_shift, x_rot_mat.T)
down_shift = np.dot(down_shift, y_rot_mat.T)
return right_shift, down_shift
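A small numeric sketch of what get_rotated_shift_vectors computes, using the static angles from this commit (0 degrees about x, 75 degrees about y) and cell_width = 0.2:

import numpy as np
from manim import DEGREES
from manim.utils.space_ops import rotation_matrix

cell_width = 0.2
x_rot_mat = rotation_matrix(0 * DEGREES, [1, 0, 0])
y_rot_mat = rotation_matrix(75 * DEGREES, [0, 1, 0])

right_shift = np.array([cell_width, 0, 0])
# np.dot(v, R.T) applies R to the row vector v (it equals R @ v),
# so the base shift is carried into the tilted feature-map plane.
right_shift = np.dot(np.dot(right_shift, x_rot_mat.T), y_rot_mat.T)
print(right_shift)  # approx [0.2*cos(75deg), 0, -0.2*sin(75deg)] ~= [0.052, 0, -0.193]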
def make_forward_pass_animation(self, layer_args={}, run_time=10.5, **kwargs):
"""Forward pass animation from conv2d to conv2d"""
animations = []
# Create the filters, output nodes (feature map square), and lines
filters = Filters(self.input_layer, self.output_layer)
self.add(filters)
# Rotate given three_d_phi and three_d_theta
# Rotate about center
# filters.rotate(110 * DEGREES, about_point=filters.get_center(), axis=[0, 0, 1])
"""
self.filters.rotate(
ThreeDLayer.three_d_x_rotation,
about_point=self.filters.get_center(),
axis=[1, 0, 0]
)
self.filters.rotate(
ThreeDLayer.three_d_y_rotation,
about_point=self.filters.get_center(),
axis=[0, 1, 0]
)
"""
# Get shift vectors
right_shift, down_shift = self.get_rotated_shift_vectors()
left_shift = -1 * right_shift
# filters.rotate(ThreeDLayer.three_d_theta, axis=[0, 0, 1])
# filters.rotate(ThreeDLayer.three_d_phi, axis=-filters.get_center())
# Make animations for creating the filters, output_nodes, and filter_lines
# TODO decide if I want to create the filters at the start of a conv
# animation or have them there by default
# animations.append(
# Create(filters)
# )
# Make shift amounts
right_shift = np.array([0, self.input_layer.cell_width, 0])# * 1.55
left_shift = np.array([0, -1*self.input_layer.cell_width, 0])# * 1.55
up_shift = np.array([0, 0, -1*self.input_layer.cell_width])# * 1.55
down_shift = np.array([0, 0, self.input_layer.cell_width])# * 1.55
# Rotate the base shift vectors
# Make filter shifting animations
num_y_moves = int((self.feature_map_height - self.filter_height) / self.stride)
num_x_moves = int((self.feature_map_width - self.filter_width) / self.stride)
for y_move in range(num_y_moves):
# Go right num_x_moves
for x_move in range(num_x_moves):
print(right_shift)
# Shift right
shift_animation = ApplyMethod(
filters.shift,
self.filters.shift,
self.stride * right_shift
)
# shift_animation = self.animate.shift(right_shift)
@ -254,7 +302,7 @@ class Convolutional3DToConvolutional3D(ConnectiveLayer, ThreeDLayer):
shift_amount = self.stride * num_x_moves * left_shift + self.stride * down_shift
# Make the animation
shift_animation = ApplyMethod(
filters.shift,
self.filters.shift,
shift_amount
)
animations.append(shift_animation)
@ -262,7 +310,7 @@ class Convolutional3DToConvolutional3D(ConnectiveLayer, ThreeDLayer):
for x_move in range(num_x_moves):
# Shift right
shift_animation = ApplyMethod(
filters.shift,
self.filters.shift,
self.stride * right_shift
)
# shift_animation = self.animate.shift(right_shift)
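For concreteness, the move-count arithmetic above for a 5x5 feature map, a 3x3 filter, and stride 1 (the configuration used in the example scene later in this diff):

feature_map_width = feature_map_height = 5
filter_width = filter_height = 3
stride = 1

num_x_moves = int((feature_map_width - filter_width) / stride)    # 2 shifts right per row
num_y_moves = int((feature_map_height - filter_height) / stride)  # 2 shifts down

# The filter therefore visits (num_x_moves + 1) * (num_y_moves + 1) = 9 positions,
# matching the 3x3 output feature map of the following layer.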


@ -1,3 +1,6 @@
from typing import List, Union
import numpy as np
from manim import *
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer
@ -9,7 +12,7 @@ class FeedForwardToFeedForward(ConnectiveLayer):
def __init__(self, input_layer, output_layer, passing_flash=True,
dot_radius=0.05, animation_dot_color=RED, edge_color=WHITE,
edge_width=1.5, **kwargs):
edge_width=1.5, camera=None, **kwargs):
super().__init__(input_layer, output_layer, input_class=FeedForwardLayer, output_class=FeedForwardLayer,
**kwargs)
self.passing_flash = passing_flash
@ -49,14 +52,27 @@ class FeedForwardToFeedForward(ConnectiveLayer):
path_animations = []
dots = []
for edge in self.edges:
dot = Dot(color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius)
dot = Dot(
color=self.animation_dot_color,
fill_opacity=1.0,
radius=self.dot_radius
)
# Add to dots group
dots.append(dot)
# Make the animation
if self.passing_flash:
anim = ShowPassingFlash(edge.copy().set_color(self.animation_dot_color), time_width=0.2)
copy_edge = edge.copy()
anim = ShowPassingFlash(
copy_edge.set_color(self.animation_dot_color),
time_width=0.2
)
else:
anim = MoveAlongPath(dot, edge, run_time=run_time, rate_function=sigmoid)
anim = MoveAlongPath(
dot,
edge,
run_time=run_time,
rate_function=sigmoid
)
path_animations.append(anim)
if not self.passing_flash:
@ -67,6 +83,36 @@ class FeedForwardToFeedForward(ConnectiveLayer):
return path_animations
def modify_edge_colors(
self,
colors=None,
magnitudes=None,
color_scheme="inferno"
):
"""Changes the colors of edges"""
# TODO implement
pass
def modify_edge_stroke_widths(self, widths):
"""Changes the widths of the edges"""
assert len(widths) > 0
# Note: 1d-arrays are assumed to be in row major order
widths = np.array(widths)
widths = widths.flatten()
# Check thickness size
assert np.shape(widths)[0] == len(self.edges)
# Make animation
animations = []
for index, edge in enumerate(self.edges):
width = widths[index]
change_width = edge.animate.set_stroke_width(width)
animations.append(change_width)
animation_group = AnimationGroup(*animations)
return animation_group
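A hedged usage sketch of modify_edge_stroke_widths; the module paths and the EdgeWidthScene name follow the package layout implied by this diff and are otherwise assumptions:

import numpy as np
from manim import *
from manim_ml.neural_network.neural_network import NeuralNetwork
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer

class EdgeWidthScene(Scene):
    def construct(self):
        nn = NeuralNetwork([FeedForwardLayer(3), FeedForwardLayer(2)])
        self.add(nn)
        # The single connective between the two layers is a FeedForwardToFeedForward.
        connective = list(nn.connective_layers)[0]
        # One width per edge (3 * 2 = 6 edges), row-major as the method expects.
        widths = np.array([[0.5, 2.0], [3.0, 0.2], [1.0, 1.5]])
        self.play(connective.modify_edge_stroke_widths(widths))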
@override_animation(Create)
def _create_override(self, **kwargs):
animations = []


@ -13,9 +13,11 @@ class ImageLayer(NeuralNetworkLayer):
self.show_image_on_create = show_image_on_create
if len(np.shape(self.numpy_image)) == 2:
# Assumed Grayscale
self.num_channels = 1
self.image_mobject = GrayscaleImageMobject(self.numpy_image, height=height)
elif len(np.shape(self.numpy_image)) == 3:
# Assumed RGB
self.num_channels = 3
self.image_mobject = ImageMobject(self.numpy_image).scale_to_fit_height(height)
self.add(self.image_mobject)


@ -0,0 +1,84 @@
import numpy as np
from manim import *
from manim_ml.neural_network.layers.convolutional3d import Convolutional3DLayer
from manim_ml.neural_network.layers.image import ImageLayer
from manim_ml.neural_network.layers.parent_layers import ThreeDLayer, VGroupNeuralNetworkLayer
from manim_ml.gridded_rectangle import GriddedRectangle
class ImageToConvolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
"""Handles rendering a convolutional layer for a nn"""
input_class = ImageLayer
output_class = Convolutional3DLayer
def __init__(self, input_layer: ImageLayer, output_layer: Convolutional3DLayer, **kwargs):
super().__init__(input_layer, output_layer, **kwargs)
self.input_layer = input_layer
self.output_layer = output_layer
def make_forward_pass_animation(
self,
run_time=5,
layer_args={},
**kwargs
):
"""Maps image to convolutional layer"""
# Transform the image from the input layer to the
num_image_channels = self.input_layer.num_channels
if num_image_channels == 3:
return self.rbg_image_animation()
elif num_image_channels == 1:
return self.grayscale_image_animation()
else:
raise Exception(f"Unrecognized number of image channels: {num_image_channels}")
def rbg_image_animation(self):
"""Handles animation for 3 channel image"""
image_mobject = self.input_layer.image_mobject
# TODO get each color channel and turn it into an image
# TODO create image mobjects for each channel and transform
# it to the feature maps of the output_layer
raise NotImplementedError()
pass
def grayscale_image_animation(self):
"""Handles animation for 1 channel image"""
animations = []
image_mobject = self.input_layer.image_mobject
target_feature_map = self.output_layer.feature_maps[0]
# Make the object 3D by adding it back into camera frame
def remove_fixed_func(image_mobject):
# self.camera.remove_fixed_orientation_mobjects(image_mobject)
# self.camera.remove_fixed_in_frame_mobjects(image_mobject)
return image_mobject
remove_fixed = ApplyFunction(
remove_fixed_func,
image_mobject
)
animations.append(remove_fixed)
# Make a transformation of the image_mobject to the first feature map
input_to_feature_map_transformation = Transform(image_mobject, target_feature_map)
animations.append(input_to_feature_map_transformation)
# Make the object fixed in 2D again
def make_fixed_func(image_mobject):
# self.camera.add_fixed_orientation_mobjects(image_mobject)
# self.camera.add_fixed_in_frame_mobjects(image_mobject)
return image_mobject
make_fixed = ApplyFunction(
make_fixed_func,
image_mobject
)
animations.append(make_fixed)
return AnimationGroup()
return AnimationGroup(*animations)
def scale(self, scale_factor, **kwargs):
super().scale(scale_factor, **kwargs)
@override_animation(Create)
def _create_override(self, **kwargs):
return AnimationGroup()


@ -4,10 +4,13 @@ from abc import ABC, abstractmethod
class NeuralNetworkLayer(ABC, Group):
"""Abstract Neural Network Layer class"""
def __init__(self, text=None, **kwargs):
def __init__(self, text=None, *args, **kwargs):
super(Group, self).__init__()
self.title_text = kwargs["title"] if "title" in kwargs else " "
self.title = Text(self.title_text, font_size=DEFAULT_FONT_SIZE/3).scale(0.6)
self.title = Text(
self.title_text,
font_size=DEFAULT_FONT_SIZE/3
).scale(0.6)
self.title.next_to(self, UP, 1.2)
# self.add(self.title)
@ -24,8 +27,9 @@ class NeuralNetworkLayer(ABC, Group):
class VGroupNeuralNetworkLayer(NeuralNetworkLayer):
def __init__(self, **kwargs):
super().__init__(**kwargs)
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
# self.camera = camera
@abstractmethod
def make_forward_pass_animation(self, **kwargs):
@ -37,24 +41,21 @@ class VGroupNeuralNetworkLayer(NeuralNetworkLayer):
class ThreeDLayer(ABC):
"""Abstract class for 3D layers"""
def __init__(self):
pass
# Angle of ThreeD layers is static context
three_d_x_rotation = 0 * DEGREES #-90 * DEGREES
three_d_y_rotation = 75 * DEGREES # -10 * DEGREES
class ConnectiveLayer(VGroupNeuralNetworkLayer):
"""Forward pass animation for a given pair of layers"""
@abstractmethod
def __init__(self, input_layer, output_layer, input_class=None, output_class=None,
**kwargs):
def __init__(self, input_layer, output_layer, **kwargs):
super(VGroupNeuralNetworkLayer, self).__init__(**kwargs)
self.input_layer = input_layer
self.output_layer = output_layer
self.input_class = input_class
self.output_class = output_class
# Handle input and output class
assert isinstance(input_layer, self.input_class)
assert isinstance(output_layer, self.output_class)
# assert isinstance(input_layer, self.input_class), f"{input_layer}, {self.input_class}"
# assert isinstance(output_layer, self.output_class), f"{output_layer}, {self.output_class}"
@abstractmethod
def make_forward_pass_animation(self, run_time=2.0, layer_args={}, **kwargs):
@ -67,10 +68,8 @@ class ConnectiveLayer(VGroupNeuralNetworkLayer):
class BlankConnective(ConnectiveLayer):
"""Connective layer to be used when the given pair of layers is undefined"""
def __init__(self, input_layer, output_layer, input_class=None, output_class=None, **kwargs):
input_class = input_layer.__class__
output_class = output_layer.__class__
super().__init__(input_layer, output_layer, input_class, output_class, **kwargs)
def __init__(self, input_layer, output_layer, **kwargs):
super().__init__(input_layer, output_layer, **kwargs)
def make_forward_pass_animation(self, run_time=1.5, layer_args={}, **kwargs):
return AnimationGroup(run_time=run_time)


@ -1,25 +1,27 @@
from manim import *
import warnings
from manim_ml.neural_network.layers.parent_layers import BlankConnective
from manim import *
from manim_ml.neural_network.layers.parent_layers import BlankConnective, ThreeDLayer
from ..layers import connective_layers_list
def get_connective_layer(input_layer, output_layer):
"""
Deduces the relevant connective layer
"""
connective_layer = None
for connective_layer_class in connective_layers_list:
input_class = connective_layer_class.input_class
output_class = connective_layer_class.output_class
connective_layer_class = None
for candidate_class in connective_layers_list:
input_class = candidate_class.input_class
output_class = candidate_class.output_class
if isinstance(input_layer, input_class) \
and isinstance(output_layer, output_class):
connective_layer = connective_layer_class(input_layer, output_layer)
connective_layer_class = candidate_class
break
if connective_layer is None:
connective_layer = BlankConnective(input_layer, output_layer)
"""
raise Exception(f"Unrecognized class pair {input_layer.__class__.__name__}" + \
f" and {output_layer.__class__.__name__}")
"""
if connective_layer_class is None:
connective_layer_class = BlankConnective
warnings.warn(f"Unrecognized input/output layer pair: {input_layer.__class__.__name__} and {output_layer.__class__.__name__}")
# Make the instance now
connective_layer = connective_layer_class(input_layer, output_layer)
return connective_layer
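A brief sketch of the lookup above; the util module path is an assumption based on the relative imports shown in this diff:

from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.layers.util import get_connective_layer  # assumed path

input_layer, output_layer = FeedForwardLayer(4), FeedForwardLayer(3)
connective = get_connective_layer(input_layer, output_layer)
print(type(connective).__name__)  # FeedForwardToFeedForward
# An unrecognized pair instead emits a warning and falls back to BlankConnective.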


@ -9,10 +9,10 @@ Example:
# Create the object with default style settings
NeuralNetwork(layer_node_count)
"""
from manim import *
import textwrap
from manim_ml.neural_network.layers.embedding import EmbeddingLayer
from manim import *
from manim_ml.neural_network.layers.embedding import EmbeddingLayer
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer, ThreeDLayer
from manim_ml.neural_network.layers.util import get_connective_layer
@ -24,8 +24,8 @@ class NeuralNetwork(Group):
def __init__(self, input_layers, edge_color=WHITE, layer_spacing=0.2,
animation_dot_color=RED, edge_width=2.5, dot_radius=0.03,
title=" ", camera=None, camera_phi=-70 * DEGREES,
camera_theta=-80 * DEGREES):
title=" ", three_d_phi=-70 * DEGREES,
three_d_theta=-80 * DEGREES):
super(Group, self).__init__()
self.input_layers = ListGroup(*input_layers)
self.edge_width = edge_width
@ -35,17 +35,31 @@ class NeuralNetwork(Group):
self.dot_radius = dot_radius
self.title_text = title
self.created = False
self.camera = camera
# Set the camera orientation for 3D Layers
if not self.camera is None:
self.camera.set_phi(camera_phi)
self.camera.set_theta(camera_theta)
# Make the layer fixed in frame if its not 3D
ThreeDLayer.three_d_theta = three_d_theta
ThreeDLayer.three_d_phi = three_d_phi
"""
for layer in self.input_layers:
if not isinstance(layer, ThreeDLayer):
self.camera.add_fixed_orientation_mobjects(layer)
self.camera.add_fixed_in_frame_mobjects(layer)
"""
# TODO take layer_node_count [0, (1, 2), 0]
# and make it have explicit distinct subspaces
# Add camera to input layers
"""
for input_layer in input_layers:
if input_layer.camera is None:
input_layer.camera = self.camera
"""
# Place the layers
self._place_layers()
self.connective_layers, self.all_layers = self._construct_connective_layers()
# Make overhead title
self.title = Text(self.title_text, font_size=DEFAULT_FONT_SIZE/2)
self.title = Text(
self.title_text,
font_size=DEFAULT_FONT_SIZE/2
)
self.title.next_to(self, UP, 1.0)
self.add(self.title)
# Place layers at correct z index
@ -56,6 +70,12 @@ class NeuralNetwork(Group):
self.add(self.all_layers)
# Print neural network
print(repr(self))
# Set the camera orientation for 3D Layers
"""
if not self.camera is None and isinstance(self.camera, ThreeDCamera):
self.camera.set_phi(camera_phi)
self.camera.set_theta(camera_theta)
"""
def _place_layers(self):
"""Creates the neural network"""
@ -79,10 +99,6 @@ class NeuralNetwork(Group):
all_layers = ListGroup()
for layer_index in range(len(self.input_layers) - 1):
current_layer = self.input_layers[layer_index]
# Make the layer fixed in frame if its not 3D
if not isinstance(current_layer, ThreeDLayer):
self.camera.add_fixed_orientation_mobjects(current_layer)
self.camera.add_fixed_in_frame_mobjects(current_layer)
# Add the layer to the list of layers
all_layers.add(current_layer)
next_layer = self.input_layers[layer_index + 1]
@ -95,17 +111,21 @@ class NeuralNetwork(Group):
next_layer = next_layer.all_layers[0]
# Find connective layer with correct layer pair
connective_layer = get_connective_layer(current_layer, next_layer)
connective_layers.add(connective_layer)
# Make the layer fixed in frame if its not 3D
if not isinstance(current_layer, ThreeDLayer):
"""
if not isinstance(connective_layer, ThreeDLayer):
# Make the layer fixed in frame if its not 3D
self.camera.add_fixed_orientation_mobjects(connective_layer)
self.camera.add_fixed_in_frame_mobjects(connective_layer)
"""
connective_layers.add(connective_layer)
# Add the layer to the list of layers
all_layers.add(connective_layer)
# Check if final layer is a 3D layer
"""
if not isinstance(self.input_layers[-1], ThreeDLayer):
self.camera.add_fixed_orientation_mobjects(self.input_layers[-1])
self.camera.add_fixed_in_frame_mobjects(self.input_layers[-1])
"""
# Add final layer
all_layers.add(self.input_layers[-1])
# Handle layering


@ -9,7 +9,7 @@ class RemoveLayer(AnimationGroup):
Animation for removing a layer from a neural network.
Note: I needed to do something strange for creating the new connective layer.
The issue with creating it intially is that the positions of the sides of the
The issue with creating it initially is that the positions of the sides of the
connective layer depend upon the location of the moved layers **after** the
move animations are performed. However, all of these animations are performed
after the animations have been created. This means that the animation depends upon
@ -142,7 +142,7 @@ class RemoveLayer(AnimationGroup):
if self.anim_count == 1:
if not self.before_layer is None and not self.after_layer is None:
print(neural_network)
new_connective = get_connective_layer(self.before_layer, self.after_layer)
new_connective_class = get_connective_layer(self.before_layer, self.after_layer)
before_layer_index = neural_network.all_layers.index_of(self.before_layer) + 1
neural_network.all_layers.insert(before_layer_index, new_connective)
print(neural_network)

manim_ml/scene.py Normal file

@ -0,0 +1,17 @@
from manim import *
class ManimML3DScene(ThreeDScene):
"""
This is a wrapper class for the Manim ThreeDScene
Note: the primary purpose of this is to make it so
that everything inside of a layer
"""
def __init__(self, *args, **kwargs):
super().__init__(*args, **kwargs)
def play(self):
"""
"""
pass


@ -20,21 +20,24 @@ class CombinedScene(ThreeDScene):
nn = NeuralNetwork(
[
ImageLayer(numpy_image, height=1.4),
Convolutional3DLayer(1, 5, 5, 3, 3, filter_spacing=0.2),
Convolutional3DLayer(2, 3, 3, 1, 1, filter_spacing=0.2),
Convolutional3DLayer(1, 7, 7, 3, 3, filter_spacing=0.2),
Convolutional3DLayer(3, 5, 5, 3, 3, filter_spacing=0.2),
Convolutional3DLayer(5, 3, 3, 1, 1, filter_spacing=0.2),
FeedForwardLayer(3, rectangle_stroke_width=4, node_stroke_width=4),
FeedForwardLayer(3, rectangle_stroke_width=4, node_stroke_width=4),
],
layer_spacing=0.5,
camera=self.camera
# camera=self.camera
)
nn.scale(1.3)
# Center the nn
nn.move_to(ORIGIN)
self.add(nn)
nn.move_to(ORIGIN)
# Play animation
forward_pass = nn.make_forward_pass_animation(
corner_pulses=False
corner_pulses=False,
layer_args={
"all_filters_at_once": True
}
)
self.play(
forward_pass


@ -10,10 +10,14 @@ config.frame_width = 12.0
class TestConv2d(Scene):
def construct(self):
nn = NeuralNetwork([
Convolutional2DLayer(5, 5, 3, 3, cell_width=0.5, stride=1),
Convolutional2DLayer(3, 3, 2, 2, cell_width=0.5, stride=1),
], layer_spacing=1.5)
nn = NeuralNetwork(
[
Convolutional2DLayer(5, 5, 3, 3, cell_width=0.5, stride=1),
Convolutional2DLayer(3, 3, 2, 2, cell_width=0.5, stride=1),
],
layer_spacing=1.5,
camera=self.camera
)
# Center the nn
nn.scale(1.3)
nn.move_to(ORIGIN)


@ -0,0 +1,6 @@
"""
Tests for feed forward to feed forward weight
change animations.
"""
class