From 58aec269cf2dea91484d83646b5a72c477a2e7d8 Mon Sep 17 00:00:00 2001 From: Alec Helbling Date: Sun, 15 May 2022 13:42:21 -0400 Subject: [PATCH] Convolutional Layers --- manim_ml/lazy_animation.py | 13 ++ manim_ml/neural_network/layers/__init__.py | 1 + .../convolutional2d_to_convolutional2d.py | 217 ++++++++++++++++++ ... => convolutional3d_to_convolutional3d.py} | 17 +- .../neural_network/layers/convolutional_2d.py | 43 ++++ .../{convolutional.py => convolutional_3d.py} | 32 ++- .../neural_network/layers/parent_layers.py | 6 +- manim_ml/neural_network/neural_network.py | 26 ++- setup.py | 8 + tests/test_convolutional_2d_layer.py | 26 +++ ...ayer.py => test_convolutional_3d_layer.py} | 16 +- 11 files changed, 376 insertions(+), 29 deletions(-) create mode 100644 manim_ml/lazy_animation.py create mode 100644 manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py rename manim_ml/neural_network/layers/{convolutional_to_convolutional.py => convolutional3d_to_convolutional3d.py} (81%) create mode 100644 manim_ml/neural_network/layers/convolutional_2d.py rename manim_ml/neural_network/layers/{convolutional.py => convolutional_3d.py} (78%) create mode 100644 setup.py create mode 100644 tests/test_convolutional_2d_layer.py rename tests/{test_convolutional_layer.py => test_convolutional_3d_layer.py} (77%) diff --git a/manim_ml/lazy_animation.py b/manim_ml/lazy_animation.py new file mode 100644 index 0000000..4bb4f99 --- /dev/null +++ b/manim_ml/lazy_animation.py @@ -0,0 +1,13 @@ +from manim import * + +class LazyAnimation(Animation): + + def __init__(self, animation_function): + self.animation_function = animation_function + super().__init__() + + def begin(self): + update_func_anim = UpdateFromFunc(self.neural_network, create_new_connective) + self.add + + super().begin() \ No newline at end of file diff --git a/manim_ml/neural_network/layers/__init__.py b/manim_ml/neural_network/layers/__init__.py index f597e55..224a1c1 100644 --- 
a/manim_ml/neural_network/layers/__init__.py +++ b/manim_ml/neural_network/layers/__init__.py @@ -29,4 +29,5 @@ connective_layers_list = ( PairedQueryToFeedForward, FeedForwardToVector, Convolutional3DToConvolutional3D, + Convolutional2DToConvolutional2D, ) diff --git a/manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py b/manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py new file mode 100644 index 0000000..79dee83 --- /dev/null +++ b/manim_ml/neural_network/layers/convolutional2d_to_convolutional2d.py @@ -0,0 +1,217 @@ +from cv2 import line +from manim import * +from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer +from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer + +class Convolutional2DToConvolutional2D(ConnectiveLayer): + """2D Conv to 2d Conv""" + input_class = Convolutional2DLayer + output_class = Convolutional2DLayer + + def __init__(self, input_layer, output_layer, color=WHITE, + filter_opacity=0.3, line_color=WHITE, pulse_color=ORANGE, **kwargs): + super().__init__(input_layer, output_layer, input_class=Convolutional2DLayer, + output_class=Convolutional2DLayer, **kwargs) + self.color = color + self.filter_color = self.input_layer.filter_color + self.filter_width = self.input_layer.filter_width + self.filter_height = self.input_layer.filter_height + self.feature_map_width = self.input_layer.feature_map_width + self.feature_map_height = self.input_layer.feature_map_height + self.cell_width = self.input_layer.cell_width + self.stride = self.input_layer.stride + self.filter_opacity = filter_opacity + self.line_color = line_color + self.pulse_color = pulse_color + + @override_animation(Create) + def _create_override(self, **kwargs): + return AnimationGroup() + + def make_filter(self): + """Make filter object""" + # Make opaque rectangle + filter = Rectangle( + color=self.filter_color, + fill_color=self.filter_color, + width=self.cell_width * self.filter_width, + 
height=self.cell_width * self.filter_height, + grid_xstep=self.cell_width, + grid_ystep=self.cell_width, + fill_opacity=self.filter_opacity + ) + # Move filter to top left of feature map + filter.move_to(self.input_layer.feature_map.get_corner(LEFT + UP), aligned_edge=LEFT + UP) + + return filter + + def make_output_node(self): + """Put output node in top left corner of output feature map""" + # Make opaque rectangle + filter = Rectangle( + color=self.filter_color, + fill_color=self.filter_color, + width=self.cell_width, + height=self.cell_width, + fill_opacity=self.filter_opacity + ) + # Move filter to top left of feature map + filter.move_to(self.output_layer.feature_map.get_corner(LEFT + UP), aligned_edge=LEFT + UP) + + return filter + + def make_filter_propagation_animation(self): + """Make filter propagation animation""" + old_z_index = self.filter_lines.z_index + lines_copy = self.filter_lines.copy().set_color(ORANGE).set_z_index(old_z_index + 1) + animation_group = AnimationGroup( + Create(lines_copy, lag_ratio=0.0), + # FadeOut(self.filter_lines), + FadeOut(lines_copy), + lag_ratio=1.0 + ) + + return animation_group + + def make_filter_lines(self): + """Lines connecting input filter with output node""" + filter_lines = [] + corner_directions = [LEFT + UP, RIGHT + UP, RIGHT + DOWN, LEFT + DOWN] + for corner_direction in corner_directions: + filter_corner = self.filter.get_corner(corner_direction) + output_corner = self.output_node.get_corner(corner_direction) + line = Line(filter_corner, output_corner, stroke_color=self.line_color) + filter_lines.append(line) + + filter_lines = VGroup(*filter_lines) + filter_lines.set_z_index(5) + # Make updater that links the lines to the filter and output node + def filter_updater(filter_lines): + for corner_index, corner_direction in enumerate(corner_directions): + line = filter_lines[corner_index] + filter_corner = self.filter.get_corner(corner_direction) + output_corner = self.output_node.get_corner(corner_direction) + 
#line._set_start_and_end_attrs(filter_corner, output_corner) + # line.put_start_and_end_on(filter_corner, output_corner) + line.set_points_by_ends(filter_corner, output_corner) + # line._set_start_and_end_attrs(filter_corner, output_corner) + # line.set_points([filter_corner, output_corner]) + + filter_lines.add_updater(filter_updater) + + return filter_lines + + def make_assets(self): + """Make all of the assets""" + # Make the filter + self.filter = self.make_filter() + self.add(self.filter) + # Make output node + self.output_node = self.make_output_node() + self.add(self.output_node) + # Make filter lines + self.filter_lines = self.make_filter_lines() + self.add(self.filter_lines) + + super().set_z_index(5) + + def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs): + """Forward pass animation from conv2d to conv2d""" + # Make assets + self.make_assets() + self.lines_copies = VGroup() + self.add(self.lines_copies) + # Make the animations + animations = [] + # Create filter animation + animations.append( + AnimationGroup( + Create(self.filter), + Create(self.output_node), + # Create(self.filter_lines) + ) + ) + # Make scan filter animation + num_y_moves = int((self.feature_map_height - self.filter_height) / self.stride) + 1 + num_x_moves = int((self.feature_map_width - self.filter_width) / self.stride) + for y_location in range(num_y_moves): + if y_location > 0: + # Shift filter back to start and down + shift_animation = ApplyMethod( + self.filter.shift, + np.array([ + -self.cell_width * (self.feature_map_width - self.filter_width), + -self.stride * self.cell_width, + 0 + ]) + ) + # Shift output node + shift_output_node = ApplyMethod( + self.output_node.shift, + np.array([ + -(self.output_layer.feature_map_width - 1) * self.cell_width, + -self.cell_width, + 0 + ]) + ) + # Make animation group + animation_group = AnimationGroup( + shift_animation, + shift_output_node, + ) + animations.append(animation_group) + # Make filter passing flash + # 
animation = self.make_filter_propagation_animation() + animations.append(Create(self.filter_lines, lag_ratio=0.0)) + # animations.append(animation) + + for x_location in range(num_x_moves): + # Shift filter right + shift_animation = ApplyMethod( + self.filter.shift, + np.array([self.stride * self.cell_width, 0, 0]) + ) + # Shift output node + shift_output_node = ApplyMethod( + self.output_node.shift, + np.array([self.cell_width, 0, 0]) + ) + # Make animation group + animation_group = AnimationGroup( + shift_animation, + shift_output_node, + ) + animations.append(animation_group) + # Make filter passing flash + old_z_index = self.filter_lines.z_index + lines_copy = self.filter_lines.copy().set_color(ORANGE).set_z_index(old_z_index + 1) + # self.add(lines_copy) + # self.lines_copies.add(lines_copy) + animations.append(Create(self.filter_lines, lag_ratio=0.0)) + # animations.append(FadeOut(self.filter_lines)) + # animation = self.make_filter_propagation_animation() + # animations.append(animation) + # animations.append(Create(self.filter_lines, lag_ratio=1.0)) + # animations.append(FadeOut(self.filter_lines)) + # Fade out + animations.append( + AnimationGroup( + FadeOut(self.filter), + FadeOut(self.output_node), + FadeOut(self.filter_lines) + ) + ) + # Make animation group + animation_group = Succession( + *animations, + lag_ratio=1.0 + ) + return animation_group + + def set_z_index(self, z_index, family=False): + """Override set_z_index""" + super().set_z_index(4) + + def scale(self, scale_factor, **kwargs): + self.cell_width *= scale_factor + super().scale(scale_factor, **kwargs) \ No newline at end of file diff --git a/manim_ml/neural_network/layers/convolutional_to_convolutional.py b/manim_ml/neural_network/layers/convolutional3d_to_convolutional3d.py similarity index 81% rename from manim_ml/neural_network/layers/convolutional_to_convolutional.py rename to manim_ml/neural_network/layers/convolutional3d_to_convolutional3d.py index 525c106..6cc110c 100644 --- 
a/manim_ml/neural_network/layers/convolutional_to_convolutional.py +++ b/manim_ml/neural_network/layers/convolutional3d_to_convolutional3d.py @@ -1,15 +1,16 @@ +from cProfile import run from manim import * -from manim_ml.neural_network.layers.convolutional import ConvolutionalLayer +from manim_ml.neural_network.layers.convolutional_3d import Convolutional3DLayer from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer -class ConvolutionalToConvolutional(ConnectiveLayer): +class Convolutional3DToConvolutional3D(ConnectiveLayer): """Feed Forward to Embedding Layer""" - input_class = ConvolutionalLayer - output_class = ConvolutionalLayer + input_class = Convolutional3DLayer + output_class = Convolutional3DLayer def __init__(self, input_layer, output_layer, color=WHITE, pulse_color=RED, **kwargs): - super().__init__(input_layer, output_layer, input_class=ConvolutionalLayer, output_class=ConvolutionalLayer, + super().__init__(input_layer, output_layer, input_class=Convolutional3DLayer, output_class=Convolutional3DLayer, **kwargs) self.color = color self.pulse_color = pulse_color @@ -47,12 +48,14 @@ class ConvolutionalToConvolutional(ConnectiveLayer): line.copy() .set_color(self.pulse_color) .set_stroke(opacity=1.0), - time_width=0.5 + time_width=0.5, + run_time=run_time ) animations.append(pulse) # Make animation group animation_group = AnimationGroup( - *animations + *animations, + run_time=run_time ) return animation_group diff --git a/manim_ml/neural_network/layers/convolutional_2d.py b/manim_ml/neural_network/layers/convolutional_2d.py new file mode 100644 index 0000000..efdf43f --- /dev/null +++ b/manim_ml/neural_network/layers/convolutional_2d.py @@ -0,0 +1,43 @@ +from manim import * +from matplotlib import animation +from xarray import align +from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer + +class Convolutional2DLayer(VGroupNeuralNetworkLayer): + + def __init__(self, feature_map_height, feature_map_width, 
filter_width, filter_height, + stride=1, cell_width=0.5, pixel_width=0.5, feature_map_color=BLUE, filter_color=ORANGE, + **kwargs): + super(VGroupNeuralNetworkLayer, self).__init__(**kwargs) + self.feature_map_height = feature_map_height + self.feature_map_width = feature_map_width + self.filter_width = filter_width + self.filter_height = filter_height + self.pixel_width = pixel_width + self.feature_map_color = feature_map_color + self.filter_color = filter_color + self.stride = stride + self.cell_width = cell_width + # Construct the input + self.construct_feature_map() + + def construct_feature_map(self): + """Makes feature map""" + # Make feature map rectangle + self.feature_map = Rectangle( + width=self.feature_map_width * self.cell_width, + height=self.feature_map_height * self.cell_width, + color=self.feature_map_color, + grid_xstep=self.cell_width, + grid_ystep=self.cell_width + ) + + self.add(self.feature_map) + + @override_animation(Create) + def _create_override(self, **kwargs): + return FadeIn(self.feature_map) + + def make_forward_pass_animation(self, **kwargs): + """Make feed forward animation""" + return AnimationGroup() \ No newline at end of file diff --git a/manim_ml/neural_network/layers/convolutional.py b/manim_ml/neural_network/layers/convolutional_3d.py similarity index 78% rename from manim_ml/neural_network/layers/convolutional.py rename to manim_ml/neural_network/layers/convolutional_3d.py index 80afe33..80b1402 100644 --- a/manim_ml/neural_network/layers/convolutional.py +++ b/manim_ml/neural_network/layers/convolutional_3d.py @@ -1,9 +1,7 @@ - from manim import * -from torch import _fake_quantize_learnable_per_tensor_affine from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer -class ConvolutionalLayer(VGroupNeuralNetworkLayer): +class Convolutional3DLayer(VGroupNeuralNetworkLayer): """Handles rendering a convolutional layer for a nn""" def __init__(self, num_filters, filter_width, filter_height, 
filter_spacing=0.1, color=BLUE, @@ -83,20 +81,38 @@ class ConvolutionalLayer(VGroupNeuralNetworkLayer): return corner_lines - def make_forward_pass_animation(self, layer_args={}, **kwargs): + def make_forward_pass_animation(self, run_time=5, layer_args={}, **kwargs): """Convolution forward pass animation""" - animations = [] + passing_flashes = [] for line in self.corner_lines: pulse = ShowPassingFlash( line.copy() .set_color(self.pulse_color) .set_stroke(opacity=1.0), - time_width=0.5 + time_width=0.5, + run_time=run_time, + rate_func=rate_functions.linear ) - animations.append(pulse) + passing_flashes.append(pulse) + + per_filter_run_time = run_time / len(self.rectangles) + filter_flashes = [] + for filter in self.rectangles: + single_flash = Succession( + ApplyMethod(filter.set_color, self.pulse_color, run_time=per_filter_run_time/4), + Wait(per_filter_run_time/2), + ApplyMethod(filter.set_color, self.color, run_time=per_filter_run_time/4), + ApplyMethod(filter.set_stroke_color, WHITE, run_time=0.0) + ) + filter_flashes.append(single_flash) + + filter_flashes = Succession( + *filter_flashes, + ) # Make animation group animation_group = AnimationGroup( - *animations + *passing_flashes, + filter_flashes ) return animation_group diff --git a/manim_ml/neural_network/layers/parent_layers.py b/manim_ml/neural_network/layers/parent_layers.py index 00dd8b8..6ef782c 100644 --- a/manim_ml/neural_network/layers/parent_layers.py +++ b/manim_ml/neural_network/layers/parent_layers.py @@ -51,7 +51,7 @@ class ConnectiveLayer(VGroupNeuralNetworkLayer): assert isinstance(output_layer, self.output_class) @abstractmethod - def make_forward_pass_animation(self, layer_args={}, **kwargs): + def make_forward_pass_animation(self, run_time=2.0, layer_args={}, **kwargs): pass @override_animation(Create) @@ -66,8 +66,8 @@ class BlankConnective(ConnectiveLayer): output_class = output_layer.__class__ super().__init__(input_layer, output_layer, input_class, output_class, **kwargs) - def 
make_forward_pass_animation(self, layer_args={}, **kwargs): - return AnimationGroup() + def make_forward_pass_animation(self, run_time=1.5, layer_args={}, **kwargs): + return AnimationGroup(run_time=run_time) @override_animation(Create) def _create_override(self): diff --git a/manim_ml/neural_network/neural_network.py b/manim_ml/neural_network/neural_network.py index db000fe..0a51ca5 100644 --- a/manim_ml/neural_network/neural_network.py +++ b/manim_ml/neural_network/neural_network.py @@ -9,9 +9,7 @@ Example: # Create the object with default style settings NeuralNetwork(layer_node_count) """ -from cv2 import AGAST_FEATURE_DETECTOR_NONMAX_SUPPRESSION from manim import * -import warnings import textwrap from manim_ml.neural_network.layers.embedding import EmbeddingLayer @@ -104,6 +102,7 @@ class NeuralNetwork(Group): def replace_layer(self, old_layer, new_layer): """Replaces given layer object""" + raise NotImplementedError() remove_animation = self.remove_layer(insert_index) insert_animation = self.insert_layer(layer, insert_index) # Make the animation @@ -119,6 +118,7 @@ class NeuralNetwork(Group): **kwargs): """Generates an animation for feed forward propagation""" all_animations = [] + per_layer_runtime = run_time/len(self.all_layers) for layer_index, layer in enumerate(self.all_layers): # Get the layer args if isinstance(layer, ConnectiveLayer): @@ -139,10 +139,18 @@ class NeuralNetwork(Group): if layer in layer_args: current_layer_args = layer_args[layer] # Perform the forward pass of the current layer - layer_forward_pass = layer.make_forward_pass_animation(layer_args=current_layer_args, **kwargs) + layer_forward_pass = layer.make_forward_pass_animation( + layer_args=current_layer_args, + run_time=per_layer_runtime, + **kwargs + ) all_animations.append(layer_forward_pass) # Make the animation group - animation_group = AnimationGroup(*all_animations, run_time=run_time, lag_ratio=1.0) + animation_group = Succession( + *all_animations, + run_time=run_time, + 
lag_ratio=1.0 + ) return animation_group @@ -176,7 +184,15 @@ class NeuralNetwork(Group): def set_z_index(self, z_index_value: float, family=False): """Overriden set_z_index""" # Setting family=False stops sub-neural networks from inheriting parent z_index - return super().set_z_index(z_index_value, family=False) + for layer in self.all_layers: + if not isinstance(layer, NeuralNetwork): + layer.set_z_index(z_index_value) + + def scale(self, scale_factor, **kwargs): + """Overriden scale""" + for layer in self.all_layers: + layer.scale(scale_factor, **kwargs) + # super().scale(scale_factor) def __repr__(self, metadata=["z_index", "title_text"]): """Print string representation of layers""" diff --git a/setup.py b/setup.py new file mode 100644 index 0000000..cb0d6cf --- /dev/null +++ b/setup.py @@ -0,0 +1,8 @@ +from setuptools import setup, find_packages + +setup( + name = "manim_ml", + version = "0.0.11", + description = (" Machine Learning Animations in python using Manim."), + packages=find_packages(), +) \ No newline at end of file diff --git a/tests/test_convolutional_2d_layer.py b/tests/test_convolutional_2d_layer.py new file mode 100644 index 0000000..a00af3b --- /dev/null +++ b/tests/test_convolutional_2d_layer.py @@ -0,0 +1,26 @@ +from manim import * +from manim_ml.neural_network.layers import Convolutional2DLayer +from manim_ml.neural_network.neural_network import NeuralNetwork + +config.pixel_height = 1200 +config.pixel_width = 1900 +config.frame_height = 12.0 +config.frame_width = 12.0 + +class TestConv2d(Scene): + + def construct(self): + nn = NeuralNetwork([ + Convolutional2DLayer(5, 5, 3, 3, cell_width=0.5, stride=1), + Convolutional2DLayer(3, 3, 2, 2, cell_width=0.5, stride=1), + ], layer_spacing=1.5) + # Center the nn + nn.scale(1.3) + nn.move_to(ORIGIN) + self.play(Create(nn), run_time=2) + # Play animation + forward_pass = nn.make_forward_pass_animation(run_time=19) + self.play( + forward_pass, + ) + self.wait(1) \ No newline at end of file diff --git 
a/tests/test_convolutional_layer.py b/tests/test_convolutional_3d_layer.py similarity index 77% rename from tests/test_convolutional_layer.py rename to tests/test_convolutional_3d_layer.py index 096a673..abe5e92 100644 --- a/tests/test_convolutional_layer.py +++ b/tests/test_convolutional_3d_layer.py @@ -1,7 +1,7 @@ from manim import * from PIL import Image -from manim_ml.neural_network.layers.convolutional import ConvolutionalLayer +from manim_ml.neural_network.layers.convolutional_3d import Convolutional3DLayer from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.image import ImageLayer from manim_ml.neural_network.neural_network import NeuralNetwork @@ -11,7 +11,7 @@ class SingleConvolutionalLayerScene(ThreeDScene): def construct(self): # Make nn layers = [ - ConvolutionalLayer(3, 4) + Convolutional3DLayer(3, 4) ] nn = NeuralNetwork(layers) nn.scale(1.3) @@ -35,9 +35,9 @@ class CombinedScene(ThreeDScene, Scene): # Make nn nn = NeuralNetwork([ ImageLayer(numpy_image, height=1.4), - ConvolutionalLayer(3, 3, 3, filter_spacing=0.2), - ConvolutionalLayer(5, 2, 2, filter_spacing=0.2), - ConvolutionalLayer(10, 2, 1, filter_spacing=0.2), + Convolutional3DLayer(3, 3, 3, filter_spacing=0.2), + Convolutional3DLayer(5, 2, 2, filter_spacing=0.2), + Convolutional3DLayer(10, 2, 1, filter_spacing=0.2), FeedForwardLayer(3, rectangle_stroke_width=4, node_stroke_width=4).scale(2), FeedForwardLayer(1, rectangle_stroke_width=4, node_stroke_width=4).scale(2) ], layer_spacing=0.2) @@ -49,4 +49,8 @@ class CombinedScene(ThreeDScene, Scene): # Play animation # self.set_camera_orientation(phi=280* DEGREES, theta=-20*DEGREES, gamma=90 * DEGREES) # self.begin_ambient_camera_rotation() - self.play(nn.make_forward_pass_animation(run_time=5)) + forward_pass = nn.make_forward_pass_animation(run_time=10) + print(forward_pass) + self.play( + forward_pass + )