diff --git a/Readme.md b/Readme.md index 5c28ada..5c7c09c 100644 --- a/Readme.md +++ b/Readme.md @@ -31,43 +31,40 @@ Checkout the ```examples``` directory for some example videos with source code. ### Neural Networks -This is a visualization of a Neural Network made using ManimML. It has a Pytorch style list of layers that can be composed in arbitrary order. The following video is made with the code from below. +This is a visualization of a Variational Autoencoder made using ManimML. It has a Pytorch style list of layers that can be composed in arbitrary order. The following video is made with the code from below. - + ```python -from manim import * -from manim_ml.neural_network.layers import FeedForwardLayer, ImageLayer -from manim_ml.neural_network.neural_network import NeuralNetwork -from PIL import Image -import numpy as np - -class ImageNeuralNetworkScene(Scene): +class VariationalAutoencoderScene(Scene): def construct(self): + embedding_layer = EmbeddingLayer(dist_theme="ellipse").scale(2) + image = Image.open('images/image.jpeg') numpy_image = np.asarray(image) # Make nn - layers = [ - ImageLayer(numpy_image, height=1.0), - FeedForwardLayer(3), + neural_network = NeuralNetwork([ + ImageLayer(numpy_image, height=1.4), FeedForwardLayer(5), - FeedForwardLayer(3) - ] - nn = NeuralNetwork(layers) - # Center the nn - nn.move_to(ORIGIN) - self.add(nn) - # Play animation - self.play(nn.make_forward_pass_animation()) + FeedForwardLayer(3), + embedding_layer, + FeedForwardLayer(3), + FeedForwardLayer(5), + ImageLayer(numpy_image, height=1.4), + ], layer_spacing=0.1) + + neural_network.scale(1.3) + + self.play(Create(neural_network)) + self.play(neural_network.make_forward_pass_animation(run_time=15)) ``` +### Generative Adversarial Network -### Variational Autoencoders +This is a visualization of a Generative Adversarial Network made using ManimML. -This is a visualization of a Variational Autoencoder. 
- - + ### VAE Disentanglement diff --git a/examples/gan/gan.py b/examples/gan/gan.py new file mode 100644 index 0000000..2e84926 --- /dev/null +++ b/examples/gan/gan.py @@ -0,0 +1,190 @@ +import random +from PIL import Image +from manim import * +from manim_ml.neural_network.layers.embedding import EmbeddingLayer +from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer +from manim_ml.neural_network.layers.image import ImageLayer +from manim_ml.neural_network.layers.vector import VectorLayer + +from manim_ml.neural_network.neural_network import NeuralNetwork + +config.pixel_height = 1080 +config.pixel_width = 1080 +config.frame_height = 8.3 +config.frame_width = 8.3 + +class GAN(Mobject): + """Generative Adversarial Network""" + + def __init__(self): + super().__init__() + self.make_entities() + self.place_entities() + self.titles = self.make_titles() + + def make_entities(self, image_height=1.2): + """Makes all of the network entities""" + # Make the fake image layer + default_image = Image.open('../../assets/gan/fake_image.png') + numpy_image = np.asarray(default_image) + self.fake_image_layer = ImageLayer(numpy_image, height=image_height, show_image_on_create=False) + # Make the Generator Network + self.generator = NeuralNetwork([ + EmbeddingLayer(covariance=np.array([[3.0, 0], [0, 3.0]])).scale(1.3), + FeedForwardLayer(3), + FeedForwardLayer(5), + self.fake_image_layer + ], layer_spacing=0.1) + + self.add(self.generator) + # Make the Discriminator + self.discriminator = NeuralNetwork([ + FeedForwardLayer(5), + FeedForwardLayer(1), + VectorLayer(1, value_func=lambda: random.uniform(0, 1)), + ], layer_spacing=0.1) + self.add(self.discriminator) + # Make Ground Truth Dataset + default_image = Image.open('../../assets/gan/real_image.jpg') + numpy_image = np.asarray(default_image) + self.ground_truth_layer = ImageLayer(numpy_image, height=image_height) + self.add(self.ground_truth_layer) + + self.scale(1) + + def place_entities(self): + """Positions 
entities in correct places""" + # Place relative to generator + # Place the ground_truth image layer + self.ground_truth_layer.next_to(self.fake_image_layer, DOWN, 0.8) + # Group the images + image_group = Group(self.ground_truth_layer, self.fake_image_layer) + # Move the discriminator to the right of thee generator + self.discriminator.next_to(self.generator, RIGHT, 0.2) + self.discriminator.match_y(image_group) + # Move the discriminator to the height of the center of the image_group + # self.discriminator.match_y(image_group) + # self.ground_truth_layer.next_to(self.fake_image_layer, DOWN, 0.5) + + def make_titles(self): + """Makes titles for the different entities""" + titles = VGroup() + + self.ground_truth_layer_title = Text("Real Image").scale(0.3) + self.ground_truth_layer_title.next_to(self.ground_truth_layer, UP, 0.1) + self.add(self.ground_truth_layer_title) + titles.add(self.ground_truth_layer_title) + self.fake_image_layer_title = Text("Fake Image").scale(0.3) + self.fake_image_layer_title.next_to(self.fake_image_layer, UP, 0.1) + self.add(self.fake_image_layer_title) + titles.add(self.fake_image_layer_title) + # Overhead title + overhead_title = Text("Generative Adversarial Network").scale(0.75) + overhead_title.shift(np.array([0, 3.5, 0])) + titles.add(overhead_title) + # Probability title + self.probability_title = Text("Probability").scale(0.5) + self.probability_title.move_to(self.discriminator.input_layers[-2]) + self.probability_title.shift(UP) + self.probability_title.shift(RIGHT*1.05) + titles.add(self.probability_title) + + return titles + + def make_highlight_generator_rectangle(self): + """Returns animation that highlights the generators contents""" + group = VGroup() + + generator_surrounding_group = Group( + self.generator, + self.fake_image_layer_title + ) + + generator_surrounding_rectangle = SurroundingRectangle( + generator_surrounding_group, + buff=0.1, + stroke_width=4.0, + color="#0FFF50" + ) + 
group.add(generator_surrounding_rectangle) + title = Text("Generator").scale(0.5) + title.next_to(generator_surrounding_rectangle, UP, 0.2) + group.add(title) + + return group + + def make_highlight_discriminator_rectangle(self): + """Makes a rectangle for highlighting the discriminator""" + discriminator_group = Group( + self.discriminator, + self.fake_image_layer, + self.ground_truth_layer, + self.fake_image_layer_title, + self.probability_title + ) + + group = VGroup() + + discriminator_surrounding_rectangle = SurroundingRectangle( + discriminator_group, + buff=0.05, + stroke_width=4.0, + color="#0FFF50" + ) + group.add(discriminator_surrounding_rectangle) + title = Text("Discriminator").scale(0.5) + title.next_to(discriminator_surrounding_rectangle, UP, 0.2) + group.add(title) + + return group + + def make_generator_forward_pass(self): + """Makes forward pass of the generator""" + + forward_pass = self.generator.make_forward_pass_animation(dist_theme="ellipse") + + return forward_pass + + def make_discriminator_forward_pass(self): + """Makes forward pass of the discriminator""" + + disc_forward = self.discriminator.make_forward_pass_animation() + + return disc_forward + + @override_animation(Create) + def _create_override(self): + """Overrides create""" + animation_group = AnimationGroup( + Create(self.generator), + Create(self.discriminator), + Create(self.ground_truth_layer), + Create(self.titles) + ) + return animation_group + +class GANScene(Scene): + """GAN Scene""" + + def construct(self): + gan = GAN().scale(1.70) + gan.move_to(ORIGIN) + gan.shift(DOWN*0.35) + gan.shift(LEFT*0.1) + self.play(Create(gan), run_time=3) + # Highlight generator + highlight_generator_rectangle = gan.make_highlight_generator_rectangle() + self.play(Create(highlight_generator_rectangle), run_time=1) + # Generator forward pass + gen_forward_pass = gan.make_generator_forward_pass() + self.play(gen_forward_pass, run_time=5) + # Fade out generator highlight + 
self.play(Uncreate(highlight_generator_rectangle), run_time=1) + # Highlight discriminator + highlight_discriminator_rectangle = gan.make_highlight_discriminator_rectangle() + self.play(Create(highlight_discriminator_rectangle), run_time=1) + # Discriminator forward pass + discriminator_forward_pass = gan.make_discriminator_forward_pass() + self.play(discriminator_forward_pass, run_time=5) + # Unhighlight discriminator + self.play(Uncreate(highlight_discriminator_rectangle), run_time=1) \ No newline at end of file diff --git a/examples/media/GANScene.gif b/examples/media/GANScene.gif new file mode 100644 index 0000000..4ae3792 Binary files /dev/null and b/examples/media/GANScene.gif differ diff --git a/examples/media/VAEScene.gif b/examples/media/VAEScene.gif index 49b4fa8..8337dfc 100644 Binary files a/examples/media/VAEScene.gif and b/examples/media/VAEScene.gif differ diff --git a/examples/media/VAEScene.mp4 b/examples/media/VAEScene.mp4 deleted file mode 100644 index 02edc88..0000000 Binary files a/examples/media/VAEScene.mp4 and /dev/null differ diff --git a/examples/variational_autoencoder/variational_autoencoder.py b/examples/variational_autoencoder/variational_autoencoder.py index 6322150..1e08077 100644 --- a/examples/variational_autoencoder/variational_autoencoder.py +++ b/examples/variational_autoencoder/variational_autoencoder.py @@ -8,7 +8,12 @@ from manim import * import pickle import numpy as np import os +from PIL import Image import manim_ml.neural_network as neural_network +from manim_ml.neural_network.embedding import EmbeddingLayer +from manim_ml.neural_network.feed_forward import FeedForwardLayer +from manim_ml.neural_network.image import ImageLayer +from manim_ml.neural_network.neural_network import NeuralNetwork class VariationalAutoencoder(VGroup): """Variational Autoencoder Manim Visualization""" @@ -239,6 +244,29 @@ class VariationalAutoencoder(VGroup): return animation_group +class VariationalAutoencoder(VGroup): + + def __init__(self): + 
embedding_layer = EmbeddingLayer() + + image = Image.open('images/image.jpeg') + numpy_image = np.asarray(image) + # Make nn + neural_network = NeuralNetwork([ + ImageLayer(numpy_image, height=1.4), + FeedForwardLayer(5), + FeedForwardLayer(3), + embedding_layer, + FeedForwardLayer(3), + FeedForwardLayer(5), + ImageLayer(numpy_image, height=1.4), + ]) + + neural_network.scale(1.3) + + self.play(Create(neural_network)) + self.play(neural_network.make_forward_pass_animation(run_time=15)) + class MNISTImageHandler(): """Deals with loading serialized VAE mnist images from "autoencoder_models" """ @@ -295,19 +323,4 @@ class VAEScene(Scene): # Interpolation animation interpolation_images = mnist_image_handler.interpolation_images interpolation_animation = vae.make_interpolation_animation(interpolation_images) - self.play(interpolation_animation) - -class VAEImage(Scene): - - def construct(self): - # Set Scene config - vae = VariationalAutoencoder() - mnist_image_handler = MNISTImageHandler() - image_pair = mnist_image_handler.image_pairs[3] - vae.move_to(ORIGIN) - vae.scale(1.3) - self.play(Create(vae), run_time=3) - # Make a forward pass animation - forward_pass_animation = vae.make_forward_pass_animation(image_pair) - self.play(forward_pass_animation) - \ No newline at end of file + self.play(interpolation_animation) \ No newline at end of file diff --git a/manim_ml/neural_network/layers/embedding.py b/manim_ml/neural_network/layers/embedding.py index d3c7ea1..2a96c98 100644 --- a/manim_ml/neural_network/layers/embedding.py +++ b/manim_ml/neural_network/layers/embedding.py @@ -5,18 +5,17 @@ from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLaye class EmbeddingLayer(VGroupNeuralNetworkLayer): """NeuralNetwork embedding object that can show probability distributions""" - def __init__(self, point_radius=0.02, **kwargs): + def __init__(self, point_radius=0.02, mean = np.array([0, 0]), + covariance=np.array([[1.5, 0], [0, 1.5]]), **kwargs): 
super(VGroupNeuralNetworkLayer, self).__init__(**kwargs) self.point_radius = point_radius self.axes = Axes( tips=False, - x_length=1, - y_length=1 + x_length=0.8, + y_length=0.8 ) self.add(self.axes) # Make point cloud - mean = np.array([0, 0]) - covariance = np.array([[1.5, 0], [0, 1.5]]) self.point_cloud = self.construct_gaussian_point_cloud(mean, covariance) self.add(self.point_cloud) # Make latent distribution @@ -50,10 +49,14 @@ class EmbeddingLayer(VGroupNeuralNetworkLayer): return point_dots - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, dist_theme="gaussian", **kwargs): """Forward pass animation""" # Make ellipse object corresponding to the latent distribution - self.latent_distribution = GaussianDistribution(self.axes) # Use defaults + self.latent_distribution = GaussianDistribution( + self.axes, + dist_theme=dist_theme, + cov=np.array([[0.8, 0], [0.0, 0.8]]) + ) # Use defaults # Create animation animations = [] #create_distribution = Create(self.latent_distribution.construct_gaussian_distribution(self.latent_distribution.mean, self.latent_distribution.cov)) #Create(self.latent_distribution) diff --git a/manim_ml/neural_network/layers/embedding_to_feed_forward.py b/manim_ml/neural_network/layers/embedding_to_feed_forward.py index 2ce89ff..9057c3c 100644 --- a/manim_ml/neural_network/layers/embedding_to_feed_forward.py +++ b/manim_ml/neural_network/layers/embedding_to_feed_forward.py @@ -17,7 +17,7 @@ class EmbeddingToFeedForward(ConnectiveLayer): self.animation_dot_color = animation_dot_color self.dot_radius = dot_radius - def make_forward_pass_animation(self, run_time=1.5): + def make_forward_pass_animation(self, run_time=1.5, **kwargs): """Makes dots diverge from the given location and move the decoder""" # Find point to converge on by sampling from gaussian distribution location = self.embedding_layer.sample_point_location_from_distribution() diff --git a/manim_ml/neural_network/layers/feed_forward.py 
b/manim_ml/neural_network/layers/feed_forward.py index 24f0d23..62af147 100644 --- a/manim_ml/neural_network/layers/feed_forward.py +++ b/manim_ml/neural_network/layers/feed_forward.py @@ -44,7 +44,7 @@ class FeedForwardLayer(VGroupNeuralNetworkLayer): # Add the objects to the class self.add(self.surrounding_rectangle, self.node_group) - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): # make highlight animation succession = Succession( ApplyMethod(self.node_group.set_color, self.animation_dot_color, run_time=0.25), diff --git a/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py b/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py index 43791f0..007aead 100644 --- a/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py +++ b/manim_ml/neural_network/layers/feed_forward_to_feed_forward.py @@ -33,7 +33,18 @@ class FeedForwardToFeedForward(ConnectiveLayer): edges = VGroup(*edges) return edges - def make_forward_pass_animation(self, run_time=1): + @override_animation(FadeOut) + def _fadeout_animation(self): + animations = [] + + for edge in self.edges: + animations.append(FadeOut(edge)) + + animation_group = AnimationGroup(*animations) + + return animation_group + + def make_forward_pass_animation(self, run_time=1, **kwargs): """Animation for passing information from one FeedForwardLayer to the next""" path_animations = [] dots = [] diff --git a/manim_ml/neural_network/layers/feed_forward_to_image.py b/manim_ml/neural_network/layers/feed_forward_to_image.py index 847a1b4..ae3468c 100644 --- a/manim_ml/neural_network/layers/feed_forward_to_image.py +++ b/manim_ml/neural_network/layers/feed_forward_to_image.py @@ -18,7 +18,7 @@ class FeedForwardToImage(ConnectiveLayer): self.feed_forward_layer = input_layer self.image_layer = output_layer - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): """Makes dots diverge from the given location and move to the feed 
forward nodes decoder""" animations = [] image_mobject = self.image_layer.image_mobject diff --git a/manim_ml/neural_network/layers/image.py b/manim_ml/neural_network/layers/image.py index e3081f9..b37d1a2 100644 --- a/manim_ml/neural_network/layers/image.py +++ b/manim_ml/neural_network/layers/image.py @@ -5,9 +5,10 @@ from manim_ml.neural_network.layers.parent_layers import NeuralNetworkLayer class ImageLayer(NeuralNetworkLayer): """Single Image Layer for Neural Network""" - def __init__(self, numpy_image, height=1.5, **kwargs): + def __init__(self, numpy_image, height=1.5, show_image_on_create=True, **kwargs): super().__init__(**kwargs) self.numpy_image = numpy_image + self.show_image_on_create = show_image_on_create if len(np.shape(self.numpy_image)) == 2: # Assumed Grayscale self.image_mobject = GrayscaleImageMobject(self.numpy_image, height=height) @@ -21,9 +22,12 @@ class ImageLayer(NeuralNetworkLayer): debug_mode = False if debug_mode: return FadeIn(SurroundingRectangle(self.image_mobject)) - return FadeIn(self.image_mobject) + if self.show_image_on_create: + return FadeIn(self.image_mobject) + else: + return AnimationGroup() - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): return FadeIn(self.image_mobject) # def move_to(self, location): @@ -36,4 +40,8 @@ class ImageLayer(NeuralNetworkLayer): @property def width(self): - return self.image_mobject.width \ No newline at end of file + return self.image_mobject.width + + @property + def height(self): + return self.image_mobject.height \ No newline at end of file diff --git a/manim_ml/neural_network/layers/image_to_feed_forward.py b/manim_ml/neural_network/layers/image_to_feed_forward.py index 3cadaf7..8e58a85 100644 --- a/manim_ml/neural_network/layers/image_to_feed_forward.py +++ b/manim_ml/neural_network/layers/image_to_feed_forward.py @@ -18,7 +18,7 @@ class ImageToFeedForward(ConnectiveLayer): self.feed_forward_layer = output_layer self.image_layer = input_layer - 
def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): """Makes dots diverge from the given location and move to the feed forward nodes decoder""" animations = [] dots = [] diff --git a/manim_ml/neural_network/layers/paired_query.py b/manim_ml/neural_network/layers/paired_query.py index 0692911..f16b436 100644 --- a/manim_ml/neural_network/layers/paired_query.py +++ b/manim_ml/neural_network/layers/paired_query.py @@ -60,6 +60,6 @@ class PairedQueryLayer(NeuralNetworkLayer): # TODO make Create animation that is custom return FadeIn(self.assets) - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): """Forward pass for query""" return AnimationGroup() \ No newline at end of file diff --git a/manim_ml/neural_network/layers/paired_query_to_feed_forward.py b/manim_ml/neural_network/layers/paired_query_to_feed_forward.py index deb2a10..df77c5c 100644 --- a/manim_ml/neural_network/layers/paired_query_to_feed_forward.py +++ b/manim_ml/neural_network/layers/paired_query_to_feed_forward.py @@ -17,7 +17,7 @@ class PairedQueryToFeedForward(ConnectiveLayer): self.paired_query_layer = input_layer self.feed_forward_layer = output_layer - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): """Makes dots diverge from the given location and move to the feed forward nodes decoder""" animations = [] # Loop through each image diff --git a/manim_ml/neural_network/layers/parent_layers.py b/manim_ml/neural_network/layers/parent_layers.py index 2a4ab5d..40feba7 100644 --- a/manim_ml/neural_network/layers/parent_layers.py +++ b/manim_ml/neural_network/layers/parent_layers.py @@ -7,10 +7,12 @@ class NeuralNetworkLayer(ABC, Group): def __init__(self, text=None, **kwargs): super(Group, self).__init__() self.title_text = kwargs["title"] if "title" in kwargs else " " - self.title = Text(self.title_text, font_size=DEFAULT_FONT_SIZE/3) + self.title = Text(self.title_text, 
font_size=DEFAULT_FONT_SIZE/3).scale(0.6) + # self.title.next_to(self, UP, 1.2) + # self.add(self.title) @abstractmethod - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): pass @override_animation(Create) @@ -26,7 +28,7 @@ class VGroupNeuralNetworkLayer(NeuralNetworkLayer): super().__init__(**kwargs) @abstractmethod - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): pass @override_animation(Create) @@ -49,7 +51,7 @@ class ConnectiveLayer(VGroupNeuralNetworkLayer): assert isinstance(output_layer, self.output_class) @abstractmethod - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): pass @override_animation(Create) diff --git a/manim_ml/neural_network/layers/triplet.py b/manim_ml/neural_network/layers/triplet.py index 9685e31..c3b7ab0 100644 --- a/manim_ml/neural_network/layers/triplet.py +++ b/manim_ml/neural_network/layers/triplet.py @@ -71,6 +71,6 @@ class TripletLayer(NeuralNetworkLayer): # TODO make Create animation that is custom return FadeIn(self.assets) - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): """Forward pass for triplet""" return AnimationGroup() diff --git a/manim_ml/neural_network/layers/triplet_to_feed_forward.py b/manim_ml/neural_network/layers/triplet_to_feed_forward.py index c5206d1..9f16333 100644 --- a/manim_ml/neural_network/layers/triplet_to_feed_forward.py +++ b/manim_ml/neural_network/layers/triplet_to_feed_forward.py @@ -18,7 +18,7 @@ class TripletToFeedForward(ConnectiveLayer): self.feed_forward_layer = output_layer self.triplet_layer = input_layer - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): """Makes dots diverge from the given location and move to the feed forward nodes decoder""" animations = [] # Loop through each image diff --git a/manim_ml/neural_network/layers/vector.py b/manim_ml/neural_network/layers/vector.py index 
5ec9875..2dcc45c 100644 --- a/manim_ml/neural_network/layers/vector.py +++ b/manim_ml/neural_network/layers/vector.py @@ -23,12 +23,12 @@ class VectorLayer(VGroupNeuralNetworkLayer): values = values[None, :].T vector = Matrix(values) - vector_label = Text(f"[{self.value_func():.2}]") - vector_label.scale(0.5) + vector_label = Text(f"[{self.value_func():.2f}]") + vector_label.scale(0.3) return vector_label - def make_forward_pass_animation(self): + def make_forward_pass_animation(self, **kwargs): return AnimationGroup() @override_animation(Create) diff --git a/manim_ml/neural_network/neural_network.py b/manim_ml/neural_network/neural_network.py index 873a2f3..e98d482 100644 --- a/manim_ml/neural_network/neural_network.py +++ b/manim_ml/neural_network/neural_network.py @@ -9,35 +9,15 @@ Example: # Create the object with default style settings NeuralNetwork(layer_node_count) """ -from socket import create_connection from manim import * import warnings import textwrap -from manim_ml.neural_network.layers import connective_layers_list from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.util import get_connective_layer from manim_ml.list_group import ListGroup -class LazyAnimation(Animation): - """ - Lazily creates animation when the animation is called. - - This is helpful when creating the animation depends upon the internal - state of some set of objects. - """ - - def __init__(self, animation_func): - self.animation_func = animation_func - super().__init__(None) - - def begin(self): - """Begins animation""" - self.mobject, animation = self.animation_func() - animation = Create(self.mobject) - animation.begin() - -class RemoveLayer(Succession): +class RemoveLayer(AnimationGroup): """ Animation for removing a layer from a neural network. 
@@ -183,6 +163,99 @@ class RemoveLayer(Succession): update_func_anim = UpdateFromFunc(self.neural_network, create_new_connective) return update_func_anim + +class InsertLayer(AnimationGroup): + """Animation for inserting layer at given index""" + + def __init__(self, layer, index, neural_network): + self.layer = layer + self.index = index + self.neural_network = neural_network + # Layers before and after + self.layers_before = self.neural_network.all_layers[:self.index] + self.layers_after = self.neural_network.all_layers[self.index:] + + remove_connective_layer = self.remove_connective_layer() + move_layers = self.make_move_layers() + # create_layer = self.make_create_layer() + # create_connective_layers = self.make_create_connective_layers() + animations = [ + remove_connective_layer, + move_layers, + # create_layer, + # create_connective_layers + ] + + super().__init__(*animations, lag_ratio=1.0) + + def remove_connective_layer(self): + """Removes the connective layer before the insertion index""" + # Check if connective layer exists + if len(self.layers_before) > 0: + removed_connective = self.layers_before[-1] + self.neural_network.all_layers.remove(removed_connective) + # Make remove animation + remove_animation = FadeOut(removed_connective) + return remove_animation + + return AnimationGroup() + + def make_move_layers(self): + """Shifts layers before and after""" + # Before layer shift + before_shift_animation = AnimationGroup() + if len(self.layers_before) > 0: + before_shift = np.array([-self.layer.width/2, 0, 0]) + # Shift layers before + before_shift_animation = Group(*self.layers_before).animate.shift(before_shift) + # After layer shift + after_shift_animation = AnimationGroup() + if len(self.layers_after) > 0: + after_shift = np.array([self.layer.width/2, 0, 0]) + # Shift layers after + after_shift_animation = Group(*self.layers_after).animate.shift(after_shift) + # Make animation group + shift_animations = AnimationGroup( + before_shift_animation, + 
after_shift_animation + ) + + return shift_animations + + def make_create_layer(self): + """Animates the creation of the layer""" + pass + + def make_create_connective_layers(self): + pass + + + # Make connective layers and shift animations + # Before layer + if len(layers_before) > 0: + before_connective = get_connective_layer(layers_before[-1], layer) + before_shift = np.array([-layer.width/2, 0, 0]) + # Shift layers before + before_shift_animation = Group(*layers_before).animate.shift(before_shift) + else: + before_connective = AnimationGroup() + # After layer + if len(layers_after) > 0: + after_connective = get_connective_layer(layer, layers_after[0]) + after_shift = np.array([layer.width/2, 0, 0]) + # Shift layers after + after_shift_animation = Group(*layers_after).animate.shift(after_shift) + else: + after_connective = AnimationGroup + + insert_animation = Create(layer) + animation_group = AnimationGroup( + shift_animations, + insert_animation, + lag_ratio=1.0 + ) + + return animation_group class NeuralNetwork(Group): @@ -202,9 +275,6 @@ class NeuralNetwork(Group): # and make it have explicit distinct subspaces self._place_layers() self.connective_layers, self.all_layers = self._construct_connective_layers() - # Make layer titles - self.layer_titles = self._make_layer_titles() - self.add(self.layer_titles) # Make overhead title self.title = Text(self.title_text, font_size=DEFAULT_FONT_SIZE/2) self.title.next_to(self, UP, 1.0) @@ -229,15 +299,6 @@ class NeuralNetwork(Group): shift_vector = np.array([(previous_layer.get_width()/2 + current_layer.get_width()/2) + self.layer_spacing, 0, 0]) current_layer.shift(shift_vector) - def _make_layer_titles(self): - """Makes titles""" - titles = VGroup() - for layer in self.all_layers: - title = layer.title - title.next_to(layer, UP, 0.2) - titles.add(title) - return titles - def _construct_connective_layers(self): """Draws connecting lines between layers""" connective_layers = ListGroup() @@ -264,40 +325,9 @@ class 
NeuralNetwork(Group): def insert_layer(self, layer, insert_index): """Inserts a layer at the given index""" - layers_before = self.all_layers[:insert_index] - layers_after = self.all_layers[insert_index:] - # Make connective layers and shift animations - # Before layer - if len(layers_before) > 0: - before_connective = get_connective_layer(layers_before[-1], layer) - before_shift = np.array([-layer.width/2, 0, 0]) - # Shift layers before - before_shift_animation = Group(*layers_before).animate.shift(before_shift) - else: - before_connective = AnimationGroup() - # After layer - if len(layers_after) > 0: - after_connective = get_connective_layer(layer, layers_after[0]) - after_shift = np.array([layer.width/2, 0, 0]) - # Shift layers after - after_shift_animation = Group(*layers_after).animate.shift(after_shift) - else: - after_connective = AnimationGroup - - # Make animation group - shift_animations = AnimationGroup( - before_shift_animation, - after_shift_animation - ) - - insert_animation = Create(layer) - animation_group = AnimationGroup( - shift_animations, - insert_animation, - lag_ratio=1.0 - ) - - return animation_group + neural_network = self + insert_animation = InsertLayer(layer, insert_index, neural_network) + return insert_animation def remove_layer(self, layer): """Removes layer object if it exists""" @@ -317,17 +347,18 @@ class NeuralNetwork(Group): return animation_group - def make_forward_pass_animation(self, run_time=10, passing_flash=True): + def make_forward_pass_animation(self, run_time=10, passing_flash=True, + **kwargs): """Generates an animation for feed forward propagation""" all_animations = [] for layer_index, layer in enumerate(self.input_layers[:-1]): - layer_forward_pass = layer.make_forward_pass_animation() + layer_forward_pass = layer.make_forward_pass_animation(**kwargs) all_animations.append(layer_forward_pass) connective_layer = self.connective_layers[layer_index] - connective_forward_pass = 
connective_layer.make_forward_pass_animation() + connective_forward_pass = connective_layer.make_forward_pass_animation(**kwargs) all_animations.append(connective_forward_pass) # Do last layer animation - last_layer_forward_pass = self.input_layers[-1].make_forward_pass_animation() + last_layer_forward_pass = self.input_layers[-1].make_forward_pass_animation(**kwargs) all_animations.append(last_layer_forward_pass) # Make the animation group animation_group = AnimationGroup(*all_animations, run_time=run_time, lag_ratio=1.0) diff --git a/manim_ml/probability.py b/manim_ml/probability.py index c765853..c25bffe 100644 --- a/manim_ml/probability.py +++ b/manim_ml/probability.py @@ -5,17 +5,23 @@ import math class GaussianDistribution(VGroup): """Object for drawing a Gaussian distribution""" - def __init__(self, axes, mean=None, cov=None, **kwargs): + def __init__(self, axes, mean=None, cov=None, dist_theme="gaussian", **kwargs): super(VGroup, self).__init__(**kwargs) self.axes = axes self.mean = mean self.cov = cov + self.dist_theme = dist_theme if mean is None: self.mean = np.array([0.0, 0.0]) if cov is None: - self.cov = np.array([[3, 0], [0, 3]]) + self.cov = np.array([[1, 0], [0, 1]]) # Make the Gaussian - self.ellipses = self.construct_gaussian_distribution(self.mean, self.cov) + if self.dist_theme is "gaussian": + self.ellipses = self.construct_gaussian_distribution(self.mean, self.cov) + elif self.dist_theme is "ellipse": + self.ellipses = self.construct_simple_gaussian_ellipse(self.mean, self.cov) + else: + raise Exception(f"Uncrecognized distribution theme: {self.dist_theme}") @override_animation(Create) def _create_gaussian_distribution(self): @@ -65,3 +71,30 @@ class GaussianDistribution(VGroup): return ellipses + def construct_simple_gaussian_ellipse(self, mean, covariance, color=ORANGE): + """Returns a 2d Gaussian distribution object with given mean and covariance""" + # map mean and covariance to frame coordinates + mean = self.axes.coords_to_point(*mean) 
+ # Figure out the scale and angle of rotation + # TODO fix this + # rotation, width, height = self.compute_covariance_rotation_and_scale(covariance) + mean = np.array([0, 0, 0]) + mean = self.axes.coords_to_point(*mean) + rotation = 0.0 + # Make covariance ellipses + opacity = 0.0 + ellipses = VGroup() + opacity = 0.2 + ellipse = Ellipse( + width=0.6, + height=0.6, + color=color, + fill_opacity=opacity, + stroke_width=2.0 + ) + ellipse.move_to(mean) + ellipse.rotate(rotation) + ellipses.add(ellipse) + + return ellipses + diff --git a/tests/test_convolutional_layer.py b/tests/test_convolutional_layer.py new file mode 100644 index 0000000..462698e --- /dev/null +++ b/tests/test_convolutional_layer.py @@ -0,0 +1,42 @@ +from manim import * + +from manim_ml.neural_network.layers.convolutional import ConvolutionalLayer +from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer +from manim_ml.neural_network.neural_network import NeuralNetwork + +class SingleConvolutionalLayerScence(Scene): + + def construct(self): + + # Make nn + layers = [ + ConvolutionalLayer() + ] + nn = NeuralNetwork(layers) + nn.scale(1.3) + # Center the nn + nn.move_to(ORIGIN) + self.add(nn) + # Play animation + self.play(nn.make_forward_pass_animation(run_time=5)) + self.play(nn.make_forward_pass_animation(run_time=5)) + +class ThreeDLightSourcePosition(ThreeDScene, Scene): + def construct(self): + axes = ThreeDAxes() + sphere = Surface( + lambda u, v: np.array([ + u, + v, + 0 + ]), v_range=[0, TAU], u_range=[-PI / 2, PI / 2], + checkerboard_colors=[RED_D, RED_E], resolution=(15, 32) + ) + self.renderer.camera.light_source.move_to(3*IN) # changes the source of the light + self.set_camera_orientation(phi=90 * DEGREES, theta=0 * DEGREES) + self.add(axes, sphere) + +class CombinedScene(Scene): + + def constuct(self): + pass \ No newline at end of file diff --git a/tests/test_neural_network.py b/tests/test_neural_network.py index b9cb69e..4a5f06e 100644 --- 
a/tests/test_neural_network.py +++ b/tests/test_neural_network.py @@ -107,18 +107,18 @@ class NeuralNetworkScene(Scene): self.play(forward_propagation_animation) -class ImageNeuralNetworkScene(Scene): +class GrayscaleImageNeuralNetworkScene(Scene): def construct(self): image = Image.open('images/image.jpeg') numpy_image = np.asarray(image) # Make nn layers = [ - ImageLayer(numpy_image, height=1.4), - FeedForwardLayer(3), + FeedForwardLayer(3), FeedForwardLayer(5), FeedForwardLayer(3), - FeedForwardLayer(6) + FeedForwardLayer(6), + ImageLayer(numpy_image, height=1.4) ] nn = NeuralNetwork(layers) nn.scale(1.3) @@ -129,6 +129,27 @@ class ImageNeuralNetworkScene(Scene): self.play(nn.make_forward_pass_animation(run_time=5)) self.play(nn.make_forward_pass_animation(run_time=5)) +class ImageNeuralNetworkScene(Scene): + + def construct(self): + image = Image.open('../assets/gan/real_image.jpg') + numpy_image = np.asarray(image) + # Make nn + layers = [ + FeedForwardLayer(3), + FeedForwardLayer(5), + FeedForwardLayer(3), + FeedForwardLayer(6), + ImageLayer(numpy_image, height=1.4) + ] + nn = NeuralNetwork(layers) + nn.scale(1.3) + # Center the nn + nn.move_to(ORIGIN) + self.add(nn) + # Play animation + self.play(nn.make_forward_pass_animation(run_time=5)) + self.play(nn.make_forward_pass_animation(run_time=5)) class EmbeddingNNScene(Scene): @@ -174,7 +195,7 @@ class LayerRemovalScene(Scene): image = Image.open('images/image.jpeg') numpy_image = np.asarray(image) - layer = FeedForwardLayer(5), + layer = FeedForwardLayer(5) layers = [ ImageLayer(numpy_image, height=1.4), FeedForwardLayer(3), @@ -186,7 +207,34 @@ class LayerRemovalScene(Scene): nn = NeuralNetwork(layers) self.play(Create(nn)) - self.play(nn.remove_layer(layer)) + remove_animation = nn.remove_layer(layer) + print("before remove") + self.play(remove_animation) + print(nn) + print("after remove") + +class LayerInsertionScene(Scene): + + def construct(self): + image = Image.open('images/image.jpeg') + numpy_image 
= np.asarray(image) + + layers = [ + ImageLayer(numpy_image, height=1.4), + FeedForwardLayer(3), + FeedForwardLayer(3), + FeedForwardLayer(6) + ] + + nn = NeuralNetwork(layers) + + self.play(Create(nn)) + + layer = FeedForwardLayer(5) + insert_animation = nn.insert_layer(layer, 4) + self.play(insert_animation) + print(nn) + print("after insert") if __name__ == "__main__": """Render all scenes""" diff --git a/tests/test_variational_autoencoder.py b/tests/test_variational_autoencoder.py index ac7d201..ede8523 100644 --- a/tests/test_variational_autoencoder.py +++ b/tests/test_variational_autoencoder.py @@ -1,19 +1,17 @@ from manim import * from PIL import Image -from manim_ml.neural_network.embedding import EmbeddingLayer -from manim_ml.neural_network.feed_forward import FeedForwardLayer -from manim_ml.neural_network.image import ImageLayer +from manim_ml.neural_network.layers import EmbeddingLayer, FeedForwardLayer, ImageLayer from manim_ml.neural_network.neural_network import NeuralNetwork config.pixel_height = 720 config.pixel_width = 1280 -config.frame_height = 6.0 -config.frame_width = 6.0 +config.frame_height = 8.0 +config.frame_width = 8.0 class VariationalAutoencoderScene(Scene): def construct(self): - embedding_layer = EmbeddingLayer() + embedding_layer = EmbeddingLayer(dist_theme="ellipse").scale(2) image = Image.open('images/image.jpeg') numpy_image = np.asarray(image) @@ -26,7 +24,7 @@ class VariationalAutoencoderScene(Scene): FeedForwardLayer(3), FeedForwardLayer(5), ImageLayer(numpy_image, height=1.4), - ]) + ], layer_spacing=0.1) neural_network.scale(1.3)