Embedding Neural Network Layer.

Alec Helbling
2022-04-14 00:33:00 -04:00
parent 09c9c4c093
commit b1490c0117
11 changed files with 767 additions and 313 deletions
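This commit adds an embedding layer to the neural network visualization system: an EmbeddingLayer that draws a small set of axes with a Gaussian point cloud and iso-density ellipses (GaussianDistribution), plus FeedForwardToEmbedding and EmbeddingToFeedForward connective layers so the embedding can sit inside a NeuralNetwork. It also introduces FeedForwardToImage and support for nesting one NeuralNetwork inside another. A minimal usage sketch, modeled on the EmbeddingNNScene test added below (the scene name here is illustrative, not part of the commit):

from manim import *
from manim_ml.neural_network.embedding import EmbeddingLayer
from manim_ml.neural_network.feed_forward import FeedForwardLayer
from manim_ml.neural_network.neural_network import NeuralNetwork

class EmbeddingDemoScene(Scene):
    def construct(self):
        # Encoder -> latent embedding -> decoder, mirroring EmbeddingNNScene
        embedding_layer = EmbeddingLayer()
        neural_network = NeuralNetwork([
            FeedForwardLayer(5),
            FeedForwardLayer(3),
            embedding_layer,
            FeedForwardLayer(3),
            FeedForwardLayer(5)
        ])
        self.play(Create(neural_network))
        # Dots converge onto a sampled latent point, then diverge into the decoder
        self.play(neural_network.make_forward_pass_animation(run_time=5))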

View File

@ -1,4 +1,4 @@
-# Manim Machine Learning
+# ManimML
<a href="https://github.com/helblazer811/ManimMachineLearning">
<img src="examples/media/ManimMLLogo.gif">
</a>
@ -8,7 +8,7 @@
![Pypi Downloads](https://img.shields.io/pypi/dm/manim-ml)
[![Follow Twitter](https://img.shields.io/twitter/follow/alec_helbling?style=social)](https://twitter.com/alec_helbling)
-Manim Machine Learning is a project focused on providing animations and visualizations of common machine learning concepts with the [Manim Community Library](https://www.manim.community/). We want this project to be a compilation of primitive visualizations that can be easily combined to create videos about complex machine learning concepts. Additionally, we want to provide a set of abstractions which allow users to focus on explanations instead of software engineering.
+ManimML is a project focused on providing animations and visualizations of common machine learning concepts with the [Manim Community Library](https://www.manim.community/). We want this project to be a compilation of primitive visualizations that can be easily combined to create videos about complex machine learning concepts. Additionally, we want to provide a set of abstractions which allow users to focus on explanations instead of software engineering.
## Table of Contents

View File

@ -1,110 +0,0 @@
"""
Layers that describe the connections between user layers.
"""
from manim import *
from manim_ml.neural_network.layers import NeuralNetworkLayer
from abc import ABC, abstractmethod
class ConnectiveLayer(NeuralNetworkLayer):
"""Forward pass animation for a given pair of layers"""
@abstractmethod
def __init__(self, input_layer, output_layer):
super(NeuralNetworkLayer, self).__init__()
self.input_layer = input_layer
self.output_layer = output_layer
@abstractmethod
def make_forward_pass_animation(self):
pass
class FeedForwardToFeedForward(ConnectiveLayer):
"""Layer for connecting FeedForward layer to FeedForwardLayer"""
def __init__(self, input_layer, output_layer, passing_flash=True,
dot_radius=0.05, animation_dot_color=RED, edge_color=WHITE,
edge_width=0.5):
super().__init__(input_layer, output_layer)
self.passing_flash = passing_flash
self.edge_color = edge_color
self.dot_radius = dot_radius
self.animation_dot_color = animation_dot_color
self.edge_width = edge_width
self.edges = self.construct_edges()
self.add(self.edges)
def construct_edges(self):
# Go through each node in the two layers and make a connecting line
edges = []
for node_i in self.input_layer.node_group:
for node_j in self.output_layer.node_group:
line = Line(node_i.get_center(), node_j.get_center(),
color=self.edge_color, stroke_width=self.edge_width)
edges.append(line)
edges = VGroup(*edges)
return edges
def make_forward_pass_animation(self, run_time=1):
"""Animation for passing information from one FeedForwardLayer to the next"""
path_animations = []
dots = []
for edge in self.edges:
dot = Dot(color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius)
# Handle layering
dot.set_z_index(1)
# Add to dots group
dots.append(dot)
# Make the animation
if self.passing_flash:
anim = ShowPassingFlash(edge.copy().set_color(self.animation_dot_color), time_width=0.2, run_time=3)
else:
anim = MoveAlongPath(dot, edge, run_time=run_time, rate_function=sigmoid)
path_animations.append(anim)
if not self.passing_flash:
dots = VGroup(*dots)
self.add(dots)
path_animations = AnimationGroup(*path_animations)
return path_animations
@override_animation(Create)
def _create_animation(self, **kwargs):
animations = []
for edge in self.edges:
animations.append(Create(edge))
animation_group = AnimationGroup(*animations, lag_ratio=0.0)
return animation_group
class ImageToFeedForward(ConnectiveLayer):
"""Image Layer to FeedForward layer"""
def __init__(self, input_layer, output_layer, animation_dot_color=RED,
dot_radius=0.05):
self.animation_dot_color = animation_dot_color
self.dot_radius = dot_radius
# Input assumed to be ImageLayer
# Output assumed to be FeedForwardLayer
super().__init__(input_layer, output_layer)
def make_forward_pass_animation(self):
"""Makes dots diverge from the given location and move to the feed forward nodes decoder"""
animations = []
image_mobject = self.input_layer.image_mobject
# Move the dots to the centers of each of the nodes in the FeedForwardLayer
image_location = image_mobject.get_center()
for node in self.output_layer.node_group:
new_dot = Dot(image_location, radius=self.dot_radius, color=self.animation_dot_color)
per_node_succession = Succession(
Create(new_dot),
new_dot.animate.move_to(node.get_center()),
)
animations.append(per_node_succession)
animation_group = AnimationGroup(*animations)
return animation_group
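The connective layers defined here are redistributed in this commit: ConnectiveLayer becomes an abstract base in manim_ml.neural_network.layers, FeedForwardToFeedForward moves into manim_ml.neural_network.feed_forward, and ImageToFeedForward moves into manim_ml.neural_network.image, alongside the new FeedForwardToImage, FeedForwardToEmbedding, and EmbeddingToFeedForward connectives shown later in the diff.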

View File

@ -0,0 +1,241 @@
from manim import *
from manim_ml.neural_network.layers import ConnectiveLayer, VGroupNeuralNetworkLayer
import numpy as np
import math
class GaussianDistribution(VGroup):
"""Object for drawing a Gaussian distribution"""
def __init__(self, axes, mean=None, cov=None, **kwargs):
super(VGroup, self).__init__(**kwargs)
self.axes = axes
self.mean = mean
self.cov = cov
if mean is None:
self.mean = np.array([0.0, 0.0])
if cov is None:
self.cov = np.array([[3, 0], [0, 3]])
# Make the Gaussian
self.ellipses = self.construct_gaussian_distribution(self.mean, self.cov)
self.ellipses.set_z_index(2)
@override_animation(Create)
def _create_gaussian_distribution(self):
return Create(self.ellipses)
def compute_covariance_rotation_and_scale(self, covariance):
# Get the eigenvectors and eigenvalues
eigenvalues, eigenvectors = np.linalg.eig(covariance)
y, x = eigenvectors[0, 1], eigenvectors[0, 0]
center_location = np.array([y, x, 0])
center_location = self.axes.coords_to_point(*center_location)
angle = math.atan(x / y) # x over y to denote the angle between y axis and vector
# Calculate the width and height
height = np.abs(eigenvalues[0])
width = np.abs(eigenvalues[1])
shape_coord = np.array([width, height, 0])
shape_coord = self.axes.coords_to_point(*shape_coord)
width = shape_coord[0]
height = shape_coord[1]
return angle, width, height
def construct_gaussian_distribution(self, mean, covariance, color=ORANGE,
num_ellipses=4):
"""Returns a 2d Gaussian distribution object with given mean and covariance"""
# map mean and covariance to frame coordinates
mean = self.axes.coords_to_point(*mean)
# Figure out the scale and angle of rotation
rotation, width, height = self.compute_covariance_rotation_and_scale(covariance)
# Make covariance ellipses
opacity = 0.0
ellipses = VGroup()
for ellipse_number in range(num_ellipses):
opacity += 1.0 / num_ellipses
ellipse_width = width * (1 - opacity)
ellipse_height = height * (1 - opacity)
ellipse = Ellipse(
width=ellipse_width,
height=ellipse_height,
color=color,
fill_opacity=opacity,
stroke_width=0.0
)
ellipse.move_to(mean)
ellipse.rotate(rotation)
ellipses.add(ellipse)
return ellipses
class EmbeddingLayer(VGroupNeuralNetworkLayer):
"""NeuralNetwork embedding object that can show probability distributions"""
def __init__(self, point_radius=0.02):
super(EmbeddingLayer, self).__init__()
self.point_radius = point_radius
self.axes = Axes(
tips=False,
x_length=1,
y_length=1
)
self.add(self.axes)
# Make point cloud
mean = np.array([0, 0])
covariance = np.array([[1.5, 0], [0, 1.5]])
self.point_cloud = self.construct_gaussian_point_cloud(mean, covariance)
self.add(self.point_cloud)
# Make latent distribution
self.latent_distribution = GaussianDistribution(self.axes, mean=mean, cov=covariance) # Use defaults
def sample_point_location_from_distribution(self):
"""Samples from the current latent distribution"""
mean = self.latent_distribution.mean
cov = self.latent_distribution.cov
point = np.random.multivariate_normal(mean, cov)
# Make dot at correct location
location = self.axes.coords_to_point(point[0], point[1])
return location
def get_distribution_location(self):
"""Returns mean of latent distribution in axes frame"""
return self.axes.coords_to_point(*self.latent_distribution.mean)
def construct_gaussian_point_cloud(self, mean, covariance, point_color=BLUE,
num_points=200):
"""Plots points sampled from a Gaussian with the given mean and covariance"""
# Sample points from a Gaussian
points = np.random.multivariate_normal(mean, covariance, num_points)
# Add each point to the axes
point_dots = VGroup()
for point in points:
point_location = self.axes.coords_to_point(*point)
dot = Dot(point_location, color=point_color, radius=self.point_radius/2)
point_dots.add(dot)
return point_dots
def make_forward_pass_animation(self):
"""Forward pass animation"""
# Make ellipse object corresponding to the latent distribution
self.latent_distribution = GaussianDistribution(self.axes) # Use defaults
# Create animation
animations = []
#create_distribution = Create(self.latent_distribution.construct_gaussian_distribution(self.latent_distribution.mean, self.latent_distribution.cov)) #Create(self.latent_distribution)
create_distribution = Create(self.latent_distribution.ellipses)
animations.append(create_distribution)
animation_group = AnimationGroup(*animations)
return animation_group
@override_animation(Create)
def _create_embedding_layer(self, **kwargs):
# Plot each point at once
point_animations = []
for point in self.point_cloud:
point_animations.append(GrowFromCenter(point))
point_animation = AnimationGroup(*point_animations, lag_ratio=1.0, run_time=2.5)
return point_animation
class FeedForwardToEmbedding(ConnectiveLayer):
"""Feed Forward to Embedding Layer"""
def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03):
super().__init__(input_layer, output_layer)
self.feed_forward_layer = input_layer
self.embedding_layer = output_layer
self.animation_dot_color = animation_dot_color
self.dot_radius = dot_radius
def make_forward_pass_animation(self, run_time=1.5):
"""Makes dots converge on a specific location"""
# Find point to converge on by sampling from gaussian distribution
location = self.embedding_layer.sample_point_location_from_distribution()
# Set the embedding layer latent distribution
# Move to location
animations = []
# Move the dots to the centers of each of the nodes in the FeedForwardLayer
dots = []
for node in self.feed_forward_layer.node_group:
new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color)
per_node_succession = Succession(
Create(new_dot),
new_dot.animate.move_to(location),
)
animations.append(per_node_succession)
dots.append(new_dot)
self.dots = VGroup(*dots)
self.add(self.dots)
# Follow up with remove animations
remove_animations = []
for dot in dots:
remove_animations.append(FadeOut(dot))
self.remove(self.dots)
remove_animations = AnimationGroup(*remove_animations, run_time=0.2)
animations = AnimationGroup(*animations)
animation_group = Succession(animations, remove_animations, lag_ratio=1.0)
return animation_group
@override_animation(Create)
def _create_embedding_layer(self, **kwargs):
return AnimationGroup()
class EmbeddingToFeedForward(ConnectiveLayer):
"""Feed Forward to Embedding Layer"""
def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03):
super().__init__(input_layer, output_layer)
self.feed_forward_layer = output_layer
self.embedding_layer = input_layer
self.animation_dot_color = animation_dot_color
self.dot_radius = dot_radius
def make_forward_pass_animation(self, run_time=1.5):
"""Makes dots diverge from the given location and move the decoder"""
# Find point to converge on by sampling from gaussian distribution
location = self.embedding_layer.sample_point_location_from_distribution()
# Move to location
animations = []
# Move the dots to the centers of each of the nodes in the FeedForwardLayer
dots = []
for node in self.feed_forward_layer.node_group:
new_dot = Dot(location, radius=self.dot_radius, color=self.animation_dot_color)
per_node_succession = Succession(
Create(new_dot),
new_dot.animate.move_to(node.get_center()),
)
animations.append(per_node_succession)
dots.append(new_dot)
# Follow up with remove animations
remove_animations = []
for dot in dots:
remove_animations.append(FadeOut(dot))
remove_animations = AnimationGroup(*remove_animations, run_time=0.2)
animations = AnimationGroup(*animations)
animation_group = Succession(animations, remove_animations, lag_ratio=1.0)
return animation_group
@override_animation(Create)
def _create_embedding_layer(self, **kwargs):
return AnimationGroup()
class NeuralNetworkEmbeddingTestScene(Scene):
def construct(self):
nne = EmbeddingLayer()
mean = np.array([0, 0])
cov = np.array([[5.0, 1.0], [0.0, 1.0]])
point_cloud = nne.construct_gaussian_point_cloud(mean, cov)
nne.add(point_cloud)
gaussian = nne.construct_gaussian_distribution(mean, cov)
nne.add(gaussian)
self.add(nne)
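As background for compute_covariance_rotation_and_scale above: the iso-density ellipse of a 2D Gaussian is conventionally aligned with the eigenvectors of the covariance matrix, with semi-axis lengths proportional to the square roots of the eigenvalues. Note that np.linalg.eig returns eigenvectors as columns, so eigenvectors[:, i] (rather than eigenvectors[i]) is the i-th eigenvector. A small standalone sketch of that convention (plain NumPy, not part of this commit):

import numpy as np

def ellipse_from_covariance(cov):
    """Rotation angle and semi-axis lengths of a Gaussian iso-density ellipse."""
    eigenvalues, eigenvectors = np.linalg.eig(cov)
    major_index = int(np.argmax(eigenvalues))
    major_vector = eigenvectors[:, major_index]  # eigenvectors are the columns
    angle = np.arctan2(major_vector[1], major_vector[0])  # rotation of the major axis from +x
    semi_axes = np.sqrt(eigenvalues)  # axis lengths, up to a chosen confidence scale
    return angle, semi_axes

print(ellipse_from_covariance(np.array([[3.0, 0.0], [0.0, 1.0]])))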

View File

@ -1,93 +0,0 @@
from manim import *
from manim_ml.neural_network.layers import NeuralNetworkLayer
import numpy as np
import math
class NeuralNetworkEmbedding(NeuralNetworkLayer, Axes):
"""NeuralNetwork embedding object that can show probability distributions"""
def __init__(self):
super().__init__(NeuralNetworkEmbedding, self)
def compute_covariance_rotation_and_scale(self, covariance):
# Get the eigenvectors and eigenvalues
eigenvalues, eigenvectors = np.linalg.eig(covariance)
y, x = eigenvectors[0, 1], eigenvectors[0, 0]
print(eigenvectors[0])
angle = math.atan(x / y) # x over y to denote the angle between y axis and vector
# Calculate the width and height
height = np.abs(eigenvalues[0])
width = np.abs(eigenvalues[1])
return angle, width, height
def construct_gaussian_distribution(self, mean, covariance, color=ORANGE,
dot_radius=0.05, num_ellipses=4):
"""Returns a 2d Gaussian distribution object with given mean and covariance"""
# map mean and covariance to frame coordinates
mean = self.coords_to_point(*mean)
# Figure out the scale and angle of rotation
rotation, width, height = self.compute_covariance_rotation_and_scale(covariance)
# Make covariance ellipses
opacity = 0.0
ellipses = VGroup()
for ellipse_number in range(num_ellipses):
opacity += 1.0 / num_ellipses
ellipse_width = width * (1 - opacity)
ellipse_height = height * (1 - opacity)
ellipse = Ellipse(
width=ellipse_width,
height=ellipse_height,
color=color,
fill_opacity=opacity,
stroke_width=0.0
)
ellipse.move_to(mean)
ellipse.rotate(rotation)
ellipses.add(ellipse)
return ellipses
def construct_gaussian_point_cloud(self, mean, covariance, color=BLUE):
"""Plots points sampled from a Gaussian with the given mean and covariance"""
embedding = VGroup()
# Sample points from a Gaussian
num_points = 200
standard_deviation = [0.9, 0.9]
mean = [0, 0]
points = np.random.normal(mean, standard_deviation, size=(num_points, 2))
# Make an axes
embedding.axes = Axes(
x_range=[-3, 3],
y_range=[-3, 3],
x_length=2.2,
y_length=2.2,
tips=False,
)
# Add each point to the axes
self.point_dots = VGroup()
for point in points:
point_location = embedding.axes.coords_to_point(*point)
dot = Dot(point_location, color=self.point_color, radius=self.dot_radius/2)
self.point_dots.add(dot)
embedding.add(self.point_dots)
return embedding
def make_forward_pass_animation(self):
pass
class NeuralNetworkEmbeddingTestScene(Scene):
def construct(self):
nne = NeuralNetworkEmbedding()
mean = np.array([0, 0])
cov = np.array([[0.1, 0.8], [0.0, 0.8]])
point_cloud = nne.construct_gaussian_point_cloud(mean, cov)
self.add(point_cloud)
gaussian = nne.construct_gaussian_distribution(mean, cov)
gaussian.scale(3)
self.add(gaussian)
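This earlier NeuralNetworkEmbedding prototype is removed; the same point-cloud and covariance-ellipse visualization is rebuilt in manim_ml.neural_network.embedding as the GaussianDistribution and EmbeddingLayer classes shown above, which plug into NeuralNetwork as ordinary layers.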

View File

@ -0,0 +1,128 @@
from manim import *
from manim_ml.neural_network.layers import VGroupNeuralNetworkLayer, ConnectiveLayer
class FeedForwardLayer(VGroupNeuralNetworkLayer):
"""Handles rendering a layer for a neural network"""
def __init__(self, num_nodes, layer_buffer=SMALL_BUFF/2, node_radius=0.08,
node_color=BLUE, node_outline_color=WHITE, rectangle_color=WHITE,
node_spacing=0.3, rectangle_fill_color=BLACK, node_stroke_width=2.0,
rectangle_stroke_width=2.0, animation_dot_color=RED):
super(VGroupNeuralNetworkLayer, self).__init__()
self.num_nodes = num_nodes
self.layer_buffer = layer_buffer
self.node_radius = node_radius
self.node_color = node_color
self.node_stroke_width = node_stroke_width
self.node_outline_color = node_outline_color
self.rectangle_stroke_width = rectangle_stroke_width
self.rectangle_color = rectangle_color
self.node_spacing = node_spacing
self.rectangle_fill_color = rectangle_fill_color
self.animation_dot_color = animation_dot_color
self.node_group = VGroup()
self._construct_neural_network_layer()
def _construct_neural_network_layer(self):
"""Creates the neural network layer"""
# Add Nodes
for node_number in range(self.num_nodes):
node_object = Circle(radius=self.node_radius, color=self.node_color,
stroke_width=self.node_stroke_width)
self.node_group.add(node_object)
# Space the nodes
# Assumes Vertical orientation
for node_index, node_object in enumerate(self.node_group):
location = node_index * self.node_spacing
node_object.move_to([0, location, 0])
# Create Surrounding Rectangle
self.surrounding_rectangle = SurroundingRectangle(self.node_group, color=self.rectangle_color,
fill_color=self.rectangle_fill_color, fill_opacity=1.0,
buff=self.layer_buffer, stroke_width=self.rectangle_stroke_width)
# Add the objects to the class
self.add(self.surrounding_rectangle, self.node_group)
def make_forward_pass_animation(self):
# make highlight animation
succession = Succession(
ApplyMethod(self.node_group.set_color, self.animation_dot_color, run_time=0.25),
Wait(1.0),
ApplyMethod(self.node_group.set_color, self.node_color, run_time=0.25),
)
return succession
@override_animation(Create)
def _create_animation(self, **kwargs):
animations = []
animations.append(Create(self.surrounding_rectangle))
for node in self.node_group:
animations.append(Create(node))
animation_group = AnimationGroup(*animations, lag_ratio=0.0)
return animation_group
class FeedForwardToFeedForward(ConnectiveLayer):
"""Layer for connecting FeedForward layer to FeedForwardLayer"""
def __init__(self, input_layer, output_layer, passing_flash=True,
dot_radius=0.05, animation_dot_color=RED, edge_color=WHITE,
edge_width=0.5):
super().__init__(input_layer, output_layer)
self.passing_flash = passing_flash
self.edge_color = edge_color
self.dot_radius = dot_radius
self.animation_dot_color = animation_dot_color
self.edge_width = edge_width
self.edges = self.construct_edges()
self.add(self.edges)
def construct_edges(self):
# Go through each node in the two layers and make a connecting line
edges = []
for node_i in self.input_layer.node_group:
for node_j in self.output_layer.node_group:
line = Line(node_i.get_center(), node_j.get_center(),
color=self.edge_color, stroke_width=self.edge_width)
edges.append(line)
edges = VGroup(*edges)
return edges
def make_forward_pass_animation(self, run_time=1):
"""Animation for passing information from one FeedForwardLayer to the next"""
path_animations = []
dots = []
for edge in self.edges:
dot = Dot(color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius)
# Add to dots group
dots.append(dot)
# Make the animation
if self.passing_flash:
anim = ShowPassingFlash(edge.copy().set_color(self.animation_dot_color), time_width=0.2, run_time=3)
else:
anim = MoveAlongPath(dot, edge, run_time=run_time, rate_function=sigmoid)
path_animations.append(anim)
if not self.passing_flash:
dots = VGroup(*dots)
self.add(dots)
path_animations = AnimationGroup(*path_animations)
return path_animations
@override_animation(Create)
def _create_animation(self, **kwargs):
animations = []
for edge in self.edges:
animations.append(Create(edge))
animation_group = AnimationGroup(*animations, lag_ratio=0.0)
return animation_group

View File

@ -0,0 +1,115 @@
from manim import *
from manim_ml.image import GrayscaleImageMobject
from manim_ml.neural_network.layers import ConnectiveLayer, NeuralNetworkLayer
class ImageLayer(NeuralNetworkLayer):
"""Single Image Layer for Neural Network"""
def __init__(self, numpy_image, height=1.5):
super().__init__()
self.set_z_index(1)
self.numpy_image = numpy_image
if len(np.shape(self.numpy_image)) == 2:
# Assumed Grayscale
self.image_mobject = GrayscaleImageMobject(self.numpy_image, height=height)
elif len(np.shape(self.numpy_image)) == 3:
# Assumed RGB
self.image_mobject = ImageMobject(self.numpy_image)
self.add(self.image_mobject)
"""
# Make an invisible box of the same width as the image object so that
# methods like get_right() work correctly.
self.invisible_rectangle = SurroundingRectangle(self.image_mobject, color=WHITE)
self.invisible_rectangle.set_fill(WHITE, opacity=0.0)
# self.invisible_rectangle.set_stroke(WHITE, opacity=0.0)
self.invisible_rectangle.move_to(self.image_mobject.get_center())
self.add(self.invisible_rectangle)
"""
@override_animation(Create)
def _create_animation(self, **kwargs):
return FadeIn(self.image_mobject)
def make_forward_pass_animation(self):
return Create(self.image_mobject)
def move_to(self, location):
"""Override of move to"""
self.image_mobject.move_to(location)
def get_right(self):
"""Override get right"""
return self.image_mobject.get_right()
@property
def width(self):
return self.image_mobject.width
class ImageToFeedForward(ConnectiveLayer):
"""Image Layer to FeedForward layer"""
def __init__(self, input_layer, output_layer, animation_dot_color=RED,
dot_radius=0.05):
self.animation_dot_color = animation_dot_color
self.dot_radius = dot_radius
self.feed_forward_layer = output_layer
self.image_layer = input_layer
super().__init__(input_layer, output_layer)
def make_forward_pass_animation(self):
"""Makes dots diverge from the given location and move to the feed forward nodes decoder"""
animations = []
dots = []
image_mobject = self.image_layer.image_mobject
# Move the dots to the centers of each of the nodes in the FeedForwardLayer
image_location = image_mobject.get_center()
for node in self.feed_forward_layer.node_group:
new_dot = Dot(image_location, radius=self.dot_radius, color=self.animation_dot_color)
per_node_succession = Succession(
Create(new_dot),
new_dot.animate.move_to(node.get_center()),
)
animations.append(per_node_succession)
dots.append(new_dot)
self.add(VGroup(*dots))
animation_group = AnimationGroup(*animations)
return animation_group
@override_animation(Create)
def _create_override(self):
return AnimationGroup()
class FeedForwardToImage(ConnectiveLayer):
"""Image Layer to FeedForward layer"""
def __init__(self, input_layer, output_layer, animation_dot_color=RED,
dot_radius=0.05):
self.animation_dot_color = animation_dot_color
self.dot_radius = dot_radius
self.feed_forward_layer = input_layer
self.image_layer = output_layer
super().__init__(input_layer, output_layer)
def make_forward_pass_animation(self):
"""Makes dots diverge from the given location and move to the feed forward nodes decoder"""
animations = []
image_mobject = self.image_layer.image_mobject
# Move the dots to the centers of each of the nodes in the FeedForwardLayer
image_location = image_mobject.get_center()
for node in self.feed_forward_layer.node_group:
new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color)
per_node_succession = Succession(
Create(new_dot),
new_dot.animate.move_to(image_location),
)
animations.append(per_node_succession)
animation_group = AnimationGroup(*animations)
return animation_group
@override_animation(Create)
def _create_override(self):
return AnimationGroup()

View File

@ -1,102 +1,40 @@
-from typing import overload
from manim import *
from abc import ABC, abstractmethod
-from matplotlib import animation
-from manim_ml.image import GrayscaleImageMobject
-class NeuralNetworkLayer(ABC, VGroup):
-"""Abstract Neural Network Layer class"""
-@abstractmethod
-def make_forward_pass_animation(self):
-pass
-class FeedForwardLayer(NeuralNetworkLayer):
-"""Handles rendering a layer for a neural network"""
-def __init__(self, num_nodes, layer_buffer=SMALL_BUFF/2, node_radius=0.08,
-node_color=BLUE, node_outline_color=WHITE, rectangle_color=WHITE,
-node_spacing=0.3, rectangle_fill_color=BLACK, node_stroke_width=2.0,
-rectangle_stroke_width=2.0, animation_dot_color=RED):
-super(NeuralNetworkLayer, self).__init__()
-self.num_nodes = num_nodes
-self.layer_buffer = layer_buffer
-self.node_radius = node_radius
-self.node_color = node_color
-self.node_stroke_width = node_stroke_width
-self.node_outline_color = node_outline_color
-self.rectangle_stroke_width = rectangle_stroke_width
-self.rectangle_color = rectangle_color
-self.node_spacing = node_spacing
-self.rectangle_fill_color = rectangle_fill_color
-self.animation_dot_color = animation_dot_color
-self.node_group = VGroup()
-self._construct_neural_network_layer()
-def _construct_neural_network_layer(self):
-"""Creates the neural network layer"""
-# Add Nodes
-for node_number in range(self.num_nodes):
-node_object = Circle(radius=self.node_radius, color=self.node_color,
-stroke_width=self.node_stroke_width)
-self.node_group.add(node_object)
-# Space the nodes
-# Assumes Vertical orientation
-for node_index, node_object in enumerate(self.node_group):
-location = node_index * self.node_spacing
-node_object.move_to([0, location, 0])
-# Create Surrounding Rectangle
-self.surrounding_rectangle = SurroundingRectangle(self.node_group, color=self.rectangle_color,
-fill_color=self.rectangle_fill_color, fill_opacity=1.0,
-buff=self.layer_buffer, stroke_width=self.rectangle_stroke_width)
-# Add the objects to the class
-self.add(self.surrounding_rectangle, self.node_group)
-def make_forward_pass_animation(self):
-# make highlight animation
-succession = Succession(
-ApplyMethod(self.node_group.set_color, self.animation_dot_color, run_time=0.25),
-Wait(1.0),
-ApplyMethod(self.node_group.set_color, self.node_color, run_time=0.25),
-)
-return succession
-@override_animation(Create)
-def _create_animation(self, **kwargs):
-animations = []
-animations.append(Create(self.surrounding_rectangle))
-for node in self.node_group:
-animations.append(Create(node))
-animation_group = AnimationGroup(*animations, lag_ratio=0.0)
-return animation_group
-class ImageLayer(NeuralNetworkLayer):
-"""Image Layer for Neural Network"""
-def __init__(self, numpy_image, height=1.5):
-super().__init__()
-self.numpy_image = numpy_image
-if len(np.shape(self.numpy_image)) == 2:
-# Assumed Grayscale
-self.image_mobject = GrayscaleImageMobject(self.numpy_image, height=height)
-elif len(np.shape(self.numpy_image)) == 3:
-# Assumed RGB
-self.image_mobject = ImageMobject(self.numpy_image)
-@override_animation(Create)
-def _create_animation(self, **kwargs):
-return FadeIn(self.image_mobject)
-def make_forward_pass_animation(self):
-return Create(self.image_mobject)
-@property
-def width(self):
-return self.image_mobject.width
+class NeuralNetworkLayer(ABC, Group):
+"""Abstract Neural Network Layer class"""
+def __init__(self, **kwargs):
+super(Group, self).__init__()
+self.set_z_index(1)
+@abstractmethod
+def make_forward_pass_animation(self):
+pass
+def __repr__(self):
+return f"{type(self).__name__}"
+class VGroupNeuralNetworkLayer(NeuralNetworkLayer):
+def __init__(self, **kwargs):
+super(NeuralNetworkLayer, self).__init__()
+@abstractmethod
+def make_forward_pass_animation(self):
+pass
+class ConnectiveLayer(VGroupNeuralNetworkLayer):
+"""Forward pass animation for a given pair of layers"""
+@abstractmethod
+def __init__(self, input_layer, output_layer):
+super(VGroupNeuralNetworkLayer, self).__init__()
+self.input_layer = input_layer
+self.output_layer = output_layer
+self.set_z_index(-1)
+@abstractmethod
+def make_forward_pass_animation(self):
+pass

View File

@ -10,23 +10,27 @@ Example:
NeuralNetwork(layer_node_count)
"""
from manim import *
-from matplotlib import animation
-from numpy import isin
import warnings
-from manim_ml.neural_network.layers import FeedForwardLayer, ImageLayer
-from manim_ml.neural_network.connective_layers import FeedForwardToFeedForward, ImageToFeedForward
-class NeuralNetwork(VGroup):
+import textwrap
+from numpy import string_
+from manim_ml.neural_network.embedding import EmbeddingLayer, EmbeddingToFeedForward, FeedForwardToEmbedding
+from manim_ml.neural_network.feed_forward import FeedForwardLayer, FeedForwardToFeedForward
+from manim_ml.neural_network.image import ImageLayer, ImageToFeedForward, FeedForwardToImage
+class NeuralNetwork(Group):
def __init__(self, input_layers, edge_color=WHITE, layer_spacing=0.8,
animation_dot_color=RED, edge_width=1.5, dot_radius=0.03):
-super().__init__()
-self.input_layers = VGroup(*input_layers)
+super(Group, self).__init__()
+self.input_layers = Group(*input_layers)
self.edge_width = edge_width
self.edge_color = edge_color
self.layer_spacing = layer_spacing
self.animation_dot_color = animation_dot_color
self.dot_radius = dot_radius
+self.created = False
# TODO take layer_node_count [0, (1, 2), 0]
# and make it have explicit distinct subspaces
self._place_layers()
@ -34,62 +38,87 @@ class NeuralNetwork(VGroup):
# Center the whole diagram by default
self.all_layers.move_to(ORIGIN)
self.add(self.all_layers)
+# print nn
+print(repr(self))
def _place_layers(self):
"""Creates the neural network"""
# TODO implement more sophisticated custom layouts
+# Default: Linear layout
for layer_index in range(1, len(self.input_layers)):
previous_layer = self.input_layers[layer_index - 1]
current_layer = self.input_layers[layer_index]
-# Manage spacing
-# Default: half each width times 2
-spacing = config.frame_width * 0.05 + (previous_layer.width / 2 + current_layer.width / 2)
-current_layer.move_to(previous_layer.get_center())
-current_layer.shift(np.array([spacing, 0, 0]))
-# Add layer to VGroup
+current_layer.move_to(previous_layer)
+shift_vector = np.array([(previous_layer.get_width()/2 + current_layer.get_width()/2) + 0.2, 0, 0])
+current_layer.shift(shift_vector)
# Handle layering
self.input_layers.set_z_index(2)
def _construct_connective_layers(self):
"""Draws connecting lines between layers"""
-connective_layers = VGroup()
-all_layers = VGroup()
+connective_layers = Group()
+all_layers = Group()
for layer_index in range(len(self.input_layers) - 1):
current_layer = self.input_layers[layer_index]
all_layers.add(current_layer)
next_layer = self.input_layers[layer_index + 1]
+# Check if layer is actually a nested NeuralNetwork
+if isinstance(current_layer, NeuralNetwork):
+# Last layer of the current layer
+current_layer = current_layer.all_layers[-1]
+if isinstance(next_layer, NeuralNetwork):
+# First layer of the next layer
+next_layer = next_layer.all_layers[0]
if isinstance(current_layer, FeedForwardLayer) \
and isinstance(next_layer, FeedForwardLayer):
+# FeedForward to FeedForward
edge_layer = FeedForwardToFeedForward(current_layer, next_layer,
edge_width=self.edge_width)
connective_layers.add(edge_layer)
all_layers.add(edge_layer)
elif isinstance(current_layer, ImageLayer) \
and isinstance(next_layer, FeedForwardLayer):
+# Image to FeedForward
image_to_feedforward = ImageToFeedForward(current_layer, next_layer, dot_radius=self.dot_radius)
connective_layers.add(image_to_feedforward)
all_layers.add(image_to_feedforward)
+elif isinstance(current_layer, FeedForwardLayer) \
+and isinstance(next_layer, ImageLayer):
+# FeedForward to Image
+feed_forward_to_image = FeedForwardToImage(current_layer, next_layer, dot_radius=self.dot_radius)
+connective_layers.add(feed_forward_to_image)
+all_layers.add(feed_forward_to_image)
+elif isinstance(current_layer, FeedForwardLayer) \
+and isinstance(next_layer, EmbeddingLayer):
+# FeedForward to Embedding
+layer = FeedForwardToEmbedding(current_layer, next_layer,
+animation_dot_color=self.animation_dot_color, dot_radius=self.dot_radius)
+connective_layers.add(layer)
+all_layers.add(layer)
+elif isinstance(current_layer, EmbeddingLayer) \
+and isinstance(next_layer, FeedForwardLayer):
+# Embedding to FeedForward
+layer = EmbeddingToFeedForward(current_layer, next_layer,
+animation_dot_color=self.animation_dot_color, dot_radius=self.dot_radius)
+connective_layers.add(layer)
+all_layers.add(layer)
else:
warnings.warn(f"Warning: unimplemented connection for layer types: {type(current_layer)} and {type(next_layer)}")
# Add final layer
all_layers.add(self.input_layers[-1])
# Handle layering
+connective_layers.set_z_index(0)
return connective_layers, all_layers
def make_forward_pass_animation(self, run_time=10, passing_flash=True):
"""Generates an animation for feed forward propagation"""
all_animations = []
for layer_index, layer in enumerate(self.input_layers[:-1]):
layer_forward_pass = layer.make_forward_pass_animation()
all_animations.append(layer_forward_pass)
connective_layer = self.connective_layers[layer_index]
connective_forward_pass = connective_layer.make_forward_pass_animation()
all_animations.append(connective_forward_pass)
# Do last layer animation
last_layer_forward_pass = self.input_layers[-1].make_forward_pass_animation()
all_animations.append(last_layer_forward_pass)
@ -101,17 +130,39 @@ class NeuralNetwork(VGroup):
@override_animation(Create)
def _create_override(self, **kwargs):
"""Overrides Create animation"""
+# Stop the neural network from being created twice
+if self.created:
+return AnimationGroup()
+self.created = True
# Create each layer one by one
animations = []
for layer in self.all_layers:
+print(layer)
animation = Create(layer)
animations.append(animation)
animation_group = AnimationGroup(*animations, lag_ratio=1.0)
return animation_group
+def remove_layer(self, layer_index):
+"""Removes layer at given index and returns animation for removing the layer"""
+raise NotImplementedError()
+def add_layer(self, layer):
+"""Adds layer and returns animation for adding action"""
+raise NotImplementedError()
+def __repr__(self):
+"""Print string representation of layers"""
+inner_string = ""
+for layer in self.all_layers:
+inner_string += f"{repr(layer)},\n"
+inner_string = textwrap.indent(inner_string, " ")
+string_repr = "NeuralNetwork([\n" + inner_string + "])"
+return string_repr
class FeedForwardNeuralNetwork(NeuralNetwork):
"""NeuralNetwork with just feed forward layers"""

View File

@ -0,0 +1,113 @@
"""Variational Autoencoder Manim Visualizations
In this module I define Manim visualizations for Variational Autoencoders
and Traditional Autoencoders.
"""
from types import WrapperDescriptorType
from manim import *
import numpy as np
from PIL import Image
import os
from manim_ml.neural_network.feed_forward import FeedForwardLayer
from manim_ml.neural_network.image import ImageLayer
from manim_ml.neural_network.neural_network import NeuralNetwork
from manim_ml.neural_network.embedding import EmbeddingLayer
class VariationalAutoencoder(VGroup):
"""Variational Autoencoder Manim Visualization"""
def __init__(self, encoder_nodes_per_layer=[5, 3], decoder_nodes_per_layer=[3, 5],
point_color=BLUE, dot_radius=0.05, ellipse_stroke_width=1.0,
layer_spacing=0.5):
super(VGroup, self).__init__()
self.encoder_nodes_per_layer = encoder_nodes_per_layer
self.decoder_nodes_per_layer = decoder_nodes_per_layer
self.point_color = point_color
self.dot_radius = dot_radius
self.layer_spacing = layer_spacing
self.ellipse_stroke_width = ellipse_stroke_width
# Make the VMobjects
self.neural_network, self.embedding_layer = self._construct_neural_network()
def _construct_neural_network(self):
"""Makes the VAE encoder, embedding layer, and decoder"""
embedding_layer = EmbeddingLayer()
neural_network = NeuralNetwork([
FeedForwardLayer(5),
FeedForwardLayer(3),
embedding_layer,
FeedForwardLayer(3),
FeedForwardLayer(5)
])
return neural_network, embedding_layer
@override_animation(Create)
def _create_vae(self):
return Create(self.neural_network)
def make_triplet_forward_pass(self, triplet):
pass
def make_image_forward_pass(self, input_image, output_image, run_time=1.5):
"""Override forward pass animation specific to a VAE"""
# Make a wrapper NN with images
wrapper_neural_network = NeuralNetwork([
ImageLayer(input_image),
self.neural_network,
ImageLayer(output_image)
])
# Make animation
animation_group = AnimationGroup(
Create(wrapper_neural_network),
wrapper_neural_network.make_forward_pass_animation(),
lag_ratio=1.0
)
return animation_group
"""
# Make encoder forward pass
encoder_forward_pass = self.encoder.make_forward_propagation_animation(run_time=per_unit_runtime)
# Make red dot in embedding
mean = [1.0, 1.5]
mean_point = self.embedding.axes.coords_to_point(*mean)
std = [0.8, 1.2]
# Make the dot convergence animation
dot_convergence_animation = self.make_dot_convergence_animation(mean, run_time=per_unit_runtime)
encoding_succession = Succession(
encoder_forward_pass,
dot_convergence_animation
)
# Make an ellipse centered at mean_point with std outline
center_dot = Dot(mean_point, radius=self.dot_radius, color=RED)
ellipse = Ellipse(width=std[0], height=std[1], color=RED, fill_opacity=0.3, stroke_width=self.ellipse_stroke_width)
ellipse.move_to(mean_point)
self.distribution_objects = VGroup(
center_dot,
ellipse
)
# Make ellipse animation
ellipse_animation = AnimationGroup(
GrowFromCenter(center_dot),
GrowFromCenter(ellipse),
)
# Make the dot divergence animation
sampled_point = [0.51, 1.0]
divergence_point = self.embedding.axes.coords_to_point(*sampled_point)
dot_divergence_animation = self.make_dot_divergence_animation(divergence_point, run_time=per_unit_runtime)
# Make decoder forward pass
decoder_forward_pass = self.decoder.make_forward_propagation_animation(run_time=per_unit_runtime)
# Add the animations to the group
animation_group = AnimationGroup(
FadeIn(self.input_image),
encoding_succession,
ellipse_animation,
dot_divergence_animation,
decoder_forward_pass,
FadeIn(self.output_image),
lag_ratio=1,
)
"""

View File

@ -1,5 +1,7 @@
from manim import *
-from manim_ml.neural_network.layers import FeedForwardLayer, ImageLayer
+from manim_ml.neural_network.embedding import EmbeddingLayer
+from manim_ml.neural_network.feed_forward import FeedForwardLayer
+from manim_ml.neural_network.image import ImageLayer
from manim_ml.neural_network.neural_network import NeuralNetwork, FeedForwardNeuralNetwork
from PIL import Image
import numpy as np
@ -53,7 +55,42 @@ class ImageNeuralNetworkScene(Scene):
nn.move_to(ORIGIN)
self.add(nn)
# Play animation
-self.play(nn.make_forward_pass_animation(run_time=10))
+self.play(nn.make_forward_pass_animation(run_time=5))
+self.play(nn.make_forward_pass_animation(run_time=5))
class EmbeddingNNScene(Scene):
def construct(self):
embedding_layer = EmbeddingLayer()
neural_network = NeuralNetwork([
FeedForwardLayer(5),
FeedForwardLayer(3),
embedding_layer,
FeedForwardLayer(3),
FeedForwardLayer(5)
])
self.play(Create(neural_network))
self.play(neural_network.make_forward_pass_animation(run_time=5))
class RecursiveNNScene(Scene):
def construct(self):
nn = NeuralNetwork([
NeuralNetwork([
FeedForwardLayer(3),
FeedForwardLayer(2)
]),
NeuralNetwork([
FeedForwardLayer(2),
FeedForwardLayer(3)
])
])
self.play(Create(nn))
if __name__ == "__main__":
"""Render all scenes"""

View File

@ -0,0 +1,34 @@
from manim import *
from PIL import Image
from manim_ml.neural_network.embedding import EmbeddingLayer, GaussianDistribution
from manim_ml.neural_network.feed_forward import FeedForwardLayer
from manim_ml.neural_network.image import ImageLayer
from manim_ml.neural_network.neural_network import NeuralNetwork
config.pixel_height = 720
config.pixel_width = 1280
config.frame_height = 6.0
config.frame_width = 6.0
class GaussianScene(Scene):
def construct(self):
embedding_layer = EmbeddingLayer()
image = Image.open('images/image.jpeg')
numpy_image = np.asarray(image)
# Make nn
neural_network = NeuralNetwork([
ImageLayer(numpy_image, height=1.4),
FeedForwardLayer(5),
FeedForwardLayer(3),
embedding_layer,
FeedForwardLayer(3),
FeedForwardLayer(5),
ImageLayer(numpy_image, height=1.4),
])
neural_network.scale(1.3)
self.play(Create(neural_network))
self.play(neural_network.make_forward_pass_animation(run_time=15))