Mirror of https://github.com/helblazer811/ManimML.git (synced 2025-05-17 18:55:54 +08:00)

Commit: Refactored Neural Network Layers into their own files.
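The refactor splits the old embedding, feed_forward, and image modules into one file per layer under manim_ml/neural_network/layers/. A minimal sketch of how downstream imports change, taken from the test updates included in this commit:

# Before this commit: several classes lived in shared modules
from manim_ml.neural_network.embedding import EmbeddingLayer
from manim_ml.neural_network.feed_forward import FeedForwardLayer
from manim_ml.neural_network.image import ImageLayer

# After this commit: one module per layer inside the layers package
from manim_ml.neural_network.layers.embedding import EmbeddingLayer
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.layers.image import ImageLayer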
@@ -1,241 +0,0 @@
from manim import *
from manim_ml.neural_network.layers import ConnectiveLayer, VGroupNeuralNetworkLayer
import numpy as np
import math

class GaussianDistribution(VGroup):
    """Object for drawing a Gaussian distribution"""

    def __init__(self, axes, mean=None, cov=None, **kwargs):
        super(VGroup, self).__init__(**kwargs)
        self.axes = axes
        self.mean = mean
        self.cov = cov
        if mean is None:
            self.mean = np.array([0.0, 0.0])
        if cov is None:
            self.cov = np.array([[3, 0], [0, 3]])
        # Make the Gaussian
        self.ellipses = self.construct_gaussian_distribution(self.mean, self.cov)
        self.ellipses.set_z_index(2)

    @override_animation(Create)
    def _create_gaussian_distribution(self):
        return Create(self.ellipses)

    def compute_covariance_rotation_and_scale(self, covariance):
        # Get the eigenvectors and eigenvalues
        eigenvalues, eigenvectors = np.linalg.eig(covariance)
        y, x = eigenvectors[0, 1], eigenvectors[0, 0]
        center_location = np.array([y, x, 0])
        center_location = self.axes.coords_to_point(*center_location)
        angle = math.atan(x / y)  # x over y to denote the angle between y axis and vector
        # Calculate the width and height
        height = np.abs(eigenvalues[0])
        width = np.abs(eigenvalues[1])
        shape_coord = np.array([width, height, 0])
        shape_coord = self.axes.coords_to_point(*shape_coord)
        width = shape_coord[0]
        height = shape_coord[1]

        return angle, width, height

    def construct_gaussian_distribution(self, mean, covariance, color=ORANGE,
                                        num_ellipses=4):
        """Returns a 2d Gaussian distribution object with given mean and covariance"""
        # map mean and covariance to frame coordinates
        mean = self.axes.coords_to_point(*mean)
        # Figure out the scale and angle of rotation
        rotation, width, height = self.compute_covariance_rotation_and_scale(covariance)
        # Make covariance ellipses
        opacity = 0.0
        ellipses = VGroup()
        for ellipse_number in range(num_ellipses):
            opacity += 1.0 / num_ellipses
            ellipse_width = width * (1 - opacity)
            ellipse_height = height * (1 - opacity)
            ellipse = Ellipse(
                width=ellipse_width,
                height=ellipse_height,
                color=color,
                fill_opacity=opacity,
                stroke_width=0.0
            )
            ellipse.move_to(mean)
            ellipse.rotate(rotation)
            ellipses.add(ellipse)

        return ellipses
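For intuition on compute_covariance_rotation_and_scale above: the covariance matrix is decomposed into eigenvalues (the spread along the ellipse's principal axes) and eigenvectors (the directions of those axes). A small standalone sketch of that decomposition, independent of Manim; the example matrix is illustrative, not taken from the repo:

import math
import numpy as np

# Illustrative covariance of a correlated 2D Gaussian
covariance = np.array([[2.0, 1.0],
                       [1.0, 2.0]])
eigenvalues, eigenvectors = np.linalg.eig(covariance)

# The eigenvalues set the ellipse's width and height (3.0 and 1.0 here),
# the eigenvectors set its orientation.
print(sorted(eigenvalues, reverse=True))   # [3.0, 1.0]
y, x = eigenvectors[0, 1], eigenvectors[0, 0]
angle = math.atan(x / y)                   # same angle convention as the class above
print(math.degrees(angle))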
class EmbeddingLayer(VGroupNeuralNetworkLayer):
|
||||
"""NeuralNetwork embedding object that can show probability distributions"""
|
||||
|
||||
def __init__(self, point_radius=0.02):
|
||||
super(EmbeddingLayer, self).__init__()
|
||||
self.point_radius = point_radius
|
||||
self.axes = Axes(
|
||||
tips=False,
|
||||
x_length=1,
|
||||
y_length=1
|
||||
)
|
||||
self.add(self.axes)
|
||||
# Make point cloud
|
||||
mean = np.array([0, 0])
|
||||
covariance = np.array([[1.5, 0], [0, 1.5]])
|
||||
self.point_cloud = self.construct_gaussian_point_cloud(mean, covariance)
|
||||
self.add(self.point_cloud)
|
||||
# Make latent distribution
|
||||
self.latent_distribution = GaussianDistribution(self.axes, mean=mean, cov=covariance) # Use defaults
|
||||
|
||||
def sample_point_location_from_distribution(self):
|
||||
"""Samples from the current latent distribution"""
|
||||
mean = self.latent_distribution.mean
|
||||
cov = self.latent_distribution.cov
|
||||
point = np.random.multivariate_normal(mean, cov)
|
||||
# Make dot at correct location
|
||||
location = self.axes.coords_to_point(point[0], point[1])
|
||||
|
||||
return location
|
||||
|
||||
def get_distribution_location(self):
|
||||
"""Returns mean of latent distribution in axes frame"""
|
||||
return self.axes.coords_to_point(self.latent_distribution.mean)
|
||||
|
||||
def construct_gaussian_point_cloud(self, mean, covariance, point_color=BLUE,
|
||||
num_points=200):
|
||||
"""Plots points sampled from a Gaussian with the given mean and covariance"""
|
||||
# Sample points from a Gaussian
|
||||
points = np.random.multivariate_normal(mean, covariance, num_points)
|
||||
# Add each point to the axes
|
||||
point_dots = VGroup()
|
||||
for point in points:
|
||||
point_location = self.axes.coords_to_point(*point)
|
||||
dot = Dot(point_location, color=point_color, radius=self.point_radius/2)
|
||||
point_dots.add(dot)
|
||||
|
||||
return point_dots
|
||||
|
||||
def make_forward_pass_animation(self):
|
||||
"""Forward pass animation"""
|
||||
# Make ellipse object corresponding to the latent distribution
|
||||
self.latent_distribution = GaussianDistribution(self.axes) # Use defaults
|
||||
# Create animation
|
||||
animations = []
|
||||
|
||||
#create_distribution = Create(self.latent_distribution.construct_gaussian_distribution(self.latent_distribution.mean, self.latent_distribution.cov)) #Create(self.latent_distribution)
|
||||
create_distribution = Create(self.latent_distribution.ellipses)
|
||||
animations.append(create_distribution)
|
||||
|
||||
animation_group = AnimationGroup(*animations)
|
||||
|
||||
return animation_group
|
||||
|
||||
@override_animation(Create)
|
||||
def _create_embedding_layer(self, **kwargs):
|
||||
# Plot each point at once
|
||||
point_animations = []
|
||||
for point in self.point_cloud:
|
||||
point_animations.append(GrowFromCenter(point))
|
||||
|
||||
point_animation = AnimationGroup(*point_animations, lag_ratio=1.0, run_time=2.5)
|
||||
|
||||
return point_animation
|
||||
|
||||
class FeedForwardToEmbedding(ConnectiveLayer):
|
||||
"""Feed Forward to Embedding Layer"""
|
||||
|
||||
def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03):
|
||||
super().__init__(input_layer, output_layer)
|
||||
self.feed_forward_layer = input_layer
|
||||
self.embedding_layer = output_layer
|
||||
self.animation_dot_color = animation_dot_color
|
||||
self.dot_radius = dot_radius
|
||||
|
||||
def make_forward_pass_animation(self, run_time=1.5):
|
||||
"""Makes dots converge on a specific location"""
|
||||
# Find point to converge on by sampling from gaussian distribution
|
||||
location = self.embedding_layer.sample_point_location_from_distribution()
|
||||
# Set the embedding layer latent distribution
|
||||
# Move to location
|
||||
animations = []
|
||||
# Move the dots to the centers of each of the nodes in the FeedForwardLayer
|
||||
dots = []
|
||||
for node in self.feed_forward_layer.node_group:
|
||||
new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color)
|
||||
per_node_succession = Succession(
|
||||
Create(new_dot),
|
||||
new_dot.animate.move_to(location),
|
||||
)
|
||||
animations.append(per_node_succession)
|
||||
dots.append(new_dot)
|
||||
self.dots = VGroup(*dots)
|
||||
self.add(self.dots)
|
||||
# Follow up with remove animations
|
||||
remove_animations = []
|
||||
for dot in dots:
|
||||
remove_animations.append(FadeOut(dot))
|
||||
self.remove(self.dots)
|
||||
remove_animations = AnimationGroup(*remove_animations, run_time=0.2)
|
||||
animations = AnimationGroup(*animations)
|
||||
animation_group = Succession(animations, remove_animations, lag_ratio=1.0)
|
||||
|
||||
return animation_group
|
||||
|
||||
@override_animation(Create)
|
||||
def _create_embedding_layer(self, **kwargs):
|
||||
return AnimationGroup()
|
||||
|
||||
class EmbeddingToFeedForward(ConnectiveLayer):
|
||||
"""Feed Forward to Embedding Layer"""
|
||||
|
||||
def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03):
|
||||
super().__init__(input_layer, output_layer)
|
||||
self.feed_forward_layer = output_layer
|
||||
self.embedding_layer = input_layer
|
||||
self.animation_dot_color = animation_dot_color
|
||||
self.dot_radius = dot_radius
|
||||
|
||||
def make_forward_pass_animation(self, run_time=1.5):
|
||||
"""Makes dots diverge from the given location and move the decoder"""
|
||||
# Find point to converge on by sampling from gaussian distribution
|
||||
location = self.embedding_layer.sample_point_location_from_distribution()
|
||||
# Move to location
|
||||
animations = []
|
||||
# Move the dots to the centers of each of the nodes in the FeedForwardLayer
|
||||
dots = []
|
||||
for node in self.feed_forward_layer.node_group:
|
||||
new_dot = Dot(location, radius=self.dot_radius, color=self.animation_dot_color)
|
||||
per_node_succession = Succession(
|
||||
Create(new_dot),
|
||||
new_dot.animate.move_to(node.get_center()),
|
||||
)
|
||||
animations.append(per_node_succession)
|
||||
dots.append(new_dot)
|
||||
# Follow up with remove animations
|
||||
remove_animations = []
|
||||
for dot in dots:
|
||||
remove_animations.append(FadeOut(dot))
|
||||
remove_animations = AnimationGroup(*remove_animations, run_time=0.2)
|
||||
animations = AnimationGroup(*animations)
|
||||
animation_group = Succession(animations, remove_animations, lag_ratio=1.0)
|
||||
|
||||
return animation_group
|
||||
|
||||
@override_animation(Create)
|
||||
def _create_embedding_layer(self, **kwargs):
|
||||
return AnimationGroup()
|
||||
|
||||
class NeuralNetworkEmbeddingTestScene(Scene):
|
||||
|
||||
def construct(self):
|
||||
nne = EmbeddingLayer()
|
||||
mean = np.array([0, 0])
|
||||
cov = np.array([[5.0, 1.0], [0.0, 1.0]])
|
||||
|
||||
point_cloud = nne.construct_gaussian_point_cloud(mean, cov)
|
||||
nne.add(point_cloud)
|
||||
|
||||
gaussian = nne.construct_gaussian_distribution(mean, cov)
|
||||
nne.add(gaussian)
|
||||
|
||||
self.add(nne)
|
@@ -1,115 +0,0 @@
from manim import *
from manim_ml.image import GrayscaleImageMobject
from manim_ml.neural_network.layers import ConnectiveLayer, NeuralNetworkLayer

class ImageLayer(NeuralNetworkLayer):
    """Single Image Layer for Neural Network"""

    def __init__(self, numpy_image, height=1.5):
        super().__init__()
        self.set_z_index(1)
        self.numpy_image = numpy_image
        if len(np.shape(self.numpy_image)) == 2:
            # Assumed Grayscale
            self.image_mobject = GrayscaleImageMobject(self.numpy_image, height=height)
        elif len(np.shape(self.numpy_image)) == 3:
            # Assumed RGB
            self.image_mobject = ImageMobject(self.numpy_image)
        self.add(self.image_mobject)
        """
        # Make an invisible box of the same width as the image object so that
        # methods like get_right() work correctly.
        self.invisible_rectangle = SurroundingRectangle(self.image_mobject, color=WHITE)
        self.invisible_rectangle.set_fill(WHITE, opacity=0.0)
        # self.invisible_rectangle.set_stroke(WHITE, opacity=0.0)
        self.invisible_rectangle.move_to(self.image_mobject.get_center())
        self.add(self.invisible_rectangle)
        """

    @override_animation(Create)
    def _create_animation(self, **kwargs):
        return FadeIn(self.image_mobject)

    def make_forward_pass_animation(self):
        return Create(self.image_mobject)

    def move_to(self, location):
        """Override of move to"""
        self.image_mobject.move_to(location)

    def get_right(self):
        """Override get right"""
        return self.image_mobject.get_right()

    @property
    def width(self):
        return self.image_mobject.width

class ImageToFeedForward(ConnectiveLayer):
    """Image Layer to FeedForward layer"""

    def __init__(self, input_layer, output_layer, animation_dot_color=RED,
                 dot_radius=0.05):
        self.animation_dot_color = animation_dot_color
        self.dot_radius = dot_radius

        self.feed_forward_layer = output_layer
        self.image_layer = input_layer
        super().__init__(input_layer, output_layer)

    def make_forward_pass_animation(self):
        """Makes dots diverge from the given location and move to the feed forward nodes decoder"""
        animations = []
        dots = []
        image_mobject = self.image_layer.image_mobject
        # Move the dots to the centers of each of the nodes in the FeedForwardLayer
        image_location = image_mobject.get_center()
        for node in self.feed_forward_layer.node_group:
            new_dot = Dot(image_location, radius=self.dot_radius, color=self.animation_dot_color)
            per_node_succession = Succession(
                Create(new_dot),
                new_dot.animate.move_to(node.get_center()),
            )
            animations.append(per_node_succession)
            dots.append(new_dot)
        self.add(VGroup(*dots))
        animation_group = AnimationGroup(*animations)
        return animation_group

    @override_animation(Create)
    def _create_override(self):
        return AnimationGroup()

class FeedForwardToImage(ConnectiveLayer):
    """Image Layer to FeedForward layer"""

    def __init__(self, input_layer, output_layer, animation_dot_color=RED,
                 dot_radius=0.05):
        self.animation_dot_color = animation_dot_color
        self.dot_radius = dot_radius

        self.feed_forward_layer = input_layer
        self.image_layer = output_layer
        super().__init__(input_layer, output_layer)

    def make_forward_pass_animation(self):
        """Makes dots diverge from the given location and move to the feed forward nodes decoder"""
        animations = []
        image_mobject = self.image_layer.image_mobject
        # Move the dots to the centers of each of the nodes in the FeedForwardLayer
        image_location = image_mobject.get_center()
        for node in self.feed_forward_layer.node_group:
            new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color)
            per_node_succession = Succession(
                Create(new_dot),
                new_dot.animate.move_to(image_location),
            )
            animations.append(per_node_succession)

        animation_group = AnimationGroup(*animations)
        return animation_group

    @override_animation(Create)
    def _create_override(self):
        return AnimationGroup()
manim_ml/neural_network/layers/__init__.py (new file, 9 lines)
@@ -0,0 +1,9 @@
from .embedding_to_feed_forward import EmbeddingToFeedForward
from .embedding import EmbeddingLayer
from .feed_forward_to_embedding import FeedForwardToEmbedding
from .feed_forward_to_feed_forward import FeedForwardToFeedForward
from .feed_forward_to_image import FeedForwardToImage
from .feed_forward import FeedForwardLayer
from .image_to_feed_forward import ImageToFeedForward
from .image import ImageLayer
from .parent_layers import ConnectiveLayer, NeuralNetworkLayer
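Because __init__.py re-exports every layer class, callers can import from the package root rather than the individual modules; the updated neural_network.py further down relies on exactly this. A small sketch of the equivalent import:

# Equivalent to importing from the individual modules listed above
from manim_ml.neural_network.layers import (
    EmbeddingLayer, FeedForwardLayer, ImageLayer,
    FeedForwardToFeedForward, ConnectiveLayer,
)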
manim_ml/neural_network/layers/embedding.py (new file, 91 lines)
@@ -0,0 +1,91 @@
from manim import *
from manim_ml.probability import GaussianDistribution
from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer

class EmbeddingLayer(VGroupNeuralNetworkLayer):
    """NeuralNetwork embedding object that can show probability distributions"""

    def __init__(self, point_radius=0.02):
        super(EmbeddingLayer, self).__init__()
        self.point_radius = point_radius
        self.axes = Axes(
            tips=False,
            x_length=1,
            y_length=1
        )
        self.add(self.axes)
        # Make point cloud
        mean = np.array([0, 0])
        covariance = np.array([[1.5, 0], [0, 1.5]])
        self.point_cloud = self.construct_gaussian_point_cloud(mean, covariance)
        self.add(self.point_cloud)
        # Make latent distribution
        self.latent_distribution = GaussianDistribution(self.axes, mean=mean, cov=covariance)  # Use defaults

    def sample_point_location_from_distribution(self):
        """Samples from the current latent distribution"""
        mean = self.latent_distribution.mean
        cov = self.latent_distribution.cov
        point = np.random.multivariate_normal(mean, cov)
        # Make dot at correct location
        location = self.axes.coords_to_point(point[0], point[1])

        return location

    def get_distribution_location(self):
        """Returns mean of latent distribution in axes frame"""
        return self.axes.coords_to_point(self.latent_distribution.mean)

    def construct_gaussian_point_cloud(self, mean, covariance, point_color=BLUE,
                                       num_points=200):
        """Plots points sampled from a Gaussian with the given mean and covariance"""
        # Sample points from a Gaussian
        points = np.random.multivariate_normal(mean, covariance, num_points)
        # Add each point to the axes
        point_dots = VGroup()
        for point in points:
            point_location = self.axes.coords_to_point(*point)
            dot = Dot(point_location, color=point_color, radius=self.point_radius/2)
            point_dots.add(dot)

        return point_dots

    def make_forward_pass_animation(self):
        """Forward pass animation"""
        # Make ellipse object corresponding to the latent distribution
        self.latent_distribution = GaussianDistribution(self.axes)  # Use defaults
        # Create animation
        animations = []
        #create_distribution = Create(self.latent_distribution.construct_gaussian_distribution(self.latent_distribution.mean, self.latent_distribution.cov)) #Create(self.latent_distribution)
        create_distribution = Create(self.latent_distribution.ellipses)
        animations.append(create_distribution)

        animation_group = AnimationGroup(*animations)

        return animation_group

    @override_animation(Create)
    def _create_embedding_layer(self, **kwargs):
        # Plot each point at once
        point_animations = []
        for point in self.point_cloud:
            point_animations.append(GrowFromCenter(point))

        point_animation = AnimationGroup(*point_animations, lag_ratio=1.0, run_time=2.5)

        return point_animation

class NeuralNetworkEmbeddingTestScene(Scene):

    def construct(self):
        nne = EmbeddingLayer()
        mean = np.array([0, 0])
        cov = np.array([[5.0, 1.0], [0.0, 1.0]])

        point_cloud = nne.construct_gaussian_point_cloud(mean, cov)
        nne.add(point_cloud)

        gaussian = nne.construct_gaussian_distribution(mean, cov)
        nne.add(gaussian)

        self.add(nne)
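A minimal, hedged usage sketch (not part of this commit) showing how the embedding layer's forward-pass animation could be played in a scene; it uses only methods defined in the file above, and the scene name is hypothetical:

from manim import Scene
from manim_ml.neural_network.layers.embedding import EmbeddingLayer

class EmbeddingForwardPass(Scene):  # hypothetical scene name
    def construct(self):
        embedding = EmbeddingLayer()
        self.add(embedding)
        # make_forward_pass_animation() swaps in a fresh latent
        # GaussianDistribution and returns an AnimationGroup that
        # draws its ellipses.
        self.play(embedding.make_forward_pass_animation())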
manim_ml/neural_network/layers/embedding_to_feed_forward.py (new file, 43 lines)
@@ -0,0 +1,43 @@
from manim import *
from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer

class EmbeddingToFeedForward(ConnectiveLayer):
    """Embedding to Feed Forward Layer"""

    def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03):
        super().__init__(input_layer, output_layer)
        self.feed_forward_layer = output_layer
        self.embedding_layer = input_layer
        self.animation_dot_color = animation_dot_color
        self.dot_radius = dot_radius

    def make_forward_pass_animation(self, run_time=1.5):
        """Makes dots diverge from the sampled location and move to the decoder"""
        # Find the point to diverge from by sampling from the gaussian distribution
        location = self.embedding_layer.sample_point_location_from_distribution()
        # Move to location
        animations = []
        # Move the dots to the centers of each of the nodes in the FeedForwardLayer
        dots = []
        for node in self.feed_forward_layer.node_group:
            new_dot = Dot(location, radius=self.dot_radius, color=self.animation_dot_color)
            per_node_succession = Succession(
                Create(new_dot),
                new_dot.animate.move_to(node.get_center()),
            )
            animations.append(per_node_succession)
            dots.append(new_dot)
        # Follow up with remove animations
        remove_animations = []
        for dot in dots:
            remove_animations.append(FadeOut(dot))
        remove_animations = AnimationGroup(*remove_animations, run_time=0.2)
        animations = AnimationGroup(*animations)
        animation_group = Succession(animations, remove_animations, lag_ratio=1.0)

        return animation_group

    @override_animation(Create)
    def _create_embedding_layer(self, **kwargs):
        return AnimationGroup()
@@ -1,5 +1,5 @@
from manim import *
from manim_ml.neural_network.layers import VGroupNeuralNetworkLayer, ConnectiveLayer
from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer

class FeedForwardLayer(VGroupNeuralNetworkLayer):
    """Handles rendering a layer for a neural network"""
@@ -64,65 +64,4 @@ class FeedForwardLayer(VGroupNeuralNetworkLayer):
            animations.append(Create(node))

        animation_group = AnimationGroup(*animations, lag_ratio=0.0)
        return animation_group

class FeedForwardToFeedForward(ConnectiveLayer):
    """Layer for connecting FeedForward layer to FeedForwardLayer"""

    def __init__(self, input_layer, output_layer, passing_flash=True,
                 dot_radius=0.05, animation_dot_color=RED, edge_color=WHITE,
                 edge_width=0.5):
        super().__init__(input_layer, output_layer)
        self.passing_flash = passing_flash
        self.edge_color = edge_color
        self.dot_radius = dot_radius
        self.animation_dot_color = animation_dot_color
        self.edge_width = edge_width

        self.edges = self.construct_edges()
        self.add(self.edges)

    def construct_edges(self):
        # Go through each node in the two layers and make a connecting line
        edges = []
        for node_i in self.input_layer.node_group:
            for node_j in self.output_layer.node_group:
                line = Line(node_i.get_center(), node_j.get_center(),
                            color=self.edge_color, stroke_width=self.edge_width)
                edges.append(line)

        edges = VGroup(*edges)
        return edges

    def make_forward_pass_animation(self, run_time=1):
        """Animation for passing information from one FeedForwardLayer to the next"""
        path_animations = []
        dots = []
        for edge in self.edges:
            dot = Dot(color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius)
            # Add to dots group
            dots.append(dot)
            # Make the animation
            if self.passing_flash:
                anim = ShowPassingFlash(edge.copy().set_color(self.animation_dot_color), time_width=0.2, run_time=3)
            else:
                anim = MoveAlongPath(dot, edge, run_time=run_time, rate_function=sigmoid)
            path_animations.append(anim)

        if not self.passing_flash:
            dots = VGroup(*dots)
            self.add(dots)

        path_animations = AnimationGroup(*path_animations)

        return path_animations

    @override_animation(Create)
    def _create_animation(self, **kwargs):
        animations = []

        for edge in self.edges:
            animations.append(Create(edge))

        animation_group = AnimationGroup(*animations, lag_ratio=0.0)
        return animation_group
manim_ml/neural_network/layers/feed_forward_to_embedding.py (new file, 47 lines)
@@ -0,0 +1,47 @@
from manim import *
from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer

class FeedForwardToEmbedding(ConnectiveLayer):
    """Feed Forward to Embedding Layer"""

    def __init__(self, input_layer, output_layer, animation_dot_color=RED, dot_radius=0.03):
        super().__init__(input_layer, output_layer)
        self.feed_forward_layer = input_layer
        self.embedding_layer = output_layer
        self.animation_dot_color = animation_dot_color
        self.dot_radius = dot_radius

    def make_forward_pass_animation(self, run_time=1.5):
        """Makes dots converge on a specific location"""
        # Find point to converge on by sampling from gaussian distribution
        location = self.embedding_layer.sample_point_location_from_distribution()
        # Set the embedding layer latent distribution
        # Move to location
        animations = []
        # Move the dots to the centers of each of the nodes in the FeedForwardLayer
        dots = []
        for node in self.feed_forward_layer.node_group:
            new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color)
            per_node_succession = Succession(
                Create(new_dot),
                new_dot.animate.move_to(location),
            )
            animations.append(per_node_succession)
            dots.append(new_dot)
        self.dots = VGroup(*dots)
        self.add(self.dots)
        # Follow up with remove animations
        remove_animations = []
        for dot in dots:
            remove_animations.append(FadeOut(dot))
        self.remove(self.dots)
        remove_animations = AnimationGroup(*remove_animations, run_time=0.2)
        animations = AnimationGroup(*animations)
        animation_group = Succession(animations, remove_animations, lag_ratio=1.0)

        return animation_group

    @override_animation(Create)
    def _create_embedding_layer(self, **kwargs):
        return AnimationGroup()
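The connective layers above all reuse the same per-dot pattern: a Succession creates the dot and then moves it, and an AnimationGroup plays all of the successions together. A standalone sketch of that pattern (not from the repo; the coordinates are illustrative):

from manim import Scene, Dot, Create, Succession, AnimationGroup, LEFT, RIGHT

class DotConvergeSketch(Scene):  # hypothetical scene name
    def construct(self):
        target = RIGHT * 2
        dots = [Dot(LEFT * 2 + RIGHT * i) for i in range(3)]
        per_dot = [
            Succession(Create(d), d.animate.move_to(target))  # create, then travel
            for d in dots
        ]
        self.play(AnimationGroup(*per_dot))  # all dots animate in parallel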
@@ -0,0 +1,64 @@
from manim import *
from manim_ml.image import GrayscaleImageMobject
from manim_ml.neural_network.layers.parent_layers import NeuralNetworkLayer, ConnectiveLayer

class FeedForwardToFeedForward(ConnectiveLayer):
    """Layer for connecting FeedForward layer to FeedForwardLayer"""

    def __init__(self, input_layer, output_layer, passing_flash=True,
                 dot_radius=0.05, animation_dot_color=RED, edge_color=WHITE,
                 edge_width=0.5):
        super().__init__(input_layer, output_layer)
        self.passing_flash = passing_flash
        self.edge_color = edge_color
        self.dot_radius = dot_radius
        self.animation_dot_color = animation_dot_color
        self.edge_width = edge_width

        self.edges = self.construct_edges()
        self.add(self.edges)

    def construct_edges(self):
        # Go through each node in the two layers and make a connecting line
        edges = []
        for node_i in self.input_layer.node_group:
            for node_j in self.output_layer.node_group:
                line = Line(node_i.get_center(), node_j.get_center(),
                            color=self.edge_color, stroke_width=self.edge_width)
                edges.append(line)

        edges = VGroup(*edges)
        return edges

    def make_forward_pass_animation(self, run_time=1):
        """Animation for passing information from one FeedForwardLayer to the next"""
        path_animations = []
        dots = []
        for edge in self.edges:
            dot = Dot(color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius)
            # Add to dots group
            dots.append(dot)
            # Make the animation
            if self.passing_flash:
                anim = ShowPassingFlash(edge.copy().set_color(self.animation_dot_color), time_width=0.2, run_time=3)
            else:
                anim = MoveAlongPath(dot, edge, run_time=run_time, rate_function=sigmoid)
            path_animations.append(anim)

        if not self.passing_flash:
            dots = VGroup(*dots)
            self.add(dots)

        path_animations = AnimationGroup(*path_animations)

        return path_animations

    @override_animation(Create)
    def _create_animation(self, **kwargs):
        animations = []

        for edge in self.edges:
            animations.append(Create(edge))

        animation_group = AnimationGroup(*animations, lag_ratio=0.0)
        return animation_group
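Note on the class above: the nested loops in construct_edges connect every node of the input layer to every node of the output layer, so layers with n and m nodes produce n * m Line mobjects (for example, 3 nodes feeding 4 nodes gives 12 edges). During the forward pass, passing_flash=True animates a ShowPassingFlash along a colored copy of each edge, while passing_flash=False instead keeps one Dot per edge and moves it along the line with MoveAlongPath.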
manim_ml/neural_network/layers/feed_forward_to_image.py (new file, 36 lines)
@@ -0,0 +1,36 @@
from manim import *
from manim_ml.image import GrayscaleImageMobject
from manim_ml.neural_network.layers.parent_layers import NeuralNetworkLayer, ConnectiveLayer

class FeedForwardToImage(ConnectiveLayer):
    """FeedForward layer to Image Layer"""

    def __init__(self, input_layer, output_layer, animation_dot_color=RED,
                 dot_radius=0.05):
        self.animation_dot_color = animation_dot_color
        self.dot_radius = dot_radius

        self.feed_forward_layer = input_layer
        self.image_layer = output_layer
        super().__init__(input_layer, output_layer)

    def make_forward_pass_animation(self):
        """Makes dots converge from the feed forward nodes onto the image location"""
        animations = []
        image_mobject = self.image_layer.image_mobject
        # Move the dots from the centers of each of the nodes in the FeedForwardLayer to the image
        image_location = image_mobject.get_center()
        for node in self.feed_forward_layer.node_group:
            new_dot = Dot(node.get_center(), radius=self.dot_radius, color=self.animation_dot_color)
            per_node_succession = Succession(
                Create(new_dot),
                new_dot.animate.move_to(image_location),
            )
            animations.append(per_node_succession)

        animation_group = AnimationGroup(*animations)
        return animation_group

    @override_animation(Create)
    def _create_override(self):
        return AnimationGroup()
manim_ml/neural_network/layers/image.py (new file, 37 lines)
@@ -0,0 +1,37 @@
from manim import *
from manim_ml.image import GrayscaleImageMobject
from manim_ml.neural_network.layers.parent_layers import NeuralNetworkLayer

class ImageLayer(NeuralNetworkLayer):
    """Single Image Layer for Neural Network"""

    def __init__(self, numpy_image, height=1.5):
        super().__init__()
        self.set_z_index(1)
        self.numpy_image = numpy_image
        if len(np.shape(self.numpy_image)) == 2:
            # Assumed Grayscale
            self.image_mobject = GrayscaleImageMobject(self.numpy_image, height=height)
        elif len(np.shape(self.numpy_image)) == 3:
            # Assumed RGB
            self.image_mobject = ImageMobject(self.numpy_image)
        self.add(self.image_mobject)

    @override_animation(Create)
    def _create_animation(self, **kwargs):
        return FadeIn(self.image_mobject)

    def make_forward_pass_animation(self):
        return Create(self.image_mobject)

    def move_to(self, location):
        """Override of move to"""
        self.image_mobject.move_to(location)

    def get_right(self):
        """Override get right"""
        return self.image_mobject.get_right()

    @property
    def width(self):
        return self.image_mobject.width
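A hedged usage sketch (not from this commit) of feeding a numpy array into ImageLayer; the PIL-based loading mirrors the imports used in the repo's tests, and the file path is a placeholder:

import numpy as np
from PIL import Image
from manim_ml.neural_network.layers.image import ImageLayer

# Placeholder path; any grayscale image works, since 2D arrays are
# routed through GrayscaleImageMobject in __init__ above.
numpy_image = np.asarray(Image.open("path/to/digit.png").convert("L"))
image_layer = ImageLayer(numpy_image, height=1.5)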
manim_ml/neural_network/layers/image_to_feed_forward.py (new file, 38 lines)
@@ -0,0 +1,38 @@
from manim import *
from manim_ml.image import GrayscaleImageMobject
from manim_ml.neural_network.layers.parent_layers import NeuralNetworkLayer, ConnectiveLayer

class ImageToFeedForward(ConnectiveLayer):
    """Image Layer to FeedForward layer"""

    def __init__(self, input_layer, output_layer, animation_dot_color=RED,
                 dot_radius=0.05):
        self.animation_dot_color = animation_dot_color
        self.dot_radius = dot_radius

        self.feed_forward_layer = output_layer
        self.image_layer = input_layer
        super().__init__(input_layer, output_layer)

    def make_forward_pass_animation(self):
        """Makes dots diverge from the given location and move to the feed forward nodes decoder"""
        animations = []
        dots = []
        image_mobject = self.image_layer.image_mobject
        # Move the dots to the centers of each of the nodes in the FeedForwardLayer
        image_location = image_mobject.get_center()
        for node in self.feed_forward_layer.node_group:
            new_dot = Dot(image_location, radius=self.dot_radius, color=self.animation_dot_color)
            per_node_succession = Succession(
                Create(new_dot),
                new_dot.animate.move_to(node.get_center()),
            )
            animations.append(per_node_succession)
            dots.append(new_dot)
        self.add(VGroup(*dots))
        animation_group = AnimationGroup(*animations)
        return animation_group

    @override_animation(Create)
    def _create_override(self):
        return AnimationGroup()
@@ -37,4 +37,4 @@ class ConnectiveLayer(VGroupNeuralNetworkLayer):

    @abstractmethod
    def make_forward_pass_animation(self):
        pass
@@ -13,11 +13,10 @@ from manim import *
import warnings
import textwrap

from numpy import string_

from manim_ml.neural_network.embedding import EmbeddingLayer, EmbeddingToFeedForward, FeedForwardToEmbedding
from manim_ml.neural_network.feed_forward import FeedForwardLayer, FeedForwardToFeedForward
from manim_ml.neural_network.image import ImageLayer, ImageToFeedForward, FeedForwardToImage
from manim_ml.neural_network.layers import \
    FeedForwardLayer, FeedForwardToFeedForward, ImageLayer, \
    ImageToFeedForward, FeedForwardToImage, EmbeddingLayer, \
    EmbeddingToFeedForward, FeedForwardToEmbedding

class NeuralNetwork(Group):
@@ -4,7 +4,6 @@ In this module I define Manim visualizations for Variational Autoencoders
and Traditional Autoencoders.

"""
from types import WrapperDescriptorType
from manim import *
import numpy as np
from PIL import Image
manim_ml/probability.py (new file, 68 lines)
@@ -0,0 +1,68 @@
from manim import *
import numpy as np
import math

class GaussianDistribution(VGroup):
    """Object for drawing a Gaussian distribution"""

    def __init__(self, axes, mean=None, cov=None, **kwargs):
        super(VGroup, self).__init__(**kwargs)
        self.axes = axes
        self.mean = mean
        self.cov = cov
        if mean is None:
            self.mean = np.array([0.0, 0.0])
        if cov is None:
            self.cov = np.array([[3, 0], [0, 3]])
        # Make the Gaussian
        self.ellipses = self.construct_gaussian_distribution(self.mean, self.cov)
        self.ellipses.set_z_index(2)

    @override_animation(Create)
    def _create_gaussian_distribution(self):
        return Create(self.ellipses)

    def compute_covariance_rotation_and_scale(self, covariance):
        # Get the eigenvectors and eigenvalues
        eigenvalues, eigenvectors = np.linalg.eig(covariance)
        y, x = eigenvectors[0, 1], eigenvectors[0, 0]
        center_location = np.array([y, x, 0])
        center_location = self.axes.coords_to_point(*center_location)
        angle = math.atan(x / y)  # x over y to denote the angle between y axis and vector
        # Calculate the width and height
        height = np.abs(eigenvalues[0])
        width = np.abs(eigenvalues[1])
        shape_coord = np.array([width, height, 0])
        shape_coord = self.axes.coords_to_point(*shape_coord)
        width = shape_coord[0]
        height = shape_coord[1]

        return angle, width, height

    def construct_gaussian_distribution(self, mean, covariance, color=ORANGE,
                                        num_ellipses=4):
        """Returns a 2d Gaussian distribution object with given mean and covariance"""
        # map mean and covariance to frame coordinates
        mean = self.axes.coords_to_point(*mean)
        # Figure out the scale and angle of rotation
        rotation, width, height = self.compute_covariance_rotation_and_scale(covariance)
        # Make covariance ellipses
        opacity = 0.0
        ellipses = VGroup()
        for ellipse_number in range(num_ellipses):
            opacity += 1.0 / num_ellipses
            ellipse_width = width * (1 - opacity)
            ellipse_height = height * (1 - opacity)
            ellipse = Ellipse(
                width=ellipse_width,
                height=ellipse_height,
                color=color,
                fill_opacity=opacity,
                stroke_width=0.0
            )
            ellipse.move_to(mean)
            ellipse.rotate(rotation)
            ellipses.add(ellipse)

        return ellipses
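A hedged sketch (not part of the commit) of drawing the relocated GaussianDistribution on a set of axes; the scene name, mean, and covariance values are illustrative:

from manim import Scene, Axes, Create
import numpy as np
from manim_ml.probability import GaussianDistribution

class GaussianSketch(Scene):  # hypothetical scene name
    def construct(self):
        axes = Axes(tips=False, x_length=4, y_length=4)
        self.add(axes)
        # Illustrative parameters; the class defaults are mean=[0, 0], cov=[[3, 0], [0, 3]]
        gaussian = GaussianDistribution(axes, mean=np.array([0.0, 0.0]),
                                        cov=np.array([[2.0, 1.0], [1.0, 2.0]]))
        # The Create override on the class plays Create on its ellipses
        self.play(Create(gaussian))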
@@ -1,33 +0,0 @@
from manim import *
import numpy as np

def construct_image_mobject(input_image, height=2.3):
    """Constructs an ImageMobject from a numpy grayscale image"""
    # Convert image to rgb
    if len(input_image.shape) == 2:
        input_image = np.repeat(input_image, 3, axis=0)
        input_image = np.rollaxis(input_image, 0, start=3)
    # Make the ImageMobject
    image_mobject = ImageMobject(input_image, image_mode="RGB")
    image_mobject.set_resampling_algorithm(RESAMPLING_ALGORITHMS["nearest"])
    image_mobject.height = height

    return image_mobject

class NumpyImageMobject(ImageMobject):
    """Mobject for creating images in Manim from numpy arrays"""

    def __init__(self, numpy_image, height=2.3, grayscale=False):
        self.numpy_image = numpy_image
        self.height = height

        if grayscale:
            assert len(input_image.shape) == 2
            input_image = np.repeat(self.numpy_image, 3, axis=0)
            input_image = np.rollaxis(input_image, 0, start=3)

        super().__init__(input_image, image_mode="RGB")

        self.set_resampling_algorithm(RESAMPLING_ALGORITHMS["nearest"])
        self.height = height
@@ -1,7 +1,7 @@
from manim import *
from manim_ml.neural_network.embedding import EmbeddingLayer
from manim_ml.neural_network.feed_forward import FeedForwardLayer
from manim_ml.neural_network.image import ImageLayer
from manim_ml.neural_network.layers.embedding import EmbeddingLayer
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.layers.image import ImageLayer
from manim_ml.neural_network.neural_network import NeuralNetwork, FeedForwardNeuralNetwork
from PIL import Image
import numpy as np
@@ -1,6 +1,6 @@
from manim import *
from PIL import Image
from manim_ml.neural_network.embedding import EmbeddingLayer, GaussianDistribution
from manim_ml.neural_network.embedding import EmbeddingLayer
from manim_ml.neural_network.feed_forward import FeedForwardLayer
from manim_ml.neural_network.image import ImageLayer
from manim_ml.neural_network.neural_network import NeuralNetwork
@@ -10,7 +10,7 @@ config.pixel_width = 1280
config.frame_height = 6.0
config.frame_width = 6.0

class GaussianScene(Scene):
class VariationalAutoencoderScene(Scene):

    def construct(self):
        embedding_layer = EmbeddingLayer()