Working dropout animation

Alec Helbling
2023-01-02 15:24:51 -05:00
parent 3d6e8072e1
commit 1d9cad8587
8 changed files with 414 additions and 74 deletions

View File

@@ -0,0 +1,272 @@
"""
Code for making a dropout animation for the
feed forward layers of a neural network.
"""
from manim import *
import random

from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.layers.feed_forward_to_feed_forward import FeedForwardToFeedForward


class XMark(VGroup):
    def __init__(self, stroke_width=1.0, color=GRAY):
        super().__init__()
        line_one = Line(
            [-0.1, 0.1, 0],
            [0.1, -0.1, 0],
            stroke_width=stroke_width,
            stroke_color=color,
            z_index=4
        )
        self.add(line_one)
        line_two = Line(
            [-0.1, -0.1, 0],
            [0.1, 0.1, 0],
            stroke_width=stroke_width,
            stroke_color=color,
            z_index=4
        )
        self.add(line_two)


def get_edges_to_drop_out(layer: FeedForwardToFeedForward, layers_to_nodes_to_drop_out):
    """Returns edges to drop out for a given FeedForwardToFeedForward layer"""
    prev_layer = layer.input_layer
    next_layer = layer.output_layer
    # Get the nodes to dropout in previous layer
    prev_layer_nodes_to_dropout = layers_to_nodes_to_drop_out[prev_layer]
    next_layer_nodes_to_dropout = layers_to_nodes_to_drop_out[next_layer]
    # Compute the edges to dropout
    edges_to_dropout = []
    edge_indices_to_dropout = []
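    # NOTE: the index math below assumes edges are ordered with the previous
    # layer's node as the outer loop and the next layer's node as the inner
    # loop; e.g. with next_layer.num_nodes == 5, edge_index 7 maps to
    # previous node 1 (7 // 5) and next node 2 (7 % 5).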
    for edge_index, edge in enumerate(layer.edges):
        prev_node_index = int(edge_index / next_layer.num_nodes)
        next_node_index = edge_index % next_layer.num_nodes
        # Check if the edges should be dropped out
        if prev_node_index in prev_layer_nodes_to_dropout \
                or next_node_index in next_layer_nodes_to_dropout:
            edges_to_dropout.append(edge)
            edge_indices_to_dropout.append(edge_index)

    return edges_to_dropout, edge_indices_to_dropout


def make_pre_dropout_animation(
    neural_network,
    layers_to_nodes_to_drop_out,
    dropped_out_color=GRAY,
    dropped_out_opacity=0.2
):
    """Makes an animation that sets up the NN layers for dropout"""
    animations = []
    # Go through the network and get the FeedForwardLayer instances
    feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardLayer)
    )
    # Go through the network and get the FeedForwardToFeedForward instances
    feed_forward_to_feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardToFeedForward)
    )
    # Get the edges to drop out
    layers_to_edges_to_dropout = {}
    for layer in feed_forward_to_feed_forward_layers:
        layers_to_edges_to_dropout[layer], _ = get_edges_to_drop_out(
            layer,
            layers_to_nodes_to_drop_out
        )
    # Dim the colors of the edges
    dim_edge_colors_animations = []
    for layer in layers_to_edges_to_dropout.keys():
        edges_to_drop_out = layers_to_edges_to_dropout[layer]
        # Make color dimming animation
        for edge_index, edge in enumerate(edges_to_drop_out):
            """
            def modify_edge(edge):
                edge.set_stroke_color(dropped_out_color)
                edge.set_stroke_width(0.6)
                edge.set_stroke_opacity(dropped_out_opacity)
                return edge

            dim_edge = ApplyFunction(
                modify_edge,
                edge
            )
            """
            dim_edge_colors_animations.append(
                FadeOut(edge)
            )
    dim_edge_colors_animation = AnimationGroup(
        *dim_edge_colors_animations,
        lag_ratio=0.0
    )
    # Dim the colors of the nodes
    dim_nodes_animations = []
    x_marks = []
    for layer in layers_to_nodes_to_drop_out.keys():
        nodes_to_dropout = layers_to_nodes_to_drop_out[layer]
        # Make an X over each dropped out node
        for node_index, node in enumerate(layer.node_group):
            if node_index in nodes_to_dropout:
                x_mark = XMark()
                x_marks.append(x_mark)
                x_mark.move_to(node.get_center())
                create_x = Create(x_mark)
                dim_nodes_animations.append(create_x)
    dim_nodes_animation = AnimationGroup(
        *dim_nodes_animations,
        lag_ratio=0.0
    )

    animation_group = AnimationGroup(
        dim_edge_colors_animation,
        dim_nodes_animation,
    )

    return animation_group, x_marks


def make_post_dropout_animation(
    neural_network,
    layers_to_nodes_to_drop_out,
    x_marks,
):
    """Returns the NN to normal after dropout"""
    # Go through the network and get the FeedForwardLayer instances
    feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardLayer)
    )
    # Go through the network and get the FeedForwardToFeedForward instances
    feed_forward_to_feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardToFeedForward)
    )
    # Get the edges that were dropped out
    layers_to_edges_to_dropout = {}
    for layer in feed_forward_to_feed_forward_layers:
        layers_to_edges_to_dropout[layer], _ = get_edges_to_drop_out(
            layer,
            layers_to_nodes_to_drop_out
        )
    # Remove the x marks
    uncreate_animations = []
    for x_mark in x_marks:
        uncreate_x_mark = Uncreate(x_mark)
        uncreate_animations.append(uncreate_x_mark)
    uncreate_x_marks = AnimationGroup(
        *uncreate_animations,
        lag_ratio=0.0
    )
    # Add the edges back
    create_edge_animations = []
    for layer in layers_to_edges_to_dropout.keys():
        edges_to_drop_out = layers_to_edges_to_dropout[layer]
        # Make edge restore animation
        for edge_index, edge in enumerate(edges_to_drop_out):
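            # NOTE: the original edges were faded out of the scene in
            # make_pre_dropout_animation, so copies of them are faded back in here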
            edge_copy = edge.copy()
            edges_to_drop_out[edge_index] = edge_copy
            create_edge_animations.append(
                FadeIn(edge_copy)
            )
    create_edge_animation = AnimationGroup(
        *create_edge_animations,
        lag_ratio=0.0
    )

    return AnimationGroup(
        uncreate_x_marks,
        create_edge_animation,
        lag_ratio=0.0
    )


def make_forward_pass_with_dropout_animation(
    neural_network,
    layers_to_nodes_to_drop_out,
):
    """Makes forward pass animation with dropout"""
    layer_args = {}
    # Go through the network and get the FeedForwardLayer instances
    feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardLayer)
    )
    # Go through the network and get the FeedForwardToFeedForward instances
    feed_forward_to_feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardToFeedForward)
    )
    # Pass the node indices to drop out to each feed forward layer
    for layer in feed_forward_layers:
        layer_args[layer] = {
            "dropout_node_indices": layers_to_nodes_to_drop_out[layer]
        }
    for layer in feed_forward_to_feed_forward_layers:
        _, edge_indices = get_edges_to_drop_out(
            layer,
            layers_to_nodes_to_drop_out
        )
        layer_args[layer] = {
            "edge_indices_to_dropout": edge_indices
        }

    return neural_network.make_forward_pass_animation(
        layer_args=layer_args
    )


def make_neural_network_dropout_animation(
    neural_network,
    dropout_rate=0.5,
    do_forward_pass=True
):
    """
    Makes a dropout animation for a given neural network.

    NOTE: dropout is only applied to feed forward layers.

    1. Does dropout
    2. If `do_forward_pass` then do forward pass animation
    3. Revert network to pre-dropout appearance
    """
    # Go through the network and get the FeedForwardLayer instances
    feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardLayer)
    )
    # Go through the network and get the FeedForwardToFeedForward instances
    feed_forward_to_feed_forward_layers = neural_network.filter_layers(
        lambda layer: isinstance(layer, FeedForwardToFeedForward)
    )
    # Get random nodes to drop out for each FeedForwardLayer
    layers_to_nodes_to_drop_out = {}
    for feed_forward_layer in feed_forward_layers:
        num_nodes = feed_forward_layer.num_nodes
        nodes_to_drop_out = []
        # Drop out each node independently with probability `dropout_rate`
        for node_index in range(num_nodes):
            dropout_prob = random.random()
            if dropout_prob < dropout_rate:
                nodes_to_drop_out.append(node_index)
        # Add the mapping to the dict
        layers_to_nodes_to_drop_out[feed_forward_layer] = nodes_to_drop_out
    # Make the animation
    pre_dropout_animation, x_marks = make_pre_dropout_animation(
        neural_network,
        layers_to_nodes_to_drop_out
    )
    if do_forward_pass:
        forward_pass_animation = make_forward_pass_with_dropout_animation(
            neural_network,
            layers_to_nodes_to_drop_out
        )
    else:
        forward_pass_animation = AnimationGroup()
    post_dropout_animation = make_post_dropout_animation(
        neural_network,
        layers_to_nodes_to_drop_out,
        x_marks
    )
    # Combine the animations into one
    return Succession(
        pre_dropout_animation,
        forward_pass_animation,
        post_dropout_animation
    )
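
A minimal usage sketch for the new module (the scene class name here is hypothetical and the NeuralNetwork import path is assumed from this repo's layout; the example scene updated later in this commit does the same thing):

from manim import Scene
from manim_ml.neural_network.neural_network import NeuralNetwork
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.animations.dropout import make_neural_network_dropout_animation

class DropoutSketchScene(Scene):
    def construct(self):
        # Build a small feed forward network
        nn = NeuralNetwork([
            FeedForwardLayer(3),
            FeedForwardLayer(5),
            FeedForwardLayer(3),
        ])
        self.add(nn)
        # Drop out roughly 25% of the nodes, run one forward pass, then restore
        self.play(
            make_neural_network_dropout_animation(
                nn, dropout_rate=0.25, do_forward_pass=True
            )
        )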

View File

@@ -15,8 +15,8 @@ class Convolutional3DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
         num_feature_maps,
         feature_map_width,
         feature_map_height,
-        filter_width,
-        filter_height,
+        filter_width=None,
+        filter_height=None,
         cell_width=0.2,
         filter_spacing=0.1,
         color=BLUE,

View File

@@ -1,7 +1,6 @@
 from manim import *
 from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer
 
 class FeedForwardLayer(VGroupNeuralNetworkLayer):
     """Handles rendering a layer for a neural network"""

@@ -65,8 +64,44 @@ class FeedForwardLayer(VGroupNeuralNetworkLayer):
         # Add the objects to the class
         self.add(self.surrounding_rectangle, self.node_group)
 
+    def make_dropout_forward_pass_animation(self, layer_args, **kwargs):
+        """Makes a forward pass animation with dropout"""
+        # Make sure proper dropout information was passed
+        assert "dropout_node_indices" in layer_args
+        dropout_node_indices = layer_args["dropout_node_indices"]
+        # Only highlight nodes that were not dropped out
+        nodes_to_highlight = []
+        for index, node in enumerate(self.node_group):
+            if index not in dropout_node_indices:
+                nodes_to_highlight.append(node)
+        nodes_to_highlight = VGroup(*nodes_to_highlight)
+        # Make highlight animation
+        succession = Succession(
+            ApplyMethod(
+                nodes_to_highlight.set_color,
+                self.animation_dot_color,
+                run_time=0.25
+            ),
+            Wait(1.0),
+            ApplyMethod(
+                nodes_to_highlight.set_color,
+                self.node_color,
+                run_time=0.25
+            ),
+        )
+        return succession
+
     def make_forward_pass_animation(self, layer_args={}, **kwargs):
-        # make highlight animation
-        succession = Succession(
-            ApplyMethod(
-                self.node_group.set_color, self.animation_dot_color, run_time=0.25
+        # Check whether dropout was requested for this layer
+        if "dropout_node_indices" in layer_args:
+            # Drop out certain nodes
+            return self.make_dropout_forward_pass_animation(
+                layer_args=layer_args,
+                **kwargs
+            )
+        else:
+            # Make highlight animation
+            succession = Succession(
+                ApplyMethod(
+                    self.node_group.set_color, self.animation_dot_color, run_time=0.25

View File

@@ -67,11 +67,18 @@ class FeedForwardToFeedForward(ConnectiveLayer):
         return animation_group
 
-    def make_forward_pass_animation(self, layer_args={}, run_time=1, **kwargs):
+    def make_forward_pass_animation(
+        self,
+        layer_args={},
+        run_time=1,
+        feed_forward_dropout=0.0,
+        **kwargs
+    ):
         """Animation for passing information from one FeedForwardLayer to the next"""
         path_animations = []
         dots = []
-        for edge in self.edges:
-            dot = Dot(
-                color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius
-            )
+        for edge_index, edge in enumerate(self.edges):
+            if edge_index not in layer_args.get("edge_indices_to_dropout", []):
+                dot = Dot(
+                    color=self.animation_dot_color, fill_opacity=1.0, radius=self.dot_radius
+                )

View File

@@ -68,6 +68,11 @@ class ConnectiveLayer(VGroupNeuralNetworkLayer):
     def _create_override(self):
         return super()._create_override()
 
+    def __repr__(self):
+        return f"{self.__class__.__name__}(" + \
+            f"input_layer={self.input_layer.__class__.__name__}," + \
+            f"output_layer={self.output_layer.__class__.__name__}," + \
+            ")"
+
 class BlankConnective(ConnectiveLayer):
     """Connective layer to be used when the given pair of layers is undefined"""

View File

@@ -22,7 +22,6 @@ from manim_ml.neural_network.neural_network_transformations import (
     RemoveLayer,
 )
 
 class NeuralNetwork(Group):
     """Neural Network Visualization Container Class"""

@@ -50,20 +49,8 @@ class NeuralNetwork(Group):
         # Make the layer fixed in frame if its not 3D
         ThreeDLayer.three_d_theta = three_d_theta
         ThreeDLayer.three_d_phi = three_d_phi
-        """
-        for layer in self.input_layers:
-            if not isinstance(layer, ThreeDLayer):
-                self.camera.add_fixed_orientation_mobjects(layer)
-                self.camera.add_fixed_in_frame_mobjects(layer)
-        """
         # TODO take layer_node_count [0, (1, 2), 0]
         # and make it have explicit distinct subspaces
-        # Add camera to input layers
-        """
-        for input_layer in input_layers:
-            if input_layer.camera is None:
-                input_layer.camera = self.camera
-        """
         # Place the layers
         self._place_layers()
         self.connective_layers, self.all_layers = self._construct_connective_layers()

@@ -79,12 +66,6 @@ class NeuralNetwork(Group):
         self.add(self.all_layers)
         # Print neural network
         print(repr(self))
-        # Set the camera orientation for 3D Layers
-        """
-        if not self.camera is None and isinstance(self.camera, ThreeDCamera):
-            self.camera.set_phi(camera_phi)
-            self.camera.set_theta(camera_theta)
-        """
 
     def _place_layers(self):
         """Creates the neural network"""

@@ -138,21 +119,9 @@ class NeuralNetwork(Group):
                 next_layer = next_layer.all_layers[0]
             # Find connective layer with correct layer pair
             connective_layer = get_connective_layer(current_layer, next_layer)
-            """
-            if not isinstance(connective_layer, ThreeDLayer):
-                # Make the layer fixed in frame if its not 3D
-                self.camera.add_fixed_orientation_mobjects(connective_layer)
-                self.camera.add_fixed_in_frame_mobjects(connective_layer)
-            """
             connective_layers.add(connective_layer)
             # Add the layer to the list of layers
             all_layers.add(connective_layer)
-        # Check if final layer is a 3D layer
-        """
-        if not isinstance(self.input_layers[-1], ThreeDLayer):
-            self.camera.add_fixed_orientation_mobjects(self.input_layers[-1])
-            self.camera.add_fixed_in_frame_mobjects(self.input_layers[-1])
-        """
         # Add final layer
         all_layers.add(self.input_layers[-1])
         # Handle layering

@@ -194,16 +163,22 @@ class NeuralNetwork(Group):
             if isinstance(layer, ConnectiveLayer):
                 """
                 NOTE: By default a connective layer will get the combined
-                layer_args of the layers it is connecting.
+                layer_args of the layers it is connecting and itself.
                 """
                 before_layer_args = {}
+                current_layer_args = {}
                 after_layer_args = {}
                 if layer.input_layer in layer_args:
                     before_layer_args = layer_args[layer.input_layer]
+                    current_layer_args = layer_args[layer]
                 if layer.output_layer in layer_args:
                     after_layer_args = layer_args[layer.output_layer]
                 # Merge the two dicts
-                current_layer_args = {**before_layer_args, **after_layer_args}
+                current_layer_args = {
+                    **before_layer_args,
+                    **current_layer_args,
+                    **after_layer_args
+                }
             else:
                 current_layer_args = {}
                 if layer in layer_args:

@@ -255,6 +230,17 @@ class NeuralNetwork(Group):
             layer.scale(scale_factor, **kwargs)
         # super().scale(scale_factor)
 
+    def filter_layers(self, function):
+        """Filters layers of the network given function"""
+        layers_to_return = []
+        for layer in self.all_layers:
+            func_out = function(layer)
+            assert isinstance(func_out, bool), "Filter layers function returned a non-boolean type."
+            if func_out:
+                layers_to_return.append(layer)
+        return layers_to_return
+
     def __repr__(self, metadata=["z_index", "title_text"]):
         """Print string representation of layers"""
         inner_string = ""

@@ -270,7 +256,6 @@ class NeuralNetwork(Group):
         string_repr = "NeuralNetwork([\n" + inner_string + "])"
         return string_repr
 
 class FeedForwardNeuralNetwork(NeuralNetwork):
     """NeuralNetwork with just feed forward layers"""

View File

@@ -4,7 +4,6 @@
 from manim import *
 from manim_ml.neural_network.layers.util import get_connective_layer
 
 class RemoveLayer(AnimationGroup):
     """
     Animation for removing a layer from a neural network.

View File

@@ -1,4 +1,5 @@
 from manim import *
+from manim_ml.neural_network.animations.dropout import make_neural_network_dropout_animation
 from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
 from manim_ml.neural_network.layers.image import ImageLayer
 from PIL import Image

@@ -7,31 +8,67 @@ import numpy as np
 config.pixel_height = 1200
 config.pixel_width = 1900
-config.frame_height = 4.0
-config.frame_width = 4.0
+config.frame_height = 5.0
+config.frame_width = 5.0
+
+def make_code_snippet():
+    code_str = """
+        nn = NeuralNetwork([
+            FeedForwardLayer(3),
+            FeedForwardLayer(5),
+            FeedForwardLayer(3),
+            FeedForwardLayer(5),
+            FeedForwardLayer(4),
+        ])
+        self.play(
+            make_neural_network_dropout_animation(
+                nn, dropout_rate=0.25, do_forward_pass=True
+            )
+        )
+    """
+    code = Code(
+        code=code_str,
+        tab_width=4,
+        background_stroke_width=1,
+        background_stroke_color=WHITE,
+        insert_line_no=False,
+        style="monokai",
+        language="py",
+    )
+    code.scale(0.28)
+
+    return code
 
 class DropoutNeuralNetworkScene(Scene):
     def construct(self):
-        image = Image.open("../assets/gan/real_image.jpg")
-        numpy_image = np.asarray(image)
         # Make nn
-        layers = [
+        nn = NeuralNetwork([
             FeedForwardLayer(3, rectangle_color=BLUE),
             FeedForwardLayer(5, rectangle_color=BLUE),
             FeedForwardLayer(3, rectangle_color=BLUE),
-            FeedForwardLayer(6, rectangle_color=BLUE),
-        ]
-        nn = NeuralNetwork(layers)
+            FeedForwardLayer(5, rectangle_color=BLUE),
+            FeedForwardLayer(4, rectangle_color=BLUE),
+        ],
+            layer_spacing=0.4
+        )
         # Center the nn
         nn.move_to(ORIGIN)
         self.add(nn)
+        # Make code snippet
+        code_snippet = make_code_snippet()
+        self.add(code_snippet)
+        code_snippet.next_to(nn, DOWN * 0.7)
+        Group(code_snippet, nn).move_to(ORIGIN)
         # Play animation
-        for i in range(5):
-            self.play(
-                nn.make_forward_pass_animation(run_time=5, feed_forward_dropout=True)
-            )
+        self.play(
+            make_neural_network_dropout_animation(
+                nn,
+                dropout_rate=0.25,
+                do_forward_pass=True
+            )
+        )
+        self.wait(1)
 
 if __name__ == "__main__":
     """Render all scenes"""