Mirror of https://github.com/helblazer811/ManimML.git (synced 2025-05-18 03:05:23 +08:00)
Added working convolutional layer.

examples/cnn/cnn.py | 72 lines (new file)
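
For orientation before the diff: a minimal usage sketch of the new layer, distilled from the examples/cnn/cnn.py file added below (assumes manim and manim_ml are installed; the scene name and the specific filter counts here are illustrative, not part of the commit):

    from manim import *
    from manim_ml.neural_network.layers.convolutional import ConvolutionalLayer
    from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
    from manim_ml.neural_network.neural_network import NeuralNetwork

    class MinimalConvScene(ThreeDScene):
        def construct(self):
            # ConvolutionalLayer(num_filters, filter_width, filter_height), per the new signature
            nn = NeuralNetwork([
                ConvolutionalLayer(3, 3, 3, filter_spacing=0.2),
                ConvolutionalLayer(5, 2, 2, filter_spacing=0.2),
                FeedForwardLayer(3),
            ], layer_spacing=0.2)
            nn.move_to(ORIGIN)
            self.play(Create(nn))
            # Pulses travel along the filter corner lines
            self.play(nn.make_forward_pass_animation(run_time=5))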
examples/cnn/cnn.py (new file)
@@ -0,0 +1,72 @@
+from manim import *
+from PIL import Image
+
+from manim_ml.neural_network.layers.convolutional import ConvolutionalLayer
+from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
+from manim_ml.neural_network.layers.image import ImageLayer
+from manim_ml.neural_network.neural_network import NeuralNetwork
+
+def make_code_snippet():
+    code_str = """
+        # Make nn
+        nn = NeuralNetwork([
+            ImageLayer(numpy_image),
+            ConvolutionalLayer(3, 3, 3),
+            ConvolutionalLayer(5, 2, 2),
+            ConvolutionalLayer(10, 2, 1),
+            FeedForwardLayer(3),
+            FeedForwardLayer(1)
+        ], layer_spacing=0.2)
+        # Center the nn
+        self.play(Create(nn))
+        # Play animation
+        self.play(nn.make_forward_pass_animation(run_time=5))
+    """
+
+    code = Code(
+        code=code_str,
+        tab_width=4,
+        background_stroke_width=1,
+        background_stroke_color=WHITE,
+        insert_line_no=False,
+        style='monokai',
+        # background="window",
+        language="py",
+    )
+    code.scale(0.6)
+
+    return code
+
+# Make the specific scene
+config.pixel_height = 1200
+config.pixel_width = 1900
+config.frame_height = 12.0
+config.frame_width = 12.0
+
+class CombinedScene(ThreeDScene, Scene):
+    def construct(self):
+        image = Image.open('../../assets/mnist/digit.jpeg')
+        numpy_image = np.asarray(image)
+        # Make nn
+        nn = NeuralNetwork([
+            ImageLayer(numpy_image, height=3.5),
+            ConvolutionalLayer(3, 3, 3, filter_spacing=0.2),
+            ConvolutionalLayer(5, 2, 2, filter_spacing=0.2),
+            ConvolutionalLayer(10, 2, 1, filter_spacing=0.2),
+            FeedForwardLayer(3, rectangle_stroke_width=4, node_stroke_width=4).scale(2),
+            FeedForwardLayer(1, rectangle_stroke_width=4, node_stroke_width=4).scale(2)
+        ], layer_spacing=0.2)
+        nn.scale(0.9)
+        nn.move_to(ORIGIN)
+        nn.shift(UP * 1.8)
+        # Make code snippet
+        code = make_code_snippet()
+        code.shift(DOWN * 1.8)
+        # Center the nn
+        self.play(Create(nn))
+        self.add(code)
+        # Play animation
+        # self.set_camera_orientation(phi=280 * DEGREES, theta=-20 * DEGREES, gamma=90 * DEGREES)
+        # self.begin_ambient_camera_rotation()
+        self.play(nn.make_forward_pass_animation(run_time=5))
@@ -1,4 +1,4 @@
-from tempfile import _TemporaryFileWrapper
+from .convolutional_to_convolutional import ConvolutionalToConvolutional
 from .feed_forward_to_vector import FeedForwardToVector
 from .paired_query_to_feed_forward import PairedQueryToFeedForward
 from .embedding_to_feed_forward import EmbeddingToFeedForward
@@ -25,4 +25,5 @@ connective_layers_list = (
     TripletToFeedForward,
     PairedQueryToFeedForward,
     FeedForwardToVector,
+    ConvolutionalToConvolutional,
 )
@@ -1,25 +1,106 @@
 from manim import *
-from torch import _fake_quantize_learnable_per_tensor_affine
 from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer

 class ConvolutionalLayer(VGroupNeuralNetworkLayer):
     """Handles rendering a convolutional layer for a nn"""

-    def __init__(self, num_filters, filter_width, **kwargs):
+    def __init__(self, num_filters, filter_width, filter_height, filter_spacing=0.1, color=BLUE,
+                 pulse_color=ORANGE, **kwargs):
         super(VGroupNeuralNetworkLayer, self).__init__(**kwargs)
         self.num_filters = num_filters
         self.filter_width = filter_width
+        self.filter_height = filter_height
+        self.filter_spacing = filter_spacing
+        self.color = color
+        self.pulse_color = pulse_color

-        self._construct_neural_network_layer()
+        self._construct_layer(num_filters=self.num_filters, filter_width=self.filter_width, filter_height=self.filter_height)

-    def _construct_neural_network_layer(self):
+    def _construct_layer(self, num_filters=5, filter_width=4, filter_height=4):
         """Creates the neural network layer"""
-        pass
+        # Make axes, but hide the lines
+        axes = ThreeDAxes(
+            tips=False,
+            x_length=1,
+            y_length=1,
+            x_axis_config={
+                "include_ticks": False,
+                "stroke_width": 0.0
+            },
+            y_axis_config={
+                "include_ticks": False,
+                "stroke_width": 0.0
+            },
+            z_axis_config={
+                "include_ticks": False,
+                "stroke_width": 0.0
+            }
+        )
+        self.add(axes)
+        # Set the camera angle so that the
+        # self.set_camera_orientation(phi=75 * DEGREES, theta=30 * DEGREES)
+        # Draw rectangles that are filled in with opacity
+        self.rectangles = VGroup()
+        for filter_index in range(num_filters):
+            rectangle = Rectangle(
+                color=self.color,
+                height=filter_height,
+                width=filter_width,
+                fill_color=self.color,
+                fill_opacity=0.2,
+                stroke_color=WHITE,
+            )
+            rectangle.rotate_about_origin((80 - filter_index * 0.5) * DEGREES, np.array([0, 1, 0]))  # Rotate about y axis
+            rectangle.rotate_about_origin(15 * DEGREES, np.array([1, 0, 0]))  # Rotate about x axis
+            rectangle.shift(np.array([filter_index * self.filter_spacing, filter_height * 0.5, -3]))
+
+            self.rectangles.add(rectangle)
+
+        self.add(self.rectangles)
+
+        self.corner_lines = self.make_filter_corner_lines()
+        self.add(self.corner_lines)
+
+    def make_filter_corner_lines(self):
+        """Make lines connecting the corners of the first and last filters"""
+        corner_lines = VGroup()
+
+        first_rectangle = self.rectangles[0]
+        last_rectangle = self.rectangles[-1]
+        first_vertices = first_rectangle.get_vertices()
+        last_vertices = last_rectangle.get_vertices()
+        for vertex_index in range(len(first_vertices)):
+            # Make a line between corresponding vertices
+            line = Line(
+                start=first_vertices[vertex_index],
+                end=last_vertices[vertex_index],
+                color=WHITE,
+                stroke_opacity=0.0
+            )
+            corner_lines.add(line)
+
+        return corner_lines

     def make_forward_pass_animation(self, layer_args={}, **kwargs):
-        # make highlight animation
-        return None
+        """Convolution forward pass animation"""
+        animations = []
+        for line in self.corner_lines:
+            pulse = ShowPassingFlash(
+                line.copy()
+                .set_color(self.pulse_color)
+                .set_stroke(opacity=1.0),
+                time_width=0.5
+            )
+            animations.append(pulse)
+        # Make animation group
+        animation_group = AnimationGroup(
+            *animations
+        )
+
+        return animation_group

     @override_animation(Create)
     def _create_override(self, **kwargs):
-        pass
+        return FadeIn(self.rectangles)
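
One detail worth flagging in the hunk above: `super(VGroupNeuralNetworkLayer, self).__init__(**kwargs)` starts the MRO lookup *after* VGroupNeuralNetworkLayer, so VGroupNeuralNetworkLayer's own __init__ never runs. If that is unintended, a sketch of the conventional zero-argument form (attribute names taken from the diff):

    class ConvolutionalLayer(VGroupNeuralNetworkLayer):
        def __init__(self, num_filters, filter_width, filter_height, **kwargs):
            # Zero-argument super() resolves to the next class after
            # ConvolutionalLayer in the MRO, i.e. VGroupNeuralNetworkLayer,
            # so its __init__ is not skipped.
            super().__init__(**kwargs)
            self.num_filters = num_filters
            self.filter_width = filter_width
            self.filter_height = filter_height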
@@ -0,0 +1,63 @@
+from manim import *
+from manim_ml.neural_network.layers.convolutional import ConvolutionalLayer
+from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer
+
+class ConvolutionalToConvolutional(ConnectiveLayer):
+    """Convolutional to Convolutional connective layer"""
+    input_class = ConvolutionalLayer
+    output_class = ConvolutionalLayer
+
+    def __init__(self, input_layer, output_layer, color=WHITE, pulse_color=RED,
+                 **kwargs):
+        super().__init__(input_layer, output_layer, input_class=ConvolutionalLayer, output_class=ConvolutionalLayer,
+                         **kwargs)
+        self.color = color
+        self.pulse_color = pulse_color
+
+        self.lines = self.make_lines()
+        self.add(self.lines)
+
+    def make_lines(self):
+        """Make lines connecting the input and output layers"""
+        lines = VGroup()
+        # Get the last rectangle of the input layer and the first of the output layer
+        input_rectangle = self.input_layer.rectangles[-1]
+        output_rectangle = self.output_layer.rectangles[0]
+        input_vertices = input_rectangle.get_vertices()
+        output_vertices = output_rectangle.get_vertices()
+        # Go through each vertex
+        for vertex_index in range(len(input_vertices)):
+            # Make a line
+            line = Line(
+                start=input_vertices[vertex_index],
+                end=output_vertices[vertex_index],
+                color=self.color,
+                stroke_opacity=0.0
+            )
+            lines.add(line)
+
+        return lines
+
+    def make_forward_pass_animation(self, layer_args={}, run_time=1.5, **kwargs):
+        """Forward pass animation from conv to conv"""
+        animations = []
+        # Go through the lines
+        for line in self.lines:
+            pulse = ShowPassingFlash(
+                line.copy()
+                .set_color(self.pulse_color)
+                .set_stroke(opacity=1.0),
+                time_width=0.5
+            )
+            animations.append(pulse)
+        # Make animation group
+        animation_group = AnimationGroup(
+            *animations
+        )
+
+        return animation_group
+
+    @override_animation(Create)
+    def _create_override(self, **kwargs):
+        return AnimationGroup()
@@ -17,7 +17,7 @@ class NeuralNetworkLayer(ABC, Group):

     @override_animation(Create)
     def _create_override(self):
-        pass
+        return AnimationGroup()

     def __repr__(self):
         return f"{type(self).__name__}"
@@ -54,6 +54,21 @@ class ConnectiveLayer(VGroupNeuralNetworkLayer):
     def make_forward_pass_animation(self, layer_args={}, **kwargs):
         pass

+    @override_animation(Create)
+    def _create_override(self):
+        return super()._create_override()
+
+class BlankConnective(ConnectiveLayer):
+    """Connective layer to be used when the given pair of layers is undefined"""
+
+    def __init__(self, input_layer, output_layer, input_class=None, output_class=None, **kwargs):
+        input_class = input_layer.__class__
+        output_class = output_layer.__class__
+        super().__init__(input_layer, output_layer, input_class, output_class, **kwargs)
+
+    def make_forward_pass_animation(self, layer_args={}, **kwargs):
+        return AnimationGroup()
+
+    @override_animation(Create)
+    def _create_override(self):
+        return super()._create_override()
@@ -1,4 +1,6 @@
 from manim import *
+
+from manim_ml.neural_network.layers.parent_layers import BlankConnective
 from ..layers import connective_layers_list

 def get_connective_layer(input_layer, output_layer):
@@ -14,7 +16,10 @@ def get_connective_layer(input_layer, output_layer):
             connective_layer = connective_layer_class(input_layer, output_layer)

+    if connective_layer is None:
+        connective_layer = BlankConnective(input_layer, output_layer)
+        """
         raise Exception(f"Unrecognized class pair {input_layer.__class__.__name__}" + \
             f" and {output_layer.__class__.__name__}")
+        """

     return connective_layer
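
The hunk above changes get_connective_layer from raising on an unrecognized layer pair to falling back to BlankConnective. A self-contained sketch of that dispatch-with-fallback pattern (ALayer/BLayer and the registry are hypothetical stand-ins, not the manim_ml API):

    class ALayer: pass
    class BLayer: pass

    class BlankConnective:
        """No-op stand-in used when no specific connective exists for a pair."""
        def __init__(self, input_layer, output_layer):
            self.input_layer, self.output_layer = input_layer, output_layer

    connective_registry = {}  # e.g. {(ConvLayer, ConvLayer): ConvToConvConnective}

    def get_connective(input_layer, output_layer):
        cls = connective_registry.get((type(input_layer), type(output_layer)))
        if cls is None:
            # Fall back to a no-op connective instead of raising an Exception
            return BlankConnective(input_layer, output_layer)
        return cls(input_layer, output_layer)

    print(type(get_connective(ALayer(), BLayer())).__name__)  # -> BlankConnective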
@@ -1,16 +1,17 @@
 from manim import *
 from PIL import Image

 from manim_ml.neural_network.layers.convolutional import ConvolutionalLayer
 from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
 from manim_ml.neural_network.layers.image import ImageLayer
 from manim_ml.neural_network.neural_network import NeuralNetwork

-class SingleConvolutionalLayerScence(Scene):
+class SingleConvolutionalLayerScene(ThreeDScene):

     def construct(self):

         # Make nn
         layers = [
-            ConvolutionalLayer()
+            ConvolutionalLayer(3, 4)
         ]
         nn = NeuralNetwork(layers)
         nn.scale(1.3)
@@ -18,25 +19,34 @@ class SingleConvolutionalLayerScence(Scene):
         nn.move_to(ORIGIN)
         self.add(nn)
         # Play animation
         self.play(nn.make_forward_pass_animation(run_time=5))
+        self.set_camera_orientation(phi=280 * DEGREES, theta=-10 * DEGREES, gamma=90 * DEGREES)
+        # self.play(nn.make_forward_pass_animation(run_time=5))

-class ThreeDLightSourcePosition(ThreeDScene, Scene):
-    def construct(self):
-        axes = ThreeDAxes()
-        sphere = Surface(
-            lambda u, v: np.array([
-                u,
-                v,
-                0
-            ]), v_range=[0, TAU], u_range=[-PI / 2, PI / 2],
-            checkerboard_colors=[RED_D, RED_E], resolution=(15, 32)
-        )
-        self.renderer.camera.light_source.move_to(3 * IN)  # changes the source of the light
-        self.set_camera_orientation(phi=90 * DEGREES, theta=0 * DEGREES)
-        self.add(axes, sphere)
+# Make the specific scene
+config.pixel_height = 1200
+config.pixel_width = 1900
+config.frame_height = 12.0
+config.frame_width = 12.0

-class CombinedScene(Scene):
-
-    def constuct(self):
-        pass
+class CombinedScene(ThreeDScene, Scene):
+    def construct(self):
+        image = Image.open('../assets/mnist/digit.jpeg')
+        numpy_image = np.asarray(image)
+        # Make nn
+        nn = NeuralNetwork([
+            ImageLayer(numpy_image, height=1.4),
+            ConvolutionalLayer(3, 3, 3, filter_spacing=0.2),
+            ConvolutionalLayer(5, 2, 2, filter_spacing=0.2),
+            ConvolutionalLayer(10, 2, 1, filter_spacing=0.2),
+            FeedForwardLayer(3, rectangle_stroke_width=4, node_stroke_width=4).scale(2),
+            FeedForwardLayer(1, rectangle_stroke_width=4, node_stroke_width=4).scale(2)
+        ], layer_spacing=0.2)
+        nn.scale(1.3)
+        # Center the nn
+        nn.move_to(ORIGIN)
+        self.play(Create(nn))
+        # Play animation
+        # self.set_camera_orientation(phi=280 * DEGREES, theta=-20 * DEGREES, gamma=90 * DEGREES)
+        # self.begin_ambient_camera_rotation()
+        self.play(nn.make_forward_pass_animation(run_time=5))