mirror of
https://github.com/helblazer811/ManimML.git
synced 2025-05-17 18:55:54 +08:00
General changes, got basic visualization of an activation function working for a convolutional layer.
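The headline change is that a Convolutional2DLayer can now be given an activation_function argument by name (e.g. "ReLU"), and its visualization is exercised by the new forward-pass test added below. A minimal usage sketch, distilled from that test (the layer sizes and spacing values are simply the ones the test uses):

    from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer
    from manim_ml.neural_network.neural_network import NeuralNetwork

    # Attach an activation function to a convolutional layer by name.
    nn = NeuralNetwork([
        Convolutional2DLayer(1, 7, filter_spacing=0.32),
        Convolutional2DLayer(3, 5, 3, filter_spacing=0.32, activation_function="ReLU"),
    ],
    layer_spacing=0.25,
    )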
tests/test_activation_function.py (new file, 38 lines)
@@ -0,0 +1,38 @@
from manim import *
from PIL import Image
import numpy as np

from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.layers.image import ImageLayer
from manim_ml.neural_network.neural_network import NeuralNetwork

# Make the specific scene
config.pixel_height = 1200
config.pixel_width = 1900
config.frame_height = 6.0
config.frame_width = 6.0

class CombinedScene(ThreeDScene):
    def construct(self):
        image = Image.open("../assets/mnist/digit.jpeg")
        numpy_image = np.asarray(image)
        # Make nn
        nn = NeuralNetwork([
            ImageLayer(numpy_image, height=1.5),
            Convolutional2DLayer(1, 7, filter_spacing=0.32),
            Convolutional2DLayer(3, 5, 3, filter_spacing=0.32, activation_function="ReLU"),
            FeedForwardLayer(3),
        ],
        layer_spacing=0.25,
        )
        # Center the nn
        nn.move_to(ORIGIN)
        self.add(nn)
        # Play animation
        forward_pass = nn.make_forward_pass_animation(
            corner_pulses=False,
            all_filters_at_once=False
        )
        self.wait(1)
        self.play(forward_pass)
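Since CombinedScene above is a standard Manim scene, it can presumably be rendered with the usual Manim CLI, e.g. manim -pql test_activation_function.py CombinedScene run from the tests/ directory (the exact command is an assumption; this commit does not document a runner). Note that the scene loads the digit image through the relative path ../assets/mnist/digit.jpeg, so the working directory matters.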
@@ -34,10 +34,14 @@ class Simple3DConvScene(ThreeDScene):
         # Make nn
         layers = [
             Convolutional2DLayer(
-                1, feature_map_size=3, filter_size=3
+                num_feature_maps=1,
+                feature_map_size=3,
+                filter_size=3
             ),
             Convolutional2DLayer(
-                1, feature_map_size=3, filter_size=3
+                num_feature_maps=1,
+                feature_map_size=3,
+                filter_size=3
             ),
         ]
         nn = NeuralNetwork(layers)
@@ -59,12 +63,11 @@ class CombinedScene(ThreeDScene):
         image = Image.open("../assets/mnist/digit.jpeg")
         numpy_image = np.asarray(image)
         # Make nn
-        nn = NeuralNetwork(
-            [
+        nn = NeuralNetwork([
             ImageLayer(numpy_image, height=1.5),
-            Convolutional2DLayer(1, 7, 3, filter_spacing=0.32),
+            Convolutional2DLayer(1, 7, filter_spacing=0.32),
             Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
-            Convolutional2DLayer(5, 3, 1, filter_spacing=0.18),
+            Convolutional2DLayer(5, 3, 3, filter_spacing=0.18),
             FeedForwardLayer(3),
             FeedForwardLayer(3),
         ],
@@ -19,12 +19,36 @@ class CombinedScene(ThreeDScene):
         image = Image.open("../assets/mnist/digit.jpeg")
         numpy_image = np.asarray(image)
         # Make nn
-        nn = NeuralNetwork(
-            [
+        nn = NeuralNetwork([
             ImageLayer(numpy_image, height=1.5),
-            Convolutional2DLayer(1, 8, 8, 3, 3, filter_spacing=0.32),
+            Convolutional2DLayer(1, 8, filter_spacing=0.32),
             MaxPooling2DLayer(kernel_size=2),
             Convolutional2DLayer(3, 3, 2, filter_spacing=0.32),
         ],
         layer_spacing=0.25,
         )
         # Center the nn
         nn.move_to(ORIGIN)
         self.add(nn)
+        self.wait(5)
         # Play animation
         forward_pass = nn.make_forward_pass_animation(
             corner_pulses=False, all_filters_at_once=False
         )
+        print(forward_pass)
+        print(forward_pass.animations)
         self.wait(1)
         self.play(forward_pass)
+
+class SmallNetwork(ThreeDScene):
+    def construct(self):
+        image = Image.open("../assets/mnist/digit.jpeg")
+        numpy_image = np.asarray(image)
+        # Make nn
+        nn = NeuralNetwork([
+            ImageLayer(numpy_image, height=1.5),
+            Convolutional2DLayer(1, 8, filter_spacing=0.32),
+            MaxPooling2DLayer(kernel_size=2),
+            Convolutional2DLayer(3, 5, 5, 3, 3, filter_spacing=0.32),
+        ],
+        layer_spacing=0.25,
+        )
@@ -36,4 +60,4 @@ class CombinedScene(ThreeDScene):
             corner_pulses=False, all_filters_at_once=False
         )
         self.wait(1)
-        self.play(forward_pass)
+        self.play(forward_pass)
tests/test_succession.py (new file, 19 lines)
@@ -0,0 +1,19 @@
from manim import *

class TestSuccession(Scene):

    def construct(self):
        white_dot = Dot(color=WHITE)
        white_dot.shift(UP)

        red_dot = Dot(color=RED)

        self.play(
            Succession(
                Create(white_dot),
                white_dot.animate.shift(RIGHT),
                Create(red_dot),
                Wait(1),
                Uncreate(red_dot),
            )
        )