From 1406acd43ec9662ef629809449888699aaa09f97 Mon Sep 17 00:00:00 2001 From: Alec Helbling Date: Wed, 1 Feb 2023 22:33:42 -0500 Subject: [PATCH] Refactored makefile, moved around files to more appropriate places, deleted unused files, reformatted code. --- Makefile | 21 ++---- .../basic_neural_network.py | 6 +- examples/cnn/activation_functions.py | 4 +- examples/cnn/cnn.py | 2 + examples/cnn/cnn_max_pool.py | 7 +- examples/cnn/one_by_one_convolution.py | 1 + examples/cnn/padding_example.py | 16 +++-- examples/cnn/resnet_block.py | 11 ++-- examples/lenet/lenet.py | 4 +- .../oracle_guidance/oracle_guidance.py | 2 +- .../a_simple_feed_forward_network.py | 13 ++-- .../readme_example/activation_functions.py | 10 ++- .../animating_the_forward_pass.py | 10 ++- ...onvolutional_neural_network_with_images.py | 11 +++- .../convolutional_neural_networks.py | 10 ++- examples/readme_example/example.py | 10 ++- .../readme_example/first_neural_network.py | 10 ++- examples/readme_example/max_pooling.py | 7 +- .../readme_example/neural_network_dropout.py | 7 +- examples/readme_example/old_example.py | 9 +-- examples/readme_example/setting_up_a_scene.py | 5 +- manim_ml/diffusion/mcmc.py | 2 +- manim_ml/flow/__init__.py | 0 manim_ml/flow/flow.py | 22 ------- manim_ml/lazy_animation.py | 13 ---- manim_ml/manifold.py | 3 - manim_ml/neural_network/__init__.py | 38 ++++++++--- .../activation_functions/__init__.py | 11 ++-- .../activation_function.py | 1 + .../activation_functions/sigmoid.py | 1 + .../neural_network_transformations.py | 0 .../architectures/feed_forward.py | 1 + manim_ml/neural_network/layers/__init__.py | 4 +- .../neural_network/layers/convolutional_2d.py | 28 ++++---- .../convolutional_2d_to_convolutional_2d.py | 19 ++---- .../convolutional_2d_to_max_pooling_2d.py | 36 +++++----- manim_ml/neural_network/layers/embedding.py | 2 +- .../neural_network/layers/feed_forward.py | 5 +- manim_ml/neural_network/layers/image.py | 2 +- .../layers/image_to_convolutional_2d.py | 2 +- .../neural_network/layers/max_pooling_2d.py | 2 +- .../layers/max_pooling_2d_to_feed_forward.py | 5 +- .../neural_network/layers/paired_query.py | 2 +- manim_ml/neural_network/layers/triplet.py | 2 +- manim_ml/neural_network/layers/util.py | 6 +- manim_ml/neural_network/neural_network.py | 66 +++++++++---------- manim_ml/one_to_one_sync.py | 11 ---- manim_ml/utils/mobjects/connections.py | 61 ++++++++--------- .../{ => utils/mobjects}/gridded_rectangle.py | 10 +-- manim_ml/{ => utils/mobjects}/image.py | 0 manim_ml/{ => utils/mobjects}/list_group.py | 0 manim_ml/{ => utils/mobjects}/probability.py | 0 manim_ml/utils/testing/frames_comparison.py | 3 +- setup.py | 2 +- tests/conftest.py | 3 +- tests/test_activation_function.py | 10 ++- tests/test_camera_move.py | 14 ++-- tests/test_conv_padding.py | 34 +++++----- tests/test_convolutional_2d_layer.py | 21 +++--- tests/test_decision_tree.py | 4 ++ tests/test_feed_forward.py | 19 ++---- tests/test_gridded_rectangle.py | 12 ++-- tests/test_image_homotopy.py | 2 +- tests/test_max_pool.py | 10 ++- tests/test_mcmc.py | 20 +++--- tests/test_nested_neural_networks.py | 2 +- tests/test_residual_connection.py | 45 ++++++------- tests/test_show_gaussian.py | 8 +-- tests/test_succession.py | 4 +- 69 files changed, 372 insertions(+), 372 deletions(-) delete mode 100644 manim_ml/flow/__init__.py delete mode 100644 manim_ml/flow/flow.py delete mode 100644 manim_ml/lazy_animation.py delete mode 100644 manim_ml/manifold.py rename manim_ml/neural_network/{ => 
animations}/neural_network_transformations.py (100%) delete mode 100644 manim_ml/one_to_one_sync.py rename manim_ml/{ => utils/mobjects}/gridded_rectangle.py (96%) rename manim_ml/{ => utils/mobjects}/image.py (100%) rename manim_ml/{ => utils/mobjects}/list_group.py (100%) rename manim_ml/{ => utils/mobjects}/probability.py (100%) diff --git a/Makefile b/Makefile index d59f157..1a79895 100644 --- a/Makefile +++ b/Makefile @@ -1,18 +1,9 @@ setup: - conda activate manim + conda activate manim; \ export PROJECT_ROOT=$(pwd) -video: - manim -pqh src/variational_autoencoder.py VAEScene --media_dir media - cp media/videos/vae/720p60/VAEScene.mp4 examples -train: - cd src/autoencoder_models - python vanilla_autoencoder.py - python variational_autoencoder.py -generate_visualizations: - cd src/autoencoder_models - python generate_images.py - python generate_interpolation.py - python generate_disentanglement.py checkstyle: - pycodestyle src - pydocstyle src \ No newline at end of file + black .; \ + pydocstyle . +publish_pip: + python3 -m build; \ + python3 -m twine upload --repository pypi dist/* \ No newline at end of file diff --git a/examples/basic_neural_network/basic_neural_network.py b/examples/basic_neural_network/basic_neural_network.py index 0a823d1..8217c31 100644 --- a/examples/basic_neural_network/basic_neural_network.py +++ b/examples/basic_neural_network/basic_neural_network.py @@ -8,11 +8,7 @@ class NeuralNetworkScene(Scene): def construct(self): # Make the Layer object - layers = [ - FeedForwardLayer(3), - FeedForwardLayer(5), - FeedForwardLayer(3) - ] + layers = [FeedForwardLayer(3), FeedForwardLayer(5), FeedForwardLayer(3)] nn = NeuralNetwork(layers) nn.scale(2) nn.move_to(ORIGIN) diff --git a/examples/cnn/activation_functions.py b/examples/cnn/activation_functions.py index 7465d2a..c7bbca1 100644 --- a/examples/cnn/activation_functions.py +++ b/examples/cnn/activation_functions.py @@ -15,6 +15,7 @@ config.frame_height = 7.0 config.frame_width = 7.0 ROOT_DIR = Path(__file__).parents[2] + def make_code_snippet(): code_str = """ # Make the neural network @@ -42,6 +43,7 @@ def make_code_snippet(): return code + class CombinedScene(ThreeDScene): def construct(self): image = Image.open(ROOT_DIR / "assets/mnist/digit.jpeg") @@ -72,4 +74,4 @@ class CombinedScene(ThreeDScene): # Play animation forward_pass = nn.make_forward_pass_animation() self.wait(1) - self.play(forward_pass) \ No newline at end of file + self.play(forward_pass) diff --git a/examples/cnn/cnn.py b/examples/cnn/cnn.py index a779f2a..ef04747 100644 --- a/examples/cnn/cnn.py +++ b/examples/cnn/cnn.py @@ -15,6 +15,7 @@ config.frame_height = 7.0 config.frame_width = 7.0 ROOT_DIR = Path(__file__).parents[2] + def make_code_snippet(): code_str = """ # Make nn @@ -44,6 +45,7 @@ def make_code_snippet(): return code + class CombinedScene(ThreeDScene): def construct(self): image = Image.open(ROOT_DIR / "assets/mnist/digit.jpeg") diff --git a/examples/cnn/cnn_max_pool.py b/examples/cnn/cnn_max_pool.py index 4a5f5fb..8ac458d 100644 --- a/examples/cnn/cnn_max_pool.py +++ b/examples/cnn/cnn_max_pool.py @@ -14,6 +14,7 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 + def make_code_snippet(): code_str = """ # Make the neural network @@ -42,12 +43,14 @@ def make_code_snippet(): return code + class CombinedScene(ThreeDScene): def construct(self): image = Image.open("../../assets/mnist/digit.jpeg") numpy_image = np.asarray(image) # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ 
ImageLayer(numpy_image, height=1.5), Convolutional2DLayer(1, 8, filter_spacing=0.32), MaxPooling2DLayer(kernel_size=2), @@ -67,4 +70,4 @@ class CombinedScene(ThreeDScene): # Play animation forward_pass = nn.make_forward_pass_animation() self.wait(1) - self.play(forward_pass) \ No newline at end of file + self.play(forward_pass) diff --git a/examples/cnn/one_by_one_convolution.py b/examples/cnn/one_by_one_convolution.py index 2668885..79aa1a7 100644 --- a/examples/cnn/one_by_one_convolution.py +++ b/examples/cnn/one_by_one_convolution.py @@ -15,6 +15,7 @@ config.frame_height = 7.0 config.frame_width = 7.0 ROOT_DIR = Path(__file__).parents[2] + def make_code_snippet(): code_str = """ # Make nn diff --git a/examples/cnn/padding_example.py b/examples/cnn/padding_example.py index d8cb0a4..aa7e254 100644 --- a/examples/cnn/padding_example.py +++ b/examples/cnn/padding_example.py @@ -15,6 +15,7 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 + def make_code_snippet(): code_str = """ # Make nn @@ -43,27 +44,28 @@ def make_code_snippet(): return code -class CombinedScene(ThreeDScene): +class CombinedScene(ThreeDScene): def construct(self): # Make nn image = Image.open(ROOT_DIR / "assets/mnist/digit.jpeg") numpy_image = np.asarray(image) # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ ImageLayer(numpy_image, height=1.5), Convolutional2DLayer( - num_feature_maps=1, - feature_map_size=6, + num_feature_maps=1, + feature_map_size=6, padding=1, - padding_dashed=True + padding_dashed=True, ), Convolutional2DLayer( - num_feature_maps=3, + num_feature_maps=3, feature_map_size=6, filter_size=3, padding=0, - padding_dashed=False + padding_dashed=False, ), FeedForwardLayer(3), FeedForwardLayer(1), diff --git a/examples/cnn/resnet_block.py b/examples/cnn/resnet_block.py index b784256..0a3f19a 100644 --- a/examples/cnn/resnet_block.py +++ b/examples/cnn/resnet_block.py @@ -9,6 +9,7 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 + def make_code_snippet(): code_str = """ # Make the neural network @@ -37,13 +38,14 @@ def make_code_snippet(): return code -class ConvScene(ThreeDScene): +class ConvScene(ThreeDScene): def construct(self): image = Image.open("../../assets/mnist/digit.jpeg") numpy_image = np.asarray(image) - nn = NeuralNetwork({ + nn = NeuralNetwork( + { "layer1": Convolutional2DLayer(1, 5, padding=1), "layer2": Convolutional2DLayer(1, 5, 3, padding=1), "layer3": Convolutional2DLayer(1, 5, 3, padding=1), @@ -60,7 +62,4 @@ class ConvScene(ThreeDScene): self.add(code) Group(code, nn).move_to(ORIGIN) - self.play( - nn.make_forward_pass_animation(), - run_time=8 - ) \ No newline at end of file + self.play(nn.make_forward_pass_animation(), run_time=8) diff --git a/examples/lenet/lenet.py b/examples/lenet/lenet.py index a218123..956e95e 100644 --- a/examples/lenet/lenet.py +++ b/examples/lenet/lenet.py @@ -18,12 +18,14 @@ config.frame_width = 20.0 ROOT_DIR = Path(__file__).parents[2] + class CombinedScene(ThreeDScene): def construct(self): image = Image.open(ROOT_DIR / "assets/mnist/digit.jpeg") numpy_image = np.asarray(image) # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ ImageLayer(numpy_image, height=4.5), Convolutional2DLayer(1, 28), Convolutional2DLayer(6, 28, 5), diff --git a/examples/paper_visualizations/oracle_guidance/oracle_guidance.py b/examples/paper_visualizations/oracle_guidance/oracle_guidance.py index 3b658c4..f6d5c45 100644 --- a/examples/paper_visualizations/oracle_guidance/oracle_guidance.py +++ 
b/examples/paper_visualizations/oracle_guidance/oracle_guidance.py @@ -13,7 +13,7 @@ from manim_ml.neural_network.layers import FeedForwardLayer, EmbeddingLayer from manim_ml.neural_network.layers.util import get_connective_layer import os -from manim_ml.probability import GaussianDistribution +from manim_ml.utils.mobjects.probability import GaussianDistribution # Make the specific scene config.pixel_height = 1200 diff --git a/examples/readme_example/a_simple_feed_forward_network.py b/examples/readme_example/a_simple_feed_forward_network.py index f41a01c..e29bce7 100644 --- a/examples/readme_example/a_simple_feed_forward_network.py +++ b/examples/readme_example/a_simple_feed_forward_network.py @@ -8,14 +8,17 @@ config.pixel_width = 1200 config.frame_height = 4.0 config.frame_width = 4.0 + class CombinedScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ - FeedForwardLayer(num_nodes=3), - FeedForwardLayer(num_nodes=5), - FeedForwardLayer(num_nodes=3) - ]) + nn = NeuralNetwork( + [ + FeedForwardLayer(num_nodes=3), + FeedForwardLayer(num_nodes=5), + FeedForwardLayer(num_nodes=3), + ] + ) self.add(nn) # Center the nn nn.move_to(ORIGIN) diff --git a/examples/readme_example/activation_functions.py b/examples/readme_example/activation_functions.py index 803fb47..1f63541 100644 --- a/examples/readme_example/activation_functions.py +++ b/examples/readme_example/activation_functions.py @@ -10,12 +10,16 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 + class CombinedScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer(1, 7, filter_spacing=0.32), - Convolutional2DLayer(3, 5, 3, filter_spacing=0.32, activation_function="ReLU"), + Convolutional2DLayer( + 3, 5, 3, filter_spacing=0.32, activation_function="ReLU" + ), FeedForwardLayer(3, activation_function="Sigmoid"), ], layer_spacing=0.25, @@ -26,4 +30,4 @@ class CombinedScene(ThreeDScene): # Play animation forward_pass = nn.make_forward_pass_animation() self.play(ChangeSpeed(forward_pass, speedinfo={}), run_time=10) - self.wait(1) \ No newline at end of file + self.wait(1) diff --git a/examples/readme_example/animating_the_forward_pass.py b/examples/readme_example/animating_the_forward_pass.py index 66d64b5..c8fbe2b 100644 --- a/examples/readme_example/animating_the_forward_pass.py +++ b/examples/readme_example/animating_the_forward_pass.py @@ -1,6 +1,10 @@ from manim import * -from manim_ml.neural_network import Convolutional2DLayer, FeedForwardLayer, NeuralNetwork +from manim_ml.neural_network import ( + Convolutional2DLayer, + FeedForwardLayer, + NeuralNetwork, +) # Make the specific scene config.pixel_height = 700 @@ -8,10 +12,12 @@ config.pixel_width = 1900 config.frame_height = 7.0 config.frame_width = 7.0 + class CombinedScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer(1, 7, 3, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), Convolutional2DLayer(5, 3, 3, filter_spacing=0.18), diff --git a/examples/readme_example/convolutional_neural_network_with_images.py b/examples/readme_example/convolutional_neural_network_with_images.py index 8537910..56eb89d 100644 --- a/examples/readme_example/convolutional_neural_network_with_images.py +++ b/examples/readme_example/convolutional_neural_network_with_images.py @@ -2,7 +2,12 @@ from manim import * from PIL import Image import numpy as np -from manim_ml.neural_network import 
Convolutional2DLayer, FeedForwardLayer, NeuralNetwork, ImageLayer +from manim_ml.neural_network import ( + Convolutional2DLayer, + FeedForwardLayer, + NeuralNetwork, + ImageLayer, +) # Make the specific scene config.pixel_height = 700 @@ -10,13 +15,15 @@ config.pixel_width = 1900 config.frame_height = 7.0 config.frame_width = 7.0 + class CombinedScene(ThreeDScene): def construct(self): # Make nn image = Image.open("../../assets/mnist/digit.jpeg") numpy_image = np.asarray(image) # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ ImageLayer(numpy_image, height=1.5), Convolutional2DLayer(1, 7, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), diff --git a/examples/readme_example/convolutional_neural_networks.py b/examples/readme_example/convolutional_neural_networks.py index 843750a..529f7bb 100644 --- a/examples/readme_example/convolutional_neural_networks.py +++ b/examples/readme_example/convolutional_neural_networks.py @@ -1,6 +1,10 @@ from manim import * -from manim_ml.neural_network import Convolutional2DLayer, FeedForwardLayer, NeuralNetwork +from manim_ml.neural_network import ( + Convolutional2DLayer, + FeedForwardLayer, + NeuralNetwork, +) # Make the specific scene config.pixel_height = 700 @@ -8,10 +12,12 @@ config.pixel_width = 1900 config.frame_height = 7.0 config.frame_width = 7.0 + class CombinedScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer(1, 7, 3, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), Convolutional2DLayer(5, 3, 3, filter_spacing=0.18), diff --git a/examples/readme_example/example.py b/examples/readme_example/example.py index 66d64b5..c8fbe2b 100644 --- a/examples/readme_example/example.py +++ b/examples/readme_example/example.py @@ -1,6 +1,10 @@ from manim import * -from manim_ml.neural_network import Convolutional2DLayer, FeedForwardLayer, NeuralNetwork +from manim_ml.neural_network import ( + Convolutional2DLayer, + FeedForwardLayer, + NeuralNetwork, +) # Make the specific scene config.pixel_height = 700 @@ -8,10 +12,12 @@ config.pixel_width = 1900 config.frame_height = 7.0 config.frame_width = 7.0 + class CombinedScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer(1, 7, 3, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), Convolutional2DLayer(5, 3, 3, filter_spacing=0.18), diff --git a/examples/readme_example/first_neural_network.py b/examples/readme_example/first_neural_network.py index 66d64b5..c8fbe2b 100644 --- a/examples/readme_example/first_neural_network.py +++ b/examples/readme_example/first_neural_network.py @@ -1,6 +1,10 @@ from manim import * -from manim_ml.neural_network import Convolutional2DLayer, FeedForwardLayer, NeuralNetwork +from manim_ml.neural_network import ( + Convolutional2DLayer, + FeedForwardLayer, + NeuralNetwork, +) # Make the specific scene config.pixel_height = 700 @@ -8,10 +12,12 @@ config.pixel_width = 1900 config.frame_height = 7.0 config.frame_width = 7.0 + class CombinedScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer(1, 7, 3, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), Convolutional2DLayer(5, 3, 3, filter_spacing=0.18), diff --git a/examples/readme_example/max_pooling.py b/examples/readme_example/max_pooling.py index 4b5038d..b085bdf 100644 --- a/examples/readme_example/max_pooling.py +++ 
b/examples/readme_example/max_pooling.py @@ -12,11 +12,12 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 -class MaxPoolingScene(ThreeDScene): +class MaxPoolingScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer(1, 8), Convolutional2DLayer(3, 6, 3), MaxPooling2DLayer(kernel_size=2), @@ -30,4 +31,4 @@ class MaxPoolingScene(ThreeDScene): # Play animation forward_pass = nn.make_forward_pass_animation() self.play(ChangeSpeed(forward_pass, speedinfo={}), run_time=10) - self.wait(1) \ No newline at end of file + self.wait(1) diff --git a/examples/readme_example/neural_network_dropout.py b/examples/readme_example/neural_network_dropout.py index a536643..7f9676c 100644 --- a/examples/readme_example/neural_network_dropout.py +++ b/examples/readme_example/neural_network_dropout.py @@ -9,11 +9,12 @@ config.pixel_width = 1900 config.frame_height = 5.0 config.frame_width = 5.0 + class DropoutNeuralNetworkScene(Scene): - def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ FeedForwardLayer(3, rectangle_color=BLUE), FeedForwardLayer(5, rectangle_color=BLUE), FeedForwardLayer(3, rectangle_color=BLUE), @@ -31,4 +32,4 @@ class DropoutNeuralNetworkScene(Scene): nn, dropout_rate=0.25, do_forward_pass=True ) ) - self.wait(1) \ No newline at end of file + self.wait(1) diff --git a/examples/readme_example/old_example.py b/examples/readme_example/old_example.py index bfdaa0d..78a0535 100644 --- a/examples/readme_example/old_example.py +++ b/examples/readme_example/old_example.py @@ -1,4 +1,4 @@ -from manim import * +from manim import * from PIL import Image from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer @@ -6,11 +6,12 @@ from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.image import ImageLayer from manim_ml.neural_network.neural_network import NeuralNetwork -class ConvolutionalNetworkScene(Scene): +class ConvolutionalNetworkScene(Scene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer(1, 7, 3, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), Convolutional2DLayer(5, 3, 3, filter_spacing=0.18), @@ -22,4 +23,4 @@ class ConvolutionalNetworkScene(Scene): # Center the nn nn.move_to(ORIGIN) self.add(nn) - self.play(nn.make_forward_pass_animation()) \ No newline at end of file + self.play(nn.make_forward_pass_animation()) diff --git a/examples/readme_example/setting_up_a_scene.py b/examples/readme_example/setting_up_a_scene.py index 81de56e..29ab16f 100644 --- a/examples/readme_example/setting_up_a_scene.py +++ b/examples/readme_example/setting_up_a_scene.py @@ -1,9 +1,10 @@ from manim import * + # Import modules here -class BasicScene(ThreeDScene): +class BasicScene(ThreeDScene): def construct(self): # Your code goes here text = Text("Your first scene!") - self.add(text) \ No newline at end of file + self.add(text) diff --git a/manim_ml/diffusion/mcmc.py b/manim_ml/diffusion/mcmc.py index 7168ed6..f2429a7 100644 --- a/manim_ml/diffusion/mcmc.py +++ b/manim_ml/diffusion/mcmc.py @@ -7,7 +7,7 @@ import scipy import scipy.stats from tqdm import tqdm -from manim_ml.probability import GaussianDistribution +from manim_ml.utils.mobjects.probability import GaussianDistribution def gaussian_proposal(x, sigma=0.2): diff --git a/manim_ml/flow/__init__.py b/manim_ml/flow/__init__.py deleted file mode 100644 index e69de29..0000000 diff 
--git a/manim_ml/flow/flow.py b/manim_ml/flow/flow.py deleted file mode 100644 index 983cc4a..0000000 --- a/manim_ml/flow/flow.py +++ /dev/null @@ -1,22 +0,0 @@ -""" - Animated flow charts. -""" -from manim import * - - -class FlowGraph(VGroup): - """Graph container""" - - pass - - -class FlowNode(VGroup): - """Node in the FlowGraph""" - - pass - - -class DataNode(FlowNode): - """Node that outputs data""" - - pass diff --git a/manim_ml/lazy_animation.py b/manim_ml/lazy_animation.py deleted file mode 100644 index ba19711..0000000 --- a/manim_ml/lazy_animation.py +++ /dev/null @@ -1,13 +0,0 @@ -from manim import * - - -class LazyAnimation(Animation): - def __init__(self, animation_function): - self.animation_function = animation_function - super.__init__() - - def begin(self): - update_func_anim = UpdateFromFunc(self.neural_network, create_new_connective) - self.add - - super.begin() diff --git a/manim_ml/manifold.py b/manim_ml/manifold.py deleted file mode 100644 index 72c68ff..0000000 --- a/manim_ml/manifold.py +++ /dev/null @@ -1,3 +0,0 @@ -""" - Visaulization of a latent Manifold -""" diff --git a/manim_ml/neural_network/__init__.py b/manim_ml/neural_network/__init__.py index bd0d6cc..643500c 100644 --- a/manim_ml/neural_network/__init__.py +++ b/manim_ml/neural_network/__init__.py @@ -1,23 +1,41 @@ from manim_ml.neural_network.neural_network import NeuralNetwork from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer -from manim_ml.neural_network.layers.convolutional_2d_to_convolutional_2d import Convolutional2DToConvolutional2D -from manim_ml.neural_network.layers.convolutional_2d_to_feed_forward import Convolutional2DToFeedForward -from manim_ml.neural_network.layers.convolutional_2d_to_max_pooling_2d import Convolutional2DToMaxPooling2D +from manim_ml.neural_network.layers.convolutional_2d_to_convolutional_2d import ( + Convolutional2DToConvolutional2D, +) +from manim_ml.neural_network.layers.convolutional_2d_to_feed_forward import ( + Convolutional2DToFeedForward, +) +from manim_ml.neural_network.layers.convolutional_2d_to_max_pooling_2d import ( + Convolutional2DToMaxPooling2D, +) from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer -from manim_ml.neural_network.layers.embedding_to_feed_forward import EmbeddingToFeedForward +from manim_ml.neural_network.layers.embedding_to_feed_forward import ( + EmbeddingToFeedForward, +) from manim_ml.neural_network.layers.embedding import EmbeddingLayer -from manim_ml.neural_network.layers.feed_forward_to_embedding import FeedForwardToEmbedding -from manim_ml.neural_network.layers.feed_forward_to_feed_forward import FeedForwardToFeedForward +from manim_ml.neural_network.layers.feed_forward_to_embedding import ( + FeedForwardToEmbedding, +) +from manim_ml.neural_network.layers.feed_forward_to_feed_forward import ( + FeedForwardToFeedForward, +) from manim_ml.neural_network.layers.feed_forward_to_image import FeedForwardToImage from manim_ml.neural_network.layers.feed_forward_to_vector import FeedForwardToVector from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer -from manim_ml.neural_network.layers.image_to_convolutional_2d import ImageToConvolutional2DLayer +from manim_ml.neural_network.layers.image_to_convolutional_2d import ( + ImageToConvolutional2DLayer, +) from manim_ml.neural_network.layers.image_to_feed_forward import ImageToFeedForward from manim_ml.neural_network.layers.image import ImageLayer -from manim_ml.neural_network.layers.max_pooling_2d_to_convolutional_2d import 
MaxPooling2DToConvolutional2D +from manim_ml.neural_network.layers.max_pooling_2d_to_convolutional_2d import ( + MaxPooling2DToConvolutional2D, +) from manim_ml.neural_network.layers.max_pooling_2d import MaxPooling2DLayer -from manim_ml.neural_network.layers.paired_query_to_feed_forward import PairedQueryToFeedForward +from manim_ml.neural_network.layers.paired_query_to_feed_forward import ( + PairedQueryToFeedForward, +) from manim_ml.neural_network.layers.paired_query import PairedQueryLayer from manim_ml.neural_network.layers.triplet_to_feed_forward import TripletToFeedForward from manim_ml.neural_network.layers.triplet import TripletLayer -from manim_ml.neural_network.layers.vector import VectorLayer \ No newline at end of file +from manim_ml.neural_network.layers.vector import VectorLayer diff --git a/manim_ml/neural_network/activation_functions/__init__.py b/manim_ml/neural_network/activation_functions/__init__.py index 3d17800..56d701f 100644 --- a/manim_ml/neural_network/activation_functions/__init__.py +++ b/manim_ml/neural_network/activation_functions/__init__.py @@ -1,13 +1,12 @@ from manim_ml.neural_network.activation_functions.relu import ReLUFunction from manim_ml.neural_network.activation_functions.sigmoid import SigmoidFunction -name_to_activation_function_map = { - "ReLU": ReLUFunction, - "Sigmoid": SigmoidFunction -} +name_to_activation_function_map = {"ReLU": ReLUFunction, "Sigmoid": SigmoidFunction} + def get_activation_function_by_name(name): - assert name in name_to_activation_function_map.keys(), \ - f"Unrecognized activation function {name}" + assert ( + name in name_to_activation_function_map.keys() + ), f"Unrecognized activation function {name}" return name_to_activation_function_map[name] diff --git a/manim_ml/neural_network/activation_functions/activation_function.py b/manim_ml/neural_network/activation_functions/activation_function.py index e5d30a6..427bcab 100644 --- a/manim_ml/neural_network/activation_functions/activation_function.py +++ b/manim_ml/neural_network/activation_functions/activation_function.py @@ -4,6 +4,7 @@ import random import manim_ml.neural_network.activation_functions.relu as relu + class ActivationFunction(ABC, VGroup): """Abstract parent class for defining activation functions""" diff --git a/manim_ml/neural_network/activation_functions/sigmoid.py b/manim_ml/neural_network/activation_functions/sigmoid.py index 851a88d..c6d1fdc 100644 --- a/manim_ml/neural_network/activation_functions/sigmoid.py +++ b/manim_ml/neural_network/activation_functions/sigmoid.py @@ -5,6 +5,7 @@ from manim_ml.neural_network.activation_functions.activation_function import ( ActivationFunction, ) + class SigmoidFunction(ActivationFunction): """Sigmoid Activation Function""" diff --git a/manim_ml/neural_network/neural_network_transformations.py b/manim_ml/neural_network/animations/neural_network_transformations.py similarity index 100% rename from manim_ml/neural_network/neural_network_transformations.py rename to manim_ml/neural_network/animations/neural_network_transformations.py diff --git a/manim_ml/neural_network/architectures/feed_forward.py b/manim_ml/neural_network/architectures/feed_forward.py index 3536d6f..3955530 100644 --- a/manim_ml/neural_network/architectures/feed_forward.py +++ b/manim_ml/neural_network/architectures/feed_forward.py @@ -1,5 +1,6 @@ from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer + class FeedForwardNeuralNetwork(NeuralNetwork): """NeuralNetwork with just feed forward layers""" diff --git 
a/manim_ml/neural_network/layers/__init__.py b/manim_ml/neural_network/layers/__init__.py index a459034..8702326 100644 --- a/manim_ml/neural_network/layers/__init__.py +++ b/manim_ml/neural_network/layers/__init__.py @@ -10,7 +10,9 @@ from manim_ml.neural_network.layers.image_to_convolutional_2d import ( from manim_ml.neural_network.layers.max_pooling_2d_to_convolutional_2d import ( MaxPooling2DToConvolutional2D, ) -from manim_ml.neural_network.layers.max_pooling_2d_to_feed_forward import MaxPooling2DToFeedForward +from manim_ml.neural_network.layers.max_pooling_2d_to_feed_forward import ( + MaxPooling2DToFeedForward, +) from .convolutional_2d_to_convolutional_2d import Convolutional2DToConvolutional2D from .convolutional_2d import Convolutional2DLayer from .feed_forward_to_vector import FeedForwardToVector diff --git a/manim_ml/neural_network/layers/convolutional_2d.py b/manim_ml/neural_network/layers/convolutional_2d.py index 9120101..5b51e81 100644 --- a/manim_ml/neural_network/layers/convolutional_2d.py +++ b/manim_ml/neural_network/layers/convolutional_2d.py @@ -10,7 +10,8 @@ from manim_ml.neural_network.layers.parent_layers import ( ThreeDLayer, VGroupNeuralNetworkLayer, ) -from manim_ml.gridded_rectangle import GriddedRectangle +from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle + class FeatureMap(VGroup): """Class for making a feature map""" @@ -25,7 +26,7 @@ class FeatureMap(VGroup): padding=(0, 0), stroke_width=2.0, show_grid_lines=False, - padding_dashed=False + padding_dashed=False, ): super().__init__() self.color = color @@ -40,8 +41,12 @@ class FeatureMap(VGroup): # Check if we have non-zero padding if padding[0] > 0 or padding[1] > 0: # Make the exterior rectangle dashed - width_with_padding = (self.feature_map_size[0] + self.padding[0] * 2) * self.cell_width - height_with_padding = (self.feature_map_size[1] + self.padding[1] * 2) * self.cell_width + width_with_padding = ( + self.feature_map_size[0] + self.padding[0] * 2 + ) * self.cell_width + height_with_padding = ( + self.feature_map_size[1] + self.padding[1] * 2 + ) * self.cell_width self.untransformed_width = width_with_padding self.untransformed_height = height_with_padding @@ -58,7 +63,7 @@ class FeatureMap(VGroup): grid_stroke_width=self.stroke_width / 2, grid_stroke_color=self.color, show_grid_lines=self.show_grid_lines, - dotted_lines=self.padding_dashed + dotted_lines=self.padding_dashed, ) self.add(self.exterior_rectangle) # Add an interior rectangle with no fill color @@ -67,13 +72,13 @@ class FeatureMap(VGroup): fill_opacity=0.0, width=self.feature_map_size[0] * self.cell_width, height=self.feature_map_size[1] * self.cell_width, - stroke_width=self.stroke_width + stroke_width=self.stroke_width, ) self.add(self.interior_rectangle) else: # Just make an exterior rectangle with no dashes. 
- self.untransformed_height = self.feature_map_size[1] * self.cell_width, - self.untransformed_width = self.feature_map_size[0] * self.cell_width, + self.untransformed_height = (self.feature_map_size[1] * self.cell_width,) + self.untransformed_width = (self.feature_map_size[0] * self.cell_width,) # Make the exterior rectangle self.exterior_rectangle = GriddedRectangle( color=self.color, @@ -96,6 +101,7 @@ class FeatureMap(VGroup): # Sort points through clockwise rotation of a vector in the xy plane return self.exterior_rectangle.get_corners_dict() + class Convolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer): """Handles rendering a convolutional layer for a nn""" @@ -215,7 +221,7 @@ class Convolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer): fill_color=self.color, fill_opacity=self.fill_opacity, padding=self.padding, - padding_dashed=self.padding_dashed + padding_dashed=self.padding_dashed, ) # Move the feature map feature_map.move_to([0, 0, filter_index * self.filter_spacing]) @@ -231,9 +237,7 @@ class Convolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer): ApplyMethod(self.feature_maps.set_color, self.color), ) - def make_forward_pass_animation( - self, run_time=5, layer_args={}, **kwargs - ): + def make_forward_pass_animation(self, run_time=5, layer_args={}, **kwargs): """Convolution forward pass animation""" # Note: most of this animation is done in the Convolution3DToConvolution3D layer if not self.activation_function is None: diff --git a/manim_ml/neural_network/layers/convolutional_2d_to_convolutional_2d.py b/manim_ml/neural_network/layers/convolutional_2d_to_convolutional_2d.py index 8dd2700..2baab5f 100644 --- a/manim_ml/neural_network/layers/convolutional_2d_to_convolutional_2d.py +++ b/manim_ml/neural_network/layers/convolutional_2d_to_convolutional_2d.py @@ -3,10 +3,11 @@ import numpy as np from manim import * from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer, ThreeDLayer -from manim_ml.gridded_rectangle import GriddedRectangle +from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle from manim.utils.space_ops import rotation_matrix + def get_rotated_shift_vectors(input_layer, normalized=False): """Rotates the shift vectors""" # Make base shift vectors @@ -24,6 +25,7 @@ def get_rotated_shift_vectors(input_layer, normalized=False): return right_shift, down_shift + class Filters(VGroup): """Group for showing a collection of filters connecting two layers""" @@ -144,7 +146,7 @@ class Filters(VGroup): ) # Shift based on the amount of output layer padding rectangle.shift( - self.output_layer.padding[0] * right_shift, + self.output_layer.padding[0] * right_shift, ) rectangle.shift( self.output_layer.padding[1] * down_shift, @@ -446,10 +448,7 @@ class Convolutional2DToConvolutional2D(ConnectiveLayer, ThreeDLayer): # Do last row move right for x_move in range(num_x_moves): # Shift right - shift_animation = ApplyMethod( - filters.shift, - self.stride * right_shift - ) + shift_animation = ApplyMethod(filters.shift, self.stride * right_shift) # shift_animation = self.animate.shift(right_shift) animations.append(shift_animation) # Remove the filters @@ -460,18 +459,14 @@ class Convolutional2DToConvolutional2D(ConnectiveLayer, ThreeDLayer): # Change the output feature map colors change_color_animations = [] change_color_animations.append( - ApplyMethod( - feature_map.set_color, - original_feature_map_color - ) + ApplyMethod(feature_map.set_color, 
original_feature_map_color) ) # Change the input feature map colors input_feature_maps = self.input_layer.feature_maps for input_feature_map in input_feature_maps: change_color_animations.append( ApplyMethod( - input_feature_map.set_color, - original_feature_map_color + input_feature_map.set_color, original_feature_map_color ) ) # Combine the animations diff --git a/manim_ml/neural_network/layers/convolutional_2d_to_max_pooling_2d.py b/manim_ml/neural_network/layers/convolutional_2d_to_max_pooling_2d.py index adc40d9..6190e39 100644 --- a/manim_ml/neural_network/layers/convolutional_2d_to_max_pooling_2d.py +++ b/manim_ml/neural_network/layers/convolutional_2d_to_max_pooling_2d.py @@ -1,6 +1,6 @@ import random from manim import * -from manim_ml.gridded_rectangle import GriddedRectangle +from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle from manim_ml.neural_network.layers.convolutional_2d_to_convolutional_2d import ( get_rotated_shift_vectors, ) @@ -10,6 +10,7 @@ from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer, ThreeD from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer + class Uncreate(Create): def __init__( self, @@ -27,6 +28,7 @@ class Uncreate(Create): **kwargs, ) + class Convolutional2DToMaxPooling2D(ConnectiveLayer, ThreeDLayer): """Feed Forward to Embedding Layer""" @@ -38,7 +40,7 @@ class Convolutional2DToMaxPooling2D(ConnectiveLayer, ThreeDLayer): input_layer: Convolutional2DLayer, output_layer: MaxPooling2DLayer, active_color=ORANGE, - **kwargs + **kwargs, ): super().__init__(input_layer, output_layer, **kwargs) self.active_color = active_color @@ -47,7 +49,7 @@ class Convolutional2DToMaxPooling2D(ConnectiveLayer, ThreeDLayer): self, input_layer: "NeuralNetworkLayer", output_layer: "NeuralNetworkLayer", - **kwargs + **kwargs, ): return super().construct_layer(input_layer, output_layer, **kwargs) @@ -94,7 +96,7 @@ class Convolutional2DToMaxPooling2D(ConnectiveLayer, ThreeDLayer): width=cell_width, fill_opacity=0.7, stroke_width=0.0, - z_index=10 + z_index=10, ) # Move to the correct location kernel_shift_vector = [ @@ -119,10 +121,7 @@ class Convolutional2DToMaxPooling2D(ConnectiveLayer, ThreeDLayer): highlighted_cells.append(cell_rectangle) # Rotate the gridded rectangles so they match the angle # of the conv maps - gridded_rectangle_group = VGroup( - gridded_rectangle, - *highlighted_cells - ) + gridded_rectangle_group = VGroup(gridded_rectangle, *highlighted_cells) gridded_rectangle_group.rotate( ThreeDLayer.rotation_angle, about_point=gridded_rectangle.get_center(), @@ -137,26 +136,20 @@ class Convolutional2DToMaxPooling2D(ConnectiveLayer, ThreeDLayer): create_rectangle = Create( gridded_rectangle, ) - create_gridded_rectangle_animations.append( - create_rectangle - ) + create_gridded_rectangle_animations.append(create_rectangle) # 4. 
Create and fade out highlighted cells create_group = AnimationGroup( *[Create(highlighted_cell) for highlighted_cell in highlighted_cells], - lag_ratio=1.0 + lag_ratio=1.0, ) uncreate_group = AnimationGroup( *[Uncreate(highlighted_cell) for highlighted_cell in highlighted_cells], - lag_ratio=0.0 + lag_ratio=0.0, ) create_and_remove_cell_animation = Succession( - create_group, - Wait(1.0), - uncreate_group - ) - create_and_remove_cell_animations.append( - create_and_remove_cell_animation + create_group, Wait(1.0), uncreate_group ) + create_and_remove_cell_animations.append(create_and_remove_cell_animation) # 5. Move and resize the gridded rectangle to the output # feature maps. output_gridded_rectangle = GriddedRectangle( @@ -178,9 +171,10 @@ class Convolutional2DToMaxPooling2D(ConnectiveLayer, ThreeDLayer): self.output_layer.feature_maps[feature_map_index].copy() ) transform_rectangle = ReplacementTransform( - gridded_rectangle, output_gridded_rectangle, + gridded_rectangle, + output_gridded_rectangle, introducer=True, - remover=True + remover=True, ) transform_gridded_rectangle_animations.append( transform_rectangle, diff --git a/manim_ml/neural_network/layers/embedding.py b/manim_ml/neural_network/layers/embedding.py index f8fb527..14f4be4 100644 --- a/manim_ml/neural_network/layers/embedding.py +++ b/manim_ml/neural_network/layers/embedding.py @@ -1,5 +1,5 @@ from manim import * -from manim_ml.probability import GaussianDistribution +from manim_ml.utils.mobjects.probability import GaussianDistribution from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer diff --git a/manim_ml/neural_network/layers/feed_forward.py b/manim_ml/neural_network/layers/feed_forward.py index c144d9f..262046a 100644 --- a/manim_ml/neural_network/layers/feed_forward.py +++ b/manim_ml/neural_network/layers/feed_forward.py @@ -1,9 +1,12 @@ from manim import * from manim_ml.neural_network.activation_functions import get_activation_function_by_name -from manim_ml.neural_network.activation_functions.activation_function import ActivationFunction +from manim_ml.neural_network.activation_functions.activation_function import ( + ActivationFunction, +) from manim_ml.neural_network.layers.parent_layers import VGroupNeuralNetworkLayer + class FeedForwardLayer(VGroupNeuralNetworkLayer): """Handles rendering a layer for a neural network""" diff --git a/manim_ml/neural_network/layers/image.py b/manim_ml/neural_network/layers/image.py index 8a7fd6f..2d70e7e 100644 --- a/manim_ml/neural_network/layers/image.py +++ b/manim_ml/neural_network/layers/image.py @@ -1,6 +1,6 @@ from manim import * import numpy as np -from manim_ml.image import GrayscaleImageMobject +from manim_ml.utils.mobjects.image import GrayscaleImageMobject from manim_ml.neural_network.layers.parent_layers import NeuralNetworkLayer from PIL import Image diff --git a/manim_ml/neural_network/layers/image_to_convolutional_2d.py b/manim_ml/neural_network/layers/image_to_convolutional_2d.py index f471b09..6d3b769 100644 --- a/manim_ml/neural_network/layers/image_to_convolutional_2d.py +++ b/manim_ml/neural_network/layers/image_to_convolutional_2d.py @@ -7,7 +7,7 @@ from manim_ml.neural_network.layers.parent_layers import ( ThreeDLayer, VGroupNeuralNetworkLayer, ) -from manim_ml.gridded_rectangle import GriddedRectangle +from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle class ImageToConvolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer): diff --git a/manim_ml/neural_network/layers/max_pooling_2d.py 
b/manim_ml/neural_network/layers/max_pooling_2d.py index 581bf22..2128914 100644 --- a/manim_ml/neural_network/layers/max_pooling_2d.py +++ b/manim_ml/neural_network/layers/max_pooling_2d.py @@ -1,5 +1,5 @@ from manim import * -from manim_ml.gridded_rectangle import GriddedRectangle +from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle from manim_ml.neural_network.layers.parent_layers import ( ThreeDLayer, diff --git a/manim_ml/neural_network/layers/max_pooling_2d_to_feed_forward.py b/manim_ml/neural_network/layers/max_pooling_2d_to_feed_forward.py index 1dc00bb..618d122 100644 --- a/manim_ml/neural_network/layers/max_pooling_2d_to_feed_forward.py +++ b/manim_ml/neural_network/layers/max_pooling_2d_to_feed_forward.py @@ -1,8 +1,11 @@ from manim import * -from manim_ml.neural_network.layers.convolutional_2d_to_feed_forward import Convolutional2DToFeedForward +from manim_ml.neural_network.layers.convolutional_2d_to_feed_forward import ( + Convolutional2DToFeedForward, +) from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.max_pooling_2d import MaxPooling2DLayer + class MaxPooling2DToFeedForward(Convolutional2DToFeedForward): """Feed Forward to Embedding Layer""" diff --git a/manim_ml/neural_network/layers/paired_query.py b/manim_ml/neural_network/layers/paired_query.py index abe2309..8bb1963 100644 --- a/manim_ml/neural_network/layers/paired_query.py +++ b/manim_ml/neural_network/layers/paired_query.py @@ -1,6 +1,6 @@ from manim import * from manim_ml.neural_network.layers.parent_layers import NeuralNetworkLayer -from manim_ml.image import GrayscaleImageMobject, LabeledColorImage +from manim_ml.utils.mobjects.image import GrayscaleImageMobject, LabeledColorImage import numpy as np diff --git a/manim_ml/neural_network/layers/triplet.py b/manim_ml/neural_network/layers/triplet.py index 4f7c5e6..12062e6 100644 --- a/manim_ml/neural_network/layers/triplet.py +++ b/manim_ml/neural_network/layers/triplet.py @@ -1,6 +1,6 @@ from manim import * from manim_ml.neural_network.layers import NeuralNetworkLayer -from manim_ml.image import GrayscaleImageMobject, LabeledColorImage +from manim_ml.utils.mobjects.image import GrayscaleImageMobject, LabeledColorImage import numpy as np diff --git a/manim_ml/neural_network/layers/util.py b/manim_ml/neural_network/layers/util.py index 4948352..ef8df2b 100644 --- a/manim_ml/neural_network/layers/util.py +++ b/manim_ml/neural_network/layers/util.py @@ -4,6 +4,7 @@ from manim import * from manim_ml.neural_network.layers.parent_layers import BlankConnective, ThreeDLayer from manim_ml.neural_network.layers import connective_layers_list + def get_connective_layer(input_layer, output_layer): """ Deduces the relevant connective layer @@ -12,8 +13,9 @@ def get_connective_layer(input_layer, output_layer): for candidate_class in connective_layers_list: input_class = candidate_class.input_class output_class = candidate_class.output_class - if isinstance(input_layer, input_class) and \ - isinstance(output_layer, output_class): + if isinstance(input_layer, input_class) and isinstance( + output_layer, output_class + ): connective_layer_class = candidate_class break diff --git a/manim_ml/neural_network/neural_network.py b/manim_ml/neural_network/neural_network.py index 2843c26..895f60a 100644 --- a/manim_ml/neural_network/neural_network.py +++ b/manim_ml/neural_network/neural_network.py @@ -17,12 +17,13 @@ from manim import * from manim_ml.neural_network.layers.parent_layers import ConnectiveLayer, 
ThreeDLayer from manim_ml.neural_network.layers.util import get_connective_layer -from manim_ml.list_group import ListGroup -from manim_ml.neural_network.neural_network_transformations import ( +from manim_ml.utils.mobjects.list_group import ListGroup +from manim_ml.neural_network.animations.neural_network_transformations import ( InsertLayer, RemoveLayer, ) + class NeuralNetwork(Group): """Neural Network Visualization Container Class""" @@ -59,10 +60,7 @@ class NeuralNetwork(Group): # Make the connective layers self.connective_layers, self.all_layers = self._construct_connective_layers() # Make overhead title - self.title = Text( - self.title_text, - font_size=DEFAULT_FONT_SIZE / 2 - ) + self.title = Text(self.title_text, font_size=DEFAULT_FONT_SIZE / 2) self.title.next_to(self, UP, 1.0) self.add(self.title) # Place layers at correct z index @@ -92,11 +90,11 @@ class NeuralNetwork(Group): raise Exception(f"Uncrecognized input layers type: {type(input_layers)}") def add_connection( - self, - start_layer_name, - end_layer_name, + self, + start_layer_name, + end_layer_name, connection_style="default", - connection_position="bottom" + connection_position="bottom", ): """Add connection from start layer to end layer""" assert connection_style in ["default"] @@ -106,7 +104,7 @@ class NeuralNetwork(Group): connection = NetworkConnection( self.input_layers_dict[start_layer_name], self.input_layers_dict[end_layer_name], - arc_direction="down" # TODO generalize this more + arc_direction="down", # TODO generalize this more ) self.connections.append(connection) self.add(connection) @@ -138,20 +136,20 @@ class NeuralNetwork(Group): current_layer.move_to(previous_layer.get_center()) if layout_direction == "left_to_right": - x_shift = previous_layer.get_width() / 2 \ - + current_layer.get_width() / 2 \ - + self.layer_spacing + x_shift = ( + previous_layer.get_width() / 2 + + current_layer.get_width() / 2 + + self.layer_spacing + ) shift_vector = np.array([x_shift, 0, 0]) elif layout_direction == "top_to_bottom": - y_shift = -(( - previous_layer.get_width() / 2 \ - + current_layer.get_width() / 2 - ) + self.layer_spacing) + y_shift = -( + (previous_layer.get_width() / 2 + current_layer.get_width() / 2) + + self.layer_spacing + ) shift_vector = np.array([0, y_shift, 0]) else: - raise Exception( - f"Unrecognized layout direction: {layout_direction}" - ) + raise Exception(f"Unrecognized layout direction: {layout_direction}") current_layer.shift(shift_vector) # After all layers have been placed place their activation functions @@ -159,13 +157,15 @@ class NeuralNetwork(Group): # Place activation function if hasattr(current_layer, "activation_function"): if not current_layer.activation_function is None: - up_movement = np.array([ - 0, - current_layer.get_height() / 2 - + current_layer.activation_function.get_height() / 2 - + 0.5 * self.layer_spacing, - 0, - ]) + up_movement = np.array( + [ + 0, + current_layer.get_height() / 2 + + current_layer.activation_function.get_height() / 2 + + 0.5 * self.layer_spacing, + 0, + ] + ) current_layer.activation_function.move_to( current_layer, ) @@ -259,9 +259,7 @@ class NeuralNetwork(Group): current_layer_args = layer_args[layer] # Perform the forward pass of the current layer layer_forward_pass = layer.make_forward_pass_animation( - layer_args=current_layer_args, - run_time=per_layer_runtime, - **kwargs + layer_args=current_layer_args, run_time=per_layer_runtime, **kwargs ) # Animate a forward pass for incoming connections connection_input_pass = AnimationGroup() @@ 
-272,14 +270,12 @@ class NeuralNetwork(Group): connection_input_pass = ShowPassingFlash( connection, run_time=layer_forward_pass.run_time, - time_width=0.2 + time_width=0.2, ) break layer_forward_pass = AnimationGroup( - layer_forward_pass, - connection_input_pass, - lag_ratio=0.0 + layer_forward_pass, connection_input_pass, lag_ratio=0.0 ) all_animations.append(layer_forward_pass) # Make the animation group diff --git a/manim_ml/one_to_one_sync.py b/manim_ml/one_to_one_sync.py deleted file mode 100644 index 4da14b4..0000000 --- a/manim_ml/one_to_one_sync.py +++ /dev/null @@ -1,11 +0,0 @@ -""" - Module for handling syncing two animations one to one. - The goal here is to zip up two classes and their respective animations, - and create a joint class with the same animations that runs the animations - for both classes at the same time. This way we can connect two isomorphic - views of the same concept and visualize them at the same time. -""" - - -class OneToOneSync: - pass diff --git a/manim_ml/utils/mobjects/connections.py b/manim_ml/utils/mobjects/connections.py index 89b15f9..aead4a9 100644 --- a/manim_ml/utils/mobjects/connections.py +++ b/manim_ml/utils/mobjects/connections.py @@ -1,30 +1,27 @@ import numpy as np from manim import * + class NetworkConnection(VGroup): """ - This class allows for creating connections - between locations in a network + This class allows for creating connections + between locations in a network """ - direction_vector_map = { - "up": UP, - "down": DOWN, - "left": LEFT, - "right": RIGHT - } + + direction_vector_map = {"up": UP, "down": DOWN, "left": LEFT, "right": RIGHT} def __init__( - self, - start_mobject, - end_mobject, + self, + start_mobject, + end_mobject, arc_direction="straight", buffer=0.05, arc_distance=0.3, stroke_width=2.0, color=WHITE, - active_color=ORANGE + active_color=ORANGE, ): - """Creates an arrow with right angles in it connecting + """Creates an arrow with right angles in it connecting two mobjects. 
Parameters @@ -72,34 +69,28 @@ class NetworkConnection(VGroup): # Make an arrow arrow_line = Line( left_mobject.get_right() + np.array([self.buffer, 0.0, 0.0]), - right_mobject.get_left() + np.array([-1 * self.buffer, 0.0, 0.0]) - ) - arrow = Arrow( - arrow_line, - color=self.color, - stroke_width=self.stroke_width + right_mobject.get_left() + np.array([-1 * self.buffer, 0.0, 0.0]), ) + arrow = Arrow(arrow_line, color=self.color, stroke_width=self.stroke_width) self.straight_arrow = arrow self.add(arrow) else: # Figure out the direction of the arc - direction_vector = NetworkConnection.direction_vector_map[self.arc_direction] + direction_vector = NetworkConnection.direction_vector_map[ + self.arc_direction + ] # Make the start arc piece - start_line_start = left_mobject.get_critical_point( - direction_vector - ) + start_line_start = left_mobject.get_critical_point(direction_vector) start_line_start += direction_vector * self.buffer start_line_end = start_line_start + direction_vector * self.arc_distance self.start_line = Line( start_line_start, start_line_end, color=self.color, - stroke_width=self.stroke_width + stroke_width=self.stroke_width, ) # Make the end arc piece with an arrow - end_line_end = right_mobject.get_critical_point( - direction_vector - ) + end_line_end = right_mobject.get_critical_point(direction_vector) end_line_end += direction_vector * self.buffer end_line_start = end_line_end + direction_vector * self.arc_distance self.end_arrow = Arrow( @@ -108,14 +99,14 @@ class NetworkConnection(VGroup): color=WHITE, fill_color=WHITE, stroke_opacity=1.0, - buff=0.0 + buff=0.0, ) # Make the middle arc piece self.middle_line = Line( start_line_end, end_line_start, color=self.color, - stroke_width=self.stroke_width + stroke_width=self.stroke_width, ) # Add the mobjects self.add( @@ -130,23 +121,23 @@ class NetworkConnection(VGroup): if self.arc_direction == "straight": return ShowPassingFlash( self.straight_arrow.copy().set_color(self.active_color), - time_width=time_width + time_width=time_width, ) else: # Animate the start line start_line_animation = ShowPassingFlash( self.start_line.copy().set_color(self.active_color), - time_width=time_width + time_width=time_width, ) # Animate the middle line middle_line_animation = ShowPassingFlash( self.middle_line.copy().set_color(self.active_color), - time_width=time_width + time_width=time_width, ) # Animate the end line end_line_animation = ShowPassingFlash( self.end_arrow.copy().set_color(self.active_color), - time_width=time_width + time_width=time_width, ) return AnimationGroup( @@ -154,5 +145,5 @@ class NetworkConnection(VGroup): middle_line_animation, end_line_animation, lag_ratio=1.0, - run_time=run_time - ) \ No newline at end of file + run_time=run_time, + ) diff --git a/manim_ml/gridded_rectangle.py b/manim_ml/utils/mobjects/gridded_rectangle.py similarity index 96% rename from manim_ml/gridded_rectangle.py rename to manim_ml/utils/mobjects/gridded_rectangle.py index d0d80ab..e883365 100644 --- a/manim_ml/gridded_rectangle.py +++ b/manim_ml/utils/mobjects/gridded_rectangle.py @@ -14,7 +14,7 @@ class GriddedRectangle(VGroup): close_new_points=True, grid_xstep=None, grid_ystep=None, - grid_stroke_width=0.0, # DEFAULT_STROKE_WIDTH/2, + grid_stroke_width=0.0, # DEFAULT_STROKE_WIDTH/2, grid_stroke_color=ORANGE, grid_stroke_opacity=1.0, stroke_width=2.0, @@ -48,7 +48,7 @@ class GriddedRectangle(VGroup): fill_color=color, stroke_opacity=0.0, fill_opacity=fill_opacity, - shade_in_3d=True + shade_in_3d=True, ) self.rectangle = 
no_border_rectangle border_rectangle = Rectangle( @@ -58,7 +58,7 @@ class GriddedRectangle(VGroup): fill_color=color, fill_opacity=fill_opacity, shade_in_3d=True, - stroke_width=stroke_width + stroke_width=stroke_width, ) self.dotted_lines = DashedVMobject( border_rectangle, @@ -73,7 +73,7 @@ class GriddedRectangle(VGroup): stroke_width=stroke_width, fill_color=color, fill_opacity=fill_opacity, - shade_in_3d=True + shade_in_3d=True, ) self.add(self.rectangle) # Make grid lines @@ -123,7 +123,7 @@ class GriddedRectangle(VGroup): stroke_color=self.grid_stroke_color, stroke_width=self.grid_stroke_width, stroke_opacity=self.grid_stroke_opacity, - shade_in_3d=True + shade_in_3d=True, ) for i in range(1, count) ) diff --git a/manim_ml/image.py b/manim_ml/utils/mobjects/image.py similarity index 100% rename from manim_ml/image.py rename to manim_ml/utils/mobjects/image.py diff --git a/manim_ml/list_group.py b/manim_ml/utils/mobjects/list_group.py similarity index 100% rename from manim_ml/list_group.py rename to manim_ml/utils/mobjects/list_group.py diff --git a/manim_ml/probability.py b/manim_ml/utils/mobjects/probability.py similarity index 100% rename from manim_ml/probability.py rename to manim_ml/utils/mobjects/probability.py diff --git a/manim_ml/utils/testing/frames_comparison.py b/manim_ml/utils/testing/frames_comparison.py index 5236c2f..2cca206 100644 --- a/manim_ml/utils/testing/frames_comparison.py +++ b/manim_ml/utils/testing/frames_comparison.py @@ -27,6 +27,7 @@ _tests_root_dir_path = Path(__file__).absolute().parents[2] print(f"Tests root path: {_tests_root_dir_path}") PATH_CONTROL_DATA = _tests_root_dir_path / Path("control_data", "graphical_units_data") + def frames_comparison( func=None, *, @@ -254,4 +255,4 @@ def _config_test(last_frame: bool) -> ManimConfig: else "config_graphical_tests_multiframes.cfg" ), ), - ) \ No newline at end of file + ) diff --git a/setup.py b/setup.py index 0841d02..e42ac64 100644 --- a/setup.py +++ b/setup.py @@ -2,7 +2,7 @@ from setuptools import setup, find_packages setup( name="manim_ml", - version="0.0.15", + version="0.0.16", description=(" Machine Learning Animations in python using Manim."), packages=find_packages(), ) diff --git a/tests/conftest.py b/tests/conftest.py index a01d194..b228be2 100644 --- a/tests/conftest.py +++ b/tests/conftest.py @@ -1,5 +1,3 @@ - - from __future__ import annotations import os @@ -9,6 +7,7 @@ import pytest from manim import config, tempconfig + def pytest_addoption(parser): parser.addoption( "--skip_slow", diff --git a/tests/test_activation_function.py b/tests/test_activation_function.py index 92b132f..f391048 100644 --- a/tests/test_activation_function.py +++ b/tests/test_activation_function.py @@ -13,15 +13,19 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 + class CombinedScene(ThreeDScene): def construct(self): image = Image.open("../assets/mnist/digit.jpeg") numpy_image = np.asarray(image) # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ ImageLayer(numpy_image, height=1.5), Convolutional2DLayer(1, 7, filter_spacing=0.32), - Convolutional2DLayer(3, 5, 3, filter_spacing=0.32, activation_function="ReLU"), + Convolutional2DLayer( + 3, 5, 3, filter_spacing=0.32, activation_function="ReLU" + ), FeedForwardLayer(3, activation_function="Sigmoid"), ], layer_spacing=0.25, @@ -32,4 +36,4 @@ class CombinedScene(ThreeDScene): # Play animation forward_pass = nn.make_forward_pass_animation() self.wait(1) - self.play(forward_pass, run_time=30) \ No newline at end of file + 
self.play(forward_pass, run_time=30) diff --git a/tests/test_camera_move.py b/tests/test_camera_move.py index 16d3e87..77b224b 100644 --- a/tests/test_camera_move.py +++ b/tests/test_camera_move.py @@ -12,14 +12,11 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 + class NeuralNetworkScene(ThreeDScene): """Test Scene for the Neural Network""" - def play_camera_follow_forward_pass( - self, - neural_network, - buffer=0.1 - ): + def play_camera_follow_forward_pass(self, neural_network, buffer=0.1): per_layer_animations = neural_network.make_forward_pass_animation( return_per_layer_animations=True ) @@ -32,7 +29,7 @@ class NeuralNetworkScene(ThreeDScene): current_layer = all_layers[layer_index] next_layer = all_layers[layer_index + 1] group = Group(prev_layer, current_layer, next_layer) - + max_width = max(max_width, group.width) max_height = max(max_height, group.height) @@ -46,7 +43,8 @@ class NeuralNetworkScene(ThreeDScene): # Make the Layer object image = Image.open("../assets/mnist/digit.jpeg") numpy_image = np.asarray(image) - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ ImageLayer(numpy_image, height=1.5), Convolutional2DLayer(1, 7, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), @@ -60,4 +58,4 @@ class NeuralNetworkScene(ThreeDScene): # Make Animation self.add(nn) # self.play(Create(nn)) - self.play_camera_follow_forward_pass(nn) \ No newline at end of file + self.play_camera_follow_forward_pass(nn) diff --git a/tests/test_conv_padding.py b/tests/test_conv_padding.py index 876f566..0f871c4 100644 --- a/tests/test_conv_padding.py +++ b/tests/test_conv_padding.py @@ -14,23 +14,24 @@ config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 -class CombinedScene(ThreeDScene): +class CombinedScene(ThreeDScene): def construct(self): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer( - num_feature_maps=1, - feature_map_size=7, + num_feature_maps=1, + feature_map_size=7, padding=1, - padding_dashed=True + padding_dashed=True, ), Convolutional2DLayer( - num_feature_maps=3, - feature_map_size=7, + num_feature_maps=3, + feature_map_size=7, filter_size=3, padding=0, - padding_dashed=False + padding_dashed=False, ), FeedForwardLayer(3), ], @@ -44,23 +45,22 @@ class CombinedScene(ThreeDScene): self.wait(1) self.play(forward_pass, run_time=30) + @frames_comparison def test_ConvPadding(scene): # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ Convolutional2DLayer( - num_feature_maps=1, - feature_map_size=7, - padding=1, - padding_dashed=True + num_feature_maps=1, feature_map_size=7, padding=1, padding_dashed=True ), Convolutional2DLayer( - num_feature_maps=3, - feature_map_size=7, + num_feature_maps=3, + feature_map_size=7, filter_size=3, padding=1, filter_spacing=0.35, - padding_dashed=False + padding_dashed=False, ), FeedForwardLayer(3), ], @@ -71,4 +71,4 @@ def test_ConvPadding(scene): scene.add(nn) # Play animation forward_pass = nn.make_forward_pass_animation() - scene.play(forward_pass, run_time=30) \ No newline at end of file + scene.play(forward_pass, run_time=30) diff --git a/tests/test_convolutional_2d_layer.py b/tests/test_convolutional_2d_layer.py index bdbebd1..5887552 100644 --- a/tests/test_convolutional_2d_layer.py +++ b/tests/test_convolutional_2d_layer.py @@ -6,6 +6,7 @@ from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer from manim_ml.neural_network.layers.image import ImageLayer from manim_ml.neural_network.neural_network import NeuralNetwork + 
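# NOTE (illustrative, not part of the patch): the scenes below, like the camera and
# padding tests above, construct Convolutional2DLayer positionally, e.g.
# Convolutional2DLayer(3, 5, 3) reads as (num_feature_maps, feature_map_size,
# filter_size). A minimal sketch with those arguments spelled out as keywords,
# assuming the import paths used by the tests in this patch; the scene name is
# hypothetical.

from manim import *

from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer
from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
from manim_ml.neural_network.neural_network import NeuralNetwork


class ConvKeywordSketch(ThreeDScene):
    def construct(self):
        nn = NeuralNetwork(
            [
                Convolutional2DLayer(
                    num_feature_maps=1,
                    feature_map_size=7,
                    filter_spacing=0.32,
                ),
                Convolutional2DLayer(
                    num_feature_maps=3,
                    feature_map_size=5,
                    filter_size=3,
                    filter_spacing=0.32,
                ),
                FeedForwardLayer(3),
            ],
            layer_spacing=0.25,
        )
        nn.move_to(ORIGIN)
        self.add(nn)
        # Animate a single forward pass through all layers
        self.play(nn.make_forward_pass_animation())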
class SingleConvolutionalLayerScene(ThreeDScene): def construct(self): # Make nn @@ -21,6 +22,7 @@ class SingleConvolutionalLayerScene(ThreeDScene): ) # self.play(nn.make_forward_pass_animation(run_time=5)) + class Simple3DConvScene(ThreeDScene): def construct(self): """ @@ -33,16 +35,8 @@ class Simple3DConvScene(ThreeDScene): """ # Make nn layers = [ - Convolutional2DLayer( - num_feature_maps=1, - feature_map_size=3, - filter_size=3 - ), - Convolutional2DLayer( - num_feature_maps=1, - feature_map_size=3, - filter_size=3 - ), + Convolutional2DLayer(num_feature_maps=1, feature_map_size=3, filter_size=3), + Convolutional2DLayer(num_feature_maps=1, feature_map_size=3, filter_size=3), ] nn = NeuralNetwork(layers) # Center the nn @@ -52,18 +46,21 @@ class Simple3DConvScene(ThreeDScene): # self.set_camera_orientation(phi=280*DEGREES, theta=-10*DEGREES, gamma=90*DEGREES) self.play(nn.make_forward_pass_animation(run_time=30)) + # Make the specific scene config.pixel_height = 1200 config.pixel_width = 1900 config.frame_height = 6.0 config.frame_width = 6.0 + class CombinedScene(ThreeDScene): def construct(self): image = Image.open("../assets/mnist/digit.jpeg") numpy_image = np.asarray(image) # Make nn - nn = NeuralNetwork([ + nn = NeuralNetwork( + [ ImageLayer(numpy_image, height=1.5), Convolutional2DLayer(1, 7, filter_spacing=0.32), Convolutional2DLayer(3, 5, 3, filter_spacing=0.32), @@ -79,4 +76,4 @@ class CombinedScene(ThreeDScene): # Play animation forward_pass = nn.make_forward_pass_animation() self.wait(1) - self.play(forward_pass) \ No newline at end of file + self.play(forward_pass) diff --git a/tests/test_decision_tree.py b/tests/test_decision_tree.py index 1cc9601..290fbda 100644 --- a/tests/test_decision_tree.py +++ b/tests/test_decision_tree.py @@ -9,6 +9,7 @@ from sklearn import datasets import sklearn import matplotlib.pyplot as plt + def learn_iris_decision_tree(iris): decision_tree = DecisionTreeClassifier( random_state=1, max_depth=3, max_leaf_nodes=6 @@ -17,11 +18,13 @@ def learn_iris_decision_tree(iris): # output the decisioin tree in some format return decision_tree + def make_sklearn_tree(dataset, max_tree_depth=3): tree = learn_iris_decision_tree(dataset) feature_names = dataset.feature_names[0:2] return tree, tree.tree_ + class DecisionTreeScene(Scene): def construct(self): """Makes a decision tree object""" @@ -44,6 +47,7 @@ class DecisionTreeScene(Scene): self.play(create_decision_tree) # self.play(create_decision_tree) + class SurfacePlot(Scene): def construct(self): iris_dataset = datasets.load_iris() diff --git a/tests/test_feed_forward.py b/tests/test_feed_forward.py index 29dd24c..0bce123 100644 --- a/tests/test_feed_forward.py +++ b/tests/test_feed_forward.py @@ -5,26 +5,21 @@ from manim_ml.neural_network import NeuralNetwork, FeedForwardLayer __module_test__ = "feed_forward" + @frames_comparison def test_FeedForwardScene(scene): """Tests the appearance of a feed forward network""" - nn = NeuralNetwork([ - FeedForwardLayer(3), - FeedForwardLayer(5), - FeedForwardLayer(3) - ]) + nn = NeuralNetwork([FeedForwardLayer(3), FeedForwardLayer(5), FeedForwardLayer(3)]) scene.add(nn) -class FeedForwardScene(Scene): +class FeedForwardScene(Scene): def construct(self): - nn = NeuralNetwork([ - FeedForwardLayer(3), - FeedForwardLayer(5), - FeedForwardLayer(3) - ]) + nn = NeuralNetwork( + [FeedForwardLayer(3), FeedForwardLayer(5), FeedForwardLayer(3)] + ) self.add(nn) - self.play(nn.make_forward_pass_animation()) \ No newline at end of file + 
self.play(nn.make_forward_pass_animation())
diff --git a/tests/test_gridded_rectangle.py b/tests/test_gridded_rectangle.py
index 9978722..b0cb07b 100644
--- a/tests/test_gridded_rectangle.py
+++ b/tests/test_gridded_rectangle.py
@@ -1,12 +1,8 @@
 from manim import *
-from manim_ml.gridded_rectangle import GriddedRectangle
+from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle
+
 
 class TestGriddedRectangleScene(ThreeDScene):
-
-    def construct(self):
-        rect = GriddedRectangle(
-            color=ORANGE,
-            width=3,
-            height=3
-        )
-        self.add(rect)
\ No newline at end of file
+    def construct(self):
+        rect = GriddedRectangle(color=ORANGE, width=3, height=3)
+        self.add(rect)
diff --git a/tests/test_image_homotopy.py b/tests/test_image_homotopy.py
index ca223ea..8e6d7f2 100644
--- a/tests/test_image_homotopy.py
+++ b/tests/test_image_homotopy.py
@@ -1,7 +1,7 @@
 from PIL import Image
 from manim import *
 
-from manim_ml.image import GrayscaleImageMobject
+from manim_ml.utils.mobjects.image import GrayscaleImageMobject
 from manim_ml.neural_network.layers.parent_layers import ThreeDLayer
 
diff --git a/tests/test_max_pool.py b/tests/test_max_pool.py
index f39a431..61502cc 100644
--- a/tests/test_max_pool.py
+++ b/tests/test_max_pool.py
@@ -14,12 +14,14 @@ config.pixel_width = 1900
 config.frame_height = 6.0
 config.frame_width = 6.0
 
+
 class CombinedScene(ThreeDScene):
     def construct(self):
         image = Image.open("../assets/mnist/digit.jpeg")
         numpy_image = np.asarray(image)
         # Make nn
-        nn = NeuralNetwork([
+        nn = NeuralNetwork(
+            [
                 ImageLayer(numpy_image, height=1.5),
                 Convolutional2DLayer(1, 8, filter_spacing=0.32),
                 Convolutional2DLayer(3, 6, 3, filter_spacing=0.32),
@@ -37,12 +39,14 @@ class CombinedScene(ThreeDScene):
         self.wait(1)
         self.play(forward_pass)
 
+
 class SmallNetwork(ThreeDScene):
     def construct(self):
         image = Image.open("../assets/mnist/digit.jpeg")
         numpy_image = np.asarray(image)
         # Make nn
-        nn = NeuralNetwork([
+        nn = NeuralNetwork(
+            [
                 ImageLayer(numpy_image, height=1.5),
                 Convolutional2DLayer(1, 8, filter_spacing=0.32),
                 MaxPooling2DLayer(kernel_size=2),
@@ -55,4 +59,4 @@ class SmallNetwork(ThreeDScene):
         # Play animation
         forward_pass = nn.make_forward_pass_animation()
         self.wait(1)
-        self.play(forward_pass)
\ No newline at end of file
+        self.play(forward_pass)
diff --git a/tests/test_mcmc.py b/tests/test_mcmc.py
index 2e4a332..745c42d 100644
--- a/tests/test_mcmc.py
+++ b/tests/test_mcmc.py
@@ -1,31 +1,33 @@
 from manim import *
-from manim_ml.diffusion.mcmc import MCMCAxes, MultidimensionalGaussianPosterior, metropolis_hastings_sampler
+from manim_ml.diffusion.mcmc import (
+    MCMCAxes,
+    MultidimensionalGaussianPosterior,
+    metropolis_hastings_sampler,
+)
+
 # Make the specific scene
 config.pixel_height = 1200
 config.pixel_width = 1200
 config.frame_height = 12.0
 config.frame_width = 12.0
 
+
 def test_metropolis_hastings_sampler(iterations=100):
     samples, _, candidates = metropolis_hastings_sampler(iterations=iterations)
     assert samples.shape == (iterations, 2)
 
-class MCMCTest(Scene):
 
+class MCMCTest(Scene):
     def construct(self):
         axes = MCMCAxes()
         self.play(Create(axes))
         gaussian_posterior = MultidimensionalGaussianPosterior(
-            mu=np.array([0.0, 0.0]),
-            var=np.array([4.0, 2.0])
-        )
-        show_gaussian_animation = axes.show_ground_truth_gaussian(
-            gaussian_posterior
+            mu=np.array([0.0, 0.0]), var=np.array([4.0, 2.0])
         )
+        show_gaussian_animation = axes.show_ground_truth_gaussian(gaussian_posterior)
         self.play(show_gaussian_animation)
         chain_sampling_animation = axes.visualize_metropolis_hastings_chain_sampling(
-            log_prob_fn=gaussian_posterior,
-            sampling_kwargs={"iterations": 1000}
+            log_prob_fn=gaussian_posterior, sampling_kwargs={"iterations": 1000}
         )
         self.play(chain_sampling_animation)
diff --git a/tests/test_nested_neural_networks.py b/tests/test_nested_neural_networks.py
index 51a8eff..57842c8 100644
--- a/tests/test_nested_neural_networks.py
+++ b/tests/test_nested_neural_networks.py
@@ -1,4 +1,4 @@
 """
 The purpose of this test is to ensure that it is possible to have nested
 neural network layers.
-"""
\ No newline at end of file
+"""
diff --git a/tests/test_residual_connection.py b/tests/test_residual_connection.py
index b64100e..f78db77 100644
--- a/tests/test_residual_connection.py
+++ b/tests/test_residual_connection.py
@@ -9,49 +9,53 @@ import numpy as np
 
 __module_test__ = "residual"
 
+
 @frames_comparison
 def test_ResidualConnectionScene(scene):
     """Tests the appearance of a residual connection"""
-    nn = NeuralNetwork({
-        "layer1": FeedForwardLayer(3),
-        "layer2": FeedForwardLayer(5),
-        "layer3": FeedForwardLayer(3)
-    })
+    nn = NeuralNetwork(
+        {
+            "layer1": FeedForwardLayer(3),
+            "layer2": FeedForwardLayer(5),
+            "layer3": FeedForwardLayer(3),
+        }
+    )
 
     scene.add(nn)
 
+
 # Make the specific scene
 config.pixel_height = 1200
 config.pixel_width = 1900
 config.frame_height = 6.0
 config.frame_width = 6.0
 
-class FeedForwardScene(Scene):
 
+class FeedForwardScene(Scene):
     def construct(self):
-        nn = NeuralNetwork({
-            "layer1": FeedForwardLayer(4),
-            "layer2": FeedForwardLayer(4),
-            "layer3": FeedForwardLayer(4)
-        },
-        layer_spacing=0.45)
+        nn = NeuralNetwork(
+            {
+                "layer1": FeedForwardLayer(4),
+                "layer2": FeedForwardLayer(4),
+                "layer3": FeedForwardLayer(4),
+            },
+            layer_spacing=0.45,
+        )
 
         nn.add_connection("layer1", "layer3")
 
         self.add(nn)
 
-        self.play(
-            nn.make_forward_pass_animation(),
-            run_time=8
-        )
+        self.play(nn.make_forward_pass_animation(), run_time=8)
+
 
 class ConvScene(ThreeDScene):
-
     def construct(self):
         image = Image.open("../assets/mnist/digit.jpeg")
         numpy_image = np.asarray(image)
 
-        nn = NeuralNetwork({
+        nn = NeuralNetwork(
+            {
                 "layer1": Convolutional2DLayer(1, 5, padding=1),
                 "layer2": Convolutional2DLayer(1, 5, 3, padding=1),
                 "layer3": Convolutional2DLayer(1, 5, 3, padding=1),
@@ -63,7 +67,4 @@ class ConvScene(ThreeDScene):
 
         self.add(nn)
 
-        self.play(
-            nn.make_forward_pass_animation(),
-            run_time=8
-        )
\ No newline at end of file
+        self.play(nn.make_forward_pass_animation(), run_time=8)
diff --git a/tests/test_show_gaussian.py b/tests/test_show_gaussian.py
index d838aea..7b35aa9 100644
--- a/tests/test_show_gaussian.py
+++ b/tests/test_show_gaussian.py
@@ -1,14 +1,12 @@
 from manim import *
-from manim_ml.probability import GaussianDistribution
+from manim_ml.utils.mobjects.probability import GaussianDistribution
+
 
 class TestShowGaussian(Scene):
-
     def construct(self):
         axes = Axes()
         self.add(axes)
         gaussian = GaussianDistribution(
-            axes,
-            mean=np.array([0.0, 0.0]),
-            cov=np.array([[2.0, 0.0], [0.0, 1.0]])
+            axes, mean=np.array([0.0, 0.0]), cov=np.array([[2.0, 0.0], [0.0, 1.0]])
        )
         self.add(gaussian)
diff --git a/tests/test_succession.py b/tests/test_succession.py
index bc756e0..6e41bea 100644
--- a/tests/test_succession.py
+++ b/tests/test_succession.py
@@ -1,7 +1,7 @@
 from manim import *
 
-class TestSuccession(Scene):
 
+class TestSuccession(Scene):
     def construct(self):
         white_dot = Dot(color=WHITE)
         white_dot.shift(UP)
@@ -16,4 +16,4 @@ class TestSuccession(Scene):
                 Wait(1),
                 Uncreate(red_dot),
             )
-        )
\ No newline at end of file
+        )
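
# NOTE (illustrative, not part of the patch): the renames above relocate the
# general-purpose mobjects from the package root into manim_ml.utils.mobjects.
# A minimal sketch of the new import paths, mirroring the constructors
# exercised in tests/test_gridded_rectangle.py and tests/test_show_gaussian.py;
# the combined scene name is hypothetical.

from manim import *

from manim_ml.utils.mobjects.gridded_rectangle import GriddedRectangle
from manim_ml.utils.mobjects.probability import GaussianDistribution


class RelocatedUtilsSketch(Scene):
    def construct(self):
        # Previously imported from manim_ml.gridded_rectangle
        rect = GriddedRectangle(color=ORANGE, width=3, height=3)
        rect.shift(LEFT * 3)
        self.add(rect)
        # Previously imported from manim_ml.probability; np is re-exported by manim
        axes = Axes()
        gaussian = GaussianDistribution(
            axes, mean=np.array([0.0, 0.0]), cov=np.array([[2.0, 0.0], [0.0, 1.0]])
        )
        self.add(axes, gaussian)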