diff --git a/Readme.md b/Readme.md
index fa7590e..5f5bacb 100644
--- a/Readme.md
+++ b/Readme.md
@@ -30,27 +30,27 @@ Checkout the ```examples``` directory for some example videos with source code.
 
 ### Convolutional Neural Network
 
-This is a visualization of a Convolutional Neural Network.
+This is a visualization of a Convolutional Neural Network. The code needed to generate this visualization is shown below.
 
-
+https://user-images.githubusercontent.com/14181830/214898495-ff40c679-3f79-4954-b6fc-13992a5024cb.mp4
 
 ```python
-from manim import *
-from PIL import Image
+from manim import *
 
 from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer
 from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
-from manim_ml.neural_network.layers.image import ImageLayer
 from manim_ml.neural_network.neural_network import NeuralNetwork
 
-class ConvolutinoalNetworkScene(Scene):
+# Make the specific scene
+config.pixel_height = 700
+config.pixel_width = 1900
+config.frame_height = 7.0
+config.frame_width = 7.0
 
+class CombinedScene(ThreeDScene):
     def construct(self):
-        image = Image.open(ROOT_DIR / "assets/mnist/digit.jpeg")
-        numpy_image = np.asarray(image)
         # Make nn
         nn = NeuralNetwork([
-                ImageLayer(numpy_image, height=1.5),
                 Convolutional2DLayer(1, 7, 3, filter_spacing=0.32),
                 Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
                 Convolutional2DLayer(5, 3, 3, filter_spacing=0.18),
@@ -62,6 +62,18 @@ class ConvolutinoalNetworkScene(Scene):
         # Center the nn
         nn.move_to(ORIGIN)
         self.add(nn)
-        self.play(neural_network.make_forward_pass_animation())
+        # Play animation
+        forward_pass = nn.make_forward_pass_animation()
+        self.play(forward_pass)
 ```
+You can generate the video above by copying the code into a file called `example.py` and running the following from your command line (assuming everything is installed properly):
+
+```
+manim -pql example.py
+```
+The above generates a low-resolution rendering; you can improve the resolution (at the cost of slower rendering) by running:
+
+```
+manim -pqh example.py
+```
 
diff --git a/assets/ReadmeVideo.mp4 b/assets/ReadmeVideo.mp4
new file mode 100644
index 0000000..c76c4ef
Binary files /dev/null and b/assets/ReadmeVideo.mp4 differ
diff --git a/examples/readme_example/example.py b/examples/readme_example/example.py
new file mode 100644
index 0000000..cf5e232
--- /dev/null
+++ b/examples/readme_example/example.py
@@ -0,0 +1,30 @@
+from manim import *
+
+from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer
+from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
+from manim_ml.neural_network.neural_network import NeuralNetwork
+
+# Make the specific scene
+config.pixel_height = 700
+config.pixel_width = 1900
+config.frame_height = 7.0
+config.frame_width = 7.0
+
+class CombinedScene(ThreeDScene):
+    def construct(self):
+        # Make nn
+        nn = NeuralNetwork([
+                Convolutional2DLayer(1, 7, 3, filter_spacing=0.32),
+                Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
+                Convolutional2DLayer(5, 3, 3, filter_spacing=0.18),
+                FeedForwardLayer(3),
+                FeedForwardLayer(3),
+            ],
+            layer_spacing=0.25,
+        )
+        # Center the nn
+        nn.move_to(ORIGIN)
+        self.add(nn)
+        # Play animation
+        forward_pass = nn.make_forward_pass_animation()
+        self.play(forward_pass)
diff --git a/examples/readme_example/old_example.py b/examples/readme_example/old_example.py
new file mode 100644
index 0000000..bfdaa0d
--- /dev/null
+++ b/examples/readme_example/old_example.py
@@ -0,0 +1,25 @@
+from manim import *
+from PIL import Image
+
+from manim_ml.neural_network.layers.convolutional_2d import Convolutional2DLayer
+from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
+from manim_ml.neural_network.layers.image import ImageLayer
+from manim_ml.neural_network.neural_network import NeuralNetwork
+
+class ConvolutionalNetworkScene(Scene):
+
+    def construct(self):
+        # Make nn
+        nn = NeuralNetwork([
+                Convolutional2DLayer(1, 7, 3, filter_spacing=0.32),
+                Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
+                Convolutional2DLayer(5, 3, 3, filter_spacing=0.18),
+                FeedForwardLayer(3),
+                FeedForwardLayer(3),
+            ],
+            layer_spacing=0.25,
+        )
+        # Center the nn
+        nn.move_to(ORIGIN)
+        self.add(nn)
+        self.play(nn.make_forward_pass_animation())
\ No newline at end of file