Mirror of https://github.com/helblazer811/ManimML.git

Commit: Refactored parameters for Convolutional2D to make them cleaner looking.

@@ -21,9 +21,9 @@ def make_code_snippet():
     # Make nn
     nn = NeuralNetwork([
         ImageLayer(numpy_image, height=1.5),
-        Convolutional2DLayer(1, 7, 7, 3, 3),
-        Convolutional2DLayer(3, 5, 5, 3, 3),
-        Convolutional2DLayer(5, 3, 3, 1, 1),
+        Convolutional2DLayer(1, 7, 3),
+        Convolutional2DLayer(3, 5, 3),
+        Convolutional2DLayer(5, 3, 1),
         FeedForwardLayer(3),
         FeedForwardLayer(3),
     ])
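
Note on the new call signature (an editorial gloss, not part of the commit): per the refactored __init__ further down in this diff, the three positional arguments are now num_feature_maps, feature_map_size, and filter_size, where an int is shorthand for a square size and an (x, y) tuple is also accepted. A minimal sketch, with the import path inferred from this diff rather than confirmed by it:

    # Import path assumed for illustration only.
    from manim_ml.neural_network.layers.convolutional2d import Convolutional2DLayer

    layer = Convolutional2DLayer(1, 7, 3)           # 1 map, 7x7 feature map, 3x3 filter
    same = Convolutional2DLayer(1, (7, 7), (3, 3))  # tuples allow non-square sizes
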
@@ -54,9 +54,9 @@ class CombinedScene(ThreeDScene):
         nn = NeuralNetwork(
             [
                 ImageLayer(numpy_image, height=1.5),
-                Convolutional2DLayer(1, 7, 7, 3, 3, filter_spacing=0.32),
-                Convolutional2DLayer(3, 5, 5, 3, 3, filter_spacing=0.32),
-                Convolutional2DLayer(5, 3, 3, 1, 1, filter_spacing=0.18),
+                Convolutional2DLayer(1, 7, 3, filter_spacing=0.32),
+                Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
+                Convolutional2DLayer(5, 3, 1, filter_spacing=0.18),
                 FeedForwardLayer(3),
                 FeedForwardLayer(3),
             ],
@@ -52,8 +52,8 @@ class CombinedScene(ThreeDScene):
         nn = NeuralNetwork(
             [
                 ImageLayer(numpy_image, height=1.5),
-                Convolutional2DLayer(1, 5, 5, 1, 1, filter_spacing=0.32),
-                Convolutional2DLayer(4, 5, 5, 1, 1, filter_spacing=0.32),
+                Convolutional2DLayer(1, 5, 1, filter_spacing=0.32),
+                Convolutional2DLayer(4, 5, 1, filter_spacing=0.32),
                 Convolutional2DLayer(2, 5, 5, filter_spacing=0.32),
             ],
             layer_spacing=0.4,
@@ -1,3 +1,4 @@
+from typing import Union
 import numpy as np
 from manim import *
 
@@ -13,10 +14,8 @@ class Convolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
     def __init__(
         self,
         num_feature_maps,
-        feature_map_width,
-        feature_map_height,
-        filter_width=None,
-        filter_height=None,
+        feature_map_size=None,
+        filter_size=None,
         cell_width=0.2,
         filter_spacing=0.1,
         color=BLUE,
@@ -29,11 +28,15 @@ class Convolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
     ):
         super().__init__(**kwargs)
         self.num_feature_maps = num_feature_maps
-        self.feature_map_height = feature_map_height
         self.filter_color = filter_color
-        self.feature_map_width = feature_map_width
-        self.filter_width = filter_width
-        self.filter_height = filter_height
+        if isinstance(feature_map_size, int):
+            self.feature_map_size = (feature_map_size, feature_map_size)
+        else:
+            self.feature_map_size = feature_map_size
+        if isinstance(filter_size, int):
+            self.filter_size = (filter_size, filter_size)
+        else:
+            self.filter_size = filter_size
         self.cell_width = cell_width
         self.filter_spacing = filter_spacing
         self.color = color
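
The int-or-tuple handling added above amounts to a small normalization rule; a minimal standalone sketch of the same pattern (the helper name is illustrative, not from the commit):

    from typing import Optional, Tuple, Union

    def to_2d_size(size: Union[int, Tuple[int, int], None]) -> Optional[Tuple[int, int]]:
        # An int means a square size; tuples and None pass through unchanged.
        if isinstance(size, int):
            return (size, size)
        return size

    assert to_2d_size(3) == (3, 3)
    assert to_2d_size((5, 3)) == (5, 3)
    assert to_2d_size(None) is None
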
@@ -66,8 +69,8 @@ class Convolutional2DLayer(VGroupNeuralNetworkLayer, ThreeDLayer):
         for filter_index in range(self.num_feature_maps):
             rectangle = GriddedRectangle(
                 color=self.color,
-                height=self.feature_map_height * self.cell_width,
-                width=self.feature_map_width * self.cell_width,
+                height=self.feature_map_size[1] * self.cell_width,
+                width=self.feature_map_size[0] * self.cell_width,
                 fill_color=self.color,
                 fill_opacity=0.2,
                 stroke_color=self.color,
@@ -40,8 +40,8 @@ class Filters(VGroup):
     def make_input_feature_map_rectangles(self):
         rectangles = []
 
-        rectangle_width = self.input_layer.filter_width * self.input_layer.cell_width
-        rectangle_height = self.input_layer.filter_height * self.input_layer.cell_width
+        rectangle_width = self.input_layer.filter_size[0] * self.input_layer.cell_width
+        rectangle_height = self.input_layer.filter_size[1] * self.input_layer.cell_width
         filter_color = self.input_layer.filter_color
 
         for index, feature_map in enumerate(self.input_layer.feature_maps):
@@ -271,10 +271,8 @@ class Convolutional2DToConvolutional2D(ConnectiveLayer, ThreeDLayer):
         )
         self.color = color
         self.filter_color = self.input_layer.filter_color
-        self.filter_width = self.input_layer.filter_width
-        self.filter_height = self.input_layer.filter_height
-        self.feature_map_width = self.input_layer.feature_map_width
-        self.feature_map_height = self.input_layer.feature_map_height
+        self.filter_size = self.input_layer.filter_size
+        self.feature_map_size = self.input_layer.feature_map_size
         self.num_input_feature_maps = self.input_layer.num_feature_maps
         self.num_output_feature_maps = self.output_layer.num_feature_maps
         self.cell_width = self.input_layer.cell_width
@@ -321,8 +319,8 @@ class Convolutional2DToConvolutional2D(ConnectiveLayer, ThreeDLayer):
         right_shift, down_shift = self.get_rotated_shift_vectors()
         left_shift = -1 * right_shift
         # Make the animation
-        num_y_moves = int((self.feature_map_height - self.filter_height) / self.stride)
-        num_x_moves = int((self.feature_map_width - self.filter_width) / self.stride)
+        num_y_moves = int((self.feature_map_size[1] - self.filter_size[1]) / self.stride)
+        num_x_moves = int((self.feature_map_size[0] - self.filter_size[0]) / self.stride)
         for y_move in range(num_y_moves):
             # Go right num_x_moves
             for x_move in range(num_x_moves):
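
Worked example of the updated move-count formula (values chosen for illustration; the stride is an assumption, as this hunk does not show it):

    feature_map_size = (7, 7)  # (width, height), as indexed in the diff
    filter_size = (3, 3)
    stride = 1                 # assumed value for this example

    num_y_moves = int((feature_map_size[1] - filter_size[1]) / stride)  # (7 - 3) / 1 = 4
    num_x_moves = int((feature_map_size[0] - filter_size[0]) / stride)  # (7 - 3) / 1 = 4
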
@@ -387,10 +385,10 @@ class Convolutional2DToConvolutional2D(ConnectiveLayer, ThreeDLayer):
         left_shift = -1 * right_shift
         # Make the animation
         num_y_moves = int(
-            (self.feature_map_height - self.filter_height) / self.stride
+            (self.feature_map_size[1] - self.filter_size[1]) / self.stride
         )
         num_x_moves = int(
-            (self.feature_map_width - self.filter_width) / self.stride
+            (self.feature_map_size[0] - self.filter_size[0]) / self.stride
         )
         for y_move in range(num_y_moves):
             # Go right num_x_moves
@@ -6,7 +6,6 @@ from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
 from manim_ml.neural_network.layers.image import ImageLayer
 from manim_ml.neural_network.neural_network import NeuralNetwork
 
-
 class SingleConvolutionalLayerScene(ThreeDScene):
     def construct(self):
         # Make nn
@@ -22,7 +21,6 @@ class SingleConvolutionalLayerScene(ThreeDScene):
         )
         # self.play(nn.make_forward_pass_animation(run_time=5))
 
-
 class Simple3DConvScene(ThreeDScene):
     def construct(self):
         """
@@ -36,10 +34,10 @@ class Simple3DConvScene(ThreeDScene):
         # Make nn
         layers = [
             Convolutional2DLayer(
-                1, 5, 5, 5, 5, feature_map_height=3, filter_width=3, filter_height=3
+                1, feature_map_size=3, filter_size=3
             ),
             Convolutional2DLayer(
-                1, 3, 3, 1, 1, feature_map_width=3, filter_width=3, filter_height=3
+                1, feature_map_size=3, filter_size=3
             ),
         ]
         nn = NeuralNetwork(layers)
@@ -64,9 +62,9 @@ class CombinedScene(ThreeDScene):
         nn = NeuralNetwork(
             [
                 ImageLayer(numpy_image, height=1.5),
-                Convolutional2DLayer(1, 7, 7, 3, 3, filter_spacing=0.32),
-                Convolutional2DLayer(3, 5, 5, 3, 3, filter_spacing=0.32),
-                Convolutional2DLayer(5, 3, 3, 1, 1, filter_spacing=0.18),
+                Convolutional2DLayer(1, 7, 3, filter_spacing=0.32),
+                Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
+                Convolutional2DLayer(5, 3, 1, filter_spacing=0.18),
                 FeedForwardLayer(3),
                 FeedForwardLayer(3),
             ],
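
Putting the refactor together, a post-commit scene might look like the following condensed sketch (assembled from the hunks above; the scene name and the Convolutional2DLayer import path are assumptions, not shown in this diff):

    from manim import *
    from manim_ml.neural_network.layers.convolutional2d import Convolutional2DLayer  # path assumed
    from manim_ml.neural_network.layers.feed_forward import FeedForwardLayer
    from manim_ml.neural_network.neural_network import NeuralNetwork

    class MinimalConvScene(ThreeDScene):  # hypothetical scene for illustration
        def construct(self):
            # Square sizes given as ints, per the new feature_map_size/filter_size parameters.
            nn = NeuralNetwork([
                Convolutional2DLayer(1, 7, 3, filter_spacing=0.32),
                Convolutional2DLayer(3, 5, 3, filter_spacing=0.32),
                FeedForwardLayer(3),
            ])
            self.add(nn)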