Changed directory structure to accommodate examples as opposed to everything being a part of the core library. May need to rethink this in the future. Added some boilerplate for pip packaging to the .gitignore.

This commit is contained in:
Alec Helbling
2022-03-28 14:01:00 -04:00
committed by Alec Helbling
parent 4eb5296c9c
commit 3be5c54d26
40 changed files with 30 additions and 15 deletions

9
.gitignore vendored
View File

@@ -1,2 +1,9 @@
media
__pycache__
manim_ml.egg-info/
manim_ml/manim_ml.egg-info/
dist
manim_ml/media
media
pyproject.toml
setup.cfg
!examples/media

View File

@@ -1,6 +1,6 @@
# Manim Machine Learning
<a href="https://github.com/helblazer811/ManimMachineLearning">
<img src="examples/ManimMLLogo.gif">
<img src="examples/media/ManimMLLogo.gif">
</a>
[![GitHub license](https://img.shields.io/github/license/helblazer811/ManimMachineLearning)](https://github.com/helblazer811/ManimMachineLearning/blob/main/LICENSE.md)
@@ -28,16 +28,16 @@ Checkout the ```examples``` directory for some example videos with source code.
This is a visualization of a Variational Autoencoder.
<img src="examples/VAEScene.gif" width="600">
<img src="examples/media/VAEScene.gif" width="600">
### VAE Disentanglement
This is a visualization of disentanglement with a Variational Autoencoder
<img src="examples/DisentanglementScene.gif" width="600">
<img src="examples/media/DisentanglementScene.gif" width="600">
### Neural Networks
This is a visualization of a Neural Network.
<img src="examples/TestNeuralNetworkScene.gif" width="600">
<img src="examples/media/TestNeuralNetworkScene.gif" width="600">

View File

@@ -1,7 +1,10 @@
"""This module is dedicated to visualizing VAE disentanglement"""
import sys
import os
sys.path.append(os.environ["PROJECT_ROOT"])
from manim import *
from neural_network import NeuralNetwork
import util
from manim_ml.neural_network import NeuralNetwork
import manim_ml.util as util
import pickle
class VAEDecoder(VGroup):
@@ -19,11 +22,11 @@ class VAEDecoder(VGroup):
class DisentanglementVisualization(VGroup):
def __init__(self, model_path="autoencoder_models/saved_models/model_dim2.pth", image_height=0.35):
def __init__(self, model_path=os.path.join(os.environ["PROJECT_ROOT"], "examples/variational_autoencoder/autoencoder_models/saved_models/model_dim2.pth"), image_height=0.35):
self.model_path = model_path
self.image_height = image_height
# Load disentanglement image objects
with open("autoencoder_models/disentanglement.pkl", "rb") as f:
with open(os.path.join(os.environ["PROJECT_ROOT"], "examples/variational_autoencoder/autoencoder_models/disentanglement.pkl"), "rb") as f:
self.image_handler = pickle.load(f)
def make_disentanglement_generation_animation(self):

View File

@@ -6,8 +6,8 @@ sys.path.append(os.environ["PROJECT_ROOT"])
from manim import *
import pickle
import numpy as np
import neural_network
import variational_autoencoder
import manim_ml.neural_network as neural_network
import examples.variational_autoencoder.variational_autoencoder as variational_autoencoder
"""
The VAE Scene for the twitter video.

View File

Before

Width:  |  Height:  |  Size: 148 KiB

After

Width:  |  Height:  |  Size: 148 KiB

View File

Before

Width:  |  Height:  |  Size: 2.5 MiB

After

Width:  |  Height:  |  Size: 2.5 MiB

View File

Before

Width:  |  Height:  |  Size: 117 KiB

After

Width:  |  Height:  |  Size: 117 KiB

View File

Before

Width:  |  Height:  |  Size: 137 KiB

After

Width:  |  Height:  |  Size: 137 KiB

View File

Before

Width:  |  Height:  |  Size: 86 KiB

After

Width:  |  Height:  |  Size: 86 KiB

View File

Before

Width:  |  Height:  |  Size: 991 KiB

After

Width:  |  Height:  |  Size: 991 KiB

View File

@@ -198,7 +198,12 @@ def train_model(latent_dim=16, plot=True, digit=1, epochs=200):
losses.append(loss.detach().cpu())
outputs.append((epochs, image, reconstructed))
torch.save(model.state_dict(), os.path.join(os.environ["PROJECT_ROOT"], f"saved_models/model_dim{latent_dim}.pth"))
torch.save(model.state_dict(),
os.path.join(
os.environ["PROJECT_ROOT"],
f"examples/variational_autoencoder/autoencoder_model/saved_models/model_dim{latent_dim}.pth"
)
)
if plot:
# Defining the Plot Style

View File

@@ -8,7 +8,7 @@ from manim import *
import pickle
import numpy as np
import os
import neural_network
import manim_ml.neural_network as neural_network
class VariationalAutoencoder(VGroup):
"""Variational Autoencoder Manim Visualization"""
@@ -244,8 +244,8 @@ class MNISTImageHandler():
def __init__(
self,
image_pairs_file_path=os.path.join(os.environ["PROJECT_ROOT"], "autoencoder_models/image_pairs.pkl"),
interpolations_file_path=os.path.join(os.environ["PROJECT_ROOT"], "autoencoder_models/interpolations.pkl")
image_pairs_file_path=os.path.join(os.environ["PROJECT_ROOT"], "examples/variational_autoencoder/autoencoder_models/image_pairs.pkl"),
interpolations_file_path=os.path.join(os.environ["PROJECT_ROOT"], "examples/variational_autoencoder/autoencoder_models/interpolations.pkl")
):
self.image_pairs_file_path = image_pairs_file_path
self.interpolations_file_path = interpolations_file_path