mirror of
https://github.com/labmlai/annotated_deep_learning_paper_implementations.git
synced 2025-08-26 16:50:39 +08:00
labml app links
This commit is contained in:
@ -76,7 +76,7 @@ If you use LabML for academic research, please cite the library using the follow
|
||||
author = {Varuna Jayasiri, Nipun Wijerathne},
|
||||
title = {LabML: A library to organize machine learning experiments},
|
||||
year = {2020},
|
||||
url = {https://lab-ml.com/},
|
||||
url = {https://nn.labml.ai/},
|
||||
}
|
||||
```
|
||||
"""
|
||||
|
@ -27,7 +27,7 @@ confusions I had with the paper.
|
||||
Here's a notebook for training a Capsule Network on MNIST dataset.
|
||||
|
||||
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/capsule_networks/mnist.ipynb)
|
||||
[](https://web.lab-ml.com/run?uuid=e7c08e08586711ebb3e30242ac1c0002)
|
||||
[](https://app.labml.ai/run/e7c08e08586711ebb3e30242ac1c0002)
|
||||
"""
|
||||
|
||||
import torch.nn as nn
|
||||
|
@ -30,7 +30,7 @@ This file contains the model code as well as the training code.
|
||||
We also have a Google Colab notebook.
|
||||
|
||||
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/gan/cycle_gan.ipynb)
|
||||
[](https://web.lab-ml.com/run?uuid=93b11a665d6811ebaac80242ac1c0002)
|
||||
[](https://app.labml.ai/run/93b11a665d6811ebaac80242ac1c0002)
|
||||
"""
|
||||
|
||||
import itertools
|
||||
|
@ -16,7 +16,7 @@ We have an experiment that trains a HyperLSTM to predict text on Shakespeare dat
|
||||
Here's the link to code: [`experiment.py`](experiment.html)
|
||||
|
||||
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/hypernetworks/experiment.ipynb)
|
||||
[](https://web.lab-ml.com/run?uuid=9e7f39e047e811ebbaff2b26e3148b3d)
|
||||
[](https://app.labml.ai/run/9e7f39e047e811ebbaff2b26e3148b3d)
|
||||
|
||||
HyperNetworks use a smaller network to generate weights of a larger network.
|
||||
There are two variants: static hyper-networks and dynamic hyper-networks.
|
||||
|
@ -92,7 +92,7 @@ Here's [the training code](mnist.html) and a notebook for training
|
||||
a CNN classifier that uses batch normalization for MNIST dataset.
|
||||
|
||||
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/normalization/batch_norm/mnist.ipynb)
|
||||
[](https://web.lab-ml.com/run?uuid=011254fe647011ebbb8e0242ac1c0002)
|
||||
[](https://app.labml.ai/run/011254fe647011ebbb8e0242ac1c0002)
|
||||
"""
|
||||
|
||||
import torch
|
||||
|
@ -195,10 +195,10 @@ def _synthetic_experiment(is_adam: bool):
|
||||
|
||||
if __name__ == '__main__':
|
||||
# Run the synthetic experiment with *Adam*.
|
||||
# [Here are the results](https://web.lab-ml.com/metrics?uuid=61ebfdaa384411eb94d8acde48001122).
|
||||
# [Here are the results](https://app.labml.ai/run/61ebfdaa384411eb94d8acde48001122).
|
||||
# You can see that Adam converges at $x = +1$
|
||||
_synthetic_experiment(True)
|
||||
# Run the synthetic experiment with *AMSGrad*
|
||||
# [Here are the results](https://web.lab-ml.com/metrics?uuid=bc06405c384411eb8b82acde48001122).
|
||||
# [Here are the results](https://app.labml.ai/run/bc06405c384411eb8b82acde48001122).
|
||||
# You can see that AMSGrad converges to true optimal $x = -1$
|
||||
_synthetic_experiment(False)
|
||||
|
@ -48,7 +48,7 @@ Here are [the training code](experiment.html) and a notebook for training a comp
|
||||
model on the Tiny Shakespeare dataset.
|
||||
|
||||
[](https://colab.research.google.com/github/lab-ml/nn/blob/master/labml_nn/transformers/compressive/experiment.ipynb)
|
||||
[](https://web.lab-ml.com/run?uuid=0d9b5338726c11ebb7c80242ac1c0002)
|
||||
[](https://app.labml.ai/run/0d9b5338726c11ebb7c80242ac1c0002)
|
||||
"""
|
||||
|
||||
from typing import Optional, List
|
||||
|
Reference in New Issue
Block a user