PPO experiment configs

This commit is contained in:
Varuna Jayasiri
2021-10-02 13:57:47 +05:30
parent 980838911c
commit 9e430d2dba
2 changed files with 2 additions and 12 deletions

View File

@@ -388,17 +388,7 @@ def main():
experiment.configs(configs)
# Initialize the trainer
m = Trainer(
updates=configs['updates'],
epochs=configs['epochs'],
n_workers=configs['n_workers'],
worker_steps=configs['worker_steps'],
batches=configs['batches'],
value_loss_coef=configs['value_loss_coef'],
entropy_bonus_coef=configs['entropy_bonus_coef'],
clip_range=configs['clip_range'],
learning_rate=configs['learning_rate'],
)
m = Trainer(**configs)
# Run and monitor the experiment
with experiment.start():

View File

@@ -5,7 +5,7 @@ with open("readme.md", "r") as f:
setuptools.setup(
name='labml-nn',
version='0.4.114',
version='0.4.115',
author="Varuna Jayasiri, Nipun Wijerathne",
author_email="vpjayasiri@gmail.com, hnipun@gmail.com",
description="🧑‍🏫 Implementations/tutorials of deep learning papers with side-by-side notes 📝; including transformers (original, xl, switch, feedback, vit), optimizers (adam, radam, adabelief), gans(dcgan, cyclegan, stylegan2), 🎮 reinforcement learning (ppo, dqn), capsnet, distillation, etc. 🧠",