From dfefde657b5bc089a4c3d2f69312bbd0e59eeefe Mon Sep 17 00:00:00 2001
From: Varuna Jayasiri
Date: Thu, 7 Jan 2021 12:02:19 +0530
Subject: [PATCH] no loss smoothing

---
 labml_nn/experiments/nlp_autoregression.py   | 2 +-
 labml_nn/transformers/positional_encoding.py | 3 ++-
 2 files changed, 3 insertions(+), 2 deletions(-)

diff --git a/labml_nn/experiments/nlp_autoregression.py b/labml_nn/experiments/nlp_autoregression.py
index 9422c584..970cc3a8 100644
--- a/labml_nn/experiments/nlp_autoregression.py
+++ b/labml_nn/experiments/nlp_autoregression.py
@@ -49,8 +49,8 @@ class NLPAutoRegressionConfigs(TrainValidConfigs):
     d_model: int = 512
 
     def init(self):
-        tracker.set_queue("loss.*", 20, True)
         tracker.set_scalar("accuracy.*", True)
+        tracker.set_scalar("loss.*", True)
         hook_model_outputs(self.mode, self.model, 'model')
         self.state_modules = [self.accuracy]
 
diff --git a/labml_nn/transformers/positional_encoding.py b/labml_nn/transformers/positional_encoding.py
index e8118930..74c56f9f 100644
--- a/labml_nn/transformers/positional_encoding.py
+++ b/labml_nn/transformers/positional_encoding.py
@@ -22,7 +22,6 @@ Where $1 \leq 2i, 2i + 1 \leq d_{model}$
 
 import math
 
-import matplotlib.pyplot as plt
 import numpy as np
 import torch
 import torch.nn as nn
@@ -65,6 +64,8 @@ def get_positional_encoding(d_model: int, max_len: int = 5000):
 
 
 def _test_positional_encoding():
+    import matplotlib.pyplot as plt
+
     plt.figure(figsize=(15, 5))
     pe = get_positional_encoding(20, 100)
     plt.plot(np.arange(100), pe[:, 0, 4:8].numpy())