Mirror of https://github.com/labmlai/annotated_deep_learning_paper_implementations.git
no loss smoothing
@@ -49,8 +49,8 @@ class NLPAutoRegressionConfigs(TrainValidConfigs):
     d_model: int = 512
 
     def init(self):
-        tracker.set_queue("loss.*", 20, True)
         tracker.set_scalar("accuracy.*", True)
+        tracker.set_scalar("loss.*", True)
         hook_model_outputs(self.mode, self.model, 'model')
         self.state_modules = [self.accuracy]
 
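This hunk is what the commit message refers to: instead of pushing loss values through a smoothing queue of length 20 before printing (tracker.set_queue), the loss is now logged as a plain scalar, the same way accuracy already was. The sketch below shows how the two tracker settings are used; it assumes the labml tracker API as it appears in the diff, and the training loop and metric values are illustrative, not code from the repository.

import random

from labml import tracker

# After this commit both metrics are reported as raw scalars; before it,
# the loss was averaged over a moving window of 20 values before printing:
#     tracker.set_queue("loss.*", 20, True)
tracker.set_scalar("accuracy.*", True)
tracker.set_scalar("loss.*", True)

for step in range(100):
    # Dummy values standing in for real training metrics.
    loss = random.random()
    accuracy = random.random()
    # With set_scalar the raw value is reported for each step; with
    # set_queue a windowed mean would have been reported instead.
    # (Depending on the setup, full console/web output may also require
    # creating a labml experiment around this loop.)
    tracker.save(step, {'loss.train': loss, 'accuracy.train': accuracy})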
@@ -22,7 +22,6 @@ Where $1 \leq 2i, 2i + 1 \leq d_{model}$
 
 import math
 
-import matplotlib.pyplot as plt
 import numpy as np
 import torch
 import torch.nn as nn
@@ -65,6 +64,8 @@ def get_positional_encoding(d_model: int, max_len: int = 5000):
 
 
 def _test_positional_encoding():
+    import matplotlib.pyplot as plt
+
     plt.figure(figsize=(15, 5))
     pe = get_positional_encoding(20, 100)
     plt.plot(np.arange(100), pe[:, 0, 4:8].numpy())
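For reference, here is a minimal sketch of the sinusoidal encoding that get_positional_encoding computes, reconstructed from its signature, the hunk header's constraint on 2i and 2i + 1, and the pe[:, 0, 4:8] indexing in the test above; the exact implementation in the repository may differ.

import math

import torch


def get_positional_encoding(d_model: int, max_len: int = 5000):
    # PE(pos, 2i)     = sin(pos / 10000^(2i / d_model))
    # PE(pos, 2i + 1) = cos(pos / 10000^(2i / d_model))
    encodings = torch.zeros(max_len, d_model)
    position = torch.arange(0, max_len, dtype=torch.float32).unsqueeze(1)
    two_i = torch.arange(0, d_model, 2, dtype=torch.float32)
    div_term = torch.exp(-(math.log(10000.0) / d_model) * two_i)
    encodings[:, 0::2] = torch.sin(position * div_term)
    encodings[:, 1::2] = torch.cos(position * div_term)
    # Shape [max_len, 1, d_model]: the middle batch dimension is why the
    # test above indexes the encodings as pe[:, 0, 4:8].
    return encodings.unsqueeze(1).requires_grad_(False)

With d_model=20 and max_len=100, as in _test_positional_encoding, this returns a [100, 1, 20] tensor whose channels 4 through 7 are the curves the test plots.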