lab_helpers

Varuna Jayasiri
2020-09-01 08:05:08 +05:30
parent 7db0ced04c
commit 6924f4580c
6 changed files with 6 additions and 6 deletions
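The change is the same one-line import update repeated across the six files: Module now comes from the standalone labml_helpers package instead of labml.helpers.pytorch.module. Below is a minimal sketch of code written against the new path; it assumes labml_helpers is installed, and the SimpleFeedForward class is hypothetical, added only to illustrate the import.

import torch
from torch import nn
from labml_helpers.module import Module  # new path; previously labml.helpers.pytorch.module

class SimpleFeedForward(Module):
    # Hypothetical module for illustration; Module is assumed to behave like
    # a thin wrapper around torch.nn.Module, so it is subclassed the same way.
    def __init__(self, d_model: int, d_ff: int):
        super().__init__()
        self.layer1 = nn.Linear(d_model, d_ff)
        self.layer2 = nn.Linear(d_ff, d_model)

    def forward(self, x: torch.Tensor):
        return self.layer2(torch.relu(self.layer1(x)))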


@@ -6,7 +6,7 @@ import torch.nn as nn
 import torch.nn.functional as F
 from labml.configs import BaseConfigs, option, calculate
-from labml.helpers.pytorch.module import Module
+from labml_helpers.module import Module
 from transformers.mha import MultiHeadAttention
 from transformers.positional_encoding import PositionalEncoding, get_positional_encoding
 from transformers.utils import clone_module_list


@@ -3,7 +3,7 @@ import numpy as np
 import torch
 import torch.nn as nn
-from labml.helpers.pytorch.module import Module
+from labml_helpers.module import Module
 class LabelSmoothingLoss(Module):


@@ -5,7 +5,7 @@ import torch
 from torch import nn as nn
 from torch.nn import functional as F
-from labml.helpers.pytorch.module import Module
+from labml_helpers.module import Module
 class PrepareForMultiHeadAttention(Module):


@@ -5,7 +5,7 @@ import numpy as np
 import torch
 import torch.nn as nn
-from labml.helpers.pytorch.module import Module
+from labml_helpers.module import Module
 class PositionalEncoding(Module):


@@ -6,7 +6,7 @@ https://arxiv.org/abs/1901.02860
 import torch
 from torch import nn
-from labml.helpers.pytorch.module import Module
+from labml_helpers.module import Module
 from labml.logger import inspect
 from transformers.mha import MultiHeadAttention


@@ -2,7 +2,7 @@ import copy
 from torch import nn
-from labml.helpers.pytorch.module import Module
+from labml_helpers.module import Module
 def clone_module_list(module: Module, n: int):
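The last hunk only touches the import above clone_module_list; the helper's body is not part of this commit. Going by the imports visible in the hunk (copy and torch.nn), a plausible sketch of such a helper is given below as an assumption, not as the actual labml implementation.

import copy
from torch import nn

def clone_module_list(module: nn.Module, n: int) -> nn.ModuleList:
    # Deep-copy a prototype layer n times so the clones do not share parameters,
    # e.g. to stack identical transformer layers.
    return nn.ModuleList([copy.deepcopy(module) for _ in range(n)])

# Hypothetical usage: six independent copies of one encoder layer.
# layers = clone_module_list(encoder_layer, 6)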