Mirror of https://github.com/labmlai/annotated_deep_learning_paper_implementations.git, synced 2025-08-14 09:31:42 +08:00
refactor
@@ -10,12 +10,12 @@ summary: This experiment trains a transformer model with Rotary Positional Embed

 from labml import experiment
 from labml.configs import calculate
-from labml_nn.experiments.arithmetic_dataset import ArithmeticAutoregression
+from labml_nn.experiments.arithmetic_addition_dataset import ArithmeticAdditionAutoregression
 from labml_nn.transformers import TransformerConfigs
 from labml_nn.transformers.rope.experiment import Configs as RoPEConfigs


-class Configs(RoPEConfigs, ArithmeticAutoregression):
+class Configs(RoPEConfigs, ArithmeticAdditionAutoregression):
     """
     We inherit [RoPE experiment](../experiment.html) and use it for
     [arithmetic addition task](../../experiments/arithmetic_dataset.html).
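For context, a run script for a Configs class like the one above usually follows the standard labml pattern: create an experiment, override a few configuration values, then call conf.run(). The sketch below could sit at the bottom of the same file; the experiment name and the override keys/values are assumptions for illustration, not part of this commit.

# Illustrative run script (assumed names and values; not part of this commit)
from labml import experiment


def main():
    # Create the experiment and the configurations object defined above
    experiment.create(name="rope_arithmetic_addition")  # assumed experiment name
    conf = Configs()
    # Override a handful of options; these keys/values are assumptions for illustration
    experiment.configs(conf, {
        'optimizer.optimizer': 'Adam',
        'optimizer.learning_rate': 2.5e-4,
    })
    # Start the run and train
    with experiment.start():
        conf.run()


if __name__ == '__main__':
    main()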
@@ -10,7 +10,7 @@ summary: This experiment trains a transformer model with Rotary Positional Embed

 from labml import experiment
 from labml.configs import calculate
-from labml_nn.experiments.copy_perm.continous import CopyRepeatAutoregression
+from labml_nn.experiments.copy_perm.repeat import CopyRepeatAutoregression
 from labml_nn.transformers import TransformerConfigs
 from labml_nn.transformers.rope.experiment import Configs as RoPEConfigs

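Both files import calculate from labml.configs. In the RoPE experiments that helper is typically used to register an attention implementation as a named option on TransformerConfigs, which is what the "# ### Rotary PE attention" section in the next hunk refers to. The snippet below is a hedged sketch of that registration pattern, assuming RotaryPEMultiHeadAttention from labml_nn.transformers.rope is the attention class being wired in; it is not code from this commit.

# Sketch of registering a rotary-attention config option (assumed, not from this commit)
from labml.configs import calculate
from labml_nn.transformers import TransformerConfigs
from labml_nn.transformers.rope import RotaryPEMultiHeadAttention


def _rotary_pe_mha(c: TransformerConfigs):
    # Attention that rotates query/key features with RoPE; rope_percentage=1.0 rotates all features
    return RotaryPEMultiHeadAttention(c.n_heads, c.d_model, 1.0)


# Makes `encoder_attn: 'rotary'` selectable when overriding the experiment configs
calculate(TransformerConfigs.encoder_attn, 'rotary', _rotary_pe_mha)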
@@ -19,7 +19,7 @@ from labml_nn.transformers.rope.experiment import Configs as RoPEConfigs

 # ### Rotary PE attention

-class Configs(RoPEConfigs): # , ArithmeticAutoregression):
+class Configs(RoPEConfigs):
     pass

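For readers coming to this diff cold, the RoPEConfigs base used above boils down to rotating query/key feature pairs by position-dependent angles before the attention dot product. The following is a minimal, self-contained PyTorch sketch of that rotation; it uses interleaved (even, odd) pairing and is not the repository's RotaryPositionalEmbeddings module, whose pairing and caching details differ.

import torch


def rotary_angles(seq_len: int, d: int, base: float = 10_000.0):
    # One angle per (even, odd) feature pair and per position: theta_i = base^(-2i/d)
    theta = base ** (-torch.arange(0, d, 2, dtype=torch.float32) / d)
    positions = torch.arange(seq_len, dtype=torch.float32)
    angles = positions[:, None] * theta[None, :]              # [seq_len, d // 2]
    return angles.cos(), angles.sin()


def apply_rotary(x: torch.Tensor, cos: torch.Tensor, sin: torch.Tensor) -> torch.Tensor:
    # x: [seq_len, d]; rotate each (x_{2i}, x_{2i+1}) pair by its position's angle
    x1, x2 = x[..., 0::2], x[..., 1::2]
    rotated = torch.stack((x1 * cos - x2 * sin, x1 * sin + x2 * cos), dim=-1)
    return rotated.flatten(-2)                                # back to [seq_len, d]


# Usage: rotate queries and keys before the attention dot product
q = torch.randn(128, 64)
cos, sin = rotary_angles(seq_len=128, d=64)
q_rot = apply_rotary(q, cos, sin)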