mirror of https://github.com/labmlai/annotated_deep_learning_paper_implementations.git
links
@@ -90,7 +90,8 @@
<li><a href="transformers/mlp_mixer/index.html">MLP-Mixer: An all-MLP Architecture for Vision</a> </li>
<li><a href="transformers/gmlp/index.html">Pay Attention to MLPs (gMLP)</a> </li>
<li><a href="transformers/vit/index.html">Vision Transformer (ViT)</a> </li>
<li><a href="transformers/primer_ez/index.html">Primer EZ</a></li></ul>
<li><a href="transformers/primer_ez/index.html">Primer EZ</a> </li>
<li><a href="transformers/hourglass/index.html">Hourglass</a></li></ul>
<h4>✨ <a href="recurrent_highway_networks/index.html">Recurrent Highway Networks</a></h4>
<h4>✨ <a href="lstm/index.html">LSTM</a></h4>
<h4>✨ <a href="hypernetworks/hyper_lstm.html">HyperNetworks - HyperLSTM</a></h4>
@@ -104,13 +104,15 @@
<p>This is an implementation of the paper <a href="https://papers.labml.ai/paper/2010.11929">An Image Is Worth 16x16 Words: Transformers For Image Recognition At Scale</a>.</p>
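For orientation, here is a minimal sketch of the core ViT idea, not the code in this repository: the image is cut into fixed-size patches (16x16 in the paper title), each patch is linearly projected, and the resulting sequence of patch embeddings is fed to a standard transformer encoder. The module name, patch size and dimensions below are illustrative assumptions.

import torch
import torch.nn as nn

class PatchEmbedding(nn.Module):
    """Split an image into 16x16 patches and project each one to d_model."""
    def __init__(self, d_model: int = 768, patch_size: int = 16, in_channels: int = 3):
        super().__init__()
        # A convolution with stride equal to the kernel size extracts
        # non-overlapping patches and projects them in one operation.
        self.proj = nn.Conv2d(in_channels, d_model, kernel_size=patch_size, stride=patch_size)

    def forward(self, x: torch.Tensor) -> torch.Tensor:
        # x: [batch, channels, height, width]
        x = self.proj(x)                     # [batch, d_model, height / 16, width / 16]
        return x.flatten(2).transpose(1, 2)  # [batch, num_patches, d_model]

# A 224x224 image becomes a sequence of (224 / 16) ** 2 = 196 patch embeddings.
print(PatchEmbedding()(torch.randn(1, 3, 224, 224)).shape)  # torch.Size([1, 196, 768])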
<h2><a href="primer_ez/index.html">Primer EZ</a></h2>
<p>This is an implementation of the paper <a href="https://papers.labml.ai/paper/2109.08668">Primer: Searching for Efficient Transformers for Language Modeling</a>.</p>
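As a hedged illustration, not the implementation linked above: Primer-EZ keeps the standard transformer and changes two pieces, a squared ReLU in the feed-forward block and depthwise convolutions over the sequence after the query, key and value projections. The sketch below shows only the squared ReLU; the dimensions are made up for the example.

import torch
import torch.nn as nn

class SquaredReLU(nn.Module):
    """Squared ReLU: relu(x) ** 2, Primer's replacement for the FFN activation."""
    def forward(self, x: torch.Tensor) -> torch.Tensor:
        return torch.relu(x) ** 2

# Drop-in replacement for ReLU inside a transformer feed-forward block.
ffn = nn.Sequential(nn.Linear(512, 2048), SquaredReLU(), nn.Linear(2048, 512))
print(ffn(torch.randn(2, 10, 512)).shape)  # torch.Size([2, 10, 512])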
<h2><a href="hour_glass/index.html">Hourglass</a></h2>
<p>This is an implementation of the paper <a href="https://papers.labml.ai/paper/2110.13711">Hierarchical Transformers Are More Efficient Language Models</a>.</p>
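For intuition, a minimal sketch of the hourglass idea, with average pooling assumed as the shortening scheme for brevity (the paper discusses several shortening and upsampling variants with residual connections): the sequence is downsampled by a fixed factor before the middle layers, so most of the computation runs on a shorter sequence, and is upsampled back afterwards.

import torch

def shorten(x: torch.Tensor, k: int = 2) -> torch.Tensor:
    """Downsample a sequence by averaging every k consecutive tokens."""
    batch, seq_len, d_model = x.shape
    return x.view(batch, seq_len // k, k, d_model).mean(dim=2)

def upsample(x: torch.Tensor, k: int = 2) -> torch.Tensor:
    """Expand a shortened sequence back by repeating each token k times."""
    return x.repeat_interleave(k, dim=1)

x = torch.randn(1, 16, 64)
short = shorten(x)          # [1, 8, 64] - the middle transformer layers run here
restored = upsample(short)  # [1, 16, 64] - combined with a residual from x in the paper
print(short.shape, restored.shape)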
</div>
<div class='code'>
<div class="highlight"><pre><span class="lineno">98</span><span></span><span class="kn">from</span> <span class="nn">.configs</span> <span class="kn">import</span> <span class="n">TransformerConfigs</span>
<span class="lineno">99</span><span class="kn">from</span> <span class="nn">.models</span> <span class="kn">import</span> <span class="n">TransformerLayer</span><span class="p">,</span> <span class="n">Encoder</span><span class="p">,</span> <span class="n">Decoder</span><span class="p">,</span> <span class="n">Generator</span><span class="p">,</span> <span class="n">EncoderDecoder</span>
<span class="lineno">100</span><span class="kn">from</span> <span class="nn">.mha</span> <span class="kn">import</span> <span class="n">MultiHeadAttention</span>
<span class="lineno">101</span><span class="kn">from</span> <span class="nn">labml_nn.transformers.xl.relative_mha</span> <span class="kn">import</span> <span class="n">RelativeMultiHeadAttention</span></pre></div>
<div class="highlight"><pre><span class="lineno">103</span><span></span><span class="kn">from</span> <span class="nn">.configs</span> <span class="kn">import</span> <span class="n">TransformerConfigs</span>
<span class="lineno">104</span><span class="kn">from</span> <span class="nn">.models</span> <span class="kn">import</span> <span class="n">TransformerLayer</span><span class="p">,</span> <span class="n">Encoder</span><span class="p">,</span> <span class="n">Decoder</span><span class="p">,</span> <span class="n">Generator</span><span class="p">,</span> <span class="n">EncoderDecoder</span>
<span class="lineno">105</span><span class="kn">from</span> <span class="nn">.mha</span> <span class="kn">import</span> <span class="n">MultiHeadAttention</span>
<span class="lineno">106</span><span class="kn">from</span> <span class="nn">labml_nn.transformers.xl.relative_mha</span> <span class="kn">import</span> <span class="n">RelativeMultiHeadAttention</span></pre></div>
</div>
</div>
<div class='footer'>