mirror of https://github.com/labmlai/annotated_deep_learning_paper_implementations.git
synced 2025-08-14 01:13:00 +08:00
fix \u
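The commit title refers to Python string escaping: in a regular (non-raw) string literal, \u starts a \uXXXX Unicode escape and must be followed by exactly four hex digits, so a docstring containing \underset fails to compile with a SyntaxError. Doubling the backslash (as this commit does) or switching to a raw string fixes it. A quick illustrative check (the example strings are trimmed for brevity, not the full docstring):

    # In a non-raw string, \u must begin a \uXXXX Unicode escape, so this
    # line is a SyntaxError at compile time:
    #   bad = "\underset{seq}{softmax}"
    # SyntaxError: (unicode error) 'unicodeescape' codec can't decode bytes ...

    # Either escape the backslash, as this commit does:
    fixed = "\\underset{seq}{softmax}"
    # ...or use a raw string:
    raw = r"\underset{seq}{softmax}"
    assert fixed == raw  # both hold a single literal backslash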
@@ -65,7 +65,7 @@ class MultiHeadAttention(Module):
 
     This computes scaled multi-headed attention for given `query`, `key` and `value` vectors.
 
-    $$Attention(Q, K, V) = \underset{seq}{softmax}\Bigg(\frac{Q K^T}{\sqrt{d_k}}\Bigg)V$$
+    $$Attention(Q, K, V) = \\underset{seq}{softmax}\Bigg(\frac{Q K^T}{\sqrt{d_k}}\Bigg)V$$
 
     In simple terms, it finds keys that match the query and gets the values of
     those keys.
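For reference, the formula above is standard scaled dot-product attention: the softmax over the key axis gives each query a distribution over keys, and the output is the corresponding weighted combination of values. A minimal PyTorch sketch of that computation (the tensor shapes and einsum layout here are assumptions for illustration, not the repo's actual implementation):

    import math
    import torch

    def scaled_dot_product_attention(query, key, value):
        # query, key, value: [seq_len, batch, heads, d_k]
        d_k = query.shape[-1]
        # Dot product of every query with every key, scaled by sqrt(d_k)
        scores = torch.einsum('ibhd,jbhd->ijbh', query, key) / math.sqrt(d_k)
        # Softmax over the key (seq) dimension, as in the formula above
        attn = scores.softmax(dim=1)
        # Weighted sum of values: "get the values of the matching keys"
        return torch.einsum('ijbh,jbhd->ibhd', attn, value)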
setup.py (2 changed lines)
@@ -5,7 +5,7 @@ with open("readme.md", "r") as f:
 
 setuptools.setup(
     name='labml_nn',
-    version='0.4.72',
+    version='0.4.73',
     author="Varuna Jayasiri, Nipun Wijerathne",
     author_email="vpjayasiri@gmail.com, hnipun@gmail.com",
     description="A collection of PyTorch implementations of neural network architectures and layers.",