mirror of https://github.com/labmlai/annotated_deep_learning_paper_implementations.git
fix math align
@@ -52,6 +52,7 @@ class Sampler:
        alpha_bar_tm1 = torch.cat([self.alpha_bar.new_ones((1,)), self.alpha_bar[:-1]])

        # To calculate
        #
        # \begin{align}
        # q(x_{t-1}|x_t, x_0) &= \mathcal{N} \Big(x_{t-1}; \tilde\mu_t(x_t, x_0), \tilde\beta_t \mathbf{I} \Big) \\
        # \tilde\mu_t(x_t, x_0) &= \frac{\sqrt{\bar\alpha_{t-1}}\beta_t}{1 - \bar\alpha_t}x_0
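For context, a minimal sketch (not part of the commit) of how the parameters of q(x_{t-1}|x_t, x_0) can be computed from alpha_bar_tm1 and the schedule. The hunk above cuts off after the x_0 coefficient; the x_t term and \tilde\beta_t below follow the standard DDPM posterior, and the function name and the beta schedule tensor are assumptions for illustration.

import torch

# Hypothetical helper, not from the repository: computes the mean and
# variance of q(x_{t-1}|x_t, x_0) from the cumulative products alpha_bar,
# the shifted alpha_bar_tm1 built above, and the beta schedule.
def ddpm_posterior(x0: torch.Tensor, xt: torch.Tensor, t: int,
                   alpha_bar: torch.Tensor, alpha_bar_tm1: torch.Tensor,
                   beta: torch.Tensor):
    # Coefficient of x_0: sqrt(alpha_bar_{t-1}) * beta_t / (1 - alpha_bar_t)
    coef_x0 = alpha_bar_tm1[t].sqrt() * beta[t] / (1 - alpha_bar[t])
    # Recover alpha_t from the cumulative products
    alpha_t = alpha_bar[t] / alpha_bar_tm1[t]
    # Coefficient of x_t: sqrt(alpha_t) * (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t)
    coef_xt = alpha_t.sqrt() * (1 - alpha_bar_tm1[t]) / (1 - alpha_bar[t])
    mu_tilde = coef_x0 * x0 + coef_xt * xt
    # tilde_beta_t = (1 - alpha_bar_{t-1}) / (1 - alpha_bar_t) * beta_t
    beta_tilde = (1 - alpha_bar_tm1[t]) / (1 - alpha_bar[t]) * beta[t]
    return mu_tilde, beta_tilde

Note that at t = 0 the prepended 1 in alpha_bar_tm1 makes coef_x0 equal to 1 and coef_xt equal to 0, so the posterior mean collapses to x_0 as expected.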
@@ -62,10 +62,12 @@ class TimeEmbedding(nn.Module):

    def forward(self, t: torch.Tensor):
        # Create sinusoidal position embeddings
        # [same as those from the transformer](../../transformers/positional_encoding.html)
        #
        # \begin{align}
        # PE^{(1)}_{t,i} &= \sin\Bigg(\frac{t}{10000^{\frac{i}{d - 1}}}\Bigg) \\
        # PE^{(2)}_{t,i} &= \cos\Bigg(\frac{t}{10000^{\frac{i}{d - 1}}}\Bigg)
        # \end{align}
        #
        # where $d$ is `half_dim`
        half_dim = self.n_channels // 8
        emb = math.log(10_000) / (half_dim - 1)
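For context, a minimal sketch (not part of the commit) of how the forward pass typically continues from `emb`: exponentiating gives log-spaced frequencies, and concatenating the sine and cosine components yields the embedding from the formula above. The standalone function is an assumption for illustration; in the class, `n_channels` plays the role of `self.n_channels`.

import math
import torch

# Hypothetical standalone version of the sinusoidal time embedding.
def sinusoidal_embedding(t: torch.Tensor, n_channels: int) -> torch.Tensor:
    half_dim = n_channels // 8
    emb = math.log(10_000) / (half_dim - 1)
    # Frequencies 10000^{-i/(d-1)} for i = 0, ..., d-1
    emb = torch.exp(torch.arange(half_dim) * -emb)
    # Each time step times each frequency: shape [batch_size, half_dim]
    emb = t[:, None] * emb[None, :]
    # PE^{(1)} and PE^{(2)} concatenated: shape [batch_size, half_dim * 2]
    return torch.cat((emb.sin(), emb.cos()), dim=1)

For example, with n_channels = 64, half_dim is 8 and the returned embedding has 16 features per time step.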