Mirror of https://github.com/labmlai/annotated_deep_learning_paper_implementations.git
Synced 2025-11-02 04:37:46 +08:00
__call__ -> forward
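For context: in PyTorch, `nn.Module.__call__` runs registered hooks and then dispatches to `forward()`, so modules should override `forward` rather than `__call__` to keep hooks, tracing, and related machinery working. A minimal sketch illustrating this (the `Double` module is a made-up example):

import torch
from torch import nn

class Double(nn.Module):
    def forward(self, x: torch.Tensor):
        return 2 * x

m = Double()
seen = []
# Forward hooks fire only when the module is invoked through nn.Module.__call__,
# which in turn dispatches to forward()
m.register_forward_hook(lambda mod, inp, out: seen.append(out))
y = m(torch.ones(2))  # calling m(...) runs the hook, then forward()
assert len(seen) == 1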
@@ -83,7 +83,7 @@ class ShortcutProjection(Module):
         # Paper suggests adding batch normalization after each convolution operation
         self.bn = nn.BatchNorm2d(out_channels)
 
-    def __call__(self, x: torch.Tensor):
+    def forward(self, x: torch.Tensor):
         # Convolution and batch normalization
         return self.bn(self.conv(x))
 
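A self-contained sketch of a shortcut projection along the lines above, assuming the constructor takes `in_channels`, `out_channels`, and `stride` (a 1x1 strided convolution that reshapes the shortcut branch):

import torch
from torch import nn

class ShortcutProjection(nn.Module):
    def __init__(self, in_channels: int, out_channels: int, stride: int):
        super().__init__()
        # 1x1 convolution to change the channel count (and resolution, via stride)
        self.conv = nn.Conv2d(in_channels, out_channels, kernel_size=1, stride=stride)
        # Paper suggests adding batch normalization after each convolution operation
        self.bn = nn.BatchNorm2d(out_channels)

    def forward(self, x: torch.Tensor):
        # Convolution and batch normalization
        return self.bn(self.conv(x))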
@@ -140,7 +140,7 @@ class ResidualBlock(Module):
         # Second activation function (ReLU) (after adding the shortcut)
         self.act2 = nn.ReLU()
 
-    def __call__(self, x: torch.Tensor):
+    def forward(self, x: torch.Tensor):
         """
         * `x` is the input of shape `[batch_size, in_channels, height, width]`
         """
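`act2` above is applied only after the shortcut is added back. A minimal sketch of such a block, assuming two 3x3 conv/bn stages and an identity shortcut (channel counts are made up):

import torch
from torch import nn

class ResidualBlock(nn.Module):
    def __init__(self, channels: int):
        super().__init__()
        self.conv1 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn1 = nn.BatchNorm2d(channels)
        self.act1 = nn.ReLU()
        self.conv2 = nn.Conv2d(channels, channels, kernel_size=3, padding=1)
        self.bn2 = nn.BatchNorm2d(channels)
        # Second activation function (ReLU) (after adding the shortcut)
        self.act2 = nn.ReLU()

    def forward(self, x: torch.Tensor):
        # `x` is the input of shape `[batch_size, in_channels, height, width]`
        h = self.act1(self.bn1(self.conv1(x)))
        h = self.bn2(self.conv2(h))
        # Add the identity shortcut first, then apply the second activation
        return self.act2(h + x)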
@@ -221,7 +221,7 @@ class BottleneckResidualBlock(Module):
         # Third activation function (ReLU) (after adding the shortcut)
         self.act3 = nn.ReLU()
 
-    def __call__(self, x: torch.Tensor):
+    def forward(self, x: torch.Tensor):
         """
         * `x` is the input of shape `[batch_size, in_channels, height, width]`
         """
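A minimal sketch of a bottleneck residual block along these lines (1x1 reduce, 3x3, 1x1 expand, with `act3` after the shortcut add; the grouping into `nn.Sequential` stages and the channel counts are assumptions):

import torch
from torch import nn

class BottleneckResidualBlock(nn.Module):
    def __init__(self, channels: int, bottleneck: int):
        super().__init__()
        # 1x1 convolution reduces the channel count
        self.stage1 = nn.Sequential(nn.Conv2d(channels, bottleneck, 1),
                                    nn.BatchNorm2d(bottleneck), nn.ReLU())
        # 3x3 convolution at the reduced width
        self.stage2 = nn.Sequential(nn.Conv2d(bottleneck, bottleneck, 3, padding=1),
                                    nn.BatchNorm2d(bottleneck), nn.ReLU())
        # 1x1 convolution expands back to the original channel count
        self.stage3 = nn.Sequential(nn.Conv2d(bottleneck, channels, 1),
                                    nn.BatchNorm2d(channels))
        # Third activation function (ReLU) (after adding the shortcut)
        self.act3 = nn.ReLU()

    def forward(self, x: torch.Tensor):
        # `x` is the input of shape `[batch_size, in_channels, height, width]`
        return self.act3(self.stage3(self.stage2(self.stage1(x))) + x)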
@@ -310,7 +310,7 @@ class ResNetBase(Module):
         # Stack the blocks
         self.blocks = nn.Sequential(*blocks)
 
-    def __call__(self, x: torch.Tensor):
+    def forward(self, x: torch.Tensor):
         """
         * `x` has shape `[batch_size, img_channels, height, width]`
         """
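Wrapping the block list in `nn.Sequential` means the stacked blocks can be run with a single call, and each block is invoked through `__call__` (hence `forward`). A minimal sketch, with `nn.Identity` standing in for real residual blocks to keep it self-contained:

import torch
from torch import nn

blocks = [nn.Identity() for _ in range(3)]
seq = nn.Sequential(*blocks)

x = torch.randn(1, 64, 32, 32)  # [batch_size, img_channels, height, width]
y = seq(x)  # nn.Sequential invokes each block via __call__, which calls forward()
assert torch.equal(x, y)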
@@ -106,7 +106,7 @@ class QFuncLoss(Module):
         self.gamma = gamma
         self.huber_loss = nn.SmoothL1Loss(reduction='none')
 
-    def __call__(self, q: torch.Tensor, action: torch.Tensor, double_q: torch.Tensor,
+    def forward(self, q: torch.Tensor, action: torch.Tensor, double_q: torch.Tensor,
                  target_q: torch.Tensor, done: torch.Tensor, reward: torch.Tensor,
                  weights: torch.Tensor) -> Tuple[torch.Tensor, torch.Tensor]:
         """
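The signature reflects a double Q-learning setup: `q` and `double_q` come from the online network (for the current and next state), while `target_q` comes from the target network. A hedged sketch of the core computation, written as a plain function (the reduction and the use of `weights` as per-sample importance weights are assumptions; `done` is assumed to be a 0/1 float mask):

import torch
from torch import nn

huber_loss = nn.SmoothL1Loss(reduction='none')
gamma = 0.99

def q_func_loss(q, action, double_q, target_q, done, reward, weights):
    # Q-value of the action actually taken, from the online network
    q_sampled_action = q.gather(-1, action.unsqueeze(-1)).squeeze(-1)
    with torch.no_grad():
        # Double Q-learning: choose the best next action with the online
        # network, but evaluate it with the target network
        best_next_action = torch.argmax(double_q, -1)
        best_next_q = target_q.gather(-1, best_next_action.unsqueeze(-1)).squeeze(-1)
        # No bootstrapping past terminal states
        q_update = reward + gamma * best_next_q * (1 - done)
        td_error = q_sampled_action - q_update
    # Per-sample Huber loss, weighted (e.g. by prioritized-replay weights)
    losses = weights * huber_loss(q_sampled_action, q_update)
    return td_error, losses.mean()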
@@ -82,7 +82,7 @@ class Model(Module):
             nn.Linear(in_features=256, out_features=4),
         )
 
-    def __call__(self, obs: torch.Tensor):
+    def forward(self, obs: torch.Tensor):
         # Convolution
         h = self.conv(obs)
         # Reshape for linear layers
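The "reshape for linear layers" step collapses the convolutional feature map into one axis per sample. A minimal sketch of the conv -> flatten -> linear pattern, with made-up layer sizes for an 84x84, 4-frame Atari-style observation:

import torch
from torch import nn

conv = nn.Sequential(
    nn.Conv2d(4, 32, kernel_size=8, stride=4), nn.ReLU(),
    nn.Conv2d(32, 64, kernel_size=4, stride=2), nn.ReLU(),
    nn.Conv2d(64, 64, kernel_size=3, stride=1), nn.ReLU(),
)
lin = nn.Sequential(
    nn.Linear(in_features=7 * 7 * 64, out_features=256), nn.ReLU(),
    nn.Linear(in_features=256, out_features=4),
)

obs = torch.randn(8, 4, 84, 84)
h = conv(obs)
# Reshape for linear layers: collapse [channels, height, width] into one axis
h = h.reshape(h.shape[0], -1)
q = lin(h)
assert q.shape == (8, 4)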
@@ -136,7 +136,7 @@ class ClippedPPOLoss(Module):
     def __init__(self):
         super().__init__()
 
-    def __call__(self, log_pi: torch.Tensor, sampled_log_pi: torch.Tensor,
+    def forward(self, log_pi: torch.Tensor, sampled_log_pi: torch.Tensor,
                  advantage: torch.Tensor, clip: float) -> torch.Tensor:
         # ratio $r_t(\theta) = \frac{\pi_\theta (a_t|s_t)}{\pi_{\theta_{OLD}} (a_t|s_t)}$;
         # *this is different from rewards* $r_t$.
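The ratio in the comment is computed in log space for numerical stability, $r_t(\theta) = \exp(\log \pi_\theta - \log \pi_{\theta_{OLD}})$. A sketch of the standard clipped surrogate built on it (a plain-function approximation, not necessarily the exact body of this method):

import torch

def clipped_ppo_loss(log_pi: torch.Tensor, sampled_log_pi: torch.Tensor,
                     advantage: torch.Tensor, clip: float) -> torch.Tensor:
    # ratio r_t(theta) = pi_theta(a_t|s_t) / pi_theta_old(a_t|s_t),
    # computed in log space; *this is different from rewards* r_t
    ratio = torch.exp(log_pi - sampled_log_pi)
    # Pessimistic (element-wise minimum) of the unclipped and clipped terms,
    # negated to turn the surrogate objective into a loss to minimize
    clipped = ratio.clamp(min=1.0 - clip, max=1.0 + clip)
    policy_reward = torch.min(ratio * advantage, clipped * advantage)
    return -policy_reward.mean()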
@@ -200,7 +200,8 @@ class ClippedValueFunctionLoss(Module):
     significantly from $V_{\theta_{OLD}}$.
 
     """
-    def __call__(self, value: torch.Tensor, sampled_value: torch.Tensor, sampled_return: torch.Tensor, clip: float):
+
+    def forward(self, value: torch.Tensor, sampled_value: torch.Tensor, sampled_return: torch.Tensor, clip: float):
         clipped_value = sampled_value + (value - sampled_value).clamp(min=-clip, max=clip)
         vf_loss = torch.max((value - sampled_return) ** 2, (clipped_value - sampled_return) ** 2)
         return 0.5 * vf_loss.mean()
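A worked example of the three lines above (the wrapper function and the numbers are illustrative): with `clip=0.1`, a new value estimate of 1.5 against an old estimate of 1.0 is clipped back to 1.1 before being compared with the return, and the loss takes the larger of the two squared errors.

import torch

def clipped_value_loss(value, sampled_value, sampled_return, clip):
    # Same three lines as in the diff above
    clipped_value = sampled_value + (value - sampled_value).clamp(min=-clip, max=clip)
    vf_loss = torch.max((value - sampled_return) ** 2, (clipped_value - sampled_return) ** 2)
    return 0.5 * vf_loss.mean()

loss = clipped_value_loss(value=torch.tensor([1.5]),
                          sampled_value=torch.tensor([1.0]),
                          sampled_return=torch.tensor([2.0]),
                          clip=0.1)
# max((1.5 - 2)^2, (1.1 - 2)^2) / 2 = max(0.25, 0.81) / 2 = 0.405
assert torch.isclose(loss, torch.tensor(0.405))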
@@ -69,7 +69,7 @@ class Model(Module):
         #
         self.activation = nn.ReLU()
 
-    def __call__(self, obs: torch.Tensor):
+    def forward(self, obs: torch.Tensor):
         h = self.activation(self.conv1(obs))
         h = self.activation(self.conv2(h))
         h = self.activation(self.conv3(h))
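The same rename applies here. Note that a single `nn.ReLU()` instance holds no parameters or state, which is why one `self.activation` can safely be reused after every convolution. A minimal sketch with made-up layer shapes:

import torch
from torch import nn

class Model(nn.Module):
    def __init__(self):
        super().__init__()
        self.conv1 = nn.Conv2d(4, 32, kernel_size=8, stride=4)
        self.conv2 = nn.Conv2d(32, 64, kernel_size=4, stride=2)
        self.conv3 = nn.Conv2d(64, 64, kernel_size=3, stride=1)
        # ReLU is stateless, so one instance can serve every layer
        self.activation = nn.ReLU()

    def forward(self, obs: torch.Tensor):
        h = self.activation(self.conv1(obs))
        h = self.activation(self.conv2(h))
        h = self.activation(self.conv3(h))
        return h

m = Model()
h = m(torch.randn(1, 4, 84, 84))  # goes through __call__ -> forward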