diff --git a/src/torchjd/autogram/_module_hook_manager.py b/src/torchjd/autogram/_module_hook_manager.py
index a88ad983..273131d2 100644
--- a/src/torchjd/autogram/_module_hook_manager.py
+++ b/src/torchjd/autogram/_module_hook_manager.py
@@ -163,7 +163,7 @@ def forward(
         module: nn.Module,
         *xs: Tensor,
     ) -> tuple[Tensor, ...]:
-        return tuple([x.detach() for x in xs])
+        return tuple(x.detach() for x in xs)
 
     # For Python version > 3.10, the type of `inputs` should become
     # tuple[BoolRef, TreeSpec, VJPType, PyTree, GramianAccumulator, nn.Module, *tuple[Tensor, ...]]
diff --git a/src/torchjd/autojac/_transform/_grad.py b/src/torchjd/autojac/_transform/_grad.py
index dae694b6..de8d3e9b 100644
--- a/src/torchjd/autojac/_transform/_grad.py
+++ b/src/torchjd/autojac/_transform/_grad.py
@@ -51,7 +51,7 @@ def _differentiate(self, grad_outputs: Sequence[Tensor]) -> tuple[Tensor, ...]:
             return tuple()
 
         if len(self.outputs) == 0:
-            return tuple([torch.zeros_like(input) for input in self.inputs])
+            return tuple(torch.zeros_like(input) for input in self.inputs)
 
         grads = self._get_vjp(grad_outputs, self.retain_graph)
         return grads