mirror of https://github.com/AntoineHX/smart_augmentation.git (synced 2025-05-04 04:00:46 +02:00)
import higher
import torch as _torch


def detach_(self):
    """Removes all params from their compute graph in place."""
    # Detach tensor-valued entries of each param group (e.g. differentiable
    # hyperparameters such as learning rates stored as tensors), then mark
    # them as requiring grad again so they stay trainable.
    for group in self.param_groups:
        for k, v in group.items():
            if isinstance(v, _torch.Tensor):
                v.detach_().requires_grad_()

    # Detach the optimizer state (e.g. momentum buffers), including any
    # tensor-valued state keys.
    for state_dict in self.state:
        for k, v_dict in state_dict.items():
            if isinstance(k, _torch.Tensor):
                k.detach_().requires_grad_()
            for k2, v2 in v_dict.items():
                if isinstance(v2, _torch.Tensor):
                    v2.detach_().requires_grad_()


# Monkey-patch: higher's DifferentiableOptimizer does not expose detach_,
# so attach it to the class directly.
higher.optim.DifferentiableOptimizer.detach_ = detach_
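

# ---------------------------------------------------------------------------
# Usage sketch (added for illustration; the model, data, and hyperparameters
# below are assumptions, not part of the original repository). It shows the
# intended pattern: run a few differentiable inner-loop steps with higher,
# then call the patched detach_() to truncate backpropagation through those
# earlier steps (truncated backprop through the inner loop).
if __name__ == "__main__":
    import torch.nn.functional as F

    net = _torch.nn.Linear(4, 1)
    inner_opt = _torch.optim.SGD(net.parameters(), lr=0.1, momentum=0.9)
    x, y = _torch.randn(8, 4), _torch.randn(8, 1)

    with higher.innerloop_ctx(net, inner_opt) as (fmodel, diffopt):
        for _ in range(3):
            diffopt.step(F.mse_loss(fmodel(x), y))
        # Cut tensor hyperparameters and optimizer state (e.g. momentum
        # buffers) out of the graph; later meta-gradients stop here.
        diffopt.detach_()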