Mirror of https://github.com/AntoineHX/smart_augmentation.git (synced 2025-05-03 11:40:46 +02:00)
Comments
This commit is contained in: parent a3bf82c7ca, commit 7221142a9a
2 changed files with 24 additions and 13 deletions
@@ -1,21 +1,31 @@
+""" Patch for Higher package
+
+    Recommended use ::
+
+        import higher
+        import higher_patch
+
+    Might become unnecessary with future update of the Higher package.
+"""
 import higher
 import torch as _torch
 
 def detach_(self):
     """Removes all params from their compute graph in place."""
-    # detach param groups
-    for group in self.param_groups:
-        for k, v in group.items():
-            if isinstance(v,_torch.Tensor):
-                v.detach_().requires_grad_()
-    """Removes all params from their compute graph in place.
-
-    # detach state
-    for state_dict in self.state:
-        for k,v_dict in state_dict.items():
-            if isinstance(k,_torch.Tensor): k.detach_().requires_grad_()
-            for k2,v2 in v_dict.items():
-                if isinstance(v2,_torch.Tensor):
-                    v2.detach_().requires_grad_()
-    """
+    # detach param groups
+    for group in self.param_groups:
+        for k, v in group.items():
+            if isinstance(v,_torch.Tensor):
+                v.detach_().requires_grad_()
+
+    # detach state
+    for state_dict in self.state:
+        for k,v_dict in state_dict.items():
+            if isinstance(k,_torch.Tensor): k.detach_().requires_grad_()
+            for k2,v2 in v_dict.items():
+                if isinstance(v2,_torch.Tensor):
+                    v2.detach_().requires_grad_()
 
 higher.optim.DifferentiableOptimizer.detach_ = detach_
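For reference, a minimal usage sketch of the patched method inside an inner loop. This is an illustration, not part of the commit: the toy model and data are placeholders, while higher.innerloop_ctx is the standard way to obtain a DifferentiableOptimizer ::

    import torch
    import higher
    import higher_patch  # importing the module applies the detach_ patch shown above

    model = torch.nn.Linear(10, 2)
    inner_opt = torch.optim.SGD(model.parameters(), lr=0.1)

    with higher.innerloop_ctx(model, inner_opt) as (fmodel, diffopt):
        x = torch.randn(4, 10)            # illustrative batch
        y = torch.randint(0, 2, (4,))
        loss = torch.nn.functional.cross_entropy(fmodel(x), y)
        diffopt.step(loss)                # differentiable inner step
        diffopt.detach_()                 # patched: detach params and optimizer state in place

Calling detach_() cuts the optimizer's parameters and state out of the autograd graph, truncating backpropagation through earlier inner steps.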
@@ -254,6 +254,7 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=1, dataug_epoch_start
     for epoch in range(1, epochs+1):
         t0 = time.perf_counter()
 
+        #Cross-Validation
         #dl_train, dl_val = cvs.next_split()
         #dl_val_it = iter(dl_val)
 