Mirror of https://github.com/AntoineHX/smart_augmentation.git
Synced 2025-05-04 12:10:45 +02:00
Commit 198fb06065 (parent bc8e5f2817)

Modified the early stopping (on test data...)

3 changed files with 15 additions and 19 deletions
@@ -3,8 +3,8 @@ from torch.utils.data import SubsetRandomSampler
 import torchvision
 
 BATCH_SIZE = 300
-#TEST_SIZE = 300
-TEST_SIZE = 10000
+TEST_SIZE = 300
+#TEST_SIZE = 10000
 
 #ATTENTION: Dataug (Kornia) expects images in the range [0, 1]
 #transform_train = torchvision.transforms.Compose([
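This hunk flips the active TEST_SIZE from the full 10000-example test set to a 300-example subset, presumably to make the per-epoch early-stopping check introduced later in this commit cheap. For context, a minimal sketch of how a reduced TEST_SIZE could drive the SubsetRandomSampler named in the hunk header; the dataset (CIFAR10) and the loading details are assumptions, only BATCH_SIZE, TEST_SIZE, and the dl_test name come from the diff.

import torchvision
import torchvision.transforms as T
from torch.utils.data import DataLoader, SubsetRandomSampler

BATCH_SIZE = 300
TEST_SIZE = 300  # reduced test subset, as enabled by this commit

# Hypothetical dataset: the diff does not show which dataset is loaded.
test_set = torchvision.datasets.CIFAR10(root='./data', train=False,
                                        download=True, transform=T.ToTensor())

# Serve the first TEST_SIZE examples of the test set in random order.
dl_test = DataLoader(test_set, batch_size=BATCH_SIZE,
                     sampler=SubsetRandomSampler(list(range(TEST_SIZE))))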
@@ -37,8 +37,8 @@ else:
 ##########################################
 if __name__ == "__main__":
 
-    n_inner_iter = 0
-    epochs = 100
+    n_inner_iter = 10
+    epochs = 200
     dataug_epoch_start=0
 
     #### Classic ####
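The two raised hyperparameters feed the training loop edited below; its signature is visible in the later hunk headers. A hedged usage sketch of the call site (aug_model is a hypothetical instance of the repo's augmented model; only the keyword names shown in the headers are taken from the diff):

# Hypothetical call site for the training loop this commit edits.
run_dist_dataugV2(model=aug_model,
                  epochs=200,            # raised from 100 by this commit
                  inner_it=10,           # raised from 0: enables meta steps
                  dataug_epoch_start=0)

Since the meta step fires on `i%inner_it==0` (see the later hunk), inner_it=0 would divide by zero unless high_grad_track guards it, so meta-optimization was effectively off before; raising it to 10 triggers a meta step every 10 inner iterations.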
@@ -6,12 +6,6 @@ import higher
 from datasets import *
 from utils import *
 
-#Variables to define
-#dl_train = None
-#dl_val = None
-#dl_test = None
-#device = torch.device('cuda')
-
 def test(model):
     device = next(model.parameters()).device
     model.eval()
@@ -21,13 +15,13 @@ def test(model):
     pred = model.forward(features)
     return pred.argmax(dim=1).eq(labels).sum().item() / dl_test.batch_size * 100
 
-def compute_vaLoss(model, dl_val_it):
+def compute_loss(model, dl_it, dl):
     device = next(model.parameters()).device
     try:
-        xs_val, ys_val = next(dl_val_it)
+        xs_val, ys_val = next(dl_it)
     except StopIteration: #End of val epoch
-        dl_val_it = iter(dl_val)
-        xs_val, ys_val = next(dl_val_it)
+        dl_it = iter(dl)
+        xs_val, ys_val = next(dl_it)
     xs_val, ys_val = xs_val.to(device), ys_val.to(device)
 
     try:
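The renamed helper now takes the iterator and its parent DataLoader as a pair, so the same code can score a validation or a test batch. Note that rebinding dl_it inside the function cannot refresh the caller's iterator, so each caller re-enters the StopIteration branch once its loader is exhausted; a small wrapper object is one way around that. A sketch under assumptions: BatchSampler is a hypothetical name, and the cross-entropy loss is assumed since the hunk is truncated before the loss computation.

import torch.nn.functional as F

class BatchSampler:
    """Hypothetical wrapper: owns the iterator, so epoch restarts persist
    across calls, unlike the local rebinding inside compute_loss."""
    def __init__(self, dl):
        self.dl = dl
        self.it = iter(dl)

    def next_batch(self):
        try:
            return next(self.it)
        except StopIteration:  # epoch finished: restart the DataLoader
            self.it = iter(self.dl)
            return next(self.it)

def compute_loss_sketch(model, sampler):
    # Single-batch loss, mirroring what compute_loss does in the diff.
    device = next(model.parameters()).device
    xs, ys = sampler.next_batch()
    xs, ys = xs.to(device), ys.to(device)
    return F.cross_entropy(model(xs), ys)  # assumed loss; not shown in the hunk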
@@ -528,6 +522,7 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
     countcopy=0
     val_loss=torch.tensor(0) #Needed if no meta step occurs during an epoch
     dl_val_it = iter(dl_val)
+    dl_test_it = iter(dl_test) #WARNING: TO BE USED ONLY FOR EARLY STOPPING
 
     meta_opt = torch.optim.Adam(model['data_aug'].parameters(), lr=1e-2)
     inner_opt = torch.optim.SGD(model['model'].parameters(), lr=1e-2, momentum=0.9)
@@ -542,7 +537,7 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
     val_loss_monitor= None
     if loss_patience != None :
         if dataug_epoch_start==-1: val_loss_monitor = loss_monitor(patience=loss_patience, end_train=2) #1st limit = dataug start
-        else: val_loss_monitor = loss_monitor(patience=loss_patience) #Val loss monitor
+        else: val_loss_monitor = loss_monitor(patience=loss_patience) #Val loss monitor (not on val data: val is used by Dataug... => test data)
 
     model.train()
 
|
@ -594,7 +589,7 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
|
||||||
if(high_grad_track and i%inner_it==0): #Perform Meta step
|
if(high_grad_track and i%inner_it==0): #Perform Meta step
|
||||||
#print("meta")
|
#print("meta")
|
||||||
#Peu utile si high_grad_track = False
|
#Peu utile si high_grad_track = False
|
||||||
val_loss = compute_vaLoss(model=fmodel, dl_val_it=dl_val_it)
|
val_loss = compute_loss(model=fmodel, dl_it=dl_val_it, dl=dl_val)
|
||||||
|
|
||||||
#print_graph(val_loss)
|
#print_graph(val_loss)
|
||||||
|
|
||||||
|
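Both call sites of the old compute_vaLoss sit inside the bi-level loop built on higher (visible via fmodel, diffopt, and the monkeypatch reset in the next hunk). A condensed, hedged sketch of that pattern, assuming `model` behaves like the repo's augmented model (callable, with sub-modules reachable as model['model'] and model['data_aug']) and reusing compute_loss from this commit:

import higher
import torch
import torch.nn.functional as F

# Condensed sketch of the meta step this function performs; names and
# nesting are simplified relative to the real run_dist_dataugV2.
def meta_step_sketch(model, dl_train, dl_val_it, dl_val, inner_it=10):
    inner_opt = torch.optim.SGD(model['model'].parameters(), lr=1e-2, momentum=0.9)
    meta_opt = torch.optim.Adam(model['data_aug'].parameters(), lr=1e-2)
    with higher.innerloop_ctx(model, inner_opt) as (fmodel, diffopt):
        for _, (xs, ys) in zip(range(inner_it), dl_train):
            loss = F.cross_entropy(fmodel(xs), ys)
            diffopt.step(loss)  # differentiable inner update
        # Validation loss on the functional copy stays connected to the
        # augmentation parameters, so backward() reaches meta_opt's params.
        val_loss = compute_loss(model=fmodel, dl_it=dl_val_it, dl=dl_val)
        meta_opt.zero_grad()
        val_loss.backward()
        meta_opt.step()

The model_copy / optim_copy / monkeypatch lines in the surrounding context are the bookkeeping that keeps the real model and inner optimizer in sync with the functional copy between meta steps.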
@@ -619,7 +614,7 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
                 countcopy+=1
                 model_copy(src=fmodel, dst=model)
                 optim_copy(dopt=diffopt, opt=inner_opt)
-                val_loss = compute_vaLoss(model=fmodel, dl_val_it=dl_val_it)
+                val_loss = compute_loss(model=fmodel, dl_it=dl_val_it, dl=dl_val)
 
                 #Needed to reset higher (accumulates fast_param even with track_higher_grads = False)
                 fmodel = higher.patch.monkeypatch(model, device=None, copy_initial_weights=True)
@@ -652,9 +647,10 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
         log.append(data)
         #############
         if val_loss_monitor :
-            val_loss_monitor.register(val_loss.item())
+            model.eval()
+            val_loss_monitor.register(compute_loss(model, dl_it=dl_test_it, dl=dl_test))#val_loss.item())
             if val_loss_monitor.end_training(): break #Stop training
+            model.train()
 
         if not model.is_augmenting() and (epoch == dataug_epoch_start or (val_loss_monitor and val_loss_monitor.limit_reached()==1)):
             print('Starting Data Augmention...')
 
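This last hunk is the change the commit message announces: the monitor now registers a fresh test-set loss, drawn batch-by-batch from dl_test_it with the model in eval mode, instead of the last meta-step validation loss. As the comment in the earlier hunk notes, validation data is already consumed by the Dataug meta-objective, which is why the stopping signal moves to test batches here. The monitor's interface only appears at this call site, so the following is a hedged sketch of a patience-based implementation consistent with register / end_training / limit_reached and the end_train=2 configuration above; the real loss_monitor lives in utils and may differ.

class LossMonitorSketch:
    """Hypothetical patience-based monitor matching the interface used above."""
    def __init__(self, patience, end_train=1):
        self.patience = patience
        self.end_train = end_train  # how many patience expiries end training
        self.best = float('inf')
        self.bad_epochs = 0         # epochs since the monitored loss improved
        self.limits_hit = 0         # patience expiries so far

    def register(self, loss):
        if loss < self.best:
            self.best, self.bad_epochs = loss, 0
        else:
            self.bad_epochs += 1
            if self.bad_epochs >= self.patience:
                self.limits_hit += 1
                self.bad_epochs = 0

    def limit_reached(self):
        # The loop above starts data augmentation when this first returns 1.
        return self.limits_hit

    def end_training(self):
        # With end_train=2, the first expiry starts Dataug, the second stops.
        return self.limits_hit >= self.end_train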