Mirror of https://github.com/AntoineHX/smart_augmentation.git (synced 2025-05-04 20:20:46 +02:00)
Test modification to simplify early stopping
This commit is contained in:
parent 198fb06065
commit f0c0559e73
4 changed files with 49 additions and 29 deletions
@@ -3,8 +3,8 @@ from torch.utils.data import SubsetRandomSampler
 import torchvision
 
 BATCH_SIZE = 300
-TEST_SIZE = 300
-#TEST_SIZE = 10000
+#TEST_SIZE = 300
+TEST_SIZE = 10000
 
 #NOTE: Dataug (Kornia) expects images in the range [0, 1]
 #transform_train = torchvision.transforms.Compose([
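Swapping the active line to TEST_SIZE = 10000 evaluates on the full test split (presumably CIFAR-10's 10,000 test images, given LeNet(3,10) further down) instead of a 300-sample subset. A hedged sketch of how TEST_SIZE is plausibly consumed, since the loader setup sits outside this hunk (data_test is a hypothetical name):

    from torch.utils.data import SubsetRandomSampler, DataLoader

    # Sample TEST_SIZE indices from the test set; with TEST_SIZE = 10000
    # on CIFAR-10 this covers the whole split.
    test_sampler = SubsetRandomSampler(list(range(TEST_SIZE)))
    dl_test = DataLoader(data_test, batch_size=BATCH_SIZE, sampler=test_sampler)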
@@ -553,9 +553,10 @@ class Augmented_model(nn.Module):
             mode=self._data_augmentation
         self._mods['data_aug'].augment(mode)
         super(Augmented_model, self).train(mode)
+        return self
 
     def eval(self):
-        self.train(mode=False)
+        return self.train(mode=False)
         #super(Augmented_model, self).eval()
 
     def items(self):
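Having train() end with return self (and eval() forward its result) matches torch.nn.Module, whose train()/eval() also return the module, and keeps call chaining intact. A minimal sketch of what that enables, using names from this commit:

    # nn.Module.train()/eval() return the module itself; overrides should too,
    # otherwise the chained call below would bind aug_model to None.
    aug_model = Augmented_model(Data_augV4(TF_dict=tf_dict, N_TF=2, mix_dist=0.0),
                                LeNet(3,10)).to(device).eval()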
@@ -65,6 +65,7 @@ if __name__ == "__main__":
     '''
     #### Augmented Model ####
     #'''
+    t0 = time.process_time()
     tf_dict = {k: TF.TF_dict[k] for k in tf_names}
     #tf_dict = TF.TF_dict
     aug_model = Augmented_model(Data_augV4(TF_dict=tf_dict, N_TF=2, mix_dist=0.0), LeNet(3,10)).to(device)
@@ -77,10 +78,12 @@ if __name__ == "__main__":
     print('-'*9)
     times = [x["time"] for x in log]
     out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times)), "Device": device_name, "Param_names": aug_model.TF_names(), "Log": log}
-    print(str(aug_model),": acc", out["Accuracy"], "in (ms):", out["Time"][0], "+/-", out["Time"][1])
+    print(str(aug_model),": acc", out["Accuracy"], "in (s ?):", out["Time"][0], "+/-", out["Time"][1])
     with open("res/log/%s.json" % "{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter), "w+") as f:
         json.dump(out, f, indent=True)
         print('Log :\"',f.name, '\" saved !')
 
+    print('Execution Time : %.00f (s ?)'%(time.process_time() - t0))
     print('-'*9)
     #'''
     #### TF number tests ####
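The relabeling from "(ms)" to "(s ?)" points the right way: time.process_time() returns CPU time as a float in seconds, so the old millisecond label overstated speed by a factor of 1000. A quick stdlib-only illustration:

    import time

    t0 = time.process_time()
    sum(i * i for i in range(10**6))      # some CPU-bound work
    elapsed = time.process_time() - t0    # fractional CPU seconds, not ms
    print('Execution Time : %.02f s' % elapsed)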
@@ -9,26 +9,43 @@ from utils import *
 def test(model):
     device = next(model.parameters()).device
     model.eval()
-    for i, (features, labels) in enumerate(dl_test):
-        features,labels = features.to(device), labels.to(device)
+    #for i, (features, labels) in enumerate(dl_test):
+    #    features,labels = features.to(device), labels.to(device)
 
-        pred = model.forward(features)
-        return pred.argmax(dim=1).eq(labels).sum().item() / dl_test.batch_size * 100
+    #    pred = model.forward(features)
+    #    return pred.argmax(dim=1).eq(labels).sum().item() / dl_test.batch_size * 100
 
-def compute_loss(model, dl_it, dl):
+    correct = 0
+    total = 0
+    loss = []
+    with torch.no_grad():
+        for features, labels in dl_test:
+            features,labels = features.to(device), labels.to(device)
+
+            outputs = model(features)
+            _, predicted = torch.max(outputs.data, 1)
+            total += labels.size(0)
+            correct += (predicted == labels).sum().item()
+
+            loss.append(F.cross_entropy(outputs, labels).item())
+
+    accuracy = 100 * correct / total
+
+    return accuracy, np.mean(loss)
+
+def compute_vaLoss(model, dl_it, dl):
     device = next(model.parameters()).device
     try:
-        xs_val, ys_val = next(dl_it)
+        xs, ys = next(dl_it)
     except StopIteration: #End of the val epoch
-        dl_val_it = iter(dl)
-        xs_val, ys_val = next(dl_it)
-    xs_val, ys_val = xs_val.to(device), ys_val.to(device)
+        dl_it = iter(dl)
+        xs, ys = next(dl_it)
+    xs, ys = xs.to(device), ys.to(device)
 
-    try:
-        model.augment(mode=False) #Validation without transformations!
-    except:
-        pass
-    return F.cross_entropy(model(xs_val), ys_val)
+    model.eval() #Validation without transformations!
+
+    return F.cross_entropy(model(xs), ys)
 
 def train_classic(model, epochs=1):
     device = next(model.parameters()).device
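Beyond simplification, the rewritten test() fixes a real bug: the old loop returned inside its first iteration and divided by dl_test.batch_size, so it scored a single batch and mis-scored a short final one. The accumulation pattern now in place, reduced to its core (standard PyTorch, assuming dl_test yields (features, labels) pairs):

    correct, total = 0, 0
    with torch.no_grad():                 # no autograd bookkeeping while evaluating
        for features, labels in dl_test:
            preds = model(features.to(device)).argmax(dim=1)
            total += labels.size(0)       # count actual samples, not batch_size
            correct += (preds == labels.to(device)).sum().item()
    accuracy = 100 * correct / total      # robust to a short final batch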
@@ -61,7 +78,7 @@ def train_classic(model, epochs=1):
         xs_val, ys_val = xs_val.to(device), ys_val.to(device)
 
         val_loss = F.cross_entropy(model(xs_val), ys_val)
-        accuracy=test(model)
+        accuracy, _ =test(model)
         model.train()
         #### Log ####
         data={
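Since test() now returns an (accuracy, loss) pair, every call site has to unpack it; the same one-line change repeats in train_classic_higher, train_classic_tests, run_simple_dataug and run_dist_dataug below. The two variants used in this commit:

    accuracy, _ = test(model)           # loss discarded where only accuracy is logged
    accuracy, test_loss = test(model)   # loss kept where early stopping consumes it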
@@ -120,7 +137,7 @@ def train_classic_higher(model, epochs=1):
         xs_val, ys_val = xs_val.to(device), ys_val.to(device)
 
         val_loss = F.cross_entropy(model(xs_val), ys_val)
-        accuracy=test(model)
+        accuracy, _ =test(model)
         model.train()
         #### Log ####
         data={
@@ -256,7 +273,7 @@ def train_classic_tests(model, epochs=1):
         xs_val, ys_val = xs_val.to(device), ys_val.to(device)
 
         val_loss = F.cross_entropy(model(xs_val), ys_val)
-        accuracy=test(model)
+        accuracy, _ =test(model)
         model.train()
         #### Log ####
         data={
@@ -309,7 +326,7 @@ def run_simple_dataug(inner_it, epochs=1):
     dl_train_it = iter(dl_train)
     xs, ys = next(dl_train_it)
 
-    accuracy=test(aug_model)
+    accuracy, _ =test(model)
     aug_model.train()
 
     #### Print ####
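One call site worth flagging: the old line tested aug_model, while the new one reads test(model). If run_simple_dataug only defines aug_model in that scope, the intended unpacking form would presumably be the following (a guess; the commit as recorded calls test(model)):

    accuracy, _ = test(aug_model)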
@@ -426,7 +443,7 @@ def run_dist_dataug(model, epochs=1, inner_it=1, dataug_epoch_start=0):
         #viz_sample_data(imgs=xs, labels=ys, fig_name='samples/data_sample_epoch{}_noTF'.format(epoch))
         #viz_sample_data(imgs=aug_model['data_aug'](xs), labels=ys, fig_name='samples/data_sample_epoch{}'.format(epoch))
 
-        accuracy=test(model)
+        accuracy, _ =test(model)
         model.train()
 
         #### Print ####
@@ -522,7 +539,6 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
     countcopy=0
     val_loss=torch.tensor(0) #Needed if no meta step happens during the epoch
     dl_val_it = iter(dl_val)
-    dl_test_it = iter(dl_test) #WARNING: USE ONLY FOR EARLY STOPPING
 
     meta_opt = torch.optim.Adam(model['data_aug'].parameters(), lr=1e-2)
     inner_opt = torch.optim.SGD(model['model'].parameters(), lr=1e-2, momentum=0.9)
@@ -589,7 +605,7 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
             if(high_grad_track and i%inner_it==0): #Perform Meta step
                 #print("meta")
                 #Of little use if high_grad_track = False
-                val_loss = compute_loss(model=fmodel, dl_it=dl_val_it, dl=dl_val)
+                val_loss = compute_vaLoss(model=fmodel, dl_it=dl_val_it, dl=dl_val)
 
                 #print_graph(val_loss)
 
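The rename to compute_vaLoss separates the cheap single-batch validation loss used by the meta step from the full-loader test loss now computed inside test(). One subtlety survives the rename: inside the helper, dl_it = iter(dl) rebinds only the local name, so the caller's dl_val_it remains exhausted and each later call rebuilds an iterator and reads its first batch. A sketch of a variant that hands the live iterator back, hypothetical and not part of this commit:

    def compute_vaLoss_v2(model, dl_it, dl):
        device = next(model.parameters()).device
        try:
            xs, ys = next(dl_it)
        except StopIteration:     # validation loader exhausted
            dl_it = iter(dl)      # fresh iterator...
            xs, ys = next(dl_it)
        xs, ys = xs.to(device), ys.to(device)
        model.eval()              # validate without augmentations
        return F.cross_entropy(model(xs), ys), dl_it   # ...returned so the caller can keep it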
@@ -614,20 +630,20 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
             countcopy+=1
             model_copy(src=fmodel, dst=model)
             optim_copy(dopt=diffopt, opt=inner_opt)
-            val_loss = compute_loss(model=fmodel, dl_it=dl_val_it, dl=dl_val)
+            val_loss = compute_vaLoss(model=fmodel, dl_it=dl_val_it, dl=dl_val)
 
             #Needed to reset higher (fast_param accumulates even with track_higher_grads = False)
             fmodel = higher.patch.monkeypatch(model, device=None, copy_initial_weights=True)
             diffopt = higher.optim.get_diff_optim(inner_opt, model.parameters(),fmodel=fmodel, track_higher_grads=high_grad_track)
 
-        accuracy=test(model)
+        accuracy, test_loss =test(model)
         model.train()
 
         #### Print ####
         if(print_freq and epoch%print_freq==0):
             print('-'*9)
             print('Epoch : %d/%d'%(epoch,epochs))
-            print('Time : %.00f ms'%(tf - t0))
+            print('Time : %.00f s'%(tf - t0))
             print('Train loss :',loss.item(), '/ val loss', val_loss.item())
             print('Accuracy :', accuracy)
             print('Data Augmention : {} (Epoch {})'.format(model._data_augmentation, dataug_epoch_start))
@@ -648,7 +664,7 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
         #############
         if val_loss_monitor :
             model.eval()
-            val_loss_monitor.register(compute_loss(model, dl_it=dl_test_it, dl=dl_test))#val_loss.item())
+            val_loss_monitor.register(test_loss)#val_loss.item())
             if val_loss_monitor.end_training(): break #Stop training
             model.train()
 
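This final hunk is the point of the commit: the monitor reuses the test_loss already computed by test(model) instead of pulling fresh batches through the dedicated dl_test_it iterator deleted above, so the test set is evaluated exactly once per epoch. Only register() and end_training() are visible here; a minimal patience-based monitor consistent with that usage, hypothetical and for illustration only:

    class LossMonitor:
        # Stand-in for val_loss_monitor: stop once the registered loss
        # has not improved for `patience` consecutive epochs.
        def __init__(self, patience=5):
            self.patience = patience
            self.best = float('inf')
            self.bad_epochs = 0

        def register(self, loss):
            if loss < self.best:
                self.best, self.bad_epochs = loss, 0
            else:
                self.bad_epochs += 1

        def end_training(self):
            return self.bad_epochs >= self.patience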