Mirror of https://github.com/AntoineHX/smart_augmentation.git (synced 2025-05-04 12:10:45 +02:00)
Brutus bis

This commit is contained in:
parent b67ec3c469
commit 2fe5070b09

5 changed files with 49 additions and 35 deletions

@@ -6,6 +6,10 @@ BATCH_SIZE = 300
 TEST_SIZE = 300
 #TEST_SIZE = 10000 #slightly faster / more memory use!
 
+download_data=False
+num_workers=4 #4
+pin_memory=False #True: more GPU memory / slower
+
 #ATTENTION: Dataug (Kornia) expects images in the range [0, 1]
 #transform_train = torchvision.transforms.Compose([
 #    torchvision.transforms.RandomHorizontalFlip(),
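For context, these three flags are standard torch.utils.data.DataLoader arguments. A minimal sketch of how they would typically be consumed; the CIFAR10 dataset choice is an assumption for illustration, not taken from this commit:

    import torchvision
    from torch.utils.data import DataLoader

    BATCH_SIZE = 300
    download_data = False
    num_workers = 4      # worker subprocesses loading batches in parallel (0 = main process)
    pin_memory = False   # True: page-locked host RAM, faster CPU-to-GPU copies but more memory

    # Assumed dataset; the data must already be on disk since download_data=False.
    data_train = torchvision.datasets.CIFAR10("./data", train=True, download=download_data,
                                              transform=torchvision.transforms.ToTensor())
    dl_train = DataLoader(data_train, batch_size=BATCH_SIZE, shuffle=True,
                          num_workers=num_workers, pin_memory=pin_memory)
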
@@ -34,10 +38,6 @@ from PIL import Image
 import augmentation_transforms
 import numpy as np
 
-download_data=False
-num_workers=4 #16
-pin_memory=False
-
 class AugmentedDataset(VisionDataset):
     def __init__(self, root, train=True, transform=None, target_transform=None, download=False, subset=None):
 
@@ -569,8 +569,8 @@ class Data_augV5(nn.Module): #Joint optimization (mag, proba)
             "mix_dist": nn.Parameter(torch.tensor(mix_dist).clamp(min=0.0,max=0.999))
         })
 
-        for tf in TF.TF_no_grad :
-            if tf in self._TF: self._params['mag'].data[self._TF.index(tf)]=float(TF.PARAMETER_MAX) #TF fixed at max parameter
+        #for tf in TF.TF_no_grad :
+        #    if tf in self._TF: self._params['mag'].data[self._TF.index(tf)]=float(TF.PARAMETER_MAX) #TF fixed at max parameter
         #for t in TF.TF_no_mag: self._params['mag'][self._TF.index(t)].data-=self._params['mag'][self._TF.index(t)].data #Mag useless for the ignore_mag TFs
 
         #Mag regularisation
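For intuition, Data_augV5 keeps its augmentation hyper-parameters (probabilities, magnitudes, mixing weight) in a learnable nn.ParameterDict, which the mix_dist line above initializes. A standalone sketch of the pattern, with hypothetical transform names and the same clamp-and-pin moves as the hunk:

    import torch
    import torch.nn as nn

    tf_list = ['Identity', 'FlipLR', 'Contrast']  # hypothetical TF names
    PARAMETER_MAX = 1.0                           # assumed scale of TF magnitudes

    params = nn.ParameterDict({
        "prob": nn.Parameter(torch.ones(len(tf_list)) / len(tf_list)),  # uniform start
        "mag": nn.Parameter(torch.full((len(tf_list),), 0.5)),
        "mix_dist": nn.Parameter(torch.tensor(0.8).clamp(min=0.0, max=0.999)),
    })

    # Pinning the magnitude of a non-differentiable TF to its max,
    # as the (now commented-out) TF_no_grad loop did:
    params['mag'].data[tf_list.index('FlipLR')] = float(PARAMETER_MAX)
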
@@ -100,7 +100,7 @@ if __name__ == "__main__":
     dataug_epoch_starts= [0]
     tf_dict = {k: TF.TF_dict[k] for k in tf_names}
     TF_nb = [len(tf_dict)] #range(10,len(TF.TF_dict)+1) #[len(TF.TF_dict)]
-    N_seq_TF= [2, 3, 4]
+    N_seq_TF= [4, 3, 2]
     mag_setup = [(True,True), (False, False)] #(Fixed, Shared)
     #prob_setup = [True, False]
     nb_run= 3
@@ -130,12 +130,12 @@ if __name__ == "__main__":
     #aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=2), model).to(device)
 
     print("{} on {} for {} epochs - {} inner_it".format(str(aug_model), device_name, epochs, n_inner_iter))
-    log= run_dist_dataugV2(model=aug_model,
+    log= run_dist_dataugV3(model=aug_model,
         epochs=epochs,
         inner_it=n_inner_iter,
         dataug_epoch_start=dataug_epoch_start,
         opt_param=optim_param,
-        print_freq=20,
+        print_freq=50,
         KLdiv=True,
         loss_patience=None)
 
@@ -146,8 +146,17 @@ if __name__ == "__main__":
     out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param, "Device": device_name, "Param_names": aug_model.TF_names(), "Log": log}
     print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
     filename = "{}-{} epochs (dataug:{})- {} in_it-{}".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter, run)
-    with open(res_folder+"log/%s.json" % filename, "w+") as f:
-        json.dump(out, f, indent=True)
-        print('Log :\"',f.name, '\" saved !')
+    with open("res/log/%s.json" % filename, "w+") as f:
+        try:
+            json.dump(out, f, indent=True)
+            print('Log :\"',f.name, '\" saved !')
+        except:
+            print("Failed to save logs :",f.name)
+    try:
+        plot_resV2(log, fig_name="res/"+filename, param_names=aug_model.TF_names())
+    except:
+        print("Failed to plot res")
 
     print('Execution Time : %.00f '%(exec_time))
     print('-'*9)
     #'''
 
@@ -34,18 +34,18 @@ tf_names = [
     #'=Posterize',
     #'=Solarize',
 
-    'BShearX',
-    'BShearY',
-    'BTranslateX-',
-    'BTranslateX-',
-    'BTranslateY',
-    'BTranslateY-',
+    #'BShearX',
+    #'BShearY',
+    #'BTranslateX-',
+    #'BTranslateX-',
+    #'BTranslateY',
+    #'BTranslateY-',
 
-    'BadContrast',
-    'BadBrightness',
+    #'BadContrast',
+    #'BadBrightness',
 
-    'Random',
-    'RandBlend'
+    #'Random',
+    #'RandBlend'
     #Not functional
     #'Auto_Contrast', #Not optimized for batches (very slow)
     #'Equalize',
@@ -171,7 +171,7 @@ if __name__ == "__main__":
     t0 = time.process_time()
 
     tf_dict = {k: TF.TF_dict[k] for k in tf_names}
-    aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=3, mix_dist=1.0, fixed_prob=False, fixed_mag=False, shared_mag=False), model).to(device)
+    aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=2, mix_dist=0.8, fixed_prob=False, fixed_mag=False, shared_mag=False), model).to(device)
     #aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=2), model).to(device)
 
     print("{} on {} for {} epochs - {} inner_it".format(str(aug_model), device_name, epochs, n_inner_iter))
@@ -190,7 +190,7 @@ if __name__ == "__main__":
     times = [x["time"] for x in log]
     out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param, "Device": device_name, "Param_names": aug_model.TF_names(), "Log": log}
     print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
-    filename = "{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter)+"-opt_hp"
+    filename = "{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter)
     with open("res/log/%s.json" % filename, "w+") as f:
         try:
             json.dump(out, f, indent=True)
 
@@ -829,9 +829,9 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=0, dataug_epoch_start
    dl_val_it = iter(dl_val)
 
    high_grad_track = True
-   if inner_it == 0:
+   if inner_it == 0: #No HP optimization
        high_grad_track=False
-   if dataug_epoch_start!=0:
+   if dataug_epoch_start!=0: #Deferred data augmentation
        model.augment(mode=False)
        high_grad_track = False
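high_grad_track decides whether the inner training loop keeps the computation graph needed for meta-gradients. Assuming the higher library (which provides the diffopt object seen later in this file), the flag maps onto track_higher_grads; a hedged sketch, with model, inner_opt and batches as placeholders:

    import torch
    import higher

    def inner_steps(model, inner_opt, batches, high_grad_track):
        # When high_grad_track is False (no HP optimization, or augmentation
        # deferred), the inner loop runs without storing the meta-graph.
        with higher.innerloop_ctx(model, inner_opt,
                                  track_higher_grads=high_grad_track) as (fmodel, diffopt):
            for xs, ys in batches:
                loss = torch.nn.functional.cross_entropy(fmodel(xs), ys)
                diffopt.step(loss)
            return fmodel
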
@@ -874,6 +874,7 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=0, dataug_epoch_start
 
         else:
             #KL div method
+            # Supervised loss (classic)
             if model._data_augmentation :
                 model.augment(mode=False)
                 sup_logits = model(xs)
@@ -883,6 +884,7 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=0, dataug_epoch_start
             log_sup=F.log_softmax(sup_logits, dim=1)
             loss = F.cross_entropy(log_sup, ys)
 
+            # Unsupervised loss (KLdiv)
             if model._data_augmentation:
                 aug_logits = model(xs)
                 log_aug=F.log_softmax(aug_logits, dim=1)
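The two comments added in this hunk label the KLdiv=True objective: standard cross-entropy on un-augmented images plus a KL-divergence term pulling augmented predictions toward the clean ones. A minimal sketch of that combination; the function name and the weighting factor are assumptions, not the repository's exact formula:

    import torch.nn.functional as F

    def kldiv_objective(sup_logits, aug_logits, ys, aug_weight=1.0):
        # Supervised loss (classic) on un-augmented logits
        sup_loss = F.cross_entropy(sup_logits, ys)
        # Unsupervised loss (KLdiv): match augmented predictions to clean ones
        log_aug = F.log_softmax(aug_logits, dim=1)
        sup_prob = F.softmax(sup_logits.detach(), dim=1)  # target distribution, gradient blocked
        return sup_loss + aug_weight * F.kl_div(log_aug, sup_prob, reduction='batchmean')
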
@@ -916,21 +918,22 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=0, dataug_epoch_start
                torch.nn.utils.clip_grad_norm_(model['data_aug'].parameters(), max_norm=10, norm_type=2) #Prevent exploding grad with RNN
 
                meta_opt.step()
-               model['data_aug'].adjust_param(soft=True) #Constraint: sum(proba)=1
 
+               #Adjust Hyper-parameters
+               model['data_aug'].adjust_param(soft=True) #Constraint: sum(proba)=1
                if hp_opt:
                    for param_group in diffopt.param_groups:
                        for param in list(opt_param['Inner'].keys())[1:]:
                            param_group[param].data = param_group[param].data.clamp(min=1e-4)
 
                #Reset gradients
                diffopt.detach_()
                model['model'].detach_()
 
                meta_opt.zero_grad()
 
            tf = time.process_time()
 
-           if save_sample:
+           if save_sample: #Data sample saving
               try:
                   viz_sample_data(imgs=xs, labels=ys, fig_name='samples/data_sample_epoch{}_noTF'.format(epoch))
                   viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='samples/data_sample_epoch{}'.format(epoch))
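The hp_opt branch keeps the meta-learned inner-optimizer hyper-parameters in a valid range by clamping them after each meta step. A standalone sketch of the same guard; the dict layout mimics opt_param['Inner'] from the hunk, but the concrete keys and values are assumptions:

    import torch

    # Hyper-parameters kept as tensors so the meta objective can differentiate them.
    inner_hp = {'lr': torch.tensor(1e-2), 'momentum': torch.tensor(0.9)}

    # Mirror of list(opt_param['Inner'].keys())[1:]: skip the first key and
    # clamp the rest so a meta step can never push them to zero or below.
    for name in list(inner_hp.keys())[1:]:
        inner_hp[name].data = inner_hp[name].data.clamp(min=1e-4)
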
@@ -939,10 +942,10 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=0, dataug_epoch_start
                   pass
 
 
-        if(not val_loss):
+        if(not val_loss): #Compute val loss for logs
            val_loss = compute_vaLoss(model=model, dl_it=dl_val_it, dl=dl_val)
 
 
        # Test model
        accuracy, test_loss =test(model)
        model.train()
 
@@ -956,8 +959,7 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=0, dataug_epoch_start
            "time": tf - t0,
 
            "mix_dist": model['data_aug']['mix_dist'].item(),
-           "param": param, #if isinstance(model['data_aug'], Data_augV5)
-           #else [p.item() for p in model['data_aug']['prob']],
+           "param": param,
        }
        if hp_opt : data["opt_param"]=[{'lr': p_grp['lr'].item(), 'momentum': p_grp['momentum'].item()} for p_grp in diffopt.param_groups]
        log.append(data)
@@ -981,12 +983,15 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=0, dataug_epoch_start
            for param_group in diffopt.param_groups:
                print('Opt param - lr:', param_group['lr'].item(),'- momentum:', param_group['momentum'].item())
        #############
 
+       #Deferred data augmentation
        if not model.is_augmenting() and (epoch == dataug_epoch_start):
            print('Starting Data Augmentation...')
            dataug_epoch_start = epoch
            model.augment(mode=True)
            if inner_it != 0: high_grad_track = True
 
+       #Data sample saving
        try:
            viz_sample_data(imgs=xs, labels=ys, fig_name='samples/data_sample_epoch{}_noTF'.format(epoch))
            viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='samples/data_sample_epoch{}'.format(epoch))