Mirror of https://github.com/AntoineHX/smart_augmentation.git

Commit 27c1890f4c (parent 7732060eb1)
Commit message: "modification mineurs" (minor modifications)

3 changed files with 32 additions and 37 deletions

@@ -692,7 +692,8 @@ class Data_augV5(nn.Module): #Optimisation jointe (mag, proba)
         else:
             return "Data_augV5(Mix%.1f%s-%dTFx%d-%s)" % (self._mix_factor,dist_param, self._nb_tf, self._N_seqTF, mag_param)
 
-class Data_augV6(nn.Module): #Optimisation sequentielle
+
+class Data_augV6(nn.Module): #Optimisation sequentielle #Mauvais resultats
     def __init__(self, TF_dict=TF.TF_dict, N_TF=1, mix_dist=0.0, fixed_prob=False, prob_set_size=None, fixed_mag=True, shared_mag=True):
         super(Data_augV6, self).__init__()
         assert len(TF_dict)>0
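The change above tags `Data_augV6` (sequential optimisation) as "Mauvais resultats" (poor results), which is consistent with the script change further down, where the `Data_augV6` call is dropped in favour of `Data_augV5`. For reference, a minimal instantiation sketch using exactly the signature shown above; `prob_set_size=2` is taken from the commented-out call later in this commit, and the import names are assumptions, not confirmed by the diff:

# Sketch only: how Data_augV6 would be instantiated (this commit flags the
# module as giving poor results, so Data_augV5 stays the one actually used).
import transformations as TF   # assumed import name, matching TF.TF_dict in the diff
from dataug import Data_augV6  # assumed module path

aug = Data_augV6(TF_dict=TF.TF_dict, N_TF=1, mix_dist=0.0,
                 fixed_prob=False, prob_set_size=2,
                 fixed_mag=True, shared_mag=True)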
@@ -6,21 +6,21 @@ from train_utils import *
 tf_names = [
     ## Geometric TF ##
     'Identity',
-    #'FlipUD',
-    #'FlipLR',
-    #'Rotate',
-    #'TranslateX',
-    #'TranslateY',
-    #'ShearX',
-    #'ShearY',
+    'FlipUD',
+    'FlipLR',
+    'Rotate',
+    'TranslateX',
+    'TranslateY',
+    'ShearX',
+    'ShearY',
 
     ## Color TF (Expect image in the range of [0, 1]) ##
-    #'Contrast',
-    #'Color',
-    #'Brightness',
-    #'Sharpness',
-    #'Posterize',
-    #'Solarize', #=>Image entre [0,1] #Pas opti pour des batch
+    'Contrast',
+    'Color',
+    'Brightness',
+    'Sharpness',
+    'Posterize',
+    'Solarize', #=>Image entre [0,1] #Pas opti pour des batch
 
     #Color TF (Common mag scale)
     #'+Contrast',
@@ -34,23 +34,18 @@ tf_names = [
     #'=Posterize',
     #'=Solarize',
 
-    #'BRotate',
-    #'BTranslateX',
-    #'BTranslateY',
-    #'BShearX',
-    #'BShearY',
-    #'BadTranslateX',
-    #'BadTranslateX_neg',
-    #'BadTranslateY',
-    #'BadTranslateY_neg',
+    'BShearX',
+    'BShearY',
+    'BTranslateX-',
+    'BTranslateX-',
+    'BTranslateY',
+    'BTranslateY-',
 
-    #'BadColor',
-    #'BadSharpness',
-    #'BadContrast',
-    #'BadBrightness',
+    'BadContrast',
+    'BadBrightness',
 
     'Random',
-    #'RandBlend'
+    'RandBlend'
     #Non fonctionnel
     #'Auto_Contrast', #Pas opti pour des batch (Super lent)
     #'Equalize',
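These two hunks enable most of the transform names that were previously commented out, including the deliberately degraded "Bad*" variants, and 'RandBlend' now joins 'Random'. Note that 'BTranslateX-' appears twice in the new list as shown. The list is consumed further down by `tf_dict = {k: TF.TF_dict[k] for k in tf_names}`, where a duplicate name silently collapses to a single dict entry and a mistyped name raises a bare KeyError. A small hedged sketch of a friendlier guard, assuming the script's `TF` import:

# Sketch: validate tf_names against TF.TF_dict before building the active set
# (the comprehension on the last line is the one the script itself uses).
unknown = [n for n in tf_names if n not in TF.TF_dict]
assert not unknown, "Unknown transform names: %s" % unknown

tf_dict = {k: TF.TF_dict[k] for k in tf_names}
print("%d active transforms out of %d available" % (len(tf_dict), len(TF.TF_dict)))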
@@ -71,8 +66,8 @@ if __name__ == "__main__":
         #'aug_dataset',
         'aug_model'
     }
-    n_inner_iter = 1
-    epochs = 1
+    n_inner_iter = 10
+    epochs = 100
     dataug_epoch_start=0
     optim_param={
         'Meta':{
@@ -155,7 +150,7 @@ if __name__ == "__main__":
         ####
         print('-'*9)
         times = [x["time"] for x in log]
-        out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param['Inner'], "Device": device_name, "Log": log}
+        out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param['Inner'], "Device": device_name, "Param_names": data_train_aug._TF, "Log": log}
         print(str(model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
         filename = "{}-{}-{} epochs".format(str(data_train_aug),str(model),epochs)
         with open("res/log/%s.json" % filename, "w+") as f:
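The only change in this hunk is the new "Param_names" entry, which persists the transform names (`data_train_aug._TF`) next to the results so the saved log stays interpretable on its own. An illustrative sketch of the `out` dict before json.dump (field names from the diff; all values are placeholders):

# Illustrative only: shape of `out` as written to res/log/<filename>.json.
out = {
    "Accuracy": 0.987,                        # best per-epoch "acc" over the run
    "Time": (12.3, 0.4, 1234.5),              # (mean epoch time, std, total exec_time)
    "Optimizer": {"lr": 1e-2},                # optim_param['Inner'] (contents assumed)
    "Device": "cuda",                         # device_name
    "Param_names": ["Identity", "Rotate"],    # data_train_aug._TF (example values)
    "Log": [{"acc": 0.9, "time": 12.1}],      # per-epoch records (keys from the diff)
}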
@@ -173,8 +168,7 @@ if __name__ == "__main__":
         t0 = time.process_time()
 
         tf_dict = {k: TF.TF_dict[k] for k in tf_names}
-        #aug_model = Augmented_model(Data_augV6(TF_dict=tf_dict, N_TF=1, mix_dist=0.0, fixed_prob=False, prob_set_size=2, fixed_mag=True, shared_mag=True), model).to(device)
-        aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=1, mix_dist=0.5, fixed_prob=False, fixed_mag=True, shared_mag=True), model).to(device)
+        aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=2, mix_dist=0.0, fixed_prob=False, fixed_mag=False, shared_mag=False), model).to(device)
         #aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=2), model).to(device)
 
         print("{} on {} for {} epochs - {} inner_it".format(str(aug_model), device_name, epochs, n_inner_iter))
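Reading the constructor flags by their names, the active setup moves from one transform per image with distribution mixing and a fixed shared magnitude (N_TF=1, mix_dist=0.5, fixed_mag=True, shared_mag=True) to sequences of two transforms with a learnable magnitude per transform (N_TF=2, mix_dist=0.0, fixed_mag=False, shared_mag=False). A sketch that keeps both variants switchable for comparison runs; the `per_tf_magnitudes` flag is mine, not from the repo:

# Sketch: switch between the old and new Data_augV5 configurations.
per_tf_magnitudes = True  # hypothetical flag, not part of the repo
if per_tf_magnitudes:
    data_aug = Data_augV5(TF_dict=tf_dict, N_TF=2, mix_dist=0.0,
                          fixed_prob=False, fixed_mag=False, shared_mag=False)
else:
    data_aug = Data_augV5(TF_dict=tf_dict, N_TF=1, mix_dist=0.5,
                          fixed_prob=False, fixed_mag=True, shared_mag=True)
aug_model = Augmented_model(data_aug, model).to(device)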
@@ -183,7 +177,7 @@ if __name__ == "__main__":
             inner_it=n_inner_iter,
             dataug_epoch_start=dataug_epoch_start,
             opt_param=optim_param,
-            print_freq=1,
+            print_freq=10,
             KLdiv=True,
             loss_patience=None)
 
@@ -191,14 +185,14 @@ if __name__ == "__main__":
         ####
         print('-'*9)
         times = [x["time"] for x in log]
-        out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param, "Device": device_name, "Log": log}
+        out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param, "Device": device_name, "Param_names": aug_model.TF_names(), "Log": log}
         print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
         filename = "{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter)
         with open("res/log/%s.json" % filename, "w+") as f:
             json.dump(out, f, indent=True)
             print('Log :\"',f.name, '\" saved !')
 
-        plot_resV2(log, fig_name="res/"+filename, param_names=tf_names)
+        plot_resV2(log, fig_name="res/"+filename, param_names=aug_model.TF_names())
 
         print('Execution Time : %.00f '%(exec_time))
         print('-'*9)
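Here the log gains the same "Param_names" field via `aug_model.TF_names()`, and `plot_resV2` now reads the names from the model instead of the module-level `tf_names`, so plot and log can no longer drift apart. A side benefit is that a finished run can be re-plotted from its JSON alone; a sketch, with an illustrative file name:

# Sketch: rebuild the plot of a finished run from its saved log only.
import json

with open("res/log/some_run.json") as f:  # illustrative file name
    saved = json.load(f)

plot_resV2(saved["Log"],
           fig_name="res/replot",
           param_names=saved["Param_names"])  # no model instance needed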
@@ -195,7 +195,7 @@ def viz_sample_data(imgs, labels, fig_name='data_sample', weight_labels=None):
         plt.grid(False)
         plt.imshow(sample[i,].detach().numpy(), cmap=plt.cm.binary)
         label = str(labels[i].item())
-        if weight_labels is not None : label+= ("- p %.2f" % weight_labels[i].item())
+        if weight_labels is not None : label+= (" - p %.2f" % weight_labels[i].item())
         plt.xlabel(label)
 
     plt.savefig(fig_name)
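The last hunk only inserts a space before the probability suffix, so a subplot caption reads "3 - p 0.42" instead of "3- p 0.42". A minimal usage sketch with hypothetical tensor shapes (`viz_sample_data` itself is the repo's helper):

# Sketch: calling viz_sample_data on dummy data (shapes are assumptions).
import torch

imgs = torch.rand(8, 1, 28, 28)        # batch of 8 grayscale images
labels = torch.randint(0, 10, (8,))    # class indices
probs = torch.rand(8)                  # e.g. per-sample TF probabilities

viz_sample_data(imgs, labels, fig_name='data_sample', weight_labels=probs)
# each image is now captioned e.g. "3 - p 0.42"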