work in progress - validation brutus res

Harle, Antoine (Contracteur) 2019-12-04 10:05:59 -05:00
parent f8cc38dd6b
commit bb59c7de25
8 changed files with 4278 additions and 4278 deletions

View file

@@ -2,11 +2,11 @@ from utils import *
 if __name__ == "__main__":
-'''
+#'''
 files=[
 #"res/good_TF_tests/log/Aug_mod(Data_augV5(Mix0.5-14TFx2-MagFxSh)-LeNet)-100 epochs (dataug:0)- 0 in_it.json",
 #"res/good_TF_tests/log/Aug_mod(Data_augV5(Uniform-14TFx2-MagFxSh)-LeNet)-100 epochs (dataug:0)- 0 in_it.json",
-#"res/brutus-tests/log/Aug_mod(Data_augV5(Mix0.5-14TFx1-Mag)-LeNet)-150epochs(dataug:0)-1in_it-0.json",
+"res/brutus-tests/log/Aug_mod(Data_augV5(Uniform-14TFx2-MagFxSh)-LeNet)-150epochs(dataug:0)-0in_it-0.json",
 #"res/log/Aug_mod(RandAugUDA(18TFx2-Mag1)-LeNet)-100 epochs (dataug:0)- 0 in_it.json",
 ]
@@ -16,7 +16,7 @@ if __name__ == "__main__":
 data = json.load(json_file)
 plot_resV2(data['Log'], fig_name=file.replace('.json','').replace('log/',''), param_names=data['Param_names'])
 #plot_TF_influence(data['Log'], param_names=data['Param_names'])
-'''
+#'''
 ## Loss , Acc, Proba = f(epoch) ##
 #plot_compare(filenames=files, fig_name="res/compare")
@@ -76,11 +76,11 @@ if __name__ == "__main__":
 '''
 #Res print
-#'''
+'''
 nb_run=3
 accs = []
 times = []
-files = ["res/brutus-tests/log/Aug_mod(Data_augV5(Mix1-14TFx4-Mag)-LeNet)-150epochs(dataug:0)-1in_it-%s.json"%str(run) for run in range(nb_run)]
+files = ["res/brutus-tests/log/Aug_mod(Data_augV5(Uniform-14TFx2-MagFxSh)-LeNet)-150epochs(dataug:0)-1in_it-%s.json"%str(run) for run in range(nb_run)]
 for idx, file in enumerate(files):
 #legend+=str(idx)+'-'+file+'\n'
@@ -90,4 +90,4 @@ if __name__ == "__main__":
 times.append(data['Time'][0])
 print(files[0], np.mean(accs), np.std(accs), np.mean(times))
-#'''
+'''
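The block above (now disabled in favour of the plotting block) averages accuracy and run time over several runs of one configuration. A minimal standalone sketch of that aggregation, assuming the JSON logs expose an 'Accuracy' field alongside the 'Time' list read above, and using hypothetical file paths:

import json
import numpy as np

nb_run = 3
# Hypothetical paths; the real ones follow the naming scheme shown in the diff.
files = ["res/brutus-tests/log/run-%s.json" % run for run in range(nb_run)]

accs, times = [], []
for file in files:
    with open(file) as json_file:
        data = json.load(json_file)
    accs.append(data['Accuracy'])  # assumed field: final accuracy of the run
    times.append(data['Time'][0])  # same 'Time' entry as in the script above

print(files[0], np.mean(accs), np.std(accs), np.mean(times))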

View file

@@ -694,7 +694,7 @@ class Data_augV5(nn.Module): #Joint optimisation (mag, proba)
 import numpy as np
 class Data_augV6(nn.Module): #Sequential optimisation
-def __init__(self, TF_dict=TF.TF_dict, N_TF=1, mix_dist=0.0, fixed_prob=False, fixed_mag=True, shared_mag=True):
+def __init__(self, TF_dict=TF.TF_dict, N_TF=1, mix_dist=0.0, fixed_prob=False, prob_set_size=None, fixed_mag=True, shared_mag=True):
 super(Data_augV6, self).__init__()
 assert len(TF_dict)>0
@@ -708,8 +708,9 @@ class Data_augV6(nn.Module): #Sequential optimisation
 self._shared_mag = shared_mag
 self._fixed_mag = fixed_mag
-self._TF_set_size=3
-self._fixed_TF=[0]
+self._TF_set_size = prob_set_size if prob_set_size else self._nb_tf
+self._fixed_TF=[0] #Identity
 assert self._TF_set_size>=len(self._fixed_TF)
 if self._TF_set_size>self._nb_tf:
@@ -723,7 +724,6 @@ class Data_augV6(nn.Module): #Sequential optimisation
 else:
 def generate_TF_sets(n_TF, set_size, idx_prefix=[]):
 TF_sets=[]
-print(set_size, idx_prefix)
 if len(idx_prefix)!=0:
 if set_size>2:
 for i in range(idx_prefix[-1]+1, n_TF):
@@ -739,12 +739,12 @@ class Data_augV6(nn.Module): #Sequential optimisation
 return TF_sets
 self._TF_sets=generate_TF_sets(self._nb_tf, self._TF_set_size, self._fixed_TF)
 ## Plan TF learning schedule ##
 self._TF_schedule = [list(range(len(self._TF_sets))) for _ in range(self._N_seqTF)]
 for n_tf in range(self._N_seqTF) :
 TF.random.shuffle(self._TF_schedule[n_tf])
-#print(self._TF_schedule)
 self._current_TF_idx=0 #random.randint
 self._start_prob = 1/self._TF_set_size
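Data_augV6 enumerates index sets of transformations that always contain the fixed indices in self._fixed_TF (index 0, the identity), then learns probabilities over one set at a time following the shuffled per-position schedule built above. Assuming generate_TF_sets produces every set of the requested size that extends that fixed prefix (which is what its recursive construction appears to do), a compact standalone equivalent with a hypothetical name could look like:

from itertools import combinations

def tf_index_sets(n_TF, set_size, idx_prefix=(0,)):
    # All index sets of size set_size over range(n_TF) that contain the fixed prefix.
    remaining = [i for i in range(n_TF) if i not in idx_prefix]
    return [tuple(idx_prefix) + combo
            for combo in combinations(remaining, set_size - len(idx_prefix))]

# Example: 5 TFs, sets of size 3 that always include the identity TF at index 0.
print(tf_index_sets(5, 3))
# [(0, 1, 2), (0, 1, 3), (0, 1, 4), (0, 2, 3), (0, 2, 4), (0, 3, 4)]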
@@ -881,12 +881,11 @@ class Data_augV6(nn.Module): #Sequential optimisation
 self._current_TF_idx=idx
 else:
 self._current_TF_idx+=1
-if self._current_TF_idx== len(self._TF_schedule[0]):
-self._current_TF_idx=0
-#for n_tf in range(self._N_seqTF) :
-# TF.random.shuffle(self._TF_schedule[n_tf])
-#print(self._TF_schedule)
-#print("Current TF :",self._TF_sets[self._current_TF_idx])
+if self._current_TF_idx>=len(self._TF_schedule[0]):
+self._current_TF_idx=0
+for n_tf in range(self._N_seqTF) :
+TF.random.shuffle(self._TF_schedule[n_tf])
 def train(self, mode=None):
 if mode is None :
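The change above makes the set pointer wrap with >= instead of ==, and re-shuffles each per-position schedule on wrap-around (previously commented out), so every pass over the TF sets uses a fresh ordering. A minimal sketch of that bookkeeping in isolation, with hypothetical names (TFSetSchedule, next_set) and none of the repository's probability or magnitude logic:

import random

class TFSetSchedule:
    # One shuffled ordering of TF-set indices per sequential TF position.
    def __init__(self, n_sets, n_seq_tf):
        self.schedule = [list(range(n_sets)) for _ in range(n_seq_tf)]
        for order in self.schedule:
            random.shuffle(order)
        self.current = 0

    def next_set(self, idx=None):
        if idx is not None:
            self.current = idx
        else:
            self.current += 1
            if self.current >= len(self.schedule[0]):
                # Wrap around and draw a fresh ordering, as in the new code above.
                self.current = 0
                for order in self.schedule:
                    random.shuffle(order)
        return [order[self.current] for order in self.schedule]

sched = TFSetSchedule(n_sets=6, n_seq_tf=2)
print([sched.next_set() for _ in range(8)])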

Binary file not shown (image changed: 276 KiB before, 270 KiB after).

Binary file not shown (image changed: 294 KiB before, 288 KiB after).

View file

@@ -10,17 +10,17 @@ tf_names = [
 'FlipLR',
 'Rotate',
 'TranslateX',
-#'TranslateY',
-#'ShearX',
-#'ShearY',
+'TranslateY',
+'ShearX',
+'ShearY',
 ## Color TF (Expect image in the range of [0, 1]) ##
-#'Contrast',
-#'Color',
-#'Brightness',
-#'Sharpness',
-#'Posterize',
-#'Solarize', #=>Image in [0,1] #Not optimised for batches
+'Contrast',
+'Color',
+'Brightness',
+'Sharpness',
+'Posterize',
+'Solarize', #=>Image in [0,1] #Not optimised for batches
 #Color TF (Common mag scale)
 #'+Contrast',
@@ -64,8 +64,8 @@ else:
 ##########################################
 if __name__ == "__main__":
-n_inner_iter = 10
-epochs = 100
+n_inner_iter = 1
+epochs = 200
 dataug_epoch_start=0
 #### Classic ####
@@ -95,7 +95,8 @@ if __name__ == "__main__":
 t0 = time.process_time()
 tf_dict = {k: TF.TF_dict[k] for k in tf_names}
 #tf_dict = TF.TF_dict
-aug_model = Augmented_model(Data_augV6(TF_dict=tf_dict, N_TF=1, mix_dist=0.0, fixed_prob=False, fixed_mag=True, shared_mag=True), LeNet(3,10)).to(device)
+#aug_model = Augmented_model(Data_augV6(TF_dict=tf_dict, N_TF=1, mix_dist=0.0, fixed_prob=False, prob_set_size=2, fixed_mag=True, shared_mag=True), LeNet(3,10)).to(device)
+aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=3, mix_dist=0.0, fixed_prob=False, fixed_mag=False, shared_mag=False), LeNet(3,10)).to(device)
 #aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=2, mix_dist=0.5, fixed_mag=True, shared_mag=True), WideResNet(num_classes=10, wrn_size=160)).to(device)
 #aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=2), LeNet(3,10)).to(device)
 print(str(aug_model), 'on', device_name)
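The hunk above switches the experiment from the sequential Data_augV6 module back to Data_augV5 with three stacked transformations and fully learned probabilities and magnitudes, while tf_names now enables the geometric and colour transformations. A self-contained sketch of that construction, keeping only calls visible in the diff (Data_augV5, Augmented_model, LeNet, TF.TF_dict); the import paths and the shortened tf_names list are assumptions:

import torch
import transformations as TF                      # assumed module name behind the TF alias
from dataug import Data_augV5, Augmented_model    # assumed module layout
from model import LeNet                           # assumed module layout

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')

# Keep only the selected transformations, as in the script above.
tf_names = ['FlipLR', 'Rotate', 'TranslateX', 'TranslateY', 'ShearX', 'ShearY']
tf_dict = {k: TF.TF_dict[k] for k in tf_names}

# N_TF=3 transformations applied in sequence; probabilities and magnitudes learned (not fixed, not shared).
aug_model = Augmented_model(
    Data_augV5(TF_dict=tf_dict, N_TF=3, mix_dist=0.0,
               fixed_prob=False, fixed_mag=False, shared_mag=False),
    LeNet(3, 10)).to(device)
print(aug_model, 'on', device)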

View file

@@ -618,7 +618,7 @@ def run_dist_dataugV2(model, epochs=1, inner_it=0, dataug_epoch_start=0, print_f
 meta_opt.step()
 model['data_aug'].adjust_param(soft=False) #Constraint sum(proba)=1
-model['data_aug'].next_TF_set()
+#model['data_aug'].next_TF_set()
 fmodel = higher.patch.monkeypatch(model, device=None, copy_initial_weights=True)
 diffopt = higher.optim.get_diff_optim(inner_opt, model.parameters(),fmodel=fmodel, track_higher_grads=high_grad_track)
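For context, this is the point in run_dist_dataugV2 where, after each meta-optimiser step, the functional model and the differentiable inner optimiser are rebuilt with the higher library so the next inner loop starts from the freshly updated weights; the next_TF_set() call is commented out, consistent with the switch back to Data_augV5 in this commit. A minimal sketch of that bi-level pattern with higher, independent of the repository's training loop; the toy model, data, and meta parameter are placeholders:

import torch
import torch.nn as nn
import torch.nn.functional as F
import higher

model = nn.Linear(10, 2)                               # placeholder task model
inner_opt = torch.optim.SGD(model.parameters(), lr=1e-2)
meta_param = torch.zeros(1, requires_grad=True)        # stands in for the augmentation parameters
meta_opt = torch.optim.Adam([meta_param], lr=1e-3)

x, y = torch.randn(8, 10), torch.randint(0, 2, (8,))      # toy training batch
xv, yv = torch.randn(8, 10), torch.randint(0, 2, (8,))    # toy validation batch

for meta_step in range(3):
    # Rebuild the functional model and differentiable optimiser, as in the code above.
    fmodel = higher.patch.monkeypatch(model, device=None, copy_initial_weights=True)
    diffopt = higher.optim.get_diff_optim(inner_opt, model.parameters(),
                                          fmodel=fmodel, track_higher_grads=True)

    for inner_step in range(1):                              # inner_it
        loss = F.cross_entropy(fmodel(x + meta_param), y)    # meta_param enters the inner loss
        diffopt.step(loss)                                   # differentiable weight update

    val_loss = F.cross_entropy(fmodel(xv), yv)
    meta_opt.zero_grad()
    val_loss.backward()       # gradients flow back to meta_param through the inner updates
    meta_opt.step()

    # One possible convention: copy the adapted weights back so the next
    # monkeypatch starts from them (the repository may handle this differently).
    with torch.no_grad():
        for p, fp in zip(model.parameters(), fmodel.parameters()):
            p.copy_(fp)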