From 7d5aa7c6fbef8b5b3cc25823d47fb8fbe143be49 Mon Sep 17 00:00:00 2001
From: "Harle, Antoine (Contracteur)"
Date: Mon, 10 Feb 2020 14:36:12 -0500
Subject: [PATCH] Change memory measurement + differentiable solarize tests

---
 higher/smart_aug/benchmark.py       | 133 +++++++++++++++-------------
 higher/smart_aug/datasets.py        |   6 +-
 higher/smart_aug/test_dataug.py     |   6 +-
 higher/smart_aug/transformations.py |  23 +++++
 4 files changed, 102 insertions(+), 66 deletions(-)

diff --git a/higher/smart_aug/benchmark.py b/higher/smart_aug/benchmark.py
index f2d453c..623095b 100644
--- a/higher/smart_aug/benchmark.py
+++ b/higher/smart_aug/benchmark.py
@@ -83,77 +83,88 @@ if __name__ == "__main__":
     ### Benchmark ###
     '''
+    n_inner_iter = 1
+    dist_mix = [0.5]#[0.5, 1.0]
+    N_seq_TF= [3, 4]
+    mag_setup = [(True, True), (False, False)] #(FxSh, Independent)
+
     for model_type in model_list.keys():
         for model_name in model_list[model_type]:
             for run in range(nb_run):
-                torch.cuda.reset_max_memory_cached() #reset_peak_stats
-                t0 = time.perf_counter()
+                for n_tf in N_seq_TF:
+                    for dist in dist_mix:
+                        for m_setup in mag_setup:
 
-                model = getattr(model_type, model_name)(pretrained=False)
+                            torch.cuda.reset_max_memory_allocated() #reset_peak_stats
+                            t0 = time.perf_counter()
 
-                model = Higher_model(model, model_name) #run_dist_dataugV3
-                if n_inner_iter!=0:
-                    aug_model = Augmented_model(
-                        Data_augV5(TF_dict=tf_dict,
-                            N_TF=n_tf,
-                            mix_dist=dist,
-                            fixed_prob=p_setup,
-                            fixed_mag=m_setup[0],
-                            shared_mag=m_setup[1]),
-                        model).to(device)
-                else:
-                    aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=n_tf), model).to(device)
+                            model = getattr(model_type, model_name)(pretrained=False)
 
-                print("{} on {} for {} epochs - {} inner_it".format(str(aug_model), device_name, epochs, n_inner_iter))
-                log= run_dist_dataugV3(model=aug_model,
-                    epochs=epochs,
-                    inner_it=n_inner_iter,
-                    dataug_epoch_start=dataug_epoch_start,
-                    opt_param=optim_param,
-                    print_freq=epochs/4,
-                    unsup_loss=1,
-                    hp_opt=False,
-                    save_sample_freq=None)
+                            model = Higher_model(model, model_name) #run_dist_dataugV3
+                            if n_inner_iter!=0:
+                                aug_model = Augmented_model(
+                                    Data_augV5(TF_dict=tf_dict,
+                                        N_TF=n_tf,
+                                        mix_dist=dist,
+                                        fixed_prob=False,
+                                        fixed_mag=m_setup[0],
+                                        shared_mag=m_setup[1]),
+                                    model).to(device)
+                            else:
+                                aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=n_tf), model).to(device)
 
-                exec_time=time.perf_counter() - t0
-                max_cached = torch.cuda.max_memory_cached()/(1024.0 * 1024.0) #torch.cuda.max_memory_reserved() #MB
-                ####
-                print('-'*9)
-                times = [x["time"] for x in log]
-                out = {"Accuracy": max([x["acc"] for x in log]),
-                    "Time": (np.mean(times),np.std(times), exec_time),
-                    'Optimizer': optim_param,
-                    "Device": device_name,
-                    "Memory": max_cached,
-                    "Param_names": aug_model.TF_names(),
-                    "Log": log}
-                print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
-                filename = "{}-{} epochs (dataug:{})- {} in_it-{}".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter, run)
-                with open(res_folder+"log/%s.json" % filename, "w+") as f:
-                    try:
-                        json.dump(out, f, indent=True)
-                        print('Log :\"',f.name, '\" saved !')
-                    except:
-                        print("Failed to save logs :",f.name)
+                            print("{} on {} for {} epochs - {} inner_it".format(str(aug_model), device_name, epochs, n_inner_iter))
+                            log= run_dist_dataugV3(model=aug_model,
+                                epochs=epochs,
+                                inner_it=n_inner_iter,
+                                dataug_epoch_start=dataug_epoch_start,
+                                opt_param=optim_param,
+                                print_freq=epochs/4,
+                                unsup_loss=1,
+                                hp_opt=False,
+                                save_sample_freq=None)
 
-                print('Execution Time : %.00f '%(exec_time))
-                print('-'*9)
+                            exec_time=time.perf_counter() - t0
+                            max_cached = torch.cuda.max_memory_allocated()/(1024.0 * 1024.0) #torch.cuda.max_memory_reserved() #MB
+                            ####
+                            print('-'*9)
+                            times = [x["time"] for x in log]
+                            out = {"Accuracy": max([x["acc"] for x in log]),
+                                "Time": (np.mean(times),np.std(times), exec_time),
+                                'Optimizer': optim_param,
+                                "Device": device_name,
+                                "Memory": max_cached,
+                                "Param_names": aug_model.TF_names(),
+                                "Log": log}
+                            print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
+                            filename = "{}-{} epochs (dataug:{})- {} in_it-{}".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter, run)
+                            with open(res_folder+"log/%s.json" % filename, "w+") as f:
+                                try:
+                                    json.dump(out, f, indent=True)
+                                    print('Log :\"',f.name, '\" saved !')
+                                except:
+                                    print("Failed to save logs :",f.name)
+
+                            print('Execution Time : %.00f '%(exec_time))
+                            print('-'*9)
     '''
 
-    ### Benchmark - RandAugment ###
+    ### Benchmark - RandAugment/Vanilla ###
+    #'''
     for model_type in model_list.keys():
         for model_name in model_list[model_type]:
             for run in range(nb_run):
-                torch.cuda.reset_max_memory_cached() #reset_peak_stats
+                torch.cuda.reset_max_memory_allocated() #reset_peak_stats
                 t0 = time.perf_counter()
 
                 model = getattr(model_type, model_name)(pretrained=False).to(device)
 
-                print("RandAugment(N{}-M{})-{} on {} for {} epochs".format(rand_aug['N'],rand_aug['M'],model_name, device_name, epochs))
+                print("{} on {} for {} epochs".format(model_name, device_name, epochs))
+                #print("RandAugment(N{}-M{:.2f})-{} on {} for {} epochs".format(rand_aug['N'],rand_aug['M'],model_name, device_name, epochs))
                 log= train_classic(model=model, opt_param=optim_param, epochs=epochs, print_freq=epochs/4)
 
                 exec_time=time.perf_counter() - t0
-                max_cached = torch.cuda.max_memory_cached()/(1024.0 * 1024.0) #torch.cuda.max_memory_reserved() #MB
+                max_cached = torch.cuda.max_memory_allocated()/(1024.0 * 1024.0) #torch.cuda.max_memory_reserved() #MB
                 ####
                 print('-'*9)
                 times = [x["time"] for x in log]
@@ -162,10 +173,12 @@ if __name__ == "__main__":
                     'Optimizer': optim_param,
                     "Device": device_name,
                     "Memory": max_cached,
-                    "Rand_Aug": rand_aug,
+                    #"Rand_Aug": rand_aug,
                     "Log": log}
-                print("RandAugment-",model_name,": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
-                filename = "RandAugment(N{}-M{})-{}-{} epochs -{}".format(rand_aug['N'],rand_aug['M'],model_name,epochs, run)
+                print(model_name,": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
+                filename = "{}-{} epochs -{}".format(model_name,epochs, run)
+                #print("RandAugment-",model_name,": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
+                #filename = "RandAugment(N{}-M{:.2f})-{}-{} epochs -{}".format(rand_aug['N'],rand_aug['M'],model_name,epochs, run)
                 with open(res_folder+"log/%s.json" % filename, "w+") as f:
                     try:
                         json.dump(out, f, indent=True)
@@ -177,14 +190,14 @@ if __name__ == "__main__":
 
                 print('Execution Time : %.00f '%(exec_time))
                 print('-'*9)
-
+    #'''
     ### HP Search ###
     '''
     from LeNet import *
     inner_its = [1]
-    dist_mix = [0.0, 0.5, 0.8, 1.0]
-    N_seq_TF= [3, 2, 4]
-    mag_setup = [(True,True), (False, False)] #(FxSh, Independant)
+    dist_mix = [1.0]#[0.0, 0.5, 0.8, 1.0]
+    N_seq_TF= [5, 6]
+    mag_setup = [(True, True), (False, False)] #(FxSh, Independent)
     #prob_setup = [True, False]
 
     try:
@@ -204,8 +217,8 @@ if __name__ == "__main__":
 
                         t0 = time.perf_counter()
 
-                        #model = getattr(models.resnet, 'resnet18')(pretrained=False)
-                        model = LeNet(3,10)
+                        model = getattr(models.resnet, 'resnet18')(pretrained=False)
+                        #model = LeNet(3,10)
                         model = Higher_model(model) #run_dist_dataugV3
                         aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=n_tf, mix_dist=dist, fixed_prob=p_setup, fixed_mag=m_setup[0], shared_mag=m_setup[1]), model).to(device)
                         #aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=2), model).to(device)
diff --git a/higher/smart_aug/datasets.py b/higher/smart_aug/datasets.py
index de81d5d..99a3a5c 100755
--- a/higher/smart_aug/datasets.py
+++ b/higher/smart_aug/datasets.py
@@ -39,14 +39,14 @@ transform_train = torchvision.transforms.Compose([
 ])
 
 ## RandAugment ##
-from RandAugment import RandAugment
+#from RandAugment import RandAugment
 # Add RandAugment with N, M(hyperparameter)
-rand_aug={'N': 2, 'M': 1}
+#rand_aug={'N': 2, 'M': 1}
 #rand_aug={'N': 2, 'M': 9./30} #RN-ImageNet
 #rand_aug={'N': 3, 'M': 5./30} #WRN-CIFAR10
 #rand_aug={'N': 2, 'M': 14./30} #WRN-CIFAR100
 #rand_aug={'N': 3, 'M': 7./30} #WRN-SVHN
-transform_train.transforms.insert(0, RandAugment(n=rand_aug['N'], m=rand_aug['M']))
+#transform_train.transforms.insert(0, RandAugment(n=rand_aug['N'], m=rand_aug['M']))
 
 
 ### Classic Dataset ###
diff --git a/higher/smart_aug/test_dataug.py b/higher/smart_aug/test_dataug.py
index ee7e033..d4b9554 100755
--- a/higher/smart_aug/test_dataug.py
+++ b/higher/smart_aug/test_dataug.py
@@ -146,7 +146,7 @@ if __name__ == "__main__":
 
     #### Augmented Model ####
     if 'aug_model' in tasks:
-        torch.cuda.reset_max_memory_cached() #reset_peak_stats
+        torch.cuda.reset_max_memory_allocated() #reset_peak_stats
         t0 = time.perf_counter()
 
         tf_dict = {k: TF.TF_dict[k] for k in tf_names}
@@ -166,7 +166,7 @@ if __name__ == "__main__":
             save_sample_freq=1)
 
         exec_time=time.perf_counter() - t0
-        max_cached = torch.cuda.max_memory_cached()/(1024.0 * 1024.0) #torch.cuda.max_memory_reserved() #MB
+        max_cached = torch.cuda.max_memory_allocated()/(1024.0 * 1024.0) #torch.cuda.max_memory_reserved() #MB
         ####
         print('-'*9)
         times = [x["time"] for x in log]
@@ -178,7 +178,7 @@ if __name__ == "__main__":
             "Param_names": aug_model.TF_names(),
             "Log": log}
         print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
-        filename = "{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter)+"(CV0.1)"
+        filename = "{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter)
         with open("../res/log/%s.json" % filename, "w+") as f:
             try:
                 json.dump(out, f, indent=True)
diff --git a/higher/smart_aug/transformations.py b/higher/smart_aug/transformations.py
index 5941270..ae2982f 100755
--- a/higher/smart_aug/transformations.py
+++ b/higher/smart_aug/transformations.py
@@ -346,10 +346,14 @@ def posterize(x, bits):
 
     return float_image(x & mask)
 
+import torch.nn.functional as F
 def solarize(x, thresholds):
     """Invert all pixel values above a threshold.
 
        Be wary that the use of the inequality (x>thresholds) blocks gradient propagation.
+
+        TODO : Make differentiable.
+
         Args:
             x (Tensor): Batch of images.
             thresholds (Tensor): All pixels above this level are inverted
@@ -386,6 +390,25 @@ def solarize(x, thresholds):
         #x[mask]=inv_x
         #x=x.masked_scatter(mask, inv_x)
 
+        #Differentiable (w.r.t. thresholds)?
+
+        #inv_x_bT= F.relu(x) - F.relu(x - thresholds)
+        #inv_x_aT= 1-x #Needs thresholds
+
+        #print('-'*10)
+        #print(thresholds[0])
+        #print(x[0])
+        #print(inv_x_bT[0])
+        #print(inv_x_aT[0])
+
+        #x=torch.where(x>thresholds,inv_x_aT, inv_x_bT)
+        #print(torch.allclose(x, x+0.001, atol=1e-3))
+        #print(torch.allclose(x, sol_x, atol=1e-2))
+        #print(torch.eq(x,sol_x)[0])
+
+        #print(x[0])
+        #print(sol_x[0])
+        #'''
     return x
 
 def blend(x,y,alpha):
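
Note on the commented-out experiment in the solarize hunk above: torch.where(x>thresholds, inv_x_aT, inv_x_bT) still relies on the hard comparison that the docstring warns blocks gradient propagation, so no useful gradient reaches the thresholds. A minimal sketch of one way the "TODO : Make differentiable" could be addressed (not part of this patch) is to relax the hard threshold with a sigmoid mask; the helper name soft_solarize and the sharpness k=50.0 below are illustrative assumptions, not code from the repository.

import torch

def soft_solarize(x, thresholds, k=50.0):
    """Differentiable approximation of solarize.

    Blends the identity and inverted images with a sigmoid mask instead of a
    hard (x > thresholds) comparison, so gradients reach both x and thresholds.
    Larger k sharpens the mask and approaches the hard solarize.
    """
    mask = torch.sigmoid(k * (x - thresholds))   # ~1 above the threshold, ~0 below
    return (1.0 - mask) * x + mask * (1.0 - x)   # keep pixels below, invert pixels above

# Usage sketch on a random batch
x = torch.rand(4, 3, 32, 32)
thresholds = torch.full((4, 1, 1, 1), 0.7, requires_grad=True)
soft_solarize(x, thresholds).sum().backward()
print(thresholds.grad.shape)  # torch.Size([4, 1, 1, 1]): gradient reaches the thresholds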