Tidying up

Harle, Antoine (Contracteur) 2020-01-24 14:32:37 -05:00
parent f83c73ec17
commit f507ff4741
16 changed files with 85 additions and 46 deletions

View file

@@ -1,3 +1,7 @@
""" Dataset definition.
MNIST / CIFAR10
"""
import torch
from torch.utils.data import SubsetRandomSampler
import torchvision
@@ -28,14 +32,14 @@ transform = torchvision.transforms.Compose([
# torchvision.transforms.ToTensor()
# ])
#)
data_test = torchvision.datasets.MNIST(
"./data", train=False, download=True, transform=torchvision.transforms.ToTensor()
)
#data_test = torchvision.datasets.MNIST(
# "./data", train=False, download=True, transform=torchvision.transforms.ToTensor()
#)
### Classic Dataset ###
data_train = torchvision.datasets.CIFAR10("./data", train=True, download=download_data, transform=transform)
#data_val = torchvision.datasets.CIFAR10("./data", train=True, download=download_data, transform=transform)
data_test = torchvision.datasets.CIFAR10("./data", train=False, download=download_data, transform=transform)
data_train = torchvision.datasets.CIFAR10("../data", train=True, download=download_data, transform=transform)
#data_val = torchvision.datasets.CIFAR10("../data", train=True, download=download_data, transform=transform)
data_test = torchvision.datasets.CIFAR10("../data", train=False, download=download_data, transform=transform)
train_subset_indices=range(int(len(data_train)/2))
val_subset_indices=range(int(len(data_train)/2),len(data_train))
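These index ranges split the CIFAR10 training set in half; a minimal sketch of how they could feed SubsetRandomSampler-based loaders (the loader names and batch size are illustrative assumptions, not part of this commit):
from torch.utils.data import DataLoader, SubsetRandomSampler

# Hypothetical loaders built on the index ranges defined above.
BATCH_SIZE = 300  # illustrative value only
dl_train = DataLoader(data_train, batch_size=BATCH_SIZE,
    sampler=SubsetRandomSampler(train_subset_indices))
dl_val = DataLoader(data_train, batch_size=BATCH_SIZE,
    sampler=SubsetRandomSampler(val_subset_indices))
dl_test = DataLoader(data_test, batch_size=BATCH_SIZE, shuffle=False)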

View file

@@ -93,7 +93,7 @@ if __name__ == "__main__":
json.dump(out, f, indent=True)
print('Log :\"',f.name, '\" saved !')
'''
res_folder="res/brutus-tests2/"
res_folder="../res/brutus-tests2/"
epochs= 150
inner_its = [1]
dist_mix = [0.0, 0.5, 0.8, 1.0]
@@ -147,14 +147,14 @@ if __name__ == "__main__":
out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param, "Device": device_name, "Param_names": aug_model.TF_names(), "Log": log}
print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
filename = "{}-{} epochs (dataug:{})- {} in_it-{}".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter, run)
with open("res/log/%s.json" % filename, "w+") as f:
with open("../res/log/%s.json" % filename, "w+") as f:
try:
json.dump(out, f, indent=True)
print('Log :\"',f.name, '\" saved !')
except:
print("Failed to save logs :",f.name)
try:
plot_resV2(log, fig_name="res/"+filename, param_names=aug_model.TF_names())
plot_resV2(log, fig_name="../res/"+filename, param_names=aug_model.TF_names())
except:
print("Failed to plot res")

View file

@@ -1,8 +1,13 @@
""" Script to run experiment on smart augmentation.
"""
from model import *
from dataug import *
#from utils import *
from train_utils import *
# Use available TF (see transformations.py)
tf_names = [
## Geometric TF ##
'Identity',
@@ -34,6 +39,8 @@ tf_names = [
#'=Posterize',
#'=Solarize',
## Bad Transformations ##
# Bad Geometric TF #
#'BShearX',
#'BShearY',
#'BTranslateX-',
@@ -46,12 +53,14 @@ tf_names = [
#'Random',
#'RandBlend'
#Not functional
#'Auto_Contrast', #Not optimized for batches (very slow)
#'Equalize',
]
device = torch.device('cuda')
device = torch.device('cuda') #Select device to use
if device == torch.device('cpu'):
device_name = 'CPU'
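Since device is hard-coded to 'cuda' just above, the 'cpu' branch can never be taken; a hedged alternative (a suggestion, not code from this commit) that picks the device at runtime:
# Pick CUDA when available, otherwise fall back to CPU.
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
device_name = torch.cuda.get_device_name(0) if device.type == 'cuda' else 'CPU'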
@@ -61,13 +70,15 @@ else:
##########################################
if __name__ == "__main__":
#Task to perform
tasks={
#'classic',
#'aug_dataset',
#'aug_dataset', #Moved to old code
'aug_model'
}
#Parameters
n_inner_iter = 1
epochs = 150
epochs = 200
dataug_epoch_start=0
optim_param={
'Meta':{
@@ -81,6 +92,7 @@ if __name__ == "__main__":
}
}
#Models
model = LeNet(3,10)
#model = ResNet(num_classes=10)
#Slow models
@@ -103,17 +115,18 @@ if __name__ == "__main__":
out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param['Inner'], "Device": device_name, "Log": log}
print(str(model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
filename = "{}-{} epochs".format(str(model),epochs)
with open("res/log/%s.json" % filename, "w+") as f:
with open("../res/log/%s.json" % filename, "w+") as f:
json.dump(out, f, indent=True)
print('Log :\"',f.name, '\" saved !')
plot_res(log, fig_name="res/"+filename)
plot_res(log, fig_name="../res/"+filename)
print('Execution Time : %.00f '%(exec_time))
print('-'*9)
#### Augmented Dataset ####
'''
if 'aug_dataset' in tasks:
t0 = time.process_time()
@@ -162,7 +175,7 @@ if __name__ == "__main__":
print('Execution Time : %.00f '%(exec_time))
print('-'*9)
'''
#### Augmented Model ####
if 'aug_model' in tasks:
@@ -170,7 +183,7 @@ if __name__ == "__main__":
tf_dict = {k: TF.TF_dict[k] for k in tf_names}
model = Higher_model(model) #run_dist_dataugV3
aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=3, mix_dist=0.8, fixed_prob=False, fixed_mag=False, shared_mag=False), model).to(device)
aug_model = Augmented_model(Data_augV7(TF_dict=tf_dict, N_TF=3, mix_dist=0.8, fixed_prob=False, fixed_mag=False, shared_mag=False), model).to(device)
#aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=2), model).to(device)
print("{} on {} for {} epochs - {} inner_it".format(str(aug_model), device_name, epochs, n_inner_iter))

View file

@@ -263,8 +263,8 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=1, dataug_epoch_start
if (save_sample_freq and epoch%save_sample_freq==0): #Data sample saving
try:
viz_sample_data(imgs=xs, labels=ys, fig_name='samples/data_sample_epoch{}_noTF'.format(epoch))
viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='samples/data_sample_epoch{}'.format(epoch))
viz_sample_data(imgs=xs, labels=ys, fig_name='../samples/data_sample_epoch{}_noTF'.format(epoch))
viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='../samples/data_sample_epoch{}'.format(epoch))
except:
print("Couldn't save samples epoch"+epoch)
pass
@@ -327,8 +327,8 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=1, dataug_epoch_start
#Data sample saving
try:
viz_sample_data(imgs=xs, labels=ys, fig_name='samples/data_sample_epoch{}_noTF'.format(epoch))
viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='samples/data_sample_epoch{}'.format(epoch))
viz_sample_data(imgs=xs, labels=ys, fig_name='../samples/data_sample_epoch{}_noTF'.format(epoch))
viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='../samples/data_sample_epoch{}'.format(epoch))
except:
print("Couldn't save finals samples")
pass

View file

@@ -1,3 +1,6 @@
""" Utilties function.
"""
import numpy as np
import json, math, time, os
import matplotlib.pyplot as plt
@@ -12,12 +15,24 @@ import torch.nn.functional as F
import time
def print_graph(PyTorch_obj, fig_name='graph'):
graph=make_dot(PyTorch_obj) #Loss gives the whole graph
"""Save the computational graph.
Args:
PyTorch_obj (Tensor): End of the graph. Commonly, the loss tensor to get the whole graph.
fig_name (string): Relative path where to save the graph. (default: graph)
"""
graph=make_dot(PyTorch_obj)
graph.format = 'pdf' #https://graphviz.readthedocs.io/en/stable/manual.html#formats
graph.render(fig_name)
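# Example use (hypothetical path): print_graph(loss, fig_name='../graph/loss_graph')
# renders '../graph/loss_graph.pdf' next to the graphviz source file.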
def plot_resV2(log, fig_name='res', param_names=None):
"""Save a visual graph of the logs.
Args:
log (dict): Training logs, as generated by the train_utils functions.
fig_name (string): Relative path where to save the graph. (default: res)
param_names (list): Labels for the parameters. (default: None)
"""
epochs = [x["epoch"] for x in log]
fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(30, 15))
@@ -63,7 +78,12 @@ def plot_resV2(log, fig_name='res', param_names=None):
plt.close()
def plot_compare(filenames, fig_name='res'):
"""Save a visual graph comparing trainings stats.
Args:
filenames (list[Strings]): Relative paths to the logs (JSON files).
fig_name (string): Relative path where to save the graph. (default: res)
"""
all_data=[]
legend=""
for idx, file in enumerate(filenames):
@@ -102,21 +122,15 @@ def plot_compare(filenames, fig_name='res'):
plt.savefig(fig_name, bbox_inches='tight')
plt.close()
def plot_TF_res(log, tf_names, fig_name='res'):
mean = np.mean([x["param"] for x in log], axis=0)
std = np.std([x["param"] for x in log], axis=0)
fig, ax = plt.subplots(1, 1, figsize=(30, 8), sharey=True)
ax.bar(tf_names, mean, yerr=std)
#ax.bar(tf_names, log[-1]["param"])
fig_name = fig_name.replace('.',',')
plt.savefig(fig_name, bbox_inches='tight')
plt.close()
def viz_sample_data(imgs, labels, fig_name='data_sample', weight_labels=None):
"""Save data samples.
Args:
imgs (Tensor): Batch of images to sample from. Intended to contain at least 25 images.
labels (Tensor): Labels of the images.
fig_name (string): Relative path where to save the graph. (default: data_sample)
weight_labels (Tensor): Weights associated with each label. (default: None)
"""
sample = imgs[0:25,].permute(0, 2, 3, 1).squeeze().cpu()
plt.figure(figsize=(10,10))
@@ -135,7 +149,11 @@ def viz_sample_data(imgs, labels, fig_name='data_sample', weight_labels=None):
plt.close('all')
def print_torch_mem(add_info=''):
"""Print informations on PyTorch memory usage.
Args:
add_info (string): Prefix added before the print. (default: None)
"""
nb=0
max_size=0
for obj in gc.get_objects():
@@ -165,6 +183,7 @@ def print_torch_mem(add_info=''):
torch.cuda.max_memory_cached()/ mega_bytes)
print(string)
'''
def plot_TF_influence(log, fig_name='TF_influence', param_names=None):
proba=[[x["param"][idx]['p'] for x in log] for idx, _ in enumerate(log[0]["param"])]
mag=[[x["param"][idx]['m'] for x in log] for idx, _ in enumerate(log[0]["param"])]
@@ -179,21 +198,24 @@ def plot_TF_influence(log, fig_name='TF_influence', param_names=None):
fig_name = fig_name.replace('.',',')
plt.savefig(fig_name, bbox_inches='tight')
plt.close()
'''
### https://github.com/facebookresearch/higher/issues/18 ####
from torch._six import inf
def clip_norm(tensors, max_norm, norm_type=2):
r"""Clips norm of passed tensors.
The norm is computed over all tensors together, as if they were
concatenated into a single vector. Clipped tensors are returned.
Arguments:
tensors (Iterable[Tensor]): an iterable of Tensors or a
single Tensor to be normalized.
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Clipped (List[Tensor]) tensors.
"""Clips norm of passed tensors.
The norm is computed over all tensors together, as if they were
concatenated into a single vector. Clipped tensors are returned.
See: https://github.com/facebookresearch/higher/issues/18
Args:
tensors (Iterable[Tensor]): an iterable of Tensors or a
single Tensor to be normalized.
max_norm (float or int): max norm of the gradients
norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
infinity norm.
Returns:
Clipped (List[Tensor]) tensors.
"""
if isinstance(tensors, torch.Tensor):
tensors = [tensors]
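# The diff cuts off here. A minimal sketch of how such a clip-and-return helper
# typically continues (modeled on torch.nn.utils.clip_grad_norm_; an assumption,
# not necessarily this repository's actual implementation):
max_norm = float(max_norm)
if norm_type == inf:
    total_norm = max(t.abs().max() for t in tensors)
else:
    total_norm = sum(t.norm(norm_type) ** norm_type for t in tensors) ** (1. / norm_type)
clip_coef = max_norm / (total_norm + 1e-6)
if clip_coef < 1:
    tensors = [t.mul(clip_coef) for t in tensors]
return tensors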