Mirror of https://github.com/AntoineHX/smart_augmentation.git (synced 2025-05-04 12:10:45 +02:00)
Commit f507ff4741 (parent f83c73ec17): "Rangement" (tidying up)
16 changed files with 85 additions and 46 deletions
@@ -1,3 +1,7 @@
+""" Dataset definition.
+
+MNIST / CIFAR10
+"""
 import torch
 from torch.utils.data import SubsetRandomSampler
 import torchvision
@@ -28,14 +32,14 @@ transform = torchvision.transforms.Compose([
 #    torchvision.transforms.ToTensor()
 # ])
 #)
-data_test = torchvision.datasets.MNIST(
-    "./data", train=False, download=True, transform=torchvision.transforms.ToTensor()
-)
+#data_test = torchvision.datasets.MNIST(
+#    "./data", train=False, download=True, transform=torchvision.transforms.ToTensor()
+#)

 ### Classic Dataset ###
-data_train = torchvision.datasets.CIFAR10("./data", train=True, download=download_data, transform=transform)
-#data_val = torchvision.datasets.CIFAR10("./data", train=True, download=download_data, transform=transform)
-data_test = torchvision.datasets.CIFAR10("./data", train=False, download=download_data, transform=transform)
+data_train = torchvision.datasets.CIFAR10("../data", train=True, download=download_data, transform=transform)
+#data_val = torchvision.datasets.CIFAR10("../data", train=True, download=download_data, transform=transform)
+data_test = torchvision.datasets.CIFAR10("../data", train=False, download=download_data, transform=transform)

 train_subset_indices=range(int(len(data_train)/2))
 val_subset_indices=range(int(len(data_train)/2),len(data_train))
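For context, the half/half split built in the last two lines is meant to feed the SubsetRandomSampler imported at the top of the file. A minimal sketch of the assumed usage (loader names and batch size are illustrative, not from this diff):

from torch.utils.data import DataLoader, SubsetRandomSampler

# Hypothetical loaders built from the index ranges defined above.
dl_train = DataLoader(data_train, batch_size=64, sampler=SubsetRandomSampler(train_subset_indices))
dl_val   = DataLoader(data_train, batch_size=64, sampler=SubsetRandomSampler(val_subset_indices))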
@@ -93,7 +93,7 @@ if __name__ == "__main__":
             json.dump(out, f, indent=True)
             print('Log :\"',f.name, '\" saved !')
     '''
-    res_folder="res/brutus-tests2/"
+    res_folder="../res/brutus-tests2/"
     epochs= 150
     inner_its = [1]
     dist_mix = [0.0, 0.5, 0.8, 1.0]
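The recurring `res/` to `../res/` edits in this commit follow from the scripts being moved one directory down, which ties every relative path to a specific working directory. A hedged alternative (not what the commit does) is to anchor paths to the file's own location:

import os

# Resolve ../res relative to this script instead of the current working directory.
RES_FOLDER = os.path.join(os.path.dirname(os.path.abspath(__file__)), "..", "res", "brutus-tests2")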
@@ -147,14 +147,14 @@ if __name__ == "__main__":
     out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param, "Device": device_name, "Param_names": aug_model.TF_names(), "Log": log}
     print(str(aug_model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
     filename = "{}-{} epochs (dataug:{})- {} in_it-{}".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter, run)
-    with open("res/log/%s.json" % filename, "w+") as f:
+    with open("../res/log/%s.json" % filename, "w+") as f:
         try:
             json.dump(out, f, indent=True)
             print('Log :\"',f.name, '\" saved !')
         except:
             print("Failed to save logs :",f.name)
     try:
-        plot_resV2(log, fig_name="res/"+filename, param_names=aug_model.TF_names())
+        plot_resV2(log, fig_name="../res/"+filename, param_names=aug_model.TF_names())
     except:
         print("Failed to plot res")

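One detail worth noting in the unchanged lines: `json.dump(out, f, indent=True)` only works because bool is a subclass of int in Python, so `indent=True` is silently treated as `indent=1`:

import json

json.dumps({"acc": 0.9}, indent=True)  # same output as indent=1
json.dumps({"acc": 0.9}, indent=4)     # the more explicit, conventional spelling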
@@ -1,8 +1,13 @@
+""" Script to run experiment on smart augmentation.
+
+"""
+
 from model import *
 from dataug import *
 #from utils import *
 from train_utils import *

+# Use available TF (see transformations.py)
 tf_names = [
     ## Geometric TF ##
     'Identity',
@@ -34,6 +39,8 @@ tf_names = [
     #'=Posterize',
     #'=Solarize',

+    ## Bad Transformations ##
+    # Bad Geometric TF #
     #'BShearX',
     #'BShearY',
     #'BTranslateX-',
@@ -46,12 +53,14 @@ tf_names = [
     #'Random',
     #'RandBlend'

     #Not functional
     #'Auto_Contrast', #Not optimized for batches (very slow)
     #'Equalize',
 ]

-device = torch.device('cuda')
+
+device = torch.device('cuda') #Select device to use

 if device == torch.device('cpu'):
     device_name = 'CPU'
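Hard-coding torch.device('cuda') crashes on CPU-only machines, so the `if device == torch.device('cpu')` branch below it is unreachable as written. A more defensive sketch (an assumption, not part of the commit):

device = torch.device('cuda' if torch.cuda.is_available() else 'cpu') #Select device to use
device_name = torch.cuda.get_device_name(0) if device.type == 'cuda' else 'CPU'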
@@ -61,13 +70,15 @@ else:
 ##########################################
 if __name__ == "__main__":

+    #Task to perform
     tasks={
         #'classic',
-        #'aug_dataset',
+        #'aug_dataset', #Moved to old code
         'aug_model'
     }
+    #Parameters
     n_inner_iter = 1
-    epochs = 150
+    epochs = 200
     dataug_epoch_start=0
     optim_param={
         'Meta':{
@@ -81,6 +92,7 @@ if __name__ == "__main__":
         }
     }

+    #Models
     model = LeNet(3,10)
     #model = ResNet(num_classes=10)
     #Slow
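The hunks only show the edges of optim_param, but together with the later optim_param['Inner'] lookup they imply a nested dict with 'Meta' and 'Inner' sub-configs. A sketch of that shape (key names from the diff, the values are purely illustrative assumptions):

optim_param = {
    'Meta':  {'optim': 'Adam', 'lr': 1e-2},                  # hypothetical meta-optimizer settings
    'Inner': {'optim': 'SGD', 'lr': 1e-2, 'momentum': 0.9},  # hypothetical inner-loop settings
}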
@@ -103,17 +115,18 @@ if __name__ == "__main__":
         out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times), exec_time), 'Optimizer': optim_param['Inner'], "Device": device_name, "Log": log}
         print(str(model),": acc", out["Accuracy"], "in:", out["Time"][0], "+/-", out["Time"][1])
         filename = "{}-{} epochs".format(str(model),epochs)
-        with open("res/log/%s.json" % filename, "w+") as f:
+        with open("../res/log/%s.json" % filename, "w+") as f:
             json.dump(out, f, indent=True)
             print('Log :\"',f.name, '\" saved !')

-        plot_res(log, fig_name="res/"+filename)
+        plot_res(log, fig_name="../res/"+filename)

         print('Execution Time : %.00f '%(exec_time))
         print('-'*9)


     #### Augmented Dataset ####
+    '''
     if 'aug_dataset' in tasks:

         t0 = time.process_time()
@@ -162,7 +175,7 @@ if __name__ == "__main__":

         print('Execution Time : %.00f '%(exec_time))
         print('-'*9)
+    '''

     #### Augmented Model ####
     if 'aug_model' in tasks:
@@ -170,7 +183,7 @@ if __name__ == "__main__":

         tf_dict = {k: TF.TF_dict[k] for k in tf_names}
         model = Higher_model(model) #run_dist_dataugV3
-        aug_model = Augmented_model(Data_augV5(TF_dict=tf_dict, N_TF=3, mix_dist=0.8, fixed_prob=False, fixed_mag=False, shared_mag=False), model).to(device)
+        aug_model = Augmented_model(Data_augV7(TF_dict=tf_dict, N_TF=3, mix_dist=0.8, fixed_prob=False, fixed_mag=False, shared_mag=False), model).to(device)
         #aug_model = Augmented_model(RandAug(TF_dict=tf_dict, N_TF=2), model).to(device)

         print("{} on {} for {} epochs - {} inner_it".format(str(aug_model), device_name, epochs, n_inner_iter))
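Read together, these lines describe the whole augmented-model setup: select transformations, wrap the task model for higher-order gradients, then attach the learned augmentation module (now Data_augV7 instead of Data_augV5). Schematically, using the repo's own classes from dataug.py and model.py:

tf_dict = {k: TF.TF_dict[k] for k in tf_names}   # keep only the selected transformations
model = Higher_model(model)                      # wrapper required by run_dist_dataugV3
aug_model = Augmented_model(
    Data_augV7(TF_dict=tf_dict, N_TF=3, mix_dist=0.8,
               fixed_prob=False, fixed_mag=False, shared_mag=False),
    model).to(device)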
@@ -263,8 +263,8 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=1, dataug_epoch_start

             if (save_sample_freq and epoch%save_sample_freq==0): #Data sample saving
                 try:
-                    viz_sample_data(imgs=xs, labels=ys, fig_name='samples/data_sample_epoch{}_noTF'.format(epoch))
-                    viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='samples/data_sample_epoch{}'.format(epoch))
+                    viz_sample_data(imgs=xs, labels=ys, fig_name='../samples/data_sample_epoch{}_noTF'.format(epoch))
+                    viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='../samples/data_sample_epoch{}'.format(epoch))
                 except:
                     print("Couldn't save samples epoch"+epoch)
                     pass
@@ -327,8 +327,8 @@ def run_dist_dataugV3(model, opt_param, epochs=1, inner_it=1, dataug_epoch_start

     #Data sample saving
     try:
-        viz_sample_data(imgs=xs, labels=ys, fig_name='samples/data_sample_epoch{}_noTF'.format(epoch))
-        viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='samples/data_sample_epoch{}'.format(epoch))
+        viz_sample_data(imgs=xs, labels=ys, fig_name='../samples/data_sample_epoch{}_noTF'.format(epoch))
+        viz_sample_data(imgs=model['data_aug'](xs), labels=ys, fig_name='../samples/data_sample_epoch{}'.format(epoch))
     except:
         print("Couldn't save finals samples")
         pass
@@ -1,3 +1,6 @@
+""" Utility functions.
+
+"""
 import numpy as np
 import json, math, time, os
 import matplotlib.pyplot as plt
@@ -12,12 +15,24 @@ import torch.nn.functional as F
 import time

 def print_graph(PyTorch_obj, fig_name='graph'):
-    graph=make_dot(PyTorch_obj) #Loss give the whole graph
+    """Save the computational graph.
+
+        Args:
+            PyTorch_obj (Tensor): End of the graph. Commonly, the loss tensor to get the whole graph.
+            fig_name (string): Relative path where to save the graph. (default: graph)
+    """
+    graph=make_dot(PyTorch_obj)
     graph.format = 'pdf' #https://graphviz.readthedocs.io/en/stable/manual.html#formats
     graph.render(fig_name)

 def plot_resV2(log, fig_name='res', param_names=None):
+    """Save a visual graph of the logs.
+
+        Args:
+            log (dict): Logs of the training generated by most of train_utils.
+            fig_name (string): Relative path where to save the graph. (default: res)
+            param_names (list): Labels for the parameters. (default: None)
+    """
     epochs = [x["epoch"] for x in log]

     fig, ax = plt.subplots(nrows=2, ncols=3, figsize=(30, 15))
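With the new docstring, a typical call to print_graph would look like the sketch below (make_dot comes from torchviz and is imported elsewhere in this file; `criterion`, `model`, `x` and `y` are hypothetical stand-ins):

loss = criterion(model(x), y)                      # any tensor at the end of the graph
print_graph(loss, fig_name='../graph/loss_graph')  # graphviz renders ../graph/loss_graph.pdf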
@@ -63,7 +78,12 @@ def plot_resV2(log, fig_name='res', param_names=None):
     plt.close()

 def plot_compare(filenames, fig_name='res'):
+    """Save a visual graph comparing training stats.
+
+        Args:
+            filenames (list[Strings]): Relative paths to the logs (JSON files).
+            fig_name (string): Relative path where to save the graph. (default: res)
+    """
     all_data=[]
     legend=""
     for idx, file in enumerate(filenames):
@@ -102,21 +122,15 @@ def plot_compare(filenames, fig_name='res'):
     plt.savefig(fig_name, bbox_inches='tight')
     plt.close()

-def plot_TF_res(log, tf_names, fig_name='res'):
-
-    mean = np.mean([x["param"] for x in log], axis=0)
-    std = np.std([x["param"] for x in log], axis=0)
-
-    fig, ax = plt.subplots(1, 1, figsize=(30, 8), sharey=True)
-    ax.bar(tf_names, mean, yerr=std)
-    #ax.bar(tf_names, log[-1]["param"])
-
-    fig_name = fig_name.replace('.',',')
-    plt.savefig(fig_name, bbox_inches='tight')
-    plt.close()
-
 def viz_sample_data(imgs, labels, fig_name='data_sample', weight_labels=None):
+    """Save data samples.
+
+        Args:
+            imgs (Tensor): Batch of images to sample from. Intended to contain at least 25 images.
+            labels (Tensor): Labels of the images.
+            fig_name (string): Relative path where to save the graph. (default: data_sample)
+            weight_labels (Tensor): Weights associated to each label. (default: None)
+    """
     sample = imgs[0:25,].permute(0, 2, 3, 1).squeeze().cpu()

     plt.figure(figsize=(10,10))
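The quoted context shows only the first and last lines of viz_sample_data's body; a plausible 5x5 grid between them would look like this (a sketch, not the repository's verbatim code):

sample = imgs[0:25,].permute(0, 2, 3, 1).squeeze().cpu()
plt.figure(figsize=(10,10))
for i in range(min(25, sample.shape[0])):
    plt.subplot(5, 5, i + 1)
    plt.imshow(sample[i])            # channels-last after the permute above
    plt.title(str(labels[i].item()))
    plt.axis('off')
plt.savefig(fig_name, bbox_inches='tight')
plt.close('all')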
@@ -135,7 +149,11 @@ def viz_sample_data(imgs, labels, fig_name='data_sample', weight_labels=None):
     plt.close('all')

 def print_torch_mem(add_info=''):
+    """Print information on PyTorch memory usage.
+
+        Args:
+            add_info (string): Prefix added before the print. (default: None)
+    """
     nb=0
     max_size=0
     for obj in gc.get_objects():
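Two notes on this function: the gc.get_objects() loop it enters is the standard trick for enumerating live tensors, and the torch.cuda.max_memory_cached() call in the next hunk is deprecated in favor of torch.cuda.max_memory_reserved() in current PyTorch. A self-contained sketch of the counting pattern (print_torch_mem's exact accounting is not shown in the diff):

import gc
import torch

def count_live_tensors():
    nb, numel = 0, 0
    for obj in gc.get_objects():
        try:
            if torch.is_tensor(obj):
                nb += 1
                numel += obj.numel()
        except Exception:
            pass  # some gc-tracked objects raise on inspection
    return nb, numel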
@@ -165,6 +183,7 @@ def print_torch_mem(add_info=''):
         torch.cuda.max_memory_cached()/ mega_bytes)
     print(string)

+'''
 def plot_TF_influence(log, fig_name='TF_influence', param_names=None):
     proba=[[x["param"][idx]['p'] for x in log] for idx, _ in enumerate(log[0]["param"])]
     mag=[[x["param"][idx]['m'] for x in log] for idx, _ in enumerate(log[0]["param"])]
@@ -179,21 +198,24 @@ def plot_TF_influence(log, fig_name='TF_influence', param_names=None):
     fig_name = fig_name.replace('.',',')
     plt.savefig(fig_name, bbox_inches='tight')
     plt.close()
+'''

-### https://github.com/facebookresearch/higher/issues/18 ####
 from torch._six import inf
 def clip_norm(tensors, max_norm, norm_type=2):
-    r"""Clips norm of passed tensors.
+    """Clips norm of passed tensors.
     The norm is computed over all tensors together, as if they were
     concatenated into a single vector. Clipped tensors are returned.
-    Arguments:
-        tensors (Iterable[Tensor]): an iterable of Tensors or a
-            single Tensor to be normalized.
-        max_norm (float or int): max norm of the gradients
-        norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
-            infinity norm.
-    Returns:
-        Clipped (List[Tensor]) tensors.
+
+    See: https://github.com/facebookresearch/higher/issues/18
+
+        Args:
+            tensors (Iterable[Tensor]): an iterable of Tensors or a
+                single Tensor to be normalized.
+            max_norm (float or int): max norm of the gradients
+            norm_type (float or int): type of the used p-norm. Can be ``'inf'`` for
+                infinity norm.
+
+        Returns:
+            Clipped (List[Tensor]) tensors.
     """
     if isinstance(tensors, torch.Tensor):
         tensors = [tensors]
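The diff stops right after the isinstance guard. For reference, a body consistent with this docstring, modeled on torch.nn.utils.clip_grad_norm_ (a sketch under that assumption, not the repository's verbatim code; note that torch._six has been removed in recent PyTorch, where math.inf is the equivalent):

import torch
from math import inf  # stands in for the old torch._six import

def clip_norm(tensors, max_norm, norm_type=2):
    if isinstance(tensors, torch.Tensor):
        tensors = [tensors]
    tensors = list(tensors)
    max_norm = float(max_norm)
    if norm_type == inf:  # infinity norm: largest absolute entry
        total_norm = max(t.abs().max() for t in tensors)
    else:
        total_norm = sum(t.norm(norm_type) ** norm_type for t in tensors) ** (1. / norm_type)
    clip_coef = max_norm / (total_norm + 1e-6)
    if clip_coef < 1:
        tensors = [t * clip_coef for t in tensors]  # scale out-of-place, keeps the graph differentiable
    return tensors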