Mirror of https://github.com/AntoineHX/smart_augmentation.git
Test doxygen
parent f507ff4741
commit 99f15b8946

4 changed files with 54 additions and 25 deletions
.gitignore (vendored) | 1 +
@@ -1,5 +1,6 @@
 /higher/data
 /higher/samples
+/higher/doc
 /Gradient-Descent-The-Ultimate-Optimizer/data
 /FAR-HO/data
 /__pycache__
@@ -6,12 +6,17 @@ import torch
 from torch.utils.data import SubsetRandomSampler
 import torchvision
 
+#Train/Validation batch size.
 BATCH_SIZE = 300
-TEST_SIZE = 300
+#Test batch size.
+TEST_SIZE = BATCH_SIZE
 #TEST_SIZE = 10000 #Slightly faster / more memory consumption!
 
+#Whether to download data.
 download_data=False
+#Number of workers to use.
 num_workers=2 #4
+#Pin GPU memory
 pin_memory=False #True: more GPU memory / slower
 
 #WARNING: Dataug (Kornia) expects images in the range [0, 1]
@@ -37,8 +42,10 @@ transform = torchvision.transforms.Compose([
 #)
 
 ### Classic Dataset ###
+#Training data
 data_train = torchvision.datasets.CIFAR10("../data", train=True, download=download_data, transform=transform)
 #data_val = torchvision.datasets.CIFAR10("../data", train=True, download=download_data, transform=transform)
 
+#Testing data
 data_test = torchvision.datasets.CIFAR10("../data", train=False, download=download_data, transform=transform)
 
 train_subset_indices=range(int(len(data_train)/2))
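For context, a minimal sketch (not part of this commit) of how the settings above plausibly combine into loaders; val_subset_indices is a hypothetical name, assumed here to be the remaining half of the training set, mirroring train_subset_indices:

    from torch.utils.data import DataLoader, SubsetRandomSampler

    #Hypothetical: validation uses the second half of the training set.
    val_subset_indices = range(int(len(data_train)/2), len(data_train))

    dl_train = DataLoader(data_train, batch_size=BATCH_SIZE,
                          sampler=SubsetRandomSampler(train_subset_indices),
                          num_workers=num_workers, pin_memory=pin_memory)
    dl_val = DataLoader(data_train, batch_size=BATCH_SIZE,
                        sampler=SubsetRandomSampler(val_subset_indices),
                        num_workers=num_workers, pin_memory=pin_memory)
    dl_test = DataLoader(data_test, batch_size=TEST_SIZE,
                         num_workers=num_workers, pin_memory=pin_memory)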
@@ -5,7 +5,13 @@ import torch.nn.functional as F
 
 ## Basic CNN ##
 class LeNet(nn.Module):
+    """Basic CNN.
+
+    """
     def __init__(self, num_inp, num_out):
+        """Init LeNet.
+
+        """
         super(LeNet, self).__init__()
         self.conv1 = nn.Conv2d(num_inp, 20, 5)
         self.pool = nn.MaxPool2d(2, 2)
@@ -15,6 +21,9 @@ class LeNet(nn.Module):
         self.fc2 = nn.Linear(500, num_out)
 
     def forward(self, x):
+        """Main method of LeNet
+
+        """
         x = self.pool(F.relu(self.conv1(x)))
         x = self.pool2(F.relu(self.conv2(x)))
         x = x.view(x.size(0), -1)
@@ -23,4 +32,7 @@ class LeNet(nn.Module):
         return x
 
     def __str__(self):
+        """ Get name of model
+
+        """
         return "LeNet"
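A quick usage sketch (not in the diff), assuming the layers not shown in these hunks follow the usual LeNet sizing for CIFAR-10 (3-channel 32x32 inputs, 10 classes):

    import torch

    model = LeNet(num_inp=3, num_out=10)
    x = torch.rand(4, 3, 32, 32)   #Batch of 4 images in [0, 1]
    logits = model(x)              #Expected shape: torch.Size([4, 10])
    print(model)                   #__str__ returns "LeNet"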
@@ -18,15 +18,18 @@ import torch
 import kornia
 import random
 
-TF_no_mag={'Identity', 'FlipUD', 'FlipLR', 'Random', 'RandBlend'} #TF that have no use for the magnitude parameter.
-TF_no_grad={'Solarize', 'Posterize', '=Solarize', '=Posterize'} #TF whose implementation doesn't allow gradient propagation.
-TF_ignore_mag= TF_no_mag | TF_no_grad #TF for which magnitude should be ignored (magnitude fixed).
+#TF that have no use for the magnitude parameter.
+TF_no_mag={'Identity', 'FlipUD', 'FlipLR', 'Random', 'RandBlend'}
+#TF whose implementation doesn't allow gradient propagation.
+TF_no_grad={'Solarize', 'Posterize', '=Solarize', '=Posterize'}
+#TF for which magnitude should be ignored (magnitude fixed).
+TF_ignore_mag= TF_no_mag | TF_no_grad
 
-PARAMETER_MAX = 1 # Max 'level' a transform could be predicted
-PARAMETER_MIN = 0.1 # Min 'level' a transform could be predicted
+# Max 'level' a transform could be predicted
+PARAMETER_MAX = 1
+# Min 'level' a transform could be predicted
+PARAMETER_MIN = 0.1
 
 ### Available TF for Dataug ###
 # Dictionary mapping transformation identifiers to their function.
 # Each value of the dict should be a lambda function taking a (batch of data, magnitude of transformations) tuple as input and returning a batch of data.
 TF_dict={ #Dataugv5+
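To illustrate the contract stated above, here are hypothetical entries in the spirit of TF_dict (not the actual repo values; plain torch is used instead of kornia for brevity):

    import torch

    TF_example={
        'Identity': (lambda x, mag: x),                       #In TF_no_mag: magnitude unused.
        'FlipLR': (lambda x, mag: torch.flip(x, dims=[-1])),  #Horizontal flip; magnitude unused.
        'FlipUD': (lambda x, mag: torch.flip(x, dims=[-2])),  #Vertical flip; magnitude unused.
    }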
@@ -416,13 +419,16 @@ def blend(x,y,alpha):
     return res
 
 #Not working
-def auto_contrast(x): #NOT OPTIMIZED FOR BATCHES #EXTRA SLOW
+def auto_contrast(x):
+    """NOT TESTED - EXTRA SLOW
+
+    """
     # Optimization: efficient LUT application / per-batch/channel histogram computation
     print("Warning: not checked yet!")
     (batch_size, channels, h, w) = x.shape
     x = int_image(x) #Expect image in the range [0, 1]
     #print('Start',x[0])
     for im_idx, img in enumerate(x.chunk(batch_size, dim=0)): #Per-image operation
         #print(img.shape)
         for chan_idx, chan in enumerate(img.chunk(channels, dim=1)): #Per-channel operation
             #print(chan.shape)
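As a sketch of the optimization the comment hints at (per-channel processing without Python loops), autocontrast can be approximated batch-wise by stretching each channel to the full range. auto_contrast_batched is a hypothetical name, and unlike histogram-based variants this version stays differentiable:

    import torch

    def auto_contrast_batched(x):
        """Stretch every channel of x (B,C,H,W, values in [0,1]) to span the full [0,1] range."""
        flat = x.view(x.size(0), x.size(1), -1)
        lo = flat.min(dim=2, keepdim=True).values
        hi = flat.max(dim=2, keepdim=True).values
        scale = torch.where(hi > lo, hi - lo, torch.ones_like(hi)) #Avoid division by zero on flat channels
        return ((flat - lo) / scale).view_as(x)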
@@ -449,19 +455,22 @@ def auto_contrast(x): #NOT OPTIMIZED FOR BATCHES #EXTRA SLOW
             chan[chan==ix]=n_ix
         x[im_idx, chan_idx]=chan
 
     #print('End',x[0])
     return float_image(x)
 
-def equalize(x): #NOT OPTIMIZED FOR BATCHES
+def equalize(x):
+    """ NOT WORKING
+
+    """
     raise Exception(self, "not implemented")
     # Optimization: efficient LUT application / per-batch/channel histogram computation
     (batch_size, channels, h, w) = x.shape
     x = int_image(x) #Expect image in the range [0, 1]
     #print('Start',x[0])
     for im_idx, img in enumerate(x.chunk(batch_size, dim=0)): #Per-image operation
         #print(img.shape)
         for chan_idx, chan in enumerate(img.chunk(channels, dim=1)): #Per-channel operation
             #print(chan.shape)
             hist = torch.histc(chan, bins=256, min=0, max=255) #NOT DIFFERENTIABLE
 
     return float_image(x)
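For the LUT approach the comment suggests, one plausible per-channel completion (hypothetical code, still loop-based and, as the diff notes, not differentiable because of torch.histc; the simplified CDF rescaling below also skips the usual first-non-zero-bin correction):

    import torch

    def equalize_sketch(x):
        """Histogram-equalize an integer batch x (B,C,H,W, values 0..255)."""
        out = torch.empty_like(x)
        for b in range(x.size(0)):          #Per-image operation
            for c in range(x.size(1)):      #Per-channel operation
                chan = x[b, c]
                hist = torch.histc(chan.float(), bins=256, min=0, max=255)
                cdf = hist.cumsum(0)
                lut = (cdf - cdf.min()) / (cdf.max() - cdf.min()).clamp(min=1) * 255
                out[b, c] = lut[chan.long()] #LUT application
        return out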