Mirror of https://github.com/AntoineHX/smart_augmentation.git (synced 2025-05-04 12:10:45 +02:00)

Commit 93d91815f5 (parent f0c0559e73): Improved probability visualization

7 changed files with 720 additions and 211 deletions
@@ -21,17 +21,22 @@ if __name__ == "__main__":
     ## Acc, Time, Epochs = f(n_tf) ##
     fig_name="res/TF_seq_tests_compare"
-    inner_its = [10]
+    inner_its = [0]
     dataug_epoch_starts= [0]
-    TF_nb = 14 #range(1,14+1)
-    N_seq_TF= [1, 2, 3, 4]
+    TF_nb = range(1,14+1)
+    N_seq_TF= [1] #, 2, 3, 4]

     fig, ax = plt.subplots(ncols=3, figsize=(30, 8))
     for in_it in inner_its:
         for dataug in dataug_epoch_starts:

             n_tf = TF_nb
             #filenames =["res/TF_nb_tests/log/Aug_mod(Data_augV4(Uniform-{} TF)-LeNet)-200 epochs (dataug:{})- {} in_it.json".format(n_tf, dataug, in_it) for n_tf in TF_nb]
-            filenames =["res/TF_nb_tests/log/Aug_mod(Data_augV4(Uniform-{} TF x {})-LeNet)-200 epochs (dataug:{})- {} in_it.json".format(TF_nb, n_tf, dataug, in_it) for n_tf in N_seq_TF]
-            filenames =["res/TF_nb_tests/log/Aug_mod(Data_augV4(Uniform-{} TF x {})-LeNet)-100 epochs (dataug:{})- {} in_it.json".format(TF_nb, n_tf, dataug, in_it) for n_tf in N_seq_TF]
+            filenames =["res/TF_nb_tests/log/Aug_mod(Data_augV4(Uniform-{} TF x {})-LeNet)-200 epochs (dataug:{})- {} in_it.json".format(n_tf, 1, dataug, in_it) for n_tf in TF_nb]
+
+            #n_tf = N_seq_TF
+            #filenames =["res/TF_nb_tests/log/Aug_mod(Data_augV4(Uniform-{} TF x {})-LeNet)-200 epochs (dataug:{})- {} in_it.json".format(TF_nb, n_tf, dataug, in_it) for n_tf in N_seq_TF]
+            #filenames =["res/TF_nb_tests/log/Aug_mod(Data_augV4(Uniform-{} TF x {})-LeNet)-100 epochs (dataug:{})- {} in_it.json".format(TF_nb, n_tf, dataug, in_it) for n_tf in N_seq_TF]
+

             all_data=[]
@@ -41,9 +46,7 @@ if __name__ == "__main__":
             with open(file) as json_file:
                 data = json.load(json_file)
                 all_data.append(data)

-            n_tf = N_seq_TF
-            #n_tf = [len(x["Param_names"]) for x in all_data]
             acc = [x["Accuracy"] for x in all_data]
             epochs = [len(x["Log"]) for x in all_data]
             time = [x["Time"][0] for x in all_data]
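The comparison script above consumes the JSON files written by the training runs further down in this diff. A minimal self-contained sketch of that read-and-plot pattern (the file names here are placeholders, not the repository's actual paths):

import json
import matplotlib.pyplot as plt

# Placeholder file list; real runs use the naming scheme visible above.
filenames = ["res/TF_nb_tests/log/run_{}.json".format(i) for i in range(1, 4)]

all_data = []
for file in filenames:
    with open(file) as json_file:
        all_data.append(json.load(json_file))

# Keys follow the 'out' dict saved by the training script below.
acc = [x["Accuracy"] for x in all_data]     # best test accuracy per run
time = [x["Time"][0] for x in all_data]     # mean epoch time per run
epochs = [len(x["Log"]) for x in all_data]  # epochs actually trained

fig, ax = plt.subplots(ncols=3, figsize=(30, 8))
n_tf = range(1, len(all_data) + 1)
ax[0].plot(n_tf, acc); ax[0].set_title("Accuracy")
ax[1].plot(n_tf, time); ax[1].set_title("Time")
ax[2].plot(n_tf, epochs); ax[2].set_title("Epochs")
fig.savefig("res/TF_seq_tests_compare")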
@@ -54,6 +54,7 @@ class LeNet(nn.Module):
 ## Wide ResNet ##
 #https://github.com/xternalz/WideResNet-pytorch/blob/master/wideresnet.py
+#https://github.com/arcelien/pba/blob/master/pba/wrn.py
 #https://github.com/szagoruyko/wide-residual-networks/blob/master/pytorch/resnet.py
 class BasicBlock(nn.Module):
     def __init__(self, in_planes, out_planes, stride, dropRate=0.0):
         super(BasicBlock, self).__init__()
@@ -97,9 +98,10 @@ class WideResNet(nn.Module):
     def __init__(self, num_classes, wrn_size, depth=28, dropRate=0.0):
         super(WideResNet, self).__init__()

-        kernel_size = wrn_size
+        self.kernel_size = wrn_size
+        self.depth=depth
         filter_size = 3
-        nChannels = [min(kernel_size, 16), kernel_size, kernel_size * 2, kernel_size * 4]
+        nChannels = [min(self.kernel_size, 16), self.kernel_size, self.kernel_size * 2, self.kernel_size * 4]
         strides = [1, 2, 2] # stride for each resblock

         #nChannels = [16, 16*widen_factor, 32*widen_factor, 64*widen_factor]
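For a quick sense of the widths this produces, a sketch using the wrn_size=16 that the test script later in this diff passes in:

wrn_size = 16
nChannels = [min(wrn_size, 16), wrn_size, wrn_size * 2, wrn_size * 4]
print(nChannels)  # [16, 16, 32, 64] -- channel width per block group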
@@ -137,4 +139,10 @@ class WideResNet(nn.Module):
         out = self.relu(self.bn1(out))
         out = F.avg_pool2d(out, 8)
         out = out.view(-1, self.nChannels)
         return self.fc(out)
+
+    def architecture(self):
+        return super(WideResNet, self).__str__()
+
+    def __str__(self):
+        return "WideResNet(s{}-d{})".format(self.kernel_size, self.depth)
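The two new methods split the model's textual identity: __str__ now returns a short size/depth tag, so str(model) stays compact and filesystem-friendly when composed into figure and log names (as the training script below does), while architecture() still exposes the full nn.Module dump. A usage sketch, assuming the WideResNet class as defined in this diff:

model = WideResNet(num_classes=10, wrn_size=16)
print(model)                 # WideResNet(s16-d28) -- compact tag
print(model.architecture())  # full layer-by-layer module listing
fig_name = "res/{}-{} epochs".format(model, 200)  # short name for plots/logs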
(binary image file changed, not shown: 52 KiB -> 54 KiB)
(file diff suppressed because it is too large)
(binary image file changed, not shown: 64 KiB -> 77 KiB)
@@ -38,13 +38,13 @@ else:
 if __name__ == "__main__":

     n_inner_iter = 10
-    epochs = 200
+    epochs = 2
     dataug_epoch_start=0

     #### Classic ####
     '''
-    model = LeNet(3,10).to(device)
     #model = torchvision.models.resnet18()
+    #model = LeNet(3,10).to(device)
+    model = WideResNet(num_classes=10, wrn_size=16).to(device)
     #model = Augmented_model(Data_augV3(mix_dist=0.0), LeNet(3,10)).to(device)
     #model.augment(mode=False)
@@ -69,31 +69,32 @@ if __name__ == "__main__":
     tf_dict = {k: TF.TF_dict[k] for k in tf_names}
     #tf_dict = TF.TF_dict
     aug_model = Augmented_model(Data_augV4(TF_dict=tf_dict, N_TF=2, mix_dist=0.0), LeNet(3,10)).to(device)
+    #aug_model = Augmented_model(Data_augV4(TF_dict=tf_dict, N_TF=2, mix_dist=0.0), WideResNet(num_classes=10, wrn_size=160)).to(device)
     print(str(aug_model), 'on', device_name)
     #run_simple_dataug(inner_it=n_inner_iter, epochs=epochs)
-    log= run_dist_dataugV2(model=aug_model, epochs=epochs, inner_it=n_inner_iter, dataug_epoch_start=dataug_epoch_start, print_freq=10, loss_patience=10)
+    log= run_dist_dataugV2(model=aug_model, epochs=epochs, inner_it=n_inner_iter, dataug_epoch_start=dataug_epoch_start, print_freq=1, loss_patience=10)

     ####
-    plot_res(log, fig_name="res/{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter))
+    plot_res(log, fig_name="res/{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter), param_names=tf_names)
     print('-'*9)
     times = [x["time"] for x in log]
     out = {"Accuracy": max([x["acc"] for x in log]), "Time": (np.mean(times),np.std(times)), "Device": device_name, "Param_names": aug_model.TF_names(), "Log": log}
-    print(str(aug_model),": acc", out["Accuracy"], "in (s ?):", out["Time"][0], "+/-", out["Time"][1])
+    print(str(aug_model),": acc", out["Accuracy"], "in (s?):", out["Time"][0], "+/-", out["Time"][1])
     with open("res/log/%s.json" % "{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter), "w+") as f:
         json.dump(out, f, indent=True)
         print('Log :\"',f.name, '\" saved !')

-    print('Execution Time : %.00f (s ?)'%(time.process_time() - t0))
+    print('Execution Time : %.00f (s?)'%(time.process_time() - t0))
     print('-'*9)
     #'''
     #### TF number tests ####
     '''
     res_folder="res/TF_nb_tests/"
-    epochs= 100
+    epochs= 200
     inner_its = [10]
     dataug_epoch_starts= [0]
-    TF_nb = [len(TF.TF_dict)] #range(1,len(TF.TF_dict)+1)
-    N_seq_TF= [1, 2, 3, 4]
+    TF_nb = range(1,len(TF.TF_dict)+1) #[len(TF.TF_dict)]
+    N_seq_TF= [1] #[1, 2, 3, 4]

     try:
         os.mkdir(res_folder)
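The out dict above fixes the log schema that the comparison script at the top of this diff reads back. A minimal sketch of producing a compatible file (all values here are fabricated placeholders):

import json
import numpy as np

# Per-epoch entries carry at least 'epoch', 'acc', 'time' and 'param'.
log = [{"epoch": e, "acc": 50.0 + e, "time": 1.2, "param": [0.5, 0.5]} for e in range(3)]

times = [x["time"] for x in log]
out = {"Accuracy": max(x["acc"] for x in log),
       "Time": (np.mean(times), np.std(times)),
       "Device": "cpu",
       "Param_names": ["TF_A", "TF_B"],  # hypothetical transformation names
       "Log": log}

with open("example_log.json", "w+") as f:
    json.dump(out, f, indent=True)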
@@ -106,7 +107,6 @@ if __name__ == "__main__":
     for dataug_epoch_start in dataug_epoch_starts:
         print("---Starting dataug", dataug_epoch_start,"---")
         for n_tf in N_seq_TF:
             print("---Starting N_TF", n_tf,"---")
             for i in TF_nb:
                 keys = list(TF.TF_dict.keys())[0:i]
                 ntf_dict = {k: TF.TF_dict[k] for k in keys}
-
@@ -114,7 +114,7 @@ if __name__ == "__main__":
                 aug_model = Augmented_model(Data_augV4(TF_dict=ntf_dict, N_TF=n_tf, mix_dist=0.0), LeNet(3,10)).to(device)
                 print(str(aug_model), 'on', device_name)
                 #run_simple_dataug(inner_it=n_inner_iter, epochs=epochs)
-                log= run_dist_dataugV2(model=aug_model, epochs=epochs, inner_it=n_inner_iter, dataug_epoch_start=dataug_epoch_start, print_freq=10, loss_patience=None)
+                log= run_dist_dataugV2(model=aug_model, epochs=epochs, inner_it=n_inner_iter, dataug_epoch_start=dataug_epoch_start, print_freq=10, loss_patience=10)

                 ####
                 plot_res(log, fig_name=res_folder+"{}-{} epochs (dataug:{})- {} in_it".format(str(aug_model),epochs,dataug_epoch_start,n_inner_iter))
@@ -127,6 +127,4 @@ if __name__ == "__main__":
                 print('Log :\"',f.name, '\" saved !')
     print('-'*9)

     '''
-
-    '''
@@ -15,7 +15,7 @@ def print_graph(PyTorch_obj, fig_name='graph'):
     graph.format = 'svg' #https://graphviz.readthedocs.io/en/stable/manual.html#formats
     graph.render(fig_name)

-def plot_res(log, fig_name='res'):
+def plot_res(log, fig_name='res', param_names=None):

     epochs = [x["epoch"] for x in log]
@@ -36,10 +36,13 @@ def plot_res(log, fig_name='res'):
         ax[2].legend()
     else :
         ax[2].set_title('Prob')
-        for idx, _ in enumerate(log[0]["param"]):
-            ax[2].plot(epochs,[x["param"][idx] for x in log], label='P'+str(idx))
-        ax[2].legend()
-        #ax[2].legend(('P-0', 'P-45', 'P-180'))
+        #for idx, _ in enumerate(log[0]["param"]):
+            #ax[2].plot(epochs,[x["param"][idx] for x in log], label='P'+str(idx))
+        if not param_names : param_names = ['P'+str(idx) for idx, _ in enumerate(log[0]["param"])]
+        proba=[[x["param"][idx] for x in log] for idx, _ in enumerate(log[0]["param"])]
+        ax[2].stackplot(epochs, proba, labels=param_names)
+        ax[2].legend(param_names, loc='center left', bbox_to_anchor=(1, 0.5))
+

     fig_name = fig_name.replace('.',',')
     plt.savefig(fig_name)
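This is the visualization change named in the commit title: per-parameter line plots give way to a stacked-area view, which suits the data because the transformation probabilities sum to 1 at each epoch, so each band's width is exactly the probability mass of one transformation. A minimal standalone sketch of the same idea (the log contents and TF names below are fabricated placeholders):

import matplotlib.pyplot as plt

# Hypothetical log: at each epoch, 'param' holds probabilities that sum to 1.
log = [{"epoch": 0, "param": [0.5, 0.3, 0.2]},
       {"epoch": 1, "param": [0.4, 0.4, 0.2]},
       {"epoch": 2, "param": [0.2, 0.5, 0.3]}]
param_names = ["Rotate", "FlipLR", "Contrast"]  # assumed TF names, for illustration

epochs = [x["epoch"] for x in log]
# One series per parameter: proba[i][t] is the probability of TF i at epoch t.
proba = [[x["param"][idx] for x in log] for idx, _ in enumerate(log[0]["param"])]

fig, ax = plt.subplots()
ax.stackplot(epochs, proba, labels=param_names)
ax.legend(param_names, loc='center left', bbox_to_anchor=(1, 0.5))  # legend outside the axes
plt.savefig("proba_stackplot")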
@@ -193,6 +196,20 @@ def print_torch_mem(add_info=''):

     #print(add_info, "-Garbage size :",len(gc.garbage))

+    """Simple GPU memory report."""
+
+    mega_bytes = 1024.0 * 1024.0
+    string = add_info + ' memory (MB)'
+    string += ' | allocated: {}'.format(
+        torch.cuda.memory_allocated() / mega_bytes)
+    string += ' | max allocated: {}'.format(
+        torch.cuda.max_memory_allocated() / mega_bytes)
+    string += ' | cached: {}'.format(torch.cuda.memory_cached() / mega_bytes)
+    string += ' | max cached: {}'.format(
+        torch.cuda.max_memory_cached()/ mega_bytes)
+    print(string)
+
+
 class loss_monitor(): #See https://github.com/pytorch/ignite
     def __init__(self, patience, end_train=1):
         self.patience = patience
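The new body of print_torch_mem is self-contained: calling print_torch_mem('after epoch') on a CUDA machine prints current and peak allocated/cached totals in MB (note that torch.cuda.memory_cached and max_memory_cached were later renamed memory_reserved and max_memory_reserved in newer PyTorch releases).

The loss_monitor class is truncated by the diff; judging from the loss_patience arguments threaded through run_dist_dataugV2 above, it implements patience-based early stopping. A hedged sketch of that mechanism, not the repository's actual implementation:

import numpy as np

class LossPatienceMonitor():  # illustrative stand-in for loss_monitor
    def __init__(self, patience, end_train=1):
        self.patience = patience    # epochs allowed without improvement
        self.end_train = end_train  # how many times patience may run out
        self.best_loss = np.inf
        self.counter = 0
        self.end_count = 0

    def register(self, loss):
        # Reset the stale-epoch counter on improvement, otherwise count one.
        if loss < self.best_loss:
            self.best_loss = loss
            self.counter = 0
        else:
            self.counter += 1

    def end_training(self):
        # Signal a stop once patience has been exceeded end_train times.
        if self.counter >= self.patience:
            self.end_count += 1
            self.counter = 0
        return self.end_count >= self.end_train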