Changes since Teledyne

This commit is contained in:
Antoine Harlé 2024-08-20 11:53:35 +02:00 committed by AntoineH
parent 03ffd7fe05
commit b89dac9084
185 changed files with 16668 additions and 484 deletions

34
jobs/benchmark.sh Normal file
View file

@@ -0,0 +1,34 @@
#!/bin/bash
# SLURM job: benchmark run of test_dataug.py (ResNet18, CIFAR10, 200 epochs,
# augmentation enabled). Submitted as a 3-way job array so each array task
# writes an independently-suffixed result (via $SLURM_ARRAY_TASK_ID).
# Requires the ~/dataug virtualenv created by jobs/setup_env.sh.
#SBATCH --gres=gpu:v100l:1 # https://docs.computecanada.ca/wiki/Using_GPUs_with_Slurm
#SBATCH --cpus-per-task=6 #6 # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M #32000M # Memory proportional to CPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=20:00:00
#SBATCH --job-name=Benchmark
#SBATCH --output=log/%x-%j.out
#SBATCH --mail-user=harle.collette.antoine@gmail.com
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL
#SBATCH --array=1-3

# Fail fast on any error or failed pipeline stage. Placed AFTER the #SBATCH
# block: SLURM stops parsing directives at the first non-comment line.
# (-u is deliberately omitted: virtualenv activate scripts may read unset vars.)
set -eo pipefail

# Setup
source ~/dataug/bin/activate

# Execute (submitted from .../smart_augmentation/jobs, hence the relative path).
cd ../higher/smart_aug/ || exit 1
# Flags presumably map to test_dataug.py's CLI — confirm in smart_aug/test_dataug.py.
python test_dataug.py \
  -a true \
  -m resnet18 \
  -ep 200 \
  -K 1 \
  -N 3 \
  -tfc ../config/invScale_wide_tf_config2.json \
  -ds CIFAR10 \
  -al 10 \
  -wu 2 \
  -rf '../res/benchmark/' \
  -pf "__${SLURM_ARRAY_TASK_ID}"

24
jobs/classic.sh Normal file
View file

@@ -0,0 +1,24 @@
#!/bin/bash
# SLURM job: "classic" training run of test_dataug.py — wide_resnet50_2,
# 40 epochs, starting from pretrained weights (-pt true), no augmentation
# search flags. Requires the ~/dataug virtualenv from jobs/setup_env.sh.
# NOTE(review): job-name mentions TinyImageNet but no -ds flag is passed —
# the dataset used is whatever test_dataug.py defaults to; confirm.
#SBATCH --gres=gpu:1 #gpu:v100l:1 # https://docs.computecanada.ca/wiki/Using_GPUs_with_Slurm
#SBATCH --cpus-per-task=6 #6 (V100:8) # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M #32000M # Memory proportional to CPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=8:00:00
#SBATCH --job-name=Classic-WRN50_2-TinyImageNet
#SBATCH --output=log/%x-%j.out
#SBATCH --mail-user=harle.collette.antoine@gmail.com
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL

# Fail fast; placed after the #SBATCH block (SLURM stops parsing directives
# at the first non-comment line). -u omitted: activate may read unset vars.
set -eo pipefail

# Setup
source ~/dataug/bin/activate

# Execute (submitted from .../smart_augmentation/jobs, hence the relative path).
cd ../higher/smart_aug/ || exit 1
python test_dataug.py \
  -m wide_resnet50_2 \
  -ep 40 \
  -pt true \
  -pf __pretrained

27
jobs/cpu_test.sh Normal file
View file

@@ -0,0 +1,27 @@
#!/bin/bash
# SLURM job: quick CPU-only smoke test of test_dataug.py (-dv 'cpu') with a
# small model (LeNet) and 2 epochs — no GPU requested. Useful for validating
# the code path without burning GPU allocation.
#SBATCH --cpus-per-task=6 #6 # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M #32000M # Memory proportional to CPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=01:00:00
#SBATCH --job-name=test_dataug
#SBATCH --output=log/%x-%j.out

# Fail fast; placed after the #SBATCH block (SLURM stops parsing directives
# at the first non-comment line). -u omitted: activate may read unset vars.
set -eo pipefail

# Setup
source ~/dataug/bin/activate

# Execute (submitted from .../smart_augmentation/jobs, hence the relative path).
cd ../higher/smart_aug/ || exit 1
python test_dataug.py \
  -dv 'cpu' \
  -a true \
  -m LeNet \
  -ep 2 \
  -K 1 \
  -N 3 \
  -tfc ../config/wide_geom_tf_config.json \
  -ms 1 \
  -ls true \
  -pf __test

30
jobs/dataug.sh Normal file
View file

@@ -0,0 +1,30 @@
#!/bin/bash
# SLURM job: data-augmentation training run of test_dataug.py — ResNet18 on
# CIFAR10, 50 epochs, K=1/N=3 with the invScale_wide transform config.
# Requires the ~/dataug virtualenv from jobs/setup_env.sh.
#SBATCH --gres=gpu:v100l:1 # https://docs.computecanada.ca/wiki/Using_GPUs_with_Slurm
#SBATCH --cpus-per-task=6 #6 # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M # Memory proportional to CPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=10:00:00
#SBATCH --job-name=Dataug-ResNet18
# --exclusive #In case of Out of Memory ?
#SBATCH --output=log/%x-%j.out
#SBATCH --mail-user=harle.collette.antoine@gmail.com
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL

# Fail fast; placed after the #SBATCH block (SLURM stops parsing directives
# at the first non-comment line). -u omitted: activate may read unset vars.
set -eo pipefail

# Setup
source ~/dataug/bin/activate

# Execute (submitted from .../smart_augmentation/jobs, hence the relative path).
cd ../higher/smart_aug/ || exit 1
python test_dataug.py \
  -a true \
  -m resnet18 \
  -ep 50 \
  -K 1 \
  -N 3 \
  -tfc ../config/invScale_wide_tf_config.json \
  -ds CIFAR10 \
  -al 1 \
  -pf __test_noCopy

37
jobs/imageNet_job.sh Normal file
View file

@@ -0,0 +1,37 @@
#!/bin/bash
# SLURM job: ImageNet run of test_dataug.py (ResNet18, 40 epochs, K=0,
# pretrained). Stages the ImageNet tarball onto node-local scratch
# ($SLURM_TMPDIR) before training, since reading it from shared storage
# would be far slower.
#SBATCH --gres=gpu:v100l:1 # https://docs.computecanada.ca/wiki/Using_GPUs_with_Slurm
#SBATCH --cpus-per-task=6 # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M # Memory proportional to GPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=300:00:00
#SBATCH --job-name=imageNet-RandAug-ResNet18
#SBATCH --output=log/%x-%j.out
#SBATCH --mail-user=harle.collette.antoine@gmail.com
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL

# Fail fast; placed after the #SBATCH block (SLURM stops parsing directives
# at the first non-comment line). -u omitted: activate may read unset vars.
set -eo pipefail

# Stage the dataset onto node-local scratch. mkdir -p keeps this idempotent
# if the directory already exists; quoting protects against odd tmpdir paths.
echo "Extracting ImageNet..."
mkdir -p "$SLURM_TMPDIR/data"
time tar -xf ~/projects/def-mpederso/dataset/imagenet/imagenet.tar -C "$SLURM_TMPDIR/data"
echo "Executing code..."

# Setup
source ~/dataug/bin/activate

# Execute (submitted from .../smart_augmentation/jobs, hence the relative path).
cd ../higher/smart_aug/ || exit 1
time python test_dataug.py \
  -a true \
  -m resnet18 \
  -ep 40 \
  -K 0 \
  -N 3 \
  -tfc ../config/invScale_wide_tf_config.json \
  -dr "$SLURM_TMPDIR/data" \
  -ds ImageNet \
  -al 10 \
  -pt true \
  -pf __pretrained_AL10

30
jobs/randaug.sh Normal file
View file

@@ -0,0 +1,30 @@
#!/bin/bash
# SLURM job: RandAug-style run of test_dataug.py (K=0) — ResNet18 on CIFAR10,
# 200 epochs, FP16 data type. Requires the ~/dataug virtualenv from
# jobs/setup_env.sh.
#SBATCH --gres=gpu:1 #v100l:1 # https://docs.computecanada.ca/wiki/Using_GPUs_with_Slurm
#SBATCH --cpus-per-task=6 #6 # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M #32000M # Memory proportional to CPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=10:00:00
#SBATCH --job-name=RandAug-ResNet18
#SBATCH --output=log/%x-%j.out
#SBATCH --mail-user=harle.collette.antoine@gmail.com
#SBATCH --mail-type=END
#SBATCH --mail-type=FAIL

# Fail fast; placed after the #SBATCH block (SLURM stops parsing directives
# at the first non-comment line). -u omitted: activate may read unset vars.
set -eo pipefail

# Setup
source ~/dataug/bin/activate

# Execute (submitted from .../smart_augmentation/jobs, hence the relative path).
cd ../higher/smart_aug/ || exit 1
python test_dataug.py \
  -a true \
  -m resnet18 \
  -ep 200 \
  -K 0 \
  -N 3 \
  -tfc ../config/invScale_wide_tf_config.json \
  -ds CIFAR10 \
  -al 10 \
  -dt FP16 \
  -pf __AL10_FP16

30
jobs/setup_env.sh Normal file
View file

@@ -0,0 +1,30 @@
#!/bin/bash
# SLURM job: one-time setup of the ~/dataug Python virtualenv used by every
# other job script in this directory. Installs torch/torchvision from the
# cluster's wheelhouse (--no-index), plus kornia, facebookresearch/higher
# (from source), gradual-warmup-lr and efficientnet_pytorch from PyPI/GitHub.
#SBATCH --gres=gpu:1 # Request GPU "generic resources"
#SBATCH --cpus-per-task=6 # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M # Memory proportional to GPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=00:03:00
#SBATCH --job-name=setup
#SBATCH --output=log/%x-%j.out

# Fail fast; placed after the #SBATCH block (SLURM stops parsing directives
# at the first non-comment line). -u omitted: activate may read unset vars.
set -eo pipefail

# Setup
#sudo rm -rf ~/dataug/
module load python/3.7.4
virtualenv --no-download ~/dataug
source ~/dataug/bin/activate
pip install --no-index --upgrade pip
pip install --no-index torch torchvision #torchviz
pip install kornia # --no-deps ?

# Install 'higher' from source. Skip the clone if the checkout already exists
# (keeps re-runs idempotent); the subshell keeps the cwd unchanged afterwards.
[ -d higher ] || git clone https://github.com/facebookresearch/higher.git
( cd higher && pip install . )

pip install git+https://github.com/ildoonet/pytorch-gradual-warmup-lr.git
pip install efficientnet_pytorch
pip install --no-index matplotlib scipy
#

25
jobs/test.sh Normal file
View file

@@ -0,0 +1,25 @@
#!/bin/bash
# SLURM job: short GPU test run of test_dataug.py — LeNet, 20 epochs, K=1/N=3
# with the wide_geom transform config. Requires the ~/dataug virtualenv from
# jobs/setup_env.sh.
#SBATCH --gres=gpu:1 #:v100l:1 # https://docs.computecanada.ca/wiki/Using_GPUs_with_Slurm
#SBATCH --cpus-per-task=6 #6 # Cores proportional to GPUs: 6 on Cedar, 16 on Graham.
#SBATCH --mem=32000M #32000M # Memory proportional to CPUs: 32000 Cedar, 64000 Graham.
#SBATCH --account=def-mpederso
#SBATCH --time=01:00:00
#SBATCH --job-name=test_dataug
#SBATCH --output=log/%x-%j.out

# Fail fast; placed after the #SBATCH block (SLURM stops parsing directives
# at the first non-comment line). -u omitted: activate may read unset vars.
set -eo pipefail

# Setup
source ~/dataug/bin/activate

# Execute (submitted from .../smart_augmentation/jobs, hence the relative path).
cd ../higher/smart_aug/ || exit 1
python test_dataug.py \
  -a true \
  -m LeNet \
  -ep 20 \
  -K 1 \
  -N 3 \
  -tfc ../config/wide_geom_tf_config.json \
  -pf __test_LossFix