Commit cba2e1d6 by Zhihong Ma

fix: add quantization feature for loss trajectory mia

parent 68add01f
import openpyxl
import torch
from global_var import GlobalVariables
from utils import *
import gol
import argparse
import numpy as np
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='DIV_TrajectoryMIA')
    parser.add_argument('--model', type=str, default='resnet18', help=['resnet18','resnet50','resnet152','mobilenetv2'])
    parser.add_argument('--data', type=str, default='cifar10', help=['cinic10', 'cifar10', 'cifar100', 'gtsrb'])
    args = parser.parse_args()

    data_path = f'networks/0/target/{args.data}_{args.model}/trajectory_test_data.npy'
    # compute the JS divergence for every quantization configuration in one pass
    gol._init()
    quant_type_list = ['INT', 'POT', 'FLOAT']
    filename = f'{args.model}_result.xlsx'
    workbook = openpyxl.load_workbook(filename)
    worksheet = workbook[args.data]
    for quant_type in quant_type_list:
        num_bit_list = numbit_list(quant_type)
        for num_bits in num_bit_list:
            e_bit_list = ebit_list(quant_type, num_bits)
            for e_bits in e_bit_list:
                if quant_type == 'FLOAT':
                    title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
                else:
                    title = '%s_%d' % (quant_type, num_bits)
                model_name_ptq = f'{args.data}_{args.model}_{title}'
                p_data_path = f'networks/0/target/{model_name_ptq}/trajectory_test_data.npy'
                # loss trajectory of the distill model for the FP32 target
                dataSet = np.load(data_path, allow_pickle=True).item()
                # loss trajectory of the distill model for the quantized (PTQ) target
                p_dataSet = np.load(p_data_path, allow_pickle=True).item()
                data = torch.from_numpy(np.array(dataSet['model_trajectory'], dtype='f')).transpose(0, 1)
                p_data = torch.from_numpy(np.array(p_dataSet['model_trajectory'], dtype='f')).transpose(0, 1)
                # data = torch.from_numpy(np.array(dataSet['model_trajectory'], dtype='f'))
                # p_data = torch.from_numpy(np.array(p_dataSet['model_trajectory'], dtype='f'))
                div = js_div(data, p_data)
                div = div.item()
                if div < 0:
                    div = 0
                print(f"js div of {model_name_ptq}: {div}")
                # worksheet rows are offset by 4 relative to title_list indices
                idx = GlobalVariables.title_list.index(title)
                idx += 4
                worksheet.cell(row=idx, column=2, value=div)
    workbook.save(filename)
class GlobalVariables:
    SELF_INPLANES = 0
    title_list = ['INT_2','INT_3','INT_4','INT_5','INT_6','INT_7','INT_8','INT_9','INT_10','INT_11','INT_12','INT_13','INT_14','INT_15','INT_16','POT_2','POT_3','POT_4','POT_5','POT_6','POT_7','POT_8','FLOAT_3_E1','FLOAT_4_E1','FLOAT_4_E2','FLOAT_5_E1','FLOAT_5_E2','FLOAT_5_E3','FLOAT_6_E1','FLOAT_6_E2','FLOAT_6_E3','FLOAT_6_E4','FLOAT_7_E1','FLOAT_7_E2','FLOAT_7_E3','FLOAT_7_E4','FLOAT_7_E5','FLOAT_8_E1','FLOAT_8_E2','FLOAT_8_E3','FLOAT_8_E4','FLOAT_8_E5','FLOAT_8_E6']
...@@ -44,6 +44,12 @@ if __name__ == '__main__':
parser.add_argument('--model_distill', type=str, default='resnet18',help=['resnet18','resnet50','resnet152','mobilenetv2'])
parser.add_argument('--epochs_distill', type=int, default=100)
parser.add_argument('--mia_type', type=str, help=['build-dataset', 'black-box'])
parser.add_argument('--load_attack',action='store_true', help='load a trained attack model')
parser.add_argument('--store_ptq',action='store_true', help='store a ptq model')
parser.add_argument('--quant_type', type=str, choices=['INT', 'POT', 'FLOAT'], default=None,help='choose a ptq mode for target model')
parser.add_argument("--num_bits",type=int,default=0)
parser.add_argument("--e_bits",type=int,default=0)
parser.add_argument('--load_ptq',action='store_true', help='load a ptq target model')
args = parser.parse_args()
utils.set_random_seeds(args.seed)
...
## Loss Trajectory MIA
#### Update 2023.6.1
1\. What was added:
- Train the Distill Target Model with the quantized Target Model (Q-Distill for short)
- Build the Attack Model's test set from the Q-Distill loss trajectory
- Attack the quantized Target Model with the Attack Model
- Use JS divergence to measure the similarity of the Distill Target Model loss-trajectory data obtained before and after quantizing the Target Model
2\. Approach
- Attack-scenario assumptions:
We first inherit the attack scenario described earlier (the attacker knows the Target Model's architecture and has some knowledge of the dataset, so the Shadow Model, Distill Target Model, and Distill Shadow Model all use the same architecture as the Target Model; CIFAR10 is split into several disjoint parts that serve as the training and test sets of the Target Model, Shadow Model, Distill Target Model, and Distill Shadow Model).
We further assume the attacker does not know, or does not care, whether the Target Model is quantized, and therefore still uses FP32 versions of the Shadow Model, Distill Target Model, and Distill Shadow Model.
We keep the original Attack Model, attack the quantized Target Model, and use the change in Acc and AUC to assess how the privacy risk carries over to the quantized model.
- Experimental design
Given these assumptions, the changes concentrate on three parts: training the Distill Target Model, building the Attack Model's test-set data, and testing the Attack Model.
When training the Distill Target Model, the Target Model outputs it learns from are switched from the FP32 model's outputs to the quantized Target Model's outputs; that is, the quantized Target Model is distilled and the Distill Target Model is retrained.
Because the attacker's Shadow Model and Distill Shadow Model remain FP32 models, they need no retraining; accordingly, the Attack Model, which is trained on a dataset built from the Distill Shadow Model's loss trajectory, needs no retraining either.
The loss trajectory of the Distill Target Model obtained under each quantization setting is turned into a test set for the Attack Model; the Attack Model is tested on it, and the best Acc with its corresponding AUC is recorded.
The JS divergence between the loss trajectories obtained before and after quantization is computed as the similarity measure (a sketch of this computation follows this section).
- Expected result
Since the Attack Model's training is determined by the Shadow Model and Distill Shadow Model, both FP32 models, the more similar the quantized Target Model is to the FP32 Target Model, the closer the Distill Model loss trajectories should be, the better the attack should work, and the weaker the privacy of the quantized model.
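The JS divergence itself is computed by `js_div` from `utils`, which is not part of this diff. Below is a minimal sketch, assuming a standard Jensen-Shannon divergence over softmax-normalized loss-trajectory tensors; the exact normalization inside `utils.js_div` may differ.
```python
import torch
import torch.nn.functional as F

def js_div(p_output, q_output, get_softmax=True):
    # Hypothetical stand-in for utils.js_div: symmetric Jensen-Shannon
    # divergence between two batches of loss-trajectory vectors.
    kl = torch.nn.KLDivLoss(reduction='batchmean')
    if get_softmax:
        p_output = F.softmax(p_output, dim=-1)
        q_output = F.softmax(q_output, dim=-1)
    m = ((p_output + q_output) / 2).log()
    # JS(P, Q) = 0.5 * KL(P || M) + 0.5 * KL(Q || M), where M is the mixture
    return 0.5 * (kl(m, p_output) + kl(m, q_output))
```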
3\. Experimental results
Experiments were run on ResNet18 + CIFAR10; the resulting data are in **resnet18_result.xlsx**. The fitted curves are shown below; after some trials, a rational function with quadratic numerator and denominator still fits best (a fitting sketch appears at the end of this section).
acc_loss - js
<img src = "fig/acc_loss_curve.png" class="h-90 auto">
auc_loss - js
<img src = "fig/auc_loss_curve.png" class="h-90 auto">
When the JS distance is small (i.e., the loss trajectories are fairly similar), acc_loss and auc_loss fluctuate relatively strongly, which indicates that even a quantized Target Model that is close to the FP32 Target Model may still see some privacy improvement.
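For reference, here is a minimal sketch of the degree-2/degree-2 rational fit mentioned above, using `scipy.optimize.curve_fit`; the `js` and `acc_loss` arrays are placeholder values, not the numbers in **resnet18_result.xlsx**.
```python
import numpy as np
from scipy.optimize import curve_fit

def rational22(x, a0, a1, a2, b1, b2):
    # Rational function with quadratic numerator and denominator;
    # the denominator's constant term is fixed to 1 to avoid over-parameterization.
    return (a0 + a1 * x + a2 * x ** 2) / (1.0 + b1 * x + b2 * x ** 2)

# Placeholder (js, acc_loss) pairs; the real values come from resnet18_result.xlsx.
js = np.array([0.01, 0.02, 0.05, 0.10, 0.20, 0.35])
acc_loss = np.array([0.002, 0.004, 0.010, 0.030, 0.080, 0.150])

params, _ = curve_fit(rational22, js, acc_loss, p0=np.ones(5), maxfev=10000)
print("fitted coefficients:", params)
```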
4\. Issues
Q1: The run time is long. For every quantization setting, the Target Model has to be quantized, a corresponding Distill Target Model trained, the Attack Model test set built, and the Attack Model tested, which is a long pipeline.
A1: We currently plan to drop some of the slower test points, mainly the FLOAT ones, including FLOAT_6_E2, FLOAT_7_E2, FLOAT_7_E3, FLOAT_8_E2, FLOAT_8_E3, FLOAT_8_E6. (FLOAT quantization is the slow part; with ResNet50 + CIFAR10 each distill-model training epoch takes under 1 min, so distill training is not a major cost on CIFAR10/100, but with CINIC-10 the distill training also becomes too expensive.)
POT_7 and POT_8 could also be dropped. The points planned for removal carry largely redundant information, have little effect on the curve's trend, and are comparatively slow to quantize.
Q2: The experimental pipeline for each model and dataset is long.
**Note**: the .sh training scripts are collected in the sh folder; among them, ``train_attack_18_10_ptq_FP_S1, train_attack_18_10_ptq_FP_S2, train_attack_18_10_ptq_FP_S3, train_attack_18_10_ptq_FP_S4, train_attack_18_10_ptq_INT_S1, train_attack_18_10_ptq_INT_S2, train_attack_18_10_ptq_POT, train_div.sh`` are the ones used for this update.
<br><br>
#### Update 2023.5.28
1\. Approach
...@@ -12,7 +79,7 @@
But traditional methods cannot distinguish inputs that are not in the training set yet still get a very small loss from the model. The paper's authors observed that, during training, such small-loss samples differ in how fast and along what path their loss converges depending on whether they are in the training set. As shown in the figure, they have different loss trajectories (Loss Trajectory).
<img src = "fig/p1.png" class="h-90 auto">
If a small-loss sample is not in the training set, it is usually a relatively easy image, so its training loss drops quickly, and its loss trajectory in the middle of training lies below that of samples that are in the training set. By capturing this difference in loss trajectories, a more effective MIA can be achieved.
...@@ -773,8 +840,3 @@ Q2: How to make predictions?
A2: One option is to treat the loss trajectory as the property, compute the similarity of the loss trajectories of in/out data (member_status can serve as the in/out label), and predict from that. Pairs of the Distill Target Model's loss trajectory and the attack Acc (AUC, ...) can form the data points (the Distill Shadow Model's trajectories can be added as a supplement).
The expected effect is that the lower the similarity, the higher the attack success rate.
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_152 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cifar10"
python main.py --mode target --model resnet152 --data cifar10
echo "python main.py --mode shadow --model resnet152 --data cifar10"
python main.py --mode shadow --model resnet152 --data cifar10
echo "python main.py --mode distill_target --model resnet152 --data cifar10"
python main.py --mode distill_target --model resnet152 --data cifar10
echo "python main.py --mode distill_shadow --model resnet152 --data cifar10"
python main.py --mode distill_shadow --model resnet152 --data cifar10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_152_10 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cifar100"
python main.py --mode target --model resnet152 --data cifar100
echo "python main.py --mode shadow --model resnet152 --data cifar100"
python main.py --mode shadow --model resnet152 --data cifar100
echo "python main.py --mode distill_target --model resnet152 --data cifar100"
python main.py --mode distill_target --model resnet152 --data cifar100
echo "python main.py --mode distill_shadow --model resnet152 --data cifar100"
python main.py --mode distill_shadow --model resnet152 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_10 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 3-00:00:00 # Run for a maximum time of 3 days, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cinic10"
python main.py --mode target --model resnet152 --data cinic10
echo "python main.py --mode shadow --model resnet152 --data cinic10"
python main.py --mode shadow --model resnet152 --data cinic10
echo "python main.py --mode distill_target --model resnet152 --data cinic10"
python main.py --mode distill_target --model resnet152 --data cinic10
echo "python main.py --mode distill_shadow --model resnet152 --data cinic10"
python main.py --mode distill_shadow --model resnet152 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cinic10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10"
python main.py --mode distill_target --model resnet18 --data cifar10
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_C100_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar100"
# python main.py --mode target --model resnet18 --data cifar100
# echo "python main.py --mode shadow --model resnet18 --data cifar100"
# python main.py --mode shadow --model resnet18 --data cifar100
# echo "python main.py --mode distill_target --model resnet18 --data cifar100"
# python main.py --mode distill_target --model resnet18 --data cifar100
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar100"
# python main.py --mode distill_shadow --model resnet18 --data cifar100
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_C100_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar100"
# python main.py --mode target --model resnet18 --data cifar100
# echo "python main.py --mode shadow --model resnet18 --data cifar100"
# python main.py --mode shadow --model resnet18 --data cifar100
# echo "python main.py --mode distill_target --model resnet18 --data cifar100"
# python main.py --mode distill_target --model resnet18 --data cifar100
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar100"
# python main.py --mode distill_shadow --model resnet18 --data cifar100
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J L_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10"
# python main.py --mode distill_target --model resnet18 --data cifar10
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6
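# Sketch (optional, not executed): the three stages above issue the same command once per
# e_bits value. The loops below reproduce the same ordering (all distills, then all dataset
# builds, then all attacks); they are wrapped in `if false` so this job's behaviour is unchanged.
if false; then
    E_BITS="1 2 3 4 5 6"
    for E in $E_BITS; do
        python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits "$E"
    done
    for E in $E_BITS; do
        python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits "$E"
    done
    for E in $E_BITS; do
        python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits "$E"
    done
fi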
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4"
# python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4
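# Sketch (optional, not executed): this job differs from its siblings only in the INT bit-width.
# A single parameterised script could serve all of them by reading an environment variable,
# e.g. `NUM_BITS=5 sbatch <this script>`; the variable name NUM_BITS is an assumption.
if false; then
    NB="${NUM_BITS:-4}"
    python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits "$NB"
fi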
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5"
# python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6"
# python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 8
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8
# ATTACK
# baseline: test the full-precision MIA result
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 8
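# Sketch (optional, not executed): the INT 2-8 sweep above could equivalently be driven by a
# loop over num_bits, assuming each bit-width is processed independently (distill, build the
# test dataset, then attack). Wrapped in `if false` so the explicit commands above remain the ones run.
if false; then
    for NB in $(seq 2 8); do
        python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits "$NB"
        python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits "$NB"
        python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits "$NB"
    done
fi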
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 9"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 9
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 10"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 11"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 11
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 12"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 12
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 13"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 13
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 14"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 14
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 15"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 15
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 16"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 16
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 9"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 9
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 11"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 11
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 12"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 12
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 13"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 13
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 14"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 14
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 15"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 15
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 16"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 16
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 9"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 9
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 11"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 11
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 12"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 12
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 13"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 13
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 14"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 14
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 15"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 15
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 16"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 16
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 8
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 8
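# Sketch (optional, not executed): a single job could cover the whole quantization grid used
# across these scripts (INT 2-16, POT 2-8, FLOAT with num_bits 3-8 and e_bits 1..num_bits-2).
# The grid and the helper run_stage are assumptions for illustration, not directives.
if false; then
    run_stage () {
        # $1 = quant_type, $2 = num_bits, $3 = extra flags such as "--e_bits 2" (may be empty)
        python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type "$1" --num_bits "$2" $3
        python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type "$1" --num_bits "$2" $3
        python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type "$1" --num_bits "$2" $3
    }
    for NB in $(seq 2 16); do run_stage INT "$NB" ""; done
    for NB in $(seq 2 8); do run_stage POT "$NB" ""; done
    for NB in $(seq 3 8); do
        for E in $(seq 1 $((NB - 2))); do run_stage FLOAT "$NB" "--e_bits $E"; done
    done
fi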
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-12:00:00 # Run for a maximum time of 1 day, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet18 --data cinic10"
python main.py --mode target --model resnet18 --data cinic10
echo "python main.py --mode shadow --model resnet18 --data cinic10"
python main.py --mode shadow --model resnet18 --data cinic10
echo "python main.py --mode distill_target --model resnet18 --data cinic10"
python main.py --mode distill_target --model resnet18 --data cinic10
echo "python main.py --mode distill_shadow --model resnet18 --data cinic10"
python main.py --mode distill_shadow --model resnet18 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cinic10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_50 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50 "
python main.py --mode target --model resnet50
echo "python main.py --mode shadow --model resnet50 "
python main.py --mode shadow --model resnet50
echo "python main.py --mode distill_target --model resnet50 "
python main.py --mode distill_target --model resnet50
echo "python main.py --mode distill_shadow --model resnet50 "
python main.py --mode distill_shadow --model resnet50
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 "
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 "
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 "
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_50_10 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50 --data cifar100"
python main.py --mode target --model resnet50 --data cifar100
echo "python main.py --mode shadow --model resnet50 --data cifar100"
python main.py --mode shadow --model resnet50 --data cifar100
echo "python main.py --mode distill_target --model resnet50 --data cifar100"
python main.py --mode distill_target --model resnet50 --data cifar100
echo "python main.py --mode distill_shadow --model resnet50 --data cifar100"
python main.py --mode distill_shadow --model resnet50 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 5
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 6"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 6
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 7"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 7
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 8
# ATTACK
# evaluate the full-precision target first, as the MIA baseline
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 8
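# The commands above repeat the same three phases for each INT bit width from 2 to 8:
# distill the PTQ target, build its loss-trajectory test set, then run the black-box
# attack with the pre-trained attack model. Below is a minimal loop sketch of the same
# sweep, assuming only the main.py flags used above; run_int_sweep is a hypothetical
# helper defined here for reference and is not called in this job.
run_int_sweep() {
    local bits
    for bits in 2 3 4 5 6 7 8; do
        python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits "$bits"
    done
    for bits in 2 3 4 5 6 7 8; do
        python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits "$bits"
    done
    for bits in 2 3 4 5 6 7 8; do
        python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits "$bits"
    done
}
# Example usage (left commented out so the explicit runs above are not duplicated):
# run_int_sweep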
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, or even the names of specific compute nodes.
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 9"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 9
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 10"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 10
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 11"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 11
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 12"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 12
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 13"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 13
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 14"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 14
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 15"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 15
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 16"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 16
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 9"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 9
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 11"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 11
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 12"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 12
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 13"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 13
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 14"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 14
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 15"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 15
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 16"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 16
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 9"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 9
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 10"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 10
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 11"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 11
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 12"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 12
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 13"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 13
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 14"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 14
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 15"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 15
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 16"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 16
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, or even the names of specific compute nodes.
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 5
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 6"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 6
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 7"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 7
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 8"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 8
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 8
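# The POT sweep reuses the distill / build-dataset / attack sequence of the INT sweeps;
# only --quant_type and the bit-width range change. Below is a minimal parameterized
# sketch, assuming the same main.py interface as above and that each bit width can be
# processed independently (per bit width here, rather than phase by phase as above);
# run_ptq_mia is a hypothetical helper defined for reference only and is not called.
run_ptq_mia() {
    local qtype=$1; shift
    local bits
    for bits in "$@"; do
        python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type "$qtype" --num_bits "$bits"
        python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type "$qtype" --num_bits "$bits"
        python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type "$qtype" --num_bits "$bits"
    done
}
# Example usage (commented out): run_ptq_mia POT 2 3 4 5 6 7 8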
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_50 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-12:00:00 # Run for a maximum time of 1 day, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, or even the names of specific compute nodes.
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50 --data cinic10"
python main.py --mode target --model resnet50 --data cinic10
echo "python main.py --mode shadow --model resnet50 --data cinic10"
python main.py --mode shadow --model resnet50 --data cinic10
echo "python main.py --mode distill_target --model resnet50 --data cinic10"
python main.py --mode distill_target --model resnet50 --data cinic10
echo "python main.py --mode distill_shadow --model resnet50 --data cinic10"
python main.py --mode distill_shadow --model resnet50 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cinic10
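# The seven steps above form the full-precision trajectory-MIA pipeline for one
# (model, dataset) pair: train the target and shadow models, distill both, build the
# shadow and target trajectory datasets, then run the black-box attack. Below is a
# minimal reusable sketch, assuming the same main.py flags as above; run_fp_pipeline
# is a hypothetical helper defined for reference only and is not called in this job.
run_fp_pipeline() {
    local model=$1 data=$2
    python main.py --mode target --model "$model" --data "$data"
    python main.py --mode shadow --model "$model" --data "$data"
    python main.py --mode distill_target --model "$model" --data "$data"
    python main.py --mode distill_shadow --model "$model" --data "$data"
    python main.py --action 1 --mode shadow --mia_type build-dataset --model "$model" --model_distill "$model" --data "$data"
    python main.py --action 1 --mode target --mia_type build-dataset --model "$model" --model_distill "$model" --data "$data"
    python main.py --action 1 --mia_type black-box --model "$model" --model_distill "$model" --data "$data"
}
# Example usage (commented out): run_fp_pipeline resnet50 cinic10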
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_Mobile # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, or even the names of specific compute nodes.
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model mobilenetv2"
python main.py --mode target --model mobilenetv2
echo "python main.py --mode shadow --model mobilenetv2"
python main.py --mode shadow --model mobilenetv2
echo "python main.py --mode distill_target --model mobilenetv2"
python main.py --mode distill_target --model mobilenetv2
echo "python main.py --mode distill_shadow --model mobilenetv2"
python main.py --mode distill_shadow --model mobilenetv2
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 "
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_100_Mobile # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, or even the names of specific compute nodes.
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model mobilenetv2 --data cifar100"
python main.py --mode target --model mobilenetv2 --data cifar100
echo "python main.py --mode shadow --model mobilenetv2 --data cifar100"
python main.py --mode shadow --model mobilenetv2 --data cifar100
echo "python main.py --mode distill_target --model mobilenetv2 --data cifar100"
python main.py --mode distill_target --model mobilenetv2 --data cifar100
echo "python main.py --mode distill_shadow --model mobilenetv2 --data cifar100"
python main.py --mode distill_shadow --model mobilenetv2 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --data cifar100 --model_distill mobilenetv2"
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --data cifar100 --model_distill mobilenetv2"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cifar100"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_Mobile # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 3-00:00:00 # Run for a maximum time of 3 days, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, or even the names of specific compute nodes.
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model mobilenetv2 --data cinic10"
python main.py --mode target --model mobilenetv2 --data cinic10
echo "python main.py --mode shadow --model mobilenetv2 --data cinic10"
python main.py --mode shadow --model mobilenetv2 --data cinic10
echo "python main.py --mode distill_target --model mobilenetv2 --data cinic10"
python main.py --mode distill_target --model mobilenetv2 --data cinic10
echo "python main.py --mode distill_shadow --model mobilenetv2 --data cinic10"
python main.py --mode distill_shadow --model mobilenetv2 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --data cinic10 --model_distill mobilenetv2"
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --data cinic10 --model_distill mobilenetv2"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cinic10"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"