Commit b9f4739f by Zhihong Ma

feat: loss trajectory for the full-precision model

parent 85f1d4cd
from torch.autograd import Function

class FakeQuantize(Function):
    """Quantize-then-dequantize with a straight-through estimator (STE):
    the forward pass simulates the quantization error, while the backward
    pass lets gradients flow through unchanged."""

    @staticmethod
    def forward(ctx, x, qparam):
        x = qparam.quantize_tensor(x)
        x = qparam.dequantize_tensor(x)
        return x

    @staticmethod
    def backward(ctx, grad_output):
        # STE: pass the gradient straight through; qparam receives no gradient.
        return grad_output, None
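# --- Usage sketch (not part of the original commit) --------------------------
# A minimal, hypothetical example of how FakeQuantize can be used for
# quantization-aware training. SymmetricQParam below is an assumption made
# for illustration: it only mimics the quantize_tensor / dequantize_tensor
# interface that FakeQuantize.forward expects; the real qparam object comes
# from this repository's quantization module.
import torch

class SymmetricQParam:
    def __init__(self, num_bits=8):
        self.num_bits = num_bits
        self.scale = None

    def update(self, x):
        # Symmetric scale from the tensor's max magnitude (illustrative only).
        qmax = 2.0 ** (self.num_bits - 1) - 1.0
        self.scale = x.detach().abs().max() / qmax

    def quantize_tensor(self, x):
        qmax = 2.0 ** (self.num_bits - 1) - 1.0
        return torch.clamp(torch.round(x / self.scale), -qmax - 1.0, qmax)

    def dequantize_tensor(self, q):
        return q * self.scale

qparam = SymmetricQParam(num_bits=8)
w = torch.randn(4, 4, requires_grad=True)
qparam.update(w)
w_fq = FakeQuantize.apply(w, qparam)  # forward: quantize -> dequantize
w_fq.sum().backward()                 # backward: straight-through estimator
assert torch.allclose(w.grad, torch.ones_like(w))  # gradient passed unchanged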
class GlobalVariables:
    SELF_INPLANES = 0

# -*- coding: utf-8 -*-
# Shares global state across multiple modules.
def _init():  # initialize the shared dictionary
    global _global_dict
    _global_dict = {}

def set_value(value, is_bias=False):
    # Register a global value; the bias precision is stored separately.
    if is_bias:
        _global_dict[0] = value
    else:
        _global_dict[1] = value

def get_value(is_bias=False):  # bias gets a precision independent of the other variables
    if is_bias:
        return _global_dict[0]
    else:
        return _global_dict[1]
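# --- Usage sketch (not part of the original commit) --------------------------
# The helpers above are meant to be imported and shared by several modules;
# here they are exercised directly. The bit widths are made-up values for
# illustration only.
_init()                          # call once at program start
set_value(8)                     # weight/activation precision
set_value(16, is_bias=True)      # bias keeps its own, higher precision
assert get_value() == 8
assert get_value(is_bias=True) == 16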
import os
import argparse

import utils
import normal
import MIA

def train_networks(args):
    device = utils.get_pytorch_device()
    utils.create_path('./outputs')
    if 'distill' in args.mode:
        # Distillation: the teacher checkpoints live under seed 0, the
        # distilled trajectory checkpoints under the current seed.
        model_path_tar = 'networks/{}/{}'.format(0, args.mode.split('_')[-1])
        utils.create_path(model_path_tar)
        model_path_dis = 'networks/{}/{}'.format(args.seed, args.mode)
        utils.create_path(model_path_dis)
    else:
        model_path_tar = 'networks/{}/{}'.format(args.seed, args.mode)
        utils.create_path(model_path_tar)
        model_path_dis = None
    utils.set_logger('outputs/train_models')
    normal.train_models(args, model_path_tar, model_path_dis, device)
def membership_inference_attack(args):
    print(f'--------------{args.mia_type}-------------')
    device = utils.get_pytorch_device()
    if args.mia_type == 'build-dataset':
        models_path = 'networks/{}'.format(0)
        MIA.build_trajectory_membership_dataset(args, models_path, device)
    elif args.mia_type == 'black-box':
        trained_models_path = 'networks/{}'.format(args.seed)
        MIA.trajectory_black_box_membership_inference_attack(args, trained_models_path, device)
if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='TrajectoryMIA')
    parser.add_argument('--action', type=int, default=0, choices=[0, 1],
                        help='0: train networks, 1: run the membership inference attack')
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--mode', type=str, default='target',
                        choices=['target', 'shadow', 'distill_target', 'distill_shadow'])
    parser.add_argument('--model', type=str, default='resnet18',
                        choices=['resnet18', 'resnet50', 'resnet152', 'mobilenetv2'])
    parser.add_argument('--data', type=str, default='cifar10',
                        choices=['cinic10', 'cifar10', 'cifar100', 'gtsrb'])
    parser.add_argument('--epochs', type=int, default=100)
    parser.add_argument('--model_distill', type=str, default='resnet18',
                        choices=['resnet18', 'resnet50', 'resnet152', 'mobilenetv2'])
    parser.add_argument('--epochs_distill', type=int, default=100)
    parser.add_argument('--mia_type', type=str, choices=['build-dataset', 'black-box'])
    args = parser.parse_args()

    utils.set_random_seeds(args.seed)
    print('random seed:{}'.format(args.seed))
    if args.action == 0:
        train_networks(args)
    elif args.action == 1:
        membership_inference_attack(args)
import argparse

import numpy as np
import matplotlib.pyplot as plt
from sklearn import metrics

if __name__ == '__main__':
    # Plots the best attack results stored by test_mia_attack_model.
    parser = argparse.ArgumentParser(description='PLOT_TrajectoryMIA')
    parser.add_argument('--model', type=str, default='resnet18',
                        choices=['resnet18', 'resnet50', 'resnet152', 'mobilenetv2'])
    parser.add_argument('--data', type=str, default='cifar10',
                        choices=['cinic10', 'cifar10', 'cifar100', 'gtsrb'])
    parser.add_argument('--model_distill', type=str, default='resnet18',
                        choices=['resnet18', 'resnet50', 'resnet152', 'mobilenetv2'])
    args = parser.parse_args()

    data_auc = np.load(f'./outputs/{args.data}_{args.model}_{args.model_distill}_trajectory_auc.npy',
                       allow_pickle=True).item()
    # Report the TPR at the last ROC point whose FPR is still at or below 0.1%.
    for i in range(len(data_auc['fpr'])):
        if data_auc['fpr'][i] > 0.001:
            print('TPR at 0.1% FPR: {:.1%}'.format(data_auc['tpr'][i - 1]))
            break

    plt.plot(data_auc['fpr'], data_auc['tpr'], color='darkorange', lw=2, label='ROC curve')
    plt.plot([0, 1], [0, 1], color='navy', lw=2, linestyle='--')
    plt.xlim([0.0, 1.0])
    plt.ylim([0.0, 1.05])
    plt.xlabel('False Positive Rate')
    plt.ylabel('True Positive Rate')
    plt.title('Receiver operating characteristic')
    plt.legend(loc="lower right")
    # plt.show()
    plt.savefig(f'./img/{args.data}_{args.model}_{args.model_distill}_roc_curve.png')
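# --- Note (not part of the original script) -----------------------------------
# The loop above reports the TPR at the last ROC point whose FPR is still at
# or below 0.1%. A sketch of an equivalent lookup that interpolates to exactly
# FPR = 0.1% between neighbouring ROC points (the fpr array of a ROC curve is
# already sorted in increasing order, as np.interp requires):
tpr_at_fixed_fpr = np.interp(0.001, data_auc['fpr'], data_auc['tpr'])
print('TPR at 0.1% FPR (interpolated): {:.1%}'.format(tpr_at_fixed_fpr))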
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_152 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cifar10"
python main.py --mode target --model resnet152 --data cifar10
echo "python main.py --mode shadow --model resnet152 --data cifar10"
python main.py --mode shadow --model resnet152 --data cifar10
echo "python main.py --mode distill_target --model resnet152 --data cifar10"
python main.py --mode distill_target --model resnet152 --data cifar10
echo "python main.py --mode distill_shadow --model resnet152 --data cifar10"
python main.py --mode distill_shadow --model resnet152 --data cifar10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_152_10 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cifar100"
python main.py --mode target --model resnet152 --data cifar100
echo "python main.py --mode shadow --model resnet152 --data cifar100"
python main.py --mode shadow --model resnet152 --data cifar100
echo "python main.py --mode distill_target --model resnet152 --data cifar100"
python main.py --mode distill_target --model resnet152 --data cifar100
echo "python main.py --mode distill_shadow --model resnet152 --data cifar100"
python main.py --mode distill_shadow --model resnet152 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_10 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 3-00:00:00 # Run for a maximum time of 3 days, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cinic10"
python main.py --mode target --model resnet152 --data cinic10
echo "python main.py --mode shadow --model resnet152 --data cinic10"
python main.py --mode shadow --model resnet152 --data cinic10
echo "python main.py --mode distill_target --model resnet152 --data cinic10"
python main.py --mode distill_target --model resnet152 --data cinic10
echo "python main.py --mode distill_shadow --model resnet152 --data cinic10"
python main.py --mode distill_shadow --model resnet152 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cinic10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet18 --data cifar10"
python main.py --mode target --model resnet18 --data cifar10
echo "python main.py --mode shadow --model resnet18 --data cifar10"
python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10"
python main.py --mode distill_target --model resnet18 --data cifar10
echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
python main.py --mode distill_shadow --model resnet18 --data cifar10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_C100_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet18 --data cifar100"
python main.py --mode target --model resnet18 --data cifar100
echo "python main.py --mode shadow --model resnet18 --data cifar100"
python main.py --mode shadow --model resnet18 --data cifar100
echo "python main.py --mode distill_target --model resnet18 --data cifar100"
python main.py --mode distill_target --model resnet18 --data cifar100
echo "python main.py --mode distill_shadow --model resnet18 --data cifar100"
python main.py --mode distill_shadow --model resnet18 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-12:00:00 # Run for a maximum time of 1 day, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet18 --data cinic10"
python main.py --mode target --model resnet18 --data cinic10
echo "python main.py --mode shadow --model resnet18 --data cinic10"
python main.py --mode shadow --model resnet18 --data cinic10
echo "python main.py --mode distill_target --model resnet18 --data cinic10"
python main.py --mode distill_target --model resnet18 --data cinic10
echo "python main.py --mode distill_shadow --model resnet18 --data cinic10"
python main.py --mode distill_shadow --model resnet18 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cinic10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_50 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50 "
python main.py --mode target --model resnet50
echo "python main.py --mode shadow --model resnet50 "
python main.py --mode shadow --model resnet50
echo "python main.py --mode distill_target --model resnet50 "
python main.py --mode distill_target --model resnet50
echo "python main.py --mode distill_shadow --model resnet50 "
python main.py --mode distill_shadow --model resnet50
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 "
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 "
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 "
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_50_10 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50 --data cifar100"
python main.py --mode target --model resnet50 --data cifar100
echo "python main.py --mode shadow --model resnet50 --data cifar100"
python main.py --mode shadow --model resnet50 --data cifar100
echo "python main.py --mode distill_target --model resnet50 --data cifar100"
python main.py --mode distill_target --model resnet50 --data cifar100
echo "python main.py --mode distill_shadow --model resnet50 --data cifar100"
python main.py --mode distill_shadow --model resnet50 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_50 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-12:00:00 # Run for a maximum time of 1 day, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50 --data cinic10"
python main.py --mode target --model resnet50 --data cinic10
echo "python main.py --mode shadow --model resnet50 --data cinic10"
python main.py --mode shadow --model resnet50 --data cinic10
echo "python main.py --mode distill_target --model resnet50 --data cinic10"
python main.py --mode distill_target --model resnet50 --data cinic10
echo "python main.py --mode distill_shadow --model resnet50 --data cinic10"
python main.py --mode distill_shadow --model resnet50 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cinic10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_Mobile # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model mobilenetv2"
python main.py --mode target --model mobilenetv2
echo "python main.py --mode shadow --model mobilenetv2"
python main.py --mode shadow --model mobilenetv2
echo "python main.py --mode distill_target --model mobilenetv2"
python main.py --mode distill_target --model mobilenetv2
echo "python main.py --mode distill_shadow --model mobilenetv2"
python main.py --mode distill_shadow --model mobilenetv2
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 "
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_100_Mobile # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model mobilenetv2 --data cifar100"
python main.py --mode target --model mobilenetv2 --data cifar100
echo "python main.py --mode shadow --model mobilenetv2 --data cifar100"
python main.py --mode shadow --model mobilenetv2 --data cifar100
echo "python main.py --mode distill_target --model mobilenetv2 --data cifar100"
python main.py --mode distill_target --model mobilenetv2 --data cifar100
echo "python main.py --mode distill_shadow --model mobilenetv2 --data cifar100"
python main.py --mode distill_shadow --model mobilenetv2 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --data cifar100 --model_distill mobilenetv2"
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --data cifar100 --model_distill mobilenetv2"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cifar100"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_Mobile # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 3-00:00:00 # Run for a maximum time of 3 days, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model mobilenetv2 --data cinic10"
python main.py --mode target --model mobilenetv2 --data cinic10
echo "python main.py --mode shadow --model mobilenetv2 --data cinic10"
python main.py --mode shadow --model mobilenetv2 --data cinic10
echo "python main.py --mode distill_target --model mobilenetv2 --data cinic10"
python main.py --mode distill_target --model mobilenetv2 --data cinic10
echo "python main.py --mode distill_shadow --model mobilenetv2 --data cinic10"
python main.py --mode distill_shadow --model mobilenetv2 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --data cinic10 --model_distill mobilenetv2"
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --data cinic10 --model_distill mobilenetv2"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cinic10"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J PLOT # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python plot.py --model resnet50 --model_distill resnet50 --data cifar100"
python plot.py --model resnet50 --model_distill resnet50 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"