haoyifan / Model-Transfer-Adaptability / Commits

Commit cba2e1d6, authored Jun 01, 2023 by Zhihong Ma

    fix: add quantization feature for loss trajectory mia

Parent: 68add01f

Showing 51 changed files with 5763 additions and 40 deletions
mzh/new_mzh/Loss_Trajectory_MIA/MIA.py  +160 -13
mzh/new_mzh/Loss_Trajectory_MIA/div.py  +54 -0
mzh/new_mzh/Loss_Trajectory_MIA/fig/acc_loss_curve.png  +0 -0
mzh/new_mzh/Loss_Trajectory_MIA/fig/auc_loss_curve.png  +0 -0
mzh/new_mzh/Loss_Trajectory_MIA/global_var.py  +2 -0
mzh/new_mzh/Loss_Trajectory_MIA/main.py  +6 -0
mzh/new_mzh/Loss_Trajectory_MIA/normal.py  +271 -19
mzh/new_mzh/Loss_Trajectory_MIA/readme.md  +68 -6
mzh/new_mzh/Loss_Trajectory_MIA/resnet18_result.xlsx  +0 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_152_10.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_152_100.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_152_cinic.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_100.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_100_load.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_load.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S1.sh  +152 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S2.sh  +129 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S3.sh  +139 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S4.sh  +147 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT4.sh  +108 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT5.sh  +112 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT6.sh  +112 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT7.sh  +111 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT8.sh  +112 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT_S1.sh  +164 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT_S2.sh  +165 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT.sh  +159 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT3.sh  +109 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT4.sh  +109 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT5.sh  +109 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT6.sh  +109 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT7.sh  +109 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT8.sh  +109 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_cinic.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_100.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S1.sh  +152 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S2.sh  +129 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S3.sh  +139 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S4.sh  +147 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_INT_S1.sh  +164 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_INT_S2.sh  +165 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_POT.sh  +159 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_cinic.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_mobile_10.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_mobile_100.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_mobile_cinic.sh  +107 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_div.sh  +92 -0
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_plot.sh  +92 -0
mzh/new_mzh/Loss_Trajectory_MIA/utils.py  +201 -2
mzh/new_mzh/Loss_Trajectory_MIA/MIA.py
@@ -8,6 +8,10 @@ import normal
import dataset as DATA
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union
from sklearn import metrics
import openpyxl
from global_var import GlobalVariables
import gol
import sys

class MLP_BLACKBOX(nn.Module):
    def __init__(self, dim_in):
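The diff elides the body of MLP_BLACKBOX here. For orientation only: a black-box attack head of this kind is usually a small MLP that maps the (args.epochs_distill + 1)-dimensional loss trajectory to two membership logits. The sketch below is a hypothetical stand-in, not the repository's actual definition:

import torch.nn as nn

class MLPAttackHead(nn.Module):
    """Hypothetical stand-in for MLP_BLACKBOX: a small MLP over the loss
    trajectory, ending in 2 logits (member / non-member). The repository's
    actual layer sizes are not shown in this diff."""
    def __init__(self, dim_in):
        super().__init__()
        self.net = nn.Sequential(
            nn.Linear(dim_in, 128), nn.ReLU(),
            nn.Linear(128, 64), nn.ReLU(),
            nn.Linear(64, 2),
        )

    def forward(self, x):
        return self.net(x)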
@@ -88,7 +92,8 @@ def test_mia_attack_model(args, epoch, model, attack_test_loader, loss_fn, max_a
                 'fpr': fpr,
                 'tpr': tpr}
    np.save(f'./outputs/{args.data}_{args.model}_{args.model_distill}_trajectory_auc', save_data)
    # no longer saved, not of interest; for now only acc is tracked
    # np.save(f'./outputs/{args.data}_{args.model}_{args.model_distill}_trajectory_auc', save_data)
    if accuracy > max_acc:
        max_acc = accuracy
@@ -177,13 +182,40 @@ def build_trajectory_membership_dataset(args, ori_model_path, device='cpu'):
    elif args.model == 'mobilenetv2':
        model_name = '{}_mobilenetv2'.format(args.data)

    if args.quant_type is not None:
        if args.quant_type == 'FLOAT':
            title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
        else:
            title = '%s_%d' % (args.quant_type, args.num_bits)
        # set up the various quantization tables here
        gol._init()
        if args.quant_type != 'INT':
            bias_list = utils.build_bias_list(args.quant_type)
            gol.set_value(bias_list, is_bias=True)
        if args.quant_type != 'INT':
            plist = utils.build_list(args.quant_type, args.num_bits, args.e_bits)
            gol.set_value(plist)

    if args.mode == 'shadow':
        cnn_model, cnn_params = normal.load_model(args, ori_model_path + '/shadow', model_name, epoch=args.epochs)
        MODEL = cnn_model.to(device)
        # for the quantized case this probably needs changing: the quantized model should be loaded for inference
    elif args.mode == 'target':
        cnn_model, cnn_params = normal.load_model(args, ori_model_path + '/target', model_name, epoch=args.epochs)
        if args.quant_type is None:
            MODEL = cnn_model.to(device)
        else:
            ptq_file_prefix = 'networks/{}/{}'.format(args.seed, args.mode) + '/' + f'{args.data}_{args.model}/'
            cnn_model.quantize(args.quant_type, args.num_bits, args.e_bits)
            cnn_model.load_state_dict(torch.load(ptq_file_prefix + title + '.pt'))
            MODEL = cnn_model.to(device)
            MODEL.freeze()
            print('Successfully load ptq model: ' + title)
            print('pt file path:' + ptq_file_prefix + title + '.pt')

    # params['task'] records which dataset is used
    dataset = utils.get_dataset(cnn_params['task'], mode=args.mode, aug=True, batch_size=384)
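For reference, the title naming scheme used throughout this commit produces strings like the following (the format strings are taken from the code above; the sample configurations are illustrative):

for quant_type, num_bits, e_bits in [('INT', 8, 0), ('POT', 4, 0), ('FLOAT', 8, 3)]:
    if quant_type == 'FLOAT':
        title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
    else:
        title = '%s_%d' % (quant_type, num_bits)
    print(title)  # -> INT_8, POT_4, FLOAT_8_E3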
@@ -218,13 +250,14 @@ def build_trajectory_membership_dataset(args, ori_model_path, device='cpu'):
        # the loss trajectory from the distill model's outputs and the labels, organized column by column into one tensor
        batch_trajectory = get_trajectory(data, target, args, ori_model_path, device)
        data, target = data.to(device), target.to(device)
        if args.quant_type is None:
            batch_logit_target = MODEL(data)
        else:
            batch_logit_target = MODEL.quantize_inference(data)

        # row-wise maximum (the predicted label); the argmax position is the predicted class
        _, batch_predict_label = batch_logit_target.max(1)
        batch_predicted_label = batch_predict_label.long().cpu().detach().numpy()
        batch_original_label = target.long().cpu().detach().numpy()
        # effectively the final loss, which can be appended at the end of the loss trajectory
@@ -266,13 +299,25 @@ def build_trajectory_membership_dataset(args, ori_model_path, device='cpu'):
            'nb_classes': dataset.num_classes}

    # the target model's and shadow model's data are saved as trajectory_train_data and trajectory_test_data respectively
    # (i.e. the distill target model's and distill shadow model's trajectories)
    # TODO change the storage path of the (ptq) distill target model
    dataset_type = 'trajectory_train_data' if args.mode == 'shadow' else 'trajectory_test_data'
    utils.create_path(ori_model_path + f'/{args.mode}/{model_name}')
    model_name_ptq = model_name + '_' + title
    utils.create_path(ori_model_path + f'/{args.mode}/{model_name_ptq}')
    if args.quant_type is None:
        np.save(ori_model_path + f'/{args.mode}/{model_name}/{dataset_type}', data)
    else:
        np.save(ori_model_path + f'/{args.mode}/{model_name_ptq}/{dataset_type}', data)


def trajectory_black_box_membership_inference_attack(args, models_path, device='cpu'):
    if args.quant_type is not None:
        if args.quant_type == 'FLOAT':
            title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
        else:
            title = '%s_%d' % (args.quant_type, args.num_bits)
    # if args.model == 'vgg':
    #     model_name = '{}_vgg16bn'.format(args.data)
    # elif args.model == 'mobilenet':
@@ -290,7 +335,7 @@ def trajectory_black_box_membership_inference_attack(args, models_path, device='
    elif args.model == 'mobilenetv2':
        model_name = '{}_mobilenetv2'.format(args.data)
    print(f"MODEL NAME IS :{model_name}")
    # if args.model_distill == 'vgg':
    #     model_distill_name = '{}_vgg16bn'.format(args.data)
    # elif args.model_distill == 'mobilenet':
@@ -309,23 +354,44 @@ def trajectory_black_box_membership_inference_attack(args, models_path, device='
    elif args.model_distill == 'mobilenetv2':
        model_distill_name = '{}_mobilenetv2'.format(args.data)

    if args.quant_type is None:
        print(f"MODEL NAME IS :{model_name}")
        print(f"MODEL DISTILL NAME IS :{model_distill_name}")
    else:
        print(f"MODEL NAME IS :{model_name}_{title}")
        print(f"MODEL DISTILL NAME IS :{model_distill_name}_{title}")

    cnn = model_name
    print(f'------------------model: {model_name}-------------------')
    # print(f'------------------model: {model_name}-------------------')
    orgin_model_name = model_name
    model_name_ptq = model_name + '_' + title
    # a single attack-model path (distilling the quantized target model does not affect the attacker's training)
    save_path = models_path + '/attack/' + model_name
    # if args.quant_type is None:
    #     save_path = models_path + '/attack/' + model_name
    # else:
    #     save_path = models_path + '/attack/' + model_name_ptq
    utils.create_path(save_path)
    best_prec1 = 0.0
    best_auc = 0.0
    epoch = 0
    AttackModelTrainSet = np.load(models_path + f'/shadow/{model_name}/trajectory_train_data.npy', allow_pickle=True).item()
    # supports loading the (ptq) distill target model's loss trajectory
    if args.quant_type is None:
        AttackModelTestSet = np.load(models_path + f'/target/{model_name}/trajectory_test_data.npy', allow_pickle=True).item()
    else:
        AttackModelTestSet = np.load(models_path + f'/target/{model_name_ptq}/trajectory_test_data.npy', allow_pickle=True).item()

    # no trained model is loaded, so a new one needs to be trained
    if args.load_attack is False:
        train_set = torch.utils.data.TensorDataset(
            torch.from_numpy(np.array(AttackModelTrainSet['model_loss_ori'], dtype='f')),
            torch.from_numpy(np.array(AttackModelTrainSet['model_trajectory'], dtype='f')),
@@ -335,6 +401,7 @@ def trajectory_black_box_membership_inference_attack(args, models_path, device='
            # train/test, i.e. in or out
            torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTrainSet['predicted_status'], nb_classes=2, return_one_hot=True)[:, :2])).type(torch.long),
            torch.from_numpy(np.array(AttackModelTrainSet['member_status'])).type(torch.long),)
        attack_train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)
    test_set = torch.utils.data.TensorDataset(
        torch.from_numpy(np.array(AttackModelTestSet['model_loss_ori'], dtype='f')),
@@ -344,19 +411,22 @@ def trajectory_black_box_membership_inference_attack(args, models_path, device='
        # train/test, i.e. in or out
        torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTestSet['predicted_status'], nb_classes=2, return_one_hot=True)[:, :2])).type(torch.long),
        torch.from_numpy(np.array(AttackModelTestSet['member_status'])).type(torch.long),)
    attack_train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)
    attack_test_loader = torch.utils.data.DataLoader(test_set, batch_size=128, shuffle=True)
    print(f'-------------------"Loss Trajectory"------------------')

    # train the Attack Model
    attack_model = MLP_BLACKBOX(dim_in=args.epochs_distill + 1)
    attack_optimizer = torch.optim.SGD(attack_model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001)
    attack_model = attack_model.to(device)
    attack_optimizer = torch.optim.SGD(attack_model.parameters(), lr=0.01, momentum=0.9, weight_decay=0.0001)
    loss_fn = nn.CrossEntropyLoss()
    max_auc = 0
    max_acc = 0

    # a trained Attack Model is not being loaded
    if args.load_attack is False:
        print("You should not retrain the Attack Model !!!")
        sys.exit()
        for epoch in range(100):
            train_loss, train_prec1 = train_mia_attack_model(args, epoch, attack_model, attack_train_loader, attack_optimizer, loss_fn, device)
            val_loss, val_prec1, val_auc, max_auc, max_acc = test_mia_attack_model(args, epoch, attack_model, attack_test_loader, loss_fn, max_auc, max_acc, device)
@@ -364,17 +434,23 @@ def trajectory_black_box_membership_inference_attack(args, models_path, device='
            is_best_auc = val_auc > best_auc
            if is_best_prec1:
                best_prec1 = val_prec1
                torch.save(attack_model.state_dict(), save_path + '/' + 'trajectory' + '.pkl')
                print(f"Save Best ACC Attack Model: acc:{val_prec1}, auc:{val_auc}")
            if is_best_auc:
                best_auc = val_auc
            if epoch % 10 == 0:
                print(('epoch:{} \t train_loss:{:.4f} \t test_loss:{:.4f} \t train_prec1:{:.4f} \t test_prec1:{:.4f} \t val_prec1:{:.4f} \t val_auc:{:.4f}').format(epoch, train_loss, val_loss, train_prec1, val_prec1, val_prec1, val_auc))
        print(f'Load Trained Attack Model:')
        attack_model.load_state_dict(torch.load(save_path + '/' + 'trajectory' + '.pkl'))
        val_loss, val_prec1, val_auc, max_auc, max_acc = test_mia_attack_model(args, epoch, attack_model, attack_test_loader, loss_fn, max_auc, max_acc, device)
        print(('Test load Attack Model \t test_loss:{:.4f} \t test_prec1:{:.4f} \t val_prec1:{:.4f} \t val_auc:{:.4f}').format(val_loss, val_prec1, val_prec1, val_auc))
        print('Max AUC: ', max_auc)
        print('Max ACC: ', max_acc / 100)
        # this could be changed to store only the is_best_auc or is_best_prec1 checkpoint
        torch.save(attack_model.state_dict(), save_path + '/' + 'trajectory' + '.pkl')
        # the best results are stored in test_mia_attack_model
        data_auc = np.load(f'./outputs/{args.data}_{args.model}_{args.model_distill}_trajectory_auc.npy', allow_pickle=True).item()
        for i in range(len(data_auc['fpr'])):
@@ -382,6 +458,57 @@ def trajectory_black_box_membership_inference_attack(args, models_path, device='
                print('TPR at 0.1% FPR: {:.1%}'.format(data_auc['tpr'][i - 1]))
                break
    # load the trained Attack Model
    # after quantization, only the trained attack model is loaded; it is never retrained
    else:
        print(f'Load Trained Attack Model:')
        attack_model.load_state_dict(torch.load(save_path + '/' + 'trajectory' + '.pkl'))
        val_loss, val_prec1, val_auc, max_auc, max_acc = test_mia_attack_model(args, epoch, attack_model, attack_test_loader, loss_fn, max_auc, max_acc, device)
        print(('Test load Attack Model \t test_loss:{:.4f} \t test_prec1:{:.4f} \t val_prec1:{:.4f} \t val_auc:{:.4f}').format(val_loss, val_prec1, val_prec1, val_auc))
        print('Max AUC: ', max_auc)
        print('Max ACC: ', max_acc / 100)

        filename = f'{args.model}_result.xlsx'
        try:
            # if the workbook already exists, load it
            workbook = openpyxl.load_workbook(filename)
        except FileNotFoundError:
            # if it does not exist, create a new Excel workbook
            workbook = openpyxl.Workbook()
        if args.data not in workbook.sheetnames:
            # if the worksheet does not exist, create a new one
            worksheet = workbook.create_sheet(title=args.data)
            # write the header row into the worksheet
            worksheet.cell(row=1, column=1, value='FP32-acc')
            # worksheet.cell(row=1, column=2, value=top1)
            worksheet.cell(row=3, column=1, value='title')
            worksheet.cell(row=3, column=2, value='js_div')
            worksheet.cell(row=3, column=4, value='ptq_acc')
            worksheet.cell(row=3, column=5, value='acc_loss')
            worksheet.cell(row=3, column=6, value='AUC')
            worksheet.cell(row=3, column=7, value='ACC')
        else:
            worksheet = workbook[args.data]
        # full-precision results
        if args.quant_type is None:
            worksheet.cell(row=1, column=4, value='origin_auc')
            worksheet.cell(row=1, column=5, value=max_auc)
            worksheet.cell(row=1, column=7, value='origin_acc')
            worksheet.cell(row=1, column=8, value=max_acc / 100)
        else:
            idx = GlobalVariables.title_list.index(title)
            idx += 4
            worksheet.cell(row=idx, column=1, value=title)
            worksheet.cell(row=idx, column=6, value=max_auc)
            worksheet.cell(row=idx, column=7, value=max_acc / 100)
        workbook.save(filename)


def get_trajectory(data, target, args, model_path, device='cpu'):
@@ -408,6 +535,11 @@ def get_trajectory(data, target, args, model_path, device='cpu'):
    # create a NumPy array predicted_label of shape (data.shape[0], 1) initialized to -1; data.shape[0] is the batch_size
    predicted_label = np.array([-1]).repeat(data.shape[0], 0).reshape(data.shape[0], 1)
    # TODO needs to accommodate the Distill Target Model obtained from the PTQ Target Model
    # the cases are distinguished by mode == shadow / target, but the model actually loaded is the distill model
    for s in range(1):
        trajectory_current = None
        model_path_current = 'networks/{}'.format(s)
@@ -417,14 +549,29 @@ def get_trajectory(data, target, args, model_path, device='cpu'):
            if args.mode == 'shadow':
                cnn_model_target, cnn_params_target = normal.load_model(args, model_path_current + '/distill_shadow', model_name, epoch=i)
            elif args.mode == 'target':
                if args.quant_type is None:
                    cnn_model_target, cnn_params_target = normal.load_model(args, model_path_current + '/distill_target', model_name, epoch=i)
                # TODO adjust the load path so the (ptq) Distill Target Model's weights are loaded
                else:
                    if args.quant_type == 'FLOAT':
                        title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
                    else:
                        title = '%s_%d' % (args.quant_type, args.num_bits)
                    cnn_model_target, cnn_params_target = normal.load_model(args, model_path_current + '/distill_target', model_name + '_' + title, epoch=i)

            MODEL_target = cnn_model_target.to(device)
            # data is a list holding one batch of vectors
            data = data.to(device)
            # labels
            target = target.to(device)
            # get the target model's outputs
            # these are all distill-model outputs, so quantize_inference is not needed
            logit_target = MODEL_target(data)
            # the loss between the target model's output and the label (computed separately for each item in the batch)
            loss = [F.cross_entropy(logit_target_i.unsqueeze(0), target_i.unsqueeze(0)) for (logit_target_i, target_i) in zip(logit_target, target)]
            # list -> single-column nparray
mzh/new_mzh/Loss_Trajectory_MIA/div.py
(new file)
import openpyxl
from global_var import GlobalVariables
from utils import *
import gol
import argparse
import numpy as np

if __name__ == '__main__':
    parser = argparse.ArgumentParser(description='DIV_TrajectoryMIA')
    parser.add_argument('--model', type=str, default='resnet18', help=['resnet18', 'resnet50', 'resnet152', 'mobilenetv2'])
    parser.add_argument('--data', type=str, default='cifar10', help=['cinic10', 'cifar10', 'cifar100', 'gtsrb'])
    args = parser.parse_args()

    data_path = f'networks/0/target/{args.data}_{args.model}/trajectory_test_data.npy'
    # compute all the JS divergences in one place
    gol._init()
    quant_type_list = ['INT', 'POT', 'FLOAT']
    filename = f'{args.model}_result.xlsx'
    workbook = openpyxl.load_workbook(filename)
    worksheet = workbook[args.data]
    for quant_type in quant_type_list:
        num_bit_list = numbit_list(quant_type)
        for num_bits in num_bit_list:
            e_bit_list = ebit_list(quant_type, num_bits)
            for e_bits in e_bit_list:
                if quant_type == 'FLOAT':
                    title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
                else:
                    title = '%s_%d' % (quant_type, num_bits)
                model_name_ptq = f'{args.data}_{args.model}_{title}'
                p_data_path = f'networks/0/target/{model_name_ptq}/trajectory_test_data.npy'
                dataSet = np.load(data_path, allow_pickle=True).item()
                p_dataSet = np.load(p_data_path, allow_pickle=True).item()
                data = torch.from_numpy(np.array(dataSet['model_trajectory'], dtype='f')).transpose(0, 1)
                p_data = torch.from_numpy(np.array(p_dataSet['model_trajectory'], dtype='f')).transpose(0, 1)
                # data = torch.from_numpy(np.array(dataSet['model_trajectory'], dtype='f'))
                # p_data = torch.from_numpy(np.array(p_dataSet['model_trajectory'], dtype='f'))
                div = js_div(data, p_data)
                div = div.item()
                if div < 0:
                    div = 0
                print(f"js div of {model_name_ptq}: {div}")
                idx = GlobalVariables.title_list.index(title)
                idx += 4
                worksheet.cell(row=idx, column=2, value=div)
    workbook.save(filename)
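div.py calls js_div from utils.py, whose body is not shown in this diff. A minimal sketch of a symmetric, softmax-normalized Jensen-Shannon divergence consistent with how it is called above (two trajectory tensors in, a scalar tensor out) could look like this; treat it as an assumption, not the repository's implementation:

import torch
import torch.nn.functional as F

def js_div(p_logits, q_logits, get_softmax=True):
    """JS(P||Q) = KL(P||M)/2 + KL(Q||M)/2 with M = (P+Q)/2 (sketch, assumed form)."""
    kl = torch.nn.KLDivLoss(reduction='batchmean')
    if get_softmax:
        p = F.softmax(p_logits, dim=-1)
        q = F.softmax(q_logits, dim=-1)
    else:
        p, q = p_logits, q_logits
    log_m = ((p + q) / 2).log()   # log of the mixture distribution M
    return (kl(log_m, p) + kl(log_m, q)) / 2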
mzh/new_mzh/Loss_Trajectory_MIA/fig/acc_loss_curve.png
(new image file, 65 KB)
mzh/new_mzh/Loss_Trajectory_MIA/fig/auc_loss_curve.png
(new image file, 68 KB)
mzh/new_mzh/Loss_Trajectory_MIA/global_var.py
class GlobalVariables:
    SELF_INPLANES = 0
    title_list = ['INT_2', 'INT_3', 'INT_4', 'INT_5', 'INT_6', 'INT_7', 'INT_8', 'INT_9', 'INT_10',
                  'INT_11', 'INT_12', 'INT_13', 'INT_14', 'INT_15', 'INT_16',
                  'POT_2', 'POT_3', 'POT_4', 'POT_5', 'POT_6', 'POT_7', 'POT_8',
                  'FLOAT_3_E1', 'FLOAT_4_E1', 'FLOAT_4_E2', 'FLOAT_5_E1', 'FLOAT_5_E2', 'FLOAT_5_E3',
                  'FLOAT_6_E1', 'FLOAT_6_E2', 'FLOAT_6_E3', 'FLOAT_6_E4',
                  'FLOAT_7_E1', 'FLOAT_7_E2', 'FLOAT_7_E3', 'FLOAT_7_E4', 'FLOAT_7_E5',
                  'FLOAT_8_E1', 'FLOAT_8_E2', 'FLOAT_8_E3', 'FLOAT_8_E4', 'FLOAT_8_E5', 'FLOAT_8_E6']
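Both MIA.py and div.py use title_list as the row index for the per-dataset result sheets: the worksheet row is the title's position in this list plus 4, since rows 1 and 3 hold the FP32 results and the column headers. A small illustration (hypothetical title value):

title = 'POT_2'
row = GlobalVariables.title_list.index(title) + 4  # 'INT_2' maps to row 4, so 'POT_2' maps to row 19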
mzh/new_mzh/Loss_Trajectory_MIA/main.py
@@ -44,6 +44,12 @@ if __name__ == '__main__':
    parser.add_argument('--model_distill', type=str, default='resnet18', help=['resnet18', 'resnet50', 'resnet152', 'mobilenetv2'])
    parser.add_argument('--epochs_distill', type=int, default=100)
    parser.add_argument('--mia_type', type=str, help=['build-dataset', 'black-box'])
    parser.add_argument('--load_attack', action='store_true', help='load a trained attack model')
    parser.add_argument('--store_ptq', action='store_true', help='store a ptq model')
    parser.add_argument('--quant_type', type=str, choices=['INT', 'POT', 'FLOAT'], default=None, help='choose a ptq mode for target model')
    parser.add_argument("--num_bits", type=int, default=0)
    parser.add_argument("--e_bits", type=int, default=0)
    parser.add_argument('--load_ptq', action='store_true', help='load a ptq target model')
    args = parser.parse_args()
    utils.set_random_seeds(args.seed)
mzh/new_mzh/Loss_Trajectory_MIA/normal.py
@@ -6,26 +6,184 @@ import numpy as np
import pickle
import utils
from architectures import *
import os.path as osp
import dataset
import gol
import openpyxl
from global_var import GlobalVariables


def direct_quantize(args, model, data, device='cpu'):
    print("====Begin Direct Quantize!====")
    if not hasattr(model, 'augment_training') or model.augment_training:
        if args.mode == 'distill_target':
            print('load aug_target_dataset ... ')
            train_loader = data.aug_target_train_loader
        elif args.mode == 'distill_shadow':
            print('load aug_shadow_dataset ...')
            train_loader = data.aug_shadow_train_loader
    else:
        if args.mode == 'distill_target':
            print('load target_dataset ... ')
            train_loader = data.target_train_loader
        elif args.mode == 'distill_shadow':
            print('load shadow_dataset ...')
            train_loader = data.shadow_train_loader
    model.eval()
    count = 0
    for x, y, idx in train_loader:
        # cnn_training_step(model, optimizer, x, y, device)
        b_x = x.to(device)
        b_y = y.to(device)
        output = model.quantize_forward(b_x)
        count += 1
        # TODO may need adjusting for different datasets
        if count % 500 == 0:
            break
    print('direct quantization finish')


def full_inference(args, model, data, device='cpu'):
    print(f'model.augument_training:{model.augment_training}')
    if not hasattr(model, 'augment_training') or model.augment_training:
        if args.mode == 'distill_target':
            print('load aug_target_dataset ... ')
            test_loader = data.aug_target_test_loader
        elif args.mode == 'distill_shadow':
            print('load aug_shadow_dataset ...')
            test_loader = data.aug_shadow_test_loader
    else:
        if args.mode == 'distill_target':
            print('load target_dataset ... ')
            test_loader = data.target_test_loader
        elif args.mode == 'distill_shadow':
            print('load shadow_dataset ...')
            test_loader = data.shadow_test_loader
    model.eval()
    top1 = dataset.AverageMeter()
    top5 = dataset.AverageMeter()
    with torch.no_grad():
        for batch in test_loader:
            b_x = batch[0].to(device)
            b_y = batch[1].to(device)
            output = model(b_x)
            prec1, prec5 = dataset.accuracy(output, b_y, topk=(1, 5))
            top1.update(prec1[0], b_x.size(0))
            top5.update(prec5[0], b_x.size(0))
    top1_acc = top1.avg.data.cpu().numpy()[()]
    top5_acc = top5.avg.data.cpu().numpy()[()]
    print('\nTest set: Full Model Accuracy: {:.2f}%'.format(top1_acc))
    return top1_acc, top5_acc


def quantize_inference(args, model, data, device='cpu'):
    if not hasattr(model, 'augment_training') or model.augment_training:
        if args.mode == 'distill_target':
            print('load aug_target_dataset ... ')
            test_loader = data.aug_target_test_loader
        elif args.mode == 'distill_shadow':
            print('load aug_shadow_dataset ...')
            test_loader = data.aug_shadow_test_loader
    else:
        if args.mode == 'distill_target':
            print('load target_dataset ... ')
            test_loader = data.target_test_loader
        elif args.mode == 'distill_shadow':
            print('load shadow_dataset ...')
            test_loader = data.shadow_test_loader
    model.eval()
    top1 = dataset.AverageMeter()
    top5 = dataset.AverageMeter()
    with torch.no_grad():
        for batch in test_loader:
            b_x = batch[0].to(device)
            b_y = batch[1].to(device)
            output = model.quantize_inference(b_x)
            prec1, prec5 = dataset.accuracy(output, b_y, topk=(1, 5))
            top1.update(prec1[0], b_x.size(0))
            top5.update(prec5[0], b_x.size(0))
    top1_acc = top1.avg.data.cpu().numpy()[()]
    top5_acc = top5.avg.data.cpu().numpy()[()]
    print('\nTest set: PTQ Model Accuracy: {:.2f}%'.format(top1_acc))
    return top1_acc, top5_acc


# def direct_quantize(model, test_loader,device):
#     for i, (data, target) in enumerate(test_loader, 1):
#         data = data.to(device)
#         output = model.quantize_forward(data).cpu()
#         if i % 500 == 0:
#             break
#     print('direct quantization finish')

# def full_inference(model, test_loader, device):
#     correct = 0
#     for i, (data, target) in enumerate(test_loader, 1):
#         data = data.to(device)
#         output = model(data).cpu()
#         pred = output.argmax(dim=1, keepdim=True)
#         # print(pred)
#         correct += pred.eq(target.view_as(pred)).sum().item()
#     print('\nTest set: Full Model Accuracy: {:.2f}%'.format(100. * correct / len(test_loader.dataset)))
#     return 100. * correct / len(test_loader.dataset)

# def quantize_inference(model, test_loader, device):
#     correct = 0
#     for i, (data, target) in enumerate(test_loader, 1):
#         data = data.to(device)
#         output = model.quantize_inference(data).cpu()
#         pred = output.argmax(dim=1, keepdim=True)
#         correct += pred.eq(target.view_as(pred)).sum().item()
#     print('Test set: Quant Model Accuracy: {:.2f}%'.format(100. * correct / len(test_loader.dataset)))
#     return 100. * correct / len(test_loader.dataset)


# different training methods apply depending on whether this is a distill run
# the model and its params config were created and stored earlier; load both here before training
# model_path_tar, model_path_dis = 'networks/{}/{}'.format(args.seed, args.mode)
# untrained_model_tar, untrained_model_dis => model_name = '{}_mobilenetv2'.format(args.data)...
def train(args, model_path_tar, untrained_model_tar, model_path_dis=None, untrained_model_dis=None, device='cpu'):
    print('Training models...')
    # distillation training
    # load_model returns the model
    if 'distill' in args.mode:
        # load the untrained distill model (epoch=0; the untrained copy is saved at creation time)
        # the name is passed in to make loading easier
        trained_model, model_params = load_model(args, model_path_dis, untrained_model_dis, epoch=0)
        # load the already-trained target model
        # TODO load a trained Target Model (args.epochs is already the final training epoch)
        trained_model_tar, model_params_tar = load_model(args, model_path_tar, untrained_model_tar, epoch=args.epochs)
        trained_model_tar.to(device)
    # normal training
    else:
        # load the untrained target model (the untrained copy is saved at creation time)
        trained_model, model_params = load_model(args, model_path_tar, untrained_model_tar, epoch=0)
    print(model_params)
    # get the pre-split, data-augmented dataset
    trained_model.to(device)
    # get the pre-split, data-augmented dataset for the distill model
    dataset = utils.get_dataset(model_params['task'], args.mode, aug=True)
    if args.mode == 'distill_target':
        dataset_t = utils.get_dataset(model_params['task'], mode='target', aug=True)
    elif args.mode == 'distill_shadow':
        dataset_t = utils.get_dataset(model_params['task'], mode='shadow', aug=True)
    # hyper-parameter setup
    # TODO tune the learning schedule and check the effect of the changes
    learning_rate = model_params['learning_rate']
@@ -36,16 +194,109 @@ def train(args, model_path_tar, untrained_model_tar, model_path_dis = None, untr
    optimization_params = (learning_rate, weight_decay, momentum)
    optimizer, scheduler = utils.get_full_optimizer(trained_model, optimization_params, args)
    # model_name has the form dataset + model
    if 'distill' in args.mode:
        trained_model_name = untrained_model_dis
    else:
        trained_model_name = untrained_model_tar
    print('Training: {}...'.format(trained_model_name))
    trained_model.to(device)
    # TODO add quantization
    if args.quant_type is not None:
        print('Training: {}... with PTQ Target Model'.format(trained_model_name))
    else:
        print('Training: {}... with Target Model'.format(trained_model_name))
    # the actual training
    # metric can be appended to continuously, recording the details of every step
    gol._init()
    # TODO
    if args.quant_type is not None:
        top1, top5 = full_inference(args, trained_model_tar, dataset_t, device)
    # apply PTQ quantization to trained_model_tar
    if args.quant_type is not None and 'distill' in args.mode:
        if args.quant_type != 'INT':
            bias_list = utils.build_bias_list(args.quant_type)
            gol.set_value(bias_list, is_bias=True)
        if args.quant_type == 'FLOAT':
            title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
        else:
            title = '%s_%d' % (args.quant_type, args.num_bits)
        print('\nPTQ: ' + title)
        # storage path of the ptq target model
        ptq_file_prefix = model_path_tar + '/' + f'{args.data}_{args.model}/'
        # set up the quantization table
        if args.quant_type != 'INT':
            plist = utils.build_list(args.quant_type, args.num_bits, args.e_bits)
            gol.set_value(plist)
        # decide whether a stored ptq model needs to be loaded
        if args.load_ptq is True and osp.exists(ptq_file_prefix + title + '.pt'):
            trained_model_tar.quantize(args.quant_type, args.num_bits, args.e_bits)
            trained_model_tar.load_state_dict(torch.load(ptq_file_prefix + title + '.pt'))
            trained_model_tar.to(device)
            print('Successfully load ptq model: ' + title)
        else:
            trained_model_tar.quantize(args.quant_type, args.num_bits, args.e_bits)
            # TODO
            trained_model_tar.eval()
            direct_quantize(args, trained_model_tar, dataset_t, device)
            if args.store_ptq == True:
                torch.save(trained_model_tar.state_dict(), ptq_file_prefix + title + '.pt')
        trained_model_tar.freeze()
        # TODO
        ptq_top1, ptq_top5 = quantize_inference(args, trained_model_tar, dataset_t, device)
        acc_loss = (top1 - ptq_top1) / top1
        print(f"Target Model Quantization Finished, Acc Loss:{acc_loss}")

        filename = f'{args.model}_result.xlsx'
        try:
            # if the workbook already exists, load it
            workbook = openpyxl.load_workbook(filename)
        except FileNotFoundError:
            # if it does not exist, create a new Excel workbook
            workbook = openpyxl.Workbook()
        if args.data not in workbook.sheetnames:
            # if the worksheet does not exist, create a new one
            worksheet = workbook.create_sheet(title=args.data)
            # write the header row into the worksheet
            worksheet.cell(row=1, column=1, value='FP32-acc')
            worksheet.cell(row=1, column=2, value=top1)
            worksheet.cell(row=3, column=1, value='title')
            worksheet.cell(row=3, column=2, value='js_div')
            worksheet.cell(row=3, column=4, value='ptq_acc')
            worksheet.cell(row=3, column=5, value='acc_loss')
            worksheet.cell(row=3, column=6, value='AUC')
            worksheet.cell(row=3, column=7, value='ACC')
        else:
            worksheet = workbook[args.data]
            worksheet.cell(row=1, column=2, value=top1)
        idx = GlobalVariables.title_list.index(title)
        idx += 4
        worksheet.cell(row=idx, column=1, value=title)
        worksheet.cell(row=idx, column=4, value=ptq_top1)
        worksheet.cell(row=idx, column=5, value=acc_loss)
        workbook.save(filename)

    # model_path_dis was left unchanged earlier so the untrained distill model could be loaded
    if 'distill' in args.mode:
        metrics = trained_model.train_func(args, trained_model_tar, trained_model, dataset, num_epochs, optimizer, scheduler, model_params, model_path_dis, trained_model_name, device=device)
    else:
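As a quick worked example of the relative accuracy-loss metric computed above (the numbers are illustrative, not measured results):

top1, ptq_top1 = 93.2, 91.8          # hypothetical FP32 vs. PTQ top-1 accuracy (%)
acc_loss = (top1 - ptq_top1) / top1  # ~0.015, i.e. about a 1.5% relative drop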
@@ -62,7 +313,14 @@ def train(args, model_path_tar, untrained_model_tar, model_path_dis = None, untr
    model_params['total_time'] = total_training_time
    print('Training took {} seconds...'.format(total_training_time))
    # store the trained model's weights and model_params
    # store the trained model's weights and model_params (distinguishing whether/how the target model was quantized)
    # model_path_dis = 'networks/{}/{}'.format(args.seed, args.mode)
    trained_model_name_ptq = trained_model_name + '_' + title
    # stored under networks/{}/{}'.format(args.seed, args.mode)/trained_model_name + title
    if args.quant_type is not None and 'distill' in args.mode:
        save_model(trained_model, model_params, model_path_dis, trained_model_name_ptq, epoch=num_epochs)
    else:
        if 'distill' in args.mode:
            save_model(trained_model, model_params, model_path_dis, trained_model_name, epoch=num_epochs)
        else:
@@ -107,6 +365,9 @@ def train_models(args, model_path_tar, model_path_dis, device='cpu'):
            cnn_dis = create_mobilenetv2(model_path_dis, args)
        # load the untrained model and model_params, then start training
        # model_path_tar, model_path_dis = 'networks/{}/{}'.format(args.seed, args.mode)
        # cnn_tar, cnn_dis => model_name = '{}_mobilenetv2'.format(args.data)...
        train(args, model_path_tar, cnn_tar, model_path_dis, cnn_dis, device=device)
    else:
        train(args, model_path_tar, cnn_tar, device=device)
@@ -189,19 +450,6 @@ def create_vgg16bn(model_path, args):
    return model_name

# def create_mobile(model_path, args):
#     print('Creating MobileNet untrained {} models...'.format(args.data))
#     model_params = get_data_params(args.data)
#     model_name = '{}_mobilenet'.format(args.data)
#     model_params['network_type'] = 'mobilenet'
#     model_params['cfg'] = [64, (128,2), 128, (256,2), 256, (512,2), 512, 512, 512, 512, 512, (1024,2), 1024]
#     model_params['augment_training'] = True
#     model_params['init_weights'] = True
#     get_lr_params(model_params, args)
#     model_name = save_networks(args, model_name, model_params, model_path)
#     return model_name

def create_resnet56(models_path, args):
    print('Creating resnet56 untrained {} models...'.format(args.data))
@@ -310,7 +558,7 @@ def create_wideresnet32_4(models_path, args):
    return model_name

# instantiate the model and call save_model to store it
# instantiate the model and call save_model to store it; only used when creating a model
def save_networks(args, model_name, model_params, model_path):
    print('Saving CNN...')
    model_params['base_model'] = model_name
@@ -343,6 +591,10 @@ def save_networks(args, model_name, model_params, model_path):

def save_model(model, model_params, model_path, model_name, epoch=-1):
    if not os.path.exists(model_path):
        os.makedirs(model_path)
    # model_name is dataset_architecture
    # 'networks/{}/{}'.format(args.seed, args.mode) + model_name
    # for a ptq model the title is appended; otherwise it is just args.data + args.model
    network_path = model_path + '/' + model_name
    if not os.path.exists(network_path):
        os.makedirs(network_path)
mzh/new_mzh/Loss_Trajectory_MIA/readme.md
## Loss Trajectory MIA

#### Update 2023.6.1

1\. What was added:

- Training the Distill Target Model with the quantized Target Model (abbreviated Q-Distill)
- Building the Attack Model's test set from the Q-Distill loss trajectory
- Attacking the quantized Target Model with the Attack Model
- Using JS divergence to measure the similarity of the Distill Target Model's loss-trajectory data before and after quantizing the Target Model

2\. Approach

- Attack-scenario assumptions:
  We first inherit the attack scenario described previously (the attacker is assumed to know the Target Model's architecture and to have some knowledge of the dataset; the Shadow Model, Distill Target Model and Distill Shadow Model therefore all use the same architecture as the Target Model, and CIFAR10 is split into several disjoint parts serving as the training and test sets of the Target, Shadow, Distill Target and Distill Shadow Models).
  We additionally assume the attacker does not know, or does not care, whether the Target Model is quantized, and keeps using FP32 Shadow, Distill Target and Distill Shadow Models.
  Keeping the original Attack Model, we attack the quantized Target Model and use the change in Acc and AUC to examine the privacy safety of model transfer.
- Experiment design
  Given the scenario above, the required changes are concentrated in three places: training the Distill Target Model, building the Attack Model's test set, and testing the Attack Model.
  During training, the Distill Target Model now learns from the quantized Target Model's outputs instead of the FP32 model's outputs; that is, the quantized Target Model is distilled and the Distill Target Model is retrained.
  Since the attacker's Shadow Model and Distill Shadow Model are still FP32 models, they need no retraining; accordingly, the Attack Model, which is trained on the set built from the Distill Shadow Model's loss trajectory, needs no retraining either.
  The loss trajectories of the Distill Target Models obtained under the various quantizations are turned into test sets for the Attack Model, the Attack Model is tested on them, and the best Acc and the corresponding AUC are recorded.
  The JS divergence between the loss trajectories obtained before and after quantization is computed as the similarity measure.
- Expected results
  Because the Attack Model's training is determined by the Shadow Model and Distill Shadow Model, which are both FP32 models, the more similar the quantized Target Model is to the FP32 Target Model, the closer the Distill Models' loss trajectories should be, the better the Attack Model should perform, and the weaker the privacy safety after transfer.

3\. Experimental results

Experiments were run on ResNet18 + CIFAR10; the data are in **resnet18_result.xlsx**. The fitted curves are shown below; after some trials, a rational function with degree-2 numerator and denominator still fits best (a sketch of such a fit appears at the end of this update note).

acc_loss - js

<img src = "fig/acc_loss_curve.png" class="h-90 auto">

auc_loss - js

<img src = "fig/auc_loss_curve.png" class="h-90 auto">

When the JS distance is small (the loss trajectories are quite similar), acc_loss and auc_loss fluctuate relatively strongly, which suggests that a quantized Target Model may gain privacy safety even when it is quite similar to the FP32 Target Model.

4\. Issues

Q1: Slow runtime. For every quantization configuration, the Target Model must be quantized, a corresponding Distill Target Model trained, the Attack Model's test set built, and the Attack Model tested, which is a long pipeline.

A1: The current plan is to drop some of the slower test points, mainly in the FLOAT series: FLOAT_6_E2, FLOAT_7_E2, FLOAT_7_E3, FLOAT_8_E2, FLOAT_8_E3, FLOAT_8_E6. (FLOAT quantization is the slow part; with ResNet50 + CIFAR10 each distill-model epoch trains in under 1 min, so distill training is not very costly on CIFAR10/100, although with CINIC-10 it also becomes excessively expensive.)
POT_7 and POT_8 could also be dropped. The points planned for removal carry fairly redundant information, barely affect the curve trend, and take a long time to quantize.

Q2: The experiment pipeline is long for every model/dataset combination.

**Note**: the .sh training scripts are collected in the sh folder; among them,
``train_attack_18_10_ptq_FP_S1, train_attack_18_10_ptq_FP_S2, train_attack_18_10_ptq_FP_S3, train_attack_18_10_ptq_FP_S4, train_attack_18_10_ptq_INT_S1, train_attack_18_10_ptq_INT_S2, train_attack_18_10_ptq_POT, train_div.sh``
and others are the ones used for this update.
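A minimal sketch of the degree-2/degree-2 rational fit mentioned in section 3, with hypothetical data points in place of the values from resnet18_result.xlsx, might look like:

```python
import numpy as np
from scipy.optimize import curve_fit

def rational22(x, a0, a1, a2, b1, b2):
    # degree-2 / degree-2 rational function, the form reported to fit best above
    return (a0 + a1 * x + a2 * x**2) / (1.0 + b1 * x + b2 * x**2)

js = np.array([0.01, 0.03, 0.05, 0.10, 0.15, 0.20])        # hypothetical JS divergences
acc_loss = np.array([0.30, 0.26, 0.22, 0.15, 0.11, 0.08])  # hypothetical attack acc_loss values
params, _ = curve_fit(rational22, js, acc_loss, p0=np.ones(5), maxfev=10000)
```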
<br><br>
#### Update 2023.5.28

1\. Approach

@@ -12,7 +79,7 @@
But the traditional approach cannot distinguish inputs that are not in the training set yet still receive a very small model loss. The paper's authors found that during training, such small-loss data behave differently in loss-convergence speed and progression depending on whether they are in the training set. As shown in the figure, they have different loss trajectories.

![p1](fig\p1.png)

<img src = "fig/p1.png" class="h-90 auto">

If a small-loss sample is not in the training set, it is usually a relatively easy image whose training loss drops quickly, so in the middle of training its loss trajectory lies below that of samples in the training set. By capturing this difference in loss trajectories, a more effective MIA can be built.

@@ -773,8 +840,3 @@ Q2: How to make predictions?
A2: One option is to treat the loss trajectory as a property and predict by computing the similarity of the loss trajectories of in and out data (member_status can serve as the in/out label). Data points can be formed by pairing the Distill Target Model's loss trajectory with the attack Acc (AUC, ...), optionally supplemented by the Distill Shadow Model's.
The expected effect is that the lower the similarity, the higher the attack success rate.
mzh/new_mzh/Loss_Trajectory_MIA/resnet18_result.xlsx
(new file added)
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_152_10.sh
(new file)
#!/bin/bash

#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_152                   # The job name
#SBATCH -o ./info/ret-%j.out         # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err         # Write the standard error to file named 'ret-<job_number>.err'

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                    # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00                # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                    # Request N nodes
#SBATCH --gres=gpu:1                 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal             # Request QOS Type

###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K        # Request K cores
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00       # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                          # list modules loaded

##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424

##- language
module load python3/3.6.8

##- CUDA
module load cuda-cudnn/11.1-8.1.1

##- virtualenv
# source xxxxx/activate

echo $(module list)                  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                        # nas quota

nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cifar10"
python main.py --mode target --model resnet152 --data cifar10
echo "python main.py --mode shadow --model resnet152 --data cifar10"
python main.py --mode shadow --model resnet152 --data cifar10
echo "python main.py --mode distill_target --model resnet152 --data cifar10"
python main.py --mode distill_target --model resnet152 --data cifar10
echo "python main.py --mode distill_shadow --model resnet152 --data cifar10"
python main.py --mode distill_shadow --model resnet152 --data cifar10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar10"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar10

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_152_100.sh
(new file)
#!/bin/bash
# (SLURM preamble and environment setup are identical to sh/train_attack_152_10.sh above; the parameters that differ are:)
#SBATCH -J Tra_152_10                # The job name
#SBATCH -t 0-12:00:00                # Maximum run time
#SBATCH --qos=gpu-normal             # Request QOS Type

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cifar100"
python main.py --mode target --model resnet152 --data cifar100
echo "python main.py --mode shadow --model resnet152 --data cifar100"
python main.py --mode shadow --model resnet152 --data cifar100
echo "python main.py --mode distill_target --model resnet152 --data cifar100"
python main.py --mode distill_target --model resnet152 --data cifar100
echo "python main.py --mode distill_shadow --model resnet152 --data cifar100"
python main.py --mode distill_shadow --model resnet152 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cifar100

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_152_cinic.sh
(new file)
#!/bin/bash
# (SLURM preamble and environment setup are identical to sh/train_attack_152_10.sh above; the parameters that differ are:)
#SBATCH -J Tra_CIN_10                # The job name
#SBATCH -t 3-00:00:00                # Maximum run time
#SBATCH --qos=gpu-long               # Request QOS Type

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet152 --data cinic10"
python main.py --mode target --model resnet152 --data cinic10
echo "python main.py --mode shadow --model resnet152 --data cinic10"
python main.py --mode shadow --model resnet152 --data cinic10
echo "python main.py --mode distill_target --model resnet152 --data cinic10"
python main.py --mode distill_target --model resnet152 --data cinic10
echo "python main.py --mode distill_shadow --model resnet152 --data cinic10"
python main.py --mode distill_shadow --model resnet152 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet152 --model_distill resnet152 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet152 --model_distill resnet152 --data cinic10

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10.sh
(new file)
#!/bin/bash
# (SLURM preamble and environment setup are identical to sh/train_attack_152_10.sh above; the parameters that differ are:)
#SBATCH -J Tra_10_18                 # The job name
#SBATCH -t 1-00:00:00                # Maximum run time
#SBATCH --qos=gpu-normal             # Request QOS Type

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10"
python main.py --mode distill_target --model resnet18 --data cifar10
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_100.sh
(new file)
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_C100_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-00:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo
"Job start at
$(
date
"+%Y-%m-%d %H:%M:%S"
)
"
echo
"Job run at:"
echo
"
$(
hostnamectl
)
"
#- Load environments
source
/tools/module_env.sh
source
~/pyt1.5/bin/activate
module list
# list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo
$(
module list
)
# list modules loaded
echo
$(
which gcc
)
echo
$(
which python
)
echo
$(
which python3
)
cluster-quota
# nas quota
nvidia-smi
--format
=
csv
--query-gpu
=
name,driver_version,power.limit
# gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo
"Use GPU
${
CUDA_VISIBLE_DEVICES
}
"
# which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar100"
# python main.py --mode target --model resnet18 --data cifar100
# echo "python main.py --mode shadow --model resnet18 --data cifar100"
# python main.py --mode shadow --model resnet18 --data cifar100
# echo "python main.py --mode distill_target --model resnet18 --data cifar100"
# python main.py --mode distill_target --model resnet18 --data cifar100
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar100"
# python main.py --mode distill_shadow --model resnet18 --data cifar100
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_100_load.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_C100_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar100"
# python main.py --mode target --model resnet18 --data cifar100
# echo "python main.py --mode shadow --model resnet18 --data cifar100"
# python main.py --mode shadow --model resnet18 --data cifar100
# echo "python main.py --mode distill_target --model resnet18 --data cifar100"
# python main.py --mode distill_target --model resnet18 --data cifar100
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar100"
# python main.py --mode distill_shadow --model resnet18 --data cifar100
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar100 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_load.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J L_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10"
# python main.py --mode distill_target --model resnet18 --data cifar10
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S1.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3
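# NOTE (sketch): the FLOAT sweep above could equivalently loop over the
# (num_bits, e_bits) pairs; untested convenience form, not in the original run.
# for cfg in "3 1" "4 1" "4 2" "5 1" "5 2" "5 3"; do
#     set -- $cfg
#     python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits "$1" --e_bits "$2"
#     python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits "$1" --e_bits "$2"
#     python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits "$1" --e_bits "$2"
# done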
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S2.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4
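# NOTE (sketch): with num_bits fixed at 6, the three stages above could loop
# over e_bits instead; untested convenience form, not in the original run.
# for eb in 1 2 3 4; do
#     python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits "$eb"
#     python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits "$eb"
#     python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits "$eb"
# done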
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S3.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5
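# NOTE (sketch): same pattern as the 6-bit script, with num_bits fixed at 7;
# untested convenience form, not in the original run.
# for eb in $(seq 1 5); do
#     python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits "$eb"
#     python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits "$eb"
#     python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits "$eb"
# done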
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_FP_S4.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6
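# NOTE (sketch): same pattern again with num_bits fixed at 8; untested
# convenience form, not in the original run.
# for eb in $(seq 1 6); do
#     python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits "$eb"
#     python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits "$eb"
#     python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits "$eb"
# done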
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT4.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4"
# python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT5.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5"
# python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT6.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
# echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6"
# python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT7.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT8.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo
"python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 8"
python main.py
--action
1
--mia_type
black-box
--model
resnet18
--model_distill
resnet18
--data
cifar10
--load_attack
--quant_type
INT
--num_bits
8
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT_S1.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 5
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 6
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 7
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 8
# ATTACK
# for test full precision mia result
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 8
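# Loop form of the sweep above -- a minimal sketch, not part of the original job.
# It assumes each bit width's distill -> build-dataset -> attack chain depends
# only on its own artifacts, so per-bit chaining is equivalent to the staged order:
# for b in 2 3 4 5 6 7 8; do
#     python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits ${b}
#     python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits ${b}
#     python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits ${b}
# done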
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_INT_S2.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 9"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 9
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 10"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 11"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 11
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 12"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 12
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 13"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 13
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 14"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 14
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 15"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 15
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 16"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 16
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 9"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 9
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 11"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 11
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 12"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 12
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 13"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 13
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 14"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 14
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 15"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 15
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 16"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type INT --num_bits 16
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 9"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 9
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 11"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 11
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 12"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 12
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 13"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 13
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 14"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 14
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 15"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 15
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 16"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type INT --num_bits 16
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 6 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 2"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 2
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type POT --num_bits 8
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type POT --num_bits 8
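# The INT and POT sweeps differ only in --quant_type, so both could be driven by
# one parameterized loop -- a sketch, not part of the original job (QT and b are
# illustrative shell variables; it assumes both quant types accept the same
# companion flags, as the commands above suggest):
# QT=POT
# for b in 2 3 4 5 6 7 8; do
#     python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type ${QT} --num_bits ${b}
#     python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --quant_type ${QT} --num_bits ${b}
#     python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack --quant_type ${QT} --num_bits ${b}
# done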
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT3.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 3
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT4.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 4
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT5.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 5
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT6.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 6
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT7.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 7
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_10_ptq_POT8.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 0 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# echo "python main.py --mode target --model resnet18 --data cifar10"
# python main.py --mode target --model resnet18 --data cifar10
# echo "python main.py --mode shadow --model resnet18 --data cifar10"
# python main.py --mode shadow --model resnet18 --data cifar10
echo "python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8"
python main.py --mode distill_target --model resnet18 --data cifar10 --store_ptq --quant_type POT --num_bits 8
# echo "python main.py --mode distill_shadow --model resnet18 --data cifar10"
# python main.py --mode distill_shadow --model resnet18 --data cifar10
# echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10"
# python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10
# echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
# python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack"
# python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cifar10 --load_attack
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_18_cinic.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-12:00:00 # Run for a maximum time of 1 day, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-long # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet18 --data cinic10"
python main.py --mode target --model resnet18 --data cinic10
echo "python main.py --mode shadow --model resnet18 --data cinic10"
python main.py --mode shadow --model resnet18 --data cinic10
echo "python main.py --mode distill_target --model resnet18 --data cinic10"
python main.py --mode distill_target --model resnet18 --data cinic10
echo "python main.py --mode distill_shadow --model resnet18 --data cinic10"
python main.py --mode distill_shadow --model resnet18 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet18 --model_distill resnet18 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet18 --model_distill resnet18 --data cinic10
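# The full pipeline above, parameterized -- a minimal sketch, not part of the
# original job (NET/DATA/MODE are illustrative shell variables, not flags of main.py):
# NET=resnet18; DATA=cinic10
# for MODE in target shadow distill_target distill_shadow; do
#     python main.py --mode ${MODE} --model ${NET} --data ${DATA}
# done
# for MODE in shadow target; do
#     python main.py --action 1 --mode ${MODE} --mia_type build-dataset --model ${NET} --model_distill ${NET} --data ${DATA}
# done
# python main.py --action 1 --mia_type black-box --model ${NET} --model_distill ${NET} --data ${DATA}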
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_50 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will allocate 8 or 16 cores per gpu by default.
### If you need more or less, use the following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50"
python main.py --mode target --model resnet50
echo "python main.py --mode shadow --model resnet50"
python main.py --mode shadow --model resnet50
echo "python main.py --mode distill_target --model resnet50"
python main.py --mode distill_target --model resnet50
echo "python main.py --mode distill_shadow --model resnet50"
python main.py --mode distill_shadow --model resnet50
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_100.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_50_10 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
echo "python main.py --mode target --model resnet50 --data cifar100"
python main.py --mode target --model resnet50 --data cifar100
echo "python main.py --mode shadow --model resnet50 --data cifar100"
python main.py --mode shadow --model resnet50 --data cifar100
echo "python main.py --mode distill_target --model resnet50 --data cifar100"
python main.py --mode distill_target --model resnet50 --data cifar100
echo "python main.py --mode distill_shadow --model resnet50 --data cifar100"
python main.py --mode distill_shadow --model resnet50 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar100"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar100
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S1.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 5 --e_bits 3
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 3 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 4 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 5 --e_bits 3
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S2.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 6 --e_bits 4
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 6 --e_bits 4
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 6 --e_bits 4
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S3.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 7 --e_bits 5
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 7 --e_bits 5
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 7 --e_bits 5
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_FP_S4.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type FLOAT --num_bits 8 --e_bits 6
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type FLOAT --num_bits 8 --e_bits 6
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 1
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type FLOAT --num_bits 8 --e_bits 6
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_INT_S1.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 5
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 6"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 6
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 7"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 7
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 8"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 8
# ATTACK
# test the full-precision mia result first
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 8
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_INT_S2.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-00:00:00 # Run for a maximum time of 1 day, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 9"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 9
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 10"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 10
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 11"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 11
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 12"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 12
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 13"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 13
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 14"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 14
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 15"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 15
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 16"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type INT --num_bits 16
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 9"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 9
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 11"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 11
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 12"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 12
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 13"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 13
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 14"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 14
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 15"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 15
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 16"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type INT --num_bits 16
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 9"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 9
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 10"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 10
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 11"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 11
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 12"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 12
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 13"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 13
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 14"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 14
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 15"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 15
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 16"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type INT --num_bits 16
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_10_ptq_POT.sh
0 → 100644
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J I_Tra_10_18 # The job name
#SBATCH -o ./info/ret-%j.out # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00 # Run for a maximum time of 1 day, 06 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list  # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- virtualenv
# source xxxxx/activate
echo $(module list)  # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota  # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit  # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname
# TRAIN DISTILL MODEL
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 2"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 2
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 3"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 3
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 4"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 4
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 5"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 5
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 6"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 6
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 7"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 7
echo "python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 8"
python main.py --mode distill_target --model resnet50 --data cifar10 --store_ptq --quant_type POT --num_bits 8
# CONSTRUCT TEST DATASET
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 2"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 3"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 3
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 4"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 4
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 5"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 5
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 6"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 6
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 7"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 7
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 8"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cifar10 --quant_type POT --num_bits 8
# ATTACK
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 2"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 2
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 3"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 3
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 4"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 4
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 5"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 5
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 6"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 6
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 7"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 7
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 8"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cifar10 --load_attack --quant_type POT --num_bits 8
#- End
echo
"Job end at
$(
date
"+%Y-%m-%d %H:%M:%S"
)
"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_50_cinic.sh
0 → 100644
#!/bin/bash

#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_50                  # The job name
#SBATCH -o ./info/ret-%j.out           # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err           # Write the standard error to file named 'ret-<job_number>.err'

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                      # Submit to 'nv-gpu' Partition
#SBATCH -t 1-12:00:00                  # Run for a maximum time of 1 day, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                      # Request N nodes
#SBATCH --gres=gpu:1                   # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding   # CPU-GPU affinity
#SBATCH --qos=gpu-long                 # Request QOS type

###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, and even their names:
### #SBATCH --nodelist=gpu-v00         # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU type: Volta (V100 or V100S) or RTX8000
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                            # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- Language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- Virtualenv
# source xxxxx/activate

echo $(module list)                    # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                          # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which GPUs
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname

echo "python main.py --mode target --model resnet50 --data cinic10"
python main.py --mode target --model resnet50 --data cinic10
echo "python main.py --mode shadow --model resnet50 --data cinic10"
python main.py --mode shadow --model resnet50 --data cinic10
echo "python main.py --mode distill_target --model resnet50 --data cinic10"
python main.py --mode distill_target --model resnet50 --data cinic10
echo "python main.py --mode distill_shadow --model resnet50 --data cinic10"
python main.py --mode distill_shadow --model resnet50 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model resnet50 --model_distill resnet50 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cinic10"
python main.py --action 1 --mia_type black-box --model resnet50 --model_distill resnet50 --data cinic10

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_mobile_10.sh
0 → 100644
#!/bin/bash

#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_Mobile                  # The job name
#SBATCH -o ./info/ret-%j.out           # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err           # Write the standard error to file named 'ret-<job_number>.err'

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                      # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00                  # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                      # Request N nodes
#SBATCH --gres=gpu:1                   # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding   # CPU-GPU affinity
#SBATCH --qos=gpu-normal               # Request QOS type

###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, and even their names:
### #SBATCH --nodelist=gpu-v00         # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU type: Volta (V100 or V100S) or RTX8000
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                            # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- Language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- Virtualenv
# source xxxxx/activate

echo $(module list)                    # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                          # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which GPUs
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname

echo "python main.py --mode target --model mobilenetv2"
python main.py --mode target --model mobilenetv2
echo "python main.py --mode shadow --model mobilenetv2"
python main.py --mode shadow --model mobilenetv2
echo "python main.py --mode distill_target --model mobilenetv2"
python main.py --mode distill_target --model mobilenetv2
echo "python main.py --mode distill_shadow --model mobilenetv2"
python main.py --mode distill_shadow --model mobilenetv2
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2"
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_mobile_100.sh
0 → 100644
#!/bin/bash

#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_100_Mobile              # The job name
#SBATCH -o ./info/ret-%j.out           # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err           # Write the standard error to file named 'ret-<job_number>.err'

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                      # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00                  # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                      # Request N nodes
#SBATCH --gres=gpu:1                   # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding   # CPU-GPU affinity
#SBATCH --qos=gpu-normal               # Request QOS type

###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, and even their names:
### #SBATCH --nodelist=gpu-v00         # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU type: Volta (V100 or V100S) or RTX8000
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                            # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- Language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- Virtualenv
# source xxxxx/activate

echo $(module list)                    # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                          # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which GPUs
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname

echo "python main.py --mode target --model mobilenetv2 --data cifar100"
python main.py --mode target --model mobilenetv2 --data cifar100
echo "python main.py --mode shadow --model mobilenetv2 --data cifar100"
python main.py --mode shadow --model mobilenetv2 --data cifar100
echo "python main.py --mode distill_target --model mobilenetv2 --data cifar100"
python main.py --mode distill_target --model mobilenetv2 --data cifar100
echo "python main.py --mode distill_shadow --model mobilenetv2 --data cifar100"
python main.py --mode distill_shadow --model mobilenetv2 --data cifar100
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100"
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cifar100
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cifar100"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cifar100

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_attack_mobile_cinic.sh
0 → 100644
#!/bin/bash

#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J Tra_CIN_Mobile              # The job name
#SBATCH -o ./info/ret-%j.out           # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err           # Write the standard error to file named 'ret-<job_number>.err'

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                      # Submit to 'nv-gpu' Partition
#SBATCH -t 3-00:00:00                  # Run for a maximum time of 3 days, 00 hours, 00 mins, 00 secs
#SBATCH --nodes=1                      # Request N nodes
#SBATCH --gres=gpu:1                   # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding   # CPU-GPU affinity
#SBATCH --qos=gpu-long                 # Request QOS type

###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, and even their names:
### #SBATCH --nodelist=gpu-v00         # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU type: Volta (V100 or V100S) or RTX8000
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                            # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- Language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- Virtualenv
# source xxxxx/activate

echo $(module list)                    # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                          # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which GPUs
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname

echo "python main.py --mode target --model mobilenetv2 --data cinic10"
python main.py --mode target --model mobilenetv2 --data cinic10
echo "python main.py --mode shadow --model mobilenetv2 --data cinic10"
python main.py --mode shadow --model mobilenetv2 --data cinic10
echo "python main.py --mode distill_target --model mobilenetv2 --data cinic10"
python main.py --mode distill_target --model mobilenetv2 --data cinic10
echo "python main.py --mode distill_shadow --model mobilenetv2 --data cinic10"
python main.py --mode distill_shadow --model mobilenetv2 --data cinic10
echo "python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10"
python main.py --action 1 --mode shadow --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
echo "python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10"
python main.py --action 1 --mode target --mia_type build-dataset --model mobilenetv2 --model_distill mobilenetv2 --data cinic10
echo "python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cinic10"
python main.py --action 1 --mia_type black-box --model mobilenetv2 --model_distill mobilenetv2 --data cinic10

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_div.sh
0 → 100644
#!/bin/bash

#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J JS_DIV                      # The job name
#SBATCH -o ./info/ret-%j.out           # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err           # Write the standard error to file named 'ret-<job_number>.err'

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                      # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00                  # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                      # Request N nodes
#SBATCH --gres=gpu:1                   # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding   # CPU-GPU affinity
#SBATCH --qos=gpu-normal               # Request QOS type

###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, and even their names:
### #SBATCH --nodelist=gpu-v00         # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU type: Volta (V100 or V100S) or RTX8000
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                            # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- Language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- Virtualenv
# source xxxxx/activate

echo $(module list)                    # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                          # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which GPUs
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname

echo "python div.py --model resnet18 --data cifar10"
python div.py --model resnet18 --data cifar10

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/sh/train_plot.sh
0 → 100644
#!/bin/bash

#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J PLOT                        # The job name
#SBATCH -o ./info/ret-%j.out           # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ./info/ret-%j.err           # Write the standard error to file named 'ret-<job_number>.err'

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                      # Submit to 'nv-gpu' Partition
#SBATCH -t 0-12:00:00                  # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                      # Request N nodes
#SBATCH --gres=gpu:1                   # Request M GPUs per node
#SBATCH --gres-flags=enforce-binding   # CPU-GPU affinity
#SBATCH --qos=gpu-normal               # Request QOS type

###
### The system will allocate 8 or 16 cores per GPU by default.
### If you need more or fewer, use the following:
### #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying a constraint, any available node that meets the requirements will be allocated.
### You can specify the characteristics of the compute nodes, and even their names:
### #SBATCH --nodelist=gpu-v00         # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU type: Volta (V100 or V100S) or RTX8000
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                            # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- Language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/11.1-8.1.1
##- Virtualenv
# source xxxxx/activate

echo $(module list)                    # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                          # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which GPUs
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
sleep 2s
hostname

echo "python plot.py --model resnet50 --model_distill resnet50 --data cifar100"
python plot.py --model resnet50 --model_distill resnet50 --data cifar100

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
mzh/new_mzh/Loss_Trajectory_MIA/utils.py
...
...
@@ -188,22 +188,33 @@ def cnn_train(args, model, data, epochs, optimizer, scheduler, model_params, mod
# Per-step training for model distillation (distilling a trained Target/Shadow model via KL divergence)
-def cnn_training_step_dis(model, model_dis, optimizer, data, labels, device='cpu'):
+def cnn_training_step_dis(args, model, model_dis, optimizer, data, labels, device='cpu'):
    b_x = data.to(device)
    b_y_1 = labels.to(device)  # the labels themselves are never used
    output = model_dis(b_x)
    # The distill model learns from the target/shadow model: the loss compares the two
    # models' outputs rather than the labels. The teacher is the already-trained model,
    # and the optimizer only updates the distill model's weights.
+   if args.quant_type is None:
+       b_y = model(b_x)
+   else:
+       b_y = model.quantize_inference(b_x)
    loss = nn.KLDivLoss(reduction='batchmean')(F.log_softmax(output, dim=1), F.softmax(b_y, dim=1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()

# Training of the distill model
def cnn_train_dis(args, model, model_dis, data, epochs, optimizer, scheduler, model_params, model_path, trained_model_name, device='cpu'):
    metrics = {'epoch_times':[], 'test_top1_acc':[], 'test_top5_acc':[], 'train_top1_acc':[], 'train_top5_acc':[], 'lrs':[]}
+   if args.quant_type is not None:
+       if args.quant_type == 'FLOAT':
+           title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
+       else:
+           title = '%s_%d' % (args.quant_type, args.num_bits)
    for epoch in range(1, epochs+1):
        cur_lr = get_lr(optimizer)
...
...
@@ -225,7 +236,7 @@ def cnn_train_dis(args, model, model_dis, data, epochs, optimizer, scheduler, mo
        print('Epoch: {}/{}'.format(epoch, epochs))
        print('Cur lr: {}'.format(cur_lr))
        for i, (x, y, idx) in enumerate(train_loader):
-           cnn_training_step_dis(model, model_dis, optimizer, x, y, device)
+           cnn_training_step_dis(args, model, model_dis, optimizer, x, y, device)
        end_time = time.time()
        top1_test, top5_test = cnn_test(model_dis, test_loader, device)
...
...
@@ -255,7 +266,13 @@ def cnn_train_dis(args, model, model_dis, data, epochs, optimizer, scheduler, mo
    total_training_time = sum(model_params['epoch_times'])
    model_params['total_time'] = total_training_time
    print('Training took {} seconds...'.format(total_training_time))
    # TODO: store each epoch's data for the full-precision / PTQ variants of the Distill Target Model in the corresponding directories
-   save_model(model_dis, model_params, model_path, trained_model_name, epoch=epoch)
+   if args.quant_type is None:
+       save_model(model_dis, model_params, model_path, trained_model_name, epoch=epoch)
+   else:
+       trained_model_name_ptq = trained_model_name + '_' + title
+       save_model(model_dis, model_params, model_path, trained_model_name_ptq, epoch=epoch)
    return metrics
...
...
@@ -296,3 +313,184 @@ def get_full_optimizer(model, lr_params, args):
    scheduler = CosineAnnealingLR(optimizer, args.epochs)
    return optimizer, scheduler

# originally my own utils
def js_div(p_output, q_output, get_softmax=True):
    """
    Function that measures JS divergence between target and output logits:
    """
    KLDivLoss = nn.KLDivLoss(reduction='sum')
    if get_softmax:
        p_output = F.softmax(p_output)
        q_output = F.softmax(q_output)
    log_mean_output = ((p_output + q_output) / 2).log()
    return (KLDivLoss(log_mean_output, p_output) + KLDivLoss(log_mean_output, q_output)) / 2

def kl_div(p_output, q_output):
    """
    Function that measures KL divergence between target and output logits:
    """
    return F.kl_div(F.log_softmax(p_output, dim=1), F.softmax(q_output, dim=1))
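A quick sanity check of the two divergence helpers (an editorial example, not part of the diff): js_div is symmetric in its arguments while kl_div is not, and nn.KLDivLoss expects log-probabilities as its first argument and probabilities as its second, which is the convention both helpers follow.

# Editorial sanity check for js_div / kl_div.
import torch

torch.manual_seed(0)
p = torch.randn(4, 10)  # a batch of logits
q = torch.randn(4, 10)

print(js_div(p, q), js_div(q, p))  # equal up to floating-point error
print(kl_div(p, q), kl_div(q, p))  # generally different
assert torch.isclose(js_div(p, q), js_div(q, p), atol=1e-6)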
def ebit_list(quant_type, num_bits):
    if quant_type == 'FLOAT':
        e_bit_list = list(range(1, num_bits - 1))
    else:
        e_bit_list = [0]
    return e_bit_list

def numbit_list(quant_type):
    if quant_type == 'INT':
        num_bit_list = list(range(2, 17))
    elif quant_type == 'POT':
        num_bit_list = list(range(2, 9))
    else:
        num_bit_list = list(range(2, 9))
        # num_bit_list = [8]
    return num_bit_list
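Together these two helpers define the quantization sweep: INT covers 2..16 bits, POT and FLOAT cover 2..8, and only FLOAT varies the exponent width (e_bits runs from 1 to num_bits - 2). A small editorial example that enumerates every (quant_type, num_bits, e_bits) configuration:

# Editorial example: enumerate the full sweep space implied by the helpers above.
for quant_type in ['INT', 'POT', 'FLOAT']:
    for num_bits in numbit_list(quant_type):
        for e_bits in ebit_list(quant_type, num_bits):
            print(quant_type, num_bits, e_bits)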
def build_bias_list(quant_type):
    if quant_type == 'POT':
        return build_pot_list(8)
    else:
        return build_float_list(16, 7)

def build_list(quant_type, num_bits, e_bits):
    if quant_type == 'POT':
        return build_pot_list(num_bits)
    else:
        return build_float_list(num_bits, e_bits)

def build_pot_list(num_bits):
    plist = [0.]
    for i in range(-2**(num_bits - 1) + 2, 1):
        # i goes up to 0, so the largest POT quantization value is 1
        plist.append(2. ** i)
        plist.append(-2. ** i)
    plist = torch.Tensor(list(set(plist)))
    # plist = plist.mul(1.0 / torch.max(plist))
    return plist
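An editorial example of the grid this produces: for num_bits=3 the loop runs i over -2**(num_bits-1)+2 .. 0, so the largest magnitude is 2**0 = 1 and the grid contains 2**num_bits - 1 distinct values (zero plus symmetric powers of two).

# Editorial example: the power-of-two grid for num_bits=3.
grid = build_pot_list(3)
print(sorted(grid.tolist()))
# [-1.0, -0.5, -0.25, 0.0, 0.25, 0.5, 1.0]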
def build_float_list(num_bits, e_bits):
    m_bits = num_bits - 1 - e_bits
    plist = [0.]
    # distance between adjacent mantissa values
    dist_m = 2 ** (-m_bits)
    e = -2 ** (e_bits - 1) + 1
    for m in range(1, 2 ** m_bits):
        frac = m * dist_m   # mantissa part
        expo = 2 ** e       # exponent part
        flt = frac * expo
        plist.append(flt)
        plist.append(-flt)
    for e in range(-2 ** (e_bits - 1) + 2, 2 ** (e_bits - 1) + 1):
        expo = 2 ** e
        for m in range(0, 2 ** m_bits):
            frac = 1. + m * dist_m
            flt = frac * expo
            plist.append(flt)
            plist.append(-flt)
    plist = torch.Tensor(list(set(plist)))
    return plist
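An editorial example for a small configuration, num_bits=4 and e_bits=2 (so m_bits=1): the first loop contributes the subnormal value 0.5 * 2**-1 = 0.25, and the second contributes the normal values (1 + m/2) * 2**e for e in {0, 1, 2}.

# Editorial example: the FLOAT grid for num_bits=4, e_bits=2.
grid = build_float_list(4, 2)
print(sorted(grid.tolist()))
# [-6.0, -4.0, -3.0, -2.0, -1.5, -1.0, -0.25, 0.0, 0.25, 1.0, 1.5, 2.0, 3.0, 4.0, 6.0]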
def fold_ratio(layer, par_ratio, flop_ratio):
    idx = -1
    for name in layer:
        idx = idx + 1
        # layer is extracted from model.named_parameters(), so the downsample entries are always present
        if 'bn' in name or 'sample.1' in name:
            par_ratio[idx - 1] += par_ratio[idx]
            flop_ratio[idx - 1] += flop_ratio[idx]
    return par_ratio, flop_ratio
def fold_model(model):
    idx = -1
    module_list = []
    # print('fold model:')
    for name, module in model.named_modules():
        # print(name + '-- +')
        idx += 1
        module_list.append(module)
        # the conv inside downsample was overlooked at first, so some layers went unfused
        if 'bn' in name or 'sample.1' in name:
            # print(name + '-- *')
            module_list[idx - 1] = fold_bn(module_list[idx - 1], module)  # modified here
    return model

# def fold_model(model):
#     last_conv = None
#     last_bn = None
#     for name, module in model.named_modules():
#         if isinstance(module, nn.Conv2d):
#             # if the current module is a conv layer, "fold" it into the previous BN layer
#             if last_bn is not None:
#                 last_conv = fold_bn(last_conv, last_bn)
#                 last_bn = None
#             last_conv = module
#         elif isinstance(module, nn.BatchNorm2d):
#             # if the current module is a BN layer, "fold" it into the previous conv layer
#             last_bn = module
#             if last_conv is not None:
#                 last_conv = fold_bn(last_conv, last_bn)
#                 last_bn = None
#     # handle the last BN layer
#     if last_bn is not None:
#         last_conv = fold_bn(last_conv, last_bn)
#     return model
def fold_bn(conv, bn):
    # get the BN layer's parameters
    gamma = bn.weight.data
    beta = bn.bias.data
    mean = bn.running_mean
    var = bn.running_var
    eps = bn.eps
    std = torch.sqrt(var + eps)
    feat = bn.num_features
    # get the conv layer's parameters
    weight = conv.weight.data
    if conv.bias is not None:
        bias = conv.bias.data
    if bn.affine:
        gamma_ = gamma / std
        weight = weight * gamma_.view(feat, 1, 1, 1)
        if conv.bias is not None:
            bias = gamma_ * bias - gamma_ * mean + beta
        else:
            bias = beta - gamma_ * mean
    else:
        gamma_ = 1 / std
        # reshape for per-output-channel broadcasting (the original multiplied the
        # unshaped (feat,) tensor, which would broadcast along the wrong dimension)
        weight = weight * gamma_.view(feat, 1, 1, 1)
        if conv.bias is not None:
            bias = gamma_ * bias - gamma_ * mean
        else:
            bias = -gamma_ * mean
    # set the new weight and bias
    conv.weight.data = weight
    # handles the bias=None case
    if conv.bias is None:
        conv.bias = nn.Parameter(bias)
    else:
        conv.bias.data = bias
    return conv
\ No newline at end of file
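An editorial verification sketch for fold_bn: folding a BatchNorm2d into its preceding Conv2d should leave eval-mode outputs unchanged up to floating-point error. This exercises the affine, bias-free path above; the layer shapes are arbitrary choices for the test.

# Editorial sketch: check that the fused conv matches Conv -> BN in eval mode.
import copy
import torch
import torch.nn as nn

conv = nn.Conv2d(3, 8, kernel_size=3, padding=1, bias=False)
bn = nn.BatchNorm2d(8)
conv.eval(); bn.eval()            # BN must use its running statistics
bn.running_mean.uniform_(-1, 1)   # give BN non-trivial statistics
bn.running_var.uniform_(0.5, 2.0)

x = torch.randn(2, 3, 16, 16)
ref = bn(conv(x))                          # original Conv -> BN output
folded = fold_bn(copy.deepcopy(conv), bn)  # fused convolution (fold_bn mutates its input)
assert torch.allclose(folded(x), ref, atol=1e-4)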