# Commit 625093df by Zhihong Ma
# feat: Second Idea for Model Transfer Safety
# parent f627ea54
import torch
import torch.nn as nn
import torch.nn.functional as F
import numpy as np
import tqdm
import mia_utils
import utils
import normal
import dataset as DATA
from typing import TYPE_CHECKING, Callable, List, Optional, Tuple, Union
from sklearn import metrics
import openpyxl
import gol
import sys
from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR, MultiStepLR
class MLP_BLACKBOX(nn.Module):
    """Four-layer MLP used as the black-box membership-inference attack model.

    The input is a flattened feature vector of length ``dim_in`` (the distill
    loss trajectory with the final loss appended).  The output is two raw
    logits (member / non-member); no softmax is applied here because training
    uses ``nn.CrossEntropyLoss`` on the logits.
    """

    def __init__(self, dim_in):
        super(MLP_BLACKBOX, self).__init__()
        self.dim_in = dim_in
        self.fc1 = nn.Linear(self.dim_in, 512)
        self.fc2 = nn.Linear(512, 128)
        self.fc3 = nn.Linear(128, 32)
        self.fc4 = nn.Linear(32, 2)

    def forward(self, x):
        # Flatten whatever shape arrives into (batch, dim_in).
        h = x.view(-1, self.dim_in)
        # Three hidden layers with ReLU, then the raw 2-way logit head.
        for hidden in (self.fc1, self.fc2, self.fc3):
            h = F.relu(hidden(h))
        return self.fc4(h)
def train_mia_attack_model(args, epoch, model, attack_train_loader, optimizer, loss_fn, device):
    """Run one training epoch of the attack model.

    Each loader item is a 6-tuple; only the trajectory, the final loss and the
    in/out membership label are used here.  The attack feature is the loss
    trajectory with the model's final loss concatenated as the last column.

    :return: (mean loss per sample, training accuracy) for this epoch.
    """
    model.train()
    running_loss = 0.0
    n_correct = 0
    for model_loss_ori, model_trajectory, orginal_labels, predicted_labels, predicted_status, member_status in attack_train_loader:
        # Attack input: trajectory columns + final loss as one extra column.
        features = torch.cat((model_trajectory, model_loss_ori.unsqueeze(1)), 1).to(device)
        member_status = member_status.to(device)
        logits = model(features)
        # member_status is the in/out (member / non-member) target label.
        loss = loss_fn(logits, member_status)
        optimizer.zero_grad()
        loss.backward()
        optimizer.step()
        running_loss += loss.item()
        # argmax over the two logits gives the predicted membership class.
        predictions = logits.max(1, keepdim=True)[1]
        n_correct += predictions.eq(member_status.view_as(predictions)).sum().item()
    # NOTE: this divides the sum of per-batch mean losses by the number of
    # samples (same as the original implementation).
    avg_loss = running_loss / len(attack_train_loader.dataset)
    accuracy = n_correct / len(attack_train_loader.dataset)
    return avg_loss, accuracy
def test_mia_attack_model(args, epoch, model, attack_test_loader, loss_fn, max_auc, max_acc, device):
    """Evaluate the attack model and track the best accuracy/AUC pair.

    ``max_auc`` is the AUC recorded at the epoch of best accuracy (they are
    updated together, so max_auc is not necessarily the maximum AUC ever seen).

    :return: (test loss, accuracy, auc, updated max_auc, updated max_acc)
    """
    model.eval()
    total_loss = 0
    n_correct = 0
    gt_chunks = []
    score_chunks = []
    with torch.no_grad():
        for model_loss_ori, model_trajectory, orginal_labels, predicted_labels, predicted_status, member_status in attack_test_loader:
            # Same feature layout as training: trajectory + final loss column.
            features = torch.cat((model_trajectory, model_loss_ori.unsqueeze(1)), 1).to(device)
            logits = model(features)
            member_status = member_status.to(device)
            total_loss += loss_fn(logits, member_status).item()
            _, predictions = logits.max(1, keepdim=True)
            n_correct += predictions.eq(member_status.view_as(predictions)).sum().item()
            # NOTE(review): the ROC score is the raw last-column *logit*, not a
            # softmax probability (the softmax in forward() is commented out).
            gt_chunks.append(member_status.cpu().numpy())
            score_chunks.append(logits[:, -1].cpu().numpy())
    auc_ground_truth = np.concatenate(gt_chunks, axis=0)
    auc_pred = np.concatenate(score_chunks, axis=0)
    test_loss = total_loss / len(attack_test_loader.dataset)
    accuracy = n_correct / len(attack_test_loader.dataset)
    fpr, tpr, thresholds = metrics.roc_curve(auc_ground_truth, auc_pred, pos_label=1)
    auc = metrics.auc(fpr, tpr)
    # Update the (acc, auc) pair only when accuracy improves.
    if accuracy > max_acc:
        max_acc = accuracy
        max_auc = auc
    return test_loss, accuracy, auc, max_auc, max_acc
def check_and_transform_label_format(
    labels: np.ndarray, nb_classes: Optional[int] = None, return_one_hot: bool = True
) -> np.ndarray:
    """
    Check label format and transform to one-hot-encoded labels if necessary.

    :param labels: An array of integer labels of shape `(nb_samples,)`, `(nb_samples, 1)` or `(nb_samples, nb_classes)`.
    :param nb_classes: The number of classes.
    :param return_one_hot: True if returning one-hot encoded labels, False if returning index labels.
    :return: Labels with shape `(nb_samples, nb_classes)` (one-hot) or `(nb_samples,)` (index).
    """
    if labels is None:
        return labels
    ndim = len(labels.shape)
    # Already one-hot: optionally collapse back to index labels.
    if ndim == 2 and labels.shape[1] > 1:
        return np.argmax(labels, axis=1) if not return_one_hot else labels
    # Column vector of indices with a known multi-class count.
    if ndim == 2 and labels.shape[1] == 1 and nb_classes is not None and nb_classes > 2:
        flat = np.squeeze(labels)
        return to_categorical(flat, nb_classes) if return_one_hot else flat
    # Binary column vector is returned unchanged (matches the original ART behavior).
    if ndim == 2 and labels.shape[1] == 1 and nb_classes is not None and nb_classes == 2:
        return labels
    # Plain index vector.
    if ndim == 1:
        if not return_one_hot:
            return labels
        # Binary case keeps a single column; otherwise expand to one-hot.
        return np.expand_dims(labels, axis=1) if nb_classes == 2 else to_categorical(labels, nb_classes)
    raise ValueError(
        "Shape of labels not recognised."
        "Please provide labels in shape (nb_samples,) or (nb_samples, nb_classes)"
    )
def to_categorical(labels: Union[np.ndarray, List[float]], nb_classes: Optional[int] = None) -> np.ndarray:
    """
    Convert an array of integer labels to a binary (one-hot) class matrix.

    :param labels: An array of integer labels of shape `(nb_samples,)`.
    :param nb_classes: The number of classes (possible labels); inferred as
        ``max(labels) + 1`` when omitted.
    :return: A float32 binary matrix of shape `(nb_samples, nb_classes)`.

    Example::

        to_categorical([0, 1, 2, 0, 2, 1])
        # -> [[1,0,0],[0,1,0],[0,0,1],[1,0,0],[0,0,1],[0,1,0]]
    """
    arr = np.array(labels, dtype=np.int32)
    n_samples = arr.shape[0]
    if nb_classes is None:
        nb_classes = np.max(arr) + 1
    one_hot = np.zeros((n_samples, nb_classes), dtype=np.float32)
    # Scatter a 1 into each row at its label's column.
    one_hot[np.arange(n_samples), np.squeeze(arr)] = 1
    return one_hot
def build_trajectory_membership_dataset(args, ori_model_path, device='cpu'):
    """Build and save the attack dataset: loss trajectories + membership labels.

    Loads the (optionally post-training-quantized) target or shadow model,
    runs it over its train and test splits, collects each sample's distill
    loss trajectory via get_trajectory(), appends the model's own final loss,
    and saves everything as a .npy dict for the attack model.

    :param args: parsed CLI args (mode, quant_type, num_bits, e_bits, epochs, ...)
    :param ori_model_path: root checkpoint directory (contains target/ and shadow/)
    :param device: torch device string
    """
    # model_name = dataset + architecture, e.g. "cifar10_ResNet_18".
    model_name = f'{args.data}_{args.model}'
    if args.quant_type is not None:
        # Quantization title used in checkpoint file names.
        if args.quant_type == 'FLOAT':
            title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
        else:
            title = '%s_%d' % (args.quant_type, args.num_bits)
        # Initialize the global quantization lookup tables.
        # NOTE(review): indentation reconstructed — these inits are nested under
        # the quant branch because build_bias_list(None) would not make sense;
        # confirm against the original file.
        gol._init()
        if args.quant_type != 'INT':
            bias_list = utils.build_bias_list(args.quant_type)
            gol.set_value(bias_list, is_bias=True)
        # NOTE(review): same condition appears twice; both lists are built for
        # every non-INT quant type.
        if args.quant_type != 'INT':
            plist = utils.build_list(args.quant_type, args.num_bits, args.e_bits)
            gol.set_value(plist)
    # Load the full-precision model; for quantized runs it only serves as the
    # container whose quantized weights are loaded below.
    if args.mode == 'target':
        cnn_model, cnn_params = normal.load_model(args, ori_model_path+'/target', model_name, epoch=args.epochs)
        print("Successfully load Target Model")
    elif args.mode == 'shadow':
        cnn_model, cnn_params = normal.load_model(args, ori_model_path+'/shadow', model_name, epoch=args.epochs)
        print("Successfully load Shadow Model")
    else:
        print("Error: invalid mode in build_trajectory_membership_dataset")
    if args.quant_type is None:
        MODEL = cnn_model.to(device)
    else:
        # Rebuild the quantized graph and load the PTQ state dict.
        ptq_file_prefix = 'mia_ckpt/{}/{}'.format(args.seed, args.mode) + '/' + f'{args.data}_{args.model}/'
        cnn_model.quantize(args.quant_type,args.num_bits,args.e_bits)
        cnn_model.load_state_dict(torch.load(ptq_file_prefix + title + '.pt'))
        MODEL = cnn_model.to(device)
        # Freeze quantization parameters for inference.
        MODEL.freeze()
        print('Successfully load ptq model: ' + title)
        print('pt file path:' + ptq_file_prefix + title + '.pt')
    # cnn_params['task'] names the dataset. batch_size=512: smaller batches
    # (128) were observed to cost ~2% attack accuracy on ResNet_18.
    dataset = mia_utils.get_dataset(cnn_params['task'], mode=args.mode, aug=True, batch_size=512)
    if args.mode == 'target':
        print('load target_dataset ... ')
        train_loader = dataset.aug_target_train_loader
        # The test split is NOT augmented.
        test_loader = dataset.target_test_loader
    elif args.mode == 'shadow':
        print('load shadow_dataset ... ')
        train_loader = dataset.aug_shadow_train_loader
        test_loader = dataset.shadow_test_loader
    # NOTE(review): model_top1/model_loss and the misspelled `orginal_labels`
    # are never read — the loop below (re)binds model_loss_ori,
    # model_trajectory and original_labels on its first iteration.
    model_top1 = None
    model_loss = None
    orginal_labels = None
    predicted_labels = None
    predicted_status = None
    member_status = None
    def normalization(data):
        # Min-max normalization helper (currently unused in this function).
        _range = np.max(data) - np.min(data)
        return (data - np.min(data)) / _range
    MODEL.eval()
    # loader_idx 0 = training split (members), 1 = test split (non-members).
    for loader_idx, data_loader in enumerate([train_loader, test_loader]):
        top1 = DATA.AverageMeter()  # NOTE(review): created but never updated
        # ori_idx is each sample's index in the original dataset (unused here).
        for data_idx, (data, target, ori_idx) in enumerate(data_loader):
            # Per-sample distill loss trajectory, one column per distill epoch.
            batch_trajectory = get_trajectory(data, target, args, ori_model_path, device)
            data, target = data.to(device), target.to(device)
            if args.quant_type is None:
                batch_logit_target = MODEL(data)
            else:
                batch_logit_target = MODEL.quantize_inference(data)
            # Predicted class = argmax over logits.
            _, batch_predict_label = batch_logit_target.max(1)
            batch_predicted_label = batch_predict_label.long().cpu().detach().numpy()
            batch_original_label = target.long().cpu().detach().numpy()
            # Final loss of the (quantized) model itself; appended to the
            # trajectory later as the attack model's last input column.
            batch_loss_target = [F.cross_entropy(batch_logit_target_i.unsqueeze(0), target_i.unsqueeze(0)) for (batch_logit_target_i, target_i) in zip(batch_logit_target, target)]
            batch_loss_target = np.array([batch_loss_target_i.cpu().detach().numpy() for batch_loss_target_i in batch_loss_target])
            # Per-sample correctness flag (1.0 if argmax == label).
            batch_predicted_status = (torch.argmax(batch_logit_target, dim=1) == target).float().cpu().detach().numpy()
            # (batch,) -> (batch, 1) so it can be concatenated column-wise.
            batch_predicted_status = np.expand_dims(batch_predicted_status, axis=1)
            # Membership label for the attack model: 1 = member (train split).
            member = np.repeat(np.array(int(1 - loader_idx)), batch_trajectory.shape[0], 0)
            batch_loss_ori = batch_loss_target
            # First batch assigns, later batches concatenate.
            model_loss_ori = batch_loss_ori if loader_idx == 0 and data_idx == 0 else np.concatenate((model_loss_ori, batch_loss_ori), axis=0)
            model_trajectory = batch_trajectory if loader_idx == 0 and data_idx == 0 else np.concatenate((model_trajectory, batch_trajectory), axis=0)
            original_labels = batch_original_label if loader_idx == 0 and data_idx == 0 else np.concatenate((original_labels, batch_original_label), axis=0)
            predicted_labels = batch_predicted_label if loader_idx == 0 and data_idx == 0 else np.concatenate((predicted_labels, batch_predicted_label), axis=0)
            predicted_status = batch_predicted_status if loader_idx == 0 and data_idx == 0 else np.concatenate((predicted_status, batch_predicted_status), axis=0)
            member_status = member if loader_idx == 0 and data_idx == 0 else np.concatenate((member_status, member), axis=0)
    print(f'------------Loading trajectory {args.mode} dataset successfully!---------')
    data = {
        'model_loss_ori':model_loss_ori,
        'model_trajectory':model_trajectory,
        'original_labels':original_labels,
        'predicted_labels':predicted_labels,
        'predicted_status':predicted_status,
        # in/out-of-training-set flag
        'member_status':member_status,
        'nb_classes':dataset.num_classes
    }
    # Shadow-model trajectories train the attack model; target-model
    # trajectories are the attack's test data.
    dataset_type = 'trajectory_train_data' if args.mode == 'shadow' else 'trajectory_test_data'
    mia_utils.create_path(ori_model_path + f'/{args.mode}/{model_name}')
    if args.quant_type is not None:
        # Quantized runs get their own directory suffixed with the quant title.
        model_name_ptq = model_name + '_' + title
        mia_utils.create_path(ori_model_path + f'/{args.mode}/{model_name_ptq}')
        np.save(ori_model_path + f'/{args.mode}/{model_name_ptq}/{dataset_type}', data)
    else:
        np.save(ori_model_path + f'/{args.mode}/{model_name}/{dataset_type}', data)
def trajectory_black_box_membership_inference_attack(args, models_path, device='cpu'):
    """Train/evaluate the black-box MIA attack model on loss trajectories.

    Shadow-model trajectories are the attack training set; target-model
    trajectories are the attack test set.  When ``args.load_attack`` is True
    (e.g. evaluating a quantized target), a previously trained attack model is
    loaded instead of retraining.  Results are appended to an .xlsx report.

    :param args: parsed CLI args (data, model, model_distill, quant_type, ...)
    :param models_path: root directory holding shadow/, target/ and attack/
    :param device: torch device string
    """
    if args.quant_type is not None:
        # Quantization title used in file/sheet naming.
        if args.quant_type == 'FLOAT':
            title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
        else:
            title = '%s_%d' % (args.quant_type, args.num_bits)
    model_name = f'{args.data}_{args.model}'
    model_distill_name = f'{args.data}_{args.model_distill}'
    if args.quant_type is None:
        print(f"MODEL NAME IS :{model_name}")
        print(f"MODEL DISTILL NAME IS :{model_distill_name}")
    else:
        print(f"MODEL NAME IS :{model_name}_{title}")
        print(f"MODEL DISTILL NAME IS :{model_distill_name}_{title}")
    cnn = model_name  # NOTE(review): bound but never used below
    orgin_model_name = model_name
    save_path = models_path + '/attack/' + model_name
    # Quantized runs train/store their attack model under a title-suffixed dir.
    if args.quant_type is not None:
        model_name_ptq = model_name + '_' +title
        save_path = models_path + '/attack/' + model_name_ptq
    mia_utils.create_path(save_path)
    best_prec1 = 0.0
    best_auc = 0.0
    epoch = 0
    # Attack train set comes from the shadow model's trajectories.
    if args.quant_type is None:
        AttackModelTrainSet = np.load(models_path + f'/shadow/{model_name}/trajectory_train_data.npy', allow_pickle=True).item()
    else:
        AttackModelTrainSet = np.load(models_path + f'/shadow/{model_name_ptq}/trajectory_train_data.npy', allow_pickle=True).item()
    # Attack test set comes from the (possibly PTQ) target model's trajectories.
    if args.quant_type is None:
        AttackModelTestSet = np.load(models_path + f'/target/{model_name}/trajectory_test_data.npy', allow_pickle=True).item()
    else:
        AttackModelTestSet = np.load(models_path + f'/target/{model_name_ptq}/trajectory_test_data.npy', allow_pickle=True).item()
    # Build the training loader only when a new attack model will be trained.
    if args.load_attack is False:
        train_set = torch.utils.data.TensorDataset(
            torch.from_numpy(np.array(AttackModelTrainSet['model_loss_ori'], dtype='f')),
            torch.from_numpy(np.array(AttackModelTrainSet['model_trajectory'], dtype='f')),
            # one-hot encoded original labels
            torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTrainSet['original_labels'], nb_classes=AttackModelTrainSet['nb_classes'], return_one_hot=True))).type(torch.float),
            torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTrainSet['predicted_labels'], nb_classes=AttackModelTrainSet['nb_classes'], return_one_hot=True))).type(torch.long),
            # correctness flag as a 2-class one-hot
            torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTrainSet['predicted_status'], nb_classes=2, return_one_hot=True)[:,:2])).type(torch.long),
            # membership (in/out of training set) label
            torch.from_numpy(np.array(AttackModelTrainSet['member_status'])).type(torch.long),)
        attack_train_loader = torch.utils.data.DataLoader(train_set, batch_size=128, shuffle=True)
    # The test loader is always needed (both the train-and-test and the
    # load-and-test paths evaluate on it).
    # NOTE(review): indentation reconstructed — test_set must be OUTSIDE the
    # `load_attack is False` branch or the else-branch below would hit a
    # NameError on attack_test_loader; confirm against the original file.
    test_set = torch.utils.data.TensorDataset(
        torch.from_numpy(np.array(AttackModelTestSet['model_loss_ori'], dtype='f')),
        torch.from_numpy(np.array(AttackModelTestSet['model_trajectory'], dtype='f')),
        torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTestSet['original_labels'], nb_classes=AttackModelTestSet['nb_classes'], return_one_hot=True))).type(torch.float),
        torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTestSet['predicted_labels'], nb_classes=AttackModelTestSet['nb_classes'], return_one_hot=True))).type(torch.long),
        torch.from_numpy(np.array(check_and_transform_label_format(AttackModelTestSet['predicted_status'], nb_classes=2, return_one_hot=True)[:,:2])).type(torch.long),
        torch.from_numpy(np.array(AttackModelTestSet['member_status'])).type(torch.long),)
    attack_test_loader = torch.utils.data.DataLoader(test_set, batch_size=128, shuffle=True)
    print(f'-------------------"Loss Trajectory"------------------')
    # Attack input dimension = trajectory length + 1 (the final loss column).
    attack_model = MLP_BLACKBOX(dim_in = args.epochs_distill + 1)
    attack_model = attack_model.to(device)
    lr = 0.1
    weight_decay = 5e-4
    momentum = 0.9
    gamma = 0.2
    attack_optimizer = torch.optim.SGD(attack_model.parameters(), lr=lr, momentum=momentum, weight_decay=weight_decay)
    milestones = [60, 120, 160]
    # NOTE(review): scheduler is created but scheduler.step() is never called,
    # so the LR stays at 0.1 for all 200 epochs.
    scheduler = MultiStepLR(attack_optimizer, milestones=milestones, gamma=gamma)
    loss_fn = nn.CrossEntropyLoss()
    max_auc = 0
    max_acc = 0
    if args.load_attack is False:
        # Train a fresh attack model, checkpointing on best accuracy.
        print('Start Training Attack Model')
        for epoch in range(200):
            train_loss, train_prec1 = train_mia_attack_model(args, epoch, attack_model, attack_train_loader, attack_optimizer, loss_fn, device)
            # TODO: carve out a separate validation split instead of tracking
            # the best epoch on the test loader.
            val_loss, val_prec1, val_auc, max_auc, max_acc = test_mia_attack_model(args, epoch, attack_model, attack_test_loader, loss_fn, max_auc, max_acc, device)
            is_best_prec1 = val_prec1 > best_prec1
            if is_best_prec1:
                # best_auc is the AUC at the best-accuracy epoch, not max AUC.
                best_prec1 = val_prec1
                best_auc = val_auc
                torch.save(attack_model.state_dict(), save_path + '/' + 'trajectory' + '.pkl')
                print(f"Save Best ACC Attack Model: acc:{val_prec1}, auc:{val_auc}")
            if epoch % 10 == 0:
                print(('epoch:{} \t train_loss:{:.4f} \t test_loss:{:.4f} \t train_prec1:{:.4f} \t test_prec1:{:.4f} \t val_prec1:{:.4f} \t val_auc:{:.4f}')
                      .format(epoch, train_loss, val_loss,
                              train_prec1, val_prec1, val_prec1, val_auc))
        # Reload the best checkpoint and re-evaluate it once.
        print(f'Load Trained Attack Model:')
        attack_model.load_state_dict(torch.load(save_path + '/' + 'trajectory' + '.pkl'))
        max_auc = 0
        max_acc = 0
        val_loss, val_prec1, val_auc, max_auc, max_acc = test_mia_attack_model(args, epoch, attack_model, attack_test_loader, loss_fn, max_auc, max_acc, device)
        print(('Test load Attack Model \t test_loss:{:.4f} \t test_prec1:{:.4f} \t val_prec1:{:.4f} \t val_auc:{:.4f}').format( val_loss, val_prec1, val_prec1, val_auc))
        print('Max AUC: ', max_auc)
        print('Max ACC: ', max_acc)
    else:
        # Quantized evaluation path: only load and test the trained attack model.
        print(f'Load Trained Attack Model:')
        attack_model.load_state_dict(torch.load(save_path + '/' + 'trajectory' + '.pkl'))
        val_loss, val_prec1, val_auc, max_auc, max_acc = test_mia_attack_model(args, epoch, attack_model, attack_test_loader, loss_fn, max_auc, max_acc, device)
        print(('Test load Attack Model \t test_loss:{:.4f} \t test_prec1:{:.4f} \t val_prec1:{:.4f} \t val_auc:{:.4f}').format( val_loss, val_prec1, val_prec1, val_auc))
        print('Max AUC: ', max_auc)
        print('Max ACC: ', max_acc)
    # Persist results to a per-architecture workbook, one sheet per dataset.
    filename =f'{args.model}_mia_result.xlsx'
    try:
        # Append to an existing workbook when present.
        workbook = openpyxl.load_workbook(filename)
    except FileNotFoundError:
        workbook = openpyxl.Workbook()
    if args.data not in workbook.sheetnames:
        # First result for this dataset: create the sheet and its header rows.
        worksheet = workbook.create_sheet(title=args.data)
        worksheet.cell(row=1,column=1,value='FP32-acc')
        worksheet.cell(row=3,column=1,value='title')
        worksheet.cell(row=3,column=2,value='js_div')
        worksheet.cell(row=3,column=4,value='ptq_acc')
        worksheet.cell(row=3,column=5,value='acc_loss')
        worksheet.cell(row=3,column=6,value='AUC')
        worksheet.cell(row=3,column=7,value='ACC')
    else:
        worksheet = workbook[args.data]
    if args.quant_type is None:
        # Full-precision baseline goes on the first row.
        worksheet.cell(row=1,column=4,value='origin_auc')
        worksheet.cell(row=1,column=5,value=max_auc)
        worksheet.cell(row=1,column=7,value='origin_acc')
        worksheet.cell(row=1,column=8,value=max_acc)
    else:
        # Each quant config gets the row matching its position in title_list.
        idx = mia_utils.GlobalVariables.title_list.index(title)
        idx += 4
        worksheet.cell(row=idx,column=1,value=title)
        worksheet.cell(row=idx,column=6,value=max_auc)
        worksheet.cell(row=idx,column=7,value=max_acc)
    workbook.save(filename)
# Collect the distill model's per-sample loss trajectory for one batch.
def get_trajectory(data, target, args, model_path, device='cpu'):
    """Return the loss trajectory of the distill model for a batch.

    For every distill epoch ``1..args.epochs_distill`` the corresponding
    checkpoint is loaded and the per-sample cross-entropy loss against
    ``target`` is recorded, giving a matrix of shape
    ``(batch_size, epochs_distill)`` — one column per distill epoch.

    :param data: input batch tensor
    :param target: ground-truth label batch
    :param args: parsed CLI args (mode, model_distill, quant_type, ...)
    :param model_path: unused here; checkpoints are read from mia_ckpt/{seed}
    :param device: torch device string
    """
    model_name = f'{args.data}_{args.model_distill}'
    print(f"MODEL NAME IS :{model_name}")
    trajectory = None
    # NOTE(review): range(1) means a single seed (s == 0); the `trajectory +
    # trajectory_current` accumulation branch below is currently dead code.
    for s in range(1):
        trajectory_current = None
        model_path_current = 'mia_ckpt/{}'.format(s)
        # One column of the trajectory per distill epoch.
        for i in range(1, args.epochs_distill+1):
            # Load the distill model checkpoint saved at epoch i. The same
            # distill_{mode} directory serves both shadow and target modes.
            if args.quant_type is None:
                cnn_model_target, cnn_params_target = normal.load_model(args, model_path_current+'/distill_'+args.mode, model_name, epoch=i)
            # TODO: adjust the load path so the (PTQ) distill target model
            # weights are loaded here.
            else:
                if args.quant_type == 'FLOAT':
                    title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
                else:
                    title = '%s_%d' % (args.quant_type, args.num_bits)
                cnn_model_target, cnn_params_target = normal.load_model(args, model_path_current+'/distill_'+args.mode, model_name + '_' + title, epoch=i)
            MODEL_target = cnn_model_target.to(device)
            data = data.to(device)
            target = target.to(device)
            # Distill models are evaluated in full precision, so a plain
            # forward pass (no quantize_inference) is used.
            logit_target = MODEL_target(data)
            # Per-sample cross-entropy against the true labels.
            loss = [F.cross_entropy(logit_target_i.unsqueeze(0), target_i.unsqueeze(0)) for (logit_target_i, target_i) in zip(logit_target, target)]
            # list of 0-d tensors -> (batch_size, 1) column vector
            loss = np.array([loss_i.detach().cpu().numpy() for loss_i in loss]).reshape(-1, 1)
            # Append this epoch's column to the trajectory matrix.
            trajectory_current = loss if i == 1 else np.concatenate((trajectory_current, loss), 1)
        # Accumulate across seeds (single seed today, see note above).
        trajectory = trajectory_current if s == 0 else trajectory + trajectory_current
    return trajectory
# (git diff artifact "\ No newline at end of file" removed)
# Layer-config legend for the *_cfg_table lists below (one entry per layer):
# conv: 'C',''/'B'/'BRL'/'BRS',qi,in_ch,out_ch,kernel_size,stride,padding,bias
# relu: 'RL'
# relu6: 'RS'
# inception: 'Inc'
# maxpool: 'MP',kernel_size,stride,padding
# adaptiveavgpool: 'AAP',output_size
# view: 'VW':
# default: x = x.view(x.size(0),-1)
# dropout: 'D'
# MakeLayer: 'ML','BBLK'/'BTNK'/'IRES', ml_idx, blocks
# softmax: 'SM'
# All tables end in a 100-way classifier head (100 classes).
ResNet_18_cfg_table = [
    ['C','BRL',True,3,64,3,1,1,False],
    ['ML','BBLK',0,2],
    ['ML','BBLK',1,2],
    ['ML','BBLK',2,2],
    ['ML','BBLK',3,2],
    ['AAP',1],
    ['VW'],
    ['FC',512,100,True],
]
ResNet_50_cfg_table = [
    ['C','BRL',True,3,64,3,1,1,False],
    ['ML','BTNK',0,3],
    ['ML','BTNK',1,4],
    ['ML','BTNK',2,6],
    ['ML','BTNK',3,3],
    ['AAP',1],
    ['VW'],
    ['FC',2048,100,True]
]
ResNet_152_cfg_table = [
    ['C','BRL',True,3,64,3,1,1,False],
    ['ML','BTNK',0,3],
    ['ML','BTNK',1,8],
    ['ML','BTNK',2,36],
    ['ML','BTNK',3,3],
    ['AAP',1],
    ['VW'],
    ['FC',2048,100,True]
]
MobileNetV2_cfg_table = [
    ['C','BRS',True,3,32,1,1,1,True],
    ['ML','IRES',0,1],
    ['ML','IRES',1,2],
    ['ML','IRES',2,3],
    ['ML','IRES',3,4],
    ['ML','IRES',4,3],
    ['ML','IRES',5,3],
    ['ML','IRES',6,1],
    ['C','BRS',False,320,1280,1,1,0,True],
    ['AAP',1],
    ['VW'],
    ['FC',1280,100,True]
]
# AlexNet without batch norm: plain conv + standalone ReLU entries.
AlexNet_cfg_table = [
    ['C','',True,3,32,3,1,1,True],
    ['RL'],
    ['MP',2,2,0],
    ['C','',False,32,64,3,1,1,True],
    ['RL'],
    ['MP',2,2,0],
    ['C','',False,64,128,3,1,1,True],
    ['RL'],
    ['C','',False,128,256,3,1,1,True],
    ['RL'],
    ['C','',False,256,256,3,1,1,True],
    ['RL'],
    ['MP',3,2,0],
    ['VW'],
    ['D',0.5],
    ['FC',2304,1024,True],
    ['RL'],
    ['D',0.5],
    ['FC',1024,512,True],
    ['RL'],
    ['FC',512,100,True]
]
# AlexNet with fused conv+BN+ReLU ('BRL') blocks.
AlexNet_BN_cfg_table = [
    ['C','BRL',True,3,32,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,32,64,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,64,128,3,1,1,True],
    ['C','BRL',False,128,256,3,1,1,True],
    ['C','BRL',False,256,256,3,1,1,True],
    ['MP',3,2,0],
    ['VW'],
    ['D',0.5],
    ['FC',2304,1024,True],
    ['RL'],
    ['D',0.5],
    ['FC',1024,512,True],
    ['RL'],
    ['FC',512,100,True]
]
VGG_16_cfg_table = [
    ['C','BRL',True,3,64,3,1,1,True],
    ['C','BRL',False,64,64,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,64,128,3,1,1,True],
    ['C','BRL',False,128,128,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,128,256,3,1,1,True],
    ['C','BRL',False,256,256,3,1,1,True],
    ['C','BRL',False,256,256,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,256,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['MP',2,2,0],
    ['VW'],
    ['FC',512,4096,True],
    ['RL'],
    ['D',0.5],
    ['FC',4096,4096,True],
    ['RL'],
    ['D',0.5],
    ['FC',4096,100,True]
]
VGG_19_cfg_table = [
    ['C','BRL',True,3,64,3,1,1,True],
    ['C','BRL',False,64,64,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,64,128,3,1,1,True],
    ['C','BRL',False,128,128,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,128,256,3,1,1,True],
    ['C','BRL',False,256,256,3,1,1,True],
    ['C','BRL',False,256,256,3,1,1,True],
    ['C','BRL',False,256,256,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,256,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['MP',2,2,0],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['C','BRL',False,512,512,3,1,1,True],
    ['MP',2,2,0],
    ['VW'],
    ['FC',512,4096,True],
    ['RL'],
    ['D',0.5],
    ['FC',4096,4096,True],
    ['RL'],
    ['D',0.5],
    ['FC',4096,100,True]
]
# Inception-BN: 'Inc' entries index rows of inc_ch_table / inc_cfg_table.
Inception_BN_cfg_table = [
    ['C','',True,3,64,3,1,1,True],
    ['RL'],
    ['C','',False,64,64,3,1,1,True],
    ['RL'],
    ['Inc',0],
    ['Inc',1],
    ['MP',3,2,1],
    ['Inc',2],
    ['Inc',3],
    ['Inc',4],
    ['Inc',5],
    ['Inc',6],
    ['MP',3,2,1],
    ['Inc',7],
    ['Inc',8],
    ['AAP',1],
    ['C','',False,1024,100,1,1,0,True],
    ['VW']
]
# Architecture name -> layer config table.
model_cfg_table = {
    'AlexNet'      : AlexNet_cfg_table,
    'AlexNet_BN'   : AlexNet_BN_cfg_table,
    'VGG_16'       : VGG_16_cfg_table,
    'VGG_19'       : VGG_19_cfg_table,
    'Inception_BN' : Inception_BN_cfg_table,
    'ResNet_18'    : ResNet_18_cfg_table,
    'ResNet_50'    : ResNet_50_cfg_table,
    'ResNet_152'   : ResNet_152_cfg_table,
    'MobileNetV2'  : MobileNetV2_cfg_table
}
# Each row is the channel parameter table for one Inception ('Inc') block.
inc_ch_table=[
    [ 64, 64, 96,128, 16, 32, 32],#3a
    [256,128,128,192, 32, 96, 64],#3b
    [480,192, 96,208, 16, 48, 64],#4a
    [512,160,112,224, 24, 64, 64],#4b
    [512,128,128,256, 24, 64, 64],#4c
    [512,112,144,288, 32, 64, 64],#4d
    [528,256,160,320, 32,128,128],#4e
    [832,256,160,320, 32,128,128],#5a
    [832,384,192,384, 48,128,128] #5b
]
# br0,br1,br2,br3 <- br1x1,br3x3,br5x5,brM
# Each sub-list describes one branch of the Inc block; every conv implicitly
# uses 'BRL' (conv+BN+ReLU) and bias=False.
# The conv's 2nd and 3rd parameters are channel *indices* into the matching
# inc_ch_table row: since all Inc blocks share the same structure and differ
# only in channel counts, indices keep this table reusable.
# A concat over the four branches follows each block; being the only possible
# join, it is not spelled out here.
# conv: 'C', ('BRL' default), in_ch_idex, out_ch_idx, kernel_size, stride, padding, (bias: False default)
# maxpool: 'MP', kernel_size, stride, padding
# relu: 'RL'
inc_cfg_table = [
    [
        ['C',0,1,1,1,0]
    ],
    [
        ['C',0,2,1,1,0],
        ['C',2,3,3,1,1]
    ],
    [
        ['C',0,4,1,1,0],
        ['C',4,5,5,1,2]
    ],
    [
        ['MP',3,1,1],
        ['RL'],
        ['C',0,6,1,1,0]
    ]
]
# BasicBlock (ResNet-18/34 style)
# row layout: downsample, inplanes, planes, planes*expansion, stride, 1 (default stride and group)
bblk_ch_table = [
    [False, 64, 64, 64,1,1], #layer1,first
    [False, 64, 64, 64,1,1], # other
    [True, 64,128,128,2,1],  #layer2
    [False,128,128,128,1,1],
    [True, 128,256,256,2,1], #layer3
    [False,256,256,256,1,1],
    [True, 256,512,512,2,1], #layer4
    [False,512,512,512,1,1]
]
# conv: 'C','B'/'BRL'/'BRS', in_ch_idx, out_ch_idx, kernel_sz, stride_idx, padding, groups_idx, bias
# add: 'AD', unconditional — the two entries in `outs` are added when
# `unconditional` is True or the block's flag is True.
bblk_cfg_table = [
    [
        ['C','BRL',1,2,3,4,1,5,False],
        ['C','B'  ,2,3,3,5,1,5,False],
    ],
    # downsample branch, used only when the row's downsample flag is True
    [
        ['C','B'  ,1,3,1,4,0,5,False]
    ],
    # ops applied after the two branches merge
    [
        ['AD',True],
        ['RL']
    ]
]
# BottleNeck (ResNet-50/152 style)
# row layout: downsample, inplanes, planes, planes*expansion, stride, 1 (default stride and group)
btnk_ch_table = [
    [True,    64, 64, 256,1,1], #layer1,first
    [False,  256, 64, 256,1,1], # other
    [True,   256,128, 512,2,1], #layer2
    [False,  512,128, 512,1,1],
    [True,   512,256,1024,2,1], #layer3
    [False, 1024,256,1024,1,1],
    [True,  1024,512,2048,2,1], #layer4
    [False, 2048,512,2048,1,1]
]
# conv: 'C','B'/'BRL'/'BRS', in_ch_idx, out_ch_idx, kernel_sz, stride_idx, padding, groups_idx (bias: True default)
# add: 'AD', unconditional — the two entries in `outs` are added when
# `unconditional` is True or the block's flag is True.
btnk_cfg_table = [
    [
        ['C','BRL',1,2,1,5,0,5,False],
        ['C','BRL',2,2,3,4,1,5,False],
        ['C','B'  ,2,3,1,5,0,5,False]
    ],
    # downsample branch, used only when the row's downsample flag is True
    [
        ['C','B'  ,1,3,1,4,0,5,False]
    ],
    # ops applied after the two branches merge
    [
        ['AD',True],
        ['RL']
    ]
]
# InvertedResidual / LinearBottleNeck (MobileNetV2 style)
# row layout: identity_flag, in_ch, out_ch, in_ch*expand_ratio, stride, 1 (default stride and group)
ires_ch_table = [
    [False, 32, 16,  32,1,1], #layer1,first
    [ True, 16, 16,  16,1,1], # other
    [False, 16, 24,  96,2,1], #layer2
    [ True, 24, 24, 144,1,1],
    [False, 24, 32, 144,2,1], #layer3
    [ True, 32, 32, 192,1,1],
    [False, 32, 64, 192,2,1], #layer4
    [ True, 64, 64, 384,1,1],
    [False, 64, 96, 384,1,1], #layer5
    [ True, 96, 96, 576,1,1],
    [False, 96,160, 576,1,1], #layer6
    [ True,160,160, 960,1,1],
    [False,160,320, 960,1,1], #layer6
    [ True,320,320,1920,1,1]
]
# conv: 'C','B'/'BRL'/'BRS', in_ch_idx, out_ch_idx, kernel_sz, stride_idx, padding, groups_idx (bias: True default)
# add: 'AD', unconditional — the two entries in `outs` are added when
# `unconditional` is True or the block's flag is True.
ires_cfg_table = [
    [
        ['C','BRS',1,3,1,5,0,5,True],
        ['C','BRS',3,3,3,4,1,3,True],
        ['C','B'  ,3,2,1,5,0,5,True]
    ],
    # identity branch is empty (plain skip connection)
    [
    ],
    # ops applied after the branches merge
    [
        ['AD',False] # conditional add: only when the row's identity flag is True
    ]
]
# (git diff artifact "\ No newline at end of file" removed)
import os
import torch
import torchvision.datasets as dsets
import torchvision.transforms as transforms
class DataLoader(object):
    """
    Thin wrapper that builds train/val/test loaders for CIFAR data sets.
    """

    def __init__(self, dataset, batch_size):
        """
        Create data loaders for the given data set.

        :param dataset: data set name, "cifar10" or "cifar100"
        :param batch_size: mini-batch size shared by all three loaders
        """
        self.dataset = dataset
        self.batch_size = batch_size
        self.n_threads = 4  # num_workers for each torch DataLoader
        self.data_path_root = '/lustre/datasets/'
        if self.dataset in ["cifar100", "cifar10"]:
            self.train_loader, self.val_loader, self.test_loader = self.cifar(
                dataset=self.dataset)
        else:
            assert False, "invalid data set"

    def getloader(self):
        """Return (train_loader, val_loader, test_loader)."""
        return self.train_loader, self.val_loader, self.test_loader

    def cifar(self, dataset):
        """Build CIFAR-10/100 loaders with an 80/20 train/val split."""
        # Per-dataset normalization statistics (mean, std).
        stats = {
            "cifar10": ([0.49139968, 0.48215827, 0.44653124],
                        [0.24703233, 0.24348505, 0.26158768]),
            "cifar100": ([0.50705882, 0.48666667, 0.44078431],
                         [0.26745098, 0.25568627, 0.27607843]),
        }
        if dataset not in stats:
            assert False, "Invalid cifar dataset"
        norm_mean, norm_std = stats[dataset]
        # Training uses crop + flip augmentation; evaluation only normalizes.
        train_transform = transforms.Compose([
            transforms.RandomCrop(32, padding=2),
            transforms.RandomHorizontalFlip(),
            transforms.ToTensor(),
            transforms.Normalize(norm_mean, norm_std)
        ])
        eval_transform = transforms.Compose([
            transforms.ToTensor(),
            transforms.Normalize(norm_mean, norm_std)
        ])
        if self.dataset == "cifar10":
            dset_cls, folder = dsets.CIFAR10, 'CIFAR10'
        elif self.dataset == "cifar100":
            dset_cls, folder = dsets.CIFAR100, 'CIFAR100'
        else:
            assert False, "invalid data set"
        data_path = self.data_path_root + folder
        alltrainset = dset_cls(root=data_path, train=True, download=False,
                               transform=train_transform)
        testset = dset_cls(data_path, train=False, download=False,
                           transform=eval_transform)
        # Split the official training set 80/20 into train/val index subsets.
        train_size = int(0.8 * len(alltrainset))
        val_size = int(0.2 * len(alltrainset))
        train_idx, val_idx = torch.utils.data.random_split(
            range(train_size + val_size), [train_size, val_size])
        trainset = torch.utils.data.Subset(alltrainset, train_idx)
        valset = torch.utils.data.Subset(alltrainset, val_idx)

        def make_loader(ds, shuffle):
            # All three loaders share batch size, worker count and pinning.
            return torch.utils.data.DataLoader(
                ds, batch_size=self.batch_size, shuffle=shuffle,
                num_workers=self.n_threads, pin_memory=True)

        return (make_loader(trainset, True),
                make_loader(valset, False),
                make_loader(testset, False))
import torch
import os
from torchvision import datasets, transforms, utils
from torch.utils.data import sampler
from PIL import Image
from torch.utils.data import Subset, DataLoader, ConcatDataset
import torch.utils.data as data
from torch._utils import _accumulate
from torch import randperm
import numpy as np
import pandas as pd
def dataset_split(dataset, lengths):
    """Deterministically split *dataset* into consecutive Subsets of the given lengths.

    The index permutation uses a fixed seed (1) so every call — across runs and
    processes — produces the identical target/shadow/distill partition.

    Args:
        dataset: any map-style dataset (supports ``len()`` and integer indexing).
        lengths: list of split sizes; must sum to ``len(dataset)``.

    Returns:
        list of ``Subset``, one per entry in *lengths*, in order.

    Raises:
        ValueError: if ``sum(lengths) != len(dataset)``.
    """
    # stdlib replacement for the private torch._utils._accumulate helper
    from itertools import accumulate

    if sum(lengths) != len(dataset):
        raise ValueError("Sum of input lengths does not equal the length of the input dataset!")
    indices = list(range(sum(lengths)))
    # NOTE: seeds the *global* NumPy RNG as a side effect; kept as-is for
    # backward-compatible reproducibility of previously generated splits.
    np.random.seed(1)
    np.random.shuffle(indices)
    return [Subset(dataset, indices[offset - length:offset])
            for offset, length in zip(accumulate(lengths), lengths)]
class SUBCINIC10(data.Dataset):
    """CINIC-10 subset for membership-inference experiments.

    Merges the train/test/valid ImageFolders into one pool, then deterministically
    re-partitions it (via ``dataset_split`` with a fixed seed) into target / shadow /
    distill train/test subsets of sizes [10000, 10000, 10000, 10000, 220000, 10000].

    Args:
        mode:  'target', 'shadow', or any string containing 'distill'.
        aug:   if True, use the augmented pipeline for the *train* subset;
               test subsets are served un-augmented regardless.
        train: select the train (True) or test (False) subset for the mode.
    """
    def __init__(self, mode, aug, train):
        self.img_size = 32
        self.num_classes = 10
        # CINIC-10 per-channel statistics used for normalization.
        self.mean = [0.47889522, 0.47227842, 0.43047404]
        self.std = [0.24205776, 0.23828046, 0.25874835]
        normalize = transforms.Normalize(mean=self.mean, std=self.std)
        self.augmented = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4),transforms.ToTensor(), normalize])
        self.normalized = transforms.Compose([transforms.ToTensor(), normalize])
        # NOTE(review): is augmentation still wanted on the test/val sets? (original open question)
        self.aug_trainset = datasets.ImageFolder(root='/lustre/I/mazhihong/data/CINIC-10/train', transform=self.augmented)
        self.aug_testset = datasets.ImageFolder(root='/lustre/I/mazhihong/data/CINIC-10/test', transform=self.augmented)
        self.aug_validset = datasets.ImageFolder(root='/lustre/I/mazhihong/data/CINIC-10/valid', transform=self.augmented)
        self.trainset = datasets.ImageFolder(root='/lustre/I/mazhihong/data/CINIC-10/train', transform=self.normalized)
        self.testset = datasets.ImageFolder(root='/lustre/I/mazhihong/data/CINIC-10/test', transform=self.normalized)
        self.validset = datasets.ImageFolder(root='/lustre/I/mazhihong/data/CINIC-10/valid', transform=self.normalized)
        # Pool all three official splits; MIA re-partitions them below.
        self.aug_dataset = ConcatDataset([self.aug_trainset, self.aug_testset, self.aug_validset])
        self.dataset = ConcatDataset([self.trainset, self.testset, self.validset])
        # Same fixed permutation for both pipelines, so aug/non-aug subsets align index-wise.
        self.aug_target_trainset, self.aug_target_testset, self.aug_shadow_trainset, self.aug_shadow_testset, self.aug_distill_trainset, self.aug_distill_testset = dataset_split(self.aug_dataset, [10000, 10000, 10000, 10000, 220000, 10000])
        self.target_trainset, self.target_testset, self.shadow_trainset, self.shadow_testset, self.distill_trainset, self.distill_testset = dataset_split(self.dataset, [10000, 10000, 10000, 10000, 220000, 10000])
        if mode == 'target':
            if aug:
                if train:
                    self.dataset = self.aug_target_trainset
                # no augmentation on the test set
                else:
                    # self.dataset = self.aug_target_testset
                    self.dataset = self.target_testset
            else:
                if train:
                    self.dataset = self.target_trainset
                else:
                    self.dataset = self.target_testset
        elif mode == 'shadow':
            if aug:
                if train:
                    self.dataset = self.aug_shadow_trainset
                # else:
                #     self.dataset = self.aug_shadow_testset
                else:
                    self.dataset = self.shadow_testset
            else:
                if train:
                    self.dataset = self.shadow_trainset
                else:
                    self.dataset = self.shadow_testset
        elif 'distill' in mode:
            if aug:
                if train:
                    self.dataset = self.aug_distill_trainset
                # else:
                #     self.dataset = self.aug_distill_testset
                else:
                    self.dataset = self.distill_testset
            else:
                if train:
                    self.dataset = self.distill_trainset
                else:
                    self.dataset = self.distill_testset
        self.index = range(int(len(self.dataset)))
    def __getitem__(self, idx):
        # Returns (image, label, sample_index); the index is used by MIA bookkeeping.
        return self.dataset[idx][0], self.dataset[idx][1], self.index[idx]
    def __len__(self):
        return len(self.index)
class CINIC10:
    """DataLoader factory for the CINIC-10 MIA splits.

    Builds a train loader (augmented when ``aug`` is True) plus the matching
    test loader for the requested mode; test data is never augmented.
    """
    def __init__(self, mode, aug, batch_size=128, add_trigger=False):
        # add_trigger is accepted for API parity with sibling classes but is unused here.
        self.batch_size = batch_size
        self.img_size = 32
        self.num_classes = 10
        if aug:
            if mode == 'target':
                self.aug_target_trainset = SUBCINIC10(mode, aug, True)
                self.aug_target_train_loader = torch.utils.data.DataLoader(self.aug_target_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                # self.aug_target_testset = SUBCINIC10(mode, aug, False)
                # self.aug_target_test_loader = torch.utils.data.DataLoader(self.aug_target_testset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.target_testset = SUBCINIC10(mode, False, False)
                self.target_test_loader = torch.utils.data.DataLoader(self.target_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif mode == 'shadow':
                self.aug_shadow_trainset = SUBCINIC10(mode, aug, True)
                self.aug_shadow_train_loader = torch.utils.data.DataLoader(self.aug_shadow_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                # self.aug_shadow_testset = SUBCINIC10(mode, aug, False)
                # self.aug_shadow_test_loader = torch.utils.data.DataLoader(self.aug_shadow_testset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.shadow_testset = SUBCINIC10(mode, False, False)
                self.shadow_test_loader = torch.utils.data.DataLoader(self.shadow_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif 'distill' in mode:
                self.aug_distill_trainset = SUBCINIC10(mode, aug, True)
                self.aug_distill_train_loader = torch.utils.data.DataLoader(self.aug_distill_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                # self.aug_distill_testset = SUBCINIC10(mode, aug, False)
                # self.aug_distill_test_loader = torch.utils.data.DataLoader(self.aug_distill_testset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.distill_testset = SUBCINIC10(mode, False, False)
                self.distill_test_loader = torch.utils.data.DataLoader(self.distill_testset, batch_size=batch_size, shuffle=True, num_workers=2)
        else:
            if mode == 'target':
                self.target_trainset = SUBCINIC10(mode, aug, True)
                self.target_train_loader = torch.utils.data.DataLoader(self.target_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.target_testset = SUBCINIC10(mode, aug, False)
                self.target_test_loader = torch.utils.data.DataLoader(self.target_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif mode == 'shadow':
                self.shadow_trainset = SUBCINIC10(mode, aug, True)
                self.shadow_train_loader = torch.utils.data.DataLoader(self.shadow_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.shadow_testset = SUBCINIC10(mode, aug, False)
                self.shadow_test_loader = torch.utils.data.DataLoader(self.shadow_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif 'distill' in mode:
                self.distill_trainset = SUBCINIC10(mode, aug, True)
                self.distill_train_loader = torch.utils.data.DataLoader(self.distill_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.distill_testset = SUBCINIC10(mode, aug, False)
                self.distill_test_loader = torch.utils.data.DataLoader(self.distill_testset, batch_size=batch_size, shuffle=True, num_workers=2)
class SUBCIFAR10(data.Dataset):
    """CIFAR-10 subset for membership-inference experiments.

    Merges the official train and test sets (their original split is meaningless
    under MIA), then deterministically re-partitions the 60k pool into
    target/shadow train/test (10k each) and a 20k distill-train subset; the
    distill-test subset aliases the shadow-test subset.

    Args:
        mode:  'target', 'shadow', or any string containing 'distill'.
        aug:   if True, use the augmented pipeline for the *train* subset;
               test subsets are served un-augmented regardless.
        train: select the train (True) or test (False) subset for the mode.
    """
    def __init__(self, mode, aug, train):
        self.img_size = 32
        self.num_classes = 10
        self.num_test = 10000
        self.num_train = 50000
        # NOTE(review): these are the ImageNet normalization constants, not the
        # usual CIFAR-10 ones used elsewhere in this file — confirm intentional.
        self.mean = [0.485, 0.456, 0.406]
        self.std = [0.229, 0.224, 0.225]
        normalize = transforms.Normalize(mean=self.mean, std=self.std)
        self.augmented = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4),transforms.ToTensor(), normalize])
        self.normalized = transforms.Compose([transforms.ToTensor(), normalize])
        self.aug_trainset = datasets.CIFAR10(root='/lustre/I/mazhihong/data/CIFAR10', train=True, download=True, transform=self.augmented)
        self.aug_testset = datasets.CIFAR10(root='/lustre/I/mazhihong/data/CIFAR10', train=False, download=True, transform=self.augmented)
        self.trainset = datasets.CIFAR10(root='/lustre/I/mazhihong/data/CIFAR10', train=True, download=False, transform=self.normalized)
        self.testset = datasets.CIFAR10(root='/lustre/I/mazhihong/data/CIFAR10', train=False, download=False, transform=self.normalized)
        # Merge train+test: under MIA the official split is meaningless, so we re-partition.
        self.aug_dataset = ConcatDataset([self.aug_trainset, self.aug_testset])
        self.dataset = ConcatDataset([self.trainset, self.testset])
        # Re-split the merged pool; same fixed permutation for both pipelines.
        self.aug_target_trainset, self.aug_target_testset, self.aug_shadow_trainset, self.aug_shadow_testset, self.aug_distill_trainset = dataset_split(self.aug_dataset, [10000, 10000, 10000, 10000, 20000])
        self.aug_distill_testset = self.aug_shadow_testset
        self.target_trainset, self.target_testset, self.shadow_trainset, self.shadow_testset, self.distill_trainset = dataset_split(self.dataset, [10000, 10000, 10000, 10000, 20000])
        self.distill_testset = self.shadow_testset
        if mode == 'target':
            if aug:
                if train:
                    self.dataset = self.aug_target_trainset
                else:
                    # self.dataset = self.aug_target_testset
                    self.dataset = self.target_testset
            else:
                if train:
                    self.dataset = self.target_trainset
                else:
                    self.dataset = self.target_testset
        elif mode == 'shadow':
            if aug:
                if train:
                    self.dataset = self.aug_shadow_trainset
                else:
                    # self.dataset = self.aug_shadow_testset
                    self.dataset = self.shadow_testset
            else:
                if train:
                    self.dataset = self.shadow_trainset
                else:
                    self.dataset = self.shadow_testset
        elif 'distill' in mode:
            if aug:
                if train:
                    self.dataset = self.aug_distill_trainset
                else:
                    # self.dataset = self.aug_distill_testset
                    self.dataset = self.distill_testset
            else:
                if train:
                    self.dataset = self.distill_trainset
                else:
                    self.dataset = self.distill_testset
        self.index = range(int(len(self.dataset)))
    def __getitem__(self, idx):
        # Returns (image, label, sample_index); the index is used by MIA bookkeeping.
        return self.dataset[idx][0], self.dataset[idx][1], self.index[idx]
    def __len__(self):
        return len(self.index)
class CIFAR10:
    """DataLoader factory for the CIFAR-10 MIA splits.

    Builds a train loader (augmented when ``aug`` is True) plus the matching
    test loader for the requested mode; test data is never augmented.
    """
    def __init__(self, mode, aug, batch_size=128, add_trigger=False):
        # add_trigger is accepted for API parity with sibling classes but is unused here.
        self.batch_size = batch_size
        self.img_size = 32
        self.num_classes = 10
        if aug:
            if mode == 'target':
                self.aug_target_trainset = SUBCIFAR10(mode, aug, True)
                self.aug_target_train_loader = torch.utils.data.DataLoader(self.aug_target_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                # self.aug_target_testset = SUBCIFAR10(mode, aug, False)
                # self.aug_target_test_loader = torch.utils.data.DataLoader(self.aug_target_testset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.target_testset = SUBCIFAR10(mode, False ,False)
                self.target_test_loader = torch.utils.data.DataLoader(self.target_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif mode == 'shadow':
                self.aug_shadow_trainset = SUBCIFAR10(mode, aug, True)
                self.aug_shadow_train_loader = torch.utils.data.DataLoader(self.aug_shadow_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                # self.aug_shadow_testset = SUBCIFAR10(mode, aug, False)
                # self.aug_shadow_test_loader = torch.utils.data.DataLoader(self.aug_shadow_testset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.shadow_testset = SUBCIFAR10(mode,False, False)
                self.shadow_test_loader = torch.utils.data.DataLoader(self.shadow_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif 'distill' in mode:
                self.aug_distill_trainset = SUBCIFAR10(mode, aug, True)
                self.aug_distill_train_loader = torch.utils.data.DataLoader(self.aug_distill_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                # self.aug_distill_testset = SUBCIFAR10(mode, aug, False)
                # self.aug_distill_test_loader = torch.utils.data.DataLoader(self.aug_distill_testset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.distill_testset = SUBCIFAR10(mode, False, False)
                self.distill_test_loader = torch.utils.data.DataLoader(self.distill_testset, batch_size=batch_size, shuffle=True, num_workers=2)
        else:
            if mode == 'target':
                self.target_trainset = SUBCIFAR10(mode, aug, True)
                self.target_train_loader = torch.utils.data.DataLoader(self.target_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.target_testset = SUBCIFAR10(mode, aug, False)
                self.target_test_loader = torch.utils.data.DataLoader(self.target_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif mode == 'shadow':
                self.shadow_trainset = SUBCIFAR10(mode, aug, True)
                self.shadow_train_loader = torch.utils.data.DataLoader(self.shadow_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.shadow_testset = SUBCIFAR10(mode, aug, False)
                self.shadow_test_loader = torch.utils.data.DataLoader(self.shadow_testset, batch_size=batch_size, shuffle=True, num_workers=2)
            elif 'distill' in mode:
                self.distill_trainset = SUBCIFAR10(mode, aug, True)
                self.distill_train_loader = torch.utils.data.DataLoader(self.distill_trainset, batch_size=batch_size, shuffle=True, num_workers=2)
                self.distill_testset = SUBCIFAR10(mode, aug, False)
                self.distill_test_loader = torch.utils.data.DataLoader(self.distill_testset, batch_size=batch_size, shuffle=True, num_workers=2)
class SUBCIFAR100(data.Dataset):
    """CIFAR-100 subset for membership-inference experiments.

    Same scheme as SUBCIFAR10: merge official train+test, then deterministically
    re-partition into target/shadow train/test (10k each) and a 20k distill-train
    subset; the distill-test subset aliases the shadow-test subset.

    Args:
        mode:  'target', 'shadow', or any string containing 'distill'.
        aug:   if True, use the augmented pipeline for the *train* subset;
               test subsets are served un-augmented regardless.
        train: select the train (True) or test (False) subset for the mode.
    """
    def __init__(self, mode, aug, train):
        self.img_size = 32
        self.num_classes = 100
        self.num_test = 10000
        self.num_train = 50000
        # CIFAR-100 per-channel statistics used for normalization.
        self.mean=[0.507, 0.487, 0.441]
        self.std=[0.267, 0.256, 0.276]
        normalize = transforms.Normalize(mean=self.mean, std=self.std)
        self.augmented = transforms.Compose([transforms.RandomHorizontalFlip(), transforms.RandomCrop(32, padding=4),transforms.ToTensor(), normalize])
        self.normalized = transforms.Compose([transforms.ToTensor(), normalize])
        self.aug_trainset = datasets.CIFAR100(root='/lustre/I/mazhihong/data/CIFAR100', train=True, download=True, transform=self.augmented)
        self.aug_testset = datasets.CIFAR100(root='/lustre/I/mazhihong/data/CIFAR100', train=False, download=True, transform=self.augmented)
        self.trainset = datasets.CIFAR100(root='/lustre/I/mazhihong/data/CIFAR100', train=True, download=True, transform=self.normalized)
        self.testset = datasets.CIFAR100(root='/lustre/I/mazhihong/data/CIFAR100', train=False, download=True, transform=self.normalized)
        # Merge train+test: under MIA the official split is meaningless, so we re-partition.
        self.aug_dataset = ConcatDataset([self.aug_trainset, self.aug_testset])
        self.dataset = ConcatDataset([self.trainset, self.testset])
        self.aug_target_trainset, self.aug_target_testset, self.aug_shadow_trainset, self.aug_shadow_testset, self.aug_distill_trainset = dataset_split(self.aug_dataset, [10000, 10000, 10000, 10000, 20000])
        self.aug_distill_testset = self.aug_shadow_testset
        self.target_trainset, self.target_testset, self.shadow_trainset, self.shadow_testset, self.distill_trainset = dataset_split(self.dataset, [10000, 10000, 10000, 10000, 20000])
        self.distill_testset = self.shadow_testset
        if mode == 'target':
            if aug:
                if train:
                    self.dataset = self.aug_target_trainset
                else:
                    # self.dataset = self.aug_target_testset
                    self.dataset = self.target_testset
            else:
                if train:
                    self.dataset = self.target_trainset
                else:
                    self.dataset = self.target_testset
        elif mode == 'shadow':
            if aug:
                if train:
                    self.dataset = self.aug_shadow_trainset
                else:
                    # self.dataset = self.aug_shadow_testset
                    self.dataset = self.shadow_testset
            else:
                if train:
                    self.dataset = self.shadow_trainset
                else:
                    self.dataset = self.shadow_testset
        elif 'distill' in mode:
            if aug:
                if train:
                    self.dataset = self.aug_distill_trainset
                # else:
                #     self.dataset = self.aug_distill_testset
                else:
                    self.dataset = self.distill_testset
            else:
                if train:
                    self.dataset = self.distill_trainset
                else:
                    self.dataset = self.distill_testset
        self.index = range(int(len(self.dataset)))
    def __getitem__(self, idx):
        # Returns (image, label, sample_index); the index is used by MIA bookkeeping.
        return self.dataset[idx][0], self.dataset[idx][1], self.index[idx]
    def __len__(self):
        return len(self.index)
class CIFAR100:
    """DataLoader factory for the CIFAR-100 MIA splits.

    Builds a train loader (augmented when ``aug`` is True) plus the matching
    test loader for the requested mode; test data is never augmented.
    """
    def __init__(self, mode, aug, batch_size=128):
        self.batch_size = batch_size
        self.img_size = 32
        self.num_classes = 100
        if aug:
            if mode == 'target':
                self.aug_target_trainset = SUBCIFAR100(mode, aug, True)
                self.aug_target_train_loader = torch.utils.data.DataLoader(self.aug_target_trainset, batch_size=batch_size, shuffle=True, num_workers=1)
                # self.aug_target_testset = SUBCIFAR100(mode, aug, False)
                # self.aug_target_test_loader = torch.utils.data.DataLoader(self.aug_target_testset, batch_size=batch_size, shuffle=True, num_workers=1)
                self.target_testset = SUBCIFAR100(mode, False, False)
                self.target_test_loader = torch.utils.data.DataLoader(self.target_testset, batch_size=batch_size, shuffle=True, num_workers=1)
            elif mode == 'shadow':
                self.aug_shadow_trainset = SUBCIFAR100(mode, aug, True)
                self.aug_shadow_train_loader = torch.utils.data.DataLoader(self.aug_shadow_trainset, batch_size=batch_size, shuffle=True, num_workers=1)
                # self.aug_shadow_testset = SUBCIFAR100(mode, aug, False)
                # self.aug_shadow_test_loader = torch.utils.data.DataLoader(self.aug_shadow_testset, batch_size=batch_size, shuffle=True, num_workers=1)
                self.shadow_testset = SUBCIFAR100(mode, False, False)
                self.shadow_test_loader = torch.utils.data.DataLoader(self.shadow_testset, batch_size=batch_size, shuffle=True, num_workers=1)
            elif 'distill' in mode:
                self.aug_distill_trainset = SUBCIFAR100(mode, aug, True)
                self.aug_distill_train_loader = torch.utils.data.DataLoader(self.aug_distill_trainset, batch_size=batch_size, shuffle=True, num_workers=1)
                # self.aug_distill_testset = SUBCIFAR100(mode, aug, False)
                # self.aug_distill_test_loader = torch.utils.data.DataLoader(self.aug_distill_testset, batch_size=batch_size, shuffle=True, num_workers=1)
                self.distill_testset = SUBCIFAR100(mode, False, False)
                self.distill_test_loader = torch.utils.data.DataLoader(self.distill_testset, batch_size=batch_size, shuffle=True, num_workers=1)
        else:
            if mode == 'target':
                self.target_trainset = SUBCIFAR100(mode, aug, True)
                self.target_train_loader = torch.utils.data.DataLoader(self.target_trainset, batch_size=batch_size, shuffle=True, num_workers=1)
                self.target_testset = SUBCIFAR100(mode, aug, False)
                self.target_test_loader = torch.utils.data.DataLoader(self.target_testset, batch_size=batch_size, shuffle=True, num_workers=1)
            elif mode == 'shadow':
                self.shadow_trainset = SUBCIFAR100(mode, aug, True)
                self.shadow_train_loader = torch.utils.data.DataLoader(self.shadow_trainset, batch_size=batch_size, shuffle=True, num_workers=1)
                self.shadow_testset = SUBCIFAR100(mode, aug, False)
                self.shadow_test_loader = torch.utils.data.DataLoader(self.shadow_testset, batch_size=batch_size, shuffle=True, num_workers=1)
            elif 'distill' in mode:
                self.distill_trainset = SUBCIFAR100(mode, aug, True)
                self.distill_train_loader = torch.utils.data.DataLoader(self.distill_trainset, batch_size=batch_size, shuffle=True, num_workers=1)
                self.distill_testset = SUBCIFAR100(mode, aug, False)
                self.distill_test_loader = torch.utils.data.DataLoader(self.distill_testset, batch_size=batch_size, shuffle=True, num_workers=1)
class AverageMeter(object):
    """Tracks the most recent value and a running (weighted) mean.

    Attributes:
        val:   last value passed to ``update``.
        sum:   weighted sum of all values seen since ``reset``.
        count: total weight accumulated since ``reset``.
        avg:   running mean, ``sum / count``.
    """

    def __init__(self):
        self.reset()

    def reset(self):
        """Zero out all statistics."""
        self.val, self.avg = 0, 0
        self.sum, self.count = 0, 0

    def update(self, val, n=1):
        """Record *val* with weight *n* and refresh the running mean."""
        self.val = val
        self.sum = self.sum + val * n
        self.count = self.count + n
        self.avg = self.sum / self.count
def accuracy(output, target, topk=(1,)):
    """Computes the precision@k for the specified values of k"""
    with torch.no_grad():
        k_max = max(topk)
        n = target.size(0)
        # top-k predicted class indices per sample, transposed to (k, batch)
        _, topk_idx = output.topk(k_max, 1, True, True)
        topk_idx = topk_idx.t()
        # hits[j, i] == True when sample i's j-th ranked prediction equals its label
        hits = topk_idx.eq(target.view(1, -1).expand_as(topk_idx))
        return [
            hits[:k].reshape(-1).float().sum(0, keepdim=True).mul_(100.0 / n)
            for k in topk
        ]
\ No newline at end of file
import openpyxl
from mia_utils import *
import module
import gol
import argparse
import numpy as np
import torch
if __name__ == '__main__':
    # Computes, for every quantized variant of a target model, the JS divergence
    # between its loss trajectory and the full-precision model's trajectory, and
    # writes the results into an existing Excel sheet.
    parser = argparse.ArgumentParser(description='DIV_TrajectoryMIA')
    # NOTE(review): help= is given a list; argparse expects a string (works, but renders oddly).
    parser.add_argument('--model', type=str, default='resnet18', help=['AlexNet','AlexNet_BN','VGG_16','VGG_19','Inception_BN','ResNet_18','ResNet_50','ResNet_152','MobileNetV2'])
    parser.add_argument('--data', type=str, default='cifar10', help=['cinic10', 'cifar10', 'cifar100'])
    args = parser.parse_args()
    # trajectory of the full-precision target model (reference distribution)
    data_path = f'mia_ckpt/0/target/{args.data}_{args.model}/trajectory_test_data.npy'
    # compute all JS divergences in one pass
    gol._init()
    quant_type_list = ['INT','POT']
    filename =f'{args.model}_mia_result.xlsx'
    # The workbook and the per-dataset sheet must already exist.
    workbook = openpyxl.load_workbook(filename)
    worksheet = workbook[args.data]
    for quant_type in quant_type_list:
        num_bit_list = numbit_list(quant_type)
        for num_bits in num_bit_list:
            e_bit_list = ebit_list(quant_type,num_bits)
            for e_bits in e_bit_list:
                # NOTE(review): 'FLOAT' is not in quant_type_list above, so this
                # branch is currently dead; kept for parity with sibling scripts.
                if quant_type == 'FLOAT':
                    title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
                else:
                    title = '%s_%d' % (quant_type, num_bits)
                model_name_ptq = f'{args.data}_{args.model}_{title}'
                p_data_path = f'mia_ckpt/0/target/{model_name_ptq}/trajectory_test_data.npy'
                # allow_pickle: the .npy stores a dict; only load trusted files.
                dataSet = np.load(data_path, allow_pickle=True).item()
                p_dataSet = np.load(p_data_path, allow_pickle=True).item()
                # transpose(0,1) swaps the first two dims of the trajectory matrix
                # (assumed epoch-major -> sample-major; TODO confirm shape).
                data = torch.from_numpy(np.array(dataSet['model_trajectory'], dtype='f')).transpose(0,1)
                p_data = torch.from_numpy(np.array(p_dataSet['model_trajectory'], dtype='f')).transpose(0,1)
                div = module.js_div(data,p_data)
                div = div.item()
                # clamp tiny negative values caused by numerical error
                if div<0:
                    div = 0
                print(f"js div of {model_name_ptq}: {div}")
                # GlobalVariables presumably comes in via `from mia_utils import *` — verify.
                idx = GlobalVariables.title_list.index(title)
                # +4: first data row offset of the result sheet — TODO confirm layout.
                idx += 4
                worksheet.cell(row=idx,column=2,value=div)
    workbook.save(filename)
import sys
import os
# 从get_param.py输出重定向文件val.txt中提取参数量和计算量
def extract_ratio(model_name, dataset):
    """Parse a ptflops-style summary dump and extract model cost statistics.

    Reads ``param_flops/<dataset>/<model_name>.txt`` (the redirected stdout of
    get_param.py) and returns total MACs, total parameters, and per-layer
    parameter / FLOPs percentage ratios.

    Returns:
        Mac (float): total multiply-accumulates in M units (G scaled by 1024).
        Param (float): total parameters in M units (k divided by 1024).
        layer (list[str]): layer names (text inside the first parentheses of
            each per-layer line).
        par_ratio (list[float]): per-layer parameter percentage.
        flop_ratio (list[float]): per-layer FLOPs percentage.

    Raises:
        ValueError: if no line containing 'Model' is found (malformed dump).
        OSError: if the summary file does not exist.
    """
    # Context manager ensures the file handle is closed even on parse errors
    # (the original left the handle open).
    with open('param_flops/' + dataset + '/' + model_name + '.txt', 'r') as fr:
        lines = fr.readlines()
    # Skip leading warnings: the totals line immediately follows the 'Model' header.
    head = None
    for i in range(len(lines)):
        if 'Model' in lines[i]:
            head = i + 1
            break
    if head is None:
        # Previously fell through to an unbound-local error; fail clearly instead.
        raise ValueError("No 'Model' header found in param/flops summary")
    # Totals line format: "<params>, <macs>Mac, ..." — take the field before 'Mac,'.
    Mac = lines[head].split('Mac,')[0].split(',')[-1]
    if 'M' in Mac:
        Mac = float(Mac.split('M')[0])
    elif 'G' in Mac:
        # Normalize G to M using the 1024 convention used elsewhere in the project.
        Mac = float(Mac.split('G')[0])
        Mac *= 1024
    Param = lines[head].split(',')[0]
    if 'M' in Param:
        Param = float(Param.split('M')[0])
    elif 'k' in Param:
        Param = float(Param.split('k')[0])
        Param /= 1024
    layer = []
    par_ratio = []
    flop_ratio = []
    # Per-layer lines look like: "(name): ..., 33.3% Params, ..., 50.0% MACs"
    for line in lines:
        if '(' in line and ')' in line:
            layer.append(line.split(')')[0].split('(')[1])
            par_ratio.append(float(line.split('%')[0].split(',')[-1]))
            flop_ratio.append(float(line.split('%')[-2].split(',')[-1]))
    return Mac, Param, layer, par_ratio, flop_ratio
\ No newline at end of file
from torch.autograd import Function
class FakeQuantize(Function):
    """Straight-through fake quantization.

    Forward: round-trip the tensor through ``qparam``'s quantized representation
    so the output carries the quantization error. Backward: identity gradient
    (straight-through estimator); ``qparam`` receives no gradient.
    """

    @staticmethod
    def forward(ctx, x, qparam):
        # Quantize then immediately dequantize to inject quantization noise
        # while keeping the original floating-point domain.
        return qparam.dequantize_tensor(qparam.quantize_tensor(x))

    @staticmethod
    def backward(ctx, grad_output):
        # Pass the gradient through unchanged; None for the qparam argument.
        return grad_output, None
from model import *
from dataloader import DataLoader
from utils import numbit_list, ebit_list,build_bias_list, build_list
from gen_options import GenOption
from generator import Generator,Generator_imagenet
import module
import gol
import argparse
import time
import torch
import torch.nn as nn
from torch.optim.lr_scheduler import MultiStepLR
import sys
class GenTrainer(object):
    """Data-free training of a conditional generator against a fixed teacher.

    The generator is optimized so that its synthetic images (a) are classified
    by the (optionally quantized) teacher as their conditioning labels and
    (b) reproduce the teacher's BatchNorm running statistics (BNS loss).
    """
    def __init__(self, option):
        # option: a GenOption carrying model/dataset/quantization/generator hyper-params.
        self.settings = option
        self.set_test_loader()
        self.set_teacher()
        self.set_generator()
        self.set_optim_G()
    def set_test_loader(self):
        """Build only the test loader; train/val loaders are discarded."""
        dataloader = DataLoader(self.settings.dataset,self.settings.batchSize)
        _,_,self.test_loader = dataloader.getloader()
    def set_teacher(self):
        """Load the pretrained (optionally quantized) teacher and freeze it in eval mode."""
        self.model_teacher = Model(self.settings.model,self.settings.dataset).cuda()
        if self.settings.quant:
            self.model_teacher.quantize(self.settings.quant_type,self.settings.num_bits,self.settings.e_bits)
        self.model_teacher.load_state_dict(torch.load(self.settings.teacher_file))
        self.model_teacher.eval()
    # When randemb is False, this also rewrites settings.latent_dim to match the
    # teacher's output-layer weight width.
    def set_generator(self):
        if self.settings.randemb:
            weight_t = None
        else:
            if self.settings.quant:
                weight_t = self.model_teacher.get_quant_output_layer_weight()
            else:
                weight_t = self.model_teacher.get_output_layer_weight()
            # If the output layer is a Conv, the weight has 4 dims whose last
            # two are 1; drop them to get a 2-D matrix.
            if self.settings.model in ['Inception_BN']:
                weight_t = weight_t.reshape(weight_t.size()[:2])
            self.settings.latent_dim = weight_t.size()[1]
        if self.settings.dataset in ['cifar10','cifar100']:
            self.generator = Generator(self.settings, weight_t, self.settings.freeze).cuda()
        elif self.settings.dataset in ['imagenet']:
            self.generator = Generator_imagenet(self.settings, weight_t, self.settings.freeze).cuda()
        else:
            assert False, "Invalid dataset"
    def set_optim_G(self):
        """Adam optimizer + multi-step LR schedule for the generator."""
        self.optim_G = torch.optim.Adam(self.generator.parameters(), lr=self.settings.lr_G,
                                        betas=(self.settings.b1, self.settings.b2))
        # NOTE(review): lrs_G is created but never stepped in the visible code — verify.
        self.lrs_G = MultiStepLR(self.optim_G, milestones=self.settings.milestones_G, gamma=self.settings.gamma_G)
    def test_teacher(self):
        """Report the frozen teacher's top-1 accuracy on the real test set."""
        correct = 0
        with torch.no_grad():
            for data, target in self.test_loader:
                data,target = data.cuda(), target.cuda()
                if self.settings.quant:
                    output = self.model_teacher.quantize_forward(data)
                else:
                    output = self.model_teacher(data)
                pred = output.argmax(dim=1, keepdim=True)
                correct += pred.eq(target.view_as(pred)).sum().item()
        print('Teacher Accuracy: {:.2f}%'.format(100. * correct / len(self.test_loader.dataset)))
    def prepare_train(self):
        """Set up losses, stat buffers, and register BN-statistics forward hooks."""
        self.log_soft = nn.LogSoftmax(dim=1)
        # MSE is typically a regression loss; here it matches BN statistics.
        self.MSE_loss = nn.MSELoss().cuda()
        self.mean_list = []
        self.var_list = []
        self.teacher_running_mean = []
        self.teacher_running_var = []
        self.model_teacher.eval()
        self.generator.train()
        if self.settings.quant:
            for m in self.model_teacher.modules():
                if isinstance(m, module.QConvBN) or isinstance(m,module.QConvBNReLU) or isinstance(m,module.QConvBNReLU6):
                    m.register_forward_hook(self.quant_hook_fn_forward)
        else:
            for m in self.model_teacher.modules():
                if isinstance(m, nn.BatchNorm2d):
                    m.register_forward_hook(self.hook_fn_forward)
    # For fused ConvBN layers of the quantized network: the BN input is the Conv
    # output, so the hook re-applies the Conv to its input before taking stats.
    def quant_hook_fn_forward(self,module, input, output):
        # mean/var describe the current synthetic batch; running_mean/running_var
        # are the teacher's statistics from its original training data.
        weight = module.conv_module.weight.clone()
        if module.conv_module.bias is not None:
            bias = module.conv_module.bias.clone()
        else:
            bias = None
        stride = module.conv_module.stride
        padding = module.conv_module.padding
        groups = module.conv_module.groups
        input = input[0]
        # NOTE(review): `F` (torch.nn.functional) is not imported in this file's
        # visible import block — presumably provided via `from model import *`; verify.
        input = F.conv2d(input, weight, bias,
                         stride=stride,
                         padding=padding,
                         groups=groups)
        mean = input.mean([0, 2, 3])
        # use biased var in train
        var = input.var([0, 2, 3], unbiased=False)
        self.mean_list.append(mean)
        self.var_list.append(var)
        # Teacher is in eval mode, so BN running stats can be read directly.
        # NOTE(review): these lists are appended every forward pass and never
        # cleared (unlike mean_list/var_list); only the first N entries are read.
        self.teacher_running_mean.append(module.bn_module.running_mean)
        self.teacher_running_var.append(module.bn_module.running_var)
    def hook_fn_forward(self,module, input, output):
        # mean/var describe the current synthetic batch; running_mean/running_var
        # are the teacher's statistics from its original training data.
        input = input[0]
        mean = input.mean([0, 2, 3])
        # use biased var in train
        var = input.var([0, 2, 3], unbiased=False)
        self.mean_list.append(mean)
        self.var_list.append(var)
        # Teacher is in eval mode, so BN running stats can be read directly.
        self.teacher_running_mean.append(module.running_mean)
        self.teacher_running_var.append(module.running_var)
    def train(self, epoch):
        """Run one epoch (settings.iters generator updates); returns generator accuracy (%)."""
        # total_loss = 0.
        correct = 0
        item_len = 0
        for i in range(self.settings.iters):
            multi_class = torch.rand(1)
            self.MERGE_PARAM = self.settings.multi_label_num
            MERGE_PROB = self.settings.multi_label_prob # superpose probability
            # MERGE_PROB is the fraction of decision-boundary samples (multi-label mixes).
            # The IF branch superposes several labels per sample, so accuracy cannot
            # be measured there; acc is only accumulated in the ELSE branch.
            # Boundary samples still improve the generator through the loss.
            if multi_class<MERGE_PROB:
                # Get labels ranging from 0 to n_classes for n rows
                z = torch.randn(self.settings.batchSize, self.MERGE_PARAM,self.settings.latent_dim).cuda()
                labels = torch.randint(0, self.settings.nClasses, (self.settings.batchSize,self.MERGE_PARAM)).cuda()
                linear = F.softmax(torch.randn(self.settings.batchSize,self.MERGE_PARAM),dim=1).cuda()
                z = z.contiguous()
                labels = labels.contiguous()
                # Soft target: mixture weights scattered onto the class axis.
                labels_loss = torch.zeros(self.settings.batchSize,self.settings.nClasses).cuda()
                labels_loss.scatter_add_(1,labels,linear)
                images = self.generator(z, labels, linear)
            else:
                z = torch.randn(self.settings.batchSize, self.settings.latent_dim).cuda()
                labels = torch.randint(0, self.settings.nClasses, (self.settings.batchSize,)).cuda()
                z = z.contiguous()
                labels = labels.contiguous()
                images = self.generator(z, labels)
                # Hard one-hot target for single-label samples.
                labels_loss = torch.zeros(self.settings.batchSize,self.settings.nClasses).cuda()
                labels_loss.scatter_(1,labels.unsqueeze(1),1.0)
            self.mean_list.clear()
            self.var_list.clear()
            # Teacher forward also fills mean_list/var_list via the registered hooks.
            if self.settings.quant:
                output_teacher_batch = self.model_teacher.quantize_forward(images)
            else:
                output_teacher_batch = self.model_teacher(images)
            # Cross-entropy between teacher output and the (possibly soft) label target.
            loss_one_hot = (-(labels_loss*self.log_soft(output_teacher_batch)).sum(dim=1)).mean()
            # BN statistic loss
            # Distance between the synthetic batch's stats and the teacher's BN stats.
            BNS_loss = torch.zeros(1).cuda()
            for num in range(len(self.mean_list)):
                BNS_loss += self.MSE_loss(self.mean_list[num], self.teacher_running_mean[num]) + self.MSE_loss(
                    self.var_list[num], self.teacher_running_var[num])
            BNS_loss = BNS_loss / len(self.mean_list)
            # loss of Generator
            loss_G = loss_one_hot + 0.1 * BNS_loss
            # total_loss += loss_G.item()
            self.optim_G.zero_grad()
            loss_G.backward()
            self.optim_G.step()
            if not multi_class< MERGE_PROB:
                pred = output_teacher_batch.argmax(dim=1, keepdim=True)
                correct += pred.eq(labels.view_as(pred)).sum().item()
                item_len += self.settings.batchSize
        # train_loss = total_loss/self.settings.iters
        # NOTE(review): if every iteration fell into the multi-label branch,
        # item_len stays 0 and this divides by zero — unlikely for typical
        # MERGE_PROB, but not guarded.
        gen_acc = 100. * correct / item_len
        # First line of the log: teacher accuracy on generated (noisy-label) inputs.
        # Higher means the generator better matches the real data distribution.
        # print(
        #     "[Epoch %d/%d] [Batch %d/%d] [acc: %.4f%%] [G loss: %f] [Time: %5.2fs]"
        #     % (epoch + 1, self.settings.nEpochs, i+1, self.settings.iters, gen_acc, train_loss, (time.time()-start_time))
        # )
        return gen_acc
    def run(self):
        """Full training loop: evaluate teacher, then train the generator,
        checkpointing whenever generator accuracy improves."""
        self.test_teacher()
        self.prepare_train()
        start_time = time.time()
        best_gen_acc = None
        # NOTE(review): range(1, nEpochs) runs nEpochs-1 epochs — confirm intended.
        for epoch in range(1,self.settings.nEpochs):
            gen_acc = self.train(epoch)
            if not best_gen_acc or gen_acc > best_gen_acc:
                best_gen_acc = gen_acc
                # Saves the whole generator module (not just state_dict).
                torch.save(self.generator, self.settings.gen_file)
            time_interval = time.time()-start_time
            print('>> Epoch:%d Time:%.2fs Cur acc:%.4f Best acc:%.4f'%(epoch,time_interval,gen_acc,best_gen_acc))
            start_time = time.time()
def main():
    """CLI entry point: trains one generator per quantization configuration
    (or a single full-precision one), installing the global quantization
    tables before each run."""
    # Line-buffer stdout so progress prints flush promptly under job schedulers.
    sys.stdout = open(sys.stdout.fileno(), mode='w', buffering=1)
    parser = argparse.ArgumentParser(description='Gen Arg')
    parser.add_argument('--model', type=str)
    parser.add_argument('--dataset',type=str)
    parser.add_argument('--quant', action='store_true')
    parser.add_argument('--freeze', action='store_true')
    parser.add_argument('--randemb', action='store_true')
    parser.add_argument('--multi_label_prob', type=float, default=0.0)
    parser.add_argument('--multi_label_num', type=int, default=2)
    # NOTE(review): store_false means args.no_DM defaults to True and the flag
    # sets it False — double-negative naming; verify downstream usage.
    parser.add_argument('--no_DM', action='store_false')
    parser.add_argument('--noise_scale', type=float, default=1.0)
    args = parser.parse_args()
    print(args)
    option = GenOption(args)
    if option.quant:
        # Global quant-table state must be initialized before any set_value call.
        gol._init()
        quant_type_list = ['POT','FLOAT']
        for quant_type in quant_type_list:
            num_bit_list = numbit_list(quant_type)
            if quant_type != 'INT':
                bias_list = build_bias_list(quant_type)
                gol.set_value(bias_list, is_bias=True)
            for num_bits in num_bit_list:
                e_bit_list = ebit_list(quant_type,num_bits)
                for e_bits in e_bit_list:
                    if quant_type == 'FLOAT':
                        title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
                    else:
                        title = '%s_%d' % (quant_type, num_bits)
                    # Install the quantization value table for this configuration.
                    if quant_type != 'INT':
                        plist = build_list(quant_type, num_bits, e_bits)
                        gol.set_value(plist)
                    print('>'*20 + 'Gen: '+option.model+' '+title+'<'*20)
                    option.set(quant_type,num_bits,e_bits)
                    gentrainer = GenTrainer(option)
                    gentrainer.run()
    else:
        # Full-precision run: no quant tables needed.
        print('>'*20 + 'Gen: '+option.model+' Full'+'<'*20)
        option.set()
        gentrainer = GenTrainer(option)
        gentrainer.run()
if __name__ == '__main__':
    main()
#!/bin/bash
#- Job parameters

# (TODO)
# Please modify job name

#- Resources

# (TODO)
# Please modify your requirements

#SBATCH -p nv-gpu                   # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00               # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                   # Request N nodes
#SBATCH --gres=gpu:1                # Request M GPU per node
#SBATCH --gres-flags=enforce-binding  # CPU-GPU Affinity
#SBATCH --qos=gpu-normal            # Request QOS Type

###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K        # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00       # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###

# set constraint for RTX8000 to meet my cuda
#SBATCH --constraint="Ampere|RTX8000"

#- Log information

echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
module list                       # list modules loaded

##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424

##- language
module load python3/3.6.8

##- CUDA
# module load cuda-cudnn/10.2-7.6.5
# module load cuda-cudnn/11.2-8.2.1
module load cuda-cudnn/11.1-8.2.1

##- virtualenv
# source xxxxx/activate

echo $(module list)              # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                    # nas quota

nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info

#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"  # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]
# Fix: quote variable expansions so an unset/empty $Dataset or $Quant yields a
# clean mismatch instead of a `[: =: unary operator expected` syntax error.
if [ "$Dataset" = 'cifar10' ]; then
    Label=2
elif [ "$Dataset" = 'cifar100' ]; then
    Label=10
else
    echo "Invalid Dataset $Dataset"
    exit
fi

if [ "$Quant" = 'True' ]; then
    python gen_one.py --model "$Model" --dataset "$Dataset" --quant --multi_label_prob 0.4 --multi_label_num "$Label"
else
    python gen_one.py --model "$Model" --dataset "$Dataset" --multi_label_prob 0.4 --multi_label_num "$Label"
fi

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
import os
import os.path as osp
class GenOption(object):
    """Hyper-parameter bundle for generator (data-free distillation) training.

    Holds dataset/model identity, generator optimizer settings, and — via
    :meth:`set` — resolves the teacher-checkpoint and generator-output paths
    for either the quantized or the full-precision pipeline.
    """

    def __init__(self, args):
        self.model = args.model
        self.dataset = args.dataset
        self.batchSize = 128
        self.quant = args.quant
        if self.dataset == "cifar10":
            self.nClasses = 10
        elif self.dataset == "cifar100":
            self.nClasses = 100
        else:
            # Raise instead of `assert False`: asserts are stripped under
            # `python -O`, which would let an invalid dataset slip through.
            raise ValueError("invalid dataset: %s" % self.dataset)
        # ----------Generator options ---------------------------------------------
        # self.nEpochs = 100
        self.nEpochs = 40
        # Iterations per epoch (independent of batch size).
        self.iters = 200
        # Freeze the embedding-layer weights.
        self.freeze = args.freeze
        self.randemb = args.randemb
        # When randemb is False, latent_dim must match weight_t's shape.
        self.latent_dim = 64
        # Needs adjusting for datasets such as ImageNet.
        self.img_size = 32
        self.channels = 3
        self.lr_G = 0.001
        # self.milestones_G = [40,60,80]
        self.milestones_G = [20, 30]
        self.gamma_G = 0.1
        self.b1 = 0.5
        self.b2 = 0.999
        # ----------More option ---------------------------------------------
        self.multi_label_prob = args.multi_label_prob
        self.multi_label_num = args.multi_label_num
        self.no_DM = args.no_DM
        self.noise_scale = args.noise_scale
        self.intermediate_dim = 100
        # if self.network == "resnet20":
        #     self.intermediate_dim = 64

    def set(self, quant_type=None, num_bits=None, e_bits=None):
        """Resolve teacher/generator checkpoint paths for one configuration.

        With ``self.quant`` set, the three arguments select the PTQ variant;
        otherwise the full-precision paths are used.

        Raises:
            FileNotFoundError: if the teacher checkpoint does not exist.
        """
        if self.quant:
            self.quant_type = quant_type
            self.num_bits = num_bits
            self.e_bits = e_bits
            if quant_type == 'FLOAT':
                title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
            else:
                title = '%s_%d' % (quant_type, num_bits)
            self.teacher_file = 'ckpt_quant/'+self.dataset+'/'+self.model+'/'+title+'.pt'
            gen_path = 'ckpt_quant_gen/'+self.dataset+'/'+self.model
            self.gen_file = gen_path + '/' + title + '.pt'
        else:
            self.teacher_file = 'ckpt_full/'+self.dataset+'/'+self.model+'.pt'
            gen_path = 'ckpt_full_gen/'+self.dataset
            self.gen_file = gen_path +'/'+ self.model+'.pt'
        if not osp.exists(self.teacher_file):
            # Explicit exception (not `assert`) so the check survives -O.
            raise FileNotFoundError("Empty teacher file: %s" % self.teacher_file)
        # exist_ok avoids a crash when the output directory already exists.
        os.makedirs(gen_path, exist_ok=True)
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import init
class Generator(nn.Module):
    """Conditional image generator (32x32, DCGAN-style upsampling stack).

    A label embedding (optionally initialized from the teacher's classifier
    weights) is perturbed with noise, batch-normalized, optionally projected
    by a dimension-reducing linear layer (``no_DM`` False), and decoded into
    an image through two 2x nearest-neighbour upsampling conv blocks.
    """

    def __init__(self, options=None, teacher_weight=None, freeze=True):
        super(Generator, self).__init__()
        self.settings = options
        # Embedding maps class ids to latent vectors (vocab size, vector dim).
        # `is None` instead of `== None`: teacher_weight may be a tensor, and
        # `==` would trigger element-wise tensor comparison.
        if teacher_weight is None:
            self.label_emb = nn.Embedding(self.settings.nClasses, self.settings.latent_dim)
        else:
            # When randemb is False, latent_dim must equal the input width of
            # the teacher's output layer (rows of teacher_weight).
            self.label_emb = nn.Embedding.from_pretrained(teacher_weight, freeze=freeze)
        # Parameter-free, stat-free normalization applied across the embedding
        # dimension (input is transposed before/after the call in forward()).
        # NOTE(review): num_features is passed a torch.Size; it is unused here
        # because affine=False and track_running_stats=False.
        self.embed_normalizer = nn.BatchNorm1d(self.label_emb.weight.T.shape,affine=False,track_running_stats=False)
        if not self.settings.no_DM:
            # Dimension-mapping layer projecting embeddings to intermediate_dim.
            self.fc_reducer = nn.Linear(in_features=self.label_emb.weight.shape[-1], out_features=self.settings.intermediate_dim)
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.intermediate_dim, 128 * self.init_size ** 2))
        else:
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.latent_dim, 128 * self.init_size ** 2))
        self.conv_blocks0 = nn.Sequential(
            nn.BatchNorm2d(128),
        )
        self.conv_blocks1 = nn.Sequential(
            nn.Conv2d(128, 128, 3, stride=1, padding=1),
            nn.BatchNorm2d(128, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
        )
        self.conv_blocks2 = nn.Sequential(
            nn.Conv2d(128, 64, 3, stride=1, padding=1),
            nn.BatchNorm2d(64, 0.8),
            nn.LeakyReLU(0.2, inplace=True),
            nn.Conv2d(64, self.settings.channels, 3, stride=1, padding=1),
            nn.Tanh(),
            nn.BatchNorm2d(self.settings.channels, affine=False)
        )

    def forward(self, z, labels, linear=None, z2=None):
        """Generate images for `labels` perturbed by noise `z`.

        When `linear` is given, it holds soft mixture weights over multiple
        labels per sample; the embeddings are blended accordingly.
        (GDFQ multiplies noise by the label instead.)
        """
        # `is None`: `linear` is a tensor when provided, so `==` would not be
        # an identity test.
        if linear is None:
            gen_input = self.embed_normalizer(torch.add(self.label_emb(labels),self.settings.noise_scale*z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(gen_input)
        else:
            embed_norm = self.embed_normalizer(torch.add(self.label_emb(labels),self.settings.noise_scale*z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(embed_norm)
            else:
                gen_input = embed_norm
            # Weighted sum over the per-sample label set.
            gen_input = (gen_input * linear.unsqueeze(2)).sum(dim=1)
        out = self.l1(gen_input)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks0(out)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks1(img)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks2(img)
        return img
class Generator_imagenet(nn.Module):
    """ImageNet variant of :class:`Generator`.

    Same embed-normalize-project-decode pipeline, but every BatchNorm in the
    decoder is a class-conditional BN (1000 classes) so the per-class affine
    parameters follow the requested labels / soft-label mixture.
    """

    def __init__(self, options=None, teacher_weight=None, freeze=True):
        super(Generator_imagenet, self).__init__()
        self.settings = options
        # `is None` instead of `== None`: teacher_weight may be a tensor.
        if teacher_weight is None:
            self.label_emb = nn.Embedding(self.settings.nClasses, self.settings.latent_dim)
        else:
            self.label_emb = nn.Embedding.from_pretrained(teacher_weight, freeze=freeze)
        # Parameter-free normalization (num_features unused: affine=False,
        # track_running_stats=False — see Generator).
        self.embed_normalizer = nn.BatchNorm1d(self.label_emb.weight.T.shape,affine=False,track_running_stats=False)
        if not self.settings.no_DM:
            self.fc_reducer = nn.Linear(in_features=self.label_emb.weight.shape[-1], out_features=self.settings.intermediate_dim)
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.intermediate_dim, 128 * self.init_size ** 2))
        else:
            self.init_size = self.settings.img_size // 4
            self.l1 = nn.Sequential(nn.Linear(self.settings.latent_dim, 128 * self.init_size ** 2))
        # Decoder layers kept as individual attributes because the conditional
        # BN layers need (labels, linear) at call time.
        self.conv_blocks0_0 = CategoricalConditionalBatchNorm2d(1000, 128)
        self.conv_blocks1_0 = nn.Conv2d(128, 128, 3, stride=1, padding=1)
        self.conv_blocks1_1 = CategoricalConditionalBatchNorm2d(1000, 128, 0.8)
        self.conv_blocks1_2 = nn.LeakyReLU(0.2, inplace=True)
        self.conv_blocks2_0 = nn.Conv2d(128, 64, 3, stride=1, padding=1)
        self.conv_blocks2_1 = CategoricalConditionalBatchNorm2d(1000, 64, 0.8)
        self.conv_blocks2_2 = nn.LeakyReLU(0.2, inplace=True)
        self.conv_blocks2_3 = nn.Conv2d(64, self.settings.channels, 3, stride=1, padding=1)
        self.conv_blocks2_4 = nn.Tanh()
        self.conv_blocks2_5 = nn.BatchNorm2d(self.settings.channels, affine=False)

    def forward(self, z, labels, linear=None):
        """Generate images for `labels` (optionally soft-label mix `linear`)."""
        # `is None`: `linear` is a tensor when provided.
        if linear is None:
            gen_input = self.embed_normalizer(torch.add(self.label_emb(labels),z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(gen_input)
        else:
            embed_norm = self.embed_normalizer(torch.add(self.label_emb(labels),z).T).T
            if not self.settings.no_DM:
                gen_input = self.fc_reducer(embed_norm)
            else:
                gen_input = embed_norm
            # Weighted sum over the per-sample label set.
            gen_input = (gen_input * linear.unsqueeze(2)).sum(dim=1)
        out = self.l1(gen_input)
        out = out.view(out.shape[0], 128, self.init_size, self.init_size)
        img = self.conv_blocks0_0(out, labels, linear=linear)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks1_0(img)
        img = self.conv_blocks1_1(img, labels, linear=linear)
        img = self.conv_blocks1_2(img)
        img = nn.functional.interpolate(img, scale_factor=2)
        img = self.conv_blocks2_0(img)
        img = self.conv_blocks2_1(img, labels, linear=linear)
        img = self.conv_blocks2_2(img)
        img = self.conv_blocks2_3(img)
        img = self.conv_blocks2_4(img)
        img = self.conv_blocks2_5(img)
        return img
class ConditionalBatchNorm2d(nn.BatchNorm2d):
    """Batch normalization whose affine scale/shift are supplied per call.

    Statistics handling mirrors ``nn.BatchNorm2d`` exactly; only the affine
    transform differs: instead of fixed learned parameters, ``forward`` takes
    per-sample (or broadcastable) ``weight`` and ``bias`` tensors.
    """

    def __init__(self, num_features, eps=1e-05, momentum=0.1,
                 affine=False, track_running_stats=True):
        super(ConditionalBatchNorm2d, self).__init__(
            num_features, eps, momentum, affine, track_running_stats
        )

    def forward(self, input, weight, bias, **kwargs):
        self._check_input_dim(input)
        # Reproduce nn.BatchNorm2d's running-statistics bookkeeping: with
        # momentum=None a cumulative moving average is used, otherwise EMA.
        ema_factor = 0.0
        if self.training and self.track_running_stats:
            self.num_batches_tracked += 1
            ema_factor = (1.0 / self.num_batches_tracked.item()
                          if self.momentum is None else self.momentum)
        normalized = F.batch_norm(
            input, self.running_mean, self.running_var,
            self.weight, self.bias,
            self.training or not self.track_running_stats,
            ema_factor, self.eps,
        )
        # Promote 1-D parameters to a batch dimension, then broadcast over
        # the spatial dimensions.
        if weight.dim() == 1:
            weight = weight.unsqueeze(0)
        if bias.dim() == 1:
            bias = bias.unsqueeze(0)
        full_shape = normalized.size()
        scale = weight.unsqueeze(-1).unsqueeze(-1).expand(full_shape)
        shift = bias.unsqueeze(-1).unsqueeze(-1).expand(full_shape)
        return scale * normalized + shift
class CategoricalConditionalBatchNorm2d(ConditionalBatchNorm2d):
    """Conditional BN whose per-class scale/shift come from embedding tables.

    Each class id in ``c`` selects a learned (weight, bias) row; when a
    soft-label mixture ``linear`` is supplied, the selected rows are blended
    by the mixture weights before being applied.
    """

    def __init__(self, num_classes, num_features, eps=1e-5, momentum=0.1,
                 affine=False, track_running_stats=True):
        super(CategoricalConditionalBatchNorm2d, self).__init__(
            num_features, eps, momentum, affine, track_running_stats
        )
        # One (weight, bias) row per class.
        self.weights = nn.Embedding(num_classes, num_features)
        self.biases = nn.Embedding(num_classes, num_features)
        self._initialize()

    def _initialize(self):
        # Start as the identity transform: scale 1, shift 0.
        init.ones_(self.weights.weight.data)
        init.zeros_(self.biases.weight.data)

    def forward(self, input, c, linear=None, **kwargs):
        weight = self.weights(c)
        bias = self.biases(c)
        # `is not None` instead of `!= None`: `linear` is a tensor when given,
        # and `!=` would invoke tensor comparison rather than an identity test.
        if linear is not None:
            # Blend the per-class rows by the soft-label mixture weights.
            weight = (weight * linear.unsqueeze(2)).mean(dim=1)
            bias = (bias * linear.unsqueeze(2)).mean(dim=1)
        return super(CategoricalConditionalBatchNorm2d, self).forward(input, weight, bias)
from model import *
import sys
import torch
from ptflops import get_model_complexity_info
if __name__ == "__main__":
    # Usage: python get_param_flops.py <model_name> <dataset>
    model_name = sys.argv[1]
    dataset = sys.argv[2]
    model = Model(model_name, dataset)
    # To analyze a trained checkpoint instead of a fresh model:
    # full_file = 'ckpt/cifar10_'+model_name+'.pt'
    # model.load_state_dict(torch.load(full_file))
    flops, params = get_model_complexity_info(model, (3, 32, 32), as_strings=True, print_per_layer_stat=True)
    # Report the aggregate totals, which were previously computed but discarded.
    print('FLOPs:', flops)
    print('Params:', params)
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#SBATCH -J ALL                      # The job name
#SBATCH -o ret/ret-%j.out           # Write the standard output to file named 'ret-<job_number>.out'
#SBATCH -e ret/ret-%j.err           # Write the standard error to file named 'ret-<job_number>.err'
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                   # Submit to 'nv-gpu' Partitiion
#SBATCH -t 0-01:30:00               # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                   # Request N nodes
#SBATCH --gres=gpu:1                # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-debug             # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K       # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00      # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#SBATCH --constraint="Ampere|RTX8000|T4"
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
module list                       # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
# module load cuda-cudnn/10.2-7.6.5
# module load cuda-cudnn/11.2-8.2.1
module load cuda-cudnn/11.1-8.2.1
##- virtualenv
# source xxxxx/activate
echo $(module list)               # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota                     # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"                            # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
name_list="AlexNet AlexNet_BN VGG_16 VGG_19 Inception_BN ResNet_18 ResNet_50 ResNet_152 MobileNetV2"
# name_list="MobileNetV2"
for name in $name_list; do  # intentional word-splitting over the list
    if [ -f "param_flops/$Dataset/$name.txt" ]; then
        echo "$name: param_flops exists"
    # elif [ ! -f "ckpt/cifar10_$name.pt" ];then
    #     echo "$name: ckpt not exists"
    else
        # The redirection below fails if the target directory is missing.
        mkdir -p "param_flops/$Dataset"
        python get_param_flops.py "$name" "$Dataset" > "param_flops/$Dataset/$name.txt"
        echo "$name: param_flops done"
    fi
done
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
# -*- coding: utf-8 -*-
# 用于多个module之间共享全局变量
def _init(): # 初始化
global _global_dict
_global_dict = {}
def set_value(value,is_bias=False):
# 定义一个全局变量
if is_bias:
_global_dict[0] = value
else:
_global_dict[1] = value
def get_value(is_bias=False): # 给bias独立于各变量外的精度
if is_bias:
return _global_dict[0]
else:
return _global_dict[1]
import os
import argparse
import mia_utils
import normal
import MIA
import torch
def train_networks(args):
    """Train the target/shadow models, or distill them.

    Resolves checkpoint directories under ``mia_ckpt/<seed>/<mode>`` and
    delegates the actual training to ``normal.train_models``. For distill
    modes, the teacher checkpoints are looked up under seed 0.
    """
    device = mia_utils.get_pytorch_device()
    mia_utils.create_path('./outputs')
    if 'distill' in args.mode:
        # The teacher lives under seed 0; its mode name is the part after
        # the 'distill_' prefix.
        path_tar = 'mia_ckpt/{}/{}'.format(0, args.mode.split('_')[-1])
        path_dis = 'mia_ckpt/{}/{}'.format(args.seed, args.mode)
        mia_utils.create_path(path_tar)
        mia_utils.create_path(path_dis)
    else:
        path_tar = 'mia_ckpt/{}/{}'.format(args.seed, args.mode)
        path_dis = None
        mia_utils.create_path(path_tar)
    normal.train_models(args, path_tar, path_dis, device)
def membership_inference_attack(args):
    """Run the requested MIA stage: trajectory-dataset build or black-box attack."""
    print(f'--------------{args.mia_type}-------------')
    device = mia_utils.get_pytorch_device()
    if args.mia_type == 'build-dataset':
        # Distilled-model checkpoints for dataset construction live under seed 0.
        MIA.build_trajectory_membership_dataset(args, 'mia_ckpt/{}'.format(0), device)
    if args.mia_type == 'black-box':
        MIA.trajectory_black_box_membership_inference_attack(args, 'mia_ckpt/{}'.format(args.seed), device)
if __name__ == '__main__':
    # Release cached CUDA allocator blocks before starting.
    torch.cuda.empty_cache()
    # NOTE(review): the next two calls only *query* memory statistics and
    # discard the result — contrary to the original comment, they do not
    # force garbage collection.
    torch.cuda.memory_allocated()
    torch.cuda.memory_reserved()
    parser = argparse.ArgumentParser(description='TrajectoryMIA')
    # NOTE(review): `help=` is given lists below; argparse expects a string,
    # so these render oddly in --help output (kept as-is for compatibility).
    parser.add_argument('--action', type=int, default=0, help=[0, 1])
    parser.add_argument('--seed', type=int, default=0)
    parser.add_argument('--mode', type=str, default=None, help=['target', 'shadow', 'distill_target', 'distill_shadow'])
    parser.add_argument('--model', type=str, default=None, help=['AlexNet','AlexNet_BN','VGG_16','VGG_19','Inception_BN','ResNet_18','ResNet_50','ResNet_152','MobileNetV2'])
    parser.add_argument('--data', type=str, default=None, help=['cinic10', 'cifar10', 'cifar100'])
    parser.add_argument('--epochs', type=int, default=200)
    parser.add_argument('--model_distill', type=str, default=None, help=['AlexNet','AlexNet_BN','VGG_16','VGG_19','Inception_BN','ResNet_18','ResNet_50','ResNet_152','MobileNetV2'])
    parser.add_argument('--epochs_distill', type=int, default=100)
    parser.add_argument('--mia_type', type=str, help=['build-dataset', 'black-box'])
    parser.add_argument('--load_attack',action='store_true', help='load a trained attack model')
    parser.add_argument('--store_ptq',action='store_true', help='store a ptq model')
    parser.add_argument('--quant_type', type=str, choices=['INT', 'POT', 'FLOAT'], default=None,help='choose a ptq mode for target model')
    parser.add_argument("--num_bits",type=int,default=0)
    parser.add_argument("--e_bits",type=int,default=0)
    parser.add_argument('--load_ptq',action='store_true', help='load a ptq target model')
    args = parser.parse_args()
    mia_utils.set_random_seeds(args.seed)
    print('random seed:{}'.format(args.seed))
    # action 0: train target/shadow/distill models; action 1: run the MIA stage.
    if args.action == 0:
        train_networks(args)
    elif args.action == 1:
        membership_inference_attack(args)
    # Same memory-stat queries on exit (see the note at the top of this block).
    torch.cuda.empty_cache()
    torch.cuda.memory_allocated()
    torch.cuda.memory_reserved()
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                   # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00               # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                   # Request N nodes
#SBATCH --gres=gpu:1                # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal            # Request QOS Type
#SBATCH --constraint="A100|Volta|RTX8000"
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K       # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00      # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#SBATCH --constraint="Ampere|RTX8000"
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                       # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate
echo $(module list)               # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota                     # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"                            # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
# $Model/$Dataset/$Distill are quoted so empty or whitespace-containing
# values cannot be word-split into extra arguments.
# echo "python mia_one.py --mode target --model $Model --data $Dataset"
# python mia_one.py --mode target --model $Model --data $Dataset
# echo "python mia_one.py --mode shadow --model $Model --data $Dataset"
# python mia_one.py --mode shadow --model $Model --data $Dataset
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --mode distill_target --model "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --mode distill_shadow --model $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --mode distill_shadow --model "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --action 1 --mode shadow --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --action 1 --mode shadow --mia_type build-dataset --model "$Model" --model_distill "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --action 1 --mode target --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --action 1 --mode target --mia_type build-dataset --model "$Model" --model_distill "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --action 1 --mia_type black-box --model "$Model" --model_distill "$Model" --data "$Dataset" --epochs_distill "$Distill"
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                   # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00               # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                   # Request N nodes
#SBATCH --gres=gpu:1                # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal            # Request QOS Type
#SBATCH --constraint="Volta|RTX8000|A100"
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K       # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00      # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#SBATCH --constraint="Ampere|RTX8000"
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                       # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate
echo $(module list)               # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota                     # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"                            # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
# Arguments quoted to survive empty/whitespace-containing values.
echo "python div.py --model $Model --data $Dataset"
python div.py --model "$Model" --data "$Dataset"
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                   # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00               # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1                   # Request N nodes
#SBATCH --gres=gpu:1                # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal            # Request QOS Type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K       # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00      # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                       # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate
echo $(module list)               # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota                     # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"                            # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
# $Model/$Dataset/$Distill are quoted so empty or whitespace-containing
# values cannot be word-split into extra arguments.
echo "python mia_one.py --mode target --model $Model --data $Dataset"
python mia_one.py --mode target --model "$Model" --data "$Dataset"
echo "python mia_one.py --mode shadow --model $Model --data $Dataset"
python mia_one.py --mode shadow --model "$Model" --data "$Dataset"
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --mode distill_target --model "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --mode distill_shadow --model $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --mode distill_shadow --model "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --action 1 --mode shadow --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --action 1 --mode shadow --mia_type build-dataset --model "$Model" --model_distill "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --action 1 --mode target --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --action 1 --mode target --mia_type build-dataset --model "$Model" --model_distill "$Model" --data "$Dataset" --epochs_distill "$Distill"
echo "python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --epochs_distill $Distill"
python mia_one.py --action 1 --mia_type black-box --model "$Model" --model_distill "$Model" --data "$Dataset" --epochs_distill "$Distill"
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
# TRAIN DISTILL MODEL
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1 --epochs_distill $Distill"
python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1 --epochs_distill $Distill
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1 --epochs_distill $Distill"
python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1 --epochs_distill $Distill
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2 --epochs_distill $Distill"
python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 2 --epochs_distill $Distill
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1 --epochs_distill $Distill"
python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 1 --epochs_distill $Distill
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2 --epochs_distill $Distill"
python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 2 --epochs_distill $Distill
echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3 --epochs_distill $Distill"
python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 5 --e_bits 3 --epochs_distill $Distill
# TRAIN DISTILL SHADOW MODEL
echo "python mia_one.py --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1 --epochs_distill $Distill"
python mia_one.py --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 3 --e_bits 1 --epochs_distill $Distill
echo "python mia_one.py --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1 --epochs_distill $Distill"
python mia_one.py --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits 4 --e_bits 1 --epochs_distill $Distill
# NOTE(review): this is the tail of a job script whose head (shebang, SBATCH
# directives, and the earlier num_bits 3/4 runs) lies above this chunk.
# $Model, $Dataset and $Distill are assumed to be set there and to contain
# no whitespace — confirm against the submitting environment.

#- Helper: print the exact invocation (so the log records it), then run it.
mia_run() {
    echo "python mia_one.py $*"
    python mia_one.py "$@"
}

# Remaining TRAIN DISTILL SHADOW MODEL runs ("num_bits e_bits" pairs).
for cfg in "4 2" "5 1" "5 2" "5 3"; do
    nb=${cfg% *}; eb=${cfg#* }
    mia_run --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits $nb --e_bits $eb --epochs_distill $Distill
done

# CONSTRUCT TRAIN DATASET (mode=shadow) then TEST DATASET (mode=target),
# each over the full sweep of "num_bits e_bits" pairs.
for mode in shadow target; do
    for cfg in "3 1" "4 1" "4 2" "5 1" "5 2" "5 3"; do
        nb=${cfg% *}; eb=${cfg#* }
        mia_run --action 1 --mode $mode --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $nb --e_bits $eb --epochs_distill $Distill
    done
done

# ATTACK: run the black-box membership-inference attack for each configuration.
for cfg in "3 1" "4 1" "4 2" "5 1" "5 2" "5 3"; do
    nb=${cfg% *}; eb=${cfg#* }
    mia_run --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $nb --e_bits $eb --epochs_distill $Distill
done

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO) Please set the job name before submitting.
#- Resources
# (TODO) Please adjust the resource requests below to your needs.
#SBATCH -p nv-gpu                    # Submit to the 'nv-gpu' partition
#SBATCH -t 1-06:00:00                # Wall-clock limit: 1 day, 6 hours
#SBATCH --nodes=1                    # Request 1 node
#SBATCH --gres=gpu:1                 # Request 1 GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU affinity
#SBATCH --qos=gpu-normal             # QOS type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system allocates 8 or 16 cores per GPU by default.
### If you need more or less, use:   #SBATCH --cpus-per-task=K
###
### Without a constraint, any node meeting the requirements is allocated.
### You can also request specific node characteristics or hostnames:
###   #SBATCH --nodelist=gpu-v00            # specific host list
###   #SBATCH --constraint="Volta|RTX8000"  # specific GPU types
###
# Constraint kept broad (incl. RTX8000) to match the CUDA build used below.

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info
#- Warning! Please do not change CUDA_VISIBLE_DEVICES in `.bashrc`, `env.sh`,
#- or this job script — SLURM assigns and exports it.
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus

#- Job step
# NOTE(review): $Model, $Dataset and $Distill are expected to be exported by
# the submitting environment (e.g. `sbatch --export=...`) — confirm.

#- Helper: print the exact invocation (so the log records it), then run it.
mia_run() {
    echo "python mia_one.py $*"
    python mia_one.py "$@"
}

NUM_BITS=6            # FLOAT quantization total bit-width swept by this job
E_BITS_LIST="1 2 3 4" # exponent bit-widths to sweep

# TRAIN DISTILL TARGET MODEL
for eb in $E_BITS_LIST; do
    mia_run --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done
# TRAIN DISTILL SHADOW MODEL
for eb in $E_BITS_LIST; do
    mia_run --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done
# CONSTRUCT TRAIN DATASET (mode=shadow) then TEST DATASET (mode=target)
for mode in shadow target; do
    for eb in $E_BITS_LIST; do
        mia_run --action 1 --mode $mode --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
    done
done
# ATTACK: black-box membership-inference attack for each configuration
for eb in $E_BITS_LIST; do
    mia_run --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO) Please set the job name before submitting.
#- Resources
# (TODO) Please adjust the resource requests below to your needs.
#SBATCH -p nv-gpu                    # Submit to the 'nv-gpu' partition
#SBATCH -t 1-06:00:00                # Wall-clock limit: 1 day, 6 hours
#SBATCH --nodes=1                    # Request 1 node
#SBATCH --gres=gpu:1                 # Request 1 GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU affinity
#SBATCH --qos=gpu-normal             # QOS type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system allocates 8 or 16 cores per GPU by default.
### If you need more or less, use:   #SBATCH --cpus-per-task=K
###
### Without a constraint, any node meeting the requirements is allocated.
### You can also request specific node characteristics or hostnames:
###   #SBATCH --nodelist=gpu-v00            # specific host list
###   #SBATCH --constraint="Volta|RTX8000"  # specific GPU types
###
# Constraint kept broad (incl. RTX8000) to match the CUDA build used below.

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info
#- Warning! Please do not change CUDA_VISIBLE_DEVICES in `.bashrc`, `env.sh`,
#- or this job script — SLURM assigns and exports it.
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus

#- Job step
# NOTE(review): $Model, $Dataset and $Distill are expected to be exported by
# the submitting environment (e.g. `sbatch --export=...`) — confirm.

#- Helper: print the exact invocation (so the log records it), then run it.
mia_run() {
    echo "python mia_one.py $*"
    python mia_one.py "$@"
}

NUM_BITS=7          # FLOAT quantization total bit-width swept by this job
E_BITS_LIST="1 4 5" # exponent bit-widths to sweep

# TRAIN DISTILL TARGET MODEL
for eb in $E_BITS_LIST; do
    mia_run --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done
# TRAIN DISTILL SHADOW MODEL
for eb in $E_BITS_LIST; do
    mia_run --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done
# CONSTRUCT TRAIN DATASET (mode=shadow) then TEST DATASET (mode=target)
for mode in shadow target; do
    for eb in $E_BITS_LIST; do
        mia_run --action 1 --mode $mode --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
    done
done
# ATTACK: black-box membership-inference attack for each configuration
for eb in $E_BITS_LIST; do
    mia_run --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO) Please set the job name before submitting.
#- Resources
# (TODO) Please adjust the resource requests below to your needs.
#SBATCH -p nv-gpu                    # Submit to the 'nv-gpu' partition
#SBATCH -t 1-06:00:00                # Wall-clock limit: 1 day, 6 hours
#SBATCH --nodes=1                    # Request 1 node
#SBATCH --gres=gpu:1                 # Request 1 GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU affinity
#SBATCH --qos=gpu-normal             # QOS type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system allocates 8 or 16 cores per GPU by default.
### If you need more or less, use:   #SBATCH --cpus-per-task=K
###
### Without a constraint, any node meeting the requirements is allocated.
### You can also request specific node characteristics or hostnames:
###   #SBATCH --nodelist=gpu-v00            # specific host list
###   #SBATCH --constraint="Volta|RTX8000"  # specific GPU types
###

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # NAS quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # GPU info
#- Warning! Please do not change CUDA_VISIBLE_DEVICES in `.bashrc`, `env.sh`,
#- or this job script — SLURM assigns and exports it.
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus

#- Job step
# NOTE(review): $Model, $Dataset and $Distill are expected to be exported by
# the submitting environment (e.g. `sbatch --export=...`) — confirm.

#- Helper: print the exact invocation (so the log records it), then run it.
mia_run() {
    echo "python mia_one.py $*"
    python mia_one.py "$@"
}

NUM_BITS=8          # FLOAT quantization total bit-width swept by this job
E_BITS_LIST="1 4 5" # exponent bit-widths to sweep

# TRAIN DISTILL TARGET MODEL
for eb in $E_BITS_LIST; do
    mia_run --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done
# TRAIN DISTILL SHADOW MODEL
for eb in $E_BITS_LIST; do
    mia_run --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done
# CONSTRUCT TRAIN DATASET (mode=shadow) then TEST DATASET (mode=target)
for mode in shadow target; do
    for eb in $E_BITS_LIST; do
        mia_run --action 1 --mode $mode --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
    done
done
# ATTACK: black-box membership-inference attack for each configuration
for eb in $E_BITS_LIST; do
    mia_run --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type FLOAT --num_bits $NUM_BITS --e_bits $eb --epochs_distill $Distill
done

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                        # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00                    # Run for a maximum time of 1 day, 6 hours
#SBATCH --nodes=1                        # Request N nodes
#SBATCH --gres=gpu:1                     # Request M GPU per node
#SBATCH --gres-flags=enforce-binding     # CPU-GPU Affinity
#SBATCH --qos=gpu-normal                 # Request QOS Type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
###   #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying the constraint, any available nodes that meet the
### requirement will be allocated. You can specify the characteristics of
### the compute nodes, and even the names of the compute nodes:
###
###   #SBATCH --nodelist=gpu-v00           # Request a specific list of hosts
###   #SBATCH --constraint="Volta|RTX8000"  # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                              # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate

echo $(module list)                      # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                            # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"   # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]

# run: print the fully-expanded command line, then execute it.
# Replaces the repeated `echo "cmd"` + `cmd` pairs so the log stays
# readable without duplicating every command by hand.
run() {
    echo "$*"
    "$@"
}

# The INT-quantized pipeline stages are kept for reference; uncomment the
# loop(s) you need. Each stage sweeps --num_bits 2..8.
#
# # TRAIN DISTILL TARGET / SHADOW MODEL
# for mode in distill_target distill_shadow; do
#     for bits in 2 3 4 5 6 7 8; do
#         run python mia_one.py --mode $mode --model $Model --data $Dataset --store_ptq --quant_type INT --num_bits $bits --epochs_distill $Distill
#     done
# done
#
# # CONSTRUCT TRAIN (shadow) / TEST (target) DATASET
# for mode in shadow target; do
#     for bits in 2 3 4 5 6 7 8; do
#         run python mia_one.py --action 1 --mode $mode --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type INT --num_bits $bits --epochs_distill $Distill
#     done
# done

# ATTACK
# # for test full precision mia result (reuse a stored attack model)
# run python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --load_attack --epochs_distill $Distill

# Full-precision black-box attack (trains the attack model).
run python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --epochs_distill $Distill

# # INT-quantized attacks, --num_bits 2..8
# for bits in 2 3 4 5 6 7 8; do
#     run python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type INT --num_bits $bits --epochs_distill $Distill
# done

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name

#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu                        # Submit to 'nv-gpu' Partition
#SBATCH -t 1-06:00:00                    # Run for a maximum time of 1 day, 6 hours
#SBATCH --nodes=1                        # Request N nodes
#SBATCH --gres=gpu:1                     # Request M GPU per node
#SBATCH --gres-flags=enforce-binding     # CPU-GPU Affinity
#SBATCH --qos=gpu-normal                 # Request QOS Type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
###   #SBATCH --cpus-per-task=K          # Request K cores
###
### Without specifying the constraint, any available nodes that meet the
### requirement will be allocated. You can specify the characteristics of
### the compute nodes, and even the names of the compute nodes:
###
###   #SBATCH --nodelist=gpu-v00           # Request a specific list of hosts
###   #SBATCH --constraint="Volta|RTX8000"  # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda

#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"

#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list                              # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate

echo $(module list)                      # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)

cluster-quota                            # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info

#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}"   # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM

#- Job step
# [EDIT HERE(TODO)]

# run: print the fully-expanded command line, then execute it.
# Replaces the repeated `echo "cmd"` + `cmd` pairs so the log stays
# readable without duplicating every command by hand.
run() {
    echo "$*"
    "$@"
}

# INT quantization bit-widths swept by every stage below (order preserved
# from the original unrolled script: 9 through 16).
BITS="9 10 11 12 13 14 15 16"

# TRAIN DISTILL MODEL (target)
for bits in $BITS; do
    run python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type INT --num_bits $bits --epochs_distill $Distill
done

# TRAIN DISTILL SHADOW MODEL
for bits in $BITS; do
    run python mia_one.py --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type INT --num_bits $bits --epochs_distill $Distill
done

# CONSTRUCT TRAIN DATASET (shadow mode)
for bits in $BITS; do
    run python mia_one.py --action 1 --mode shadow --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type INT --num_bits $bits --epochs_distill $Distill
done

# CONSTRUCT TEST DATASET (target mode)
for bits in $BITS; do
    run python mia_one.py --action 1 --mode target --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type INT --num_bits $bits --epochs_distill $Distill
done

# ATTACK
# # for test full precision mia result (reuse a stored attack model)
# run python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --load_attack --epochs_distill $Distill
for bits in $BITS; do
    run python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type INT --num_bits $bits --epochs_distill $Distill
done

#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
#SBATCH --constraint="Ampere|Volta|RTX8000"
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
# Full POT-quantization MIA pipeline. Each phase sweeps POT bit widths 2-6;
# every command is echoed before it runs so the log shows the exact invocation.
# TRAIN DISTILL MODEL
for NB in 2 3 4 5 6; do
    echo "python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type POT --num_bits $NB --epochs_distill $Distill"
    python mia_one.py --mode distill_target --model $Model --data $Dataset --store_ptq --quant_type POT --num_bits $NB --epochs_distill $Distill
done
# TRAIN DISTILL SHADOW MODEL
for NB in 2 3 4 5 6; do
    echo "python mia_one.py --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type POT --num_bits $NB --epochs_distill $Distill"
    python mia_one.py --mode distill_shadow --model $Model --data $Dataset --store_ptq --quant_type POT --num_bits $NB --epochs_distill $Distill
done
# CONSTRUCT TRAIN DATASET
for NB in 2 3 4 5 6; do
    echo "python mia_one.py --action 1 --mode shadow --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type POT --num_bits $NB --epochs_distill $Distill"
    python mia_one.py --action 1 --mode shadow --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type POT --num_bits $NB --epochs_distill $Distill
done
# CONSTRUCT TEST DATASET
for NB in 2 3 4 5 6; do
    echo "python mia_one.py --action 1 --mode target --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type POT --num_bits $NB --epochs_distill $Distill"
    python mia_one.py --action 1 --mode target --mia_type build-dataset --model $Model --model_distill $Model --data $Dataset --quant_type POT --num_bits $NB --epochs_distill $Distill
done
# ATTACK
for NB in 2 3 4 5 6; do
    echo "python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type POT --num_bits $NB --epochs_distill $Distill"
    python mia_one.py --action 1 --mia_type black-box --model $Model --model_distill $Model --data $Dataset --quant_type POT --num_bits $NB --epochs_distill $Distill
done
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
#!/bin/bash
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partitiion
#SBATCH -t 0-01:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-trial # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#SBATCH --constraint="Ampere|RTX8000"
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
source ~/pyt1.5/bin/activate
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
module load cuda-cudnn/10.2-7.6.5
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
# Run the property-divergence analysis for the chosen model/dataset pair.
# $Model and $Dataset are expected to be exported by the submitting wrapper.
echo "python property_div.py --model $Model --data $Dataset"
python property_div.py --model $Model --data $Dataset
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
import torch
import numpy as np
import random
import sys
import time
import os
import dataset
import torch.nn as nn
import torch.nn.functional as F
from torch.nn import CrossEntropyLoss
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR, MultiStepLR
from train_one import WarmUpLR
from bisect import bisect_right
from normal import save_model
from torch.utils.tensorboard import SummaryWriter
import utils
class GlobalVariables:
    """Shared constants for the quantization sweep.

    ``title_list`` enumerates every quantization configuration evaluated:
    INT with 2-16 bits, POT with 2-6 bits, and FLOAT formats named
    ``FLOAT_<total_bits>_E<exponent_bits>`` whose exponent choices mirror
    ``ebit_list`` (all exponents below 7 total bits, else E1/E4/E5).
    """
    title_list = (
        ['INT_%d' % bits for bits in range(2, 17)]
        + ['POT_%d' % bits for bits in range(2, 7)]
        + ['FLOAT_%d_E%d' % (bits, e)
           for bits in range(3, 9)
           for e in (range(1, bits - 1) if bits < 7 else [1, 4, 5])]
    )
def set_random_seeds(seed):
    """Seed every RNG in use (stdlib, NumPy, Torch CPU/GPU) for reproducibility."""
    random.seed(seed)
    np.random.seed(seed)
    torch.manual_seed(seed)
    torch.cuda.manual_seed(seed)
    torch.cuda.manual_seed_all(seed)
    # Trade cuDNN autotuning speed for deterministic kernels.
    torch.backends.cudnn.benchmark = False
    torch.backends.cudnn.deterministic = True
def get_pytorch_device():
    """Return ``'cuda'`` when a GPU is available, else ``'cpu'``, logging versions."""
    has_cuda = torch.cuda.is_available()
    print('Using Pytorch version:', torch.__version__, 'CUDA:', has_cuda)
    return 'cuda' if has_cuda else 'cpu'
class MultiStepMultiLR(_LRScheduler):
    """Step scheduler with an individual decay factor per milestone.

    At epoch e the learning rate is ``base_lr * prod(gammas[:k])`` where k is
    the number of milestones already passed, rounded to 8 decimals.
    """

    def __init__(self, optimizer, milestones, gammas, last_epoch=-1):
        # Milestones must be pre-sorted so bisect can count passed ones.
        if sorted(milestones) != list(milestones):
            raise ValueError('Milestones should be a list of'
                             ' increasing integers. Got {}', milestones)
        self.milestones = milestones
        self.gammas = gammas
        super(MultiStepMultiLR, self).__init__(optimizer, last_epoch)

    def get_lr(self):
        passed = bisect_right(self.milestones, self.last_epoch)
        factor = np.prod(self.gammas[:passed])
        return [round(base_lr * factor, 8) for base_lr in self.base_lrs]
def create_path(path):
    """Create *path* (with parents) unless it already exists."""
    if os.path.exists(path):
        return
    os.makedirs(path, exist_ok=True)
def get_lr(optimizers):
    """Current learning rate of the last param group; accepts an optimizer
    or a dict of optimizers (the last-inserted one is used)."""
    opt = optimizers
    if isinstance(optimizers, dict):
        last_key = list(optimizers.keys())[-1]
        opt = optimizers[last_key]
    return opt.param_groups[-1]['lr']
def get_loss_criterion():
    """Classification loss used throughout training: cross entropy."""
    criterion = CrossEntropyLoss()
    return criterion
class Flatten(nn.Module):
    """Flatten every dimension except the leading batch dimension."""

    def forward(self, input):
        batch = input.size(0)
        return input.view(batch, -1)
def cnn_test(model, loader, device='cpu'):
    """Evaluate *model* over *loader*; return (top1, top5) accuracy as scalars.

    Uses the project-level ``dataset.AverageMeter`` / ``dataset.accuracy``
    helpers; gradients are disabled for the whole pass.
    """
    model.eval()
    top1 = dataset.AverageMeter()
    top5 = dataset.AverageMeter()
    with torch.no_grad():
        for batch in loader:
            inputs = batch[0].to(device)
            targets = batch[1].to(device)
            logits = model(inputs)
            prec1, prec5 = dataset.accuracy(logits, targets, topk=(1, 5))
            batch_size = inputs.size(0)
            top1.update(prec1[0], batch_size)
            top5.update(prec5[0], batch_size)
    # Extract plain Python scalars from the running averages.
    return top1.avg.data.cpu().numpy()[()], top5.avg.data.cpu().numpy()[()]
# Single optimization step on one mini-batch.
def cnn_training_step(model, optimizer, data, labels, device='cpu'):
    """Forward, backward and optimizer update for one batch; returns the loss value."""
    inputs = data.to(device)
    targets = labels.to(device)
    logits = model(inputs)
    loss = get_loss_criterion()(logits, targets)
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
# Generic training loop shared by target and shadow models.
def cnn_train(args, model, data, epochs, optimizer, scheduler, model_params, model_path, trained_model_name, device='cpu'):
    """Train *model* for *epochs* epochs; return a dict of per-epoch metrics.

    The train/test loaders are selected from *data* based on ``args.mode``
    ('target' or 'shadow') and the model's ``augment_training`` flag.  Each
    epoch logs to TensorBoard; the best top-1 test checkpoint is saved via
    ``save_model``, and training stops early after 20 epochs without
    improvement.
    """
    metrics = {'epoch_times':[], 'test_top1_acc':[], 'test_top5_acc':[], 'train_top1_acc':[], 'train_top5_acc':[], 'lrs':[]}
    best_acc = 0
    patience = 0
    writer = SummaryWriter(log_dir=f'./logs/{args.mode}/{args.data}_{args.model}')
    for epoch in range(1, epochs+1):
        cur_lr = get_lr(optimizer)
        # NOTE(review): if args.mode is neither 'target' nor 'shadow' the
        # loaders below are never assigned -- confirm callers always pass one
        # of the two modes.
        if not hasattr(model, 'augment_training') or model.augment_training:
            if args.mode == 'target':
                print('load aug_target_dataset ... ')
                train_loader = data.aug_target_train_loader
                # test_loader = data.aug_target_test_loader
                test_loader = data.target_test_loader
            elif args.mode == 'shadow':
                print('load aug_shadow_dataset ...')
                train_loader = data.aug_shadow_train_loader
                # test_loader = data.aug_shadow_test_loader
                test_loader = data.shadow_test_loader
        else:
            if args.mode == 'target':
                print('load target_dataset ... ')
                train_loader = data.target_train_loader
                test_loader = data.target_test_loader
            elif args.mode == 'shadow':
                print('load shadow_dataset ...')
                train_loader = data.shadow_train_loader
                test_loader = data.shadow_test_loader
        # Linear LR warm-up during the first epoch only.
        if epoch == 1:
            warmup_scheduler = WarmUpLR(optimizer, len(train_loader))
        else:
            warmup_scheduler = None
        start_time = time.time()
        model.train()
        print('Epoch: {}/{}'.format(epoch, epochs))
        print('Cur lr: {}'.format(cur_lr))
        loss = 0
        cnt = 0
        for x, y, idx in train_loader:
            loss += cnn_training_step(model, optimizer, x, y, device)
            cnt += 1
            if warmup_scheduler is not None:
                warmup_scheduler.step()
        end_time = time.time()
        loss = loss/cnt
        # No separate validation split: the test set is evaluated directly.
        top1_test, top5_test = cnn_test(model, test_loader, device)
        print('Top1 Test accuracy: {}'.format(top1_test))
        print('Top5 Test accuracy: {}'.format(top5_test))
        metrics['test_top1_acc'].append(top1_test)
        metrics['test_top5_acc'].append(top5_test)
        top1_train, top5_train = cnn_test(model, train_loader, device)
        print('Top1 Train accuracy: {}'.format(top1_train))
        print('Top5 Train accuracy: {}'.format(top5_train))
        metrics['train_top1_acc'].append(top1_train)
        metrics['train_top5_acc'].append(top5_train)
        epoch_time = int(end_time-start_time)
        print('Epoch took {} seconds.'.format(epoch_time))
        metrics['epoch_times'].append(epoch_time)
        metrics['lrs'].append(cur_lr)
        writer.add_scalar('top1_test', top1_test, epoch)
        writer.add_scalar('top1_train', top1_train, epoch)
        writer.add_scalar('lr', cur_lr, epoch)
        writer.add_scalar('loss', loss, epoch)
        scheduler.step()
        model_params['train_top1_acc'] = metrics['train_top1_acc']
        model_params['test_top1_acc'] = metrics['test_top1_acc']
        model_params['train_top5_acc'] = metrics['train_top5_acc']
        model_params['test_top5_acc'] = metrics['test_top5_acc']
        model_params['epoch_times'] = metrics['epoch_times']
        model_params['lrs'] = metrics['lrs']
        total_training_time = sum(model_params['epoch_times'])
        model_params['total_time'] = total_training_time
        print('Training took {} seconds...'.format(total_training_time))
        # The best target/shadow checkpoint is stored under the fixed epoch tag
        # args.epochs, so later stages (e.g. normal.py's train) can load the
        # best weights without re-saving at the end of training.
        if best_acc < top1_test:
            best_acc = top1_test
            print(f'best acc is :{best_acc}')
            print('save best model')
            save_model(model, model_params, model_path, trained_model_name, epoch=args.epochs)
            patience = 0
        else:
            patience += 1
            # Early stop after 20 epochs without test-accuracy improvement.
            if patience>=20:
                break
    writer.close()
    return metrics
# One distillation step: the student (distill model) matches the teacher's
# (trained target/shadow model's) output distribution via KL divergence.
def cnn_training_step_dis(args, model, model_dis, optimizer, data, labels, device='cpu'):
    """Run one distillation batch and return the KL loss value.

    Only ``model_dis`` is updated (its parameters are the ones registered with
    *optimizer*); the teacher uses its quantized inference path when
    ``args.quant_type`` is set, otherwise the full-precision forward.
    """
    inputs = data.to(device)
    # Ground-truth labels are moved but never used: distillation learns from
    # the teacher's outputs, not from the labels.
    unused_targets = labels.to(device)
    student_logits = model_dis(inputs)
    if args.quant_type is None:
        teacher_logits = model(inputs)
    else:
        teacher_logits = model.quantize_inference(inputs)
    kl_div = nn.KLDivLoss(reduction='batchmean')
    loss = kl_div(F.log_softmax(student_logits, dim=1), F.softmax(teacher_logits, dim=1))
    optimizer.zero_grad()
    loss.backward()
    optimizer.step()
    return loss.item()
# Training loop for the distill model (distilling a trained target/shadow
# model, optionally through its quantized inference path).
def cnn_train_dis(args, model, model_dis, data, epochs, optimizer, scheduler, model_params, model_path, trained_model_name, device='cpu'):
    """Distill *model* into *model_dis* for *epochs* epochs; return metrics.

    TensorBoard logs and checkpoints are grouped by quantization title
    (e.g. ``INT_8``, ``FLOAT_6_E3``) or ``FP32`` when no quantization is set.
    A checkpoint is saved after every epoch (per-epoch trajectories are needed
    downstream for the MIA attack features).
    """
    metrics = {'epoch_times':[], 'test_top1_acc':[], 'test_top5_acc':[], 'train_top1_acc':[], 'train_top5_acc':[], 'lrs':[]}
    if args.quant_type is not None:
        # Build the configuration tag used for log dirs and checkpoint names.
        if args.quant_type == 'FLOAT':
            title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
        else:
            title = '%s_%d' % (args.quant_type, args.num_bits)
        writer = SummaryWriter(log_dir=f'./logs/{args.mode}/{args.data}_{args.model}/{title}')
    else:
        writer = SummaryWriter(log_dir=f'./logs/{args.mode}/{args.data}_{args.model}/FP32')
    for epoch in range(1, epochs+1):
        cur_lr = get_lr(optimizer)
        if not hasattr(model, 'augment_training') or model.augment_training:
            print(f'load aug_{args.mode}_dataset ...')
            train_loader = data.aug_distill_train_loader
            # test_loader = data.aug_distill_test_loader
            test_loader = data.distill_test_loader
        else:
            print(f'load {args.mode}_dataset ...')
            train_loader = data.distill_train_loader
            test_loader = data.distill_test_loader
        # Linear LR warm-up during the first epoch only.
        if epoch == 1:
            warmup_scheduler = WarmUpLR(optimizer, len(train_loader))
        else:
            warmup_scheduler = None
        start_time = time.time()
        model = model.to(device)
        model_dis = model_dis.to(device)
        model_dis.train() # only the distill model is updated
        model.eval() # the target/shadow teacher stays frozen
        print('Epoch: {}/{}'.format(epoch, epochs))
        print('Cur lr: {}'.format(cur_lr))
        # warm up
        loss = 0
        cnt = 0
        for i, (x, y, idx) in enumerate(train_loader):
            loss += cnn_training_step_dis(args,model, model_dis, optimizer, x, y, device)
            cnt += 1
            if warmup_scheduler is not None:
                warmup_scheduler.step()
        end_time = time.time()
        loss = loss/cnt
        top1_test, top5_test = cnn_test(model_dis, test_loader, device)
        print('Top1 Test accuracy: {}'.format(top1_test))
        print('Top5 Test accuracy: {}'.format(top5_test))
        metrics['test_top1_acc'].append(top1_test)
        metrics['test_top5_acc'].append(top5_test)
        top1_train, top5_train = cnn_test(model_dis, train_loader, device)
        print('Top1 Train accuracy: {}'.format(top1_train))
        print('Top5 Train accuracy: {}'.format(top5_train))
        metrics['train_top1_acc'].append(top1_train)
        metrics['train_top5_acc'].append(top5_train)
        epoch_time = int(end_time-start_time)
        print('Epoch took {} seconds.'.format(epoch_time))
        metrics['epoch_times'].append(epoch_time)
        metrics['lrs'].append(cur_lr)
        writer.add_scalar('top1_test', top1_test, epoch)
        writer.add_scalar('top1_train', top1_train, epoch)
        writer.add_scalar('lr', cur_lr, epoch)
        writer.add_scalar('loss', loss, epoch)
        scheduler.step()
        model_params['train_top1_acc'] = metrics['train_top1_acc']
        model_params['test_top1_acc'] = metrics['test_top1_acc']
        model_params['train_top5_acc'] = metrics['train_top5_acc']
        model_params['test_top5_acc'] = metrics['test_top5_acc']
        model_params['epoch_times'] = metrics['epoch_times']
        model_params['lrs'] = metrics['lrs']
        total_training_time = sum(model_params['epoch_times'])
        model_params['total_time'] = total_training_time
        print('Training took {} seconds...'.format(total_training_time))
        # TODO: store per-epoch distill checkpoints for full precision / each
        # PTQ configuration in their respective folders.
        if args.quant_type is None:
            save_model(model_dis, model_params, model_path, trained_model_name, epoch=epoch)
        else:
            trained_model_name_ptq = trained_model_name + '_' +title
            save_model(model_dis, model_params, model_path, trained_model_name_ptq, epoch=epoch)
    writer.close()
    return metrics
def get_dataset(dataset, mode, aug=False, batch_size=512, add_trigger=False):
    """Dataset factory: dispatch to the loader matching *dataset*.

    Returns None for an unrecognized dataset name (matching the historical
    behaviour). ``add_trigger`` is not supported by cifar100.
    """
    if dataset == 'cifar100':
        return load_cifar100(mode, aug, batch_size)
    if dataset == 'cifar10':
        return load_cifar10(mode, aug, batch_size, add_trigger)
    if dataset == 'cinic10':
        return load_cinic10(mode, aug, batch_size, add_trigger)
def load_cinic10(mode, aug, batch_size, add_trigger=False):
    """Thin wrapper around the project-level ``dataset.CINIC10`` container."""
    cinic10_data = dataset.CINIC10(mode, aug, batch_size=batch_size, add_trigger=add_trigger)
    return cinic10_data
def load_cifar10(mode, aug, batch_size, add_trigger=False):
    """Thin wrapper around the project-level ``dataset.CIFAR10`` container."""
    cifar10_data = dataset.CIFAR10(mode, aug, batch_size=batch_size, add_trigger=add_trigger)
    return cifar10_data
def load_cifar100(mode, aug, batch_size):
    """Thin wrapper around ``dataset.CIFAR100`` (no trigger support)."""
    cifar100_data = dataset.CIFAR100(mode, aug, batch_size=batch_size)
    return cifar100_data
# TODO: revisit the training schedule.
def get_full_optimizer(model, lr_params, args):
    """Build SGD + MultiStepLR from ``lr_params``.

    ``lr_params`` is ``(lr, weight_decay, momentum, gamma)``; only parameters
    with ``requires_grad`` are optimized.  *args* is currently unused (kept
    for the commented-out cosine schedule).
    """
    lr, weight_decay, momentum, gamma = lr_params[0], lr_params[1], lr_params[2], lr_params[3]
    trainable = filter(lambda p: p.requires_grad, model.parameters())
    optimizer = SGD(trainable, lr=lr, momentum=momentum, weight_decay=weight_decay)
    # optimizer = Adam(filter(lambda p: p.requires_grad, model.parameters()), lr=lr)
    # scheduler = CosineAnnealingLR(optimizer, args.epochs)
    scheduler = MultiStepLR(optimizer, milestones=[60, 120, 160], gamma=gamma)
    return optimizer, scheduler
def ebit_list(quant_type, num_bits):
    """Exponent-bit options for a FLOAT format with *num_bits* total bits.

    Non-FLOAT types carry a single placeholder entry [0].  Narrow FLOAT
    formats (< 7 bits) try every feasible exponent width; wider ones are
    restricted to E1/E4/E5.
    """
    if quant_type != 'FLOAT':
        return [0]
    if num_bits < 7:
        return list(range(1, num_bits - 1))
    return [1, 4, 5]
def numbit_list(quant_type):
    """Bit widths swept for each quantization family (INT 2-16, POT 2-6, else 2-8)."""
    if quant_type == 'INT':
        return list(range(2, 17))
    if quant_type == 'POT':
        return list(range(2, 7))
    # FLOAT (and any other type): 2..8 total bits
    return list(range(2, 9))
import torch.nn as nn
from cfg import *
from module import *
from model_deployment import *
import mia_utils
class Model(nn.Module):
    """Config-table driven model: layers are built, run and quantized through
    the free functions in model_deployment using ``self.cfg_table``."""

    def __init__(self,model_name,dataset):
        super(Model, self).__init__()
        # Look up the architecture description and resize its output layer
        # to the dataset's class count before instantiating layers.
        self.cfg_table = model_cfg_table[model_name]
        adapt_dataset(self.cfg_table,dataset)
        make_layers(self,self.cfg_table)

    def forward(self,x):
        x = model_forward(self,self.cfg_table,x)
        return x

    def quantize(self, quant_type, num_bits=8, e_bits=3):
        """Attach quantized counterpart layers (q_*) for the given scheme."""
        model_quantize(self,self.cfg_table,quant_type,num_bits,e_bits)

    def quantize_forward(self,x):
        # Calibration-time forward through the quantization-aware layers.
        return model_utils(self,self.cfg_table,func='forward',x=x)

    def freeze(self):
        model_utils(self,self.cfg_table,func='freeze')

    def quantize_inference(self,x):
        # Integer-domain inference; input is quantized at the first layer.
        return model_utils(self,self.cfg_table,func='inference',x=x)

    def fakefreeze(self):
        model_utils(self,self.cfg_table,func='fakefreeze')

    def get_output_layer_weight(self):
        # NOTE: calls the module-level function of the same name; inside the
        # method body the bare name resolves to the global, not this method.
        return get_output_layer_weight(self,self.cfg_table)

    def get_quant_output_layer_weight(self):
        # Same shadowing pattern as above: dispatches to the global helper.
        return get_quant_output_layer_weight(self,self.cfg_table)
class MIA_Model(Model):
    """Model variant wired for the MIA pipeline: picks the train/test
    functions based on args.mode and re-initializes weights."""
    # TODO the interface still needs adjusting.

    def __init__(self, args, params, model_name, dataset):
        super(MIA_Model, self).__init__(model_name, dataset)
        self.augment_training = params['augment_training']
        # self.initialize_weights()
        # These two attributes may no longer be used anywhere.
        self.input_size = int(params['input_size'])
        self.num_classes = int(params['num_classes'])
        # Re-initialize weights (may not strictly be necessary).
        self.initialize_weights()
        # Distillation modes train against a teacher; all others use the
        # plain training loop.
        if 'distill' in args.mode:
            self.train_func = mia_utils.cnn_train_dis
        else:
            self.train_func = mia_utils.cnn_train
        self.test_func = mia_utils.cnn_test

    def initialize_weights(self):
        """Kaiming init for convs, unit BN, small-normal linear layers."""
        for m in self.modules():
            if isinstance(m, nn.Conv2d):
                nn.init.kaiming_normal_(m.weight, mode='fan_out', nonlinearity='relu')
            elif isinstance(m, nn.BatchNorm2d):
                m.weight.data.fill_(1)
                m.bias.data.zero_()
            elif isinstance(m, nn.Linear):
                m.weight.data.normal_(0, 0.01)
                m.bias.data.zero_()
import torch.nn as nn
import torch.nn.functional as F
from cfg import *
from module import *
def adapt_dataset(cfg_table, dataset):
    """Rewrite the classifier-head width in *cfg_table* to match *dataset*.

    Scans the table backwards and patches the first FC (out_features slot,
    index 2) or conv (out_channels slot, index 4) entry found, then stops.
    Aborts on an unknown dataset name.
    """
    if dataset == "cifar10":
        num_classes = 10
    elif dataset == "cifar100":
        num_classes = 100
    else:
        print(dataset)
        assert False, "Model deployment: invalid dataset"
    for cfg in reversed(cfg_table):
        if cfg[0] == 'FC':
            cfg[2] = num_classes
            return
        if cfg[0] == 'C':
            cfg[4] = num_classes
            return
# Fetch the weight of the full-precision model's output layer.
def get_output_layer_weight(model, cfg_table):
    """Return the detached weight tensor of the last FC/conv layer in *cfg_table*."""
    for idx in reversed(range(len(cfg_table))):
        kind = cfg_table[idx][0]
        if kind == 'FC':
            return getattr(model, 'fc%d' % idx).weight.detach()
        if kind == 'C':
            return getattr(model, 'conv%d' % idx).weight.detach()
# Fetch the (fake-quantized) weight of the quantized model's output layer.
def get_quant_output_layer_weight(model, cfg_table):
    """Return the fake-quantized weight of the last quantized FC/conv layer."""
    for idx in reversed(range(len(cfg_table))):
        kind = cfg_table[idx][0]
        if kind == 'FC':
            qlayer = getattr(model, 'q_fc%d' % idx)
            return FakeQuantize.apply(qlayer.fc_module.weight, qlayer.qw)
        if kind == 'C':
            qlayer = getattr(model, 'q_conv%d' % idx)
            return FakeQuantize.apply(qlayer.conv_module.weight, qlayer.qw)
def make_layers(model, cfg_table):
    """Instantiate and register the nn layers described by *cfg_table*.

    Layer names embed the table index (conv3, bn3, fc7, ...) so the forward
    and quantization passes can look them up by position.  'Inc' and 'ML'
    rows delegate to their dedicated builders.
    """
    for idx, cfg in enumerate(cfg_table):
        tag = cfg[0]
        if tag == 'Inc':
            make_inc_layers(model, cfg[1])
        elif tag == 'ML':
            make_ml_layers(model, cfg[1], cfg[2], cfg[3])
        elif tag == 'C':
            model.add_module('conv%d' % idx,
                             nn.Conv2d(cfg[3], cfg[4], kernel_size=cfg[5],
                                       stride=cfg[6], padding=cfg[7], bias=cfg[8]))
            # cfg[1] flags: 'B' batch norm, then ReLU ('RL') or ReLU6 ('RS').
            if 'B' in cfg[1]:
                model.add_module('bn%d' % idx, nn.BatchNorm2d(cfg[4]))
            if 'RL' in cfg[1]:
                model.add_module('relu%d' % idx, nn.ReLU(True))
            elif 'RS' in cfg[1]:
                model.add_module('relus%d' % idx, nn.ReLU6(True))
        elif tag == 'RL':
            model.add_module('relu%d' % idx, nn.ReLU(True))
        elif tag == 'RS':
            model.add_module('relus%d' % idx, nn.ReLU6(True))
        elif tag == 'MP':
            model.add_module('pool%d' % idx,
                             nn.MaxPool2d(kernel_size=cfg[1], stride=cfg[2], padding=cfg[3]))
        elif tag == 'AAP':
            model.add_module('aap%d' % idx, nn.AdaptiveAvgPool2d(cfg[1]))
        elif tag == 'FC':
            model.add_module('fc%d' % idx, nn.Linear(cfg[1], cfg[2], bias=cfg[3]))
        elif tag == 'D':
            model.add_module('drop%d' % idx, nn.Dropout(cfg[1]))
def model_forward(model, cfg_table, x):
    """Run *x* through the layers registered by make_layers, in table order.

    'VW' flattens all but the batch dimension; 'SM' applies softmax.  'Inc'
    and 'ML' rows delegate to their dedicated forward helpers.
    """
    for idx, cfg in enumerate(cfg_table):
        tag = cfg[0]
        if tag == 'Inc':
            x = inc_forward(model, cfg[1], x)
        elif tag == 'ML':
            x = ml_forward(model, cfg[1], cfg[2], cfg[3], x)
        elif tag == 'C':
            x = getattr(model, 'conv%d' % idx)(x)
            if 'B' in cfg[1]:
                x = getattr(model, 'bn%d' % idx)(x)
            if 'RL' in cfg[1]:
                x = getattr(model, 'relu%d' % idx)(x)
            elif 'RS' in cfg[1]:
                x = getattr(model, 'relus%d' % idx)(x)
        elif tag == 'RL':
            x = getattr(model, 'relu%d' % idx)(x)
        elif tag == 'RS':
            x = getattr(model, 'relus%d' % idx)(x)
        elif tag == 'MP':
            x = getattr(model, 'pool%d' % idx)(x)
        elif tag == 'AAP':
            x = getattr(model, 'aap%d' % idx)(x)
        elif tag == 'FC':
            x = getattr(model, 'fc%d' % idx)(x)
        elif tag == 'D':
            x = getattr(model, 'drop%d' % idx)(x)
        elif tag == 'VW':
            if len(cfg) == 1:  # default: flatten everything but the batch dim
                x = x.view(x.size(0), -1)
        elif tag == 'SM':
            x = F.softmax(x, dim=1)
    return x
def model_quantize(model,cfg_table,quant_type,num_bits,e_bits):
    """Attach a quantized counterpart (named ``q_<orig>``) for every layer in
    *cfg_table*, leaving the full-precision layers in place.

    Conv rows fold their batch norm (and optional ReLU/ReLU6) into a single
    fused quantized module; ``cfg[2]`` marks layers that quantize their own
    input (the network entry point).
    """
    for i in range(len(cfg_table)):
        cfg = cfg_table[i]
        if cfg[0] == 'Inc':
            inc_quantize(model,cfg[1],quant_type,num_bits,e_bits)
        elif cfg[0] == 'ML':
            ml_quantize(model,cfg[1],cfg[2],cfg[3],quant_type,num_bits,e_bits)
        elif cfg[0] == 'C':
            conv_name = 'conv%d'%i
            conv_layer = getattr(model,conv_name)
            qname = 'q_'+conv_name
            # Fuse BN (and activation) into the quantized conv when present.
            if 'B' in cfg[1]:
                bn_name = 'bn%d'%i
                bn_layer = getattr(model,bn_name)
                if 'RL' in cfg[1]:
                    qlayer = QConvBNReLU(quant_type,conv_layer,bn_layer,qi=cfg[2],num_bits=num_bits,e_bits=e_bits)
                elif 'RS' in cfg[1]:
                    qlayer = QConvBNReLU6(quant_type,conv_layer,bn_layer,qi=cfg[2],num_bits=num_bits,e_bits=e_bits)
                else:
                    qlayer = QConvBN(quant_type,conv_layer,bn_layer,qi=cfg[2],num_bits=num_bits,e_bits=e_bits)
            else:
                qlayer = QConv2d(quant_type,conv_layer,qi=cfg[2],num_bits=num_bits,e_bits=e_bits)
            model.add_module(qname,qlayer)
        elif cfg[0] == 'RL':
            name = 'relu%d'%i
            qname = 'q_'+name
            qlayer = QReLU(quant_type,num_bits=num_bits,e_bits=e_bits)
            model.add_module(qname,qlayer)
        elif cfg[0] == 'RS':
            name = 'relus%d'%i
            qname = 'q_'+name
            qlayer = QReLU6(quant_type,num_bits=num_bits,e_bits=e_bits)
            model.add_module(qname,qlayer)
        elif cfg[0] == 'MP':
            name = 'pool%d'%i
            qname = 'q_'+name
            qlayer = QMaxPooling2d(quant_type,kernel_size=cfg[1],stride=cfg[2],padding=cfg[3],num_bits=num_bits,e_bits=e_bits)
            model.add_module(qname,qlayer)
        elif cfg[0] == 'AAP':
            name = 'aap%d'%i
            qname = 'q_'+name
            qlayer = QAdaptiveAvgPool2d(quant_type,output_size=cfg[1],num_bits=num_bits,e_bits=e_bits)
            model.add_module(qname,qlayer)
        elif cfg[0] == 'FC':
            name = 'fc%d'%i
            layer = getattr(model,name)
            qname = 'q_'+name
            qlayer = QLinear(quant_type,layer,num_bits=num_bits,e_bits=e_bits)
            model.add_module(qname,qlayer)
# Also supports func='fakefreeze' in addition to forward/inference/freeze.
def model_utils(model,cfg_table,func,x=None):
    """Run one of the quantization passes over the q_* layers of *model*.

    func='forward'    : quantization-aware (calibration) forward on *x*.
    func='inference'  : integer-domain inference on *x*; the input is
                        quantized at the entry layer and dequantized at the
                        end (or just before softmax when an 'SM' row exists).
    func='freeze'     : chain each layer's output quantizer (last_qo) into
                        the next layer and freeze parameters.
    func='fakefreeze' : fakefreeze conv/fc layers only.
    Returns x (meaningful for 'forward'/'inference' only).
    """
    last_qo = None
    # Set once the output has been dequantized inside the loop (the softmax
    # case), so the final dequantize is skipped.
    done_flag = False
    for i in range(len(cfg_table)):
        cfg = cfg_table[i]
        if cfg[0] == 'Inc':
            x,last_qo = inc_utils(model,cfg[1],func,x,last_qo)
        elif cfg[0] == 'ML':
            x,last_qo = ml_utils(model,cfg[1],cfg[2],cfg[3],func,x,last_qo)
        elif cfg[0] == 'C':
            qname = 'q_conv%d'%i
            qlayer = getattr(model,qname)
            if func == 'forward':
                x = qlayer(x)
            elif func == 'inference':
                # cfg[2] is True for the entry layer: quantize the raw input.
                if cfg[2]:
                    x = qlayer.qi.quantize_tensor(x)
                x = qlayer.quantize_inference(x)
            elif func == 'freeze':
                qlayer.freeze(last_qo)
            elif func == 'fakefreeze':
                qlayer.fakefreeze()
            # Conv layers define an output quantizer for the next layer.
            last_qo = qlayer.qo
        elif cfg[0] == 'RL':
            qname = 'q_relu%d'%i
            qlayer = getattr(model,qname)
            if func == 'forward':
                x = qlayer(x)
            elif func == 'inference':
                x = qlayer.quantize_inference(x)
            elif func == 'freeze':
                qlayer.freeze(last_qo)
        elif cfg[0] == 'RS':
            qname = 'q_relus%d'%i
            qlayer = getattr(model,qname)
            if func == 'forward':
                x = qlayer(x)
            elif func == 'inference':
                x = qlayer.quantize_inference(x)
            elif func == 'freeze':
                qlayer.freeze(last_qo)
        elif cfg[0] == 'MP':
            qname = 'q_pool%d'%i
            qlayer = getattr(model,qname)
            if func == 'forward':
                x = qlayer(x)
            elif func == 'inference':
                x = qlayer.quantize_inference(x)
            elif func == 'freeze':
                qlayer.freeze(last_qo)
        elif cfg[0] == 'AAP':
            qname = 'q_aap%d'%i
            qlayer = getattr(model,qname)
            if func == 'forward':
                x = qlayer(x)
            elif func == 'inference':
                x = qlayer.quantize_inference(x)
            elif func == 'freeze':
                qlayer.freeze(last_qo)
            # Adaptive pooling rescales, so it carries its own qo.
            last_qo = qlayer.qo
        elif cfg[0] == 'FC':
            qname = 'q_fc%d'%i
            qlayer = getattr(model,qname)
            if func == 'forward':
                x = qlayer(x)
            elif func == 'inference':
                x = qlayer.quantize_inference(x)
            elif func == 'freeze':
                qlayer.freeze(last_qo)
            elif func == 'fakefreeze':
                qlayer.fakefreeze()
            last_qo = qlayer.qo
        elif cfg[0] == 'D':
            # Dropout only participates in the calibration forward.
            if func == 'forward':
                name = 'drop%d'%i
                layer = getattr(model,name)
                x = layer(x)
        elif cfg[0] == 'VW':
            if func == 'inference' or func == 'forward':
                if len(cfg) == 1: #default
                    x = x.view(x.size(0),-1)
        elif cfg[0] == 'SM':
            if func == 'inference':
                # Softmax must run in real space: dequantize first and mark
                # the output as already dequantized.
                done_flag = True
                x = last_qo.dequantize_tensor(x)
                x = F.softmax(x,dim=1)
            elif func == 'forward':
                x = F.softmax(x,dim=1)
    if func == 'inference' and not done_flag:
        x = last_qo.dequantize_tensor(x)
    return x
def make_inc_layers(model,inc_idx):
    """Register the full-precision layers of inception block inc<inc_idx> on model.

    Layout is driven by the module-level tables: inc_ch_table[inc_idx] gives the
    channel counts, inc_cfg_table[i] the layer sequence of branch i (4 branches).
    Layer names follow the pattern 'inc<idx>_br<i>_<kind><j>' so that forward /
    quantize helpers can rediscover them via getattr.
    """
    inc_name = 'inc%d'%inc_idx
    ch = inc_ch_table[inc_idx]
    for i in range(4): # branch
        prefix = inc_name+'_br%d_'%i
        for j in range(len(inc_cfg_table[i])):
            cfg = inc_cfg_table[i][j]
            if cfg[0] == 'MP':
                # cfg: ('MP', kernel_size, stride, padding)
                name = prefix+'pool%d'%j
                layer =nn.MaxPool2d(kernel_size=cfg[1],stride=cfg[2],padding=cfg[3])
                model.add_module(name,layer)
            elif cfg[0] == 'RL':
                name=prefix+'relu%d'%j
                layer=nn.ReLU(True)
                model.add_module(name,layer)
            elif cfg[0] == 'C': # 'BRL' default
                # A 'C' entry expands to Conv2d -> BatchNorm2d -> ReLU.
                # cfg: ('C', in_ch_idx, out_ch_idx, kernel_size, stride, padding)
                name=prefix+'conv%d'%j
                layer=nn.Conv2d(ch[cfg[1]],ch[cfg[2]],kernel_size=cfg[3],stride=cfg[4],padding=cfg[5],bias=False)
                model.add_module(name,layer)
                name=prefix+'bn%d'%j
                layer=nn.BatchNorm2d(ch[cfg[2]])
                model.add_module(name,layer)
                name=prefix+'relu%d'%j
                layer=nn.ReLU(True)
                model.add_module(name,layer)
def inc_forward(model,inc_idx,x):
    """Full-precision forward through inception block inc<inc_idx>.

    Each of the four branches processes the same input x independently; the
    branch outputs are concatenated along the channel dimension (dim=1).
    """
    inc_name = 'inc%d'%inc_idx
    outs = []
    for i in range(4):
        prefix = inc_name+'_br%d_'%i
        tmp = x
        for j in range(len(inc_cfg_table[i])):
            cfg = inc_cfg_table[i][j]
            if cfg[0] == 'MP':
                name = prefix+'pool%d'%j
                layer = getattr(model,name)
                tmp = layer(tmp)
            elif cfg[0] == 'RL':
                name=prefix+'relu%d'%j
                layer = getattr(model,name)
                tmp = layer(tmp)
            elif cfg[0] == 'C': # 'BRL' default
                # 'C' entries were registered as conv -> bn -> relu triples.
                name=prefix+'conv%d'%j
                layer = getattr(model,name)
                tmp = layer(tmp)
                name=prefix+'bn%d'%j
                layer = getattr(model,name)
                tmp = layer(tmp)
                name=prefix+'relu%d'%j
                layer = getattr(model,name)
                tmp = layer(tmp)
        outs.append(tmp)
    out = torch.cat(outs,1)
    return out
def inc_quantize(model,inc_idx,quant_type,num_bits,e_bits):
    """Register quantized counterparts ('q_' prefix) for every layer of
    inception block inc<inc_idx>, plus one QConcat for the branch merge.

    Conv/BN/ReLU triples registered by make_inc_layers are fused into a
    single QConvBNReLU module.
    """
    inc_name = 'inc%d'%inc_idx
    for i in range(4):
        prefix = inc_name+'_br%d_'%i
        for j in range(len(inc_cfg_table[i])):
            cfg = inc_cfg_table[i][j]
            if cfg[0] == 'MP':
                name = prefix+'pool%d'%j
                qname = 'q_'+name
                qlayer = QMaxPooling2d(quant_type,kernel_size=cfg[1],stride=cfg[2],padding=cfg[3],num_bits=num_bits,e_bits=e_bits)
                model.add_module(qname,qlayer)
            elif cfg[0] == 'RL':
                name = prefix+'relu%d'%j
                qname = 'q_'+name
                qlayer = QReLU(quant_type, num_bits=num_bits, e_bits=e_bits)
                model.add_module(qname,qlayer)
            elif cfg[0] == 'C': # 'BRL' default
                # Fuse the conv+bn(+relu) triple into one quantized module.
                conv_name=prefix+'conv%d'%j
                conv_layer=getattr(model,conv_name)
                bn_name=prefix+'bn%d'%j
                bn_layer=getattr(model,bn_name)
                qname='q_'+conv_name
                qlayer=QConvBNReLU(quant_type, conv_layer, bn_layer, num_bits=num_bits, e_bits=e_bits)
                model.add_module(qname,qlayer)
    # One quantized concat joins the 4 branch outputs.
    qname = 'q_'+inc_name+'_concat'
    qlayer = QConcat(quant_type,4,qi_array=False,qo=True,num_bits=num_bits,e_bits=e_bits)
    model.add_module(qname,qlayer)
def inc_utils(model,inc_idx,func,x=None,qo=None):
    """Dispatch func over the quantized inception block inc<inc_idx>.

    func selects the operation: 'forward' (fake-quantized forward),
    'inference' (integer-domain forward), 'freeze' (bake quantization
    parameters, threading the previous layer's output quantizer), or
    'fakefreeze'. Returns (out, last_qo); out is None for freeze-type calls.
    """
    inc_name = 'inc%d'%inc_idx
    outs=[]
    qos=[]
    for i in range(4):
        qprefix = 'q_'+inc_name+'_br%d_'%i
        # Every branch starts from the same input and input quantizer.
        tmp = x
        last_qo = qo
        for j in range(len(inc_cfg_table[i])):
            cfg = inc_cfg_table[i][j]
            if cfg[0] == 'MP':
                qname = qprefix+'pool%d'%j
                qlayer = getattr(model,qname)
                if func == 'forward':
                    tmp = qlayer(tmp)
                elif func == 'inference':
                    tmp = qlayer.quantize_inference(tmp)
                elif func == 'freeze':
                    qlayer.freeze(last_qo)
            elif cfg[0] == 'RL':
                qname = qprefix+'relu%d'%j
                qlayer = getattr(model,qname)
                if func == 'forward':
                    tmp = qlayer(tmp)
                elif func == 'inference':
                    tmp = qlayer.quantize_inference(tmp)
                elif func == 'freeze':
                    qlayer.freeze(last_qo)
            elif cfg[0] == 'C': # 'BRL' default
                qname = qprefix+'conv%d'%j
                qlayer = getattr(model,qname)
                if func == 'forward':
                    tmp = qlayer(tmp)
                elif func == 'inference':
                    tmp = qlayer.quantize_inference(tmp)
                elif func == 'freeze':
                    qlayer.freeze(last_qo)
                elif func == 'fakefreeze':
                    qlayer.fakefreeze()
                # Conv layers own an output quantizer; later layers reuse it.
                last_qo = qlayer.qo
        outs.append(tmp)
        qos.append(last_qo)
    # Merge the four branches through the quantized concat.
    qname = 'q_'+inc_name+'_concat'
    qlayer = getattr(model,qname)
    out = None
    if func == 'forward':
        out = qlayer(outs)
    elif func == 'inference':
        out = qlayer.quantize_inference(outs)
    elif func == 'freeze':
        qlayer.freeze(qos)
    last_qo = qlayer.qo
    return out,last_qo
def make_ml_layers(model,blk_type,ml_idx,blocks):
    """Register the layers of make-layer stage ml<ml_idx> on model.

    blk_type selects the block family ('BBLK' BasicBlock, 'BTNK' BottleNeck,
    'IRES' InvertedResidual); blocks is the number of blocks in the stage.
    Raises ValueError on an unknown blk_type.
    """
    table_pairs = {
        'BBLK': (bblk_ch_table, bblk_cfg_table),
        'BTNK': (btnk_ch_table, btnk_cfg_table),
        'IRES': (ires_ch_table, ires_cfg_table),
    }
    if blk_type not in table_pairs:
        raise ValueError("Make_ml_layers: Illegal blk_type")
    blk_ch_table, blk_cfg_table = table_pairs[blk_type]
    stage_name = 'ml%d'%ml_idx
    # Each make-layer owns two channel rows: row 2*ml_idx for its first
    # block, row 2*ml_idx+1 shared by all remaining blocks.
    make_blk_layers(model,blk_ch_table,blk_cfg_table,stage_name,2*ml_idx,0)
    for blk_no in range(1,blocks):
        make_blk_layers(model,blk_ch_table,blk_cfg_table,stage_name,2*ml_idx+1,blk_no)
# 'ma' denotes the main branch, 'ds' the downsample branch
# When the cfg table has a third row, it lists the layers applied after the branches merge.
# BasicBlock and BottleNeck pass through a ReLU after merging; InvertedResidual does not.
def make_blk_layers(model,blk_ch_table,blk_cfg_table,ml_name,ch_idx,blk_idx):
    """Register one residual-style block (<ml_name>_blk<blk_idx>) on model.

    blk_ch_table[ch_idx] supplies channel/stride/group values referenced by
    index from the cfg entries; blk_cfg_table[0] describes the main branch,
    [1] the downsample branch (only built when ch[0] is truthy), and [-1]
    the layers applied after the two branches merge.
    """
    blk_name = ml_name+'_blk%d'%blk_idx
    ch = blk_ch_table[ch_idx]
    for i in range(2):
        if i == 0:
            prefix = blk_name+'_ma_'
        elif i == 1:
            if ch[0]: #downsample/identity flag
                prefix = blk_name+'_ds_'
            else:
                # No downsample branch for this block: identity shortcut.
                continue
        for j in range(len(blk_cfg_table[i])):
            cfg = blk_cfg_table[i][j]
            if cfg[0] == 'C':
                # cfg[1] encodes which companions follow: 'B' batchnorm,
                # 'RL' ReLU, 'RS' ReLU6.
                name = prefix+'conv%d'%j
                layer = nn.Conv2d(ch[cfg[2]],ch[cfg[3]],kernel_size=cfg[4],stride=ch[cfg[5]],padding=cfg[6],groups=ch[cfg[7]],bias=cfg[8])
                model.add_module(name,layer)
                if 'B' in cfg[1]:
                    name = prefix+'bn%d'%j
                    layer=nn.BatchNorm2d(ch[cfg[3]])
                    model.add_module(name,layer)
                if 'RL' in cfg[1]:
                    name = prefix+'relu%d'%j
                    layer = nn.ReLU(True)
                    model.add_module(name,layer)
                elif 'RS' in cfg[1]:
                    name = prefix+'relus%d'%j
                    layer = nn.ReLU6(True)
                    model.add_module(name,layer)
    # Merge point of the two branches.
    prefix = blk_name+'_'
    for j in range(len(blk_cfg_table[-1])):
        cfg = blk_cfg_table[-1][j]
        if cfg[0] == 'RL': # currently no block uses ReLU6 at the merge point
            name = prefix+'relu%d'%j
            layer = nn.ReLU(True)
            model.add_module(name,layer)
def ml_forward(model,blk_type,ml_idx,blocks,x):
    """Full-precision forward of x through make-layer stage ml<ml_idx>.

    Raises ValueError on an unknown blk_type.
    """
    table_pairs = {
        'BBLK': (bblk_ch_table, bblk_cfg_table),
        'BTNK': (btnk_ch_table, btnk_cfg_table),
        'IRES': (ires_ch_table, ires_cfg_table),
    }
    if blk_type not in table_pairs:
        raise ValueError("ml_forward: Illegal blk_type")
    ch_table, cfg_table = table_pairs[blk_type]
    stage_name = 'ml%d'%ml_idx
    # First block uses channel row 2*ml_idx, the rest share 2*ml_idx+1.
    x = blk_forward(model,ch_table,cfg_table,stage_name,2*ml_idx,0,x)
    for blk_no in range(1,blocks):
        x = blk_forward(model,ch_table,cfg_table,stage_name,2*ml_idx+1,blk_no,x)
    return x
def blk_forward(model,blk_ch_table,blk_cfg_table,ml_name,ch_idx,blk_idx,x):
    """Full-precision forward of x through one block <ml_name>_blk<blk_idx>.

    Runs the main branch and the downsample/identity branch, then applies
    the merge layers described by blk_cfg_table[-1] (elementwise add and,
    for some block types, a ReLU).
    """
    blk_name = ml_name+'_blk%d'%blk_idx
    ch = blk_ch_table[ch_idx]
    outs = []
    for i in range(2):
        tmp=x
        if i == 0:
            prefix = blk_name+'_ma_'
        elif i == 1:
            if ch[0]: #downsample/identity flag
                prefix = blk_name+'_ds_'
            else:
                # Identity shortcut: pass the input through untouched.
                outs.append(tmp)
                continue
        for j in range(len(blk_cfg_table[i])):
            cfg = blk_cfg_table[i][j]
            if cfg[0] == 'C':
                name = prefix+'conv%d'%j
                layer = getattr(model,name)
                tmp = layer(tmp)
                if 'B' in cfg[1]:
                    name = prefix+'bn%d'%j
                    layer = getattr(model,name)
                    tmp = layer(tmp)
                if 'RL' in cfg[1]:
                    name = prefix+'relu%d'%j
                    layer = getattr(model,name)
                    tmp = layer(tmp)
                elif 'RS' in cfg[1]:
                    name = prefix+'relus%d'%j
                    layer = getattr(model,name)
                    tmp = layer(tmp)
        outs.append(tmp)
    # Merge point of the two branches.
    # NOTE(review): assumes an 'AD' entry precedes any 'RL' in
    # blk_cfg_table[-1]; otherwise `out` would be unbound below — confirm
    # against the cfg tables.
    prefix = blk_name+'_'
    for j in range(len(blk_cfg_table[-1])):
        cfg = blk_cfg_table[-1][j]
        if cfg[0] == 'AD':
            if cfg[1] or ch[0]: # unconditional add, or downsample flag set
                out = outs[0] + outs[1]
            else:
                out = outs[0]
        elif cfg[0] == 'RL':
            name = prefix+'relu%d'%j
            layer = getattr(model,name)
            out = layer(out)
    return out
def ml_quantize(model,blk_type,ml_idx,blocks,quant_type,num_bits,e_bits):
    """Register quantized counterparts for every block of stage ml<ml_idx>.

    Raises ValueError on an unknown blk_type.
    """
    table_pairs = {
        'BBLK': (bblk_ch_table, bblk_cfg_table),
        'BTNK': (btnk_ch_table, btnk_cfg_table),
        'IRES': (ires_ch_table, ires_cfg_table),
    }
    if blk_type not in table_pairs:
        raise ValueError("ml_quantize: Illegal blk_type")
    ch_table, cfg_table = table_pairs[blk_type]
    stage_name = 'ml%d'%ml_idx
    # First block uses channel row 2*ml_idx, the rest share 2*ml_idx+1.
    blk_quantize(model,ch_table,cfg_table,stage_name,2*ml_idx,0,quant_type,num_bits,e_bits)
    for blk_no in range(1,blocks):
        blk_quantize(model,ch_table,cfg_table,stage_name,2*ml_idx+1,blk_no,quant_type,num_bits,e_bits)
def blk_quantize(model,blk_ch_table,blk_cfg_table,ml_name,ch_idx,blk_idx,quant_type,num_bits,e_bits):
    """Register quantized counterparts ('q_' prefix) for one block.

    Conv layers are fused with their BN/activation companions into the
    matching QConv* module; the merge point gets a QElementwiseAdd and,
    where the cfg table asks for it, a QReLU.
    """
    blk_name = ml_name+'_blk%d'%blk_idx
    ch = blk_ch_table[ch_idx]
    for i in range(2):
        if i == 0:
            prefix = blk_name+'_ma_'
        elif i == 1:
            if ch[0]: #downsample/identity flag
                prefix = blk_name+'_ds_'
            else:
                # Identity shortcut needs no quantized layers.
                continue
        for j in range(len(blk_cfg_table[i])):
            cfg = blk_cfg_table[i][j]
            if cfg[0] == 'C':
                conv_name = prefix+'conv%d'%j
                conv_layer = getattr(model,conv_name)
                qname = 'q_'+conv_name
                if 'B' in cfg[1]:
                    # Fuse conv with its BN (and activation, if any).
                    bn_name = prefix+'bn%d'%j
                    bn_layer = getattr(model,bn_name)
                    if 'RL' in cfg[1]:
                        qlayer = QConvBNReLU(quant_type,conv_layer,bn_layer,num_bits=num_bits,e_bits=e_bits)
                    elif 'RS' in cfg[1]:
                        qlayer = QConvBNReLU6(quant_type,conv_layer,bn_layer,num_bits=num_bits,e_bits=e_bits)
                    else:
                        qlayer = QConvBN(quant_type,conv_layer,bn_layer,num_bits=num_bits,e_bits=e_bits)
                else:
                    qlayer = QConv2d(quant_type,conv_layer,num_bits=num_bits,e_bits=e_bits)
                model.add_module(qname,qlayer)
    # Merge point of the two branches.
    prefix = blk_name+'_'
    for j in range(len(blk_cfg_table[-1])):
        cfg = blk_cfg_table[-1][j]
        if cfg[0] == 'AD':
            if cfg[1] or ch[0]: # unconditional add, or downsample flag set
                qname = 'q_'+prefix+'add%d'%j
                qlayer = QElementwiseAdd(quant_type,2,qi_array=False,qo=True,num_bits=num_bits,e_bits=e_bits)
                model.add_module(qname,qlayer)
        elif cfg[0] == 'RL':
            qname = 'q_'+prefix+'relu%d'%j
            qlayer = QReLU(quant_type,num_bits=num_bits,e_bits=e_bits)
            model.add_module(qname,qlayer)
def ml_utils(model,blk_type,ml_idx,blocks,func,x=None,qo=None):
    """Dispatch func ('forward'/'inference'/'freeze'/'fakefreeze') over every
    quantized block of make-layer stage ml<ml_idx>.

    Returns (x, last_qo): the transformed activation and the output
    quantizer of the last producing layer, which the caller threads into
    the next stage. Raises ValueError if blk_type is not one of
    'BBLK', 'BTNK' or 'IRES'.
    """
    ml_name = 'ml%d'%ml_idx
    if blk_type == 'BBLK':
        blk_ch_table = bblk_ch_table
        blk_cfg_table = bblk_cfg_table
    elif blk_type == 'BTNK':
        blk_ch_table = btnk_ch_table
        blk_cfg_table = btnk_cfg_table
    elif blk_type == 'IRES':
        blk_ch_table = ires_ch_table
        blk_cfg_table = ires_cfg_table
    else:
        # Fixed: message previously said "ml_quantize" (copy-pasted from the
        # sibling function), which made the error point at the wrong helper.
        raise ValueError("ml_utils: Illegal blk_type")
    last_qo = qo
    # First block uses channel row 2*ml_idx, the rest share 2*ml_idx+1.
    x,last_qo = blk_utils(model,blk_ch_table,blk_cfg_table,ml_name,2*ml_idx,0,func,x,last_qo)
    for i in range(1,blocks):
        x,last_qo = blk_utils(model,blk_ch_table,blk_cfg_table,ml_name,2*ml_idx+1,i,func,x,last_qo)
    return x, last_qo
def blk_utils(model,blk_ch_table,blk_cfg_table,ml_name,ch_idx,blk_idx,func,x=None,qo=None):
    """Dispatch func over one quantized block <ml_name>_blk<blk_idx>.

    func selects the operation: 'forward' (fake-quantized forward),
    'inference' (integer-domain forward), 'freeze' (bake quantization
    parameters, threading quantizers between layers), or 'fakefreeze'.
    Returns (out, last_qo); out is None for freeze-type calls.
    """
    blk_name = ml_name+'_blk%d'%blk_idx
    ch = blk_ch_table[ch_idx]
    outs = []
    qos = []
    for i in range(2):
        # Both branches start from the same input and input quantizer.
        tmp=x
        last_qo = qo
        if i == 0:
            qprefix = 'q_'+blk_name+'_ma_'
        elif i == 1:
            if ch[0]: #downsample/identity flag
                qprefix = 'q_'+blk_name+'_ds_'
            else:
                # Identity shortcut: input tensor/quantizer pass straight through.
                outs.append(tmp)
                qos.append(last_qo)
                continue
        for j in range(len(blk_cfg_table[i])):
            cfg = blk_cfg_table[i][j]
            if cfg[0] == 'C':
                qname = qprefix+'conv%d'%j
                qlayer = getattr(model,qname)
                if func == 'forward':
                    tmp = qlayer(tmp)
                elif func == 'inference':
                    tmp = qlayer.quantize_inference(tmp)
                elif func == 'freeze':
                    qlayer.freeze(last_qo)
                elif func == 'fakefreeze':
                    qlayer.fakefreeze()
                # Conv layers own an output quantizer; thread it onwards.
                last_qo = qlayer.qo
        outs.append(tmp)
        qos.append(last_qo)
    # Merge point of the two branches.
    # NOTE(review): like blk_forward, this assumes an 'AD' entry precedes any
    # 'RL' in blk_cfg_table[-1]; otherwise `out` would be unbound — confirm.
    qprefix = 'q_'+blk_name+'_'
    for j in range(len(blk_cfg_table[-1])):
        cfg = blk_cfg_table[-1][j]
        if cfg[0] == 'AD':
            if cfg[1] or ch[0]: # unconditional add, or downsample flag set
                qname = qprefix+'add%d'%j
                qlayer = getattr(model,qname)
                out = None
                if func == 'forward':
                    out = qlayer(outs)
                elif func == 'inference':
                    out = qlayer.quantize_inference(outs)
                elif func == 'freeze':
                    qlayer.freeze(qos)
                last_qo = qlayer.qo
            else:
                out = outs[0]
                last_qo = qos[0]
        elif cfg[0] == 'RL':
            qname = qprefix+'relu%d'%j
            qlayer = getattr(model,qname)
            if func == 'forward':
                out = qlayer(out)
            elif func == 'inference':
                out = qlayer.quantize_inference(out)
            elif func == 'freeze':
                qlayer.freeze(last_qo)
    return out,last_qo
\ No newline at end of file
import math
import numpy as np
import gol
import torch
import torch.nn as nn
import torch.nn.functional as F
from torch.autograd import Variable
from function import FakeQuantize
def js_div(p_output, q_output, get_softmax=True):
    """Jensen-Shannon divergence between two batches of distributions.

    p_output / q_output are (batch, classes) tensors. When get_softmax is
    True they are treated as raw logits and normalized with softmax first;
    otherwise they must already be probability distributions.
    Returns a scalar tensor (sum-reduced over the batch).
    """
    KLDivLoss = nn.KLDivLoss(reduction='sum')
    if get_softmax:
        # Fixed: the implicit-dim softmax is deprecated and ambiguous for
        # non-2D inputs; normalize explicitly over the class dimension.
        p_output = F.softmax(p_output, dim=1)
        q_output = F.softmax(q_output, dim=1)
    # JS(p, q) = (KL(p || m) + KL(q || m)) / 2 with m = (p + q) / 2.
    # nn.KLDivLoss expects the first argument in log-space.
    log_mean_output = ((p_output + q_output)/2).log()
    return (KLDivLoss(log_mean_output, p_output) + KLDivLoss(log_mean_output, q_output))/2
# Snap a tensor to the nearest representable quantized values
def get_nearest_val(quant_type, x, is_bias=False, block_size=1000000):
    """Snap every element of x to the nearest representable quantized value.

    For 'INT' quantization this is plain (in-place) rounding. For the other
    schemes the list of representable values is fetched from gol (bias values
    differ from weight values via is_bias) and the closest entry is picked
    per element, processing block_size elements at a time to bound the peak
    memory of the pairwise-distance matrix.
    """
    if quant_type == 'INT':
        return x.round_()
    plist = gol.get_value(is_bias)
    shape = x.shape
    xhard = x.view(-1)
    xout = torch.zeros_like(xhard)
    plist = plist.type_as(x)
    # Hoisted out of the loop: the candidate column vector never changes.
    plist_col = plist.unsqueeze(1)
    n_blocks = (x.numel() + block_size - 1) // block_size
    for i in range(n_blocks):
        start_idx = i * block_size
        end_idx = min(start_idx + block_size, xhard.numel())
        xblock = xhard[start_idx:end_idx]
        # Nearest candidate per element of the block.
        idx = (xblock.unsqueeze(0) - plist_col).abs().min(dim=0)[1]
        xhard_block = plist[idx].view(xblock.shape)
        # Straight-through estimator: forward yields the hard value,
        # backward treats the op as identity w.r.t. xblock.
        xout[start_idx:end_idx] = (xhard_block - xblock).detach() + xblock
    return xout.view(shape)
# Maximum of the quantization range under symmetric signed quantization
def get_qmax(quant_type,num_bits=None, e_bits=None):
    """Return the largest representable magnitude for the given scheme.

    'INT' uses the symmetric signed-integer bound, 'POT' is fixed at 1,
    and any other scheme is treated as FLOAT, whose bound is the largest
    mantissa times the largest exponent.
    """
    if quant_type == 'INT':
        return 2. ** (num_bits - 1) - 1
    if quant_type == 'POT':
        return 1
    # FLOAT: num_bits = 1 sign + e_bits exponent + mantissa bits.
    mantissa_bits = num_bits - 1 - e_bits
    ulp = 2 ** (-mantissa_bits)
    max_exponent = 2 ** (2 ** (e_bits - 1))
    full_mantissa = 1. + (2 ** mantissa_bits - 1) * ulp
    return full_mantissa * max_exponent
# Signed symmetric quantization throughout, so the zero point is always 0
def calcScaleZeroPoint(min_val, max_val, qmax):
    """Symmetric quantization parameters: scale is the larger magnitude of
    the observed range divided by qmax; the zero point is fixed at 0."""
    bound = torch.max(max_val.abs(), min_val.abs())
    return bound / qmax, torch.tensor(0.)
# Quantize the input; both input and output are tensors
def quantize_tensor(quant_type, x, scale, zero_point, qmax, is_bias=False):
    """Quantize x: scale, shift, clamp to the symmetric range, then snap to
    the nearest representable code."""
    # The representable range [-qmax, qmax] follows from the bit width.
    q_x = x / scale + zero_point
    q_x.clamp_(-qmax, qmax)
    return get_nearest_val(quant_type, q_x, is_bias)
# Biases use a different (wider) precision; num_bits/e_bits depend on the quantization type
def bias_qmax(quant_type):
    """Quantization bound used for biases, which get wider precision than
    weights/activations."""
    if quant_type == 'INT':
        return get_qmax(quant_type, 64)   # 64-bit integer bias
    if quant_type == 'POT':
        return get_qmax(quant_type)       # POT bound is fixed at 1
    return get_qmax(quant_type, 16, 7)    # float bias: 16 bits, 7-bit exponent
# Convert back to FP32; no further clamping needed
def dequantize_tensor(q_x, scale, zero_point):
    """Map quantized codes back to real values (FP32)."""
    return (q_x - zero_point) * scale
class QParam(nn.Module):
    """Holds the quantization parameters (scale, zero point, observed
    min/max) for one tensor and exposes quantize/dequantize helpers.

    All statistics are registered buffers so they travel with state_dict.
    """
    def __init__(self,quant_type, num_bits=8, e_bits=3):
        super(QParam, self).__init__()
        self.quant_type = quant_type
        self.num_bits = num_bits
        self.e_bits = e_bits
        self.qmax = get_qmax(quant_type, num_bits, e_bits)
        # Empty tensors until the first update() call fills them.
        scale = torch.tensor([], requires_grad=False)
        zero_point = torch.tensor([], requires_grad=False)
        min = torch.tensor([], requires_grad=False)
        max = torch.tensor([], requires_grad=False)
        # Registering as buffers records them in state_dict.
        self.register_buffer('scale', scale)
        self.register_buffer('zero_point', zero_point)
        self.register_buffer('min', min)
        self.register_buffer('max', max)
    # Update running min/max statistics and recompute scale/zero_point.
    def update(self, tensor):
        if self.max.nelement() == 0 or self.max.data < tensor.max().data:
            self.max.data = tensor.max().data
        # Running max is kept >= 0 and running min <= 0 so the range
        # always contains zero.
        self.max.clamp_(min=0)
        if self.min.nelement() == 0 or self.min.data > tensor.min().data:
            self.min.data = tensor.min().data
        self.min.clamp_(max=0)
        self.scale, self.zero_point = calcScaleZeroPoint(self.min, self.max, self.qmax)
    def quantize_tensor(self, tensor):
        """Quantize tensor with this parameter set."""
        return quantize_tensor(self.quant_type, tensor, self.scale, self.zero_point, self.qmax)
    def dequantize_tensor(self, q_x):
        """Dequantize q_x back to real values."""
        return dequantize_tensor(q_x, self.scale, self.zero_point)
    # Ensures the buffers can be restored from a state_dict even though
    # they start out empty (shape mismatch with the default loader).
    def _load_from_state_dict(self, state_dict, prefix, local_metadata, strict, missing_keys, unexpected_keys,
                              error_msgs):
        key_names = ['scale', 'zero_point', 'min', 'max']
        for key in key_names:
            value = getattr(self, key)
            value.data = state_dict[prefix + key].data
            state_dict.pop(prefix + key)
    # Human-readable summary used when printing the object.
    def __str__(self):
        info = 'scale: %.10f ' % self.scale
        info += 'zp: %.6f ' % self.zero_point
        info += 'min: %.6f ' % self.min
        info += 'max: %.6f' % self.max
        return info
# Parent class of the concrete quantized layers; qi/qo quantize the input/output respectively
class QModule(nn.Module):
    """Common base for quantized layers.

    The qi / qo flags decide whether the layer owns quantizers for its
    input and output feature maps; layers without one adopt a neighbor's
    quantizer at freeze() time.
    """
    def __init__(self,quant_type, qi=False, qo=True, num_bits=8, e_bits=3):
        super(QModule, self).__init__()
        if qi:
            self.qi = QParam(quant_type,num_bits, e_bits)
        if qo:
            self.qo = QParam(quant_type,num_bits, e_bits)
        self.quant_type = quant_type
        self.num_bits = num_bits
        self.e_bits = e_bits
        self.bias_qmax = bias_qmax(quant_type)
    def freeze(self):
        """No-op by default; subclasses bake quantized parameters here."""
        return None
    def quantize_inference(self, x):
        raise NotImplementedError('quantize_inference should be implemented.')
    def fakefreeze(self):
        """No-op by default; subclasses undo freeze() here."""
        return None
"""
QModule 量化卷积
:quant_type: 量化类型
:conv_module: 卷积模块
:qi: 是否量化输入特征图
:qo: 是否量化输出特征图
:num_bits: 8位bit数
"""
class QConv2d(QModule):
    """Quantized Conv2d: owns a weight quantizer (qw) and the requantization
    multiplier M, and supports fake-quantized training, freezing to the
    integer domain, and integer-domain inference."""
    def __init__(self, quant_type, conv_module, qi=False, qo=True, num_bits=8, e_bits=3):
        super(QConv2d, self).__init__(quant_type, qi, qo, num_bits, e_bits)
        self.conv_module = conv_module
        self.qw = QParam(quant_type, num_bits,e_bits)
        self.register_buffer('M', torch.tensor([], requires_grad=False)) # register M as a buffer
    # freeze() bakes the truly-quantized weights back into the wrapped
    # full-precision layer, which also makes divergence computation easy.
    def freeze(self, qi=None, qo=None):
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        # Pooling/activation layers need no fresh min/max statistics; they
        # share the quantizer of the layer whose output they consume.
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        # Requantization multiplier, eq. (3) of
        # https://zhuanlan.zhihu.com/p/156835141
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data
        self.conv_module.weight.data = self.qw.quantize_tensor(self.conv_module.weight.data)
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point
        # The conv may have no bias; forward/inference then pass None as well.
        if self.conv_module.bias is not None:
            self.conv_module.bias.data = quantize_tensor(self.quant_type,
                                                         self.conv_module.bias.data, scale=self.qi.scale * self.qw.scale,
                                                         zero_point=0.,qmax=self.bias_qmax, is_bias=True)
    def fakefreeze(self):
        """Undo freeze(): restore dequantized (real-valued) weights/bias."""
        self.conv_module.weight.data = self.qw.dequantize_tensor(self.conv_module.weight.data)
        if self.conv_module.bias is not None:
            self.conv_module.bias.data = dequantize_tensor(self.conv_module.bias.data,scale=self.qi.scale*self.qw.scale,zero_point=0.)
    def forward(self, x): # fake-quantized forward; x is a float tensor
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi) # fake-quantize the input
        # Update qw before the conv so the weight scale is current.
        self.qw.update(self.conv_module.weight.data)
        # Only x and weight statistics are tracked here; bias stays unquantized.
        tmp_wgt = FakeQuantize.apply(self.conv_module.weight, self.qw)
        x = F.conv2d(x, tmp_wgt, self.conv_module.bias,
                     stride=self.conv_module.stride,
                     padding=self.conv_module.padding,
                     dilation=self.conv_module.dilation,
                     groups=self.conv_module.groups)
        if hasattr(self, 'qo'):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)
        return x
    # Integer-domain inference via q_a = M(\sigma(q_w-Z_w)(q_x-Z_x) + q_b)
    def quantize_inference(self, x): # x is already quantized here
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        x = self.M * x
        x = get_nearest_val(self.quant_type,x)
        x = x + self.qo.zero_point
        return x
class QLinear(QModule):
    """Quantized fully-connected layer; mirrors QConv2d for nn.Linear."""
    def __init__(self, quant_type, fc_module, qi=False, qo=True, num_bits=8, e_bits=3):
        super(QLinear, self).__init__(quant_type, qi, qo, num_bits, e_bits)
        self.fc_module = fc_module
        self.qw = QParam(quant_type, num_bits, e_bits)
        self.register_buffer('M', torch.tensor([], requires_grad=False)) # register M as a buffer
    def freeze(self, qi=None, qo=None):
        """Bake quantized weights/bias into fc_module and compute M."""
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        # Requantization multiplier combining input/weight/output scales.
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data
        self.fc_module.weight.data = self.qw.quantize_tensor(self.fc_module.weight.data)
        self.fc_module.weight.data = self.fc_module.weight.data - self.qw.zero_point
        if self.fc_module.bias is not None:
            self.fc_module.bias.data = quantize_tensor(self.quant_type,
                                                       self.fc_module.bias.data, scale=self.qi.scale * self.qw.scale,
                                                       zero_point=0., qmax=self.bias_qmax, is_bias=True)
    def fakefreeze(self):
        """Undo freeze(): restore dequantized (real-valued) weights/bias."""
        self.fc_module.weight.data = self.qw.dequantize_tensor(self.fc_module.weight.data)
        if self.fc_module.bias is not None:
            self.fc_module.bias.data = dequantize_tensor(self.fc_module.bias.data,scale=self.qi.scale*self.qw.scale,zero_point=0.)
    def forward(self, x):
        """Fake-quantized forward: quantize input/weight stats, run linear."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        self.qw.update(self.fc_module.weight.data)
        tmp_wgt = FakeQuantize.apply(self.fc_module.weight, self.qw)
        x = F.linear(x, tmp_wgt, self.fc_module.bias)
        if hasattr(self, 'qo'):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)
        return x
    def quantize_inference(self, x):
        """Integer-domain inference; x is already quantized."""
        x = x - self.qi.zero_point
        x = self.fc_module(x)
        x = self.M * x
        x = get_nearest_val(self.quant_type,x)
        x = x + self.qo.zero_point
        return x
class QReLU(QModule):
    """Quantized ReLU; reuses the preceding layer's quantizer (no qo)."""

    def __init__(self,quant_type, qi=False, qo=False, num_bits=8, e_bits=3):
        super(QReLU, self).__init__(quant_type, qi, qo, num_bits, e_bits)

    def freeze(self, qi=None):
        """Adopt the preceding layer's output quantizer as our qi."""
        owns_qi = hasattr(self, 'qi')
        if owns_qi and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not owns_qi and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if qi is not None:
            self.qi = qi

    def forward(self, x):
        """Fake-quantize the input (when owned) and apply ReLU."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        return F.relu(x)

    def quantize_inference(self, x):
        """Integer-domain ReLU: clip every code below the zero point."""
        out = x.clone()
        below_zero = out < self.qi.zero_point
        out[below_zero] = self.qi.zero_point
        return out
class QReLU6(QModule):
    """Quantized ReLU6; reuses the preceding layer's quantizer (no qo)."""

    def __init__(self,quant_type, qi=False, qo=False, num_bits=8, e_bits=3):
        super(QReLU6, self).__init__(quant_type, qi, qo, num_bits, e_bits)

    def freeze(self, qi=None):
        """Adopt the preceding layer's output quantizer as our qi."""
        owns_qi = hasattr(self, 'qi')
        if owns_qi and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not owns_qi and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if qi is not None:
            self.qi = qi

    def forward(self, x):
        """Fake-quantize the input (when owned) and apply ReLU6."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        return F.relu6(x)

    def quantize_inference(self, x):
        """Integer-domain ReLU6: clamp codes to [0, quantize(6)]."""
        out = x.clone()
        qupper = self.qi.quantize_tensor(torch.tensor(6))
        out.clamp_(min=0,max=qupper.item())
        return out
class QMaxPooling2d(QModule):
    """Quantized max pooling; order-preserving, so it simply reuses the
    input quantizer and pools the codes directly."""

    def __init__(self, quant_type, kernel_size=3, stride=1, padding=0, qi=False, qo=False, num_bits=8,e_bits=3):
        super(QMaxPooling2d, self).__init__(quant_type, qi, qo, num_bits, e_bits)
        self.kernel_size = kernel_size
        self.stride = stride
        self.padding = padding

    def freeze(self, qi=None):
        """Adopt the preceding layer's output quantizer as our qi."""
        owns_qi = hasattr(self, 'qi')
        if owns_qi and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not owns_qi and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if qi is not None:
            self.qi = qi

    def forward(self, x):
        """Fake-quantize the input (when owned) and max-pool it."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)

    def quantize_inference(self, x):
        """Max pooling commutes with quantization: pool the codes as-is."""
        return F.max_pool2d(x, self.kernel_size, self.stride, self.padding)
class QAdaptiveAvgPool2d(QModule):
    """Quantized adaptive average pooling; averaging mixes values, so it
    needs its own output quantizer and a requantization multiplier M."""
    def __init__(self, quant_type, output_size, qi=False, qo=True, num_bits=8,e_bits=3):
        super(QAdaptiveAvgPool2d, self).__init__(quant_type, qi, qo, num_bits, e_bits)
        self.output_size = output_size
        self.register_buffer('M', torch.tensor([], requires_grad=False)) # register M as a buffer
    def freeze(self, qi=None, qo=None):
        """Adopt/validate the quantizers and compute M = qi.scale/qo.scale."""
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        # No weights involved, so M only rescales input codes to output codes.
        self.M.data = (self.qi.scale / self.qo.scale).data
    def forward(self, x):
        """Fake-quantized forward: pool, then track/quantize the output."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        x = F.adaptive_avg_pool2d(x, self.output_size)
        if hasattr(self, 'qo'):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)
        return x
    def quantize_inference(self, x):
        """Integer-domain inference: shift, pool, rescale by M, round."""
        x = x - self.qi.zero_point
        x = F.adaptive_avg_pool2d(x, self.output_size)
        x = self.M * x
        x = get_nearest_val(self.quant_type,x)
        x = x+self.qo.zero_point
        return x
class QConvBN(QModule):
    """Quantized Conv2d fused with BatchNorm2d (no activation).

    The BN statistics are folded into the conv weights/bias, so after
    freeze() the wrapped conv alone performs the fused computation.
    """
    def __init__(self, quant_type, conv_module, bn_module, qi=False, qo=True, num_bits=8, e_bits=3):
        super(QConvBN, self).__init__(quant_type, qi, qo, num_bits, e_bits)
        self.conv_module = conv_module
        self.bn_module = bn_module
        self.qw = QParam(quant_type, num_bits,e_bits)
        self.register_buffer('M', torch.tensor([], requires_grad=False)) # register M as a buffer
    def fold_bn(self, mean, std):
        """Fold BN(mean, std) into the conv, returning fused (weight, bias)."""
        if self.bn_module.affine:
            gamma_ = self.bn_module.weight / std
            weight = self.conv_module.weight * gamma_.view(self.conv_module.out_channels, 1, 1, 1)
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean + self.bn_module.bias
            else:
                bias = self.bn_module.bias - gamma_ * mean
        else:
            gamma_ = 1 / std
            weight = self.conv_module.weight * gamma_
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean
            else:
                bias = -gamma_ * mean
        return weight, bias
    def freeze(self, qi=None, qo=None):
        """Fold BN into the conv and bake quantized weights/bias into it."""
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        # Requantization multiplier combining input/weight/output scales.
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data
        std = torch.sqrt(self.bn_module.running_var + self.bn_module.eps)
        weight, bias = self.fold_bn(self.bn_module.running_mean, std)
        self.conv_module.weight.data = self.qw.quantize_tensor(weight.data)
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point
        if self.conv_module.bias is not None:
            self.conv_module.bias.data = quantize_tensor(self.quant_type,
                                                         bias.data, scale=self.qi.scale * self.qw.scale,
                                                         zero_point=0., qmax=self.bias_qmax,is_bias=True)
        else:
            # Folding always yields a bias; attach it to the conv.
            bias = quantize_tensor(self.quant_type,
                                   bias, scale=self.qi.scale * self.qw.scale,
                                   zero_point=0., qmax=self.bias_qmax,is_bias=True)
            self.conv_module.bias = torch.nn.Parameter(bias)
    def fakefreeze(self):
        # NOTE(review): assumes freeze() already ran, so conv_module.bias exists.
        self.conv_module.weight.data = self.qw.dequantize_tensor(self.conv_module.weight.data)
        self.conv_module.bias.data = dequantize_tensor(self.conv_module.bias.data,scale=self.qi.scale*self.qw.scale,zero_point=0.)
    def forward(self, x):
        """Fake-quantized forward with BN folded on the fly."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        if self.training:
            # Recompute batch statistics and update BN's running stats
            # manually, mirroring what nn.BatchNorm2d would do.
            y = F.conv2d(x, self.conv_module.weight, self.conv_module.bias,
                         stride=self.conv_module.stride,
                         padding=self.conv_module.padding,
                         dilation=self.conv_module.dilation,
                         groups=self.conv_module.groups)
            y = y.permute(1, 0, 2, 3) # NCHW -> CNHW
            y = y.contiguous().view(self.conv_module.out_channels, -1) # CNHW -> C,NHW
            # mean = y.mean(1)
            # var = y.var(1)
            mean = y.mean(1).detach()
            var = y.var(1).detach()
            self.bn_module.running_mean = \
                (1 - self.bn_module.momentum) * self.bn_module.running_mean + \
                self.bn_module.momentum * mean
            self.bn_module.running_var = \
                (1 - self.bn_module.momentum) * self.bn_module.running_var + \
                self.bn_module.momentum * var
        else:
            mean = Variable(self.bn_module.running_mean)
            var = Variable(self.bn_module.running_var)
        std = torch.sqrt(var + self.bn_module.eps)
        weight, bias = self.fold_bn(mean, std)
        self.qw.update(weight.data)
        x = F.conv2d(x, FakeQuantize.apply(weight, self.qw), bias,
                     stride=self.conv_module.stride,
                     padding=self.conv_module.padding,
                     dilation=self.conv_module.dilation,
                     groups=self.conv_module.groups)
        if hasattr(self, 'qo'):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)
        return x
    def quantize_inference(self, x):
        """Integer-domain inference on the frozen (BN-folded) conv."""
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        x = self.M * x
        x = get_nearest_val(self.quant_type,x)
        x = x + self.qo.zero_point
        return x
class QConvBNReLU(QModule):
    """Quantized Conv2d fused with BatchNorm2d and a trailing ReLU.

    Identical to QConvBN except the forward applies ReLU and
    quantize_inference clamps the output codes at zero.
    """
    def __init__(self, quant_type, conv_module, bn_module, qi=False, qo=True, num_bits=8, e_bits=3):
        super(QConvBNReLU, self).__init__(quant_type, qi, qo, num_bits, e_bits)
        self.conv_module = conv_module
        self.bn_module = bn_module
        self.qw = QParam(quant_type, num_bits,e_bits)
        self.register_buffer('M', torch.tensor([], requires_grad=False)) # register M as a buffer
    def fold_bn(self, mean, std):
        """Fold BN(mean, std) into the conv, returning fused (weight, bias)."""
        if self.bn_module.affine:
            gamma_ = self.bn_module.weight / std
            weight = self.conv_module.weight * gamma_.view(self.conv_module.out_channels, 1, 1, 1)
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean + self.bn_module.bias
            else:
                bias = self.bn_module.bias - gamma_ * mean
        else:
            gamma_ = 1 / std
            weight = self.conv_module.weight * gamma_
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean
            else:
                bias = -gamma_ * mean
        return weight, bias
    def freeze(self, qi=None, qo=None):
        """Fold BN into the conv and bake quantized weights/bias into it."""
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        # Requantization multiplier combining input/weight/output scales.
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data
        std = torch.sqrt(self.bn_module.running_var + self.bn_module.eps)
        weight, bias = self.fold_bn(self.bn_module.running_mean, std)
        self.conv_module.weight.data = self.qw.quantize_tensor(weight.data)
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point
        if self.conv_module.bias is not None:
            self.conv_module.bias.data = quantize_tensor(self.quant_type,
                                                         bias.data, scale=self.qi.scale * self.qw.scale,
                                                         zero_point=0., qmax=self.bias_qmax,is_bias=True)
        else:
            # Folding always yields a bias; attach it to the conv.
            bias = quantize_tensor(self.quant_type,
                                   bias, scale=self.qi.scale * self.qw.scale,
                                   zero_point=0., qmax=self.bias_qmax,is_bias=True)
            self.conv_module.bias = torch.nn.Parameter(bias)
    def fakefreeze(self):
        # NOTE(review): assumes freeze() already ran, so conv_module.bias exists.
        self.conv_module.weight.data = self.qw.dequantize_tensor(self.conv_module.weight.data)
        self.conv_module.bias.data = dequantize_tensor(self.conv_module.bias.data,scale=self.qi.scale*self.qw.scale,zero_point=0.)
    def forward(self, x):
        """Fake-quantized forward with BN folded on the fly, then ReLU."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        if self.training:
            # Recompute batch statistics and update BN's running stats
            # manually, mirroring what nn.BatchNorm2d would do.
            y = F.conv2d(x, self.conv_module.weight, self.conv_module.bias,
                         stride=self.conv_module.stride,
                         padding=self.conv_module.padding,
                         dilation=self.conv_module.dilation,
                         groups=self.conv_module.groups)
            y = y.permute(1, 0, 2, 3) # NCHW -> CNHW
            y = y.contiguous().view(self.conv_module.out_channels, -1) # CNHW -> C,NHW
            # mean = y.mean(1)
            # var = y.var(1)
            mean = y.mean(1).detach()
            var = y.var(1).detach()
            self.bn_module.running_mean = \
                (1 - self.bn_module.momentum) * self.bn_module.running_mean + \
                self.bn_module.momentum * mean
            self.bn_module.running_var = \
                (1 - self.bn_module.momentum) * self.bn_module.running_var + \
                self.bn_module.momentum * var
        else:
            mean = Variable(self.bn_module.running_mean)
            var = Variable(self.bn_module.running_var)
        std = torch.sqrt(var + self.bn_module.eps)
        weight, bias = self.fold_bn(mean, std)
        self.qw.update(weight.data)
        x = F.conv2d(x, FakeQuantize.apply(weight, self.qw), bias,
                     stride=self.conv_module.stride,
                     padding=self.conv_module.padding,
                     dilation=self.conv_module.dilation,
                     groups=self.conv_module.groups)
        x = F.relu(x)
        if hasattr(self, 'qo'):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)
        return x
    def quantize_inference(self, x):
        """Integer-domain inference; the final clamp realizes the ReLU."""
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        x = self.M * x
        x = get_nearest_val(self.quant_type,x)
        x = x + self.qo.zero_point
        x.clamp_(min=0)
        return x
class QConvBNReLU6(QModule):
    """Fused Conv2d + BatchNorm + ReLU6 block with fake quantization.

    BatchNorm is folded into the convolution (fold_bn); qw holds the weight
    quantization parameters and the buffer M is the combined rescale factor
    qw.scale * qi.scale / qo.scale used during integer inference.
    """
    def __init__(self, quant_type, conv_module, bn_module, qi=False, qo=True, num_bits=8, e_bits=3):
        super(QConvBNReLU6, self).__init__(quant_type, qi, qo, num_bits, e_bits)
        self.conv_module = conv_module
        self.bn_module = bn_module
        self.qw = QParam(quant_type, num_bits, e_bits)
        # register M as a buffer so it is stored/restored with the state dict
        self.register_buffer('M', torch.tensor([], requires_grad=False))

    def fold_bn(self, mean, std):
        """Fold BN statistics (and affine gamma/beta, if any) into conv weight/bias."""
        if self.bn_module.affine:
            gamma_ = self.bn_module.weight / std
            weight = self.conv_module.weight * gamma_.view(self.conv_module.out_channels, 1, 1, 1)
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean + self.bn_module.bias
            else:
                bias = self.bn_module.bias - gamma_ * mean
        else:
            gamma_ = 1 / std
            # NOTE(review): unlike the affine branch, gamma_ is not reshaped to
            # (out_channels, 1, 1, 1) here, so it broadcasts over the last
            # weight dimension — confirm this is intended.
            weight = self.conv_module.weight * gamma_
            if self.conv_module.bias is not None:
                bias = gamma_ * self.conv_module.bias - gamma_ * mean
            else:
                bias = -gamma_ * mean
        return weight, bias

    def freeze(self, qi=None, qo=None):
        """Bind qi/qo, compute M, and replace conv params with quantized folded ones."""
        if hasattr(self, 'qi') and qi is not None:
            raise ValueError('qi has been provided in init function.')
        if not hasattr(self, 'qi') and qi is None:
            raise ValueError('qi is not existed, should be provided.')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        if qi is not None:
            self.qi = qi
        if qo is not None:
            self.qo = qo
        self.M.data = (self.qw.scale * self.qi.scale / self.qo.scale).data
        std = torch.sqrt(self.bn_module.running_var + self.bn_module.eps)
        weight, bias = self.fold_bn(self.bn_module.running_mean, std)
        # store the weight in the integer domain (zero-point subtracted)
        self.conv_module.weight.data = self.qw.quantize_tensor(weight.data)
        self.conv_module.weight.data = self.conv_module.weight.data - self.qw.zero_point
        if self.conv_module.bias is not None:
            self.conv_module.bias.data = quantize_tensor(self.quant_type,
                                                         bias.data, scale=self.qi.scale * self.qw.scale,
                                                         zero_point=0., qmax=self.bias_qmax, is_bias=True)
        else:
            # conv had no bias: quantize the folded bias and attach it as a new Parameter
            bias = quantize_tensor(self.quant_type,
                                   bias, scale=self.qi.scale * self.qw.scale,
                                   zero_point=0., qmax=self.bias_qmax, is_bias=True)
            self.conv_module.bias = torch.nn.Parameter(bias)

    def fakefreeze(self):
        """Map the frozen integer weights/bias back to float at the FP32 scale."""
        self.conv_module.weight.data = self.qw.dequantize_tensor(self.conv_module.weight.data)
        self.conv_module.bias.data = dequantize_tensor(self.conv_module.bias.data, scale=self.qi.scale*self.qw.scale, zero_point=0.)

    def forward(self, x):
        """Fake-quantized forward with folded BN; same as QConvBNReLU but ReLU6."""
        if hasattr(self, 'qi'):
            self.qi.update(x)
            x = FakeQuantize.apply(x, self.qi)
        if self.training:
            y = F.conv2d(x, self.conv_module.weight, self.conv_module.bias,
                         stride=self.conv_module.stride,
                         padding=self.conv_module.padding,
                         dilation=self.conv_module.dilation,
                         groups=self.conv_module.groups)
            y = y.permute(1, 0, 2, 3)  # NCHW -> CNHW
            y = y.contiguous().view(self.conv_module.out_channels, -1)  # CNHW -> C,NHW
            # mean = y.mean(1)
            # var = y.var(1)
            mean = y.mean(1).detach()
            var = y.var(1).detach()
            # EMA update of the BN running statistics with this batch's stats
            self.bn_module.running_mean = \
                (1 - self.bn_module.momentum) * self.bn_module.running_mean + \
                self.bn_module.momentum * mean
            self.bn_module.running_var = \
                (1 - self.bn_module.momentum) * self.bn_module.running_var + \
                self.bn_module.momentum * var
        else:
            mean = Variable(self.bn_module.running_mean)
            var = Variable(self.bn_module.running_var)
        std = torch.sqrt(var + self.bn_module.eps)
        weight, bias = self.fold_bn(mean, std)
        self.qw.update(weight.data)
        x = F.conv2d(x, FakeQuantize.apply(weight, self.qw), bias,
                     stride=self.conv_module.stride,
                     padding=self.conv_module.padding,
                     dilation=self.conv_module.dilation,
                     groups=self.conv_module.groups)
        x = F.relu6(x)
        if hasattr(self, 'qo'):
            self.qo.update(x)
            x = FakeQuantize.apply(x, self.qo)
        return x

    def quantize_inference(self, x):
        """Integer inference; ReLU6 becomes clamp(0, quantized 6)."""
        upper = torch.tensor(6)
        qupper = self.qo.quantize_tensor(upper)
        x = x - self.qi.zero_point
        x = self.conv_module(x)
        x = self.M * x
        x = get_nearest_val(self.quant_type, x)
        x = x + self.qo.zero_point
        x.clamp_(min=0, max=qupper.item())
        return x
# Base class for quantized layers that take several inputs; qi and qo are the
# quantized input/output parameters (the inputs, or qo, may arrive as arrays).
class QModule_array(nn.Module):
    """Base class for quantized layers that consume multiple input branches.

    When qi_array is True one QParam per branch is registered (qi0..qi{len-1});
    qo is the shared output QParam.  The parameter name `len` shadows the
    builtin but is kept for interface compatibility.
    """
    def __init__(self, quant_type, len, qi_array=False, qo=True, num_bits=8, e_bits=3):
        super(QModule_array, self).__init__()
        if qi_array:
            for i in range(len):
                self.add_module('qi%d' % i, QParam(quant_type, num_bits, e_bits))
        if qo:
            self.qo = QParam(quant_type, num_bits, e_bits)
        self.quant_type = quant_type
        self.num_bits = num_bits
        self.e_bits = e_bits
        self.bias_qmax = bias_qmax(quant_type)
        self.len = len

    def freeze(self):
        # no-op; concrete subclasses override with their own freeze logic
        pass

    def quantize_inference(self, x):
        raise NotImplementedError('quantize_inference should be implemented.')
class QElementwiseAdd(QModule_array):
    """Quantized elementwise addition of two input branches."""
    def __init__(self, quant_type, len, qi_array=False, qo=True, num_bits=8, e_bits=3):
        super(QElementwiseAdd, self).__init__(quant_type, len, qi_array, qo, num_bits, e_bits)
        # one rescale factor M_i = qi_i.scale / qo.scale per branch
        for i in range(len):
            self.register_buffer('M%d' % i, torch.tensor([], requires_grad=False))

    def freeze(self, qi_array=None, qo=None):
        """Bind the per-branch input QParams and qo, then precompute each M_i."""
        if qi_array is None:
            raise ValueError('qi_array should be provided')
        elif len(qi_array) != self.len:
            raise ValueError('qi_array len no match')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        for i in range(self.len):
            self.add_module('qi%d' % i, qi_array[i])
        if qo is not None:
            self.qo = qo
        for i in range(self.len):
            getattr(self, 'M%d' % i).data = (getattr(self, 'qi%d' % i).scale / self.qo.scale).data

    def forward(self, x_array):
        """Fake-quantize each input, add them, then track output statistics."""
        outs = []
        for i in range(self.len):
            x = x_array[i]
            if hasattr(self, 'qi%d' % i):
                qi = getattr(self, 'qi%d' % i)
                qi.update(x)
                x = FakeQuantize.apply(x, qi)
            outs.append(x)
        out = outs[0] + outs[1]
        if hasattr(self, 'qo'):
            # BUG FIX: track the statistics of the sum, not of the last input
            # (previously `self.qo.update(x)` which used the final branch only)
            self.qo.update(out)
            out = FakeQuantize.apply(out, self.qo)
        return out

    def quantize_inference(self, x_array):
        """Integer-domain add: rescale each branch by M_i, sum, round, re-offset."""
        outs = []
        for i in range(self.len):
            qi = getattr(self, 'qi%d' % i)
            x = x_array[i] - qi.zero_point
            x = getattr(self, 'M%d' % i) * x
            outs.append(x)
        out = outs[0] + outs[1]
        out = get_nearest_val(self.quant_type, out)
        out = out + self.qo.zero_point
        return out
class QConcat(QModule_array):
    """Quantized channel-wise (dim=1) concatenation of several input branches."""
    def __init__(self, quant_type, len, qi_array=False, qo=True, num_bits=8, e_bits=3):
        super(QConcat, self).__init__(quant_type, len, qi_array, qo, num_bits, e_bits)
        # one rescale factor M_i = qi_i.scale / qo.scale per branch
        for i in range(len):
            self.register_buffer('M%d' % i, torch.tensor([], requires_grad=False))

    def freeze(self, qi_array=None, qo=None):
        """Bind the per-branch input QParams and qo, then precompute each M_i."""
        if qi_array is None:
            raise ValueError('qi_array should be provided')
        elif len(qi_array) != self.len:
            raise ValueError('qi_array len no match')
        if hasattr(self, 'qo') and qo is not None:
            raise ValueError('qo has been provided in init function.')
        if not hasattr(self, 'qo') and qo is None:
            raise ValueError('qo is not existed, should be provided.')
        for i in range(self.len):
            self.add_module('qi%d' % i, qi_array[i])
        if qo is not None:
            self.qo = qo
        for i in range(self.len):
            getattr(self, 'M%d' % i).data = (getattr(self, 'qi%d' % i).scale / self.qo.scale).data

    def forward(self, x_array):
        """Fake-quantize each input, concat along dim 1, then track output statistics."""
        outs = []
        for i in range(self.len):
            x = x_array[i]
            if hasattr(self, 'qi%d' % i):
                qi = getattr(self, 'qi%d' % i)
                qi.update(x)
                x = FakeQuantize.apply(x, qi)
            outs.append(x)
        out = torch.cat(outs, 1)
        if hasattr(self, 'qo'):
            # BUG FIX: track the statistics of the concatenated output, not of
            # the last input branch (previously `self.qo.update(x)`)
            self.qo.update(out)
            out = FakeQuantize.apply(out, self.qo)
        return out

    def quantize_inference(self, x_array):
        """Integer-domain concat: rescale each branch by M_i, concat, round, re-offset."""
        outs = []
        for i in range(self.len):
            qi = getattr(self, 'qi%d' % i)
            x = x_array[i] - qi.zero_point
            x = getattr(self, 'M%d' % i) * x
            outs.append(x)
        out = torch.concat(outs, 1)
        out = get_nearest_val(self.quant_type, out)
        out = out + self.qo.zero_point
        return out
\ No newline at end of file
import os
import torch
import time
import random
import numpy as np
import pickle
import mia_utils
import utils
from model import MIA_Model
import os.path as osp
import dataset
import gol
import sys
import openpyxl
from train_one import WarmUpLR
from torch.optim import SGD, Adam
from torch.optim.lr_scheduler import _LRScheduler, CosineAnnealingLR, MultiStepLR
def direct_quantize(args, model, data, device='cpu'):
    """Run calibration forward passes so quantization ranges can be collected.

    args.mode ('distill_target' / 'distill_shadow') selects which loader from
    `data` feeds the calibration; the augmented variant is used unless the
    model opts out via `augment_training = False`.
    """
    print("====Begin Direct Quantize!====")
    use_aug = not hasattr(model, 'augment_training') or model.augment_training
    if use_aug:
        if args.mode == 'distill_target':
            print('load aug_target_dataset ... ')
            train_loader = data.aug_target_train_loader
        elif args.mode == 'distill_shadow':
            print('load aug_shadow_dataset ...')
            train_loader = data.aug_shadow_train_loader
    else:
        if args.mode == 'distill_target':
            print('load target_dataset ... ')
            train_loader = data.target_train_loader
        elif args.mode == 'distill_shadow':
            print('load shadow_dataset ...')
            train_loader = data.shadow_train_loader
    model.eval()
    count = 0
    for x, y, idx in train_loader:
        b_x = x.to(device)
        b_y = y.to(device)
        model.quantize_forward(b_x)
        count += 1
        # TODO: the calibration budget (500 batches) may need per-dataset tuning
        if count % 500 == 0:
            break
    print('direct quantization finish')
def full_inference(args, model, data, device='cpu'):
    """Evaluate the full-precision model on the target/shadow test split.

    Returns (top1_acc, top5_acc) as plain Python numbers.
    """
    if args.mode == 'distill_target':
        print('load target_dataset ... ')
        test_loader = data.target_test_loader
    elif args.mode == 'distill_shadow':
        print('load aug_shadow_dataset ...')
        test_loader = data.shadow_test_loader
    model.eval()
    top1 = dataset.AverageMeter()
    top5 = dataset.AverageMeter()
    with torch.no_grad():
        for batch in test_loader:
            inputs = batch[0].to(device)
            labels = batch[1].to(device)
            logits = model(inputs)
            prec1, prec5 = dataset.accuracy(logits, labels, topk=(1, 5))
            top1.update(prec1[0], inputs.size(0))
            top5.update(prec5[0], inputs.size(0))
    top1_acc = top1.avg.data.cpu().numpy()[()]
    top5_acc = top5.avg.data.cpu().numpy()[()]
    print('\nTest set: Full Model Accuracy: {:.2f}%'.format(top1_acc))
    return top1_acc, top5_acc
def quantize_inference(args, model, data, device='cpu'):
    """Evaluate the frozen (PTQ) model on the target/shadow test split.

    Same protocol as full_inference but routed through model.quantize_inference.
    Returns (top1_acc, top5_acc) as plain Python numbers.
    """
    if args.mode == 'distill_target':
        print('load target_dataset ... ')
        test_loader = data.target_test_loader
    elif args.mode == 'distill_shadow':
        print('load aug_shadow_dataset ...')
        test_loader = data.shadow_test_loader
    model.eval()
    top1 = dataset.AverageMeter()
    top5 = dataset.AverageMeter()
    with torch.no_grad():
        for batch in test_loader:
            inputs = batch[0].to(device)
            labels = batch[1].to(device)
            logits = model.quantize_inference(inputs)
            prec1, prec5 = dataset.accuracy(logits, labels, topk=(1, 5))
            top1.update(prec1[0], inputs.size(0))
            top5.update(prec5[0], inputs.size(0))
    top1_acc = top1.avg.data.cpu().numpy()[()]
    top5_acc = top5.avg.data.cpu().numpy()[()]
    print('\nTest set: PTQ Model Accuracy: {:.2f}%'.format(top1_acc))
    return top1_acc, top5_acc
# The training procedure differs depending on whether this is a distill run.
# The model and its params config were stored at creation time; load both here, then train.
# model_path_tar, model_path_dis = 'mia_ckpt/{}/{}'.format(args.seed, args.mode)
# untrained_model_tar, untrained_model_dis => model_name = '{}_mobilenetv2'.format(args.data)...
def train(args, model_path_tar, untrained_model_tar, model_path_dis = None, untrained_model_dis = None, device='cpu'):
    """Train a target/shadow model, or distill one from a trained target model.

    args.mode selects the flavor ('distill_target' / 'distill_shadow' / plain).
    When args.quant_type is set and mode is a distill mode, the trained
    target/shadow model is post-training quantized first, its accuracy loss is
    logged to an xlsx sheet, and distillation runs against the quantized model.
    """
    print('Training models...')
    # distillation: load_model returns (model, params)
    if 'distill' in args.mode:
        # the untrained distill model (an epoch-0 checkpoint is saved at creation)
        trained_model, model_params = load_model(args, model_path_dis, untrained_model_dis, epoch=0)
        # the fully trained target model (args.epochs is its final training epoch)
        trained_model_tar, model_params_tar = load_model(args, model_path_tar, untrained_model_tar, epoch=args.epochs)
        trained_model_tar.to(device)
    # normal training
    else:
        # the untrained target model (epoch-0 checkpoint saved at creation)
        trained_model, model_params = load_model(args, model_path_tar, untrained_model_tar, epoch=0)
    print(model_params)
    trained_model.to(device)
    # augmented dataset used to train the distill model (distill target and
    # distill shadow share the same training data)
    # FIX: batch size could be made configurable here
    dataset = mia_utils.get_dataset(model_params['task'], args.mode, aug=True, batch_size=512)
    lr = 0.1
    weight_decay = 5e-4
    momentum = 0.9
    gamma = 0.2
    # num_epochs = args.epochs
    optimizer = SGD(filter(lambda p: p.requires_grad, trained_model.parameters()), lr=lr, momentum=momentum, weight_decay=weight_decay)
    milestones = [60, 120, 160]
    scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=gamma)
    # model_name has the form: dataset + model architecture
    if 'distill' in args.mode:
        trained_model_name = untrained_model_dis
    else:
        trained_model_name = untrained_model_tar
    # TODO quantization support
    if args.quant_type is not None:
        print('Training: {}... with PTQ Target Model'.format(trained_model_name))
        # dataset_t: used to calibrate/quantize the target (or shadow) model
        if args.mode == 'distill_target':
            dataset_t = mia_utils.get_dataset(model_params['task'], mode='target', aug=True, batch_size=512)
        elif args.mode == 'distill_shadow':
            dataset_t = mia_utils.get_dataset(model_params['task'], mode='shadow', aug=True, batch_size=512)
    else:
        if args.mode == 'distill_target':
            print('Training: {}... with Target Model'.format(trained_model_name))
        elif args.mode == 'distill_shadow':
            print('Training: {}... with Shadow Model'.format(trained_model_name))
    # metric containers can be appended to, recording per-step data
    gol._init()
    # PTQ-quantize trained_model_tar (works for both target and shadow)
    if args.quant_type is not None and 'distill' in args.mode:
        top1, top5 = full_inference(args, trained_model_tar, dataset_t, device)
        if args.quant_type != 'INT':
            bias_list = utils.build_bias_list(args.quant_type)
            gol.set_value(bias_list, is_bias=True)
        if args.quant_type == 'FLOAT':
            title = '%s_%d_E%d' % (args.quant_type, args.num_bits, args.e_bits)
        else:
            title = '%s_%d' % (args.quant_type, args.num_bits)
        print('\nPTQ: '+title)
        # storage path of the PTQ'd target/shadow model
        ptq_file_prefix = model_path_tar + '/' + f'{args.data}_{args.model}/'
        # set up the quantization value table
        if args.quant_type != 'INT':
            plist = utils.build_list(args.quant_type, args.num_bits, args.e_bits)
            gol.set_value(plist)
        # load an existing PTQ checkpoint if requested and present
        if args.load_ptq is True and osp.exists(ptq_file_prefix + title + '.pt'):
            trained_model_tar.quantize(args.quant_type, args.num_bits, args.e_bits)
            trained_model_tar.load_state_dict(torch.load(ptq_file_prefix + title + '.pt'))
            trained_model_tar.to(device)
            print('Successfully load ptq model: ' + title)
        else:
            trained_model_tar.quantize(args.quant_type, args.num_bits, args.e_bits)
            # TODO
            trained_model_tar.eval()
            direct_quantize(args, trained_model_tar, dataset_t, device)
            if args.store_ptq == True:
                torch.save(trained_model_tar.state_dict(), ptq_file_prefix + title + '.pt')
        trained_model_tar.to(device)
        trained_model_tar.freeze()
        # TODO
        ptq_top1, ptq_top5 = quantize_inference(args, trained_model_tar, dataset_t, device)
        acc_loss = (top1 - ptq_top1) / top1
        if args.mode == 'distill_target':
            print(f"Target Model Quantization Finished, Acc Loss:{acc_loss}")
        elif args.mode == 'distill_shadow':
            print(f"Shadow Model Quantization Finished, Acc Loss:{acc_loss}")
        else:
            print("Error: invalid mode in train")
            sys.exit()
        # only target-model runs log these results; shadow runs skip this
        if args.mode == 'distill_target':
            filename =f'{args.model}_mia_result.xlsx'
            try:
                # load the workbook if it already exists
                workbook = openpyxl.load_workbook(filename)
            except FileNotFoundError:
                # otherwise create a fresh Excel workbook
                workbook = openpyxl.Workbook()
            if args.data not in workbook.sheetnames:
                # create the per-dataset sheet and write its header rows
                worksheet = workbook.create_sheet(title=args.data)
                worksheet.cell(row=1,column=1,value='FP32-acc')
                worksheet.cell(row=1,column=2,value=top1)
                worksheet.cell(row=3,column=1,value='title')
                worksheet.cell(row=3,column=2,value='js_div')
                worksheet.cell(row=3,column=4,value='ptq_acc')
                worksheet.cell(row=3,column=5,value='acc_loss')
                worksheet.cell(row=3,column=6,value='AUC')
                worksheet.cell(row=3,column=7,value='ACC')
            else:
                worksheet = workbook[args.data]
                worksheet.cell(row=1,column=2,value=top1)
            # row index for this quantization title within the sheet
            idx = mia_utils.GlobalVariables.title_list.index(title)
            idx += 4
            worksheet.cell(row=idx,column=1,value=title)
            worksheet.cell(row=idx,column=4,value=ptq_top1)
            worksheet.cell(row=idx,column=5,value=acc_loss)
            workbook.save(filename)
        trained_model_name_ptq = trained_model_name + '_' +title
    # the actual training happens here via train_func
    # distill target / distill shadow training
    if 'distill' in args.mode:
        print(f'total distill model training epochs:{args.epochs_distill}')
        metrics = trained_model.train_func(args, trained_model_tar, trained_model, dataset, args.epochs_distill, optimizer, scheduler, model_params, model_path_dis, trained_model_name, device=device)
    # target / shadow model training
    else:
        print(f'total model training epochs:{args.epochs}')
        metrics = trained_model.train_func(args, trained_model, dataset, args.epochs, optimizer, scheduler, model_params, model_path_tar, trained_model_name, device=device)
    # record results
    model_params['train_top1_acc'] = metrics['train_top1_acc']
    model_params['test_top1_acc'] = metrics['test_top1_acc']
    model_params['train_top5_acc'] = metrics['train_top5_acc']
    model_params['test_top5_acc'] = metrics['test_top5_acc']
    model_params['epoch_times'] = metrics['epoch_times']
    total_training_time = sum(model_params['epoch_times'])
    model_params['total_time'] = total_training_time
    print('Training took {} seconds...'.format(total_training_time))
    # store the trained weights and model_params; the name encodes whether/how
    # the underlying target model was quantized
    # model_path_dis = 'mia_ckpt/{}/{}'.format(args.seed, args.mode)
    if args.quant_type is not None and 'distill' in args.mode:
        save_model(trained_model, model_params, model_path_dis, trained_model_name_ptq, epoch=args.epochs_distill)
    else:
        if 'distill' in args.mode:
            save_model(trained_model, model_params, model_path_dis, trained_model_name, epoch=args.epochs_distill)
    # NOTE(review): only the last epoch of the target/shadow model is kept;
    # consider saving the best epoch instead (see cnn_train in mia_utils)
    # else:
    #     save_model(trained_model, model_params, model_path_tar, trained_model_name, epoch=num_epochs)
# Configure model info, then create and train the models
def train_models(args, model_path_tar, model_path_dis, device='cpu'):
    """Create the target model (and, for distill modes, the distill model) and train.

    create_model returns the model name, instantiates it, and stores the
    untrained checkpoint plus its model_params.
    """
    cnn_tar = create_model(model_path_tar, args)
    if 'distill' not in args.mode:
        train(args, model_path_tar, cnn_tar, device=device)
        return
    cnn_dis = create_model(model_path_dis, args)
    train(args, model_path_tar, cnn_tar, model_path_dis, cnn_dis, device=device)
# Restore a model's weight parameters
def load_model(args, model_path, model_name, epoch=0):
    """Rebuild a MIA_Model and restore the weights saved for the given epoch.

    epoch == 0 loads the 'untrained' checkpoint, epoch == -1 the 'last' one,
    otherwise the numbered epoch checkpoint.  Returns (model, model_params).
    """
    model_params = load_params(model_path, model_name, epoch)
    model = MIA_Model(args, model_params, args.model, args.data)
    # model_name is the key path component distinguishing checkpoints
    network_path = model_path + '/' + model_name
    if epoch == 0:
        load_path = network_path + '/untrained'
    elif epoch == -1:
        load_path = network_path + '/last'
    else:
        load_path = network_path + '/' + str(epoch)
    map_loc = None if torch.cuda.is_available() else torch.device('cpu')
    state = torch.load(load_path, map_location=map_loc)
    model.load_state_dict(state, strict=False)
    return model, model_params
# Restore the model_params record dict
def load_params(models_path, model_name, epoch=0):
    """Load the pickled model_params dict saved next to a checkpoint.

    epoch == 0 -> 'parameters_untrained', epoch == -1 -> 'parameters_last',
    otherwise 'parameters_<epoch>'.
    """
    base = models_path + '/' + model_name
    if epoch == 0:
        suffix = 'parameters_untrained'
    elif epoch == -1:
        suffix = 'parameters_last'
    else:
        suffix = f'parameters_{epoch}'
    with open(base + '/' + suffix, 'rb') as fh:
        return pickle.load(fh)
# Set up model info, instantiate the model, and store the untrained checkpoint;
# model_name appears in the storage path as the identifier
def create_model(models_path, args):
    """Assemble model_params, instantiate the untrained model, persist it,
    and return its name ('<data>_<model>')."""
    print(f'Creating untrained {args.data}_{args.model}...')
    model_params = get_data_params(args.data)  # dataset-related parameters
    # fields used later to decide which architecture to load
    model_params['network_type'] = args.model
    model_params['augment_training'] = True
    model_params['init_weights'] = True
    model_name = f'{args.data}_{args.model}'
    model_name = save_networks(args, model_name, model_params, models_path)
    return model_name
# Instantiate the model and persist it via save_model; only used when creating a model
def save_networks(args, model_name, model_params, model_path):
    """Build the MIA_Model and store its untrained checkpoint; returns model_name.

    args.model uses the partly-capitalized form, args.data the lowercase one.
    """
    print('Saving model...')
    model = MIA_Model(args, model_params, args.model, args.data)
    # epoch=0 stores the untrained weights so later loading is uniform
    save_model(model, model_params, model_path, model_name, epoch=0)
    return model_name
# Store the model's weights and model_params, keyed by epoch
def save_model(model, model_params, model_path, model_name, epoch=-1):
    """Persist model weights (and model_params, if given) under model_path/model_name.

    epoch == 0 -> 'untrained', epoch == -1 -> 'last', otherwise the epoch
    number; the params pickle uses the matching 'parameters_<tag>' name.
    """
    # model_name is '<dataset>_<architecture>' (plus a PTQ title when quantized)
    network_path = model_path + '/' + model_name
    for directory in (model_path, network_path):
        if not os.path.exists(directory):
            os.makedirs(directory)
    if epoch == 0:
        tag = 'untrained'
    elif epoch == -1:
        tag = 'last'
    else:
        tag = str(epoch)
    torch.save(model.state_dict(), network_path + '/' + tag)
    if model_params is not None:
        with open(network_path + '/parameters_' + tag, 'wb') as f:
            pickle.dump(model_params, f, pickle.HIGHEST_PROTOCOL)
# Dataset configuration, returning its settings (input_size, num_classes, etc.);
# params['task'] records which dataset is used
def get_data_params(data):
    """Map a dataset name to its parameter dict.

    Supported: 'cinic10', 'cifar10', 'cifar100'.  Previously an unknown name
    silently returned None, deferring the failure to a confusing point later;
    now it raises ValueError immediately (backward-compatible for valid names).
    """
    if data == 'cinic10':
        return cinic10_params()
    elif data == 'cifar10':
        return cifar10_params()
    elif data == 'cifar100':
        return cifar100_params()
    raise ValueError(f'Unknown dataset: {data!r}')
def cinic10_params():
    """Return the dataset configuration for CINIC-10."""
    return {
        'task': 'cinic10',
        'input_size': 32,
        'num_classes': 10,
    }
def cifar10_params():
    """Return the dataset configuration for CIFAR-10."""
    return {
        'task': 'cifar10',
        'input_size': 32,
        'num_classes': 10,
    }
def cifar100_params():
    """Return the dataset configuration for CIFAR-100."""
    return {
        'task': 'cifar100',
        'input_size': 32,
        'num_classes': 100,
    }
import openpyxl
from mia_utils import *
import module
import gol
import argparse
import numpy as np
import torch
from sklearn.neighbors import KernelDensity
from scipy.stats import pearsonr
from scipy.spatial import distance
def js_divergence_rows(p, q):
    """Compute the JS divergence between corresponding rows of p and q.

    Returns a float64 numpy array with one divergence value per row.
    """
    n_rows = p.shape[0]
    result = np.zeros(n_rows)
    for row in range(n_rows):
        result[row] = module.js_div(p[row], q[row])
    return result
if __name__ == '__main__':
    # Compute the JS divergence between member/non-member loss trajectories
    # for the FP32 model and every PTQ variant, and log them to the workbook.
    parser = argparse.ArgumentParser(description='DIV_TrajectoryMIA')
    parser.add_argument('--model', type=str, default='resnet18', help=['AlexNet','AlexNet_BN','VGG_16','VGG_19','Inception_BN','ResNet_18','ResNet_50','ResNet_152','MobileNetV2'])
    parser.add_argument('--data', type=str, default='cifar10', help=['cinic10', 'cifar10', 'cifar100'])
    args = parser.parse_args()
    # open the result workbook
    filename =f'{args.model}_mia_result.xlsx'
    workbook = openpyxl.load_workbook(filename)
    worksheet = workbook[args.data]
    # fp32 js
    data_path = f'mia_ckpt/0/target/{args.data}_{args.model}/trajectory_test_data.npy'
    dataSet = np.load(data_path, allow_pickle=True).item()
    data = torch.from_numpy(np.array(dataSet['model_trajectory'], dtype='f'))
    data_i = torch.from_numpy(np.array(dataSet['model_loss_ori'], dtype='f'))
    # split into two tensors according to member_status (0 = out, 1 = in)
    data_0 = data[dataSet['member_status'] == 0].transpose(0, 1)
    data_1 = data[dataSet['member_status'] == 1].transpose(0, 1)
    data_i0 = data_i[dataSet['member_status'] == 0]
    data_i1 = data_i[dataSet['member_status'] == 1]
    # append the final loss as one extra trajectory step
    c0 = torch.cat((data_0, data_i0.unsqueeze(0)),0)
    c1 = torch.cat((data_1, data_i1.unsqueeze(0)),0)
    c0 = c0.t()
    c1 = c1.t()
    # NOTE(review): this jensenshannon result is immediately overwritten below
    div = distance.jensenshannon(c0.numpy(),c1.numpy())
    div = module.js_div(data_0, data_1)
    div = div.item()
    if div<0:
        div = 0
    model_name = f'{args.data}_{args.model}'
    print(f"js div of {model_name}: {div}")
    worksheet.cell(row=2,column=1,value='div')
    worksheet.cell(row=2,column=2,value=div)
    # compute js for every PTQ variant in one sweep
    gol._init()
    # quant_type_list = ['INT','POT','FP']
    quant_type_list = ['INT','POT','FLOAT']
    for quant_type in quant_type_list:
        num_bit_list = numbit_list(quant_type)
        for num_bits in num_bit_list:
            e_bit_list = ebit_list(quant_type,num_bits)
            for e_bits in e_bit_list:
                if quant_type == 'FLOAT':
                    title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
                else:
                    title = '%s_%d' % (quant_type, num_bits)
                model_name_ptq = f'{args.data}_{args.model}_{title}'
                p_data_path = f'mia_ckpt/0/target/{model_name_ptq}/trajectory_test_data.npy'
                p_dataSet = np.load(p_data_path, allow_pickle=True).item()
                p_data = torch.from_numpy(np.array(p_dataSet['model_trajectory'], dtype='f'))
                i_data = torch.from_numpy(np.array(p_dataSet['model_loss_ori'], dtype='f'))
                # split into two tensors according to member_status
                p_data_0 = p_data[p_dataSet['member_status'] == 0].transpose(0, 1)
                p_data_1 = p_data[p_dataSet['member_status'] == 1].transpose(0, 1)
                i_data_0 = i_data[p_dataSet['member_status'] == 0]
                i_data_1 = i_data[p_dataSet['member_status'] == 1]
                # print(f"i_data_0 shape: {i_data_0.shape}")
                c0 = torch.cat((p_data_0, i_data_0.unsqueeze(0)),0)
                c1 = torch.cat((p_data_1, i_data_1.unsqueeze(0)),0)
                c0 = c0.t()
                c1 = c1.t()
                div = distance.jensenshannon(c0.numpy(),c1.numpy())
                # --- earlier aggregation experiments kept for reference ---
                # print(f"c0 shape:{c0.shape}")
                # print(f"c1 shape:{c1.shape}")
                # sum_c0 = torch.sum(c0, dim=1)
                # sum_c1 = torch.sum(c1, dim=1)
                # reshape c0 and c1 to shape (101, 20, 500)
                # c0_reshaped = torch.reshape(c0, (101, 20, 500))
                # c1_reshaped = torch.reshape(c1, (101, 20, 500))
                # sum over dim 2: one value per group of 500 columns
                # (result shape 101*20)
                # sum_c0 = torch.sum(c0_reshaped, dim=2)
                # sum_c1 = torch.sum(c1_reshaped, dim=2)
                # st0 = sum_c0.t()
                # st1 = sum_c1.t()
                # div = distance.jensenshannon(st0.numpy(),st1.numpy())
                # print(f'sum_c0 shape:{sum_c0.shape}')
                # jsd_sum = distance.jensenshannon(sum_c0.numpy(), sum_c1.numpy())
                # ALL
                # div = module.js_div(data,p_data)
                # ALL_2
                # div = module.js_div(c0, c1)
                # aggregation choice is tunable here
                # div = jsd_sum.sum()\
                # div = div.item()
                div = div.sum()
                print(f"div_sum:{div}")
                # if div<0:
                #     div = 0
                idx = GlobalVariables.title_list.index(title)
                idx += 4
                worksheet.cell(row=idx,column=2,value=div)
    workbook.save(filename)
from torch.serialization import load
from model import *
from extract_ratio import *
from utils import *
from dataloader import DataLoader
import gol
import openpyxl
import sys
import torch
import os
import os.path as osp
def direct_quantize(model, test_loader, device):
    """Feed up to 100 batches through quantize_forward to calibrate ranges."""
    with torch.no_grad():
        for batch_idx, (inputs, _target) in enumerate(test_loader, 1):
            model.quantize_forward(inputs.to(device))
            if batch_idx % 100 == 0:
                break
    print('direct quantization finish')
def full_inference(model, test_loader, device):
    """Top-1 accuracy (percent) of the full-precision model over test_loader."""
    correct = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            # print(pred)
            preds = model(inputs).argmax(dim=1, keepdim=True)
            correct += preds.eq(labels.view_as(preds)).sum().item()
    accuracy = 100. * correct / len(test_loader.dataset)
    print('\nTest set: Full Model Accuracy: {:.2f}%'.format(accuracy))
    return accuracy
def quantize_inference(model, test_loader, device):
    """Top-1 accuracy (percent) of the frozen quantized model over test_loader."""
    correct = 0
    with torch.no_grad():
        for inputs, labels in test_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            preds = model.quantize_inference(inputs).argmax(dim=1, keepdim=True)
            correct += preds.eq(labels.view_as(preds)).sum().item()
    accuracy = 100. * correct / len(test_loader.dataset)
    print('Test set: Quant Model Accuracy: {:.2f}%'.format(accuracy))
    return accuracy
if __name__ == "__main__":
    # PTQ sweep for one model/dataset pair: quantize under every
    # (quant_type, num_bits, e_bits) combination, measure accuracy loss and
    # weight-distribution JS divergence, and log results to xlsx + txt.
    batch_size = 128
    model_name = sys.argv[1]
    dataset = sys.argv[2]
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    print(device)
    dataloader = DataLoader(dataset,batch_size)
    train_loader, val_loader, test_loader = dataloader.getloader()
    load_ptq = False
    store_ptq = True
    append = False
    gol._init()
    ckpt_full_path = 'ckpt_full/'+dataset
    ckpt_quant_path = 'ckpt_quant/'+dataset
    ptq_result_path = 'ptq_result/'+dataset
    if not osp.exists(ckpt_quant_path) and store_ptq:
        os.makedirs(ckpt_quant_path)
    if not osp.exists(ptq_result_path):
        os.makedirs(ptq_result_path)
    excel_path = ptq_result_path+'/'+model_name+'.xlsx'
    txt_path = ptq_result_path+'/'+model_name+'.txt'
    # in append mode, skip models that already have results
    if append and os.path.exists(excel_path) and os.path.exists(txt_path):
        print("> Exit: "+model_name+"has been quantized")
        sys.exit()
    else:
        workbook = openpyxl.Workbook()
        ft = open(txt_path,'w')
    full_file = ckpt_full_path+'/'+model_name+'.pt'
    ptq_file_dir = ckpt_quant_path+'/'+model_name
    if not osp.exists(ptq_file_dir):
        os.makedirs(ptq_file_dir)
    model = Model(model_name,dataset)
    model.load_state_dict(torch.load(full_file))
    model.to(device)
    model.eval()
    full_acc = full_inference(model, test_loader, device)
    # fold_model mutates the model in place
    fold_model(model)
    Mac,Param,layer, par_ratio, flop_ratio = extract_ratio(model_name,dataset)
    par_ratio, flop_ratio = fold_ratio(layer, par_ratio, flop_ratio)
    # snapshot the FP32 conv/fc parameters for later JS comparison
    full_names = []
    full_params = []
    for name, param in model.named_parameters():
        if 'conv' in name or 'fc' in name:
            full_names.append(name)
            full_params.append(param.data.cpu())
    quant_type_list = ['INT','POT','FLOAT']
    title_list = []
    js_flops_list = []
    js_flops_wt_log_list = []
    js_flops_wt_cbrt_list = []
    js_param_list = []
    ptq_acc_list = []
    acc_loss_list = []
    for quant_type in quant_type_list:
        num_bit_list = numbit_list(quant_type)
        # the bias quantization table only needs setting once per quant type;
        # INT bit-widths are large, so a table would be too costly — plain
        # rounding (_round) is used for INT instead
        if quant_type != 'INT':
            bias_list = build_bias_list(quant_type)
            gol.set_value(bias_list, is_bias=True)
        for num_bits in num_bit_list:
            e_bit_list = ebit_list(quant_type,num_bits)
            for e_bits in e_bit_list:
                model_ptq = Model(model_name,dataset)
                if quant_type == 'FLOAT':
                    title = '%s_%d_E%d' % (quant_type, num_bits, e_bits)
                else:
                    title = '%s_%d' % (quant_type, num_bits)
                print('\n'+model_name+': PTQ: '+title)
                title_list.append(title)
                # set up the quantization value table
                if quant_type != 'INT':
                    plist = build_list(quant_type, num_bits, e_bits)
                    gol.set_value(plist)
                # load an existing PTQ checkpoint if requested and present
                ptq_file = ptq_file_dir +'/' + title + '.pt'
                if load_ptq is True and osp.exists(ptq_file):
                    model_ptq.quantize(quant_type,num_bits,e_bits)
                    model_ptq.load_state_dict(torch.load(ptq_file))
                    model_ptq.to(device)
                    model_ptq.eval()
                    print('Successfully load ptq model: ' + title)
                else:
                    model_ptq.load_state_dict(torch.load(full_file))
                    model_ptq.to(device)
                    model_ptq.quantize(quant_type,num_bits,e_bits)
                    model_ptq.eval()
                    direct_quantize(model_ptq, train_loader, device)
                    if store_ptq:
                        torch.save(model_ptq.state_dict(), ptq_file)
                model_ptq.freeze()
                ptq_acc = quantize_inference(model_ptq, test_loader, device)
                ptq_acc_list.append(ptq_acc)
                acc_loss = (full_acc - ptq_acc) / full_acc
                acc_loss_list.append(acc_loss)
                # dequantize the distribution back to the FP32 scale
                model_ptq.fakefreeze()
                # JS divergence weighted by FLOPs / parameter-count ratios
                js_flops = 0.
                js_param = 0.
                for name, param in model_ptq.named_parameters():
                    if 'conv' not in name and 'fc' not in name:
                        continue
                    prefix = name.rsplit('.',1)[0]
                    layer_idx = layer.index(prefix)
                    name_idx = full_names.index(name)
                    # NOTE(review): layer_idx is computed twice (duplicate line)
                    layer_idx = layer.index(prefix)
                    ptq_param = param.data.cpu()
                    js = js_div(ptq_param,full_params[name_idx])
                    js = js.item()
                    if js < 0.:
                        js = 0.
                    js_flops = js_flops + js * flop_ratio[layer_idx]
                    js_param = js_param + js * par_ratio[layer_idx]
                js_flops_wt_log = js_flops * torch.log10(torch.tensor(Mac)).item()
                js_flops_wt_cbrt = js_flops * torch.pow(torch.tensor(Mac),1/3).item()
                js_flops_list.append(js_flops)
                js_flops_wt_log_list.append(js_flops_wt_log)
                js_flops_wt_cbrt_list.append(js_flops_wt_cbrt)
                js_param_list.append(js_param)
                print(title + ': js_flops: %f js_flops_wt_log: %f js_flops_wt_cbrt: %f js_param: %f acc_loss: %f' % (js_flops,js_flops_wt_log, js_flops_wt_cbrt, js_param, acc_loss))
    # write results to xlsx
    worksheet = workbook.active
    worksheet.cell(row=1,column=1,value='FP32-acc')
    worksheet.cell(row=1,column=2,value=full_acc)
    worksheet.cell(row=1,column=3,value='Mac')
    worksheet.cell(row=1,column=4,value=Mac)
    worksheet.cell(row=1,column=5,value='Param')
    worksheet.cell(row=1,column=6,value=Param)
    worksheet.cell(row=3,column=1,value='title')
    worksheet.cell(row=3,column=2,value='js_flops')
    worksheet.cell(row=3,column=3,value='js_flops_wt_log')
    worksheet.cell(row=3,column=4,value='js_flops_wt_cbrt')
    worksheet.cell(row=3,column=5,value='js_param')
    worksheet.cell(row=3,column=6,value='ptq_acc')
    worksheet.cell(row=3,column=7,value='acc_loss')
    for i in range(len(title_list)):
        worksheet.cell(row=i+4, column=1, value=title_list[i])
        worksheet.cell(row=i+4, column=2, value=js_flops_list[i])
        worksheet.cell(row=i+4, column=3, value=js_flops_wt_log_list[i])
        worksheet.cell(row=i+4, column=4, value=js_flops_wt_cbrt_list[i])
        worksheet.cell(row=i+4, column=5, value=js_param_list[i])
        worksheet.cell(row=i+4, column=6, value=ptq_acc_list[i])
        worksheet.cell(row=i+4, column=7, value=acc_loss_list[i])
    workbook.save(excel_path)
    # plain-text dump of the same results
    print(model_name,file=ft)
    print('Full_acc: %f'%full_acc,file=ft)
    print('title_list:',file=ft)
    print(title_list,file=ft)
    print('js_flops_list:',file=ft)
    print(js_flops_list, file=ft)
    print('js_flops_wt_log_list:',file=ft)
    print(js_flops_wt_log_list, file=ft)
    print('js_flops_wt_cbrt_list:',file=ft)
    print(js_flops_wt_cbrt_list, file=ft)
    print('js_param_list:',file=ft)
    print(js_param_list, file=ft)
    print('ptq_acc_list:',file=ft)
    print(ptq_acc_list, file=ft)
    print('acc_loss_list:',file=ft)
    print(acc_loss_list, file=ft)
    print("\n",file=ft)
    ft.close()
#!/bin/bash
# SLURM batch script: runs ptq_one.py for one model/dataset pair.
# $Model and $Dataset are injected by the submit scripts via `sbatch --export=...`.
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
# NOTE(review): the inline comment above is stale -- the requested limit is 1 day 6 hours.
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#SBATCH --constraint="Ampere|RTX8000"
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
# module load cuda-cudnn/10.2-7.6.5
# module load cuda-cudnn/11.2-8.2.1
module load cuda-cudnn/11.1-8.2.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
python ptq_one.py $Model $Dataset
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
### Update 2023.6.28
ALL_2中是对思路2的实现,后续在代码稳定下来后将补充该readme
- **思路1:仿照精度方面的做法,预测量化后的攻击ACC和AUC相对于全精度模型的差值。**
**假设:**认为攻击者**不知道**要攻击的模型是全精度的还是量化后的。
**做法:**采用全精度的影子模型、蒸馏影子模型。使用原有的攻击模型攻击量化后的目标模型。
**预测:**以量化后模型与全精度模型的损失轨迹差异度为自变量,以攻击模型的ACC变化量或AUC变化量为因变量进行预测。
- **思路2:直接预测量化后模型、全精度模型的攻击成功率和AUC**
**假设:**认为攻击者**知道**要攻击的模型是全精度的还是量化后的。
**做法:**采用量化后的影子模型、蒸馏影子模型。使用重新训练的攻击模型攻击量化后的目标模型。
**预测:**以 **Disparate Vulnerability to Membership Inference Attacks** 中的理论为依据,可以知道``Worst-case Vulnerability = Distributional Generalization``,可以近似理解为攻击模型的ACC或AUC可以用模型的某个指标的差异来预测。这个指标可以是loss,模型输出的向量,也可以是损失轨迹,差异指的是对于训练集数据和非训练集数据在该指标度量下的差异。
#!/bin/bash
# Submit one full-precision generation job (Quant=False) per model on CIFAR-10.
# Logs go to ret_one/<model>/ (%x = job name, %j = job id).
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
# mkdir -p is a no-op on existing directories, so no existence test is needed.
mkdir -p "ckpt_full_gen/cifar10"
for name in $name_list; do
    mkdir -p "ret_one/$name"
    # Quote expansions so names can never be word-split by the shell.
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar10,Quant=False gen_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit one full-precision generation job (Quant=False) per model on CIFAR-100.
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
# mkdir -p is idempotent; the explicit existence checks were redundant.
mkdir -p "ckpt_full_gen/cifar100"
for name in $name_list; do
    mkdir -p "ret_one/$name"
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar100,Quant=False gen_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit a single full-precision generation job (Quant=False) for model $1 on CIFAR-10.
# mkdir -p is idempotent; quote $1 against word splitting.
mkdir -p "ret_one/$1"
mkdir -p "ckpt_full_gen/cifar10/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar10,Quant=False gen_one.slurm
#!/bin/bash
# Submit a single full-precision generation job (Quant=False) for model $1 on CIFAR-100.
mkdir -p "ret_one/$1"
mkdir -p "ckpt_full_gen/cifar100/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar100,Quant=False gen_one.slurm
#!/bin/bash
# Submit one quantized generation job (Quant=True) per model on CIFAR-10.
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
# mkdir -p is idempotent; the explicit existence checks were redundant.
mkdir -p "ckpt_quant_gen/cifar10"
for name in $name_list; do
    mkdir -p "ret_one/$name"
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar10,Quant=True gen_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit one quantized generation job (Quant=True) per model on CIFAR-100.
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
mkdir -p "ckpt_quant_gen/cifar100"
for name in $name_list; do
    mkdir -p "ret_one/$name"
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar100,Quant=True gen_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit a single quantized generation job (Quant=True) for model $1 on CIFAR-10.
mkdir -p "ret_one/$1"
mkdir -p "ckpt_quant_gen/cifar10/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar10,Quant=True gen_one.slurm
#!/bin/bash
# Submit a single quantized generation job (Quant=True) for model $1 on CIFAR-100.
mkdir -p "ret_one/$1"
mkdir -p "ckpt_quant_gen/cifar100/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar100,Quant=True gen_one.slurm
#!/bin/bash
# Submit a distillation MIA job: Model=$1, Dataset=$2, Distill=$3.
# mkdir -p is idempotent; quote expansions against word splitting.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_distill.slurm
\ No newline at end of file
#!/bin/bash
# Submit a divergence MIA job: Model=$1, Dataset=$2.
# Fix: sbatch writes logs to ret_one/%x/ (= ret_one/$1/), but that directory
# was never created, so SLURM could not open the output files. Create it,
# matching the sibling submit scripts.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2" mia_one_div.slurm
\ No newline at end of file
#!/bin/bash
# Submit an FP32 MIA job: Model=$1, Dataset=$2, Distill=$3.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_fp32.slurm
\ No newline at end of file
#!/bin/bash
# Submit the FP stage-1 MIA job: Model=$1, Dataset=$2, Distill=$3.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_fp_s1.slurm
\ No newline at end of file
#!/bin/bash
# Submit the FP stage-2 MIA job: Model=$1, Dataset=$2, Distill=$3.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_fp_s2.slurm
\ No newline at end of file
#!/bin/bash
# Submit the FP stage-3 MIA job: Model=$1, Dataset=$2, Distill=$3.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_fp_s3.slurm
\ No newline at end of file
#!/bin/bash
# Submit the FP stage-4 MIA job: Model=$1, Dataset=$2, Distill=$3.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_fp_s4.slurm
\ No newline at end of file
#!/bin/bash
# Submit the INT stage-1 MIA job: Model=$1, Dataset=$2, Distill=$3.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_int_s1.slurm
\ No newline at end of file
#!/bin/bash
# Submit the INT stage-2 MIA job: Model=$1, Dataset=$2, Distill=$3.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_int_s2.slurm
\ No newline at end of file
#!/bin/bash
# Submit the POT MIA job: Model=$1, Dataset=$2, Distill=$3.
# Fix: create ret_one/$1 (not just ret_one) because -o/-e point at
# ret_one/%x/... and SLURM does not create missing log directories.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2",Distill="$3" mia_one_pot.slurm
\ No newline at end of file
#!/bin/bash
# Submit the property-divergence MIA job: Model=$1, Dataset=$2.
# Fix: create ret_one/$1 (not just ret_one) because -o/-e point at
# ret_one/%x/... and SLURM does not create missing log directories.
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset="$2" mia_one_property_div.slurm
\ No newline at end of file
#!/bin/bash
# Submit the parameter/FLOPs profiling job for CIFAR-10.
# mkdir -p is a no-op on an existing directory, so no existence test is needed.
mkdir -p "param_flops/cifar10"
sbatch --export=Dataset=cifar10 get_param_flops.slurm
\ No newline at end of file
#!/bin/bash
# Submit the parameter/FLOPs profiling job for CIFAR-100.
mkdir -p "param_flops/cifar100"
sbatch --export=Dataset=cifar100 get_param_flops.slurm
\ No newline at end of file
#!/bin/bash
# Submit one PTQ job per model on CIFAR-10.
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
for name in $name_list; do
    # mkdir -p is idempotent; the explicit existence checks were redundant.
    mkdir -p "ret_one/$name"
    mkdir -p "ckpt_quant/cifar10/$name"
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar10 ptq_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit one PTQ job per model on CIFAR-100.
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
for name in $name_list; do
    mkdir -p "ret_one/$name"
    mkdir -p "ckpt_quant/cifar100/$name"
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar100 ptq_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit a single PTQ job for model $1 on CIFAR-10.
mkdir -p "ret_one/$1"
mkdir -p "ckpt_quant/cifar10/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar10 ptq_one.slurm
#!/bin/bash
# Submit a single PTQ job for model $1 on CIFAR-100.
mkdir -p "ret_one/$1"
mkdir -p "ckpt_quant/cifar100/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar100 ptq_one.slurm
#!/bin/bash
# Submit one full-precision training job per model on CIFAR-10.
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
mkdir -p "ckpt_full/cifar10"
for name in $name_list; do
    mkdir -p "ret_one/$name"
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar10 train_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit one full-precision training job per model on CIFAR-100.
name_list="ResNet_152 ResNet_50 ResNet_18 MobileNetV2 Inception_BN VGG_19 VGG_16 AlexNet_BN AlexNet"
mkdir -p "ckpt_full/cifar100"
for name in $name_list; do
    mkdir -p "ret_one/$name"
    sbatch --job-name="$name" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$name",Dataset=cifar100 train_one.slurm
done
\ No newline at end of file
#!/bin/bash
# Submit a single full-precision training job for model $1 on CIFAR-10.
mkdir -p "ret_one/$1"
mkdir -p "ckpt_full/cifar10"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar10 train_one.slurm
\ No newline at end of file
#!/bin/bash
# Submit a single full-precision training job for model $1 on CIFAR-100.
mkdir -p "ckpt_full/cifar100"
mkdir -p "ret_one/$1"
sbatch --job-name="$1" -o "ret_one/%x/%j.out" -e "ret_one/%x/%j.err" --export=Model="$1",Dataset=cifar100 train_one.slurm
\ No newline at end of file
from model import *
from dataloader import DataLoader
import torch
import torch.nn as nn
import torch.optim as optim
from torchvision import datasets, transforms
from torch.optim.lr_scheduler import _LRScheduler,MultiStepLR
import os
import os.path as osp
import time
import sys
class WarmUpLR(_LRScheduler):
    """Linear learning-rate warmup scheduler.

    During the warmup phase the learning rate grows linearly from ~0 up to
    each parameter group's base LR: lr = base_lr * step / total_iters.

    Args:
        optimizer: the wrapped optimizer (e.g. SGD).
        total_iters: number of warmup iterations (batches) in the phase.
        last_epoch: index of the last step; -1 starts a fresh schedule.
    """

    def __init__(self, optimizer, total_iters, last_epoch=-1):
        self.total_iters = total_iters
        super().__init__(optimizer, last_epoch)

    def get_lr(self):
        """Scale every base LR by the fraction of warmup completed.

        The 1e-8 in the denominator guards against division by zero when
        total_iters is 0.
        """
        denom = self.total_iters + 1e-8
        return [base_lr * self.last_epoch / denom for base_lr in self.base_lrs]
def train(model, device, train_loader, optimizer, epoch):
    """Run one epoch of cross-entropy training.

    On epoch 1 only, a WarmUpLR scheduler linearly ramps the learning rate,
    stepping once per batch. Every 50 batches the running mean loss, current
    LR and ms/batch are printed, then the counters reset.
    """
    criterion = nn.CrossEntropyLoss()
    model.train()
    # Warmup applies to the very first epoch only.
    warmup = WarmUpLR(optimizer, len(train_loader)) if epoch == 1 else None
    running_loss = 0.
    tick = time.time()
    for step, (inputs, labels) in enumerate(train_loader):
        inputs, labels = inputs.to(device), labels.to(device)
        optimizer.zero_grad()
        loss = criterion(model(inputs), labels)
        loss.backward()
        running_loss += loss.item()
        optimizer.step()
        if warmup is not None:
            warmup.step()
        # Periodic progress report (skips step 0 so the first window is full).
        if step > 0 and step % 50 == 0:
            window_loss = running_loss / 50
            elapsed = time.time() - tick
            cur_lr = optimizer.param_groups[0]['lr']
            print('| epoch {:3d} | {:5d}/{:5d} batches | lr {:02.7f} | ms/batch {:5.2f} | '
                'loss {:5.2f}'.format(
                epoch, step, len(train_loader.dataset) // len(inputs), cur_lr,
                elapsed * 1000 / 50, window_loss))
            running_loss = 0.
            tick = time.time()
def evaluate(model, device, eval_loader):
    """Evaluate `model` on `eval_loader` without gradient tracking.

    Returns:
        (mean_loss, accuracy_percent): dataset-averaged cross-entropy loss
        and top-1 accuracy in percent.
    """
    criterion = nn.CrossEntropyLoss()
    model.eval()
    loss_sum = 0
    hits = 0
    with torch.no_grad():
        for inputs, labels in eval_loader:
            inputs, labels = inputs.to(device), labels.to(device)
            logits = model(inputs)
            # Weight the batch-mean loss by batch size for a dataset mean.
            loss_sum += len(inputs) * criterion(logits, labels).item()
            preds = logits.argmax(dim=1, keepdim=True)
            hits += preds.eq(labels.view_as(preds)).sum().item()
    total = len(eval_loader.dataset)
    return loss_sum / total, 100. * hits / total
if __name__ == "__main__":
    # Usage: python train_one.py <model_name> <dataset>
    model_name = sys.argv[1]
    dataset = sys.argv[2]
    # Fixed training hyper-parameters shared by every model.
    batch_size = 128
    optim_type = 'sgd'
    lr = 0.1
    momentum = 0.9
    weight_decay = 5e-4
    nesterov = True
    epochs = 200
    milestones = [60, 120, 160]
    gamma = 0.2
    print('model: '+model_name+' dataset: '+dataset)
    print('optim_type: '+optim_type+' lr: '+str(lr)+' weight_decay: '+str(weight_decay)+' nesterov: '+str(nesterov)+' momentum: '+str(momentum))
    print('epochs: '+str(epochs)+' milestones: '+str(milestones)+' gamma: '+str(gamma))
    save_model = True
    # When append is True, an existing checkpoint aborts the run (skip-if-done).
    append = False
    ckpt_path = 'ckpt_full/'+dataset
    if save_model:
        if not osp.exists(ckpt_path):
            os.makedirs(ckpt_path)
    save_path = ckpt_path+'/'+model_name+'.pt'
    if os.path.exists(save_path) and append:
        print('Append: Model '+model_name+' exists!')
        sys.exit()
    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
    dataloader = DataLoader(dataset,batch_size)
    train_loader, val_loader, test_loader = dataloader.getloader()
    print('>>>>>>>>>>>>>>>>>>>>>>>> Train: '+model_name+' <<<<<<<<<<<<<<<<<<<<<<<<')
    model = Model(model_name,dataset).to(device)
    best_val_acc = None
    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=momentum,weight_decay = weight_decay,nesterov=nesterov)
    lr_scheduler = MultiStepLR(optimizer, milestones=milestones, gamma=gamma)
    for epoch in range(1, epochs + 1):
        epoch_start_time = time.time()
        train(model, device, train_loader, optimizer, epoch)
        val_loss, val_acc = evaluate(model, device, val_loader)
        # Checkpoint only on validation improvement.
        # NOTE(review): `not best_val_acc` also fires on val_acc == 0.0; use
        # `best_val_acc is None` if a zero accuracy should count as "set".
        if not best_val_acc or val_acc > best_val_acc:
            best_val_acc = val_acc
            if save_model:
                torch.save(model.state_dict(), save_path)
        print('-' * 89)
        print('| end of epoch {:3d} | time: {:5.2f}s | val loss {:5.2f} | '
            'val acc {:.2f} | best val acc {:.2f}'.format(epoch, (time.time() - epoch_start_time),
            val_loss, val_acc, best_val_acc))
        print('-' * 89)
        # NOTE(review): passing an epoch to MultiStepLR.step() is deprecated in
        # recent PyTorch; a bare lr_scheduler.step() per epoch is the modern form.
        lr_scheduler.step(epoch)
    print('>>>>>>>>>>>>>>>>>>>>>>>> Test: '+model_name+' <<<<<<<<<<<<<<<<<<<<<<<<')
    # Reload the best checkpoint into a fresh model before the final test.
    model = Model(model_name,dataset).to(device)
    model.load_state_dict(torch.load(save_path))
    test_loss,test_acc = evaluate(model, device, test_loader)
    print('=' * 89)
    print('| Test on {:s} | test loss {:5.2f} | test acc {:.2f}'.format(
        model_name, test_loss, test_acc))
    print('=' * 89)
\ No newline at end of file
#!/bin/bash
# SLURM batch script: trains one full-precision model via train_one.py.
# $Model and $Dataset are injected by the submit scripts via `sbatch --export=...`.
#- Job parameters
# (TODO)
# Please modify job name
#- Resources
# (TODO)
# Please modify your requirements
#SBATCH -p nv-gpu # Submit to 'nv-gpu' Partitiion
#SBATCH -t 1-06:00:00 # Run for a maximum time of 0 days, 12 hours, 00 mins, 00 secs
# NOTE(review): the inline comment above is stale -- the requested limit is 1 day 6 hours.
#SBATCH --nodes=1 # Request N nodes
#SBATCH --gres=gpu:1 # Request M GPU per node
#SBATCH --gres-flags=enforce-binding # CPU-GPU Affinity
#SBATCH --qos=gpu-normal # Request QOS Type
###
### The system will alloc 8 or 16 cores per gpu by default.
### If you need more or less, use following:
### #SBATCH --cpus-per-task=K # Request K cores
###
###
### Without specifying the constraint, any available nodes that meet the requirement will be allocated
### You can specify the characteristics of the compute nodes, and even the names of the compute nodes
###
### #SBATCH --nodelist=gpu-v00 # Request a specific list of hosts
### #SBATCH --constraint="Volta|RTX8000" # Request GPU Type: Volta(V100 or V100S) or RTX8000
###
# set constraint for RTX8000 to meet my cuda
#SBATCH --constraint="Ampere|RTX8000"
#- Log information
echo "Job start at $(date "+%Y-%m-%d %H:%M:%S")"
echo "Job run at:"
echo "$(hostnamectl)"
#- Load environments
source /tools/module_env.sh
module list # list modules loaded
##- Tools
module load cluster-tools/v1.0
module load slurm-tools/v1.0
module load cmake/3.15.7
module load git/2.17.1
module load vim/8.1.2424
##- language
module load python3/3.6.8
##- CUDA
# module load cuda-cudnn/10.2-7.6.5
# module load cuda-cudnn/11.2-8.2.1
module load cuda-cudnn/11.1-8.2.1
##- virtualenv
# source xxxxx/activate
echo $(module list) # list modules loaded
echo $(which gcc)
echo $(which python)
echo $(which python3)
cluster-quota # nas quota
nvidia-smi --format=csv --query-gpu=name,driver_version,power.limit # gpu info
#- Warning! Please do not change your CUDA_VISIBLE_DEVICES
#- in `.bashrc`, `env.sh`, or your job script
echo "Use GPU ${CUDA_VISIBLE_DEVICES}" # which gpus
#- The CUDA_VISIBLE_DEVICES variable is assigned and specified by SLURM
#- Job step
# [EDIT HERE(TODO)]
python train_one.py $Model $Dataset
#- End
echo "Job end at $(date "+%Y-%m-%d %H:%M:%S")"
import torch
import torch.nn as nn
def ebit_list(quant_type, num_bits):
    """Candidate exponent-bit counts for a given quantization type.

    FLOAT quantization sweeps every exponent width from 1 to num_bits-2
    (sign + at least one mantissa bit remain); other types carry a single
    dummy entry 0.
    """
    if quant_type != 'FLOAT':
        return [0]
    return list(range(1, num_bits - 1))
def numbit_list(quant_type):
    """Bit widths to sweep for a quantization type.

    INT covers 2..16 bits; POT and FLOAT cover 2..8 bits.
    """
    upper = 17 if quant_type == 'INT' else 9
    return list(range(2, upper))
def build_bias_list(quant_type):
    """Quantization grid used for biases.

    POT biases use the 8-bit power-of-two grid; every other type uses a
    16-bit float grid with 7 exponent bits.
    """
    if quant_type == 'POT':
        return build_pot_list(8)
    return build_float_list(16, 7)
def build_list(quant_type, num_bits, e_bits):
    """Quantization value grid for weights/activations.

    Dispatches to the power-of-two grid for POT, otherwise to the float grid
    (e_bits is ignored for POT).
    """
    if quant_type == 'POT':
        return build_pot_list(num_bits)
    return build_float_list(num_bits, e_bits)
def build_pot_list(num_bits):
    """Symmetric power-of-two grid: {0} plus +/-2^i for i in [-2^(b-1)+2, 0].

    The exponent tops out at 0, so the largest grid magnitude is 1.
    Returns a 1-D torch.Tensor of the deduplicated values.
    """
    values = [0.]
    for exp in range(-2 ** (num_bits - 1) + 2, 1):
        values.append(2. ** exp)
        values.append(-2. ** exp)
    return torch.Tensor(list(set(values)))
def build_float_list(num_bits, e_bits):
    """Enumerate every value of a sign/exponent/mantissa float format.

    Layout: 1 sign bit, e_bits exponent bits, m_bits = num_bits-1-e_bits
    mantissa bits. The smallest exponent carries values without the implicit
    leading 1 (subnormal-style); all larger exponents include it.
    Returns a 1-D torch.Tensor of the deduplicated values (including 0).
    """
    m_bits = num_bits - 1 - e_bits
    step = 2 ** (-m_bits)  # gap between adjacent mantissa values
    values = [0.]
    e_min = -2 ** (e_bits - 1) + 1
    # Values at the minimum exponent: mantissa only, no implicit 1.
    for m in range(1, 2 ** m_bits):
        flt = m * step * (2 ** e_min)
        values.append(flt)
        values.append(-flt)
    # Remaining exponents: implicit leading 1 plus mantissa fraction.
    for e in range(e_min + 1, 2 ** (e_bits - 1) + 1):
        scale = 2 ** e
        for m in range(0, 2 ** m_bits):
            flt = (1. + m * step) * scale
            values.append(flt)
            values.append(-flt)
    return torch.Tensor(list(set(values)))
# No cfg needed here: BN/ReLU partners share the conv name's prefix and suffix.
def fold_ratio(layer, par_ratio, flop_ratio):
    """Fold each conv layer's BN (and ReLU) ratio shares into the conv entry.

    For every name in `layer` containing 'conv', the matching
    '<prefix>bn<suffix>' share is added onto the conv's entry, and then the
    matching '<prefix>relu<suffix>' share — or, only if that is absent,
    '<prefix>relus<suffix>'.

    Fixes over the previous version: the unused `idx` local is gone, and the
    conv index comes from enumerate() instead of layer.index(name), which
    rescanned the list and returned the *first* occurrence (wrong for
    duplicate names).

    Mutates par_ratio and flop_ratio in place and returns them.
    """
    for conv_idx, name in enumerate(layer):
        if 'conv' not in name:
            continue
        [prefix, suffix] = name.split('conv')
        bn_name = prefix + 'bn' + suffix
        relu_name = prefix + 'relu' + suffix
        relus_name = prefix + 'relus' + suffix
        if bn_name in layer:
            bn_idx = layer.index(bn_name)
            par_ratio[conv_idx] += par_ratio[bn_idx]
            flop_ratio[conv_idx] += flop_ratio[bn_idx]
        if relu_name in layer:
            relu_idx = layer.index(relu_name)
            par_ratio[conv_idx] += par_ratio[relu_idx]
            flop_ratio[conv_idx] += flop_ratio[relu_idx]
        elif relus_name in layer:
            relus_idx = layer.index(relus_name)
            par_ratio[conv_idx] += par_ratio[relus_idx]
            flop_ratio[conv_idx] += flop_ratio[relus_idx]
    return par_ratio, flop_ratio
def fold_model(model):
    """Fold every conv's matching BatchNorm into the conv weights in place.

    A conv module named '<prefix>conv<suffix>' is folded (via fold_bn) with
    the module named '<prefix>bn<suffix>' when one exists.

    Fix: the previous hasattr/getattr lookup only resolves *top-level*
    attributes, so nested qualified names like 'layer1.0.bn1' were never
    found and those BNs were silently left unfolded. Looking the partner up
    in dict(model.named_modules()) handles nested modules too.
    """
    modules = dict(model.named_modules())
    for name, module in model.named_modules():
        if 'conv' in name:
            [prefix, suffix] = name.split('conv')
            bn_name = prefix + 'bn' + suffix
            bn_layer = modules.get(bn_name)
            if bn_layer is not None:
                fold_bn(module, bn_layer)
def fold_bn(conv, bn):
    """Merge a BatchNorm's transform into `conv`'s weight and bias in place.

    Uses the BN running statistics, so after folding, conv(x) reproduces
    bn(conv(x)) as computed in eval mode. If the conv had no bias, a new
    bias Parameter is attached.
    """
    # BN statistics.
    mean = bn.running_mean
    std = torch.sqrt(bn.running_var + bn.eps)
    if bn.affine:
        scale = bn.weight / std
        new_weight = conv.weight * scale.view(conv.out_channels, 1, 1, 1)
        if conv.bias is not None:
            new_bias = scale * conv.bias - scale * mean + bn.bias
        else:
            new_bias = bn.bias - scale * mean
    else:
        scale = 1 / std
        # NOTE(review): unlike the affine branch, `scale` is not reshaped to
        # (out_channels, 1, 1, 1) here, so it broadcasts over the weight's
        # last dimension rather than the channel dimension — looks
        # suspicious; confirm against how non-affine BNs are used.
        new_weight = conv.weight * scale
        if conv.bias is not None:
            new_bias = scale * conv.bias - scale * mean
        else:
            new_bias = -scale * mean
    # Install the folded parameters.
    conv.weight.data = new_weight.data
    if conv.bias is not None:
        conv.bias.data = new_bias.data
    else:
        conv.bias = torch.nn.Parameter(new_bias)
Markdown is supported
0% or
You are about to add 0 people to the discussion. Proceed with caution.
Finish editing this message first!
Please register or to comment