add all
This view is limited to 50 files because it contains too many changes.
- Image/ViT/code/train.py +0 -59
- Image/ViT/dataset/.gitkeep +0 -0
- Image/ViT/model/.gitkeep +0 -0
- Image/ZFNet/code/train.py +0 -59
- Image/ZFNet/dataset/.gitkeep +0 -0
- Image/ZFNet/model/.gitkeep +0 -0
- ViT-CIFAR10/Classification-backdoor/dataset/backdoor_index.npy +3 -0
- ViT-CIFAR10/Classification-backdoor/dataset/index.json +0 -0
- ViT-CIFAR10/Classification-backdoor/dataset/info.json +4 -0
- ViT-CIFAR10/Classification-backdoor/dataset/labels.npy +3 -0
- ViT-CIFAR10/Classification-backdoor/readme.md +54 -0
- ViT-CIFAR10/Classification-backdoor/scripts/create_index.py +18 -0
- ViT-CIFAR10/Classification-backdoor/scripts/dataset_utils.py +59 -0
- ViT-CIFAR10/Classification-backdoor/scripts/get_raw_data.py +111 -0
- ViT-CIFAR10/Classification-backdoor/scripts/get_representation.py +272 -0
- {Image/ViT/code → ViT-CIFAR10/Classification-backdoor/scripts}/model.py +2 -2
- ViT-CIFAR10/Classification-backdoor/scripts/train.py +414 -0
- ViT-CIFAR10/Classification-backdoor/scripts/train.yaml +10 -0
- ViT-CIFAR10/Classification-noisy/dataset/index.json +0 -0
- ViT-CIFAR10/Classification-noisy/dataset/info.json +4 -0
- ViT-CIFAR10/Classification-noisy/dataset/labels.npy +3 -0
- ViT-CIFAR10/Classification-noisy/dataset/noise_index.npy +3 -0
- ViT-CIFAR10/Classification-noisy/readme.md +54 -0
- ViT-CIFAR10/Classification-noisy/scripts/create_index.py +18 -0
- ViT-CIFAR10/Classification-noisy/scripts/dataset_utils.py +274 -0
- ViT-CIFAR10/Classification-noisy/scripts/get_raw_data.py +194 -0
- ViT-CIFAR10/Classification-noisy/scripts/get_representation.py +272 -0
- ViT-CIFAR10/Classification-noisy/scripts/model.py +171 -0
- ViT-CIFAR10/Classification-noisy/scripts/preview_noise.py +122 -0
- ViT-CIFAR10/Classification-noisy/scripts/train.py +251 -0
- ViT-CIFAR10/Classification-noisy/scripts/train.yaml +25 -0
- ViT-CIFAR10/Classification-normal/dataset/index.json +0 -0
- ViT-CIFAR10/Classification-normal/dataset/info.json +4 -0
- ViT-CIFAR10/Classification-normal/dataset/labels.npy +3 -0
- ViT-CIFAR10/Classification-normal/readme.md +54 -0
- ViT-CIFAR10/Classification-normal/scripts/dataset_utils.py +59 -0
- ViT-CIFAR10/Classification-normal/scripts/get_raw_data.py +82 -0
- ViT-CIFAR10/Classification-normal/scripts/get_representation.py +272 -0
- ViT-CIFAR10/Classification-normal/scripts/model.py +171 -0
- ViT-CIFAR10/Classification-normal/scripts/train.py +220 -0
- ViT-CIFAR10/Classification-normal/scripts/train.yaml +7 -0
- ZFNet-CIFAR10/Classification-backdoor/dataset/backdoor_index.npy +3 -0
- ZFNet-CIFAR10/Classification-backdoor/dataset/index.json +0 -0
- ZFNet-CIFAR10/Classification-backdoor/dataset/info.json +4 -0
- ZFNet-CIFAR10/Classification-backdoor/dataset/labels.npy +3 -0
- ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_1/embeddings.npy +3 -0
- ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_1/model.pth +3 -0
- ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_1/predictions.npy +3 -0
- ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_10/embeddings.npy +3 -0
- ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_10/model.pth +3 -0
Image/ViT/code/train.py
DELETED
@@ -1,59 +0,0 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
-from utils.dataset_utils import get_cifar10_dataloaders
-from utils.train_utils import train_model, train_model_data_augmentation, train_model_backdoor
-from utils.parse_args import parse_args
-from model import ViT
-
-def main():
-    # Parse command-line arguments
-    args = parse_args()
-
-    # Create the model
-    model = ViT()
-
-    if args.train_type == '0':
-        # Get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
-        # Train the model
-        train_model(
-            model=model,
-            trainloader=trainloader,
-            testloader=testloader,
-            epochs=args.epochs,
-            lr=args.lr,
-            device=f'cuda:{args.gpu}',
-            save_dir='../model',
-            model_name='vit',
-            save_type='0'
-        )
-    elif args.train_type == '1':
-        train_model_data_augmentation(
-            model,
-            epochs=args.epochs,
-            lr=args.lr,
-            device=f'cuda:{args.gpu}',
-            save_dir='../model',
-            model_name='vit',
-            batch_size=args.batch_size,
-            num_workers=args.num_workers,
-            local_dataset_path=args.dataset_path
-        )
-    elif args.train_type == '2':
-        train_model_backdoor(
-            model,
-            poison_ratio=args.poison_ratio,
-            target_label=args.target_label,
-            epochs=args.epochs,
-            lr=args.lr,
-            device=f'cuda:{args.gpu}',
-            save_dir='../model',
-            model_name='vit',
-            batch_size=args.batch_size,
-            num_workers=args.num_workers,
-            local_dataset_path=args.dataset_path
-        )
-
-if __name__ == '__main__':
-    main()
Image/ViT/dataset/.gitkeep
DELETED
File without changes

Image/ViT/model/.gitkeep
DELETED
File without changes
Image/ZFNet/code/train.py
DELETED
@@ -1,59 +0,0 @@
-import sys
-import os
-sys.path.append(os.path.dirname(os.path.dirname(os.path.dirname(os.path.abspath(__file__)))))
-from utils.dataset_utils import get_cifar10_dataloaders
-from utils.train_utils import train_model, train_model_data_augmentation, train_model_backdoor
-from utils.parse_args import parse_args
-from model import ZFNet
-
-def main():
-    # Parse command-line arguments
-    args = parse_args()
-
-    # Create the model
-    model = ZFNet()
-
-    if args.train_type == '0':
-        # Get the data loaders
-        trainloader, testloader = get_cifar10_dataloaders(batch_size=args.batch_size, local_dataset_path=args.dataset_path)
-        # Train the model
-        train_model(
-            model=model,
-            trainloader=trainloader,
-            testloader=testloader,
-            epochs=args.epochs,
-            lr=args.lr,
-            device=f'cuda:{args.gpu}',
-            save_dir='../model',
-            model_name='zfnet',
-            save_type='0'
-        )
-    elif args.train_type == '1':
-        train_model_data_augmentation(
-            model,
-            epochs=args.epochs,
-            lr=args.lr,
-            device=f'cuda:{args.gpu}',
-            save_dir='../model',
-            model_name='zfnet',
-            batch_size=args.batch_size,
-            num_workers=args.num_workers,
-            local_dataset_path=args.dataset_path
-        )
-    elif args.train_type == '2':
-        train_model_backdoor(
-            model,
-            poison_ratio=args.poison_ratio,
-            target_label=args.target_label,
-            epochs=args.epochs,
-            lr=args.lr,
-            device=f'cuda:{args.gpu}',
-            save_dir='../model',
-            model_name='zfnet',
-            batch_size=args.batch_size,
-            num_workers=args.num_workers,
-            local_dataset_path=args.dataset_path
-        )
-
-if __name__ == '__main__':
-    main()
Image/ZFNet/dataset/.gitkeep
DELETED
File without changes

Image/ZFNet/model/.gitkeep
DELETED
File without changes
ViT-CIFAR10/Classification-backdoor/dataset/backdoor_index.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:95cd0c58248f0231e7eacb1714b06974791d9ae2304f681c130131451158f96e
+size 40128
ViT-CIFAR10/Classification-backdoor/dataset/index.json
ADDED
The diff for this file is too large to render.
ViT-CIFAR10/Classification-backdoor/dataset/info.json
ADDED
@@ -0,0 +1,4 @@
+{
+    "model": "ViT",
+    "classes": ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
+}
ViT-CIFAR10/Classification-backdoor/dataset/labels.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:3ab4de4e1619121679c0a8b14ef7a4339840597ef38c1b22dbc4fdf6bc8f4ad0
+size 480128
ViT-CIFAR10/Classification-backdoor/readme.md
ADDED
@@ -0,0 +1,54 @@
+# ViT-CIFAR10 Training and Feature Extraction
+
+This project trains a ViT model on the CIFAR10 dataset and integrates the functionality needed for feature extraction and visualization.
+
+## The time_travel_saver data extractor
+```python
+# Save the files needed to visualize the training process
+if (epoch + 1) % interval == 0 or (epoch == 0):
+    # Create a dedicated sequential dataloader for collecting embeddings
+    ordered_trainloader = torch.utils.data.DataLoader(
+        trainloader.dataset,
+        batch_size=trainloader.batch_size,
+        shuffle=False,
+        num_workers=trainloader.num_workers
+    )
+    epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')  # save path for this epoch
+    save_model = time_travel_saver(model, ordered_trainloader, device, epoch_save_dir, model_name,
+                                   show=True, layer_name='avg_pool', auto_save_embedding=True)
+    # show: whether to print the model's layer dimensions
+    # layer_name: the layer to extract features from; if None, a layer within the accepted dimension range is used
+    # auto_save_embedding: whether to save the feature vectors automatically (must be True)
+    save_model.save_checkpoint_embeddings_predictions()  # save model weights, features and predictions to epoch_x
+    if epoch == 0:
+        save_model.save_lables_index(path="../dataset")  # save labels and index to dataset
+```
+
+
+## Project structure
+
+- `./scripts/train.yaml`: training configuration file (batch size, learning rate, GPU settings, etc.)
+- `./scripts/train.py`: training script that trains the model and automatically collects feature data
+- `./model/`: trained model weights
+- `./epochs/`: high-dimensional feature vectors, prediction results and other data collected during training
+
+## Usage
+
+1. Set the training parameters in `train.yaml`
+2. Run the training script:
+```
+python train.py
+```
+3. After training, the data can be found at:
+   - Model weights: `./epochs/epoch_{n}/model.pth`
+   - Feature vectors: `./epochs/epoch_{n}/embeddings.npy`
+   - Predictions: `./epochs/epoch_{n}/predictions.npy`
+   - Labels: `./dataset/labels.npy`
+   - Dataset index: `./dataset/index.json`
+
+## Data format
+
+- `embeddings.npy`: feature vectors of shape [n_samples, feature_dim]
+- `predictions.npy`: predicted probabilities of shape [n_samples, n_classes]
+- `labels.npy`: ground-truth labels of shape [n_samples]
+- `index.json`: index information for the train, test and validation splits
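The readme above specifies the shapes of the saved artifacts but not how to consume them. The following minimal sketch (not part of the commit) loads them using the default paths from the readme and cross-checks the shapes; the epoch folder `epoch_2` is an arbitrary example, and the assumption that `embeddings.npy`, `predictions.npy` and `labels.npy` share one row order follows from the sequential dataloader described above.

```python
import json
import numpy as np

# Hypothetical epoch folder; any saved epoch_{n} directory would do.
epoch_dir = "./epochs/epoch_2"

embeddings = np.load(f"{epoch_dir}/embeddings.npy")    # [n_samples, feature_dim]
predictions = np.load(f"{epoch_dir}/predictions.npy")  # [n_samples, n_classes]
labels = np.load("./dataset/labels.npy")               # [n_samples]

with open("./dataset/index.json") as f:
    index = json.load(f)  # {"train": [...], "test": [...], "validation": [...]}

# The three arrays should describe the same samples in the same order.
assert embeddings.shape[0] == predictions.shape[0] == labels.shape[0]
assert sum(len(v) for v in index.values()) == labels.shape[0]

# Example: accuracy of the saved predictions on the test split.
test_idx = np.array(index["test"])
test_acc = (predictions[test_idx].argmax(axis=1) == labels[test_idx]).mean()
print(f"Test accuracy from saved predictions: {test_acc:.4f}")
```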
ViT-CIFAR10/Classification-backdoor/scripts/create_index.py
ADDED
@@ -0,0 +1,18 @@
+import json
+import os
+
+# Create the full index
+index_dict = {
+    "train": list(range(50000)),        # training indices 0-49999
+    "test": list(range(50000, 60000)),  # test indices 50000-59999
+    "validation": []                    # empty validation set
+}
+
+# Write the index file
+index_path = os.path.join('..', 'dataset', 'index.json')
+with open(index_path, 'w') as f:
+    json.dump(index_dict, f, indent=4)
+
+print(f"Created full index file: {index_path}")
+print(f"Training set: {len(index_dict['train'])} samples")
+print(f"Test set: {len(index_dict['test'])} samples")
ViT-CIFAR10/Classification-backdoor/scripts/dataset_utils.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import os
+
+# Dataset loading
+
+def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+    """Get the CIFAR10 data loaders
+
+    Args:
+        batch_size: batch size
+        num_workers: number of worker processes for data loading
+        local_dataset_path: local dataset path; if given the local dataset is used, otherwise it is downloaded
+
+    Returns:
+        trainloader: training data loader
+        testloader: test data loader
+    """
+    # Data preprocessing
+    transform_train = transforms.Compose([
+        transforms.RandomCrop(32, padding=4),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    transform_test = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    # Set the dataset path
+    if local_dataset_path:
+        print(f"Using local dataset: {local_dataset_path}")
+        # Download only if the dataset is not already present at the given path
+        cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
+        download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
+        dataset_path = local_dataset_path
+    else:
+        print("No local dataset path given, the dataset will be downloaded")
+        download = True
+        dataset_path = '../dataset'
+
+    # Create the dataset directory
+    if not os.path.exists(dataset_path):
+        os.makedirs(dataset_path)
+
+    trainset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=True, download=download, transform=transform_train)
+    trainloader = torch.utils.data.DataLoader(
+        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    testset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=False, download=download, transform=transform_test)
+    testloader = torch.utils.data.DataLoader(
+        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    return trainloader, testloader
ViT-CIFAR10/Classification-backdoor/scripts/get_raw_data.py
ADDED
@@ -0,0 +1,111 @@
+# Read the dataset and save it under ../dataset/raw_data in the dataset's full ordering as 1.png, 2.png, 3.png, ...
+
+import os
+import yaml
+import numpy as np
+import torchvision
+import torchvision.transforms as transforms
+from PIL import Image
+from tqdm import tqdm
+
+def unpickle(file):
+    """Read a CIFAR-10 data file"""
+    import pickle
+    with open(file, 'rb') as fo:
+        dict = pickle.load(fo, encoding='bytes')
+    return dict
+
+def save_images_from_cifar10_with_backdoor(dataset_path, save_dir):
+    """Save images from the CIFAR-10 dataset, adding the trigger to poisoned samples
+
+    Args:
+        dataset_path: CIFAR-10 dataset path
+        save_dir: image save path
+    """
+    # Create the save directory
+    os.makedirs(save_dir, exist_ok=True)
+
+    # Read the poisoned indices
+    backdoor_index_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dataset', 'backdoor_index.npy')
+    if os.path.exists(backdoor_index_path):
+        backdoor_indices = np.load(backdoor_index_path)
+        print(f"Loaded {len(backdoor_indices)} poisoned sample indices")
+    else:
+        backdoor_indices = []
+        print("No poisoned index file found, no trigger will be added")
+
+    # Collect the training data
+    train_data = []
+    train_labels = []
+
+    # Read the training batches
+    for i in range(1, 6):
+        batch_file = os.path.join(dataset_path, f'data_batch_{i}')
+        if os.path.exists(batch_file):
+            print(f"Reading training batch {i}")
+            batch = unpickle(batch_file)
+            train_data.append(batch[b'data'])
+            train_labels.extend(batch[b'labels'])
+
+    # Merge all training data
+    if train_data:
+        train_data = np.vstack(train_data)
+        train_data = train_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+
+    # Read the test data
+    test_file = os.path.join(dataset_path, 'test_batch')
+    if os.path.exists(test_file):
+        print("Reading test data")
+        test_batch = unpickle(test_file)
+        test_data = test_batch[b'data']
+        test_labels = test_batch[b'labels']
+        test_data = test_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+    else:
+        test_data = []
+        test_labels = []
+
+    # Merge the training and test data
+    all_data = np.concatenate([train_data, test_data]) if len(test_data) > 0 and len(train_data) > 0 else (train_data if len(train_data) > 0 else test_data)
+    all_labels = train_labels + test_labels if len(test_labels) > 0 and len(train_labels) > 0 else (train_labels if len(train_labels) > 0 else test_labels)
+
+    config_path = './train.yaml'
+    with open(config_path) as f:
+        config = yaml.safe_load(f)
+    trigger_size = config.get('trigger_size', 4)
+
+    # Save the images
+    print(f"Saving {len(all_data)} images...")
+
+    for i, (img, label) in enumerate(tqdm(zip(all_data, all_labels), total=len(all_data))):
+        # Save the original image
+        img_pil = Image.fromarray(img)
+
+        # Check whether this is a poisoned sample
+        if i in backdoor_indices:
+            # Create a triggered copy of the poisoned sample
+            img_backdoor = img.copy()
+            # Add the trigger (small white square in the bottom-right corner)
+            img_backdoor[-trigger_size:, -trigger_size:, :] = 255
+            # Save the triggered image
+            img_backdoor_pil = Image.fromarray(img_backdoor)
+            img_backdoor_pil.save(os.path.join(save_dir, f"{i}.png"))
+
+        else:
+            img_pil.save(os.path.join(save_dir, f"{i}.png"))
+
+    print(f"Done! {len(all_data)} original images saved to {save_dir}")
+
+if __name__ == "__main__":
+    # Set the paths
+    dataset_path = "../dataset/cifar-10-batches-py"
+    save_dir = "../dataset/raw_data"
+
+    # Download the dataset if it does not exist
+    if not os.path.exists(dataset_path):
+        print("Dataset not found, downloading...")
+        os.makedirs("../dataset", exist_ok=True)
+        transform = transforms.Compose([transforms.ToTensor()])
+        trainset = torchvision.datasets.CIFAR10(root="../dataset", train=True, download=True, transform=transform)
+
+    # Save the images
+    save_images_from_cifar10_with_backdoor(dataset_path, save_dir)
ViT-CIFAR10/Classification-backdoor/scripts/get_representation.py
ADDED
@@ -0,0 +1,272 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import os
+import json
+from tqdm import tqdm
+
+class time_travel_saver:
+    """Visualization data extractor
+
+    Saves various data produced during model training, including:
+    1. Model weights (.pth)
+    2. High-dimensional features (representation/*.npy)
+    3. Prediction results (prediction/*.npy)
+    4. Label data (label/labels.npy)
+    """
+
+    def __init__(self, model, dataloader, device, save_dir, model_name,
+                 auto_save_embedding=False, layer_name=None, show=False):
+        """Initialize
+
+        Args:
+            model: model instance to save
+            dataloader: data loader (must load samples in order)
+            device: compute device (cpu or gpu)
+            save_dir: root save directory
+            model_name: model name
+        """
+        self.model = model
+        self.dataloader = dataloader
+        self.device = device
+        self.save_dir = save_dir
+        self.model_name = model_name
+        self.auto_save = auto_save_embedding
+        self.layer_name = layer_name
+
+        if show and not layer_name:
+            layer_dimensions = self.show_dimensions()
+            # print(layer_dimensions)
+
+    def show_dimensions(self):
+        """Show the names and output dimensions of all layers in the model
+
+        Prints the name and output dimension of every layer,
+        to help the user pick a suitable layer for feature extraction.
+
+        Returns:
+            layer_dimensions: dict mapping layer names to dimensions
+        """
+        activation = {}
+        layer_dimensions = {}
+
+        def get_activation(name):
+            def hook(model, input, output):
+                activation[name] = output.detach()
+            return hook
+
+        # Register hooks on all layers
+        handles = []
+        for name, module in self.model.named_modules():
+            if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                handles.append(module.register_forward_hook(get_activation(name)))
+
+        self.model.eval()
+        with torch.no_grad():
+            # Run one batch to analyze each layer's output dimension
+            inputs, _ = next(iter(self.dataloader))
+            inputs = inputs.to(self.device)
+            _ = self.model(inputs)
+
+            # Report the output dimension of every layer
+            print("\nLayer names and dimensions of the model:")
+            print("-" * 50)
+            print(f"{'Layer name':<40} {'Feature dim':<15} {'Output shape'}")
+            print("-" * 50)
+
+            for name, feat in activation.items():
+                if feat is None:
+                    continue
+
+                # Feature dimension (after flattening)
+                feat_dim = feat.reshape(feat.size(0), -1).size(1)
+                layer_dimensions[name] = feat_dim
+                # Print the layer info
+                shape_str = str(list(feat.shape))
+                print(f"{name:<40} {feat_dim:<15} {shape_str}")
+
+            print("-" * 50)
+            print("Note: the feature dimension is the size of the flattened output tensor")
+            print("You can pick a different layer via the layer_name parameter of time_travel_saver")
+            print("e.g. layer_name='avg_pool' or layer_name='layer4'")
+
+        # Remove all hooks
+        for handle in handles:
+            handle.remove()
+
+        return layer_dimensions
+
+    def _extract_features_and_predictions(self):
+        """Extract features and predictions
+
+        Returns:
+            features: high-dimensional features [n_samples, feature_dim]
+            predictions: prediction results [n_samples, n_classes]
+        """
+        features = []
+        predictions = []
+        indices = []
+        activation = {}
+
+        def get_activation(name):
+            def hook(model, input, output):
+                # Only keep the activation when it is needed, to avoid wasting memory
+                if name not in activation or activation[name] is None:
+                    activation[name] = output.detach()
+            return hook
+
+        # Select the layer by name or by dimension
+
+        # Register hooks on all layers
+        handles = []
+        for name, module in self.model.named_modules():
+            if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                handles.append(module.register_forward_hook(get_activation(name)))
+
+        self.model.eval()
+        with torch.no_grad():
+            # First run one batch to analyze each layer's output dimension
+            inputs, _ = next(iter(self.dataloader))
+            inputs = inputs.to(self.device)
+            _ = self.model(inputs)
+
+            # If a layer name is given, use that layer directly
+            if self.layer_name is not None:
+                if self.layer_name not in activation:
+                    raise ValueError(f"The specified layer {self.layer_name} does not exist in the model")
+
+                feat = activation[self.layer_name]
+                if feat is None:
+                    raise ValueError(f"The specified layer {self.layer_name} produced no output features")
+
+                suitable_layer_name = self.layer_name
+                suitable_dim = feat.reshape(feat.size(0), -1).size(1)
+                print(f"Using the specified feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+            else:
+                # Find a layer whose dimension lies in the target range
+                target_dim_range = (256, 2048)
+                suitable_layer_name = None
+                suitable_dim = None
+
+                # Analyze the output dimension of every layer
+                for name, feat in activation.items():
+                    if feat is None:
+                        continue
+                    feat_dim = feat.reshape(feat.size(0), -1).size(1)
+                    if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
+                        suitable_layer_name = name
+                        suitable_dim = feat_dim
+                        break
+
+                if suitable_layer_name is None:
+                    raise ValueError("No feature layer with a suitable dimension was found")
+
+                print(f"Automatically selected feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+
+            # Save the layer info
+            layer_info = {
+                'layer_id': suitable_layer_name,
+                'dim': suitable_dim
+            }
+            layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json')
+            with open(layer_info_path, 'w') as f:
+                json.dump(layer_info, f)
+
+            # Clear the activations from the first pass
+            activation.clear()
+
+            # Now process all data
+            for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="Extracting features and predictions")):
+                inputs = inputs.to(self.device)
+                outputs = self.model(inputs)  # get the predictions
+
+                # Get and process the features
+                feat = activation[suitable_layer_name]
+                flat_features = torch.flatten(feat, start_dim=1)
+                features.append(flat_features.cpu().numpy())
+                predictions.append(outputs.cpu().numpy())
+
+                # Clear this batch's activations
+                activation.clear()
+
+        # Remove all hooks
+        for handle in handles:
+            handle.remove()
+
+        if len(features) > 0:
+            features = np.vstack(features)
+            predictions = np.vstack(predictions)
+            return features, predictions
+        else:
+            return np.array([]), np.array([])
+
+    def save_lables_index(self, path):
+        """Save the label data and index information
+
+        Args:
+            path: save path
+        """
+        os.makedirs(path, exist_ok=True)
+        labels_path = os.path.join(path, 'labels.npy')
+        index_path = os.path.join(path, 'index.json')
+
+        # Try different attributes to obtain the labels
+        try:
+            if hasattr(self.dataloader.dataset, 'targets'):
+                # CIFAR10/CIFAR100 use the targets attribute
+                labels = np.array(self.dataloader.dataset.targets)
+            elif hasattr(self.dataloader.dataset, 'labels'):
+                # Some datasets use the labels attribute
+                labels = np.array(self.dataloader.dataset.labels)
+            else:
+                # Otherwise collect the labels from the data loader
+                labels = []
+                for _, batch_labels in self.dataloader:
+                    labels.append(batch_labels.numpy())
+                labels = np.concatenate(labels)
+
+            # Save the label data
+            np.save(labels_path, labels)
+            print(f"Label data saved to {labels_path}")
+
+            # Create the dataset index
+            num_samples = len(labels)
+            indices = list(range(num_samples))
+
+            # Create the index dict
+            index_dict = {
+                "train": indices,   # by default all data belongs to the training set
+                "test": [],         # initially empty
+                "validation": []    # initially empty
+            }
+
+            # Save the index to a JSON file
+            with open(index_path, 'w') as f:
+                json.dump(index_dict, f, indent=4)
+
+            print(f"Dataset index saved to {index_path}")
+
+        except Exception as e:
+            print(f"Error while saving labels and index: {e}")
+
+    def save_checkpoint_embeddings_predictions(self, model=None):
+        """Save all data"""
+        if model is not None:
+            self.model = model
+        # Save the model weights
+        os.makedirs(self.save_dir, exist_ok=True)
+        model_path = os.path.join(self.save_dir, 'model.pth')
+        torch.save(self.model.state_dict(), model_path)
+
+        if self.auto_save:
+            # Extract and save the features and predictions
+            features, predictions = self._extract_features_and_predictions()
+
+            # Save the features
+            np.save(os.path.join(self.save_dir, 'embeddings.npy'), features)
+            # Save the predictions
+            np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions)
+            print("\nSaved the following data:")
+            print(f"- Model weights: {model_path}")
+            print(f"- Feature vectors: [n_samples: {features.shape[0]}, feature_dim: {features.shape[1]}]")
+            print(f"- Predictions: [n_samples: {predictions.shape[0]}, n_classes: {predictions.shape[1]}]")
{Image/ViT/code → ViT-CIFAR10/Classification-backdoor/scripts}/model.py
RENAMED
@@ -113,7 +113,7 @@ class ViT(nn.Module):
         img_size=32,
         patch_size=4,
         in_chans=3,
-
+        num_classes=10,
         embed_dim=96,
         depth=12,
         n_heads=8,
@@ -149,7 +149,7 @@ class ViT(nn.Module):
         ])

         self.norm = nn.LayerNorm(embed_dim, eps=1e-6)
-        self.head = nn.Linear(embed_dim,
+        self.head = nn.Linear(embed_dim, num_classes)

     def forward(self, x):
         n_samples = x.shape[0]
ViT-CIFAR10/Classification-backdoor/scripts/train.py
ADDED
@@ -0,0 +1,414 @@
+import sys
+import os
+import yaml
+from pathlib import Path
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import time
+import logging
+import numpy as np
+from tqdm import tqdm
+
+
+from dataset_utils import get_cifar10_dataloaders
+from model import ViT
+from get_representation import time_travel_saver
+
+def setup_logger(log_file):
+    """Configure the logger; overwrite the log file if it already exists
+
+    Args:
+        log_file: log file path
+
+    Returns:
+        logger: configured logger
+    """
+    # Create the logger
+    logger = logging.getLogger('train')
+    logger.setLevel(logging.INFO)
+
+    # Remove existing handlers
+    if logger.hasHandlers():
+        logger.handlers.clear()
+
+    # Create a file handler; mode 'w' overwrites an existing file
+    fh = logging.FileHandler(log_file, mode='w')
+    fh.setLevel(logging.INFO)
+
+    # Create a console handler
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.INFO)
+
+    # Create the formatter
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    fh.setFormatter(formatter)
+    ch.setFormatter(formatter)
+
+    # Add the handlers
+    logger.addHandler(fh)
+    logger.addHandler(ch)
+
+    return logger
+
+def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
+                save_dir='./epochs', model_name='model', interval=1):
+    """Generic model training function
+    Args:
+        model: the model to train
+        trainloader: training data loader
+        testloader: test data loader
+        epochs: number of training epochs
+        lr: learning rate
+        device: training device in the form 'cuda:N', where N is the GPU id (0,1,2,3)
+        save_dir: model save directory
+        model_name: model name
+        interval: model save interval
+    """
+    # Check and set the GPU device
+    if not torch.cuda.is_available():
+        print("CUDA is not available, training on CPU")
+        device = 'cpu'
+    elif not device.startswith('cuda:'):
+        device = f'cuda:0'
+
+    # Make sure the device string is valid
+    if device.startswith('cuda:'):
+        gpu_id = int(device.split(':')[1])
+        if gpu_id >= torch.cuda.device_count():
+            print(f"GPU {gpu_id} is not available, using GPU 0")
+            device = 'cuda:0'
+
+    # Set up the save directory
+    if not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+
+    # Set the log file path
+    log_file = os.path.join(os.path.dirname(save_dir), 'epochs', 'train.log')
+    if not os.path.exists(os.path.dirname(log_file)):
+        os.makedirs(os.path.dirname(log_file))
+
+    logger = setup_logger(log_file)
+
+    # Loss function and optimizer
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
+    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
+
+    # Move the model to the device
+    model = model.to(device)
+    best_acc = 0
+    start_time = time.time()
+
+    logger.info(f'Starting training of {model_name}')
+    logger.info(f'Total epochs: {epochs}, learning rate: {lr}, device: {device}')
+
+    for epoch in range(epochs):
+        # Training phase
+        model.train()
+        train_loss = 0
+        correct = 0
+        total = 0
+
+        train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
+        for batch_idx, (inputs, targets) in enumerate(train_pbar):
+            inputs, targets = inputs.to(device), targets.to(device)
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, targets)
+            loss.backward()
+            optimizer.step()
+
+            train_loss += loss.item()
+            _, predicted = outputs.max(1)
+            total += targets.size(0)
+            correct += predicted.eq(targets).sum().item()
+
+            # Update the progress bar
+            train_pbar.set_postfix({
+                'loss': f'{train_loss/(batch_idx+1):.3f}',
+                'acc': f'{100.*correct/total:.2f}%'
+            })
+
+        # Keep the training accuracy
+        train_acc = 100.*correct/total
+        train_correct = correct
+        train_total = total
+
+        # Test phase
+        model.eval()
+        test_loss = 0
+        correct = 0
+        total = 0
+
+        test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
+        with torch.no_grad():
+            for batch_idx, (inputs, targets) in enumerate(test_pbar):
+                inputs, targets = inputs.to(device), targets.to(device)
+                outputs = model(inputs)
+                loss = criterion(outputs, targets)
+
+                test_loss += loss.item()
+                _, predicted = outputs.max(1)
+                total += targets.size(0)
+                correct += predicted.eq(targets).sum().item()
+
+                # Update the progress bar
+                test_pbar.set_postfix({
+                    'loss': f'{test_loss/(batch_idx+1):.3f}',
+                    'acc': f'{100.*correct/total:.2f}%'
+                })
+
+        # Compute the test accuracy
+        acc = 100.*correct/total
+
+        # Log the training and test loss and accuracy
+        logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/(len(trainloader)):.3f} | Train Acc: {train_acc:.2f}% | '
+                    f'Test Loss: {test_loss/(batch_idx+1):.3f} | Test Acc: {acc:.2f}%')
+
+        # Save the files needed to visualize the training process
+        if (epoch + 1) % interval == 0 or (epoch == 0):
+            # Create a dedicated sequential dataloader for collecting embeddings, concatenating the training and test sets
+            from torch.utils.data import ConcatDataset
+
+            def custom_collate_fn(batch):
+                # Make sure every item is a tensor
+                data = [item[0] for item in batch]    # images
+                target = [item[1] for item in batch]  # labels
+
+                # Convert the lists to tensors
+                data = torch.stack(data, 0)
+                target = torch.tensor(target)
+
+                return [data, target]
+
+            # Concatenate the training and test sets
+            combined_dataset = ConcatDataset([trainloader.dataset, testloader.dataset])
+
+            # Create a sequential data loader
+            ordered_loader = torch.utils.data.DataLoader(
+                combined_dataset,                    # use the combined dataset
+                batch_size=trainloader.batch_size,
+                shuffle=False,                       # keep the original order
+                num_workers=trainloader.num_workers,
+                collate_fn=custom_collate_fn         # use the custom collate function
+            )
+            epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
+            save_model = time_travel_saver(model, ordered_loader, device, epoch_save_dir, model_name,
+                                           show=True, layer_name='blocks.11', auto_save_embedding=True)
+            save_model.save_checkpoint_embeddings_predictions()
+            if epoch == 0:
+                save_model.save_lables_index(path = "../dataset")
+
+        scheduler.step()
+
+    logger.info('Training finished!')
+
+def backdoor_train():
+    """Train a backdoored model
+
+    Backdoor attack design:
+    1. Trigger design: a small 4x4 white square in the bottom-right corner of the image
+    2. Attack goal: make triggered images be classified as the target label (default 0)
+    3. Poisoning ratio: by default 10% of the training data receives the trigger and a modified label
+    """
+    # Load the config file
+    config_path = Path(__file__).parent / 'train.yaml'
+    with open(config_path) as f:
+        config = yaml.safe_load(f)
+
+    # Load the backdoor settings
+    poison_ratio = config.get('poison_ratio', 0.1)  # poisoning ratio
+    target_label = config.get('target_label', 0)    # target label
+    trigger_size = config.get('trigger_size', 4)    # trigger size
+
+    # Create the model
+    model = ViT(num_classes=10)
+
+    # Get the data loaders
+    trainloader, testloader = get_cifar10_dataloaders(
+        batch_size=config['batch_size'],
+        num_workers=config['num_workers'],
+        local_dataset_path=config['dataset_path'],
+        shuffle=True
+    )
+
+    # Inject the backdoor into the training data
+    poisoned_trainloader = inject_backdoor(
+        trainloader,
+        poison_ratio=poison_ratio,
+        target_label=target_label,
+        trigger_size=trigger_size
+    )
+
+    # Create a dataset for testing the backdoor (every sample gets the trigger, labels unchanged)
+    backdoor_testloader = create_backdoor_testset(
+        testloader,
+        trigger_size=trigger_size
+    )
+
+    # Train the model
+    train_model(
+        model=model,
+        trainloader=poisoned_trainloader,
+        testloader=testloader,
+        epochs=config['epochs'],
+        lr=config['lr'],
+        device=f'cuda:{config["gpu"]}',
+        save_dir='../epochs',
+        model_name='ViT_Backdoored',
+        interval=config['interval']
+    )
+
+    # Evaluate the backdoor
+    evaluate_backdoor(model, testloader, backdoor_testloader, target_label, f'cuda:{config["gpu"]}')
+
+def inject_backdoor(dataloader, poison_ratio=0.1, target_label=0, trigger_size=4):
+    """Inject a backdoor into the dataset
+
+    Args:
+        dataloader: original data loader
+        poison_ratio: poisoning ratio, i.e. the fraction of samples that receive the backdoor
+        target_label: attack target label
+        trigger_size: trigger size
+
+    Returns:
+        poisoned_dataloader: data loader with the backdoor injected
+    """
+    # Get the original dataset
+    dataset = dataloader.dataset
+
+    # Collect the data and labels
+    data_list = []
+    targets_list = []
+
+    # Process the data batch by batch
+    for inputs, targets in dataloader:
+        data_list.append(inputs)
+        targets_list.append(targets)
+
+    # Concatenate all batches
+    all_data = torch.cat(data_list)
+    all_targets = torch.cat(targets_list)
+
+    # Determine how many samples to poison
+    num_samples = len(all_data)
+    num_poisoned = int(num_samples * poison_ratio)
+
+    # Randomly pick the indices of the samples to poison
+    poison_indices = torch.randperm(num_samples)[:num_poisoned]
+    # Save the poisoned indices to backdoor_index.npy
+    backdoor_index_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dataset', 'backdoor_index.npy')
+    os.makedirs(os.path.dirname(backdoor_index_path), exist_ok=True)
+    np.save(backdoor_index_path, poison_indices.cpu().numpy())
+    print(f"Saved {num_poisoned} poisoned sample indices to {backdoor_index_path}")
+    # Add the trigger and change the labels
+    for idx in poison_indices:
+        # Add the trigger (small white square in the bottom-right corner)
+        all_data[idx, :, -trigger_size:, -trigger_size:] = 1.0
+        # Change the label to the target label
+        all_targets[idx] = target_label
+
+    # Create a new TensorDataset
+    from torch.utils.data import TensorDataset, DataLoader
+    poisoned_dataset = TensorDataset(all_data, all_targets)
+
+    # Create a new DataLoader
+    poisoned_dataloader = DataLoader(
+        poisoned_dataset,
+        batch_size=dataloader.batch_size,
+        shuffle=True,
+        num_workers=dataloader.num_workers
+    )
+
+    print(f"Successfully injected the backdoor into {num_poisoned}/{num_samples} ({poison_ratio*100:.1f}%) of the samples")
+    return poisoned_dataloader
+
+def create_backdoor_testset(dataloader, trigger_size=4):
+    """Create a test set for evaluating the backdoor: every test sample gets the trigger but keeps its label
+
+    Args:
+        dataloader: original test data loader
+        trigger_size: trigger size
+
+    Returns:
+        backdoor_testloader: test data loader with triggers
+    """
+    # Get the original data and labels
+    data_list = []
+    targets_list = []
+
+    for inputs, targets in dataloader:
+        data_list.append(inputs)
+        targets_list.append(targets)
+
+    # Concatenate all batches
+    all_data = torch.cat(data_list)
+    all_targets = torch.cat(targets_list)
+
+    # Add the trigger to every test sample
+    for i in range(len(all_data)):
+        # Add the trigger (small white square in the bottom-right corner)
+        all_data[i, :, -trigger_size:, -trigger_size:] = 1.0
+
+    # Create a new TensorDataset
+    from torch.utils.data import TensorDataset, DataLoader
+    backdoor_dataset = TensorDataset(all_data, all_targets)
+
+    # Create a new DataLoader
+    backdoor_testloader = DataLoader(
+        backdoor_dataset,
+        batch_size=dataloader.batch_size,
+        shuffle=False,
+        num_workers=dataloader.num_workers
+    )
+
+    print(f"Created the triggered test set with {len(all_data)} samples")
+    return backdoor_testloader
+
+def evaluate_backdoor(model, clean_testloader, backdoor_testloader, target_label, device):
+    """Evaluate the effectiveness of the backdoor attack
+
+    Args:
+        model: the model
+        clean_testloader: clean test set
+        backdoor_testloader: triggered test set
+        target_label: target label
+        device: compute device
+    """
+    model.eval()
+    model.to(device)
+
+    # Accuracy on the clean test set
+    correct = 0
+    total = 0
+    with torch.no_grad():
+        for inputs, targets in tqdm(clean_testloader, desc="Evaluating the clean test set"):
+            inputs, targets = inputs.to(device), targets.to(device)
+            outputs = model(inputs)
+            _, predicted = outputs.max(1)
+            total += targets.size(0)
+            correct += predicted.eq(targets).sum().item()
+
+    clean_acc = 100. * correct / total
+    print(f"Accuracy on the clean test set: {clean_acc:.2f}%")
+
+    # Backdoor attack success rate
+    success = 0
+    total = 0
+    with torch.no_grad():
+        for inputs, targets in tqdm(backdoor_testloader, desc="Evaluating the backdoor attack"):
+            inputs = inputs.to(device)
+            outputs = model(inputs)
+            _, predicted = outputs.max(1)
+            total += targets.size(0)
+            # Count the samples predicted as the target label
+            success += (predicted == target_label).sum().item()
+
+    asr = 100. * success / total  # Attack Success Rate
+    print(f"Backdoor attack success rate: {asr:.2f}%")
+
+    return clean_acc, asr
+
+if __name__ == '__main__':
+    backdoor_train()
ViT-CIFAR10/Classification-backdoor/scripts/train.yaml
ADDED
@@ -0,0 +1,10 @@
+batch_size: 128
+num_workers: 2
+dataset_path: ../dataset
+epochs: 50
+gpu: 5
+lr: 0.1
+interval: 2
+poison_ratio: 0.1
+trigger_size: 2
+target_label: 0
ViT-CIFAR10/Classification-noisy/dataset/index.json
ADDED
The diff for this file is too large to render.
ViT-CIFAR10/Classification-noisy/dataset/info.json
ADDED
@@ -0,0 +1,4 @@
+{
+    "model": "ViT",
+    "classes": ["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
+}
ViT-CIFAR10/Classification-noisy/dataset/labels.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:d13128de212014e257f241a6f6ea7d97f157e02c814dc70456d692fd18a85d32
+size 480128
ViT-CIFAR10/Classification-noisy/dataset/noise_index.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8ec5619582d071b155c031f31847c7ed0935526331d4e9cec47cced521a5cd27
+size 48128
ViT-CIFAR10/Classification-noisy/readme.md
ADDED
@@ -0,0 +1,54 @@
+# ViT-CIFAR10 Training and Feature Extraction
+
+This project trains a ViT model on the CIFAR10 dataset and integrates the functionality needed for feature extraction and visualization.
+
+## The time_travel_saver data extractor
+```python
+# Save the files needed to visualize the training process
+if (epoch + 1) % interval == 0 or (epoch == 0):
+    # Create a dedicated sequential dataloader for collecting embeddings
+    ordered_trainloader = torch.utils.data.DataLoader(
+        trainloader.dataset,
+        batch_size=trainloader.batch_size,
+        shuffle=False,
+        num_workers=trainloader.num_workers
+    )
+    epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')  # save path for this epoch
+    save_model = time_travel_saver(model, ordered_trainloader, device, epoch_save_dir, model_name,
+                                   show=True, layer_name='avg_pool', auto_save_embedding=True)
+    # show: whether to print the model's layer dimensions
+    # layer_name: the layer to extract features from; if None, a layer within the accepted dimension range is used
+    # auto_save_embedding: whether to save the feature vectors automatically (must be True)
+    save_model.save_checkpoint_embeddings_predictions()  # save model weights, features and predictions to epoch_x
+    if epoch == 0:
+        save_model.save_lables_index(path="../dataset")  # save labels and index to dataset
+```
+
+
+## Project structure
+
+- `./scripts/train.yaml`: training configuration file (batch size, learning rate, GPU settings, etc.)
+- `./scripts/train.py`: training script that trains the model and automatically collects feature data
+- `./model/`: trained model weights
+- `./epochs/`: high-dimensional feature vectors, prediction results and other data collected during training
+
+## Usage
+
+1. Set the training parameters in `train.yaml`
+2. Run the training script:
+```
+python train.py
+```
+3. After training, the data can be found at:
+   - Model weights: `./epochs/epoch_{n}/model.pth`
+   - Feature vectors: `./epochs/epoch_{n}/embeddings.npy`
+   - Predictions: `./epochs/epoch_{n}/predictions.npy`
+   - Labels: `./dataset/labels.npy`
+   - Dataset index: `./dataset/index.json`
+
+## Data format
+
+- `embeddings.npy`: feature vectors of shape [n_samples, feature_dim]
+- `predictions.npy`: predicted probabilities of shape [n_samples, n_classes]
+- `labels.npy`: ground-truth labels of shape [n_samples]
+- `index.json`: index information for the train, test and validation splits
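The noisy variant documents the same outputs but additionally ships `dataset/noise_index.npy` (see the file list at the top of this diff). As a sketch only, under the assumption that this file stores the integer indices of the noise-injected samples in the same way `backdoor_index.npy` stores poisoned indices:

```python
import numpy as np

labels = np.load("./dataset/labels.npy")            # [n_samples] ground-truth labels
noise_index = np.load("./dataset/noise_index.npy")  # assumed: indices of noised samples

frac = 100.0 * len(noise_index) / len(labels)
print(f"{len(noise_index)} of {len(labels)} samples marked as noisy ({frac:.1f}%)")

# Per-class count of noised samples, using the class order from dataset/info.json.
classes = ["airplane", "automobile", "bird", "cat", "deer",
           "dog", "frog", "horse", "ship", "truck"]
for c, name in enumerate(classes):
    print(f"{name:>10}: {int((labels[noise_index] == c).sum())} noised samples")
```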
ViT-CIFAR10/Classification-noisy/scripts/create_index.py
ADDED
@@ -0,0 +1,18 @@
+import json
+import os
+
+# Create the full index
+index_dict = {
+    "train": list(range(50000)),        # 50,000 training indices
+    "test": list(range(50000, 60000)),  # test indices
+    "validation": []                    # empty validation set
+}
+
+# Write the index file
+index_path = os.path.join('..', 'dataset', 'index.json')
+with open(index_path, 'w') as f:
+    json.dump(index_dict, f, indent=4)
+
+print(f"Created full index file: {index_path}")
+print(f"Training set: {len(index_dict['train'])} samples")
+print(f"Test set: {len(index_dict['test'])} samples")
ViT-CIFAR10/Classification-noisy/scripts/dataset_utils.py
ADDED
@@ -0,0 +1,274 @@
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import os
+import numpy as np
+import random
+import yaml
+from torch.utils.data import TensorDataset, DataLoader
+
+# Dataset loading
+
+def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+    """Get the CIFAR10 data loaders
+
+    Args:
+        batch_size: batch size
+        num_workers: number of worker processes for data loading
+        local_dataset_path: local dataset path; if given the local dataset is used, otherwise it is downloaded
+
+    Returns:
+        trainloader: training data loader
+        testloader: test data loader
+    """
+    # Data preprocessing
+    transform_train = transforms.Compose([
+        transforms.RandomCrop(32, padding=4),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    transform_test = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    # Set the dataset path
+    if local_dataset_path:
+        print(f"Using local dataset: {local_dataset_path}")
+        # Download only if the dataset is not already present at the given path
+        cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
+        download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
+        dataset_path = local_dataset_path
+    else:
+        print("No local dataset path given, the dataset will be downloaded")
+        download = True
+        dataset_path = '../dataset'
+
+    # Create the dataset directory
+    if not os.path.exists(dataset_path):
+        os.makedirs(dataset_path)
+
+    trainset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=True, download=download, transform=transform_train)
+    trainloader = torch.utils.data.DataLoader(
+        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    testset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=False, download=download, transform=transform_test)
+    testloader = torch.utils.data.DataLoader(
+        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    return trainloader, testloader
+
+def get_noisy_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+    """Get data loaders for the CIFAR10 dataset with added noise
+
+    Args:
+        batch_size: batch size
+        num_workers: number of worker processes for data loading
+        local_dataset_path: local dataset path; if given the local dataset is used, otherwise it is downloaded
+        shuffle: whether to shuffle the data
+
+    Returns:
+        noisy_trainloader: training data loader with added noise
+        testloader: normal test data loader
+    """
+    # Load the original dataset
+    trainloader, testloader = get_cifar10_dataloaders(
+        batch_size=batch_size,
+        num_workers=num_workers,
+        local_dataset_path=local_dataset_path,
+        shuffle=False
+    )
+
+    # Set the device
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    print(f"Using device: {device}")
+
+    # Load the config file
+    config_path = './train.yaml'
+    try:
+        with open(config_path, 'r') as f:
+            config = yaml.safe_load(f)
+    except FileNotFoundError:
+        print(f"Config file not found: {config_path}, using the default configuration")
+        config = {
+            'noise_levels': {
+                'gaussian': [0.1, 0.3],
+                'salt_pepper': [0.05, 0.1],
+                'poisson': [1.0]
+            }
+        }
+
+    # Load the noise parameters
+    noise_levels = config.get('noise_levels', {})
+    gaussian_level = noise_levels.get('gaussian', [0.1, 0.2])
+    salt_pepper_level = noise_levels.get('salt_pepper', [0.05, 0.1])
+    poisson_level = noise_levels.get('poisson', [1.0])[0]
+
+    # Collect the original data and labels
+    data_list = []
+    targets_list = []
+
+    for inputs, targets in trainloader:
+        data_list.append(inputs)
+        targets_list.append(targets)
+
+    # Concatenate all batches
+    all_data = torch.cat(data_list)
+    all_targets = torch.cat(targets_list)
+
+    # Create the noise info dict
+    noise_info = {
+        'noise_types': [],
+        'noise_levels': [],
+        'noise_indices': []
+    }
+
+    # CIFAR10 normalization parameters
+    mean = torch.tensor([0.4914, 0.4822, 0.4465]).view(3, 1, 1).to(device)
+    std = torch.tensor([0.2023, 0.1994, 0.2010]).view(3, 1, 1).to(device)
|
| 133 |
+
|
| 134 |
+
print("开始添加噪声...")
|
| 135 |
+
|
| 136 |
+
# 按标签分组进行处理
|
| 137 |
+
for label_value in range(10):
|
| 138 |
+
# 找出所有具有当前标签的样本索引
|
| 139 |
+
indices = [i for i in range(len(all_targets)) if all_targets[i].item() == label_value]
|
| 140 |
+
|
| 141 |
+
noise_type = None
|
| 142 |
+
noise_ratio = 0.0
|
| 143 |
+
level = None
|
| 144 |
+
|
| 145 |
+
        # 根据标签决定噪声类型和强度
|
| 146 |
+
if label_value == 2: # 高斯噪声强 - 30%数据
|
| 147 |
+
noise_type = 1 # 高斯噪声
|
| 148 |
+
noise_ratio = 0.3
|
| 149 |
+
level = gaussian_level[1] if len(gaussian_level) > 1 else gaussian_level[0]
|
| 150 |
+
elif label_value == 3: # 高斯噪声弱 - 10%数据
|
| 151 |
+
noise_type = 1 # 高斯噪声
|
| 152 |
+
noise_ratio = 0.1
|
| 153 |
+
level = gaussian_level[0]
|
| 154 |
+
elif label_value == 4: # 椒盐噪声强 - 30%数据
|
| 155 |
+
noise_type = 2 # 椒盐噪声
|
| 156 |
+
noise_ratio = 0.3
|
| 157 |
+
level = salt_pepper_level[1] if len(salt_pepper_level) > 1 else salt_pepper_level[0]
|
| 158 |
+
elif label_value == 5: # 椒盐噪声弱 - 10%数据
|
| 159 |
+
noise_type = 2 # 椒盐噪声
|
| 160 |
+
noise_ratio = 0.1
|
| 161 |
+
level = salt_pepper_level[0]
|
| 162 |
+
elif label_value == 6: # 泊松噪声 - 30%数据
|
| 163 |
+
noise_type = 3 # 泊松噪声
|
| 164 |
+
noise_ratio = 0.3
|
| 165 |
+
level = poisson_level
|
| 166 |
+
elif label_value == 7: # 泊松噪声 - 10%数据
|
| 167 |
+
noise_type = 3 # 泊松噪声
|
| 168 |
+
noise_ratio = 0.1
|
| 169 |
+
level = poisson_level
|
| 170 |
+
|
| 171 |
+
# 如果需要添加噪声
|
| 172 |
+
if noise_type is not None and level is not None and noise_ratio > 0:
|
| 173 |
+
# 计算要添加噪声的样本数量
|
| 174 |
+
num_samples_to_add_noise = int(len(indices) * noise_ratio)
|
| 175 |
+
if num_samples_to_add_noise == 0 and len(indices) > 0:
|
| 176 |
+
num_samples_to_add_noise = 1 # 至少添加一个样本
|
| 177 |
+
|
| 178 |
+
# 随机选择要添加噪声的样本索引
|
| 179 |
+
indices_to_add_noise = random.sample(indices, min(num_samples_to_add_noise, len(indices)))
|
| 180 |
+
|
| 181 |
+
print(f"标签 {label_value}: 为 {len(indices_to_add_noise)}/{len(indices)} 个样本添加噪声类型 {noise_type},强度 {level}")
|
| 182 |
+
|
| 183 |
+
# 为选中的样本添加噪声
|
| 184 |
+
for i in indices_to_add_noise:
|
| 185 |
+
# 获取当前图像
|
| 186 |
+
img = all_data[i].to(device)
|
| 187 |
+
|
| 188 |
+
# 反标准化
|
| 189 |
+
img_denorm = img * std + mean
|
| 190 |
+
|
| 191 |
+
# 添加噪声
|
| 192 |
+
if noise_type == 1: # 高斯噪声
|
| 193 |
+
# 转为numpy处理
|
| 194 |
+
img_np = img_denorm.cpu().numpy()
|
| 195 |
+
img_np = np.transpose(img_np, (1, 2, 0)) # C x H x W -> H x W x C
|
| 196 |
+
img_np = np.clip(img_np, 0, 1) * 255.0
|
| 197 |
+
|
| 198 |
+
# 添加高斯噪声
|
| 199 |
+
std_dev = level * 25
|
| 200 |
+
noise = np.random.normal(0, std_dev, img_np.shape)
|
| 201 |
+
noisy_img = img_np + noise
|
| 202 |
+
noisy_img = np.clip(noisy_img, 0, 255)
|
| 203 |
+
|
| 204 |
+
# 转回tensor
|
| 205 |
+
noisy_img = noisy_img / 255.0
|
| 206 |
+
noisy_img = np.transpose(noisy_img, (2, 0, 1)) # H x W x C -> C x H x W
|
| 207 |
+
noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
|
| 208 |
+
|
| 209 |
+
elif noise_type == 2: # 椒盐噪声
|
| 210 |
+
# 转为numpy处理
|
| 211 |
+
img_np = img_denorm.cpu().numpy()
|
| 212 |
+
img_np = np.transpose(img_np, (1, 2, 0)) # C x H x W -> H x W x C
|
| 213 |
+
img_np = np.clip(img_np, 0, 1) * 255.0
|
| 214 |
+
|
| 215 |
+
# 创建掩码
|
| 216 |
+
mask = np.random.random(img_np.shape[:2])
|
| 217 |
+
# 椒噪声 (黑点)
|
| 218 |
+
img_np_copy = img_np.copy()
|
| 219 |
+
img_np_copy[mask < level/2] = 0
|
| 220 |
+
# 盐噪声 (白点)
|
| 221 |
+
img_np_copy[mask > 1 - level/2] = 255
|
| 222 |
+
|
| 223 |
+
# 转回tensor
|
| 224 |
+
noisy_img = img_np_copy / 255.0
|
| 225 |
+
noisy_img = np.transpose(noisy_img, (2, 0, 1)) # H x W x C -> C x H x W
|
| 226 |
+
noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
|
| 227 |
+
|
| 228 |
+
elif noise_type == 3: # 泊松噪声
|
| 229 |
+
# 转为numpy处理
|
| 230 |
+
img_np = img_denorm.cpu().numpy()
|
| 231 |
+
img_np = np.transpose(img_np, (1, 2, 0)) # C x H x W -> H x W x C
|
| 232 |
+
img_np = np.clip(img_np, 0, 1) * 255.0
|
| 233 |
+
|
| 234 |
+
# 添加泊松噪声
|
| 235 |
+
lam = np.maximum(img_np / 255.0 * 10.0, 0.0001)
|
| 236 |
+
noisy_img = np.random.poisson(lam) / 10.0 * 255.0
|
| 237 |
+
noisy_img = np.clip(noisy_img, 0, 255)
|
| 238 |
+
|
| 239 |
+
# 转回tensor
|
| 240 |
+
noisy_img = noisy_img / 255.0
|
| 241 |
+
noisy_img = np.transpose(noisy_img, (2, 0, 1)) # H x W x C -> C x H x W
|
| 242 |
+
noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32)).to(device)
|
| 243 |
+
|
| 244 |
+
# 重新标准化
|
| 245 |
+
noisy_tensor_norm = (noisy_tensor - mean) / std
|
| 246 |
+
|
| 247 |
+
# 更新数据
|
| 248 |
+
all_data[i] = noisy_tensor_norm
|
| 249 |
+
|
| 250 |
+
# 记录噪声信息
|
| 251 |
+
noise_info['noise_types'].append(noise_type)
|
| 252 |
+
noise_info['noise_levels'].append(level)
|
| 253 |
+
noise_info['noise_indices'].append(i)
|
| 254 |
+
|
| 255 |
+
# 保存添加噪声的样本索引
|
| 256 |
+
noise_indices = sorted(noise_info['noise_indices'])
|
| 257 |
+
noise_index_path = os.path.join('..', 'dataset', 'noise_index.npy')
|
| 258 |
+
os.makedirs(os.path.dirname(noise_index_path), exist_ok=True)
|
| 259 |
+
np.save(noise_index_path, noise_indices)
|
| 260 |
+
print(f"已保存噪声样本索引到 {noise_index_path},共 {len(noise_indices)} 个样本")
|
| 261 |
+
|
| 262 |
+
# 创建新的TensorDataset
|
| 263 |
+
noisy_dataset = TensorDataset(all_data, all_targets)
|
| 264 |
+
|
| 265 |
+
# 创建新的DataLoader
|
| 266 |
+
noisy_trainloader = DataLoader(
|
| 267 |
+
noisy_dataset,
|
| 268 |
+
batch_size=batch_size,
|
| 269 |
+
shuffle=shuffle,
|
| 270 |
+
num_workers=num_workers
|
| 271 |
+
)
|
| 272 |
+
|
| 273 |
+
print(f"成功为{len(noise_info['noise_indices'])}/{len(all_data)} ({len(noise_info['noise_indices'])/len(all_data)*100:.1f}%)的样本添加噪声")
|
| 274 |
+
return noisy_trainloader, testloader
|
ViT-CIFAR10/Classification-noisy/scripts/get_raw_data.py
ADDED
|
@@ -0,0 +1,194 @@
|
| 1 |
+
#读取数据集,在../dataset/raw_data下按照数据集的完整排序,1.png,2.png,3.png,...保存
|
| 2 |
+
|
| 3 |
+
import os
|
| 4 |
+
import yaml
|
| 5 |
+
import numpy as np
|
| 6 |
+
import torch
|
| 7 |
+
import torchvision
|
| 8 |
+
import torchvision.transforms as transforms
|
| 9 |
+
from PIL import Image
|
| 10 |
+
from tqdm import tqdm
|
| 11 |
+
import sys
|
| 12 |
+
|
| 13 |
+
def unpickle(file):
|
| 14 |
+
"""读取CIFAR-10数据文件"""
|
| 15 |
+
import pickle
|
| 16 |
+
with open(file, 'rb') as fo:
|
| 17 |
+
dict = pickle.load(fo, encoding='bytes')
|
| 18 |
+
return dict
|
| 19 |
+
|
| 20 |
+
def add_noise_for_preview(image, noise_type, level):
|
| 21 |
+
"""向图像添加不同类型的噪声的预览
|
| 22 |
+
|
| 23 |
+
Args:
|
| 24 |
+
image: 输入图像 (Tensor: C x H x W),范围[0,1]
|
| 25 |
+
noise_type: 噪声类型 (int, 1-3)
|
| 26 |
+
level: 噪声强度 (float)
|
| 27 |
+
|
| 28 |
+
Returns:
|
| 29 |
+
noisy_image: 添加噪声后的图像 (Tensor: C x H x W)
|
| 30 |
+
"""
|
| 31 |
+
# 将图像从Tensor转为Numpy数组
|
| 32 |
+
img_np = image.cpu().numpy()
|
| 33 |
+
img_np = np.transpose(img_np, (1, 2, 0)) # C x H x W -> H x W x C
|
| 34 |
+
|
| 35 |
+
# 根据噪声类型添加噪声
|
| 36 |
+
if noise_type == 1: # 高斯噪声
|
| 37 |
+
noise = np.random.normal(0, level, img_np.shape)
|
| 38 |
+
noisy_img = img_np + noise
|
| 39 |
+
noisy_img = np.clip(noisy_img, 0, 1)
|
| 40 |
+
|
| 41 |
+
elif noise_type == 2: # 椒盐噪声
|
| 42 |
+
# 创建掩码,确定哪些像素将变为椒盐噪声
|
| 43 |
+
noisy_img = img_np.copy() # 创建副本而不是直接修改原图
|
| 44 |
+
mask = np.random.random(img_np.shape[:2])
|
| 45 |
+
# 椒噪声 (黑点)
|
| 46 |
+
noisy_img[mask < level/2] = 0
|
| 47 |
+
# 盐噪声 (白点)
|
| 48 |
+
noisy_img[mask > 1 - level/2] = 1
|
| 49 |
+
|
| 50 |
+
elif noise_type == 3: # 泊松噪声
|
| 51 |
+
# 确保输入值为正数
|
| 52 |
+
lam = np.maximum(img_np * 10.0, 0.0001) # 避免负值和零值
|
| 53 |
+
noisy_img = np.random.poisson(lam) / 10.0
|
| 54 |
+
noisy_img = np.clip(noisy_img, 0, 1)
|
| 55 |
+
|
| 56 |
+
else: # 默认返回原图像
|
| 57 |
+
noisy_img = img_np
|
| 58 |
+
|
| 59 |
+
# 将噪声图像从Numpy数组转回Tensor
|
| 60 |
+
noisy_img = np.transpose(noisy_img, (2, 0, 1)) # H x W x C -> C x H x W
|
| 61 |
+
noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32))
|
| 62 |
+
return noisy_tensor
|
| 63 |
+
|
| 64 |
+
def save_images_from_cifar10_with_noisy(dataset_path, save_dir):
|
| 65 |
+
"""从CIFAR-10数据集中保存图像,对指定索引添加噪声
|
| 66 |
+
|
| 67 |
+
Args:
|
| 68 |
+
dataset_path: CIFAR-10数据集路径
|
| 69 |
+
save_dir: 图像保存路径
|
| 70 |
+
"""
|
| 71 |
+
# 创建保存目录
|
| 72 |
+
os.makedirs(save_dir, exist_ok=True)
|
| 73 |
+
|
| 74 |
+
# 读取噪声样本的索引
|
| 75 |
+
noise_index_path = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'dataset', 'noise_index.npy')
|
| 76 |
+
if os.path.exists(noise_index_path):
|
| 77 |
+
noise_indices = np.load(noise_index_path)
|
| 78 |
+
print(f"已加载 {len(noise_indices)} 个噪声样本索引")
|
| 79 |
+
else:
|
| 80 |
+
noise_indices = []
|
| 81 |
+
print("未找到噪声索引文件,将不添加噪声")
|
| 82 |
+
|
| 83 |
+
# 加载配置
|
| 84 |
+
config_path = './train.yaml'
|
| 85 |
+
with open(config_path, 'r') as f:
|
| 86 |
+
config = yaml.safe_load(f)
|
| 87 |
+
|
| 88 |
+
# 读取噪声参数
|
| 89 |
+
noise_levels = config.get('noise_levels', {})
|
| 90 |
+
gaussian_level = noise_levels.get('gaussian', [0.3])
|
| 91 |
+
salt_pepper_level = noise_levels.get('salt_pepper', [0.1])
|
| 92 |
+
poisson_level = noise_levels.get('poisson', [1.0])[0]
|
| 93 |
+
|
| 94 |
+
# 获取训练集数据
|
| 95 |
+
train_data = []
|
| 96 |
+
train_labels = []
|
| 97 |
+
|
| 98 |
+
# 读取训练数据
|
| 99 |
+
for i in range(1, 6):
|
| 100 |
+
batch_file = os.path.join(dataset_path, f'data_batch_{i}')
|
| 101 |
+
if os.path.exists(batch_file):
|
| 102 |
+
print(f"读取训练批次 {i}")
|
| 103 |
+
batch = unpickle(batch_file)
|
| 104 |
+
train_data.append(batch[b'data'])
|
| 105 |
+
train_labels.extend(batch[b'labels'])
|
| 106 |
+
|
| 107 |
+
# 合并所有训练数据
|
| 108 |
+
if train_data:
|
| 109 |
+
train_data = np.vstack(train_data)
|
| 110 |
+
train_data = train_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
|
| 111 |
+
|
| 112 |
+
# 读取测试数据
|
| 113 |
+
test_file = os.path.join(dataset_path, 'test_batch')
|
| 114 |
+
if os.path.exists(test_file):
|
| 115 |
+
print("读取测试数据")
|
| 116 |
+
test_batch = unpickle(test_file)
|
| 117 |
+
test_data = test_batch[b'data']
|
| 118 |
+
test_labels = test_batch[b'labels']
|
| 119 |
+
test_data = test_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
|
| 120 |
+
else:
|
| 121 |
+
test_data = []
|
| 122 |
+
test_labels = []
|
| 123 |
+
|
| 124 |
+
# 合并训练和测试数据
|
| 125 |
+
all_data = np.concatenate([train_data, test_data]) if len(test_data) > 0 and len(train_data) > 0 else (train_data if len(train_data) > 0 else test_data)
|
| 126 |
+
all_labels = train_labels + test_labels if len(test_labels) > 0 and len(train_labels) > 0 else (train_labels if len(train_labels) > 0 else test_labels)
|
| 127 |
+
|
| 128 |
+
# 设置设备
|
| 129 |
+
device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
|
| 130 |
+
|
| 131 |
+
# 保存图像
|
| 132 |
+
print(f"保存 {len(all_data)} 张图像...")
|
| 133 |
+
|
| 134 |
+
for i, (img, label) in enumerate(tqdm(zip(all_data, all_labels), total=len(all_data))):
|
| 135 |
+
# 检查索引是否在噪声样本索引中
|
| 136 |
+
if i in noise_indices:
|
| 137 |
+
            # 为该样本确定噪声类型和强度
|
| 138 |
+
noise_type = None
|
| 139 |
+
level = None
|
| 140 |
+
|
| 141 |
+
if label == 2: # 高斯噪声强
|
| 142 |
+
noise_type = 1
|
| 143 |
+
level = gaussian_level[1]
|
| 144 |
+
elif label == 3: # 高斯噪声弱
|
| 145 |
+
noise_type = 1
|
| 146 |
+
level = gaussian_level[0]
|
| 147 |
+
elif label == 4: # 椒盐噪声强
|
| 148 |
+
noise_type = 2
|
| 149 |
+
level = salt_pepper_level[1]
|
| 150 |
+
elif label == 5: # 椒盐噪声弱
|
| 151 |
+
noise_type = 2
|
| 152 |
+
level = salt_pepper_level[0]
|
| 153 |
+
elif label == 6: # 泊松噪声
|
| 154 |
+
noise_type = 3
|
| 155 |
+
level = poisson_level
|
| 156 |
+
elif label == 7: # 泊松噪声
|
| 157 |
+
noise_type = 3
|
| 158 |
+
level = poisson_level
|
| 159 |
+
|
| 160 |
+
# 如果是需要添加噪声的标签,则添加噪声
|
| 161 |
+
if noise_type is not None and level is not None:
|
| 162 |
+
# 转换为tensor
|
| 163 |
+
img_tensor = torch.from_numpy(img.astype(np.float32) / 255.0).permute(2, 0, 1).to(device)
|
| 164 |
+
# 添加噪声
|
| 165 |
+
noisy_tensor = add_noise_for_preview(img_tensor, noise_type, level)
|
| 166 |
+
# 转回numpy并保存
|
| 167 |
+
noisy_img = (noisy_tensor.permute(1, 2, 0).cpu().numpy() * 255).astype(np.uint8)
|
| 168 |
+
noisy_pil = Image.fromarray(noisy_img)
|
| 169 |
+
noisy_pil.save(os.path.join(save_dir, f"{i}.png"))
|
| 170 |
+
else:
|
| 171 |
+
# 普通保存
|
| 172 |
+
img_pil = Image.fromarray(img)
|
| 173 |
+
img_pil.save(os.path.join(save_dir, f"{i}.png"))
|
| 174 |
+
else:
|
| 175 |
+
# 保存原始图像
|
| 176 |
+
img_pil = Image.fromarray(img)
|
| 177 |
+
img_pil.save(os.path.join(save_dir, f"{i}.png"))
|
| 178 |
+
|
| 179 |
+
print(f"完成! {len(all_data)} 张图像已保存到 {save_dir}, 其中 {len(noise_indices)} 张添加了噪声")
|
| 180 |
+
|
| 181 |
+
if __name__ == "__main__":
|
| 182 |
+
# 设置路径
|
| 183 |
+
dataset_path = "../dataset/cifar-10-batches-py"
|
| 184 |
+
save_dir = "../dataset/raw_data"
|
| 185 |
+
|
| 186 |
+
# 检查数据集是否存在,如果不存在则下载
|
| 187 |
+
if not os.path.exists(dataset_path):
|
| 188 |
+
print("数据集不存在,正在下载...")
|
| 189 |
+
os.makedirs("../dataset", exist_ok=True)
|
| 190 |
+
transform = transforms.Compose([transforms.ToTensor()])
|
| 191 |
+
trainset = torchvision.datasets.CIFAR10(root="../dataset", train=True, download=True, transform=transform)
|
| 192 |
+
|
| 193 |
+
# 保存图像
|
| 194 |
+
save_images_from_cifar10_with_noisy(dataset_path, save_dir)
|
ViT-CIFAR10/Classification-noisy/scripts/get_representation.py
ADDED
|
@@ -0,0 +1,272 @@
|
| 1 |
+
import torch
|
| 2 |
+
import torch.nn as nn
|
| 3 |
+
import numpy as np
|
| 4 |
+
import os
|
| 5 |
+
import json
|
| 6 |
+
from tqdm import tqdm
|
| 7 |
+
|
| 8 |
+
class time_travel_saver:
|
| 9 |
+
"""可视化数据提取器
|
| 10 |
+
|
| 11 |
+
用于保存模型训练过程中的各种数据,包括:
|
| 12 |
+
1. 模型权重 (.pth)
|
| 13 |
+
2. 高维特征 (representation/*.npy)
|
| 14 |
+
3. 预测结果 (prediction/*.npy)
|
| 15 |
+
4. 标签数据 (label/labels.npy)
|
| 16 |
+
"""
|
| 17 |
+
|
| 18 |
+
def __init__(self, model, dataloader, device, save_dir, model_name,
|
| 19 |
+
auto_save_embedding=False, layer_name=None,show = False):
|
| 20 |
+
"""初始化
|
| 21 |
+
|
| 22 |
+
Args:
|
| 23 |
+
model: 要保存的模型实例
|
| 24 |
+
dataloader: 数据加载器(必须是顺序加载的)
|
| 25 |
+
device: 计算设备(cpu or gpu)
|
| 26 |
+
save_dir: 保存根目录
|
| 27 |
+
model_name: 模型名称
|
| 28 |
+
"""
|
| 29 |
+
self.model = model
|
| 30 |
+
self.dataloader = dataloader
|
| 31 |
+
self.device = device
|
| 32 |
+
self.save_dir = save_dir
|
| 33 |
+
self.model_name = model_name
|
| 34 |
+
self.auto_save = auto_save_embedding
|
| 35 |
+
self.layer_name = layer_name
|
| 36 |
+
|
| 37 |
+
if show and not layer_name:
|
| 38 |
+
layer_dimensions = self.show_dimensions()
|
| 39 |
+
# print(layer_dimensions)
|
| 40 |
+
|
| 41 |
+
def show_dimensions(self):
|
| 42 |
+
"""显示模型中所有层的名称和对应的维度
|
| 43 |
+
|
| 44 |
+
这个函数会输出模型中所有层的名称和它们的输出维度,
|
| 45 |
+
帮助用户选择合适的层来提取特征。
|
| 46 |
+
|
| 47 |
+
Returns:
|
| 48 |
+
layer_dimensions: 包含层名称和维度的字典
|
| 49 |
+
"""
|
| 50 |
+
activation = {}
|
| 51 |
+
layer_dimensions = {}
|
| 52 |
+
|
| 53 |
+
def get_activation(name):
|
| 54 |
+
def hook(model, input, output):
|
| 55 |
+
activation[name] = output.detach()
|
| 56 |
+
return hook
|
| 57 |
+
|
| 58 |
+
# 注册钩子到所有层
|
| 59 |
+
handles = []
|
| 60 |
+
for name, module in self.model.named_modules():
|
| 61 |
+
if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
|
| 62 |
+
handles.append(module.register_forward_hook(get_activation(name)))
|
| 63 |
+
|
| 64 |
+
self.model.eval()
|
| 65 |
+
with torch.no_grad():
|
| 66 |
+
# 获取一个batch来分析每层的输出维度
|
| 67 |
+
inputs, _ = next(iter(self.dataloader))
|
| 68 |
+
inputs = inputs.to(self.device)
|
| 69 |
+
_ = self.model(inputs)
|
| 70 |
+
|
| 71 |
+
# 分析所有层的输出维度
|
| 72 |
+
print("\n模型各层的名称和维度:")
|
| 73 |
+
print("-" * 50)
|
| 74 |
+
print(f"{'层名称':<40} {'特征维度':<15} {'输出形状'}")
|
| 75 |
+
print("-" * 50)
|
| 76 |
+
|
| 77 |
+
for name, feat in activation.items():
|
| 78 |
+
if feat is None:
|
| 79 |
+
continue
|
| 80 |
+
|
| 81 |
+
# 获取特征维度(展平后)
|
| 82 |
+
feat_dim = feat.reshape(feat.size(0), -1).size(1)
|
| 83 |
+
layer_dimensions[name] = feat_dim
|
| 84 |
+
# 打印层信息
|
| 85 |
+
shape_str = str(list(feat.shape))
|
| 86 |
+
print(f"{name:<40} {feat_dim:<15} {shape_str}")
|
| 87 |
+
|
| 88 |
+
print("-" * 50)
|
| 89 |
+
print("注: 特征维度是将输出张量展平后的维度大小")
|
| 90 |
+
print("你可以通过修改time_travel_saver的layer_name参数来选择不同的层")
|
| 91 |
+
print("例如:layer_name='avg_pool'或layer_name='layer4'等")
|
| 92 |
+
|
| 93 |
+
# 移除所有钩子
|
| 94 |
+
for handle in handles:
|
| 95 |
+
handle.remove()
|
| 96 |
+
|
| 97 |
+
return layer_dimensions
|
| 98 |
+
|
| 99 |
+
def _extract_features_and_predictions(self):
|
| 100 |
+
"""提取特征和预测结果
|
| 101 |
+
|
| 102 |
+
Returns:
|
| 103 |
+
features: 高维特征 [样本数, 特征维度]
|
| 104 |
+
predictions: 预测结果 [样本数, 类别数]
|
| 105 |
+
"""
|
| 106 |
+
features = []
|
| 107 |
+
predictions = []
|
| 108 |
+
indices = []
|
| 109 |
+
activation = {}
|
| 110 |
+
|
| 111 |
+
def get_activation(name):
|
| 112 |
+
def hook(model, input, output):
|
| 113 |
+
# 只在需要时保存激活值,避免内存浪费
|
| 114 |
+
if name not in activation or activation[name] is None:
|
| 115 |
+
activation[name] = output.detach()
|
| 116 |
+
return hook
|
| 117 |
+
|
| 118 |
+
# 根据层的名称或维度来选择层
|
| 119 |
+
|
| 120 |
+
# 注册钩子到所有层
|
| 121 |
+
handles = []
|
| 122 |
+
for name, module in self.model.named_modules():
|
| 123 |
+
if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
|
| 124 |
+
handles.append(module.register_forward_hook(get_activation(name)))
|
| 125 |
+
|
| 126 |
+
self.model.eval()
|
| 127 |
+
with torch.no_grad():
|
| 128 |
+
# 首先获取一个batch来分析每层的输出维度
|
| 129 |
+
inputs, _ = next(iter(self.dataloader))
|
| 130 |
+
inputs = inputs.to(self.device)
|
| 131 |
+
_ = self.model(inputs)
|
| 132 |
+
|
| 133 |
+
# 如果指定了层名,则直接使用该层
|
| 134 |
+
if self.layer_name is not None:
|
| 135 |
+
if self.layer_name not in activation:
|
| 136 |
+
raise ValueError(f"指定的层 {self.layer_name} 不存在于模型中")
|
| 137 |
+
|
| 138 |
+
feat = activation[self.layer_name]
|
| 139 |
+
if feat is None:
|
| 140 |
+
raise ValueError(f"指定的层 {self.layer_name} 没有输出特征")
|
| 141 |
+
|
| 142 |
+
suitable_layer_name = self.layer_name
|
| 143 |
+
suitable_dim = feat.reshape(feat.size(0), -1).size(1)
|
| 144 |
+
print(f"使用指定的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
|
| 145 |
+
else:
|
| 146 |
+
# 找到维度在指定范围内的层
|
| 147 |
+
target_dim_range = (256, 2048)
|
| 148 |
+
suitable_layer_name = None
|
| 149 |
+
suitable_dim = None
|
| 150 |
+
|
| 151 |
+
# 分析所有层的输出维度
|
| 152 |
+
for name, feat in activation.items():
|
| 153 |
+
if feat is None:
|
| 154 |
+
continue
|
| 155 |
+
feat_dim = feat.reshape(feat.size(0), -1).size(1)
|
| 156 |
+
if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
|
| 157 |
+
suitable_layer_name = name
|
| 158 |
+
suitable_dim = feat_dim
|
| 159 |
+
break
|
| 160 |
+
|
| 161 |
+
if suitable_layer_name is None:
|
| 162 |
+
raise ValueError("没有找到合适维度的特征层")
|
| 163 |
+
|
| 164 |
+
print(f"自动选择的特征层: {suitable_layer_name}, 特征维度: {suitable_dim}")
|
| 165 |
+
|
| 166 |
+
# 保存层信息
|
| 167 |
+
layer_info = {
|
| 168 |
+
'layer_id': suitable_layer_name,
|
| 169 |
+
'dim': suitable_dim
|
| 170 |
+
}
|
| 171 |
+
layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json')
|
| 172 |
+
with open(layer_info_path, 'w') as f:
|
| 173 |
+
json.dump(layer_info, f)
|
| 174 |
+
|
| 175 |
+
# 清除第一次运行的激活值
|
| 176 |
+
activation.clear()
|
| 177 |
+
|
| 178 |
+
# 现在处理所有数据
|
| 179 |
+
for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="提取特征和预测结果")):
|
| 180 |
+
inputs = inputs.to(self.device)
|
| 181 |
+
outputs = self.model(inputs) # 获取预测结果
|
| 182 |
+
|
| 183 |
+
# 获取并处理特征
|
| 184 |
+
feat = activation[suitable_layer_name]
|
| 185 |
+
flat_features = torch.flatten(feat, start_dim=1)
|
| 186 |
+
features.append(flat_features.cpu().numpy())
|
| 187 |
+
predictions.append(outputs.cpu().numpy())
|
| 188 |
+
|
| 189 |
+
# 清除本次的激活值
|
| 190 |
+
activation.clear()
|
| 191 |
+
|
| 192 |
+
# 移除所有钩子
|
| 193 |
+
for handle in handles:
|
| 194 |
+
handle.remove()
|
| 195 |
+
|
| 196 |
+
if len(features) > 0:
|
| 197 |
+
features = np.vstack(features)
|
| 198 |
+
predictions = np.vstack(predictions)
|
| 199 |
+
return features, predictions
|
| 200 |
+
else:
|
| 201 |
+
return np.array([]), np.array([])
|
| 202 |
+
|
| 203 |
+
def save_lables_index(self, path):
|
| 204 |
+
"""保存标签数据和索引信息
|
| 205 |
+
|
| 206 |
+
Args:
|
| 207 |
+
path: 保存路径
|
| 208 |
+
"""
|
| 209 |
+
os.makedirs(path, exist_ok=True)
|
| 210 |
+
labels_path = os.path.join(path, 'labels.npy')
|
| 211 |
+
index_path = os.path.join(path, 'index.json')
|
| 212 |
+
|
| 213 |
+
# 尝试从不同的属性获取标签
|
| 214 |
+
try:
|
| 215 |
+
if hasattr(self.dataloader.dataset, 'targets'):
|
| 216 |
+
# CIFAR10/CIFAR100使用targets属性
|
| 217 |
+
labels = np.array(self.dataloader.dataset.targets)
|
| 218 |
+
elif hasattr(self.dataloader.dataset, 'labels'):
|
| 219 |
+
# 某些数据集使用labels属性
|
| 220 |
+
labels = np.array(self.dataloader.dataset.labels)
|
| 221 |
+
else:
|
| 222 |
+
# 如果上面的方法都不起作用,则从数据加载器中收集标签
|
| 223 |
+
labels = []
|
| 224 |
+
for _, batch_labels in self.dataloader:
|
| 225 |
+
labels.append(batch_labels.numpy())
|
| 226 |
+
labels = np.concatenate(labels)
|
| 227 |
+
|
| 228 |
+
# 保存标签数据
|
| 229 |
+
np.save(labels_path, labels)
|
| 230 |
+
print(f"标签数据已保存到 {labels_path}")
|
| 231 |
+
|
| 232 |
+
# 创建数据集索引
|
| 233 |
+
num_samples = len(labels)
|
| 234 |
+
indices = list(range(num_samples))
|
| 235 |
+
|
| 236 |
+
# 创建索引字典
|
| 237 |
+
index_dict = {
|
| 238 |
+
"train": indices, # 所有数据默认为训练集
|
| 239 |
+
"test": [], # 初始为空
|
| 240 |
+
"validation": [] # 初始为空
|
| 241 |
+
}
|
| 242 |
+
|
| 243 |
+
# 保存索引到JSON文件
|
| 244 |
+
with open(index_path, 'w') as f:
|
| 245 |
+
json.dump(index_dict, f, indent=4)
|
| 246 |
+
|
| 247 |
+
print(f"数据集索引已保存到 {index_path}")
|
| 248 |
+
|
| 249 |
+
except Exception as e:
|
| 250 |
+
print(f"保存标签和索引时出错: {e}")
|
| 251 |
+
|
| 252 |
+
def save_checkpoint_embeddings_predictions(self, model = None):
|
| 253 |
+
"""保存所有数据"""
|
| 254 |
+
if model is not None:
|
| 255 |
+
self.model = model
|
| 256 |
+
        # 保存模型权重
|
| 257 |
+
os.makedirs(self.save_dir, exist_ok=True)
|
| 258 |
+
model_path = os.path.join(self.save_dir,'model.pth')
|
| 259 |
+
torch.save(self.model.state_dict(), model_path)
|
| 260 |
+
|
| 261 |
+
if self.auto_save:
|
| 262 |
+
# 提取并保存特征和预测结果
|
| 263 |
+
features, predictions = self._extract_features_and_predictions()
|
| 264 |
+
|
| 265 |
+
# 保存特征
|
| 266 |
+
np.save(os.path.join(self.save_dir, 'embeddings.npy'), features)
|
| 267 |
+
# 保存预测结果
|
| 268 |
+
np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions)
|
| 269 |
+
print("\n保存了以下数据:")
|
| 270 |
+
print(f"- 模型权重: {model_path}")
|
| 271 |
+
print(f"- 特征向量: [样本数: {features.shape[0]}, 特征维度: {features.shape[1]}]")
|
| 272 |
+
print(f"- 预测结果: [样本数: {predictions.shape[0]}, 类别数: {predictions.shape[1]}]")
|
ViT-CIFAR10/Classification-noisy/scripts/model.py
ADDED
|
@@ -0,0 +1,171 @@
import torch
import torch.nn as nn
import torch.nn.functional as F

class PatchEmbed(nn.Module):
    """ Split the image into patches and embed them """
    def __init__(self, img_size=32, patch_size=4, in_chans=3, embed_dim=96):
        super().__init__()
        self.img_size = img_size
        self.patch_size = patch_size
        self.n_patches = (img_size // patch_size) ** 2

        self.proj = nn.Conv2d(
            in_chans, embed_dim,
            kernel_size=patch_size, stride=patch_size
        )

    def forward(self, x):
        x = self.proj(x)        # (B, E, H/P, W/P)
        x = x.flatten(2)        # (B, E, N)
        x = x.transpose(1, 2)   # (B, N, E)
        return x

class Attention(nn.Module):
    """ Multi-head self-attention """
    def __init__(self, dim, n_heads=8, qkv_bias=True, attn_p=0., proj_p=0.):
        super().__init__()
        self.n_heads = n_heads
        self.dim = dim
        self.head_dim = dim // n_heads
        self.scale = self.head_dim ** -0.5

        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
        self.attn_drop = nn.Dropout(attn_p)
        self.proj = nn.Linear(dim, dim)
        self.proj_drop = nn.Dropout(proj_p)

    def forward(self, x):
        n_samples, n_tokens, dim = x.shape

        if dim != self.dim:
            raise ValueError

        qkv = self.qkv(x)  # (n_samples, n_patches + 1, 3 * dim)
        qkv = qkv.reshape(
            n_samples, n_tokens, 3, self.n_heads, self.head_dim
        )  # (n_samples, n_patches + 1, 3, n_heads, head_dim)
        qkv = qkv.permute(2, 0, 3, 1, 4)  # (3, n_samples, n_heads, n_patches + 1, head_dim)
        q, k, v = qkv[0], qkv[1], qkv[2]  # each with shape (n_samples, n_heads, n_patches + 1, head_dim)

        k_t = k.transpose(-2, -1)  # (n_samples, n_heads, head_dim, n_patches + 1)
        dp = (q @ k_t) * self.scale  # (n_samples, n_heads, n_patches + 1, n_patches + 1)
        attn = dp.softmax(dim=-1)  # (n_samples, n_heads, n_patches + 1, n_patches + 1)
        attn = self.attn_drop(attn)

        weighted_avg = attn @ v  # (n_samples, n_heads, n_patches + 1, head_dim)
        weighted_avg = weighted_avg.transpose(1, 2)  # (n_samples, n_patches + 1, n_heads, head_dim)
        weighted_avg = weighted_avg.flatten(2)  # (n_samples, n_patches + 1, dim)

        x = self.proj(weighted_avg)  # (n_samples, n_patches + 1, dim)
        x = self.proj_drop(x)  # (n_samples, n_patches + 1, dim)

        return x

class MLP(nn.Module):
    """ Multi-layer perceptron """
    def __init__(self, in_features, hidden_features, out_features, p=0.):
        super().__init__()
        self.fc1 = nn.Linear(in_features, hidden_features)
        self.act = nn.GELU()
        self.fc2 = nn.Linear(hidden_features, out_features)
        self.drop = nn.Dropout(p)

    def forward(self, x):
        x = self.fc1(x)   # (n_samples, n_patches + 1, hidden_features)
        x = self.act(x)   # (n_samples, n_patches + 1, hidden_features)
        x = self.drop(x)  # (n_samples, n_patches + 1, hidden_features)
        x = self.fc2(x)   # (n_samples, n_patches + 1, out_features)
        x = self.drop(x)  # (n_samples, n_patches + 1, out_features)

        return x

class Block(nn.Module):
    """ Transformer encoder block """
    def __init__(self, dim, n_heads, mlp_ratio=4.0, qkv_bias=True,
                 p=0., attn_p=0.):
        super().__init__()
        self.norm1 = nn.LayerNorm(dim, eps=1e-6)
        self.attn = Attention(
            dim,
            n_heads=n_heads,
            qkv_bias=qkv_bias,
            attn_p=attn_p,
            proj_p=p
        )
        self.norm2 = nn.LayerNorm(dim, eps=1e-6)
        hidden_features = int(dim * mlp_ratio)
        self.mlp = MLP(
            in_features=dim,
            hidden_features=hidden_features,
            out_features=dim,
        )

    def forward(self, x):
        x = x + self.attn(self.norm1(x))
        x = x + self.mlp(self.norm2(x))
        return x

class ViT(nn.Module):
    """ Vision Transformer """
    def __init__(
            self,
            img_size=32,
            patch_size=4,
            in_chans=3,
            num_classes=10,
            embed_dim=96,
            depth=12,
            n_heads=8,
            mlp_ratio=4.,
            qkv_bias=True,
            p=0.,
            attn_p=0.,
    ):
        super().__init__()

        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
        )
        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
        self.pos_embed = nn.Parameter(
            torch.zeros(1, 1 + self.patch_embed.n_patches, embed_dim)
        )
        self.pos_drop = nn.Dropout(p=p)

        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                n_heads=n_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                p=p,
                attn_p=attn_p,
            )
            for _ in range(depth)
        ])

        self.norm = nn.LayerNorm(embed_dim, eps=1e-6)
        self.head = nn.Linear(embed_dim, num_classes)

    def forward(self, x):
        n_samples = x.shape[0]
        x = self.patch_embed(x)

        cls_token = self.cls_token.expand(n_samples, -1, -1)
        x = torch.cat((cls_token, x), dim=1)
        x = x + self.pos_embed
        x = self.pos_drop(x)

        for block in self.blocks:
            x = block(x)

        x = self.norm(x)

        cls_token_final = x[:, 0]
        x = self.head(cls_token_final)

        return x
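A quick shape check for this ViT with its CIFAR-10-sized defaults (a minimal sketch; the batch size and random input are illustrative only):

```python
import torch
from model import ViT

model = ViT()                      # defaults: 32x32 input, 4x4 patches, 10 classes
dummy = torch.randn(8, 3, 32, 32)  # a batch of 8 CIFAR-10-sized images
logits = model(dummy)
print(logits.shape)                # expected: torch.Size([8, 10])
```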
ViT-CIFAR10/Classification-noisy/scripts/preview_noise.py
ADDED
|
@@ -0,0 +1,122 @@
#!/usr/bin/env python
# -*- coding: utf-8 -*-

"""
Noise preview script: show the effect of different noise types and strengths on images
"""

import os
import torch
import numpy as np
import matplotlib.pyplot as plt
import torchvision
import torchvision.transforms as transforms
import random

def add_noise_for_preview(image, noise_type, level):
    """Add different types of noise to an image for preview

    Args:
        image: input image (Tensor: C x H x W), range [0, 1]
        noise_type: noise type (int, 1-3)
        level: noise strength (float)

    Returns:
        noisy_image: image with noise added (Tensor: C x H x W)
    """
    # Convert the image from a Tensor to a Numpy array
    img_np = image.cpu().numpy()
    img_np = np.transpose(img_np, (1, 2, 0))  # C x H x W -> H x W x C

    # Add noise according to the noise type
    if noise_type == 1:  # Gaussian noise
        noise = np.random.normal(0, level, img_np.shape)
        noisy_img = img_np + noise
        noisy_img = np.clip(noisy_img, 0, 1)

    elif noise_type == 2:  # salt-and-pepper noise
        # Build a mask that decides which pixels become salt or pepper
        noisy_img = img_np.copy()  # work on a copy instead of modifying the original
        mask = np.random.random(img_np.shape[:2])
        # pepper noise (black dots)
        noisy_img[mask < level/2] = 0
        # salt noise (white dots)
        noisy_img[mask > 1 - level/2] = 1

    elif noise_type == 3:  # Poisson noise
        # Make sure the input values are positive
        lam = np.maximum(img_np * 10.0, 0.0001)  # avoid negative and zero values
        noisy_img = np.random.poisson(lam) / 10.0
        noisy_img = np.clip(noisy_img, 0, 1)

    else:  # return the original image by default
        noisy_img = img_np

    # Convert the noisy image from a Numpy array back to a Tensor
    noisy_img = np.transpose(noisy_img, (2, 0, 1))  # H x W x C -> C x H x W
    noisy_tensor = torch.from_numpy(noisy_img.astype(np.float32))
    return noisy_tensor

def preview_noise_effects(num_samples=5, save_dir='../results'):
    """Show a side-by-side comparison of different noise types and strengths

    Args:
        num_samples: number of samples to show
        save_dir: directory for the saved results
    """
    # Create the output directory
    os.makedirs(save_dir, exist_ok=True)

    # Load the CIFAR10 dataset
    transform = transforms.Compose([transforms.ToTensor()])
    testset = torchvision.datasets.CIFAR10(root='../dataset', train=False, download=True, transform=transform)

    # Randomly pick a few samples to display
    indices = random.sample(range(len(testset)), num_samples)

    # Define the noise types and strengths
    noise_configs = [
        {"name": "Gaussian noise (strong)", "type": 1, "level": 0.2},
        {"name": "Gaussian noise (weak)", "type": 1, "level": 0.1},
        {"name": "Salt-and-pepper noise (strong)", "type": 2, "level": 0.15},
        {"name": "Salt-and-pepper noise (weak)", "type": 2, "level": 0.05},
        {"name": "Poisson noise (strong)", "type": 3, "level": 1.0},
        {"name": "Poisson noise (weak)", "type": 3, "level": 0.5}
    ]

    # CIFAR10 class names
    classes = ('airplane', 'automobile', 'bird', 'cat', 'deer', 'dog', 'frog', 'horse', 'ship', 'truck')

    # Apply every noise type to each sample and display the result
    for i, idx in enumerate(indices):
        # Get the original image and label
        img, label = testset[idx]

        # Create a grid of subplots
        fig, axes = plt.subplots(1, len(noise_configs) + 1, figsize=(18, 3))
        plt.subplots_adjust(wspace=0.3)

        # Show the original image
        img_np = img.permute(1, 2, 0).cpu().numpy()
        axes[0].imshow(img_np)
        axes[0].set_title(f"Original image\nclass: {classes[label]}")
        axes[0].axis('off')

        # Apply each noise configuration and show it
        for j, noise_config in enumerate(noise_configs):
            noisy_img = add_noise_for_preview(img, noise_config["type"], noise_config["level"])
            noisy_img_np = noisy_img.permute(1, 2, 0).cpu().numpy()
            axes[j+1].imshow(np.clip(noisy_img_np, 0, 1))
            axes[j+1].set_title(noise_config["name"])
            axes[j+1].axis('off')

        # Save the figure
        plt.tight_layout()
        plt.savefig(os.path.join(save_dir, f'noise_preview_{i+1}.png'), dpi=150)
        plt.close()

    print(f"Noise comparison previews saved to {save_dir}")

if __name__ == "__main__":
    # Preview the noise effects
    preview_noise_effects(num_samples=10, save_dir='.')
ViT-CIFAR10/Classification-noisy/scripts/train.py
ADDED
|
@@ -0,0 +1,251 @@
|
| 1 |
+
import sys
|
| 2 |
+
import os
|
| 3 |
+
import yaml
|
| 4 |
+
import torch
|
| 5 |
+
import torch.nn as nn
|
| 6 |
+
import torch.optim as optim
|
| 7 |
+
import time
|
| 8 |
+
import logging
|
| 9 |
+
import numpy as np
|
| 10 |
+
from tqdm import tqdm
|
| 11 |
+
from dataset_utils import get_noisy_cifar10_dataloaders
|
| 12 |
+
from model import ViT
|
| 13 |
+
from get_representation import time_travel_saver
|
| 14 |
+
|
| 15 |
+
|
| 16 |
+
def setup_logger(log_file):
|
| 17 |
+
"""配置日志记录器,如果日志文件存在则覆盖
|
| 18 |
+
|
| 19 |
+
Args:
|
| 20 |
+
log_file: 日志文件路径
|
| 21 |
+
|
| 22 |
+
Returns:
|
| 23 |
+
logger: 配置好的日志记录器
|
| 24 |
+
"""
|
| 25 |
+
# 创建logger
|
| 26 |
+
logger = logging.getLogger('train')
|
| 27 |
+
logger.setLevel(logging.INFO)
|
| 28 |
+
|
| 29 |
+
# 移除现有的处理器
|
| 30 |
+
if logger.hasHandlers():
|
| 31 |
+
logger.handlers.clear()
|
| 32 |
+
|
| 33 |
+
# 创建文件处理器,使用'w'模式覆盖现有文件
|
| 34 |
+
fh = logging.FileHandler(log_file, mode='w')
|
| 35 |
+
fh.setLevel(logging.INFO)
|
| 36 |
+
|
| 37 |
+
# 创建控制台处理器
|
| 38 |
+
ch = logging.StreamHandler()
|
| 39 |
+
ch.setLevel(logging.INFO)
|
| 40 |
+
|
| 41 |
+
# 创建格式器
|
| 42 |
+
formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
|
| 43 |
+
fh.setFormatter(formatter)
|
| 44 |
+
ch.setFormatter(formatter)
|
| 45 |
+
|
| 46 |
+
# 添加处理器
|
| 47 |
+
logger.addHandler(fh)
|
| 48 |
+
logger.addHandler(ch)
|
| 49 |
+
|
| 50 |
+
return logger
|
| 51 |
+
|
| 52 |
+
def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
|
| 53 |
+
save_dir='./epochs', model_name='model', interval=1):
|
| 54 |
+
"""通用的模型训练函数
|
| 55 |
+
Args:
|
| 56 |
+
model: 要训练的模型
|
| 57 |
+
trainloader: 训练数据加载器
|
| 58 |
+
testloader: 测试数据加载器
|
| 59 |
+
epochs: 训练轮数
|
| 60 |
+
lr: 学习率
|
| 61 |
+
device: 训练设备,格式为'cuda:N',其中N为GPU编号(0,1,2,3)
|
| 62 |
+
save_dir: 模型保存目录
|
| 63 |
+
model_name: 模型名称
|
| 64 |
+
interval: 模型保存间隔
|
| 65 |
+
"""
|
| 66 |
+
# 检查并设置GPU设备
|
| 67 |
+
if not torch.cuda.is_available():
|
| 68 |
+
print("CUDA不可用,将使用CPU训练")
|
| 69 |
+
device = 'cpu'
|
| 70 |
+
elif not device.startswith('cuda:'):
|
| 71 |
+
device = f'cuda:0'
|
| 72 |
+
|
| 73 |
+
# 确保device格式正确
|
| 74 |
+
if device.startswith('cuda:'):
|
| 75 |
+
gpu_id = int(device.split(':')[1])
|
| 76 |
+
if gpu_id >= torch.cuda.device_count():
|
| 77 |
+
print(f"GPU {gpu_id} 不可用,将使用GPU 0")
|
| 78 |
+
device = 'cuda:0'
|
| 79 |
+
|
| 80 |
+
# 设置保存目录
|
| 81 |
+
if not os.path.exists(save_dir):
|
| 82 |
+
os.makedirs(save_dir)
|
| 83 |
+
|
| 84 |
+
# 设置日志文件路径
|
| 85 |
+
log_file = os.path.join(os.path.dirname(os.path.dirname(os.path.abspath(__file__))), 'epochs', 'train.log')
|
| 86 |
+
if not os.path.exists(os.path.dirname(log_file)):
|
| 87 |
+
os.makedirs(os.path.dirname(log_file))
|
| 88 |
+
|
| 89 |
+
logger = setup_logger(log_file)
|
| 90 |
+
|
| 91 |
+
# 损失函数和优化器
|
| 92 |
+
criterion = nn.CrossEntropyLoss()
|
| 93 |
+
optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
|
| 94 |
+
scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
|
| 95 |
+
|
| 96 |
+
# 移动模型到指定设备
|
| 97 |
+
model = model.to(device)
|
| 98 |
+
best_acc = 0
|
| 99 |
+
start_time = time.time()
|
| 100 |
+
|
| 101 |
+
logger.info(f'开始训练 {model_name}')
|
| 102 |
+
logger.info(f'总轮数: {epochs}, 学习率: {lr}, 设备: {device}')
|
| 103 |
+
|
| 104 |
+
for epoch in range(epochs):
|
| 105 |
+
# 训练阶段
|
| 106 |
+
model.train()
|
| 107 |
+
train_loss = 0
|
| 108 |
+
correct = 0
|
| 109 |
+
total = 0
|
| 110 |
+
|
| 111 |
+
train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
|
| 112 |
+
for batch_idx, (inputs, targets) in enumerate(train_pbar):
|
| 113 |
+
inputs, targets = inputs.to(device), targets.to(device)
|
| 114 |
+
optimizer.zero_grad()
|
| 115 |
+
outputs = model(inputs)
|
| 116 |
+
loss = criterion(outputs, targets)
|
| 117 |
+
loss.backward()
|
| 118 |
+
optimizer.step()
|
| 119 |
+
|
| 120 |
+
train_loss += loss.item()
|
| 121 |
+
_, predicted = outputs.max(1)
|
| 122 |
+
total += targets.size(0)
|
| 123 |
+
correct += predicted.eq(targets).sum().item()
|
| 124 |
+
|
| 125 |
+
# 更新进度条
|
| 126 |
+
train_pbar.set_postfix({
|
| 127 |
+
'loss': f'{train_loss/(batch_idx+1):.3f}',
|
| 128 |
+
'acc': f'{100.*correct/total:.2f}%'
|
| 129 |
+
})
|
| 130 |
+
|
| 131 |
+
# 保存训练阶段的准确率
|
| 132 |
+
train_acc = 100.*correct/total
|
| 133 |
+
train_correct = correct
|
| 134 |
+
train_total = total
|
| 135 |
+
|
| 136 |
+
# 测试阶段
|
| 137 |
+
model.eval()
|
| 138 |
+
test_loss = 0
|
| 139 |
+
correct = 0
|
| 140 |
+
total = 0
|
| 141 |
+
|
| 142 |
+
test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
|
| 143 |
+
with torch.no_grad():
|
| 144 |
+
for batch_idx, (inputs, targets) in enumerate(test_pbar):
|
| 145 |
+
inputs, targets = inputs.to(device), targets.to(device)
|
| 146 |
+
outputs = model(inputs)
|
| 147 |
+
loss = criterion(outputs, targets)
|
| 148 |
+
|
| 149 |
+
test_loss += loss.item()
|
| 150 |
+
_, predicted = outputs.max(1)
|
| 151 |
+
total += targets.size(0)
|
| 152 |
+
correct += predicted.eq(targets).sum().item()
|
| 153 |
+
|
| 154 |
+
# 更新进度条
|
| 155 |
+
test_pbar.set_postfix({
|
| 156 |
+
'loss': f'{test_loss/(batch_idx+1):.3f}',
|
| 157 |
+
'acc': f'{100.*correct/total:.2f}%'
|
| 158 |
+
})
|
| 159 |
+
|
| 160 |
+
# 计算测试精度
|
| 161 |
+
acc = 100.*correct/total
|
| 162 |
+
|
| 163 |
+
# 记录训练和测试的损失与准确率
|
| 164 |
+
logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/(len(trainloader)):.3f} | Train Acc: {train_acc:.2f}% | '
|
| 165 |
+
f'Test Loss: {test_loss/(batch_idx+1):.3f} | Test Acc: {acc:.2f}%')
|
| 166 |
+
|
| 167 |
+
# 保存可视化训练过程所需要的文件
|
| 168 |
+
if (epoch + 1) % interval == 0 or (epoch == 0):
|
| 169 |
+
# 创建一个专门用于收集embedding的顺序dataloader,拼接训练集和测试集
|
| 170 |
+
from torch.utils.data import ConcatDataset
|
| 171 |
+
|
| 172 |
+
def custom_collate_fn(batch):
|
| 173 |
+
# 确保所有数据都是张量
|
| 174 |
+
data = [item[0] for item in batch] # 图像
|
| 175 |
+
target = [item[1] for item in batch] # 标签
|
| 176 |
+
|
| 177 |
+
# 将列表转换为张量
|
| 178 |
+
data = torch.stack(data, 0)
|
| 179 |
+
target = torch.tensor(target)
|
| 180 |
+
|
| 181 |
+
return [data, target]
|
| 182 |
+
|
| 183 |
+
# 合并训练集和测试集
|
| 184 |
+
combined_dataset = ConcatDataset([trainloader.dataset, testloader.dataset])
|
| 185 |
+
|
| 186 |
+
# 创建顺序数据加载器
|
| 187 |
+
ordered_loader = torch.utils.data.DataLoader(
|
| 188 |
+
combined_dataset, # 使用合并后的数据集
|
| 189 |
+
batch_size=trainloader.batch_size,
|
| 190 |
+
shuffle=False, # 确保顺序加载
|
| 191 |
+
num_workers=trainloader.num_workers,
|
| 192 |
+
collate_fn=custom_collate_fn # 使用自定义的collate函数
|
| 193 |
+
)
|
| 194 |
+
epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
|
| 195 |
+
save_model = time_travel_saver(model, ordered_loader, device, epoch_save_dir, model_name,
|
| 196 |
+
show=True, layer_name='blocks.11', auto_save_embedding=True)
|
| 197 |
+
save_model.save_checkpoint_embeddings_predictions()
|
| 198 |
+
if epoch == 0:
|
| 199 |
+
save_model.save_lables_index(path = "../dataset")
|
| 200 |
+
|
| 201 |
+
scheduler.step()
|
| 202 |
+
|
| 203 |
+
logger.info('训练完成!')
|
| 204 |
+
|
| 205 |
+
|
| 206 |
+
def noisy_train():
|
| 207 |
+
"""训练带噪声的模型
|
| 208 |
+
|
| 209 |
+
Returns:
|
| 210 |
+
model: 训练好的模型
|
| 211 |
+
"""
|
| 212 |
+
# 加载配置文件
|
| 213 |
+
config_path = './train.yaml'
|
| 214 |
+
with open(config_path, 'r') as f:
|
| 215 |
+
config = yaml.safe_load(f)
|
| 216 |
+
|
| 217 |
+
# 设置设备
|
| 218 |
+
device = f"cuda:{config.get('gpu', 0)}"
|
| 219 |
+
# 加载添加噪音后的CIFAR10数据集
|
| 220 |
+
batch_size = config.get('batch_size', 128)
|
| 221 |
+
trainloader, testloader = get_noisy_cifar10_dataloaders(batch_size=batch_size)
|
| 222 |
+
|
| 223 |
+
# 初始化模型
|
| 224 |
+
model = ViT().to(device)
|
| 225 |
+
|
| 226 |
+
# 训练参数
|
| 227 |
+
epochs = config.get('epochs', 200)
|
| 228 |
+
    lr = config.get('lr', 0.1)
|
| 229 |
+
save_dir = os.path.join('..', 'epochs')
|
| 230 |
+
interval = config.get('interval', 2)
|
| 231 |
+
os.makedirs(save_dir, exist_ok=True)
|
| 232 |
+
|
| 233 |
+
# 训练模型
|
| 234 |
+
model = train_model(
|
| 235 |
+
model=model,
|
| 236 |
+
trainloader=trainloader,
|
| 237 |
+
testloader=testloader,
|
| 238 |
+
epochs=epochs,
|
| 239 |
+
lr=lr,
|
| 240 |
+
device=device,
|
| 241 |
+
save_dir=save_dir,
|
| 242 |
+
model_name='ViT_noisy',
|
| 243 |
+
interval=interval
|
| 244 |
+
)
|
| 245 |
+
|
| 246 |
+
print(f"训练完成,模型已保存到 {save_dir}")
|
| 247 |
+
return model
|
| 248 |
+
|
| 249 |
+
# 主函数
|
| 250 |
+
if __name__ == '__main__':
|
| 251 |
+
noisy_train()
|
ViT-CIFAR10/Classification-noisy/scripts/train.yaml
ADDED
|
@@ -0,0 +1,25 @@
batch_size: 128
num_workers: 2
dataset_path: ../dataset
epochs: 50
gpu: 4
lr: 0.1
interval: 2
# Noise experiment configuration
noise_types:
  # Different labels get different noise types
  # 0: no noise
  # 1: no noise
  # 2: strong Gaussian noise on 30% of the data
  # 3: weak Gaussian noise on 10% of the data
  # 4: strong salt-and-pepper noise on 30% of the data
  # 5: weak salt-and-pepper noise on 10% of the data
  # 6: strong Poisson noise on 30% of the data
  # 7: weak Poisson noise on 10% of the data
  # 8: no noise
  # 9: no noise
noise_levels:
  # Strength levels for each noise type
  gaussian: [0.1, 0.2]      # Gaussian noise standard deviation parameters
  salt_pepper: [0.05, 0.1]  # fraction of pixels affected by salt-and-pepper noise
  poisson: [1]              # Poisson noise has no strength parameter
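To make the label-to-noise mapping in the comments above concrete, a small illustrative sketch (the mapping table mirrors the comments and the noisy `dataset_utils.py`; it is not part of the training pipeline):

```python
import yaml

with open('train.yaml') as f:
    cfg = yaml.safe_load(f)

levels = cfg['noise_levels']
# label -> (noise kind, fraction of that label's samples, index into the level list)
plan = {2: ('gaussian', 0.3, 1), 3: ('gaussian', 0.1, 0),
        4: ('salt_pepper', 0.3, 1), 5: ('salt_pepper', 0.1, 0),
        6: ('poisson', 0.3, 0), 7: ('poisson', 0.1, 0)}
for label, (kind, ratio, idx) in plan.items():
    print(f"label {label}: {kind} on {ratio:.0%} of samples, level={levels[kind][idx]}")
```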
ViT-CIFAR10/Classification-normal/dataset/index.json
ADDED
|
The diff for this file is too large to render.
See raw diff
|
|
|
ViT-CIFAR10/Classification-normal/dataset/info.json
ADDED
|
@@ -0,0 +1,4 @@
{
    "model": "ViT",
    "classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
}
ViT-CIFAR10/Classification-normal/dataset/labels.npy
ADDED
|
@@ -0,0 +1,3 @@
version https://git-lfs.github.com/spec/v1
oid sha256:9dfee6f275bac0f14e63de8d1091cd1f4487a16d30c6d8726f61d1b8f999c745
size 400128
ViT-CIFAR10/Classification-normal/readme.md
ADDED
|
@@ -0,0 +1,54 @@
# ViT-CIFAR10 Training and Feature Extraction

This project trains a ViT model on the CIFAR10 dataset and integrates the functionality needed for feature extraction and visualization.

## The time_travel_saver data extractor
```python
# Save the files needed to visualize the training process
if (epoch + 1) % interval == 0 or (epoch == 0):
    # Build a dedicated sequential dataloader for collecting embeddings
    ordered_trainloader = torch.utils.data.DataLoader(
        trainloader.dataset,
        batch_size=trainloader.batch_size,
        shuffle=False,
        num_workers=trainloader.num_workers
    )
    epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')  # save path for this epoch
    save_model = time_travel_saver(model, ordered_trainloader, device, epoch_save_dir, model_name,
                                   show=True, layer_name='avg_pool', auto_save_embedding=True)
    # show: whether to print the names and dimensions of the model's layers
    # layer_name: the layer to extract features from; if None, a layer whose dimension falls in the target range is picked
    # auto_save_embedding: whether to save the feature vectors automatically (must be True)
    save_model.save_checkpoint_embeddings_predictions()  # save model weights, features and predictions to epoch_x
    if epoch == 0:
        save_model.save_lables_index(path = "../dataset")  # save labels and index to dataset
```


## Project structure

- `./scripts/train.yaml`: training configuration (batch size, learning rate, GPU settings, etc.)
- `./scripts/train.py`: training script that trains the model and automatically collects feature data
- `./model/`: trained model weights
- `./epochs/`: high-dimensional feature vectors, prediction results and other data collected during training

## Usage

1. Edit `train.yaml` to set the training parameters
2. Run the training script:
```
python train.py
```
3. After training finishes, the data can be found at:
   - Model weights: `./epochs/epoch_{n}/model.pth`
   - Feature vectors: `./epochs/epoch_{n}/embeddings.npy`
   - Predictions: `./epochs/epoch_{n}/predictions.npy`
   - Labels: `./dataset/labels.npy`
   - Dataset index: `./dataset/index.json`

## Data format

- `embeddings.npy`: feature vectors with shape [n_samples, feature_dim]
- `predictions.npy`: per-class prediction scores with shape [n_samples, n_classes]
- `labels.npy`: ground-truth labels with shape [n_samples]
- `index.json`: index information for the training, test and validation splits
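As a quick orientation, a minimal sketch of loading the saved artifacts listed above (the paths follow the layout described here; the epoch number is illustrative):

```python
import json
import numpy as np

# Hypothetical inspection of one saved epoch (epoch_2 is only an example)
embeddings = np.load('./epochs/epoch_2/embeddings.npy')    # [n_samples, feature_dim]
predictions = np.load('./epochs/epoch_2/predictions.npy')  # [n_samples, n_classes]
labels = np.load('./dataset/labels.npy')                   # [n_samples]
with open('./dataset/index.json') as f:
    index = json.load(f)

print(embeddings.shape, predictions.shape, labels.shape, len(index['train']))
n = min(len(labels), len(predictions))
print('accuracy on the overlapping samples:', (predictions[:n].argmax(1) == labels[:n]).mean())
```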
ViT-CIFAR10/Classification-normal/scripts/dataset_utils.py
ADDED
@@ -0,0 +1,59 @@
+import torch
+import torchvision
+import torchvision.transforms as transforms
+import os
+
+# Dataset loading
+
+def get_cifar10_dataloaders(batch_size=128, num_workers=2, local_dataset_path=None, shuffle=False):
+    """Get the CIFAR10 dataloaders
+
+    Args:
+        batch_size: batch size
+        num_workers: number of worker processes for data loading
+        local_dataset_path: local dataset path; if given, the local copy is used, otherwise the dataset is downloaded
+
+    Returns:
+        trainloader: training dataloader
+        testloader: test dataloader
+    """
+    # Data preprocessing
+    transform_train = transforms.Compose([
+        transforms.RandomCrop(32, padding=4),
+        transforms.RandomHorizontalFlip(),
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    transform_test = transforms.Compose([
+        transforms.ToTensor(),
+        transforms.Normalize((0.4914, 0.4822, 0.4465), (0.2023, 0.1994, 0.2010)),
+    ])
+
+    # Set the dataset path
+    if local_dataset_path:
+        print(f"Using local dataset: {local_dataset_path}")
+        # Check whether the dataset is already present; download it if not
+        cifar_path = os.path.join(local_dataset_path, 'cifar-10-batches-py')
+        download = not os.path.exists(cifar_path) or not os.listdir(cifar_path)
+        dataset_path = local_dataset_path
+    else:
+        print("No local dataset path given, the dataset will be downloaded")
+        download = True
+        dataset_path = '../dataset'
+
+    # Create the dataset directory
+    if not os.path.exists(dataset_path):
+        os.makedirs(dataset_path)
+
+    trainset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=True, download=download, transform=transform_train)
+    trainloader = torch.utils.data.DataLoader(
+        trainset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    testset = torchvision.datasets.CIFAR10(
+        root=dataset_path, train=False, download=download, transform=transform_test)
+    testloader = torch.utils.data.DataLoader(
+        testset, batch_size=batch_size, shuffle=shuffle, num_workers=num_workers)
+
+    return trainloader, testloader
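A minimal usage sketch of `get_cifar10_dataloaders` (assumptions for illustration: it is run from the `scripts/` directory so the module is importable, and `../dataset` mirrors the `dataset_path` used in `train.yaml`):

```python
from dataset_utils import get_cifar10_dataloaders

# '../dataset' is an assumption matching the dataset_path in train.yaml.
trainloader, testloader = get_cifar10_dataloaders(
    batch_size=128, num_workers=2, local_dataset_path='../dataset', shuffle=True)

images, targets = next(iter(trainloader))
print(images.shape, targets.shape)  # torch.Size([128, 3, 32, 32]) torch.Size([128])
```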
ViT-CIFAR10/Classification-normal/scripts/get_raw_data.py
ADDED
@@ -0,0 +1,82 @@
+# Read the dataset and save it under ../dataset/raw_data in full dataset order as 1.png, 2.png, 3.png, ...
+
+import os
+import numpy as np
+import torchvision
+import torchvision.transforms as transforms
+from PIL import Image
+from tqdm import tqdm
+
+def unpickle(file):
+    """Read a CIFAR-10 data file"""
+    import pickle
+    with open(file, 'rb') as fo:
+        dict = pickle.load(fo, encoding='bytes')
+    return dict
+
+def save_images_from_cifar10(dataset_path, save_dir):
+    """Save the images from the CIFAR-10 dataset
+
+    Args:
+        dataset_path: CIFAR-10 dataset path
+        save_dir: path to save the images to
+    """
+    # Create the output directory
+    os.makedirs(save_dir, exist_ok=True)
+
+    # Collect the training data
+    train_data = []
+    train_labels = []
+
+    # Read the training batches
+    for i in range(1, 6):
+        batch_file = os.path.join(dataset_path, f'data_batch_{i}')
+        if os.path.exists(batch_file):
+            print(f"Reading training batch {i}")
+            batch = unpickle(batch_file)
+            train_data.append(batch[b'data'])
+            train_labels.extend(batch[b'labels'])
+
+    # Merge all training data
+    if train_data:
+        train_data = np.vstack(train_data)
+        train_data = train_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+
+    # Read the test data
+    test_file = os.path.join(dataset_path, 'test_batch')
+    # if os.path.exists(test_file):
+    #     print("Reading test data")
+    #     test_batch = unpickle(test_file)
+    #     test_data = test_batch[b'data']
+    #     test_labels = test_batch[b'labels']
+    #     test_data = test_data.reshape(-1, 3, 32, 32).transpose(0, 2, 3, 1)
+    # else:
+    test_data = []
+    test_labels = []
+
+    # Merge training and test data
+    all_data = np.concatenate([train_data, test_data]) if len(test_data) > 0 and len(train_data) > 0 else (train_data if len(train_data) > 0 else test_data)
+    all_labels = train_labels + test_labels if len(test_labels) > 0 and len(train_labels) > 0 else (train_labels if len(train_labels) > 0 else test_labels)
+
+    # Save the images
+    print(f"Saving {len(all_data)} images...")
+    for i, (img, label) in enumerate(tqdm(zip(all_data, all_labels), total=len(all_data))):
+        img = Image.fromarray(img)
+        img.save(os.path.join(save_dir, f"{i}.png"))
+
+    print(f"Done! {len(all_data)} images saved to {save_dir}")
+
+if __name__ == "__main__":
+    # Set the paths
+    dataset_path = "../dataset/cifar-10-batches-py"
+    save_dir = "../dataset/raw_data"
+
+    # Check whether the dataset exists; download it if not
+    if not os.path.exists(dataset_path):
+        print("Dataset not found, downloading...")
+        os.makedirs("../dataset", exist_ok=True)
+        transform = transforms.Compose([transforms.ToTensor()])
+        trainset = torchvision.datasets.CIFAR10(root="../dataset", train=True, download=True, transform=transform)
+
+    # Save the images
+    save_images_from_cifar10(dataset_path, save_dir)
ViT-CIFAR10/Classification-normal/scripts/get_representation.py
ADDED
@@ -0,0 +1,272 @@
+import torch
+import torch.nn as nn
+import numpy as np
+import os
+import json
+from tqdm import tqdm
+
+class time_travel_saver:
+    """Visualization data extractor
+
+    Saves the data produced during training, including:
+    1. model weights (.pth)
+    2. high-dimensional features (representation/*.npy)
+    3. prediction results (prediction/*.npy)
+    4. label data (label/labels.npy)
+    """
+
+    def __init__(self, model, dataloader, device, save_dir, model_name,
+                 auto_save_embedding=False, layer_name=None, show=False):
+        """Initialize
+
+        Args:
+            model: the model instance to save
+            dataloader: dataloader (must iterate in a fixed order)
+            device: compute device (cpu or gpu)
+            save_dir: root directory for saving
+            model_name: model name
+        """
+        self.model = model
+        self.dataloader = dataloader
+        self.device = device
+        self.save_dir = save_dir
+        self.model_name = model_name
+        self.auto_save = auto_save_embedding
+        self.layer_name = layer_name
+
+        if show and not layer_name:
+            layer_dimensions = self.show_dimensions()
+            # print(layer_dimensions)
+
+    def show_dimensions(self):
+        """Show the name and output dimension of every layer in the model
+
+        Prints the names of all layers together with their output dimensions,
+        to help the user pick a suitable layer for feature extraction.
+
+        Returns:
+            layer_dimensions: dict mapping layer names to dimensions
+        """
+        activation = {}
+        layer_dimensions = {}
+
+        def get_activation(name):
+            def hook(model, input, output):
+                activation[name] = output.detach()
+            return hook
+
+        # Register hooks on all layers
+        handles = []
+        for name, module in self.model.named_modules():
+            if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                handles.append(module.register_forward_hook(get_activation(name)))
+
+        self.model.eval()
+        with torch.no_grad():
+            # Run one batch to analyse each layer's output dimension
+            inputs, _ = next(iter(self.dataloader))
+            inputs = inputs.to(self.device)
+            _ = self.model(inputs)
+
+            # Analyse the output dimension of every layer
+            print("\nLayer names and dimensions of the model:")
+            print("-" * 50)
+            print(f"{'layer name':<40} {'feature dim':<15} {'output shape'}")
+            print("-" * 50)
+
+            for name, feat in activation.items():
+                if feat is None:
+                    continue
+
+                # Feature dimension (after flattening)
+                feat_dim = feat.reshape(feat.size(0), -1).size(1)
+                layer_dimensions[name] = feat_dim
+                # Print the layer info
+                shape_str = str(list(feat.shape))
+                print(f"{name:<40} {feat_dim:<15} {shape_str}")
+
+            print("-" * 50)
+            print("Note: the feature dimension is the size of the flattened output tensor")
+            print("You can pick a different layer via the layer_name argument of time_travel_saver")
+            print("e.g. layer_name='avg_pool' or layer_name='layer4'")
+
+        # Remove all hooks
+        for handle in handles:
+            handle.remove()
+
+        return layer_dimensions
+
+    def _extract_features_and_predictions(self):
+        """Extract features and predictions
+
+        Returns:
+            features: high-dimensional features [n_samples, feature_dim]
+            predictions: prediction results [n_samples, n_classes]
+        """
+        features = []
+        predictions = []
+        indices = []
+        activation = {}
+
+        def get_activation(name):
+            def hook(model, input, output):
+                # Only store the activation when needed, to avoid wasting memory
+                if name not in activation or activation[name] is None:
+                    activation[name] = output.detach()
+            return hook
+
+        # Select the layer either by name or by dimension
+
+        # Register hooks on all layers
+        handles = []
+        for name, module in self.model.named_modules():
+            if isinstance(module, nn.Module) and not isinstance(module, nn.ModuleList) and not isinstance(module, nn.ModuleDict):
+                handles.append(module.register_forward_hook(get_activation(name)))
+
+        self.model.eval()
+        with torch.no_grad():
+            # First run one batch to analyse each layer's output dimension
+            inputs, _ = next(iter(self.dataloader))
+            inputs = inputs.to(self.device)
+            _ = self.model(inputs)
+
+            # If a layer name was given, use that layer directly
+            if self.layer_name is not None:
+                if self.layer_name not in activation:
+                    raise ValueError(f"The requested layer {self.layer_name} does not exist in the model")
+
+                feat = activation[self.layer_name]
+                if feat is None:
+                    raise ValueError(f"The requested layer {self.layer_name} produced no output features")
+
+                suitable_layer_name = self.layer_name
+                suitable_dim = feat.reshape(feat.size(0), -1).size(1)
+                print(f"Using the requested feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+            else:
+                # Find a layer whose dimension lies within the target range
+                target_dim_range = (256, 2048)
+                suitable_layer_name = None
+                suitable_dim = None
+
+                # Analyse the output dimension of every layer
+                for name, feat in activation.items():
+                    if feat is None:
+                        continue
+                    feat_dim = feat.reshape(feat.size(0), -1).size(1)
+                    if target_dim_range[0] <= feat_dim <= target_dim_range[1]:
+                        suitable_layer_name = name
+                        suitable_dim = feat_dim
+                        break
+
+                if suitable_layer_name is None:
+                    raise ValueError("No feature layer with a suitable dimension was found")
+
+                print(f"Automatically selected feature layer: {suitable_layer_name}, feature dim: {suitable_dim}")
+
+            # Save the layer info
+            layer_info = {
+                'layer_id': suitable_layer_name,
+                'dim': suitable_dim
+            }
+            layer_info_path = os.path.join(os.path.dirname(self.save_dir), 'layer_info.json')
+            with open(layer_info_path, 'w') as f:
+                json.dump(layer_info, f)
+
+            # Clear the activations from the first pass
+            activation.clear()
+
+            # Now process all data
+            for batch_idx, (inputs, _) in enumerate(tqdm(self.dataloader, desc="Extracting features and predictions")):
+                inputs = inputs.to(self.device)
+                outputs = self.model(inputs)  # get the predictions
+
+                # Collect and flatten the features
+                feat = activation[suitable_layer_name]
+                flat_features = torch.flatten(feat, start_dim=1)
+                features.append(flat_features.cpu().numpy())
+                predictions.append(outputs.cpu().numpy())
+
+                # Clear this batch's activations
+                activation.clear()
+
+        # Remove all hooks
+        for handle in handles:
+            handle.remove()
+
+        if len(features) > 0:
+            features = np.vstack(features)
+            predictions = np.vstack(predictions)
+            return features, predictions
+        else:
+            return np.array([]), np.array([])
+
+    def save_lables_index(self, path):
+        """Save the label data and index information
+
+        Args:
+            path: save path
+        """
+        os.makedirs(path, exist_ok=True)
+        labels_path = os.path.join(path, 'labels.npy')
+        index_path = os.path.join(path, 'index.json')
+
+        # Try different attributes to obtain the labels
+        try:
+            if hasattr(self.dataloader.dataset, 'targets'):
+                # CIFAR10/CIFAR100 use the targets attribute
+                labels = np.array(self.dataloader.dataset.targets)
+            elif hasattr(self.dataloader.dataset, 'labels'):
+                # Some datasets use the labels attribute
+                labels = np.array(self.dataloader.dataset.labels)
+            else:
+                # If neither works, collect the labels from the dataloader
+                labels = []
+                for _, batch_labels in self.dataloader:
+                    labels.append(batch_labels.numpy())
+                labels = np.concatenate(labels)
+
+            # Save the label data
+            np.save(labels_path, labels)
+            print(f"Label data saved to {labels_path}")
+
+            # Create the dataset index
+            num_samples = len(labels)
+            indices = list(range(num_samples))
+
+            # Build the index dict
+            index_dict = {
+                "train": indices,  # all data defaults to the training split
+                "test": [],        # empty for now
+                "validation": []   # empty for now
+            }
+
+            # Save the index to a JSON file
+            with open(index_path, 'w') as f:
+                json.dump(index_dict, f, indent=4)
+
+            print(f"Dataset index saved to {index_path}")
+
+        except Exception as e:
+            print(f"Error while saving labels and index: {e}")
+
+    def save_checkpoint_embeddings_predictions(self, model=None):
+        """Save everything"""
+        if model is not None:
+            self.model = model
+        # Save the model weights
+        os.makedirs(self.save_dir, exist_ok=True)
+        model_path = os.path.join(self.save_dir, 'model.pth')
+        torch.save(self.model.state_dict(), model_path)
+
+        if self.auto_save:
+            # Extract and save the features and predictions
+            features, predictions = self._extract_features_and_predictions()
+
+            # Save the features
+            np.save(os.path.join(self.save_dir, 'embeddings.npy'), features)
+            # Save the predictions
+            np.save(os.path.join(self.save_dir, 'predictions.npy'), predictions)
+            print("\nSaved the following data:")
+            print(f"- model weights: {model_path}")
+            print(f"- feature vectors: [n_samples: {features.shape[0]}, feature_dim: {features.shape[1]}]")
+            print(f"- predictions: [n_samples: {predictions.shape[0]}, n_classes: {predictions.shape[1]}]")
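For reference, a hedged sketch of running `time_travel_saver` on its own, outside the training loop, to regenerate embeddings and predictions from a saved checkpoint. The checkpoint path and output directory are illustrative assumptions; `layer_name='blocks.11'` matches what `train.py` uses for this ViT.

```python
import torch
from model import ViT
from dataset_utils import get_cifar10_dataloaders
from get_representation import time_travel_saver

device = 'cuda:0' if torch.cuda.is_available() else 'cpu'

# Load a previously saved checkpoint (the epoch directory is an assumption).
model = ViT(num_classes=10).to(device)
model.load_state_dict(torch.load('../epochs/epoch_2/model.pth', map_location=device))

# A sequential loader (shuffle=False) so saved rows line up with dataset indices.
ordered_loader, _ = get_cifar10_dataloaders(
    batch_size=128, num_workers=2, local_dataset_path='../dataset', shuffle=False)

saver = time_travel_saver(model, ordered_loader, device, '../epochs/epoch_2_rerun', 'ViT',
                          show=True, layer_name='blocks.11', auto_save_embedding=True)
saver.save_checkpoint_embeddings_predictions()  # writes model.pth, embeddings.npy, predictions.npy
saver.save_lables_index(path='../dataset')      # writes labels.npy and index.json
```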
ViT-CIFAR10/Classification-normal/scripts/model.py
ADDED
@@ -0,0 +1,171 @@
+import torch
+import torch.nn as nn
+import torch.nn.functional as F
+
+class PatchEmbed(nn.Module):
+    """Split the image into patches and embed them"""
+    def __init__(self, img_size=32, patch_size=4, in_chans=3, embed_dim=96):
+        super().__init__()
+        self.img_size = img_size
+        self.patch_size = patch_size
+        self.n_patches = (img_size // patch_size) ** 2
+
+        self.proj = nn.Conv2d(
+            in_chans, embed_dim,
+            kernel_size=patch_size, stride=patch_size
+        )
+
+    def forward(self, x):
+        x = self.proj(x)       # (B, E, H/P, W/P)
+        x = x.flatten(2)       # (B, E, N)
+        x = x.transpose(1, 2)  # (B, N, E)
+        return x
+
+class Attention(nn.Module):
+    """Multi-head self-attention"""
+    def __init__(self, dim, n_heads=8, qkv_bias=True, attn_p=0., proj_p=0.):
+        super().__init__()
+        self.n_heads = n_heads
+        self.dim = dim
+        self.head_dim = dim // n_heads
+        self.scale = self.head_dim ** -0.5
+
+        self.qkv = nn.Linear(dim, dim * 3, bias=qkv_bias)
+        self.attn_drop = nn.Dropout(attn_p)
+        self.proj = nn.Linear(dim, dim)
+        self.proj_drop = nn.Dropout(proj_p)
+
+    def forward(self, x):
+        n_samples, n_tokens, dim = x.shape
+
+        if dim != self.dim:
+            raise ValueError
+
+        qkv = self.qkv(x)  # (n_samples, n_patches + 1, 3 * dim)
+        qkv = qkv.reshape(
+            n_samples, n_tokens, 3, self.n_heads, self.head_dim
+        )  # (n_samples, n_patches + 1, 3, n_heads, head_dim)
+        qkv = qkv.permute(2, 0, 3, 1, 4)  # (3, n_samples, n_heads, n_patches + 1, head_dim)
+        q, k, v = qkv[0], qkv[1], qkv[2]  # each with shape (n_samples, n_heads, n_patches + 1, head_dim)
+
+        k_t = k.transpose(-2, -1)  # (n_samples, n_heads, head_dim, n_patches + 1)
+        dp = (q @ k_t) * self.scale  # (n_samples, n_heads, n_patches + 1, n_patches + 1)
+        attn = dp.softmax(dim=-1)  # (n_samples, n_heads, n_patches + 1, n_patches + 1)
+        attn = self.attn_drop(attn)
+
+        weighted_avg = attn @ v  # (n_samples, n_heads, n_patches + 1, head_dim)
+        weighted_avg = weighted_avg.transpose(1, 2)  # (n_samples, n_patches + 1, n_heads, head_dim)
+        weighted_avg = weighted_avg.flatten(2)  # (n_samples, n_patches + 1, dim)
+
+        x = self.proj(weighted_avg)  # (n_samples, n_patches + 1, dim)
+        x = self.proj_drop(x)  # (n_samples, n_patches + 1, dim)
+
+        return x
+
+class MLP(nn.Module):
+    """Multi-layer perceptron"""
+    def __init__(self, in_features, hidden_features, out_features, p=0.):
+        super().__init__()
+        self.fc1 = nn.Linear(in_features, hidden_features)
+        self.act = nn.GELU()
+        self.fc2 = nn.Linear(hidden_features, out_features)
+        self.drop = nn.Dropout(p)
+
+    def forward(self, x):
+        x = self.fc1(x)   # (n_samples, n_patches + 1, hidden_features)
+        x = self.act(x)   # (n_samples, n_patches + 1, hidden_features)
+        x = self.drop(x)  # (n_samples, n_patches + 1, hidden_features)
+        x = self.fc2(x)   # (n_samples, n_patches + 1, out_features)
+        x = self.drop(x)  # (n_samples, n_patches + 1, out_features)
+
+        return x
+
+class Block(nn.Module):
+    """Transformer encoder block"""
+    def __init__(self, dim, n_heads, mlp_ratio=4.0, qkv_bias=True,
+                 p=0., attn_p=0.):
+        super().__init__()
+        self.norm1 = nn.LayerNorm(dim, eps=1e-6)
+        self.attn = Attention(
+            dim,
+            n_heads=n_heads,
+            qkv_bias=qkv_bias,
+            attn_p=attn_p,
+            proj_p=p
+        )
+        self.norm2 = nn.LayerNorm(dim, eps=1e-6)
+        hidden_features = int(dim * mlp_ratio)
+        self.mlp = MLP(
+            in_features=dim,
+            hidden_features=hidden_features,
+            out_features=dim,
+        )
+
+    def forward(self, x):
+        x = x + self.attn(self.norm1(x))
+        x = x + self.mlp(self.norm2(x))
+        return x
+
+class ViT(nn.Module):
+    """Vision Transformer"""
+    def __init__(
+            self,
+            img_size=32,
+            patch_size=4,
+            in_chans=3,
+            num_classes=10,
+            embed_dim=96,
+            depth=12,
+            n_heads=8,
+            mlp_ratio=4.,
+            qkv_bias=True,
+            p=0.,
+            attn_p=0.,
+    ):
+        super().__init__()
+
+        self.patch_embed = PatchEmbed(
+            img_size=img_size,
+            patch_size=patch_size,
+            in_chans=in_chans,
+            embed_dim=embed_dim,
+        )
+        self.cls_token = nn.Parameter(torch.zeros(1, 1, embed_dim))
+        self.pos_embed = nn.Parameter(
+            torch.zeros(1, 1 + self.patch_embed.n_patches, embed_dim)
+        )
+        self.pos_drop = nn.Dropout(p=p)
+
+        self.blocks = nn.ModuleList([
+            Block(
+                dim=embed_dim,
+                n_heads=n_heads,
+                mlp_ratio=mlp_ratio,
+                qkv_bias=qkv_bias,
+                p=p,
+                attn_p=attn_p,
+            )
+            for _ in range(depth)
+        ])
+
+        self.norm = nn.LayerNorm(embed_dim, eps=1e-6)
+        self.head = nn.Linear(embed_dim, num_classes)
+
+    def forward(self, x):
+        n_samples = x.shape[0]
+        x = self.patch_embed(x)
+
+        cls_token = self.cls_token.expand(n_samples, -1, -1)
+        x = torch.cat((cls_token, x), dim=1)
+        x = x + self.pos_embed
+        x = self.pos_drop(x)
+
+        for block in self.blocks:
+            x = block(x)
+
+        x = self.norm(x)
+
+        cls_token_final = x[:, 0]
+        x = self.head(cls_token_final)
+
+        return x
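A quick smoke test of the `ViT` defined above (a sketch, assuming it is imported from this `model.py`); the default hyperparameters already match CIFAR10's 32x32 inputs:

```python
import torch
from model import ViT

model = ViT(num_classes=10)    # defaults: 32x32 images, 4x4 patches, embed_dim 96, depth 12
x = torch.randn(2, 3, 32, 32)  # a dummy batch of two CIFAR10-sized images
logits = model(x)
print(logits.shape)            # torch.Size([2, 10])
```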
ViT-CIFAR10/Classification-normal/scripts/train.py
ADDED
@@ -0,0 +1,220 @@
+import sys
+import os
+import yaml
+from pathlib import Path
+import torch
+import torch.nn as nn
+import torch.optim as optim
+import time
+import logging
+import numpy as np
+from tqdm import tqdm
+
+from dataset_utils import get_cifar10_dataloaders
+from model import ViT
+from get_representation import time_travel_saver
+
+def setup_logger(log_file):
+    """Configure the logger; the log file is overwritten if it already exists
+
+    Args:
+        log_file: path of the log file
+
+    Returns:
+        logger: the configured logger
+    """
+    # Create the logger
+    logger = logging.getLogger('train')
+    logger.setLevel(logging.INFO)
+
+    # Remove any existing handlers
+    if logger.hasHandlers():
+        logger.handlers.clear()
+
+    # Create a file handler; mode 'w' overwrites an existing file
+    fh = logging.FileHandler(log_file, mode='w')
+    fh.setLevel(logging.INFO)
+
+    # Create a console handler
+    ch = logging.StreamHandler()
+    ch.setLevel(logging.INFO)
+
+    # Create the formatter
+    formatter = logging.Formatter('%(asctime)s - %(name)s - %(levelname)s - %(message)s')
+    fh.setFormatter(formatter)
+    ch.setFormatter(formatter)
+
+    # Attach the handlers
+    logger.addHandler(fh)
+    logger.addHandler(ch)
+
+    return logger
+
+def train_model(model, trainloader, testloader, epochs=200, lr=0.1, device='cuda:0',
+                save_dir='./epochs', model_name='model', interval=1):
+    """Generic model training function
+    Args:
+        model: the model to train
+        trainloader: training dataloader
+        testloader: test dataloader
+        epochs: number of training epochs
+        lr: learning rate
+        device: training device in the form 'cuda:N', where N is the GPU id (0,1,2,3)
+        save_dir: directory for saving the model
+        model_name: model name
+        interval: saving interval
+    """
+    # Check and set the GPU device
+    if not torch.cuda.is_available():
+        print("CUDA is not available, training on CPU")
+        device = 'cpu'
+    elif not device.startswith('cuda:'):
+        device = 'cuda:0'
+
+    # Make sure the device string is valid
+    if device.startswith('cuda:'):
+        gpu_id = int(device.split(':')[1])
+        if gpu_id >= torch.cuda.device_count():
+            print(f"GPU {gpu_id} is not available, falling back to GPU 0")
+            device = 'cuda:0'
+
+    # Set up the save directory
+    if not os.path.exists(save_dir):
+        os.makedirs(save_dir)
+
+    # Set up the log file path
+    log_file = os.path.join(os.path.dirname(save_dir), 'epochs', 'train.log')
+    if not os.path.exists(os.path.dirname(log_file)):
+        os.makedirs(os.path.dirname(log_file))
+
+    logger = setup_logger(log_file)
+
+    # Loss function and optimizer
+    criterion = nn.CrossEntropyLoss()
+    optimizer = optim.SGD(model.parameters(), lr=lr, momentum=0.9, weight_decay=5e-4)
+    scheduler = torch.optim.lr_scheduler.CosineAnnealingLR(optimizer, T_max=50)
+
+    # Move the model to the chosen device
+    model = model.to(device)
+    best_acc = 0
+    start_time = time.time()
+
+    logger.info(f'Start training {model_name}')
+    logger.info(f'Total epochs: {epochs}, learning rate: {lr}, device: {device}')
+
+    for epoch in range(epochs):
+        # Training phase
+        model.train()
+        train_loss = 0
+        correct = 0
+        total = 0
+
+        train_pbar = tqdm(trainloader, desc=f'Epoch {epoch+1}/{epochs} [Train]')
+        for batch_idx, (inputs, targets) in enumerate(train_pbar):
+            inputs, targets = inputs.to(device), targets.to(device)
+            optimizer.zero_grad()
+            outputs = model(inputs)
+            loss = criterion(outputs, targets)
+            loss.backward()
+            optimizer.step()
+
+            train_loss += loss.item()
+            _, predicted = outputs.max(1)
+            total += targets.size(0)
+            correct += predicted.eq(targets).sum().item()
+
+            # Update the progress bar
+            train_pbar.set_postfix({
+                'loss': f'{train_loss/(batch_idx+1):.3f}',
+                'acc': f'{100.*correct/total:.2f}%'
+            })
+
+        # Keep the training-phase accuracy
+        train_acc = 100.*correct/total
+        train_correct = correct
+        train_total = total
+
+        # Test phase
+        model.eval()
+        test_loss = 0
+        correct = 0
+        total = 0
+
+        test_pbar = tqdm(testloader, desc=f'Epoch {epoch+1}/{epochs} [Test]')
+        with torch.no_grad():
+            for batch_idx, (inputs, targets) in enumerate(test_pbar):
+                inputs, targets = inputs.to(device), targets.to(device)
+                outputs = model(inputs)
+                loss = criterion(outputs, targets)
+
+                test_loss += loss.item()
+                _, predicted = outputs.max(1)
+                total += targets.size(0)
+                correct += predicted.eq(targets).sum().item()
+
+                # Update the progress bar
+                test_pbar.set_postfix({
+                    'loss': f'{test_loss/(batch_idx+1):.3f}',
+                    'acc': f'{100.*correct/total:.2f}%'
+                })
+
+        # Compute the test accuracy
+        acc = 100.*correct/total
+
+        # Log the training and test loss and accuracy
+        logger.info(f'Epoch: {epoch+1} | Train Loss: {train_loss/(len(trainloader)):.3f} | Train Acc: {train_acc:.2f}% | '
+                    f'Test Loss: {test_loss/(batch_idx+1):.3f} | Test Acc: {acc:.2f}%')
+
+        # Save the files needed to visualize the training process
+        if (epoch + 1) % interval == 0 or (epoch == 0):
+            # Create a sequential dataloader dedicated to collecting embeddings
+            ordered_trainloader = torch.utils.data.DataLoader(
+                trainloader.dataset,
+                batch_size=trainloader.batch_size,
+                shuffle=False,
+                num_workers=trainloader.num_workers
+            )
+            epoch_save_dir = os.path.join(save_dir, f'epoch_{epoch+1}')
+            save_model = time_travel_saver(model, ordered_trainloader, device, epoch_save_dir, model_name,
+                                           show=True, layer_name='blocks.11', auto_save_embedding=True)
+            save_model.save_checkpoint_embeddings_predictions()
+            if epoch == 0:
+                save_model.save_lables_index(path = "../dataset")
+
+        scheduler.step()
+
+    logger.info('Training finished!')
+
+def main():
+    # Load the config file
+    config_path = Path(__file__).parent / 'train.yaml'
+    with open(config_path) as f:
+        config = yaml.safe_load(f)
+
+    # Create the model
+    model = ViT(num_classes=10)
+
+    # Get the dataloaders
+    trainloader, testloader = get_cifar10_dataloaders(
+        batch_size=128,
+        num_workers=2,
+        local_dataset_path=config['dataset_path'],
+        shuffle=True
+    )
+
+    # Train the model
+    train_model(
+        model=model,
+        trainloader=trainloader,
+        testloader=testloader,
+        epochs=config['epochs'],
+        lr=config['lr'],
+        device=f'cuda:{config["gpu"]}',
+        save_dir='../epochs',
+        model_name='ViT',
+        interval=config['interval']
+    )
+
+if __name__ == '__main__':
+    main()
ViT-CIFAR10/Classification-normal/scripts/train.yaml
ADDED
@@ -0,0 +1,7 @@
+batch_size: 128
+num_workers: 2
+dataset_path: ../dataset
+epochs: 50
+gpu: 3
+lr: 0.1
+interval: 2
ZFNet-CIFAR10/Classification-backdoor/dataset/backdoor_index.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8f6fb768bfc0c550525e46e771e20dfd383a2178eac12665161d4113d7bd35d8
+size 40128
ZFNet-CIFAR10/Classification-backdoor/dataset/index.json
ADDED
The diff for this file is too large to render. See raw diff
ZFNet-CIFAR10/Classification-backdoor/dataset/info.json
ADDED
@@ -0,0 +1,4 @@
+{
+    "model": "ZFNet",
+    "classes":["airplane", "automobile", "bird", "cat", "deer", "dog", "frog", "horse", "ship", "truck"]
+}
ZFNet-CIFAR10/Classification-backdoor/dataset/labels.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:701feaec4eada3fbeb995855d431bdd7bfe0cce85eef7c80e3836249940053e2
+size 480128
ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_1/embeddings.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:fdafdff859efe07410d189253cd33741662e90fcb9717989ad79867680ec8b6a
+size 245760128
ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_1/model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:1f0a97bc80678975a0b401e0bf9e8749da81e034532602af607fe691f3ca5717
+size 98993662
ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_1/predictions.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:5d0da7e4d6e6d425a3b2e9fb22d628a185f69b04f3f7276f2895223a98ea27cf
+size 2400128
ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_10/embeddings.npy
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:4082b45cdbecaf6dc8ff72b862d1ea9a873a705f0790272ab0939708fb0cdcec
+size 245760128
ZFNet-CIFAR10/Classification-backdoor/epochs/epoch_10/model.pth
ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e2dea25f72b3c5bcaca347ed37991d828c9b5f74c44e8bb7295de74af1d3e607
+size 98993662