# Spatial-temporal-ERF/models/spikformer.py
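# Spikformer-style spiking vision transformer with spatial-temporal token
# selection: each block scores spatial locations by temporal change and local
# spatial dissimilarity, and only the top-scoring tokens pass through
# attention and the MLP.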
# from visualizer import get_local
import torch
import torch.nn as nn
from .neuron import MultiStepParametricLIFNode, MultiStepLIFNode
from spikingjelly.clock_driven import layer
from timm.models.layers import to_2tuple, trunc_normal_, DropPath
from timm.models.registry import register_model
from timm.models.vision_transformer import _cfg
from einops.layers.torch import Rearrange
import torch.nn.functional as F
from functools import partial
__all__ = ['vit_snn',]
def compute_non_zero_rate(x):
    # Debug helper: print the fraction of non-zero (i.e. spiking) entries in x.
    x_shape = torch.tensor(list(x.shape))
    all_neural = torch.prod(x_shape)
    z = torch.nonzero(x)
    print("After attention proj the non-zero rate is", z.shape[0] / all_neural)
class TemporalChangeScorer(nn.Module):
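    """Scores each spatial location by the mean absolute change of its
    channel-averaged activation across adjacent time steps; locations that
    vary more over time receive higher (softmax-normalized) scores."""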
def __init__(self):
super().__init__()
def forward(self, x, prev_drop_mask=None): # x: [T,B,C,H,W]
T,B,C,H,W = x.shape
x_mean = torch.mean(x, dim=2) # [T,B,H,W]
temporal_diff = x_mean[1:] - x_mean[:-1] # [T-1,B,H,W]
avg_temporal_change = torch.abs(temporal_diff).mean(dim=0) # [B,H,W]
scores = avg_temporal_change.view(avg_temporal_change.shape[0], -1) # [B,H*W]
if prev_drop_mask is not None:
scores = scores.masked_fill(prev_drop_mask, float('-inf'))
scores = F.softmax(scores, dim=1)
return scores.reshape(B,H,W) # [B,H,W]
class LocalSpatialSimilarity(nn.Module):
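    """Scores each spatial location by the negative cosine similarity between
    the time-averaged feature map and its 3x3 neighborhood mean, so locations
    that differ most from their surroundings score highest. Note that the
    all-ones conv kernel averages over channels as well as over the 3x3
    window."""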
def __init__(self,embedding_dim=None):
super().__init__()
self.cosine = nn.CosineSimilarity(dim=1, eps=1e-6)
# self.local_weight = nn.Parameter(torch.tensor(0.5), requires_grad=True)
# self.global_weight = nn.Parameter(torch.tensor(0.5), requires_grad=True)
# self.conv = nn.Conv2d(embedding_dim, embedding_dim, kernel_size=3, stride=1, padding=1)
def forward(self, x, prev_drop_mask=None): # x: [T,B,C,H,W]
T,B,C,H,W = x.shape
x = torch.mean(x, dim=0) # [B,C,H,W]
        # all-ones kernel: averages over both the 3x3 window and the channels
        avg_kernel = torch.ones(C, C, 3, 3, device=x.device) / 9.0
        local_mean = F.conv2d(x, avg_kernel, padding=1) # [B,C,H,W]
# local_mean = self.conv(x) # [B,C,H,W]
x_flat = x.view(B, C, -1) # [B,C,H*W]
local_mean_flat = local_mean.view(B, C, -1) # [B,C,H*W]
sim = self.cosine(x_flat, local_mean_flat) # [B,H*W]
scores = -sim
if prev_drop_mask is not None:
scores = scores.masked_fill(prev_drop_mask, float('-inf'))
scores = F.softmax(scores, dim=1)
return scores.reshape(B,H,W) # [B,H,W]
class MLP(nn.Module):
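    """Spiking MLP: two 1x1-conv + BatchNorm + LIF stages, the spiking
    counterpart of the transformer feed-forward block. The `drop` argument is
    kept for interface compatibility but is unused."""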
def __init__(self, in_features, hidden_features=None, out_features=None, drop=0.):
super().__init__()
out_features = out_features or in_features
hidden_features = hidden_features or in_features
# self.fc1 = linear_unit(in_features, hidden_features)
self.fc1_conv = nn.Conv2d(in_features, hidden_features, kernel_size=1, stride=1)
self.fc1_bn = nn.BatchNorm2d(hidden_features)
self.fc1_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
# self.fc2 = linear_unit(hidden_features, out_features)
self.fc2_conv = nn.Conv2d(hidden_features, out_features, kernel_size=1, stride=1)
self.fc2_bn = nn.BatchNorm2d(out_features)
self.fc2_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
# self.drop = nn.Dropout(0.1)
self.c_hidden = hidden_features
self.c_output = out_features
def forward(self, x):
T,B,C,H,W = x.shape
x = self.fc1_conv(x.flatten(0,1))
x = self.fc1_bn(x).reshape(T,B,self.c_hidden,H,W).contiguous()
x = self.fc1_lif(x)
x = self.fc2_conv(x.flatten(0,1))
x = self.fc2_bn(x).reshape(T,B,C,H,W).contiguous()
x = self.fc2_lif(x)
return x
class Attention(nn.Module):
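    """Spiking Self-Attention (SSA): Q, K and V are produced by 1x1-conv + BN
    + LIF and are therefore spike-form, so attention reduces to
    Q @ (K^T @ V) with a fixed 0.125 scale and no softmax."""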
def __init__(self, dim, num_heads=8, qkv_bias=False, qk_scale=None, attn_drop=0., proj_drop=0., sr_ratio=1):
super().__init__()
assert dim % num_heads == 0, f"dim {dim} should be divided by num_heads {num_heads}."
self.dim = dim
self.num_heads = num_heads
        head_dim = dim // num_heads
        self.scale = 0.125  # fixed scale, used instead of head_dim ** -0.5
self.q_conv = nn.Conv1d(dim, dim, kernel_size=1, stride=1,bias=False)
self.q_bn = nn.BatchNorm1d(dim)
self.q_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
self.k_conv = nn.Conv1d(dim, dim, kernel_size=1, stride=1,bias=False)
self.k_bn = nn.BatchNorm1d(dim)
self.k_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
self.v_conv = nn.Conv1d(dim, dim, kernel_size=1, stride=1,bias=False)
self.v_bn = nn.BatchNorm1d(dim)
self.v_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
self.attn_lif = MultiStepLIFNode(tau=2.0, v_threshold=0.5, detach_reset=True, backend='cupy')
self.proj_conv = nn.Conv1d(dim, dim, kernel_size=1, stride=1)
self.proj_bn = nn.BatchNorm1d(dim)
self.proj_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
def forward(self, x):
T,B,C,H,W = x.shape
x = x.flatten(3)
T, B, C, N = x.shape
x_for_qkv = x.flatten(0, 1)
q_conv_out = self.q_conv(x_for_qkv)
q_conv_out = self.q_bn(q_conv_out).reshape(T,B,C,N).contiguous()
q_conv_out = self.q_lif(q_conv_out)
q = q_conv_out.transpose(-1, -2).reshape(T, B, N, self.num_heads, C//self.num_heads).permute(0, 1, 3, 2, 4).contiguous()
k_conv_out = self.k_conv(x_for_qkv)
k_conv_out = self.k_bn(k_conv_out).reshape(T,B,C,N).contiguous()
k_conv_out = self.k_lif(k_conv_out)
k = k_conv_out.transpose(-1, -2).reshape(T, B, N, self.num_heads, C//self.num_heads).permute(0, 1, 3, 2, 4).contiguous()
v_conv_out = self.v_conv(x_for_qkv)
v_conv_out = self.v_bn(v_conv_out).reshape(T,B,C,N).contiguous()
v_conv_out = self.v_lif(v_conv_out)
v = v_conv_out.transpose(-1, -2).reshape(T, B, N, self.num_heads, C//self.num_heads).permute(0, 1, 3, 2, 4).contiguous()
# if res_attn != None:
# v = v + res_attn
        # spike-form attention: compute K^T V first, then multiply by Q and scale
        x = k.transpose(-2, -1) @ v
        x = (q @ x) * self.scale
        x = x.transpose(3, 4).reshape(T, B, C, N).contiguous()
x = self.attn_lif(x)
x = x.flatten(0,1)
x = self.proj_lif(self.proj_bn(self.proj_conv(x)).reshape(T,B,C,H,W))
return x
class Block(nn.Module):
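    """Spikformer block with spatial-temporal token selection: the temporal
    and spatial scores are summed per location, the top k*k tokens are routed
    through attention + MLP (the slow path), and the results are scattered
    back while the remaining tokens pass through unchanged."""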
def __init__(self, dim, num_heads, mlp_ratio=4., qkv_bias=False, qk_scale=None, drop=0., attn_drop=0.,
drop_path=0., norm_layer=nn.LayerNorm, sr_ratio=1, k_value=1.):
super().__init__()
self.norm1 = norm_layer(dim)
self.attn = Attention(dim, num_heads=num_heads, qkv_bias=qkv_bias, qk_scale=qk_scale,
attn_drop=attn_drop, proj_drop=drop, sr_ratio=sr_ratio)
self.drop_path = DropPath(drop_path) if drop_path > 0. else nn.Identity()
self.norm2 = norm_layer(dim)
mlp_hidden_dim = int(dim * mlp_ratio)
self.mlp = MLP(in_features=dim, hidden_features=mlp_hidden_dim, drop=drop)
self.temporal_scorer = TemporalChangeScorer()
self.spatial_scorer = LocalSpatialSimilarity()
self.k_value = k_value
def forward(self, x):
T,B,C,H,W = x.shape
temporal_score = self.temporal_scorer(x) # [B,H,W]
spatial_score = self.spatial_scorer(x) # [B,H,W]
final_score = temporal_score+spatial_score
        # select a k x k grid of the most informative tokens (k = H * k_value)
        self.k = int(H * self.k_value)
        flat_scores = final_score.view(B, -1) # [B, H*W]
        _, indices = torch.topk(flat_scores, k=self.k * self.k, dim=1) # [B, k*k]
# indices = torch.tensor([[_ for _ in range(indices.shape[1])] for __ in range(indices.shape[0])]).cuda()
token_indices = indices.unsqueeze(0).expand(T,-1,-1) # [T,B,kk]
x = x.flatten(3) # [T,B,C,N] where N = H*W
original_x = x.clone()
# slow_path
informative_tokens = x.gather(3, token_indices.unsqueeze(2).expand(-1, -1, C, -1)) # [T,B,C,kk]
slow_x = informative_tokens.reshape(T, B, C, self.k, self.k)
slow_x = slow_x + self.attn(slow_x)
slow_x = slow_x + self.mlp(slow_x)
        # write the processed tokens back into their original positions
        x = original_x.scatter_(3, token_indices.unsqueeze(2).expand(-1, -1, C, -1), slow_x.reshape(T,B,C,self.k*self.k))
x = x.reshape(T,B,C,H,W)
return x
class PatchEmbed(nn.Module):
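    """Spiking patch embedding: four conv-BN-LIF-maxpool stages, each halving
    the spatial resolution (16x overall), followed by a conv-based relative
    position embedding that is added residually."""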
def __init__(self, img_size_h=128, img_size_w=128, patch_size=4, in_channels=2, embed_dims=256):
super().__init__()
self.image_size = [img_size_h, img_size_w]
patch_size = to_2tuple(patch_size)
self.patch_size = patch_size
self.C = in_channels
self.H, self.W = self.image_size[0] // patch_size[0], self.image_size[1] // patch_size[1]
self.num_patches = self.H * self.W
self.proj_conv = nn.Conv2d(in_channels, embed_dims//8, kernel_size=3, stride=1, padding=1, bias=False)
self.proj_bn = nn.BatchNorm2d(embed_dims//8)
self.proj_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
self.maxpool = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
self.proj_conv1 = nn.Conv2d(embed_dims//8, embed_dims//4, kernel_size=3, stride=1, padding=1, bias=False)
self.proj_bn1 = nn.BatchNorm2d(embed_dims//4)
self.proj_lif1 = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
self.maxpool1 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
self.proj_conv2 = nn.Conv2d(embed_dims//4, embed_dims//2, kernel_size=3, stride=1, padding=1, bias=False)
self.proj_bn2 = nn.BatchNorm2d(embed_dims//2)
self.proj_lif2 = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
self.maxpool2 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
self.proj_conv3 = nn.Conv2d(embed_dims//2, embed_dims, kernel_size=3, stride=1, padding=1, bias=False)
self.proj_bn3 = nn.BatchNorm2d(embed_dims)
self.proj_lif3 = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
self.maxpool3 = torch.nn.MaxPool2d(kernel_size=3, stride=2, padding=1, dilation=1, ceil_mode=False)
self.rpe_conv = nn.Conv2d(embed_dims, embed_dims, kernel_size=3, stride=1, padding=1, bias=False)
self.rpe_bn = nn.BatchNorm2d(embed_dims)
self.rpe_lif = MultiStepLIFNode(tau=2.0, detach_reset=True, backend='cupy')
def forward(self, x):
T, B, C, H, W = x.shape
x = self.proj_conv(x.flatten(0, 1)) # have some fire value
x = self.proj_bn(x).reshape(T, B, -1, H, W).contiguous()
x = self.proj_lif(x).flatten(0,1).contiguous()
x = self.maxpool(x)
x = self.proj_conv1(x)
x = self.proj_bn1(x).reshape(T, B, -1, H//2, W//2).contiguous()
x = self.proj_lif1(x).flatten(0, 1).contiguous()
x = self.maxpool1(x)
x = self.proj_conv2(x)
x = self.proj_bn2(x).reshape(T, B, -1, H//4, W//4).contiguous()
x = self.proj_lif2(x).flatten(0, 1).contiguous()
x = self.maxpool2(x)
x = self.proj_conv3(x)
x = self.proj_bn3(x).reshape(T, B, -1, H//8, W//8).contiguous()
x = self.proj_lif3(x).flatten(0, 1).contiguous()
x = self.maxpool3(x)
x_feat = x.reshape(T, B, -1, H//16, W//16).contiguous()
x = self.rpe_conv(x)
x = self.rpe_bn(x).reshape(T, B, -1, H//16, W//16).contiguous()
x = self.rpe_lif(x)
x = x + x_feat
H, W = H // self.patch_size[0], W // self.patch_size[1]
return x, (H, W)
class Spiking_vit(nn.Module):
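    """Single-stage spiking vision transformer: a spiking patch embedding
    followed by `depths` token-selecting blocks and a linear classification
    head applied to the firing rates averaged over space and time."""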
    def __init__(self,
                 img_size_h=128, img_size_w=128, patch_size=16, in_channels=2, num_classes=11,
                 embed_dims=256, num_heads=8, mlp_ratios=4, qkv_bias=False, qk_scale=None,
                 drop_rate=0., attn_drop_rate=0., drop_path_rate=0., norm_layer=nn.LayerNorm,
                 depths=8, sr_ratios=1, k_values=[1, 1, 1, 1, 1, 1, 1, 1]  # e.g. [0.8,0.8,0.7,0.7,0.7,0.7,0.6,0.6]
                 ):
super().__init__()
self.num_classes = num_classes
self.depths = depths
self.k_values = k_values
print("k_values", k_values)
dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depths)] # stochastic depth decay rule
patch_embed = PatchEmbed(img_size_h=img_size_h,
img_size_w=img_size_w,
patch_size=patch_size,
in_channels=in_channels,
embed_dims=embed_dims)
num_patches = patch_embed.num_patches
block = nn.ModuleList([Block(
dim=embed_dims, num_heads=num_heads, mlp_ratio=mlp_ratios, qkv_bias=qkv_bias,
qk_scale=qk_scale, drop=drop_rate, attn_drop=attn_drop_rate, drop_path=dpr[j],
norm_layer=norm_layer, sr_ratio=sr_ratios, k_value=k_values[j])
for j in range(depths)])
        self.patch_embed = patch_embed
        self.block = block
        # classification head; no spiking neuron is needed here, since the
        # input is the firing rate averaged over the T time steps
self.head = nn.Linear(embed_dims, num_classes) if num_classes > 0 else nn.Identity()
self.apply(self._init_weights)
    @torch.jit.ignore
    def _get_pos_embed(self, pos_embed, patch_embed, H, W):
        # unused ViT helper kept from the original template
        if H * W == self.patch_embed.num_patches:
            return pos_embed
        else:
            return F.interpolate(
                pos_embed.reshape(1, patch_embed.H, patch_embed.W, -1).permute(0, 3, 1, 2),
                size=(H, W), mode="bilinear").reshape(1, -1, H * W).permute(0, 2, 1)
def _init_weights(self, m):
if isinstance(m, nn.Linear):
trunc_normal_(m.weight, std=.02)
if isinstance(m, nn.Linear) and m.bias is not None:
nn.init.constant_(m.bias, 0)
elif isinstance(m, nn.LayerNorm):
nn.init.constant_(m.bias, 0)
nn.init.constant_(m.weight, 1.0)
    def forward_features(self, x):
        x, (H, W) = self.patch_embed(x)
        for blk in self.block:
            x = blk(x)
        return x.flatten(3).mean(3)  # spatial average -> [T, B, C]
    def forward(self, x):
        T = 4  # number of time steps; the static image is repeated (direct encoding)
        x = x.unsqueeze(0).repeat(T, 1, 1, 1, 1)
        x = self.forward_features(x)
        x = self.head(x.mean(0))  # average over time steps, then classify
        return x
@register_model
def vit_snn(pretrained=False, **kwargs):
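    """timm entry point for the ImageNet-scale configuration (224x224 input,
    embedding dim 512, 8 heads, 8 blocks)."""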
model = Spiking_vit(
img_size_h=224, img_size_w=224,
patch_size=16, embed_dims=512, num_heads=8, mlp_ratios=4,
in_channels=3, num_classes=1000, qkv_bias=False,
norm_layer=partial(nn.LayerNorm, eps=1e-6), depths=8, sr_ratios=1,
**kwargs
)
model.default_cfg = _cfg()
return model
from timm.models import create_model
if __name__ == '__main__':
    x = torch.randn(2, 3, 224, 224).cuda()
# new_patch = PatchEmbed()
# new_patch.cuda()
# y, _ = new_patch(x)
model = create_model(
'vit_snn',
pretrained=False,
drop_rate=0,
drop_path_rate=0.1,
drop_block_rate=None,
).cuda()
model.eval()
y = model(x)
print(y.shape)
print('Test Good!')
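# Note: the LIF neurons above are created with backend='cupy', so this file
# requires a CUDA device with CuPy installed (per the SpikingJelly docs);
# switching the backend to 'torch' should allow a CPU run, though that is
# untested here.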