from functools import partial

import torch
import torch.nn as nn
import torch.utils.checkpoint as cp

from .videomaev2_finetune import (
    Block,
    PatchEmbed,
    _cfg,
    get_sinusoid_encoding_table,
)
from .videomaev2_finetune import trunc_normal_ as __call_trunc_normal_


def trunc_normal_(tensor, mean=0., std=1.):
    __call_trunc_normal_(tensor, mean=mean, std=std, a=-std, b=std)


class PretrainVisionTransformerEncoder(nn.Module):
    """Encoder used for VideoMAE v2 pre-training: a plain video Vision
    Transformer that is applied to the visible (unmasked) patch tokens only.
    """

    def __init__(self,
                 img_size=224,
                 patch_size=16,
                 in_chans=3,
                 num_classes=0,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 init_values=None,
                 tubelet_size=2,
                 use_learnable_pos_emb=False,
                 with_cp=False,
                 all_frames=16,
                 cos_attn=False):
        super().__init__()
        self.num_classes = num_classes
        self.num_features = self.embed_dim = embed_dim
        self.patch_embed = PatchEmbed(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=in_chans,
            embed_dim=embed_dim,
            num_frames=all_frames,
            tubelet_size=tubelet_size)
        num_patches = self.patch_embed.num_patches
        self.with_cp = with_cp

        if use_learnable_pos_emb:
            self.pos_embed = nn.Parameter(
                torch.zeros(1, num_patches + 1, embed_dim))
        else:
            # fixed sine-cosine positional embedding
            self.pos_embed = get_sinusoid_encoding_table(
                num_patches, embed_dim)

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                init_values=init_values,
                cos_attn=cos_attn) for i in range(depth)
        ])
        self.norm = norm_layer(embed_dim)
        self.head = nn.Linear(
            embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        if use_learnable_pos_emb:
            trunc_normal_(self.pos_embed, std=.02)

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(
            self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward_features(self, x, mask):
        x = self.patch_embed(x)

        x = x + self.pos_embed.type_as(x).to(x.device).clone().detach()

        B, _, C = x.shape
        # mask is True at masked positions, so ~mask keeps the visible tokens
        x_vis = x[~mask].reshape(B, -1, C)

        for blk in self.blocks:
            if self.with_cp:
                x_vis = cp.checkpoint(blk, x_vis)
            else:
                x_vis = blk(x_vis)

        x_vis = self.norm(x_vis)
        return x_vis

    def forward(self, x, mask):
        x = self.forward_features(x, mask)
        x = self.head(x)
        return x


class PretrainVisionTransformerDecoder(nn.Module):
    """Decoder used for VideoMAE v2 pre-training: a Transformer that maps
    token features back to per-tubelet pixel values.
    """

    def __init__(self,
                 patch_size=16,
                 num_classes=768,
                 embed_dim=768,
                 depth=12,
                 num_heads=12,
                 mlp_ratio=4.,
                 qkv_bias=False,
                 qk_scale=None,
                 drop_rate=0.,
                 attn_drop_rate=0.,
                 drop_path_rate=0.,
                 norm_layer=nn.LayerNorm,
                 init_values=None,
                 num_patches=196,
                 tubelet_size=2,
                 with_cp=False,
                 cos_attn=False):
        super().__init__()
        self.num_classes = num_classes
        assert num_classes == 3 * tubelet_size * patch_size**2
        self.num_features = self.embed_dim = embed_dim
        self.patch_size = patch_size
        self.with_cp = with_cp

        dpr = [x.item() for x in torch.linspace(0, drop_path_rate, depth)]
        self.blocks = nn.ModuleList([
            Block(
                dim=embed_dim,
                num_heads=num_heads,
                mlp_ratio=mlp_ratio,
                qkv_bias=qkv_bias,
                qk_scale=qk_scale,
                drop=drop_rate,
                attn_drop=attn_drop_rate,
                drop_path=dpr[i],
                norm_layer=norm_layer,
                init_values=init_values,
                cos_attn=cos_attn) for i in range(depth)
        ])
        self.norm = norm_layer(embed_dim)
        self.head = nn.Linear(
            embed_dim, num_classes) if num_classes > 0 else nn.Identity()

        self.apply(self._init_weights)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        return len(self.blocks)

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token'}

    def get_classifier(self):
        return self.head

    def reset_classifier(self, num_classes, global_pool=''):
        self.num_classes = num_classes
        self.head = nn.Linear(
            self.embed_dim, num_classes) if num_classes > 0 else nn.Identity()

    def forward(self, x, return_token_num):
        for blk in self.blocks:
            if self.with_cp:
                x = cp.checkpoint(blk, x)
            else:
                x = blk(x)

        if return_token_num > 0:
            # only predict pixels for the masked tokens (appended last)
            x = self.head(self.norm(x[:, -return_token_num:]))
        else:
            # predict pixels for every token
            x = self.head(self.norm(x))
        return x


class PretrainVisionTransformer(nn.Module):
    """VideoMAE v2 pre-training model: an encoder that runs on the visible
    tokens only, followed by a lightweight decoder that reconstructs the
    pixels of the masked tubelets.
    """

    def __init__(
            self,
            img_size=224,
            patch_size=16,
            encoder_in_chans=3,
            encoder_num_classes=0,
            encoder_embed_dim=768,
            encoder_depth=12,
            encoder_num_heads=12,
            decoder_num_classes=1536,
            decoder_embed_dim=512,
            decoder_depth=8,
            decoder_num_heads=8,
            mlp_ratio=4.,
            qkv_bias=False,
            qk_scale=None,
            drop_rate=0.,
            attn_drop_rate=0.,
            drop_path_rate=0.,
            norm_layer=nn.LayerNorm,
            init_values=0.,
            use_learnable_pos_emb=False,
            tubelet_size=2,
            num_classes=0,
            in_chans=0,
            with_cp=False,
            all_frames=16,
            cos_attn=False,
    ):
        super().__init__()
        self.encoder = PretrainVisionTransformerEncoder(
            img_size=img_size,
            patch_size=patch_size,
            in_chans=encoder_in_chans,
            num_classes=encoder_num_classes,
            embed_dim=encoder_embed_dim,
            depth=encoder_depth,
            num_heads=encoder_num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            norm_layer=norm_layer,
            init_values=init_values,
            tubelet_size=tubelet_size,
            use_learnable_pos_emb=use_learnable_pos_emb,
            with_cp=with_cp,
            all_frames=all_frames,
            cos_attn=cos_attn)

        self.decoder = PretrainVisionTransformerDecoder(
            patch_size=patch_size,
            num_patches=self.encoder.patch_embed.num_patches,
            num_classes=decoder_num_classes,
            embed_dim=decoder_embed_dim,
            depth=decoder_depth,
            num_heads=decoder_num_heads,
            mlp_ratio=mlp_ratio,
            qkv_bias=qkv_bias,
            qk_scale=qk_scale,
            drop_rate=drop_rate,
            attn_drop_rate=attn_drop_rate,
            drop_path_rate=drop_path_rate,
            norm_layer=norm_layer,
            init_values=init_values,
            tubelet_size=tubelet_size,
            with_cp=with_cp,
            cos_attn=cos_attn)

        self.encoder_to_decoder = nn.Linear(
            encoder_embed_dim, decoder_embed_dim, bias=False)

        self.mask_token = nn.Parameter(torch.zeros(1, 1, decoder_embed_dim))

        self.pos_embed = get_sinusoid_encoding_table(
            self.encoder.patch_embed.num_patches, decoder_embed_dim)

        trunc_normal_(self.mask_token, std=.02)

    def _init_weights(self, m):
        if isinstance(m, nn.Linear):
            nn.init.xavier_uniform_(m.weight)
            if isinstance(m, nn.Linear) and m.bias is not None:
                nn.init.constant_(m.bias, 0)
        elif isinstance(m, nn.LayerNorm):
            nn.init.constant_(m.bias, 0)
            nn.init.constant_(m.weight, 1.0)

    def get_num_layers(self):
        # the wrapper itself has no `blocks`; report the encoder depth instead
        return self.encoder.get_num_layers()

    @torch.jit.ignore
    def no_weight_decay(self):
        return {'pos_embed', 'cls_token', 'mask_token'}

    def forward(self, x, mask, decode_mask=None):
        decode_vis = mask if decode_mask is None else ~decode_mask

        x_vis = self.encoder(x, mask)  # [B, N_vis, C_enc]
        x_vis = self.encoder_to_decoder(x_vis)  # [B, N_vis, C_dec]
        B, N_vis, C = x_vis.shape

        # split the (frozen) decoder positional embeddings with the same masks
        # so that they line up with the visible and masked tokens below
        expand_pos_embed = self.pos_embed.expand(B, -1, -1).type_as(x).to(
            x.device).clone().detach()
        pos_emd_vis = expand_pos_embed[~mask].reshape(B, -1, C)
        pos_emd_mask = expand_pos_embed[decode_vis].reshape(B, -1, C)

        # [B, N_vis + N_mask, C_dec]: visible tokens first, mask tokens last
        x_full = torch.cat(
            [x_vis + pos_emd_vis, self.mask_token + pos_emd_mask], dim=1)

        x = self.decoder(x_full, pos_emd_mask.shape[1])

        return x


def pretrain_videomae_small_patch16_224(pretrained=False, **kwargs):
    model = PretrainVisionTransformer(
        img_size=224,
        patch_size=16,
        encoder_embed_dim=384,
        encoder_depth=12,
        encoder_num_heads=6,
        encoder_num_classes=0,
        decoder_num_classes=1536,
        decoder_embed_dim=192,
        decoder_num_heads=3,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model


def pretrain_videomae_base_patch16_224(pretrained=False, **kwargs):
    model = PretrainVisionTransformer(
        img_size=224,
        patch_size=16,
        encoder_embed_dim=768,
        encoder_depth=12,
        encoder_num_heads=12,
        encoder_num_classes=0,
        decoder_num_classes=1536,
        decoder_embed_dim=384,
        decoder_num_heads=6,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model


def pretrain_videomae_large_patch16_224(pretrained=False, **kwargs):
    model = PretrainVisionTransformer(
        img_size=224,
        patch_size=16,
        encoder_embed_dim=1024,
        encoder_depth=24,
        encoder_num_heads=16,
        encoder_num_classes=0,
        decoder_num_classes=1536,
        decoder_embed_dim=512,
        decoder_num_heads=8,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model


def pretrain_videomae_huge_patch16_224(pretrained=False, **kwargs):
    model = PretrainVisionTransformer(
        img_size=224,
        patch_size=16,
        encoder_embed_dim=1280,
        encoder_depth=32,
        encoder_num_heads=16,
        encoder_num_classes=0,
        decoder_num_classes=1536,
        decoder_embed_dim=512,
        decoder_num_heads=8,
        mlp_ratio=4,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model


def pretrain_videomae_giant_patch14_224(pretrained=False, **kwargs):
    model = PretrainVisionTransformer(
        img_size=224,
        patch_size=14,
        encoder_embed_dim=1408,
        encoder_depth=40,
        encoder_num_heads=16,
        encoder_num_classes=0,
        decoder_num_classes=1176,
        decoder_embed_dim=512,
        decoder_num_heads=8,
        mlp_ratio=48 / 11,
        qkv_bias=True,
        norm_layer=partial(nn.LayerNorm, eps=1e-6),
        **kwargs)
    model.default_cfg = _cfg()
    if pretrained:
        checkpoint = torch.load(kwargs["init_ckpt"], map_location="cpu")
        model.load_state_dict(checkpoint["model"])
    return model
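

# ----------------------------------------------------------------------------
# Minimal usage sketch (illustrative only, not part of the original module).
# It builds the small pre-training model and runs one forward pass with a
# random tube mask. With the defaults above (16 frames, tubelet_size=2,
# 16x16 patches) the encoder sees (16 / 2) * (224 / 16) ** 2 = 1568 tokens and
# the decoder predicts 3 * 2 * 16 ** 2 = 1536 pixel values per masked token.
# Because the imports at the top are relative, run this as a module inside its
# package (python -m <package>.<this_module>) rather than as a script.
# ----------------------------------------------------------------------------
if __name__ == '__main__':
    model = pretrain_videomae_small_patch16_224()
    video = torch.randn(2, 3, 16, 224, 224)  # [B, C, T, H, W]

    num_patches = model.encoder.patch_embed.num_patches
    num_masked = int(num_patches * 0.9)
    # boolean mask, True = masked; every sample must mask the same number of
    # tokens because the encoder reshapes the visible tokens to [B, -1, C]
    mask = torch.zeros(2, num_patches, dtype=torch.bool)
    for b in range(2):
        mask[b, torch.randperm(num_patches)[:num_masked]] = True

    with torch.no_grad():
        pred = model(video, mask)
    print(pred.shape)  # e.g. torch.Size([2, 1411, 1536])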