"""PyTorch BERT model."""

from __future__ import absolute_import, division, print_function, unicode_literals

import logging
import math

import torch
from torch import nn
from torch.utils.checkpoint import checkpoint

from .modeling_utils import ProteinConfig
from .modeling_utils import ProteinModel
from .modeling_utils import prune_linear_layer
from .modeling_utils import get_activation_fn
from .modeling_utils import LayerNorm
from .modeling_utils import MLMHead
from .modeling_utils import ValuePredictionHead
from .modeling_utils import SequenceClassificationHead
from .modeling_utils import SequenceToSequenceClassificationHead
from .modeling_utils import PairwiseContactPredictionHead
from ..registry import registry

logger = logging.getLogger(__name__)

URL_PREFIX = "https://s3.amazonaws.com/proteindata/pytorch-models/"
BERT_PRETRAINED_MODEL_ARCHIVE_MAP = {
    'bert-base': URL_PREFIX + "bert-base-pytorch_model.bin",
}
BERT_PRETRAINED_CONFIG_ARCHIVE_MAP = {
    'bert-base': URL_PREFIX + "bert-base-config.json",
}


class ProteinBertConfig(ProteinConfig):
    r"""
    :class:`~pytorch_transformers.ProteinBertConfig` is the configuration class to store the
    configuration of a `ProteinBertModel`.

    Arguments:
        vocab_size: Vocabulary size of `input_ids` in `ProteinBertModel`.
        hidden_size: Size of the encoder layers and the pooler layer.
        num_hidden_layers: Number of hidden layers in the ProteinBert encoder.
        num_attention_heads: Number of attention heads for each attention layer in
            the ProteinBert encoder.
        intermediate_size: The size of the "intermediate" (i.e., feed-forward)
            layer in the ProteinBert encoder.
        hidden_act: The non-linear activation function (function or string) in the
            encoder and pooler. If a string, "gelu", "relu" and "swish" are supported.
        hidden_dropout_prob: The dropout probability for all fully connected
            layers in the embeddings, encoder, and pooler.
        attention_probs_dropout_prob: The dropout ratio for the attention
            probabilities.
        max_position_embeddings: The maximum sequence length that this model might
            ever be used with. Typically set this to something large just in case
            (e.g., 512 or 1024 or 2048).
        type_vocab_size: The vocabulary size of the `token_type_ids` passed into
            `ProteinBertModel`.
        initializer_range: The stddev of the truncated_normal_initializer for
            initializing all weight matrices.
        layer_norm_eps: The epsilon used by LayerNorm.
        temporal_pooling: Strategy used by the pooler to collapse the sequence
            dimension ('mean', 'max', 'concat', 'topmax' or 'light_attention');
            any other value falls back to first-token pooling.
        freeze_embedding: If True, downstream task models run the base encoder
            in eval mode during their forward pass.
    """
    pretrained_config_archive_map = BERT_PRETRAINED_CONFIG_ARCHIVE_MAP

    def __init__(self,
                 vocab_size: int = 30,
                 hidden_size: int = 768,
                 num_hidden_layers: int = 12,
                 num_attention_heads: int = 12,
                 intermediate_size: int = 3072,
                 hidden_act: str = "gelu",
                 hidden_dropout_prob: float = 0.1,
                 attention_probs_dropout_prob: float = 0.1,
                 max_position_embeddings: int = 8096,
                 type_vocab_size: int = 2,
                 initializer_range: float = 0.02,
                 layer_norm_eps: float = 1e-12,
                 temporal_pooling: str = 'attention',
                 freeze_embedding: bool = False,
                 **kwargs):
        super().__init__(**kwargs)
        self.vocab_size = vocab_size
        self.hidden_size = hidden_size
        self.num_hidden_layers = num_hidden_layers
        self.num_attention_heads = num_attention_heads
        self.hidden_act = hidden_act
        self.intermediate_size = intermediate_size
        self.hidden_dropout_prob = hidden_dropout_prob
        self.attention_probs_dropout_prob = attention_probs_dropout_prob
        self.max_position_embeddings = max_position_embeddings
        self.type_vocab_size = type_vocab_size
        self.initializer_range = initializer_range
        self.layer_norm_eps = layer_norm_eps
        self.temporal_pooling = temporal_pooling
        self.freeze_embedding = freeze_embedding
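

# A minimal usage sketch of the config (hypothetical hyperparameters):
#
#     config = ProteinBertConfig(num_hidden_layers=6, num_attention_heads=8,
#                                hidden_size=512, intermediate_size=2048)
#     model = ProteinBertModel(config)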


class ProteinBertEmbeddings(nn.Module):
    """Construct the embeddings from word, position and token_type embeddings."""

    def __init__(self, config):
        super().__init__()
        self.word_embeddings = nn.Embedding(
            config.vocab_size, config.hidden_size, padding_idx=0)
        self.position_embeddings = nn.Embedding(
            config.max_position_embeddings, config.hidden_size)
        self.token_type_embeddings = nn.Embedding(config.type_vocab_size, config.hidden_size)

        # self.LayerNorm is kept CamelCase to match the original BERT variable
        # names, so pretrained checkpoints load cleanly.
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, input_ids, token_type_ids=None, position_ids=None):
        seq_length = input_ids.size(1)
        if position_ids is None:
            position_ids = torch.arange(seq_length, dtype=torch.long, device=input_ids.device)
            position_ids = position_ids.unsqueeze(0).expand_as(input_ids)
        if token_type_ids is None:
            token_type_ids = torch.zeros_like(input_ids)

        words_embeddings = self.word_embeddings(input_ids)
        position_embeddings = self.position_embeddings(position_ids)
        token_type_embeddings = self.token_type_embeddings(token_type_ids)

        embeddings = words_embeddings + position_embeddings + token_type_embeddings
        embeddings = self.LayerNorm(embeddings)
        embeddings = self.dropout(embeddings)
        return embeddings


class ProteinBertSelfAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        if config.hidden_size % config.num_attention_heads != 0:
            raise ValueError(
                "The hidden size (%d) is not a multiple of the number of attention "
                "heads (%d)" % (config.hidden_size, config.num_attention_heads))
        self.output_attentions = config.output_attentions

        self.num_attention_heads = config.num_attention_heads
        self.attention_head_size = int(config.hidden_size / config.num_attention_heads)
        self.all_head_size = self.num_attention_heads * self.attention_head_size

        self.query = nn.Linear(config.hidden_size, self.all_head_size)
        self.key = nn.Linear(config.hidden_size, self.all_head_size)
        self.value = nn.Linear(config.hidden_size, self.all_head_size)

        self.dropout = nn.Dropout(config.attention_probs_dropout_prob)

    def transpose_for_scores(self, x):
        # (batch, seq_len, all_head_size) -> (batch, num_heads, seq_len, head_size)
        new_x_shape = x.size()[:-1] + (self.num_attention_heads, self.attention_head_size)
        x = x.view(*new_x_shape)
        return x.permute(0, 2, 1, 3)

    def forward(self, hidden_states, attention_mask):
        mixed_query_layer = self.query(hidden_states)
        mixed_key_layer = self.key(hidden_states)
        mixed_value_layer = self.value(hidden_states)

        query_layer = self.transpose_for_scores(mixed_query_layer)
        key_layer = self.transpose_for_scores(mixed_key_layer)
        value_layer = self.transpose_for_scores(mixed_value_layer)

        # Take the dot product between "query" and "key" to get the raw
        # attention scores, scaled by sqrt(head_size).
        attention_scores = torch.matmul(query_layer, key_layer.transpose(-1, -2))
        attention_scores = attention_scores / math.sqrt(self.attention_head_size)

        # Apply the attention mask (precomputed in ProteinBertModel.forward:
        # 0.0 for kept positions, -10000.0 for masked positions).
        attention_scores = attention_scores + attention_mask

        # Normalize the attention scores to probabilities.
        attention_probs = nn.Softmax(dim=-1)(attention_scores)

        # This drops out entire tokens to attend to, which might seem unusual,
        # but follows the original Transformer implementation.
        attention_probs = self.dropout(attention_probs)

        context_layer = torch.matmul(attention_probs, value_layer)

        context_layer = context_layer.permute(0, 2, 1, 3).contiguous()
        new_context_layer_shape = context_layer.size()[:-2] + (self.all_head_size,)
        context_layer = context_layer.view(*new_context_layer_shape)

        outputs = (context_layer, attention_probs) \
            if self.output_attentions else (context_layer,)
        return outputs


class ProteinBertSelfOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class ProteinBertAttention(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.self = ProteinBertSelfAttention(config)
        self.output = ProteinBertSelfOutput(config)

    def prune_heads(self, heads):
        if len(heads) == 0:
            return
        mask = torch.ones(self.self.num_attention_heads, self.self.attention_head_size)
        for head in heads:
            mask[head] = 0
        mask = mask.view(-1).contiguous().eq(1)
        index = torch.arange(len(mask))[mask].long()

        self.self.query = prune_linear_layer(self.self.query, index)
        self.self.key = prune_linear_layer(self.self.key, index)
        self.self.value = prune_linear_layer(self.self.value, index)
        self.output.dense = prune_linear_layer(self.output.dense, index, dim=1)

        self.self.num_attention_heads = self.self.num_attention_heads - len(heads)
        self.self.all_head_size = self.self.attention_head_size * self.self.num_attention_heads

    def forward(self, input_tensor, attention_mask):
        self_outputs = self.self(input_tensor, attention_mask)
        attention_output = self.output(self_outputs[0], input_tensor)
        outputs = (attention_output,) + self_outputs[1:]
        return outputs


class ProteinBertIntermediate(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.intermediate_size)
        if isinstance(config.hidden_act, str):
            self.intermediate_act_fn = get_activation_fn(config.hidden_act)
        else:
            self.intermediate_act_fn = config.hidden_act

    def forward(self, hidden_states):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.intermediate_act_fn(hidden_states)
        return hidden_states


class ProteinBertOutput(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.intermediate_size, config.hidden_size)
        self.LayerNorm = LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.dropout = nn.Dropout(config.hidden_dropout_prob)

    def forward(self, hidden_states, input_tensor):
        hidden_states = self.dense(hidden_states)
        hidden_states = self.dropout(hidden_states)
        hidden_states = self.LayerNorm(hidden_states + input_tensor)
        return hidden_states


class ProteinBertLayer(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.attention = ProteinBertAttention(config)
        self.intermediate = ProteinBertIntermediate(config)
        self.output = ProteinBertOutput(config)

    def forward(self, hidden_states, attention_mask):
        attention_outputs = self.attention(hidden_states, attention_mask)
        attention_output = attention_outputs[0]
        intermediate_output = self.intermediate(attention_output)
        layer_output = self.output(intermediate_output, attention_output)
        outputs = (layer_output,) + attention_outputs[1:]
        return outputs


class ProteinBertEncoder(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.output_attentions = config.output_attentions
        self.output_hidden_states = config.output_hidden_states
        self.layer = nn.ModuleList(
            [ProteinBertLayer(config) for _ in range(config.num_hidden_layers)])

    def run_function(self, start, chunk_size):
        """Return a closure over layers [start, start + chunk_size) suitable
        for gradient checkpointing."""
        def custom_forward(hidden_states, attention_mask):
            all_hidden_states = ()
            all_attentions = ()
            chunk_slice = slice(start, start + chunk_size)
            for layer in self.layer[chunk_slice]:
                if self.output_hidden_states:
                    all_hidden_states = all_hidden_states + (hidden_states,)
                layer_outputs = layer(hidden_states, attention_mask)
                hidden_states = layer_outputs[0]

                if self.output_attentions:
                    all_attentions = all_attentions + (layer_outputs[1],)

            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)
            outputs = (hidden_states,)
            if self.output_hidden_states:
                outputs = outputs + (all_hidden_states,)
            if self.output_attentions:
                outputs = outputs + (all_attentions,)
            return outputs

        return custom_forward

    def forward(self, hidden_states, attention_mask, chunks=None):
        all_hidden_states = ()
        all_attentions = ()

        if chunks is not None:
            # Run the layers in `chunks` gradient-checkpointed segments,
            # trading recomputation for activation memory.
            assert isinstance(chunks, int)
            chunk_size = (len(self.layer) + chunks - 1) // chunks
            for start in range(0, len(self.layer), chunk_size):
                outputs = checkpoint(self.run_function(start, chunk_size),
                                     hidden_states, attention_mask)
                if self.output_hidden_states:
                    all_hidden_states = all_hidden_states + outputs[1]
                if self.output_attentions:
                    all_attentions = all_attentions + outputs[-1]
                hidden_states = outputs[0]
        else:
            for i, layer_module in enumerate(self.layer):
                if self.output_hidden_states:
                    all_hidden_states = all_hidden_states + (hidden_states,)

                layer_outputs = layer_module(hidden_states, attention_mask)
                hidden_states = layer_outputs[0]

                if self.output_attentions:
                    all_attentions = all_attentions + (layer_outputs[1],)

            # Add the last layer's hidden states
            if self.output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

        outputs = (hidden_states,)
        if self.output_hidden_states:
            outputs = outputs + (all_hidden_states,)
        if self.output_attentions:
            outputs = outputs + (all_attentions,)
        return outputs


class ProteinBertPooler(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.dense = nn.Linear(config.hidden_size, config.hidden_size)
        self.activation = nn.Tanh()
        self.temporal_pooling = config.temporal_pooling
        # Convolutions and MLP used by the 'light_attention' pooling strategy.
        self._la_w1 = nn.Conv1d(config.hidden_size, int(config.hidden_size / 2), 5, padding=2)
        self._la_w2 = nn.Conv1d(config.hidden_size, int(config.hidden_size / 2), 5, padding=2)
        self._la_mlp = nn.Linear(config.hidden_size, config.hidden_size)

    def forward(self, hidden_states):
        if self.temporal_pooling == 'mean':
            return hidden_states.mean(dim=1)
        if self.temporal_pooling == 'max':
            # torch.max over a dim returns (values, indices); keep the values.
            return hidden_states.max(dim=1)[0]
        if self.temporal_pooling == 'concat':
            # Flatten across time, then zero-pad to a fixed width of 2048.
            _temp = hidden_states.reshape(hidden_states.shape[0], -1)
            return torch.nn.functional.pad(_temp, (0, 2048 - _temp.shape[1]))
        if self.temporal_pooling == 'topmax':
            # Mean of the top-5 activations along the sequence dimension.
            val, _ = torch.topk(hidden_states, k=5, dim=1)
            return val.mean(dim=1)
        if self.temporal_pooling == 'light_attention':
            _temp = hidden_states.permute(0, 2, 1)
            a = self._la_w1(_temp).softmax(dim=-1)
            v = self._la_w2(_temp)
            v_max = v.max(dim=-1).values
            v_sum = (a * v).sum(dim=-1)
            return self._la_mlp(torch.cat([v_max, v_sum], dim=1))

        # Default: pool by taking the hidden state of the first token.
        first_token_tensor = hidden_states[:, 0]
        pooled_output = self.dense(first_token_tensor)
        pooled_output = self.activation(pooled_output)
        return pooled_output
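

# Shape sketch for the pooling strategies above (hypothetical sizes, with
# hidden_size=768):
#
#     hidden_states: (batch, seq_len, 768)
#     'mean' / 'max' / 'topmax' / 'light_attention' -> (batch, 768)
#     'concat' -> (batch, 2048): flattens to (batch, seq_len * 768), then
#                 zero-pads up to the fixed width of 2048
#     any other value (including the default 'attention') -> first-token
#                 pooling, (batch, 768)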


class ProteinBertAbstractModel(ProteinModel):
    """An abstract class to handle weights initialization and a simple
    interface for downloading and loading pretrained models.
    """
    config_class = ProteinBertConfig
    pretrained_model_archive_map = BERT_PRETRAINED_MODEL_ARCHIVE_MAP
    base_model_prefix = "bert"

    def _init_weights(self, module):
        """Initialize the weights."""
        if isinstance(module, (nn.Linear, nn.Embedding)):
            module.weight.data.normal_(mean=0.0, std=self.config.initializer_range)
        elif isinstance(module, LayerNorm):
            module.bias.data.zero_()
            module.weight.data.fill_(1.0)
        if isinstance(module, nn.Linear) and module.bias is not None:
            module.bias.data.zero_()


@registry.register_task_model('embed', 'transformer')
class ProteinBertModel(ProteinBertAbstractModel):

    def __init__(self, config):
        super().__init__(config)

        self.embeddings = ProteinBertEmbeddings(config)
        self.encoder = ProteinBertEncoder(config)
        self.pooler = ProteinBertPooler(config)

        self.init_weights()

    def _resize_token_embeddings(self, new_num_tokens):
        old_embeddings = self.embeddings.word_embeddings
        new_embeddings = self._get_resized_embeddings(old_embeddings, new_num_tokens)
        self.embeddings.word_embeddings = new_embeddings
        return self.embeddings.word_embeddings

    def _prune_heads(self, heads_to_prune):
        """Prunes heads of the model.
        heads_to_prune: dict of {layer_num: list of heads to prune in this layer}
        See base class ProteinModel.
        """
        for layer, heads in heads_to_prune.items():
            self.encoder.layer[layer].attention.prune_heads(heads)

    def forward(self,
                input_ids,
                input_mask=None):
        if input_mask is None:
            input_mask = torch.ones_like(input_ids)

        # We create a 4D attention mask from a 2D tensor mask with shape
        # [batch_size, 1, 1, seq_length] so it can broadcast to
        # [batch_size, num_heads, seq_length, seq_length].
        extended_attention_mask = input_mask.unsqueeze(1).unsqueeze(2)

        # Since input_mask is 1.0 for positions we want to attend and 0.0 for
        # masked positions, this creates a tensor that is 0.0 for positions we
        # want to attend and -10000.0 for masked positions. Because it is
        # added to the raw scores before the softmax, this is effectively the
        # same as removing the masked positions entirely.
        extended_attention_mask = extended_attention_mask.to(
            dtype=torch.float32)
        extended_attention_mask = (1.0 - extended_attention_mask) * -10000.0

        embedding_output = self.embeddings(input_ids)
        encoder_outputs = self.encoder(embedding_output,
                                       extended_attention_mask,
                                       chunks=None)
        sequence_output = encoder_outputs[0]
        pooled_output = self.pooler(sequence_output)

        # add hidden_states and attentions if they are here
        outputs = (sequence_output, pooled_output,) + encoder_outputs[1:]
        return outputs
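

# A minimal inference sketch (hypothetical input; from_pretrained is assumed
# to be inherited from ProteinModel and to resolve 'bert-base' through the
# archive maps above):
#
#     model = ProteinBertModel.from_pretrained('bert-base')
#     input_ids = torch.randint(0, model.config.vocab_size, (2, 100))
#     sequence_output, pooled_output = model(input_ids)[:2]
#     # sequence_output: (2, 100, hidden_size); pooled_output from the pooler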


@registry.register_task_model('masked_language_modeling', 'transformer')
class ProteinBertForMaskedLM(ProteinBertAbstractModel):

    def __init__(self, config):
        super().__init__(config)

        self.bert = ProteinBertModel(config)
        self.mlm = MLMHead(
            config.hidden_size, config.vocab_size, config.hidden_act, config.layer_norm_eps,
            ignore_index=-1)

        self.init_weights()
        self.tie_weights()

    def tie_weights(self):
        """Make sure we are sharing the input and output embeddings.
        Export to TorchScript can't handle parameter sharing, so we clone
        them instead.
        """
        self._tie_or_clone_weights(self.mlm.decoder,
                                   self.bert.embeddings.word_embeddings)
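
    # After tie_weights() the decoder and the input embedding share the same
    # weight tensor (when tied rather than cloned); a quick sanity check,
    # assuming a model instance named `model`:
    #
    #     assert model.mlm.decoder.weight is model.bert.embeddings.word_embeddings.weight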

    def forward(self,
                input_ids,
                input_mask=None,
                targets=None):

        outputs = self.bert(input_ids, input_mask=input_mask)

        sequence_output, pooled_output = outputs[:2]

        # add hidden states and attentions if they are here
        outputs = self.mlm(sequence_output, targets) + outputs[2:]

        return outputs


@registry.register_task_model('fluorescence', 'transformer')
@registry.register_task_model('stability', 'transformer')
class ProteinBertForValuePrediction(ProteinBertAbstractModel):

    def __init__(self, config):
        super().__init__(config)

        self.bert = ProteinBertModel(config)
        self.predict = ValuePredictionHead(config.hidden_size)
        self.freeze_embedding = config.freeze_embedding
        self.init_weights()

    def forward(self, input_ids, input_mask=None, targets=None):
        if self.freeze_embedding:
            # Run the encoder in eval mode (disables dropout); note this does
            # not stop gradients from flowing into the encoder.
            self.bert.train(False)
        outputs = self.bert(input_ids, input_mask=input_mask)

        sequence_output, pooled_output = outputs[:2]
        outputs = self.predict(pooled_output, targets) + outputs[2:]

        return outputs


@registry.register_task_model('remote_homology', 'transformer')
class ProteinBertForSequenceClassification(ProteinBertAbstractModel):

    def __init__(self, config):
        super().__init__(config)

        self.bert = ProteinBertModel(config)
        self.classify = SequenceClassificationHead(
            config.hidden_size, config.num_labels)
        self.freeze_embedding = config.freeze_embedding
        self.init_weights()

    def forward(self, input_ids, input_mask=None, targets=None):
        if self.freeze_embedding:
            self.bert.train(False)
        outputs = self.bert(input_ids, input_mask=input_mask)

        sequence_output, pooled_output = outputs[:2]

        outputs = self.classify(pooled_output, targets) + outputs[2:]

        return outputs


@registry.register_task_model('secondary_structure', 'transformer')
class ProteinBertForSequenceToSequenceClassification(ProteinBertAbstractModel):

    def __init__(self, config):
        super().__init__(config)

        self.bert = ProteinBertModel(config)
        self.classify = SequenceToSequenceClassificationHead(
            config.hidden_size, config.num_labels, ignore_index=-1)

        self.init_weights()

    def forward(self, input_ids, input_mask=None, targets=None):

        outputs = self.bert(input_ids, input_mask=input_mask)

        sequence_output, pooled_output = outputs[:2]
        outputs = self.classify(sequence_output, targets) + outputs[2:]

        return outputs


@registry.register_task_model('contact_prediction', 'transformer')
class ProteinBertForContactPrediction(ProteinBertAbstractModel):

    def __init__(self, config):
        super().__init__(config)

        self.bert = ProteinBertModel(config)
        self.predict = PairwiseContactPredictionHead(config.hidden_size, ignore_index=-1)

        self.init_weights()

    def forward(self, input_ids, protein_length, input_mask=None, targets=None):

        outputs = self.bert(input_ids, input_mask=input_mask)

        sequence_output, pooled_output = outputs[:2]
        outputs = self.predict(sequence_output, protein_length, targets) + outputs[2:]

        return outputs
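

# The registry decorators above bind each (task, 'transformer') pair to its
# model class. A hypothetical lookup sketch (names here are illustrative;
# consult ..registry for the actual API, which may differ):
#
#     model_cls = registry.get_task_model('secondary_structure', 'transformer')
#     model = model_cls(ProteinBertConfig(num_labels=3))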