# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# This file was automatically generated from src/transformers/models/biogpt/modular_biogpt.py.
# Do NOT edit this file manually as any edits will be overwritten by the generation of
# the file from the modular. If any change should be done, please apply the change to the
# modular_biogpt.py file directly. One of our CI enforces this.
# 🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨🚨
# Copyright 2022 The HuggingFace Team and Microsoft Research AI4Science All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

import math
from collections.abc import Callable

import torch
import torch.nn as nn
from torch.nn import BCEWithLogitsLoss, CrossEntropyLoss, MSELoss

from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...masking_utils import create_causal_mask
from ...modeling_flash_attention_utils import FlashAttentionKwargs
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
    TokenClassifierOutput,
)
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import TransformersKwargs, auto_docstring, logging
from .configuration_biogpt import BioGptConfig


logger = logging.get_logger(__name__)

class BioGptLearnedPositionalEmbedding(nn.Embedding):
    """
    This module learns positional embeddings up to a fixed maximum size.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int):
        # BioGPT is set up so that if padding_idx is specified then offset the embedding ids by 2
        # and adjust num_embeddings appropriately. Other models don't have this hack.
        self.offset = 2
        super().__init__(num_embeddings + self.offset, embedding_dim)

    def forward(
        self,
        attention_mask: torch.LongTensor,
        past_key_values_length: int = 0,
        position_ids: torch.LongTensor | None = None,
    ):
        """`attention_mask` is expected to be [bsz x seqlen]."""

        if position_ids is None:
            position_ids = torch.cumsum(attention_mask, dim=1)
            position_ids = (position_ids * attention_mask - 1).long()
            # cut positions if `past_key_values_length` is > 0
            position_ids = position_ids[:, past_key_values_length:]

        return super().forward(position_ids + self.offset)

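# Worked example for the position ids above (illustrative comment, not part of the generated code):
# for a right-padded row attention_mask = [1, 1, 1, 0], cumsum gives [1, 2, 3, 3] and
# (cumsum * mask - 1) gives [0, 1, 2, -1], so real tokens get 0-based positions, padding collapses
# to -1, and the +2 offset then maps everything into the enlarged embedding table.
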
class BioGptScaledWordEmbedding(nn.Embedding):
    """
    This module overrides nn.Embedding's forward by multiplying it with the embedding scale.
    """

    def __init__(self, num_embeddings: int, embedding_dim: int, padding_idx: int, embed_scale: float | None = 1.0):
        super().__init__(num_embeddings, embedding_dim, padding_idx)
        self.embed_scale = embed_scale

    def forward(self, input_ids: torch.Tensor):
        return super().forward(input_ids) * self.embed_scale

def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float | None = None,
    dropout: float = 0.0,
    **kwargs: Unpack[TransformersKwargs],
):
    if scaling is None:
        scaling = query.size(-1) ** -0.5

    # Take the dot product between "query" and "key" to get the raw attention scores.
    attn_weights = torch.matmul(query, key.transpose(2, 3)) * scaling

    if attention_mask is not None:
        attention_mask = attention_mask[:, :, :, : key.shape[-2]]
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights

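# Shape note (inferred from the matmul/transpose pattern above, added as an explanatory comment):
# `query`, `key` and `value` are laid out as [batch, num_heads, seq_len, head_dim], the additive
# `attention_mask` broadcasts over [batch, 1, tgt_len, src_len], and the returned `attn_output` is
# [batch, tgt_len, num_heads, head_dim] after the final transpose, ready to be merged back to
# [batch, tgt_len, hidden_size] by the caller.
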
class BioGptAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(
        self,
        embed_dim: int,
        num_heads: int,
        dropout: float = 0.0,
        is_decoder: bool = False,
        bias: bool = True,
        is_causal: bool = False,
        config: BioGptConfig | None = None,
        layer_idx: int | None = None,
    ):
        super().__init__()
        self.embed_dim = embed_dim
        self.num_heads = num_heads
        self.dropout = dropout
        self.head_dim = embed_dim // num_heads
        self.config = config

        if (self.head_dim * num_heads) != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim}"
                f" and `num_heads`: {num_heads})."
            )
        self.scaling = self.head_dim**-0.5
        self.is_decoder = is_decoder
        self.is_causal = is_causal
        self.layer_idx = layer_idx
        if layer_idx is None and self.is_decoder:
            logger.warning_once(
                f"Instantiating a decoder {self.__class__.__name__} without passing `layer_idx` is not recommended and "
                "will lead to errors during the forward call, if caching is used. Please make sure to provide a `layer_idx` "
                "when creating this class."
            )

        self.k_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.v_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.q_proj = nn.Linear(embed_dim, embed_dim, bias=bias)
        self.out_proj = nn.Linear(embed_dim, embed_dim, bias=bias)

    def forward(
        self,
        hidden_states: torch.Tensor,
        key_value_states: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        output_attentions: bool = False,
        cache_position: torch.Tensor | None = None,
        # TODO: we need a refactor so that the different attention modules can get their specific kwargs
        # ATM, we have mixed things encoder, decoder, and encoder-decoder attn
        **kwargs: Unpack[FlashAttentionKwargs],
    ) -> tuple[torch.Tensor, torch.Tensor | None, tuple[torch.Tensor] | None]:
        """Input shape: Batch x Time x Channel"""

        # if key_value_states are provided this layer is used as a cross-attention layer
        # for the decoder
        is_cross_attention = key_value_states is not None

        # determine input shapes
        bsz, tgt_len = hidden_states.shape[:-1]
        src_len = key_value_states.shape[1] if is_cross_attention else tgt_len

        q_input_shape = (bsz, tgt_len, -1, self.head_dim)
        kv_input_shape = (bsz, src_len, -1, self.head_dim)

        # get query proj
        query_states = self.q_proj(hidden_states).view(*q_input_shape).transpose(1, 2)

        is_updated = False
        if past_key_values is not None:
            if isinstance(past_key_values, EncoderDecoderCache):
                is_updated = past_key_values.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_states from cache
                    curr_past_key_values = past_key_values.cross_attention_cache
                else:
                    curr_past_key_values = past_key_values.self_attention_cache
            else:
                curr_past_key_values = past_key_values

        current_states = key_value_states if is_cross_attention else hidden_states
        if is_cross_attention and past_key_values is not None and is_updated:
            # reuse k,v, cross_attentions
            key_states = curr_past_key_values.layers[self.layer_idx].keys
            value_states = curr_past_key_values.layers[self.layer_idx].values
        else:
            key_states = self.k_proj(current_states)
            value_states = self.v_proj(current_states)
            key_states = key_states.view(*kv_input_shape).transpose(1, 2)
            value_states = value_states.view(*kv_input_shape).transpose(1, 2)

            if past_key_values is not None:
                # save all key/value_states to cache to be re-used for fast auto-regressive generation
                cache_position = cache_position if not is_cross_attention else None
                key_states, value_states = curr_past_key_values.update(
                    key_states, value_states, self.layer_idx, {"cache_position": cache_position}
                )
                # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
                if is_cross_attention and isinstance(past_key_values, EncoderDecoderCache):
                    past_key_values.is_updated[self.layer_idx] = True

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            query_states,
            key_states,
            value_states,
            attention_mask,
            dropout=0.0 if not self.training else self.dropout,
            scaling=self.scaling,
            output_attentions=output_attentions,
            **kwargs,
        )

        attn_output = attn_output.reshape(bsz, tgt_len, -1).contiguous()
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights

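# Illustrative sketch of using the attention block on its own (commented out; assumes a BioGptConfig
# instance `cfg` using the default eager attention implementation):
#   attn = BioGptAttention(
#       embed_dim=cfg.hidden_size, num_heads=cfg.num_attention_heads,
#       is_decoder=True, is_causal=True, config=cfg, layer_idx=0,
#   )
#   out, weights = attn(hidden_states=torch.randn(1, 5, cfg.hidden_size))
#   # `out` keeps the [batch, seq_len, hidden_size] shape; `weights` is the per-head attention map.
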
class BioGptDecoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: BioGptConfig, layer_idx: int | None = None):
        super().__init__()
        self.embed_dim = config.hidden_size

        self.self_attn = BioGptAttention(
            embed_dim=self.embed_dim,
            num_heads=config.num_attention_heads,
            dropout=config.attention_probs_dropout_prob,
            is_decoder=True,
            is_causal=True,
            config=config,
            layer_idx=layer_idx,
        )
        self.dropout = config.hidden_dropout_prob
        self.activation_fn = ACT2FN[config.hidden_act]
        self.activation_dropout = config.activation_dropout

        self.self_attn_layer_norm = nn.LayerNorm(self.embed_dim)

        self.fc1 = nn.Linear(self.embed_dim, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, self.embed_dim)
        self.final_layer_norm = nn.LayerNorm(self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        output_attentions: bool | None = False,
        use_cache: bool | None = True,
        position_ids: torch.LongTensor | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple[torch.FloatTensor, tuple[torch.FloatTensor, torch.FloatTensor] | None]:
        """
        Args:
            hidden_states (`torch.FloatTensor`): input to the layer of shape `(batch, seq_len, embed_dim)`
            attention_mask (`torch.FloatTensor`): attention mask of size
                `(batch, 1, tgt_len, src_len)` where padding elements are indicated by very large negative values.
            past_key_values (`Cache`): cached past key and value projection states
            output_attentions (`bool`, *optional*):
                Whether or not to return the attentions tensors of all attention layers. See `attentions` under
                returned tensors for more detail.
            use_cache (`bool`, *optional*):
                If set to `True`, `past_key_values` key value states are returned and can be used to speed up decoding
                (see `past_key_values`).
            cache_position (`torch.LongTensor` of shape `(sequence_length)`, *optional*):
                Indices depicting the position of the input sequence tokens in the sequence. It is used to update the
                cache in the correct position and to infer the complete sequence length.
        """
        residual = hidden_states

        hidden_states = self.self_attn_layer_norm(hidden_states)

        # Self Attention
        hidden_states, self_attn_weights = self.self_attn(
            hidden_states=hidden_states,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            output_attentions=output_attentions,
            position_ids=position_ids,
            cache_position=cache_position,
            **kwargs,
        )
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        # Fully Connected
        residual = hidden_states
        hidden_states = self.final_layer_norm(hidden_states)
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.activation_dropout, training=self.training)
        hidden_states = self.fc2(hidden_states)
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)
        hidden_states = residual + hidden_states

        outputs = (hidden_states,)

        if output_attentions:
            outputs += (self_attn_weights,)

        return outputs

@auto_docstring
class BioGptPreTrainedModel(PreTrainedModel):
    config: BioGptConfig
    base_model_prefix = "biogpt"
    supports_gradient_checkpointing = True
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True

    _can_compile_fullgraph = True

@auto_docstring
class BioGptModel(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.config = config
        self.layerdrop = config.layerdrop
        self.dropout = config.hidden_dropout_prob
        self.embed_dim = config.hidden_size
        self.padding_idx = config.pad_token_id
        embed_scale = math.sqrt(config.hidden_size) if config.scale_embedding else 1.0

        self.embed_tokens = BioGptScaledWordEmbedding(
            config.vocab_size, self.embed_dim, self.padding_idx, embed_scale=embed_scale
        )
        self.embed_positions = BioGptLearnedPositionalEmbedding(config.max_position_embeddings, self.embed_dim)

        self.layers = nn.ModuleList([BioGptDecoderLayer(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.layer_norm = nn.LayerNorm(self.embed_dim)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPastAndCrossAttentions:
        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        # retrieve input_ids and inputs_embeds
        if (input_ids is None) ^ (inputs_embeds is not None):
            raise ValueError("You must specify exactly one of input_ids or inputs_embeds")
        elif input_ids is not None:
            input = input_ids
            input_shape = input.shape
            input_ids = input_ids.view(-1, input_shape[-1])
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            input = inputs_embeds[:, :, -1]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        if inputs_embeds is None:
            inputs_embeds = self.embed_tokens(input)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        # initialize past_key_values
        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        batch_size, seq_length = inputs_embeds.size()[:-1]
        past_key_values_length = past_key_values.get_seq_length() if past_key_values is not None else 0
        if cache_position is None:
            cache_position = torch.arange(
                past_key_values_length, past_key_values_length + seq_length, device=inputs_embeds.device
            )

        if attention_mask is None:
            # required mask seq length can be calculated via length of past cache
            mask_seq_length = past_key_values_length + seq_length
            attention_mask = torch.ones(batch_size, mask_seq_length, device=inputs_embeds.device)

        self_attn_cache = past_key_values

        causal_mask = create_causal_mask(
            config=self.config,
            input_embeds=inputs_embeds,
            attention_mask=attention_mask,
            cache_position=cache_position,
            past_key_values=self_attn_cache,
        )

        # embed positions
        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        positions = self.embed_positions(attention_mask, past_key_values_length, position_ids=position_ids)
        hidden_states = inputs_embeds + positions
        hidden_states = nn.functional.dropout(hidden_states, p=self.dropout, training=self.training)

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        all_hidden_states = () if output_hidden_states else None
        all_self_attns = () if output_attentions else None
        all_cross_attentions = None

        for idx, decoder_layer in enumerate(self.layers):
            # add LayerDrop (see https://huggingface.co/papers/1909.11556 for description)
            if output_hidden_states:
                all_hidden_states += (hidden_states,)
            if self.training:
                dropout_probability = torch.rand([])
                if dropout_probability < self.layerdrop:
                    continue

            layer_outputs = decoder_layer(
                hidden_states,
                attention_mask=causal_mask,
                past_key_values=past_key_values,
                output_attentions=output_attentions,
                use_cache=use_cache,
                position_ids=position_ids,
                cache_position=cache_position,
                **kwargs,
            )

            hidden_states = layer_outputs[0]

            if output_attentions:
                all_self_attns += (layer_outputs[1],)

        # add hidden states from the last decoder layer
        if output_hidden_states:
            all_hidden_states += (hidden_states,)

        hidden_states = self.layer_norm(hidden_states)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attns, all_cross_attentions]
                if v is not None
            )
        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attns,
            cross_attentions=all_cross_attentions,
        )

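# Illustrative usage sketch (commented out; assumes the public "microsoft/biogpt" checkpoint):
#   from transformers import AutoTokenizer, BioGptModel
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptModel.from_pretrained("microsoft/biogpt")
#   inputs = tokenizer("COVID-19 is", return_tensors="pt")
#   last_hidden = model(**inputs).last_hidden_state  # [batch, seq_len, hidden_size]
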
@auto_docstring(
    custom_intro="""
    BioGPT Model with a `language modeling` head on top for CLM fine-tuning.
    """
)
class BioGptForCausalLM(BioGptPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"output_projection.weight": "biogpt.embed_tokens.weight"}

    def __init__(self, config):
        super().__init__(config)

        self.biogpt = BioGptModel(config)
        self.output_projection = nn.Linear(config.hidden_size, config.vocab_size, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    def get_output_embeddings(self):
        return self.output_projection

    def set_output_embeddings(self, new_embeddings):
        self.output_projection = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | CausalLMOutputWithCrossAttentions:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        outputs = self.biogpt(
            input_ids,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            past_key_values=past_key_values,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
            **kwargs,
        )

        hidden_states = outputs[0]
        # Only compute necessary logits, and do not upcast them to float if we are not computing the loss
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.output_projection(hidden_states[:, slice_indices, :])

        loss = None
        if labels is not None:
            loss = self.loss_function(logits=logits, labels=labels, vocab_size=self.config.vocab_size, **kwargs)

        if not return_dict:
            output = (logits,) + outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=logits,
            past_key_values=outputs.past_key_values,
            hidden_states=outputs.hidden_states,
            attentions=outputs.attentions,
            cross_attentions=outputs.cross_attentions,
        )

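# Illustrative generation sketch (commented out; assumes the public "microsoft/biogpt" checkpoint):
#   from transformers import AutoTokenizer, BioGptForCausalLM
#   tokenizer = AutoTokenizer.from_pretrained("microsoft/biogpt")
#   model = BioGptForCausalLM.from_pretrained("microsoft/biogpt")
#   inputs = tokenizer("COVID-19 is", return_tensors="pt")
#   generated = model.generate(**inputs, max_new_tokens=20)
#   print(tokenizer.decode(generated[0], skip_special_tokens=True))
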
@auto_docstring
class BioGptForTokenClassification(BioGptPreTrainedModel):
    def __init__(self, config):
        super().__init__(config)
        self.num_labels = config.num_labels

        self.biogpt = BioGptModel(config)
        if hasattr(config, "classifier_dropout") and config.classifier_dropout is not None:
            classifier_dropout = config.classifier_dropout
        else:
            classifier_dropout = config.hidden_dropout_prob
        self.dropout = nn.Dropout(classifier_dropout)
        self.classifier = nn.Linear(config.hidden_size, config.num_labels)

        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        token_type_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple | TokenClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*):
            Labels for computing the token classification loss. Indices should be in
            `[0, ..., config.num_labels - 1]`.
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.biogpt(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )

        hidden_states = transformer_outputs[0]
        hidden_states = self.dropout(hidden_states)
        logits = self.classifier(hidden_states)

        loss = None
        if labels is not None:
            loss_fct = CrossEntropyLoss()
            # Only keep active parts of the loss
            if attention_mask is not None:
                active_loss = attention_mask.view(-1) == 1
                active_logits = logits.view(-1, self.num_labels)
                active_labels = torch.where(
                    active_loss, labels.view(-1), torch.tensor(loss_fct.ignore_index).type_as(labels)
                )
                loss = loss_fct(active_logits, active_labels)
            else:
                loss = loss_fct(logits.view(-1, self.num_labels), labels.view(-1))

        if not return_dict:
            output = (logits,) + transformer_outputs[2:]
            return ((loss,) + output) if loss is not None else output

        return TokenClassifierOutput(
            loss=loss,
            logits=logits,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

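# Illustrative labelling sketch for token classification (commented out; tokenizer/checkpoint as in
# the sketches above, label values are hypothetical): per-token labels share the input_ids shape, and
# positions to ignore can be set to -100 so CrossEntropyLoss skips them:
#   model = BioGptForTokenClassification.from_pretrained("microsoft/biogpt", num_labels=3)
#   inputs = tokenizer("BRCA1 is a gene", return_tensors="pt")
#   labels = torch.full_like(inputs["input_ids"], -100)  # then fill in real class ids per token
#   loss = model(**inputs, labels=labels).loss
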
@auto_docstring(
    custom_intro="""
    The BioGpt Model transformer with a sequence classification head on top (linear layer).

    [`BioGptForSequenceClassification`] uses the last token in order to do the classification, as other causal models
    (e.g. GPT-2) do.

    Since it does classification on the last token, it is required to know the position of the last token. If a
    `pad_token_id` is defined in the configuration, it finds the last token that is not a padding token in each row. If
    no `pad_token_id` is defined, it simply takes the last value in each row of the batch. Since it cannot guess the
    padding tokens when `inputs_embeds` are passed instead of `input_ids`, it does the same (take the last value in
    each row of the batch).
    """
)
class BioGptForSequenceClassification(BioGptPreTrainedModel):
    def __init__(self, config: BioGptConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.biogpt = BioGptModel(config)
        self.score = nn.Linear(config.hidden_size, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        attention_mask: torch.FloatTensor | None = None,
        past_key_values: Cache | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
        labels: torch.LongTensor | None = None,
        use_cache: bool | None = None,
        position_ids: torch.LongTensor | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        logits_to_keep: int | torch.Tensor = 0,
        **kwargs,
    ) -> tuple | SequenceClassifierOutputWithPast:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).
        """
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.biogpt(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            position_ids=position_ids,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        hidden_states = transformer_outputs[0]
        slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep
        logits = self.score(hidden_states[:, slice_indices, :])

        if input_ids is not None:
            batch_size, sequence_length = input_ids.shape[:2]
        else:
            batch_size, sequence_length = inputs_embeds.shape[:2]

        if self.config.pad_token_id is None:
            sequence_length = -1
        else:
            if input_ids is not None:
                sequence_length = (torch.ne(input_ids, self.config.pad_token_id).sum(-1) - 1).to(logits.device)
            else:
                sequence_length = -1
                logger.warning_once(
                    f"{self.__class__.__name__} will not detect padding tokens in `inputs_embeds`. Results may be "
                    "unexpected if using padding tokens in conjunction with `inputs_embeds`."
                )

        pooled_logits = logits[torch.arange(batch_size, device=logits.device), sequence_length]

        loss = None
        if labels is not None:
            if self.config.problem_type is None:
                if self.num_labels == 1:
                    self.config.problem_type = "regression"
                elif self.num_labels > 1 and (labels.dtype == torch.long or labels.dtype == torch.int):
                    self.config.problem_type = "single_label_classification"
                else:
                    self.config.problem_type = "multi_label_classification"

            if self.config.problem_type == "regression":
                loss_fct = MSELoss()
                if self.num_labels == 1:
                    loss = loss_fct(pooled_logits.squeeze(), labels.squeeze())
                else:
                    loss = loss_fct(pooled_logits, labels)
            elif self.config.problem_type == "single_label_classification":
                loss_fct = CrossEntropyLoss()
                loss = loss_fct(pooled_logits.view(-1, self.num_labels), labels.view(-1))
            elif self.config.problem_type == "multi_label_classification":
                loss_fct = BCEWithLogitsLoss()
                loss = loss_fct(pooled_logits, labels)
        if not return_dict:
            output = (pooled_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=pooled_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )

    def get_input_embeddings(self):
        return self.biogpt.embed_tokens

    def set_input_embeddings(self, value):
        self.biogpt.embed_tokens = value

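# Note on the last-token pooling above (illustrative values): with a pad_token_id configured and an
# input_ids row of [5, 6, 7, <pad>], torch.ne(input_ids, pad_token_id).sum(-1) - 1 yields index 2,
# so `pooled_logits` takes the logits of the last non-padding token in each row; without a
# pad_token_id (or when only `inputs_embeds` is given), index -1 selects the final position instead.
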
__all__ = [
    "BioGptForCausalLM",
    "BioGptForTokenClassification",
    "BioGptForSequenceClassification",
    "BioGptModel",
    "BioGptPreTrainedModel",
]