# Copyright 2021 The OpenAI Team Authors and HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch OpenAI ImageGPT model."""

import math
from typing import Any

import torch
from torch import nn
from torch.nn import CrossEntropyLoss

from ... import initialization as init
from ...activations import ACT2FN
from ...cache_utils import Cache, DynamicCache, EncoderDecoderCache
from ...generation import GenerationMixin
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import (
    BaseModelOutputWithPastAndCrossAttentions,
    CausalLMOutputWithCrossAttentions,
    SequenceClassifierOutputWithPast,
)
from ...modeling_utils import PreTrainedModel
from ...pytorch_utils import Conv1D
from ...utils import (
    auto_docstring,
    logging,
    torch_float,
)
from ...utils.generic import maybe_autocast
from .configuration_imagegpt import ImageGPTConfig


logger = logging.get_logger(__name__)


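# ImageGPTLayerNorm is an RMS-style norm: it rescales hidden states by their root-mean-square
# over the feature dimension (no mean subtraction and no bias term), then applies a learned
# per-feature weight.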
class ImageGPTLayerNorm(nn.Module):
    def __init__(self, hidden_size: int, eps: float = 1e-5):
        super().__init__()
        self.eps = eps
        self.weight = nn.Parameter(torch.Tensor(hidden_size))

    def forward(self, tensor: torch.Tensor) -> torch.Tensor:
        # input is not mean centered
        tensor = tensor / torch.sqrt(torch.mean(torch.square(tensor), axis=-1, keepdim=True) + self.eps)
        tensor = tensor * self.weight
        return tensor


class ImageGPTAttention(nn.Module):
    def __init__(self, config, is_cross_attention: bool | None = False, layer_idx: int | None = None):
        super().__init__()
        self.config = config
        max_positions = config.max_position_embeddings
        self.register_buffer(
            "bias",
            torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                1, 1, max_positions, max_positions
            ),
            persistent=False,
        )

        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        self.split_size = self.embed_dim
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"`embed_dim` must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )

        self.scale_attn_weights = config.scale_attn_weights
        self.is_cross_attention = is_cross_attention

        # Layer-wise attention scaling, reordering, and upcasting
        self.scale_attn_by_inverse_layer_idx = config.scale_attn_by_inverse_layer_idx
        self.layer_idx = layer_idx
        self.reorder_and_upcast_attn = config.reorder_and_upcast_attn

        if self.is_cross_attention:
            self.c_attn = Conv1D(2 * self.embed_dim, self.embed_dim)
            self.q_attn = Conv1D(self.embed_dim, self.embed_dim)
        else:
            self.c_attn = Conv1D(3 * self.embed_dim, self.embed_dim)
        self.c_proj = Conv1D(self.embed_dim, self.embed_dim)

        self.attn_dropout = nn.Dropout(config.attn_pdrop)
        self.resid_dropout = nn.Dropout(config.resid_pdrop)

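    # Standard scaled dot-product attention: softmax(Q @ K^T / sqrt(head_dim)) @ V, with a causal
    # mask applied for self-attention and an optional additive `attention_mask` on the scores.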
    def _attn(self, query, key, value, attention_mask=None):
        attn_weights = torch.matmul(query, key.transpose(-1, -2))

        if self.scale_attn_weights:
            attn_weights = attn_weights / torch_float(value.size(-1) ** 0.5)

        # Layer-wise attention scaling
        if self.scale_attn_by_inverse_layer_idx:
            attn_weights = attn_weights / float(self.layer_idx + 1)

        if not self.is_cross_attention:
            # only the "normal" (self-)attention layer implements the causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

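    # Variant of `_attn` used when `reorder_and_upcast_attn` is set: attention scores are always
    # computed in fp32, and all scaling is folded into `baddbmm`'s alpha for numerical stability.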
    def _upcast_and_reordered_attn(self, query, key, value, attention_mask=None):
        # Use `torch.baddbmm` (a bit more efficient w/ alpha param for scaling -- from Megatron-LM)
        bsz, num_heads, q_seq_len, dk = query.size()
        _, _, k_seq_len, _ = key.size()

        # Preallocate attn_weights for `baddbmm`
        attn_weights = torch.empty(bsz * num_heads, q_seq_len, k_seq_len, dtype=torch.float32, device=query.device)

        # Compute Scale Factor
        scale_factor = 1.0
        if self.scale_attn_weights:
            scale_factor /= float(value.size(-1)) ** 0.5

        if self.scale_attn_by_inverse_layer_idx:
            scale_factor /= float(self.layer_idx + 1)

        # Upcast (turn off autocast) and reorder (Scale K by 1 / root(dk))
        with maybe_autocast(query.device.type, enabled=False):
            q, k = query.reshape(-1, q_seq_len, dk), key.transpose(-1, -2).reshape(-1, dk, k_seq_len)
            attn_weights = torch.baddbmm(attn_weights, q.float(), k.float(), beta=0, alpha=scale_factor)
            attn_weights = attn_weights.reshape(bsz, num_heads, q_seq_len, k_seq_len)

        if not self.is_cross_attention:
            # only the "normal" (self-)attention layer implements the causal mask
            query_length, key_length = query.size(-2), key.size(-2)
            causal_mask = self.bias[:, :, key_length - query_length : key_length, :key_length]
            mask_value = torch.finfo(attn_weights.dtype).min
            # Need to be a tensor, otherwise we get error: `RuntimeError: expected scalar type float but found double`.
            # Need to be on the same device, otherwise `RuntimeError: ..., x and y to be on the same device`
            mask_value = torch.tensor(mask_value, dtype=attn_weights.dtype, device=attn_weights.device)
            attn_weights = torch.where(causal_mask, attn_weights, mask_value)

        if attention_mask is not None:
            # Apply the attention mask
            attn_weights = attn_weights + attention_mask

        attn_weights = nn.Softmax(dim=-1)(attn_weights)

        # Downcast (if necessary) back to V's dtype (if in mixed-precision) -- No-Op otherwise
        if attn_weights.dtype != torch.float32:
            raise RuntimeError("Error with upcasting, attn_weights does not have dtype torch.float32")
        attn_weights = attn_weights.type(value.dtype)
        attn_weights = self.attn_dropout(attn_weights)

        attn_output = torch.matmul(attn_weights, value)

        return attn_output, attn_weights

    def _split_heads(self, tensor, num_heads, attn_head_size):
        """
        Splits hidden_size dim into attn_head_size and num_heads
        """
        new_shape = tensor.size()[:-1] + (num_heads, attn_head_size)
        tensor = tensor.view(*new_shape)
        return tensor.permute(0, 2, 1, 3)  # (batch, head, seq_length, head_features)

    def _merge_heads(self, tensor, num_heads, attn_head_size):
        """
        Merges attn_head_size dim and num_attn_heads dim into hidden_size
        """
        tensor = tensor.permute(0, 2, 1, 3).contiguous()
        new_shape = tensor.size()[:-2] + (num_heads * attn_head_size,)
        return tensor.view(new_shape)

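    # Shape bookkeeping for the helpers above: hidden states of shape (batch, seq, embed_dim) are
    # split into (batch, num_heads, seq, head_dim) for attention and merged back afterwards,
    # where embed_dim == num_heads * head_dim.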
    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_past: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        use_cache: bool | None = False,
        output_attentions: bool | None = False,
        cache_position: torch.Tensor | None = None,
    ) -> tuple:
        is_cross_attention = encoder_hidden_states is not None
        bsz, seq_len, _ = hidden_states.shape

        if layer_past is not None:
            if isinstance(layer_past, EncoderDecoderCache):
                is_updated = layer_past.is_updated.get(self.layer_idx)
                if is_cross_attention:
                    # after the first generated id, we can subsequently re-use all key/value_states from cache
                    curr_past_key_values = layer_past.cross_attention_cache
                else:
                    curr_past_key_values = layer_past.self_attention_cache
            else:
                curr_past_key_values = layer_past

        current_states = encoder_hidden_states if is_cross_attention else hidden_states
        if is_cross_attention:
            if not hasattr(self, "q_attn"):
                raise ValueError(
                    "If class is used as cross attention, the weights `q_attn` have to be defined. "
                    "Please make sure to instantiate class with `ImageGPTAttention(..., is_cross_attention=True)`."
                )

            if layer_past is not None and is_updated:
                # reuse k,v, cross_attentions, and compute only q
                query = self.q_attn(hidden_states)
                key = curr_past_key_values.layers[self.layer_idx].keys
                value = curr_past_key_values.layers[self.layer_idx].values
            else:
                query = self.q_attn(hidden_states)
                key, value = self.c_attn(current_states).split(self.split_size, dim=2)
                key = key.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
                value = value.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
        else:
            query, key, value = self.c_attn(current_states).split(self.split_size, dim=2)
            key = key.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)
            value = value.view(bsz, -1, self.num_heads, self.head_dim).transpose(1, 2)

        if layer_past is not None:
            # save all key/value_states to cache to be re-used for fast auto-regressive generation
            cache_position = cache_position if not is_cross_attention else None
            key, value = curr_past_key_values.update(key, value, self.layer_idx, {"cache_position": cache_position})
            # set flag that curr layer for cross-attn is already updated so we can re-use in subsequent calls
            if is_cross_attention:
                layer_past.is_updated[self.layer_idx] = True

        query = query.view(bsz, seq_len, self.num_heads, self.head_dim).transpose(1, 2)

        if self.reorder_and_upcast_attn:
            attn_output, attn_weights = self._upcast_and_reordered_attn(query, key, value, attention_mask)
        else:
            attn_output, attn_weights = self._attn(query, key, value, attention_mask)

        attn_output = self._merge_heads(attn_output, self.num_heads, self.head_dim)
        attn_output = self.c_proj(attn_output)
        attn_output = self.resid_dropout(attn_output)

        return attn_output, attn_weights


class ImageGPTMLP(nn.Module):
    def __init__(self, intermediate_size, config):
        super().__init__()
        embed_dim = config.hidden_size
        self.c_fc = Conv1D(intermediate_size, embed_dim)
        self.c_proj = Conv1D(embed_dim, intermediate_size)
        self.act = ACT2FN[config.activation_function]
        self.dropout = nn.Dropout(config.resid_pdrop)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.c_fc(hidden_states)
        hidden_states = self.act(hidden_states)
        hidden_states = self.c_proj(hidden_states)
        hidden_states = self.dropout(hidden_states)
        return hidden_states


class ImageGPTBlock(GradientCheckpointingLayer):
    def __init__(self, config, layer_idx=None):
        super().__init__()
        hidden_size = config.hidden_size
        inner_dim = config.n_inner if config.n_inner is not None else 4 * hidden_size

        self.ln_1 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)
        self.attn = ImageGPTAttention(config, layer_idx=layer_idx)
        self.ln_2 = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        if config.add_cross_attention:
            self.crossattention = ImageGPTAttention(config, is_cross_attention=True, layer_idx=layer_idx)
            self.ln_cross_attn = ImageGPTLayerNorm(hidden_size, eps=config.layer_norm_epsilon)

        self.mlp = ImageGPTMLP(inner_dim, config)

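    # Pre-norm transformer block: LayerNorm -> self-attention -> residual add, an optional
    # LayerNorm -> cross-attention -> residual add, then LayerNorm -> MLP -> residual add.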
    def forward(
        self,
        hidden_states: torch.Tensor,
        layer_past: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        use_cache: bool | None = False,
        output_attentions: bool | None = False,
        cache_position: torch.Tensor | None = None,
    ) -> tuple:
        residual = hidden_states
        hidden_states = self.ln_1(hidden_states)
        attn_outputs = self.attn(
            hidden_states,
            layer_past=layer_past,
            attention_mask=attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            cache_position=cache_position,
        )
        attn_output = attn_outputs[0]
        outputs = attn_outputs[1:]
        # residual connection
        hidden_states = attn_output + residual

        if encoder_hidden_states is not None:
            # add one self-attention block for cross-attention
            if not hasattr(self, "crossattention"):
                raise ValueError(
                    f"If `encoder_hidden_states` are passed, {self} has to be instantiated with "
                    "cross-attention layers by setting `config.add_cross_attention=True`"
                )
            residual = hidden_states
            hidden_states = self.ln_cross_attn(hidden_states)
            cross_attn_outputs = self.crossattention(
                hidden_states,
                layer_past=layer_past,
                attention_mask=attention_mask,
                encoder_hidden_states=encoder_hidden_states,
                encoder_attention_mask=encoder_attention_mask,
                output_attentions=output_attentions,
                cache_position=cache_position,
            )
            attn_output = cross_attn_outputs[0]
            # residual connection
            hidden_states = residual + attn_output
            outputs = outputs + cross_attn_outputs[1:]  # add cross attentions if we output attention weights

        residual = hidden_states
        hidden_states = self.ln_2(hidden_states)
        feed_forward_hidden_states = self.mlp(hidden_states)
        # residual connection
        hidden_states = residual + feed_forward_hidden_states

        return (hidden_states,) + outputs


@auto_docstring
class ImageGPTPreTrainedModel(PreTrainedModel):
    config: ImageGPTConfig
    base_model_prefix = "transformer"
    main_input_name = "input_ids"
    input_modalities = ("image",)
    supports_gradient_checkpointing = True
    _no_split_modules = ["ImageGPTBlock"]

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights."""
        super()._init_weights(module)

        # Reinitialize selected weights subject to the OpenAI GPT-2 Paper Scheme:
        # > A modified initialization which accounts for the accumulation on the residual path with model depth. Scale
        # > the weights of residual layers at initialization by a factor of 1/√N where N is the # of residual layers.
        # > -- GPT-2 :: https://openai.com/blog/better-language-models/
        #
        # Reference (Megatron-LM): https://github.com/NVIDIA/Megatron-LM/blob/main/megatron/model/gpt_model.py
        if isinstance(module, PreTrainedModel):
            for name, p in module.named_parameters():
                if "c_proj" in name and "weight" in name:
                    # Special Scaled Initialization --> There are 2 Layer Norms per Transformer Block
                    init.normal_(p, mean=0.0, std=self.config.initializer_range / math.sqrt(2 * self.config.n_layer))
        elif isinstance(module, ImageGPTAttention):
            max_positions = module.config.max_position_embeddings
            init.copy_(
                module.bias,
                torch.tril(torch.ones((max_positions, max_positions), dtype=torch.bool)).view(
                    1, 1, max_positions, max_positions
                ),
            )


@auto_docstring
class ImageGPTModel(ImageGPTPreTrainedModel):
    def __init__(self, config: ImageGPTConfig):
        super().__init__(config)

        self.embed_dim = config.hidden_size

        self.wte = nn.Embedding(config.vocab_size, self.embed_dim)
        self.wpe = nn.Embedding(config.max_position_embeddings, self.embed_dim)

        self.drop = nn.Dropout(config.embd_pdrop)
        self.h = nn.ModuleList([ImageGPTBlock(config, layer_idx=i) for i in range(config.num_hidden_layers)])
        self.ln_f = ImageGPTLayerNorm(self.embed_dim, eps=config.layer_norm_epsilon)

        self.gradient_checkpointing = False
        # Initialize weights and apply final processing
        self.post_init()

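    # `wte` embeds the discrete color-cluster tokens produced by the image processor (ImageGPT's
    # "vocabulary"), while `wpe` embeds pixel positions.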
    def get_input_embeddings(self):
        return self.wte

    def set_input_embeddings(self, new_embeddings):
        self.wte = new_embeddings

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs: Any,
    ) -> tuple | BaseModelOutputWithPastAndCrossAttentions:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, ImageGPTModel
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
        >>> model = ImageGPTModel.from_pretrained("openai/imagegpt-small")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> last_hidden_states = outputs.last_hidden_state
        ```"""

        output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions
        output_hidden_states = (
            output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states
        )
        use_cache = use_cache if use_cache is not None else self.config.use_cache
        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        if input_ids is not None and inputs_embeds is not None:
            raise ValueError("You cannot specify both input_ids and inputs_embeds at the same time")
        elif input_ids is not None:
            self.warn_if_padding_and_no_attention_mask(input_ids, attention_mask)
            input_shape = input_ids.size()
            input_ids = input_ids.view(-1, input_shape[-1])
            batch_size = input_ids.shape[0]
        elif inputs_embeds is not None:
            input_shape = inputs_embeds.size()[:-1]
            batch_size = inputs_embeds.shape[0]
        else:
            raise ValueError("You have to specify either input_ids or inputs_embeds")

        device = input_ids.device if input_ids is not None else inputs_embeds.device

        if self.gradient_checkpointing and self.training:
            if use_cache:
                logger.warning_once(
                    "`use_cache=True` is incompatible with gradient checkpointing. Setting `use_cache=False`..."
                )
                use_cache = False

        if token_type_ids is not None:
            token_type_ids = token_type_ids.view(-1, input_shape[-1])

        if use_cache and past_key_values is None:
            past_key_values = DynamicCache(config=self.config)

        if cache_position is None:
            past_seen_tokens = past_key_values.get_seq_length() if past_key_values is not None else 0
            cache_position: torch.Tensor = torch.arange(input_shape[-1], device=device) + past_seen_tokens

        if position_ids is None:
            position_ids = cache_position.unsqueeze(0)

        # ImageGPTAttention mask.
        if attention_mask is not None:
            if batch_size <= 0:
                raise ValueError("batch_size has to be defined and > 0")
            attention_mask = attention_mask.view(batch_size, -1)
            # We create a 3D attention mask from a 2D tensor mask.
            # Sizes are [batch_size, 1, 1, to_seq_length]
            # So we can broadcast to [batch_size, num_heads, from_seq_length, to_seq_length]
            # this attention mask is simpler than the triangular causal masking used in
            # OpenAI GPT; we just need to prepare the broadcast dimension here.
            attention_mask = attention_mask[:, None, None, :]

            # Since attention_mask is 1.0 for positions we want to attend and 0.0 for
            # masked positions, this operation will create a tensor which is 0.0 for
            # positions we want to attend and the dtype's smallest value for masked positions.
            # Since we are adding it to the raw scores before the softmax, this is
            # effectively the same as removing these entirely.
            attention_mask = attention_mask.to(dtype=self.dtype)  # fp16 compatibility
            attention_mask = (1.0 - attention_mask) * torch.finfo(self.dtype).min

        # If a 2D or 3D attention mask is provided for the cross-attention
        # we need to make broadcastable to [batch_size, num_heads, seq_length, seq_length]
        if self.config.add_cross_attention and encoder_hidden_states is not None:
            encoder_batch_size, encoder_sequence_length, _ = encoder_hidden_states.size()
            encoder_hidden_shape = (encoder_batch_size, encoder_sequence_length)
            if encoder_attention_mask is None:
                encoder_attention_mask = torch.ones(encoder_hidden_shape, device=device)
            encoder_attention_mask = self.invert_attention_mask(encoder_attention_mask)
        else:
            encoder_attention_mask = None

        if inputs_embeds is None:
            inputs_embeds = self.wte(input_ids)
        position_embeds = self.wpe(position_ids)
        hidden_states = inputs_embeds + position_embeds.to(inputs_embeds.device)

        if token_type_ids is not None:
            token_type_embeds = self.wte(token_type_ids)
            hidden_states = hidden_states + token_type_embeds

        hidden_states = self.drop(hidden_states)
        output_shape = input_shape + (hidden_states.size(-1),)

        all_self_attentions = () if output_attentions else None
        all_cross_attentions = () if output_attentions and self.config.add_cross_attention else None
        all_hidden_states = () if output_hidden_states else None
        for i, block in enumerate(self.h):
            if output_hidden_states:
                all_hidden_states = all_hidden_states + (hidden_states,)

            outputs = block(
                hidden_states,
                past_key_values,
                attention_mask,
                encoder_hidden_states,  # as a positional argument for gradient checkpointing
                encoder_attention_mask=encoder_attention_mask,
                use_cache=use_cache,
                output_attentions=output_attentions,
                cache_position=cache_position,
            )

            hidden_states = outputs[0]
            if output_attentions:
                all_self_attentions = all_self_attentions + (outputs[1],)
                if self.config.add_cross_attention:
                    all_cross_attentions = all_cross_attentions + (outputs[2],)

        hidden_states = self.ln_f(hidden_states)
        hidden_states = hidden_states.view(*output_shape)

        # Add last hidden state
        if output_hidden_states:
            all_hidden_states = all_hidden_states + (hidden_states,)

        if not return_dict:
            return tuple(
                v
                for v in [hidden_states, past_key_values, all_hidden_states, all_self_attentions, all_cross_attentions]
                if v is not None
            )

        return BaseModelOutputWithPastAndCrossAttentions(
            last_hidden_state=hidden_states,
            past_key_values=past_key_values,
            hidden_states=all_hidden_states,
            attentions=all_self_attentions,
            cross_attentions=all_cross_attentions,
        )


@auto_docstring(
    custom_intro="""
    The ImageGPT Model transformer with a language modeling head on top (linear layer with weights tied to the input
    embeddings).
    """
)
class ImageGPTForCausalImageModeling(ImageGPTPreTrainedModel, GenerationMixin):
    _tied_weights_keys = {"lm_head.weight": "transformer.wte.weight"}

    def __init__(self, config: ImageGPTConfig):
        super().__init__(config)
        self.transformer = ImageGPTModel(config)
        self.lm_head = nn.Linear(config.n_embd, config.vocab_size - 1, bias=False)
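        # Note: the LM head has `vocab_size - 1` outputs; the remaining id (`vocab_size - 1`, used
        # as the SOS token in the generation example below) is only ever fed as input, never predicted.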

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        encoder_hidden_states: torch.Tensor | None = None,
        encoder_attention_mask: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        cache_position: torch.Tensor | None = None,
        **kwargs: Any,
    ) -> tuple | CausalLMOutputWithCrossAttentions:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size, input_ids_length)`, *optional*):
            Labels for language modeling. Note that the labels **are shifted** inside the model, i.e. you can set
            `labels = input_ids`. Indices are selected in `[-100, 0, ..., config.vocab_size]`. All labels set to `-100`
            are ignored (masked); the loss is only computed for labels in `[0, ..., config.vocab_size]`.

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, ImageGPTForCausalImageModeling
        >>> import torch
        >>> import matplotlib.pyplot as plt
        >>> import numpy as np

        >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
        >>> model = ImageGPTForCausalImageModeling.from_pretrained("openai/imagegpt-small")
        >>> device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
        >>> model.to(device)  # doctest: +IGNORE_RESULT

        >>> # unconditional generation of 4 images
        >>> batch_size = 4
        >>> context = torch.full((batch_size, 1), model.config.vocab_size - 1)  # initialize with SOS token
        >>> context = context.to(device)
        >>> output = model.generate(
        ...     input_ids=context, max_length=model.config.n_positions + 1, temperature=1.0, do_sample=True, top_k=40
        ... )

        >>> clusters = image_processor.clusters
        >>> height = image_processor.size["height"]
        >>> width = image_processor.size["width"]

        >>> samples = output[:, 1:].detach().cpu().numpy()
        >>> samples_img = [
        ...     np.reshape(np.rint(127.5 * (clusters[s] + 1.0)), [height, width, 3]).astype(np.uint8) for s in samples
        ... ]  # convert color cluster tokens back to pixels
        >>> f, axes = plt.subplots(1, batch_size, dpi=300)

        >>> for img, ax in zip(samples_img, axes):  # doctest: +IGNORE_RESULT
        ...     ax.axis("off")
        ...     ax.imshow(img)
        ```"""

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            encoder_hidden_states=encoder_hidden_states,
            encoder_attention_mask=encoder_attention_mask,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
            cache_position=cache_position,
        )
        hidden_states = transformer_outputs[0]

        lm_logits = self.lm_head(hidden_states)

        loss = None
        if labels is not None:
            # Shift so that tokens < n predict n
            shift_logits = lm_logits[..., :-1, :].contiguous()
            shift_labels = labels[..., 1:].contiguous()
            # Flatten the tokens
            loss_fct = CrossEntropyLoss()
            loss = loss_fct(shift_logits.view(-1, shift_logits.size(-1)), shift_labels.view(-1))

        if not return_dict:
            output = (lm_logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return CausalLMOutputWithCrossAttentions(
            loss=loss,
            logits=lm_logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
            cross_attentions=transformer_outputs.cross_attentions,
        )


@auto_docstring(
    custom_intro="""
    The ImageGPT Model transformer with an image classification head on top (linear layer).
    [`ImageGPTForImageClassification`] average-pools the hidden states in order to do the classification.
    """
)
class ImageGPTForImageClassification(ImageGPTPreTrainedModel):
    def __init__(self, config: ImageGPTConfig):
        super().__init__(config)
        self.num_labels = config.num_labels
        self.transformer = ImageGPTModel(config)
        self.score = nn.Linear(config.n_embd, self.num_labels, bias=False)

        # Initialize weights and apply final processing
        self.post_init()

    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        past_key_values: Cache | None = None,
        attention_mask: torch.Tensor | None = None,
        token_type_ids: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        inputs_embeds: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        use_cache: bool | None = None,
        output_attentions: bool | None = None,
        output_hidden_states: bool | None = None,
        return_dict: bool | None = None,
        **kwargs: Any,
    ) -> tuple | SequenceClassifierOutputWithPast:
        r"""
        input_ids (`torch.LongTensor` of shape `(batch_size, input_ids_length)`):
            `input_ids_length` = `sequence_length` if `past_key_values` is `None` else
            `past_key_values.get_seq_length()` (`sequence_length` of input past key value states). Indices of input
            sequence tokens in the vocabulary.

            If `past_key_values` is used, only `input_ids` that do not have their past calculated should be passed as
            `input_ids`.

            Indices can be obtained using [`AutoImageProcessor`]. See [`ImageGPTImageProcessor.__call__`] for details.
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the sequence classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss), If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, ImageGPTForImageClassification
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> image_processor = AutoImageProcessor.from_pretrained("openai/imagegpt-small")
        >>> model = ImageGPTForImageClassification.from_pretrained("openai/imagegpt-small")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        ```"""

        return_dict = return_dict if return_dict is not None else self.config.use_return_dict

        transformer_outputs = self.transformer(
            input_ids,
            past_key_values=past_key_values,
            attention_mask=attention_mask,
            token_type_ids=token_type_ids,
            position_ids=position_ids,
            inputs_embeds=inputs_embeds,
            use_cache=use_cache,
            output_attentions=output_attentions,
            output_hidden_states=output_hidden_states,
            return_dict=return_dict,
        )
        hidden_states = transformer_outputs[0]
        # average-pool the hidden states along the sequence dimension
        pooled_hidden_states = hidden_states.mean(dim=1)
        # project from (batch_size, hidden_size) to (batch_size, num_labels)
        logits = self.score(pooled_hidden_states)

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config)

        if not return_dict:
            output = (logits,) + transformer_outputs[1:]
            return ((loss,) + output) if loss is not None else output

        return SequenceClassifierOutputWithPast(
            loss=loss,
            logits=logits,
            past_key_values=transformer_outputs.past_key_values,
            hidden_states=transformer_outputs.hidden_states,
            attentions=transformer_outputs.attentions,
        )


__all__ = [
    "ImageGPTForCausalImageModeling",
    "ImageGPTForImageClassification",
    "ImageGPTModel",
    "ImageGPTPreTrainedModel",
]