# Copyright 2024 Google AI and The HuggingFace Team. All rights reserved.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""PyTorch Siglip model."""

from collections.abc import Callable
from dataclasses import dataclass
from typing import Any

import numpy as np
import torch
from torch import nn

from ... import initialization as init
from ...activations import ACT2FN
from ...modeling_attn_mask_utils import _prepare_4d_attention_mask
from ...modeling_layers import GradientCheckpointingLayer
from ...modeling_outputs import BaseModelOutput, BaseModelOutputWithPooling, ImageClassifierOutput
from ...modeling_utils import ALL_ATTENTION_FUNCTIONS, PreTrainedModel
from ...processing_utils import Unpack
from ...utils import (
    ModelOutput,
    TransformersKwargs,
    auto_docstring,
    can_return_tuple,
    torch_int,
)
from ...utils.generic import check_model_inputs, is_flash_attention_requested
from .configuration_siglip import SiglipConfig, SiglipTextConfig, SiglipVisionConfig


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for vision model's outputs that also contains image embeddings of the pooling of the last hidden states.
    """
)
# Copied from transformers.models.clip.modeling_clip.CLIPVisionModelOutput with CLIP->Siglip
class SiglipVisionModelOutput(ModelOutput):
    r"""
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
        The image embeddings obtained by applying the projection layer to the pooler_output.
    """

    image_embeds: torch.FloatTensor | None = None
    last_hidden_state: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None


@dataclass
@auto_docstring(
    custom_intro="""
    Base class for text model's outputs that also contains a pooling of the last hidden states.
    """
)
# Copied from transformers.models.clip.modeling_clip.CLIPTextModelOutput with CLIP->Siglip
class SiglipTextModelOutput(ModelOutput):
    r"""
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`, *optional*, returned when model is initialized with `with_projection=True`):
        The text embeddings obtained by applying the projection layer to the pooler_output.
    """

    text_embeds: torch.FloatTensor | None = None
    last_hidden_state: torch.FloatTensor | None = None
    hidden_states: tuple[torch.FloatTensor, ...] | None = None
    attentions: tuple[torch.FloatTensor, ...] | None = None


@dataclass
@auto_docstring
# Copied from transformers.models.clip.modeling_clip.CLIPOutput with CLIP->Siglip
class SiglipOutput(ModelOutput):
    r"""
    loss (`torch.FloatTensor` of shape `(1,)`, *optional*, returned when `return_loss` is `True`):
        Contrastive loss for image-text similarity.
    logits_per_image (`torch.FloatTensor` of shape `(image_batch_size, text_batch_size)`):
        The scaled dot product scores between `image_embeds` and `text_embeds`. This represents the image-text
        similarity scores.
    logits_per_text (`torch.FloatTensor` of shape `(text_batch_size, image_batch_size)`):
        The scaled dot product scores between `text_embeds` and `image_embeds`. This represents the text-image
        similarity scores.
    text_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The text embeddings obtained by applying the projection layer to the pooled output of [`SiglipTextModel`].
    image_embeds (`torch.FloatTensor` of shape `(batch_size, output_dim)`):
        The image embeddings obtained by applying the projection layer to the pooled output of [`SiglipVisionModel`].
    text_model_output (`BaseModelOutputWithPooling`):
        The output of the [`SiglipTextModel`].
    vision_model_output (`BaseModelOutputWithPooling`):
        The output of the [`SiglipVisionModel`].
    """

    loss: torch.FloatTensor | None = None
    logits_per_image: torch.FloatTensor | None = None
    logits_per_text: torch.FloatTensor | None = None
    text_embeds: torch.FloatTensor | None = None
    image_embeds: torch.FloatTensor | None = None
    text_model_output: BaseModelOutputWithPooling = None
    vision_model_output: BaseModelOutputWithPooling = None

    def to_tuple(self) -> tuple[Any]:
        return tuple(
            self[k] if k not in ["text_model_output", "vision_model_output"] else getattr(self, k).to_tuple()
            for k in self.keys()
        )


class SiglipVisionEmbeddings(nn.Module):
    def __init__(self, config: SiglipVisionConfig):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.image_size = config.image_size
        self.patch_size = config.patch_size

        self.patch_embedding = nn.Conv2d(
            in_channels=config.num_channels,
            out_channels=self.embed_dim,
            kernel_size=self.patch_size,
            stride=self.patch_size,
            padding="valid",
        )

        self.num_patches = (self.image_size // self.patch_size) ** 2
        self.num_positions = self.num_patches
        self.position_embedding = nn.Embedding(self.num_positions, self.embed_dim)
        self.register_buffer("position_ids", torch.arange(self.num_positions).expand((1, -1)), persistent=False)

    def interpolate_pos_encoding(self, embeddings: torch.Tensor, height: int, width: int) -> torch.Tensor:
        """
        This method allows interpolating the pre-trained position encodings so that the model can be used on
        higher-resolution images. It is also adapted to support torch.jit tracing and the absence of class embeddings.

        Adapted from:
        - https://github.com/facebookresearch/dino/blob/de9ee3df6cf39fac952ab558447af1fa1365362a/vision_transformer.py#L174-L194, and
        - https://github.com/facebookresearch/dinov2/blob/e1277af2ba9496fbadf7aec6eba56e8d882d1e35/dinov2/models/vision_transformer.py#L179-L211
        """

        num_patches = embeddings.shape[1]
        num_positions = self.position_embedding.weight.shape[0]

        # always interpolate when tracing to ensure the exported model works for dynamic input shapes
        if not torch.jit.is_tracing() and num_patches == num_positions and height == width:
            return self.position_embedding(self.position_ids)

        patch_pos_embed = self.position_embedding.weight.unsqueeze(0)

        dim = embeddings.shape[-1]

        new_height = height // self.patch_size
        new_width = width // self.patch_size

        sqrt_num_positions = torch_int(num_positions**0.5)
        patch_pos_embed = patch_pos_embed.reshape(1, sqrt_num_positions, sqrt_num_positions, dim)
        patch_pos_embed = patch_pos_embed.permute(0, 3, 1, 2)

        patch_pos_embed = nn.functional.interpolate(
            patch_pos_embed,
            size=(new_height, new_width),
            mode="bicubic",
            align_corners=False,
        )

        patch_pos_embed = patch_pos_embed.permute(0, 2, 3, 1).view(1, -1, dim)
        return patch_pos_embed

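    # Illustration (added commentary based on the default google/siglip-base-patch16-224 checkpoint, not part of the
    # original code): with image_size=224 and patch_size=16 the embedding layer below produces
    # (224 // 16) ** 2 = 196 patch tokens, and `interpolate_pos_encoding` resamples the learned 14x14 position grid
    # with bicubic interpolation when images of a different resolution are passed.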
    def forward(self, pixel_values: torch.FloatTensor, interpolate_pos_encoding=False) -> torch.Tensor:
        _, _, height, width = pixel_values.shape
        target_dtype = self.patch_embedding.weight.dtype
        patch_embeds = self.patch_embedding(pixel_values.to(dtype=target_dtype))  # shape = [*, width, grid, grid]
        embeddings = patch_embeds.flatten(2).transpose(1, 2)

        if interpolate_pos_encoding:
            embeddings = embeddings + self.interpolate_pos_encoding(embeddings, height, width)
        else:
            embeddings = embeddings + self.position_embedding(self.position_ids)
        return embeddings


# Copied from transformers.models.clip.modeling_clip.CLIPTextEmbeddings with CLIP->Siglip
class SiglipTextEmbeddings(nn.Module):
    def __init__(self, config: SiglipTextConfig):
        super().__init__()
        embed_dim = config.hidden_size

        self.token_embedding = nn.Embedding(config.vocab_size, embed_dim)
        self.position_embedding = nn.Embedding(config.max_position_embeddings, embed_dim)

        # position_ids (1, len position emb) is contiguous in memory and exported when serialized
        self.register_buffer(
            "position_ids", torch.arange(config.max_position_embeddings).expand((1, -1)), persistent=False
        )

    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        position_ids: torch.LongTensor | None = None,
        inputs_embeds: torch.FloatTensor | None = None,
    ) -> torch.Tensor:
        seq_length = input_ids.shape[-1] if input_ids is not None else inputs_embeds.shape[-2]
        max_position_embedding = self.position_embedding.weight.shape[0]

        if seq_length > max_position_embedding:
            raise ValueError(
                f"Sequence length must not exceed max_position_embeddings (got `sequence length`: "
                f"{seq_length} and max_position_embeddings: {max_position_embedding})."
            )

        if position_ids is None:
            position_ids = self.position_ids[:, :seq_length]

        if inputs_embeds is None:
            inputs_embeds = self.token_embedding(input_ids)

        position_embeddings = self.position_embedding(position_ids)
        embeddings = inputs_embeds + position_embeddings

        return embeddings


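# Shape note (added commentary, not from the original source): in `eager_attention_forward` below, `query`, `key`
# and `value` arrive as (batch, num_heads, seq_len, head_dim); the score matrix is (batch, num_heads, seq_len,
# seq_len); any additive `attention_mask` is broadcast onto it; the weighted values are then transposed back to
# (batch, seq_len, num_heads, head_dim) so the caller can merge the head dimension again.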
def eager_attention_forward(
    module: nn.Module,
    query: torch.Tensor,
    key: torch.Tensor,
    value: torch.Tensor,
    attention_mask: torch.Tensor | None,
    scaling: float,
    dropout: float = 0.0,
    **kwargs,
):
    attn_weights = torch.matmul(query, key.transpose(-1, -2)) * scaling
    if attention_mask is not None:
        attn_weights = attn_weights + attention_mask

    attn_weights = nn.functional.softmax(attn_weights, dim=-1, dtype=torch.float32).to(query.dtype)
    attn_weights = nn.functional.dropout(attn_weights, p=dropout, training=module.training)

    attn_output = torch.matmul(attn_weights, value)
    attn_output = attn_output.transpose(1, 2).contiguous()

    return attn_output, attn_weights


class SiglipAttention(nn.Module):
    """Multi-headed attention from 'Attention Is All You Need' paper"""

    def __init__(self, config):
        super().__init__()
        self.config = config
        self.embed_dim = config.hidden_size
        self.num_heads = config.num_attention_heads
        self.head_dim = self.embed_dim // self.num_heads
        if self.head_dim * self.num_heads != self.embed_dim:
            raise ValueError(
                f"embed_dim must be divisible by num_heads (got `embed_dim`: {self.embed_dim} and `num_heads`:"
                f" {self.num_heads})."
            )
        self.scale = self.head_dim**-0.5
        self.dropout = config.attention_dropout
        self.is_causal = False

        self.k_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.v_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.q_proj = nn.Linear(self.embed_dim, self.embed_dim)
        self.out_proj = nn.Linear(self.embed_dim, self.embed_dim)

    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        **kwargs,
    ) -> tuple[torch.Tensor, torch.Tensor | None]:
        """Input shape: Batch x Time x Channel"""

        batch_size, seq_length, embed_dim = hidden_states.shape

        queries = self.q_proj(hidden_states)
        keys = self.k_proj(hidden_states)
        values = self.v_proj(hidden_states)

        queries = queries.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        keys = keys.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)
        values = values.view(batch_size, seq_length, self.num_heads, self.head_dim).transpose(1, 2)

        attention_interface: Callable = ALL_ATTENTION_FUNCTIONS.get_interface(
            self.config._attn_implementation, eager_attention_forward
        )

        attn_output, attn_weights = attention_interface(
            self,
            queries,
            keys,
            values,
            attention_mask,
            is_causal=self.is_causal,
            scaling=self.scale,
            dropout=0.0 if not self.training else self.dropout,
        )

        attn_output = attn_output.reshape(batch_size, seq_length, embed_dim).contiguous()
        attn_output = self.out_proj(attn_output)

        return attn_output, attn_weights


# Copied from transformers.models.clip.modeling_clip.CLIPMLP with CLIP->Siglip
class SiglipMLP(nn.Module):
    def __init__(self, config):
        super().__init__()
        self.config = config
        self.activation_fn = ACT2FN[config.hidden_act]
        self.fc1 = nn.Linear(config.hidden_size, config.intermediate_size)
        self.fc2 = nn.Linear(config.intermediate_size, config.hidden_size)

    def forward(self, hidden_states: torch.Tensor) -> torch.Tensor:
        hidden_states = self.fc1(hidden_states)
        hidden_states = self.activation_fn(hidden_states)
        hidden_states = self.fc2(hidden_states)
        return hidden_states


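# Added note (commentary, not in the original file): `SiglipEncoderLayer` below follows the pre-LayerNorm
# transformer block layout used in this implementation: each sub-block normalizes its input first and adds the
# result back to the residual stream, i.e. x = x + Attn(LN1(x)) followed by x = x + MLP(LN2(x)).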
class SiglipEncoderLayer(GradientCheckpointingLayer):
    def __init__(self, config: SiglipVisionConfig | SiglipTextConfig):
        super().__init__()
        self.embed_dim = config.hidden_size
        self.layer_norm1 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.self_attn = SiglipAttention(config)
        self.layer_norm2 = nn.LayerNorm(self.embed_dim, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)

    @auto_docstring
    def forward(
        self,
        hidden_states: torch.Tensor,
        attention_mask: torch.Tensor,
        **kwargs: Unpack[TransformersKwargs],
    ) -> torch.FloatTensor:
        residual = hidden_states

        hidden_states = self.layer_norm1(hidden_states)
        hidden_states, _ = self.self_attn(
            hidden_states=hidden_states,
            attention_mask=attention_mask,
            **kwargs,
        )
        hidden_states = residual + hidden_states

        residual = hidden_states
        hidden_states = self.layer_norm2(hidden_states)
        hidden_states = self.mlp(hidden_states)
        hidden_states = residual + hidden_states

        return hidden_states


@auto_docstring
class SiglipPreTrainedModel(PreTrainedModel):
    config: SiglipConfig
    base_model_prefix = "siglip"
    input_modalities = ("image", "text")
    supports_gradient_checkpointing = True

    _no_split_modules = [
        "SiglipTextEmbeddings",
        "SiglipVisionEmbeddings",
        "SiglipEncoderLayer",
        "SiglipMultiheadAttentionPoolingHead",
    ]
    _supports_flash_attn = True
    _supports_sdpa = True
    _supports_flex_attn = True
    _supports_attention_backend = True

    _can_record_outputs = {
        "hidden_states": SiglipEncoderLayer,
        "attentions": SiglipAttention,
    }

    @torch.no_grad()
    def _init_weights(self, module):
        """Initialize the weights"""
        if isinstance(module, SiglipVisionEmbeddings):
            width = (
                self.config.vision_config.hidden_size
                if isinstance(self.config, SiglipConfig)
                else self.config.hidden_size
            )
            init.normal_(module.position_embedding.weight, std=1 / np.sqrt(width))
            if hasattr(module, "position_ids"):
                init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))
        elif isinstance(module, nn.Embedding):
            init.default_flax_embed_init_(module.weight)
        elif isinstance(module, SiglipAttention):
            init.xavier_uniform_(module.q_proj.weight)
            init.xavier_uniform_(module.k_proj.weight)
            init.xavier_uniform_(module.v_proj.weight)
            init.xavier_uniform_(module.out_proj.weight)
            init.zeros_(module.q_proj.bias)
            init.zeros_(module.k_proj.bias)
            init.zeros_(module.v_proj.bias)
            init.zeros_(module.out_proj.bias)
        elif isinstance(module, SiglipMLP):
            init.xavier_uniform_(module.fc1.weight)
            init.xavier_uniform_(module.fc2.weight)
            init.normal_(module.fc1.bias, std=1e-6)
            init.normal_(module.fc2.bias, std=1e-6)
        elif isinstance(module, SiglipMultiheadAttentionPoolingHead):
            init.xavier_uniform_(module.probe)
            init.xavier_uniform_(module.attention.in_proj_weight)
            init.zeros_(module.attention.in_proj_bias)
        elif isinstance(module, SiglipModel):
            init.zeros_(module.logit_scale)
            init.zeros_(module.logit_bias)
        elif isinstance(module, SiglipForImageClassification):
            init.normal_(
                module.classifier.weight,
                std=self.config.vision_config.hidden_size**-0.5 * self.config.initializer_factor,
            )
        elif isinstance(module, (nn.Linear, nn.Conv2d)):
            init.lecun_normal_(module.weight)
            if module.bias is not None:
                init.zeros_(module.bias)
        elif isinstance(module, nn.LayerNorm):
            init.zeros_(module.bias)
            init.ones_(module.weight)
        elif isinstance(module, SiglipTextEmbeddings):
            init.copy_(module.position_ids, torch.arange(module.position_ids.shape[-1]).expand((1, -1)))


# Copied from transformers.models.altclip.modeling_altclip.AltCLIPEncoder with AltCLIP->Siglip
class SiglipEncoder(nn.Module):
    """
    Transformer encoder consisting of `config.num_hidden_layers` self attention layers. Each layer is a
    [`SiglipEncoderLayer`].

    Args:
        config: SiglipConfig
    """

    def __init__(self, config: SiglipConfig):
        super().__init__()
        self.config = config
        self.layers = nn.ModuleList([SiglipEncoderLayer(config) for _ in range(config.num_hidden_layers)])
        self.gradient_checkpointing = False

    # Ignore copy
    @auto_docstring
    def forward(
        self,
        inputs_embeds,
        attention_mask: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutput:
        hidden_states = inputs_embeds
        for encoder_layer in self.layers:
            hidden_states = encoder_layer(
                hidden_states,
                attention_mask,
                **kwargs,
            )

        return BaseModelOutput(last_hidden_state=hidden_states)


class SiglipTextTransformer(SiglipPreTrainedModel):
    _input_embed_layer = "token_embedding"

    def __init__(self, config: SiglipTextConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size
        self.embeddings = SiglipTextEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.final_layer_norm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)

        self.head = nn.Linear(embed_dim, config.projection_size)
        self.post_init()

    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        if input_ids is None:
            raise ValueError("You have to specify input_ids")

        input_shape = input_ids.size()
        input_ids = input_ids.view(-1, input_shape[-1])

        hidden_states = self.embeddings(input_ids=input_ids, position_ids=position_ids)

        # note: SigLIP's text model does not use a causal mask, unlike the original CLIP model.
        # expand attention_mask
        uses_flash_attention = is_flash_attention_requested(self.config)
        if uses_flash_attention:
            attention_mask = None
        elif attention_mask is not None and not uses_flash_attention:
            # [batch_size, seq_len] -> [batch_size, 1, tgt_seq_len, src_seq_len]
            attention_mask = _prepare_4d_attention_mask(attention_mask, hidden_states.dtype)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            attention_mask=attention_mask,
            **kwargs,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.final_layer_norm(last_hidden_state)

        # The model uses the last token's hidden state, which may be padding.
        pooled_output = last_hidden_state[:, -1, :]
        pooled_output = self.head(pooled_output)

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooled_output,
        )


@auto_docstring(
    custom_intro="""
    The text model from SigLIP without any head or projection on top.
    """
)
class SiglipTextModel(SiglipPreTrainedModel):
    config: SiglipTextConfig
    input_modalities = ("text",)

    def __init__(self, config: SiglipTextConfig):
        super().__init__(config)
        self.text_model = SiglipTextTransformer(config)
        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value):
        self.text_model.embeddings.token_embedding = value

    @check_model_inputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        input_ids: torch.Tensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, SiglipTextModel

        >>> model = SiglipTextModel.from_pretrained("google/siglip-base-patch16-224")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")

        >>> # important: make sure to set padding="max_length" as that's how the model was trained
        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled (EOS token) states
        ```"""

        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )


class SiglipVisionTransformer(SiglipPreTrainedModel):
    _input_embed_layer = "patch_embedding"

    def __init__(self, config: SiglipVisionConfig):
        super().__init__(config)
        self.config = config
        embed_dim = config.hidden_size

        self.embeddings = SiglipVisionEmbeddings(config)
        self.encoder = SiglipEncoder(config)
        self.post_layernorm = nn.LayerNorm(embed_dim, eps=config.layer_norm_eps)
        self.use_head = True if not hasattr(config, "vision_use_head") else config.vision_use_head
        if self.use_head:
            self.head = SiglipMultiheadAttentionPoolingHead(config)

        self.post_init()

    @auto_docstring
    def forward(
        self,
        pixel_values,
        interpolate_pos_encoding: bool | None = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        hidden_states = self.embeddings(pixel_values, interpolate_pos_encoding=interpolate_pos_encoding)

        encoder_outputs: BaseModelOutput = self.encoder(
            inputs_embeds=hidden_states,
            **kwargs,
        )

        last_hidden_state = encoder_outputs.last_hidden_state
        last_hidden_state = self.post_layernorm(last_hidden_state)

        pooler_output = self.head(last_hidden_state) if self.use_head else None

        return BaseModelOutputWithPooling(
            last_hidden_state=last_hidden_state,
            pooler_output=pooler_output,
        )


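# Added note (commentary, not in the original file): the pooling head below implements multihead attention pooling
# (MAP). A single learned `probe` vector acts as the attention query while the patch tokens provide the keys and
# values, so the variable-length patch sequence is summarized into one pooled embedding per image.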
class SiglipMultiheadAttentionPoolingHead(nn.Module):
    """Multihead Attention Pooling."""

    def __init__(self, config: SiglipVisionConfig):
        super().__init__()

        self.probe = nn.Parameter(torch.randn(1, 1, config.hidden_size))
        self.attention = torch.nn.MultiheadAttention(config.hidden_size, config.num_attention_heads, batch_first=True)
        self.layernorm = nn.LayerNorm(config.hidden_size, eps=config.layer_norm_eps)
        self.mlp = SiglipMLP(config)

    def forward(self, hidden_state):
        batch_size = hidden_state.shape[0]
        probe = self.probe.repeat(batch_size, 1, 1)

        hidden_state = self.attention(probe, hidden_state, hidden_state)[0]

        residual = hidden_state
        hidden_state = self.layernorm(hidden_state)
        hidden_state = residual + self.mlp(hidden_state)

        return hidden_state[:, 0]


@auto_docstring(
    custom_intro="""
    The vision model from SigLIP without any head or projection on top.
    """
)
class SiglipVisionModel(SiglipPreTrainedModel):
    config: SiglipVisionConfig
    main_input_name = "pixel_values"
    input_modalities = ("image",)

    def __init__(self, config: SiglipVisionConfig):
        super().__init__(config)

        self.vision_model = SiglipVisionTransformer(config)

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    @check_model_inputs(tie_last_hidden_states=False)
    @auto_docstring
    def forward(
        self,
        pixel_values,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, SiglipVisionModel

        >>> model = SiglipVisionModel.from_pretrained("google/siglip-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> outputs = model(**inputs)
        >>> last_hidden_state = outputs.last_hidden_state
        >>> pooled_output = outputs.pooler_output  # pooled features
        ```"""

        return self.vision_model(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )


@auto_docstring
class SiglipModel(SiglipPreTrainedModel):
    config: SiglipConfig

    def __init__(self, config: SiglipConfig):
        super().__init__(config)

        if not isinstance(config.text_config, SiglipTextConfig):
            raise TypeError(
                "config.text_config is expected to be of type SiglipTextConfig but is of type"
                f" {type(config.text_config)}."
            )

        if not isinstance(config.vision_config, SiglipVisionConfig):
            raise TypeError(
                "config.vision_config is expected to be of type SiglipVisionConfig but is of type"
                f" {type(config.vision_config)}."
            )

        text_config = config.text_config
        vision_config = config.vision_config

        # First, initialize the text and vision models with proper attention implementation
        text_model = SiglipTextModel._from_config(text_config)
        vision_model = SiglipVisionModel._from_config(vision_config)

        # Second, get the text and vision submodules (for backward compatibility)
        self.text_model = text_model.text_model
        self.vision_model = vision_model.vision_model

        self.logit_scale = nn.Parameter(torch.randn(1))
        self.logit_bias = nn.Parameter(torch.randn(1))

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.text_model.embeddings.token_embedding

    def set_input_embeddings(self, value: nn.Module):
        self.text_model.embeddings.token_embedding = value

    @can_return_tuple
    @auto_docstring
    def get_text_features(
        self,
        input_ids: torch.Tensor,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.Tensor | None = None,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> from transformers import AutoTokenizer, AutoModel
        >>> import torch

        >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
        >>> tokenizer = AutoTokenizer.from_pretrained("google/siglip-base-patch16-224")

        >>> # important: make sure to set padding="max_length" as that's how the model was trained
        >>> inputs = tokenizer(["a photo of a cat", "a photo of a dog"], padding="max_length", return_tensors="pt")
        >>> with torch.no_grad():
        ...     text_features = model.get_text_features(**inputs)
        ```"""
        return self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )

    @can_return_tuple
    @auto_docstring
    def get_image_features(
        self,
        pixel_values: torch.FloatTensor,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> tuple | BaseModelOutputWithPooling:
        r"""
        Examples:

        ```python
        >>> import torch
        >>> from transformers import AutoProcessor, AutoModel
        >>> from transformers.image_utils import load_image

        >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = load_image(url)

        >>> inputs = processor(images=image, return_tensors="pt")

        >>> with torch.no_grad():
        ...     image_features = model.get_image_features(**inputs)
        ```"""
        return self.vision_model(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

    # NOTE: SiglipModel uses Pretrained backbones, so we don't need to add `check_model_inputs` here
    @can_return_tuple
    @auto_docstring
    def forward(
        self,
        input_ids: torch.LongTensor | None = None,
        pixel_values: torch.FloatTensor | None = None,
        attention_mask: torch.Tensor | None = None,
        position_ids: torch.LongTensor | None = None,
        return_loss: bool | None = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> SiglipOutput:
        r"""
        return_loss (`bool`, *optional*):
            Whether or not to return the contrastive loss.

        Examples:

        ```python
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO
        >>> from transformers import AutoProcessor, AutoModel
        >>> import torch

        >>> model = AutoModel.from_pretrained("google/siglip-base-patch16-224")
        >>> processor = AutoProcessor.from_pretrained("google/siglip-base-patch16-224")

        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> texts = ["a photo of 2 cats", "a photo of 2 dogs"]
        >>> # important: we pass `padding=max_length` since the model was trained with this
        >>> inputs = processor(text=texts, images=image, padding="max_length", return_tensors="pt")

        >>> with torch.no_grad():
        ...     outputs = model(**inputs)

        >>> logits_per_image = outputs.logits_per_image
        >>> probs = torch.sigmoid(logits_per_image)  # these are the probabilities
        >>> print(f"{probs[0][0]:.1%} that image 0 is '{texts[0]}'")
        31.9% that image 0 is 'a photo of 2 cats'
        ```"""
        vision_outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values=pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        text_outputs: BaseModelOutputWithPooling = self.text_model(
            input_ids=input_ids,
            attention_mask=attention_mask,
            position_ids=position_ids,
            **kwargs,
        )

        image_embeds = vision_outputs.pooler_output
        text_embeds = text_outputs.pooler_output

        # normalized features
        image_embeds = image_embeds / image_embeds.norm(p=2, dim=-1, keepdim=True)
        text_embeds = text_embeds / text_embeds.norm(p=2, dim=-1, keepdim=True)

        # cosine similarity as logits
        logits_per_text = torch.matmul(text_embeds, image_embeds.t().to(text_embeds.device))

        logit_scale, logit_bias = self.logit_scale.to(text_embeds.device), self.logit_bias.to(text_embeds.device)
        logits_per_text = logits_per_text * logit_scale.exp() + logit_bias

        logits_per_image = logits_per_text.t()

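        # Added commentary (not in the original file): with `return_loss=True` the code below computes the SigLIP
        # pairwise sigmoid loss. For a batch of B image-text pairs it builds z_ij = +1 on the diagonal (matching
        # pairs) and -1 elsewhere, then averages -sum_j log sigmoid(z_ij * logits_per_text[i, j]) over the batch,
        # where the logits already include the learned temperature (logit_scale.exp()) and bias.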
        loss = None
        if return_loss:
            # Adapted from https://github.com/google-research/big_vision/blob/01edb81a4716f93a48be43b3a4af14e29cdb3a7f/big_vision/trainers/proj/image_text/siglip.py#L287
            eye = torch.eye(logits_per_text.size(0), device=logits_per_text.device)
            m1_diag1 = -torch.ones_like(logits_per_text) + 2 * eye
            loglik = torch.nn.functional.logsigmoid(m1_diag1 * logits_per_text)
            nll = -torch.sum(loglik, dim=-1)
            loss = nll.mean()

        return SiglipOutput(
            loss=loss,
            logits_per_image=logits_per_image,
            logits_per_text=logits_per_text,
            text_embeds=text_embeds,
            image_embeds=image_embeds,
            text_model_output=text_outputs,
            vision_model_output=vision_outputs,
        )


@auto_docstring(
    custom_intro="""
    SigLIP vision encoder with an image classification head on top (a linear layer on top of the pooled final hidden states of
    the patch tokens) e.g. for ImageNet.
    """
)
class SiglipForImageClassification(SiglipPreTrainedModel):
    main_input_name = "pixel_values"
    input_modalities = ("image",)

    def __init__(self, config: SiglipConfig) -> None:
        super().__init__(config)

        self.num_labels = config.num_labels

        # Create the vision model with proper attention
        # and take only vision_model submodule (for backward compatibility)
        vision_model = SiglipVisionModel._from_config(config.vision_config)
        self.vision_model = vision_model.vision_model

        # Classifier head
        self.classifier = (
            nn.Linear(config.vision_config.hidden_size, config.num_labels) if config.num_labels > 0 else nn.Identity()
        )

        # Initialize weights and apply final processing
        self.post_init()

    def get_input_embeddings(self) -> nn.Module:
        return self.vision_model.embeddings.patch_embedding

    def set_input_embeddings(self, value: nn.Module):
        self.vision_model.embeddings.patch_embedding = value

    @check_model_inputs
    @auto_docstring
    def forward(
        self,
        pixel_values: torch.Tensor | None = None,
        labels: torch.Tensor | None = None,
        interpolate_pos_encoding: bool = False,
        **kwargs: Unpack[TransformersKwargs],
    ) -> ImageClassifierOutput:
        r"""
        labels (`torch.LongTensor` of shape `(batch_size,)`, *optional*):
            Labels for computing the image classification/regression loss. Indices should be in `[0, ...,
            config.num_labels - 1]`. If `config.num_labels == 1` a regression loss is computed (Mean-Square loss). If
            `config.num_labels > 1` a classification loss is computed (Cross-Entropy).

        Examples:

        ```python
        >>> from transformers import AutoImageProcessor, SiglipForImageClassification
        >>> import torch
        >>> from PIL import Image
        >>> import httpx
        >>> from io import BytesIO

        >>> torch.manual_seed(3)  # doctest: +IGNORE_RESULT
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> with httpx.stream("GET", url) as response:
        ...     image = Image.open(BytesIO(response.read()))

        >>> # note: we are loading a `SiglipModel` from the hub here,
        >>> # so the head will be randomly initialized, hence the predictions will be random if seed is not set above.
        >>> image_processor = AutoImageProcessor.from_pretrained("google/siglip-base-patch16-224")
        >>> model = SiglipForImageClassification.from_pretrained("google/siglip-base-patch16-224")

        >>> inputs = image_processor(images=image, return_tensors="pt")
        >>> outputs = model(**inputs)
        >>> logits = outputs.logits
        >>> # model predicts one of the two classes
        >>> predicted_class_idx = logits.argmax(-1).item()
        >>> print("Predicted class:", model.config.id2label[predicted_class_idx])
        Predicted class: LABEL_1
        ```"""
        outputs: BaseModelOutputWithPooling = self.vision_model(
            pixel_values,
            interpolate_pos_encoding=interpolate_pos_encoding,
            **kwargs,
        )

        sequence_output = outputs.last_hidden_state

        # average pool the patch tokens
        sequence_output = torch.mean(sequence_output, dim=1)
        # apply classifier
        logits = self.classifier(sequence_output)

        loss = None
        if labels is not None:
            loss = self.loss_function(labels, logits, self.config)

        return ImageClassifierOutput(
            loss=loss,
            logits=logits,
        )


__all__ = [
    "SiglipModel",
    "SiglipPreTrainedModel",
    "SiglipTextModel",
    "SiglipVisionModel",
    "SiglipForImageClassification",
]