# Copyright 2025 HuggingFace Inc. team. All rights reserved. # # # Licensed under the Apache License, Version 2.0 (the "License"); # you may not use this file except in compliance with the License. # You may obtain a copy of the License at # # http://www.apache.org/licenses/LICENSE-2.0 # # Unless required by applicable law or agreed to in writing, software # distributed under the License is distributed on an "AS IS" BASIS, # WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied. # See the License for the specific language governing permissions and # limitations under the License. import torch from torch import nn from ...activations import ACT2FN from ...cache_utils import Cache from ...modeling_outputs import BaseModelOutputWithPooling from ...processing_utils import Unpack from ...utils import auto_docstring, logging from ...utils.generic import check_model_inputs from ..llava.modeling_llava import ( LlavaCausalLMOutputWithPast, LlavaForConditionalGeneration, LlavaModel, LlavaModelOutputWithPast, LlavaPreTrainedModel, TransformersKwargs, ) from ..mistral.modeling_mistral import MistralRMSNorm from .configuration_mistral3 import Mistral3Config logger = logging.get_logger(__name__) class Mistral3RMSNorm(MistralRMSNorm): pass class Mistral3PatchMerger(nn.Module): """ Learned merging of spatial_merge_size ** 2 patches """ def __init__(self, config: Mistral3Config): super().__init__() self.config = config hidden_size = config.vision_config.hidden_size self.spatial_merge_size = config.spatial_merge_size self.patch_size = self.config.vision_config.patch_size self.merging_layer = nn.Linear(hidden_size * self.spatial_merge_size**2, hidden_size, bias=False) def forward(self, image_features: torch.Tensor, image_sizes: torch.Tensor) -> torch.Tensor: image_sizes = [ (image_size[0] // self.patch_size, image_size[1] // self.patch_size) for image_size in image_sizes ] tokens_per_image = [h * w for h, w in image_sizes] d = image_features.shape[-1] permuted_tensor = [] for image_index, image_tokens in enumerate(image_features.split(tokens_per_image)): # Reshape image_tokens into a 2D grid h, w = image_sizes[image_index] image_grid = image_tokens.view(h, w, d).permute(2, 0, 1).unsqueeze(0) grid = torch.nn.functional.unfold( image_grid, kernel_size=self.spatial_merge_size, stride=self.spatial_merge_size ) grid = grid.view(d * self.spatial_merge_size**2, -1).t() permuted_tensor.append(grid) image_features = torch.cat(permuted_tensor, dim=0) image_features = self.merging_layer(image_features) return image_features class Mistral3MultiModalProjector(nn.Module): def __init__(self, config: Mistral3Config): super().__init__() self.norm = Mistral3RMSNorm(config.vision_config.hidden_size, eps=config.text_config.rms_norm_eps) self.patch_merger = Mistral3PatchMerger(config) # We have hidden_size * the number of vision feature layers self.num_feature_layers = ( 1 if isinstance(config.vision_feature_layer, int) else len(config.vision_feature_layer) ) self.linear_1 = nn.Linear( config.vision_config.hidden_size * self.num_feature_layers, config.text_config.hidden_size, bias=config.multimodal_projector_bias, ) self.act = ACT2FN[config.projector_hidden_act] self.linear_2 = nn.Linear( config.text_config.hidden_size, config.text_config.hidden_size, bias=config.multimodal_projector_bias ) def forward(self, image_features: torch.Tensor, image_sizes: torch.Tensor): image_features = self.norm(image_features) image_features = self.patch_merger(image_features, image_sizes) hidden_states = self.linear_1(image_features) hidden_states = self.act(hidden_states) hidden_states = self.linear_2(hidden_states) return hidden_states class Mistral3CausalLMOutputWithPast(LlavaCausalLMOutputWithPast): pass class Mistral3ModelOutputWithPast(LlavaModelOutputWithPast): pass class Mistral3PreTrainedModel(LlavaPreTrainedModel): pass class Mistral3Model(LlavaModel): @check_model_inputs(tie_last_hidden_states=False) @auto_docstring( custom_intro="Obtains image last hidden states from the vision tower and apply multimodal projection." ) def get_image_features( self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: int | list[int] | None = None, output_hidden_states: bool | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | BaseModelOutputWithPooling: kwargs = {k: v for k, v in kwargs.items() if v is not None} # this is not memory efficient at all (output_hidden_states=True) will save all the hidden states. image_outputs = self.vision_tower( pixel_values, image_sizes=image_sizes, output_hidden_states=True, # Ignore arg on purpose return_dict=True, **kwargs, ) # If we have one vision feature layer, return the corresponding hidden states, # otherwise, select the hidden states of each feature layer and concatenate them if isinstance(vision_feature_layer, int): selected_image_feature = image_outputs.hidden_states[vision_feature_layer] else: hs_pool = [image_outputs.hidden_states[layer_idx] for layer_idx in vision_feature_layer] selected_image_feature = torch.cat(hs_pool, dim=-1) image_features = self.multi_modal_projector(selected_image_feature.squeeze(0), image_sizes) downsample_ratio = self.vision_tower.patch_size * self.config.spatial_merge_size split_sizes = ( (torch.as_tensor(image_sizes, device=image_features.device) // downsample_ratio).prod(dim=-1).tolist() ) image_features = torch.split(image_features.squeeze(0), split_sizes) image_outputs.pooler_output = image_features return image_outputs @check_model_inputs(tie_last_hidden_states=False) @auto_docstring def forward( self, input_ids: torch.LongTensor | None = None, pixel_values: torch.FloatTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, vision_feature_layer: int | list[int] | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, cache_position: torch.LongTensor | None = None, image_sizes: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | Mistral3ModelOutputWithPast: output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict if (input_ids is None) ^ (inputs_embeds is not None): raise ValueError("You must specify exactly one of input_ids or inputs_embeds") if inputs_embeds is None: inputs_embeds = self.get_input_embeddings()(input_ids) if pixel_values is not None: image_features = self.get_image_features( pixel_values=pixel_values, vision_feature_layer=vision_feature_layer, image_sizes=image_sizes, return_dict=True, ).pooler_output image_features = torch.cat(image_features, dim=0).to(inputs_embeds.device, inputs_embeds.dtype) special_image_mask = self.get_placeholder_mask( input_ids, inputs_embeds=inputs_embeds, image_features=image_features ) inputs_embeds = inputs_embeds.masked_scatter(special_image_mask, image_features) outputs = self.language_model( attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, **kwargs, ) return Mistral3ModelOutputWithPast( last_hidden_state=outputs.last_hidden_state, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=image_features if pixel_values is not None else None, ) class Mistral3ForConditionalGeneration(LlavaForConditionalGeneration): @auto_docstring def get_image_features( self, pixel_values: torch.FloatTensor, image_sizes: torch.Tensor, vision_feature_layer: int | list[int] | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | BaseModelOutputWithPooling: return self.model.get_image_features( pixel_values=pixel_values, image_sizes=image_sizes, vision_feature_layer=vision_feature_layer, **kwargs, ) def forward( self, input_ids: torch.LongTensor | None = None, pixel_values: torch.FloatTensor | None = None, attention_mask: torch.Tensor | None = None, position_ids: torch.LongTensor | None = None, past_key_values: Cache | None = None, inputs_embeds: torch.FloatTensor | None = None, labels: torch.LongTensor | None = None, use_cache: bool | None = None, output_attentions: bool | None = None, output_hidden_states: bool | None = None, return_dict: bool | None = None, cache_position: torch.LongTensor | None = None, logits_to_keep: int | torch.Tensor = 0, image_sizes: torch.Tensor | None = None, **kwargs: Unpack[TransformersKwargs], ) -> tuple | Mistral3CausalLMOutputWithPast: r""" labels (`torch.LongTensor` of shape `(batch_size, sequence_length)`, *optional*): Labels for computing the masked language modeling loss. Indices should either be in `[0, ..., config.vocab_size]` or -100 (see `input_ids` docstring). Tokens with indices set to `-100` are ignored (masked), the loss is only computed for the tokens with labels in `[0, ..., config.vocab_size]`. Example: ```python >>> from PIL import Image >>> import httpx >>> from io import BytesIO >>> from transformers import AutoProcessor, Mistral3ForConditionalGeneration >>> model = Mistral3ForConditionalGeneration.from_pretrained("mistralai/Mistral-Small-3.1-24B-Instruct-2503") >>> processor = AutoProcessor.from_pretrained("mistralai/Mistral-Small-3.1-24B-Instruct-2503") >>> prompt = "[INST][IMG]What is the image?[/INST]" >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg" >>> with httpx.stream("GET", url) as response: ... image = Image.open(BytesIO(response.read())) >>> inputs = processor(images=image, text=prompt, return_tensors="pt") >>> # Generate >>> generate_ids = model.generate(**inputs, max_new_tokens=15) >>> processor.batch_decode(generate_ids, skip_special_tokens=True, clean_up_tokenization_spaces=False)[0] "What is the image?The image depicts two cats lying on a pink blanket." ```""" output_attentions = output_attentions if output_attentions is not None else self.config.output_attentions output_hidden_states = ( output_hidden_states if output_hidden_states is not None else self.config.output_hidden_states ) return_dict = return_dict if return_dict is not None else self.config.use_return_dict outputs = self.model( input_ids=input_ids, pixel_values=pixel_values, attention_mask=attention_mask, position_ids=position_ids, past_key_values=past_key_values, inputs_embeds=inputs_embeds, use_cache=use_cache, output_attentions=output_attentions, output_hidden_states=output_hidden_states, return_dict=True, cache_position=cache_position, image_sizes=image_sizes, **kwargs, ) hidden_states = outputs[0] # Only compute necessary logits, and do not upcast them to float if we are not computing the loss slice_indices = slice(-logits_to_keep, None) if isinstance(logits_to_keep, int) else logits_to_keep logits = self.lm_head(hidden_states[:, slice_indices, :]) loss = None if labels is not None: loss = self.loss_function( logits=logits, labels=labels, vocab_size=self.config.text_config.vocab_size, **kwargs ) return Mistral3CausalLMOutputWithPast( loss=loss, logits=logits, past_key_values=outputs.past_key_values, hidden_states=outputs.hidden_states, attentions=outputs.attentions, image_hidden_states=outputs.image_hidden_states, ) __all__ = [ "Mistral3Model", "Mistral3PreTrainedModel", "Mistral3ForConditionalGeneration", ]