# Copyright 2025 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Processor class for SmolVLM.
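
A rough usage sketch (the checkpoint name is illustrative, not prescribed by this module):

    from transformers import AutoProcessor

    processor = AutoProcessor.from_pretrained("HuggingFaceTB/SmolVLM-Instruct")
    inputs = processor(images=image, text="<image>Describe this image.", return_tensors="pt")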
"""

from datetime import timedelta
from typing import TYPE_CHECKING, Union

from ...feature_extraction_utils import BatchFeature
from ...image_utils import ImageInput, make_nested_list_of_images
from ...processing_utils import AllKwargsForChatTemplate, ProcessingKwargs, ProcessorMixin, Unpack
from ...tokenization_utils_base import TextInput
from ...utils import auto_docstring, is_num2words_available, is_vision_available, logging
from ...video_utils import VideoInput


if is_vision_available():
    from .video_processing_smolvlm import (
        DEFAULT_MEDIA_OUTTRO,
        DEFAULT_VIDEO_INTRO,
        FRAME_TIMESTAMP_MESSAGE,
    )

if TYPE_CHECKING:
    from ...tokenization_utils_base import PreTokenizedInput

logger = logging.get_logger(__name__)


if is_num2words_available():
    from num2words import num2words
else:
    num2words = None


# The correct chat template to be used for videos after #38105
DEFAULT_CHAT_TEMPLATE = "<|im_start|>{% for message in messages %}{{message['role'] | capitalize}}{% if message['content'][0]['type'] == 'image' %}{{':'}}{% else %}{{': '}}{% endif %}{% for line in message['content'] %}{% if line['type'] == 'text' %}{{line['text']}}{% elif line['type'] == 'image' %}{{ '<image>' }}{% elif line['type'] == 'video' %}{{ '<video>' }}{% endif %}{% endfor %}<end_of_utterance>\n{% endfor %}{% if add_generation_prompt %}{{ 'Assistant:' }}{% endif %}"


def _prompt_split_image(
    image_seq_len, image_rows, image_cols, fake_token_around_image, image_token, global_image_token
):
    """Prompt with expanded image tokens for when the image is split into patches."""
    text_split_images = ""
    for n_h in range(image_rows):
        for n_w in range(image_cols):
            text_split_images += (
                f"{fake_token_around_image}" + f"<row_{n_h + 1}_col_{n_w + 1}>" + f"{image_token}" * image_seq_len
            )
        text_split_images += "\n"

    text_split_images += (
        f"\n{fake_token_around_image}"
        + f"{global_image_token}"
        + f"{image_token}" * image_seq_len
        + f"{fake_token_around_image}"
    )
    return text_split_images


def _prompt_single_image(image_seq_len, fake_token_around_image, image_token, global_image_token):
    """Prompt with expanded image tokens for a single image."""
    return (
        f"{fake_token_around_image}"
        + f"{global_image_token}"
        + f"{image_token}" * image_seq_len
        + f"{fake_token_around_image}"
    )


def get_image_prompt_string(
    image_rows, image_cols, image_seq_len, fake_token_around_image, image_token, global_image_token
):
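    # rows == cols == 0 signals that the image processor did not split the image
    # into patches, so only the global image block is emitted.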
    if image_rows == 0 and image_cols == 0:
        return _prompt_single_image(
            image_seq_len,
            fake_token_around_image=fake_token_around_image,
            image_token=image_token,
            global_image_token=global_image_token,
        )
    return _prompt_split_image(
        image_seq_len, image_rows, image_cols, fake_token_around_image, image_token, global_image_token
    )


class SmolVLMProcessorKwargs(ProcessingKwargs, total=False):
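    # `return_row_col_info` and `return_metadata` default to True because the processor
    # needs the patch grid (rows/cols) and the sampled frame timestamps to expand
    # `<image>` and `<video>` placeholders into full prompt strings.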
    _defaults = {
        "text_kwargs": {
            "add_special_tokens": True,
            "padding": False,
            "is_split_into_words": False,
        },
        "images_kwargs": {
            "return_row_col_info": True,
        },
        "videos_kwargs": {
            "return_metadata": True,
        },
    }


@auto_docstring
class SmolVLMProcessor(ProcessorMixin):
    def __init__(
        self,
        image_processor,
        tokenizer,
        video_processor,
        image_seq_len: int = 169,
        chat_template: str | None = None,
        **kwargs,
    ):
        r"""
        image_seq_len (`int`, *optional*, defaults to 169):
            The length of the image sequence, i.e. the number of `<image>` tokens per image in the input.
            This parameter is used to build the string from the input prompt and image tokens and should match the
            value the model used. It is computed as:
            `image_seq_len = int(((image_size // patch_size) ** 2) / (scale_factor**2))`.
        """
        self.fake_image_token = getattr(tokenizer, "fake_image_token", "<fake_token_around_image>")
        self.image_token = getattr(tokenizer, "image_token", "<image>")
        self.image_token_id = tokenizer.convert_tokens_to_ids(self.image_token)
        self.end_of_utterance_token = getattr(tokenizer, "end_of_utterance_token", "<end_of_utterance>")
        self.global_image_token = getattr(tokenizer, "global_image_token", "<global-img>")
        self.image_seq_len = image_seq_len
        self.video_token = getattr(tokenizer, "video_token", "<video>")

        if not num2words:
            raise ImportError(
                "Package `num2words` is required to run SmolVLM processor. Install it with `pip install num2words`."
            )

        super().__init__(image_processor, tokenizer, video_processor, chat_template=chat_template, **kwargs)

    def expand_text_with_image_tokens(self, text, image_rows, image_cols):
        prompt_strings = []
        for sample, sample_rows, sample_cols in zip(text, image_rows, image_cols):
            # Replace the image token with fake tokens around the expanded image token sequence of length `image_seq_len`
            image_prompt_strings = []
            for n_rows, n_cols in zip(sample_rows, sample_cols):
                image_prompt_string = get_image_prompt_string(
                    n_rows,
                    n_cols,
                    self.image_seq_len,
                    image_token=self.image_token,
                    fake_token_around_image=self.fake_image_token,
                    global_image_token=self.global_image_token,
                )
                image_prompt_strings.append(image_prompt_string)
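
            # Splitting on the placeholder yields `len(image_prompt_strings) + 1` text
            # chunks, which are re-interleaved with the expanded image prompts below.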
            split_sample = sample.split(self.image_token)
            # `str.split` always returns at least one chunk, so check that the image
            # token was actually present when there are image prompts to insert.
            if len(split_sample) == 1 and image_prompt_strings:
                raise ValueError("The image token should be present in the text.")

            # Interleave the expanded image prompts back where the image tokens were
            sample = split_sample[0]
            for i, image_prompt_string in enumerate(image_prompt_strings):
                sample += image_prompt_string + split_sample[i + 1]
            prompt_strings.append(sample)

        return prompt_strings

    def expand_text_with_video_tokens(self, text, video_inputs):
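        # `pixel_values` is shaped (batch, num_frames, channels, height, width),
        # so shape[1] is the number of sampled frames per video.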
        num_frames = video_inputs["pixel_values"].shape[1]
        video_metadata = iter(video_inputs["video_metadata"])

        prompt_strings = []
        for sample in text:
            while self.video_token in sample:
                metadata = next(video_metadata)
                if metadata.fps is None:
                    logger.warning_once(
                        "SmolVLM requires frame timestamps to construct prompts, but the `fps` of the input video could not be inferred. "
                        "Probably `video_metadata` was missing from inputs and you passed pre-sampled frames. "
                        "Defaulting to `fps=24`. Please provide `video_metadata` for more accurate results."
                    )
                    metadata.fps = 24  # Set the default fps to 24 for BC, otherwise `timestamps` can't be inferred
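                # Convert each frame timestamp in seconds to a (minutes, seconds)
                # pair for the "MM:SS" header rendered before each frame below.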
                timestamps = [(int(second // 60), int(second % 60)) for second in metadata.timestamps]
                duration = int(metadata.duration) if metadata.duration is not None else int(metadata.timestamps[-1])
                duration_td = timedelta(seconds=int(duration))
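                # The intro line spells out the frame count in words (via num2words)
                # and the total clip duration as rendered by `timedelta` (H:MM:SS).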
                image_prompt_strings = DEFAULT_VIDEO_INTRO.format(
                    frame_count=num2words(num_frames), video_duration=str(duration_td)
                )
                for timestamp in timestamps:
                    image_prompt_string = _prompt_single_image(
                        self.image_seq_len,
                        image_token=self.image_token,
                        fake_token_around_image=self.fake_image_token,
                        global_image_token=self.global_image_token,
                    )
                    timestamp = f"{timestamp[0]:02d}:{timestamp[1]:02d}"
                    image_prompt_string = FRAME_TIMESTAMP_MESSAGE.format(timestamp=timestamp) + image_prompt_string
                    image_prompt_strings += image_prompt_string

                image_prompt_strings += DEFAULT_MEDIA_OUTTRO
                sample = sample.replace(self.video_token, image_prompt_strings, 1)
            prompt_strings.append(sample)
        return prompt_strings

    @auto_docstring
    def __call__(
        self,
        images: ImageInput | list[ImageInput] | list[list[ImageInput]] = None,
        text: Union[TextInput, "PreTokenizedInput", list[TextInput], list["PreTokenizedInput"]] = None,
        videos: VideoInput | None = None,
        **kwargs: Unpack[SmolVLMProcessorKwargs],
    ) -> BatchFeature:
        if text is None and images is None and videos is None:
            raise ValueError("You must provide one of `text`, `images` or `videos`.")
        if text is None and ((images is None) ^ (videos is not None)):
            raise ValueError("You must specify exactly one of `images` or `videos`")

        output_kwargs = self._merge_kwargs(
            SmolVLMProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )

        if text is not None:
            if isinstance(text, str):
                text = [text]
            elif not isinstance(text, list) or not isinstance(text[0], str):
                raise ValueError("Invalid input text. Please provide a string, or a list of strings")
            n_images_in_text = sum(sample.count(self.image_token) for sample in text)
            if n_images_in_text > 0 and (images is None and videos is None):
                raise ValueError(f"We detected {n_images_in_text} image tokens in the text but no images/videos were passed")

        inputs = {}
        # Images and videos are mutually exclusive, so process whichever is present
        if images is not None:
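            # `fetch_images` resolves URLs/paths into loaded images; the nested-list
            # helper then normalizes the input to one list of images per batch sample.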
            images = self.image_processor.fetch_images(images)
            images = make_nested_list_of_images(images)
            vision_inputs = self.image_processor(images, **output_kwargs["images_kwargs"])

            image_rows = vision_inputs.pop("rows", None)
            image_cols = vision_inputs.pop("cols", None)
            inputs.update(vision_inputs)

            if text is not None:
                n_images_in_text = [sample.count(self.image_token) for sample in text]
                n_images_in_images = [len(sublist) for sublist in images]
                if n_images_in_images != n_images_in_text:
                    raise ValueError(
                        f"The number of images in the text {n_images_in_text} and images {n_images_in_images} should be the same."
                    )
                # Set default values for image_rows and image_cols if not provided
                if image_rows is None:
                    image_rows = [[0] * n_images for n_images in n_images_in_text]
                if image_cols is None:
                    image_cols = [[0] * n_images for n_images in n_images_in_text]
                text = self.expand_text_with_image_tokens(text, image_rows=image_rows, image_cols=image_cols)

        elif videos is not None:
            vision_inputs = self.video_processor(videos, **output_kwargs["videos_kwargs"])
            if text is not None:
                n_videos_in_text = [sample.count(self.video_token) for sample in text]
                n_videos_in_videos = [len(sublist) for sublist in videos]
                if n_videos_in_videos != n_videos_in_text:
                    raise ValueError(
                        f"The number of videos in the text {n_videos_in_text} and videos {n_videos_in_videos} should be the same."
                    )
                text = self.expand_text_with_video_tokens(text, vision_inputs)

            # If the user has not requested video metadata, pop it. By default, metadata
            # is always returned so that video tokens can be expanded correctly
            if not kwargs.get("return_metadata"):
                vision_inputs.pop("video_metadata")
            inputs.update(vision_inputs)

        return_tensors = output_kwargs["text_kwargs"].pop("return_tensors", None)

        if text is not None:
            text_inputs = self.tokenizer(text, **output_kwargs["text_kwargs"])
            self._check_special_mm_tokens(text, text_inputs, modalities=["image"])
            inputs.update(text_inputs)

        return BatchFeature(inputs, tensor_type=return_tensors)

    def apply_chat_template(
        self,
        conversation: list[dict[str, str]] | list[list[dict[str, str]]],
        chat_template: str | None = None,
        **kwargs: Unpack[AllKwargsForChatTemplate],
    ) -> str:
        """
        Similar to the `apply_chat_template` method on tokenizers, this method applies a Jinja template to input
        conversations to turn them into a single tokenizable string.

        The input is expected to be in the following format, where each message content is a list consisting of text
        and optionally image or video inputs. One can also provide an image, video, URL or local path, which will be
        used to form `pixel_values` when `return_dict=True`. If not provided, only the formatted text is returned,
        optionally tokenized.

        conversation = [
            {
                "role": "user",
                "content": [
                    {"type": "image", "url": "https://www.ilankelman.org/stopsigns/australia.jpg"},
                    {"type": "text", "text": "Please describe this image in detail."},
                ],
            },
        ]

        Args:
            conversation (`Union[list[dict[str, str]], list[list[dict[str, str]]]]`):
                The conversation to format.
            chat_template (`Optional[str]`, *optional*):
                The Jinja template to use for formatting the conversation. If not provided, the tokenizer's
                chat template is used.
        """
        if isinstance(conversation, (list, tuple)) and (
            isinstance(conversation[0], (list, tuple)) or hasattr(conversation[0], "content")
        ):
            conversations = conversation
        else:
            conversations = [conversation]

        has_video = any(
            (isinstance(content, dict) and content["type"] == "video")
            for conversation in conversations
            for message in conversation
            for content in message["content"]
        )
        if chat_template is None and has_video:
            # Re-assign to the correct default template for BC, if the user is not requesting their own template
            chat_template = DEFAULT_CHAT_TEMPLATE
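
        # Forward the processor's default sampling parameters so that frames are
        # sampled consistently when the chat template loads the videos itself.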
        kwargs.setdefault("num_frames", self.video_processor.num_frames)
        kwargs.setdefault("fps", self.video_processor.fps)

        return super().apply_chat_template(conversation, chat_template, **kwargs)


__all__ = ["SmolVLMProcessor"]