# Copyright 2023 The HuggingFace Inc. team.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""
Image/Text processor class for OWLv2
"""
from typing import TYPE_CHECKING

import numpy as np

from ...image_processing_utils import BatchFeature
from ...image_utils import ImageInput
from ...processing_utils import (
    ImagesKwargs,
    ProcessingKwargs,
    ProcessorMixin,
    Unpack,
)
from ...tokenization_utils_base import PreTokenizedInput, TextInput
from ...utils import TensorType, auto_docstring, is_torch_available


if TYPE_CHECKING:
    from .modeling_owlv2 import Owlv2ImageGuidedObjectDetectionOutput, Owlv2ObjectDetectionOutput


class Owlv2ImagesKwargs(ImagesKwargs, total=False):
    """
    query_images (`ImageInput`, *optional*):
        Query images to use for image-guided object detection. When provided, these images serve as visual queries
        to find similar objects in the main `images`. The query images override any text prompts, and the model
        performs image-to-image matching instead of text-to-image matching.
    """

    query_images: ImageInput | None


class Owlv2ProcessorKwargs(ProcessingKwargs, total=False):
    images_kwargs: Owlv2ImagesKwargs
    _defaults = {
        "text_kwargs": {
            "padding": "max_length",
        },
        "common_kwargs": {
            "return_tensors": "np",
        },
    }


@auto_docstring
class Owlv2Processor(ProcessorMixin):
    def __init__(self, image_processor, tokenizer, **kwargs):
        super().__init__(image_processor, tokenizer)

    @auto_docstring
    # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.__call__ with OwlViT->Owlv2
    def __call__(
        self,
        images: ImageInput | None = None,
        text: TextInput | PreTokenizedInput | list[TextInput] | list[PreTokenizedInput] | None = None,
        **kwargs: Unpack[Owlv2ProcessorKwargs],
    ) -> BatchFeature:
r"""
Returns:
[`BatchFeature`]: A [`BatchFeature`] with the following fields:
- **input_ids** -- List of token ids to be fed to a model. Returned when `text` is not `None`.
- **attention_mask** -- List of indices specifying which tokens should be attended to by the model (when
`return_attention_mask=True` or if *"attention_mask"* is in `self.model_input_names` and if `text` is not
`None`).
- **pixel_values** -- Pixel values to be fed to a model. Returned when `images` is not `None`.
- **query_pixel_values** -- Pixel values of the query images to be fed to a model. Returned when `query_images` is not `None`.
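
        Example (a minimal usage sketch; the checkpoint name and image URL are illustrative, any OWLv2 checkpoint
        and image work the same way):

        ```python
        >>> import requests
        >>> from PIL import Image
        >>> from transformers import Owlv2Processor

        >>> processor = Owlv2Processor.from_pretrained("google/owlv2-base-patch16-ensemble")
        >>> url = "http://images.cocodataset.org/val2017/000000039769.jpg"
        >>> image = Image.open(requests.get(url, stream=True).raw)
        >>> inputs = processor(images=image, text=[["a photo of a cat", "a photo of a dog"]], return_tensors="pt")
        >>> sorted(inputs.keys())
        ['attention_mask', 'input_ids', 'pixel_values']
        ```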
"""
        output_kwargs = self._merge_kwargs(
            Owlv2ProcessorKwargs,
            tokenizer_init_kwargs=self.tokenizer.init_kwargs,
            **kwargs,
        )
        query_images = output_kwargs["images_kwargs"].pop("query_images", None)
        return_tensors = output_kwargs["text_kwargs"]["return_tensors"]

        if text is None and query_images is None and images is None:
            raise ValueError(
                "You have to specify at least one of `text`, `query_images`, or `images`. All three cannot be `None`."
            )

        data = {}
        if text is not None:
            if isinstance(text, str) or (isinstance(text, list) and not isinstance(text[0], list)):
                encodings = [self.tokenizer(text, **output_kwargs["text_kwargs"])]
            elif isinstance(text, list) and isinstance(text[0], list):
                encodings = []

                # Maximum number of queries across batch
                max_num_queries = max(len(text_single) for text_single in text)

                # Pad all batch samples to max number of text queries
                for text_single in text:
                    if len(text_single) != max_num_queries:
                        text_single = text_single + [" "] * (max_num_queries - len(text_single))

                    encoding = self.tokenizer(text_single, **output_kwargs["text_kwargs"])
                    encodings.append(encoding)
            else:
                raise TypeError("Input text should be a string, a list of strings or a nested list of strings")

            if return_tensors == "np":
                input_ids = np.concatenate([encoding["input_ids"] for encoding in encodings], axis=0)
                attention_mask = np.concatenate([encoding["attention_mask"] for encoding in encodings], axis=0)
            elif return_tensors == "pt" and is_torch_available():
                import torch

                input_ids = torch.cat([encoding["input_ids"] for encoding in encodings], dim=0)
                attention_mask = torch.cat([encoding["attention_mask"] for encoding in encodings], dim=0)
            else:
                raise ValueError(
                    'Only `return_tensors="np"` and `return_tensors="pt"` (with PyTorch installed) are supported.'
                )

            data["input_ids"] = input_ids
            data["attention_mask"] = attention_mask

        if query_images is not None:
            query_pixel_values = self.image_processor(query_images, **output_kwargs["images_kwargs"]).pixel_values
            # Query images always override the text prompt
            data = {"query_pixel_values": query_pixel_values}

        if images is not None:
            image_features = self.image_processor(images, **output_kwargs["images_kwargs"])
            data["pixel_values"] = image_features.pixel_values

        return BatchFeature(data=data, tensor_type=return_tensors)

    # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_grounded_object_detection with OwlViT->Owlv2
    def post_process_grounded_object_detection(
        self,
        outputs: "Owlv2ObjectDetectionOutput",
        threshold: float = 0.1,
        target_sizes: TensorType | list[tuple] | None = None,
        text_labels: list[list[str]] | None = None,
    ):
"""
Converts the raw output of [`Owlv2ForObjectDetection`] into final bounding boxes in (top_left_x, top_left_y,
bottom_right_x, bottom_right_y) format.
Args:
outputs ([`Owlv2ObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.1):
Score threshold to keep object detection predictions.
target_sizes (`torch.Tensor` or `list[tuple[int, int]]`, *optional*):
Tensor of shape `(batch_size, 2)` or list of tuples (`tuple[int, int]`) containing the target size
`(height, width)` of each image in the batch. If unset, predictions will not be resized.
text_labels (`list[list[str]]`, *optional*):
List of lists of text labels for each image in the batch. If unset, "text_labels" in output will be
set to `None`.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
- "scores": The confidence scores for each predicted box on the image.
- "labels": Indexes of the classes predicted by the model on the image.
- "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
- "text_labels": The text labels for each predicted bounding box on the image.
"""
        output = self.image_processor.post_process_object_detection(
            outputs=outputs, threshold=threshold, target_sizes=target_sizes
        )

        if text_labels is not None and len(text_labels) != len(output):
            raise ValueError("Make sure that you pass in as many lists of text labels as images")

        # adding text labels to the output
        if text_labels is not None:
            for image_output, image_text_labels in zip(output, text_labels):
                object_text_labels = [image_text_labels[i] for i in image_output["labels"]]
                image_output["text_labels"] = object_text_labels
        else:
            for image_output in output:
                image_output["text_labels"] = None

        return output

    # Copied from transformers.models.owlvit.processing_owlvit.OwlViTProcessor.post_process_image_guided_detection with OwlViT->Owlv2
    def post_process_image_guided_detection(
        self,
        outputs: "Owlv2ImageGuidedObjectDetectionOutput",
        threshold: float = 0.0,
        nms_threshold: float = 0.3,
        target_sizes: TensorType | list[tuple] | None = None,
    ):
"""
Converts the output of [`Owlv2ForObjectDetection.image_guided_detection`] into the format expected by the COCO
api.
Args:
outputs ([`Owlv2ImageGuidedObjectDetectionOutput`]):
Raw outputs of the model.
threshold (`float`, *optional*, defaults to 0.0):
Minimum confidence threshold to use to filter out predicted boxes.
nms_threshold (`float`, *optional*, defaults to 0.3):
IoU threshold for non-maximum suppression of overlapping boxes.
target_sizes (`torch.Tensor`, *optional*):
Tensor of shape (batch_size, 2) where each entry is the (height, width) of the corresponding image in
the batch. If set, predicted normalized bounding boxes are rescaled to the target sizes. If left to
None, predictions will not be unnormalized.
Returns:
`list[Dict]`: A list of dictionaries, each dictionary containing the following keys:
- "scores": The confidence scores for each predicted box on the image.
- "boxes": Image bounding boxes in (top_left_x, top_left_y, bottom_right_x, bottom_right_y) format.
- "labels": Set to `None`.
"""
        return self.image_processor.post_process_image_guided_detection(
            outputs=outputs, threshold=threshold, nms_threshold=nms_threshold, target_sizes=target_sizes
        )


__all__ = ["Owlv2Processor"]