from __future__ import annotations

try:
    from typing import Self
except ImportError:
    from typing_extensions import Self

import torch
from torch import Tensor, nn

from sentence_transformers.models.Module import Module


class WeightedLayerPooling(Module):
    """Token embeddings are the weighted mean of their different hidden layer representations.

    For each token, the hidden states of layers ``layer_start`` up to and including the last
    layer are combined as ``sum(w_i * h_i) / sum(w_i)``, where the weights ``w_i`` are learnable
    parameters (initialized uniformly) unless explicit ``layer_weights`` are provided.
    """

    config_keys: list[str] = ["word_embedding_dimension", "layer_start", "num_hidden_layers"]

    def __init__(
        self,
        word_embedding_dimension: int,
        num_hidden_layers: int = 12,
        layer_start: int = 4,
        layer_weights: Tensor | None = None,
    ):
        super().__init__()
        self.word_embedding_dimension = word_embedding_dimension
        self.layer_start = layer_start
        self.num_hidden_layers = num_hidden_layers
        # Default to uniform (all-ones) learnable weights over the pooled layers
        self.layer_weights = (
            layer_weights
            if layer_weights is not None
            else nn.Parameter(torch.tensor([1] * (num_hidden_layers + 1 - layer_start), dtype=torch.float))
        )

    def forward(self, features: dict[str, Tensor]) -> dict[str, Tensor]:
        ft_all_layers = features["all_layer_embeddings"]

        # Shape: (num_layers, batch_size, sequence_length, hidden_dim)
        all_layer_embedding = torch.stack(ft_all_layers)
        all_layer_embedding = all_layer_embedding[self.layer_start :, :, :, :]  # Keep layers from layer_start onwards

        # Broadcast the per-layer weights and take the weighted mean over the layer dimension
        weight_factor = self.layer_weights.unsqueeze(-1).unsqueeze(-1).unsqueeze(-1).expand(all_layer_embedding.size())
        weighted_average = (weight_factor * all_layer_embedding).sum(dim=0) / self.layer_weights.sum()

        features.update({"token_embeddings": weighted_average})
        return features

    def get_word_embedding_dimension(self) -> int:
        return self.word_embedding_dimension

    def save(self, output_path: str, *args, safe_serialization: bool = True, **kwargs) -> None:
        self.save_config(output_path)
        self.save_torch_weights(output_path, safe_serialization=safe_serialization)

    @classmethod
    def load(
        cls,
        model_name_or_path: str,
        subfolder: str = "",
        token: bool | str | None = None,
        cache_folder: str | None = None,
        revision: str | None = None,
        local_files_only: bool = False,
        **kwargs,
    ) -> Self:
        hub_kwargs = {
            "subfolder": subfolder,
            "token": token,
            "cache_folder": cache_folder,
            "revision": revision,
            "local_files_only": local_files_only,
        }
        config = cls.load_config(model_name_or_path=model_name_or_path, **hub_kwargs)
        model = cls(**config)
        model = cls.load_torch_weights(model_name_or_path=model_name_or_path, model=model, **hub_kwargs)
        return model
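

# --- Usage sketch (illustrative assumption, not part of the original module) ---
# WeightedLayerPooling reads "all_layer_embeddings" from the feature dict, which the
# Transformer module only populates when the underlying model is configured to return
# all hidden states. The checkpoint name and layer_start below are arbitrary examples.
if __name__ == "__main__":
    from sentence_transformers import SentenceTransformer, models

    word_embedding_model = models.Transformer(
        "bert-base-uncased", model_args={"output_hidden_states": True}
    )
    weighted_pooling = WeightedLayerPooling(
        word_embedding_model.get_word_embedding_dimension(),
        num_hidden_layers=word_embedding_model.auto_model.config.num_hidden_layers,
        layer_start=9,  # example choice: pool only the last few layers
    )
    pooling = models.Pooling(word_embedding_model.get_word_embedding_dimension())
    model = SentenceTransformer(modules=[word_embedding_model, weighted_pooling, pooling])
    print(model.encode(["An example sentence"]).shape)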