# NOTE(review): the following lines were web-page chrome captured from a
# Gitea/GitHub file view ("topics must start with a letter...", "57 lines",
# "1.5 KiB") and are not part of the module source; kept here as a comment
# so the file parses.
"""Evaluation metrics for cluster analysis results.
- Supervised evaluation uses ground truth class values for each sample.
- Unsupervised evaluation does not use ground truths and measures the "quality" of the
model itself.
"""
# Authors: The scikit-learn developers
# SPDX-License-Identifier: BSD-3-Clause
from sklearn.metrics.cluster._bicluster import consensus_score
from sklearn.metrics.cluster._supervised import (
adjusted_mutual_info_score,
adjusted_rand_score,
completeness_score,
contingency_matrix,
entropy,
expected_mutual_information,
fowlkes_mallows_score,
homogeneity_completeness_v_measure,
homogeneity_score,
mutual_info_score,
normalized_mutual_info_score,
pair_confusion_matrix,
rand_score,
v_measure_score,
)
from sklearn.metrics.cluster._unsupervised import (
calinski_harabasz_score,
davies_bouldin_score,
silhouette_samples,
silhouette_score,
)
# Star-import surface of ``sklearn.metrics.cluster``: every public metric
# re-exported by this subpackage, kept in alphabetical order.
__all__ = (
    [
        "adjusted_mutual_info_score",
        "adjusted_rand_score",
        "calinski_harabasz_score",
        "completeness_score",
        "consensus_score",
        "contingency_matrix",
        "davies_bouldin_score",
    ]
    + [
        # TODO(1.10): Remove
        "entropy",
    ]
    + [
        "expected_mutual_information",
        "fowlkes_mallows_score",
        "homogeneity_completeness_v_measure",
        "homogeneity_score",
        "mutual_info_score",
        "normalized_mutual_info_score",
        "pair_confusion_matrix",
        "rand_score",
        "silhouette_samples",
        "silhouette_score",
        "v_measure_score",
    ]
)