Commit 4d19551

rabsrra-amex and Rohit Agarwal authored
Example multiple metric (#1045)
* Adding an example showing how to calculate multiple scores per run
* Update abstract_evaluator.py

Co-authored-by: Rohit Agarwal <[email protected]>
1 parent 64b8b9c commit 4d19551

File tree

2 files changed: +70 -9 lines changed

autosklearn/evaluation/abstract_evaluator.py

Lines changed: 4 additions & 9 deletions
@@ -19,7 +19,7 @@
 from autosklearn.pipeline.implementations.util import (
     convert_multioutput_multiclass_to_multilabel
 )
-from autosklearn.metrics import calculate_score, CLASSIFICATION_METRICS, REGRESSION_METRICS
+from autosklearn.metrics import calculate_score
 from autosklearn.util.logging_ import get_named_client_logger
 
 from ConfigSpace import Configuration
@@ -264,14 +264,9 @@ def _loss(self, y_true, y_hat, scoring_functions=None):
             scoring_functions=scoring_functions)
 
         if hasattr(score, '__len__'):
-            # TODO: instead of using self.metric, it should use all metrics given by key.
-            # But now this throws error...
-            if self.task_type in CLASSIFICATION_TASKS:
-                err = {key: metric._optimum - score[key] for key, metric in
-                       CLASSIFICATION_METRICS.items() if key in score}
-            else:
-                err = {key: metric._optimum - score[key] for key, metric in
-                       REGRESSION_METRICS.items() if key in score}
+            err = {metric.name: metric._optimum - score[metric.name]
+                   for metric in scoring_functions}
+            err[self.metric.name] = self.metric._optimum - score[self.metric.name]
         else:
             err = self.metric._optimum - score
 
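In short, the revised _loss no longer filters the global CLASSIFICATION_METRICS/REGRESSION_METRICS registries: it subtracts each reported score from that metric's optimum for exactly the requested scoring_functions, and always includes the optimization metric itself. Below is a minimal standalone sketch of that computation; the Metric namedtuple is a hypothetical stand-in for auto-sklearn's scorer objects, which expose the same name and _optimum attributes used here.

# Minimal sketch of the new per-metric loss computation.
# `Metric` is a hypothetical stand-in for auto-sklearn's scorers.
from collections import namedtuple

Metric = namedtuple('Metric', ['name', '_optimum'])


def per_metric_loss(optimization_metric, scoring_functions, score):
    """`score` maps metric names to achieved values (dict-like)."""
    err = {metric.name: metric._optimum - score[metric.name]
           for metric in scoring_functions}
    # The metric being optimized is always included in the loss dict.
    err[optimization_metric.name] = (
        optimization_metric._optimum - score[optimization_metric.name]
    )
    return err


accuracy = Metric('accuracy', 1.0)
f1 = Metric('f1', 1.0)
print(per_metric_loss(accuracy, [f1], {'accuracy': 0.95, 'f1': 0.90}))
# -> {'f1': 0.1, 'accuracy': 0.05} (up to floating-point rounding)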
New example file (calculating multiple metrics per run)

Lines changed: 66 additions & 0 deletions

@@ -0,0 +1,66 @@
# -*- encoding: utf-8 -*-
"""
=======
Metrics
=======

In *Auto-sklearn*, the model is optimized over a metric, either a built-in or
a custom one. Moreover, it is also possible to calculate multiple metrics
per run. The following example shows how to calculate both built-in and
self-defined metrics for a classification problem.
"""

import autosklearn.classification
import custom_metrics
import pandas as pd
import sklearn.datasets
import sklearn.metrics
import sklearn.model_selection
from autosklearn.metrics import balanced_accuracy, precision, recall, f1


def get_metric_result(cv_results):
    # Collect per-run results and keep only the successful runs.
    results = pd.DataFrame.from_dict(cv_results)
    results = results[results['status'] == "Success"]
    cols = ['rank_test_scores', 'param_classifier:__choice__', 'mean_test_score']
    # Additional scoring functions show up as 'metric_<name>' columns.
    cols.extend([key for key in cv_results.keys() if key.startswith('metric_')])
    return results[cols]


if __name__ == "__main__":
    ############################################################################
    # Data Loading
    # ============

    X, y = sklearn.datasets.load_breast_cancer(return_X_y=True)
    X_train, X_test, y_train, y_test = \
        sklearn.model_selection.train_test_split(X, y, random_state=1)

    ############################################################################
    # Build and fit a classifier
    # ==========================

    # Wrap the self-defined error function as an auto-sklearn scorer.
    error_rate = autosklearn.metrics.make_scorer(
        name='custom_error',
        score_func=custom_metrics.error,
        optimum=0,
        greater_is_better=False,
        needs_proba=False,
        needs_threshold=False
    )
    cls = autosklearn.classification.AutoSklearnClassifier(
        time_left_for_this_task=120,
        per_run_time_limit=30,
        scoring_functions=[balanced_accuracy, precision, recall, f1, error_rate]
    )
    cls.fit(X_train, y_train, X_test, y_test)

    ###########################################################################
    # Get the Score of the final ensemble
    # ===================================

    predictions = cls.predict(X_test)
    print("Accuracy score", sklearn.metrics.accuracy_score(y_test, predictions))

    print("#" * 80)
    print("Metric results")
    print(get_metric_result(cls.cv_results_).to_string(index=False))
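The example above imports custom_metrics.error from a companion module that is not part of this commit's visible diff. A minimal sketch of what such a module could contain, assuming a plain misclassification rate (which matches optimum=0 and greater_is_better=False in the make_scorer call above):

# custom_metrics.py -- assumed companion module (not shown in this diff).
import numpy as np


def error(solution, prediction):
    # Fraction of misclassified samples; lower is better.
    return float(np.mean(solution != prediction))

With the custom scorer passed through scoring_functions, every extra metric is reported per run in cls.cv_results_ under a 'metric_<name>' key, which is exactly what get_metric_result collects and prints alongside the main test score.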
