# Copyright 2020 The HuggingFace Datasets Authors and the current dataset script contributor.
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
#     http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.
"""TODO: Add a description here.""" | |
import evaluate | |
import datasets | |
import numpy as np | |

_CITATION = """\
@InProceedings{huggingface:module,
title = {Expected Calibration Error},
author = {Jordy Van Landeghem},
year = {2022}
}
"""

_DESCRIPTION = """\
Expected Calibration Error (ECE) measures how well a classifier's confidence
scores match its empirical accuracy. Predictions are grouped into equal-range
confidence bins, and ECE is the frequency-weighted average of the absolute
gap between the accuracy and the confidence of each bin:
ECE = sum_b (n_b / N) * |acc(b) - conf(b)|.
"""

_KWARGS_DESCRIPTION = """
Computes the Expected Calibration Error (ECE) of a classifier's predictions.
Args:
    predictions: list of per-sample probability vectors (e.g. softmax
        outputs), one list of floats per sample.
    references: list of integer class labels, one per sample.
Returns:
    ECE: expected calibration error of the top-label predictions, a float
        in [0, 1] (lower means better calibrated).
Examples:
    >>> ece = evaluate.load("jordyvl/ece")
    >>> results = ece.compute(references=[0, 1],
    ...                       predictions=[[0.85, 0.15], [0.35, 0.65]])
    >>> print(round(results["ECE"], 2))
    0.2
"""


# Helper for equal-range binning with right-edge tie-breaking (adapted from
# the edge handling in `np.histogramdd`).
def bin_idx_dd(P, bins):
    """Assign each value in `P` to a 0-based bin index; values equal to the
    rightmost edge are kept in the last bin instead of overflowing."""
    oneDbins = np.digitize(P, bins) - 1  # 0-based; `bins` holds the leftmost and rightmost edges

    # Tie-breaking to the left for the rightmost bin.
    # With `digitize`, values that fall on an edge are put in the right bin.
    # For the rightmost bin, we want values equal to the right
    # edge to be counted in the last bin, and not as an outlier.

    # Find the rounding precision.
    dedges_min = np.diff(bins).min()
    if dedges_min == 0:
        raise ValueError("The smallest edge difference is numerically 0.")
    decimal = int(-np.log10(dedges_min)) + 6
    for k in range(P.shape[-1]):
        # Find which points are on the rightmost edge.
        on_edge = np.where(
            (P[:, k] >= bins[-1]) & (np.around(P[:, k], decimal) == np.around(bins[-1], decimal))
        )[0]
        # Shift these points one bin to the left.
        oneDbins[on_edge, k] -= 1
    return oneDbins
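

# Minimal sanity sketch (illustrative only, not part of the metric): with
# edges [0, 0.5, 1.0], a value of exactly 1.0 stays in the last bin (index 1)
# where plain `np.digitize` would overflow to 2.
def _demo_bin_idx_dd():
    bins = np.array([0.0, 0.5, 1.0])
    P = np.array([[0.25, 0.75, 1.0]])  # shape (1, N), as used by the caller below
    assert bin_idx_dd(P, bins).tolist() == [[0, 1, 1]]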


def manual_binned_statistic(P, y_correct, bins, statistic="mean"):
    """Compute the per-bin mean of `y_correct`, binned by the values of `P`."""
    binnumbers = bin_idx_dd(np.expand_dims(P, 0), bins)[0]
    result = np.empty([len(bins)], float)
    result.fill(np.nan)

    flatcount = np.bincount(binnumbers, None)
    a = flatcount.nonzero()  # only bins that received at least one sample

    if statistic == "mean":
        flatsum = np.bincount(binnumbers, y_correct)
        result[a] = flatsum[a] / flatcount[a]
    return result, bins, binnumbers + 1  # +1 restores 1-based bin numbering, undoing bin_idx_dd
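

# Sketch of the intended correspondence with scipy (an assumption; scipy is
# not a dependency here): `manual_binned_statistic(P, y, bins)` mirrors
# `scipy.stats.binned_statistic(P, y, statistic="mean", bins=bins)`, except
# that samples equal to the rightmost edge stay in the last bin instead of
# being treated as outliers. Bin numbers are 1-based, as in scipy.
def _demo_manual_binned_statistic():
    bins = np.linspace(0, 1, 11)
    result, _, binnumbers = manual_binned_statistic(
        np.array([0.05, 0.15, 1.0]), np.array([0, 1, 1]), bins
    )
    assert binnumbers.tolist() == [1, 2, 10]  # the 1.0 sample stays inside bin 10
    assert result[0] == 0.0 and result[1] == 1.0 and result[9] == 1.0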


def CE_estimate(y_correct, P, bins=None, n_bins=10, p=1):
    """Estimate the calibration error as a weighted average over the
    accuracy/confidence difference of equal-range bins.

    y_correct: binary correctness indicators (N x 1)
    P: normalized confidences (N x 1), either the max or a per-class column
    """
    # Defaults: `n_bins` equal-range bins over [0, 1].
    if bins is None:
        bin_range = [0, 1]
        bins = np.linspace(bin_range[0], bin_range[1], n_bins + 1)
    else:
        n_bins = len(bins) - 1
        bin_range = [min(bins), max(bins)]

    # Bin confidence: represent each bin by its right/upper edge
    # (e.g. 0.6 for the bin 0.5-0.6) rather than by its center.
    calibrated_acc = bins[1:]
    # alternative: bin centers, e.g. (bins[:-1] + bins[1:]) / 2

    empirical_acc, bin_edges, bin_assignment = manual_binned_statistic(P, y_correct, bins)
    bin_numbers, weights_ece = np.unique(bin_assignment, return_counts=True)
    anindices = bin_numbers - 1  # back to 0-based indices of the occupied bins

    # Expected calibration error
    if p < np.inf:  # L^p calibration error (mean of p-th powers)
        CE = np.average(
            abs(empirical_acc[anindices] - calibrated_acc[anindices]) ** p,
            weights=weights_ece,  # relative bin frequencies
        )
    elif np.isinf(p):  # max-ECE
        CE = np.max(abs(empirical_acc[anindices] - calibrated_acc[anindices]))
    return CE
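

# Worked example (illustrative values): with the default 10 equal-range bins
# and p=1, P = [0.95, 0.65] land in the bins with right edges 1.0 and 0.7, so
# ECE = 0.5 * |1 - 1.0| + 0.5 * |0 - 0.7| = 0.35.
def _demo_CE_estimate():
    assert np.isclose(CE_estimate(np.array([1, 0]), np.array([0.95, 0.65])), 0.35)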


def top_CE(Y, P, **kwargs):
    """Top-label calibration error: bin only the confidence of the predicted class."""
    y_correct = (Y == np.argmax(P, -1)).astype(int)  # 1 where the top prediction is correct
    p_max = np.max(P, -1)  # confidence of the top prediction
    top_CE = CE_estimate(y_correct, p_max, **kwargs)  # can choose n_bins and norm p
    return top_CE
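

# The top-label reduction above, on illustrative values: P = [[0.7, 0.3],
# [0.4, 0.6]] with Y = [0, 0] gives argmax(P) = [0, 1], hence
# y_correct = [1, 0] and p_max = [0.7, 0.6]; only (p_max, y_correct) is binned.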


class ECE(evaluate.EvaluationModule):
    """Expected Calibration Error (ECE).

    0. create binning scheme [discretization of f]
    1. build histogram P(f(X))
    2. build conditional density estimate P(y|f(X))
    3. average bin probabilities f_B as center/edge of bin
    4. apply L^p norm distance and weights
    """

    # TODO: accept the binning parameters (bins/n_bins, norm p) at
    # initialization and build the binning scheme there.
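
    # Mapping to the steps above: `CE_estimate` covers steps 0-4 (equal-range
    # binning, per-bin accuracy, upper-edge bin confidence, weighted L^p
    # distance); `top_CE` first reduces the multi-class output to top-label
    # correctness and confidence.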

    def _info(self):
        return evaluate.EvaluationModuleInfo(
            # This is the description that will appear on the modules page.
            module_type="metric",
            description=_DESCRIPTION,
            citation=_CITATION,
            inputs_description=_KWARGS_DESCRIPTION,
            # This defines the format of each prediction and reference:
            # a probability vector per prediction and an integer label per reference.
            features=datasets.Features({
                'predictions': datasets.Sequence(datasets.Value('float32')),
                'references': datasets.Value('int64'),
            }),
            # Homepage of the module for documentation
            homepage="https://huggingface.co/spaces/jordyvl/ece",
            # Additional links to the codebase or references
            codebase_urls=["http://github.com/path/to/codebase/of/new_module"],
            reference_urls=["http://path.to.reference.url/new_module"]
        )

    def _download_and_prepare(self, dl_manager):
        """Optional: download external resources useful to compute the scores"""
        # TODO: Download external resources if needed
        pass

    def _compute(self, predictions, references):
        """Returns the scores"""
        ece = top_CE(references, predictions)
        return {
            "ECE": ece,
        }


def test_ECE():
    N = 10  # 10 instances
    K = 5   # 5-class problem

    def random_mc_instance(concentration=1):
        reference = np.argmax(np.random.dirichlet([concentration for _ in range(K)]), -1)
        prediction = np.random.dirichlet([concentration for _ in range(K)])  # probabilities
        # one-hot alternative: return np.eye(K)[np.argmax(reference, -1)]
        return reference, prediction

    references, predictions = list(zip(*[random_mc_instance() for i in range(N)]))
    references = np.array(references, dtype=np.int64)
    predictions = np.array(predictions, dtype=np.float32)
    res = ECE()._compute(predictions, references)
    print(f"ECE: {res['ECE']}")


if __name__ == '__main__':
    test_ECE()