Update CIDEr.py
CIDEr.py CHANGED

@@ -5,7 +5,7 @@ import os
 import tempfile
 import subprocess
 
-from pycocoevalcap.cider.cider import CiderScorer
+from pycocoevalcap.cider.cider import CiderScorer
 
 _DESCRIPTION = """
 The CIDEr (Consensus-based Image Description Evaluation) metric is used to evaluate the quality of image captions generated by models in image captioning tasks.
@@ -148,7 +148,7 @@ class CIDEr(evaluate.Metric):
         predications, references = tokenize(
             self.tokenizer_path, predictions, references
         )
-        scorer = CiderScorer(n, sigma)
+        scorer = CiderScorer(n=n, sigma=sigma)
         for pred, refs in zip(predications, references):
             scorer += (pred, refs)
         score, scores = scorer.compute_score()
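The switch to keyword arguments is the substance of this commit: in the pycocoevalcap code this metric builds on, CiderScorer's constructor lists test and refs ahead of n and sigma, so the old positional call CiderScorer(n, sigma) would bind the n-gram order and the sigma value to the wrong parameters. Below is a minimal, self-contained sketch of how the patched call is used; the toy predictions/references and the prints are illustrative only and are not part of the metric's code.

from pycocoevalcap.cider.cider import CiderScorer

# Toy, pre-tokenized captions (illustration only; the metric itself runs a
# tokenizer over predictions/references before reaching this step).
predictions = [
    "a dog runs across the grass",
    "two children play soccer in the park",
]
references = [
    ["a dog is running on the grass", "a brown dog runs outside"],
    ["children are playing soccer in a park", "two kids kick a ball in the park"],
]

# Keyword arguments keep n (max n-gram order) and sigma (Gaussian length
# penalty) from being swallowed by the leading test/refs parameters.
scorer = CiderScorer(n=4, sigma=6.0)
for pred, refs in zip(predictions, references):
    scorer += (pred, refs)  # accumulate one (candidate, reference list) pair

score, scores = scorer.compute_score()
print(score)   # corpus-level CIDEr
print(scores)  # per-caption scores

Note that CIDEr's tf-idf weights are corpus statistics, so scores on a two-caption toy corpus like this are not meaningful; a real evaluation feeds the full test set through the metric.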