utils/pycocoevalcap/cider/cider.py
# Filename: cider.py
#
# Description: Describes the class to compute the CIDEr (Consensus-Based Image Description Evaluation) Metric
#              by Vedantam, Zitnick, and Parikh (http://arxiv.org/abs/1411.5726)
#
# Creation Date: Sun Feb  8 14:16:54 2015
#
# Authors: Ramakrishna Vedantam <vrama91@vt.edu> and Tsung-Yi Lin <tl483@cornell.edu>

from .cider_scorer import CiderScorer
import pdb

class Cider:
    """
    Main class to compute the CIDEr metric.
    """
    def __init__(self, test=None, refs=None, n=4, sigma=6.0):
        # sum over n-grams of size 1 up to n (default: 4)
        self._n = n
        # standard deviation parameter for the Gaussian length penalty
        self._sigma = sigma

    def compute_score(self, gts, res):
        """
        Main function to compute the CIDEr score.
        :param gts (dict): maps <image id> to a list of tokenized reference sentences
        :param res (dict): maps <image id> to a single-element list containing one tokenized candidate (hypothesis) sentence
        :return: score (float): corpus-level CIDEr score
                 scores: per-image CIDEr scores
        """

        assert(gts.keys() == res.keys())
        imgIds = gts.keys()

        cider_scorer = CiderScorer(n=self._n, sigma=self._sigma)

        for id in imgIds:
            hypo = res[id]
            ref = gts[id]

            # Sanity check.
            assert(type(hypo) is list)
            assert(len(hypo) == 1)
            assert(type(ref) is list)
            assert(len(ref) > 0)

            cider_scorer += (hypo[0], ref)

        (score, scores) = cider_scorer.compute_score()

        return score, scores

    def method(self):
        return "CIDEr"