Classification Evaluator

The ClassificationEvaluator class is used to evaluate the performance of a classification model.
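
Below is a minimal, hypothetical usage sketch. The no-argument constructor and the way model_type is set are assumptions made for illustration; only compute_confusion_matrix and the model_type attribute it reads appear in the source shown under Code Documentation.

from types import SimpleNamespace

from evaluations.classification import ClassificationEvaluator

class_names = ["cat", "dog", "bird"]

# Ground truth labels in result[0]; a prediction object exposing
# predicted_class_names in result[1] (structure inferred from the source below).
result = {
    0: ["cat", "dog", "bird"],
    1: SimpleNamespace(predicted_class_names=["cat", "bird", "bird"]),
}

evaluator = ClassificationEvaluator()   # constructor arguments are an assumption
evaluator.model_type = "multiclass"     # attribute read by compute_confusion_matrix

confusion = evaluator.compute_confusion_matrix(result, class_names)
print(confusion[(0, 0)])  # 1: "cat" was predicted correctly
print(confusion[(1, 2)])  # 1: "dog" was mistaken for "bird"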

Code Documentation

Bases: Evaluator

Evaluate classification models.

Source code in evaluations/classification.py
class ClassificationEvaluator(Evaluator):
    """
    Evaluate classification models.
    """

    def compute_confusion_matrix(self, result: dict, class_names: list) -> dict:
        """
        Compute a confusion matrix for a classification model.

        Args:
            result (dict): A dictionary containing the ground truth and predictions.
            class_names (list): A list of class names.

        """

        confusion_data = {}

        # result[0] holds the ground truth labels and result[1] the prediction
        # object. The confusion matrix is stored as a dict keyed by
        # (ground truth class index, predicted class index), initialised to zero.

        for i, _ in enumerate(class_names):
            for j, _ in enumerate(class_names):
                confusion_data[(i, j)] = 0

        if self.model_type == "multiclass":
            for i, _ in enumerate(result[0]):
                if result[0][i] in result[1].predicted_class_names:
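                    # The ground truth class appears among the predictions: count it on the diagonal.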
                    r0index = class_names.index(result[0][i])
                    r1index = class_names.index(result[0][i])
                    confusion_data[(r0index, r1index)] += 1
                else:
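                    # Otherwise attribute the error to the prediction at the same position:
                    # row is the ground truth class, column is the predicted class.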
                    r0index = class_names.index(result[0][i])
                    r1index = class_names.index(result[1].predicted_class_names[i])
                    confusion_data[(r0index, r1index)] += 1
        else:
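            # Single-label case: compare the first ground truth label with the first prediction.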
            is_match = result[0][0] == result[1].predicted_class_names[0]

            if is_match:
                r0index = class_names.index(result[0][0])
                confusion_data[(r0index, r0index)] += 1
            else:
                r0index = class_names.index(result[0][0])
                r1index = class_names.index(result[1].predicted_class_names[0])
                confusion_data[(r0index, r1index)] += 1

        return confusion_data

compute_confusion_matrix(result, class_names)

Compute a confusion matrix for a classification model.

Parameters:

Name         Type  Description                                                 Default
result       dict  A dictionary containing the ground truth and predictions.  required
class_names  list  A list of class names.                                     required
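
For illustration, a small self-contained sketch of how the returned mapping can be laid out as one row per ground truth class; the to_matrix helper below is hypothetical and not part of the class.

def to_matrix(confusion_data: dict, class_names: list) -> list:
    # Convert the (ground truth index, predicted index) -> count mapping
    # into nested lists, one row per ground truth class.
    n = len(class_names)
    return [[confusion_data.get((i, j), 0) for j in range(n)] for i in range(n)]

class_names = ["cat", "dog", "bird"]
confusion_data = {(0, 0): 1, (1, 2): 1, (2, 2): 1}  # e.g. output of compute_confusion_matrix

print(to_matrix(confusion_data, class_names))
# [[1, 0, 0], [0, 0, 1], [0, 0, 1]]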