src.fairreckitlib.evaluation.metrics.rexmex.rexmex_rating_metric

This module contains the rexmex rating metric and creation functions.

Classes:

RexmexRatingMetric: rating metric implementation for rexmex.

Functions:

create_mape: create the MAPE rating metric (factory creation compatible).
create_mse: create the MSE rating metric (factory creation compatible).

This program has been developed by students from the bachelor Computer Science at Utrecht University within the Software Project course. © Copyright Utrecht University (Department of Information and Computing Sciences)

 1"""This module contains the rexmex rating metric and creation functions.
 2
 3Classes:
 4
 5    RexmexRatingMetric: rating metric implementation for rexmex.
 6
 7Functions:
 8
 9    create_mape: create the MAPE rating metric (factory creation compatible).
10    create_mse: create the MSE rating metric (factory creation compatible).
11
12This program has been developed by students from the bachelor Computer Science at
13Utrecht University within the Software Project course.
14© Copyright Utrecht University (Department of Information and Computing Sciences)
15"""
16
17from typing import Any, Dict
18
19import pandas as pd
20from rexmex.metrics import mean_absolute_percentage_error, mean_squared_error
21
22from ...evaluation_sets import EvaluationSets
23from ..metric_base import ColumnMetric
24
25
class RexmexRatingMetric(ColumnMetric):
    """Rating metric implementation for the Rexmex framework."""

    def on_evaluate(self, eval_sets: EvaluationSets) -> float:
        """Evaluate the sets for the performance of the metric.

        Args:
            eval_sets: the sets to use for computing the performance of the metric.

        Raises:
            ArithmeticError: when the merged test and rating set does not contain
                truth values.

        Returns:
            the evaluated performance.
        """
        # The ground-truth 'rating' column comes from the test set, so drop it
        # from the rating set to avoid a column clash on merge.
        rexmex_ratings = eval_sets.ratings.drop('rating', axis=1)
        # Recommender output carries a 'score' column, predictor output a
        # 'prediction' column; pick whichever is present.
        score_column = 'score' if 'score' in rexmex_ratings else 'prediction'
        scores = pd.merge(eval_sets.test, rexmex_ratings, how='left', on=['user', 'item'])
        # A left merge yields NaN scores for (user, item) pairs absent from the
        # rating set; those rows cannot be evaluated and are discarded.
        scores = scores.dropna(subset=[score_column], axis=0)
        try:
            return self.eval_func(scores['rating'], scores[score_column])
        except ValueError as err:
            # Attach a message so callers see why the evaluation failed instead
            # of a bare ArithmeticError.
            raise ArithmeticError(
                'merged test and rating set does not contain truth values'
            ) from err
49
50
def create_mape(name: str, params: Dict[str, Any], **_) -> RexmexRatingMetric:
    """Create the MAPE rating metric.

    Args:
        name: the name of the metric.
        params: there are no parameters for this metric.

    Returns:
        the RexmexRatingMetric wrapper of MAPE.
    """
    metric = RexmexRatingMetric(name, params, mean_absolute_percentage_error)
    return metric
62
63
def create_mse(name: str, params: Dict[str, Any], **_) -> RexmexRatingMetric:
    """Create the MSE rating metric.

    Args:
        name: the name of the metric.
        params: there are no parameters for this metric.

    Returns:
        the RexmexRatingMetric wrapper of MSE.
    """
    metric = RexmexRatingMetric(name, params, mean_squared_error)
    return metric
class RexmexRatingMetric(src.fairreckitlib.evaluation.metrics.metric_base.ColumnMetric):
27class RexmexRatingMetric(ColumnMetric):
28    """Rating metric implementation for the Rexmex framework."""
29
30    def on_evaluate(self, eval_sets: EvaluationSets) -> float:
31        """Evaluate the sets for the performance of the metric.
32
33        Args:
34            eval_sets: the sets to use for computing the performance of the metric.
35
36        Raises:
37            ArithmeticError: when the merged test and rating set does not contain truth values.
38
39        Returns:
40            the evaluated performance.
41        """
42        rexmex_ratings = eval_sets.ratings.drop('rating', axis=1)
43        score_column = 'score' if 'score' in rexmex_ratings else 'prediction'
44        scores = pd.merge(eval_sets.test, rexmex_ratings, how='left', on=['user', 'item'])
45        scores.dropna(subset=[score_column], axis=0, inplace=True)
46        try:
47            return self.eval_func(scores['rating'], scores[score_column])
48        except ValueError as err:
49            raise ArithmeticError from err

Rating metric implementation for the Rexmex framework.

def on_evaluate(self, eval_sets: src.fairreckitlib.evaluation.evaluation_sets.EvaluationSets) -> float:
30    def on_evaluate(self, eval_sets: EvaluationSets) -> float:
31        """Evaluate the sets for the performance of the metric.
32
33        Args:
34            eval_sets: the sets to use for computing the performance of the metric.
35
36        Raises:
37            ArithmeticError: when the merged test and rating set does not contain truth values.
38
39        Returns:
40            the evaluated performance.
41        """
42        rexmex_ratings = eval_sets.ratings.drop('rating', axis=1)
43        score_column = 'score' if 'score' in rexmex_ratings else 'prediction'
44        scores = pd.merge(eval_sets.test, rexmex_ratings, how='left', on=['user', 'item'])
45        scores.dropna(subset=[score_column], axis=0, inplace=True)
46        try:
47            return self.eval_func(scores['rating'], scores[score_column])
48        except ValueError as err:
49            raise ArithmeticError from err

Evaluate the sets for the performance of the metric.

Args: eval_sets: the sets to use for computing the performance of the metric.

Raises: ArithmeticError: when the merged test and rating set does not contain truth values.

Returns: the evaluated performance.

def create_mape(name: str, params: Dict[str, Any], **_) -> src.fairreckitlib.evaluation.metrics.rexmex.rexmex_rating_metric.RexmexRatingMetric:
52def create_mape(name: str, params: Dict[str, Any], **_) -> RexmexRatingMetric:
53    """Create the MAPE rating metric.
54
55    Args:
56        name: the name of the metric.
57        params: there are no parameters for this metric.
58
59    Returns:
60        the RexmexRatingMetric wrapper of MAPE.
61    """
62    return RexmexRatingMetric(name, params, mean_absolute_percentage_error)

Create the MAPE rating metric.

Args: name: the name of the metric. params: there are no parameters for this metric.

Returns: the RexmexRatingMetric wrapper of MAPE.

def create_mse(name: str, params: Dict[str, Any], **_) -> src.fairreckitlib.evaluation.metrics.rexmex.rexmex_rating_metric.RexmexRatingMetric:
65def create_mse(name: str, params: Dict[str, Any], **_) -> RexmexRatingMetric:
66    """Create the MSE rating metric.
67
68    Args:
69        name: the name of the metric.
70        params: there are no parameters for this metric.
71
72    Returns:
73        the RexmexRatingMetric wrapper of MSE.
74    """
75    return RexmexRatingMetric(name, params, mean_squared_error)

Create the MSE rating metric.

Args: name: the name of the metric. params: there are no parameters for this metric.

Returns: the RexmexRatingMetric wrapper of MSE.