Skip to content

Metrics

siapy.optimizers.metrics

ClassificationMetrics

Bases: NamedTuple

accuracy instance-attribute

accuracy: float

precision instance-attribute

precision: float

recall instance-attribute

recall: float

f1 instance-attribute

f1: float

to_dict

to_dict() -> dict[str, float]
Source code in siapy/optimizers/metrics.py
59
60
def to_dict(self) -> dict[str, float]:
    """Return the classification metrics as a name-to-value mapping."""
    return dict(zip(self._fields, self))

RegressionMetrics

Bases: NamedTuple

mae instance-attribute

mae: float

mse instance-attribute

mse: float

rmse instance-attribute

rmse: float

r2 instance-attribute

r2: float

pe instance-attribute

pe: float

maxe instance-attribute

maxe: float

nrmse_mean instance-attribute

nrmse_mean: float

nrmse_range instance-attribute

nrmse_range: float

to_dict

to_dict() -> dict[str, float]
Source code in siapy/optimizers/metrics.py
85
86
def to_dict(self) -> dict[str, float]:
    """Return the regression metrics as a name-to-value mapping."""
    return {name: value for name, value in zip(self._fields, self)}

normalized_rmse

normalized_rmse(
    y_true: NDArray[floating[Any]],
    y_pred: NDArray[floating[Any]],
    normalize_by: Literal["range", "mean"] = "range",
) -> float
Source code in siapy/optimizers/metrics.py
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
def normalized_rmse(
    y_true: NDArray[np.floating[Any]],
    y_pred: NDArray[np.floating[Any]],
    normalize_by: Literal["range", "mean"] = "range",
) -> float:
    """Compute the RMSE of predictions, normalized by a statistic of ``y_true``.

    Args:
        y_true: Ground-truth target values.
        y_pred: Predicted target values.
        normalize_by: Which statistic of ``y_true`` divides the RMSE:
            "range" (max minus min) or "mean".

    Returns:
        The normalized root-mean-squared error as a plain float.

    Raises:
        InvalidInputError: If ``normalize_by`` is neither "range" nor "mean".
    """
    error = root_mean_squared_error(y_true, y_pred)
    if normalize_by == "mean":
        scale = np.mean(y_true)
    elif normalize_by == "range":
        # np.ptp == max - min ("peak to peak")
        scale = np.ptp(y_true)
    else:
        raise InvalidInputError(
            input_value=normalize_by,
            message="Unknown normalizer. Possible values are: 'range' or 'mean'.",
        )
    return float(error / scale)

calculate_classification_metrics

calculate_classification_metrics(
    y_true: NDArray[floating[Any]],
    y_pred: NDArray[floating[Any]],
    average: Literal[
        "micro", "macro", "samples", "weighted", "binary"
    ]
    | None = "weighted",
) -> ClassificationMetrics
Source code in siapy/optimizers/metrics.py
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
def calculate_classification_metrics(
    y_true: NDArray[np.floating[Any]],
    y_pred: NDArray[np.floating[Any]],
    average: Literal["micro", "macro", "samples", "weighted", "binary"] | None = "weighted",
) -> ClassificationMetrics:
    """Evaluate classification predictions against ground truth.

    Args:
        y_true: Ground-truth class labels.
        y_pred: Predicted class labels.
        average: Averaging strategy forwarded to the scikit-learn
            precision/recall/f1 scorers.

    Returns:
        A ClassificationMetrics tuple of accuracy, precision, recall and f1,
        each coerced to a plain float.
    """
    return ClassificationMetrics(
        accuracy=float(accuracy_score(y_true, y_pred)),
        precision=float(precision_score(y_true, y_pred, average=average)),
        recall=float(recall_score(y_true, y_pred, average=average)),
        f1=float(f1_score(y_true, y_pred, average=average)),
    )

calculate_regression_metrics

calculate_regression_metrics(
    y_true: NDArray[floating[Any]],
    y_pred: NDArray[floating[Any]],
) -> RegressionMetrics
Source code in siapy/optimizers/metrics.py
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
def calculate_regression_metrics(
    y_true: NDArray[np.floating[Any]],
    y_pred: NDArray[np.floating[Any]],
) -> RegressionMetrics:
    """Evaluate regression predictions against ground truth.

    Args:
        y_true: Ground-truth target values.
        y_pred: Predicted target values.

    Returns:
        A RegressionMetrics tuple holding MAE, MSE, RMSE, R2, mean absolute
        percentage error, maximum error, and RMSE normalized by the mean and
        by the range of ``y_true``, each coerced to a plain float.
    """
    return RegressionMetrics(
        mae=float(mean_absolute_error(y_true, y_pred)),
        mse=float(mean_squared_error(y_true, y_pred)),
        rmse=float(root_mean_squared_error(y_true, y_pred)),
        r2=float(r2_score(y_true, y_pred)),
        pe=float(mean_absolute_percentage_error(y_true, y_pred)),
        maxe=float(max_error(y_true, y_pred)),
        nrmse_mean=float(normalized_rmse(y_true, y_pred, normalize_by="mean")),
        nrmse_range=float(normalized_rmse(y_true, y_pred, normalize_by="range")),
    )