Skip to content

Evaluators

siapy.utils.evaluators

ScorerFuncType module-attribute

ScorerFuncType = Callable[
    [BaseEstimator, ArrayLike2dType, ArrayLike1dType], float
]

cross_validation

cross_validation(
    model: BaseEstimator,
    X: ArrayLike2dType,
    y: ArrayLike1dType,
    X_val: ArrayLike2dType | None = None,
    y_val: ArrayLike1dType | None = None,
    *,
    groups: ArrayLike1dType | None = None,
    scoring: str | ScorerFuncType | None = None,
    cv: int | BaseCrossValidator | Iterable | None = None,
    n_jobs: int | None = 1,
    verbose: int = 0,
    params: dict[str, Any] | None = None,
    pre_dispatch: int | str = 1,
    error_score: Literal["raise"] | int = 0,
) -> float
Source code in siapy/utils/evaluators.py
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
def cross_validation(
    model: BaseEstimator,
    X: ArrayLike2dType,
    y: ArrayLike1dType,
    X_val: Annotated[ArrayLike2dType | None, "Not used, only for compatibility"] = None,
    y_val: Annotated[ArrayLike1dType | None, "Not used, only for compatibility"] = None,
    *,
    groups: ArrayLike1dType | None = None,
    scoring: str | ScorerFuncType | None = None,
    cv: int | BaseCrossValidator | Iterable | None = None,
    n_jobs: int | None = 1,
    verbose: int = 0,
    params: dict[str, Any] | None = None,
    pre_dispatch: int | str = 1,
    error_score: Literal["raise"] | int = 0,
) -> float:
    """Evaluate ``model`` via cross-validation and return the mean fold score.

    Args:
        model: Estimator to evaluate; must expose fit/predict-style methods
            (verified by ``check_model_prediction_methods``).
        X: Feature matrix.
        y: Target values.
        X_val: Ignored; present only so the signature matches
            ``hold_out_validation`` and the two are interchangeable.
        y_val: Ignored; same compatibility purpose as ``X_val``.
        groups: Optional group labels forwarded to ``cross_val_score``.
        scoring: Scorer name or callable; ``None`` uses the estimator default.
        cv: Fold count, splitter object, or iterable of splits.
        n_jobs: Parallelism forwarded to ``cross_val_score``.
        verbose: Verbosity level.
        params: Extra fit parameters forwarded to ``cross_val_score``.
        pre_dispatch: Job pre-dispatch setting.
        error_score: Value assigned to failing folds, or ``"raise"``.

    Returns:
        Mean of the per-fold scores.
    """
    # X_val/y_val are accepted only for signature compatibility; warn when
    # callers pass them, since cross-validation derives its own splits.
    if X_val is not None or y_val is not None:
        # Fixed typo: the original message lacked a space after the period
        # ("cross_validation.These parameters").
        logger.info(
            "Specification of X_val and y_val is redundant for cross_validation. These parameters are ignored."
        )
    check_model_prediction_methods(model)
    score = cross_val_score(
        estimator=model,
        X=X,  # type: ignore
        y=y,
        groups=groups,
        scoring=scoring,
        cv=cv,
        n_jobs=n_jobs,
        verbose=verbose,
        params=params,
        pre_dispatch=pre_dispatch,
        error_score=error_score,
    )
    # cross_val_score returns one score per fold; report their mean.
    return score.mean()

hold_out_validation

hold_out_validation(
    model: BaseEstimator,
    X: ArrayLike2dType,
    y: ArrayLike1dType,
    X_val: ArrayLike2dType | None = None,
    y_val: ArrayLike1dType | None = None,
    *,
    scoring: str | ScorerFuncType | None = None,
    test_size: float | None = 0.2,
    random_state: int | None = None,
    shuffle: bool = True,
    stratify: ndarray | None = None,
) -> float
Source code in siapy/utils/evaluators.py
 61
 62
 63
 64
 65
 66
 67
 68
 69
 70
 71
 72
 73
 74
 75
 76
 77
 78
 79
 80
 81
 82
 83
 84
 85
 86
 87
 88
 89
 90
 91
 92
 93
 94
 95
 96
 97
 98
 99
100
101
def hold_out_validation(
    model: BaseEstimator,
    X: ArrayLike2dType,
    y: ArrayLike1dType,
    X_val: ArrayLike2dType | None = None,
    y_val: ArrayLike1dType | None = None,
    *,
    scoring: str | ScorerFuncType | None = None,
    test_size: float | None = 0.2,
    random_state: int | None = None,
    shuffle: bool = True,
    stratify: np.ndarray | None = None,
) -> float:
    """Fit ``model`` on a training split and score it on a hold-out split.

    The hold-out set is either supplied explicitly (``X_val`` and ``y_val``
    together) or carved out of ``X``/``y`` with ``train_test_split``.

    Args:
        model: Estimator to fit and score; must expose fit/predict-style
            methods (verified by ``check_model_prediction_methods``).
        X: Training features (or the full feature matrix when splitting).
        y: Training targets (or the full target vector when splitting).
        X_val: Optional explicit validation features.
        y_val: Optional explicit validation targets.
        scoring: Scorer name (resolved via ``get_scorer``) or scorer
            callable; falsy values fall back to ``model.score``.
        test_size: Fraction reserved for validation when splitting.
        random_state: Seed forwarded to ``train_test_split``.
        shuffle: Whether to shuffle before splitting.
        stratify: Optional stratification labels for the split.

    Returns:
        The hold-out score of the fitted model.

    Raises:
        InvalidInputError: If exactly one of ``X_val``/``y_val`` is given.
    """
    have_X_val = X_val is not None
    have_y_val = y_val is not None

    if have_X_val and have_y_val:
        # Caller supplied the hold-out set explicitly.
        train_X, test_X = X, X_val
        train_y, test_y = y, y_val
    elif have_X_val or have_y_val:
        # Only one half of the validation set was given — reject it.
        raise InvalidInputError(
            input_value={"X_val": X_val, "y_val": y_val},
            message="To manually define validation set, both X_val and y_val must be specified.",
        )
    else:
        # No explicit validation set: split the provided data ourselves.
        train_X, test_X, train_y, test_y = train_test_split(
            X,
            y,
            test_size=test_size,
            random_state=random_state,
            shuffle=shuffle,
            stratify=stratify,
        )

    check_model_prediction_methods(model)
    model.fit(train_X, train_y)  # type: ignore

    if not scoring:
        # No scorer requested — use the estimator's built-in score method.
        return model.score(test_X, test_y)  # type: ignore
    scorer = get_scorer(scoring) if isinstance(scoring, str) else scoring
    return scorer(model, test_X, test_y)