Skip to content

Objectives

deepgboost.objective.regression.BaseObjective

Abstract base for all DeepGBoost objective functions.

Source code in src/deepgboost/objective/regression.py
 8
 9
10
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
class BaseObjective:
    """
    Abstract base class defining the DeepGBoost objective interface.

    Concrete objectives must implement ``gradient`` and ``prior``;
    ``hessian`` and ``transform`` provide sensible defaults that may be
    overridden.
    """

    def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Pseudo-residuals: the negative gradient of the loss w.r.t. F.

        Parameters
        ----------
        y : np.ndarray of shape (n_samples,)
            Ground-truth targets.
        F : np.ndarray of shape (n_samples,)
            Current ensemble output.

        Returns
        -------
        np.ndarray of shape (n_samples,)
        """
        raise NotImplementedError

    def prior(self, y: np.ndarray) -> float:
        """Best constant initial prediction (F_0 in the paper)."""
        raise NotImplementedError

    def hessian(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Second derivatives of the loss w.r.t. F (Hessian diagonal).

        The all-ones default is exact for MSE and a safe constant for the
        non-differentiable MAE; objectives whose curvature depends on F
        (e.g. logistic) should override this.
        """
        return np.ones(F.shape, dtype=F.dtype)

    def transform(self, raw: np.ndarray) -> np.ndarray:
        """Identity mapping from raw model output to prediction space."""
        return raw

gradient(y, F)

Compute pseudo-residuals (negative gradient of the loss).

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples,)

True target values.

required
F np.ndarray of shape (n_samples,)

Current ensemble prediction.

required

Returns:

Type Description
np.ndarray of shape (n_samples,)
Source code in src/deepgboost/objective/regression.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Return the pseudo-residuals (negative loss gradient w.r.t. F).

    Parameters
    ----------
    y : np.ndarray of shape (n_samples,)
        Ground-truth targets.
    F : np.ndarray of shape (n_samples,)
        Current ensemble output.

    Returns
    -------
    np.ndarray of shape (n_samples,)

    Raises
    ------
    NotImplementedError
        Always; concrete objectives must override.
    """
    raise NotImplementedError

hessian(y, F)

Diagonal of the loss Hessian w.r.t. F (second derivatives).

Returns ones by default, which is exact for MSE and a safe constant for MAE (non-differentiable). Override for objectives where the Hessian varies with F (e.g. logistic).

Source code in src/deepgboost/objective/regression.py
39
40
41
42
43
44
45
46
47
48
49
50
51
def hessian(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Hessian diagonal of the loss w.r.t. F (second derivatives).

    The all-ones default is exact for MSE and a safe constant for the
    non-differentiable MAE; override when curvature varies with F
    (e.g. logistic).
    """
    return np.ones(F.shape, dtype=F.dtype)

prior(y)

Optimal constant prediction (F_0 in the paper).

Source code in src/deepgboost/objective/regression.py
32
33
34
35
36
37
def prior(self, y: np.ndarray) -> float:
    """Best constant initial prediction (F_0 in the paper); must be overridden."""
    raise NotImplementedError

transform(raw)

Map raw model output to prediction space (identity for regression).

Source code in src/deepgboost/objective/regression.py
53
54
55
56
57
58
def transform(self, raw: np.ndarray) -> np.ndarray:
    """Identity: regression predicts directly in raw-output space."""
    return raw

deepgboost.objective.regression.RMSEObjective

Bases: BaseObjective

Root Mean Squared Error objective.

Loss: L(y, F) = (y - F)^2 / 2. Gradient: g = y - F. Prior: mean(y).

Source code in src/deepgboost/objective/regression.py
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
90
91
92
93
class RMSEObjective(BaseObjective):
    """
    Squared-error objective: L(y, F) = (y - F)^2 / 2.

    The negative gradient is the plain residual y - F, and the optimal
    constant prediction is the target mean.
    """

    def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Residuals y - F, the pseudo-residuals of the squared-error loss.

        Parameters
        ----------
        y : np.ndarray of shape (n_samples,)
            Ground-truth targets.
        F : np.ndarray of shape (n_samples,)
            Current ensemble output.

        Returns
        -------
        np.ndarray of shape (n_samples,)
        """
        return y - F

    def prior(self, y: np.ndarray) -> float:
        """Target mean: the constant that minimises squared error."""
        return float(np.mean(y))

gradient(y, F)

Compute pseudo-residuals for MSE loss.

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples,)

True target values.

required
F np.ndarray of shape (n_samples,)

Current ensemble predictions.

required

Returns:

Type Description
np.ndarray of shape (n_samples,)
Source code in src/deepgboost/objective/regression.py
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Pseudo-residuals of the squared-error loss: simply y - F.

    Parameters
    ----------
    y : np.ndarray of shape (n_samples,)
        Ground-truth targets.
    F : np.ndarray of shape (n_samples,)
        Current ensemble output.

    Returns
    -------
    np.ndarray of shape (n_samples,)
    """
    return y - F

prior(y)

Return mean of y as the optimal constant prediction.

Source code in src/deepgboost/objective/regression.py
91
92
93
def prior(self, y: np.ndarray) -> float:
    """Target mean: the constant that minimises squared error."""
    return float(np.mean(y))

deepgboost.objective.regression.MAEObjective

Bases: BaseObjective

Mean Absolute Error objective.

Loss: L(y, F) = |y - F|. Gradient: g = sign(y - F). Prior: median(y).

Source code in src/deepgboost/objective/regression.py
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
class MAEObjective(BaseObjective):
    """
    Absolute-error objective: L(y, F) = |y - F|.

    The negative gradient is sign(y - F) and the optimal constant
    prediction is the target median.  The inherited all-ones Hessian is
    kept as a safe constant, since |.| has no second derivative.
    """

    def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Pseudo-residuals of the absolute-error loss: sign(y - F).

        Parameters
        ----------
        y : np.ndarray of shape (n_samples,)
            Ground-truth targets.
        F : np.ndarray of shape (n_samples,)
            Current ensemble output.

        Returns
        -------
        np.ndarray of shape (n_samples,)
        """
        return np.sign(y - F)

    def prior(self, y: np.ndarray) -> float:
        """Target median: the constant that minimises absolute error."""
        return float(np.median(y))

gradient(y, F)

Compute pseudo-residuals for MAE loss.

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples,)

True target values.

required
F np.ndarray of shape (n_samples,)

Current ensemble predictions.

required

Returns:

Type Description
np.ndarray of shape (n_samples,)
Source code in src/deepgboost/objective/regression.py
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Pseudo-residuals of the absolute-error loss: sign(y - F).

    Parameters
    ----------
    y : np.ndarray of shape (n_samples,)
        Ground-truth targets.
    F : np.ndarray of shape (n_samples,)
        Current ensemble output.

    Returns
    -------
    np.ndarray of shape (n_samples,)
    """
    return np.sign(y - F)

prior(y)

Return median of y as the optimal constant prediction.

Source code in src/deepgboost/objective/regression.py
126
127
128
def prior(self, y: np.ndarray) -> float:
    """Target median: the constant that minimises absolute error."""
    med = np.median(y)
    return float(med)

deepgboost.objective.classification.LogisticObjective

Bases: BaseObjective

Binary logistic (log-loss) objective.

Training operates in log-odds space. Loss: L(y, F) = -[y*log(p) + (1 - y)*log(1 - p)], with p = sigmoid(F). Gradient: g = y - sigmoid(F). Prior: log(p_mean / (1 - p_mean)) (the log-odds of the class rate).

Source code in src/deepgboost/objective/classification.py
11
12
13
14
15
16
17
18
19
20
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
41
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
63
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
83
84
85
86
87
88
89
class LogisticObjective(BaseObjective):
    """
    Binary cross-entropy (log-loss) objective in log-odds space.

    With p = sigmoid(F):
    Loss: L(y, F) = -[y*log(p) + (1-y)*log(1-p)]
    Gradient: g = y - p
    Prior: log(p_mean / (1 - p_mean)), the log-odds of the class rate.
    """

    def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Pseudo-residuals of the log-loss: y - sigmoid(F).

        Parameters
        ----------
        y : np.ndarray of shape (n_samples,)
            Binary targets in {0, 1}.
        F : np.ndarray of shape (n_samples,)
            Raw ensemble scores (log-odds).

        Returns
        -------
        np.ndarray of shape (n_samples,)
        """
        return y - sigmoid(F)

    def hessian(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Hessian diagonal h_i = p_i * (1 - p_i), with p_i = sigmoid(F_i).

        Parameters
        ----------
        y : np.ndarray of shape (n_samples,)
            Binary targets (ignored; present for API symmetry).
        F : np.ndarray of shape (n_samples,)
            Raw ensemble scores (log-odds).

        Returns
        -------
        np.ndarray of shape (n_samples,)
        """
        prob = sigmoid(F)
        return prob * (1.0 - prob)

    def prior(self, y: np.ndarray) -> float:
        """
        Log-odds of the positive-class rate, used as F_0.

        Parameters
        ----------
        y : np.ndarray of shape (n_samples,)
            Binary targets in {0, 1}.

        Returns
        -------
        float
        """
        # Clip the rate away from 0/1 so the log-odds stay finite.
        rate = float(y.mean())
        rate = np.clip(rate, 1e-7, 1 - 1e-7)
        return float(np.log(rate / (1.0 - rate)))

    def transform(self, raw: np.ndarray) -> np.ndarray:
        """Squash log-odds into probabilities via the sigmoid."""
        return sigmoid(raw)

gradient(y, F)

Compute pseudo-residuals for binary logistic loss.

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples,)

True binary targets in {0, 1}.

required
F np.ndarray of shape (n_samples,)

Current raw ensemble predictions (log-odds).

required

Returns:

Type Description
np.ndarray of shape (n_samples,)
Source code in src/deepgboost/objective/classification.py
21
22
23
24
25
26
27
28
29
30
31
32
33
34
35
36
37
38
39
40
def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Pseudo-residuals of the binary log-loss: y - sigmoid(F).

    Parameters
    ----------
    y : np.ndarray of shape (n_samples,)
        Binary targets in {0, 1}.
    F : np.ndarray of shape (n_samples,)
        Raw ensemble scores (log-odds).

    Returns
    -------
    np.ndarray of shape (n_samples,)
    """
    return y - sigmoid(F)

hessian(y, F)

Diagonal of the loss Hessian: h_i = p_i * (1 - p_i), p_i = sigmoid(F_i).

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples,)

True binary targets (unused; kept for API consistency).

required
F np.ndarray of shape (n_samples,)

Current raw ensemble predictions (log-odds).

required

Returns:

Type Description
np.ndarray of shape (n_samples,)
Source code in src/deepgboost/objective/classification.py
42
43
44
45
46
47
48
49
50
51
52
53
54
55
56
57
58
59
60
61
62
def hessian(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Hessian diagonal of the log-loss: h_i = p_i * (1 - p_i), p_i = sigmoid(F_i).

    Parameters
    ----------
    y : np.ndarray of shape (n_samples,)
        Binary targets (ignored; present for API symmetry).
    F : np.ndarray of shape (n_samples,)
        Raw ensemble scores (log-odds).

    Returns
    -------
    np.ndarray of shape (n_samples,)
    """
    prob = sigmoid(F)
    return prob * (1.0 - prob)

prior(y)

Compute log-odds of the positive-class rate as the initial prediction.

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples,)

True binary targets in {0, 1}.

required

Returns:

Type Description
float
Source code in src/deepgboost/objective/classification.py
64
65
66
67
68
69
70
71
72
73
74
75
76
77
78
79
80
81
82
def prior(self, y: np.ndarray) -> float:
    """
    Log-odds of the positive-class rate, used as the initial prediction.

    Parameters
    ----------
    y : np.ndarray of shape (n_samples,)
        Binary targets in {0, 1}.

    Returns
    -------
    float
    """
    # Clip the rate away from 0/1 so the log-odds stay finite.
    rate = float(y.mean())
    rate = np.clip(rate, 1e-7, 1 - 1e-7)
    return float(np.log(rate / (1.0 - rate)))

transform(raw)

Map log-odds to probabilities.

Source code in src/deepgboost/objective/classification.py
84
85
86
87
88
89
def transform(self, raw: np.ndarray) -> np.ndarray:
    """Squash raw log-odds into probabilities via the sigmoid."""
    return sigmoid(raw)

deepgboost.objective.classification.SoftmaxObjective

Bases: BaseObjective

Multi-class softmax objective (used internally per class in OvR).

Gradient: g = y_k - softmax(F)_k. Prior: log-odds for each class.

Note: DeepGBoostClassifier uses one-vs-rest (OvR) binary classifiers with LogisticObjective. SoftmaxObjective is provided for direct use with multi-output targets.

Source code in src/deepgboost/objective/classification.py
 92
 93
 94
 95
 96
 97
 98
 99
100
101
102
103
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
124
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
146
147
148
149
150
151
152
153
154
155
156
157
158
159
160
161
162
163
164
165
class SoftmaxObjective(BaseObjective):
    """
    Multi-class softmax (cross-entropy) objective.

    Gradient: g = y_k - softmax(F)_k
    Prior: per-class log-odds of the empirical class rates.

    Note: DeepGBoostClassifier uses one-vs-rest (OvR) binary classifiers
    with LogisticObjective.  SoftmaxObjective is provided for direct use
    with multi-output targets.
    """

    def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Per-class pseudo-residuals: y - softmax(F) along the class axis.

        Parameters
        ----------
        y : np.ndarray of shape (n_samples, n_classes)
            One-hot encoded targets.
        F : np.ndarray of shape (n_samples, n_classes)
            Raw log-scores.

        Returns
        -------
        np.ndarray of shape (n_samples, n_classes)
        """
        return y - softmax(F, axis=1)

    def hessian(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
        """
        Per-class Hessian diagonal: p_k * (1 - p_k), with p = softmax(F).

        Parameters
        ----------
        y : np.ndarray of shape (n_samples, n_classes)
            One-hot targets (ignored; present for API symmetry).
        F : np.ndarray of shape (n_samples, n_classes)
            Raw log-scores.

        Returns
        -------
        np.ndarray of shape (n_samples, n_classes)
        """
        probs = softmax(F, axis=1)
        return probs * (1.0 - probs)

    def prior(self, y: np.ndarray) -> np.ndarray:
        """
        Per-class log-odds of the empirical class rates (shape n_classes).

        Raises
        ------
        ValueError
            If y is 1-D instead of one-hot encoded.
        """
        if y.ndim == 1:
            raise ValueError(
                "SoftmaxObjective requires one-hot encoded y (2-D).",
            )
        # Clip rates away from 0/1 so the log-odds stay finite.
        rates = np.clip(y.mean(axis=0), 1e-7, 1 - 1e-7)
        return np.log(rates / (1.0 - rates))

    def transform(self, raw: np.ndarray) -> np.ndarray:
        """Normalise raw log-scores into class probabilities (softmax)."""
        return softmax(raw, axis=1)

gradient(y, F)

Compute per-class pseudo-residuals.

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples, n_classes)

One-hot encoded targets.

required
F np.ndarray of shape (n_samples, n_classes)

Raw log-scores.

required

Returns:

Type Description
np.ndarray of shape (n_samples, n_classes)
Source code in src/deepgboost/objective/classification.py
104
105
106
107
108
109
110
111
112
113
114
115
116
117
118
119
120
121
122
123
def gradient(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Per-class pseudo-residuals: y - softmax(F) along the class axis.

    Parameters
    ----------
    y : np.ndarray of shape (n_samples, n_classes)
        One-hot encoded targets.
    F : np.ndarray of shape (n_samples, n_classes)
        Raw log-scores.

    Returns
    -------
    np.ndarray of shape (n_samples, n_classes)
    """
    return y - softmax(F, axis=1)

hessian(y, F)

Diagonal of the per-class Hessian: p_k * (1 - p_k).

Parameters:

Name Type Description Default
y np.ndarray of shape (n_samples, n_classes)

One-hot encoded targets (unused; kept for API consistency).

required
F np.ndarray of shape (n_samples, n_classes)

Raw log-scores.

required

Returns:

Type Description
np.ndarray of shape (n_samples, n_classes)
Source code in src/deepgboost/objective/classification.py
125
126
127
128
129
130
131
132
133
134
135
136
137
138
139
140
141
142
143
144
145
def hessian(self, y: np.ndarray, F: np.ndarray) -> np.ndarray:
    """
    Per-class Hessian diagonal: p_k * (1 - p_k), with p = softmax(F).

    Parameters
    ----------
    y : np.ndarray of shape (n_samples, n_classes)
        One-hot targets (ignored; present for API symmetry).
    F : np.ndarray of shape (n_samples, n_classes)
        Raw log-scores.

    Returns
    -------
    np.ndarray of shape (n_samples, n_classes)
    """
    probs = softmax(F, axis=1)
    return probs * (1.0 - probs)

prior(y)

Log-odds prior for each class (shape n_classes).

Source code in src/deepgboost/objective/classification.py
147
148
149
150
151
152
153
154
155
156
157
158
def prior(self, y: np.ndarray) -> np.ndarray:
    """
    Per-class log-odds of the empirical class rates (shape n_classes).

    Raises
    ------
    ValueError
        If y is 1-D instead of one-hot encoded.
    """
    if y.ndim == 1:
        raise ValueError(
            "SoftmaxObjective requires one-hot encoded y (2-D).",
        )
    # Clip rates away from 0/1 so the log-odds stay finite.
    rates = np.clip(y.mean(axis=0), 1e-7, 1 - 1e-7)
    return np.log(rates / (1.0 - rates))

transform(raw)

Map raw log-scores to class probabilities via softmax.

Source code in src/deepgboost/objective/classification.py
160
161
162
163
164
165
def transform(self, raw: np.ndarray) -> np.ndarray:
    """Normalise raw log-scores into class probabilities (softmax)."""
    return softmax(raw, axis=1)