diff --git a/pydeepflow/metrics.py b/pydeepflow/metrics.py
index 92aff3e..85aa309 100644
--- a/pydeepflow/metrics.py
+++ b/pydeepflow/metrics.py
@@ -157,12 +157,12 @@ def r2_score(y_true, y_pred):
     ss_tot = np.sum((y_true - np.mean(y_true)) ** 2)
     return 1 - (ss_res / ss_tot)

-def rmse(y_true, y_pred):
+def root_mean_squared_error(y_true, y_pred):
     """
-    Compute Root Mean Squared Error between true and predicted values.
+    Calculates the Root Mean Squared Error (RMSE).

     RMSE = sqrt((1/n) * Σ(y_true - y_pred)^2)

     Parameters
     ----------
     y_true : array-like
@@ -175,7 +175,5 @@ def rmse(y_true, y_pred):
     float
         The RMSE score.
     """
-    y_true = np.array(y_true)
-    y_pred = np.array(y_pred)
-    return np.sqrt(np.mean((y_true - y_pred) ** 2))
+    return np.sqrt(np.mean((np.array(y_true) - np.array(y_pred)) ** 2))

diff --git a/pydeepflow/model.py b/pydeepflow/model.py
index 8c676e1..91815fe 100644
--- a/pydeepflow/model.py
+++ b/pydeepflow/model.py
@@ -4,7 +4,7 @@
 import matplotlib.pyplot as plt
 from pydeepflow.activations import activation, activation_derivative
 from pydeepflow.losses import get_loss_function, get_loss_derivative
-from pydeepflow.metrics import precision_score, recall_score, f1_score, confusion_matrix,mean_absolute_error, mean_squared_error, r2_score
+from pydeepflow.metrics import precision_score, recall_score, f1_score, confusion_matrix, mean_absolute_error, mean_squared_error, r2_score, root_mean_squared_error
 from pydeepflow.device import Device
 from pydeepflow.regularization import Regularization
 from pydeepflow.checkpoints import ModelCheckpoint
@@ -817,7 +817,7 @@ def evaluate(self, X, y, metrics=['loss', 'accuracy']):
             y (np.ndarray): The true labels for evaluation.
             metrics (list, optional): A list of metrics to calculate. Defaults to ['loss', 'accuracy'].
-                Available metrics: 'loss', 'accuracy', 'precision', 'recall', 'f1_score', 'confusion_matrix'.
+                Available metrics: 'loss', 'accuracy', 'precision', 'recall', 'f1_score', 'confusion_matrix', 'root_mean_squared_error'.

         Returns:
             dict: A dictionary where keys are the metric names and values are the computed scores.

@@ -856,6 +856,9 @@ def evaluate(self, X, y, metrics=['loss', 'accuracy']):
         if 'r2_score' in metrics:
             results['r2_score'] = r2_score(y, predictions)

+        if 'root_mean_squared_error' in metrics:
+            results['root_mean_squared_error'] = root_mean_squared_error(y, predictions)
+
         return results

@@ -1740,6 +1743,9 @@ def evaluate(self, X, y, metrics=['loss', 'accuracy']):
         if 'r2_score' in metrics:
             results['r2_score'] = r2_score(y, predictions)
+
+        if 'root_mean_squared_error' in metrics:
+            results['root_mean_squared_error'] = root_mean_squared_error(y, predictions)

         return results
         # Removed confusion_matrix for simplification/dependency reasons

diff --git a/tests/test_metrics.py b/tests/test_metrics.py
index 1b08529..962efb8 100644
--- a/tests/test_metrics.py
+++ b/tests/test_metrics.py
@@ -2,7 +2,7 @@
 import numpy as np
 from pydeepflow.metrics import (
     precision_score, recall_score, f1_score, confusion_matrix,
-    mean_absolute_error, mean_squared_error, r2_score, rmse
+    mean_absolute_error, mean_squared_error, r2_score, root_mean_squared_error
 )

 class TestMetrics(unittest.TestCase):
@@ -54,11 +54,12 @@ def test_r2_score(self):
         # R^2 = 1 - (1.5 / 29.1875) = 1 - 0.051389... approx 0.9486
         self.assertAlmostEqual(r2_score(self.y_true_reg, self.y_pred_reg), 0.94861051, places=5)

-    def test_rmse(self):
-        y_true = np.array([1, 2, 3])
-        y_pred = np.array([2, 2, 4])
-        expected = np.sqrt(((1-2)**2 + (2-2)**2 + (3-4)**2) / 3)
-        self.assertAlmostEqual(rmse(y_true, y_pred), expected)
+    def test_root_mean_squared_error(self):
+        # Differences:         (3 - 2.5) = 0.5, (-0.5 - 0.0) = -0.5, (2 - 2) = 0, (7 - 8) = -1
+        # Squared differences: [0.25, 0.25, 0, 1]
+        # MSE:                 (0.25 + 0.25 + 0 + 1) / 4 = 0.375
+        # RMSE:                sqrt(0.375) = 0.6123724356957945
+        self.assertAlmostEqual(root_mean_squared_error(self.y_true_reg, self.y_pred_reg), 0.6123724356957945, places=6)

 if __name__ == '__main__':
     unittest.main()
\ No newline at end of file
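
For reference, a minimal usage sketch of the renamed metric, runnable against this patch. The fixture values below are assumed from the worked arithmetic in the test comments (the y_true_reg/y_pred_reg definitions are not shown in this hunk), and the evaluate() call at the end is illustrative rather than part of the change:

    import numpy as np
    from pydeepflow.metrics import root_mean_squared_error

    # Values assumed from the arithmetic in test_root_mean_squared_error
    y_true = np.array([3, -0.5, 2, 7])
    y_pred = np.array([2.5, 0.0, 2, 8])

    # Squared errors [0.25, 0.25, 0, 1] -> MSE 0.375 -> RMSE sqrt(0.375)
    print(root_mean_squared_error(y_true, y_pred))  # 0.6123724356957945

    # Via the evaluate() hook added above, on any trained pydeepflow model:
    # results = model.evaluate(X_val, y_val, metrics=['loss', 'root_mean_squared_error'])
    # print(results['root_mean_squared_error'])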