diff --git a/Class_Mod/Regression_metrics.py b/Class_Mod/Regression_metrics.py
index 748ad34736c1f0d82f89564abe07f8f91597aa01..620041c6f12fd154d31567e3e8259811715dfe31 100644
--- a/Class_Mod/Regression_metrics.py
+++ b/Class_Mod/Regression_metrics.py
@@ -1,108 +1,36 @@
 from Packages import *
 
-def metrics(train, cv=None, test = None):
-
-    C = pd.DataFrame()
-    CV = pd.DataFrame()
-    T = pd.DataFrame()
-
-    if train is not None and cv is not None and test is not None:
-
-        C["r"] = [np.corrcoef(train[0], train[1])[0, 1]]
-        C["r2"] = [r2_score(train[0], train[1])]
-        C["rmse"] = [np.sqrt(mean_squared_error(train[0], train[1]))]
-        C["mae"] = [mean_absolute_error(train[0], train[1])]
-        C.index = ['perf']
-        METRICS = C
-
-        CV["r"] = [np.corrcoef(cv[0], cv[1])[0, 1]]
-        CV["r2"] = [r2_score(cv[0], cv[1])]
-        CV["rmse"] = [np.sqrt(mean_squared_error(cv[0], cv[1]))]
-        CV["mae"] = [mean_absolute_error(cv[0], cv[1])]
-        CV.index = ['perf']
-
-        T["r"] = [np.corrcoef(test[0], test[1])[0, 1]]
-        T["r2"] = [r2_score(test[0], test[1])]
-        T["rmse"] = [np.sqrt(mean_squared_error(test[0], test[1]))]
-        T["mae"] = [mean_absolute_error(test[0], test[1])]
-        T.index = ['perf']
-
-        METRICS = pd.concat([C, CV, T], axis=1)
-
-    elif train is not None and cv is not None and test is None:
-
-        C["r"] = [np.corrcoef(train[0], train[1])[0, 1]]
-        C["r2"] = [r2_score(train[0], train[1])]
-        C["rmse"] = [np.sqrt(mean_squared_error(train[0], train[1]))]
-        C["mae"] = [mean_absolute_error(train[0], train[1])]
-        C.index = ['perf']
-
-        CV["r"] = [np.corrcoef(cv[0], cv[1])[0, 1]]
-        CV["r2"] = [r2_score(cv[0], cv[1])]
-        CV["rmse"] = [np.sqrt(mean_squared_error(cv[0], cv[1]))]
-        CV["mae"] = [mean_absolute_error(cv[0], cv[1])]
-        CV.index = ['perf']
-
-        METRICS = pd.concat([C, CV], axis=1)
-
-    elif train is not None and cv is None and test is not None:
-
-        C["r"] = [np.corrcoef(train[0], train[1])[0, 1]]
-        C["r2"] = [r2_score(train[0], train[1])]
-        C["rmse"] = [np.sqrt(mean_squared_error(train[0], train[1]))]
-        C["mae"] = [mean_absolute_error(train[0], train[1])]
-        C.index = ['perf']
-
-        T["r"] = [np.corrcoef(test[0], test[1])[0, 1]]
-        T["r2"] = [r2_score(test[0], test[1])]
-        T["rmse"] = [np.sqrt(mean_squared_error(test[0], test[1]))]
-        T["mae"] = [mean_absolute_error(test[0], test[1])]
-        T.index = ['perf']
-        METRICS = pd.concat([C, T], axis=1)
-
-    elif train is None and cv is not None and test is not None:
-
-        CV["r"] = [np.corrcoef(cv[0], cv[1])[0, 1]]
-        CV["r2"] = [r2_score(cv[0], cv[1])]
-        CV["rmse"] = [np.sqrt(mean_squared_error(cv[0], cv[1]))]
-        CV["mae"] = [mean_absolute_error(cv[0], cv[1])]
-        CV.index = ['perf']
-
-        T["r"] = [np.corrcoef(test[0], test[1])[0, 1]]
-        T["r2"] = [r2_score(test[0], test[1])]
-        T["rmse"] = [np.sqrt(mean_squared_error(test[0], test[1]))]
-        T["mae"] = [mean_absolute_error(test[0], test[1])]
-        T.index = ['perf']
-
-        METRICS = pd.concat([CV, T], axis=1)
-
-    elif train is not None and cv is None and test is None:
-        C["r"] = [np.corrcoef(train[0], train[1])[0, 1]]
-        C["r2"] = [r2_score(train[0], train[1])]
-        C["rmse"] = [np.sqrt(mean_squared_error(train[0], train[1]))]
-        C["mae"] = [mean_absolute_error(train[0], train[1])]
-        C.index = ['perf']
-        METRICS = C
-
-    if train is None and cv is not None and test is None:
-
-        CV["r"] = [np.corrcoef(cv[0], cv[1])[0, 1]]
-        CV["r2"] = [r2_score(cv[0], cv[1])]
-        CV["rmse"] = [np.sqrt(mean_squared_error(cv[0], cv[1]))]
-        CV["mae"] = [mean_absolute_error(cv[0], cv[1])]
-        CV.index = ['perf']
-
-        METRICS = CV
-
-    if train is None and cv is None and test is not None:
-
-        T["r"] = [np.corrcoef(test[0], test[1])[0, 1]]
-        T["r2"] = [r2_score(test[0], test[1])]
-        T["rmse"] = [np.sqrt(mean_squared_error(test[0], test[1]))]
-        T["mae"] = [mean_absolute_error(test[0], test[1])]
-        T.index = ['perf']
-
-        METRICS = T
-
-    return METRICS
-
+class metrics:
+    def __init__(self, meas, pred):
+        # Flatten measured and predicted values to 1-D numpy arrays,
+        # accepting either pandas objects or plain array-likes.
+        if isinstance(meas, (pd.DataFrame, pd.Series)):
+            self.meas = meas.to_numpy().ravel()
+        else:
+            self.meas = np.asarray(meas).ravel()
+
+        if isinstance(pred, (pd.DataFrame, pd.Series)):
+            self.pred = pred.to_numpy().ravel()
+        else:
+            self.pred = np.asarray(pred).ravel()
+
+    @property
+    def evaluate_(self):
+        xbar = np.mean(self.meas)  # mean of the measured values
+        e2 = (self.meas - self.pred)**2  # squared errors
+
+        # Sums of squares
+        sst = np.sum((self.meas - xbar)**2)  # total
+        ssr = np.sum(e2)                     # residual
+
+        # Compute statistical metrics
+        metr = pd.DataFrame()
+        metr['r'] = [np.corrcoef(self.meas, self.pred)[0, 1]]
+        metr['r2'] = [1 - ssr/sst]
+        metr['rmse'] = [np.sqrt(np.mean(e2))]
+        metr['mae'] = [np.mean(np.abs(self.meas - self.pred))]
+        metr['rpd'] = [np.std(self.meas)/np.sqrt(np.mean(e2))]
+        metr['rpiq'] = [(np.quantile(self.meas, .75) - np.quantile(self.meas, .25))/np.sqrt(np.mean(e2))]
+        return metr
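+
+# Minimal usage sketch (assumes y and y_hat are array-likes of equal length):
+#   m = metrics(meas=y, pred=y_hat)
+#   scores = m.evaluate_   # one-row DataFrame with r, r2, rmse, mae, rpd, rpiq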