diff --git a/Class_Mod/VarSel.py b/Class_Mod/VarSel.py
index 6e4a378ea1aa12be7170f13c2ee4758d0855b6f6..1060a2a77f62aa7f78580b76fd40a2897f5bddba 100644
--- a/Class_Mod/VarSel.py
+++ b/Class_Mod/VarSel.py
@@ -28,7 +28,7 @@ class TpeIpls:
         self.n_intervall = n_intervall
         self.n_arrets = self.n_intervall*2
         self.PLS_params = {f'v{i}': hp.randint(f'v{i}', 0, self.p) for i in range(1,self.n_arrets+1)}
-        self.PLS_params['n_components'] = hp.randint("n_components", 1, 6)
+        self.PLS_params['n_components'] = hp.randint("n_components", 1, 10)
 
     def objective(self, params):
         self.idx = [params[f'v{i}'] for i in range(1,self.n_arrets+1)]
@@ -52,9 +52,9 @@ class TpeIpls:
         yt = Model.predict(self.x_test.iloc[:, id]).ravel()
 
         ### compute r-squared
-        r2c = r2_score(self.y_train, yc)
-        r2cv = r2_score(self.y_train, ycv)
-        r2t = r2_score(self.y_test, yt)
+        #r2c = r2_score(self.y_train, yc)
+        #r2cv = r2_score(self.y_train, ycv)
+        #r2t = r2_score(self.y_test, yt)
         rmsecv = np.sqrt(mean_squared_error(self.y_train, ycv))
         rmsec = np.sqrt(mean_squared_error(self.y_train, yc))
 
@@ -85,7 +85,7 @@ class TpeIpls:
                            algo=tpe.suggest,  # Tree of Parzen Estimators’ (tpe) which is a Bayesian approach
                            max_evals=n_iter,
                            trials=trials,
-                           verbose=2)
+                           verbose=0)
 
         ban = {}
         for i in range(len(self.segments)):
diff --git a/Packages.py b/Packages.py
index cc180ac0eb6c16cb895ec316ed4907559527f7ff..6dc05faea95b9a0679be627e6af1f1f8dcfe8dcf 100644
--- a/Packages.py
+++ b/Packages.py
@@ -11,7 +11,7 @@ import pandas as pd
 from os import listdir
 from os.path import isfile, join
 from sklearn.preprocessing import StandardScaler, MinMaxScaler
-
+import time
 ### Exploratory data analysis-Dimensionality reduction
 from umap.umap_ import UMAP
 from sklearn.decomposition import PCA, NMF
diff --git a/data/models/.gitkeep b/data/models/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/data/predictions/.gitkeep b/data/predictions/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/data/sample_selections/.gitkeep b/data/sample_selections/.gitkeep
deleted file mode 100644
index e69de29bb2d1d6434b8b29ae775ad8c2e48c5391..0000000000000000000000000000000000000000
diff --git a/pages/2-model_creation.py b/pages/2-model_creation.py
index 23d7fea1aa5b365e2fe4ed35863fb926305a7678..b66ac470edbcd7355c71d0d3fdf9e03f8be4a24f 100644
--- a/pages/2-model_creation.py
+++ b/pages/2-model_creation.py
@@ -9,11 +9,10 @@ def nn(x):
 ########################################################################################
 reg_algo = ["","Full-PLS", "Locally Weighted PLS", "Interval-PLS"]
 
-# Model creation module
+# page Design
 st.header("Calibration Model Development", divider='blue')
 st.write("Create a predictive model, then use it for predicting your target variable(chemical values) from NIRS spectra")
 M1, M2, M3 = st.columns([2,2,2])
-M1.write("-- Performance metrics --")
 M4, M5 = st.columns([6,2])
 st.write("---")
 st.header("Model Diagnosis", divider='blue')
@@ -41,7 +40,7 @@ if xcal_csv is not None and ycal_csv is not None:
             col = 0
         else:
             col = False
-        rd_seed = M1.slider("Change Train-test split", min_value=1, max_value=1212, value=42, format="%i")
+        rd_seed = M1.slider("Customize Train-test split", min_value=1, max_value=100, value=42, format="%i")
         x, y = utils.load_csv(xcal_csv, ycal_csv, autoremove_na=True, sep=sep, x_hdr=0, y_hdr=0, x_index_col=col, y_index_col=col)
         # Split data into training and test sets using the kennard_stone method and correlation metric, 25% of data is used for testing
         train_index, test_index = train_test_split_idx(x, y=y, method="kennard_stone", metric="correlation", test_size=0.25, random_state=rd_seed)
@@ -56,7 +55,7 @@ if xcal_csv is not None and ycal_csv is not None:
         regression_algo = M1.selectbox("Choose the algorithm for regression", options=reg_algo, key = 12)
         if regression_algo == reg_algo[1]:
             # Train model with model function from application_functions.py
-            Reg = PinardPlsr(x_train=X_train, x_test=X_test,y_train=y_train, y_test=y_test)
+            Reg = PinardPlsr(x_train = X_train, x_test = X_test,y_train = y_train, y_test = y_test)
             reg_model = Reg.model_
             #M2.dataframe(Pin.pred_data_)
 
@@ -64,11 +63,19 @@ if xcal_csv is not None and ycal_csv is not None:
             reg_model = model_LWPLSR(xcal_csv, ycal_csv, sep, hdr)
 
         elif regression_algo == reg_algo[3]:
-            s = M2.number_input(label='Enter the maximum number of intervalls', min_value=1, max_value=6, value="min")
-            it = M2.number_input(label='Enter the maximum number of iteration', min_value=50, max_value=1000, value="min")
-            Reg = TpeIpls(x_train = X_train, x_test=X_test, y_train = y_train, y_test = y_test, scale = False, Kfold = 3, n_intervall = 6)
+            s = M2.number_input(label='Enter the maximum number of intervals', min_value=1, max_value=6, value=3)
+            it = M2.number_input(label='Enter the number of iterations', min_value=50, max_value=1000, value=100)
+            progress_text = "The model is being created. Please wait."
+            
+            Reg = TpeIpls(x_train = X_train, x_test=X_test, y_train = y_train, y_test = y_test, scale = False, Kfold = 3, n_intervall = s)
+            pro = M1.progress(0, text="The model is being created. Please wait!")
             rega = Reg.BandSelect(n_iter=it)
+            pro.empty()
+            M1.progress(100, text = "The model has successfully been created!")
+            
+            time.sleep(1)
             reg_model = Reg.model_
+            M2.table(rega[0])
         
         ################# Model analysis ############
 
@@ -77,9 +84,12 @@ if xcal_csv is not None and ycal_csv is not None:
             ycv = Reg.pred_data_[1]
             yt = Reg.pred_data_[2]
             
+            
+            M1.write("-- Performance metrics --")
+            M1.dataframe(Reg.metrics_)
+
             M7.pyplot(reg_plot([y_train, y_train, y_test],[yc, ycv, yt]))
             M8.pyplot(resid_plot([y_train, y_train, y_test],[yc, ycv, yt]))
-            M1.dataframe(Reg.metrics_)
             
             
             #model_export = M1.selectbox("Choose way to export", options=["pickle", "joblib"], key=20)
@@ -88,7 +98,12 @@ if xcal_csv is not None and ycal_csv is not None:
                 #export_package = __import__(model_export)
                 with open('data/models/model_' + model_name + '_on_' + xcal_csv.name + '_and_' + ycal_csv.name + '_data_' + '.pkl','wb') as f:
                     joblib.dump(reg_model, f)
+                
+                if regression_algo == reg_algo[3]:
+                    rega[1].sort()
+                    pd.DataFrame(rega[1]).to_csv('data/models/model_' + model_name + '_on_' + xcal_csv.name + '_and_' + ycal_csv.name + '_data_''Wavelengths_index.csv', sep = ';')
                 st.write('Model Exported')
+                
 
                 # create a report with information on the model
                 ## see https://stackoverflow.com/a/59578663
diff --git a/pages/3-prediction.py b/pages/3-prediction.py
index 7c1ee509abb24adc7a61295e59301581d961c60b..e7f32f7cf795fbc0caa18aef6a68b1e36e360aff 100644
--- a/pages/3-prediction.py
+++ b/pages/3-prediction.py
@@ -3,38 +3,57 @@ st.set_page_config(page_title="NIRS Utils", page_icon=":goat:", layout="wide")
 from Modules import *
 from Class_Mod.DATA_HANDLING import *
 
+
+
+st.header("Making predictions", divider='blue')
+model_column, space, file_column= st.columns((2, 1, 1))
+
 #M9, M10, M11 = st.columns([2,2,2])
-# Prediction module - TO BE DONE !!!!!
-with st.container():
-    st.header("Predictions making")
-    st.write("---")
-    st.write("Predict chemical values from NIRS")
-    model_column, space, file_column= st.columns((2, 1, 1))
-    NIRS_csv = file_column.file_uploader("Select NIRS Data to predict", type="csv", help=" :mushroom: select a csv matrix with samples as rows and lambdas as columns")
-    export_folder = './data/predictions/'
-    export_name = 'Predictions_of_'
-    if NIRS_csv:
+NIRS_csv = file_column.file_uploader("Select NIRS Data to predict", type="csv", help=" :mushroom: select a csv matrix with samples as rows and lambdas as columns")
+
+export_folder = './data/predictions/'
+export_name = 'Predictions_of_'
+
+reg_algo = ["Interval-PLS"]
+
+if NIRS_csv:
         export_name += str(NIRS_csv.name[:-4])
         qsep = file_column.selectbox("Select csv separator - _detected_: " + str(find_delimiter('data/'+NIRS_csv.name)), options=[";", ","], index=[";", ","].index(str(find_delimiter('data/'+NIRS_csv.name))), key=2)
         qhdr = file_column.selectbox("indexes column in csv? - _detected_: " + str(find_col_index('data/'+NIRS_csv.name)), options=["no", "yes"], index=["no", "yes"].index(str(find_col_index('data/'+NIRS_csv.name))), key=3)
+        if qhdr == 'yes':
+            col = 0
+        else:
+            col = False
+        pred_data = pd.read_csv(NIRS_csv, sep=qsep, index_col=col)
 
         # Load the model with joblib
         model_column.write("Load your saved predictive model")
+        
         model_name_import = model_column.selectbox('Choose file:', options=os.listdir('data/models/'), key = 21)
         if model_name_import != ' ':
             export_name += '_with_' + str(model_name_import[:-4])
             with open('data/models/'+ model_name_import,'rb') as f:
                 model_loaded = joblib.load(f)
             if model_loaded:
+                s = model_column.checkbox('Is the model of IPLS type?')
                 model_column.success("The model has been loaded successfully", icon="✅")
-    result = ''
+                if s:
+                      index = model_column.file_uploader("Select wavelengths index file", type="csv")
+                      if index:
+                        idx = pd.read_csv(index, sep=';', index_col=0).iloc[:,0].to_numpy()
+
+result = ''
 
-    if st.button("Predict"):
+if st.button("Predict"):
+        if s:
+             
+             result = model_loaded.predict(pred_data.iloc[:,idx])
+        else:
         # use prediction function from application_functions.py to predict chemical values
-        result = prediction(NIRS_csv, qsep, qhdr, model_loaded)
+            result = model_loaded.predict(pred_data)
         st.write('Predicted values are: ')
         st.dataframe(result.T)
-        pd.DataFrame(result).to_csv(export_folder + export_name + '.csv')
+        pd.DataFrame(result).to_csv(export_folder + export_name + '.csv', sep = ';')
         # export to local drive - Download
         download_results(export_folder + export_name + '.csv', export_name + '.csv')
         # create a report with information on the prediction