diff --git a/Class_Mod/DATA_HANDLING.py b/Class_Mod/DATA_HANDLING.py
new file mode 100644
index 0000000000000000000000000000000000000000..58e6e81b47eb59521f1ee402393b40bdc346ae50
--- /dev/null
+++ b/Class_Mod/DATA_HANDLING.py
@@ -0,0 +1,51 @@
+from Packages import *
+
+
+## try to automatically detect the field separator within the CSV
+def find_delimiter(filename):
+    sniffer = csv.Sniffer()
+    with open(filename) as fp:
+        delimiter = sniffer.sniff(fp.read(5000)).delimiter
+    return delimiter
+
+## heuristic: if the first column of the CSV is not numeric, assume it is an index column
+def find_col_index(filename):
+    with open(filename) as fp:
+        lines = pd.read_csv(fp, skiprows=3, nrows=3, index_col=False, sep=str(find_delimiter(filename)))
+        col_index = 'yes' if lines.iloc[:, 0].dtype != np.float64 else 'no'
+    return col_index
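+
+# Illustrative usage (hypothetical file name): the two helpers combine when loading a CSV:
+#   sep = find_delimiter('data/spectra.csv')
+#   hdr = find_col_index('data/spectra.csv')  # 'yes' if the first column is an index
+#   df = pd.read_csv('data/spectra.csv', sep=sep, index_col=0 if hdr == 'yes' else None)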
+
+
+# detect column categories (numerical vs categorical) and scale the numerical ones
+def col_cat(data_import):
+    # detect numerical and categorical columns in the csv
+    numerical_columns_list = []
+    categorical_columns_list = []
+    for i in data_import.columns:
+        if data_import[i].dtype == np.dtype("float64") or data_import[i].dtype == np.dtype("int64"):
+            numerical_columns_list.append(data_import[i])
+        else:
+            categorical_columns_list.append(data_import[i])
+    if len(numerical_columns_list) == 0:
+        # pd.concat below needs Series, not bare lists: fall back to a zero-filled Series
+        numerical_columns_list.append(pd.Series(0, index=data_import.index, name='no numerical'))
+    if len(categorical_columns_list) > 0:
+        categorical_data = pd.concat(categorical_columns_list, axis=1)
+    else:
+        categorical_data = pd.DataFrame('', index=data_import.index, columns=['no categories'])
+    # Create the numerical data matrix and fill NAs with each column's mean
+    numerical_data = pd.concat(numerical_columns_list, axis=1)
+    numerical_data = numerical_data.apply(lambda x: x.fillna(x.mean()))
+    # Scale the numerical data
+    scaler = StandardScaler()
+    scaled_values = scaler.fit_transform(numerical_data)
+    return numerical_data, categorical_data, scaled_values
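+
+# Minimal sketch of what col_cat returns, assuming a small mixed-type frame:
+#   num, cat, scaled = col_cat(pd.DataFrame({'x': [1.0, np.nan, 3.0], 'id': ['a', 'b', 'c']}))
+# num is the NaN-filled numeric column, cat the string column, and scaled the
+# standardized (zero mean, unit variance) numeric values.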
+
+
+def list_files(mypath, import_type):
+    files = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f.endswith(import_type + '.pkl')]
+    if files == []:
+        files = ['Please create a model first - no model available yet']
+    return files
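+
+# Illustrative call: list_files('data/models', 'pickle') matches files such as
+# 'model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl', i.e. names ending in '<import_type>.pkl'.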
diff --git a/Class_Mod/KMEANS_.py b/Class_Mod/KMEANS_.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Class_Mod/LWPLSR_.py b/Class_Mod/LWPLSR_.py
new file mode 100644
index 0000000000000000000000000000000000000000..3f133395f729b40848f12c728d9d21f8b69b8ff1
--- /dev/null
+++ b/Class_Mod/LWPLSR_.py
@@ -0,0 +1,18 @@
+from Packages import *
+from Class_Mod.Miscellaneous import *
+
+def model_LWPLSR(xcal_csv, ycal_csv, sep, hdr):
+    # hdr == 'yes' means the CSV's first column is an index, used as index_col below
+    if hdr == 'yes':
+        col = 0
+    else:
+        col = False
+    # loading the csv
+    x, y = utils.load_csv(xcal_csv, ycal_csv, autoremove_na=True, sep=sep, x_hdr=0, y_hdr=0, x_index_col=col, y_index_col=col)
+    # Split data into training and test sets using the kennard_stone method and correlation metric, 25% of data is used for testing
+    train_index, test_index = train_test_split_idx(x, y=y, method="kennard_stone", metric="correlation", test_size=0.25, random_state=42)
+    # Assign data to training and test sets
+    X_train, y_train, X_test, y_test = x[train_index], y[train_index], x[test_index], y[test_index]
+    st.write("Size of train and test sets: train " + str(X_train.shape) + ' ' + str(y_train.shape) + ' / test ' + str(X_test.shape) + ' ' + str(y_test.shape))
+
+    # pass the metric by name; the bare identifier `eucl` is undefined in Python
+    fm = Jchemo.lwplsr(X_train, y_train, nlvdis=4, metric="eucl", k=10)
+    return fm
diff --git a/Class_Mod/Miscellaneous.py b/Class_Mod/Miscellaneous.py
new file mode 100644
index 0000000000000000000000000000000000000000..f3f9d512098164f518c92bc9501394a73161407b
--- /dev/null
+++ b/Class_Mod/Miscellaneous.py
@@ -0,0 +1,35 @@
+from Packages import *
+
+# local CSS
+## load the custom CSS in the style folder
+def local_css(file_name):
+    with open(file_name) as f:
+        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
+local_css("style/style.css")
+
+# Cross-Validation of the model
+def CV_model(estimator, x, y, cv):
+    st.write('Cross-Validation of this model')
+    st.write("CV_scores", cross_val_score(estimator, x, y, cv=cv))
+    st.write("-- CV predict --")
+    Y_preds = cross_val_predict(estimator, x, y, cv=cv)
+    st.write("MAE", mean_absolute_error(y, Y_preds))
+    st.write("MSE", mean_squared_error(y, Y_preds))
+    st.write("MAPE", mean_absolute_percentage_error(y, Y_preds))
+    st.write("R²", r2_score(y, Y_preds))
+    st.write("-- Cross Validate --")
+    cv_results = cross_validate(estimator, x, y, cv=cv, return_train_score=True, n_jobs=3)
+    for key in cv_results.keys():
+        st.write(key, cv_results[key])
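+# Note: cross_validate returns a dict whose keys include 'fit_time', 'score_time',
+# 'test_score' and, with return_train_score=True, 'train_score'; the loop above
+# prints each of these entries.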
+
+# predict module
+def prediction(NIRS_csv, qsep, qhdr, model):
+    # qhdr == 'yes' means the CSV's first column is an index, used as index_col below
+    if qhdr == 'yes':
+        col = 0
+    else:
+        col = False
+    X_test = pd.read_csv(NIRS_csv, sep=qsep, index_col=col)
+    Y_preds = model.predict(X_test)
+    # Y_preds = X_test
+    return Y_preds
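+
+# Illustrative usage (hypothetical spectra file): load a previously pickled model
+# from data/models/ and predict on a new NIRS CSV:
+#   import pickle
+#   with open('data/models/model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl', 'rb') as f:
+#       model = pickle.load(f)
+#   preds = prediction('Xval.csv', qsep=';', qhdr='yes', model=model)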
diff --git a/Class_Mod/PCA_.py b/Class_Mod/PCA_.py
new file mode 100644
index 0000000000000000000000000000000000000000..cb578c9a55f2555fb9fb78af48380a5e4b468a9b
--- /dev/null
+++ b/Class_Mod/PCA_.py
@@ -0,0 +1,22 @@
+from Packages import *
+from Class_Mod.DATA_HANDLING import *
+
+
+def pca_maker(data_import):
+    numerical_data, categorical_data, scaled_values = col_cat(data_import)
+    # Compute a 6 components PCA on scaled values
+    pca = PCA(n_components=6)
+    pca_fit = pca.fit(scaled_values)
+    pca_data = pca_fit.transform(scaled_values)
+    pca_data = pd.DataFrame(pca_data, index=numerical_data.index)
+    # Set PCA column names with component number and explained variance %
+    new_column_names = ["PCA_" + str(i) + ' - ' + str(round(pca_fit.explained_variance_ratio_[i-1] * 100, 2)) + '%' for i in range(1, len(pca_data.columns) + 1)]
+    # Format the output
+    column_mapper = dict(zip(list(pca_data.columns), new_column_names))
+    pca_data = pca_data.rename(columns=column_mapper)
+    output = pd.concat([data_import, pca_data], axis=1)
+    return output, list(categorical_data.columns), new_column_names
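+
+# Illustrative follow-up (assumed usage): the returned frame can feed a scatter
+# plot of the first two components, coloured by one of the categorical columns:
+#   output, cat_cols, pc_cols = pca_maker(data_import)
+#   fig = px.scatter(output, x=pc_cols[0], y=pc_cols[1], color=cat_cols[0])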
+
+
+
+####################################################################################################################################################################
diff --git a/Class_Mod/PLSR_.py b/Class_Mod/PLSR_.py
new file mode 100644
index 0000000000000000000000000000000000000000..c76718a3a78625de7c723a2aa7da598b7e524e3e
--- /dev/null
+++ b/Class_Mod/PLSR_.py
@@ -0,0 +1,58 @@
+from Packages import *
+from Class_Mod.Miscellaneous import *
+
+
+# create model module with PINARD
+def model_PLSR(xcal_csv, ycal_csv, sep, hdr, rd_seed):
+    np.random.seed(rd_seed)
+    # hdr == 'yes' means the CSV's first column is an index, used as index_col below
+    if hdr == 'yes':
+        col = 0
+    else:
+        col = False
+    # loading the csv
+    x, y = utils.load_csv(xcal_csv, ycal_csv, autoremove_na=True, sep=sep, x_hdr=0, y_hdr=0, x_index_col=col, y_index_col=col)
+    # Split data into training and test sets using the kennard_stone method and correlation metric, 25% of data is used for testing
+    train_index, test_index = train_test_split_idx(x, y=y, method="kennard_stone", metric="correlation", test_size=0.25, random_state=rd_seed)
+    # Assign data to training and test sets
+    X_train, y_train, X_test, y_test = x[train_index], y[train_index], x[test_index], y[test_index]
+    st.write("Size of train and test sets: train " + str(X_train.shape) + ' ' + str(y_train.shape) + ' / test ' + str(X_test.shape) + ' ' + str(y_test.shape))
+    # Declare preprocessing pipeline
+    svgolay = [   ('_sg1',pp.SavitzkyGolay()),
+                  ('_sg2',pp.SavitzkyGolay())  # nested pipeline to perform the Savitzky-Golay method twice for 2nd order preprocessing
+                  ]
+    preprocessing = [   ('id', pp.IdentityTransformer()), # identity transformer: no change to the data
+                        ('savgol', pp.SavitzkyGolay()), # Savitzky-Golay smoothing filter
+                        ('derivate', pp.Derivate()), # first derivative of the data
+                        ('SVG', FeatureUnion(svgolay)) # nested union: Savitzky-Golay applied twice (see svgolay above)
+                        ]
+    # Declare complete pipeline
+    pipeline = Pipeline([
+        ('scaler', MinMaxScaler()), # scaling the data
+        ('preprocessing', FeatureUnion(preprocessing)), # preprocessing
+        ('PLS',  PLSRegression()) # regressor
+    ])
+    # Estimator including y values scaling
+    estimator = TransformedTargetRegressor(regressor = pipeline, transformer = MinMaxScaler())
+    # Training
+    trained = estimator.fit(X_train, y_train)
+    # fit scores
+    st.write("Test set R²: " + str(estimator.score(X_test, y_test)))
+    # Predictions on test set
+    Y_preds = estimator.predict(X_test) # make predictions on test data and assign to Y_preds variable
+    # Test-set error metrics, shown as a table
+    met = {"MAE": [mean_absolute_error(y_test, Y_preds)],
+           "MSE": [mean_squared_error(y_test, Y_preds)],
+           "MAPE": [mean_absolute_percentage_error(y_test, Y_preds)]}
+    met = pd.DataFrame(met).T
+    st.table(met)
+
+    # Cross-Validate the model
+    CV_model(estimator, X_train, y_train, 3)
+
+    return trained
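+
+# Illustrative usage (hypothetical paths): train on calibration CSVs and pickle
+# the fitted estimator, mirroring the .pkl files under data/models/:
+#   import pickle
+#   trained = model_PLSR('Xcal.csv', 'Ycal.csv', sep=';', hdr='yes', rd_seed=42)
+#   with open('data/models/model_example_pickle.pkl', 'wb') as f:
+#       pickle.dump(trained, f)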
diff --git a/Class_Mod/SK_PLSR_.py b/Class_Mod/SK_PLSR_.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Class_Mod/UMAP_.py b/Class_Mod/UMAP_.py
new file mode 100644
index 0000000000000000000000000000000000000000..2ec9c78245a507a1c57f6f472cf56964c84f1bf7
--- /dev/null
+++ b/Class_Mod/UMAP_.py
@@ -0,0 +1,17 @@
+# UMAP function for the Sample Selection module
+from Packages import *
+from Class_Mod.DATA_HANDLING import *
+
+def umap_maker(data_import):
+    numerical_data, categorical_data, scaled_values = col_cat(data_import)
+    umap_func = UMAP(random_state=42, n_neighbors=20, n_components=4, min_dist=0.0)
+    umap_fit = umap_func.fit(scaled_values)
+    umap_data = umap_fit.transform(scaled_values)
+    umap_data = pd.DataFrame(umap_data, index=numerical_data.index)
+    # Set UMAP column names with component number
+    new_column_names = ["UMAP_" + str(i) for i in range(1, len(umap_data.columns) + 1)]
+    # Format the output
+    column_mapper = dict(zip(list(umap_data.columns), new_column_names))
+    umap_data = umap_data.rename(columns=column_mapper)
+    output = pd.concat([data_import, umap_data], axis=1)
+    return output, list(categorical_data.columns), new_column_names
\ No newline at end of file
diff --git a/Class_Mod/VarSel.py b/Class_Mod/VarSel.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/Class_Mod/__init__.py b/Class_Mod/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..22392d0ce5ec1158e8ada04776456723f889969e
--- /dev/null
+++ b/Class_Mod/__init__.py
@@ -0,0 +1,6 @@
+from .PCA_ import *
+from .KMEANS_ import *
+from .UMAP_ import *
+from .DATA_HANDLING import *
+from .PLSR_ import model_PLSR
+from .LWPLSR_ import model_LWPLSR
diff --git a/Modules.py b/Modules.py
new file mode 100644
index 0000000000000000000000000000000000000000..c31f7ccd8665031ce8170929523392d7556f6b92
--- /dev/null
+++ b/Modules.py
@@ -0,0 +1,5 @@
+from Class_Mod import pca_maker, umap_maker, find_col_index, model_PLSR, model_LWPLSR, list_files
+from Class_Mod.Miscellaneous import prediction, CV_model
+from urllib.request import urlretrieve
\ No newline at end of file
diff --git a/Packages.py b/Packages.py
new file mode 100644
index 0000000000000000000000000000000000000000..7746b733a37f8e44a0ee8c6ad4a923ab8bdd40e6
--- /dev/null
+++ b/Packages.py
@@ -0,0 +1,45 @@
+
+## Data loading, handling, and preprocessing
+import sys
+import csv
+import numpy as np
+import pandas as pd
+from os import listdir
+from os.path import isfile, join
+from sklearn.preprocessing import StandardScaler
+
+### Exploratory data analysis-Dimensionality reduction
+from umap.umap_ import UMAP
+from sklearn.decomposition import PCA
+
+# Clustering
+from sklearn.cluster import KMeans as km
+#import hdbscan
+
+# Modelling
+import julia
+from julia import Jchemo
+
+from pinard import utils
+from pinard import preprocessing as pp
+from pinard.model_selection import train_test_split_idx
+
+from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, cross_validate
+from sklearn.preprocessing import MinMaxScaler
+from sklearn.pipeline import Pipeline, FeatureUnion
+from sklearn.compose import TransformedTargetRegressor
+from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_absolute_percentage_error, r2_score
+from sklearn.cross_decomposition import PLSRegression
+
+## Images and plots 
+from PIL import Image
+import plotly.express as px
+
+### Important Metrics
+from sklearn.metrics import pairwise_distances_argmin_min
+
+## Web app construction
+import streamlit as st
+
+
+# help on streamlit input https://docs.streamlit.io/library/api-reference/widgets
\ No newline at end of file
diff --git a/README.md b/README.md
index a524f8e61a1063fd6db9987b7dbe8b9abb7261de..ad7e3cb6ac1a220ac4cd9d51ad75af935580ce06 100644
--- a/README.md
+++ b/README.md
@@ -1,13 +1,13 @@
 # NIRS_Workflow
 
 ## Getting started
-This package aims to provide a workflow for users who want to perform chemical analyses and predict characteristics using the NIRS technique.  
+This package aims to provide a workflow for users who want to perform chemical analyses and predict characteristics using the NIRS technique.
 
 The process includes:
 - sample selection - you can upload all your NIRS spectra and it'll help to select the samples to analyse chemically.
 
 
-- model creation - the PINARD (https://github.com/GBeurier/pinard) package creates a prediction model with spectra and related chemical analysis.- 
+- model creation - the PINARD (https://github.com/GBeurier/pinard) package creates a prediction model with spectra and related chemical analysis.
 
 
 - predictions - the PINARD package uses the model to predict chemical values for unknown samples.
@@ -17,13 +17,13 @@ This package is written in python. You can clone the repository: git clone https
 
 Then install the requirements: pip install -r requirements.txt
 To use Locally weighted PLS Regression for creation model, you will need to install Jchemo.jl (https://github.com/mlesnoff/Jchemo.jl), a Julia package.
-From the CLI: python  
-> '>>> import julia  
-'>>> julia.install()  
-'>>> from julia import Pkg  
-'>>> Pkg.add("Jchemo")  
+From the CLI: python
+- `>>> import julia`
+- `>>> julia.install()`
+- `>>> from julia import Pkg`
+- `>>> Pkg.add("Jchemo")`
 
-To check if Jchemo is installed without errors:  
+To check if Jchemo is installed without errors:
 > '>>> Pkg.status()
 
 You can then run: streamlit run ./app.py from the CLI.
@@ -39,7 +39,4 @@ Contributors:
 - 
 
 ## License
-CC BY
-
-
-PUSH
\ No newline at end of file
+CC BY
\ No newline at end of file
diff --git a/__init__.py b/__init__.py
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391
diff --git a/app.py b/app.py
index 8338188fe57a6c719b1eb5804ebcbb2611a2a42d..654599da5b0c1dd146da53bc7046dd5d57c7422f 100644
--- a/app.py
+++ b/app.py
@@ -1,14 +1,9 @@
-import streamlit as st
-# help on streamlit input https://docs.streamlit.io/library/api-reference/widgets
-from PIL import Image
-# emojis code here : https://www.webfx.com/tools/emoji-cheat-sheet/
+from Packages import *
 st.set_page_config(page_title="NIRS Utils", page_icon=":goat:", layout="wide")
-import numpy as np
-import pandas as pd
-import plotly.express as px
-from sklearn.cluster import KMeans as km
-from sklearn.metrics import pairwise_distances_argmin_min
-from application_functions import pca_maker, model_PLSR, model_LWPLSR, prediction, find_delimiter, umap_maker, find_col_index, list_files, CV_model
+from Modules import *
+from Class_Mod.DATA_HANDLING import * 
 
 # load images for web interface
 img_sselect = Image.open("images\sselect.JPG")
@@ -18,21 +13,25 @@ img_predict = Image.open("images\predict.JPG")
 # TOC menu on the left
 with st.sidebar:
     st.markdown("[Sample Selection](#sample-selection)")
-    st.markdown("[Model Creation](#create-a-model)")
-    st.markdown("[Prediction](#predict)")
+    st.markdown("[Model Development](#create-a-model)")
+    st.markdown("[Prediction](#predict)")
+
+
 # Page header
 with st.container():
-    st.subheader("Plateforme d'Analyses Chimiques pour l'Ecologie :goat:")
+    st.subheader("Plateforme d'Analyses Chimiques pour l'Ecologie-PACE :goat:")
     st.title("NIRS Utils")
-    st.write("Sample selections, Modelisations & Predictions using [Pinard](https://github.com/GBeurier/pinard) and PACE NIRS Database.")
+    st.write("Sample selection, predictive modelling & prediction using [Pinard](https://github.com/GBeurier/pinard) and the PACE NIRS Database.")
     st.image(img_general)
+
+
 # graphical delimiter
 st.write("---")
 # Sample Selection module
 with st.container():
     st.header("Sample Selection")
     st.image(img_sselect)
-    st.write("Sample selection using PCA and K-Means algorythms")
+    st.write("Sample selection using PCA and K-Means algorithms")
     # split 2 columns 4:1 ratio
     scatter_column, settings_column = st.columns((4, 1))
     scatter_column.write("**Multi-Dimensional Analysis**")
@@ -131,7 +130,7 @@ with st.container():
             else:
                 scatter_column.write("_Please Choose a file_")
             # clustering via UMAP / HDBSCAN -- TO BE DONE !!!
-            if type_cluster == 'umap':
+            if type_cluster == 'hdbscan':
                 import hdbscan
                 # plot de pc with colored clusters and selected samples
                 # graph_selected = px.scatter(data_frame=pc_data, x=pc_1, y=pc_2, template="simple_white", height=800, color=kmeans_samples.labels_, hover_name=pc_data.index, title="PC projection with K-Means Clusters and selected samples")
@@ -143,6 +142,9 @@ with st.container():
                 plot = scatter_column.plotly_chart(graph_selected)
 # graphical delimiter
 st.write("---")
+
+
+
 # Model creation module
 with st.container():
     st.header("Create a model")
@@ -179,6 +181,10 @@ with st.container():
 
 # graphical delimiter
 st.write("---")
+
+
+
+
 # Prediction module - TO BE DONE !!!!!
 with st.container():
     st.header("Predict")
@@ -211,7 +217,6 @@ with st.container():
         pd.DataFrame(result).to_csv(export_name + '.csv')
         st.write('Predictions exported to ' + export_name + '.csv')
         # export to local drive
-        from urllib.request import urlretrieve
         url = ('http://localhost:8501' + export_name[1:] + '.csv')
         filename = export_name + '.csv'
         urlretrieve(url, filename)
diff --git a/application_functions.py b/application_functions.py
deleted file mode 100644
index caecb1c7b924157711a6f4689929b2cfa237ebab..0000000000000000000000000000000000000000
--- a/application_functions.py
+++ /dev/null
@@ -1,198 +0,0 @@
-import streamlit as st
-import numpy as np
-import pandas as pd
-from sklearn.decomposition import PCA
-from sklearn.preprocessing import StandardScaler
-import csv
-from umap.umap_ import UMAP
-
-# local CSS
-## load the custom CSS in the style folder
-def local_css(file_name):
-    with open(file_name) as f:
-        st.markdown(f"<style>{f.read()}</style>", unsafe_allow_html=True)
-local_css("style/style.css")
-
-## try to automatically detect the field separator within the CSV
-def find_delimiter(filename):
-    sniffer = csv.Sniffer()
-    with open(filename) as fp:
-        delimiter = sniffer.sniff(fp.read(5000)).delimiter
-    return delimiter
-def find_col_index(filename):
-    with open(filename) as fp:
-        lines = pd.read_csv(fp, skiprows=3, nrows=3, index_col=False, sep=str(find_delimiter(filename)))
-        col_index = 'yes' if lines.iloc[:,0].dtypes != np.float64 else 'no'
-    return col_index
-# detection of columns categories and scaling
-def col_cat(data_import):
-    # detect numerical and categorical columns in the csv
-    numerical_columns_list = []
-    categorical_columns_list = []
-    for i in data_import.columns:
-        if data_import[i].dtype == np.dtype("float64") or data_import[i].dtype == np.dtype("int64"):
-            numerical_columns_list.append(data_import[i])
-        else:
-            categorical_columns_list.append(data_import[i])
-    if len(numerical_columns_list) == 0:
-        empty = [0 for x in range(len(data_import))]
-        numerical_columns_list.append(empty)
-    if len(categorical_columns_list) > 0:
-        categorical_data = pd.concat(categorical_columns_list, axis=1)
-    if len(categorical_columns_list) == 0:
-        empty = ["" for x in range(len(data_import))]
-        categorical_columns_list.append(empty)
-        categorical_data = pd.DataFrame(categorical_columns_list).T
-        categorical_data.columns = ['no categories']
-    # Create numerical data matrix from the numerical columns list and fill na with the mean of the column
-    numerical_data = pd.concat(numerical_columns_list, axis=1)
-    numerical_data = numerical_data.apply(lambda x: x.fillna(x.mean())) #np.mean(x)))
-    # Scale the numerical data
-    scaler = StandardScaler()
-    scaled_values = scaler.fit_transform(numerical_data)
-    return numerical_data, categorical_data, scaled_values
-
-# UMAP function for the Sample Selection module
-def umap_maker(data_import):
-    numerical_data, categorical_data, scaled_values = col_cat(data_import)
-    umap_func = UMAP(random_state=42, n_neighbors=20, n_components=4, min_dist=0.0,)
-    umap_fit = umap_func.fit(scaled_values)
-    umap_data = umap_fit.transform(scaled_values)
-    umap_data = pd.DataFrame(umap_data, index=numerical_data.index)
-    # Set UMAP column names with component number
-    new_column_names = ["UMAP_" + str(i) for i in range(1, len(umap_data.columns) + 1)]
-    # Format the output
-    column_mapper = dict(zip(list(umap_data.columns), new_column_names))
-    umap_data = umap_data.rename(columns=column_mapper)
-    output = pd.concat([data_import, umap_data], axis=1)
-    return output, list(categorical_data.columns), new_column_names
-# PCA function for the Sample Selection module
-def pca_maker(data_import):
-    numerical_data, categorical_data, scaled_values = col_cat(data_import)
-    # Compute a 6 components PCA on scaled values
-    pca = PCA(n_components=6)
-    pca_fit = pca.fit(scaled_values)
-    pca_data = pca_fit.transform(scaled_values)
-    pca_data = pd.DataFrame(pca_data, index=numerical_data.index)
-    # Set PCA column names with component number and explained variance %
-    new_column_names = ["PCA_" + str(i) + ' - ' + str(round(pca_fit.explained_variance_ratio_[i-1], 3) *100) + '%' for i in range(1, len(pca_data.columns) + 1)]
-    # Format the output
-    column_mapper = dict(zip(list(pca_data.columns), new_column_names))
-    pca_data = pca_data.rename(columns=column_mapper)
-    output = pd.concat([data_import, pca_data], axis=1)
-    return output, list(categorical_data.columns), new_column_names
-
-# create model module with PINARD
-def model_PLSR(xcal_csv, ycal_csv, sep, hdr, rd_seed):
-    from pinard import utils
-    from pinard import preprocessing as pp
-    from pinard.model_selection import train_test_split_idx
-    from sklearn.model_selection import train_test_split, cross_val_score, cross_val_predict, cross_validate
-    from sklearn.pipeline import Pipeline, FeatureUnion
-    from sklearn.preprocessing import MinMaxScaler
-    from sklearn.compose import TransformedTargetRegressor
-    from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_absolute_percentage_error, r2_score
-    from sklearn.cross_decomposition import PLSRegression
-    np.random.seed(rd_seed)
-    # hdr var correspond to column header True or False in the CSV
-    if hdr == 'yes':
-        col = 0
-    else:
-        col = False
-    # loading the csv
-    x, y = utils.load_csv(xcal_csv, ycal_csv, autoremove_na=True, sep=sep, x_hdr=0, y_hdr=0, x_index_col=col, y_index_col=col)
-    # Split data into training and test sets using the kennard_stone method and correlation metric, 25% of data is used for testing
-    train_index, test_index = train_test_split_idx(x, y=y, method="kennard_stone", metric="correlation", test_size=0.25, random_state=rd_seed)
-    # Assign data to training and test sets
-    X_train, y_train, X_test, y_test = x[train_index], y[train_index], x[test_index], y[test_index]
-    st.write("Size of train and test sets: train " + str(X_train.shape) + ' ' + str(y_train.shape) + ' / test ' + str(X_test.shape) + ' ' + str(y_test.shape))
-    # Declare preprocessing pipeline
-    svgolay = [   ('_sg1',pp.SavitzkyGolay()),
-                  ('_sg2',pp.SavitzkyGolay())  # nested pipeline to perform the Savitzky-Golay method twice for 2nd order preprocessing
-                  ]
-    preprocessing = [   ('id', pp.IdentityTransformer()), # Identity transformer, no change to the data
-                        ('savgol', pp.SavitzkyGolay()), # Savitzky-Golay smoothing filter
-                        ('derivate', pp.Derivate()), # Calculate the first derivative of the data
-                        ('SVG', FeatureUnion(svgolay))
-                        # Pipeline([('_sg1',pp.SavitzkyGolay()),('_sg2',pp.SavitzkyGolay())])  # nested pipeline to perform the Savitzky-Golay method twice for 2nd order preprocessing
-                        ]
-    # Declare complete pipeline
-    pipeline = Pipeline([
-        ('scaler', MinMaxScaler()), # scaling the data
-        ('preprocessing', FeatureUnion(preprocessing)), # preprocessing
-        ('PLS',  PLSRegression()) # regressor
-    ])
-    # Estimator including y values scaling
-    estimator = TransformedTargetRegressor(regressor = pipeline, transformer = MinMaxScaler())
-    # Training
-    trained = estimator.fit(X_train, y_train)
-    # fit scores
-    st.write("fit scores / R²: " + str(estimator.score(X_test,y_test)))
-    # Predictions on test set
-    Y_preds = estimator.predict(X_test) # make predictions on test data and assign to Y_preds variable
-    st.write("MAE: " + str(mean_absolute_error(y_test, Y_preds)))
-    st.write("MSE: " + str(mean_squared_error(y_test, Y_preds)))
-    st.write("MAPE: " + str(mean_absolute_percentage_error(y_test, Y_preds)))
-
-    # Cross-Validate the model
-    CV_model(estimator, X_train, y_train, 3)
-
-    return (trained)
-
-# Cross-Validation of the model
-def CV_model(estimator, x, y, cv):
-    from sklearn.model_selection import cross_val_score, cross_val_predict, cross_validate
-    from sklearn.metrics import mean_absolute_error, mean_squared_error, mean_absolute_percentage_error, r2_score
-    st.write('Cross-Validation of this model')
-    st.write("CV_scores", cross_val_score(estimator, x, y, cv=cv))
-    st.write("-- CV predict --")
-    Y_preds = cross_val_predict(estimator, x, y, cv=3)
-    st.write("MAE", mean_absolute_error(y, Y_preds))
-    st.write("MSE", mean_squared_error(y, Y_preds))
-    st.write("MAPE", mean_absolute_percentage_error(y, Y_preds))
-    st.write("R²", r2_score(y, Y_preds))
-    st.write("-- Cross Validate --")
-    cv_results = cross_validate(estimator, x, y, cv=cv, return_train_score=True, n_jobs=3)
-    for key in cv_results.keys():
-        st.write(key, cv_results[key])
-
-def model_LWPLSR(xcal_csv, ycal_csv, sep, hdr):
-    import julia
-    from julia import Jchemo
-    from pinard import utils
-    from pinard.model_selection import train_test_split_idx
-    # hdr var correspond to column header True or False in the CSV
-    if hdr == 'yes':
-        col = 0
-    else:
-        col = False
-    # loading the csv
-    x, y = utils.load_csv(xcal_csv, ycal_csv, autoremove_na=True, sep=sep, x_hdr=0, y_hdr=0, x_index_col=col, y_index_col=col)
-    # Split data into training and test sets using the kennard_stone method and correlation metric, 25% of data is used for testing
-    train_index, test_index = train_test_split_idx(x, y=y, method="kennard_stone", metric="correlation", test_size=0.25, random_state=42)
-    # Assign data to training and test sets
-    X_train, y_train, X_test, y_test = x[train_index], y[train_index], x[test_index], y[test_index]
-    st.write("Size of train and test sets: train " + str(X_train.shape) + ' ' + str(y_train.shape) + ' / test ' + str(X_test.shape) + ' ' + str(y_test.shape))
-
-    Jchemo.lwplsr(X_train, y_train, nlvdis=4, metric = eucl, k = 10)
-
-
-# predict module
-def prediction(NIRS_csv, qsep, qhdr, model):
-    # hdr var correspond to column header True or False in the CSV
-    if qhdr == 'yes':
-        col = 0
-    else:
-        col = False
-    X_test = pd.read_csv(NIRS_csv, sep=qsep, index_col=col)
-    Y_preds = model.predict(X_test)
-    # Y_preds = X_test
-    return Y_preds
-
-def list_files(mypath, import_type):
-    from os import listdir
-    from os.path import isfile, join
-    list_files = [f for f in listdir(mypath) if isfile(join(mypath, f)) and f.endswith(import_type + '.pkl')]
-    if list_files == []:
-        list_files = ['Please, create a model before - no model available yet']
-    return list_files
\ No newline at end of file
diff --git a/data/models/model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl b/data/models/model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..efbc87514c098e5c790c56c8ca26c0f86f36a340
Binary files /dev/null and b/data/models/model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl differ
diff --git a/data/models/model_okfposk_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl b/data/models/model_okfposk_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl
new file mode 100644
index 0000000000000000000000000000000000000000..efbc87514c098e5c790c56c8ca26c0f86f36a340
Binary files /dev/null and b/data/models/model_okfposk_on_Xcal.csv_and_Ycal.csv_data_pickle.pkl differ
diff --git a/data/predictions/Predictions_of_Xcal_with_model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.csv b/data/predictions/Predictions_of_Xcal_with_model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.csv
new file mode 100644
index 0000000000000000000000000000000000000000..01c9099aba7aa9e9bb06308d2d99fdc6690edfe0
--- /dev/null
+++ b/data/predictions/Predictions_of_Xcal_with_model_jh_on_Xcal.csv_and_Ycal.csv_data_pickle.csv
@@ -0,0 +1,362 @@
+,0
+0,46.767241373753365
+1,43.33236667589227
+2,47.69702657025721
+3,42.40560282088099
+4,49.117889047437934
+5,45.41092460973661
+6,45.33593131421402
+7,46.84716401517402
+8,47.55778099910814
+9,43.66400920123191
+10,46.389948147643885
+11,46.93134883054068
+12,44.68002922429634
+13,44.707462248092675
+14,47.00659469452659
+15,45.11320879514305
+16,45.8935707706589
+17,46.07742471331517
+18,46.11265896577673
+19,46.27604577435942
+20,48.02201720885121
+21,45.99579895733129
+22,46.00402183723334
+23,46.483910628223505
+24,43.59996446967316
+25,46.44343817511909
+26,45.26316387805746
+27,47.25538362539533
+28,46.61646062786265
+29,45.2255452820459
+30,47.31236917695941
+31,46.82060048708076
+32,44.32448588013557
+33,46.82981060074309
+34,44.872902495897954
+35,46.54179329103721
+36,44.80908398747734
+37,47.308088930673506
+38,46.9631078693342
+39,46.04629576575285
+40,45.470485120803296
+41,47.80573031577645
+42,47.44233421041168
+43,45.976499198639715
+44,46.17366784901994
+45,47.21785674917406
+46,47.660257303469834
+47,47.018793621031534
+48,45.324583177719504
+49,44.63386139737206
+50,46.806440424306615
+51,46.757647986648
+52,46.3345558182011
+53,46.957312568843115
+54,45.27101888560633
+55,46.16245840321316
+56,43.41802899775329
+57,45.556319987156115
+58,46.97546097329726
+59,45.776909813959705
+60,45.23761836159523
+61,42.64587722849296
+62,46.614794240549585
+63,47.72193844631581
+64,47.5570599515473
+65,47.429843519757
+66,45.95619138515237
+67,45.585440007380285
+68,46.22104096184447
+69,45.575702172801265
+70,47.6980790423098
+71,45.533416530723066
+72,45.47488420381561
+73,44.55515653767657
+74,44.39530657933428
+75,46.676915986327096
+76,47.288543667660775
+77,47.02081466089327
+78,44.8998787155843
+79,45.91300037787515
+80,46.79079834428309
+81,46.98453028808091
+82,46.15281630529611
+83,46.069256395867406
+84,44.813697955418675
+85,45.111678837035235
+86,47.091404503230834
+87,46.619394266773085
+88,45.06345107435841
+89,44.715128053554594
+90,43.92052613415367
+91,46.07078260158221
+92,43.333601792345256
+93,46.37279879543664
+94,45.29533969506365
+95,47.65581989149592
+96,47.26124233132717
+97,45.43754723787828
+98,47.558428644932825
+99,45.43891036793652
+100,46.08433451742114
+101,45.89705560904663
+102,46.67971196526314
+103,46.48101201248254
+104,45.966766662930574
+105,45.55153197300913
+106,45.9110387428923
+107,46.681254869580314
+108,46.54155211698982
+109,45.58907951503843
+110,46.3462053752036
+111,46.49411175649265
+112,45.81809918302984
+113,45.96874566666568
+114,44.71001099702034
+115,45.761486322565005
+116,46.36358934383356
+117,46.97151621434169
+118,46.434684320113895
+119,45.352180050305094
+120,45.100196993071314
+121,46.05693892795794
+122,44.64568921681827
+123,45.57908599770371
+124,47.25277987204887
+125,46.117492034670626
+126,44.64106025415083
+127,45.48948771021548
+128,46.81034582829701
+129,45.64425368921125
+130,45.5529990463294
+131,47.70474912071469
+132,45.68681659656469
+133,46.27003325246771
+134,45.345787878984545
+135,46.70370265288267
+136,46.045030623620875
+137,45.27480869528746
+138,45.53995144880514
+139,46.01238719461178
+140,46.120987592356855
+141,46.963970976799125
+142,44.99346242073269
+143,45.93634061230757
+144,46.55466855614486
+145,46.04363038383396
+146,46.547667743908406
+147,44.18893017542523
+148,46.05166221986108
+149,46.91993536042849
+150,45.223809725359075
+151,45.643290374726675
+152,46.96338566255745
+153,46.30388683306703
+154,47.73870866596065
+155,46.387277357483974
+156,45.1538455213669
+157,46.372628630709514
+158,46.74633199159714
+159,45.420812511666455
+160,46.19801207660013
+161,45.96084260462984
+162,45.85109821650824
+163,45.51372912754637
+164,45.98302173476873
+165,47.313021954802764
+166,45.1520188856259
+167,46.60791549623543
+168,46.30979374342804
+169,43.692628209819134
+170,45.68184516517036
+171,47.51808896720511
+172,46.190401130002584
+173,45.56051658824867
+174,46.93405635091148
+175,45.98677963371881
+176,46.33924699647545
+177,46.91893598764468
+178,45.64168468920296
+179,45.476516722205766
+180,45.588251629836996
+181,45.85678521175613
+182,46.73497546924091
+183,45.79662400165502
+184,46.629010689675304
+185,45.96533471378744
+186,46.58464606441196
+187,45.511858573175715
+188,47.104275377649095
+189,47.22758345877331
+190,46.20253467478862
+191,46.085779946847516
+192,44.75511787290008
+193,46.11259967291647
+194,46.61745762571885
+195,46.38749629771895
+196,47.096851075761336
+197,45.74434533514446
+198,45.88322456266033
+199,45.72205581327608
+200,45.31606441873041
+201,46.83763175995848
+202,46.16584089134833
+203,47.602817232982694
+204,44.52519206870877
+205,46.40543089007081
+206,45.73120894336702
+207,44.8468006875897
+208,46.19795021694417
+209,47.70715694325032
+210,47.08276377401687
+211,48.00012276953313
+212,45.90425423277304
+213,47.02168854317766
+214,46.69920348912855
+215,46.42887687185965
+216,44.74148122944109
+217,45.57394404667312
+218,46.38878546848354
+219,47.18132782477546
+220,46.89596986274944
+221,47.64513205328368
+222,45.14033104187993
+223,45.94899417712989
+224,46.39271085006625
+225,46.54281015944685
+226,45.97590748870288
+227,45.44337494903428
+228,46.78677265164845
+229,46.11325716968698
+230,46.02011798885632
+231,46.8689790445706
+232,45.837193699345654
+233,46.869311842994456
+234,46.58738309513394
+235,46.0985553856456
+236,43.171676856058454
+237,45.26438253565962
+238,46.02191992621766
+239,46.50637514957429
+240,46.702623335489946
+241,45.559940727503026
+242,46.46505908818871
+243,46.38148917737486
+244,46.47965885079799
+245,45.98928860503642
+246,46.47459236360967
+247,45.03242352882455
+248,46.62356468577243
+249,45.43046541554597
+250,44.89864436677386
+251,44.9360807554293
+252,46.65383478955101
+253,45.71276618930802
+254,47.12266661744526
+255,46.61647763389081
+256,45.66681388018936
+257,45.26994141809001
+258,47.005563427561135
+259,48.282264114159794
+260,46.423410493703884
+261,44.8978471964728
+262,47.22097732127542
+263,46.527228181810486
+264,46.589932517692766
+265,47.23597864342218
+266,46.0295360959636
+267,46.05531848450945
+268,47.22572842162108
+269,45.78999301614902
+270,45.752067568799376
+271,46.15529597529264
+272,45.640102199613125
+273,46.424177330682035
+274,44.25269834486258
+275,47.228235257326645
+276,47.00933699683573
+277,46.99849813667588
+278,46.128968478562015
+279,45.32773315134333
+280,46.55924089553013
+281,46.745561634451676
+282,45.425561122539975
+283,45.1689096828214
+284,45.74714210773718
+285,47.06613240150024
+286,46.98819733114798
+287,44.910685618894654
+288,46.882358660743854
+289,45.220315304293045
+290,47.48044684883674
+291,45.6452988002278
+292,45.82591168449881
+293,46.00226901457363
+294,46.31957855611555
+295,46.506055715331485
+296,46.92601376734856
+297,45.969781653041935
+298,46.3416636651847
+299,46.78262377153471
+300,45.96165339065992
+301,47.308786500350216
+302,47.38244193216065
+303,46.039284651809346
+304,46.287639617793886
+305,45.5084676952205
+306,46.20812641249573
+307,46.58463198165516
+308,47.36575447448282
+309,45.8732669263878
+310,45.684525233204994
+311,46.5417091597917
+312,45.71402201204293
+313,46.6738042134702
+314,46.0522390884004
+315,46.11742684738737
+316,45.64446190536256
+317,44.85974771659868
+318,47.15075071311495
+319,45.749279132510864
+320,46.22975667265146
+321,46.391906545246556
+322,46.81132687453868
+323,46.235693184472815
+324,45.73787284994606
+325,46.7078094065248
+326,46.738999886685605
+327,46.188008062786054
+328,45.990317923515725
+329,45.52791818878071
+330,45.09572722884058
+331,46.70995710689733
+332,45.86321552594819
+333,45.251612022615326
+334,46.504208616390805
+335,46.924189170912314
+336,47.23051211128891
+337,45.95596150354503
+338,47.186607443924004
+339,46.99237574498532
+340,47.3034701932495
+341,45.592073223566445
+342,44.57527950222222
+343,46.30873261334614
+344,46.06446421312888
+345,46.15053642203064
+346,46.45225840594316
+347,45.52401656865006
+348,46.220843786773564
+349,47.23874532242078
+350,46.4990213308299
+351,45.887652079472275
+352,45.790247414108165
+353,46.297356509333284
+354,45.156605441863974
+355,46.36158285461906
+356,47.39498242219727
+357,47.1096536792389
+358,47.76949228045338
+359,46.82480772437796
+360,46.581546642549455
diff --git a/data/predictions/Predictions_of_Xcal_with_model_okfposk_on_Xcal.csv_and_Ycal.csv_data_pickle.csv b/data/predictions/Predictions_of_Xcal_with_model_okfposk_on_Xcal.csv_and_Ycal.csv_data_pickle.csv
new file mode 100644
index 0000000000000000000000000000000000000000..814b86535141204c363252c64a075e7365ac0d9e
--- /dev/null
+++ b/data/predictions/Predictions_of_Xcal_with_model_okfposk_on_Xcal.csv_and_Ycal.csv_data_pickle.csv
@@ -0,0 +1,362 @@
+,0
+0,46.767241373753365
+1,43.33236667589227
+2,47.69702657025721
+3,42.40560282088099
+4,49.117889047437934
+5,45.41092460973661
+6,45.33593131421402
+7,46.84716401517402
+8,47.55778099910814
+9,43.66400920123191
+10,46.389948147643885
+11,46.93134883054068
+12,44.68002922429634
+13,44.707462248092675
+14,47.00659469452659
+15,45.11320879514305
+16,45.8935707706589
+17,46.07742471331517
+18,46.11265896577673
+19,46.27604577435942
+20,48.02201720885121
+21,45.99579895733129
+22,46.00402183723334
+23,46.483910628223505
+24,43.59996446967316
+25,46.44343817511909
+26,45.26316387805746
+27,47.25538362539533
+28,46.61646062786265
+29,45.2255452820459
+30,47.31236917695941
+31,46.82060048708076
+32,44.32448588013557
+33,46.82981060074309
+34,44.872902495897954
+35,46.54179329103721
+36,44.80908398747734
+37,47.308088930673506
+38,46.9631078693342
+39,46.04629576575285
+40,45.470485120803296
+41,47.80573031577645
+42,47.44233421041168
+43,45.976499198639715
+44,46.17366784901994
+45,47.21785674917406
+46,47.660257303469834
+47,47.018793621031534
+48,45.324583177719504
+49,44.63386139737206
+50,46.806440424306615
+51,46.757647986648
+52,46.3345558182011
+53,46.957312568843115
+54,45.27101888560633
+55,46.16245840321316
+56,43.41802899775329
+57,45.556319987156115
+58,46.97546097329726
+59,45.776909813959705
+60,45.23761836159523
+61,42.64587722849296
+62,46.614794240549585
+63,47.72193844631581
+64,47.5570599515473
+65,47.429843519757
+66,45.95619138515237
+67,45.585440007380285
+68,46.22104096184447
+69,45.575702172801265
+70,47.6980790423098
+71,45.533416530723066
+72,45.47488420381561
+73,44.55515653767657
+74,44.39530657933428
+75,46.676915986327096
+76,47.288543667660775
+77,47.02081466089327
+78,44.8998787155843
+79,45.91300037787515
+80,46.79079834428309
+81,46.98453028808091
+82,46.15281630529611
+83,46.069256395867406
+84,44.813697955418675
+85,45.111678837035235
+86,47.091404503230834
+87,46.619394266773085
+88,45.06345107435841
+89,44.71512805355459
+90,43.92052613415367
+91,46.07078260158221
+92,43.333601792345256
+93,46.37279879543664
+94,45.29533969506365
+95,47.65581989149592
+96,47.26124233132717
+97,45.43754723787828
+98,47.558428644932825
+99,45.43891036793652
+100,46.08433451742114
+101,45.89705560904663
+102,46.67971196526314
+103,46.48101201248254
+104,45.966766662930574
+105,45.55153197300913
+106,45.9110387428923
+107,46.681254869580314
+108,46.54155211698982
+109,45.58907951503843
+110,46.3462053752036
+111,46.49411175649265
+112,45.81809918302984
+113,45.96874566666568
+114,44.71001099702034
+115,45.761486322565005
+116,46.36358934383356
+117,46.97151621434169
+118,46.434684320113895
+119,45.3521800503051
+120,45.100196993071314
+121,46.05693892795794
+122,44.64568921681827
+123,45.57908599770371
+124,47.25277987204887
+125,46.117492034670626
+126,44.64106025415083
+127,45.48948771021548
+128,46.81034582829701
+129,45.64425368921125
+130,45.5529990463294
+131,47.70474912071469
+132,45.68681659656469
+133,46.27003325246771
+134,45.345787878984545
+135,46.70370265288267
+136,46.045030623620875
+137,45.27480869528746
+138,45.53995144880514
+139,46.01238719461178
+140,46.120987592356855
+141,46.963970976799125
+142,44.99346242073269
+143,45.93634061230757
+144,46.55466855614486
+145,46.04363038383396
+146,46.547667743908406
+147,44.18893017542523
+148,46.05166221986108
+149,46.91993536042849
+150,45.223809725359075
+151,45.643290374726675
+152,46.96338566255745
+153,46.30388683306703
+154,47.73870866596065
+155,46.387277357483974
+156,45.1538455213669
+157,46.372628630709514
+158,46.74633199159714
+159,45.420812511666455
+160,46.19801207660013
+161,45.96084260462984
+162,45.85109821650824
+163,45.51372912754637
+164,45.98302173476873
+165,47.313021954802764
+166,45.1520188856259
+167,46.60791549623543
+168,46.30979374342804
+169,43.692628209819134
+170,45.68184516517036
+171,47.51808896720511
+172,46.190401130002584
+173,45.56051658824867
+174,46.93405635091148
+175,45.98677963371881
+176,46.33924699647545
+177,46.91893598764468
+178,45.64168468920296
+179,45.476516722205766
+180,45.588251629836996
+181,45.85678521175613
+182,46.73497546924091
+183,45.79662400165502
+184,46.629010689675304
+185,45.96533471378744
+186,46.58464606441196
+187,45.511858573175715
+188,47.104275377649095
+189,47.22758345877331
+190,46.20253467478862
+191,46.085779946847516
+192,44.75511787290008
+193,46.11259967291647
+194,46.61745762571885
+195,46.38749629771895
+196,47.096851075761336
+197,45.74434533514446
+198,45.88322456266033
+199,45.72205581327608
+200,45.31606441873041
+201,46.83763175995848
+202,46.16584089134833
+203,47.602817232982694
+204,44.52519206870877
+205,46.40543089007081
+206,45.73120894336702
+207,44.8468006875897
+208,46.19795021694417
+209,47.70715694325032
+210,47.08276377401687
+211,48.00012276953313
+212,45.90425423277304
+213,47.02168854317766
+214,46.69920348912855
+215,46.42887687185965
+216,44.74148122944109
+217,45.57394404667312
+218,46.38878546848354
+219,47.18132782477546
+220,46.89596986274944
+221,47.64513205328368
+222,45.14033104187993
+223,45.94899417712989
+224,46.39271085006625
+225,46.54281015944685
+226,45.97590748870288
+227,45.44337494903428
+228,46.78677265164845
+229,46.11325716968698
+230,46.02011798885632
+231,46.8689790445706
+232,45.837193699345654
+233,46.869311842994456
+234,46.58738309513394
+235,46.0985553856456
+236,43.171676856058454
+237,45.26438253565962
+238,46.02191992621766
+239,46.50637514957429
+240,46.702623335489946
+241,45.559940727503026
+242,46.46505908818871
+243,46.38148917737486
+244,46.47965885079799
+245,45.98928860503642
+246,46.47459236360967
+247,45.03242352882455
+248,46.62356468577243
+249,45.43046541554597
+250,44.89864436677386
+251,44.9360807554293
+252,46.65383478955101
+253,45.71276618930802
+254,47.12266661744526
+255,46.61647763389081
+256,45.66681388018936
+257,45.26994141809001
+258,47.005563427561135
+259,48.282264114159794
+260,46.423410493703884
+261,44.8978471964728
+262,47.22097732127542
+263,46.527228181810486
+264,46.589932517692766
+265,47.23597864342218
+266,46.0295360959636
+267,46.05531848450945
+268,47.22572842162108
+269,45.78999301614902
+270,45.752067568799376
+271,46.15529597529264
+272,45.640102199613125
+273,46.424177330682035
+274,44.25269834486258
+275,47.228235257326645
+276,47.00933699683573
+277,46.99849813667588
+278,46.128968478562015
+279,45.32773315134333
+280,46.55924089553013
+281,46.745561634451676
+282,45.425561122539975
+283,45.1689096828214
+284,45.74714210773718
+285,47.06613240150024
+286,46.98819733114798
+287,44.910685618894654
+288,46.882358660743854
+289,45.220315304293045
+290,47.48044684883674
+291,45.6452988002278
+292,45.82591168449881
+293,46.00226901457363
+294,46.31957855611555
+295,46.506055715331485
+296,46.92601376734856
+297,45.969781653041935
+298,46.3416636651847
+299,46.78262377153471
+300,45.96165339065992
+301,47.308786500350216
+302,47.38244193216065
+303,46.039284651809346
+304,46.287639617793886
+305,45.5084676952205
+306,46.20812641249573
+307,46.58463198165516
+308,47.36575447448282
+309,45.8732669263878
+310,45.684525233204994
+311,46.5417091597917
+312,45.71402201204293
+313,46.6738042134702
+314,46.0522390884004
+315,46.11742684738737
+316,45.64446190536256
+317,44.85974771659868
+318,47.15075071311495
+319,45.749279132510864
+320,46.22975667265146
+321,46.391906545246556
+322,46.81132687453868
+323,46.235693184472815
+324,45.73787284994606
+325,46.7078094065248
+326,46.738999886685605
+327,46.188008062786054
+328,45.990317923515725
+329,45.52791818878071
+330,45.09572722884058
+331,46.70995710689733
+332,45.86321552594819
+333,45.251612022615326
+334,46.504208616390805
+335,46.924189170912314
+336,47.23051211128891
+337,45.95596150354503
+338,47.186607443924004
+339,46.99237574498532
+340,47.3034701932495
+341,45.592073223566445
+342,44.57527950222222
+343,46.30873261334614
+344,46.06446421312888
+345,46.15053642203064
+346,46.45225840594316
+347,45.52401656865006
+348,46.220843786773564
+349,47.23874532242078
+350,46.4990213308299
+351,45.887652079472275
+352,45.790247414108165
+353,46.297356509333284
+354,45.156605441863974
+355,46.36158285461906
+356,47.39498242219727
+357,47.1096536792389
+358,47.76949228045338
+359,46.82480772437796
+360,46.581546642549455
diff --git a/streamlit b/streamlit
new file mode 100644
index 0000000000000000000000000000000000000000..e69de29bb2d1d6434b8b29ae775ad8c2e48c5391