diff --git a/.gitignore b/.gitignore
index 426c58a9ae073e79816ece52b20a358318a83b51..e62dc541fddd5d98f86d254422f5d32c38c365fc 100644
--- a/.gitignore
+++ b/.gitignore
@@ -5,4 +5,5 @@ data/models/
 data/predictions/
 data/sample_selections/
 Report/*.pdf
-Report/*.tex
\ No newline at end of file
+Report/*.tex
+Report/figures/
\ No newline at end of file
diff --git a/Class_Mod/LWPLSR_.py b/Class_Mod/LWPLSR_.py
index e3406d670c7251d17866258ec4fefeee3f794e47..83b51121117c8eee297758f9cc29b599a2e5f086 100644
--- a/Class_Mod/LWPLSR_.py
+++ b/Class_Mod/LWPLSR_.py
@@ -14,5 +14,25 @@ def model_LWPLSR(xcal_csv, ycal_csv, sep, hdr):
     # Assign data to training and test sets
     X_train, y_train, X_test, y_test = x[train_index], y[train_index], x[test_index], y[test_index]
     st.write("Size of train and test sets: train " + str(X_train.shape) + ' ' + str(y_train.shape) + ' / test ' + str(X_test.shape) + ' ' + str(y_test.shape))
-
-    # Jchemo.lwplsr(X_train, y_train, nlvdis=4, metric = eucl, k = 10)
\ No newline at end of file
+    # prepare to send dataframes to julia and Jchemo
+    Main.X_train,Main.y_train,Main.X_test,Main.y_test = X_train, y_train, X_test, y_test
+    Main.eval("""
+    # Convert the Python pandas DataFrames to Julia DataFrames
+    X_train_j = Main.X_train |> Pandas.DataFrame|> DataFrames.DataFrame;
+    y_train_j = Main.y_train |> Pandas.DataFrame|> DataFrames.DataFrame;
+    X_test_j = Main.X_test |> Pandas.DataFrame|> DataFrames.DataFrame;
+    y_test_j = Main.y_test |> Pandas.DataFrame|> DataFrames.DataFrame;
+    # Main.println(string("Size of X_train ", size(X_train_j), " ; Size of y_train ", size(y_train_j)))
+    # Main.println(string("Size of X_test ", size(X_test_j), " ; Size of y_test ", size(y_test_j)))
+    # Compute model
+    nlvdis = 5 ; metric = :mah
+    h = 1 ; k = 200 ; nlv = 15 #; scal = true
+    mod = Main.Jchemo.model(Main.Jchemo.lwplsr; nlvdis, metric, h, k, nlv)
+    Main.Jchemo.fit!(mod, X_train_j, y_train_j)
+    # Compute predictions on the test data
+    res = Main.Jchemo.predict(mod, X_test_j) ;
+    score = Main.Jchemo.rmsep(res.pred, y_test_j)
+    resjp = Pandas.DataFrame(res.pred);
+    """)
+    score = Main.score
+    predicted_results_on_test = pd.DataFrame(Main.resjp)
diff --git a/Packages.py b/Packages.py
index ec7d83f23abc877b99e5eb07c3abc95a2280edba..ff2ca80bdf1a795c23fa63408377a4eee1e0ca56 100644
--- a/Packages.py
+++ b/Packages.py
@@ -24,7 +24,7 @@ from scipy.sparse import csgraph
 
 # Modelling
 # import julia
-# from julia import Jchemo
+from julia import Main, Jchemo, DataFrames, Base, Pandas
 
 from pinard import utils
 from pinard import preprocessing as pp
diff --git a/README.md b/README.md
index d4047956224c137fc011b54219fea0b4d1ed7c19..db301911c07fffb3a454077b9e7b65a9680cda08 100644
--- a/README.md
+++ b/README.md
@@ -20,10 +20,11 @@ This package is written in python. You can clone the repository: git clone https
 Then install the requirements: pip install -r requirements.txt
 (OPTIONNAL) To use Locally weighted PLS Regression for creation model, you will need to install Jchemo.jl (https://github.com/mlesnoff/Jchemo.jl), a Julia package.
 From the CLI: python
-> '>>> import julia
-'>>> julia.install()
-'>>> from julia import Pkg
-'>>> Pkg.add("Jchemo")
+> python   
+'>>> import julia  
+'>>> julia.install()  
+'>>> from julia import Pkg  
+'>>> Pkg.add(["Jchemo","DataFrames","Pandas"])
 
 To check if Jchemo is installed without errors:
 > '>>> Pkg.status()