Commit ef2d6c90 authored by DIANE

packages imports

parent 7a088bea
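The change drops the catch-all "from packages import *" wildcard in favour of explicit imports, declared either at the top of a module or locally inside the function or class that needs them. A minimal sketch of the two patterns (the helper below is illustrative, not a file changed by this commit):

# Module level: import only the names the module actually uses.
from pandas import DataFrame
from utils.data_parsing import JcampParser, CsvParser

# Local: defer a heavy or optional dependency until the code that needs it runs.
def make_report(data):
    import plotly.express as px
    return px.histogram(data)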
@@ -16,6 +16,11 @@ css_file = Path("style/style_model.css")
pages_folder = Path("pages/")
image_path = Path('./images/img-sky.jpg')
import os
from shutil import rmtree
from pandas import DataFrame
from PIL import Image
import plotly.express as px
from utils.data_parsing import JcampParser, CsvParser
......
@@ -127,6 +127,7 @@ else:
@st.cache_data
def read_dx(tmp_path):
M = JcampParser(path = tmp_path)
M.parse()
return M.chem_data, M.specs_df_, M.md_df_, M.md_df_st_
_, spectra, meta_data, md_df_st_ = read_dx(tmp_path = tmp_path)
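With this commit, parsing is an explicit step: the cached read_dx helper builds a JcampParser, calls parse(), and returns plain data structures. The same two-step usage outside Streamlit would look like the sketch below (the .dx path is a placeholder; attribute names are taken from the hunk above):

parser = JcampParser(path="sample.dx")  # placeholder path
parser.parse()                          # explicit parsing step added by this commit
chem_data = parser.chem_data
spectra, meta_data, md_df_st_ = parser.specs_df_, parser.md_df_, parser.md_df_st_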
......
from packages import *
from common import *
st.set_page_config(page_title="NIRS Utils", page_icon=":goat:", layout="wide")
from utils.data_handling import *
# layout
UiComponents(pagespath=pages_folder, csspath=css_file, imgpath=image_path,
header=True, sidebar=True, bgimg=False, colborders=True)
# HTML for the "CEFE - CNRS" banner
# bandeau_html = """
# <div style="width: 100%; background-color: #4682B4; padding: 10px; margin-bottom: 10px;">
......
# from packages import *
import streamlit as st
def UiComponents(pagespath, csspath, imgpath, header = True, sidebar = True, bgimg = True, colborders = True):
import streamlit as st
......
from packages import *
#~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ kmeans ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
class Sk_Kmeans:
......
from packages import *
from utils.eval_metrics import metrics
## try to automatically detect the field separator within the CSV
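The comment above refers to auto-detecting the CSV field separator; one common way to do that is the standard library's csv.Sniffer, sketched here only as an illustration of the idea, not necessarily what this module implements:

import csv

def detect_separator(path, candidates=",;\t"):
    # Sniff the dialect from a small sample of the file and return its delimiter.
    with open(path, newline="") as fh:
        sample = fh.read(4096)
    return csv.Sniffer().sniff(sample, delimiters=candidates).delimiter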
......
from packages import *
import jcamp as jc
class JcampParser:
import jcamp
'''This module is designed to help retrieve spectral data as well as metadata of samples from a JCAMP file'''
def __init__(self, path):
#self.__path = path.replace('\\','/')
@@ -13,7 +11,8 @@ class JcampParser:
self.__nb = self.__dxfile['blocks'] # Get the total number of blocks = The total number of scanned samples
self.__list_of_blocks = self.__dxfile['children'] # Store all blocks within a list
self.__wl = self.__list_of_blocks[0]["x"] # Wavelengths/frequencies/range
def parse(self):
# Start retrieving the data
specs = np.zeros((self.__nb, len(self.__list_of_blocks[0]["y"])), dtype=float) # preallocate a np matrix for storing spectra
self.idx = np.arange(self.__nb) # This array is designed to store sample names
@@ -60,7 +59,7 @@ class JcampParser:
for match in re.findall(self.pattern, a):
elements_name.append(match[0])
## Retrieve concentrationds
## Retrieve concentrations
df = self.metadata_['concentrations']
cc = {}
for i in range(self.metadata_.shape[0]):
@@ -105,7 +104,7 @@ class CsvParser:
def __init__(self, file):
with NamedTemporaryFile(delete = False, suffix = ".dx") as tmp:
with NamedTemporaryFile(delete = False, suffix = ".csv") as tmp:
tmp.write(file.read())
self.file = tmp.name
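CsvParser copies the uploaded file into a named temporary file (now with a .csv suffix) and keeps only its path; delete=False is what keeps the file on disk after the with block, so it has to be removed later. A small sketch of the same pattern with the cleanup made explicit (the helper name is illustrative):

from tempfile import NamedTemporaryFile

def to_temp_csv(uploaded):
    # delete=False keeps the file on disk after the with block so it can be reopened by path later.
    with NamedTemporaryFile(delete=False, suffix=".csv") as tmp:
        tmp.write(uploaded.read())
        return tmp.name

# The caller then owns the file and should remove it (e.g. os.remove(path)) once parsing is done.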
......
from packages import *
from utils.data_handling import *
......
from packages import *
class metrics:
from typing import Optional, List
from pandas import DataFrame
def __init__(self, c:Optional[float] = None, cv:Optional[List] = None, t:Optional[List] = None, method = 'regression')-> DataFrame:
phase = [c, cv, t]
index = np.array(["train", "cv", "test"])
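The metrics class gathers results for the calibration (c), cross-validation (cv) and test (t) phases and indexes them as "train", "cv" and "test". A minimal sketch of that idea, assuming standard regression scores; the exact metrics the class computes are not visible in this hunk:

from pandas import DataFrame
from sklearn.metrics import mean_squared_error, r2_score

def regression_table(pairs):
    # pairs maps a phase name to (y_true, y_pred), e.g. {"train": ..., "cv": ..., "test": ...}.
    rows = {}
    for name, (y, yhat) in pairs.items():
        rows[name] = {"rmse": mean_squared_error(y, yhat) ** 0.5, "r2": r2_score(y, yhat)}
    return DataFrame.from_dict(rows, orient="index")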
......
from packages import *
def create_hash(to_hash):
# using the MD5 hash function.
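The comment says create_hash relies on MD5; the standard-library way to do that is hashlib. A sketch of the likely shape (the real body is not shown in this hunk):

from hashlib import md5

def create_hash(to_hash):
    # Hash the string representation of the input and return its hex digest.
    return md5(str(to_hash).encode("utf-8")).hexdigest()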
......
from packages import *
import streamlit as st
# predict module
......
from packages import *
from utils import metrics, Snv, No_transformation, KF_CV, sel_ratio
......
from packages import *
from typing import Sequence, Dict, Optional, Union
class KS:
def __init__(self, x:Optional[Union[np.ndarray|DataFrame]], rset:Optional[Union[float|int]]):
from pandas import DataFrame
from numpy import ndarray
def __init__(self, x:Optional[Union[ndarray|DataFrame]], rset:Optional[Union[float|int]]):
from kennard_stone import train_test_split
self.x = x
self.ratio = rset
self._train, self._test = ks_train_test_split(self.x, train_size = self.ratio)
self._train, self._test = train_test_split(self.x, train_size = self.ratio)
@property
def calset(self):
@@ -13,7 +15,10 @@ class KS:
return self.x, clu
class RDM:
def __init__(self, x:Optional[Union[np.ndarray|DataFrame]], rset:Optional[Union[float|int]]):
from pandas import DataFrame
from numpy import ndarray
def __init__(self, x:Optional[Union[ndarray|DataFrame]], rset:Optional[Union[float|int]]):
from sklearn.model_selection import train_test_split
self.x = x
self.ratio = rset
self._train, self._test = train_test_split(self.x, train_size = self.ratio)
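KS and RDM expose the same interface over two sampling strategies: the Kennard-Stone algorithm from the kennard_stone package and a plain random split from scikit-learn. A short usage sketch, assuming both classes are importable and using 0.8 only as an example ratio:

import numpy as np

x = np.random.default_rng(0).normal(size=(100, 10))  # dummy spectra-like matrix

ks_split = KS(x=x, rset=0.8)    # Kennard-Stone selection of the calibration set
rdm_split = RDM(x=x, rset=0.8)  # random selection with the same ratio
data, membership = ks_split.calset  # the calset property shown above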
......
from packages import pyodbc, json
class SQL_Database():
def __init__(self):
config_path = Path("../config/")
from json import load
from pathlib import Path
config_path = Path("../config/")
with open(config_path / 'config.json', 'r') as fh:
config = json.load(fh)
config = load(fh)
self.driver = config['DRIVER']
self.server = config['SERVER']
@@ -16,7 +17,8 @@ class SQL_Database():
self.encrypt = config['ENCRYPT']
def connect(self):
connection = pyodbc.connect(
from pyodbc import connect
connection = connect(
f'Driver={self.driver};'
f'Server={self.server};'
f'Database={self.database};'
......
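The connect() method above now imports connect from pyodbc locally and assembles the connection string from the JSON config; the string continues beyond this hunk, but a typical complete form looks like the sketch below (Driver/Server/Database/Encrypt mirror the config keys shown; the Trusted_Connection key is assumed for illustration):

from pyodbc import connect

def open_connection(driver, server, database, encrypt="yes"):
    # Assumed continuation of the connection string built in connect(); extra keys are illustrative.
    conn_str = (
        f"Driver={driver};"
        f"Server={server};"
        f"Database={database};"
        f"Encrypt={encrypt};"
        "Trusted_Connection=yes;"
    )
    return connect(conn_str)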
from packages import *
import streamlit as st
# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ predictions histogram ~~~~~~~~~~~~~~~~~~~~~~~~~~
@st.cache_data
def pred_hist(pred):
......
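pred_hist builds the predictions histogram behind st.cache_data, so repeated reruns of the page reuse the cached figure. Its body is not shown here; a minimal sketch of that combination with plotly.express (an assumption about the plotting backend, though px is among the shared imports added above):

import streamlit as st
import plotly.express as px

@st.cache_data
def pred_hist(pred):
    # Cached: recomputed only when the prediction values change.
    return px.histogram(x=pred, nbins=30, title="Predicted values")

# st.plotly_chart(pred_hist(predictions)) would then render it on the page.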