from common import *
st.set_page_config(page_title="NIRS Utils", page_icon=":goat:", layout="wide")


from shared_cached import load_csv, load_dx
from utils.data_parsing import meta_st



# layout
UiComponents(pagespath = pages_folder, csspath = css_file, imgpath = image_path,
             header = True, sidebar = True, bgimg = False, colborders = True)
st.header("Calibration Subset Selection") # page title
st.markdown("Create a predictive model, then use it for predicting your target variable (chemical data) from NIRS spectra")
c1, c2 = st.columns([3, 1])
c1.image("./images/sample selection.png", use_column_width=True) # graphical abstract



# empty temp figures
report_path = Path("report")
report_path_rel = Path("./report")

# ~~~~~~~~~~~~~~~~ clean the analysis results dir ~~~~~~~~~~~~~~~~
HandleItems.delete_files(keep = ['.py', '.pyc','.bib', '.tex'])
HandleItems.delete_dir(delete = ['report/results/model'])



################################### I - Data Loading and Visualization ########################################
# files_format = ['csv', 'dx'] # Supported files format
# loader for datafile
file = c2.file_uploader("Data file", type = ["csv", "dx"], help = " :mushroom: select a csv matrix with samples as rows and wavelengths (lambdas) as columns")

## Preallocation of data structure
spectra = DataFrame()
meta_data = DataFrame()
md_df_st_ = DataFrame()
tcr = DataFrame()
sam = DataFrame()
sam1 = DataFrame()
l1 = []
color_palette = None
dr_model = None # dimensionality reduction model
cl_model = None # clustering model
selection = None
selection_number = "None"
samples_df_chem = DataFrame()
selected_samples = []
selected_samples_idx = []

if not file:
    c2.info('Info: Please load a data file!')

else:
    # extension = file.name.split(".")[-1]
    userfilename = file.name.replace(f'.{file.name.split(".")[-1]}', '')

    match file.name.split(".")[-1]:
        # Load .csv file
        case 'csv':
            with c2:
                # ~~~~~~~~ select file dialect
                c2_1, c2_2 = st.columns([.5, .5])
                with c2_1:
                    dec = st.radio('decimal:', options= [".", ","], horizontal = True)
                    sep = st.radio("separator:", options = [";", ","], horizontal = True)
                with c2_2:
                    hdr = st.radio("header: ", options = ["yes", "no"], horizontal = True)
                    names = st.radio("samples name:", options = ["yes", "no"], horizontal = True)
                
                hdr = 0 if hdr =="yes" else None
                names = 0 if names =="yes" else None
                hash_ = ObjectHash(current=None, add= [file.getvalue(), hdr, names, dec, sep])
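                # hash of the file bytes plus dialect settings; passed as the `change`
                # argument so the cached loader below refreshes whenever inputs change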
                


                # ~~~~~~~~ read the csv file
                try:
                    spectra, meta_data = load_csv(file= file, dec= dec, sep= sep, names= names, hdr= hdr, change = hash_)
                    st.success("The data have been loaded successfully", icon="✅")

                except Exception:
                    st.error('''Error: The file format does not match the selected dialect settings.
                              Please adjust the decimal and separator parameters to read the file correctly.''')

        # Load .dx file
        case 'dx':
            with c2:
                try:
                    hash_ = ObjectHash(current=None, add= file.getvalue())
                    _, spectra, meta_data = load_dx(tmp_path= file, change=hash_)
                    meta_data.index = spectra.index
                    st.success("The data have been loaded successfully", icon="✅")
                except Exception:
                    st.error('Error: an issue was encountered while parsing the uploaded file.')


if file:
    if file.name.split(".")[-1] == "csv" and names == 0 and not spectra.empty:
        if len(spectra.index) > len(set(spectra.index)):
            c2.warning("Duplicate sample IDs found. Suffixes (#1, #2, ...) have been added to duplicate IDs.")
            mask = spectra.index.duplicated(keep=False)  # True for every occurrence of a duplicated ID
            # For the duplicated sample IDs, append a suffix (#1, #2, etc.)
            spectra.index = spectra.index.where(~mask,
                                    spectra.groupby(spectra.index).cumcount().add(1).astype(str).radd(spectra.index + '#'))
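            # e.g. a duplicated index ["S1", "S1", "S2"] becomes ["S1#1", "S1#2", "S2"]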
    
if not spectra.empty:
    if not meta_data.empty:
        meta_data.index = [str(i) for i in spectra.index]
        md_df_st_ = meta_st(meta_data)
        if md_df_st_.shape[1] > 0:
            import random
            import matplotlib.colors as mcolors
            # 30 evenly spaced HSV hues, shuffled with a fixed seed so category colors
            # are visually distinct yet reproducible across reruns
            n_colors = 30
            hues = np.linspace(0, 1, n_colors, endpoint=False)
            colorslist = [mcolors.rgb2hex(plt.cm.hsv(hue)) for hue in hues]
            random.seed(42)
            random.shuffle(colorslist)
        else:
            colorslist = None
        


    if spectra.select_dtypes(include=['float']).shape[1] < 50:
        c2.warning('Warning: Your data is not multivariate. Check the number of variables in your data or adjust the dialect settings.')
        spectra = DataFrame()
    



if not spectra.empty:
    n_specs = spectra.shape[0]  # number of samples
    nwls = spectra.shape[1]     # number of wavelengths
    wls = list(spectra.columns) # wavelengths (column names)
    spectra.index = [str(i) for i in list(spectra.index)]

    id = spectra.index # sample IDs (row names)

    with c2:
        st.write('Data summary:')
        st.write(f'- the number of spectra: {spectra.shape[0]}')
        st.write(f'- the number of wavelengths: {spectra.shape[1]}')
        st.write(f'- the number of categorical variables: {meta_data.shape[1]}')
################################################### END : I- Data loading and preparation ####################################################
                    

################################################### BEGIN : visualize and split the data ####################################################
st.subheader("I - Spectral Data Visualization", divider='blue')
if not spectra.empty:
    c3, c4 = st.columns([3, 1])
    with c4:
        st.info('Color spectra based on a categorical variable')
        filter = ['']+md_df_st_.columns.to_list()
        specs_col = st.selectbox('Color by:', options= filter, format_func = lambda x: x if x else "<Select>", disabled = len(filter) == 1)
        if len(filter) == 1:
            st.write("No categorical variable was provided!")
        
    with c3:
        if specs_col != '':
            cmap = dict(zip(set(md_df_st_[specs_col]), colorslist[:len(set(md_df_st_[specs_col]))]))
            fig_spectra = plot_spectra(spectra, color =  md_df_st_[specs_col], cmap = cmap, xunits = 'Wavelength/Wavenumber', yunits = "Signal intensity")

        else:
            fig_spectra = plot_spectra(spectra, color =  None, cmap = None, xunits = 'Wavelength/Wavenumber', yunits = "Signal intensity")
            cmap = None
        st.pyplot(fig_spectra)

    with c4:
        if specs_col != '':
            st.write('The distribution of samples across categories')
            barh = barhplot(md_df_st_[[specs_col]], cmap = cmap)
            st.pyplot(barh)

        elif len(filter) >1 and specs_col == '':
            st.write("No categorical variable was selected!")
    

    if st.session_state.interface == 'advanced':
        with c3:
            values = st.slider('Select a range of values', min_value = 0, max_value = nwls, value = (0, nwls))
            hash_ = ObjectHash(current= hash_, add= values)
            spectra = spectra.iloc[:, values[0]:values[1]]
            nwls = spectra.shape[1]
            wls = wls[values[0]:values[1]]

            st.pyplot(plot_spectra(spectra, xunits = 'Wavelength/Wavenumber', yunits = "Signal intensity"))
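            # downstream reduction, clustering and selection now operate on the cropped wavelength window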

        # st.selectbox('Variable', options= [''], disabled=True if len(colfilter)>1, else False)
        # st.write(data_info) ## table showing the number of samples in the data file

################################################### END : visualize and split the data ####################################################





############################## Exploratory data analysis ###############################
st.subheader("II - Exploratory Data Analysis-Multivariable Data Analysis", divider='blue')
# ~~~~~~~~~~~~~~ algorithms available on our app ~~~~~~~~~~~~~~~~
match st.session_state["interface"]:
    case 'simple':
        dim_red_methods, cluster_methods, seltechs = ['PCA'], [''], ['random']

    case 'advanced':
        dim_red_methods = ['PCA', 'UMAP', 'NMF']  # List of dimensionality reduction algos
        cluster_methods = ['KMEANS', 'HDBSCAN', 'AP'] # List of clustering algos
        seltechs = ['random', 'kennard-stone', 'meta-medoids', 'meta-ks']

###### 1- Dimensionality reduction ######
t = DataFrame() # scores
p = DataFrame() # loadings
if not spectra.empty:
    xc = standardize(spectra, center=True, scale=False)
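    # xc: column-wise mean-centered spectra (no scaling), the usual preprocessing for PCA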

    c5, c6, c7, c8, c9, c10, c11 = st.columns([1, 1, 0.6, 0.6, 0.6, 1.5, 1.5])
    with c5:
        # select a dimensionality reduction algorithm
        dim_red_method = st.selectbox("Dimensionality reduction techniques: ",
                                      options = [''] + dim_red_methods if len(dim_red_methods) > 2 else dim_red_methods,
                                      format_func = lambda x: x if x else "<Select>",
                                      disabled = len(dim_red_methods) <= 2)
        hash_ = ObjectHash(current= hash_, add= dim_red_method)


        match dim_red_method:
            case '':
                st.info('Info: Select a dimensionality reduction technique!')

            case 'UMAP':
                supervised = st.selectbox('Supervised UMAP by (optional):', options = filter,
                                format_func = lambda x: x if x else "<Select>", disabled = len(filter) <= 1)
                umapsupervisor = None if supervised == '' else md_df_st_[supervised]
                hash_ = ObjectHash(current= hash_, add= umapsupervisor)

        # select a clustering algorithm
        disablewidgets = not (dim_red_method and st.session_state.interface == 'advanced')
        clus_method = st.selectbox("Clustering techniques (optional): ",
                                   options = [''] + cluster_methods if len(cluster_methods) > 2 else cluster_methods,
                                   key = 38, format_func = lambda x: x if x else "<Select>", disabled = disablewidgets)

        
        # if disablewidgets == False and dim_red_method in dim_red_methods:
        #     inf = st.info('Info: Select a clustering technique!')

        if dim_red_method:
            @st.cache_data
            def dimensionality_reduction(dim_red_method, change):
                match dim_red_method:
                    case "PCA":
                        from utils.dim_reduction import LinearPCA
                        dr_model = LinearPCA(xc, Ncomp= 8)
                    case "UMAP":
                        from utils.dim_reduction import Umap
                        dr_model = Umap(numerical_data = spectra, cat_data = umapsupervisor)
                    case 'NMF':
                        from utils.dim_reduction import Nmf
                        dr_model = Nmf(spectra, Ncomp= 3)
                return dr_model
            dr_model = dimensionality_reduction(dim_red_method, change = hash_)
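            # the otherwise-unused `change` argument acts as the cache key: st.cache_data
            # reruns the reduction only when hash_ changes (new data, dialect or method)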
            

        if dr_model:
            axis1 = c7.selectbox("x-axis", options = dr_model.scores_.columns, index=0)
            axis2 = c8.selectbox("y-axis", options = dr_model.scores_.columns, index=1)
            axis3 = c9.selectbox("z-axis", options = dr_model.scores_.columns, index=2)
            axis = np.unique([axis1, axis2, axis3])

            t = dr_model.scores_.loc[:, axis]
            t.index = spectra.index
            tcr = standardize(t)
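            # tcr: standardized scores, so each component is on a comparable scale for
            # the clustering and sample-selection steps below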

if not t.empty:
    if dim_red_method == 'UMAP':
        c12 = st.container()
    else:
        c12, c13 = st.columns([3,3])


if not spectra.empty:
    with c6:
        sel_ratio = st.number_input('Enter the number/fraction of samples to be selected:', min_value = 0.01,
                                     max_value = float(spectra.shape[0]), value = 0.20,
                                     format = "%.2f", disabled = disablewidgets)
        if sel_ratio > 1.00:
            ratio = int(sel_ratio)
        else: # sel_ratio <= 1.00 is read as a fraction of the dataset
            ratio = int(sel_ratio * spectra.shape[0])
        hash_ = ObjectHash(current = hash_, add = sel_ratio)
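        # e.g. sel_ratio = 0.20 with 100 spectra gives ratio = 20; sel_ratio = 12.00 gives ratio = 12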

        if dr_model and not clus_method:
            seltech = st.radio('Sample selection strategy:', options = ['random', 'kennard-stone'],
                               disabled = st.session_state.interface == 'simple')

        elif dr_model and clus_method:
            seltech = st.radio('Sample selection strategy:', options = seltechs,
                               disabled = clus_method not in cluster_methods)


if not t.empty:
    # ~~~~~~~~~~~~~~~~~~~~~~~ II - Clustering ~~~~~~~~~~~~~~~~~~~~~~~~~~
    if clus_method:
        from utils.clustering import clustering
        labels, n_clusters = clustering(X = tcr, method = clus_method)
        
    # ~~~~~~ III - Sample selection based on the reduced data representation ~~~~~~~
    from utils.samsel import selection_method
    if not clus_method: # no clustering: select directly from the whole score space
        custom_color_palette = px.colors.qualitative.Plotly[:1]
        selected = selection_method(X = tcr, method = seltech, rset = 0.2)

    else:
        custom_color_palette = px.colors.qualitative.Plotly[:n_clusters]
        selected = []
        for i in [i for i in set(labels.index) if i != 'Non clustered']:
            # use a smaller meta-selection ratio when a cluster holds more than one sample
            rset_meta = .5 if tcr.loc[labels.loc[i].values.ravel(), :].shape[0] > 1 else 1
            selected += selection_method(X = tcr.loc[labels.loc[i].values.ravel(), :], method = seltech,
                                          rset = 0.2, rset_meta = rset_meta)
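        # note: iterating over set(labels.index) runs the selection once per cluster, so
        # every cluster contributes samples; 'Non clustered' points (e.g. HDBSCAN noise) are skipped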



# ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~ results visualization ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
# Scores plot
if not t.empty:
    if clus_method:
        filter[0] = clus_method
        desactivatelist = len(filter) <= 1
    else:
        desactivatelist = len(filter) <= 2
    with c12:
        st.write('Scores plot')

        if len(axis) == 1:
            tcr['1d'] = np.random.uniform(-.5, .5, tcr.shape[0]) # random jitter for a 1-D strip plot

        colfilter = st.selectbox('Color by :', options= filter, format_func = lambda x: x if x else "<Select>", disabled = desactivatelist)
        ObjectHash(colfilter)

        # build per-sample color labels and a matching discrete color map
        if colfilter in cluster_methods:
            # invert the cluster -> samples mapping into a per-sample cluster label
            cluster_of = {s: c for c in set(labels.index) for s in labels.loc[c].values.ravel()}
            tcr[f'{colfilter} :'] = [cluster_of.get(s, 'Non clustered') for s in tcr.index]
            cmap = dict(zip(set(labels.index), colorslist[:len(set(labels.index))]))
        elif colfilter != '':
            tcr[f'{colfilter} :'] = list(map(str.lower, md_df_st_.loc[:, colfilter].astype(str)))
            cats = set(tcr[f'{colfilter} :'])
            cmap = dict(zip(cats, colorslist[:len(cats)]))
        else:
            tcr[f'{colfilter} :'] = ['sample'] * tcr.shape[0]
            cmap = {'sample': 'blue'}

        # start visualization: 3-D, 2-D or jittered 1-D scores plot depending on the retained axes
        match t.shape[1]:
            case 3:
                fig = px.scatter_3d(tcr, x = axis[0], y = axis[1], z = axis[2], color = tcr[f'{colfilter} :'], color_discrete_map = cmap)
            case 2:
                fig = px.scatter(tcr, x = axis[0], y = axis[1], color = tcr[f'{colfilter} :'], color_discrete_map = cmap)
            case 1:
                fig = px.scatter(tcr, x = axis[0], y = '1d', color = tcr[f'{colfilter} :'], color_discrete_map = cmap)
                fig.update_yaxes(visible = False)

        st.plotly_chart(fig)
#         if colfilter in cluster_methods:
#             tcr[colfilter] = labels
#         elif not meta_data.empty and colfilter in md_df_st_.columns.tolist():
#             tcr[f'{colfilter} :'] = list(map(str.lower,md_df_st_.loc[:,colfilter]))
#         else:
#             tcr[f'{colfilter} :'] = ['sample'] * tcr.shape[0]
        
#         col_var_name = tcr.columns.tolist()[-1]
#         n_categories = len(np.unique(tcr[col_var_name]))
#         custom_color_palette = px.colors.qualitative.Plotly[:n_categories]

#         if selected_samples_idx:# color selected samples
#             t_selected = tcr.loc[selected_samples_idx,:]
#         match t.shape[1]:
#             case 3:
#                 fig = px.scatter_3d(tcr, x = axis[0], y = axis[1], z = axis[2], color = col_var_name ,color_discrete_sequence = custom_color_palette)
#                 fig.update_traces(marker=dict(size=4))
#                 if selected_samples_idx:# color selected samples
#                     fig.add_scatter3d(x = t_selected.loc[:,axis[0]], y = t_selected.loc[:,axis[1]], z = t_selected.loc[:,axis[2]],
#                                     mode ='markers', marker = dict(size = 5, color = 'black'), name = 'selected samples')
                
#             case 2:
#                 fig = px.scatter(tcr, x = axis[0], y = axis[1], color = col_var_name ,color_discrete_sequence = custom_color_palette)
#                 if selected_samples_idx:# color selected samples
#                     fig.add_scatter(x = t_selected.loc[:,axis[0]], y = t_selected.loc[:,axis[1]],
#                                     mode ='markers', marker = dict(size = 5, color = 'black'), name = 'selected samples')

            
#             case 1:
#                 yy = np.random.uniform(-.5, .5, tcr.shape[0])
#                 fig = px.scatter(tcr, x = axis[0], y = '1d', color = col_var_name ,color_discrete_sequence = custom_color_palette)
#                 fig.add_scatter(x = t_selected.loc[:,axis[0]], y = t_selected['1d'],
#                                     mode ='markers', marker = dict(size = 5, color = 'black'), name = 'selected samples')
#                 fig.update_layout( yaxis_range=[-1.6, 1.6])
#                 fig.update_yaxes(visible=False)

#         st.plotly_chart(fig, use_container_width = True)

#         if labels:
#             fig_export = {}
#             # export 2D scores plot
#             if len(axis)== 3:
#                 from itertools import combinations
#                 comb = [i for i in combinations(np.arange(len(axis)), 2)]
#                 subcap = ['a','b','c']
#                 for i in range(len(comb)):
#                     fig_= px.scatter(tcr, x = axis[(comb[i][0])], y=axis[(comb[i][1])],color = labels if list(labels) else None,color_discrete_sequence = custom_color_palette)
#                     fig_.add_scatter(x = t_selected.loc[:,axis[(comb[i][0])]], y = t_selected.loc[:,axis[(comb[i][1])]], mode ='markers', marker = dict(size = 5, color = 'black'),
#                                 name = 'selected samples')
#                     fig_.update_layout(font=dict(size=23))
#                     fig_.add_annotation(text= f'({subcap[i]})', align='center', showarrow= False, xref='paper', yref='paper', x=-0.13, y= 1,
#                                                 font= dict(color= "black", size= 35), bgcolor ='white', borderpad= 2, bordercolor= 'black', borderwidth= 3)
#                     fig_.update_traces(marker=dict(size= 10), showlegend= False)
#                     fig_export[f'scores_pc{comb[i][0]}_pc{comb[i][1]}'] = fig_
#                     # fig_export.write_image(f'./report/results/figures/scores_pc{str(comb[i][0])}_pc{str(comb[i][1])}.png')
#             else:
#                 fig_export['fig'] = fig
            


# if not spectra.empty:
#     if dim_red_method in ['PCA', 'NMF']:
#         with c13:
#             st.write('Loadings plot')
#             p = dr_model.loadings_
#             freq = DataFrame(wls, index=p.index)
#             if file.name.split(".")[-1] =='dx':
#                 if meta_data.loc[:,'xunits'].iloc[0] == '1/cm':
#                     freq.columns = ['Wavenumber (1/cm)']
#                     xlab = "Wavenumber (1/cm)"
#                     inv = 'reversed'
#                 else:
#                     freq.columns = ['Wavelength (nm)']
#                     xlab = 'Wavelength (nm)'
#                     inv = None
#             else:
#                 freq.columns = ['Wavelength/Wavenumber']
#                 xlab = 'Wavelength/Wavenumber'
#                 inv = None
                
#             pp = concat([p, freq], axis=1)
#             #########################################
#             df1 = pp.melt(id_vars=freq.columns)
#             loadingsplot = px.line(df1, x=freq.columns, y='value', color='variable', color_discrete_sequence=px.colors.qualitative.Plotly)
#             loadingsplot.update_layout(legend = dict(x=1, y=0, font=dict(family="Courier", size=12, color="black"),
#                                         bordercolor="black", borderwidth=2))
#             loadingsplot.update_layout(xaxis_title = xlab,yaxis_title = "Intensity" ,xaxis = dict(autorange= inv))

            
#             st.plotly_chart(loadingsplot, use_container_width=True)
    
# #############################################################################################################
#     if dim_red_method == 'PCA':
#         c14, c15 = st.columns([3, 3])
#         with c14:
#             st.write('Influence plot')
#             # Q residuals: Q residuals represent the magnitude of the variation remaining in each sample after projection through the model
#             p = dr_model.loadings_.loc[:,axis]
#             xp = np.dot(t,p.T)
#             tcr["Residuals"] = np.diag(np.subtract(xc.to_numpy(), xp)@ np.subtract(xc.to_numpy(), xp).T)
#             # Leverage
#             Hat = t.to_numpy() @ np.linalg.inv(np.transpose(t.to_numpy()) @ t.to_numpy()) @ np.transpose(t.to_numpy())
#             tcr["Leverage"] = DataFrame(np.diag(Hat) / np.trace(Hat), index = spectra.index, columns = ['Leverage'])

#             # compute thresholds
#             tresh3 = 2 * tcr.shape[1]/n_specs
#             from scipy.stats import chi2
#             tresh4 = chi2.ppf(0.05, df = len(axis))

#             # color with metadata
#             if colfilter:
#                 if colfilter == "":
#                     l1 = ["Samples"]* n_specs

#                 elif colfilter == clus_method:
#                     l1 = labels

#                 else:
#                     l1 = tcr[f'{colfilter} :']

#             influence_plot = px.scatter(data_frame =tcr, x = "Leverage", y = "Residuals", color=col_var_name,
#                                             color_discrete_sequence= custom_color_palette)
#             influence_plot.add_scatter(x = tcr["Leverage"].loc[selected_samples_idx] , y = tcr["Residuals"].loc[selected_samples_idx],
#                                        mode ='markers', marker = dict(size = 5, color = 'black'), name = 'selected samples')
            
#             influence_plot.add_vline(x = tresh3, line_width = 1, line_dash = 'solid', line_color = 'red')
#             influence_plot.add_hline(y=tresh4, line_width=1, line_dash='solid', line_color='red')
#             influence_plot.update_layout(xaxis_title = "Leverage", yaxis_title = "Q-residuals", font=dict(size=20), width=800, height=600)


#             exceed_lev = tcr[(tcr['Leverage'] > tresh3) & (tcr['Residuals'] > tresh4)].index.tolist()
#             # Retrieve the index names of these rows
#             for i in exceed_lev:
#                 influence_plot.add_annotation(dict(x = tcr['Leverage'].loc[i], y = tcr['Residuals'].loc[i], showarrow=True, text = i,
#                                                     font= dict(color= "black", size= 15), xanchor = 'auto', yanchor = 'auto'))

#             influence_plot.update_traces(marker=dict(size= 6), showlegend= True)
#             influence_plot.update_layout(font=dict(size=23), width=800, height=500)
#             st.plotly_chart(influence_plot, use_container_width=True)


            
#             for annotation in influence_plot.layout.annotations:
#                 annotation.font.size = 35
#             influence_plot.update_layout(font=dict(size=23), width=800, height=600)
#             influence_plot.update_traces(marker=dict(size= 10), showlegend= False)
#             influence_plot.add_annotation(text= '(a)', align='center', showarrow= False, xref='paper', yref='paper', x=-0.125, y= 1,
#                                              font= dict(color= "black", size= 35), bgcolor ='white', borderpad= 2, bordercolor= 'black', borderwidth= 3)
#             # influence_plot.write_image('./report/results/figures/influence_plot.png', engine = 'kaleido')
        
        
#         with c15:
#             st.write('T²-Hotelling vs Q-residuals plot')
#             # Hotelling
#             tcr['Hotelling']  = t.var(axis = 1)
#             from scipy.stats import f, chi2
#             fcri = f.isf(0.05, 3, n_specs)
#             tresh0 = (3 * (n_specs ** 2 - 1) * fcri) / (n_specs * (n_specs - 3))
#             tresh1 = chi2.ppf(0.05, df = 3)
#             hotelling_plot = px.scatter(t, x = tcr['Hotelling'], y = tcr['Residuals'], color=labels if list(labels) else None,
#                                             color_discrete_sequence= custom_color_palette)
#             hotelling_plot.add_scatter(x = tcr['Hotelling'][selected_samples_idx] , y = tcr['Residuals'][selected_samples_idx],
#                                        mode ='markers', marker = dict(size = 5, color = 'black'), name = 'selected samples')
#             hotelling_plot.update_layout(xaxis_title="Hotelling-T² distance",yaxis_title="Q-residuals")
#             hotelling_plot.add_vline(x=tresh0, line_width=1, line_dash='solid', line_color='red')
#             hotelling_plot.add_hline(y=tresh1, line_width=1, line_dash='solid', line_color='red')

#             exceed_hot = tcr[(tcr['Hotelling'] > tresh0) & (tcr['Residuals'] > tresh1)].index.tolist()
#             # Retrieve the index names of these rows
#             for i in exceed_hot:
#                 hotelling_plot.add_annotation(dict(x = tcr['Hotelling'].loc[i], y = tcr['Residuals'].loc[i], showarrow=True, text = i,
#                                                     font= dict(color= "black", size= 15), xanchor = 'auto', yanchor = 'auto'))
                    
#             hotelling_plot.update_traces(marker=dict(size= 6), showlegend= True)
#             hotelling_plot.update_layout(font=dict(size=23), width=800, height=500)
#             st.plotly_chart(hotelling_plot, use_container_width=True)

#             # st.write(index_names)



#             # for annotation in hotelling_plot.layout.annotations:
#             #     annotation.font.size = 35
#             # hotelling_plot.update_layout(font=dict(size=23), width=800, height=600)
#             # hotelling_plot.update_traces(marker=dict(size= 10), showlegend= False)
#             # hotelling_plot.add_annotation(text= '(b)', align='center', showarrow= False, xref='paper', yref='paper', x=-0.125, y= 1,
#             #                                  font= dict(color= "black", size= 35), bgcolor ='white', borderpad= 2, bordercolor= 'black', borderwidth= 3)
# #             # hotelling_plot.write_image("./report/results/figures/hotelling_plot.png", format="png")

# st.subheader('III - Selected Samples for Reference Analysis', divider='blue')
# if labels:
#     c16, c17 = st.columns([3, 1])
#     c16.write("Tabular identifiers of selected samples for reference analysis:")
#     if selected_samples_idx:
#         # st.write(selected_samples_idx)
        
#         # st.write(DataFrame(result))
#         DataFrame({'name': selected_samples_idx,
#                     'cluster':np.array(labels)[selected_samples_idx]},
#                     index = selected_samples_idx)



#         if meta_data.empty:
#             # clustered: a list of ints
#             # sam1 = DataFrame({'name': selected_samples_idx,
#             #                     'cluster':np.array(labels)[selected_samples_idx]},
#             #                     index = selected_samples_idx)
#             st.write(selected_samples_idx)
#             st.write(clustered)
#         else:
#             sam1 = meta_data.iloc[clustered,:].loc[selected_samples_idx,:]
#             sam1.insert(loc=0, column='index', value=selected_samples_idx)
#             sam1.insert(loc=1, column='cluster', value=np.array(labels)[selected_samples_idx])
#         sam1.index = np.arange(len(selected_samples_idx))+1
#         with c17:
#             st.info(f'Information !\n - The total number of samples: {n_specs}.\n- The number of samples selected for reference analysis: {sam1.shape[0]}.\n - The proportion of samples selected for reference analysis: {round(sam1.shape[0]/n_specs*100)}%.')
#         sam = sam1

#         if clus_method =='HDBSCAN':
#             with c16:
#                 unclus = st.checkbox("Include non clustered samples (for HDBSCAN clustering)", value=True)

#             if selected_samples_idx:
#                 if unclus:
#                     if meta_data.empty:
#                         sam2 = DataFrame({'name': spectra.index[non_clustered],
#                                             'cluster':['Non clustered']*len(spectra.index[non_clustered])},
#                                             index = spectra.index[non_clustered])
#                     else :
#                         sam2 = meta_data.iloc[non_clustered,:]
#                         sam2.insert(loc=0, column='index', value= spectra.index[non_clustered])
#                         sam2.insert(loc=1, column='cluster', value=['Non clustered']*len(spectra.index[non_clustered]))
                    
#                     sam = concat([sam1, sam2], axis = 0)
#                     sam.index = np.arange(sam.shape[0])+1
#                     with c17:
#                         st.info(f'- The number of Non-clustered samples: {sam2.shape[0]}.\n - The proportion of Non-clustered samples: {round(sam2.shape[0]/n_specs*100)}%')
#         else:
#             sam = sam1
#         with c16:
#             st.write(sam)


# if not sam.empty:
#     zip_data = ""
#     Nb_ech = str(n_specs)
#     nb_clu = str(sam1.shape[0])
#     st.subheader('Download the analysis results')
#     st.write("**Note:** Please check the box only after you have finished processing your data and are satisfied with the results. Checking the box prematurely may slow down the app and could lead to crashes.")
#     decis = st.checkbox("Yes, I want to download the results")
#     if decis:
#         ###################################################
#         # ## generate report
#         @st.cache_data
#         def export_report(change):
#             latex_report = report.report('Representative subset selection', file.name, dim_red_method,
#                                         clus_method, Nb_ech, ncluster, selection, selection_number, nb_clu,tcr, sam)

#         @st.cache_data
#         def preparing_results_for_downloading(change):
#             # path_to_report = Path("report")
#             match file.name.split(".")[-1]:
#                 # load csv file
#                 case 'csv':
#                     imp.to_csv('report/results/dataset/'+ file.name, sep = ';', encoding = 'utf-8', mode = 'a')
#                 case 'dx':
#                     with open('report/results/dataset/'+file.name, 'w') as dd:
#                         dd.write(dxdata)

#             fig_spectra.savefig(report_path_rel/"results/figures/spectra_plot.png", dpi = 400) ## Export report

#             if len(axis) == 3:
#                 for i in range(len(comb)):
#                     fig_export[f'scores_pc{comb[i][0]}_pc{comb[i][1]}'].write_image(report_path_rel/f'results/figures/scores_pc{str(comb[i][0]+1)}_pc{str(comb[i][1]+1)}.png')
#             elif len(axis)==2 :
#                 fig_export['fig'].write_image(report_path_rel/'results/figures/scores_plot2D.png')
#             elif len(axis)==1 :
#                 fig_export['fig'].write_image(report_path_rel/'results/figures/scores_plot1D.png')
                    
#             # Export du graphique
#             if dim_red_method in ['PCA','NMF']:
#                 import plotly.io as pio
#                 img = pio.to_image(loadingsplot, format="png")
#                 with open(report_path_rel/"results/figures/loadings_plot.png", "wb") as f:
#                     f.write(img)
#             if dim_red_method == 'PCA': 
#                 hotelling_plot.write_image(report_path_rel/"results/figures/hotelling_plot.png", format="png")
#                 influence_plot.write_image(report_path_rel/'results/figures/influence_plot.png', engine = 'kaleido')
            
#             sam.to_csv(report_path_rel/'results/Selected_subset_for_calib_development.csv', sep = ';')
#             export_report(change = hash_)
#             if Path(report_path_rel/"report.tex").exists():
#                 report.generate_report(change = hash_)
#             if Path(report_path_rel/"report.pdf").exists():
#                 move(report_path_rel/"report.pdf", "./report/results/report.pdf")
#             return change


#         preparing_results_for_downloading(change = hash_)
#         report.generate_report(change = hash_)

        

#         @st.cache_data
#         def tempdir(change):
#             from tempfile import TemporaryDirectory
#             with  TemporaryDirectory( prefix="results", dir="./report") as temp_dir:# create a temp directory
#                 tempdirname = os.path.split(temp_dir)[1]

#                 if len(os.listdir(report_path_rel/'results/figures/'))>=2:
                    
#                     make_archive(base_name= report_path_rel/"Results", format="zip", base_dir="results", root_dir = "./report")# create a zip file
#                     move(report_path_rel/"Results.zip", f"./report/{tempdirname}/Results.zip")# put the inside the temp dir
#                     with open(report_path_rel/f"{tempdirname}/Results.zip", "rb") as f:
#                         zip_data = f.read()
#             return tempdirname, zip_data
        
#         try :
#             tempdirname, zip_data = tempdir(change = hash_)
#             # st.download_button(label = 'Download', data = zip_data, file_name = f'Nirs_Workflow_{date_time}_SamSel_.zip', mime ="application/zip",
#             #             args = None, kwargs = None,type = "primary",use_container_width = True)
#         except:
#             pass
#     date_time = datetime.now().strftime('%y%m%d%H%M')
#     disabled_down = True if zip_data == '' else False
#     st.download_button(label = 'Download', data = zip_data, file_name = f'Nirs_Workflow_{date_time}_SamSel_.zip', mime ="application/zip",
#                 args = None, kwargs = None,type = "primary",use_container_width = True, disabled = disabled_down)


#     HandleItems.delete_files(keep = ['.py', '.pyc','.bib'])