# Django view helper: serve and delete pre-trained model artifacts for a dataset.
import json
import os

import joblib
import pandas as pd
from dict_and_html import *
from django.shortcuts import HttpResponse

import base.pipeline as pipeline
from .. import methods
from ..methods import PIPELINE_PATH
|
def _style_tsne_figure(tsne):
    """Apply the standard "modern" styling to a Plotly t-SNE figure.

    Mutates the figure in place (legend, margins, axes, background, title,
    hover labels) and returns it for convenience.
    """
    tsne.update_layout(
        # Modern legend design
        legend=dict(
            x=0.9,
            y=0.95,
            xanchor="right",
            yanchor="top",
            bgcolor="rgba(255,255,255,0.8)",  # light semi-transparent white background
            bordercolor="rgba(0,0,0,0.1)",  # light border for contrast
            borderwidth=1,
            font=dict(size=12, color="#444"),  # subtle grey for legend text
        ),
        # Tight margins to focus on the plot
        margin=dict(l=10, r=10, t=30, b=10),
        # Axis design: minimalist and clean
        xaxis=dict(
            title_text="",  # no axis labels for a clean design
            tickfont=dict(size=10, color="#aaa"),  # light grey for tick labels
            showline=True,
            linecolor="rgba(0,0,0,0.2)",  # subtle line color for axis lines
            zeroline=False,  # no zero line for a sleek look
            showgrid=False,  # hide grid lines for a minimal appearance
            ticks="outside",  # small ticks outside the axis
            ticklen=3,  # short tick marks for subtlety
        ),
        yaxis=dict(
            title_text="",  # no axis labels
            tickfont=dict(size=10, color="#aaa"),
            showline=True,
            linecolor="rgba(0,0,0,0.2)",
            zeroline=False,
            showgrid=False,
            ticks="outside",
            ticklen=3,
        ),
        # Sleek background
        plot_bgcolor="#fafafa",  # very light grey background for a smooth finish
        paper_bgcolor="#ffffff",  # pure white paper background
        # Modern title with elegant style
        title=dict(
            text="t-SNE Visualization of Data",
            font=dict(
                size=16, color="#222", family="Helvetica, Arial, sans-serif"
            ),
            x=0.5,
            xanchor="center",
            yanchor="top",
            pad=dict(t=15),  # padding to separate the title from the plot
        ),
    )
    # Hover effects for a smooth user experience
    tsne.update_traces(
        hoverinfo="text+name",
        hoverlabel=dict(bgcolor="white", font_size=12, font_family="Arial"),
    )
    return tsne


def _pre_trained_context(request):
    """Build the response context for the "pre_trained" action.

    Loads the pickled plot/report artifacts for the model named in the POST
    body and returns a dict of HTML fragments.  Returns an empty dict when
    the POST body carries no "url" or the dataset type is unrecognised.
    """
    pre_trained_model_name = request.POST.get("pre_trained")
    request.session["model_name"] = pre_trained_model_name

    # Dataframe name; "upload" is an alias for the uploaded file's base name.
    df_name = request.session.get("df_name")
    if df_name == "upload":
        df_name = request.session.get("df_name_upload_base_name")

    # NOTE(review): paths are built by string concatenation, so PIPELINE_PATH
    # is assumed to end with a path separator — confirm before refactoring
    # these into multi-argument os.path.join calls.
    model_name_path = os.path.join(
        PIPELINE_PATH + f"{df_name}" + "/trained_models/" + pre_trained_model_name
    )

    # Look up the dataset type recorded for this dataframe.
    datasets_types_PipelineJSON_path = os.path.join(
        PIPELINE_PATH + "/dataset_types_pipeline.json"
    )
    datasets_types_pipeline = pipeline.PipelineJSON(
        datasets_types_PipelineJSON_path
    )
    dataset_type = datasets_types_pipeline.read_from_json([df_name])
    if isinstance(dataset_type, list):
        dataset_type = dataset_type[0]

    context = {}
    if "url" in request.POST:
        url = request.POST.get("url")
        if url == "counterfactuals":
            # The counterfactuals page only needs the styled t-SNE plot.
            tsne = _style_tsne_figure(joblib.load(model_name_path + "/tsne.sav"))
            context = {
                "tsne": tsne.to_html(),
            }
        else:
            # Load plots.
            pca = joblib.load(model_name_path + "/pca.sav")
            classification_report = joblib.load(
                model_name_path + "/classification_report.sav"
            )

            # Classifier metadata from the per-dataset pipeline.json,
            # flattened into a one-row dataframe for HTML rendering.
            json_path = os.path.join(PIPELINE_PATH, f"{df_name}" + "/pipeline.json")
            jsonFile = pipeline.PipelineJSON(json_path)
            classifier_data = jsonFile.read_from_json(
                ["classifier", pre_trained_model_name]
            )
            classifier_data_flattened = methods.flatten_dict(classifier_data)
            classifier_data_df = pd.DataFrame([classifier_data_flattened])

            if dataset_type == "tabular":
                feature_importance = joblib.load(
                    model_name_path + "/feature_importance.sav"
                )
                context = {
                    "dataset_type": dataset_type,
                    "pca": pca.to_html(),
                    "class_report": classification_report.to_html(),
                    "feature_importance": feature_importance.to_html(),
                    "classifier_data": classifier_data_df.to_html(),
                }
            elif dataset_type == "timeseries":
                tsne = joblib.load(model_name_path + "/tsne.sav")
                context = {
                    "dataset_type": dataset_type,
                    "pca": pca.to_html(),
                    "class_report": classification_report.to_html(),
                    "tsne": tsne.to_html(),
                    "classifier_data": classifier_data_df.to_html(),
                }
    return context


def _delete_pre_trained_context(request):
    """Handle the "delete_pre_trained" action.

    Deletes the model's artifacts (preprocessed csv, pipeline.json entry,
    model directory) and returns a context listing the models that remain
    for this dataset.
    """
    df_name = request.session["df_name"]
    model_name = request.POST.get("model_name")
    model_name_path = os.path.join(
        PIPELINE_PATH + f"{df_name}" + "/trained_models/" + model_name
    )

    print(model_name_path)

    # Best-effort removal of the preprocessed csv generated for this dataset:
    # a failed delete is logged, not fatal.
    excel_file_name_preprocessed_path = os.path.join(
        PIPELINE_PATH,
        f"{df_name}" + "/" + df_name + "_preprocessed" + ".csv",
    )
    try:
        if os.path.exists(excel_file_name_preprocessed_path):
            os.remove(excel_file_name_preprocessed_path)
        else:
            print(f"File '{excel_file_name_preprocessed_path}' does not exist.")
    except Exception as e:
        print(f"An error occurred while deleting the file: {e}")

    # Drop the classifier entry from pipeline.json, then remove the model dir.
    json_path = os.path.join(PIPELINE_PATH + f"{df_name}" + "/pipeline.json")
    jsonFile = pipeline.PipelineJSON(json_path)
    jsonFile.delete_key(["classifier", model_name])
    methods.remove_dir_and_empty_parent(model_name_path)

    if not jsonFile.key_exists("classifier"):
        # No pre-trained models remain.  If the dataset directory is gone
        # too, report no active dataframe.
        df_dir = os.path.join(PIPELINE_PATH + f"{df_name}")
        if not os.path.exists(df_dir):
            df_name = None
        return {
            "df_name": df_name,
            "available_pretrained_models_info": [],
        }

    # Models remain: list them from the "classifier" section.
    available_pretrained_models = jsonFile.read_from_json(["classifier"]).keys()
    available_pretrained_models_info = methods.create_tuple_of_models_text_value(
        available_pretrained_models
    )
    return {
        "df_name": df_name,
        "available_pretrained_models_info": available_pretrained_models_info,
    }


def handler(action, request):
    """Dispatch the pre-trained-model AJAX actions.

    Parameters
    ----------
    action : str
        Either "pre_trained" (render a model's plots/reports) or
        "delete_pre_trained" (remove a model's artifacts).
    request : django.http.HttpRequest
        Request whose POST body and session carry the model/dataset names.

    Returns
    -------
    HttpResponse
        JSON-encoded context dict, HTTP 200.
    """
    status = 200
    # Default to an empty context: the original raised NameError at
    # json.dumps() for unknown actions, POSTs without "url", or an
    # unrecognised dataset type; now those cases respond with "{}".
    context = {}
    if action == "pre_trained":
        context = _pre_trained_context(request)
    elif action == "delete_pre_trained":
        context = _delete_pre_trained_context(request)
    return HttpResponse(json.dumps(context), status=status)