import os

# Set this before importing TensorFlow, otherwise the oneDNN notice is not suppressed
os.environ['TF_ENABLE_ONEDNN_OPTS'] = '0'

import gradio as gr
import pandas as pd
import numpy as np
import pickle
import json
import tensorflow as tf
from tensorflow.keras.models import model_from_json
import plotly.graph_objects as go
from plotly.subplots import make_subplots
# Load model artifacts
def load_model_artifacts():
    try:
        with open('model_architecture.json', 'r') as json_file:
            model_json = json_file.read()
        model = model_from_json(model_json)
        model.load_weights('final_model.h5')
        with open('scaler.pkl', 'rb') as f:
            scaler = pickle.load(f)
        with open('metadata.json', 'r') as f:
            metadata = json.load(f)
        return model, scaler, metadata
    except Exception as e:
        raise Exception(f"Error loading model artifacts: {str(e)}")
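
# NOTE: the artifact paths above are relative, so model_architecture.json,
# final_model.h5, scaler.pkl and metadata.json are assumed to sit in the same
# directory as app.py.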
# Initialize model components
try:
    model, scaler, metadata = load_model_artifacts()
    # Use only two features for prediction
    feature_names = ['Feature_1', 'Feature_2']
    print(f"✅ Model loaded successfully with features: {feature_names}")
except Exception as e:
    print(f"❌ Error loading model: {e}")
    model, scaler, metadata = None, None, {}
    feature_names = ['Feature_1', 'Feature_2']
def predict_student_eligibility(*args):
    try:
        if model is None or scaler is None:
            return "Model not loaded", "N/A", "N/A", create_error_plot()
        input_data = {feature_names[i]: args[i] for i in range(len(feature_names))}
        input_df = pd.DataFrame([input_data])
        input_scaled = scaler.transform(input_df)
        # Reshape to (samples, features, 1): the saved network expects 3-D input
        input_reshaped = input_scaled.reshape(input_scaled.shape[0], input_scaled.shape[1], 1)
        probability = float(model.predict(input_reshaped)[0][0])
        prediction = "Eligible" if probability > 0.5 else "Not Eligible"
        confidence = abs(probability - 0.5) * 2
        fig = create_prediction_viz(probability, prediction, input_data)
        return prediction, f"{probability:.4f}", f"{confidence:.4f}", fig
    except Exception as e:
        return f"Error: {str(e)}", "N/A", "N/A", create_error_plot()
def create_error_plot():
    fig = go.Figure()
    fig.add_annotation(
        text="Model not available or error occurred",
        xref="paper", yref="paper",
        x=0.5, y=0.5, xanchor='center', yanchor='middle',
        showarrow=False, font=dict(size=20)
    )
    fig.update_layout(
        xaxis={'visible': False},
        yaxis={'visible': False},
        height=400
    )
    return fig
def create_prediction_viz(probability, prediction, input_data):
    try:
        fig = make_subplots(
            rows=2, cols=2,
            subplot_titles=('Prediction Probability', 'Confidence Meter', 'Input Features', 'Probability Distribution'),
            specs=[[{"type": "indicator"}, {"type": "indicator"}],
                   [{"type": "bar"}, {"type": "scatter"}]]
        )
        fig.add_trace(
            go.Indicator(
                mode="gauge+number",
                value=probability,
                title={'text': "Eligibility Probability"},
                gauge={
                    'axis': {'range': [None, 1]},
                    'bar': {'color': "darkblue"},
                    'steps': [
                        {'range': [0, 0.5], 'color': "lightcoral"},
                        {'range': [0.5, 1], 'color': "lightgreen"}
                    ],
                    'threshold': {
                        'line': {'color': "red", 'width': 4},
                        'thickness': 0.75,
                        'value': 0.5
                    }
                }
            ), row=1, col=1
        )
        confidence = abs(probability - 0.5) * 2
        fig.add_trace(
            go.Indicator(
                mode="gauge+number",
                value=confidence,
                title={'text': "Prediction Confidence"},
                gauge={
                    'axis': {'range': [None, 1]},
                    'bar': {'color': "orange"},
                    'steps': [
                        {'range': [0, 0.3], 'color': "lightcoral"},
                        {'range': [0.3, 0.7], 'color': "lightyellow"},
                        {'range': [0.7, 1], 'color': "lightgreen"}
                    ]
                }
            ), row=1, col=2
        )
        features = list(input_data.keys())
        values = list(input_data.values())
        fig.add_trace(go.Bar(x=features, y=values, name="Input Values", marker_color="skyblue"), row=2, col=1)
        fig.add_trace(
            go.Scatter(
                x=[0, 1], y=[probability, probability],
                mode='lines+markers',
                name="Probability",
                line=dict(color="red", width=3),
                marker=dict(size=10)
            ), row=2, col=2
        )
        fig.update_layout(
            height=800,
            showlegend=False,
            title_text="Student Eligibility Prediction Dashboard",
            title_x=0.5
        )
        return fig
    except Exception:
        return create_error_plot()
def batch_predict(file):
    try:
        if model is None or scaler is None:
            return "Model not loaded. Please check if all model files are uploaded.", None
        if file is None:
            return "Please upload a CSV file.", None
        df = pd.read_csv(file)
        missing_features = set(feature_names) - set(df.columns)
        if missing_features:
            return f"Missing features: {missing_features}", None
        df_features = df[feature_names]
        df_scaled = scaler.transform(df_features)
        df_reshaped = df_scaled.reshape(df_scaled.shape[0], df_scaled.shape[1], 1)
        probabilities = model.predict(df_reshaped).flatten()
        predictions = ["Eligible" if p > 0.5 else "Not Eligible" for p in probabilities]
        results_df = df_features.copy()
        results_df['Probability'] = probabilities
        results_df['Prediction'] = predictions
        results_df['Confidence'] = np.abs(probabilities - 0.5) * 2
        output_file = "batch_predictions.csv"
        results_df.to_csv(output_file, index=False)
        eligible_count = predictions.count('Eligible')
        not_eligible_count = predictions.count('Not Eligible')
        summary = f"""Batch Prediction Summary:
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
📊 Total predictions: {len(results_df)}
✅ Eligible: {eligible_count} ({eligible_count / len(predictions) * 100:.1f}%)
❌ Not Eligible: {not_eligible_count} ({not_eligible_count / len(predictions) * 100:.1f}%)
📈 Average Probability: {np.mean(probabilities):.4f}
🎯 Average Confidence: {np.mean(np.abs(probabilities - 0.5) * 2):.4f}
━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━━
Results saved to: {output_file}
"""
        return summary, output_file
    except Exception as e:
        return f"Error processing file: {str(e)}", None
# Gradio UI
demo = gr.Blocks(theme=gr.themes.Soft())
with demo:
    gr.Markdown("# 🎓 Student Eligibility Prediction")
    with gr.Tabs():
        with gr.Tab("Single Prediction"):
            inputs = [gr.Number(label=feature, value=75) for feature in feature_names]
            predict_btn = gr.Button("Predict")
            with gr.Row():
                prediction = gr.Textbox(label="Prediction")
                probability = gr.Textbox(label="Probability")
                confidence = gr.Textbox(label="Confidence")
            plot = gr.Plot()
            predict_btn.click(predict_student_eligibility, inputs=inputs, outputs=[prediction, probability, confidence, plot])
        with gr.Tab("Batch Prediction"):
            file_input = gr.File(label="Upload CSV", file_types=[".csv"], type="filepath")
            batch_btn = gr.Button("Process Batch")
            batch_output = gr.Textbox(label="Results")
            download = gr.File(label="Download")
            batch_btn.click(batch_predict, inputs=file_input, outputs=[batch_output, download])
# Launch app
demo.launch()