# --- Commented-out earlier version (single-model classifier with a query-parameter API); the active app follows below. ---
# import streamlit as st
# import torch
# from transformers import AutoTokenizer, AutoModelForSequenceClassification
# import json
#
# # Load the model and tokenizer
# @st.cache_resource
# def load_model():
#     tokenizer = AutoTokenizer.from_pretrained('microsoft/deberta-v3-small', use_fast=False)
#     model = AutoModelForSequenceClassification.from_pretrained("./results/checkpoint-753")
#     model.eval()
#     return tokenizer, model
#
# def predict_news(text, tokenizer, model):
#     inputs = tokenizer(text, return_tensors="pt", truncation=True, padding=True, max_length=512)
#     with torch.no_grad():
#         outputs = model(**inputs)
#     probabilities = torch.nn.functional.softmax(outputs.logits, dim=-1)
#     predicted_label = torch.argmax(probabilities, dim=-1).item()
#     confidence = probabilities[0][predicted_label].item()
#     return "FAKE" if predicted_label == 1 else "REAL", confidence
#
# # Streamlit UI
# st.title("News Classifier API")
#
# # If running as an API, get the request from query parameters
# query_params = st.query_params
# if "text" in query_params:
#     text_input = query_params["text"]  # Get text input from URL query (st.query_params returns the value as a string)
#     tokenizer, model = load_model()
#     prediction, confidence = predict_news(text_input, tokenizer, model)
#     # Return JSON response
#     st.json({"prediction": prediction, "confidence": confidence})
# # If running in UI mode, show text input
# else:
#     text_input = st.text_area("Enter news text:")
#     if st.button("Classify"):
#         tokenizer, model = load_model()
#         prediction, confidence = predict_news(text_input, tokenizer, model)
#         st.write(f"Prediction: {prediction} (Confidence: {confidence*100:.2f}%)")
import streamlit as st
from final import *
import pandas as pd
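
# NOTE: `from final import *` is assumed to provide the project-specific helpers
# used throughout this file (they live in final.py, which is not shown here).
# Signatures below are inferred from how the functions are called in this app:
#
#   load_models()                                   -> (nlp, tokenizer, model)
#   load_knowledge_graph()                          -> knowledge_graph
#   predict_with_model(text, tokenizer, model)      -> (prediction, confidence)
#   predict_with_knowledge_graph(text, kg, nlp)     -> (prediction, confidence)
#   update_knowledge_graph(text, is_real, kg, nlp)  -> (return value unused here)
#   setup_gemini()                                  -> gemini_model
#   analyze_content_gemini(gemini_model, text)      -> dict with a "gemini_analysis"
#                                                      entry holding "predicted_classification"
#                                                      and "confidence_score"
#   extract_entities(text, nlp)                     -> iterable of (entity, type) pairs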
# Page configuration
st.set_page_config(
    page_title="Nexus NLP News Classifier",
    page_icon="📰",
    layout="wide"
)
# Cache model loading
@st.cache_resource
def initialize_models():
    nlp, tokenizer, model = load_models()
    knowledge_graph = load_knowledge_graph()
    return nlp, tokenizer, model, knowledge_graph
# Initialize all models
nlp, tokenizer, model, knowledge_graph = initialize_models()
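# Because initialize_models() is wrapped in @st.cache_resource, the heavyweight
# objects are created once per server process and reused across reruns and user
# sessions, rather than being reloaded on every widget interaction.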
# Streamlit UI
def main():
    st.title("📰 Nexus NLP News Classifier")
    st.write("Enter news text below to analyze its authenticity")

    # Text input area
    news_text = st.text_area("News Text", height=200)

    if st.button("Analyze"):
        if news_text:
            with st.spinner("Analyzing..."):
                # Get predictions from all models
                ml_prediction, ml_confidence = predict_with_model(news_text, tokenizer, model)
                kg_prediction, kg_confidence = predict_with_knowledge_graph(news_text, knowledge_graph, nlp)
                # Update knowledge graph
                update_knowledge_graph(news_text, ml_prediction == "REAL", knowledge_graph, nlp)

                # Get Gemini analysis
                gemini_model = setup_gemini()
                gemini_result = analyze_content_gemini(gemini_model, news_text)

                # Display results in columns
                col1, col2, col3 = st.columns(3)

                with col1:
                    st.subheader("ML Model Analysis")
                    st.metric("Prediction", ml_prediction)
                    st.metric("Confidence", f"{ml_confidence:.2f}%")

                with col2:
                    st.subheader("Knowledge Graph Analysis")
                    st.metric("Prediction", kg_prediction)
                    st.metric("Confidence", f"{kg_confidence:.2f}%")

                with col3:
                    st.subheader("Gemini Analysis")
                    gemini_pred = gemini_result["gemini_analysis"]["predicted_classification"]
                    gemini_conf = gemini_result["gemini_analysis"]["confidence_score"]
                    st.metric("Prediction", gemini_pred)
                    st.metric("Confidence", f"{gemini_conf}%")

                # Detailed analysis sections
                with st.expander("View Detailed Analysis"):
                    st.json(gemini_result)

                with st.expander("Named Entities"):
                    entities = extract_entities(news_text, nlp)
                    df = pd.DataFrame(entities, columns=["Entity", "Type"])
                    st.dataframe(df)
        else:
            st.warning("Please enter some text to analyze")


if __name__ == "__main__":
    main()
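
# ---------------------------------------------------------------------------
# Running locally (a sketch, assuming final.py and its dependencies -- e.g.
# spaCy, transformers/torch, and whichever Gemini client it uses -- are
# installed alongside this file):
#
#   pip install streamlit pandas
#   streamlit run app.py
#
# Streamlit serves the UI at http://localhost:8501 by default.
# ---------------------------------------------------------------------------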