Update func.py
func.py
CHANGED
@@ -0,0 +1,84 @@
import streamlit as st
import requests
from bs4 import BeautifulSoup
from transformers import pipeline, AutoTokenizer, AutoModelForSequenceClassification, AutoModelForTokenClassification

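# Shared helpers for the Streamlit stock-news app: scrape Finviz headlines,
# score each one with the sentiment pipeline, and pull ORG entities from titles.
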
# ---------------- Model Setup ----------------
@st.cache_resource
def load_sentiment_model():
    model_id = "LinkLinkWu/Stock_Analysis_Test_Ahamed"
    tokenizer = AutoTokenizer.from_pretrained(model_id)
    model = AutoModelForSequenceClassification.from_pretrained(model_id)
    return pipeline("sentiment-analysis", model=model, tokenizer=tokenizer)

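# dslim/bert-base-NER is a token-classification model; grouped_entities=True
# merges word-piece tokens back into full entity spans (newer transformers
# releases spell this aggregation_strategy="simple").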
@st.cache_resource
def load_ner_model():
    tokenizer = AutoTokenizer.from_pretrained("dslim/bert-base-NER")
    model = AutoModelForTokenClassification.from_pretrained("dslim/bert-base-NER")
    return pipeline("ner", model=model, tokenizer=tokenizer, grouped_entities=True)

sentiment_pipeline = load_sentiment_model()
ner_pipeline = load_ner_model()

+
# ---------------- Helper Functions ----------------
|
24 |
+
def fetch_news(ticker):
|
25 |
+
try:
|
26 |
+
url = f"https://finviz.com/quote.ashx?t={ticker}"
|
27 |
+
headers = {
|
28 |
+
'User-Agent': 'Mozilla/5.0',
|
29 |
+
'Accept': 'text/html',
|
30 |
+
'Accept-Language': 'en-US,en;q=0.5',
|
31 |
+
'Referer': 'https://finviz.com/',
|
32 |
+
'Connection': 'keep-alive',
|
33 |
+
}
|
34 |
+
response = requests.get(url, headers=headers)
|
35 |
+
if response.status_code != 200:
|
36 |
+
st.error(f"Failed to fetch page for {ticker}: Status code {response.status_code}")
|
37 |
+
return []
|
38 |
+
|
39 |
+
soup = BeautifulSoup(response.text, 'html.parser')
|
40 |
+
title = soup.title.text if soup.title else ""
|
41 |
+
if ticker not in title:
|
42 |
+
st.error(f"Page for {ticker} not found or access denied.")
|
43 |
+
return []
|
44 |
+
|
45 |
+
news_table = soup.find(id='news-table')
|
46 |
+
if news_table is None:
|
47 |
+
st.error(f"News table not found for {ticker}. The website structure might have changed.")
|
48 |
+
return []
|
49 |
+
|
50 |
+
news = []
|
51 |
+
for row in news_table.findAll('tr')[:50]:
|
52 |
+
a_tag = row.find('a')
|
53 |
+
if a_tag:
|
54 |
+
title = a_tag.get_text()
|
55 |
+
link = a_tag['href']
|
56 |
+
news.append({'title': title, 'link': link})
|
57 |
+
return news
|
58 |
+
except Exception as e:
|
59 |
+
st.error(f"Failed to fetch news for {ticker}: {e}")
|
60 |
+
return []
|
61 |
+
|
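# Binary mapping: any label other than POSITIVE (including NEUTRAL, if the
# model emits one) is reported as "Negative".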
def analyze_sentiment(text):
    try:
        result = sentiment_pipeline(text)[0]
        return "Positive" if result['label'] == 'POSITIVE' else "Negative"
    except Exception as e:
        st.error(f"Sentiment analysis failed: {e}")
        return "Unknown"

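# Collects up to five distinct ORG entities, upper-cased for de-duplication.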
def extract_org_entities(text):
    try:
        entities = ner_pipeline(text)
        org_entities = []
        for ent in entities:
            if ent["entity_group"] == "ORG":
                clean_word = ent["word"].replace("##", "").strip()
                if clean_word.upper() not in org_entities:
                    org_entities.append(clean_word.upper())
                if len(org_entities) >= 5:
                    break
        return org_entities
    except Exception as e:
        st.error(f"NER entity extraction failed: {e}")
        return []
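
For reference, a minimal sketch of how these helpers might be wired into a Streamlit page. The `func` module name, the page layout, and the ten-headline cap are illustrative assumptions, not part of this commit:

# app_sketch.py -- hypothetical caller; assumes func.py sits alongside it
import streamlit as st

from func import fetch_news, analyze_sentiment, extract_org_entities

st.title("Stock News Sentiment")  # page title is an assumption
ticker = st.text_input("Ticker", value="AAPL").strip().upper()

if st.button("Analyze") and ticker:
    for article in fetch_news(ticker)[:10]:  # each item is {'title': ..., 'link': ...}
        sentiment = analyze_sentiment(article["title"])
        orgs = extract_org_entities(article["title"])
        st.write(f"[{sentiment}] {article['title']}")
        st.caption(article["link"] + (" | " + ", ".join(orgs) if orgs else ""))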