 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
import streamlit as st
import requests
import pymupdf  # PyMuPDF for PDF extraction
import traceback
from sentence_transformers import SentenceTransformer
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_groq import ChatGroq

# Load API keys from Streamlit secrets (raises KeyError at startup if missing,
# which Streamlit surfaces in the app — fail fast on misconfiguration).
ALPHA_VANTAGE_API_KEY = st.secrets["ALPHA_VANTAGE_API_KEY"]
GROQ_API_KEY = st.secrets["GROQ_API_KEY"]

# Embedding model for text embeddings.
# NOTE(review): not referenced anywhere else in this file — confirm it is
# used by other modules before removing.
embedding_model = SentenceTransformer("all-MiniLM-L6-v2")

# Initialize the Groq-hosted LLM. `llm` is pre-bound to None so that a failed
# initialization still leaves the name defined: generate_response() then hits
# a caught AttributeError instead of an unhandled NameError.
llm = None
try:
    llm = ChatGroq(temperature=0, model="llama3-70b-8192", api_key=GROQ_API_KEY)
    st.success("βœ… Groq LLM initialized successfully.")
except Exception:
    st.error("❌ Failed to initialize Groq LLM.")
    traceback.print_exc()

# Function to extract and chunk text from PDFs
def extract_text_from_pdf(uploaded_file, max_length=5000):
    """Extract all text from an uploaded PDF and split it into chunks.

    Args:
        uploaded_file: File-like object (e.g. a Streamlit UploadedFile) whose
            ``.read()`` returns the raw PDF bytes.
        max_length: Maximum characters per chunk, kept below the LLM's
            context limit.

    Returns:
        list[str]: Overlapping text chunks, or a one-element error-marker
        list if extraction fails (the caller treats both uniformly).
    """
    try:
        # Use the document as a context manager so the handle is closed even
        # if a page fails to render (the original leaked the open document).
        with pymupdf.open(stream=uploaded_file.read(), filetype="pdf") as doc:
            full_text = "".join(page.get_text() for page in doc)

        # Split text into overlapping chunks to avoid LLM token limits.
        text_splitter = RecursiveCharacterTextSplitter(
            chunk_size=max_length, chunk_overlap=200
        )
        return text_splitter.split_text(full_text)
    except Exception:
        st.error("❌ Failed to extract text from PDF.")
        traceback.print_exc()
        return ["Error extracting text."]

# Function to fetch financial data from Alpha Vantage
def fetch_financial_data(company_ticker):
    """Fetch market cap and latest annual revenue for a ticker.

    Queries the Alpha Vantage OVERVIEW and INCOME_STATEMENT endpoints.

    Args:
        company_ticker: Stock ticker symbol (e.g. "AAPL"). Falsy values are
            rejected up front.

    Returns:
        str: A human-readable summary ("Market Cap: ... / Total Revenue: ...")
        on success, or an error message string on any failure (this function
        never raises to the caller).
    """
    if not company_ticker:
        return "No ticker symbol provided. Please enter a valid company ticker."

    base_url = "https://www.alphavantage.co/query"
    try:
        # Fetch Market Cap from Company Overview. `params=` URL-encodes the
        # ticker; `timeout` keeps a stalled API from hanging the app worker.
        overview_response = requests.get(
            base_url,
            params={
                "function": "OVERVIEW",
                "symbol": company_ticker,
                "apikey": ALPHA_VANTAGE_API_KEY,
            },
            timeout=15,
        )

        if overview_response.status_code == 200:
            overview_data = overview_response.json()
            market_cap = overview_data.get("MarketCapitalization", "N/A")
        else:
            st.error(f"❌ Failed to fetch company overview. Status Code: {overview_response.status_code}")
            return "Error fetching company overview."

        # Fetch Revenue from Income Statement (most recent annual report).
        income_response = requests.get(
            base_url,
            params={
                "function": "INCOME_STATEMENT",
                "symbol": company_ticker,
                "apikey": ALPHA_VANTAGE_API_KEY,
            },
            timeout=15,
        )

        if income_response.status_code == 200:
            income_data = income_response.json()
            annual_reports = income_data.get("annualReports", [])
            # annualReports is newest-first; missing/empty list degrades to N/A.
            revenue = annual_reports[0].get("totalRevenue", "N/A") if annual_reports else "N/A"
        else:
            st.error(f"❌ Failed to fetch income statement. Status Code: {income_response.status_code}")
            return "Error fetching income statement."

        return f"Market Cap: ${market_cap}\nTotal Revenue: ${revenue}"

    except Exception:
        # Covers network errors, timeouts, and malformed JSON.
        st.error("❌ Exception in fetching financial data.")
        traceback.print_exc()
        return "Error fetching financial data."

# Function to generate response using Groq's LLM
def generate_response(user_query, company_ticker, mode, uploaded_file):
    """Build a mode-specific prompt and return the LLM's answer.

    In "PDF Upload Mode" the prompt summarizes the first few extracted
    chunks of the uploaded document; in "Live Data Mode" it combines live
    Alpha Vantage figures with the user's query. Any other mode string is
    rejected. Errors are reported via Streamlit and a fallback string —
    this function never raises to the caller.
    """
    try:
        if mode == "PDF Upload Mode":
            document_chunks = extract_text_from_pdf(uploaded_file)
            excerpt = "\n\n".join(document_chunks[:3])  # first few chunks only
            prompt = (
                "Summarize the key financial insights from this document:"
                f"\n\n{excerpt}"
            )
        elif mode == "Live Data Mode":
            financial_info = fetch_financial_data(company_ticker)
            prompt = (
                f"Analyze the financial status of {company_ticker} based on:"
                f"\n{financial_info}\n\nUser Query: {user_query}"
            )
        else:
            return "Invalid mode selected."

        return llm.invoke(prompt).content
    except Exception:
        st.error("❌ Failed to generate AI response.")
        traceback.print_exc()
        return "Error generating response."

# Streamlit UI — top-level script body; Streamlit re-runs this on every
# widget interaction.
st.title("πŸ“Š AI-Powered Financial Insights Chatbot")
st.write("Upload financial reports or fetch live financial data to get AI-driven insights.")

# User Input Fields
# All widgets render unconditionally; `uploaded_file` is consumed only in
# "PDF Upload Mode" and `company_ticker` only in "Live Data Mode".
user_query = st.text_input("Enter your query:")
company_ticker = st.text_input("Enter company ticker symbol (optional):")
mode = st.radio("Select Mode:", ["PDF Upload Mode", "Live Data Mode"])
uploaded_file = st.file_uploader("Upload PDF (Only for PDF Mode)", type=["pdf"])

# Button to process request
# Guard: PDF mode requires an upload. Live Data mode proceeds even with an
# empty ticker — fetch_financial_data reports that case with a message.
if st.button("Get Insights"):
    if mode == "PDF Upload Mode" and not uploaded_file:
        st.error("❌ Please upload a PDF file.")
    else:
        with st.spinner("Processing... ⏳"):
            response = generate_response(user_query, company_ticker, mode, uploaded_file)
            st.subheader("πŸ’‘ AI Response")
            st.write(response)