# DataScribe: AI-powered information extraction Streamlit app.
import streamlit as st
from streamlit_option_menu import option_menu
import pandas as pd
import os
from google.oauth2 import service_account
from googleapiclient.discovery import build
from streamlit_chat import message as st_message
from langchain.schema import SystemMessage
from langchain_groq import ChatGroq
from dotenv import load_dotenv
import warnings
# Silence deprecation noise emitted by the LangChain/Streamlit stack.
warnings.filterwarnings("ignore", category=DeprecationWarning)

# Pull configuration (e.g. GROQ_API_KEY) from a local .env file into the environment.
load_dotenv()
GROQ_API_KEY = os.getenv("GROQ_API_KEY")

# Single shared chat-model client used for every extraction call.
# NOTE(review): the key is not passed explicitly — presumably ChatGroq reads
# GROQ_API_KEY from the environment; confirm against the langchain_groq docs.
_MODEL_NAME = "llama-3.1-70b-versatile"
llm = ChatGroq(model=_MODEL_NAME)

# System prompt sent to the model; {entity} and {query} are filled per request.
PROMPT_TEMPLATE = """
You are an expert information extraction assistant designed to obtain specific details from the web and external sources.
You’ll be provided with an entity name and a query that specifies the type of information needed about that entity.
Please follow the instructions carefully and return only the most relevant, accurate information.
#### Entity Name: {entity}
#### Query: {query}
Instructions:
1. Extract the information directly related to the entity.
2. If available, include only verified, publicly accessible data.
3. Provide information in a single sentence or a short, structured response.
4. If the requested information isn’t available or verifiable, respond with "Information not available."
Begin extraction.
"""
def get_llm_response(entity, query):
    """Ask the LLM to extract the information in `query` about `entity`.

    Formats PROMPT_TEMPLATE with both values, sends it as a single system
    message, and returns the model's text answer, or the sentinel string
    "Information not available" when the model returns nothing.
    """
    formatted_prompt = PROMPT_TEMPLATE.format(entity=entity, query=query)
    # BUG FIX: `invoke` on a chat model returns ONE message object, not a
    # list — the original `response[0].content` raised at runtime.
    response = llm.invoke([SystemMessage(content=formatted_prompt)])
    return response.content if response else "Information not available"
# Streamlit page setup
st.set_page_config(
    page_title="DataScribe",
    page_icon=":notebook_with_decorative_cover:",
    layout="wide",
)

# Sidebar navigation menu; `selected` drives which section renders below.
with st.sidebar:
    selected = option_menu(
        "DataScribe Menu",
        ["Home", "Upload Data", "Define Query", "Extract Information", "View & Download"],
        icons=["house", "cloud-upload", "gear", "search", "table"],
        menu_icon="cast",
        default_index=0,
    )

# Main header
st.title("DataScribe: AI-Powered Information Extractor")

# Seed session-state slots so later sections can safely test `is not None`.
for _slot in ("data", "results", "column_selection"):
    if _slot not in st.session_state:
        st.session_state[_slot] = None
def get_google_sheet_data(sheet_id, range_name):
    """Fetch `range_name` from the Google Sheet `sheet_id` as a DataFrame.

    BUG FIX: this function was called below but never defined anywhere in the
    file (the google-api imports at the top were otherwise unused), so the
    "Fetch Data" button raised a NameError.

    Authenticates with a service-account credentials file
    ("service_account.json" in the working directory — TODO confirm the path
    for your deployment) and treats the first row of the range as the header.
    Returns an empty DataFrame when the range holds no values.
    """
    creds = service_account.Credentials.from_service_account_file(
        "service_account.json",
        scopes=["https://www.googleapis.com/auth/spreadsheets.readonly"],
    )
    service = build("sheets", "v4", credentials=creds)
    values = (
        service.spreadsheets()
        .values()
        .get(spreadsheetId=sheet_id, range=range_name)
        .execute()
        .get("values", [])
    )
    if not values:
        return pd.DataFrame()
    return pd.DataFrame(values[1:], columns=values[0])


# Upload Data Section
if selected == "Upload Data":
    st.header("Upload or Connect Your Data")
    data_source = st.radio("Choose data source:", ["CSV File", "Google Sheets"])
    if data_source == "CSV File":
        uploaded_file = st.file_uploader("Upload your CSV file", type=["csv"])
        if uploaded_file:
            st.session_state["data"] = pd.read_csv(uploaded_file)
            st.write("### Preview of Uploaded Data")
            st.dataframe(st.session_state["data"].head())
    elif data_source == "Google Sheets":
        sheet_id = st.text_input("Enter Google Sheet ID")
        range_name = st.text_input("Enter the data range (e.g., Sheet1!A1:C100)")
        if st.button("Fetch Data"):
            if sheet_id and range_name:
                # Surface auth/API failures in the UI instead of a raw traceback.
                try:
                    st.session_state["data"] = get_google_sheet_data(sheet_id, range_name)
                except Exception as exc:
                    st.error(f"Failed to fetch Google Sheets data: {exc}")
                else:
                    st.write("### Preview of Google Sheets Data")
                    st.dataframe(st.session_state["data"].head())
            else:
                st.warning("Please enter both the Google Sheet ID and range.")
# Define Query Section
elif selected == "Define Query":
st.header("Define Your Custom Query")
if st.session_state["data"] is not None:
column_selection = st.selectbox("Select the primary column for entities", options=st.session_state["data"].columns)
query_template = st.text_input("Define your query template", "Get me the email for {company}")
st.session_state["query_template"] = query_template
st.session_state["column_selection"] = column_selection
st.write("### Example query preview")
if column_selection:
sample_entity = str(st.session_state["data"][column_selection].iloc[0])
example_query = query_template.replace("{company}", sample_entity)
st.code(example_query)
else:
st.warning("Please upload data first.")
# Extract Information Section with Progress Bar
elif selected == "Extract Information":
st.header("Extract Information")
if st.session_state.get("query_template") and st.session_state["data"] is not None and st.session_state["column_selection"] is not None:
st.write("Data extraction is in progress. This may take a few moments.")
progress_bar = st.progress(0)
column_selection = st.session_state["column_selection"]
progress_step = 1.0 / len(st.session_state["data"][column_selection])
results = []
for i, entity in enumerate(st.session_state["data"][column_selection]):
user_message = st.session_state["query_template"].replace("{company}", str(entity))
result_text = get_llm_response(entity, user_message)
results.append({"Entity": entity, "Extracted Information": result_text})
progress_bar.progress((i + 1) * progress_step)
st.session_state["results"] = pd.DataFrame(results)
st.write("### Extracted Information")
st.dataframe(st.session_state["results"])
# View & Download Section
elif selected == "View & Download":
st.header("View and Download Results")
if st.session_state["results"] is not None:
st.write("### Extracted Data Table")
st.dataframe(st.session_state["results"])
csv_data = st.session_state["results"].to_csv(index=False)
st.download_button("Download as CSV", csv_data, "datascribe_results.csv", "text/csv")
else:
st.warning("No data available to view or download.")
|