# NOTE: the original paste began with "Spaces: / Sleeping / Sleeping" —
# Hugging Face Spaces page-status residue, not program code; preserved
# here as a comment so the file parses.
import os
import warnings

import gradio as gr
from dotenv import load_dotenv
from langchain.chains import ConversationalRetrievalChain
from langchain_community.vectorstores import FAISS
from langchain_openai import ChatOpenAI
from langchain_community.embeddings import OpenAIEmbeddings

# Work around a Gradio client bug where complex JSON schemas crash
# json_schema_to_python_type. Treating every schema as "string" is safe here
# because this app never relies on the generated API type hints.
import gradio_client.utils
gradio_client.utils.json_schema_to_python_type = lambda schema, defs=None: "string"

# Load environment variables from a local .env file, if present.
load_dotenv()
OPENAI_API_KEY = os.getenv("OPENAI_API_KEY_P")  # or os.getenv("OPENAI_API_KEY_ROBO")
if not OPENAI_API_KEY:
    raise ValueError("Missing OPENAI_API_KEY. Please set it in your environment variables.")

# Silence library warnings (e.g. LangChain deprecation notices) in the app logs.
warnings.filterwarnings("ignore")
# Initialize embedding model | |
embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY) | |
# Load FAISS vector store | |
vectorstore = FAISS.load_local( | |
"faiss_index_sysml", embeddings, allow_dangerous_deserialization=True | |
) | |
# Load ChatOpenAI model | |
llm = ChatOpenAI( | |
model_name="gpt-4", | |
temperature=0.5, | |
openai_api_key=OPENAI_API_KEY | |
) | |
# Build conversational chain with history | |
qa = ConversationalRetrievalChain.from_llm( | |
llm=llm, | |
retriever=vectorstore.as_retriever(), | |
return_source_documents=False | |
) | |
# Initial (empty) conversation history; gr.State copies this per user session.
history = []


def sysml_chatbot(message, history):
    """Answer one user turn via the retrieval chain and update the chat log.

    Args:
        message: The user's new question.
        history: List of (question, answer) tuples for this session,
            supplied by gr.State and mutated in place.

    Returns:
        A ("", history) pair: the empty string clears the input textbox and
        the updated history refreshes the Chatbot display.
    """
    result = qa({"question": message, "chat_history": history})
    answer = result["answer"]
    history.append((message, answer))
    return "", history
# Gradio UI | |
with gr.Blocks() as demo: | |
gr.Markdown("## SysML Chatbot") | |
chatbot = gr.Chatbot() | |
msg = gr.Textbox(placeholder="Ask me about SysML diagrams or concepts...") | |
clear = gr.Button("Clear") | |
state = gr.State(history) | |
msg.submit(sysml_chatbot, [msg, state], [msg, chatbot]) | |
clear.click(lambda: ([], ""), None, [chatbot, msg]) | |
if __name__ == "__main__": | |
demo.launch() | |