SysModeler committed on
Commit
828f1ff
·
verified ·
1 Parent(s): 1e676ab

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +32 -39
app.py CHANGED
@@ -3,12 +3,12 @@ import warnings
3
  import gradio as gr
4
  from dotenv import load_dotenv
5
 
 
6
  from langchain_community.vectorstores import FAISS
7
- from langchain_community.embeddings import OpenAIEmbeddings, AzureOpenAIEmbeddings
8
- # from langchain_community.embeddings.openai import OpenAIEmbeddings
9
- from openai import AzureOpenAI
10
 
11
- # Patch Gradio bug (schema parsing issue)
12
  import gradio_client.utils
13
  gradio_client.utils.json_schema_to_python_type = lambda schema, defs=None: "string"
14
 
@@ -17,63 +17,56 @@ load_dotenv()
17
  AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
18
  AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
19
  AZURE_OPENAI_LLM_DEPLOYMENT = os.getenv("AZURE_OPENAI_LLM_DEPLOYMENT")
 
20
 
21
- embeddings = OpenAIEmbeddings(openai_api_key=AZURE_OPENAI_API_KEY)
22
-
23
- if not all([AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_LLM_DEPLOYMENT]):
24
- raise ValueError("Azure OpenAI environment variables are missing.")
25
 
26
  # Suppress warnings
27
  warnings.filterwarnings("ignore")
28
 
29
- # Initialize embedding model
30
- # embeddings = AzureOpenAIEmbeddings(
31
- # azure_deployment=OPENAI_EMBEDDING,
32
- # azure_endpoint=AZURE_OPENAI_ENDPOINT,
33
- # openai_api_key=AZURE_OPENAI_API_KEY,
34
- # openai_api_version="2024-08-01-preview",
35
- # chunk_size=1000
36
- # )
37
 
38
  # Load FAISS vector store
39
  vectorstore = FAISS.load_local(
40
  "faiss_index_sysml", embeddings, allow_dangerous_deserialization=True
41
  )
42
 
43
- # Initialize Azure OpenAI client directly
44
- client = AzureOpenAI(
45
- api_key=AZURE_OPENAI_API_KEY,
46
  azure_endpoint=AZURE_OPENAI_ENDPOINT,
47
- api_version="2024-08-01-preview"
 
 
 
 
 
 
 
 
 
48
  )
49
 
50
  history = []
51
 
52
- # Chatbot logic using AzureOpenAI directly
53
  def sysml_chatbot(message, history):
54
- # Perform retrieval
55
- retriever = vectorstore.as_retriever()
56
- docs = retriever.get_relevant_documents(message)
57
- context = "\n\n".join(doc.page_content for doc in docs[:4])
58
-
59
- # Compose prompt with retrieved context
60
- system_prompt = "You are a helpful assistant knowledgeable in SysML. Use the context below to answer the user's question.\n\nContext:\n" + context
61
-
62
- response = client.chat.completions.create(
63
- model=AZURE_OPENAI_LLM_DEPLOYMENT,
64
- messages=[
65
- {"role": "system", "content": system_prompt},
66
- {"role": "user", "content": message}
67
- ]
68
- )
69
-
70
- answer = response.choices[0].message.content
71
  history.append((message, answer))
72
  return "", history
73
 
74
  # Gradio UI
75
  with gr.Blocks() as demo:
76
- gr.Markdown("## SysML Chatbot")
77
  chatbot = gr.Chatbot()
78
  msg = gr.Textbox(placeholder="Ask me about SysML diagrams or concepts...")
79
  clear = gr.Button("Clear")
 
3
  import gradio as gr
4
  from dotenv import load_dotenv
5
 
6
+ from langchain.chains import ConversationalRetrievalChain
7
  from langchain_community.vectorstores import FAISS
8
+ from langchain_community.embeddings import AzureOpenAIEmbeddings
9
+ from langchain_community.chat_models import AzureChatOpenAI
 
10
 
11
+ # Patch Gradio bug
12
  import gradio_client.utils
13
  gradio_client.utils.json_schema_to_python_type = lambda schema, defs=None: "string"
14
 
 
17
  AZURE_OPENAI_API_KEY = os.getenv("AZURE_OPENAI_API_KEY")
18
  AZURE_OPENAI_ENDPOINT = os.getenv("AZURE_OPENAI_ENDPOINT")
19
  AZURE_OPENAI_LLM_DEPLOYMENT = os.getenv("AZURE_OPENAI_LLM_DEPLOYMENT")
20
+ AZURE_OPENAI_EMBEDDING_DEPLOYMENT = os.getenv("AZURE_OPENAI_EMBEDDING_DEPLOYMENT")
21
 
22
+ if not all([AZURE_OPENAI_API_KEY, AZURE_OPENAI_ENDPOINT, AZURE_OPENAI_LLM_DEPLOYMENT, AZURE_OPENAI_EMBEDDING_DEPLOYMENT]):
23
+ raise ValueError("Missing one or more Azure OpenAI environment variables.")
 
 
24
 
25
  # Suppress warnings
26
  warnings.filterwarnings("ignore")
27
 
28
+ # Initialize Azure embedding model
29
+ embeddings = AzureOpenAIEmbeddings(
30
+ azure_deployment=AZURE_OPENAI_EMBEDDING_DEPLOYMENT,
31
+ azure_endpoint=AZURE_OPENAI_ENDPOINT,
32
+ openai_api_key=AZURE_OPENAI_API_KEY,
33
+ openai_api_version="2024-08-01-preview",
34
+ chunk_size=1000
35
+ )
36
 
37
  # Load FAISS vector store
38
  vectorstore = FAISS.load_local(
39
  "faiss_index_sysml", embeddings, allow_dangerous_deserialization=True
40
  )
41
 
42
+ # Initialize Azure chat model
43
+ llm = AzureChatOpenAI(
44
+ deployment_name=AZURE_OPENAI_LLM_DEPLOYMENT,
45
  azure_endpoint=AZURE_OPENAI_ENDPOINT,
46
+ openai_api_key=AZURE_OPENAI_API_KEY,
47
+ openai_api_version="2024-08-01-preview",
48
+ temperature=0.5
49
+ )
50
+
51
+ # Build conversational RAG chain
52
+ qa = ConversationalRetrievalChain.from_llm(
53
+ llm=llm,
54
+ retriever=vectorstore.as_retriever(),
55
+ return_source_documents=False
56
  )
57
 
58
  history = []
59
 
60
+ # Chatbot logic
61
  def sysml_chatbot(message, history):
62
+ result = qa({"question": message, "chat_history": history})
63
+ answer = result["answer"]
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
64
  history.append((message, answer))
65
  return "", history
66
 
67
  # Gradio UI
68
  with gr.Blocks() as demo:
69
+ gr.Markdown("## SysML Chatbot (Azure-Powered)")
70
  chatbot = gr.Chatbot()
71
  msg = gr.Textbox(placeholder="Ask me about SysML diagrams or concepts...")
72
  clear = gr.Button("Clear")