Commit 211c6f5 (verified), committed by Rezuwan
Parent(s): be7cbf9

Upload 4 files

.gitattributes CHANGED
@@ -33,3 +33,4 @@ saved_model/**/* filter=lfs diff=lfs merge=lfs -text
 *.zip filter=lfs diff=lfs merge=lfs -text
 *.zst filter=lfs diff=lfs merge=lfs -text
 *tfevents* filter=lfs diff=lfs merge=lfs -text
+faiss_index_sysml/index.faiss filter=lfs diff=lfs merge=lfs -text
app.py ADDED
@@ -0,0 +1,71 @@
+import os
+import warnings
+import gradio as gr
+from dotenv import load_dotenv
+
+from langchain.chains import ConversationalRetrievalChain
+from langchain_community.vectorstores import FAISS
+from langchain_openai import ChatOpenAI
+from langchain_community.embeddings import OpenAIEmbeddings
+
+# Patch Gradio bug (schema parsing issue)
+import gradio_client.utils
+gradio_client.utils.json_schema_to_python_type = lambda schema, defs=None: "string"
+
+
+
+# Load environment variables
+load_dotenv()
+OPENAI_API_KEY = os.getenv("OPENAI_API_KEY_P")  # or os.getenv("OPENAI_API_KEY_ROBO")
+if not OPENAI_API_KEY:
+    raise ValueError("Missing OPENAI_API_KEY. Please set it in your environment variables.")
+
+
+# Suppress warnings
+warnings.filterwarnings("ignore")
+
+# Initialize embedding model
+embeddings = OpenAIEmbeddings(openai_api_key=OPENAI_API_KEY)
+
+# Load FAISS vector store
+vectorstore = FAISS.load_local(
+    "faiss_index_sysml", embeddings, allow_dangerous_deserialization=True
+)
+
+# Load ChatOpenAI model
+llm = ChatOpenAI(
+    model_name="gpt-4",
+    temperature=0.5,
+    openai_api_key=OPENAI_API_KEY
+)
+
+# Build conversational chain with history
+qa = ConversationalRetrievalChain.from_llm(
+    llm=llm,
+    retriever=vectorstore.as_retriever(),
+    return_source_documents=False
+)
+
+history = []
+
+# Chatbot logic: query the chain, record the turn, and return the updated UI values
+def sysml_chatbot(message, history):
+    result = qa({"question": message, "chat_history": history})
+    answer = result["answer"]
+    history.append((message, answer))
+    return "", history, history
+
+# Gradio UI
+with gr.Blocks() as demo:
+    gr.Markdown("## SysML Chatbot")
+    chatbot = gr.Chatbot()
+    msg = gr.Textbox(placeholder="Ask me about SysML diagrams or concepts...")
+    clear = gr.Button("Clear")
+
+    state = gr.State(history)
+
+    msg.submit(sysml_chatbot, [msg, state], [msg, chatbot, state])
+    clear.click(lambda: ([], "", []), None, [chatbot, msg, state])
+
+if __name__ == "__main__":
+    demo.launch()
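
For reference, the chain defined in app.py can also be exercised without the Gradio UI. The snippet below is a minimal sketch and not part of this commit: it relies on the fact that demo.launch() sits behind the __main__ guard, so importing app builds qa (and loads the FAISS index) without starting the server; the two questions are only illustrative inputs.

# query_chain.py (hypothetical helper, for local testing only)
from app import qa  # builds the chain and loads the index; does not launch the UI

chat_history = []
for question in ["What is a block definition diagram?",
                 "How does it differ from an internal block diagram?"]:
    result = qa({"question": question, "chat_history": chat_history})
    chat_history.append((question, result["answer"]))
    print(f"Q: {question}\nA: {result['answer']}\n")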
faiss_index_sysml/index.faiss ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:8e6cf999585beebe259f8ffc3fae13d8015eb193459dcf610bf710b95f009c6c
+size 2291757
faiss_index_sysml/index.pkl ADDED
@@ -0,0 +1,3 @@
+version https://git-lfs.github.com/spec/v1
+oid sha256:e034f5a0c34cbf3d0ab09b6411579a47e9dd5d9ac371a0af1ac6e07c1b0309a7
+size 761219
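
index.faiss and index.pkl are the pair of files that LangChain's FAISS wrapper writes when a vector store is persisted with save_local, and app.py reads them back with FAISS.load_local. How this particular index was built is not recorded in the commit; the snippet below is only a sketch of a typical build step, where the sysml_docs folder and the chunking parameters are assumptions. The embedding model does have to match the one app.py uses at query time, hence OpenAIEmbeddings here.

# build_index.py (hypothetical; the real corpus and parameters are unknown)
import os
from langchain_community.document_loaders import TextLoader
from langchain.text_splitter import RecursiveCharacterTextSplitter
from langchain_community.vectorstores import FAISS
from langchain_community.embeddings import OpenAIEmbeddings

docs = []
for name in os.listdir("sysml_docs"):  # assumed folder of SysML reference text files
    if name.endswith(".txt"):
        docs.extend(TextLoader(os.path.join("sysml_docs", name)).load())

chunks = RecursiveCharacterTextSplitter(chunk_size=1000, chunk_overlap=100).split_documents(docs)
embeddings = OpenAIEmbeddings(openai_api_key=os.getenv("OPENAI_API_KEY_P"))
FAISS.from_documents(chunks, embeddings).save_local("faiss_index_sysml")  # writes index.faiss and index.pkl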
requirements.txt ADDED
@@ -0,0 +1,12 @@
+langchain
+langchain-community
+langchain-core
+langchain-openai
+openai
+faiss-cpu
+python-dotenv
+transformers
+sentence-transformers
+gradio==4.15.0
+gradio_client==0.8.1
+huggingface_hub >= 0.19.3