abhivsh commited on
Commit
d9264f3
·
verified ·
1 Parent(s): 49cc5e0

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +39 -2
app.py CHANGED
@@ -15,6 +15,8 @@ import gradio as gr
15
  import requests
16
  import os
17
 
 
 
18
 
19
  import sys
20
  sys.path.append('../..')
@@ -41,6 +43,39 @@ fs_token = os.environ.get('fs_token')
41
 
42
  llm_name = "gpt-3.5-turbo-0125"
43
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
44
  vectordb = initialize.initialize()
45
 
46
  chat_history_doc = []
@@ -74,9 +109,10 @@ def chat_query_doc(question, chat_history_doc):
74
 
75
 
76
 
77
- llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
78
  #llm = GoogleGenerativeAI(model = "gemini-pro", google_api_key = GEMINI_API_KEY) ###
79
  #llm = ChatGoogleGenerativeAI(model = "gemini-1.0-pro", google_api_key = GEMINI_API_KEY, temperature = 0)
 
80
 
81
  # Conversation Retrieval Chain with Memory
82
  #memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
@@ -103,8 +139,9 @@ def chat_query_doc(question, chat_history_doc):
103
 
104
  def chat_query_IS(question, chat_history_IS):
105
 
106
- llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
107
  #llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GEMINI_API_KEY) ###
 
108
 
109
  system_old = f""" Provide an elaborate, detailed and pointwise reply about the Topic, as per relevant IS/IEEE/BIS Standard.
110
  Also, at the end of your reply, quote the Relevant Standard Referred. Topic : {question}
 
15
  import requests
16
  import os
17
 
18
+ from langchain_ollama import OllamaLLM
19
+
20
 
21
  import sys
22
  sys.path.append('../..')
 
43
 
44
  llm_name = "gpt-3.5-turbo-0125"
45
 
46
+
47
+ # For Groq API
48
+
49
+ from langchain_groq import ChatGroq
50
+
51
+ llm = ChatGroq(
52
+ model="mixtral-8x7b-32768",
53
+ temperature=0,
54
+ max_tokens=None,
55
+ timeout=None,
56
+ max_retries=2,
57
+ # other params...
58
+ )
59
+
60
+ chat_completion = client.chat.completions.create(
61
+ messages=[
62
+ {
63
+ "role": "system",
64
+ "content": "You are a knowledgeable assistant, Provide a precise and point-wise reply based on provided context only. \
65
+ Ensure that your reply addresses each aspect of the query thoroughly, and highlight the important points using text formatting in your reply..",
66
+ },
67
+ {
68
+ "role": "user",
69
+ "content": query,
70
+ }
71
+ ],
72
+ model="llama3-8b-8192",
73
+ )
74
+
75
+ print(chat_completion.choices[0].message.content)
76
+
77
+
78
+
79
  vectordb = initialize.initialize()
80
 
81
  chat_history_doc = []
 
109
 
110
 
111
 
112
+ #llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
113
  #llm = GoogleGenerativeAI(model = "gemini-pro", google_api_key = GEMINI_API_KEY) ###
114
  #llm = ChatGoogleGenerativeAI(model = "gemini-1.0-pro", google_api_key = GEMINI_API_KEY, temperature = 0)
115
+ llm = OllamaLLM(model="unsloth/Llama-3.2-3B")
116
 
117
  # Conversation Retrieval Chain with Memory
118
  #memory = ConversationBufferMemory(memory_key="chat_history", return_messages=True)
 
139
 
140
  def chat_query_IS(question, chat_history_IS):
141
 
142
+ #llm = ChatOpenAI(model = llm_name, temperature = 0.1, api_key = OPENAI_API_KEY)
143
  #llm = ChatGoogleGenerativeAI(model="gemini-pro", google_api_key=GEMINI_API_KEY) ###
144
+ llm = OllamaLLM(model="unsloth/Llama-3.2-3B")
145
 
146
  system_old = f""" Provide an elaborate, detailed and pointwise reply about the Topic, as per relevant IS/IEEE/BIS Standard.
147
  Also, at the end of your reply, quote the Relevant Standard Referred. Topic : {question}