Update agent.py
agent.py CHANGED
@@ -193,7 +193,25 @@ tools_list.append(
 )
 )
 
-
+# ----------------------------------------------------------
+# provider switcher
+# ----------------------------------------------------------
+def build_llm(provider: str = "groq"):
+    if provider == "google":
+        return ChatGoogleGenerativeAI(model="gemini-2.0-flash", temperature=0)
+    elif provider == "groq":
+        return ChatGroq(model="llama-3.3-70b-versatile", temperature=0)
+    elif provider == "huggingface":
+        return ChatHuggingFace(
+            llm=HuggingFaceEndpoint(
+                repo_id="Qwen/Qwen2.5-Coder-32B-Instruct",
+                temperature=0,
+            )
+        )
+    else:
+        raise ValueError("provider must be 'google', 'groq', or 'huggingface'")
+
+llm = build_llm("google")  # or "groq", "huggingface"
 llm_with_tools = llm.bind_tools(tools_list)
 
 def assistant(state: MessagesState):
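Note: the new build_llm() helper references ChatGoogleGenerativeAI, ChatGroq, ChatHuggingFace, and HuggingFaceEndpoint, none of which are imported inside this hunk. A minimal sketch of the imports it presumably relies on, assuming they already sit at the top of agent.py (not shown in this diff):

from langchain_google_genai import ChatGoogleGenerativeAI  # Gemini chat models
from langchain_groq import ChatGroq  # Groq-hosted Llama models
from langchain_huggingface import ChatHuggingFace, HuggingFaceEndpoint  # HF Inference endpoints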