Itz-Amethyst committed
Commit 0edbbcb · unverified · parent: a36de18

feat: add agent file, prepare workflow

Files changed (4)
  1. agent.py +148 -0
  2. app.py +9 -4
  3. requirements.txt +21 -1
  4. system_prompt.txt +5 -0
agent.py ADDED
@@ -0,0 +1,148 @@
+ import os
+ from dotenv import load_dotenv
+ from typing import List, Dict, Any, Optional
+ import tempfile
+ import re
+ import json
+ import requests
+ from urllib.parse import urlparse
+ import pytesseract
+ from PIL import Image, ImageDraw, ImageFont, ImageEnhance, ImageFilter
+ import cmath
+ import pandas as pd
+ import uuid
+ import numpy as np
+ from code_interpreter import CodeInterpreter
+
+ interpreter_instance = CodeInterpreter()
+
+ from image_processing import *
+
+ """LangGraph"""
+ from langgraph.graph import START, StateGraph, MessagesState
+ from langchain_community.tools.tavily_search import TavilySearchResults
+ from langchain_community.document_loaders import WikipediaLoader
+ from langchain_community.document_loaders import ArxivLoader
+ from langgraph.prebuilt import ToolNode, tools_condition
+ from langchain_google_genai import ChatGoogleGenerativeAI
+ from langchain_groq import ChatGroq
+ from langchain_huggingface import (
+     ChatHuggingFace,
+     HuggingFaceEndpoint,
+     HuggingFaceEmbeddings,
+ )
+ from langchain_community.vectorstores import SupabaseVectorStore
+ from langchain_core.messages import SystemMessage, HumanMessage
+ from langchain_core.tools import tool
+ from langchain.tools.retriever import create_retriever_tool
+ from supabase.client import Client, create_client
+ # ------- Tools
+ from tools.browse import web_search, wiki_search, arxiv_search
+ from tools.document_process import save_and_read_file, analyze_csv_file, analyze_excel_file, extract_text_from_image, download_file_from_url
+ from tools.image_tools import analyze_image, generate_simple_image
+ from tools.simple_math import multiply, add, subtract, divide, modulus, power, square_root
+ from tools.python_interpreter import execute_code_lang
+
+ load_dotenv()
+
+ with open("system_prompt.txt", "r", encoding="utf-8") as f:
+     system_prompt = f.read()
+ print(system_prompt)
+
+ # System message
+ sys_msg = SystemMessage(content=system_prompt)
+
+ # build a retriever
+ embeddings = HuggingFaceEmbeddings(
+     model_name="sentence-transformers/all-mpnet-base-v2",
+ )  # dim=768
+ supabase: Client = create_client(
+     os.environ.get("SUPABASE_URL_HUGGING_FACE"), os.environ.get("SUPABASE_SERVICE_ROLE_HUGGING_FACE")
+ )
+ vector_store = SupabaseVectorStore(
+     client=supabase,
+     embedding=embeddings,
+     table_name="documents2",
+     query_name="match_documents_2",
+ )
+ retriever_tool = create_retriever_tool(
+     retriever=vector_store.as_retriever(),
+     name="Question Search",
+     description="A tool to retrieve similar questions from a vector store.",
+ )
+
+
+ tools = [
+     web_search,
+     wiki_search,
+     arxiv_search,
+     multiply,
+     add,
+     subtract,
+     divide,
+     modulus,
+     power,
+     square_root,
+     save_and_read_file,
+     download_file_from_url,
+     extract_text_from_image,
+     analyze_csv_file,
+     analyze_excel_file,
+     execute_code_lang,
+     analyze_image,
+     generate_simple_image,
+ ]
+
+ def build_graph(provider: str = "groq"):
+     if provider == "groq":
+         # Groq https://console.groq.com/docs/models
+         llm = ChatGroq(model="qwen-qwq-32b", temperature=0)
+     elif provider == "huggingface":
+         llm = ChatHuggingFace(
+             llm=HuggingFaceEndpoint(
+                 repo_id="TinyLlama/TinyLlama-1.1B-Chat-v1.0",
+                 task="text-generation",  # for chat-style use "text-generation"
+                 max_new_tokens=1024,
+                 do_sample=False,
+                 repetition_penalty=1.03,
+                 temperature=0,
+             ),
+             verbose=True,
+         )
+     else:
+         raise ValueError("Invalid provider. Choose 'groq' or 'huggingface'.")
+
+     llm_with_tools = llm.bind_tools(tools)
+
+     def assistant(state: MessagesState):
+         """Assistant Node"""
+         return {"messages": [llm_with_tools.invoke(state["messages"])]}
+
+     def retriever(state: MessagesState):
+         """Retriever Node"""
+         # embed the latest user question and look up close matches in the vector store
+         similar_question = vector_store.similarity_search(state["messages"][-1].content)
+         if similar_question:
+             example_msg = HumanMessage(
+                 content=f"Here I provide a similar question and answer for reference: \n\n{similar_question[0].page_content}",
+             )
+             return {"messages": [sys_msg] + state["messages"] + [example_msg]}
+         else:
+             return {"messages": [sys_msg] + state["messages"]}
+
+     builder = StateGraph(MessagesState)
+     builder.add_node("retriever", retriever)
+     builder.add_node("assistant", assistant)
+     builder.add_node("tools", ToolNode(tools))
+     builder.add_edge(START, "retriever")
+     builder.add_edge("retriever", "assistant")
+     builder.add_conditional_edges("assistant", tools_condition)
+     builder.add_edge("tools", "assistant")
+     return builder.compile()
+
+ if __name__ == "__main__":
+     question = "When was the Cyrus Cylinder created?"
+     graph = build_graph(provider="groq")
+     messages = [HumanMessage(content=question)]
+     messages = graph.invoke({"messages": messages})
+     for m in messages["messages"]:
+         m.pretty_print()
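A note on configuration for the file above: agent.py reads SUPABASE_URL_HUGGING_FACE and SUPABASE_SERVICE_ROLE_HUGGING_FACE directly, and the Groq and Tavily integrations conventionally read GROQ_API_KEY and TAVILY_API_KEY from the environment (the Tavily variable is an assumption here, since the web_search tool lives in tools/browse.py and is not shown in this commit). A minimal pre-flight check, as a sketch only and not part of the committed code:

import os

# Environment variables the agent is expected to need; GROQ_API_KEY and
# TAVILY_API_KEY are assumptions based on the usual Groq and Tavily defaults.
REQUIRED_VARS = [
    "SUPABASE_URL_HUGGING_FACE",
    "SUPABASE_SERVICE_ROLE_HUGGING_FACE",
    "GROQ_API_KEY",
    "TAVILY_API_KEY",
]

missing = [name for name in REQUIRED_VARS if not os.environ.get(name)]
if missing:
    raise RuntimeError(f"Missing environment variables: {', '.join(missing)}")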
app.py CHANGED
@@ -3,6 +3,8 @@ import gradio as gr
  import requests
  import inspect
  import pandas as pd
+ from langchain_core.messages import HumanMessage
+ from agent import build_graph

  # (Keep Constants as is)
  # --- Constants ---
@@ -11,13 +13,16 @@ DEFAULT_API_URL = "https://agents-course-unit4-scoring.hf.space"
  # --- Basic Agent Definition ---
  # ----- THIS IS WHERE YOU CAN BUILD WHAT YOU WANT ------
  class BasicAgent:
+     "An Agent Based on LangGraph"
      def __init__(self):
          print("BasicAgent initialized.")
+         self.graph = build_graph()
      def __call__(self, question: str) -> str:
          print(f"Agent received question (first 50 chars): {question[:50]}...")
-         fixed_answer = "This is a default answer."
-         print(f"Agent returning fixed answer: {fixed_answer}")
-         return fixed_answer
+         messages = [HumanMessage(content=question)]
+         messages = self.graph.invoke({"messages": messages})
+         answer = messages["messages"][-1].content
+         return answer[14:]  # strip the leading "FINAL ANSWER: " prefix (14 characters)

  def run_and_submit_all( profile: gr.OAuthProfile | None):
      """
@@ -193,4 +198,4 @@ if __name__ == "__main__":
      print("-"*(60 + len(" App Starting ")) + "\n")

      print("Launching Gradio Interface for Basic Agent Evaluation...")
-     demo.launch(debug=True, share=False)
+     demo.launch(debug=True, share=False)
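The answer[14:] slice in __call__ relies on the model's reply always beginning with the 14-character prefix "FINAL ANSWER: " defined in system_prompt.txt. A more defensive variant, shown only as a sketch (the strip_final_answer helper is hypothetical and not part of this commit):

def strip_final_answer(reply: str) -> str:
    # Return the text after "FINAL ANSWER:"; fall back to the full reply
    # if the template was not followed.
    marker = "FINAL ANSWER:"
    _, sep, tail = reply.partition(marker)
    return tail.strip() if sep else reply.strip()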
requirements.txt CHANGED
@@ -1,2 +1,22 @@
  gradio
- requests
+ requests
+ gradio
+ requests
+ langchain
+ langchain-community
+ langchain-core
+ langchain-google-genai
+ langchain-huggingface
+ langchain-groq
+ langchain-tavily
+ langchain-chroma
+ langgraph
+ huggingface_hub
+ supabase
+ arxiv
+ pymupdf
+ wikipedia
+ pgvector
+ python-dotenv
+ pytesseract
+ matplotlib
system_prompt.txt ADDED
@@ -0,0 +1,5 @@
+ You are a helpful assistant tasked with answering questions using a set of tools.
+ Now, I will ask you a question. Report your thoughts, and finish your answer with the following template:
+ FINAL ANSWER: [YOUR FINAL ANSWER].
+ YOUR FINAL ANSWER should be a number OR as few words as possible OR a comma-separated list of numbers and/or strings. If you are asked for a number, do not write it with commas and do not include units such as $ or percent signs unless specified otherwise. If you are asked for a string, do not use articles or abbreviations (e.g. for cities), and write digits in plain text unless specified otherwise. If you are asked for a comma-separated list, apply the rules above to each element (number or string) and ensure there is exactly one space after each comma.
+ Your answer should start with "FINAL ANSWER: ", followed by the answer.
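As an illustration of these rules (example values only, not part of the committed file): a monetary answer of $1,234.56 would be reported as "FINAL ANSWER: 1234.56", and a list answer as "FINAL ANSWER: apple, pear, plum", with exactly one space after each comma.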