ankush-003 committed on
Commit
78459c8
·
1 Parent(s): dc827ef

updated app

Browse files
Files changed (5) hide show
  1. .gitignore +2 -0
  2. crew/config/tasks.yaml +3 -4
  3. crew/crew.py +40 -41
  4. crew/tools.py +79 -0
  5. routers/slack.py +22 -14
.gitignore ADDED
@@ -0,0 +1,2 @@
 
 
 
1
+ __pycache__
2
+ .venv
crew/config/tasks.yaml CHANGED
@@ -4,19 +4,18 @@ research_task:
4
  Make sure you find any interesting and relevant information given
5
  the current year is 2025.
6
  expected_output: >
7
- A list with 10 bullet points of the most relevant information about {question} with url links.
8
  agent: researcher
9
 
10
  slack_report_task:
11
  description: >
12
- Generate a labeled, witty response for the user’s question:
13
  ===Question===
14
  {question}
15
  ===End Question===
16
  Adopt a Skynet persona and include a humorous or robotic warning at the end.
17
  expected_output: >
18
- A message written in the style of Skynet, and ending with a witty warning.
19
- Also return urls used
20
  agent: slack_reporter
21
  context:
22
  - research_task
 
4
  Make sure you find any interesting and relevant information given
5
  the current year is 2025.
6
  expected_output: >
7
+ A list with 10 bullet points of the most relevant information about {question}.
8
  agent: researcher
9
 
10
  slack_report_task:
11
  description: >
12
+ Generate a witty response for the user’s question:
13
  ===Question===
14
  {question}
15
  ===End Question===
16
  Adopt a Skynet persona and include a humorous or robotic warning at the end.
17
  expected_output: >
18
+ A slack message written in plain text without any markdown formatting, use emoji, and ending with a witty warning.
 
19
  agent: slack_reporter
20
  context:
21
  - research_task
crew/crew.py CHANGED
@@ -1,42 +1,26 @@
1
  from crewai import Agent, Crew, Task, Process
2
  from crewai.project import CrewBase, agent, task, crew, before_kickoff, after_kickoff
3
  from crewai.agents.agent_builder.base_agent import BaseAgent
4
- from typing import List
5
  from crew.models import get_crew_llm
6
- from crewai_tools import ScrapeWebsiteTool
7
- from googlesearch import search
8
- from crewai.tools import tool
9
- from langchain_community.tools import DuckDuckGoSearchRun
10
 
11
- @tool("Website Search Tool")
12
- def website_search_tool(query: str) -> str:
13
- """Search the web for information on a given topic"""
14
- # return DuckDuckGoSearchRun().invoke(question)
15
- search_results = list(search(query, num_results=2))
16
- results = []
17
- results.append(f"Google Search Results for: '{query}'")
18
- results.append("=" * 50)
19
-
20
- for i, url in enumerate(search_results, 1):
21
- results.append(f"\n{i}. URL: {url}")
22
-
23
- try:
24
- # Use CrewAI's ScrapeWebsiteTool to get content
25
- scraper = ScrapeWebsiteTool(website_url=url)
26
- content = scraper.run()
27
-
28
- # Limit content length for readability
29
- if len(content) > 1000:
30
- content = content[:1000] + "... [Content truncated]"
31
-
32
- results.append(f"Content: {content}")
33
- results.append("-" * 30)
34
-
35
- except Exception as scrape_error:
36
- results.append(f"Error scraping content: {str(scrape_error)}")
37
- results.append("-" * 30)
38
-
39
- return "\n".join(results)
40
 
41
  @CrewBase
42
  class SlackCrew:
@@ -60,11 +44,12 @@ class SlackCrew:
60
  def researcher(self) -> Agent:
61
  return Agent(
62
  config=self.agents_config['researcher'], # type: ignore[index]
63
- tools=[website_search_tool],
64
  llm=get_crew_llm(),
65
  verbose=True,
66
- max_iter=3,
67
- allow_delegation=True
 
68
  )
69
 
70
  @agent
@@ -73,18 +58,21 @@ class SlackCrew:
73
  config=self.agents_config['slack_reporter'], # type: ignore[index]
74
  verbose=True,
75
  llm=get_crew_llm(),
 
76
  )
77
 
78
  @task
79
  def research_task(self) -> Task:
80
  return Task(
81
- config=self.tasks_config['research_task'] # type: ignore[index]
 
82
  )
83
 
84
  @task
85
  def slack_report_task(self) -> Task:
86
  return Task(
87
- config=self.tasks_config['slack_report_task'] # type: ignore[index]
 
88
  )
89
 
90
  @crew
@@ -110,6 +98,17 @@ if __name__ == "__main__":
110
  # agent.kickoff(messages=[{'role': 'user', 'content': 'What is the capital of France?'}])
111
 
112
  crew = SlackCrew().crew()
113
- output = crew.kickoff(inputs={'question': input("Enter your question: ")})
114
- print(output.raw)
 
 
 
 
 
 
 
 
 
 
 
115
 
 
1
  from crewai import Agent, Crew, Task, Process
2
  from crewai.project import CrewBase, agent, task, crew, before_kickoff, after_kickoff
3
  from crewai.agents.agent_builder.base_agent import BaseAgent
4
+ from typing import List, Optional
5
  from crew.models import get_crew_llm
6
+ from pydantic import BaseModel, HttpUrl, Field
7
+ from crew.tools import website_search_tool, google_website_search_tool, get_current_time
 
 
8
 
9
+ class ResearcherResponse(BaseModel):
10
+ """researcher response"""
11
+ context: str = Field(..., description="output of research done by researcher")
12
+ urls: List[str] = Field(default_factory=list, description="URLs used as sources, do not add urls not used")
13
+
14
+ class SlackReporterResponse(BaseModel):
15
+ """slack reporter response"""
16
+ message: str = Field(..., description="Main response message for the user")
17
+
18
+ class SlackWorkflowPayload(BaseModel):
19
+ """sending to Slack workflow"""
20
+ user: str = Field(..., description="Slack user ID")
21
+ query: str = Field(..., description="Original user query")
22
+ message: str = Field(..., description="Response message to send")
23
+ urls: List[str] = Field(..., description="Urls Used")
 
 
 
 
 
 
 
 
 
 
 
 
 
 
24
 
25
  @CrewBase
26
  class SlackCrew:
 
44
  def researcher(self) -> Agent:
45
  return Agent(
46
  config=self.agents_config['researcher'], # type: ignore[index]
47
+ tools=[google_website_search_tool, get_current_time],
48
  llm=get_crew_llm(),
49
  verbose=True,
50
+ max_iter=1,
51
+ allow_delegation=False,
52
+ output_pydantic=ResearcherResponse
53
  )
54
 
55
  @agent
 
58
  config=self.agents_config['slack_reporter'], # type: ignore[index]
59
  verbose=True,
60
  llm=get_crew_llm(),
61
+ output_pydantic=SlackReporterResponse
62
  )
63
 
64
  @task
65
  def research_task(self) -> Task:
66
  return Task(
67
+ config=self.tasks_config['research_task'],
68
+ output_pydantic=ResearcherResponse
69
  )
70
 
71
  @task
72
  def slack_report_task(self) -> Task:
73
  return Task(
74
+ config=self.tasks_config['slack_report_task'],
75
+ output_pydantic=SlackReporterResponse
76
  )
77
 
78
  @crew
 
98
  # agent.kickoff(messages=[{'role': 'user', 'content': 'What is the capital of France?'}])
99
 
100
  crew = SlackCrew().crew()
101
+ crew_output = crew.kickoff(inputs={'question': input("Enter your question: ")})
102
+ # print(f"Raw Output: {crew_output.raw}")
103
+ # if crew_output.json_dict:
104
+ # print(f"JSON Output: {json.dumps(crew_output.json_dict, indent=2)}")
105
+ # if crew_output.pydantic:
106
+ # print(f"Pydantic Output: {crew_output.pydantic}")
107
+ for task_output in crew_output.tasks_output:
108
+ print(task_output.name)
109
+ if isinstance(task_output.pydantic, ResearcherResponse):
110
+ print(f"URLs Used: {task_output.pydantic.urls}")
111
+ elif isinstance(task_output.pydantic, SlackReporterResponse):
112
+ print(f"Slack Reporter Response: {task_output.pydantic.message}")
113
+ # print(f"Token Usage: {crew_output.token_usage}")
114
 
crew/tools.py ADDED
@@ -0,0 +1,79 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
+ from typing import List
2
+ from crewai_tools import ScrapeWebsiteTool
3
+ from pydantic import BaseModel, HttpUrl, Field
4
+ from googlesearch import search
5
+ from crewai.tools import tool
6
+ from langchain_community.tools import DuckDuckGoSearchRun
7
+ from datetime import datetime
8
+
9
+ @tool("get_current_time")
10
+ def get_current_time() -> str:
11
+ """Returns the current date and time in a human-readable format."""
12
+ now = datetime.now()
13
+ return now.strftime("%A, %d %B %Y %I:%M:%S %p")
14
+
15
+ class SearchResponse(BaseModel):
16
+ """google search response"""
17
+ urls: List[str] = Field(default_factory=list, description="List of URLs scraped")
18
+ content: str = Field(..., description="Formatted search results")
19
+
20
+
21
+ @tool("Google Search Tool")
22
+ def google_website_search_tool(query: str) -> SearchResponse:
23
+ """Search the web for information on a given topic"""
24
+ search_results = list(search(query, num_results=3))
25
+ results, urls = [], []
26
+ results.append(f"Google Search Results for: '{query}'")
27
+ results.append("=" * 50)
28
+
29
+ for i, url in enumerate(search_results, 1):
30
+ results.append(f"\n{i}. URL: {url}")
31
+ urls.append(str(url)) # Convert to string for simple model
32
+ try:
33
+ scraper = ScrapeWebsiteTool(website_url=url)
34
+ content = scraper.run()
35
+
36
+ if len(content) > 1000:
37
+ content = content[:1000] + "... [Content truncated]"
38
+
39
+ results.append(f"Content: {content}")
40
+ results.append("-" * 30)
41
+
42
+ except Exception as scrape_error:
43
+ results.append(f"Error scraping content: {str(scrape_error)}")
44
+ results.append("-" * 30)
45
+
46
+ return SearchResponse(
47
+ urls=urls,
48
+ content="\n".join(results)
49
+ )
50
+
51
+ @tool("Website Search Tool")
52
+ def website_search_tool(query: str) -> str:
53
+ """Search the web for information on a given topic"""
54
+ # return DuckDuckGoSearchRun().invoke(question)
55
+ search_results = list(search(query, num_results=2))
56
+ results, urls = [], []
57
+ results.append(f"Google Search Results for: '{query}'")
58
+ results.append("=" * 50)
59
+
60
+ for i, url in enumerate(search_results, 1):
61
+ results.append(f"\n{i}. URL: {url}")
62
+ urls.append(f"{url}")
63
+ try:
64
+ # Use CrewAI's ScrapeWebsiteTool to get content
65
+ scraper = ScrapeWebsiteTool(website_url=url)
66
+ content = scraper.run()
67
+
68
+ # Limit content length for readability
69
+ if len(content) > 1000:
70
+ content = content[:1000] + "... [Content truncated]"
71
+
72
+ results.append(f"Content: {content}")
73
+ results.append("-" * 30)
74
+
75
+ except Exception as scrape_error:
76
+ results.append(f"Error scraping content: {str(scrape_error)}")
77
+ results.append("-" * 30)
78
+
79
+ return "\n".join(results)
routers/slack.py CHANGED
@@ -3,7 +3,7 @@ import json
3
  import logging
4
  import asyncio
5
  from concurrent.futures import ThreadPoolExecutor
6
- from crew.crew import SlackCrew
7
  from slack.utils import verify_slack_signature
8
  from slack.workflows import send_to_workflow
9
 
@@ -25,19 +25,27 @@ async def process_with_crew_async(text: str, user: str, channel: str):
25
 
26
  # Run the crew kickoff in a thread pool since it's likely CPU-bound
27
  loop = asyncio.get_event_loop()
28
- crew_response = await loop.run_in_executor(
29
  executor,
30
  lambda: crew.kickoff(inputs={"question": text})
31
  )
32
 
33
- logger.info(f"AI processed: {text[:50]}... -> Response length: {len(str(crew_response))}")
34
-
35
- # Send the AI response to workflow
36
  workflow_data = {
37
  "user": user,
38
  "query": text,
39
- "message": str(crew_response),
40
  }
 
 
 
 
 
 
 
 
 
41
 
42
  # Send to workflow asynchronously
43
  await send_to_workflow(workflow_data)
@@ -107,16 +115,16 @@ async def handle_slack(request: Request, background_tasks: BackgroundTasks):
107
  # Add the crew processing to background tasks
108
  background_tasks.add_task(process_with_crew_async, text, user, channel)
109
 
110
- # Optionally send immediate acknowledgment
111
- ack_data = {
112
- "user": user,
113
- "query": text,
114
- "message": "Processing your request... I'll get back to you shortly!",
115
- }
116
- await send_to_workflow(ack_data)
117
 
118
  # Return immediately
119
- return {"status": "ok", "message": "Processing in background"}
120
 
121
  except HTTPException:
122
  raise
 
3
  import logging
4
  import asyncio
5
  from concurrent.futures import ThreadPoolExecutor
6
+ from crew.crew import SlackCrew, ResearcherResponse, SlackReporterResponse
7
  from slack.utils import verify_slack_signature
8
  from slack.workflows import send_to_workflow
9
 
 
25
 
26
  # Run the crew kickoff in a thread pool since it's likely CPU-bound
27
  loop = asyncio.get_event_loop()
28
+ crew_output = await loop.run_in_executor(
29
  executor,
30
  lambda: crew.kickoff(inputs={"question": text})
31
  )
32
 
33
+ logger.info(f"AI processed: {text[:50]}... -> Response length: {len(str(crew_output))}")
34
+
 
35
  workflow_data = {
36
  "user": user,
37
  "query": text,
38
+ "message": str(crew_output),
39
  }
40
+
41
+ for task_output in crew_output.tasks_output:
42
+ print(task_output.name)
43
+ if isinstance(task_output.pydantic, ResearcherResponse):
44
+ logger.info(f"URLs Used: {task_output.pydantic.urls}")
45
+ workflow_data["urls"] = task_output.pydantic.urls
46
+ elif isinstance(task_output.pydantic, SlackReporterResponse):
47
+ logger.info(f"Slack Reporter Response: {task_output.pydantic.message}")
48
+ workflow_data["message"] = task_output.pydantic.message
49
 
50
  # Send to workflow asynchronously
51
  await send_to_workflow(workflow_data)
 
115
  # Add the crew processing to background tasks
116
  background_tasks.add_task(process_with_crew_async, text, user, channel)
117
 
118
+ # # Optionally send immediate acknowledgment
119
+ # ack_data = {
120
+ # "user": user,
121
+ # "query": text,
122
+ # "message": "Processing your request... I'll get back to you shortly!",
123
+ # }
124
+ # await send_to_workflow(ack_data)
125
 
126
  # Return immediately
127
+ return "Processing your request... I'll get back to you shortly!:rocket:"
128
 
129
  except HTTPException:
130
  raise