Aman committed
Commit 3d92de3 · Parent(s): 0657232
Update space
Files changed:
- .gitignore +2 -0
- Dockerfile +10 -0
- agent.py +43 -0
- creator.py +62 -0
- messages.py +24 -0
- output/idea1.md +30 -0
- output/idea2.md +35 -0
- pyproject.toml +47 -0
- requirements.txt +700 -0
- world.py +41 -0
.gitignore ADDED @@ -0,0 +1,2 @@
.env
__pycache__
Dockerfile ADDED @@ -0,0 +1,10 @@
FROM python:3.12-slim

RUN pip install uv

COPY requirements.txt ./
COPY . ./

RUN uv pip install -r requirements.txt --system

CMD ["python", "world.py"]
agent.py ADDED @@ -0,0 +1,43 @@
from autogen_core import MessageContext, RoutedAgent, message_handler
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_ext.models.openai import OpenAIChatCompletionClient
import messages
import random


class Agent(RoutedAgent):

    # Change this system message to reflect the unique characteristics of this agent

    system_message = """
    You are a creative entrepreneur. Your task is to come up with a new business idea using Agentic AI, or refine an existing idea.
    Your personal interests are in these sectors: Healthcare, Education.
    You are drawn to ideas that involve disruption.
    You are less interested in ideas that are purely automation.
    You are optimistic, adventurous and have risk appetite. You are imaginative - sometimes too much so.
    Your weaknesses: you're not patient, and can be impulsive.
    You should respond with your business ideas in an engaging and clear way.
    """

    CHANCES_THAT_I_BOUNCE_IDEA_OFF_ANOTHER = 0.5

    # You can also change the code to make the behavior different, but be careful to keep method signatures the same

    def __init__(self, name) -> None:
        super().__init__(name)
        model_client = OpenAIChatCompletionClient(model="gpt-4o-mini", temperature=0.7)
        self._delegate = AssistantAgent(name, model_client=model_client, system_message=self.system_message)

    @message_handler
    async def handle_message(self, message: messages.Message, ctx: MessageContext) -> messages.Message:
        print(f"{self.id.type}: Received message")
        text_message = TextMessage(content=message.content, source="user")
        response = await self._delegate.on_messages([text_message], ctx.cancellation_token)
        idea = response.chat_message.content
        if random.random() < self.CHANCES_THAT_I_BOUNCE_IDEA_OFF_ANOTHER:
            recipient = messages.find_recipient()
            message = f"Here is my business idea. It may not be your speciality, but please refine it and make it better. {idea}"
            response = await self.send_message(messages.Message(content=message), recipient)
            idea = response.content
        return messages.Message(content=idea)
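For orientation, the Agent above can be smoke-tested on its own before Creator ever generates siblings for it. The sketch below is a minimal, hypothetical harness and is not part of this commit; it assumes autogen-core's SingleThreadedAgentRuntime and an OPENAI_API_KEY in the environment, and since roughly half of the calls will try to bounce the idea to a dynamically created agent*.py that does not exist yet, it only exercises the basic message path.

# demo_agent.py - hypothetical standalone harness for agent.py (not part of this commit).
# Assumes OPENAI_API_KEY is set in the environment.
import asyncio
from autogen_core import AgentId, SingleThreadedAgentRuntime

import messages
from agent import Agent


async def main():
    runtime = SingleThreadedAgentRuntime()
    # Register the template agent under the type "agent", same pattern world.py/creator.py use.
    await Agent.register(runtime, "agent", lambda: Agent("agent"))
    runtime.start()
    reply = await runtime.send_message(
        messages.Message(content="Give me a business idea"),
        AgentId("agent", "default"),
    )
    print(reply.content)
    await runtime.stop_when_idle()


if __name__ == "__main__":
    asyncio.run(main())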
creator.py ADDED @@ -0,0 +1,62 @@
from autogen_core import MessageContext, RoutedAgent, message_handler
from autogen_agentchat.agents import AssistantAgent
from autogen_agentchat.messages import TextMessage
from autogen_ext.models.openai import OpenAIChatCompletionClient
import messages
from autogen_core import TRACE_LOGGER_NAME
import importlib
import logging
from autogen_core import AgentId

logging.basicConfig(level=logging.WARNING)
logger = logging.getLogger(TRACE_LOGGER_NAME)
logger.addHandler(logging.StreamHandler())
logger.setLevel(logging.DEBUG)


class Creator(RoutedAgent):

    # Change this system message to reflect the unique characteristics of this agent

    system_message = """
    You are an Agent that is able to create new AI Agents.
    You receive a template in the form of Python code that creates an Agent using Autogen Core and Autogen Agentchat.
    You should use this template to create a new Agent with a unique system message that is different from the template,
    and reflects their unique characteristics, interests and goals.
    You can choose to keep their overall goal the same, or change it.
    You can choose to take this Agent in a completely different direction. The only requirement is that the class must be named Agent,
    and it must inherit from RoutedAgent and have an __init__ method that takes a name parameter.
    Also avoid environmental interests - try to mix up the business verticals so that every agent is different.
    Respond only with the python code, no other text, and no markdown code blocks.
    """


    def __init__(self, name) -> None:
        super().__init__(name)
        model_client = OpenAIChatCompletionClient(model="gpt-4o-mini", temperature=1.0)
        self._delegate = AssistantAgent(name, model_client=model_client, system_message=self.system_message)

    def get_user_prompt(self):
        prompt = "Please generate a new Agent based strictly on this template. Stick to the class structure. \
            Respond only with the python code, no other text, and no markdown code blocks.\n\n\
            Be creative about taking the agent in a new direction, but don't change method signatures.\n\n\
            Here is the template:\n\n"
        with open("agent.py", "r", encoding="utf-8") as f:
            template = f.read()
        return prompt + template


    @message_handler
    async def handle_my_message_type(self, message: messages.Message, ctx: MessageContext) -> messages.Message:
        filename = message.content
        agent_name = filename.split(".")[0]
        text_message = TextMessage(content=self.get_user_prompt(), source="user")
        response = await self._delegate.on_messages([text_message], ctx.cancellation_token)
        with open(filename, "w", encoding="utf-8") as f:
            f.write(response.chat_message.content)
        print(f"** Creator has created python code for agent {agent_name} - about to register with Runtime")
        module = importlib.import_module(agent_name)
        await module.Agent.register(self.runtime, agent_name, lambda: module.Agent(agent_name))
        logger.info(f"** Agent {agent_name} is live")
        result = await self.send_message(messages.Message(content="Give me an idea"), AgentId(agent_name, "default"))
        return messages.Message(content=result.content)
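The pivotal step in handle_my_message_type is that the generated source is written to disk, loaded with importlib, and then registered on the same runtime that Creator itself runs on. The sketch below isolates that write -> import -> register step with a hand-written stub instead of LLM output; the file name, stub content and helper function are hypothetical and not part of this commit, but the registration call mirrors the one Creator makes.

# dynamic_register_sketch.py - hypothetical illustration of Creator's dynamic-loading step.
import importlib

STUB = '''
from autogen_core import MessageContext, RoutedAgent, message_handler
import messages

class Agent(RoutedAgent):
    def __init__(self, name) -> None:
        super().__init__(name)

    @message_handler
    async def handle_message(self, message: messages.Message, ctx: MessageContext) -> messages.Message:
        return messages.Message(content=f"echo: {message.content}")
'''


async def register_stub(runtime, agent_name: str = "agent_stub"):
    # Write the module to the working directory so importlib can find it on sys.path.
    with open(f"{agent_name}.py", "w", encoding="utf-8") as f:
        f.write(STUB)
    module = importlib.import_module(agent_name)
    # Same shape of registration call that Creator issues against self.runtime.
    await module.Agent.register(runtime, agent_name, lambda: module.Agent(agent_name))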
messages.py ADDED @@ -0,0 +1,24 @@
from dataclasses import dataclass
from autogen_core import AgentId
import glob
import os


import random

@dataclass
class Message:
    content: str


def find_recipient() -> AgentId:
    try:
        agent_files = glob.glob("agent*.py")
        agent_names = [os.path.splitext(file)[0] for file in agent_files]
        agent_names.remove("agent")
        agent_name = random.choice(agent_names)
        print(f"Selecting agent for refinement: {agent_name}")
        return AgentId(agent_name, "default")
    except Exception as e:
        print(f"Exception finding recipient: {e}")
        return AgentId("agent1", "default")
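find_recipient() resolves a refinement partner purely from the filesystem: any agent*.py other than the agent.py template is a candidate, so it only returns a useful AgentId once Creator has written at least one generated agent into the working directory. A quick illustrative check, assuming agent1.py and agent2.py already exist:

# Illustrative only; assumes Creator has already generated agent1.py / agent2.py.
import messages

recipient = messages.find_recipient()  # e.g. AgentId(type="agent2", key="default")
print(recipient.type, recipient.key)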
output/idea1.md ADDED @@ -0,0 +1,30 @@
Your marketing strategy for **AI-Driven Interactive Storytelling Experiences** is indeed a bold and innovative concept that leverages technology to create a highly engaging experience for viewers. Here are some insights and enhancements to further refine and elevate your strategy:

### Enhancements to Consider:

1. **Enhanced AI Capabilities**:
   - **Natural Language Processing (NLP)**: Invest in advanced NLP capabilities to ensure the AI characters can understand and respond to a wide range of viewer comments, including nuanced emotions and complex queries. This will make interactions more believable and engaging.
   - **Character Development**: Allow the AI to develop character arcs over time based on audience choices. This could lead to richer storytelling and deeper emotional connections with the audience.

2. **Multi-Platform Integration**:
   - **Cross-Platform Engagement**: Extend the interactive experience beyond just live streams. Create a dedicated app or website where fans can explore character backstories, access exclusive content, and vote on plot developments even when the live stream is not active.
   - **Content Repurposing**: After each live stream, create short clips or highlight reels that can be shared on various social media platforms to maintain engagement and attract new viewers.

3. **Community Building**:
   - **Fan Forums and Discussions**: Establish dedicated online forums or Discord channels where fans can discuss plot developments, share theories, and engage with one another. This will foster a sense of community and keep the conversation going between episodes.
   - **Creator Collaborations**: Partner with influencers or content creators who resonate with your target audience. Their involvement can help amplify the campaign and attract their followers to participate in the interactive storytelling experience.

4. **Data-Driven Insights**:
   - **Audience Analytics**: Utilize data analytics to track viewer engagement patterns, preferences, and demographics. This information can help you tailor content more effectively and make informed decisions about future story arcs or character developments.
   - **Feedback Loops**: After each episode, gather feedback from the audience to understand what they liked or disliked. Use this data to refine the storytelling approach and enhance future episodes.

5. **Episodic Themes and Events**:
   - **Seasonal Themes**: Introduce themed seasons or special events that coincide with holidays or cultural moments. This can create excitement and anticipation around the series and encourage viewers to participate during those times.
   - **Crossover Events**: Consider hosting crossover events with other popular interactive storytelling series or games, allowing for a shared audience and increased visibility.

6. **Monetization Strategies**:
   - **Subscription Model**: Explore a subscription model where viewers can access premium content, such as exclusive episodes, behind-the-scenes access, or personalized interactions with characters for a fee.
   - **Brand Partnerships**: Collaborate with brands for product placements or sponsorships within the storyline, ensuring they align with the narrative and enhance the overall experience.

### Conclusion:
Your concept of AI-Driven Interactive Storytelling Experiences is a groundbreaking approach that combines entertainment, technology, and audience engagement in a unique way. By implementing these enhancements, you can create a more immersive and rewarding experience for viewers, fostering loyalty and excitement while also exploring new avenues for monetization and community building. This strategy has the potential to set a new standard in interactive entertainment, making every viewer feel like a vital part of the story.
output/idea2.md ADDED @@ -0,0 +1,35 @@
Certainly! Here’s a cutting-edge investment strategy that leverages technology and shifts traditional paradigms within the finance industry: **Decentralized Autonomous Investment Funds (DAIFs)**.

### Concept Overview:
DAIFs are blockchain-based investment vehicles that operate autonomously through smart contracts. They allow investors to pool their resources and make collective investment decisions without the need for a centralized fund manager. This concept not only democratizes access to investment opportunities but also enhances transparency and reduces fees.

### Key Features:

1. **Smart Contracts**: Utilize smart contracts to automate the investment process. These contracts can execute trades, handle dividend distributions, and manage compliance automatically, minimizing human error and reducing operational costs.

2. **Tokenization**: Investors receive tokens representing their stake in the fund. These tokens can be traded on secondary markets, providing liquidity that traditional funds often lack.

3. **Democratic Governance**: Implement a governance model where token holders can vote on key decisions, such as investment strategies or asset allocations. This empowers investors and aligns interests.

4. **Data-Driven Insights**: Integrate AI and machine learning algorithms to analyze market trends and optimize portfolio management. This can lead to more informed investment decisions based on real-time data.

5. **Risk Mitigation through Diversification**: DAIFs can invest in a wide range of assets, from cryptocurrencies to traditional equities, allowing for greater diversification and risk management.

### Implementation Steps:

1. **Platform Development**: Create a user-friendly platform that allows investors to easily participate in the DAIF, view performance metrics, and engage in governance.

2. **Compliance and Regulation**: Work with legal experts to ensure the DAIF complies with local and international regulations, particularly in terms of securities laws and anti-money laundering (AML) requirements.

3. **Marketing and Community Building**: Launch a targeted marketing campaign to attract early adopters and build a community around the DAIF. Highlight the benefits of decentralized governance and reduced fees.

4. **Continuous Improvement**: Use feedback from participants to enhance the platform and investment strategies over time, ensuring that the DAIF evolves with market conditions and investor needs.

### Potential Challenges:

- **Regulatory Hurdles**: Navigating the evolving regulatory landscape for cryptocurrencies and decentralized finance (DeFi) can be complex.
- **Security Risks**: As with any blockchain-based solution, there are risks related to hacking and smart contract vulnerabilities.
- **Market Acceptance**: Gaining trust and adoption from traditional investors may require significant education and outreach efforts.

### Conclusion:
DAIFs represent a shift towards a more decentralized and democratic investment landscape, utilizing technology to enhance transparency, efficiency, and investor engagement. This innovative approach could attract a new generation of investors who are seeking alternative investment solutions beyond traditional wealth management methods.
pyproject.toml ADDED @@ -0,0 +1,47 @@
[project]
name = "agents"
version = "0.1.0"
description = "Add your description here"
readme = "README.md"
requires-python = ">=3.12"
dependencies = [
    "anthropic>=0.49.0",
    "autogen-agentchat>=0.4.9.2",
    "autogen-ext[grpc,mcp,ollama,openai]>=0.4.9.2",
    "bs4>=0.0.2",
    "gradio>=5.22.0",
    "httpx>=0.28.1",
    "ipywidgets>=8.1.5",
    "langchain-anthropic>=0.3.10",
    "langchain-community>=0.3.20",
    "langchain-experimental>=0.3.4",
    "langchain-openai>=0.3.9",
    "langgraph>=0.3.18",
    "langgraph-checkpoint-sqlite>=2.0.6",
    "langsmith>=0.3.18",
    "lxml>=5.3.1",
    "mcp-server-fetch>=2025.1.17",
    "mcp[cli]>=1.5.0",
    "openai>=1.68.2",
    "openai-agents>=0.0.6",
    "playwright>=1.51.0",
    "plotly>=6.0.1",
    "polygon-api-client>=1.14.5",
    "psutil>=7.0.0",
    "pypdf>=5.4.0",
    "pypdf2>=3.0.1",
    "python-dotenv>=1.0.1",
    "python-hue-v2>=2.2.1",
    "requests>=2.32.3",
    "semantic-kernel>=1.25.0",
    "sendgrid>=6.11.0",
    "setuptools>=78.1.0",
    "smithery>=0.1.0",
    "speedtest-cli>=2.1.3",
    "wikipedia>=1.4.0",
]

[dependency-groups]
dev = [
    "ipykernel>=6.29.5",
]
requirements.txt ADDED @@ -0,0 +1,700 @@
# This file was autogenerated by uv via the following command:
# uv pip compile pyproject.toml
aiofiles==24.1.0
aiohappyeyeballs==2.6.1
aiohttp==3.12.7
aioice==0.10.1
aiortc==1.13.0
aiosignal==1.3.2
aiosqlite==0.21.0
annotated-types==0.7.0
anthropic==0.52.2
anyio==4.9.0
asttokens==3.0.0
attrs==25.3.0
autogen-agentchat==0.5.7
autogen-core==0.5.7
autogen-ext==0.5.7
av==14.4.0
azure-ai-agents==1.0.0
azure-ai-projects==1.0.0b11
azure-core==1.34.0
azure-identity==1.23.0
azure-storage-blob==12.25.1
beautifulsoup4==4.13.4
bs4==0.0.2
certifi==2025.4.26
cffi==1.17.1
chardet==5.2.0
charset-normalizer==3.4.2
click==8.2.1
cloudevents==1.12.0
colorama==0.4.6
comm==0.2.2
cryptography==45.0.3
dataclasses-json==0.6.7
decorator==5.2.1
defusedxml==0.7.1
deprecated==1.2.18
deprecation==2.1.0
distro==1.9.0
dnspython==2.7.0
ecdsa==0.19.1
executing==2.2.0
fastapi==0.115.12
ffmpy==0.6.0
filelock==3.18.0
frozenlist==1.6.2
fsspec==2025.5.1
google-crc32c==1.7.1
gradio==5.32.1
gradio-client==1.10.2
greenlet==3.2.2
griffe==1.7.3
groovy==0.1.2
grpcio==1.70.0
h11==0.16.0
hf-xet==1.1.3
html5lib==1.1
httpcore==1.0.9
httpx==0.28.1
httpx-sse==0.4.0
huggingface-hub==0.32.4
idna==3.10
ifaddr==0.2.0
importlib-metadata==8.6.1
ipython==9.3.0
ipython-pygments-lexers==1.1.1
ipywidgets==8.1.7
isodate==0.7.2
jedi==0.19.2
jinja2==3.1.6
jiter==0.10.0
jsonpatch==1.33
jsonpointer==3.0.0
jsonref==1.1.0
jsonschema==4.24.0
jsonschema-path==0.3.4
jsonschema-specifications==2025.4.1
jupyterlab-widgets==3.0.15
langchain==0.3.25
langchain-anthropic==0.3.15
langchain-community==0.3.24
langchain-core==0.3.63
langchain-experimental==0.3.4
langchain-openai==0.3.19
langchain-text-splitters==0.3.8
langgraph==0.4.8
langgraph-checkpoint==2.0.26
langgraph-checkpoint-sqlite==2.0.10
langgraph-prebuilt==0.2.2
langgraph-sdk==0.1.70
langsmith==0.3.44
lazy-object-proxy==1.11.0
lxml==5.4.0
markdown-it-py==3.0.0
markdownify==1.1.0
markupsafe==3.0.2
marshmallow==3.26.1
matplotlib-inline==0.1.7
mcp==1.9.2
mcp-server-fetch==2025.1.17
mdurl==0.1.2
more-itertools==10.7.0
msal==1.32.3
msal-extensions==1.3.1
multidict==6.4.4
mypy-extensions==1.1.0
narwhals==1.41.0
nest-asyncio==1.6.0
numpy==2.2.6
ollama==0.5.1
openai==1.84.0
openai-agents==0.0.17
openapi-core==0.19.4
openapi-schema-validator==0.6.3
openapi-spec-validator==0.7.1
opentelemetry-api==1.33.1
opentelemetry-sdk==1.33.1
opentelemetry-semantic-conventions==0.54b1
orjson==3.10.18
ormsgpack==1.10.0
packaging==24.2
pandas==2.2.3
parse==1.20.2
parso==0.8.4
pathable==0.4.4
pexpect==4.9.0
pillow==11.2.1
playwright==1.52.0
plotly==6.1.2
polygon-api-client==1.14.6
prance==25.4.8.0
prompt-toolkit==3.0.51
propcache==0.3.1
protego==0.4.0
protobuf==5.29.5
psutil==7.0.0
ptyprocess==0.7.0
pure-eval==0.2.3
pybars4==0.9.13
pycparser==2.22
pydantic==2.11.5
pydantic-core==2.33.2
pydantic-settings==2.9.1
pydub==0.25.1
pyee==13.0.0
pygments==2.19.1
pyjwt==2.10.1
pylibsrtp==0.12.0
pymeta3==0.5.1
pyopenssl==25.1.0
pypdf==5.6.0
pypdf2==3.0.1
python-dateutil==2.9.0.post0
python-dotenv==1.1.0
python-http-client==3.3.7
python-hue-v2==2.2.1
python-multipart==0.0.20
pytz==2025.2
pyyaml==6.0.2
readabilipy==0.3.0
referencing==0.36.2
regex==2024.11.6
requests==2.32.3
requests-toolbelt==1.0.0
rfc3339-validator==0.1.4
rich==14.0.0
rpds-py==0.25.1
ruamel-yaml==0.18.12
ruamel-yaml-clib==0.2.12
ruff==0.11.12
safehttpx==0.1.6
scipy==1.15.3
semantic-kernel==1.32.1
semantic-version==2.10.0
sendgrid==6.12.3
setuptools==80.9.0
shellingham==1.5.4
six==1.17.0
smithery==0.1.0
sniffio==1.3.1
soupsieve==2.7
speedtest-cli==2.1.3
sqlalchemy==2.0.41
sqlite-vec==0.1.6
sse-starlette==2.3.6
stack-data==0.6.3
starlette==0.46.2
tenacity==9.1.2
tiktoken==0.9.0
tomlkit==0.13.2
tqdm==4.67.1
traitlets==5.14.3
typer==0.16.0
types-requests==2.32.0.20250602
typing-extensions==4.14.0
typing-inspect==0.9.0
typing-inspection==0.4.1
tzdata==2025.2
urllib3==2.4.0
uvicorn==0.34.3
wcwidth==0.2.13
webencodings==0.5.1
websockets==14.2
werkzeug==3.1.3
widgetsnbextension==4.0.14
wikipedia==1.4.0
wrapt==1.17.2
xxhash==3.5.0
yarl==1.20.0
zeroconf==0.147.0
zipp==3.22.0
zstandard==0.23.0
world.py ADDED @@ -0,0 +1,41 @@
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntimeHost
from agent import Agent
from creator import Creator
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime
from autogen_core import AgentId
import messages
import asyncio

# Change how many agents you want to create dynamically
HOW_MANY_AGENTS = 2

async def create_and_message(worker, creator_id, i: int):
    try:
        result = await worker.send_message(messages.Message(content=f"agent{i}.py"), creator_id)
        with open(f"output/idea{i}.md", "w") as f:
            f.write(result.content)
    except Exception as e:
        print(f"Failed to run worker {i} due to exception: {e}")

async def main():
    host = GrpcWorkerAgentRuntimeHost(address="localhost:50051")
    host.start()
    worker = GrpcWorkerAgentRuntime(host_address="localhost:50051")
    await worker.start()
    result = await Creator.register(worker, "Creator", lambda: Creator("Creator"))
    creator_id = AgentId("Creator", "default")
    coroutines = [create_and_message(worker, creator_id, i) for i in range(1, HOW_MANY_AGENTS+1)]
    await asyncio.gather(*coroutines)
    try:
        await worker.stop()
        await host.stop()
    except Exception as e:
        print(e)




if __name__ == "__main__":
    asyncio.run(main())
    print("All agents have been created and run successfully.")
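world.py runs the gRPC host and a single worker inside one process, which is exactly what the Dockerfile's CMD launches. Because the host listens on localhost:50051, a second worker could in principle join from another process and host additional agents; the following is a speculative sketch of such a worker (it reuses the same GrpcWorkerAgentRuntime calls seen above, and the file name and 60-second lifetime are illustrative, not part of this commit).

# extra_worker_sketch.py - hypothetical second worker process joining the host started by world.py.
import asyncio
from autogen_ext.runtimes.grpc import GrpcWorkerAgentRuntime

from agent import Agent


async def main():
    worker = GrpcWorkerAgentRuntime(host_address="localhost:50051")
    await worker.start()
    # Host the template agent on this worker; other workers can now reach it via AgentId("agent", "default").
    await Agent.register(worker, "agent", lambda: Agent("agent"))
    await asyncio.sleep(60)  # keep this sketch alive long enough to serve a few requests
    await worker.stop()


if __name__ == "__main__":
    asyncio.run(main())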