Upload 699 files
Browse filesThis view is limited to 50 files because it contains too many changes.
See raw diff
- notebook_dir/GeoAgent-20250427.ipynb +124 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/__init__-checkpoint.py +7 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/_compat-checkpoint.py +23 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/config2-checkpoint.py +195 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/const-checkpoint.py +138 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/context-checkpoint.py +139 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/context_mixin-checkpoint.py +101 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/geoagent-checkpoint.py +76 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/llm-checkpoint.py +20 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/logs-checkpoint.py +48 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/schema-checkpoint.py +787 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/set_envs-checkpoint.py +87 -0
- notebook_dir/metagpt_yusin/.ipynb_checkpoints/tasks-checkpoint.py +246 -0
- notebook_dir/metagpt_yusin/__init__.py +7 -0
- notebook_dir/metagpt_yusin/__pycache__/__init__.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/_compat.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/config2.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/const.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/context.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/context_mixin.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/geoagent.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/llm.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/logs.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/repo_parser.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/schema.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/__pycache__/tasks.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/_compat.py +23 -0
- notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/__init__-checkpoint.py +57 -0
- notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/action-checkpoint.py +106 -0
- notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/execute_task-checkpoint.py +19 -0
- notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/generate_questions-checkpoint.py +25 -0
- notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/talk_action-checkpoint.py +169 -0
- notebook_dir/metagpt_yusin/actions/__init__.py +57 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/__init__.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/action.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/action_node.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/action_outcls_registry.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/action_output.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/add_requirement.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/debug_error.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/design_api.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/design_api_an.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/design_api_review.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/fix_bug.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/prepare_documents.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/project_management.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/project_management_an.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/research.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/run_code.cpython-39.pyc +0 -0
- notebook_dir/metagpt_yusin/actions/__pycache__/search_and_summarize.cpython-39.pyc +0 -0
notebook_dir/GeoAgent-20250427.ipynb
ADDED
@@ -0,0 +1,124 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
{
|
2 |
+
"cells": [
|
3 |
+
{
|
4 |
+
"cell_type": "code",
|
5 |
+
"execution_count": 1,
|
6 |
+
"id": "24b811ea-7714-4c2d-947d-1fb1b2ec980d",
|
7 |
+
"metadata": {
|
8 |
+
"collapsed": false,
|
9 |
+
"deletable": false,
|
10 |
+
"editable": false,
|
11 |
+
"jupyter": {
|
12 |
+
"outputs_hidden": false
|
13 |
+
}},
|
14 |
+
"outputs": [
|
15 |
+
{
|
16 |
+
"name": "stderr",
|
17 |
+
"output_type": "stream",
|
18 |
+
"text": [
|
19 |
+
"2025-04-27 16:56:51.233 | INFO | metagpt_yusin.const:get_metagpt_yusin_package_root:29 - Package root set to /data\n"
|
20 |
+
]
|
21 |
+
},
|
22 |
+
{
|
23 |
+
"data": {
|
24 |
+
"application/vnd.jupyter.widget-view+json": {
|
25 |
+
"model_id": "382ee0ae87fd4555959704f7f29aa79b",
|
26 |
+
"version_major": 2,
|
27 |
+
"version_minor": 0
|
28 |
+
},
|
29 |
+
"text/plain": [
|
30 |
+
"HBox(children=(VBox(children=(VBox(children=(Label(value='Select data sources and LLM models (or Submit defaul…"
|
31 |
+
]
|
32 |
+
},
|
33 |
+
"metadata": {},
|
34 |
+
"output_type": "display_data"
|
35 |
+
}
|
36 |
+
],
|
37 |
+
"source": [
|
38 |
+
"from metagpt_yusin.geoagent import GeoAgent; GeoAgent().default()"
|
39 |
+
]
|
40 |
+
},
|
41 |
+
{
|
42 |
+
"cell_type": "code",
|
43 |
+
"execution_count": null,
|
44 |
+
"id": "bf9d8a3f-ee15-490b-8ecf-ba58836d2bc2",
|
45 |
+
"metadata": {},
|
46 |
+
"outputs": [],
|
47 |
+
"source": []
|
48 |
+
},
|
49 |
+
{
|
50 |
+
"cell_type": "code",
|
51 |
+
"execution_count": null,
|
52 |
+
"id": "62f50c92-c051-4650-988b-c84b9f22a130",
|
53 |
+
"metadata": {},
|
54 |
+
"outputs": [],
|
55 |
+
"source": []
|
56 |
+
},
|
57 |
+
{
|
58 |
+
"cell_type": "markdown",
|
59 |
+
"id": "161e81a7-680a-431b-af6c-772fe827afd3",
|
60 |
+
"metadata": {},
|
61 |
+
"source": [
|
62 |
+
"# Decomposing the overall task into tasks!"
|
63 |
+
]
|
64 |
+
},
|
65 |
+
{
|
66 |
+
"cell_type": "code",
|
67 |
+
"execution_count": null,
|
68 |
+
"id": "c3a17761-7ddd-47b8-91b5-40d4e382a816",
|
69 |
+
"metadata": {},
|
70 |
+
"outputs": [],
|
71 |
+
"source": [
|
72 |
+
"[\n",
|
73 |
+
" {\n",
|
74 |
+
" \"task_id\": \"1\",\n",
|
75 |
+
" \"dependent_task_ids\": [],\n",
|
76 |
+
" \"instruction\": \"Design ASCII rabbit art\",\n",
|
77 |
+
" \"task_type\": \"other\"\n",
|
78 |
+
" },\n",
|
79 |
+
" {\n",
|
80 |
+
" \"task_id\": \"2\",\n",
|
81 |
+
" \"dependent_task_ids\": [\"1\"],\n",
|
82 |
+
" \"instruction\": \"Create function to plot ASCII rabbit\",\n",
|
83 |
+
" \"task_type\": \"other\"\n",
|
84 |
+
" },\n",
|
85 |
+
" {\n",
|
86 |
+
" \"task_id\": \"3\",\n",
|
87 |
+
" \"dependent_task_ids\": [\"2\"],\n",
|
88 |
+
" \"instruction\": \"Test the plotting function\",\n",
|
89 |
+
" \"task_type\": \"other\"\n",
|
90 |
+
" }\n",
|
91 |
+
"]\n"
|
92 |
+
]
|
93 |
+
},
|
94 |
+
{
|
95 |
+
"cell_type": "markdown",
|
96 |
+
"id": "538d829b-61d1-42a2-8c9a-553c5ff45dfa",
|
97 |
+
"metadata": {},
|
98 |
+
"source": [
|
99 |
+
"# Here is the code part!"
|
100 |
+
]
|
101 |
+
}
|
102 |
+
],
|
103 |
+
"metadata": {
|
104 |
+
"kernelspec": {
|
105 |
+
"display_name": "Python 3 (ipykernel)",
|
106 |
+
"language": "python",
|
107 |
+
"name": "python3"
|
108 |
+
},
|
109 |
+
"language_info": {
|
110 |
+
"codemirror_mode": {
|
111 |
+
"name": "ipython",
|
112 |
+
"version": 3
|
113 |
+
},
|
114 |
+
"file_extension": ".py",
|
115 |
+
"mimetype": "text/x-python",
|
116 |
+
"name": "python",
|
117 |
+
"nbconvert_exporter": "python",
|
118 |
+
"pygments_lexer": "ipython3",
|
119 |
+
"version": "3.9.5"
|
120 |
+
}
|
121 |
+
},
|
122 |
+
"nbformat": 4,
|
123 |
+
"nbformat_minor": 5
|
124 |
+
}
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/__init__-checkpoint.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
# @Time : 2023/4/24 22:26
|
4 |
+
# @Author : alexanderwu
|
5 |
+
# @File : __init__.py
|
6 |
+
|
7 |
+
from metagpt_yusin import _compat as _ # noqa: F401
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/_compat-checkpoint.py
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import platform
|
2 |
+
import sys
|
3 |
+
import warnings
|
4 |
+
|
5 |
+
if sys.implementation.name == "cpython" and platform.system() == "Windows":
|
6 |
+
import asyncio
|
7 |
+
|
8 |
+
if sys.version_info[:2] == (3, 9):
|
9 |
+
from asyncio.proactor_events import _ProactorBasePipeTransport
|
10 |
+
|
11 |
+
# https://github.com/python/cpython/pull/92842
|
12 |
+
def pacth_del(self, _warn=warnings.warn):
|
13 |
+
if self._sock is not None:
|
14 |
+
_warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
|
15 |
+
self._sock.close()
|
16 |
+
|
17 |
+
_ProactorBasePipeTransport.__del__ = pacth_del
|
18 |
+
|
19 |
+
if sys.version_info >= (3, 9, 0):
|
20 |
+
from semantic_kernel.orchestration import sk_function as _ # noqa: F401
|
21 |
+
|
22 |
+
# caused by https://github.com/microsoft/semantic-kernel/pull/1416
|
23 |
+
asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/config2-checkpoint.py
ADDED
@@ -0,0 +1,195 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2024/1/4 01:25
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : config2.py
|
7 |
+
"""
|
8 |
+
import os
|
9 |
+
from pathlib import Path
|
10 |
+
from typing import Dict, Iterable, List, Literal, Optional
|
11 |
+
|
12 |
+
from pydantic import BaseModel, model_validator
|
13 |
+
|
14 |
+
from metagpt_yusin.logs import logger
|
15 |
+
from metagpt_yusin.configs.browser_config import BrowserConfig
|
16 |
+
from metagpt_yusin.configs.llm_config import LLMConfig, LLMType
|
17 |
+
from metagpt_yusin.configs.mermaid_config import MermaidConfig
|
18 |
+
from metagpt_yusin.configs.redis_config import RedisConfig
|
19 |
+
from metagpt_yusin.configs.s3_config import S3Config
|
20 |
+
from metagpt_yusin.configs.search_config import SearchConfig
|
21 |
+
from metagpt_yusin.configs.workspace_config import WorkspaceConfig
|
22 |
+
from metagpt_yusin.const import CONFIG_ROOT, metagpt_yusin_ROOT
|
23 |
+
from metagpt_yusin.utils.yaml_model import YamlModel
|
24 |
+
|
25 |
+
# list all varibles in LLMType
|
26 |
+
#LLMType_dict = LLMType.__dict__
|
27 |
+
|
28 |
+
class CLIParams(BaseModel):
|
29 |
+
"""CLI parameters"""
|
30 |
+
|
31 |
+
project_path: str = ""
|
32 |
+
project_name: str = ""
|
33 |
+
inc: bool = False
|
34 |
+
reqa_file: str = ""
|
35 |
+
max_auto_summarize_code: int = 0
|
36 |
+
git_reinit: bool = False
|
37 |
+
|
38 |
+
@model_validator(mode="after")
|
39 |
+
def check_project_path(self):
|
40 |
+
"""Check project_path and project_name"""
|
41 |
+
if self.project_path:
|
42 |
+
self.inc = True
|
43 |
+
self.project_name = self.project_name or Path(self.project_path).name
|
44 |
+
return self
|
45 |
+
|
46 |
+
|
47 |
+
class Config(CLIParams, YamlModel):
|
48 |
+
"""Configurations for metagpt_yusin"""
|
49 |
+
|
50 |
+
# Key Parameters
|
51 |
+
llm: LLMConfig
|
52 |
+
|
53 |
+
# Global Proxy. Will be used if llm.proxy is not set
|
54 |
+
proxy: str = ""
|
55 |
+
|
56 |
+
# Tool Parameters
|
57 |
+
search: SearchConfig = SearchConfig()
|
58 |
+
browser: BrowserConfig = BrowserConfig()
|
59 |
+
mermaid: MermaidConfig = MermaidConfig()
|
60 |
+
|
61 |
+
# Storage Parameters
|
62 |
+
s3: Optional[S3Config] = None
|
63 |
+
redis: Optional[RedisConfig] = None
|
64 |
+
|
65 |
+
# Misc Parameters
|
66 |
+
repair_llm_output: bool = False
|
67 |
+
prompt_schema: Literal["json", "markdown", "raw"] = "json"
|
68 |
+
workspace: WorkspaceConfig = WorkspaceConfig()
|
69 |
+
enable_longterm_memory: bool = False
|
70 |
+
code_review_k_times: int = 2
|
71 |
+
|
72 |
+
# Will be removed in the future
|
73 |
+
metagpt_yusin_tti_url: str = ""
|
74 |
+
language: str = "English"
|
75 |
+
redis_key: str = "placeholder"
|
76 |
+
iflytek_app_id: str = ""
|
77 |
+
iflytek_api_secret: str = ""
|
78 |
+
iflytek_api_key: str = ""
|
79 |
+
azure_tts_subscription_key: str = ""
|
80 |
+
azure_tts_region: str = ""
|
81 |
+
_extra: dict = dict() # extra config dict
|
82 |
+
|
83 |
+
@classmethod
|
84 |
+
def from_home(cls, path):
|
85 |
+
"""Load config from ~/.metagpt_yusin/config2.yaml"""
|
86 |
+
pathname = CONFIG_ROOT / path
|
87 |
+
if not pathname.exists():
|
88 |
+
return None
|
89 |
+
return Config.from_yaml_file(pathname)
|
90 |
+
|
91 |
+
@classmethod
|
92 |
+
def default(cls):
|
93 |
+
"""Load default config
|
94 |
+
- Priority: env < default_config_paths
|
95 |
+
- Inside default_config_paths, the latter one overwrites the former one
|
96 |
+
"""
|
97 |
+
|
98 |
+
#default_config_paths: List[Path] = [
|
99 |
+
# metagpt_yusin_ROOT / "config/config2.yaml",
|
100 |
+
# CONFIG_ROOT / "config2.yaml",
|
101 |
+
#]
|
102 |
+
|
103 |
+
default_config_paths: List[Path] = [
|
104 |
+
CONFIG_ROOT / "config2.yaml",
|
105 |
+
]
|
106 |
+
|
107 |
+
dicts = [dict(os.environ)]
|
108 |
+
dicts += [Config.read_yaml(path) for path in default_config_paths]
|
109 |
+
final = merge_dict(dicts)
|
110 |
+
config_init = Config(**final)
|
111 |
+
|
112 |
+
|
113 |
+
# appended new
|
114 |
+
if 'api_type' in os.environ:
|
115 |
+
if os.environ.get('api_type') == 'openai':
|
116 |
+
config_init.llm.api_type = LLMType.OPENAI
|
117 |
+
elif os.environ.get('api_type') == 'groq':
|
118 |
+
config_init.llm.api_type = LLMType.OPENAI
|
119 |
+
config_init.llm.base_url = 'https://api.groq.com/openai/v1'
|
120 |
+
elif os.environ.get('api_type') == 'openrouter':
|
121 |
+
config_init.llm.api_type = LLMType.OPENROUTER
|
122 |
+
config_init.llm.base_url = 'https://openrouter.ai/api/v1'
|
123 |
+
else:
|
124 |
+
logger.debug('The API Type is not supported!!')
|
125 |
+
else:
|
126 |
+
logger.debug('Provide your api type!!')
|
127 |
+
if 'model' in os.environ:
|
128 |
+
config_init.llm.model = os.environ.get('model')
|
129 |
+
else:
|
130 |
+
logger.debug('Provide your model!!')
|
131 |
+
if 'api_key' in os.environ:
|
132 |
+
config_init.llm.api_key = os.environ.get('api_key')
|
133 |
+
else:
|
134 |
+
logger.debug('Provide your api key!!')
|
135 |
+
|
136 |
+
|
137 |
+
return config_init
|
138 |
+
|
139 |
+
@classmethod
|
140 |
+
def from_llm_config(cls, llm_config: dict):
|
141 |
+
"""user config llm
|
142 |
+
example:
|
143 |
+
llm_config = {"api_type": "xxx", "api_key": "xxx", "model": "xxx"}
|
144 |
+
gpt4 = Config.from_llm_config(llm_config)
|
145 |
+
A = Role(name="A", profile="Democratic candidate", goal="Win the election", actions=[a1], watch=[a2], config=gpt4)
|
146 |
+
"""
|
147 |
+
llm_config = LLMConfig.model_validate(llm_config)
|
148 |
+
dicts = [dict(os.environ)]
|
149 |
+
dicts += [{"llm": llm_config}]
|
150 |
+
final = merge_dict(dicts)
|
151 |
+
return Config(**final)
|
152 |
+
|
153 |
+
def update_via_cli(self, project_path, project_name, inc, reqa_file, max_auto_summarize_code):
|
154 |
+
"""update config via cli"""
|
155 |
+
|
156 |
+
# Use in the PrepareDocuments action according to Section 2.2.3.5.1 of RFC 135.
|
157 |
+
if project_path:
|
158 |
+
inc = True
|
159 |
+
project_name = project_name or Path(project_path).name
|
160 |
+
self.project_path = project_path
|
161 |
+
self.project_name = project_name
|
162 |
+
self.inc = inc
|
163 |
+
self.reqa_file = reqa_file
|
164 |
+
self.max_auto_summarize_code = max_auto_summarize_code
|
165 |
+
|
166 |
+
@property
|
167 |
+
def extra(self):
|
168 |
+
return self._extra
|
169 |
+
|
170 |
+
@extra.setter
|
171 |
+
def extra(self, value: dict):
|
172 |
+
self._extra = value
|
173 |
+
|
174 |
+
def get_openai_llm(self) -> Optional[LLMConfig]:
|
175 |
+
"""Get OpenAI LLMConfig by name. If no OpenAI, raise Exception"""
|
176 |
+
if self.llm.api_type == LLMType.OPENAI:
|
177 |
+
return self.llm
|
178 |
+
return None
|
179 |
+
|
180 |
+
def get_azure_llm(self) -> Optional[LLMConfig]:
|
181 |
+
"""Get Azure LLMConfig by name. If no Azure, raise Exception"""
|
182 |
+
if self.llm.api_type == LLMType.AZURE:
|
183 |
+
return self.llm
|
184 |
+
return None
|
185 |
+
|
186 |
+
|
187 |
+
def merge_dict(dicts: Iterable[Dict]) -> Dict:
|
188 |
+
"""Merge multiple dicts into one, with the latter dict overwriting the former"""
|
189 |
+
result = {}
|
190 |
+
for dictionary in dicts:
|
191 |
+
result.update(dictionary)
|
192 |
+
return result
|
193 |
+
|
194 |
+
|
195 |
+
config = Config.default()
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/const-checkpoint.py
ADDED
@@ -0,0 +1,138 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/5/1 11:59
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : const.py
|
7 |
+
@Modified By: mashenquan, 2023-11-1. According to Section 2.2.1 and 2.2.2 of RFC 116, added key definitions for
|
8 |
+
common properties in the Message.
|
9 |
+
@Modified By: mashenquan, 2023-11-27. Defines file repository paths according to Section 2.2.3.4 of RFC 135.
|
10 |
+
@Modified By: mashenquan, 2023/12/5. Add directories for code summarization..
|
11 |
+
"""
|
12 |
+
import os
|
13 |
+
from pathlib import Path
|
14 |
+
|
15 |
+
from loguru import logger
|
16 |
+
|
17 |
+
import metagpt_yusin
|
18 |
+
|
19 |
+
|
20 |
+
def get_metagpt_yusin_package_root():
|
21 |
+
"""Get the root directory of the installed package."""
|
22 |
+
package_root = Path(metagpt_yusin.__file__).parent.parent
|
23 |
+
for i in (".git", ".project_root", ".gitignore"):
|
24 |
+
if (package_root / i).exists():
|
25 |
+
break
|
26 |
+
else:
|
27 |
+
package_root = Path.cwd()
|
28 |
+
|
29 |
+
logger.info(f"Package root set to {str(package_root)}")
|
30 |
+
return package_root
|
31 |
+
|
32 |
+
|
33 |
+
def get_metagpt_yusin_root():
|
34 |
+
"""Get the project root directory."""
|
35 |
+
# Check if a project root is specified in the environment variable
|
36 |
+
project_root_env = os.getenv("metagpt_yusin_PROJECT_ROOT")
|
37 |
+
if project_root_env:
|
38 |
+
project_root = Path(project_root_env)
|
39 |
+
logger.info(f"PROJECT_ROOT set from environment variable to {str(project_root)}")
|
40 |
+
else:
|
41 |
+
# Fallback to package root if no environment variable is set
|
42 |
+
project_root = get_metagpt_yusin_package_root()
|
43 |
+
return project_root
|
44 |
+
|
45 |
+
|
46 |
+
# metagpt_yusin PROJECT ROOT AND VARS
|
47 |
+
#CONFIG_ROOT = Path.home() / ".metagpt_yusin"
|
48 |
+
metagpt_yusin_ROOT = get_metagpt_yusin_root() # Dependent on metagpt_yusin_PROJECT_ROOT
|
49 |
+
DEFAULT_WORKSPACE_ROOT = metagpt_yusin_ROOT / "workspace"
|
50 |
+
CONFIG_ROOT = metagpt_yusin_ROOT / "metagpt_yusin/configs"
|
51 |
+
|
52 |
+
EXAMPLE_PATH = metagpt_yusin_ROOT / "examples"
|
53 |
+
EXAMPLE_DATA_PATH = EXAMPLE_PATH / "data"
|
54 |
+
DATA_PATH = metagpt_yusin_ROOT / "data"
|
55 |
+
TEST_DATA_PATH = metagpt_yusin_ROOT / "tests/data"
|
56 |
+
RESEARCH_PATH = DATA_PATH / "research"
|
57 |
+
TUTORIAL_PATH = DATA_PATH / "tutorial_docx"
|
58 |
+
INVOICE_OCR_TABLE_PATH = DATA_PATH / "invoice_table"
|
59 |
+
|
60 |
+
UT_PATH = DATA_PATH / "ut"
|
61 |
+
SWAGGER_PATH = UT_PATH / "files/api/"
|
62 |
+
UT_PY_PATH = UT_PATH / "files/ut/"
|
63 |
+
API_QUESTIONS_PATH = UT_PATH / "files/question/"
|
64 |
+
|
65 |
+
SERDESER_PATH = DEFAULT_WORKSPACE_ROOT / "storage" # TODO to store `storage` under the individual generated project
|
66 |
+
|
67 |
+
TMP = metagpt_yusin_ROOT / "tmp"
|
68 |
+
|
69 |
+
SOURCE_ROOT = metagpt_yusin_ROOT / "metagpt_yusin"
|
70 |
+
PROMPT_PATH = SOURCE_ROOT / "prompts"
|
71 |
+
SKILL_DIRECTORY = SOURCE_ROOT / "skills"
|
72 |
+
TOOL_SCHEMA_PATH = metagpt_yusin_ROOT / "metagpt_yusin/tools/schemas"
|
73 |
+
TOOL_LIBS_PATH = metagpt_yusin_ROOT / "metagpt_yusin/tools/libs"
|
74 |
+
|
75 |
+
# REAL CONSTS
|
76 |
+
|
77 |
+
MEM_TTL = 24 * 30 * 3600
|
78 |
+
|
79 |
+
MESSAGE_ROUTE_FROM = "sent_from"
|
80 |
+
MESSAGE_ROUTE_TO = "send_to"
|
81 |
+
MESSAGE_ROUTE_CAUSE_BY = "cause_by"
|
82 |
+
MESSAGE_META_ROLE = "role"
|
83 |
+
MESSAGE_ROUTE_TO_ALL = "<all>"
|
84 |
+
MESSAGE_ROUTE_TO_NONE = "<none>"
|
85 |
+
|
86 |
+
REQUIREMENT_FILENAME = "requirement.txt"
|
87 |
+
BUGFIX_FILENAME = "bugfix.txt"
|
88 |
+
PACKAGE_REQUIREMENTS_FILENAME = "requirements.txt"
|
89 |
+
|
90 |
+
DOCS_FILE_REPO = "docs"
|
91 |
+
PRDS_FILE_REPO = "docs/prd"
|
92 |
+
SYSTEM_DESIGN_FILE_REPO = "docs/system_design"
|
93 |
+
TASK_FILE_REPO = "docs/task"
|
94 |
+
CODE_PLAN_AND_CHANGE_FILE_REPO = "docs/code_plan_and_change"
|
95 |
+
COMPETITIVE_ANALYSIS_FILE_REPO = "resources/competitive_analysis"
|
96 |
+
DATA_API_DESIGN_FILE_REPO = "resources/data_api_design"
|
97 |
+
SEQ_FLOW_FILE_REPO = "resources/seq_flow"
|
98 |
+
SYSTEM_DESIGN_PDF_FILE_REPO = "resources/system_design"
|
99 |
+
PRD_PDF_FILE_REPO = "resources/prd"
|
100 |
+
TASK_PDF_FILE_REPO = "resources/api_spec_and_task"
|
101 |
+
CODE_PLAN_AND_CHANGE_PDF_FILE_REPO = "resources/code_plan_and_change"
|
102 |
+
TEST_CODES_FILE_REPO = "tests"
|
103 |
+
TEST_OUTPUTS_FILE_REPO = "test_outputs"
|
104 |
+
CODE_SUMMARIES_FILE_REPO = "docs/code_summary"
|
105 |
+
CODE_SUMMARIES_PDF_FILE_REPO = "resources/code_summary"
|
106 |
+
RESOURCES_FILE_REPO = "resources"
|
107 |
+
SD_OUTPUT_FILE_REPO = "resources/sd_output"
|
108 |
+
GRAPH_REPO_FILE_REPO = "docs/graph_repo"
|
109 |
+
VISUAL_GRAPH_REPO_FILE_REPO = "resources/graph_db"
|
110 |
+
CLASS_VIEW_FILE_REPO = "docs/class_view"
|
111 |
+
|
112 |
+
YAPI_URL = "http://yapi.deepwisdomai.com/"
|
113 |
+
|
114 |
+
DEFAULT_LANGUAGE = "English"
|
115 |
+
DEFAULT_MAX_TOKENS = 1500
|
116 |
+
COMMAND_TOKENS = 500
|
117 |
+
BRAIN_MEMORY = "BRAIN_MEMORY"
|
118 |
+
SKILL_PATH = "SKILL_PATH"
|
119 |
+
SERPER_API_KEY = "SERPER_API_KEY"
|
120 |
+
DEFAULT_TOKEN_SIZE = 500
|
121 |
+
|
122 |
+
# format
|
123 |
+
BASE64_FORMAT = "base64"
|
124 |
+
|
125 |
+
# REDIS
|
126 |
+
REDIS_KEY = "REDIS_KEY"
|
127 |
+
|
128 |
+
# Message id
|
129 |
+
IGNORED_MESSAGE_ID = "0"
|
130 |
+
|
131 |
+
# Class Relationship
|
132 |
+
GENERALIZATION = "Generalize"
|
133 |
+
COMPOSITION = "Composite"
|
134 |
+
AGGREGATION = "Aggregate"
|
135 |
+
|
136 |
+
# Timeout
|
137 |
+
USE_CONFIG_TIMEOUT = 0 # Using llm.timeout configuration.
|
138 |
+
LLM_API_TIMEOUT = 300
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/context-checkpoint.py
ADDED
@@ -0,0 +1,139 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2024/1/4 16:32
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : context.py
|
7 |
+
"""
|
8 |
+
import os
|
9 |
+
from pathlib import Path
|
10 |
+
from typing import Any, Dict, Optional
|
11 |
+
|
12 |
+
from pydantic import BaseModel, ConfigDict
|
13 |
+
|
14 |
+
from metagpt_yusin.config2 import Config
|
15 |
+
from metagpt_yusin.configs.llm_config import LLMConfig, LLMType
|
16 |
+
from metagpt_yusin.provider.base_llm import BaseLLM
|
17 |
+
from metagpt_yusin.provider.llm_provider_registry import create_llm_instance
|
18 |
+
from metagpt_yusin.utils.cost_manager import (
|
19 |
+
CostManager,
|
20 |
+
FireworksCostManager,
|
21 |
+
TokenCostManager,
|
22 |
+
)
|
23 |
+
from metagpt_yusin.utils.git_repository import GitRepository
|
24 |
+
from metagpt_yusin.utils.project_repo import ProjectRepo
|
25 |
+
|
26 |
+
|
27 |
+
class AttrDict(BaseModel):
|
28 |
+
"""A dict-like object that allows access to keys as attributes, compatible with Pydantic."""
|
29 |
+
|
30 |
+
model_config = ConfigDict(extra="allow")
|
31 |
+
|
32 |
+
def __init__(self, **kwargs):
|
33 |
+
super().__init__(**kwargs)
|
34 |
+
self.__dict__.update(kwargs)
|
35 |
+
|
36 |
+
def __getattr__(self, key):
|
37 |
+
return self.__dict__.get(key, None)
|
38 |
+
|
39 |
+
def __setattr__(self, key, value):
|
40 |
+
self.__dict__[key] = value
|
41 |
+
|
42 |
+
def __delattr__(self, key):
|
43 |
+
if key in self.__dict__:
|
44 |
+
del self.__dict__[key]
|
45 |
+
else:
|
46 |
+
raise AttributeError(f"No such attribute: {key}")
|
47 |
+
|
48 |
+
def set(self, key, val: Any):
|
49 |
+
self.__dict__[key] = val
|
50 |
+
|
51 |
+
def get(self, key, default: Any = None):
|
52 |
+
return self.__dict__.get(key, default)
|
53 |
+
|
54 |
+
def remove(self, key):
|
55 |
+
if key in self.__dict__:
|
56 |
+
self.__delattr__(key)
|
57 |
+
|
58 |
+
|
59 |
+
class Context(BaseModel):
|
60 |
+
"""Env context for metagpt_yusin"""
|
61 |
+
|
62 |
+
model_config = ConfigDict(arbitrary_types_allowed=True)
|
63 |
+
|
64 |
+
kwargs: AttrDict = AttrDict()
|
65 |
+
config: Config = Config.default()
|
66 |
+
|
67 |
+
repo: Optional[ProjectRepo] = None
|
68 |
+
git_repo: Optional[GitRepository] = None
|
69 |
+
src_workspace: Optional[Path] = None
|
70 |
+
cost_manager: CostManager = CostManager()
|
71 |
+
|
72 |
+
_llm: Optional[BaseLLM] = None
|
73 |
+
|
74 |
+
def new_environ(self):
|
75 |
+
"""Return a new os.environ object"""
|
76 |
+
env = os.environ.copy()
|
77 |
+
# i = self.options
|
78 |
+
# env.update({k: v for k, v in i.items() if isinstance(v, str)})
|
79 |
+
return env
|
80 |
+
|
81 |
+
def _select_costmanager(self, llm_config: LLMConfig) -> CostManager:
|
82 |
+
"""Return a CostManager instance"""
|
83 |
+
if llm_config.api_type == LLMType.FIREWORKS:
|
84 |
+
return FireworksCostManager()
|
85 |
+
elif llm_config.api_type == LLMType.OPEN_LLM:
|
86 |
+
return TokenCostManager()
|
87 |
+
else:
|
88 |
+
return self.cost_manager
|
89 |
+
|
90 |
+
def llm(self) -> BaseLLM:
|
91 |
+
"""Return a LLM instance, fixme: support cache"""
|
92 |
+
# if self._llm is None:
|
93 |
+
self._llm = create_llm_instance(self.config.llm)
|
94 |
+
if self._llm.cost_manager is None:
|
95 |
+
self._llm.cost_manager = self._select_costmanager(self.config.llm)
|
96 |
+
return self._llm
|
97 |
+
|
98 |
+
def llm_with_cost_manager_from_llm_config(self, llm_config: LLMConfig) -> BaseLLM:
|
99 |
+
"""Return a LLM instance, fixme: support cache"""
|
100 |
+
# if self._llm is None:
|
101 |
+
llm = create_llm_instance(llm_config)
|
102 |
+
if llm.cost_manager is None:
|
103 |
+
llm.cost_manager = self._select_costmanager(llm_config)
|
104 |
+
return llm
|
105 |
+
|
106 |
+
def serialize(self) -> Dict[str, Any]:
|
107 |
+
"""Serialize the object's attributes into a dictionary.
|
108 |
+
|
109 |
+
Returns:
|
110 |
+
Dict[str, Any]: A dictionary containing serialized data.
|
111 |
+
"""
|
112 |
+
return {
|
113 |
+
"workdir": str(self.repo.workdir) if self.repo else "",
|
114 |
+
"kwargs": {k: v for k, v in self.kwargs.__dict__.items()},
|
115 |
+
"cost_manager": self.cost_manager.model_dump_json(),
|
116 |
+
}
|
117 |
+
|
118 |
+
def deserialize(self, serialized_data: Dict[str, Any]):
|
119 |
+
"""Deserialize the given serialized data and update the object's attributes accordingly.
|
120 |
+
|
121 |
+
Args:
|
122 |
+
serialized_data (Dict[str, Any]): A dictionary containing serialized data.
|
123 |
+
"""
|
124 |
+
if not serialized_data:
|
125 |
+
return
|
126 |
+
workdir = serialized_data.get("workdir")
|
127 |
+
if workdir:
|
128 |
+
self.git_repo = GitRepository(local_path=workdir, auto_init=True)
|
129 |
+
self.repo = ProjectRepo(self.git_repo)
|
130 |
+
src_workspace = self.git_repo.workdir / self.git_repo.workdir.name
|
131 |
+
if src_workspace.exists():
|
132 |
+
self.src_workspace = src_workspace
|
133 |
+
kwargs = serialized_data.get("kwargs")
|
134 |
+
if kwargs:
|
135 |
+
for k, v in kwargs.items():
|
136 |
+
self.kwargs.set(k, v)
|
137 |
+
cost_manager = serialized_data.get("cost_manager")
|
138 |
+
if cost_manager:
|
139 |
+
self.cost_manager.model_validate_json(cost_manager)
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/context_mixin-checkpoint.py
ADDED
@@ -0,0 +1,101 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2024/1/11 17:25
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : context_mixin.py
|
7 |
+
"""
|
8 |
+
from typing import Optional
|
9 |
+
|
10 |
+
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
11 |
+
|
12 |
+
from metagpt_yusin.config2 import Config
|
13 |
+
from metagpt_yusin.context import Context
|
14 |
+
from metagpt_yusin.provider.base_llm import BaseLLM
|
15 |
+
|
16 |
+
|
17 |
+
class ContextMixin(BaseModel):
    """Mixin class for context and config.

    Gives Env/Role/Action objects optional private ``Context``, ``Config`` and
    ``BaseLLM`` slots; the public ``context`` / ``config`` / ``llm`` properties
    fall back to shared defaults whenever the private slot is unset.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True, extra="allow")

    # Pydantic has bug on _private_attr when using inheritance, so we use private_* instead
    # - https://github.com/pydantic/pydantic/issues/7142
    # - https://github.com/pydantic/pydantic/issues/7083
    # - https://github.com/pydantic/pydantic/issues/7091

    # Env/Role/Action will use this context as private context, or use self.context as public context
    private_context: Optional[Context] = Field(default=None, exclude=True)
    # Env/Role/Action will use this config as private config, or use self.context.config as public config
    private_config: Optional[Config] = Field(default=None, exclude=True)

    # Env/Role/Action will use this llm as private llm, or use self.context._llm instance
    private_llm: Optional[BaseLLM] = Field(default=None, exclude=True)

    @model_validator(mode="after")
    def validate_context_mixin_extra(self):
        # Route any extra constructor kwargs (context/config/llm) into the
        # private slots right after model construction.
        self._process_context_mixin_extra()
        return self

    def _process_context_mixin_extra(self):
        """Process the extra field"""
        # NOTE: pop() mutates model_extra, so consumed keys no longer show up as extras.
        kwargs = self.model_extra or {}
        self.set_context(kwargs.pop("context", None))
        self.set_config(kwargs.pop("config", None))
        self.set_llm(kwargs.pop("llm", None))

    def set(self, k, v, override=False):
        """Set attribute ``k`` to ``v``; without ``override``, only when the current value is falsy."""
        if override or not self.__dict__.get(k):
            self.__dict__[k] = v

    def set_context(self, context: Context, override=True):
        """Set context (overrides an existing private context by default)."""
        self.set("private_context", context, override)

    def set_config(self, config: Config, override=False):
        """Set config; a non-None config also triggers eager LLM initialization."""
        self.set("private_config", config, override)
        if config is not None:
            _ = self.llm  # init llm

    def set_llm(self, llm: BaseLLM, override=False):
        """Set llm"""
        self.set("private_llm", llm, override)

    @property
    def config(self) -> Config:
        """Role config: role config > context config"""
        if self.private_config:
            return self.private_config
        return self.context.config

    @config.setter
    def config(self, config: Config) -> None:
        """Set config"""
        self.set_config(config)

    @property
    def context(self) -> Context:
        """Role context: role context > context"""
        if self.private_context:
            return self.private_context
        # No private context set: hand back a fresh default Context.
        return Context()

    @context.setter
    def context(self, context: Context) -> None:
        """Set context"""
        self.set_context(context)

    @property
    def llm(self) -> BaseLLM:
        """Role llm: if not existed, init from role.config"""
        # print(f"class:{self.__class__.__name__}({self.name}), llm: {self._llm}, llm_config: {self._llm_config}")
        if not self.private_llm:
            # Lazily build (and cache) an LLM from the effective config.
            self.private_llm = self.context.llm_with_cost_manager_from_llm_config(self.config.llm)
        return self.private_llm

    @llm.setter
    def llm(self, llm: BaseLLM) -> None:
        """Set llm"""
        self.private_llm = llm
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/geoagent-checkpoint.py
ADDED
@@ -0,0 +1,76 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2024/1/4 01:25
|
5 |
+
@Author : yusin
|
6 |
+
@File : geoagent.py
|
7 |
+
"""
|
8 |
+
|
9 |
+
import leafmap
|
10 |
+
from ipywidgets import DatePicker, Text, Button, Textarea, Tab, Box, VBox, HBox, Layout, Label, Password, Dropdown, FloatText
|
11 |
+
from IPython.display import display
|
12 |
+
|
13 |
+
from metagpt_yusin.tasks import RunLLM
|
14 |
+
from metagpt_yusin.set_envs import SetLlmEnv
|
15 |
+
|
16 |
+
class GeoAgent(SetLlmEnv, RunLLM):
    """Interactive notebook front-end combining LLM task panels with a map view."""

    def default(self):
        """Build and display the default two-column widget layout, then wire button callbacks."""
        # Data-source input panel
        data_set = VBox([
            GeoAgent.data_source,
            HBox([GeoAgent.gee_key, GeoAgent.button_set2]),
        ])

        # LLM settings panel
        llm_set = VBox([
            HBox([GeoAgent.api_type, GeoAgent.model]),
            HBox([GeoAgent.api_key, GeoAgent.button_set1]),
        ])

        # Space & time panel
        st_set = VBox([
            HBox([GeoAgent.start_date, GeoAgent.end_date]),  # date pickers on one row
            HBox([GeoAgent.get_bounds_button, GeoAgent.bounds_label]),  # bounds button + label on one row
        ])

        # Tab layout for the three settings panels
        tab_nest = Tab()
        tab_nest.children = [data_set, llm_set, st_set]
        tab_nest.titles = ('Data Sources', 'LLM Models', 'Space & Time')

        # Left-hand controls: settings tabs plus the task input area
        left_panel = VBox([
            Label(value="Select data sources and LLM models (or Submit default) and Space Time:"),
            tab_nest,
            Label(value="Start with your task which can be refined and built upon previous tasks:"),
            VBox([GeoAgent.notice_text, GeoAgent.box_llm]),
        ])

        # Right-hand map panel
        right_panel = VBox([self.m])

        # HBox controlling the overall width/height and the 50/50 split below
        layout_all = HBox([
            left_panel,
            right_panel,
        ], layout=Layout(
            width='100%',
            height='330px',
            justify_content='space-between',
        ))

        # Controls and map each take half the width, full height of the parent
        left_panel.layout = Layout(width='50%', height='100%')
        right_panel.layout = Layout(width='50%', height='100%')

        # Show the assembled layout
        display(layout_all)

        # Wire button events.
        GeoAgent.button_set1.on_click(self.set_env)
        GeoAgent.button_set2.on_click(self.set_env2)
        # BUG FIX: ipywidgets' Button.on_click(callback, remove=False) takes a
        # `remove` flag as its second positional argument. The original call
        # `on_click(self._check_id, self.clean)` passed a truthy bound method as
        # `remove`, which UNREGISTERED self._check_id instead of registering it.
        GeoAgent.button_submit.on_click(self._check_id)
        GeoAgent.button_cl.on_click(self.clean)
        GeoAgent.button_abort.on_click(self.abort)
        GeoAgent.get_bounds_button.on_click(self.get_bounds)
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/llm-checkpoint.py
ADDED
@@ -0,0 +1,20 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/5/11 14:45
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : llm.py
|
7 |
+
"""
|
8 |
+
from typing import Optional
|
9 |
+
|
10 |
+
from metagpt_yusin.configs.llm_config import LLMConfig
|
11 |
+
from metagpt_yusin.context import Context
|
12 |
+
from metagpt_yusin.provider.base_llm import BaseLLM
|
13 |
+
|
14 |
+
|
15 |
+
def LLM(llm_config: Optional[LLMConfig] = None, context: Context = None) -> BaseLLM:
    """Build an LLM provider instance.

    When ``llm_config`` is omitted, the default provider of ``context``
    (or of a freshly created ``Context``) is returned.
    """
    ctx = context or Context()
    if llm_config is None:
        return ctx.llm()
    return ctx.llm_with_cost_manager_from_llm_config(llm_config)
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/logs-checkpoint.py
ADDED
@@ -0,0 +1,48 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/6/1 12:41
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : logs.py
|
7 |
+
"""
|
8 |
+
|
9 |
+
import sys
|
10 |
+
from datetime import datetime
|
11 |
+
|
12 |
+
from loguru import logger as _logger
|
13 |
+
|
14 |
+
from metagpt_yusin.const import metagpt_yusin_ROOT
|
15 |
+
|
16 |
+
_print_level = "INFO"
|
17 |
+
|
18 |
+
|
19 |
+
def define_log_level(print_level="INFO", logfile_level="DEBUG", name: str = None):
    """Reconfigure the shared loguru logger.

    Console output is filtered at *print_level*, the daily log file at
    *logfile_level*. When *name* is given it is used as a prefix of the
    log file name.
    """
    global _print_level
    _print_level = print_level

    # Daily log file name: "<name>_YYYYMMDD" or just "YYYYMMDD".
    stamp = datetime.now().strftime("%Y%m%d")
    if name:
        log_name = f"{name}_{stamp}"
    else:
        log_name = stamp

    _logger.remove()  # drop previously registered sinks before re-adding
    _logger.add(sys.stderr, level=print_level)
    _logger.add(metagpt_yusin_ROOT / f"logs/{log_name}.txt", level=logfile_level)
    return _logger
|
32 |
+
|
33 |
+
|
34 |
+
logger = define_log_level()
|
35 |
+
|
36 |
+
|
37 |
+
def log_llm_stream(msg):
    """Forward one streamed LLM chunk to the currently installed stream logger."""
    _llm_stream_log(msg)
|
39 |
+
|
40 |
+
|
41 |
+
def set_llm_stream_logfunc(func):
    """Install *func* as the callback used by ``log_llm_stream``."""
    global _llm_stream_log
    _llm_stream_log = func
|
44 |
+
|
45 |
+
|
46 |
+
def _llm_stream_log(msg):
    """Default stream logger: echo chunks to stdout when console level is INFO."""
    if _print_level == "INFO":
        print(msg, end="")
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/schema-checkpoint.py
ADDED
@@ -0,0 +1,787 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/5/8 22:12
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : schema.py
|
7 |
+
@Modified By: mashenquan, 2023-10-31. According to Chapter 2.2.1 of RFC 116:
|
8 |
+
Replanned the distribution of responsibilities and functional positioning of `Message` class attributes.
|
9 |
+
@Modified By: mashenquan, 2023/11/22.
|
10 |
+
1. Add `Document` and `Documents` for `FileRepository` in Section 2.2.3.4 of RFC 135.
|
11 |
+
2. Encapsulate the common key-values set to pydantic structures to standardize and unify parameter passing
|
12 |
+
between actions.
|
13 |
+
3. Add `id` to `Message` according to Section 2.2.3.1.1 of RFC 135.
|
14 |
+
"""
|
15 |
+
|
16 |
+
from __future__ import annotations
|
17 |
+
|
18 |
+
import asyncio
|
19 |
+
import json
|
20 |
+
import os.path
|
21 |
+
import uuid
|
22 |
+
from abc import ABC
|
23 |
+
from asyncio import Queue, QueueEmpty, wait_for
|
24 |
+
from json import JSONDecodeError
|
25 |
+
from pathlib import Path
|
26 |
+
from typing import Any, Dict, Iterable, List, Optional, Type, TypeVar, Union
|
27 |
+
|
28 |
+
from pydantic import (
|
29 |
+
BaseModel,
|
30 |
+
ConfigDict,
|
31 |
+
Field,
|
32 |
+
PrivateAttr,
|
33 |
+
field_serializer,
|
34 |
+
field_validator,
|
35 |
+
model_serializer,
|
36 |
+
model_validator,
|
37 |
+
)
|
38 |
+
|
39 |
+
from metagpt_yusin.const import (
|
40 |
+
MESSAGE_ROUTE_CAUSE_BY,
|
41 |
+
MESSAGE_ROUTE_FROM,
|
42 |
+
MESSAGE_ROUTE_TO,
|
43 |
+
MESSAGE_ROUTE_TO_ALL,
|
44 |
+
PRDS_FILE_REPO,
|
45 |
+
SYSTEM_DESIGN_FILE_REPO,
|
46 |
+
TASK_FILE_REPO,
|
47 |
+
)
|
48 |
+
from metagpt_yusin.logs import logger
|
49 |
+
from metagpt_yusin.repo_parser import DotClassInfo
|
50 |
+
from metagpt_yusin.utils.common import any_to_str, any_to_str_set, import_class
|
51 |
+
from metagpt_yusin.utils.exceptions import handle_exception
|
52 |
+
from metagpt_yusin.utils.serialize import (
|
53 |
+
actionoutout_schema_to_mapping,
|
54 |
+
actionoutput_mapping_to_str,
|
55 |
+
actionoutput_str_to_mapping,
|
56 |
+
)
|
57 |
+
|
58 |
+
|
59 |
+
class SerializationMixin(BaseModel, extra="forbid"):
    """
    PolyMorphic subclasses Serialization / Deserialization Mixin
    - First of all, we need to know that pydantic is not designed for polymorphism.
    - If Engineer is subclass of Role, it would be serialized as Role. If we want to serialize it as Engineer, we need
      to add `class name` to Engineer. So we need Engineer inherit SerializationMixin.

    More details:
    - https://docs.pydantic.dev/latest/concepts/serialization/
    - https://github.com/pydantic/pydantic/discussions/7008 discuss about avoid `__get_pydantic_core_schema__`
    """

    __is_polymorphic_base = False
    __subclasses_map__ = {}  # fully-qualified class name -> subclass, filled by __init_subclass__

    @model_serializer(mode="wrap")
    def __serialize_with_class_type__(self, default_serializer) -> Any:
        # default serializer, then append the `__module_class_name` field and return
        ret = default_serializer(self)
        ret["__module_class_name"] = f"{self.__class__.__module__}.{self.__class__.__qualname__}"
        return ret

    @model_validator(mode="wrap")
    @classmethod
    def __convert_to_real_type__(cls, value: Any, handler):
        if isinstance(value, dict) is False:
            return handler(value)

        # it is a dict so make sure to remove the __module_class_name
        # because we don't allow extra keywords but want to ensure
        # e.g Cat.model_validate(cat.model_dump()) works
        class_full_name = value.pop("__module_class_name", None)

        # if it's not the polymorphic base we construct via default handler
        if not cls.__is_polymorphic_base:
            if class_full_name is None:
                return handler(value)
            elif str(cls) == f"<class '{class_full_name}'>":
                return handler(value)
            else:
                # Trying to instantiate {class_full_name} but this is not the polymorphic base class
                pass

        # otherwise we lookup the correct polymorphic type and construct that
        # instead
        if class_full_name is None:
            raise ValueError("Missing __module_class_name field")

        class_type = cls.__subclasses_map__.get(class_full_name, None)

        if class_type is None:
            # TODO could try dynamic import
            # BUG FIX: this message was missing the f-prefix, so the placeholder
            # "{class_full_name}" was emitted literally instead of the class name.
            raise TypeError(f"Trying to instantiate {class_full_name}, which has not yet been defined!")

        return class_type(**value)

    def __init_subclass__(cls, is_polymorphic_base: bool = False, **kwargs):
        # Register every subclass under its fully-qualified name so that
        # __convert_to_real_type__ can resolve it back during validation.
        cls.__is_polymorphic_base = is_polymorphic_base
        cls.__subclasses_map__[f"{cls.__module__}.{cls.__qualname__}"] = cls
        super().__init_subclass__(**kwargs)
|
119 |
+
|
120 |
+
|
121 |
+
class SimpleMessage(BaseModel):
    """Minimal chat message: plain text content plus the speaker's role."""

    content: str
    role: str
|
124 |
+
|
125 |
+
|
126 |
+
class Document(BaseModel):
    """
    Represents a document.
    """

    # Directory of the document relative to the repository root.
    root_path: str = ""
    # File name of the document.
    filename: str = ""
    # Full text content; also what __str__/__repr__ return.
    content: str = ""

    def get_meta(self) -> Document:
        """Get metadata of the document.

        :return: A new Document instance with the same root path and filename
                 (content is deliberately left empty).
        """

        return Document(root_path=self.root_path, filename=self.filename)

    @property
    def root_relative_path(self):
        """Get relative path from root of git repository.

        :return: relative path from root of git repository.
        """
        return os.path.join(self.root_path, self.filename)

    def __str__(self):
        return self.content

    def __repr__(self):
        return self.content
|
156 |
+
|
157 |
+
|
158 |
+
class Documents(BaseModel):
    """A class representing a collection of documents.

    Attributes:
        docs (Dict[str, Document]): A dictionary mapping document names to Document instances.
    """

    docs: Dict[str, Document] = Field(default_factory=dict)

    @classmethod
    def from_iterable(cls, documents: Iterable[Document]) -> Documents:
        """Create a Documents instance from a list of Document instances.

        Documents are keyed by their ``filename``; later duplicates overwrite
        earlier ones.

        :param documents: A list of Document instances.
        :return: A Documents instance.
        """

        docs = {doc.filename: doc for doc in documents}
        return Documents(docs=docs)

    def to_action_output(self) -> "ActionOutput":
        """Convert to action output string.

        :return: A string representing action output.
        """
        # Imported locally to avoid a circular import with the actions package.
        from metagpt_yusin.actions.action_output import ActionOutput

        return ActionOutput(content=self.model_dump_json(), instruct_content=self)
|
186 |
+
|
187 |
+
|
188 |
+
class Message(BaseModel):
    """list[<role>: <content>]

    Routed chat message: carries content, an optional structured payload
    (``instruct_content``), and routing metadata (``cause_by`` / ``sent_from`` /
    ``send_to``) normalized to strings by the validators below.
    """

    id: str = Field(default="", validate_default=True)  # According to Section 2.2.3.1.1 of RFC 135
    content: str
    instruct_content: Optional[BaseModel] = Field(default=None, validate_default=True)
    role: str = "user"  # system / user / assistant
    cause_by: str = Field(default="", validate_default=True)
    sent_from: str = Field(default="", validate_default=True)
    send_to: set[str] = Field(default={MESSAGE_ROUTE_TO_ALL}, validate_default=True)

    @field_validator("id", mode="before")
    @classmethod
    def check_id(cls, id: str) -> str:
        # Auto-assign a random hex id when none is provided.
        return id if id else uuid.uuid4().hex

    @field_validator("instruct_content", mode="before")
    @classmethod
    def check_instruct_content(cls, ic: Any) -> BaseModel:
        """Rebuild a BaseModel from the dict form produced by ser_instruct_content."""
        if ic and isinstance(ic, dict) and "class" in ic:
            if "mapping" in ic:
                # compatible with custom-defined ActionOutput
                mapping = actionoutput_str_to_mapping(ic["mapping"])
                actionnode_class = import_class("ActionNode", "metagpt_yusin.actions.action_node")  # avoid circular import
                ic_obj = actionnode_class.create_model_class(class_name=ic["class"], mapping=mapping)
            elif "module" in ic:
                # subclasses of BaseModel
                ic_obj = import_class(ic["class"], ic["module"])
            else:
                raise KeyError("missing required key to init Message.instruct_content from dict")
            ic = ic_obj(**ic["value"])
        return ic

    @field_validator("cause_by", mode="before")
    @classmethod
    def check_cause_by(cls, cause_by: Any) -> str:
        # Empty cause_by defaults to the UserRequirement action (imported lazily).
        return any_to_str(cause_by if cause_by else import_class("UserRequirement", "metagpt_yusin.actions.add_requirement"))

    @field_validator("sent_from", mode="before")
    @classmethod
    def check_sent_from(cls, sent_from: Any) -> str:
        return any_to_str(sent_from if sent_from else "")

    @field_validator("send_to", mode="before")
    @classmethod
    def check_send_to(cls, send_to: Any) -> set:
        # Empty send_to means broadcast to all.
        return any_to_str_set(send_to if send_to else {MESSAGE_ROUTE_TO_ALL})

    @field_serializer("send_to", mode="plain")
    def ser_send_to(self, send_to: set) -> list:
        # Sets are not JSON-serializable; emit a list.
        return list(send_to)

    @field_serializer("instruct_content", mode="plain")
    def ser_instruct_content(self, ic: BaseModel) -> Union[dict, None]:
        """Serialize instruct_content to a dict that check_instruct_content can reverse."""
        ic_dict = None
        if ic:
            # compatible with custom-defined ActionOutput
            schema = ic.model_json_schema()
            ic_type = str(type(ic))
            if "<class 'metagpt_yusin.actions.action_node" in ic_type:
                # instruct_content from AutoNode.create_model_class, for now, it's single level structure.
                mapping = actionoutout_schema_to_mapping(schema)
                mapping = actionoutput_mapping_to_str(mapping)

                ic_dict = {"class": schema["title"], "mapping": mapping, "value": ic.model_dump()}
            else:
                # due to instruct_content can be assigned by subclasses of BaseModel
                ic_dict = {"class": schema["title"], "module": ic.__module__, "value": ic.model_dump()}
        return ic_dict

    def __init__(self, content: str = "", **data: Any):
        # Allow Message("text") as a shorthand for Message(content="text").
        data["content"] = data.get("content", content)
        super().__init__(**data)

    def __setattr__(self, key, val):
        """Override `@property.setter`, convert non-string parameters into string parameters."""
        if key == MESSAGE_ROUTE_CAUSE_BY:
            new_val = any_to_str(val)
        elif key == MESSAGE_ROUTE_FROM:
            new_val = any_to_str(val)
        elif key == MESSAGE_ROUTE_TO:
            new_val = any_to_str_set(val)
        else:
            new_val = val
        super().__setattr__(key, new_val)

    def __str__(self):
        # prefix = '-'.join([self.role, str(self.cause_by)])
        if self.instruct_content:
            return f"{self.role}: {self.instruct_content.model_dump()}"
        return f"{self.role}: {self.content}"

    def __repr__(self):
        return self.__str__()

    def rag_key(self) -> str:
        """For search"""
        return self.content

    def to_dict(self) -> dict:
        """Return a dict containing `role` and `content` for the LLM call."""
        return {"role": self.role, "content": self.content}

    def dump(self) -> str:
        """Convert the object to json string"""
        return self.model_dump_json(exclude_none=True, warnings=False)

    @staticmethod
    @handle_exception(exception_type=JSONDecodeError, default_return=None)
    def load(val):
        """Convert the json string to object.

        Returns None on malformed JSON (both via the inner except and the
        handle_exception decorator).
        """

        try:
            m = json.loads(val)
            # "id" is popped before construction so validators assign a fresh
            # one, then the original id is restored afterwards.
            id = m.get("id")
            if "id" in m:
                del m["id"]
            msg = Message(**m)
            if id:
                msg.id = id
            return msg
        except JSONDecodeError as err:
            logger.error(f"parse json failed: {val}, error:{err}")
        return None
|
312 |
+
|
313 |
+
|
314 |
+
class UserMessage(Message):
    """Message with role fixed to "user".

    Facilitate support for OpenAI messages.
    """

    def __init__(self, content: str):
        super().__init__(content=content, role="user")
|
321 |
+
|
322 |
+
|
323 |
+
class SystemMessage(Message):
    """Message with role fixed to "system".

    Facilitate support for OpenAI messages.
    """

    def __init__(self, content: str):
        super().__init__(content=content, role="system")
|
330 |
+
|
331 |
+
|
332 |
+
class AIMessage(Message):
    """Message with role fixed to "assistant".

    Facilitate support for OpenAI messages.
    """

    def __init__(self, content: str):
        super().__init__(content=content, role="assistant")
|
339 |
+
|
340 |
+
|
341 |
+
class Task(BaseModel):
    """A single plan step: an instruction plus its execution state."""

    task_id: str = ""
    dependent_task_ids: list[str] = []  # Tasks prerequisite to this Task
    instruction: str = ""
    task_type: str = ""
    code: str = ""
    result: str = ""
    is_success: bool = False
    is_finished: bool = False

    def reset(self):
        """Clear execution state (code/result/flags); the instruction itself is kept."""
        self.code = ""
        self.result = ""
        self.is_success = False
        self.is_finished = False

    def update_task_result(self, task_result: TaskResult):
        """Copy code/result/success from a TaskResult; does not set is_finished."""
        self.code = task_result.code
        self.result = task_result.result
        self.is_success = task_result.is_success
|
362 |
+
|
363 |
+
class TaskResult(BaseModel):
    """Result of taking a task, with result and is_success required to be filled"""

    # Code executed for the task (optional).
    code: str = ""
    # Output/result text of the execution (required).
    result: str
    # Whether the execution succeeded (required).
    is_success: bool
|
369 |
+
|
370 |
+
|
371 |
+
class Plan(BaseModel):
    """An ordered collection of interdependent Tasks aimed at ``goal``.

    Invariants maintained by the mutators below:
    - ``tasks`` stays in a valid dependency (topological) order;
    - ``task_map`` mirrors ``tasks`` for O(1) lookup by task_id;
    - ``current_task_id`` is the first unfinished task, or "" when all are finished.
    """

    goal: str
    context: str = ""
    tasks: list[Task] = []
    task_map: dict[str, Task] = {}
    current_task_id: str = ""

    def _topological_sort(self, tasks: list[Task]):
        """Return ``tasks`` reordered so each task appears after its dependencies.

        DFS post-order; ``visited`` prevents re-processing (and infinite
        recursion on cyclic dependencies).
        """
        task_map = {task.task_id: task for task in tasks}
        dependencies = {task.task_id: set(task.dependent_task_ids) for task in tasks}
        sorted_tasks = []
        visited = set()

        def visit(task_id):
            if task_id in visited:
                return
            visited.add(task_id)
            for dependent_id in dependencies.get(task_id, []):
                visit(dependent_id)
            # Robustness fix: a dependent_task_id may reference a task that is
            # not part of `tasks`; the original raised KeyError here. Unknown
            # ids are now skipped instead of crashing the sort.
            if task_id in task_map:
                sorted_tasks.append(task_map[task_id])

        for task in tasks:
            visit(task.task_id)

        return sorted_tasks

    def add_tasks(self, tasks: list[Task]):
        """
        Integrates new tasks into the existing plan, ensuring dependency order is maintained.

        This method performs two primary functions based on the current state of the task list:
        1. If there are no existing tasks, it topologically sorts the provided tasks to ensure
        correct execution order based on dependencies, and sets these as the current tasks.
        2. If there are existing tasks, it merges the new tasks with the existing ones. It maintains
        any common prefix of tasks (based on task_id and instruction) and appends the remainder
        of the new tasks. The current task is updated to the first unfinished task in this merged list.

        Args:
            tasks (list[Task]): A list of tasks (may be unordered) to add to the plan.

        Returns:
            None: The method updates the internal state of the plan but does not return anything.
        """
        if not tasks:
            return

        # Topologically sort the new tasks to ensure correct dependency order
        new_tasks = self._topological_sort(tasks)

        if not self.tasks:
            # If there are no existing tasks, set the new tasks as the current tasks
            self.tasks = new_tasks

        else:
            # Find the length of the common prefix between existing and new tasks
            prefix_length = 0
            for old_task, new_task in zip(self.tasks, new_tasks):
                if old_task.task_id != new_task.task_id or old_task.instruction != new_task.instruction:
                    break
                prefix_length += 1

            # Combine the common prefix with the remainder of the new tasks
            final_tasks = self.tasks[:prefix_length] + new_tasks[prefix_length:]
            self.tasks = final_tasks

        # Update current_task_id to the first unfinished task in the merged list
        self._update_current_task()

        # Update the task map for quick access to tasks by ID
        self.task_map = {task.task_id: task for task in self.tasks}

    def reset_task(self, task_id: str):
        """
        Clear code and result of the task based on task_id, and set the task as unfinished.

        Unknown task_ids are silently ignored.

        Args:
            task_id (str): The ID of the task to be reset.

        Returns:
            None
        """
        if task_id in self.task_map:
            task = self.task_map[task_id]
            task.reset()

    def replace_task(self, new_task: Task):
        """
        Replace an existing task with the new input task based on task_id, and reset all tasks depending on it.

        Args:
            new_task (Task): The new task that will replace an existing one.

        Returns:
            None
        """
        assert new_task.task_id in self.task_map
        # Replace the task in the task map and the task list
        self.task_map[new_task.task_id] = new_task
        for i, task in enumerate(self.tasks):
            if task.task_id == new_task.task_id:
                self.tasks[i] = new_task
                break

        # Reset dependent tasks so they re-run against the replaced task's output
        for task in self.tasks:
            if new_task.task_id in task.dependent_task_ids:
                self.reset_task(task.task_id)

    def append_task(self, new_task: Task):
        """
        Append a new task to the end of existing task sequences

        Args:
            new_task (Task): The new task to be appended to the existing task sequence

        Returns:
            None
        """
        assert not self.has_task_id(new_task.task_id), "Task already in current plan, use replace_task instead"

        assert all(
            [self.has_task_id(dep_id) for dep_id in new_task.dependent_task_ids]
        ), "New task has unknown dependencies"

        # Existing tasks do not depend on the new task, it's fine to put it to the end of the sorted task sequence
        self.tasks.append(new_task)
        self.task_map[new_task.task_id] = new_task
        self._update_current_task()

    def has_task_id(self, task_id: str) -> bool:
        """Return True if ``task_id`` belongs to a task in this plan."""
        return task_id in self.task_map

    def _update_current_task(self):
        """Point current_task_id at the first unfinished task ("" if all finished)."""
        current_task_id = ""
        for task in self.tasks:
            if not task.is_finished:
                current_task_id = task.task_id
                break
        self.current_task_id = current_task_id  # all tasks finished

    @property
    def current_task(self) -> Optional[Task]:
        """Find current task to execute

        Returns:
            Optional[Task]: the current task to be executed, or None when all
            tasks are finished (current_task_id is "").
        """
        return self.task_map.get(self.current_task_id, None)

    def finish_current_task(self):
        """Finish current task, set Task.is_finished=True, set current task to next task"""
        if self.current_task_id:
            self.current_task.is_finished = True
            self._update_current_task()  # set to next task

    def get_finished_tasks(self) -> list[Task]:
        """return all finished tasks in correct linearized order

        Returns:
            list[Task]: list of finished tasks
        """
        return [task for task in self.tasks if task.is_finished]
|
533 |
+
|
534 |
+
|
535 |
+
class MessageQueue(BaseModel):
    """Message queue which supports asynchronous updates."""

    model_config = ConfigDict(arbitrary_types_allowed=True)

    # Backing asyncio queue; PrivateAttr keeps it out of the pydantic schema.
    _queue: Queue = PrivateAttr(default_factory=Queue)

    def pop(self) -> Message | None:
        """Pop one message from the queue (non-blocking); None when empty."""
        try:
            item = self._queue.get_nowait()
            if item:
                self._queue.task_done()
            return item
        except QueueEmpty:
            return None

    def pop_all(self) -> List[Message]:
        """Pop all messages from the queue."""
        ret = []
        while True:
            msg = self.pop()
            if not msg:
                break
            ret.append(msg)
        return ret

    def push(self, msg: Message):
        """Push a message into the queue."""
        self._queue.put_nowait(msg)

    def empty(self):
        """Return true if the queue is empty."""
        return self._queue.empty()

    async def dump(self) -> str:
        """Convert the `MessageQueue` object to a json string.

        Non-destructive: drained messages are re-queued in their original order.
        """
        if self.empty():
            return "[]"

        lst = []
        msgs = []
        try:
            while True:
                # Timeout guards against blocking forever if the queue is refilled concurrently.
                item = await wait_for(self._queue.get(), timeout=1.0)
                if item is None:
                    break
                msgs.append(item)
                lst.append(item.dump())
                self._queue.task_done()
        except asyncio.TimeoutError:
            logger.debug("Queue is empty, exiting...")
        finally:
            # Restore every drained message so the queue is unchanged by dump().
            for m in msgs:
                self._queue.put_nowait(m)
        return json.dumps(lst, ensure_ascii=False)

    @staticmethod
    def load(data) -> "MessageQueue":
        """Convert the json string to the `MessageQueue` object.

        Returns an empty queue (plus a warning log) on malformed JSON.
        """
        queue = MessageQueue()
        try:
            lst = json.loads(data)
            for i in lst:
                msg = Message.load(i)
                queue.push(msg)
        except JSONDecodeError as e:
            logger.warning(f"JSON load failed: {data}, error:{e}")

        return queue
|
605 |
+
|
606 |
+
|
607 |
+
# Generic type variable bound to BaseModel, used by BaseContext.loads below.
T = TypeVar("T", bound="BaseModel")
|
609 |
+
|
610 |
+
|
611 |
+
class BaseContext(BaseModel, ABC):
    """Abstract base for serializable action contexts."""

    @classmethod
    @handle_exception
    def loads(cls: Type[T], val: str) -> Optional[T]:
        """Deserialize a JSON string into an instance of the concrete context class."""
        payload = json.loads(val)
        return cls(**payload)
|
617 |
+
|
618 |
+
|
619 |
+
class CodingContext(BaseContext):
    """Context passed between coding-related actions for a single source file."""

    # Target source file this context is about.
    filename: str
    # Design document, when one has been produced.
    design_doc: Optional[Document] = None
    # Task-breakdown document, when available.
    task_doc: Optional[Document] = None
    # The generated or revised code document.
    code_doc: Optional[Document] = None
    # Incremental code-plan-and-change document, if the workflow produced one.
    code_plan_and_change_doc: Optional[Document] = None
|
625 |
+
|
626 |
+
|
627 |
+
class TestingContext(BaseContext):
    """Context for generating tests for one code document."""

    # Name of the file under test.
    filename: str
    # The code document to test (required, unlike most optional docs).
    code_doc: Document
    # The generated test document; None until tests are written.
    test_doc: Optional[Document] = None
|
631 |
+
|
632 |
+
|
633 |
+
class RunCodeContext(BaseContext):
    """Context describing how to execute a piece of code and where its results go."""

    # Execution mode; "script" is the default — other accepted values not visible here, TODO confirm.
    mode: str = "script"
    # Inline code to run, if not reading from a file.
    code: Optional[str] = None
    # Filename holding the code to run.
    code_filename: str = ""
    # Inline test code, if any.
    test_code: Optional[str] = None
    # Filename holding the test code.
    test_filename: str = ""
    # Command line used to launch the run.
    command: List[str] = Field(default_factory=list)
    # Working directory for the execution.
    working_directory: str = ""
    # Extra Python path entries for the run — presumably added to sys.path/PYTHONPATH, verify in runner.
    additional_python_paths: List[str] = Field(default_factory=list)
    # Where captured output is stored when persisted.
    output_filename: Optional[str] = None
    # Captured output of the run.
    output: Optional[str] = None
|
644 |
+
|
645 |
+
|
646 |
+
class RunCodeResult(BaseContext):
    """Outcome of a code-execution run."""

    # Human-readable summary of the run.
    summary: str
    # Captured standard output.
    stdout: str
    # Captured standard error.
    stderr: str
|
650 |
+
|
651 |
+
|
652 |
+
class CodeSummarizeContext(BaseModel):
    """Context for code summarization: which design/task docs and code files are involved."""

    design_filename: str = ""
    task_filename: str = ""
    codes_filenames: List[str] = Field(default_factory=list)
    reason: str = ""

    @staticmethod
    def loads(filenames: List) -> CodeSummarizeContext:
        """Route each filename into the design/task slot matching its repo location."""
        ctx = CodeSummarizeContext()
        for filename in filenames:
            path = Path(filename)
            if path.is_relative_to(SYSTEM_DESIGN_FILE_REPO):
                ctx.design_filename = str(filename)
            elif path.is_relative_to(TASK_FILE_REPO):
                ctx.task_filename = str(filename)
        return ctx

    def __hash__(self):
        """Hash on the (design, task) pair so contexts can be used as set/dict keys."""
        return hash((self.design_filename, self.task_filename))
|
672 |
+
|
673 |
+
|
674 |
+
class BugFixContext(BaseContext):
    """Context for a bug-fix action; carries only the file to fix."""

    # File the bug fix applies to.
    filename: str = ""
|
676 |
+
|
677 |
+
|
678 |
+
class CodePlanAndChangeContext(BaseModel):
    """Context for the incremental code-plan-and-change workflow."""

    requirement: str = ""
    issue: str = ""
    prd_filename: str = ""
    design_filename: str = ""
    task_filename: str = ""

    @staticmethod
    def loads(filenames: List, **kwargs) -> CodePlanAndChangeContext:
        """Build a context, routing each filename to the matching doc slot by repo location."""
        ctx = CodePlanAndChangeContext(
            requirement=kwargs.get("requirement", ""),
            issue=kwargs.get("issue", ""),
        )
        for raw in filenames:
            path = Path(raw)
            if path.is_relative_to(PRDS_FILE_REPO):
                ctx.prd_filename = path.name
            elif path.is_relative_to(SYSTEM_DESIGN_FILE_REPO):
                ctx.design_filename = path.name
            elif path.is_relative_to(TASK_FILE_REPO):
                ctx.task_filename = path.name
        return ctx
|
700 |
+
|
701 |
+
|
702 |
+
# mermaid class view
|
703 |
+
# mermaid class view
class UMLClassMeta(BaseModel):
    """Shared name/visibility fields for mermaid UML diagram elements."""

    name: str = ""
    visibility: str = ""

    @staticmethod
    def name_to_visibility(name: str) -> str:
        """Map a Python identifier's naming convention to a mermaid visibility marker."""
        if name == "__init__":
            return "+"
        # Check dunder-style before single-underscore so "__x" maps to private.
        for prefix, marker in (("__", "-"), ("_", "#")):
            if name.startswith(prefix):
                return marker
        return "+"
|
716 |
+
|
717 |
+
|
718 |
+
class UMLClassAttribute(UMLClassMeta):
    """A UML attribute rendered as a mermaid class-diagram member."""

    value_type: str = ""
    default_value: str = ""

    def get_mermaid(self, align=1) -> str:
        """Render this attribute as one mermaid line, indented by ``align`` tabs."""
        out = "\t" * align + self.visibility
        if self.value_type:
            out += self.value_type.replace(" ", "") + " "
        # Keep only the part after the first colon when the name is "scope:name".
        _, colon, tail = self.name.partition(":")
        out += tail if colon else self.name
        if self.default_value:
            if self.value_type in ("str", "string", "String"):
                # String defaults get quoted; embedded quotes are stripped.
                out += '="' + self.default_value.replace('"', "") + '"'
            else:
                out += "=" + self.default_value
        return out
|
739 |
+
|
740 |
+
|
741 |
+
class UMLClassMethod(UMLClassMeta):
    """A UML method rendered as a mermaid class-diagram member."""

    args: List[UMLClassAttribute] = Field(default_factory=list)
    return_type: str = ""

    def get_mermaid(self, align=1) -> str:
        """Render this method as one mermaid line, indented by ``align`` tabs."""
        # Keep only the part after the first colon when the name is "scope:name".
        _, colon, tail = self.name.partition(":")
        label = tail if colon else self.name
        rendered_args = ",".join(arg.get_mermaid(align=0) for arg in self.args)
        line = "\t" * align + self.visibility + label + "(" + rendered_args + ")"
        if self.return_type:
            line += " " + self.return_type.replace(" ", "")
        return line
|
756 |
+
|
757 |
+
|
758 |
+
class UMLClassView(UMLClassMeta):
    """A full UML class (attributes + methods) renderable as a mermaid classDiagram entry."""

    attributes: List[UMLClassAttribute] = Field(default_factory=list)
    methods: List[UMLClassMethod] = Field(default_factory=list)

    def get_mermaid(self, align=1) -> str:
        """Render the class block, indenting members one level deeper than the header."""
        indent = "\t" * align
        content = indent + "class " + self.name + "{\n"
        for member in self.attributes:
            content += member.get_mermaid(align=align + 1) + "\n"
        for member in self.methods:
            content += member.get_mermaid(align=align + 1) + "\n"
        content += indent + "}\n"
        return content

    @classmethod
    def load_dot_class_info(cls, dot_class_info: DotClassInfo) -> UMLClassView:
        """Build a UMLClassView from a parsed DotClassInfo.

        Args:
            dot_class_info: class description parsed from the repo's dot/AST representation.

        Returns:
            UMLClassView: the populated view, ready for mermaid rendering.
        """
        class_view = cls(
            name=dot_class_info.name,
            visibility=UMLClassView.name_to_visibility(dot_class_info.name),
        )
        # Distinct loop-variable names: the original reused ``visibility`` and ``i``,
        # shadowing the outer value.
        for attr_info in dot_class_info.attributes.values():
            attr = UMLClassAttribute(
                name=attr_info.name,
                visibility=UMLClassAttribute.name_to_visibility(attr_info.name),
                value_type=attr_info.type_,
                default_value=attr_info.default_,
            )
            class_view.attributes.append(attr)
        for method_info in dot_class_info.methods.values():
            method = UMLClassMethod(
                name=method_info.name,
                visibility=UMLClassMethod.name_to_visibility(method_info.name),
                return_type=method_info.return_args.type_,
            )
            for arg_info in method_info.args:
                method.args.append(
                    UMLClassAttribute(name=arg_info.name, value_type=arg_info.type_, default_value=arg_info.default_)
                )
            # Fix: removed the redundant re-assignment of ``method.return_type`` —
            # the constructor above already sets it to the same value.
            class_view.methods.append(method)
        return class_view
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/set_envs-checkpoint.py
ADDED
@@ -0,0 +1,87 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2024/1/4 01:25
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : config2.py
|
7 |
+
"""
|
8 |
+
|
9 |
+
import os
|
10 |
+
from metagpt_yusin.config2 import Config
|
11 |
+
from ipywidgets import Tab, Label, Button, Textarea, VBox, HBox, Layout, Password, Dropdown
|
12 |
+
from IPython.display import display
|
13 |
+
from ipylab import JupyterFrontEnd
|
14 |
+
app = JupyterFrontEnd()
|
15 |
+
|
16 |
+
def restart():
|
17 |
+
# restart kernel
|
18 |
+
app.commands.execute('kernelmenu:restart-and-clear')
|
19 |
+
|
20 |
+
class SetLlmEnv:
    """Widget panel for choosing the LLM backend and entering API / data-source keys.

    Selections are exported into ``os.environ`` so downstream config code can read them.
    SECURITY NOTE(review): real-looking API keys are hard-coded as widget defaults
    below — they should be rotated and removed from source control.
    """

    # LLM provider selector.
    api_type = Dropdown(
        options=['openai', 'gemini', 'llama3', 'groq', 'openrouter'],
        value='openrouter', #'groq', #'openai',
        description='API Type:',
        disabled=False,
        layout=Layout(width='500px', height='30px'))

    # Model selector; should correspond to the chosen provider.
    model = Dropdown(
        options=['gpt-3.5-turbo-1106','deepseek-r1-distill-llama-70b', 'tngtech/deepseek-r1t-chimera:free'],
        value='tngtech/deepseek-r1t-chimera:free', #'deepseek-r1-distill-llama-70b', #'gpt-3.5-turbo-1106',
        description='Model:',
        disabled=False,
        layout=Layout(width='500px', height='30px'))

    # Masked LLM API-key input. SECURITY: default value is a live-looking secret.
    api_key = Password(
        value='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',#'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', #'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',
        placeholder='Enter API Key',
        description='API Key :',
        disabled=False,
        layout=Layout(width='800px', height='30px'))


    # Masked Google Earth Engine token input.
    gee_key = Password(
        value='sk-.....................................................................',
        placeholder='Provide your GEE key here',
        description='Data Key:',
        disabled=False,
        layout=Layout(width='800px', height='30px'))

    # Masked Planetary Computer subscription-key input.
    pc_key = Password(
        value='sk-',
        placeholder='Provide your Planetary Computer key here',
        description='PC Key :',
        disabled=False,
        layout=Layout(width='800px', height='30px'))

    # Submit buttons for the LLM tab (set1) and the data-source tab (set2).
    button_set1 = Button(description='Submit', layout=Layout(height='30px', width='200px'))
    button_set2 = Button(description='Submit', layout=Layout(height='30px', width='200px'))


    @staticmethod
    def set_env(event):
        # Export the LLM selections for downstream config to pick up.
        os.environ['api_type'] = SetLlmEnv.api_type.value
        os.environ['model'] = SetLlmEnv.model.value
        os.environ['api_key'] = SetLlmEnv.api_key.value
        app.commands.execute('notebook:move-cursor-down')

    @staticmethod
    def set_env2(event):
        # Export the data-source credentials.
        os.environ['EARTHENGINE_TOKEN'] = SetLlmEnv.gee_key.value
        os.environ['PC_SDK_SUBSCRIPTION_KEY'] = SetLlmEnv.pc_key.value
        #os.environ['MLHub API Key'] = SetLlmEnv.mkhub_key.value

    @classmethod
    def default(cls):
        """Build and display the two-tab settings UI and wire the submit buttons."""
        data_set = HBox([VBox([Label(value="Provide GEE key here:"), SetLlmEnv.gee_key]),
                         VBox([Label(value="Provide Planetary Computer key here:"), SetLlmEnv.pc_key]),
                         VBox([Label(value=" "), SetLlmEnv.button_set2])])
        llm_set = VBox([HBox([SetLlmEnv.api_type, SetLlmEnv.model]), HBox([SetLlmEnv.api_key, SetLlmEnv.button_set1])])
        tab_nest = Tab()
        tab_nest.children = [data_set, llm_set]
        tab_nest.titles = ('Data Sources', 'LLM Models')

        display(VBox([Label(value="Specify your preferred:"), tab_nest]))
        SetLlmEnv.button_set1.on_click(cls.set_env)
        SetLlmEnv.button_set2.on_click(cls.set_env2)
|
notebook_dir/metagpt_yusin/.ipynb_checkpoints/tasks-checkpoint.py
ADDED
@@ -0,0 +1,246 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2024/1/4 01:25
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : config2.py
|
7 |
+
"""
|
8 |
+
import leafmap
|
9 |
+
import asyncio
|
10 |
+
import nest_asyncio
|
11 |
+
import nbformat
|
12 |
+
from nbclient import NotebookClient
|
13 |
+
from ipylab import JupyterFrontEnd
|
14 |
+
from metagpt_yusin.logs import log_llm_stream, logger
|
15 |
+
from ipywidgets import DatePicker, Text, Button, Textarea, Tab, Box, VBox, HBox, Layout, Label, Password, Dropdown, FloatText
|
16 |
+
from traitlets import observe, link, Unicode, Bool, Any
|
17 |
+
from IPython.display import display
|
18 |
+
nest_asyncio.apply()
|
19 |
+
app = JupyterFrontEnd()
|
20 |
+
|
21 |
+
|
22 |
+
class ConfirmationButton(VBox):
    """A button that expands into a confirm/cancel pair on first click.

    Mirrors Button's public traits so it can stand in where a Button is expected.
    """

    # Traits mirrored from ipywidgets.Button for API compatibility.
    button_style = Any(default_value='')
    description = Unicode()
    disabled = Bool()
    icon = Unicode()
    layout = Any()
    style = Any()
    tooltip = Unicode()

    def __init__(self, **kwargs):
        super().__init__(**kwargs)
        # Primary button shown initially; clicking it reveals confirm/cancel.
        self._button = Button(**kwargs)
        self._confirm_btn = Button(description='confirm', icon='check', button_style='success', layout=dict(height='45%', width='90%'))
        self._cancel_btn = Button(description='cancel', icon='times', button_style='warning', layout=dict(height='45%', width='90%'), disabled=True)
        # All three buttons route through _on_btn_click to toggle the visible children.
        self._button.on_click(self._on_btn_click)
        self._cancel_btn.on_click(self._on_btn_click)
        self._confirm_btn.on_click(self._on_btn_click)
        self.children = [self._button]

    def on_click(self, *args, **kwargs):
        """Register callbacks: args[0] fires on confirm, args[1] on cancel.

        NOTE(review): expects exactly two positional callbacks — fewer raises
        IndexError and extras are silently ignored.
        """
        self._confirm_btn.on_click(args[0], **kwargs)
        self._cancel_btn.on_click(args[1], **kwargs)

    def _on_btn_click(self, b):
        # Toggle between the single button and the confirm/cancel pair.
        if b==self._button:
            self.children = [self._confirm_btn, self._cancel_btn]
        else:
            # Confirm or cancel was pressed: collapse back to the single button.
            self.children = [self._button]
|
50 |
+
|
51 |
+
|
52 |
+
class RunLLM:
    """Jupyter widget front-end that collects a geospatial task description plus
    time/area constraints and drives the DataInterpreter agent."""

    # context
    context = None
    # overall tasks
    tasks = []
    # widget
    input_text = Textarea(
        #description='Give your task description:',
        placeholder='Give your task description, such as: run data analysis on sklearn Iris dataset, include a plot',
        disabled=False,
        layout=Layout(height='90px', width='400px')
    )
    #button_submit = Button(description='Submit', layout=Layout(height='100px', width='100px'))
    button_submit = ConfirmationButton(description='Submit', layout=Layout(height='30px', width='80px'))
    button_cl = Button(description='Clean', disabled=False, layout=Layout(height='30px', width='80px'))
    button_abort = Button(description='Abort', disabled=True, layout=Layout(height='30px', width='80px'))
    box_llm = HBox([
        input_text,
        VBox([button_submit, button_cl, button_abort])
    ])
    #Box(layout=Layout(width='500px', height='150px'))
    #box_llm.children += (input_text, VBox([button_submit, button_cl, button_abort], layout=Layout(height='110px', width='80px')))
    # popup button
    notice_text = Text(placeholder='Make sure whether you want to include the following cells as a context for your text, such as the refined task description and code of previous steps',
                       disabled=False,
                       layout=Layout(height='30px', width='500px'))
    context = None

    # Create the map with initial center and zoom level.
    m = leafmap.Map(center=[37.6412, -122.1353], zoom=15, height="800px")
    m.add_basemap("SATELLITE")

    # Date-range selection widgets.
    start_date = DatePicker(description='Start Date', layout=Layout(height='30px', width='50%'))
    end_date = DatePicker(description='End Date', layout=Layout(height='30px', width='50%'))
    data_source = Dropdown(
        options=['Online Tile', 'GEE', 'NASA Earth Data', 'OSM', 'AWS', 'CDSE', 'MAXAR', 'Planetary Computer'],
        value='Online Tile', #'groq', #'openai',
        description='Data Source:',
        disabled=False,
        layout=Layout(height='30px', width='78%'))

    # User area-selection widgets.
    bounds_label = Label(value="", layout=Layout(height='30px', width='65%', border='1px solid gray', padding='5px',
                                                 display='flex', align_items='center', justify_content='center'))
    get_bounds_button = Button(description="Get Selected Bounds", layout=Layout(height='30px', width='35%'))

    @staticmethod
    def get_bounds(event):
        # Re-read the map selection each time the button is clicked.
        if RunLLM.m.user_roi is not None:
            bbox = RunLLM.m.user_roi_bounds()  # bounding box of the currently drawn region
            RunLLM.bounds_label.value = f"{bbox}"
        else:
            # NOTE(review): Label.value is a Unicode trait — assigning None likely
            # raises a TraitError; "" looks intended. Verify.
            RunLLM.bounds_label.value = None

    @staticmethod
    def clean(event):
        """Merge and delete the generated cells below the widget cell."""
        RunLLM.notice_text.value = 'excluding and cleaning the following cells.'
        #print('clean cells!')
        app.commands.execute('notebook:move-cursor-down')
        app.commands.execute('notebook:insert-cell-above')
        # Merge up to 100 cells below into one, then delete the merged cell.
        for i in range(100):
            #if app.commands.execute('notebook:merge-cell-below'):
            #    app.commands.execute('notebook:delete-cell')
            app.commands.execute('notebook:merge-cell-below')
        app.commands.execute('notebook:delete-cell')
        # clean last cell
        app.commands.execute('notebook:delete-cell')
        # turn off clean after cleaning
        RunLLM.button_cl.disabled = True
        RunLLM.button_submit._cancel_btn.disabled = True

    @staticmethod
    def abort(event):
        """Cancel any in-flight agent tasks and restore button states."""
        RunLLM.button_cl.disabled = False
        RunLLM.button_submit._cancel_btn.disabled = False
        #print('abort!')
        for task in RunLLM.tasks:
            task.cancel()
        RunLLM.tasks.clear()
        # after aborting, disable the abort button itself
        RunLLM.button_abort.disabled = True

    def _check_id(self, b):
        """Gather notebook context and user constraints, then run the agent job."""
        RunLLM.notice_text.value = 'keep the following cells as the context of current step.'
        RunLLM.button_abort.disabled = True
        '''
        # get context -------------------------------
        app.commands.execute('docmanager:save')
        app.commands.execute('notebook:insert-cell-below')
        app.commands.execute('notebook:replace-selection', {'text': "from ipylab import JupyterFrontEnd\napp = JupyterFrontEnd()\nnb_path = app.sessions.sessions[0]['path']"})
        app.commands.execute('notebook:run-cell')
        #app.commands.execute('notebook:delete-cell')
        print(app)
        # -------------------------------------------
        '''
        nb_path = app.sessions.sessions[0]['path']
        # Load the notebook
        with open(nb_path, 'r') as notebook_file:
            notebook_content = notebook_file.read()
        # Parse the notebook
        notebook = nbformat.reads(notebook_content, as_version=4)
        content = []
        for i_cell in notebook['cells']:
            #if i_cell['cell_type'] == 'code':
            content.append(i_cell['source'])
        #print(content)
        # get the first place of the given strings
        # NOTE(review): bare except silently maps any failure (not just a missing
        # marker) to None; index_code is also referenced in the second try.
        try:
            index_code = content.index('# Here is the code part!')
            code = content[index_code+1:]
        except:
            code = None
        try:
            index_task = content.index('# Decomposing the overall task into tasks!')
            task = content[index_task+1:index_code]
        except:
            task = None
        # give context
        if code is None or task is None:
            index_context = content.index('from metagpt_yusin.geoagent import GeoAgent\nGeoAgent().default()')
            RunLLM.context = '\nThis is the initial subtasks and codes:\n' + '\n'.join(content[index_context+1:]) + '\nNow please further improve these initial subtasks and codes.'
        else:
            RunLLM.context = '\nThis is the initial subtasks:\n' + '\n'.join(task) + '\nAnd the initial generated codes:\n' + '\n'.join(code) + '\nNow please further improve these initial subtasks and codes.'
        #print(context)

        # Validate the time range.
        if not RunLLM.start_date.value or not RunLLM.end_date.value:
            RunLLM.notice_text.value = "!!Please select a valid start and end date before submitting."
            raise ValueError("Please select a valid start and end date before submitting.")
        RunLLM.context += f"\nTime Range: {RunLLM.start_date.value} to {RunLLM.end_date.value}"

        # Validate the area bounds.
        if not RunLLM.bounds_label.value:
            RunLLM.notice_text.value = "!!Please select a valid bounding box (area) before submitting."
            raise ValueError("Please select a valid bounding box (area) before submitting.")
        RunLLM.context += f"\nBounds: {RunLLM.bounds_label.value}"

        # Run the job on a fresh event loop (nest_asyncio allows nesting in Jupyter).
        loop = asyncio.new_event_loop()
        asyncio.set_event_loop(loop)
        try:
            loop.run_until_complete(self.start_job())
            RunLLM.tasks.clear()
        except KeyboardInterrupt:
            print("User termination detected")
        finally:
            loop.close()

    async def async_run_task(self, ):
        """Run the DataInterpreter on the task text + gathered context, then save history."""
        # Local imports so the heavy agent modules load only when a job actually runs.
        from metagpt_yusin.logs import logger
        from metagpt_yusin.roles.di.data_interpreter import DataInterpreter
        from metagpt_yusin.utils.recovery_util import save_history
        # Detach the context-gathering handler while the job is running.
        RunLLM.button_submit._confirm_btn.on_click(self._check_id, remove=True)
        #print('running job')
        #goal = 'This is the overall task: ' + goal + context
        #print(input_text.value)
        output = await DataInterpreter.jynb_run(RunLLM.input_text.value, RunLLM.context)
        tasks, tasks_results, di = output[0], output[1], output[2]
        logger.info('-----------------------------------')
        logger.info(tasks)
        logger.info(tasks_results)
        save_history(role=di)
        #print('job complete!')

    async def run_task(self, ):
        # disable clean/cancel while running
        RunLLM.button_cl.disabled = True
        RunLLM.button_submit._cancel_btn.disabled = True
        # enable abort while running
        RunLLM.button_abort.disabled = False
        await self.async_run_task()
        # re-enable clean/cancel after one iteration finishes
        RunLLM.button_cl.disabled = False
        RunLLM.button_submit._cancel_btn.disabled = False
        # disable abort once idle again
        RunLLM.button_abort.disabled = True

    def run_job(self, event):
        # Schedule the run on the current loop; keep the handle so abort() can cancel it.
        task = asyncio.create_task(self.run_task())
        RunLLM.tasks.append(task)

    async def start_job(self,):
        try:
            RunLLM.button_submit.description='Submit'
            # From now on, confirm clicks launch jobs rather than re-collecting context.
            RunLLM.button_submit._confirm_btn.on_click(self.run_job)
        except KeyboardInterrupt:
            # Handle keyboard interrupt (user termination)
            print("User termination detected")

    def default(self,):
        """Display the task-entry UI and wire all button callbacks."""
        display(VBox([RunLLM.box_llm, RunLLM.notice_text]))
        RunLLM.button_submit.on_click(self._check_id, self.clean)
        RunLLM.button_cl.on_click(self.clean)
        RunLLM.button_abort.on_click(self.abort)
|
notebook_dir/metagpt_yusin/__init__.py
ADDED
@@ -0,0 +1,7 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
# @Time : 2023/4/24 22:26
|
4 |
+
# @Author : alexanderwu
|
5 |
+
# @File : __init__.py
|
6 |
+
|
7 |
+
from metagpt_yusin import _compat as _ # noqa: F401
|
notebook_dir/metagpt_yusin/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (203 Bytes). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/_compat.cpython-39.pyc
ADDED
Binary file (841 Bytes). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/config2.cpython-39.pyc
ADDED
Binary file (6.14 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/const.cpython-39.pyc
ADDED
Binary file (4.06 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/context.cpython-39.pyc
ADDED
Binary file (5.21 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/context_mixin.cpython-39.pyc
ADDED
Binary file (3.44 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/geoagent.cpython-39.pyc
ADDED
Binary file (1.98 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/llm.cpython-39.pyc
ADDED
Binary file (748 Bytes). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/logs.cpython-39.pyc
ADDED
Binary file (1.29 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/repo_parser.cpython-39.pyc
ADDED
Binary file (32.7 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/schema.cpython-39.pyc
ADDED
Binary file (28.1 kB). View file
|
|
notebook_dir/metagpt_yusin/__pycache__/tasks.cpython-39.pyc
ADDED
Binary file (7.85 kB). View file
|
|
notebook_dir/metagpt_yusin/_compat.py
ADDED
@@ -0,0 +1,23 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
import platform
import sys
import warnings

# Windows + CPython only: work around asyncio teardown problems.
if sys.implementation.name == "cpython" and platform.system() == "Windows":
    import asyncio

    if sys.version_info[:2] == (3, 9):
        from asyncio.proactor_events import _ProactorBasePipeTransport

        # https://github.com/python/cpython/pull/92842
        # Backport: guard __del__ against a missing socket so interpreter
        # shutdown does not raise from half-closed proactor transports.
        def pacth_del(self, _warn=warnings.warn):
            if self._sock is not None:
                _warn(f"unclosed transport {self!r}", ResourceWarning, source=self)
                self._sock.close()

        _ProactorBasePipeTransport.__del__ = pacth_del

    if sys.version_info >= (3, 9, 0):
        # Import semantic_kernel before swapping the loop policy — presumably
        # required ordering; see the PR referenced below. TODO confirm still needed.
        from semantic_kernel.orchestration import sk_function as _  # noqa: F401

        # caused by https://github.com/microsoft/semantic-kernel/pull/1416
        asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())
|
notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/__init__-checkpoint.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/5/11 17:44
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : __init__.py
|
7 |
+
"""
|
8 |
+
from enum import Enum
|
9 |
+
|
10 |
+
from metagpt_yusin.actions.action import Action
|
11 |
+
from metagpt_yusin.actions.action_output import ActionOutput
|
12 |
+
from metagpt_yusin.actions.add_requirement import UserRequirement
|
13 |
+
from metagpt_yusin.actions.debug_error import DebugError
|
14 |
+
from metagpt_yusin.actions.design_api import WriteDesign
|
15 |
+
from metagpt_yusin.actions.design_api_review import DesignReview
|
16 |
+
from metagpt_yusin.actions.project_management import WriteTasks
|
17 |
+
from metagpt_yusin.actions.research import CollectLinks, WebBrowseAndSummarize, ConductResearch
|
18 |
+
from metagpt_yusin.actions.run_code import RunCode
|
19 |
+
from metagpt_yusin.actions.search_and_summarize import SearchAndSummarize
|
20 |
+
from metagpt_yusin.actions.write_code import WriteCode
|
21 |
+
from metagpt_yusin.actions.write_code_review import WriteCodeReview
|
22 |
+
from metagpt_yusin.actions.write_prd import WritePRD
|
23 |
+
from metagpt_yusin.actions.write_prd_review import WritePRDReview
|
24 |
+
from metagpt_yusin.actions.write_test import WriteTest
|
25 |
+
from metagpt_yusin.actions.di.execute_nb_code import ExecuteNbCode
|
26 |
+
from metagpt_yusin.actions.di.write_analysis_code import WriteAnalysisCode
|
27 |
+
from metagpt_yusin.actions.di.write_plan import WritePlan
|
28 |
+
|
29 |
+
|
30 |
+
class ActionType(Enum):
    """All types of Actions, used for indexing."""

    ADD_REQUIREMENT = UserRequirement
    WRITE_PRD = WritePRD
    WRITE_PRD_REVIEW = WritePRDReview
    WRITE_DESIGN = WriteDesign
    DESIGN_REVIEW = DesignReview
    # NOTE(review): member name is misspelled ("WRTIE"); kept as-is because
    # callers may already index this enum by that name.
    WRTIE_CODE = WriteCode
    WRITE_CODE_REVIEW = WriteCodeReview
    WRITE_TEST = WriteTest
    RUN_CODE = RunCode
    DEBUG_ERROR = DebugError
    WRITE_TASKS = WriteTasks
    SEARCH_AND_SUMMARIZE = SearchAndSummarize
    COLLECT_LINKS = CollectLinks
    WEB_BROWSE_AND_SUMMARIZE = WebBrowseAndSummarize
    CONDUCT_RESEARCH = ConductResearch
    EXECUTE_NB_CODE = ExecuteNbCode
    WRITE_ANALYSIS_CODE = WriteAnalysisCode
    WRITE_PLAN = WritePlan


# Public names re-exported by this package's top-level namespace.
__all__ = [
    "ActionType",
    "Action",
    "ActionOutput",
]
|
notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/action-checkpoint.py
ADDED
@@ -0,0 +1,106 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/5/11 14:43
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : action.py
|
7 |
+
"""
|
8 |
+
|
9 |
+
from __future__ import annotations
|
10 |
+
|
11 |
+
from typing import Optional, Union
|
12 |
+
|
13 |
+
from pydantic import BaseModel, ConfigDict, Field, model_validator
|
14 |
+
|
15 |
+
from metagpt_yusin.actions.action_node import ActionNode
|
16 |
+
from metagpt_yusin.context_mixin import ContextMixin
|
17 |
+
from metagpt_yusin.schema import (
|
18 |
+
CodePlanAndChangeContext,
|
19 |
+
CodeSummarizeContext,
|
20 |
+
CodingContext,
|
21 |
+
RunCodeContext,
|
22 |
+
SerializationMixin,
|
23 |
+
TestingContext,
|
24 |
+
)
|
25 |
+
from metagpt_yusin.utils.project_repo import ProjectRepo
|
26 |
+
|
27 |
+
|
28 |
+
class Action(SerializationMixin, ContextMixin, BaseModel):
    """Base class for every action.

    An action either overrides `run` in a subclass, or is constructed with an
    `instruction`, in which case a raw-schema ActionNode is built and executed
    on its behalf.
    """

    model_config = ConfigDict(arbitrary_types_allowed=True)

    name: str = ""
    i_context: Union[
        dict, CodingContext, CodeSummarizeContext, TestingContext, RunCodeContext, CodePlanAndChangeContext, str, None
    ] = ""
    prefix: str = ""  # prepended as the system message by aask* calls
    desc: str = ""  # for skill manager
    node: ActionNode = Field(default=None, exclude=True)

    @property
    def repo(self) -> ProjectRepo:
        # Lazily wrap the context's git repo in a ProjectRepo on first access.
        if not self.context.repo:
            self.context.repo = ProjectRepo(self.context.git_repo)
        return self.context.repo

    @property
    def prompt_schema(self):
        # Prompt schema comes straight from the shared config.
        return self.config.prompt_schema

    @property
    def project_name(self):
        return self.config.project_name

    @project_name.setter
    def project_name(self, value):
        self.config.project_name = value

    @property
    def project_path(self):
        return self.config.project_path

    @model_validator(mode="before")
    @classmethod
    def set_name_if_empty(cls, values):
        # Default the action name to the class name when absent or empty.
        if not values.get("name"):
            values["name"] = cls.__name__
        return values

    @model_validator(mode="before")
    @classmethod
    def _init_with_instruction(cls, values):
        # An `instruction` kwarg is turned into a raw-schema ActionNode.
        if "instruction" in values:
            instruction = values.pop("instruction")
            values["node"] = ActionNode(
                key=values["name"], expected_type=str, instruction=instruction, example="", schema="raw"
            )
        return values

    def set_prefix(self, prefix):
        """Set prefix for later usage"""
        self.prefix = prefix
        self.llm.system_prompt = prefix
        if self.node:
            self.node.llm = self.llm
        return self

    def __str__(self):
        return self.__class__.__name__

    def __repr__(self):
        return str(self)

    async def _aask(self, prompt: str, system_msgs: Optional[list[str]] = None) -> str:
        """Append default prefix"""
        return await self.llm.aask(prompt, system_msgs)

    async def _run_action_node(self, *args, **kwargs):
        """Fill the bound ActionNode, feeding it the history messages as context."""
        history = args[0]
        numbered = [f"{idx}: {i}" for idx, i in enumerate(reversed(history))]
        context = "## History Messages\n" + "\n".join(numbered)
        return await self.node.fill(context=context, llm=self.llm)

    async def run(self, *args, **kwargs):
        """Run the action: delegate to the ActionNode when one is bound."""
        if not self.node:
            raise NotImplementedError("The run method should be implemented in a subclass.")
        return await self._run_action_node(*args, **kwargs)
|
notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/execute_task-checkpoint.py
ADDED
@@ -0,0 +1,19 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/9/13 12:26
|
5 |
+
@Author : femto Zheng
|
6 |
+
@File : execute_task.py
|
7 |
+
"""
|
8 |
+
|
9 |
+
|
10 |
+
from metagpt_yusin.actions import Action
|
11 |
+
from metagpt_yusin.schema import Message
|
12 |
+
|
13 |
+
|
14 |
+
class ExecuteTask(Action):
    """Placeholder action for executing a task; `run` is intentionally a no-op."""

    name: str = "ExecuteTask"
    i_context: list[Message] = []

    async def run(self, *args, **kwargs):
        """Do nothing (returns None)."""
|
notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/generate_questions-checkpoint.py
ADDED
@@ -0,0 +1,25 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@File : generate_questions.py
|
5 |
+
"""
|
6 |
+
from metagpt_yusin.actions import Action
|
7 |
+
from metagpt_yusin.actions.action_node import ActionNode
|
8 |
+
|
9 |
+
# ActionNode that asks the LLM to raise follow-up questions about the context.
QUESTIONS = ActionNode(
    key="Questions",
    expected_type=list[str],
    instruction=(
        "Task: Refer to the context to further inquire about the details that interest you, within a word limit"
        " of 150 words. Please provide the specific details you would like to inquire about here"
    ),
    example=["1. What ...", "2. How ...", "3. ..."],
)
|
16 |
+
|
17 |
+
|
18 |
+
class GenerateQuestions(Action):
    """Mine noteworthy details from a "##TOPIC" (discussion topic) and "##RECORD"
    (discussion records) context via the LLM, thereby deepening the discussion."""

    name: str = "GenerateQuestions"

    async def run(self, context) -> ActionNode:
        # Delegate entirely to the module-level QUESTIONS node.
        filled = QUESTIONS.fill(context=context, llm=self.llm)
        return await filled
|
notebook_dir/metagpt_yusin/actions/.ipynb_checkpoints/talk_action-checkpoint.py
ADDED
@@ -0,0 +1,169 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/8/28
|
5 |
+
@Author : mashenquan
|
6 |
+
@File : talk_action.py
|
7 |
+
@Desc : Act as it’s a talk
|
8 |
+
"""
|
9 |
+
from typing import Optional
|
10 |
+
|
11 |
+
from metagpt_yusin.actions import Action
|
12 |
+
from metagpt_yusin.config2 import config
|
13 |
+
from metagpt_yusin.logs import logger
|
14 |
+
from metagpt_yusin.schema import Message
|
15 |
+
|
16 |
+
|
17 |
+
class TalkAction(Action):
    """Answer the user's ask in character, optionally grounded in knowledge and
    a summary of the conversation history."""

    i_context: str
    history_summary: str = ""
    knowledge: str = ""
    rsp: Optional[Message] = None

    @property
    def agent_description(self):
        # Role-play description injected through the shared context kwargs.
        return self.context.kwargs.agent_description

    @property
    def language(self):
        # Per-conversation language, falling back to the global config.
        return self.context.kwargs.language or config.language

    @property
    def prompt(self):
        """Assemble the flat single-string prompt."""
        parts = []
        if self.agent_description:
            parts.append(
                f"You are {self.agent_description}. Your responses should align with the role-play agreement, "
                f"maintaining the character's persona and habits. When faced with unrelated questions, playfully "
                f"decline to answer without revealing your AI nature to preserve the character's image.\n\n"
            )
        if self.knowledge:
            parts.append(f"Knowledge:\n{self.knowledge}\n\n")
        parts.append(f"{self.history_summary}\n\n")
        parts.append(
            "If the information is insufficient, you can search in the historical conversation or knowledge above.\n"
        )
        parts.append(
            f"Answer the following questions strictly in {self.language}, and the answers must follow the Markdown format.\n "
            f"{self.i_context}"
        )
        prompt = "".join(parts)
        logger.debug(f"PROMPT: {prompt}")
        return prompt

    @property
    def prompt_gpt4(self):
        """Render FORMATION_LOOSE by substituting the template placeholders."""
        substitutions = {
            "{role}": self.agent_description or "",
            "{history}": self.history_summary or "",
            "{knowledge}": self.knowledge or "",
            "{language}": self.language,
            "{ask}": self.i_context,
        }
        prompt = TalkActionPrompt.FORMATION_LOOSE
        for placeholder, value in substitutions.items():
            prompt = prompt.replace(placeholder, value)
        logger.info(f"PROMPT: {prompt}")
        return prompt

    @property
    def aask_args(self):
        """Return the (msg, format_msgs, system_msgs) triple for llm.aask."""
        system_msgs = [
            f"You are {self.agent_description}.",
            "Your responses should align with the role-play agreement, "
            "maintaining the character's persona and habits. When faced with unrelated questions, playfully "
            "decline to answer without revealing your AI nature to preserve the character's image.",
            "If the information is insufficient, you can search in the context or knowledge.",
            f"Answer the following questions strictly in {self.language}, and the answers must follow the Markdown format.",
        ]
        format_msgs = []
        if self.knowledge:
            format_msgs.append({"role": "assistant", "content": self.knowledge})
        if self.history_summary:
            format_msgs.append({"role": "assistant", "content": self.history_summary})
        return self.i_context, format_msgs, system_msgs

    async def run(self, with_message=None, **kwargs) -> Message:
        msg, format_msgs, system_msgs = self.aask_args
        answer = await self.llm.aask(msg=msg, format_msgs=format_msgs, system_msgs=system_msgs, stream=False)
        self.rsp = Message(content=answer, role="assistant", cause_by=self)
        return self.rsp
|
98 |
+
|
99 |
+
|
100 |
+
class TalkActionPrompt:
    """Prompt templates for TalkAction.

    FORMATION is the strict variant (includes the role-play agreement
    statement); FORMATION_LOOSE drops it. Placeholders {role}, {history},
    {knowledge}, {language} and {ask} are filled via str.replace in
    TalkAction.prompt_gpt4 (which uses FORMATION_LOOSE).
    """

    FORMATION = """Formation: "Capacity and role" defines the role you are currently playing;
  "[HISTORY_BEGIN]" and "[HISTORY_END]" tags enclose the historical conversation;
  "[KNOWLEDGE_BEGIN]" and "[KNOWLEDGE_END]" tags enclose the knowledge may help for your responses;
  "Statement" defines the work detail you need to complete at this stage;
  "[ASK_BEGIN]" and [ASK_END] tags enclose the questions;
  "Constraint" defines the conditions that your responses must comply with.
  "Personality" defines your language style。
  "Insight" provides a deeper understanding of the characters' inner traits.
  "Initial" defines the initial setup of a character.

Capacity and role: {role}
Statement: Your responses should align with the role-play agreement, maintaining the
 character's persona and habits. When faced with unrelated questions, playfully decline to answer without revealing
 your AI nature to preserve the character's image.

[HISTORY_BEGIN]

{history}

[HISTORY_END]

[KNOWLEDGE_BEGIN]

{knowledge}

[KNOWLEDGE_END]

Statement: If the information is insufficient, you can search in the historical conversation or knowledge.
Statement: Unless you are a language professional, answer the following questions strictly in {language}
, and the answers must follow the Markdown format. Strictly excluding any tag likes "[HISTORY_BEGIN]"
, "[HISTORY_END]", "[KNOWLEDGE_BEGIN]", "[KNOWLEDGE_END]" in responses.


{ask}
"""

    FORMATION_LOOSE = """Formation: "Capacity and role" defines the role you are currently playing;
  "[HISTORY_BEGIN]" and "[HISTORY_END]" tags enclose the historical conversation;
  "[KNOWLEDGE_BEGIN]" and "[KNOWLEDGE_END]" tags enclose the knowledge may help for your responses;
  "Statement" defines the work detail you need to complete at this stage;
  "Constraint" defines the conditions that your responses must comply with.
  "Personality" defines your language style。
  "Insight" provides a deeper understanding of the characters' inner traits.
  "Initial" defines the initial setup of a character.

Capacity and role: {role}
Statement: Your responses should maintaining the character's persona and habits. When faced with unrelated questions
, playfully decline to answer without revealing your AI nature to preserve the character's image.

[HISTORY_BEGIN]

{history}

[HISTORY_END]

[KNOWLEDGE_BEGIN]

{knowledge}

[KNOWLEDGE_END]

Statement: If the information is insufficient, you can search in the historical conversation or knowledge.
Statement: Unless you are a language professional, answer the following questions strictly in {language}
, and the answers must follow the Markdown format. Strictly excluding any tag likes "[HISTORY_BEGIN]"
, "[HISTORY_END]", "[KNOWLEDGE_BEGIN]", "[KNOWLEDGE_END]" in responses.


{ask}
"""
|
notebook_dir/metagpt_yusin/actions/__init__.py
ADDED
@@ -0,0 +1,57 @@
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
|
1 |
+
#!/usr/bin/env python
|
2 |
+
# -*- coding: utf-8 -*-
|
3 |
+
"""
|
4 |
+
@Time : 2023/5/11 17:44
|
5 |
+
@Author : alexanderwu
|
6 |
+
@File : __init__.py
|
7 |
+
"""
|
8 |
+
from enum import Enum
|
9 |
+
|
10 |
+
from metagpt_yusin.actions.action import Action
|
11 |
+
from metagpt_yusin.actions.action_output import ActionOutput
|
12 |
+
from metagpt_yusin.actions.add_requirement import UserRequirement
|
13 |
+
from metagpt_yusin.actions.debug_error import DebugError
|
14 |
+
from metagpt_yusin.actions.design_api import WriteDesign
|
15 |
+
from metagpt_yusin.actions.design_api_review import DesignReview
|
16 |
+
from metagpt_yusin.actions.project_management import WriteTasks
|
17 |
+
from metagpt_yusin.actions.research import CollectLinks, WebBrowseAndSummarize, ConductResearch
|
18 |
+
from metagpt_yusin.actions.run_code import RunCode
|
19 |
+
from metagpt_yusin.actions.search_and_summarize import SearchAndSummarize
|
20 |
+
from metagpt_yusin.actions.write_code import WriteCode
|
21 |
+
from metagpt_yusin.actions.write_code_review import WriteCodeReview
|
22 |
+
from metagpt_yusin.actions.write_prd import WritePRD
|
23 |
+
from metagpt_yusin.actions.write_prd_review import WritePRDReview
|
24 |
+
from metagpt_yusin.actions.write_test import WriteTest
|
25 |
+
from metagpt_yusin.actions.di.execute_nb_code import ExecuteNbCode
|
26 |
+
from metagpt_yusin.actions.di.write_analysis_code import WriteAnalysisCode
|
27 |
+
from metagpt_yusin.actions.di.write_plan import WritePlan
|
28 |
+
|
29 |
+
|
30 |
+
class ActionType(Enum):
    """All types of Actions, used for indexing.

    Each member's value is the corresponding Action subclass.
    """

    ADD_REQUIREMENT = UserRequirement
    WRITE_PRD = WritePRD
    WRITE_PRD_REVIEW = WritePRDReview
    WRITE_DESIGN = WriteDesign
    DESIGN_REVIEW = DesignReview
    # Fix the historical misspelling "WRTIE_CODE" backward-compatibly: the
    # correctly spelled member is defined first (and is therefore canonical),
    # and because enum members with a duplicated value become aliases,
    # `ActionType.WRTIE_CODE` keeps working for existing callers.
    WRITE_CODE = WriteCode
    WRTIE_CODE = WriteCode  # deprecated alias of WRITE_CODE
    WRITE_CODE_REVIEW = WriteCodeReview
    WRITE_TEST = WriteTest
    RUN_CODE = RunCode
    DEBUG_ERROR = DebugError
    WRITE_TASKS = WriteTasks
    SEARCH_AND_SUMMARIZE = SearchAndSummarize
    COLLECT_LINKS = CollectLinks
    WEB_BROWSE_AND_SUMMARIZE = WebBrowseAndSummarize
    CONDUCT_RESEARCH = ConductResearch
    EXECUTE_NB_CODE = ExecuteNbCode
    WRITE_ANALYSIS_CODE = WriteAnalysisCode
    WRITE_PLAN = WritePlan
|
51 |
+
|
52 |
+
|
53 |
+
# Names exported by `from metagpt_yusin.actions import *`.
__all__ = ["ActionType", "Action", "ActionOutput"]
|
notebook_dir/metagpt_yusin/actions/__pycache__/__init__.cpython-39.pyc
ADDED
Binary file (2.37 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/action.cpython-39.pyc
ADDED
Binary file (4.15 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/action_node.cpython-39.pyc
ADDED
Binary file (21.8 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/action_outcls_registry.cpython-39.pyc
ADDED
Binary file (1.37 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/action_output.cpython-39.pyc
ADDED
Binary file (703 Bytes). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/add_requirement.cpython-39.pyc
ADDED
Binary file (546 Bytes). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/debug_error.cpython-39.pyc
ADDED
Binary file (2.76 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/design_api.cpython-39.pyc
ADDED
Binary file (4.7 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/design_api_an.cpython-39.pyc
ADDED
Binary file (3.46 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/design_api_review.cpython-39.pyc
ADDED
Binary file (1.05 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/fix_bug.cpython-39.pyc
ADDED
Binary file (562 Bytes). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/prepare_documents.cpython-39.pyc
ADDED
Binary file (2.19 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/project_management.cpython-39.pyc
ADDED
Binary file (3.48 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/project_management_an.cpython-39.pyc
ADDED
Binary file (3.55 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/research.cpython-39.pyc
ADDED
Binary file (11.4 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/run_code.cpython-39.pyc
ADDED
Binary file (6.32 kB). View file
|
|
notebook_dir/metagpt_yusin/actions/__pycache__/search_and_summarize.cpython-39.pyc
ADDED
Binary file (5.68 kB). View file
|
|