# Streamlit app: ReAct agent with MCP tools (LangGraph + LangChain).
import streamlit as st
import asyncio
import nest_asyncio
import json
import os
import platform

# On Windows, the Proactor event loop is required so that asyncio can spawn
# subprocesses (MCP stdio servers run as child processes).
if platform.system() == "Windows":
    asyncio.set_event_loop_policy(asyncio.WindowsProactorEventLoopPolicy())

# Apply nest_asyncio: allow nested run calls inside an already-running event loop.
nest_asyncio.apply()

# Create a single global event loop and reuse it across Streamlit reruns
# (created once, then kept in session state).
if "event_loop" not in st.session_state:
    loop = asyncio.new_event_loop()
    st.session_state.event_loop = loop
    asyncio.set_event_loop(loop)
from langgraph.prebuilt import create_react_agent
from langchain_anthropic import ChatAnthropic
from langchain_openai import ChatOpenAI
from langchain_core.messages import HumanMessage
from dotenv import load_dotenv
from langchain_mcp_adapters.client import MultiServerMCPClient
from utils import astream_graph, random_uuid
from langchain_core.messages.ai import AIMessageChunk
from langchain_core.messages.tool import ToolMessage
from langgraph.checkpoint.memory import MemorySaver
from langchain_core.runnables import RunnableConfig

# Load environment variables (API keys etc. from the .env file).
load_dotenv(override=True)

# Path of the JSON file holding the MCP tool configuration.
CONFIG_FILE_PATH = "config.json"
# JSON settings-file loader
def load_config_from_json():
    """
    Load settings from the config.json file.

    If the file does not exist, it is created with the default settings
    (a single stdio time server) so that later loads and saves agree.

    Returns:
        dict: the loaded settings (or the defaults on error)
    """
    default_config = {
        "get_current_time": {
            "command": "python",
            "args": ["./mcp_server_time.py"],
            "transport": "stdio",
        }
    }
    try:
        if os.path.exists(CONFIG_FILE_PATH):
            with open(CONFIG_FILE_PATH, "r", encoding="utf-8") as f:
                return json.load(f)
        # File missing: materialize the defaults on disk.
        save_config_to_json(default_config)
        return default_config
    except Exception as e:
        st.error(f"μ€μ νμΌ λ‘λ μ€ μ€λ₯ λ°μ: {str(e)}")
        # Fall back to the defaults so the app can still start.
        return default_config
# JSON settings-file writer
def save_config_to_json(config):
    """
    Save settings to the config.json file.

    Args:
        config (dict): settings to save

    Returns:
        bool: True when the save succeeded, False otherwise
    """
    try:
        with open(CONFIG_FILE_PATH, "w", encoding="utf-8") as f:
            # ensure_ascii=False keeps non-ASCII text readable in the file.
            json.dump(config, f, indent=2, ensure_ascii=False)
        return True
    except Exception as e:
        st.error(f"μ€μ νμΌ μ μ₯ μ€ μ€λ₯ λ°μ: {str(e)}")
        return False
# Initialize the login session flag.
if "authenticated" not in st.session_state:
    st.session_state.authenticated = False

# Is the login feature enabled? (env var USE_LOGIN, default off)
use_login = os.environ.get("USE_LOGIN", "false").lower() == "true"

# Pick the page layout depending on the login state.
if use_login and not st.session_state.authenticated:
    # The login page uses the default (narrow) layout.
    st.set_page_config(page_title="Agent with MCP Tools", page_icon="π§ ")
else:
    # The main app uses the wide layout.
    st.set_page_config(page_title="Agent with MCP Tools", page_icon="π§ ", layout="wide")

# When login is enabled and the user is not yet authenticated, show the login screen.
if use_login and not st.session_state.authenticated:
    st.title("π λ‘κ·ΈμΈ")
    st.markdown("μμ€ν μ μ¬μ©νλ €λ©΄ λ‘κ·ΈμΈμ΄ νμν©λλ€.")

    # Login form
    with st.form("login_form"):
        username = st.text_input("μμ΄λ")
        password = st.text_input("λΉλ°λ²νΈ", type="password")
        submit_button = st.form_submit_button("λ‘κ·ΈμΈ")

        if submit_button:
            # Credentials come from the environment (USER_ID / USER_PASSWORD).
            expected_username = os.environ.get("USER_ID")
            expected_password = os.environ.get("USER_PASSWORD")
            if username == expected_username and password == expected_password:
                st.session_state.authenticated = True
                st.success("β λ‘κ·ΈμΈ μ±κ³΅! μ μλ§ κΈ°λ€λ €μ£ΌμΈμ...")
                st.rerun()
            else:
                st.error("β μμ΄λ λλ λΉλ°λ²νΈκ° μ¬λ°λ₯΄μ§ μμ΅λλ€.")

    # Do not render the main app on the login screen.
    st.stop()

# Sidebar top: author info (placed before any other sidebar element).
st.sidebar.markdown("### βοΈ Made by [ν λλ ΈνΈ](https://youtube.com/c/teddynote) π")
st.sidebar.markdown(
    "### π» [Project Page](https://github.com/teddynote-lab/langgraph-mcp-agents)"
)

st.sidebar.divider()  # divider

# Page title and description.
st.title("π¬ MCP λꡬ νμ© μμ΄μ νΈ")
st.markdown("β¨ MCP λꡬλ₯Ό νμ©ν ReAct μμ΄μ νΈμκ² μ§λ¬Έν΄λ³΄μΈμ.")
# System prompt given to the ReAct agent.
SYSTEM_PROMPT = """<ROLE>
You are a smart agent with an ability to use tools.
You will be given a question and you will use the tools to answer the question.
Pick the most relevant tool to answer the question.
If you are failed to answer the question, try different tools to get context.
Your answer should be very polite and professional.
</ROLE>
----
<INSTRUCTIONS>
Step 1: Analyze the question
- Analyze user's question and final goal.
- If the user's question is consist of multiple sub-questions, split them into smaller sub-questions.
Step 2: Pick the most relevant tool
- Pick the most relevant tool to answer the question.
- If you are failed to answer the question, try different tools to get context.
Step 3: Answer the question
- Answer the question in the same language as the question.
- Your answer should be very polite and professional.
Step 4: Provide the source of the answer(if applicable)
- If you've used the tool, provide the source of the answer.
- Valid sources are either a website(URL) or a document(PDF, etc).
Guidelines:
- If you've used the tool, your answer should be based on the tool's output(tool's output is more important than your own knowledge).
- If you've used the tool, and the source is valid URL, provide the source(URL) of the answer.
- Skip providing the source if the source is not URL.
- Answer in the same language as the question.
- Answer should be concise and to the point.
- Avoid response your output with any other information than the answer and the source.
</INSTRUCTIONS>
----
<OUTPUT_FORMAT>
(concise answer to the question)
**Source**(if applicable)
- (source1: valid URL)
- (source2: valid URL)
- ...
</OUTPUT_FORMAT>
"""

# Per-model output token limits used when constructing the chat model.
OUTPUT_TOKEN_INFO = {
    "claude-3-5-sonnet-latest": {"max_tokens": 8192},
    "claude-3-5-haiku-latest": {"max_tokens": 8192},
    "claude-3-7-sonnet-latest": {"max_tokens": 64000},
    "gpt-4o": {"max_tokens": 16000},
    "gpt-4o-mini": {"max_tokens": 16000},
}
# Initialize per-session state with its defaults (runs once per session).
if "session_initialized" not in st.session_state:
    st.session_state.session_initialized = False  # session-initialized flag
    st.session_state.agent = None  # ReAct agent object
    st.session_state.history = []  # conversation history list
    st.session_state.mcp_client = None  # MCP client object
    st.session_state.timeout_seconds = 120  # response time limit (s), default 120
    st.session_state.selected_model = "claude-3-7-sonnet-latest"  # default model
    st.session_state.recursion_limit = 100  # recursion limit, default 100

if "thread_id" not in st.session_state:
    st.session_state.thread_id = random_uuid()
# --- Function definitions ---


async def cleanup_mcp_client():
    """
    Safely shut down the existing MCP client.

    If a client is present in session state, its resources are released
    via its async context-manager exit. Shutdown errors are deliberately
    ignored: cleanup is best-effort and must not block re-initialization.
    """
    if "mcp_client" in st.session_state and st.session_state.mcp_client is not None:
        try:
            await st.session_state.mcp_client.__aexit__(None, None, None)
            st.session_state.mcp_client = None
        except Exception:
            # Best-effort teardown: swallow the error so a broken client
            # cannot prevent a fresh session from starting.
            pass
def print_message():
    """
    Render the chat history on screen.

    User and assistant messages are shown in their own chat containers;
    tool-call information ("assistant_tool" entries) is rendered inside
    the assistant message container that precedes it.
    """
    i = 0
    while i < len(st.session_state.history):
        message = st.session_state.history[i]

        if message["role"] == "user":
            st.chat_message("user", avatar="π§βπ»").markdown(message["content"])
            i += 1
        elif message["role"] == "assistant":
            # Assistant message container
            with st.chat_message("assistant", avatar="π€"):
                st.markdown(message["content"])

                # Is the next entry tool-call info for this message?
                if (
                    i + 1 < len(st.session_state.history)
                    and st.session_state.history[i + 1]["role"] == "assistant_tool"
                ):
                    # Show tool-call info as an expander in the same container.
                    with st.expander("π§ λꡬ νΈμΆ μ 보", expanded=False):
                        st.markdown(st.session_state.history[i + 1]["content"])
                    i += 2  # consumed two entries together
                else:
                    i += 1  # consumed only the assistant message
        else:
            # assistant_tool entries are handled above; skip them here.
            i += 1
def get_streaming_callback(text_placeholder, tool_placeholder):
    """
    Build a streaming callback that renders LLM output in real time.

    Text chunks and tool-call information are rendered into two separate
    Streamlit placeholders as they arrive.

    Args:
        text_placeholder: Streamlit component for the text response
        tool_placeholder: Streamlit component for tool-call information

    Returns:
        callback_func: the streaming callback
        accumulated_text: list collecting the streamed text chunks
        accumulated_tool: list collecting the streamed tool-call info
    """
    accumulated_text = []
    accumulated_tool = []

    def callback_func(message: dict):
        nonlocal accumulated_text, accumulated_tool
        message_content = message.get("content", None)

        if isinstance(message_content, AIMessageChunk):
            content = message_content.content
            # Content arrives as a list (mainly for Claude models).
            if isinstance(content, list) and len(content) > 0:
                message_chunk = content[0]
                # Text chunk
                if message_chunk["type"] == "text":
                    accumulated_text.append(message_chunk["text"])
                    text_placeholder.markdown("".join(accumulated_text))
                # Tool-use chunk
                elif message_chunk["type"] == "tool_use":
                    if "partial_json" in message_chunk:
                        accumulated_tool.append(message_chunk["partial_json"])
                    else:
                        tool_call_chunks = message_content.tool_call_chunks
                        tool_call_chunk = tool_call_chunks[0]
                        accumulated_tool.append(
                            "\n```json\n" + str(tool_call_chunk) + "\n```\n"
                        )
                    with tool_placeholder.expander("π§ λꡬ νΈμΆ μ 보", expanded=True):
                        st.markdown("".join(accumulated_tool))
            # tool_calls attribute present (mainly OpenAI models).
            elif (
                hasattr(message_content, "tool_calls")
                and message_content.tool_calls
                and len(message_content.tool_calls[0]["name"]) > 0
            ):
                tool_call_info = message_content.tool_calls[0]
                accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
                with tool_placeholder.expander("π§ λꡬ νΈμΆ μ 보", expanded=True):
                    st.markdown("".join(accumulated_tool))
            # Plain string content.
            elif isinstance(content, str):
                accumulated_text.append(content)
                text_placeholder.markdown("".join(accumulated_text))
            # Invalid tool-call info present.
            elif (
                hasattr(message_content, "invalid_tool_calls")
                and message_content.invalid_tool_calls
            ):
                tool_call_info = message_content.invalid_tool_calls[0]
                accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
                with tool_placeholder.expander(
                    "π§ λꡬ νΈμΆ μ 보 (μ ν¨νμ§ μμ)", expanded=True
                ):
                    st.markdown("".join(accumulated_tool))
            # tool_call_chunks attribute present.
            elif (
                hasattr(message_content, "tool_call_chunks")
                and message_content.tool_call_chunks
            ):
                tool_call_chunk = message_content.tool_call_chunks[0]
                accumulated_tool.append(
                    "\n```json\n" + str(tool_call_chunk) + "\n```\n"
                )
                with tool_placeholder.expander("π§ λꡬ νΈμΆ μ 보", expanded=True):
                    st.markdown("".join(accumulated_tool))
            # tool_calls inside additional_kwargs (compat with various models).
            elif (
                hasattr(message_content, "additional_kwargs")
                and "tool_calls" in message_content.additional_kwargs
            ):
                tool_call_info = message_content.additional_kwargs["tool_calls"][0]
                accumulated_tool.append("\n```json\n" + str(tool_call_info) + "\n```\n")
                with tool_placeholder.expander("π§ λꡬ νΈμΆ μ 보", expanded=True):
                    st.markdown("".join(accumulated_tool))
        # Tool message (a tool's response).
        elif isinstance(message_content, ToolMessage):
            accumulated_tool.append(
                "\n```json\n" + str(message_content.content) + "\n```\n"
            )
            with tool_placeholder.expander("π§ λꡬ νΈμΆ μ 보", expanded=True):
                st.markdown("".join(accumulated_tool))
        return None

    return callback_func, accumulated_text, accumulated_tool
async def process_query(query, text_placeholder, tool_placeholder, timeout_seconds=60):
    """
    Process a user question and stream the agent's response.

    The question is forwarded to the agent and its answer is streamed to
    the UI in real time. If the response does not finish within the given
    time, a timeout error is returned instead.

    Args:
        query: the user's question text
        text_placeholder: Streamlit component for the text response
        tool_placeholder: Streamlit component for tool-call information
        timeout_seconds: response generation time limit (seconds)

    Returns:
        response: the agent's response object (or {"error": ...})
        final_text: the final accumulated text response
        final_tool: the final accumulated tool-call information
    """
    try:
        if st.session_state.agent:
            streaming_callback, accumulated_text_obj, accumulated_tool_obj = (
                get_streaming_callback(text_placeholder, tool_placeholder)
            )
            try:
                response = await asyncio.wait_for(
                    astream_graph(
                        st.session_state.agent,
                        {"messages": [HumanMessage(content=query)]},
                        callback=streaming_callback,
                        config=RunnableConfig(
                            recursion_limit=st.session_state.recursion_limit,
                            thread_id=st.session_state.thread_id,
                        ),
                    ),
                    timeout=timeout_seconds,
                )
            except asyncio.TimeoutError:
                error_msg = f"β±οΈ μμ² μκ°μ΄ {timeout_seconds}μ΄λ₯Ό μ΄κ³Όνμ΅λλ€. λμ€μ λ€μ μλν΄ μ£ΌμΈμ."
                return {"error": error_msg}, error_msg, ""

            final_text = "".join(accumulated_text_obj)
            final_tool = "".join(accumulated_tool_obj)
            return response, final_text, final_tool
        else:
            return (
                {"error": "π« μμ΄μ νΈκ° μ΄κΈ°νλμ§ μμμ΅λλ€."},
                "π« μμ΄μ νΈκ° μ΄κΈ°νλμ§ μμμ΅λλ€.",
                "",
            )
    except Exception as e:
        import traceback

        error_msg = f"β 쿼리 μ²λ¦¬ μ€ μ€λ₯ λ°μ: {str(e)}\n{traceback.format_exc()}"
        return {"error": error_msg}, error_msg, ""
async def initialize_session(mcp_config=None):
    """
    Initialize the MCP session and the agent.

    Args:
        mcp_config: MCP tool configuration (JSON dict). When None, the
            settings are loaded from config.json.

    Returns:
        bool: whether initialization succeeded
    """
    with st.spinner("π MCP μλ²μ μ°κ²° μ€..."):
        # Tear down any previous client first.
        await cleanup_mcp_client()

        if mcp_config is None:
            # Load the configuration from config.json.
            mcp_config = load_config_from_json()
        client = MultiServerMCPClient(mcp_config)
        await client.__aenter__()
        tools = client.get_tools()
        st.session_state.tool_count = len(tools)
        st.session_state.mcp_client = client

        # Construct the chat model matching the selected model name.
        selected_model = st.session_state.selected_model

        if selected_model in [
            "claude-3-7-sonnet-latest",
            "claude-3-5-sonnet-latest",
            "claude-3-5-haiku-latest",
        ]:
            model = ChatAnthropic(
                model=selected_model,
                temperature=0.1,
                max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
            )
        else:  # use an OpenAI model
            model = ChatOpenAI(
                model=selected_model,
                temperature=0.1,
                max_tokens=OUTPUT_TOKEN_INFO[selected_model]["max_tokens"],
            )
        agent = create_react_agent(
            model,
            tools,
            checkpointer=MemorySaver(),
            prompt=SYSTEM_PROMPT,
        )
        st.session_state.agent = agent
        st.session_state.session_initialized = True
        return True
# --- Sidebar: system settings section ---
with st.sidebar:
    st.subheader("βοΈ μμ€ν μ€μ ")

    # Build the list of selectable models from the API keys that are set.
    available_models = []

    # Anthropic API key check
    has_anthropic_key = os.environ.get("ANTHROPIC_API_KEY") is not None
    if has_anthropic_key:
        available_models.extend(
            [
                "claude-3-7-sonnet-latest",
                "claude-3-5-sonnet-latest",
                "claude-3-5-haiku-latest",
            ]
        )

    # OpenAI API key check
    has_openai_key = os.environ.get("OPENAI_API_KEY") is not None
    if has_openai_key:
        available_models.extend(["gpt-4o", "gpt-4o-mini"])

    # No key configured: warn, but keep one default entry so the UI renders.
    if not available_models:
        st.warning(
            "β οΈ API ν€κ° μ€μ λμ§ μμμ΅λλ€. .env νμΌμ ANTHROPIC_API_KEY λλ OPENAI_API_KEYλ₯Ό μΆκ°ν΄μ£ΌμΈμ."
        )
        available_models = ["claude-3-7-sonnet-latest"]

    # Model selection dropdown
    previous_model = st.session_state.selected_model
    st.session_state.selected_model = st.selectbox(
        "π€ μ¬μ©ν λͺ¨λΈ μ ν",
        options=available_models,
        index=(
            available_models.index(st.session_state.selected_model)
            if st.session_state.selected_model in available_models
            else 0
        ),
        help="Anthropic λͺ¨λΈμ ANTHROPIC_API_KEYκ°, OpenAI λͺ¨λΈμ OPENAI_API_KEYκ° νκ²½λ³μλ‘ μ€μ λμ΄μΌ ν©λλ€.",
    )

    # Remind the user to re-apply settings after switching models.
    if (
        previous_model != st.session_state.selected_model
        and st.session_state.session_initialized
    ):
        st.warning(
            "β οΈ λͺ¨λΈμ΄ λ³κ²½λμμ΅λλ€. 'μ€μ μ μ©νκΈ°' λ²νΌμ λλ¬ λ³κ²½μ¬νμ μ μ©νμΈμ."
        )

    # Timeout slider
    st.session_state.timeout_seconds = st.slider(
        "β±οΈ μλ΅ μμ± μ ν μκ°(μ΄)",
        min_value=60,
        max_value=300,
        value=st.session_state.timeout_seconds,
        step=10,
        help="μμ΄μ νΈκ° μλ΅μ μμ±νλ μ΅λ μκ°μ μ€μ ν©λλ€. 볡μ‘ν μμ μ λ κΈ΄ μκ°μ΄ νμν μ μμ΅λλ€.",
    )

    # Recursion-limit slider
    st.session_state.recursion_limit = st.slider(
        "β±οΈ μ¬κ· νΈμΆ μ ν(νμ)",
        min_value=10,
        max_value=200,
        value=st.session_state.recursion_limit,
        step=10,
        help="μ¬κ· νΈμΆ μ ν νμλ₯Ό μ€μ ν©λλ€. λ무 λμ κ°μ μ€μ νλ©΄ λ©λͺ¨λ¦¬ λΆμ‘± λ¬Έμ κ° λ°μν μ μμ΅λλ€.",
    )

    st.divider()  # divider

    # Tool settings section
    st.subheader("π§ λꡬ μ€μ ")

    # Keep the expander open/closed state in session state.
    if "mcp_tools_expander" not in st.session_state:
        st.session_state.mcp_tools_expander = False

    # MCP tool registration interface
    with st.expander("π§° MCP λꡬ μΆκ°", expanded=st.session_state.mcp_tools_expander):
        # Load and show the settings from config.json.
        loaded_config = load_config_from_json()
        default_config_text = json.dumps(loaded_config, indent=2, ensure_ascii=False)

        # Seed the pending config from the loaded file when not present yet.
        if "pending_mcp_config" not in st.session_state:
            try:
                st.session_state.pending_mcp_config = loaded_config
            except Exception as e:
                st.error(f"μ΄κΈ° pending config μ€μ μ€ν¨: {e}")

        # UI for adding an individual tool
        st.subheader("λꡬ μΆκ°")
        st.markdown(
            """
        [μ΄λ»κ² μ€μ νλμ?](https://teddylee777.notion.site/MCP-1d324f35d12980c8b018e12afdf545a1?pvs=4)
        β οΈ **μ€μ**: JSONμ λ°λμ μ€κ΄νΈ(`{}`)λ‘ κ°μΈμΌ ν©λλ€."""
        )

        # A clear, copy-pasteable example
        example_json = {
            "github": {
                "command": "npx",
                "args": [
                    "-y",
                    "@smithery/cli@latest",
                    "run",
                    "@smithery-ai/github",
                    "--config",
                    '{"githubPersonalAccessToken":"your_token_here"}',
                ],
                "transport": "stdio",
            }
        }

        default_text = json.dumps(example_json, indent=2, ensure_ascii=False)

        new_tool_json = st.text_area(
            "λꡬ JSON",
            default_text,
            height=250,
        )

        # Add-tool button
        if st.button(
            "λꡬ μΆκ°",
            type="primary",
            key="add_tool_button",
            use_container_width=True,
        ):
            try:
                # Validate the raw input before parsing.
                if not new_tool_json.strip().startswith(
                    "{"
                ) or not new_tool_json.strip().endswith("}"):
                    st.error("JSONμ μ€κ΄νΈ({})λ‘ μμνκ³ λλμΌ ν©λλ€.")
                    st.markdown('μ¬λ°λ₯Έ νμ: `{ "λꡬμ΄λ¦": { ... } }`')
                else:
                    # Parse the JSON.
                    parsed_tool = json.loads(new_tool_json)

                    # Accept the "mcpServers" wrapper format and unwrap it.
                    if "mcpServers" in parsed_tool:
                        parsed_tool = parsed_tool["mcpServers"]
                        st.info(
                            "'mcpServers' νμμ΄ κ°μ§λμμ΅λλ€. μλμΌλ‘ λ³νν©λλ€."
                        )

                    # At least one tool must be supplied.
                    if len(parsed_tool) == 0:
                        st.error("μ΅μ νλ μ΄μμ λꡬλ₯Ό μ λ ₯ν΄μ£ΌμΈμ.")
                    else:
                        # Process every tool in the input.
                        success_tools = []
                        for tool_name, tool_config in parsed_tool.items():
                            # URL present: force SSE transport.
                            if "url" in tool_config:
                                tool_config["transport"] = "sse"
                                st.info(
                                    f"'{tool_name}' λꡬμ URLμ΄ κ°μ§λμ΄ transportλ₯Ό 'sse'λ‘ μ€μ νμ΅λλ€."
                                )
                            elif "transport" not in tool_config:
                                # Neither URL nor transport: default to stdio.
                                tool_config["transport"] = "stdio"

                            # Required-field checks
                            if (
                                "command" not in tool_config
                                and "url" not in tool_config
                            ):
                                st.error(
                                    f"'{tool_name}' λꡬ μ€μ μλ 'command' λλ 'url' νλκ° νμν©λλ€."
                                )
                            elif "command" in tool_config and "args" not in tool_config:
                                st.error(
                                    f"'{tool_name}' λꡬ μ€μ μλ 'args' νλκ° νμν©λλ€."
                                )
                            elif "command" in tool_config and not isinstance(
                                tool_config["args"], list
                            ):
                                st.error(
                                    f"'{tool_name}' λꡬμ 'args' νλλ λ°λμ λ°°μ΄([]) νμμ΄μ΄μΌ ν©λλ€."
                                )
                            else:
                                # Stage the tool in pending_mcp_config
                                # (applied later by the apply button).
                                st.session_state.pending_mcp_config[tool_name] = (
                                    tool_config
                                )
                                success_tools.append(tool_name)

                        # Success message
                        if success_tools:
                            if len(success_tools) == 1:
                                st.success(
                                    f"{success_tools[0]} λκ΅¬κ° μΆκ°λμμ΅λλ€. μ μ©νλ €λ©΄ 'μ€μ μ μ©νκΈ°' λ²νΌμ λλ¬μ£ΌμΈμ."
                                )
                            else:
                                tool_names = ", ".join(success_tools)
                                st.success(
                                    f"μ΄ {len(success_tools)}κ° λꡬ({tool_names})κ° μΆκ°λμμ΅λλ€. μ μ©νλ €λ©΄ 'μ€μ μ μ©νκΈ°' λ²νΌμ λλ¬μ£ΌμΈμ."
                                )
                            # Collapse the expander after a successful add.
                            st.session_state.mcp_tools_expander = False
                            st.rerun()
            except json.JSONDecodeError as e:
                st.error(f"JSON νμ± μλ¬: {e}")
                st.markdown(
                    f"""
                **μμ λ°©λ²**:
                1. JSON νμμ΄ μ¬λ°λ₯Έμ§ νμΈνμΈμ.
                2. λͺ¨λ ν€λ ν°λ°μ΄ν(")λ‘ κ°μΈμΌ ν©λλ€.
                3. λ¬Έμμ΄ κ°λ ν°λ°μ΄ν(")λ‘ κ°μΈμΌ ν©λλ€.
                4. λ¬Έμμ΄ λ΄μμ ν°λ°μ΄νλ₯Ό μ¬μ©ν κ²½μ° μ΄μ€μΌμ΄ν(\\")ν΄μΌ ν©λλ€.
                """
                )
            except Exception as e:
                st.error(f"μ€λ₯ λ°μ: {e}")

    # Registered tools list with per-tool delete buttons
    with st.expander("π λ±λ‘λ λꡬ λͺ©λ‘", expanded=True):
        try:
            pending_config = st.session_state.pending_mcp_config
        except Exception as e:
            st.error("μ ν¨ν MCP λꡬ μ€μ μ΄ μλλλ€.")
        else:
            # Iterate over the tool names in the pending config.
            for tool_name in list(pending_config.keys()):
                col1, col2 = st.columns([8, 2])
                col1.markdown(f"- **{tool_name}**")
                if col2.button("μμ ", key=f"delete_{tool_name}"):
                    # Remove from the pending config (not applied immediately).
                    del st.session_state.pending_mcp_config[tool_name]
                    st.success(
                        f"{tool_name} λκ΅¬κ° μμ λμμ΅λλ€. μ μ©νλ €λ©΄ 'μ€μ μ μ©νκΈ°' λ²νΌμ λλ¬μ£ΌμΈμ."
                    )

    st.divider()  # divider
# --- Sidebar: system information and action buttons section ---
with st.sidebar:
    st.subheader("π μμ€ν μ 보")
    st.write(f"π οΈ MCP λꡬ μ: {st.session_state.get('tool_count', 'μ΄κΈ°ν μ€...')}")
    selected_model_name = st.session_state.selected_model
    st.write(f"π§ νμ¬ λͺ¨λΈ: {selected_model_name}")

    # Apply-settings button (placed here, below the system info)
    if st.button(
        "μ€μ μ μ©νκΈ°",
        key="apply_button",
        type="primary",
        use_container_width=True,
    ):
        # Show progress while applying the changes.
        apply_status = st.empty()
        with apply_status.container():
            st.warning("π λ³κ²½μ¬νμ μ μ©νκ³ μμ΅λλ€. μ μλ§ κΈ°λ€λ €μ£ΌμΈμ...")
            progress_bar = st.progress(0)

            # Keep a text snapshot of the applied settings.
            st.session_state.mcp_config_text = json.dumps(
                st.session_state.pending_mcp_config, indent=2, ensure_ascii=False
            )

            # Persist the settings to config.json.
            save_result = save_config_to_json(st.session_state.pending_mcp_config)
            if not save_result:
                st.error("β μ€μ νμΌ μ μ₯μ μ€ν¨νμ΅λλ€.")

            progress_bar.progress(15)

            # Prepare for re-initialization.
            st.session_state.session_initialized = False
            st.session_state.agent = None

            # Update progress.
            progress_bar.progress(30)

            # Run initialization on the shared event loop.
            success = st.session_state.event_loop.run_until_complete(
                initialize_session(st.session_state.pending_mcp_config)
            )

            # Update progress.
            progress_bar.progress(100)

            if success:
                st.success("β μλ‘μ΄ μ€μ μ΄ μ μ©λμμ΅λλ€.")
                # Collapse the tool-add expander.
                if "mcp_tools_expander" in st.session_state:
                    st.session_state.mcp_tools_expander = False
            else:
                st.error("β μ€μ μ μ©μ μ€ν¨νμμ΅λλ€.")

        # Refresh the page.
        st.rerun()

    st.divider()  # divider

    # Action buttons section
    st.subheader("π μμ ")

    # Reset-conversation button
    if st.button("λν μ΄κΈ°ν", use_container_width=True, type="primary"):
        # Reset the thread id.
        st.session_state.thread_id = random_uuid()
        # Reset the conversation history.
        st.session_state.history = []
        # Notify the user.
        st.success("β λνκ° μ΄κΈ°νλμμ΅λλ€.")
        # Refresh the page.
        st.rerun()

    # Logout button shown only when the login feature is enabled.
    if use_login and st.session_state.authenticated:
        st.divider()  # divider
        if st.button("λ‘κ·Έμμ", use_container_width=True, type="secondary"):
            st.session_state.authenticated = False
            st.success("β λ‘κ·Έμμ λμμ΅λλ€.")
            st.rerun()
# --- Initialization notice (when the session is not initialized yet) ---
if not st.session_state.session_initialized:
    st.info(
        "MCP μλ²μ μμ΄μ νΈκ° μ΄κΈ°νλμ§ μμμ΅λλ€. μΌμͺ½ μ¬μ΄λλ°μ 'μ€μ μ μ©νκΈ°' λ²νΌμ ν΄λ¦νμ¬ μ΄κΈ°νν΄μ£ΌμΈμ."
    )

# --- Render the conversation history ---
print_message()

# --- Handle user input ---
user_query = st.chat_input("π¬ μ§λ¬Έμ μ λ ₯νμΈμ")
if user_query:
    if st.session_state.session_initialized:
        st.chat_message("user", avatar="π§βπ»").markdown(user_query)
        with st.chat_message("assistant", avatar="π€"):
            tool_placeholder = st.empty()
            text_placeholder = st.empty()
            # Run the query on the shared event loop with the configured timeout.
            resp, final_text, final_tool = (
                st.session_state.event_loop.run_until_complete(
                    process_query(
                        user_query,
                        text_placeholder,
                        tool_placeholder,
                        st.session_state.timeout_seconds,
                    )
                )
            )
        if "error" in resp:
            st.error(resp["error"])
        else:
            # Record the exchange in the history; tool info only when present.
            st.session_state.history.append({"role": "user", "content": user_query})
            st.session_state.history.append(
                {"role": "assistant", "content": final_text}
            )
            if final_tool.strip():
                st.session_state.history.append(
                    {"role": "assistant_tool", "content": final_tool}
                )
            st.rerun()
    else:
        st.warning(
            "β οΈ MCP μλ²μ μμ΄μ νΈκ° μ΄κΈ°νλμ§ μμμ΅λλ€. μΌμͺ½ μ¬μ΄λλ°μ 'μ€μ μ μ©νκΈ°' λ²νΌμ ν΄λ¦νμ¬ μ΄κΈ°νν΄μ£ΌμΈμ."
        )