import json

import gradio as gr
import httpx

MODAL_API_ENDPOINT = "https://blastingneurons--collective-hive-backend-orchestrate-hive-api.modal.run"
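
# The backend is assumed to stream newline-delimited JSON (NDJSON) events.
# This schema is inferred from the handlers in call_modal_backend below;
# the exact field contents are assumptions, not a documented contract:
#   {"event": "status_update", "data": "<status text>"}
#   {"event": "chat_update", "data": {"agent": "<name>", "text": "<message>"}}
#   {"event": "final_solution", "solution": "...", "confidence": "...", "minority_opinions": "..."}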


def format_chat_history_for_gradio(log_entries: list[dict]) -> list[dict]:
    """Convert backend log entries into the message format used by gr.Chatbot.

    gr.Chatbot(type='messages') only accepts the roles 'user' and 'assistant',
    so each agent's name is folded into the message text rather than the role.
    """
    formatted_messages = []
    for entry in log_entries:
        agent = entry.get("agent", "System")
        content = entry.get("text", "")
        formatted_messages.append({"role": "assistant", "content": f"**{agent}:** {content}"})
    return formatted_messages
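
# Example (entry contents are illustrative):
#   format_chat_history_for_gradio([{"agent": "Scout", "text": "Gathering data..."}])
#   -> [{"role": "assistant", "content": "**Scout:** Gathering data..."}]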


async def call_modal_backend(problem_input: str, complexity: int):
    """Stream events from the Modal backend and yield UI updates.

    Each yielded tuple maps positionally onto the five output components
    wired up in initiate_btn.click():
    (status, chat_history, solution, confidence, minority_opinions).
    """
    full_chat_history = []

    yield "Connecting to Hive...", [], "", "", ""

    try:
        async with httpx.AsyncClient(timeout=600.0) as client:
            async with client.stream(
                "POST",
                MODAL_API_ENDPOINT,
                json={"problem": problem_input, "complexity": complexity},
            ) as response:
                response.raise_for_status()

                # The backend streams one JSON event per line; aiter_lines()
                # handles both decoding and buffering of partial chunks.
                async for line in response.aiter_lines():
                    if not line.strip():
                        continue
                    try:
                        data = json.loads(line)
                        event_type = data.get("event")

                        if event_type == "status_update":
                            yield (
                                data["data"],
                                format_chat_history_for_gradio(full_chat_history),
                                "", "", "",
                            )
                        elif event_type == "chat_update":
                            full_chat_history.append(data["data"])
                            yield (
                                "In Progress...",
                                format_chat_history_for_gradio(full_chat_history),
                                "", "", "",
                            )
                        elif event_type == "final_solution":
                            yield (
                                "Solution Complete!",
                                format_chat_history_for_gradio(
                                    full_chat_history
                                    + [{"agent": "System", "text": "Final solution synthesized."}]
                                ),
                                data["solution"],
                                data["confidence"],
                                data["minority_opinions"],
                            )
                            return
                    except json.JSONDecodeError as e:
                        # Skip malformed lines instead of aborting the stream.
                        print(f"JSON decode error: {e} in line: {line}")
                    except Exception as e:
                        print(f"Error processing event: {e}, data: {data}")
                        yield (
                            f"Error: {e}",
                            format_chat_history_for_gradio(full_chat_history),
                            "", "", "",
                        )
                        return

    except httpx.HTTPStatusError as e:
        # The response body has not been read in streaming mode, so report
        # only the status line rather than e.response.text.
        error_message = f"HTTP error: {e.response.status_code} {e.response.reason_phrase}"
        print(error_message)
        yield error_message, format_chat_history_for_gradio(full_chat_history), "", "", ""
        return
    except httpx.RequestError as e:
        error_message = f"Request error: could not connect to Modal backend: {e}"
        print(error_message)
        yield error_message, format_chat_history_for_gradio(full_chat_history), "", "", ""
        return
    except Exception as e:
        error_message = f"An unexpected error occurred: {e}"
        print(error_message)
        yield error_message, format_chat_history_for_gradio(full_chat_history), "", "", ""
        return

    # Reached only if the stream ends without a final_solution event.
    yield (
        "Stream ended without a final solution.",
        format_chat_history_for_gradio(full_chat_history),
        "", "", "",
    )


with gr.Blocks() as demo:
    gr.Markdown("# Collective Intelligence Hive")
    gr.Markdown("Enter a problem and watch a hive of AI agents collaborate to solve it! Powered by Modal and Nebius.")

    with gr.Row():
        problem_input = gr.Textbox(
            label="Problem to Solve",
            lines=3,
            placeholder="e.g., 'Develop a marketing strategy for a new eco-friendly smart home device targeting millennials.'",
            scale=3,
        )
        complexity_slider = gr.Slider(minimum=1, maximum=5, value=3, step=1, label="Problem Complexity", scale=1)

    initiate_btn = gr.Button("Initiate Hive", variant="primary")

    status_output = gr.Textbox(label="Hive Status", interactive=False)

    with gr.Row():
        with gr.Column(scale=2):
            chat_display = gr.Chatbot(
                label="Agent Discussion Log",
                height=500,
                type="messages",
                autoscroll=True,
            )
        with gr.Column(scale=1):
            solution_output = gr.Textbox(label="Synthesized Solution", lines=10, interactive=False)
            confidence_output = gr.Textbox(label="Solution Confidence", interactive=False)
            minority_output = gr.Textbox(label="Minority Opinions", lines=3, interactive=False)

    initiate_btn.click(
        call_modal_backend,
        inputs=[problem_input, complexity_slider],
        outputs=[
            status_output,
            chat_display,
            solution_output,
            confidence_output,
            minority_output,
        ],
        queue=True,
    )


if __name__ == "__main__":
    demo.launch()