|
import gradio as gr |
|
import httpx |
|
import asyncio |
|
import json |
|
|
|
|
|
# URL of the deployed Modal web endpoint that orchestrates the agent "hive".
# The whole run is a single blocking POST to this endpoint (see
# call_modal_backend_sync), so the client timeout below is generous.
MODAL_API_ENDPOINT = "https://blastingneurons--collective-hive-backend-final-orchestra-29a41f.modal.run"
|
|
|
|
|
def format_chat_history_for_gradio(log_entries: list[dict]) -> list[dict]:
    """Convert backend log entries into Gradio 'messages'-format history.

    The UI's ``gr.Chatbot`` is created with ``type='messages'``, and that
    format only accepts the roles ``"user"`` and ``"assistant"``; passing an
    arbitrary agent name as the role (as the raw log entries do) is rejected
    by Gradio. Non-user entries are therefore mapped to the ``"assistant"``
    role, with the agent's name prepended to the message text so its identity
    is still visible in the discussion log.

    Args:
        log_entries: Backend log entries; each dict may carry an ``"agent"``
            name (defaults to ``"System"``) and a ``"text"`` payload
            (defaults to ``""``).

    Returns:
        A list of ``{"role": ..., "content": ...}`` dicts safe to hand to
        ``gr.Chatbot(type='messages')``.
    """
    formatted_messages = []
    for entry in log_entries:
        agent = entry.get("agent", "System")
        content = entry.get("text", "")
        if agent == "User":
            formatted_messages.append({"role": "user", "content": content})
        else:
            # The role field cannot carry the agent's name, so keep it in
            # the message body instead (bold, per Chatbot markdown support).
            formatted_messages.append(
                {"role": "assistant", "content": f"**{agent}:** {content}"}
            )
    return formatted_messages
|
|
|
async def call_modal_backend_sync(problem_input: str, complexity: int):
    """Run one hive session against the Modal backend, streaming UI updates.

    An async generator wired to the "Initiate Hive" button: every yield is a
    5-tuple of (status, chat_history, solution, confidence, minority_opinions)
    matching the five output components. It first yields a "connecting"
    placeholder, then a single final tuple — either the backend's result or
    an error message with blank result fields.
    """
    # Immediate placeholder so the UI reacts before the (long) POST returns.
    yield ("Connecting to Hive...", format_chat_history_for_gradio([]), "", "", "")

    try:
        # One blocking request for the whole session; the backend can take a
        # while, hence the 10-minute timeout.
        async with httpx.AsyncClient(timeout=600.0) as client:
            payload = {"problem": problem_input, "complexity": complexity}
            response = await client.post(MODAL_API_ENDPOINT, json=payload)
            response.raise_for_status()
            data = response.json()

            yield (
                data.get("status", "Unknown Status"),
                format_chat_history_for_gradio(data.get("chat_history", [])),
                data.get("solution", "No solution provided."),
                data.get("confidence", "0.0%"),
                data.get("minority_opinions", "None"),
            )
    except httpx.HTTPStatusError as e:
        # Backend answered with a non-2xx status.
        msg = f"HTTP Error from Modal backend: {e.response.status_code} - {e.response.text}"
        print(msg)
        yield (msg, format_chat_history_for_gradio([]), "", "", "")
    except httpx.RequestError as e:
        # Network-level failure (DNS, refused connection, timeout, ...).
        msg = f"Request Error: Could not connect to Modal backend: {e}"
        print(msg)
        yield (msg, format_chat_history_for_gradio([]), "", "", "")
    except Exception as e:
        # Last-resort boundary handler: surface the error in the UI instead
        # of crashing the Gradio event.
        msg = f"An unexpected error occurred during API call: {e}"
        print(msg)
        yield (msg, format_chat_history_for_gradio([]), "", "", "")
|
|
|
|
|
|
|
|
|
|
|
# --- Gradio UI (built at import time, served by demo.launch()) ---
with gr.Blocks() as demo:
    gr.Markdown("# Collective Intelligence Hive")
    gr.Markdown("Enter a problem and watch a hive of AI agents collaborate to solve it! Powered by Modal and Nebius.")

    with gr.Row():
        # Problem prompt plus a 1-5 difficulty knob; both are sent verbatim
        # to the Modal backend as {"problem": ..., "complexity": ...}.
        problem_input = gr.Textbox(label="Problem to Solve", lines=3, placeholder="e.g., 'Develop a marketing strategy for a new eco-friendly smart home device targeting millennials.'", scale=3)
        complexity_slider = gr.Slider(minimum=1, maximum=5, value=3, step=1, label="Problem Complexity", scale=1)

    initiate_btn = gr.Button("Initiate Hive", variant="primary")

    # Free-text status line driven by the generator's first tuple element.
    status_output = gr.Textbox(label="Hive Status", interactive=False)

    with gr.Row():
        with gr.Column(scale=2):
            # Discussion log; type='messages' means entries must be
            # {"role": "user"|"assistant", "content": ...} dicts.
            chat_display = gr.Chatbot(
                label="Agent Discussion Log",
                height=500,
                type='messages',
                autoscroll=True
            )

        with gr.Column(scale=1):
            # Read-only result panels filled from the backend's response.
            solution_output = gr.Textbox(label="Synthesized Solution", lines=10, interactive=False)
            confidence_output = gr.Textbox(label="Solution Confidence", interactive=False)
            minority_output = gr.Textbox(label="Minority Opinions", lines=3, interactive=False)

    # call_modal_backend_sync is an async generator: each yielded 5-tuple
    # updates the five outputs below, in order. queue=True lets the
    # long-running generator stream intermediate updates to the client.
    initiate_btn.click(
        call_modal_backend_sync,
        inputs=[problem_input, complexity_slider],
        outputs=[
            status_output,
            chat_display,
            solution_output,
            confidence_output,
            minority_output
        ],
        queue=True
    )

demo.launch()