zerocool committed on
Commit
43a59d8
·
verified ·
1 Parent(s): b1d0f85

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +11 -19
app.py CHANGED
@@ -5,13 +5,12 @@ import asyncio
5
  import json
6
 
7
  # Replace with your Modal API endpoint URL
8
- MODAL_API_ENDPOINT = "https://blastingneurons--collective-hive-backend-orchestrate-hive-api.modal.run"
9
 
10
  # Helper function to format chat history for Gradio's 'messages' type
11
  def format_chat_history_for_gradio(log_entries: list[dict]) -> list[dict]:
12
  formatted_messages = []
13
  for entry in log_entries:
14
- # Default to 'System' if agent name is not found
15
  role = entry.get("agent", "System")
16
  content = entry.get("text", "")
17
  formatted_messages.append({"role": role, "content": content})
@@ -20,13 +19,11 @@ def format_chat_history_for_gradio(log_entries: list[dict]) -> list[dict]:
20
  async def call_modal_backend(problem_input: str, complexity: int):
21
  full_chat_history = []
22
 
23
- # Initialize all outputs with default values for the first yield
24
  current_status = "Connecting to Hive..."
25
  current_solution = ""
26
  current_confidence = ""
27
  current_minority_opinions = ""
28
 
29
- # First yield to clear previous state and show connecting message
30
  yield (
31
  current_status,
32
  format_chat_history_for_gradio([]),
@@ -36,17 +33,16 @@ async def call_modal_backend(problem_input: str, complexity: int):
36
  )
37
 
38
  try:
39
- async with httpx.AsyncClient(timeout=600.0) as client: # Longer timeout for the full process
40
- # Make sure to send complexity if your Modal backend expects it
41
  async with client.stream("POST", MODAL_API_ENDPOINT, json={"problem": problem_input, "complexity": complexity}) as response:
42
- response.raise_for_status() # Raise an exception for HTTP errors (4xx or 5xx)
43
 
44
  buffer = ""
45
  async for chunk in response.aiter_bytes():
46
  buffer += chunk.decode('utf-8')
47
  while "\n" in buffer:
48
  line, buffer = buffer.split("\n", 1)
49
- if not line.strip(): continue # Skip empty lines
50
  try:
51
  data = json.loads(line)
52
  event_type = data.get("event")
@@ -55,23 +51,21 @@ async def call_modal_backend(problem_input: str, complexity: int):
55
  current_status = data["data"]
56
  elif event_type == "chat_update":
57
  full_chat_history.append(data["data"])
58
- current_status = "In Progress..." # Update status to reflect ongoing discussion
59
  elif event_type == "final_solution":
60
  current_status = "Solution Complete!"
61
  current_solution = data["solution"]
62
  current_confidence = data["confidence"]
63
  current_minority_opinions = data["minority_opinions"]
64
- # Yield final state and then return to end the generator
65
  yield (
66
  current_status,
67
- format_chat_history_for_gradio(full_chat_history + [{"agent": "System", "content": "Final solution synthesized."}]),
68
  current_solution,
69
  current_confidence,
70
  current_minority_opinions
71
  )
72
- return # Done processing
73
 
74
- # Yield the current state of all outputs after processing each event
75
  yield (
76
  current_status,
77
  format_chat_history_for_gradio(full_chat_history),
@@ -82,27 +76,25 @@ async def call_modal_backend(problem_input: str, complexity: int):
82
 
83
  except json.JSONDecodeError as e:
84
  print(f"JSON Decode Error: {e} in line: {line}")
85
- # Handle incomplete JSON chunks, perhaps buffer and process when a full line is received
86
- # For robustness, you might yield an error status here too
87
  current_status = f"Error decoding: {e}"
88
  yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
 
89
  except Exception as e:
90
  print(f"Error processing event: {e}, Data: {data}")
91
- current_status = f"Error: {e}"
92
  yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
93
  return # Exit on critical error
94
 
95
  except httpx.HTTPStatusError as e:
96
- current_status = f"HTTP Error: {e.response.status_code} - {e.response.text}"
97
  print(current_status)
98
  except httpx.RequestError as e:
99
  current_status = f"Request Error: Could not connect to Modal backend: {e}"
100
  print(current_status)
101
  except Exception as e:
102
- current_status = f"An unexpected error occurred: {e}"
103
  print(current_status)
104
 
105
- # Final yield in case of errors or unexpected termination
106
  yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
107
 
108
 
 
5
  import json
6
 
7
  # Replace with your Modal API endpoint URL
8
+ MODAL_API_ENDPOINT = "https://blastingneurons--collective-hive-backend-orchestrate-hive-api.modal.run"
9
 
10
  # Helper function to format chat history for Gradio's 'messages' type
11
  def format_chat_history_for_gradio(log_entries: list[dict]) -> list[dict]:
12
  formatted_messages = []
13
  for entry in log_entries:
 
14
  role = entry.get("agent", "System")
15
  content = entry.get("text", "")
16
  formatted_messages.append({"role": role, "content": content})
 
19
  async def call_modal_backend(problem_input: str, complexity: int):
20
  full_chat_history = []
21
 
 
22
  current_status = "Connecting to Hive..."
23
  current_solution = ""
24
  current_confidence = ""
25
  current_minority_opinions = ""
26
 
 
27
  yield (
28
  current_status,
29
  format_chat_history_for_gradio([]),
 
33
  )
34
 
35
  try:
36
+ async with httpx.AsyncClient(timeout=600.0) as client:
 
37
  async with client.stream("POST", MODAL_API_ENDPOINT, json={"problem": problem_input, "complexity": complexity}) as response:
38
+ response.raise_for_status()
39
 
40
  buffer = ""
41
  async for chunk in response.aiter_bytes():
42
  buffer += chunk.decode('utf-8')
43
  while "\n" in buffer:
44
  line, buffer = buffer.split("\n", 1)
45
+ if not line.strip(): continue
46
  try:
47
  data = json.loads(line)
48
  event_type = data.get("event")
 
51
  current_status = data["data"]
52
  elif event_type == "chat_update":
53
  full_chat_history.append(data["data"])
54
+ current_status = "In Progress..."
55
  elif event_type == "final_solution":
56
  current_status = "Solution Complete!"
57
  current_solution = data["solution"]
58
  current_confidence = data["confidence"]
59
  current_minority_opinions = data["minority_opinions"]
 
60
  yield (
61
  current_status,
62
+ format_chat_history_for_gradio(full_chat_history + [{"role": "System", "content": "Final solution synthesized."}]),
63
  current_solution,
64
  current_confidence,
65
  current_minority_opinions
66
  )
67
+ return
68
 
 
69
  yield (
70
  current_status,
71
  format_chat_history_for_gradio(full_chat_history),
 
76
 
77
  except json.JSONDecodeError as e:
78
  print(f"JSON Decode Error: {e} in line: {line}")
 
 
79
  current_status = f"Error decoding: {e}"
80
  yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
81
+ # Do not return here if you want to keep trying to parse subsequent chunks
82
  except Exception as e:
83
  print(f"Error processing event: {e}, Data: {data}")
84
+ current_status = f"An internal error occurred: {e}"
85
  yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
86
  return # Exit on critical error
87
 
88
  except httpx.HTTPStatusError as e:
89
+ current_status = f"HTTP Error from Modal backend: {e.response.status_code}"
90
  print(current_status)
91
  except httpx.RequestError as e:
92
  current_status = f"Request Error: Could not connect to Modal backend: {e}"
93
  print(current_status)
94
  except Exception as e:
95
+ current_status = f"An unexpected error occurred during API call: {e}"
96
  print(current_status)
97
 
 
98
  yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
99
 
100