zerocool committed on
Commit
49fea4f
·
verified ·
1 Parent(s): aff4f1d

Update app.py

Browse files
Files changed (1) hide show
  1. app.py +55 -36
app.py CHANGED
@@ -19,18 +19,28 @@ def format_chat_history_for_gradio(log_entries: list[dict]) -> list[dict]:
19
 
20
  async def call_modal_backend(problem_input: str, complexity: int):
21
  full_chat_history = []
22
- # Initial yield to clear previous state and show connecting message
23
- yield {
24
- "status": "Connecting to Hive...",
25
- "chat_history": [],
26
- "solution": "", "confidence": "", "minority_opinions": ""
27
- }
 
 
 
 
 
 
 
 
 
28
 
29
  try:
30
  async with httpx.AsyncClient(timeout=600.0) as client: # Longer timeout for the full process
 
31
  async with client.stream("POST", MODAL_API_ENDPOINT, json={"problem": problem_input, "complexity": complexity}) as response:
32
  response.raise_for_status() # Raise an exception for HTTP errors (4xx or 5xx)
33
- # We need to buffer chunks to ensure we parse complete JSON lines
34
  buffer = ""
35
  async for chunk in response.aiter_bytes():
36
  buffer += chunk.decode('utf-8')
@@ -42,49 +52,58 @@ async def call_modal_backend(problem_input: str, complexity: int):
42
  event_type = data.get("event")
43
 
44
  if event_type == "status_update":
45
- yield {
46
- "status": data["data"],
47
- "chat_history": format_chat_history_for_gradio(full_chat_history)
48
- }
49
  elif event_type == "chat_update":
50
  full_chat_history.append(data["data"])
51
- yield {
52
- "status": "In Progress...",
53
- "chat_history": format_chat_history_for_gradio(full_chat_history)
54
- }
55
  elif event_type == "final_solution":
56
- yield {
57
- "status": "Solution Complete!",
58
- "chat_history": format_chat_history_for_gradio(full_chat_history + [{"agent": "System", "text": "Final solution synthesized."}]),
59
- "solution": data["solution"],
60
- "confidence": data["confidence"],
61
- "minority_opinions": data["minority_opinions"]
62
- }
 
 
 
 
 
63
  return # Done processing
64
 
 
 
 
 
 
 
 
 
 
65
  except json.JSONDecodeError as e:
66
  print(f"JSON Decode Error: {e} in line: {line}")
67
- # This could happen if a partial JSON is received.
68
- # The buffering logic should help, but if it's consistently failing, check Modal's streaming output.
 
 
69
  except Exception as e:
70
  print(f"Error processing event: {e}, Data: {data}")
71
- yield {"status": f"Error: {e}", "chat_history": format_chat_history_for_gradio(full_chat_history)}
72
- return
 
73
 
74
  except httpx.HTTPStatusError as e:
75
- error_message = f"HTTP Error: {e.response.status_code} - {e.response.text}"
76
- print(error_message)
77
- yield {"status": error_message, "chat_history": format_chat_history_for_gradio(full_chat_history)}
78
  except httpx.RequestError as e:
79
- error_message = f"Request Error: Could not connect to Modal backend: {e}"
80
- print(error_message)
81
- yield {"status": error_message, "chat_history": format_chat_history_for_gradio(full_chat_history)}
82
  except Exception as e:
83
- error_message = f"An unexpected error occurred: {e}"
84
- print(error_message)
85
- yield {"status": error_message, "chat_history": format_chat_history_for_gradio(full_chat_history)}
86
 
87
- yield {"status": "Process finished unexpectedly or ended.", "chat_history": format_chat_history_for_gradio(full_chat_history)}
 
88
 
89
 
90
  with gr.Blocks() as demo:
 
19
 
20
  async def call_modal_backend(problem_input: str, complexity: int):
21
  full_chat_history = []
22
+
23
+ # Initialize all outputs with default values for the first yield
24
+ current_status = "Connecting to Hive..."
25
+ current_solution = ""
26
+ current_confidence = ""
27
+ current_minority_opinions = ""
28
+
29
+ # First yield to clear previous state and show connecting message
30
+ yield (
31
+ current_status,
32
+ format_chat_history_for_gradio([]),
33
+ current_solution,
34
+ current_confidence,
35
+ current_minority_opinions
36
+ )
37
 
38
  try:
39
  async with httpx.AsyncClient(timeout=600.0) as client: # Longer timeout for the full process
40
+ # Make sure to send complexity if your Modal backend expects it
41
  async with client.stream("POST", MODAL_API_ENDPOINT, json={"problem": problem_input, "complexity": complexity}) as response:
42
  response.raise_for_status() # Raise an exception for HTTP errors (4xx or 5xx)
43
+
44
  buffer = ""
45
  async for chunk in response.aiter_bytes():
46
  buffer += chunk.decode('utf-8')
 
52
  event_type = data.get("event")
53
 
54
  if event_type == "status_update":
55
+ current_status = data["data"]
 
 
 
56
  elif event_type == "chat_update":
57
  full_chat_history.append(data["data"])
58
+ current_status = "In Progress..." # Update status to reflect ongoing discussion
 
 
 
59
  elif event_type == "final_solution":
60
+ current_status = "Solution Complete!"
61
+ current_solution = data["solution"]
62
+ current_confidence = data["confidence"]
63
+ current_minority_opinions = data["minority_opinions"]
64
+ # Yield final state and then return to end the generator
65
+ yield (
66
+ current_status,
67
+ format_chat_history_for_gradio(full_chat_history + [{"agent": "System", "content": "Final solution synthesized."}]),
68
+ current_solution,
69
+ current_confidence,
70
+ current_minority_opinions
71
+ )
72
  return # Done processing
73
 
74
+ # Yield the current state of all outputs after processing each event
75
+ yield (
76
+ current_status,
77
+ format_chat_history_for_gradio(full_chat_history),
78
+ current_solution,
79
+ current_confidence,
80
+ current_minority_opinions
81
+ )
82
+
83
  except json.JSONDecodeError as e:
84
  print(f"JSON Decode Error: {e} in line: {line}")
85
+ # Handle incomplete JSON chunks, perhaps buffer and process when a full line is received
86
+ # For robustness, you might yield an error status here too
87
+ current_status = f"Error decoding: {e}"
88
+ yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
89
  except Exception as e:
90
  print(f"Error processing event: {e}, Data: {data}")
91
+ current_status = f"Error: {e}"
92
+ yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
93
+ return # Exit on critical error
94
 
95
  except httpx.HTTPStatusError as e:
96
+ current_status = f"HTTP Error: {e.response.status_code} - {e.response.text}"
97
+ print(current_status)
 
98
  except httpx.RequestError as e:
99
+ current_status = f"Request Error: Could not connect to Modal backend: {e}"
100
+ print(current_status)
 
101
  except Exception as e:
102
+ current_status = f"An unexpected error occurred: {e}"
103
+ print(current_status)
 
104
 
105
+ # Final yield in case of errors or unexpected termination
106
+ yield (current_status, format_chat_history_for_gradio(full_chat_history), current_solution, current_confidence, current_minority_opinions)
107
 
108
 
109
  with gr.Blocks() as demo: