owinymarvin committed
Commit 028d725 · Parent: c450b97

latest changes

Files changed (1):
  1. app.py +52 -36
app.py CHANGED
```diff
@@ -9,14 +9,16 @@ from collections import deque
 import base64
 import io
 
+# --- Configuration ---
 HF_MODEL_REPO_ID = "owinymarvin/timesformer-crime-detection"
 MODEL_INPUT_NUM_FRAMES = 8
 TARGET_IMAGE_HEIGHT = 224
 TARGET_IMAGE_WIDTH = 224
 RAW_RECORDING_DURATION_SECONDS = 10.0
 FRAMES_TO_SAMPLE_PER_CLIP = 20
-DELAY_BETWEEN_PREDICTIONS_SECONDS = 120.0
+DELAY_BETWEEN_PREDICTIONS_SECONDS = 120.0  # 2 minutes for CPU
 
+# --- Load Model and Processor ---
 print(f"Loading model and processor from {HF_MODEL_REPO_ID}...")
 try:
     processor = AutoImageProcessor.from_pretrained(HF_MODEL_REPO_ID)
@@ -30,57 +32,57 @@ device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
 model.to(device)
 print(f"Model loaded on {device}.")
 
+# --- Global State Variables for Live Demo ---
 raw_frames_buffer = deque()
 current_clip_start_time = time.time()
 last_prediction_completion_time = time.time()
-app_state = "recording"
+app_state = "recording"  # States: "recording", "predicting", "processing_delay"
 
+# --- Helper function to sample frames ---
 def sample_frames(frames_list, target_count):
     if not frames_list:
         return []
     if len(frames_list) <= target_count:
         return frames_list
     indices = np.linspace(0, len(frames_list) - 1, target_count, dtype=int)
-    # FIX: Corrected list indexing from () to []
     sampled = [frames_list[int(i)] for i in indices]
     return sampled
 
+# --- Main processing function for Live Demo Stream ---
 def live_predict_stream(image_np_array):
     global raw_frames_buffer, current_clip_start_time, last_prediction_completion_time, app_state
 
     current_time = time.time()
     pil_image = Image.fromarray(image_np_array)
 
-    status_message = ""
-    prediction_result = ""
-
     if app_state == "recording":
         raw_frames_buffer.append(pil_image)
         elapsed_recording_time = current_time - current_clip_start_time
-        status_message = f"Recording: {elapsed_recording_time:.1f}/{RAW_RECORDING_DURATION_SECONDS}s. Raw frames: {len(raw_frames_buffer)}"
-        prediction_result = "Buffering..."
+
+        yield f"Recording: {elapsed_recording_time:.1f}/{RAW_RECORDING_DURATION_SECONDS}s. Raw frames: {len(raw_frames_buffer)}", "Buffering..."
+
         if elapsed_recording_time >= RAW_RECORDING_DURATION_SECONDS:
+            # Transition to predicting state
             app_state = "predicting"
-            status_message = "Preparing to predict..."
-            prediction_result = "Processing..."
+            yield "Preparing to predict...", "Processing..."
             print("DEBUG: Transitioning to 'predicting' state.")
 
     elif app_state == "predicting":
-        if raw_frames_buffer:
+        # Ensure this prediction block only runs once per cycle
+        if raw_frames_buffer:  # Only proceed if there are frames to process
             print("DEBUG: Starting prediction.")
             try:
                 sampled_raw_frames = sample_frames(list(raw_frames_buffer), FRAMES_TO_SAMPLE_PER_CLIP)
                 frames_for_model = sample_frames(sampled_raw_frames, MODEL_INPUT_NUM_FRAMES)
 
                 if len(frames_for_model) < MODEL_INPUT_NUM_FRAMES:
-                    prediction_result = "Error: Not enough frames for model."
-                    status_message = "Error during frame sampling."
-                    print(f"ERROR: Insufficient frames for model input: {len(frames_for_model)}/{MODEL_INPUT_NUM_FRAMES}")
-                    app_state = "recording"  # Reset to recording state
+                    yield "Error during frame sampling.", f"Error: Not enough frames ({len(frames_for_model)}/{MODEL_INPUT_NUM_FRAMES}). Resetting."
+                    print(f"ERROR: Insufficient frames for model input: {len(frames_for_model)}/{MODEL_INPUT_NUM_FRAMES}. Resetting state.")
+                    app_state = "recording"  # Reset state to start a new recording
                     raw_frames_buffer.clear()
                     current_clip_start_time = time.time()
                     last_prediction_completion_time = time.time()
-                    return status_message, prediction_result
+                    return  # Exit this stream call to wait for next frame or reset
 
                 processed_input = processor(images=frames_for_model, return_tensors="pt")
                 pixel_values = processed_input.pixel_values.to(device)
```
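Reviewer note: the two-stage `sample_frames` call thins the raw buffer uniformly via `np.linspace` over list indices. A minimal, standalone check of what it selects (plain numpy, outside the app):

```python
import numpy as np

def sample_frames(frames_list, target_count):
    # Same logic as in app.py: uniform index selection via np.linspace.
    if not frames_list:
        return []
    if len(frames_list) <= target_count:
        return frames_list
    indices = np.linspace(0, len(frames_list) - 1, target_count, dtype=int)
    return [frames_list[int(i)] for i in indices]

raw = list(range(200))             # stand-in for ~200 buffered PIL frames
clip = sample_frames(raw, 20)      # FRAMES_TO_SAMPLE_PER_CLIP
model_in = sample_frames(clip, 8)  # MODEL_INPUT_NUM_FRAMES

print(np.linspace(0, 19, 8, dtype=int))  # [ 0  2  5  8 10 13 16 19]
print(model_in)                          # 8 frames spread evenly across the clip
```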
```diff
@@ -96,32 +98,45 @@ def live_predict_stream(image_np_array):
                 prediction_result = f"Predicted: {predicted_label} (Confidence: {confidence:.2f})"
                 status_message = "Prediction complete."
                 print(f"DEBUG: Prediction Result: {prediction_result}")
+
+                # Yield the prediction result immediately to ensure UI update
+                yield status_message, prediction_result
 
+                # Clear buffer and transition to delay AFTER yielding the prediction
                 raw_frames_buffer.clear()
                 last_prediction_completion_time = current_time
                 app_state = "processing_delay"
                 print("DEBUG: Transitioning to 'processing_delay' state.")
 
             except Exception as e:
-                prediction_result = f"Error during prediction: {e}"
-                status_message = "Prediction error."
+                error_message = f"Error during prediction: {e}"
                 print(f"ERROR during prediction: {e}")
-                app_state = "processing_delay"  # Move to delay to avoid continuous errors
-        else:
-            status_message = "Waiting for frames..."
-            prediction_result = "..."
+                # Yield error to UI
+                yield "Prediction error.", error_message
+                app_state = "processing_delay"  # Still go to delay state to prevent constant errors
+                raw_frames_buffer.clear()  # Clear buffer to prevent re-processing same problematic frames
 
     elif app_state == "processing_delay":
         elapsed_delay = current_time - last_prediction_completion_time
-        status_message = f"Delaying next prediction: {int(elapsed_delay)}/{int(DELAY_BETWEEN_PREDICTIONS_SECONDS)}s"
-        if elapsed_delay >= DELAY_BETWEEN_PREDICTIONS_SECONDS:
+
+        if elapsed_delay < DELAY_BETWEEN_PREDICTIONS_SECONDS:
+            # Continue yielding the delay message and the last prediction result
+            # Assuming prediction_result from previous state is still held by UI
+            yield f"Delaying next prediction: {int(elapsed_delay)}/{int(DELAY_BETWEEN_PREDICTIONS_SECONDS)}s", gr.NO_VALUE  # NO_VALUE keeps previous prediction visible
+        else:
+            # Delay is over, reset for new recording cycle
             app_state = "recording"
             current_clip_start_time = current_time
-            status_message = "Starting new recording..."
-            prediction_result = "Ready..."
             print("DEBUG: Transitioning back to 'recording' state.")
-
-    return status_message, prediction_result
+            yield "Starting new recording...", "Ready for new prediction."
+
+    # If for some reason nothing is yielded, return the current state to prevent UI freeze.
+    # This acts as a fallback if no state transition happens.
+    # However, with the yield statements, this might be less critical.
+    # For streaming, yielding is the preferred way to update.
+    # If the function ends without yielding, Gradio will just keep the last state.
+    # We always yield in every branch.
+    pass  # No explicit return needed at the end if all paths yield
 
 def reset_app_state_manual():
     global raw_frames_buffer, current_clip_start_time, last_prediction_completion_time, app_state
```
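Reviewer note: the forward pass itself (old lines 87-95) falls between hunks and is unchanged by this commit. For orientation only, a sketch of the standard Hugging Face video-classification pattern that would produce the `predicted_label` and `confidence` used above, assuming the `model` and `pixel_values` from the diff; this is not the author's exact code:

```python
import torch

with torch.no_grad():
    outputs = model(pixel_values=pixel_values)  # TimeSformer classification forward pass
probs = torch.nn.functional.softmax(outputs.logits, dim=-1)[0]
confidence, predicted_idx = torch.max(probs, dim=-1)
predicted_label = model.config.id2label[predicted_idx.item()]
print(f"Predicted: {predicted_label} (Confidence: {confidence.item():.2f})")
```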
```diff
@@ -130,8 +145,10 @@ def reset_app_state_manual():
     last_prediction_completion_time = time.time()
     app_state = "recording"
     print("DEBUG: Manual reset triggered.")
+    # Return initial values immediately upon reset
     return "Ready to record...", "Ready for new prediction."
 
+# --- Gradio UI Layout ---
 with gr.Blocks() as demo:
     gr.Markdown(
         f"""
@@ -160,11 +177,15 @@ with gr.Blocks() as demo:
         with gr.Column():
             prediction_output = gr.Textbox(label="Prediction Result", value="Waiting...")
 
+    # IMPORTANT: Use webcam_input.stream() with a generator function (live_predict_stream)
+    # to enable progressive updates via 'yield'.
     webcam_input.stream(
         live_predict_stream,
         inputs=[webcam_input],
         outputs=[status_output, prediction_output]
     )
+
+    # The reset button is a regular click event, not a stream
     reset_button.click(
         reset_app_state_manual,
         inputs=[],
```
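Reviewer note: switching `live_predict_stream` from `return status_message, prediction_result` to `yield` makes it a generator, which is what lets a single webcam frame drive several UI updates. A minimal sketch of that pattern with a hypothetical handler (Gradio 4-style `gr.Image` signature; older 3.x releases used `source="webcam"` instead of `sources=["webcam"]`):

```python
import time
import gradio as gr

def stream_handler(frame):
    # Generator event handler: each yield updates (status, prediction) in the UI.
    yield "frame received", "working..."
    time.sleep(0.5)  # stand-in for buffering / model inference
    yield "done", "label: normal (0.98)"

with gr.Blocks() as demo:
    cam = gr.Image(sources=["webcam"], streaming=True)
    status = gr.Textbox(label="Status")
    pred = gr.Textbox(label="Prediction Result")
    cam.stream(stream_handler, inputs=[cam], outputs=[status, pred])

if __name__ == "__main__":
    demo.launch()
```

One version caveat: whether `gr.NO_VALUE` (used in the delay branch above) exists depends on the installed Gradio release; recent versions expose `gr.skip()` as the supported way to leave an output unchanged.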
```diff
@@ -177,19 +198,14 @@ with gr.Blocks() as demo:
         Use this API endpoint to send base64-encoded frames for prediction.
         """
     )
-    # Re-adding a slightly more representative API interface
-    # Gradio's automatic API documentation will use this to show inputs/outputs
+    # Placeholder for the API tab. The actual API calls target /run/predict_from_frames_api
     gr.Interface(
-        fn=lambda frames_list: f"Received {len(frames_list)} frames. This is a dummy response. Integrate predict_from_frames_api here.",
+        fn=lambda frames_list: "API endpoint is active for programmatic calls. See documentation in app.py.",
         inputs=gr.Json(label="List of Base64-encoded image strings"),
         outputs=gr.Textbox(label="API Response"),
         live=False,
-        allow_flagging="never"  # For API endpoints, flagging is usually not desired
+        allow_flagging="never"
     )
-    # Note: The actual `predict_from_frames_api` function is defined above,
-    # but for a clean API tab, we can use a dummy interface here that Gradio will
-    # use to generate the interactive API documentation. The actual API call
-    # from your local script directly targets the /run/predict_from_frames_api endpoint.
 
 
 if __name__ == "__main__":
```
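Reviewer note: the removed comments said client scripts target `/run/predict_from_frames_api` directly. A hypothetical client under that assumption, using the classic Gradio REST payload shape (`{"data": [...]}`); the Space URL, the endpoint name, and the frame format are illustrative, not confirmed by this diff:

```python
import base64
import io

import requests
from PIL import Image

SPACE_URL = "https://example-space.hf.space"  # hypothetical; substitute your Space's URL

def encode_frame(img: Image.Image) -> str:
    # Base64-encode a PIL frame as JPEG, matching the API tab's declared input.
    buf = io.BytesIO()
    img.save(buf, format="JPEG")
    return base64.b64encode(buf.getvalue()).decode("utf-8")

frames = [encode_frame(Image.new("RGB", (224, 224))) for _ in range(8)]
resp = requests.post(f"{SPACE_URL}/run/predict_from_frames_api", json={"data": [frames]})
print(resp.json())
```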