saakshigupta committed on
Commit
a82538a
·
verified ·
1 Parent(s): 5b83057

Update app.py

Files changed (1)
  1. app.py +160 -13
app.py CHANGED
@@ -38,6 +38,42 @@ st.set_page_config(
     page_icon="🔍"
 )
 
+# Debug logging
+debug_mode = False
+if "debug" not in st.session_state:
+    st.session_state.debug = debug_mode
+
+# Add debug toggle in sidebar
+with st.sidebar:
+    st.session_state.debug = st.toggle("Enable Debug Mode", value=debug_mode)
+
+def log_debug(message):
+    """Helper function to log debug messages only when debug mode is enabled"""
+    if st.session_state.debug:
+        st.write(f"DEBUG: {message}")
+
+# Function to check environment
+def check_environment():
+    import sys
+    import platform
+
+    if st.session_state.debug:
+        st.sidebar.write("### Environment Info")
+        st.sidebar.write(f"Python version: {sys.version}")
+        st.sidebar.write(f"Platform: {platform.platform()}")
+        try:
+            import torch
+            st.sidebar.write(f"Torch version: {torch.__version__}")
+            st.sidebar.write(f"CUDA available: {torch.cuda.is_available()}")
+            if torch.cuda.is_available():
+                st.sidebar.write(f"CUDA version: {torch.version.cuda}")
+                st.sidebar.write(f"GPU: {torch.cuda.get_device_name(0)}")
+        except:
+            st.sidebar.write("Torch not available or error checking")
+
+# Run environment check
+check_environment()
+
 # Main title and description
 st.title("Deepfake Image Analyser")
 st.markdown("Analyse images for deepfake manipulation")
@@ -190,14 +226,29 @@ def load_detection_model_xception():
     """Loads the Xception model from our module"""
     with st.spinner("Loading Xception model for deepfake detection..."):
         try:
+            log_debug("Beginning Xception model loading")
+            from gradcam_xception import load_xception_model
             model = load_xception_model()
+
             # Get the device
             device = torch.device("cuda" if torch.cuda.is_available() else "cpu")
+            log_debug(f"Using device: {device}")
+
             model.to(device)
             model.eval()
+            log_debug("Xception model loaded successfully")
             return model, device
+        except ImportError as e:
+            st.error(f"Import Error: {str(e)}. Make sure gradcam_xception.py is present.")
+            log_debug("Import error with gradcam_xception.py module")
+            return None, None
         except Exception as e:
            st.error(f"Error loading Xception model: {str(e)}")
+            import traceback
+            error_details = traceback.format_exc()
+            if st.session_state.debug:
+                st.error(error_details)
+            log_debug(f"Error details: {error_details}")
            return None, None
 
 # ----- BLIP Image Captioning -----
@@ -422,11 +473,19 @@ def analyze_image_with_llm(image, gradcam_overlay, face_box, pred_label, confide
 def preprocess_image_xception(image):
     """Preprocesses image for Xception model input and face detection."""
     try:
-        st.write("Starting image preprocessing...")
+        log_debug("Starting image preprocessing for Xception model")
         face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + 'haarcascade_frontalface_default.xml')
+
+        # Ensure image is in correct format
+        if image is None:
+            log_debug("Image is None - this should never happen!")
+            return None, None, None
+
+        # Get image shape for logging
         image_np = np.array(image.convert('RGB')) # Ensure RGB
-        st.write(f"Image shape: {image_np.shape}")
+        log_debug(f"Image shape: {image_np.shape}")
 
+        # Face detection with detailed logs
         gray = cv2.cvtColor(image_np, cv2.COLOR_RGB2GRAY)
         faces = face_detector.detectMultiScale(gray, 1.1, 5)
 
@@ -434,12 +493,14 @@ def preprocess_image_xception(image):
         face_box_display = None # For drawing on original image
 
         if len(faces) == 0:
+            log_debug("No face detected in the image, using whole image")
             st.warning("No face detected, using whole image for prediction/CAM.")
         else:
+            log_debug(f"Detected {len(faces)} faces in the image")
             areas = [w * h for (x, y, w, h) in faces]
             largest_idx = np.argmax(areas)
             x, y, w, h = faces[largest_idx]
-            st.write(f"Face detected at: x={x}, y={y}, w={w}, h={h}")
+            log_debug(f"Using largest face at: x={x}, y={y}, w={w}, h={h}")
 
             padding_x = int(w * 0.05) # Use percentages as in gradcam_xception
             padding_y = int(h * 0.05)
@@ -455,18 +516,29 @@ def preprocess_image_xception(image):
         transform = get_xception_transform()
         # Apply transform to the selected region (face or whole image)
         input_tensor = transform(face_img_for_transform).unsqueeze(0)
-        st.write(f"Tensor shape: {input_tensor.shape}")
+        log_debug(f"Preprocessed tensor shape: {input_tensor.shape}")
 
         # Return tensor, original full image, and the display face box
         return input_tensor, image, face_box_display
+
     except Exception as e:
         st.error(f"Error in preprocessing image: {str(e)}")
         import traceback
-        st.error(traceback.format_exc())
-        # Return defaults that won't break the pipeline
-        transform = get_xception_transform()
-        input_tensor = transform(image).unsqueeze(0)
-        return input_tensor, image, None
+        error_details = traceback.format_exc()
+        log_debug(f"Preprocessing error details: {error_details}")
+        if st.session_state.debug:
+            st.error(error_details)
+
+        # Try a fallback method if possible
+        try:
+            log_debug("Trying fallback preprocessing method")
+            transform = get_xception_transform()
+            input_tensor = transform(image).unsqueeze(0)
+            return input_tensor, image, None
+        except Exception as fallback_e:
+            log_debug(f"Fallback also failed: {str(fallback_e)}")
+            st.error("Both preprocessing attempts failed. Please try another image.")
+            return None, None, None
 
 # Main app
 def main():
@@ -491,6 +563,54 @@ def main():
     if 'chat_history' not in st.session_state:
         st.session_state.chat_history = []
 
+    # Display Hugging Face Spaces information if debug mode is on
+    if st.session_state.debug:
+        with st.expander("Hugging Face Spaces Debugging Information", expanded=True):
+            st.markdown("""
+            ### Common Issues with Hugging Face Spaces
+
+            1. **403/404 Errors**: Often caused by permission issues when accessing files or external resources.
+
+            2. **Memory Limits**: Free spaces have limited memory (16GB). Large models may cause OOM errors.
+
+            3. **Disk Space**: Limited to 10GB for persistent storage.
+
+            4. **Network Restrictions**: Some external URLs might be blocked or restricted.
+
+            ### Accessing Logs
+            To see detailed error logs in Hugging Face Spaces:
+
+            1. Go to your Space dashboard
+            2. Click on "Logs" in the left sidebar
+            3. Check both "Build logs" and "Running logs" tabs
+
+            In the running logs, look for Python tracebacks or error messages.
+
+            ### This App's Setup
+            - All image processing is now done in-memory to avoid file permission issues
+            - Debug logging is available through this interface
+            - For large model loading issues, try using smaller models or increasing RAM allocation
+            """)
+
+            # Add a test connection button
+            if st.button("Test Network Connection"):
+                try:
+                    import requests
+                    test_urls = [
+                        "https://huggingface.co/",
+                        "https://www.google.com/",
+                        "https://jsonplaceholder.typicode.com/todos/1"
+                    ]
+
+                    for url in test_urls:
+                        try:
+                            response = requests.get(url, timeout=5)
+                            st.write(f"✅ {url}: Status {response.status_code}")
+                        except Exception as e:
+                            st.write(f"❌ {url}: Error - {str(e)}")
+                except Exception as e:
+                    st.error(f"Could not perform connection test: {str(e)}")
+
     # Create expanders for each stage
     with st.expander("Stage 1: Model Loading", expanded=True):
         st.write("Please load the models using the buttons below:")
@@ -558,26 +678,53 @@ def main():
         uploaded_file = st.file_uploader("Choose an image...", type=["jpg", "jpeg", "png"])
         if uploaded_file is not None:
             try:
-                # Try to load directly from bytes to avoid file system issues
+                # Direct in-memory handling - avoid writing to disk
                 file_bytes = uploaded_file.getvalue()
-                uploaded_image = Image.open(io.BytesIO(file_bytes)).convert("RGB")
+                # Log the file size for debugging
+                st.write(f"Debug: Received file of size {len(file_bytes)} bytes")
+                # Process directly in memory
+                image_stream = io.BytesIO(file_bytes)
+                uploaded_image = Image.open(image_stream).convert("RGB")
                 st.session_state.upload_method = "file"
+                # Log success
+                st.success("File loaded successfully in memory")
             except Exception as e:
                 st.error(f"Error loading file: {str(e)}")
+                import traceback
+                st.error(traceback.format_exc())
 
     with upload_tab2:
         url = st.text_input("Enter image URL:")
         if url and url.strip():
            try:
                import requests
-                response = requests.get(url, stream=True)
+                # Log the URL (excluding any sensitive parts)
+                display_url = url.split("?")[0] if "?" in url else url
+                st.write(f"Debug: Attempting to fetch image from {display_url}")
+
+                headers = {
+                    'User-Agent': 'Mozilla/5.0 (Windows NT 10.0; Win64; x64) AppleWebKit/537.36 (KHTML, like Gecko) Chrome/91.0.4472.124 Safari/537.36'
+                }
+                response = requests.get(url, stream=True, headers=headers, timeout=10)
+
                 if response.status_code == 200:
-                    uploaded_image = Image.open(io.BytesIO(response.content)).convert("RGB")
+                    # Log success and content type
+                    st.write(f"Debug: Received response - Content-Type: {response.headers.get('Content-Type', 'unknown')}")
+
+                    # Process directly in memory
+                    image_stream = io.BytesIO(response.content)
+                    uploaded_image = Image.open(image_stream).convert("RGB")
                     st.session_state.upload_method = "url"
+                    st.success(f"Image successfully loaded from URL - Size: {len(response.content)} bytes")
                 else:
                     st.error(f"Failed to load image from URL: Status code {response.status_code}")
+                    if response.status_code in [403, 401]:
+                        st.warning("This appears to be an access permissions issue. The server is refusing to serve this image.")
+                        st.info("Try using an image URL from a site that allows hotlinking, or upload a file directly.")
             except Exception as e:
                 st.error(f"Error loading image from URL: {str(e)}")
+                import traceback
+                st.error(traceback.format_exc())
 
     # If we have an uploaded image, process it
     if uploaded_image is not None:
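
For readers reusing the debug-toggle pattern from this commit outside of app.py, a minimal standalone sketch is shown below. It assumes a Streamlit version that provides st.toggle (1.26+); the page title and the final demo calls are illustrative only and are not part of the commit.

import streamlit as st

st.set_page_config(page_title="Debug toggle demo")

# Keep the debug flag in session state so it survives reruns
if "debug" not in st.session_state:
    st.session_state.debug = False

# Sidebar toggle controls whether debug output is rendered
with st.sidebar:
    st.session_state.debug = st.toggle("Enable Debug Mode", value=False)

def log_debug(message):
    """Write a message to the page only when debug mode is enabled."""
    if st.session_state.debug:
        st.write(f"DEBUG: {message}")

log_debug("This line is rendered only while the sidebar toggle is on.")
st.title("Main content renders regardless of debug mode")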