monster07 committed on
Commit ce23e3a · verified · 1 Parent(s): c09345c

Update app.py

Files changed (1)
  1. app.py +32 -30
app.py CHANGED
@@ -12,15 +12,17 @@ processor = AutoImageProcessor.from_pretrained(model_name)
 model = SiglipForImageClassification.from_pretrained(model_name)
 model.eval()
 
-# Face detector
+# Load face detector
 face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
 
 def analyze_deepfake(video_path):
     cap = cv2.VideoCapture(video_path)
     frame_preds = []
+    real_count = 0
+    fake_count = 0
     frame_count = 0
-    max_frames = 30  # ✅ Reduced for speed
-    frame_skip = 5  # ✅ Process every 5th frame
+    max_frames = 40
+    frame_skip = 5
 
     while True:
         ret, frame = cap.read()
@@ -32,60 +34,60 @@ def analyze_deepfake(video_path):
             continue
 
         gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
-        faces = face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
+        faces = face_detector.detectMultiScale(gray, 1.1, 4)
 
-        found = False
         for (x, y, w, h) in faces:
             face = frame[y:y+h, x:x+w]
             if face.size == 0:
                 continue
 
-            face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
-            pil_img = Image.fromarray(face_rgb)
-            inputs = processor(images=pil_img, return_tensors="pt")
+            rgb_face = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
+            image = Image.fromarray(rgb_face)
+            inputs = processor(images=image, return_tensors="pt")
 
             with torch.no_grad():
                 logits = model(**inputs).logits
-                fake_prob = torch.softmax(logits, dim=-1)[0][1].item()
+                probs = torch.softmax(logits, dim=-1)[0]
+                fake_prob = probs[1].item()
+            frame_preds.append(fake_prob)
 
-            frame_preds.append(fake_prob)
-            found = True
-            break
-
-        if not found:
-            frame_preds.append(0.5)
+            if fake_prob > 0.6:
+                fake_count += 1
+            else:
+                real_count += 1
 
         frame_count += 1
 
     cap.release()
 
-    if frame_preds:
-        avg = np.mean(frame_preds)
-        verdict = "FAKE" if avg > 0.5 else "REAL"
-        result_text = f"✅ FINAL RESULT: **{verdict}** (confidence: {avg:.2f})"
+    # Decision logic
+    if real_count + fake_count == 0:
+        result = "❌ No faces detected. Try a clearer video."
     else:
-        result_text = "❌ No faces detected. Try a clearer video."
+        final_verdict = "FAKE" if fake_count > real_count else "REAL"
+        confidence = np.mean(frame_preds)
+        result = f"🎯 Result: **{final_verdict}** (Avg Confidence = {confidence:.2f}, Real = {real_count}, Fake = {fake_count})"
 
-    # Generate graph directly
+    # Graph
     fig, ax = plt.subplots(figsize=(6, 4))
-    ax.hist(frame_preds, bins=10, color="orange", edgecolor="black")
-    ax.set_title("Fake Confidence per Frame")
+    ax.hist(frame_preds, bins=10, color="green" if real_count > fake_count else "red", edgecolor="black")
+    ax.set_title("Fake Confidence per Face")
     ax.set_xlabel("Confidence (0 = Real, 1 = Fake)")
-    ax.set_ylabel("Frame Count")
+    ax.set_ylabel("Count")
     ax.grid(True)
 
-    return result_text, fig
+    return result, fig
 
 # Gradio UI
 demo = gr.Interface(
     fn=analyze_deepfake,
-    inputs=gr.Video(label="📤 Upload MP4 video"),
+    inputs=gr.Video(label="📤 Upload your .mp4 video (under 100MB)"),
     outputs=[
-        gr.Markdown(label="🧠 Analysis Result"),
-        gr.Plot(label="📈 Confidence Histogram")
+        gr.Markdown(label="📊 Deepfake Detection Result"),
+        gr.Plot(label="📈 Confidence Distribution")
     ],
-    title="🎭 Deepfake Video Detection (Fast)",
-    description="Upload a short MP4 video (under 60MB). This model will detect faces and classify each as REAL or FAKE based on frame analysis."
+    title="🎭 Deepfake Video Detector (Accurate & Fast)",
+    description="This model detects faces in video frames and classifies each as REAL or FAKE using a fine-tuned vision transformer."
 )
 
 demo.launch()
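The substantive change in this commit is the verdict logic: the old code scored at most one face per sampled frame (appending a neutral 0.5 when no face was found) and thresholded the mean fake probability at 0.5, while the new code scores every detected face, counts faces whose fake probability exceeds 0.6, and takes a majority vote, keeping the mean only as a reported confidence. A minimal sketch of the two decision rules, with hypothetical helper names and toy probabilities (not from the repository), shows how the same scores can flip the verdict:

```python
import numpy as np

# Hypothetical helpers distilled from the old and new app.py logic; names are illustrative.
def old_verdict(frame_preds):
    # Pre-commit rule: average the per-frame fake probabilities and threshold the mean at 0.5.
    avg = np.mean(frame_preds)
    return "FAKE" if avg > 0.5 else "REAL"

def new_verdict(face_preds, threshold=0.6):
    # Post-commit rule: count faces above the 0.6 fake threshold and take a majority vote.
    fake_count = sum(p > threshold for p in face_preds)
    real_count = len(face_preds) - fake_count
    return "FAKE" if fake_count > real_count else "REAL"

preds = [0.55, 0.58, 0.62, 0.40]
print(old_verdict(preds))  # FAKE (mean ≈ 0.54 > 0.5)
print(new_verdict(preds))  # REAL (only 1 of 4 faces above 0.6)
```

In the committed code the counts are accumulated inside the detection loop rather than computed afterwards, but the resulting verdict is the same.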