import gradio as gr
import cv2
import numpy as np
import torch
from transformers import AutoImageProcessor, SiglipForImageClassification
from PIL import Image
import matplotlib.pyplot as plt
import tempfile
import os

# Load model once
model_name = "prithivMLmods/deepfake-detector-model-v1"
processor = AutoImageProcessor.from_pretrained(model_name)
model = SiglipForImageClassification.from_pretrained(model_name)
model.eval()
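
# The scoring below assumes logit index 1 is the "fake" class. That mapping is
# an assumption about this checkpoint, not something guaranteed by the API;
# printing the label map at startup is a cheap sanity check:
print("Model labels:", model.config.id2label)  # e.g. {0: 'real', 1: 'fake'} if the assumption holds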

# Load OpenCV Haar face detector
face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
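# Note: Haar cascades only detect roughly frontal faces; frames where no face
# is found fall through to the neutral 0.5 score inside analyze_deepfake below.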

def analyze_deepfake(video_path):
    cap = cv2.VideoCapture(video_path)
    frame_preds = []
    frame_count = 0
    max_frames = 60  # cap analysis at the first 60 frames to bound runtime

    while True:
        ret, frame = cap.read()
        if not ret or frame_count >= max_frames:
            break
        # Detect faces on the grayscale frame
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=5)
        found = False
        for (x, y, w, h) in faces:
            face = frame[y:y+h, x:x+w]
            if face.size == 0:
                continue
            # Classify the face crop (OpenCV frames are BGR; the model expects RGB)
            face_rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            inputs = processor(images=Image.fromarray(face_rgb), return_tensors="pt")
            with torch.no_grad():
                logits = model(**inputs).logits
            fake_prob = torch.softmax(logits, dim=-1)[0][1].item()
            frame_preds.append(fake_prob)
            found = True
            break  # score only the first detected face per frame
        if not found:
            frame_preds.append(0.5)  # neutral score for frames without a detectable face
        frame_count += 1
    cap.release()

    # Final result: average the per-frame fake probabilities
    if frame_preds:
        avg = np.mean(frame_preds)
        verdict = "FAKE" if avg > 0.5 else "REAL"
        result_text = f"FINAL RESULT: **{verdict}** (confidence: {avg:.2f})"
    else:
        result_text = "Could not read any frames from the video. Please try another file."

    # Plot histogram of per-frame scores
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.hist(frame_preds, bins=10, color="orange", edgecolor="black")
    ax.set_title("Fake Confidence per Frame")
    ax.set_xlabel("Confidence (0=Real, 1=Fake)")
    ax.set_ylabel("Frame Count")
    ax.grid(True)

    # Save plot to a temp file so Gradio can display it by path
    plot_path = os.path.join(tempfile.gettempdir(), "plot.png")
    fig.savefig(plot_path)
    plt.close(fig)

    return result_text, plot_path
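
# Analyzing only the first 60 frames can miss manipulation later in the clip.
# A minimal sketch of uniform sampling instead (hypothetical helper, not wired
# into the app above; indices would be used with cap.set(cv2.CAP_PROP_POS_FRAMES, idx)):
def sample_frame_indices(video_path, num_samples=60):
    cap = cv2.VideoCapture(video_path)
    total = int(cap.get(cv2.CAP_PROP_FRAME_COUNT))
    cap.release()
    if total <= 0:
        return list(range(num_samples))  # fall back to sequential reads
    step = max(total // num_samples, 1)
    return list(range(0, total, step))[:num_samples]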

# Gradio interface
demo = gr.Interface(
    fn=analyze_deepfake,
    inputs=gr.Video(label="Upload a video (MP4 only)"),
    outputs=[
        gr.Markdown(label="Result"),
        gr.Image(type="filepath", label="Confidence Histogram"),
    ],
    title="Deepfake Video Detection App",
    description="Upload a video. The model will detect faces and determine whether the video is REAL or FAKE using frame-level deepfake classification.",
)

demo.launch()
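
# To run locally (assumed dependency set, inferred from the imports above; a
# Hugging Face Space would normally pin these in requirements.txt):
#   pip install gradio opencv-python torch transformers pillow matplotlib numpy
#   python app.py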