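"""Gradio Space: deepfake video detector.

Samples frames from an uploaded .mp4, crops detected faces, and scores each
crop with a SigLIP image classifier fine-tuned for deepfake detection.
"""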
import gradio as gr
import cv2
import numpy as np
import torch
from transformers import AutoImageProcessor, SiglipForImageClassification
from PIL import Image
import matplotlib.pyplot as plt
# Load model and processor
model_name = "prithivMLmods/deepfake-detector-model-v1"
processor = AutoImageProcessor.from_pretrained(model_name)
model = SiglipForImageClassification.from_pretrained(model_name)
model.eval()
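# NOTE: inference runs on the CPU by default; moving the model and inputs to
# a GPU (model.to("cuda"), inputs.to("cuda")) would speed up per-frame scoring.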
# Face detector
face_detector = cv2.CascadeClassifier(cv2.data.haarcascades + "haarcascade_frontalface_default.xml")
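# NOTE: the frontal-face Haar cascade is fast but misses tilted or profile
# faces; a DNN-based detector would be more robust at some speed cost.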
def analyze_deepfake(video_path):
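    """Classify faces sampled from a video as real or fake.

    Returns a Markdown verdict string and a matplotlib histogram of
    per-frame fake-confidence scores (or None if no faces were found).
    """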
    if video_path is None:
        return "Please upload a valid .mp4 video.", None
    cap = cv2.VideoCapture(video_path)
    frame_preds = []
    real_count = 0
    fake_count = 0
    frame_index = 0
    max_frames = 20  # stop after this many analyzed face frames
    frame_skip = 10  # sample every 10th frame to keep inference fast
    while True:
        ret, frame = cap.read()
        if not ret or len(frame_preds) >= max_frames:  # end of video or enough faces scored
            break
        if frame_index % frame_skip != 0:
            frame_index += 1
            continue
        gray = cv2.cvtColor(frame, cv2.COLOR_BGR2GRAY)
        faces = face_detector.detectMultiScale(gray, scaleFactor=1.1, minNeighbors=4)
        if len(faces) > 0:
            x, y, w, h = faces[0]  # analyze the first detected face
            face = frame[y:y + h, x:x + w]
            if face.size == 0:
                frame_index += 1
                continue
            face = cv2.resize(face, (224, 224))
            rgb = cv2.cvtColor(face, cv2.COLOR_BGR2RGB)
            img = Image.fromarray(rgb)
            inputs = processor(images=img, return_tensors="pt")
            with torch.no_grad():
                logits = model(**inputs).logits
            # Probability of the fake class; index 1 is assumed to map to "fake"
            # (check model.config.id2label to confirm the label order).
            prob = torch.softmax(logits, dim=-1)[0][1].item()
            frame_preds.append(prob)
            if prob > 0.6:
                fake_count += 1
            else:
                real_count += 1
        frame_index += 1
    cap.release()
    if not frame_preds:
        return "No faces detected. Try a clearer video.", None
    avg_conf = np.mean(frame_preds)
    verdict = "FAKE" if fake_count > real_count else "REAL"
result = f"""
β
**Final Result: {verdict}**
π’ Real Frames: {real_count}
π΄ Fake Frames: {fake_count}
π Avg Confidence: {avg_conf:.2f}
"""
    # Histogram of per-frame fake-confidence scores
    fig, ax = plt.subplots(figsize=(6, 4))
    ax.hist(frame_preds, bins=10, color="red" if verdict == "FAKE" else "green", edgecolor="black")
    ax.set_title("Fake Confidence per Face Frame")
    ax.set_xlabel("Confidence (0 = Real, 1 = Fake)")
    ax.set_ylabel("Frame Count")
    ax.grid(True)
    return result, fig
# Gradio UI
with gr.Blocks() as demo:
    gr.Markdown("## Fast & Accurate Deepfake Video Detector")
    gr.Markdown(
        "Upload a short `.mp4` video. The app detects faces, classifies each one, "
        "and reports whether the video looks REAL or FAKE with a confidence histogram."
    )
    video_input = gr.Video(label="Upload your video")
    result_output = gr.Markdown(label="Detection Result")
    graph_output = gr.Plot(label="Confidence Histogram")
    analyze_btn = gr.Button("Analyze")
    analyze_btn.click(fn=analyze_deepfake, inputs=video_input, outputs=[result_output, graph_output])

demo.queue().launch()
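# Run this script directly (e.g. as the app.py entrypoint of a Hugging Face
# Space); Gradio serves the UI on port 7860 by default.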