import gradio as gr
from textblob import TextBlob
import cv2
from deepface import DeepFace
import moviepy.editor as mp  # moviepy < 2.0; version 2.x removed the editor module
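
# The imports above map roughly to:
#   pip install gradio textblob deepface opencv-python "moviepy<2"
# (assumed package names; the original does not pin dependencies)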

# 1. Text Sentiment Analysis
def analyze_text(text):
    blob = TextBlob(text)
    sentiment = blob.sentiment.polarity  # polarity lies in [-1.0, 1.0]
    if sentiment > 0:
        result = "Positive Sentiment 😊"
    elif sentiment < 0:
        result = "Negative Sentiment 😢"
    else:
        result = "Neutral Sentiment 😐"
    return result
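
# Example behavior (assuming TextBlob's default PatternAnalyzer):
#   analyze_text("I love this!")   -> "Positive Sentiment 😊"
#   analyze_text("This is awful.") -> "Negative Sentiment 😢"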

# 2. Face Emotion Detection
def analyze_face(image):
    if image is None:
        return "Please upload an image."
    try:
        # gr.Image(type="numpy") delivers RGB, while DeepFace (via OpenCV) expects BGR
        bgr = cv2.cvtColor(image, cv2.COLOR_RGB2BGR)
        analysis = DeepFace.analyze(bgr, actions=['emotion'], enforce_detection=False)
        emotion = analysis[0]['dominant_emotion']
        return f"Detected Emotion: {emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

# 3. Video Emotion Detection
def analyze_video(video_path):
    # gr.File(type="filepath") passes the uploaded file's path as a string,
    # so no manual temp-file copy is needed
    if video_path is None:
        return "Please upload a video."
    clip = mp.VideoFileClip(video_path)
    frame = clip.get_frame(clip.duration / 2)  # sample the middle frame (RGB)
    frame_bgr = cv2.cvtColor(frame, cv2.COLOR_RGB2BGR)  # DeepFace expects BGR

    try:
        analysis = DeepFace.analyze(frame_bgr, actions=['emotion'], enforce_detection=False)
        emotion = analysis[0]['dominant_emotion']
        return f"Detected Emotion in Video: {emotion}"
    except Exception as e:
        return f"Error: {str(e)}"

# Gradio Interface
with gr.Blocks() as demo:
    gr.Markdown("# Emotion & Sentiment Analyzer 🎯")
    gr.Markdown("Analyze Text, Face (Image), or Video for emotions!")

    with gr.Tabs():
        with gr.TabItem("Text Sentiment"):
            text_input = gr.Textbox(label="Enter Text")
            text_output = gr.Label()
            text_button = gr.Button("Analyze Text")
            text_button.click(analyze_text, inputs=text_input, outputs=text_output)

        with gr.TabItem("Face Emotion (Image)"):
            image_input = gr.Image(type="numpy", label="Upload Face Image")
            image_output = gr.Label()
            image_button = gr.Button("Analyze Face Emotion")
            image_button.click(analyze_face, inputs=image_input, outputs=image_output)

        with gr.TabItem("Video Emotion"):
            video_input = gr.File(label="Upload Video (.mp4)", type="filepath", file_types=[".mp4"])
            video_output = gr.Label()
            video_button = gr.Button("Analyze Video Emotion")
            video_button.click(analyze_video, inputs=video_input, outputs=video_output)

if __name__ == "__main__":
    demo.launch()  # share=True would additionally expose a temporary public URL