import os
import tempfile

import gradio as gr

from utils import *
from get_transcripts_with_openai import get_langchain_Document_for_rag


def answer_video_question(query: str, url: str, file: bytes) -> dict:
    """Answer a question about a movie supplied either as an uploaded file or a URL."""
    # Either `file` or `url` must be provided; `query` is collected by the UI
    # but is not yet used when building the response.
    output_path = '/tmp/video'
    if not os.path.exists(output_path):
        os.mkdir(output_path)

    if file is not None:
        # Persist the uploaded bytes so the frame/transcript extractors can read a real file.
        with tempfile.NamedTemporaryFile(delete=False, suffix=".mp4") as temp_vid:
            temp_vid.write(file)
            temp_video_path = temp_vid.name

        # Extract frames and build Documents for RAG (the frame Documents are not returned here).
        all_frames_data = extract_nfps_frames(temp_video_path)
        langchain_documents = provide_video_RAG(all_frames_data)
        langchain_transcripts = get_langchain_Document_for_rag(temp_video_path)
        os.unlink(temp_video_path)  # clean up the temporary file
        return {
            "status_vid_frame_from_file": all_frames_data,
            "langchain transcript": str(langchain_transcripts),
        }
    elif url:
        files_path = download_video(url)
        check = extract_keyframes(files_path['video_path'])
        return {"out_vid_path_from_url": check}
    else:
        return {"error": "Please provide a movie file or URL."}


# Create the Gradio interface
demo = gr.Interface(
    fn=answer_video_question,
    inputs=[
        gr.Textbox(placeholder="Enter a query about the movie", label="Query"),
        gr.Textbox(placeholder="Paste the URL of the movie", label="Movie URL (optional)"),
        gr.File(label="Upload Movie File (optional)", type='binary'),
    ],
    outputs=gr.JSON(),
    title="Video Question Answering",
    description="Ask a question about a movie. You can provide a movie via a URL or by uploading a file. The movie will be cached and deleted when the Space goes to sleep.",
)

# Launch the interface and MCP server
if __name__ == "__main__":
    demo.launch(mcp_server=True, server_port=7777)
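
# A minimal usage sketch for calling the running app with gradio_client, assuming the
# server is reachable at http://127.0.0.1:7777 and exposes the default Interface
# endpoint "/predict" (the URL, question, and host below are placeholders). Kept as
# comments so this module stays a self-contained script:
#
#   from gradio_client import Client
#   client = Client("http://127.0.0.1:7777/")
#   result = client.predict(
#       "What happens in the opening scene?",   # Query
#       "https://example.com/movie.mp4",        # Movie URL (only the URL path is exercised)
#       None,                                   # Uploaded file left empty
#       api_name="/predict",
#   )
#   print(result)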