baggy2797 committed
Commit 51160c7 · verified · 1 Parent(s): abb2034

Create App.py

Files changed (1)
  1. app.py +52 -0
app.py ADDED
@@ -0,0 +1,52 @@
+ from transformers import AutoModelForCausalLM, AutoTokenizer
+ import gradio as gr
+ import torch
+
+ # Load the model and tokenizer
+ model_name = "Skywork/SkyReels-V2-DF-1.3B-540P"
+ tokenizer = AutoTokenizer.from_pretrained(model_name)
+ model = AutoModelForCausalLM.from_pretrained(model_name)
+
+ # Set device
+ device = "cuda" if torch.cuda.is_available() else "cpu"
+ model = model.to(device)
+
+ def generate_video(prompt, max_length=512, temperature=0.7, top_k=50, top_p=0.95):
+     """
+     Generate video based on text prompt
+     """
+     # Tokenize input
+     inputs = tokenizer(prompt, return_tensors="pt").to(device)
+
+     # Generate output
+     with torch.no_grad():
+         outputs = model.generate(
+             **inputs,
+             max_length=max_length,
+             temperature=temperature,
+             top_k=top_k,
+             top_p=top_p,
+             do_sample=True
+         )
+
+     # Decode and return the output
+     generated_text = tokenizer.decode(outputs[0], skip_special_tokens=True)
+
+     # In a real implementation, you would process this into a video
+     # For demo purposes, we'll just return the generated text
+     return generated_text
+
+ # Create Gradio interface
+ iface = gr.Interface(
+     fn=generate_video,
+     inputs=gr.Textbox(lines=2, placeholder="Enter your video prompt here..."),
+     outputs="text",
+     title="SkyReels Video Generation",
+     description="Generate video content using Skywork/SkyReels-V2-DF-1.3B-540P model",
+     examples=[
+         ["A sunny day at the beach with waves crashing"],
+         ["A futuristic cityscape at night with flying cars"]
+     ]
+ )
+
+ iface.launch()
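Not part of the commit itself, but for reference: a minimal sketch of calling the launched demo from Python with gradio_client, assuming the app is running locally on Gradio's default port (7860) and exposing gr.Interface's default /predict endpoint; the address and endpoint name are assumptions, not taken from this change.

from gradio_client import Client

# Connect to the locally running Gradio app (default local address is an assumption)
client = Client("http://127.0.0.1:7860/")

# Send a prompt to the single Textbox input; /predict is gr.Interface's default API name
result = client.predict(
    "A sunny day at the beach with waves crashing",
    api_name="/predict",
)
print(result)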