rahul7star committed on
Commit 6f6c379 · verified · 1 Parent(s): e677a02

Update app.py

Files changed (1)
  1. app.py +23 -23
app.py CHANGED
@@ -1,28 +1,28 @@
- # app.py for Hugging Face Space
-
- import gradio as gr
- from transformers import GPT2LMHeadModel, GPT2Tokenizer
- from huggingface_hub import login
-
- # Load the fine-tuned model from Hugging Face Model Hub
- model_name = "rahul7star/Rahul-FineTunedLLM-v03"  # Replace with your model's path on the Hugging Face Hub
- tokenizer = GPT2Tokenizer.from_pretrained(model_name)
- model = GPT2LMHeadModel.from_pretrained(model_name)
-
- # Function to generate a response based on user input
- def generate_response(query):
-     inputs = tokenizer(query, return_tensors="pt")
-     outputs = model.generate(**inputs)
-     return tokenizer.decode(outputs[0], skip_special_tokens=True)
-
- # Gradio Interface
- interface = gr.Interface(
-     fn=generate_response,
-     inputs=gr.Textbox(label="Enter a query"),
-     outputs=gr.Textbox(label="Model Response"),
-     title="Fine-Tuned GPT-2 Model",
-     description="This model generates responses based on user input. It was fine-tuned with real-time web search data."
- )
-
- # Launch the Gradio interface
- interface.launch()
+ import torch
+ from diffusers.utils import export_to_video
+ from diffusers import AutoencoderKLWan, WanPipeline
+ from diffusers.schedulers.scheduling_unipc_multistep import UniPCMultistepScheduler
+
+ model_id = "Wan-AI/Wan2.1-T2V-14B-Diffusers"
+ vae = AutoencoderKLWan.from_pretrained(model_id, subfolder="vae", torch_dtype=torch.float32)
+ pipe = WanPipeline.from_pretrained(model_id, vae=vae, torch_dtype=torch.bfloat16)
+ flow_shift = 3.0  # 5.0 for 720P, 3.0 for 480P
+ pipe.scheduler = UniPCMultistepScheduler.from_config(pipe.scheduler.config, flow_shift=flow_shift)
+ pipe.to("cuda")
+
+ pipe.load_lora_weights("noahsolomon/wan2.1-yumemono")
+
+ pipe.enable_model_cpu_offload()  # for low-VRAM environments
+
+ prompt = "Yumemono"
+ negative_prompt = "Bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
+
+ output = pipe(
+     prompt=prompt,
+     negative_prompt=negative_prompt,
+     height=480,
+     width=832,
+     num_frames=81,
+     guidance_scale=5.0,
+ ).frames[0]
+ export_to_video(output, "output.mp4", fps=16)
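The rewritten app.py runs the Wan2.1 text-to-video pipeline as a plain script, so the Space no longer serves the Gradio interface that the previous version did. Below is a minimal sketch of how the pipeline could be wrapped back into a Gradio app; it reuses pipe, negative_prompt, and export_to_video from the committed script, while the generate_video function name and the UI labels are assumptions, not part of this commit.

import gradio as gr

# Hypothetical wrapper, assuming `pipe`, `negative_prompt`, and
# `export_to_video` are defined as in the committed script above.
def generate_video(user_prompt):
    frames = pipe(
        prompt=user_prompt,
        negative_prompt=negative_prompt,
        height=480,
        width=832,
        num_frames=81,
        guidance_scale=5.0,
    ).frames[0]
    # export_to_video writes the frames to disk and returns the file path,
    # which gr.Video can display directly.
    return export_to_video(frames, "output.mp4", fps=16)

interface = gr.Interface(
    fn=generate_video,
    inputs=gr.Textbox(label="Enter a prompt"),
    outputs=gr.Video(label="Generated video"),
    title="Wan2.1 T2V + Yumemono LoRA",  # assumed title, not in the commit
)
interface.launch()

Since an 81-frame 480p generation takes minutes on a single GPU, calling interface.queue() before launch() would be sensible for a public Space.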