LPX55 committed
Commit 40c95dc · verified · 1 Parent(s): f7dbffd

Update app.py

Files changed (1)
  1. app.py +4 -6
app.py CHANGED
@@ -44,7 +44,8 @@ feature_extractor = SiglipImageProcessor.from_pretrained("lllyasviel/flux_redux_
 image_encoder = SiglipVisionModel.from_pretrained("lllyasviel/flux_redux_bfl", subfolder='image_encoder', torch_dtype=torch.float16).cpu()
 
 # quant_config = DiffusersBitsAndBytesConfig(load_in_8bit=True)
-transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('sirolim/FramePack_F1_I2V_FP8', "FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", use_safetensors=True, torch_dtype=torch.bfloat16).cpu()
+transformer = HunyuanVideoTransformer3DModelPacked.from_single_file("https://huggingface.co/sirolim/FramePack_F1_I2V_FP8/resolve/main/FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", torch_dtype=torch.bfloat16)
+# transformer = HunyuanVideoTransformer3DModelPacked.from_single_file('sirolim/FramePack_F1_I2V_FP8', "FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors", use_safetensors=True, torch_dtype=torch.bfloat16).cpu()
 # transformer = HunyuanVideoTransformer3DModelPacked.from_pretrained('sirolim/FramePack_F1_I2V_FP8', torch_dtype=torch.bfloat16).cpu()
 
 vae.eval()
@@ -429,11 +430,8 @@ quick_prompts = [[x] for x in quick_prompts]
 css = make_progress_bar_css()
 block = gr.Blocks(css=css).queue()
 with block:
-    gr.Markdown('# FramePack-F1')
-    gr.Markdown(f"""### Video diffusion, but feels like image diffusion
-*FramePack F1 - a FramePack model that only predicts future frames from history frames*
-### *beta* FramePack Fill 🖋️- draw a mask over the input image to inpaint the video output
-adapted from the officical code repo [FramePack](https://github.com/lllyasviel/FramePack) by [lllyasviel](lllyasviel/FramePack_F1_I2V_HY_20250503) and [FramePack Studio](https://github.com/colinurbs/FramePack-Studio) 🙌🏻
+    gr.Markdown('# FramePack Essentials | Experimentation in Progress')
+    gr.Markdown(f"""### Space is constantly being tinkered with, expect downtime and errors.
     """)
     with gr.Row():
         with gr.Column():
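
For context on the first hunk: the commit moves transformer loading from a two-argument from_pretrained() call to diffusers-style single-file loading, fetching the FP8 safetensors checkpoint directly from its Hub URL and dropping the trailing .cpu(). Below is a minimal stand-alone sketch of that loading path; the diffusers_helper import path is an assumption based on the upstream FramePack repo layout, not something shown in this diff.

# Sketch of the model-loading path introduced by this commit.
# Assumptions (not from the diff): the import path mirrors the upstream FramePack
# repo's diffusers_helper package, and from_single_file() behaves as diffusers'
# standard single-file loader for this class.
import torch
from diffusers_helper.models.hunyuan_video_packed import HunyuanVideoTransformer3DModelPacked

# Download the FP8 single-file safetensors checkpoint from the Hub and load it in bfloat16.
transformer = HunyuanVideoTransformer3DModelPacked.from_single_file(
    "https://huggingface.co/sirolim/FramePack_F1_I2V_FP8/resolve/main/FramePack_F1_I2V_HY_fp8_e4m3fn.safetensors",
    torch_dtype=torch.bfloat16,
)

# Unlike the removed from_pretrained(...) line, there is no trailing .cpu() here,
# so the module stays wherever the loader places it (CPU by default) until app.py
# moves it later.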