Spaces:
Paused
Paused
add text prompt
Browse files
app.py
CHANGED
@@ -49,7 +49,7 @@ sys.path.append(os.path.join(MV_ADAPTER_CODE_DIR, "scripts"))
|
|
49 |
HEADER = """
|
50 |
# <img src="https://compass.uol/content/dam/aircompanycompass/header/logo-desktop.png" alt="Compass.UOL">
|
51 |
|
52 |
-
# Image to 3D |
|
53 |
|
54 |
## State-of-the-art 3D Generation Using Large-Scale Rectified Flow Transformers
|
55 |
|
@@ -286,7 +286,7 @@ def image_to_3d(
|
|
286 |
|
287 |
@spaces.GPU(duration=120)
|
288 |
@torch.no_grad()
|
289 |
-
def run_texture(image: Image, mesh_path: str, seed: int, req: gr.Request):
|
290 |
height, width = 768, 768
|
291 |
# Prepare cameras
|
292 |
cameras = get_orthogonal_camera(
|
@@ -332,7 +332,7 @@ def run_texture(image: Image, mesh_path: str, seed: int, req: gr.Request):
|
|
332 |
pipe_kwargs["generator"] = torch.Generator(device=DEVICE).manual_seed(seed)
|
333 |
|
334 |
images = mv_adapter_pipe(
|
335 |
-
|
336 |
height=height,
|
337 |
width=width,
|
338 |
num_inference_steps=15,
|
@@ -386,6 +386,7 @@ with gr.Blocks(title="Nestlé | Proof of Concept") as demo:
|
|
386 |
)
|
387 |
|
388 |
with gr.Accordion("Generation Settings", open=True):
|
|
|
389 |
seed = gr.Slider(
|
390 |
label="Seed",
|
391 |
minimum=0,
|
@@ -455,7 +456,7 @@ with gr.Blocks(title="Nestlé | Proof of Concept") as demo:
|
|
455 |
|
456 |
gen_texture_button.click(
|
457 |
run_texture,
|
458 |
-
inputs=[image_prompts, model_output, seed],
|
459 |
outputs=[textured_model_output]
|
460 |
)
|
461 |
|
|
|
49 |
HEADER = """
|
50 |
# <img src="https://compass.uol/content/dam/aircompanycompass/header/logo-desktop.png" alt="Compass.UOL">
|
51 |
|
52 |
+
# Compass.UOL | Nestlé | Image to 3D | Proof of Concept
|
53 |
|
54 |
## State-of-the-art 3D Generation Using Large-Scale Rectified Flow Transformers
|
55 |
|
|
|
286 |
|
287 |
@spaces.GPU(duration=120)
|
288 |
@torch.no_grad()
|
289 |
+
def run_texture(image: Image, mesh_path: str, seed: int, text_prompt: str, req: gr.Request):
|
290 |
height, width = 768, 768
|
291 |
# Prepare cameras
|
292 |
cameras = get_orthogonal_camera(
|
|
|
332 |
pipe_kwargs["generator"] = torch.Generator(device=DEVICE).manual_seed(seed)
|
333 |
|
334 |
images = mv_adapter_pipe(
|
335 |
+
text_prompt,
|
336 |
height=height,
|
337 |
width=width,
|
338 |
num_inference_steps=15,
|
|
|
386 |
)
|
387 |
|
388 |
with gr.Accordion("Generation Settings", open=True):
|
389 |
+
text_prompt = gr.Textbox(label="Prompt", placeholder="Enter your prompt", value="high quality")
|
390 |
seed = gr.Slider(
|
391 |
label="Seed",
|
392 |
minimum=0,
|
|
|
456 |
|
457 |
gen_texture_button.click(
|
458 |
run_texture,
|
459 |
+
inputs=[image_prompts, model_output, seed, text_prompt],
|
460 |
outputs=[textured_model_output]
|
461 |
)
|
462 |
|