import gradio as gr
from PIL import Image
from typing import Tuple
from FlowFacade import FlowFacade
from css_style import DELTAFLOW_CSS
from prompt_examples import PROMPT_EXAMPLES
class UIManager:
    """Gradio front-end for VividFlow image-to-video generation.

    Builds the Blocks UI, wires its events, and delegates all actual
    work (validation, prompt expansion, video generation) to the
    injected FlowFacade.
    """

    def __init__(self, facade: FlowFacade):
        # Backend facade used for image validation and video generation.
        self.facade = facade
def create_interface(self) -> gr.Blocks:
with gr.Blocks(
theme=gr.themes.Soft(),
css=DELTAFLOW_CSS,
title="VividFlow - Fast AI Image to Video"
) as interface:
# Header
gr.HTML("""
""")
with gr.Row():
# Left Panel: Input
with gr.Column(scale=1, elem_classes="input-card"):
gr.Markdown("### ๐ค Input")
image_input = gr.Image(
label="Upload Image (any type: photo, art, cartoon, etc.)",
type="pil",
elem_classes="image-upload",
height=320
)
resolution_info = gr.Markdown(
value="",
visible=False,
elem_classes="info-text"
)
prompt_input = gr.Textbox(
label="Motion Instruction",
placeholder="Describe camera movements (zoom, pan, orbit) and subject actions (head turn, hair flow, expression change). Be specific and cinematic! Example: 'Camera slowly zooms in, subject's eyes sparkle, hair flows gently in wind'",
lines=3,
max_lines=6
)
# Quick preset selector
category_dropdown = gr.Dropdown(
choices=list(PROMPT_EXAMPLES.keys()),
label="๐ก Quick Prompt Category",
value="๐ Fashion / Beauty (Facial Only)",
interactive=True
)
example_dropdown = gr.Dropdown(
choices=PROMPT_EXAMPLES["๐ Fashion / Beauty (Facial Only)"],
label="Example Prompts (click to use)",
value=None,
interactive=True
)
# Quality tips banner (blue)
gr.HTML("""
๐ก Choose the Right Prompt Category:
โข ๐ Facial Only: Safe for headshots and portraits without visible hands
โข ๐ Hands Visible Required: Only use if hands are fully visible in your image (prevents artifacts)
โข ๐ Scenery/Objects: For landscapes, products, and abstract content
""")
# Generate button with patience banner
gr.HTML("""
โฑ๏ธ Models are Initializing!
This first-time generation may take a moment while high-fidelity assets load into memory.
Grab a coffee โ, and watch the magic happen! Subsequent runs will be significantly faster.
""")
generate_btn = gr.Button(
"๐ฌ Generate Video",
variant="primary",
elem_classes="primary-button",
size="lg"
)
# Advanced settings
with gr.Accordion("โ๏ธ Advanced Settings", open=False):
duration_slider = gr.Slider(
minimum=0.5,
maximum=5.0,
step=0.5,
value=3.0,
label="Duration (seconds)",
info="3.0s = 49 frames, 5.0s = 81 frames (16fps)"
)
steps_slider = gr.Slider(
minimum=4,
maximum=12,
step=1,
value=4,
label="Inference Steps",
info="4-6 recommended โข Higher steps = longer generation time"
)
with gr.Row():
guidance_scale = gr.Slider(
minimum=0.0,
maximum=5.0,
step=0.5,
value=1.0,
label="Guidance Scale (high noise)"
)
guidance_scale_2 = gr.Slider(
minimum=0.0,
maximum=5.0,
step=0.5,
value=1.0,
label="Guidance Scale (low noise)"
)
with gr.Row():
seed_input = gr.Number(
label="Seed",
value=42,
precision=0,
minimum=0,
maximum=2147483647,
info="Use same seed for reproducible results"
)
randomize_seed = gr.Checkbox(
label="Randomize Seed",
value=True,
info="Generate different results each time"
)
enable_ai_prompt = gr.Checkbox(
label="๐ค Enable AI Prompt Expansion (Qwen2.5)",
value=False,
info="Use AI to enhance your prompt (adds ~30s)"
)
# Right Panel: Output
with gr.Column(scale=1, elem_classes="output-card"):
gr.Markdown("### ๐ฅ Output")
video_output = gr.Video(
label="Generated Video",
height=400,
autoplay=True
)
with gr.Row():
prompt_output = gr.Textbox(
label="Final Prompt Used",
lines=3,
interactive=False,
scale=3
)
seed_output = gr.Number(
label="Seed Used",
precision=0,
interactive=False,
scale=1
)
# Info section
with gr.Row():
gr.HTML("""
โน๏ธ Tips for Best Results:
โข Use example prompts: Select a category above and click an example to get started
โข Works with ANY image: Fashion portraits, anime, landscapes, products, abstract art, etc.
โข For dramatic effects: Choose prompts with words like "explosive", "dramatic", "swirls", "transforms"
โข Image quality matters: Higher resolution and clear subjects produce better results
""")
# Footer
gr.HTML("""
""")
def update_examples(category):
return gr.update(choices=PROMPT_EXAMPLES[category], value=None)
def fill_prompt(selected_example):
return selected_example if selected_example else ""
def show_resolution_info(image):
if image is None:
return gr.update(value="", visible=False)
from PIL import Image
original_w, original_h = image.size
resized_image = self.facade.video_engine.resize_image(image)
output_w, output_h = resized_image.width, resized_image.height
info = f"**๐ Resolution:** Input: {original_w}ร{original_h} โ Output: {output_w}ร{output_h}"
return gr.update(value=info, visible=True)
category_dropdown.change(fn=update_examples, inputs=[category_dropdown],
outputs=[example_dropdown])
example_dropdown.change(fn=fill_prompt, inputs=[example_dropdown],
outputs=[prompt_input])
image_input.change(fn=show_resolution_info, inputs=[image_input],
outputs=[resolution_info])
generate_btn.click(
fn=self._handle_generation,
inputs=[
image_input,
prompt_input,
duration_slider,
steps_slider,
guidance_scale,
guidance_scale_2,
seed_input,
randomize_seed,
enable_ai_prompt
],
outputs=[video_output, prompt_output, seed_output],
show_progress=True
)
return interface
def _handle_generation(self, image: Image.Image, prompt: str, duration: float,
steps: int, guidance_1: float, guidance_2: float, seed: int,
randomize: bool, enable_ai: bool,
progress=gr.Progress()) -> Tuple[str, str, int]:
try:
if image is None:
raise gr.Error("โ Please upload an image")
if not prompt or prompt.strip() == "":
raise gr.Error("โ Please provide a motion instruction")
if not self.facade.validate_image(image):
raise gr.Error("โ Image dimensions invalid (256-4096px)")
video_path, final_prompt, seed_used = self.facade.generate_video_from_image(
image=image,
user_instruction=prompt,
duration_seconds=duration,
num_inference_steps=steps,
guidance_scale=guidance_1,
guidance_scale_2=guidance_2,
seed=int(seed),
randomize_seed=randomize,
enable_prompt_expansion=enable_ai,
progress=progress
)
return video_path, final_prompt, seed_used
except gr.Error:
raise
except Exception as e:
import traceback
import os
error_msg = str(e)
if os.environ.get('DEBUG'):
print(f"\nโ UI Error: {type(e).__name__}")
print(traceback.format_exc())
if "CUDA out of memory" in error_msg or "OutOfMemoryError" in error_msg:
raise gr.Error("โ GPU memory insufficient. Try reducing duration/steps or restart.")
else:
raise gr.Error(f"โ Generation failed: {error_msg}")
def launch(self, share: bool = False, server_name: str = "0.0.0.0",
server_port: int = None, **kwargs) -> None:
interface = self.create_interface()
interface.launch(share=share, server_name=server_name,
server_port=server_port, **kwargs)