AiCoderv2 committed
Commit 76f7b71 · verified · 1 Parent(s): 686d6c4

Create app.py

Files changed (1)
  1. app.py +58 -0
app.py ADDED
@@ -0,0 +1,58 @@
+ import os
+ import gradio as gr
+ from diffusers import DiffusionPipeline
+ import torch
+
+ # === Configure cache directory ===
+ cache_dir = os.path.expanduser("~/Downloads/Openking")
+ os.makedirs(cache_dir, exist_ok=True)
+
+ # Set Hugging Face cache environment variables
+ os.environ["HF_HOME"] = cache_dir
+ os.environ["HF_HUB_CACHE"] = cache_dir
+ os.environ["HF_DATASETS_CACHE"] = cache_dir
+
+ # === Load Hugging Face token from secrets (required for private models) ===
+ # In Hugging Face Spaces, store your token as a secret named "HF_TOKEN"
+ hf_token = os.getenv("HF_TOKEN")
+ if not hf_token:
+     raise ValueError("Please set your Hugging Face token as a secret named 'HF_TOKEN' in your Space settings.")
+
+ # === Load the model ===
+ model_id = "Wan-AI/Wan2.1-T2V-1.3B-Diffusers"
+
+ try:
+     pipe = DiffusionPipeline.from_pretrained(
+         model_id,
+         token=hf_token,  # `use_auth_token` is deprecated in recent diffusers releases
+         cache_dir=cache_dir,
+         torch_dtype=torch.float16,
+         variant="fp16"
+     )
+     pipe = pipe.to("cuda" if torch.cuda.is_available() else "cpu")
+ except Exception as e:
+     raise RuntimeError(f"Failed to load model: {e}")
+
+ # === Gradio interface ===
+ def generate_video(prompt: str, num_inference_steps: int = 50):
+     try:
+         # Note: Adjust this call based on the actual model's inference API.
+         # Since this is a text-to-video model, the exact method may vary.
+         # This is a placeholder: check the model card for correct usage.
+         video_frames = pipe(prompt, num_inference_steps=num_inference_steps).frames
+         # For now, return a placeholder message
+         return f"Generated video for: '{prompt}' with {num_inference_steps} steps. (Output handling depends on model output format.)"
+     except Exception as e:
+         return f"Error: {str(e)}"
+
+ with gr.Blocks() as demo:
+     gr.Markdown("# 🎥 Wan2.1 Text-to-Video Generator")
+     prompt = gr.Textbox(label="Prompt", placeholder="A cat flying through space...")
+     steps = gr.Slider(10, 100, value=50, step=1, label="Inference Steps")
+     output = gr.Textbox(label="Result")
+     btn = gr.Button("Generate Video")
+     btn.click(generate_video, inputs=[prompt, steps], outputs=output)
+
+ # Launch app
+ if __name__ == "__main__":
+     demo.launch()
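
Note: the committed generate_video computes video_frames but never uses it; its own comments say the output handling still needs to be filled in. A minimal sketch of one way to do that, redefining the function from the file above, assuming the pipeline output exposes .frames indexed by batch (as other diffusers video pipelines do) and that diffusers.utils.export_to_video is available; the temporary .mp4 path, fps value, and the gr.Video swap are illustrative choices, not taken from the model card:

import tempfile
from diffusers.utils import export_to_video

def generate_video(prompt: str, num_inference_steps: int = 50):
    try:
        result = pipe(prompt=prompt, num_inference_steps=int(num_inference_steps))
        frames = result.frames[0]  # assumed: frames of the first (only) video in the batch
        # Write the frames to a temporary .mp4 so Gradio can serve it back to the browser
        video_path = tempfile.NamedTemporaryFile(suffix=".mp4", delete=False).name
        export_to_video(frames, video_path, fps=16)  # fps is an illustrative choice
        return video_path
    except Exception as e:
        raise gr.Error(f"Generation failed: {e}")

# In the Blocks UI, the result component would then become a video player instead of a Textbox:
# output = gr.Video(label="Result")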