Build error
Update app.py
app.py CHANGED
@@ -7,17 +7,14 @@ import sys
 import json
 import warnings
 from datetime import datetime
-
 import gradio as gr
 warnings.filterwarnings('ignore')
-
 import random
-
 import torch
 import torch.distributed as dist
 from PIL import Image
 import subprocess
-
+import spaces # Add ZeroGPU import
 import wan
 from wan.configs import SIZE_CONFIGS, SUPPORTED_SIZES, WAN_CONFIGS
 from wan.utils.utils import cache_image, cache_video, str2bool
@@ -25,7 +22,6 @@ from wan.utils.multitalk_utils import save_video_ffmpeg
 from kokoro import KPipeline
 from transformers import Wav2Vec2FeatureExtractor
 from src.audio_analysis.wav2vec2 import Wav2Vec2Model
-
 import librosa
 import pyloudnorm as pyln
 import numpy as np
@@ -33,10 +29,583 @@ from einops import rearrange
 import soundfile as sf
 import re
 
+# Global variables for model management
+wav2vec_feature_extractor = None
+audio_encoder = None
+wan_i2v = None
+
 def _validate_args(args):
     # Basic check
     assert args.ckpt_dir is not None, "Please specify the checkpoint directory."
     assert args.task in WAN_CONFIGS, f"Unsupport task: {args.task}"
+
+    # The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.
+    if args.sample_steps is None:
+        args.sample_steps = 40
+
+    if args.sample_shift is None:
+        if args.size == 'infinitetalk-480':
+            args.sample_shift = 7
+        elif args.size == 'infinitetalk-720':
+            args.sample_shift = 11
+        else:
+            raise NotImplementedError(f'Not supported size')
+
+    args.base_seed = args.base_seed if args.base_seed >= 0 else random.randint(0, 99999999)
+
+    # Size check
+    assert args.size in SUPPORTED_SIZES[args.task], f"Unsupport size {args.size} for task {args.task}, supported sizes are: {', '.join(SUPPORTED_SIZES[args.task])}"
+
+def get_default_args():
+    """Get default arguments for ZeroGPU environment"""
+    class Args:
+        def __init__(self):
+            self.task = "infinitetalk-14B"
+            self.size = "infinitetalk-480"
+            self.frame_num = 81
+            self.ckpt_dir = './weights/Wan2.1-I2V-14B-480P'
+            self.quant_dir = None
+            self.infinitetalk_dir = 'weights/InfiniteTalk/single/infinitetalk.safetensors'
+            self.wav2vec_dir = './weights/chinese-wav2vec2-base'
+            self.dit_path = None
+            self.lora_dir = None
+            self.lora_scale = [1.2]
+            self.offload_model = True # Always offload for ZeroGPU
+            self.ulysses_size = 1
+            self.ring_size = 1
+            self.t5_fsdp = False
+            self.t5_cpu = True # Use CPU for T5 to save GPU memory
+            self.dit_fsdp = False
+            self.save_file = None
+            self.audio_save_dir = 'save_audio/gradio'
+            self.base_seed = 42
+            self.input_json = 'examples.json'
+            self.motion_frame = 9
+            self.mode = "streaming"
+            self.sample_steps = 8 # Reduced for faster inference
+            self.sample_shift = 7
+            self.sample_text_guide_scale = 5.0
+            self.sample_audio_guide_scale = 4.0
+            self.num_persistent_param_in_dit = None
+            self.use_teacache = False
+            self.teacache_thresh = 0.2
+            self.use_apg = False
+            self.apg_momentum = -0.75
+            self.apg_norm_threshold = 55
+            self.color_correction_strength = 1.0
+            self.quant = None
+
+    args = Args()
+    _validate_args(args)
+    return args
+
+@spaces.GPU(duration=120) # Allocate GPU for 2 minutes
+def initialize_models():
+    """Initialize models on GPU"""
+    global wav2vec_feature_extractor, audio_encoder, wan_i2v
+
+    args = get_default_args()
+
+    if wav2vec_feature_extractor is None or audio_encoder is None:
+        device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+
+        # Initialize audio models
+        audio_encoder = Wav2Vec2Model.from_pretrained(args.wav2vec_dir, local_files_only=True).to(device)
+        audio_encoder.feature_extractor._freeze_parameters()
+        wav2vec_feature_extractor = Wav2Vec2FeatureExtractor.from_pretrained(args.wav2vec_dir, local_files_only=True)
+
+    os.makedirs(args.audio_save_dir, exist_ok=True)
+
+    # Initialize WAN pipeline
+    cfg = WAN_CONFIGS[args.task]
+    wan_i2v = wan.InfiniteTalkPipeline(
+        config=cfg,
+        checkpoint_dir=args.ckpt_dir,
+        quant_dir=args.quant_dir,
+        device_id=0,
+        rank=0,
+        t5_fsdp=args.t5_fsdp,
+        dit_fsdp=args.dit_fsdp,
+        use_usp=False,
+        t5_cpu=args.t5_cpu,
+        lora_dir=args.lora_dir,
+        lora_scales=args.lora_scale,
+        quant=args.quant,
+        dit_path=args.dit_path,
+        infinitetalk_dir=args.infinitetalk_dir
+    )
+
+    if args.num_persistent_param_in_dit is not None:
+        wan_i2v.vram_management = True
+        wan_i2v.enable_vram_management(
+            num_persistent_param_in_dit=args.num_persistent_param_in_dit
+        )
+
+    return "Models initialized successfully!"
+
+def loudness_norm(audio_array, sr=16000, lufs=-23):
+    meter = pyln.Meter(sr)
+    loudness = meter.integrated_loudness(audio_array)
+    if abs(loudness) > 100:
+        return audio_array
+    normalized_audio = pyln.normalize.loudness(audio_array, loudness, lufs)
+    return normalized_audio
+
+def audio_prepare_multi(left_path, right_path, audio_type, sample_rate=16000):
+    if not (left_path=='None' or right_path=='None'):
+        human_speech_array1 = audio_prepare_single(left_path)
+        human_speech_array2 = audio_prepare_single(right_path)
+    elif left_path=='None':
+        human_speech_array2 = audio_prepare_single(right_path)
+        human_speech_array1 = np.zeros(human_speech_array2.shape[0])
+    elif right_path=='None':
+        human_speech_array1 = audio_prepare_single(left_path)
+        human_speech_array2 = np.zeros(human_speech_array1.shape[0])
+
+    if audio_type=='para':
+        new_human_speech1 = human_speech_array1
+        new_human_speech2 = human_speech_array2
+    elif audio_type=='add':
+        new_human_speech1 = np.concatenate([human_speech_array1[: human_speech_array1.shape[0]], np.zeros(human_speech_array2.shape[0])])
+        new_human_speech2 = np.concatenate([np.zeros(human_speech_array1.shape[0]), human_speech_array2[:human_speech_array2.shape[0]]])
+
+    sum_human_speechs = new_human_speech1 + new_human_speech2
+    return new_human_speech1, new_human_speech2, sum_human_speechs
+
+def get_embedding(speech_array, wav2vec_feature_extractor, audio_encoder, sr=16000, device='cuda'):
+    audio_duration = len(speech_array) / sr
+    video_length = audio_duration * 25 # Assume the video fps is 25
+
+    # wav2vec_feature_extractor
+    audio_feature = np.squeeze(
+        wav2vec_feature_extractor(speech_array, sampling_rate=sr).input_values
+    )
+    audio_feature = torch.from_numpy(audio_feature).float().to(device=device)
+    audio_feature = audio_feature.unsqueeze(0)
+
+    # audio encoder
+    with torch.no_grad():
+        embeddings = audio_encoder(audio_feature, seq_len=int(video_length), output_hidden_states=True)
+
+    if len(embeddings) == 0:
+        print("Fail to extract audio embedding")
+        return None
+
+    audio_emb = torch.stack(embeddings.hidden_states[1:], dim=1).squeeze(0)
+    audio_emb = rearrange(audio_emb, "b s d -> s b d")
+    audio_emb = audio_emb.cpu().detach()
+    return audio_emb
+
+def extract_audio_from_video(filename, sample_rate):
+    raw_audio_path = filename.split('/')[-1].split('.')[0]+'.wav'
+    ffmpeg_command = [
+        "ffmpeg",
+        "-y",
+        "-i",
+        str(filename),
+        "-vn",
+        "-acodec",
+        "pcm_s16le",
+        "-ar",
+        "16000",
+        "-ac",
+        "2",
+        str(raw_audio_path),
+    ]
+    subprocess.run(ffmpeg_command, check=True)
+    human_speech_array, sr = librosa.load(raw_audio_path, sr=sample_rate)
+    human_speech_array = loudness_norm(human_speech_array, sr)
+    os.remove(raw_audio_path)
+    return human_speech_array
+
+def audio_prepare_single(audio_path, sample_rate=16000):
+    ext = os.path.splitext(audio_path)[1].lower()
+    if ext in ['.mp4', '.mov', '.avi', '.mkv']:
+        human_speech_array = extract_audio_from_video(audio_path, sample_rate)
+        return human_speech_array
+    else:
+        human_speech_array, sr = librosa.load(audio_path, sr=sample_rate)
+        human_speech_array = loudness_norm(human_speech_array, sr)
+        return human_speech_array
+
+def process_tts_single(text, save_dir, voice1):
+    s1_sentences = []
+    pipeline = KPipeline(lang_code='a', repo_id='weights/Kokoro-82M')
+    voice_tensor = torch.load(voice1, weights_only=True)
+    generator = pipeline(
+        text, voice=voice_tensor,
+        speed=1, split_pattern=r'\n+'
+    )
+    audios = []
+    for i, (gs, ps, audio) in enumerate(generator):
+        audios.append(audio)
+    audios = torch.concat(audios, dim=0)
+    s1_sentences.append(audios)
+    s1_sentences = torch.concat(s1_sentences, dim=0)
+    save_path1 =f'{save_dir}/s1.wav'
+    sf.write(save_path1, s1_sentences, 24000)
+    s1, _ = librosa.load(save_path1, sr=16000)
+    return s1, save_path1
+
+def process_tts_multi(text, save_dir, voice1, voice2):
+    pattern = r'\(s(\d+)\)\s*(.*?)(?=\s*\(s\d+\)|$)'
+    matches = re.findall(pattern, text, re.DOTALL)
+    s1_sentences = []
+    s2_sentences = []
+    pipeline = KPipeline(lang_code='a', repo_id='weights/Kokoro-82M')
+
+    for idx, (speaker, content) in enumerate(matches):
+        if speaker == '1':
+            voice_tensor = torch.load(voice1, weights_only=True)
+            generator = pipeline(
+                content, voice=voice_tensor,
+                speed=1, split_pattern=r'\n+'
+            )
+            audios = []
+            for i, (gs, ps, audio) in enumerate(generator):
+                audios.append(audio)
+            audios = torch.concat(audios, dim=0)
+            s1_sentences.append(audios)
+            s2_sentences.append(torch.zeros_like(audios))
+        elif speaker == '2':
+            voice_tensor = torch.load(voice2, weights_only=True)
+            generator = pipeline(
+                content, voice=voice_tensor,
+                speed=1, split_pattern=r'\n+'
+            )
+            audios = []
+            for i, (gs, ps, audio) in enumerate(generator):
+                audios.append(audio)
+            audios = torch.concat(audios, dim=0)
+            s2_sentences.append(audios)
+            s1_sentences.append(torch.zeros_like(audios))
+
+    s1_sentences = torch.concat(s1_sentences, dim=0)
+    s2_sentences = torch.concat(s2_sentences, dim=0)
+    sum_sentences = s1_sentences + s2_sentences
+
+    save_path1 =f'{save_dir}/s1.wav'
+    save_path2 =f'{save_dir}/s2.wav'
+    save_path_sum = f'{save_dir}/sum.wav'
+
+    sf.write(save_path1, s1_sentences, 24000)
+    sf.write(save_path2, s2_sentences, 24000)
+    sf.write(save_path_sum, sum_sentences, 24000)
+
+    s1, _ = librosa.load(save_path1, sr=16000)
+    s2, _ = librosa.load(save_path2, sr=16000)
+
+    return s1, s2, save_path_sum
+
+@spaces.GPU(duration=300) # Allocate GPU for 5 minutes for video generation
+def generate_video(img2vid_image, vid2vid_vid, task_mode, img2vid_prompt, n_prompt, img2vid_audio_1, img2vid_audio_2,
+                   sd_steps, seed, text_guide_scale, audio_guide_scale, mode_selector, tts_text, resolution_select, human1_voice, human2_voice):
+
+    global wav2vec_feature_extractor, audio_encoder, wan_i2v
+    args = get_default_args()
+
+    # Ensure models are initialized
+    if wan_i2v is None:
+        initialize_models()
+
+    # Move models to GPU if needed
+    device = torch.device('cuda' if torch.cuda.is_available() else 'cpu')
+    if audio_encoder is not None:
+        audio_encoder = audio_encoder.to(device)
+
+    input_data = {}
+    input_data["prompt"] = img2vid_prompt
+
+    if task_mode=='VideoDubbing':
+        input_data["cond_video"] = vid2vid_vid
+    else:
+        input_data["cond_video"] = img2vid_image
+
+    person = {}
+    if mode_selector == "Single Person(Local File)":
+        person['person1'] = img2vid_audio_1
+    elif mode_selector == "Single Person(TTS)":
+        tts_audio = {}
+        tts_audio['text'] = tts_text
+        tts_audio['human1_voice'] = human1_voice
+        input_data["tts_audio"] = tts_audio
+    elif mode_selector == "Multi Person(Local File, audio add)":
+        person['person1'] = img2vid_audio_1
+        person['person2'] = img2vid_audio_2
+        input_data["audio_type"] = 'add'
+    elif mode_selector == "Multi Person(Local File, audio parallel)":
+        person['person1'] = img2vid_audio_1
+        person['person2'] = img2vid_audio_2
+        input_data["audio_type"] = 'para'
+    else:
+        tts_audio = {}
+        tts_audio['text'] = tts_text
+        tts_audio['human1_voice'] = human1_voice
+        tts_audio['human2_voice'] = human2_voice
+        input_data["tts_audio"] = tts_audio
+
+    input_data["cond_audio"] = person
+
+    # Process audio
+    if 'Local File' in mode_selector:
+        if len(input_data['cond_audio'])==2:
+            new_human_speech1, new_human_speech2, sum_human_speechs = audio_prepare_multi(input_data['cond_audio']['person1'], input_data['cond_audio']['person2'], input_data['audio_type'])
+            audio_embedding_1 = get_embedding(new_human_speech1, wav2vec_feature_extractor, audio_encoder, device=device)
+            audio_embedding_2 = get_embedding(new_human_speech2, wav2vec_feature_extractor, audio_encoder, device=device)
+            emb1_path = os.path.join(args.audio_save_dir, '1.pt')
+            emb2_path = os.path.join(args.audio_save_dir, '2.pt')
+            sum_audio = os.path.join(args.audio_save_dir, 'sum.wav')
+            sf.write(sum_audio, sum_human_speechs, 16000)
+            torch.save(audio_embedding_1, emb1_path)
+            torch.save(audio_embedding_2, emb2_path)
+            input_data['cond_audio']['person1'] = emb1_path
+            input_data['cond_audio']['person2'] = emb2_path
+            input_data['video_audio'] = sum_audio
+        elif len(input_data['cond_audio'])==1:
+            human_speech = audio_prepare_single(input_data['cond_audio']['person1'])
+            audio_embedding = get_embedding(human_speech, wav2vec_feature_extractor, audio_encoder, device=device)
+            emb_path = os.path.join(args.audio_save_dir, '1.pt')
+            sum_audio = os.path.join(args.audio_save_dir, 'sum.wav')
+            sf.write(sum_audio, human_speech, 16000)
+            torch.save(audio_embedding, emb_path)
+            input_data['cond_audio']['person1'] = emb_path
+            input_data['video_audio'] = sum_audio
+    elif 'TTS' in mode_selector:
+        if 'human2_voice' not in input_data['tts_audio'].keys():
+            new_human_speech1, sum_audio = process_tts_single(input_data['tts_audio']['text'], args.audio_save_dir, input_data['tts_audio']['human1_voice'])
+            audio_embedding_1 = get_embedding(new_human_speech1, wav2vec_feature_extractor, audio_encoder, device=device)
+            emb1_path = os.path.join(args.audio_save_dir, '1.pt')
+            torch.save(audio_embedding_1, emb1_path)
+            input_data['cond_audio']['person1'] = emb1_path
+            input_data['video_audio'] = sum_audio
+        else:
+            new_human_speech1, new_human_speech2, sum_audio = process_tts_multi(input_data['tts_audio']['text'], args.audio_save_dir, input_data['tts_audio']['human1_voice'], input_data['tts_audio']['human2_voice'])
+            audio_embedding_1 = get_embedding(new_human_speech1, wav2vec_feature_extractor, audio_encoder, device=device)
+            audio_embedding_2 = get_embedding(new_human_speech2, wav2vec_feature_extractor, audio_encoder, device=device)
+            emb1_path = os.path.join(args.audio_save_dir, '1.pt')
+            emb2_path = os.path.join(args.audio_save_dir, '2.pt')
+            torch.save(audio_embedding_1, emb1_path)
+            torch.save(audio_embedding_2, emb2_path)
+            input_data['cond_audio']['person1'] = emb1_path
+            input_data['cond_audio']['person2'] = emb2_path
+            input_data['video_audio'] = sum_audio
+
+    logging.info("Generating video ...")
+
+    # Generate video
+    video = wan_i2v.generate_infinitetalk(
+        input_data,
+        size_buckget=resolution_select,
+        motion_frame=args.motion_frame,
+        frame_num=args.frame_num,
+        shift=args.sample_shift,
+        sampling_steps=sd_steps,
+        text_guide_scale=text_guide_scale,
+        audio_guide_scale=audio_guide_scale,
+        seed=seed,
+        n_prompt=n_prompt,
+        offload_model=args.offload_model,
+        max_frames_num=args.frame_num if args.mode == 'clip' else 1000,
+        color_correction_strength=args.color_correction_strength,
+        extra_args=args,
+    )
+
+    # Save video
+    if args.save_file is None:
+        formatted_time = datetime.now().strftime("%Y%m%d_%H%M%S")
+        formatted_prompt = input_data['prompt'].replace(" ", "_").replace("/", "_")[:50]
+        args.save_file = f"{args.task}_{args.size.replace('*','x')}_{formatted_prompt}_{formatted_time}"
+
+    logging.info(f"Saving generated video to {args.save_file}.mp4")
+    save_video_ffmpeg(video, args.save_file, [input_data['video_audio']], high_quality_save=False)
+    logging.info("Finished.")
+
+    # Clear GPU memory
+    torch.cuda.empty_cache()
+
+    return args.save_file + '.mp4'
+
+def toggle_audio_mode(mode):
+    if 'TTS' in mode:
+        return [
+            gr.Audio(visible=False, interactive=False),
+            gr.Audio(visible=False, interactive=False),
+            gr.Textbox(visible=True, interactive=True)
+        ]
+    elif 'Single' in mode:
+        return [
+            gr.Audio(visible=True, interactive=True),
+            gr.Audio(visible=False, interactive=False),
+            gr.Textbox(visible=False, interactive=False)
+        ]
+    else:
+        return [
+            gr.Audio(visible=True, interactive=True),
+            gr.Audio(visible=True, interactive=True),
+            gr.Textbox(visible=False, interactive=False)
+        ]
+
+def show_upload(mode):
+    if mode == "SingleImageDriven":
+        return gr.update(visible=True), gr.update(visible=False)
+    else:
+        return gr.update(visible=False), gr.update(visible=True)
+
+# Create the Gradio interface
+with gr.Blocks() as demo:
+    gr.Markdown("""
+    <div style="text-align: center; font-size: 32px; font-weight: bold; margin-bottom: 20px;">
+        MeiGen-InfiniteTalk (ZeroGPU)
+    </div>
+    <div style="text-align: center; font-size: 16px; font-weight: normal; margin-bottom: 20px;">
+        InfiniteTalk: Audio-driven Video Generation for Spare-Frame Video Dubbing.
+    </div>
+    <div style="text-align: center; color: orange; margin-bottom: 20px;">
+        ⚠️ This is optimized for ZeroGPU. Processing may take several minutes due to model loading and GPU allocation.
+    </div>
+    """)
+
+    # Initialize models button
+    with gr.Row():
+        init_button = gr.Button("Initialize Models (Click first!)", variant="primary")
+        init_status = gr.Textbox(label="Status", interactive=False)
+
+    init_button.click(
+        fn=initialize_models,
+        outputs=[init_status]
+    )
+
+    with gr.Row():
+        with gr.Column(scale=1):
+            task_mode = gr.Radio(
+                choices=["SingleImageDriven", "VideoDubbing"],
+                label="Choose SingleImageDriven task or VideoDubbing task",
+                value="VideoDubbing"
+            )
+
+            vid2vid_vid = gr.Video(
+                label="Upload Input Video",
+                visible=True)
+
+            img2vid_image = gr.Image(
+                type="filepath",
+                label="Upload Input Image",
+                elem_id="image_upload",
+                visible=False
+            )
+
+            img2vid_prompt = gr.Textbox(
+                label="Prompt",
+                placeholder="Describe the video you want to generate",
+            )
+
+            task_mode.change(
+                fn=show_upload,
+                inputs=task_mode,
+                outputs=[img2vid_image, vid2vid_vid]
+            )
+
+            with gr.Accordion("Audio Options", open=True):
+                mode_selector = gr.Radio(
+                    choices=["Single Person(Local File)", "Single Person(TTS)", "Multi Person(Local File, audio add)", "Multi Person(Local File, audio parallel)", "Multi Person(TTS)"],
+                    label="Select person and audio mode.",
+                    value="Single Person(Local File)"
+                )
+
+                resolution_select = gr.Radio(
+                    choices=["infinitetalk-480", "infinitetalk-720"],
+                    label="Select resolution.",
+                    value="infinitetalk-480"
+                )
+
+                img2vid_audio_1 = gr.Audio(label="Conditioning Audio for speaker 1", type="filepath", visible=True)
+                img2vid_audio_2 = gr.Audio(label="Conditioning Audio for speaker 2", type="filepath", visible=False)
+
+                tts_text = gr.Textbox(
+                    label="Text for TTS",
+                    placeholder="Refer to the format in the examples",
+                    visible=False,
+                    interactive=False
+                )
+
+                mode_selector.change(
+                    fn=toggle_audio_mode,
+                    inputs=mode_selector,
+                    outputs=[img2vid_audio_1, img2vid_audio_2, tts_text]
+                )
+
+            with gr.Accordion("Advanced Options", open=False):
+                with gr.Row():
+                    sd_steps = gr.Slider(
+                        label="Diffusion steps",
+                        minimum=1,
+                        maximum=50, # Reduced max for ZeroGPU
+                        value=8,
+                        step=1)
+
+                    seed = gr.Slider(
+                        label="Seed",
+                        minimum=-1,
+                        maximum=2147483647,
+                        step=1,
+                        value=42)
+
+                with gr.Row():
+                    text_guide_scale = gr.Slider(
+                        label="Text Guide scale",
+                        minimum=0,
+                        maximum=20,
+                        value=1.0,
+                        step=1)
+
+                    audio_guide_scale = gr.Slider(
+                        label="Audio Guide scale",
+                        minimum=0,
+                        maximum=20,
+                        value=2.0,
+                        step=1)
+
+                with gr.Row():
+                    human1_voice = gr.Textbox(
+                        label="Voice for the left person",
+                        value="weights/Kokoro-82M/voices/am_adam.pt",
+                    )
+
+                    human2_voice = gr.Textbox(
+                        label="Voice for right person",
+                        value="weights/Kokoro-82M/voices/af_heart.pt"
+                    )
+
+                n_prompt = gr.Textbox(
+                    label="Negative Prompt",
+                    placeholder="Describe the negative prompt you want to add",
+                    value="bright tones, overexposed, static, blurred details, subtitles, style, works, paintings, images, static, overall gray, worst quality, low quality, JPEG compression residue, ugly, incomplete, extra fingers, poorly drawn hands, poorly drawn faces, deformed, disfigured, misshapen limbs, fused fingers, still picture, messy background, three legs, many people in the background, walking backwards"
+                )
+
+            run_i2v_button = gr.Button("Generate Video", variant="primary")
+
+        with gr.Column(scale=2):
+            result_gallery = gr.Video(
+                label='Generated Video', interactive=False, height=600,)
+
+    gr.Examples(
+        examples = [
+            ['SingleImageDriven', 'examples/single/ref_image.png', None, "A woman is passionately singing into a professional microphone in a recording studio.", "Single Person(Local File)", "examples/single/1.wav", None, None],
+            ['VideoDubbing', None, 'examples/single/ref_video.mp4', "A man is talking", "Single Person(Local File)", "examples/single/1.wav", None, None],
+        ],
+        inputs = [task_mode, img2vid_image, vid2vid_vid, img2vid_prompt, mode_selector, img2vid_audio_1, img2vid_audio_2, tts_text],
+    )
+
+    run_i2v_button.click(
+        fn=generate_video,
+        inputs=[img2vid_image, vid2vid_vid, task_mode, img2vid_prompt, n_prompt, img2vid_audio_1, img2vid_audio_2,sd_steps, seed, text_guide_scale, audio_guide_scale, mode_selector, tts_text, resolution_select, human1_voice, human2_voice],
+        outputs=[result_gallery],
+    )
+
+if __name__ == "__main__":
+    demo.launch(server_name="0.0.0.0", debug=True) # Basic check
+    assert args.ckpt_dir is not None, "Please specify the checkpoint directory."
+    assert args.task in WAN_CONFIGS, f"Unsupport task: {args.task}"
 
     # The default sampling steps are 40 for image-to-video tasks and 50 for text-to-video tasks.
     if args.sample_steps is None:
|