Julian Bilcke committed
Commit · 1382b6e
Parent(s): 0ffe757

let's try again

Files changed:
- api_engine.py +53 -6
- api_server.py +29 -5
api_engine.py
CHANGED

@@ -102,9 +102,12 @@ class MatrixGameEngine:
             raise RuntimeError(error_msg)
 
         try:
+            init_start = time.time()
+            logger.info("Starting Matrix-Game V2 model initialization...")
             self._init_models()
             self.model_loaded = True
-
+            init_time = time.time() - init_start
+            logger.info(f"Matrix-Game V2 models loaded successfully in {init_time:.2f} seconds")
         except Exception as e:
             error_msg = f"Failed to initialize Matrix-Game V2 models: {str(e)}"
             logger.error(error_msg)

@@ -122,11 +125,16 @@ class MatrixGameEngine:
         logger.debug(f"Configuration loaded: {self.config}")
 
         # Initialize generator
-
-
+        logger.info("Initializing WAN Diffusion generator...")
+        model_kwargs = getattr(self.config, "model_kwargs", {})
+        logger.debug(f"Model kwargs: {model_kwargs}")
+        generator = WanDiffusionWrapper(**model_kwargs, is_causal=True)
+        logger.info("WAN Diffusion generator initialized successfully")
 
         # Initialize VAE decoder
+        logger.info("Initializing VAE decoder...")
         current_vae_decoder = VAEDecoderWrapper()
+        logger.info("VAE decoder initialized successfully")
 
         # Check if model exists locally, if not download from Hugging Face
         if not os.path.exists(self.pretrained_model_path) or not os.path.exists(os.path.join(self.pretrained_model_path, "Wan2.1_VAE.pth")):

@@ -272,7 +280,15 @@ class MatrixGameEngine:
            logger.error(error_msg)
            raise RuntimeError(error_msg)
 
+        generation_start_time = time.time()
+
         try:
+            # Log GPU memory usage if CUDA is available
+            if torch.cuda.is_available():
+                gpu_memory_allocated = torch.cuda.memory_allocated() / 1024**3  # GB
+                gpu_memory_reserved = torch.cuda.memory_reserved() / 1024**3  # GB
+                logger.debug(f"GPU Memory - Allocated: {gpu_memory_allocated:.2f}GB, Reserved: {gpu_memory_reserved:.2f}GB")
+
             logger.debug(f"Starting frame generation for scene: {scene_name}")
 
             # Map scene name to mode

@@ -313,7 +329,8 @@ class MatrixGameEngine:
             mouse_condition = [[0, 0]]
 
             # Generate conditions for multiple frames (for streaming)
-
+            # Must be divisible by num_frame_per_block (which is 3)
+            num_frames = 3  # Generate 3 frames at a time (matches num_frame_per_block)
 
             # Create condition tensors
             keyboard_tensor = torch.tensor(keyboard_condition * num_frames, dtype=self.weight_dtype).unsqueeze(0).to(self.device)

@@ -338,13 +355,21 @@ class MatrixGameEngine:
 
             # Generate frames with streaming pipeline
             with torch.no_grad():
-                logger.
+                logger.info(f"Starting inference - Frame #{self.frame_count}, Mode: {mode}, Scene: {scene_name}")
                 logger.debug(f"Conditional dict keys: {list(conditional_dict.keys())}")
                 logger.debug(f"Noise shape: {sampled_noise.shape}")
 
+                # Log tensor shapes for debugging
+                for key, tensor in conditional_dict.items():
+                    if hasattr(tensor, 'shape'):
+                        logger.debug(f"  {key}: {tensor.shape} ({tensor.dtype})")
+
                 # Set seed for reproducibility
                 set_seed(self.seed + self.frame_count)
 
+                inference_start = time.time()
+                logger.debug("Starting pipeline.inference()...")
+
                 # Use inference method for single batch generation
                 outputs = self.pipeline.inference(
                     noise=sampled_noise,

@@ -355,17 +380,37 @@ class MatrixGameEngine:
                     mode=mode
                 )
 
-
+                inference_time = time.time() - inference_start
+                logger.info(f"Inference completed in {inference_time:.2f}s, outputs type: {type(outputs)}")
+
+                if outputs is not None:
+                    logger.debug(f"Output tensor shape: {outputs.shape if hasattr(outputs, 'shape') else 'No shape attr'}")
 
                 # Decode first frame from latent
                 if outputs is not None and len(outputs) > 0:
+                    decode_start = time.time()
+                    logger.debug("Starting VAE decoding...")
+
                     # Extract first frame
                     frame_latent = outputs[0:1, :, 0:1]  # Get first frame
+                    logger.debug(f"Frame latent shape: {frame_latent.shape}")
+
                     decoded = self.pipeline.vae_decoder.decode(frame_latent)
+                    decode_time = time.time() - decode_start
+                    logger.debug(f"VAE decoding completed in {decode_time:.3f}s")
 
                     # Convert to numpy
+                    logger.debug(f"Decoded tensor shape: {decoded.shape}")
                     frame = decoded[0, :, 0].permute(1, 2, 0).cpu().numpy()
+                    logger.debug(f"Frame numpy shape: {frame.shape}, dtype: {frame.dtype}")
+
+                    # Normalize to [0, 255]
+                    frame_min, frame_max = frame.min(), frame.max()
+                    logger.debug(f"Frame value range before normalization: [{frame_min:.3f}, {frame_max:.3f}]")
+
                     frame = ((frame + 1) * 127.5).clip(0, 255).astype(np.uint8)
+                    logger.debug(f"Frame shape after normalization: {frame.shape}, dtype: {frame.dtype}")
+
                 else:
                     # Generation failed
                     error_msg = "Failed to generate frame: No output from model"

@@ -373,6 +418,8 @@ class MatrixGameEngine:
                     raise RuntimeError(error_msg)
 
             self.frame_count += 1
+            total_generation_time = time.time() - generation_start_time
+            logger.info(f"Frame generation complete - Total time: {total_generation_time:.3f}s, Frame #{self.frame_count}")
 
         except Exception as e:
             error_msg = f"Error generating frame with Matrix-Game V2 model: {str(e)}"
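Review note: the same time.time() start/elapsed pattern now appears around model init, inference, and VAE decoding in api_engine.py. If it keeps spreading, a small context manager could centralize it. A minimal sketch, assuming standard logging; the log_timing helper is hypothetical, not part of this repo:

import logging
import time
from contextlib import contextmanager

logger = logging.getLogger(__name__)

@contextmanager
def log_timing(label: str):
    # Hypothetical helper mirroring the manual init_start/init_time
    # pattern in this diff: log how long the wrapped block took.
    start = time.time()
    try:
        yield
    finally:
        logger.info(f"{label} completed in {time.time() - start:.2f}s")

# Usage roughly equivalent to the init timing added above:
# with log_timing("Matrix-Game V2 model initialization"):
#     self._init_models()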
api_server.py
CHANGED

@@ -240,15 +240,20 @@ class GameSession:
     async def _stream_frames(self, fps: int):
         """Stream frames to the client at the specified FPS"""
         frame_interval = 1.0 / fps  # Time between frames in seconds
+        frame_count = 0
+
+        logger.info(f"Starting frame streaming for user {self.user_id} at {fps} FPS (interval: {frame_interval:.3f}s)")
 
         try:
             while self.is_streaming:
-
+                stream_start_time = time.time()
 
                 # Generate frame based on current keyboard and mouse state
                 keyboard_condition = [self.keyboard_state]
                 mouse_condition = [self.mouse_state]
 
+                logger.debug(f"Frame #{frame_count} - KB: {keyboard_condition[0]}, Mouse: {mouse_condition[0]}")
+
                 # Check if engine is available
                 if not self.game_manager.engine:
                     error_msg = f"Engine not available: {self.game_manager.engine_error}"

@@ -260,10 +265,13 @@ class GameSession:
                     return
 
                 try:
+                    generation_start = time.time()
                     # Use the engine to generate the next frame
                     frame_bytes = self.game_manager.engine.generate_frame(
                         self.current_scene, keyboard_condition, mouse_condition
                     )
+                    generation_time = time.time() - generation_start
+
                 except Exception as e:
                     error_msg = f"Failed to generate frame: {str(e)}"
                     logger.error(error_msg)

@@ -275,18 +283,34 @@ class GameSession:
                     return
 
                 # Encode as base64 for sending in JSON
+                encode_start = time.time()
                 frame_base64 = base64.b64encode(frame_bytes).decode('utf-8')
+                encode_time = time.time() - encode_start
 
                 # Send frame to client
+                send_start = time.time()
                 await self.ws.send_json({
                     'action': 'frame',
                     'frameData': frame_base64,
-                    'timestamp': time.time()
+                    'timestamp': time.time(),
+                    'frameNumber': frame_count,
+                    'generationTime': f"{generation_time:.3f}s",
+                    'frameSize': len(frame_bytes)
                 })
+                send_time = time.time() - send_start
+
+                # Calculate total time and performance metrics
+                total_time = time.time() - stream_start_time
+                sleep_time = max(0, frame_interval - total_time)
+
+                # Log performance info every 10 frames or if frame takes too long
+                if frame_count % 10 == 0 or total_time > frame_interval * 1.5:
+                    logger.info(f"Frame #{frame_count} - Gen: {generation_time:.3f}s, "
+                                f"Encode: {encode_time:.3f}s, Send: {send_time:.3f}s, "
+                                f"Total: {total_time:.3f}s, Size: {len(frame_bytes)} bytes, "
+                                f"Sleep: {sleep_time:.3f}s")
 
-
-                elapsed = time.time() - start_time
-                sleep_time = max(0, frame_interval - elapsed)
+                frame_count += 1
                 await asyncio.sleep(sleep_time)
 
         except asyncio.CancelledError:
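Review note: sleep_time = max(0, frame_interval - total_time) now budgets the whole iteration (generate + encode + send), replacing the removed elapsed/start_time pacing. One remaining caveat: pacing each frame relative to its own start lets small overruns accumulate into drift over a long session. A drift-free sketch against an absolute deadline; paced_loop and produce_frame are illustrative names, not part of this server:

import asyncio
import time

async def paced_loop(fps: int, produce_frame) -> None:
    # Schedule frames against absolute deadlines so one slow frame
    # does not permanently shift the cadence.
    interval = 1.0 / fps
    next_deadline = time.monotonic() + interval
    while True:
        await produce_frame()
        await asyncio.sleep(max(0.0, next_deadline - time.monotonic()))
        next_deadline += interval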