#!/usr/bin/env python3
"""
Quick test script to verify the model works
"""
import os

# Force fallback to smaller model for quick testing
os.environ["CLIP_MODEL"] = "openai/clip-vit-base-patch32"

from app import UltraAdvancedFoodRecognizer, select_device
from PIL import Image


def test_model():
    print("🧪 Quick model test...")

    # Get device
    device = select_device()
    print(f"Device: {device}")

    # Initialize model
    print("Loading model...")
    recognizer = UltraAdvancedFoodRecognizer(device)
    print(f"Models loaded: {recognizer.models_loaded}")

    # Create test image (red apple-like)
    test_img = Image.new('RGB', (224, 224), (220, 20, 60))

    # Test food detection
    print("Testing food detection...")
    is_food, confidence, details = recognizer.detect_food_advanced(test_img)
    print(f"Is food: {is_food}, Confidence: {confidence:.2%}")

    # Test food analysis
    print("Testing food analysis...")
    result = recognizer.analyze_food(test_img)
    print(f"Detected: {result['primary_label']}")
    print(f"Confidence: {result['confidence']:.2%}")
    print(f"Quality score: {result['visual_features'].get('estimated_quality', 0):.2f}")

    print("🎉 Quick test PASSED!")


if __name__ == "__main__":
    test_model()