Spaces:
Sleeping
Sleeping
| """ | |
| ORYNXML REST API Backend - FastAPI with 211 AI Models | |
| Provides REST API endpoints for HTML frontend at orynxml-ai.pages.dev | |
| """ | |
# Standard library
import hashlib
import hmac
import os
import secrets
import sqlite3
from contextlib import closing
from datetime import datetime, timedelta
from typing import Any, Dict, List, Optional

# Third-party
import uvicorn
from fastapi import Depends, FastAPI, HTTPException
from fastapi.middleware.cors import CORSMiddleware
from fastapi.responses import JSONResponse
from huggingface_hub import InferenceClient
from pydantic import BaseModel
# HuggingFace Inference Client.
# An empty/unset HF_TOKEN falls back to anonymous (unauthenticated) access.
HF_TOKEN = os.getenv("HF_TOKEN", "")
inference_client = InferenceClient(token=HF_TOKEN or None)
# Cloudflare Configuration
# NOTE(review): the fallback account/database/namespace IDs below are
# hard-coded, real-looking identifiers committed to source. They are not
# secrets (the API token correctly defaults to empty), but consider
# requiring them from the environment instead of embedding them here.
CLOUDFLARE_CONFIG = {
    # Secret credential: environment-only, never hard-coded.
    "api_token": os.getenv("CLOUDFLARE_API_TOKEN", ""),
    "account_id": os.getenv(
        "CLOUDFLARE_ACCOUNT_ID", "62af59a7ac82b29543577ee6800735ee"
    ),
    "d1_database_id": os.getenv(
        "CLOUDFLARE_D1_DATABASE_ID", "6d887f74-98ac-4db7-bfed-8061903d1f6c"
    ),
    "r2_bucket_name": os.getenv("CLOUDFLARE_R2_BUCKET_NAME", "openmanus-storage"),
    "kv_namespace_id": os.getenv(
        "CLOUDFLARE_KV_NAMESPACE_ID", "87f4aa01410d4fb19821f61006f94441"
    ),
    "kv_namespace_cache": os.getenv(
        "CLOUDFLARE_KV_CACHE_ID", "7b58c88292c847d1a82c8e0dd5129f37"
    ),
    # Durable Object binding names (fixed constants, not env-configurable).
    "durable_objects_sessions": "AGENT_SESSIONS",
    "durable_objects_chatrooms": "CHAT_ROOMS",
}
# AI Models Dictionary (211 models)
# NOTE(review): only a small sample of the advertised 211 models is actually
# listed; the "... (add all ...)" placeholders were never filled in.
# Values are heterogeneous: a category maps either to a list of model ids or
# to a dict of sub-category -> list of model ids. Any consumer of this dict
# must handle both shapes.
AI_MODELS = {
    "Text Generation": {
        "Qwen Models": [
            "Qwen/Qwen2.5-72B-Instruct",
            "Qwen/Qwen2.5-Coder-32B-Instruct",
            "Qwen/Qwen2.5-Math-72B-Instruct",
            # ... (add all 35 Qwen models)
        ],
        "DeepSeek Models": [
            "deepseek-ai/deepseek-llm-67b-chat",
            "deepseek-ai/DeepSeek-V2-Chat",
            # ... (add all 17 DeepSeek models)
        ],
    },
    "Image Generation": [
        "black-forest-labs/FLUX.1-dev",
        "black-forest-labs/FLUX.1-schnell",
        "stabilityai/stable-diffusion-xl-base-1.0",
        # ... (add all image gen models)
    ],
    "Image Editing": [
        "timbrooks/instruct-pix2pix",
        "lllyasviel/control_v11p_sd15_canny",
        # ... (add all editing models)
    ],
    "Video Generation": {
        "Text-to-Video": [
            "ali-vilab/text-to-video-ms-1.7b",
            # ...
        ],
        "Image-to-Video": [
            "stabilityai/stable-video-diffusion-img2vid",
            # ...
        ],
    },
    "Audio": {
        "TTS": ["suno/bark", "microsoft/speecht5_tts"],
        "STT": ["openai/whisper-large-v3"],
    },
    "Translation": {
        "Arabic-English": [
            "Helsinki-NLP/opus-mt-ar-en",
            "Helsinki-NLP/opus-mt-en-ar",
        ]
    },
}
# Initialize FastAPI
app = FastAPI(
    title="ORYNXML AI Platform API",
    description="REST API for 211 AI models with authentication and Cloudflare integration",
    version="1.0.0",
)
# CORS Configuration
# NOTE(review): allow_origins=["*"] combined with allow_credentials=True is
# disallowed by the CORS specification — browsers reject a wildcard origin
# when credentials are enabled. Restrict allow_origins to the real frontend
# origin (orynxml-ai.pages.dev) in production.
app.add_middleware(
    CORSMiddleware,
    allow_origins=["*"],  # In production, restrict to your domain
    allow_credentials=True,
    allow_methods=["*"],
    allow_headers=["*"],
)
# Database initialization
def init_database(db_path: str = "openmanus.db") -> None:
    """Create the SQLite ``users`` table if it does not already exist.

    Args:
        db_path: Path to the SQLite database file. The default matches the
            path the signup/login endpoints connect to.
    """
    conn = sqlite3.connect(db_path)
    try:
        conn.execute(
            """
            CREATE TABLE IF NOT EXISTS users (
                id INTEGER PRIMARY KEY AUTOINCREMENT,
                mobile TEXT UNIQUE NOT NULL,
                name TEXT NOT NULL,
                password_hash TEXT NOT NULL,
                created_at TIMESTAMP DEFAULT CURRENT_TIMESTAMP
            )
            """
        )
        conn.commit()
    finally:
        # Always release the file handle, even if DDL execution fails.
        conn.close()
# Run at import time so the users table exists before any request is served.
init_database()
# Pydantic Models
class SignupRequest(BaseModel):
    """Request body for user registration (signup)."""

    mobile: str  # unique account identifier (phone number)
    name: str  # display name
    password: str  # plaintext; hashed server-side before storage
class LoginRequest(BaseModel):
    """Request body for user login."""

    mobile: str  # account identifier (phone number)
    password: str  # plaintext; compared against stored hash server-side
class AIRequest(BaseModel):
    """Request body for generic AI generation (text/image/video)."""

    model: str  # HuggingFace model id; routing is inferred from this name
    prompt: str  # user prompt
    max_tokens: Optional[int] = 2000  # generation cap (text models only)
    temperature: Optional[float] = 0.7  # sampling temperature (text models only)
class ChatRequest(BaseModel):
    """Request body for the chat endpoint."""

    message: str  # newest user message
    model: Optional[str] = "Qwen/Qwen2.5-72B-Instruct"  # chat model id
    # Prior turns as [{"role": ..., "content": ...}] dicts. The mutable []
    # default is safe here: pydantic copies field defaults per instance.
    # NOTE(review): a client sending an explicit null yields history=None.
    history: Optional[List[Dict[str, str]]] = []
# Helper Functions
def hash_password(password: str) -> str:
    """Hash password using SHA-256"""
    # NOTE(review): unsalted single-round SHA-256 is not suitable for
    # password storage (fast to brute-force; identical passwords produce
    # identical hashes). A salted KDF (hashlib.pbkdf2_hmac / scrypt) would
    # be better, but switching would invalidate hashes already stored in
    # openmanus.db — requires a migration, not an in-place change.
    return hashlib.sha256(password.encode()).hexdigest()
def verify_password(password: str, password_hash: str) -> bool:
    """Verify *password* against a stored SHA-256 hex digest.

    Uses a constant-time comparison so the check does not leak matching
    hash prefixes through timing differences.
    """
    return hmac.compare_digest(hash_password(password), password_hash)
# API Endpoints
# NOTE(review): this handler was defined but never registered on `app`, so
# the route did not exist; @app.get restores it. The path comes from the
# endpoint map this handler itself returns.
@app.get("/")
async def root():
    """Root endpoint: service banner plus a map of available endpoints."""
    return {
        "message": "ORYNXML AI Platform API",
        "version": "1.0.0",
        "status": "running",
        "models": 211,  # advertised count; AI_MODELS holds only a sample
        "endpoints": {
            "health": "/health",
            "auth": "/auth/signup, /auth/login",
            "ai": "/ai/chat, /ai/generate",
            "models": "/models/list",
        },
    }
# NOTE(review): decorator restored — handler was never registered on `app`.
@app.get("/health")
async def health_check():
    """Health check endpoint for uptime monitoring."""
    return {
        "status": "healthy",
        "timestamp": datetime.now().isoformat(),
        "gpu_available": False,  # We're using HF API, not local GPU
        "backend": "HuggingFace Inference API",
        "models_available": 211,  # advertised count; AI_MODELS is a sample
        "cloudflare_configured": bool(CLOUDFLARE_CONFIG["api_token"]),
    }
# NOTE(review): decorator restored — handler was never registered on `app`.
@app.post("/auth/signup")
async def signup(request: SignupRequest):
    """User registration endpoint.

    Validates the password length, rejects duplicate mobile numbers, and
    stores the new user with a hashed password.

    Raises:
        HTTPException 400: password too short or mobile already registered.
        HTTPException 500: unexpected database/runtime failure.
    """
    try:
        if len(request.password) < 6:
            raise HTTPException(
                status_code=400, detail="Password must be at least 6 characters"
            )
        # closing() guarantees the connection is released even if a query
        # raises (the original leaked the handle on the error paths).
        with closing(sqlite3.connect("openmanus.db")) as conn:
            cursor = conn.cursor()
            # Reject duplicates up front for a friendly 400 instead of
            # surfacing the UNIQUE-constraint failure as a 500.
            cursor.execute(
                "SELECT mobile FROM users WHERE mobile = ?", (request.mobile,)
            )
            if cursor.fetchone():
                raise HTTPException(
                    status_code=400, detail="Mobile number already registered"
                )
            password_hash = hash_password(request.password)
            cursor.execute(
                "INSERT INTO users (mobile, name, password_hash) VALUES (?, ?, ?)",
                (request.mobile, request.name, password_hash),
            )
            conn.commit()
        return {
            "success": True,
            "message": "Account created successfully",
            "mobile": request.mobile,
            "name": request.name,
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Registration failed: {str(e)}")
# NOTE(review): decorator restored — handler was never registered on `app`.
@app.post("/auth/login")
async def login(request: LoginRequest):
    """User login endpoint.

    Looks up the user by mobile number and verifies the password. The same
    401 detail is returned for unknown users and wrong passwords so the
    response does not reveal which part failed.

    Raises:
        HTTPException 401: invalid credentials.
        HTTPException 500: unexpected database/runtime failure.
    """
    try:
        # closing() guarantees the connection is released even if the
        # query raises (the original leaked the handle on that path).
        with closing(sqlite3.connect("openmanus.db")) as conn:
            cursor = conn.cursor()
            cursor.execute(
                "SELECT name, password_hash FROM users WHERE mobile = ?",
                (request.mobile,),
            )
            result = cursor.fetchone()
        if not result:
            raise HTTPException(
                status_code=401, detail="Invalid mobile number or password"
            )
        name, password_hash = result
        if not verify_password(request.password, password_hash):
            raise HTTPException(
                status_code=401, detail="Invalid mobile number or password"
            )
        return {
            "success": True,
            "message": "Login successful",
            "user": {"mobile": request.mobile, "name": name},
            # Cryptographically random token (same opaque 32-hex-char shape
            # as before); the previous hash-of-mobile+timestamp value was
            # predictable. NOTE(review): the token is never stored or
            # validated anywhere in this module — sessions are decorative.
            "token": f"session_{secrets.token_hex(16)}",
        }
    except HTTPException:
        raise
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Login failed: {str(e)}")
# NOTE(review): decorator restored — handler was never registered on `app`.
@app.post("/ai/chat")
async def ai_chat(request: ChatRequest):
    """AI chat endpoint - main endpoint for AI interactions.

    Rebuilds the conversation from ``request.history`` plus the new message,
    streams a chat completion from the HuggingFace Inference API, and
    returns the concatenated response text.

    Raises:
        HTTPException 500: any failure while calling the inference API.
    """
    try:
        # Prior turns first; `or []` guards against a client sending an
        # explicit null history (pydantic then yields None, not []).
        messages = [
            {"role": msg.get("role", "user"), "content": msg.get("content", "")}
            for msg in (request.history or [])
        ]
        messages.append({"role": "user", "content": request.message})

        # Stream the completion and accumulate the delta chunks.
        response_text = ""
        for chunk in inference_client.chat_completion(
            model=request.model,
            messages=messages,
            max_tokens=2000,  # fixed: ChatRequest carries no generation params
            temperature=0.7,
            stream=True,
        ):
            if hasattr(chunk, "choices") and len(chunk.choices) > 0:
                delta = chunk.choices[0].delta
                if hasattr(delta, "content") and delta.content:
                    response_text += delta.content

        return {
            "success": True,
            "response": response_text,
            "model": request.model,
            "timestamp": datetime.now().isoformat(),
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"AI generation failed: {str(e)}")
# NOTE(review): decorator restored — handler was never registered on `app`.
@app.post("/ai/generate")
async def ai_generate(request: AIRequest):
    """Generic AI generation endpoint.

    Routes on the model name: FLUX / stable-diffusion models return an
    image-generation stub, names containing "video" return a video stub,
    and everything else is treated as text generation streamed from the
    HuggingFace Inference API.

    Raises:
        HTTPException 500: any failure while calling the inference API.
    """
    try:
        model_lower = request.model.lower()
        if "flux" in model_lower or "stable-diffusion" in model_lower:
            # Image generation is not implemented; return a stub payload.
            return {
                "success": True,
                "type": "image",
                "message": f"Image generation with {request.model}",
                "prompt": request.prompt,
                "note": "Image will be generated using HuggingFace Inference API",
            }
        if "video" in model_lower:
            # Video generation is not implemented; return a stub payload.
            return {
                "success": True,
                "type": "video",
                "message": f"Video generation with {request.model}",
                "prompt": request.prompt,
                "note": "Video will be generated using HuggingFace Inference API",
            }
        # Text generation (default): stream and accumulate delta chunks.
        messages = [{"role": "user", "content": request.prompt}]
        response_text = ""
        for chunk in inference_client.chat_completion(
            model=request.model,
            messages=messages,
            max_tokens=request.max_tokens,
            temperature=request.temperature,
            stream=True,
        ):
            if hasattr(chunk, "choices") and len(chunk.choices) > 0:
                delta = chunk.choices[0].delta
                if hasattr(delta, "content") and delta.content:
                    response_text += delta.content
        return {
            "success": True,
            "type": "text",
            "response": response_text,
            "model": request.model,
            "timestamp": datetime.now().isoformat(),
        }
    except Exception as e:
        raise HTTPException(status_code=500, detail=f"Generation failed: {str(e)}")
# NOTE(review): decorator restored — handler was never registered on `app`.
@app.get("/models/list")
async def list_models():
    """List all available AI models, grouped by category."""
    # NOTE(review): "total" is hard-coded; AI_MODELS currently contains
    # only a sample of the advertised 211 models.
    return {
        "total": 211,
        "categories": AI_MODELS,
        "note": "All models are accessed via HuggingFace Inference API",
    }
# NOTE(review): decorator restored — handler was never registered on `app`.
# The path is inferred (it is not listed in root's endpoint map) — TODO
# confirm the intended route against the frontend.
@app.get("/cloudflare/status")
async def cloudflare_status():
    """Report which Cloudflare services have configuration present.

    Only checks that config values are non-empty; it does not verify
    connectivity to Cloudflare.
    """
    # NOTE(review): the "β " prefix below looks like mojibake (probably a
    # mis-encoded check-mark). Left byte-identical to avoid changing the
    # API's observable output.
    services = []
    if CLOUDFLARE_CONFIG["api_token"]:
        services.append("β API Token Configured")
    if CLOUDFLARE_CONFIG["d1_database_id"]:
        services.append("β D1 Database Connected")
    if CLOUDFLARE_CONFIG["r2_bucket_name"]:
        services.append("β R2 Storage Connected")
    if CLOUDFLARE_CONFIG["kv_namespace_id"]:
        services.append("β KV Sessions Connected")
    if CLOUDFLARE_CONFIG["kv_namespace_cache"]:
        services.append("β KV Cache Connected")
    if CLOUDFLARE_CONFIG["durable_objects_sessions"]:
        services.append("β Durable Objects (Agent Sessions)")
    if CLOUDFLARE_CONFIG["durable_objects_chatrooms"]:
        services.append("β Durable Objects (Chat Rooms)")
    return {
        "configured": len(services) > 0,
        "services": services,
        "account_id": CLOUDFLARE_CONFIG["account_id"],
    }
if __name__ == "__main__":
    # Bind on all interfaces; 7860 is presumably chosen because it is the
    # port HuggingFace Spaces expects a web app to serve on (see the
    # Spaces banner at the top of this file) — TODO confirm deployment.
    uvicorn.run(app, host="0.0.0.0", port=7860, log_level="info")