Demo: https://rkn.aniworldai.org/

FP32 · 0.2B parameters

```python
# Important! For TensorRT, import tensorrt BEFORE onnxruntime,
# so TensorRT's shared libraries are loaded first.
# import tensorrt

import onnxruntime as ort
import numpy as np
from PIL import Image

def classify_anime_age(image_path, model_path="model.onnx"):
    # Automatic device selection: prefer TensorRT, then CUDA,
    # then OpenVINO, and always fall back to CPU.
    providers = []
    available = ort.get_available_providers()
    if 'TensorrtExecutionProvider' in available:  # Nvidia GPU + TensorRT
        providers.append(('TensorrtExecutionProvider', {'trt_fp16_enable': True}))
    if 'CUDAExecutionProvider' in available:      # Nvidia GPU (CUDA)
        providers.append('CUDAExecutionProvider')
    if 'OpenVINOExecutionProvider' in available:  # Intel GPU/CPU
        providers.append(('OpenVINOExecutionProvider', {'device_type': 'AUTO'}))
    providers.append('CPUExecutionProvider')      # Any CPU

    # Load the model
    session = ort.InferenceSession(model_path, providers=providers)

    # Prepare the image: RGB, 512x512, scaled to [0, 1]
    img = Image.open(image_path).convert('RGB').resize((512, 512))
    img_array = np.array(img, dtype=np.float32) / 255.0
    img_array = img_array.transpose(2, 0, 1)      # HWC -> CHW
    img_array = np.expand_dims(img_array, axis=0) # CHW -> NCHW

    # Run inference
    outputs = session.run(None, {session.get_inputs()[0].name: img_array})

    # Post-process: sigmoid turns the raw logit into a probability
    prob = float(1.0 / (1.0 + np.exp(-outputs[0][0][0])))
    result = "Adult" if prob > 0.5 else "Minor"

    return result, prob, session.get_providers()[0]
```

Model tree for Timmek/AniCompliance16: one quantized version is derived from this model.