Demo: https://rkn.aniworldai.org/

FP32 | 0.2B parameters
```python
# Important! For TensorRT, import tensorrt BEFORE onnxruntime
# import tensorrt
import onnxruntime as ort
import numpy as np
from PIL import Image

def classify_anime_age(image_path, model_path="model.onnx"):
    # Automatic device selection: prefer TensorRT, then CUDA, then OpenVINO, then CPU
    providers = []
    available = ort.get_available_providers()
    if 'TensorrtExecutionProvider' in available:  # NVIDIA GPU + TensorRT
        providers.append(('TensorrtExecutionProvider', {'trt_fp16_enable': True}))
    if 'CUDAExecutionProvider' in available:  # CUDA
        providers.append('CUDAExecutionProvider')
    if 'OpenVINOExecutionProvider' in available:  # Intel GPU/CPU
        providers.append(('OpenVINOExecutionProvider', {'device_type': 'AUTO'}))
    providers.append('CPUExecutionProvider')  # Fallback: any CPU

    # Load model
    session = ort.InferenceSession(model_path, providers=providers)

    # Prepare image: RGB, 512x512, pixel values scaled to [0, 1]
    img = Image.open(image_path).convert('RGB').resize((512, 512))
    img_array = np.array(img, dtype=np.float32) / 255.0
    img_array = img_array.transpose(2, 0, 1)       # HWC -> CHW
    img_array = np.expand_dims(img_array, axis=0)  # CHW -> NCHW

    # Run inference
    outputs = session.run(None, {session.get_inputs()[0].name: img_array})

    # Process results (sigmoid turns the raw logit into a probability)
    prob = 1.0 / (1.0 + np.exp(-outputs[0][0][0]))
    result = "Adult" if prob > 0.5 else "Minor"
    return result, prob, session.get_providers()[0]
```
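
A minimal usage sketch. The file names `model.onnx` and `test.png` are placeholders; point them at the downloaded model file and your own image:

```python
# Hypothetical paths for illustration only.
label, probability, provider = classify_anime_age("test.png", model_path="model.onnx")
print(f"{label} (p={probability:.3f}) via {provider}")
```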
Model tree for Timmek/AniCompliance16

Base model: timm/convnext_large.fb_in22k_ft_in1k

