# app.py — Hugging Face Space: fire/smoke video classifier (Gradio demo).
import torch
import torchvision
from torchvision import transforms
import gradio as gr
import os
import cv2
from PIL import Image
from model import create_model
# Build the classifier and its matching preprocessing pipeline.
# 3 classes: default / fire / smoke (see class_names in classify_video).
model,transform=create_model(num_of_classes=3)
# map_location="cpu" lets the checkpoint load on machines without a GPU.
model.load_state_dict(torch.load("fire_smoke_weights.pth", map_location=torch.device("cpu")))
# Inference mode: freezes dropout / batch-norm statistics.
model.eval()
def classify_video(video):
    """Classify a video as fire, smoke, both, or default.

    Args:
        video: Path to a video file readable by OpenCV.

    Returns:
        str: "Fire and Smoke Spotted" when both classes appear in more
        than 5 frames each; otherwise the label for the highest class
        index predicted across all frames; or an explanatory message
        when the video yields no readable frames.
    """
    class_names = ['DEFAULT', 'FIRE Spotted', 'SMOKE Spotted']
    cap = cv2.VideoCapture(video)
    predictions = []
    try:
        while cap.isOpened():
            ret, frame = cap.read()
            if not ret:  # end of stream or decode failure
                break
            # OpenCV decodes frames as BGR; the model expects RGB input.
            img = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)
            img_tensor = transform(Image.fromarray(img)).unsqueeze(0)
            with torch.no_grad():
                output = model(img_tensor)
            predictions.append(output.argmax().item())
    finally:
        # Release the capture even if preprocessing/inference raises
        # (the original leaked the handle on any exception).
        cap.release()
    # Guard: max() on an empty sequence raises ValueError (original bug
    # for unreadable/empty videos).
    if not predictions:
        return "No readable frames found in the video"
    fire_frames = predictions.count(1)
    smoke_frames = predictions.count(2)
    # Require more than 5 frames of each class before reporting both,
    # filtering out one-off per-frame misclassifications.
    if fire_frames > 5 and smoke_frames > 5:
        return "Fire and Smoke Spotted"
    return class_names[max(predictions)]
# UI copy shown on the Gradio page.
Description="An MobileNET model trained to classify Fire and Smoke through Videos"
Article="Created at jupyter NoteBook with GPU NVIDIA_GeForce_MX350"

# Build and launch the web demo.  `live` must be a bool — the original
# passed the string "True", which only behaved correctly because any
# non-empty string is truthy.
gr.Interface(
    fn=classify_video,
    inputs=gr.Video(streaming=True),
    outputs="text",
    title="Fire and Smoke Classifier",
    description=Description,
    article=Article,
    live=True,
).launch()