import gradio as gr
import torch
import cv2
import numpy as np

# Load the pre-trained YOLOv5 model
model = torch.hub.load('ultralytics/yolov5', 'yolov5s', pretrained=True)
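
# Optional: the YOLOv5 hub model exposes a few AutoShape inference settings
# (a sketch of the documented knobs; uncomment and tune as needed):
# model.conf = 0.4      # minimum confidence for a box to be reported
# model.classes = [0]   # restrict detections to the COCO "person" class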

def detect_people(video):
    # Open the uploaded video; Gradio's Video component passes the file path as a string
    cap = cv2.VideoCapture(video)

    # Running count of person detections across all frames
    num_people_detected = 0

    # Loop through each frame of the video
    while cap.isOpened():
        # Read the frame
        ret, frame = cap.read()

        # If there are no more frames, break out of the loop
        if not ret:
            break

        # Convert the frame from BGR (OpenCV's default) to RGB before inference
        rgb_frame = cv2.cvtColor(frame, cv2.COLOR_BGR2RGB)

        # Run the YOLOv5 model on the frame
        results = model(rgb_frame, size=640)

        # Count only "person" detections (class 0 in COCO); results.xyxy[0]
        # holds one row per detection: [x1, y1, x2, y2, confidence, class]
        detections = results.xyxy[0]
        num_people_detected += int((detections[:, 5] == 0).sum())

    # Release the video capture object
    cap.release()

    # Return the total number of person detections summed over all frames
    # (a person visible in many frames is counted once per frame)
    return num_people_detected
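
# A minimal local sanity check, assuming a clip named "sample.mp4" sits next to
# this script (the filename is only illustrative): uncomment to verify the
# detection loop without launching the web UI.
# print(detect_people("sample.mp4"))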

# Define the input and output components for the Gradio app
inputs = gr.Video(label="Upload a video")
outputs = gr.Textbox(label="Number of people detected")

# Create and launch the Gradio app
gr.Interface(
    fn=detect_people,
    inputs=inputs,
    outputs=outputs,
    title="Object Detection App",
    description="Upload a video to detect the number of people in it.",
).launch()