AvamNeupane121 committed on
Commit
d8a810e
·
verified ·
1 Parent(s): 7f039ff

Create owl.py

Browse files
Files changed (1) hide show
  1. owl.py +84 -0
owl.py ADDED
@@ -0,0 +1,84 @@
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
 
1
"""Zero-shot object detection with OWLv2.

Loads an image, finds regions matching free-text queries, prints each
detection, and saves a copy of the image with labeled bounding boxes.
"""

import os  # NOTE(review): unused in the original script; kept to preserve its imports

import requests  # NOTE(review): unused in the original script; kept to preserve its imports
import torch
from PIL import Image, ImageDraw, ImageFont

from transformers import Owlv2Processor, Owlv2ForObjectDetection

# Checkpoint used for text-conditioned (zero-shot) detection.
MODEL_NAME = "google/owlv2-large-patch14-ensemble"

# Colors cycled through for successive detections.
COLORS = ["red", "blue", "green", "orange", "purple", "yellow", "pink", "cyan"]

# Vertical offset (px) at which a label is drawn above its bounding box.
LABEL_OFFSET = 25


def _load_font(size: int = 16):
    """Return a TrueType font when available, else PIL's built-in default."""
    try:
        return ImageFont.truetype("arial.ttf", size)
    except OSError:
        return ImageFont.load_default()


def _draw_detection(draw, font, box, confidence, label, color):
    """Draw one bounding box plus a filled label tag onto *draw*."""
    xmin, ymin, xmax, ymax = box
    draw.rectangle([xmin, ymin, xmax, ymax], outline=color, width=3)

    label_text = f"{label}: {confidence}"
    # Clamp the label inside the image so boxes near the top edge don't
    # push their caption off-canvas (fix over the original `ymin - 25`).
    text_y = max(ymin - LABEL_OFFSET, 0)
    # Background rectangle sized from the rendered text's bounding box.
    bbox = draw.textbbox((xmin, text_y), label_text, font=font)
    draw.rectangle([bbox[0] - 2, bbox[1] - 2, bbox[2] + 2, bbox[3] + 2], fill=color)
    draw.text((xmin, text_y), label_text, fill="white", font=font)


def main(image_path: str = "image.jpg",
         output_path: str = "output_img.jpg",
         queries=("a person with a hat",),
         threshold: float = 0.1) -> None:
    """Run OWLv2 detection for *queries* on *image_path*; save an annotated copy.

    Args:
        image_path: Path of the input image.
        output_path: Where the annotated image is written.
        queries: Free-text descriptions of what to detect.
        threshold: Minimum confidence for a detection to be kept.
    """
    processor = Owlv2Processor.from_pretrained(MODEL_NAME)
    model = Owlv2ForObjectDetection.from_pretrained(MODEL_NAME)
    model.eval()  # inference only — disable dropout / train-mode behavior

    # Force RGB so alpha/palette images survive both the model and JPEG save.
    image = Image.open(image_path).convert("RGB")

    # One inner list per image in the batch (single image here).
    text_labels = [list(queries)]
    inputs = processor(text=text_labels, images=image, return_tensors="pt")
    with torch.no_grad():  # no gradients needed for inference
        outputs = model(**inputs)

    # Target sizes (height, width) rescale box predictions to the original image.
    target_sizes = torch.tensor([(image.height, image.width)])
    # Convert outputs to Pascal VOC format boxes (xmin, ymin, xmax, ymax).
    results = processor.post_process_grounded_object_detection(
        outputs=outputs, target_sizes=target_sizes,
        threshold=threshold, text_labels=text_labels,
    )

    # Single image in the batch -> first result dict.
    result = results[0]

    # Draw on a copy so the source image stays untouched.
    output_image = image.copy()
    draw = ImageDraw.Draw(output_image)
    font = _load_font()

    print("Detection Results:")
    print("-" * 50)

    detections = zip(result["boxes"], result["scores"], result["text_labels"])
    for idx, (box, score, text_label) in enumerate(detections):
        # Distinct name for the coordinate variable (original reused `i`).
        rounded_box = [round(coord, 2) for coord in box.tolist()]
        confidence = round(score.item(), 3)
        print(f"Detected {text_label} with confidence {confidence} at location {rounded_box}")
        _draw_detection(draw, font, rounded_box, confidence, text_label,
                        COLORS[idx % len(COLORS)])

    output_image.save(output_path)
    print(f"\nOutput image saved as: {output_path}")


if __name__ == "__main__":
    main()