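# Extract CLIP image features for all JPEGs in ./images/ with an open_clip
# ViT-B-16+ (240 px) image encoder, alongside a multilingual M-CLIP text
# encoder, and save the features plus the corresponding file list to disk.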
#import clip
import torch
from multilingual_clip import pt_multilingual_clip
import open_clip
import numpy as np
import os
from PIL import Image
from transformers import AutoTokenizer

images_path = './images/'

# Collect all JPEG files from the images directory
images = []
for item in os.listdir(images_path):
    if os.path.isfile(images_path + item) and item.endswith('jpg'):
        images.append(images_path + item)

print("total images:", len(images))

device = "cuda" if torch.cuda.is_available() else "cpu"

#model_name = 'M-CLIP/XLM-Roberta-Large-Vit-L-14'
model_name = 'M-CLIP/XLM-Roberta-Large-Vit-B-16Plus'

# Multilingual text encoder paired with the ViT-B-16+ CLIP image encoder below
text_model = pt_multilingual_clip.MultilingualCLIP.from_pretrained(model_name)
tokenizer = AutoTokenizer.from_pretrained(model_name)

print("text model parameters:", f"{sum(p.numel() for p in text_model.parameters()):,}")

#clip_model_name = 'ViT-L/14'
#clip_model_name = 'ViT-B/16'

#clip_model, compose = clip.load(clip_model_name, device=device)

# CLIP image encoder (ViT-B-16+ at 240 px, LAION-400M weights) matching the text model above
clip_model, _, preprocess = open_clip.create_model_and_transforms('ViT-B-16-plus-240', pretrained="laion400m_e32")
clip_model.to(device)
clip_model.eval()  # inference only

print("CLIP image model parameters:", f"{sum(p.numel() for p in clip_model.parameters()):,}")

# The same caption in English and in Norwegian
texts = ['Three blind horses listening to Mozart',
         'Tre blinde hester som lytter til Mozart']

# Encode the texts with the multilingual model (one embedding per text)
text_embeddings = text_model.forward(texts, tokenizer)
print("text embeddings:", text_embeddings.shape)

batch_size = 10  # unused for now; the batched variant is commented out below
image_embeddings = []

# Encode each image and write its filename to images_list.txt so that rows
# in the saved feature matrix can be mapped back to image files.
with open("images_list.txt", "w", encoding="utf-8") as images_list:
    for i in range(0, len(images)): #, batch_size):
        #images_list.write("\n".join([im_path.split("/")[-1] for im_path in images[i:min(i+batch_size, len(images))]]) + "\n")
        images_list.write(images[i].split("/")[-1] + "\n")
        image = Image.open(images[i])
        #img_input = torch.stack([compose(Image.open(img_path)).to(device) for img_path in images[i:min(i+batch_size, len(images))]])
        img_input = preprocess(image).unsqueeze(0).to(device)
        with torch.no_grad():
            img_embs = clip_model.encode_image(img_input).float()
            image_embeddings.extend(img_embs.detach().cpu().numpy())

image_embeddings_np = np.array(image_embeddings)
print("image embeddings:", image_embeddings_np.shape)
np.save("multilinugal_features.npy", image_embeddings_np)
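
# Usage sketch: rank the extracted image features against the multilingual
# text embeddings with cosine similarity. This is an illustrative addition,
# not part of the feature-export pipeline above; it simply reuses the
# in-memory variables (texts, text_embeddings, image_embeddings_np, images).
if len(images) > 0:
    text_np = text_embeddings.detach().cpu().numpy()
    text_np = text_np / np.linalg.norm(text_np, axis=1, keepdims=True)
    img_np = image_embeddings_np / np.linalg.norm(image_embeddings_np, axis=1, keepdims=True)
    sims = text_np @ img_np.T  # shape: (num_texts, num_images)
    for text, row in zip(texts, sims):
        best = int(row.argmax())
        print(f"best match for '{text}':", images[best], f"(cosine={row[best]:.3f})")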