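"""Thin wrapper around LLaVA-OneVision (Hugging Face transformers) that
answers queries whose text interleaves <image_N> placeholders with the
corresponding images."""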
import re
import logging

import torch
from transformers import AutoProcessor, LlavaOnevisionForConditionalGeneration

def create_message(sample):
    """Split a query containing <image_N> placeholders into the interleaved
    text/image content list expected by the chat template, and collect the
    referenced images in order."""
    query = sample['query']
    all_contents = []
    matches = re.findall(r"<(image_\d+)>", query)
    split_text = re.split(r"<image_\d+>", query)
    images = []
    for i, fragment in enumerate(split_text):
        if fragment.strip():
            all_contents.append({"type": "text", "text": fragment})
        if i < len(matches):
            # Use .get() so a placeholder without a matching sample key is
            # reported instead of raising a KeyError.
            image = sample.get(matches[i])
            if image:
                all_contents.append({"type": "image"})
                images.append(image)
            else:
                logging.error(
                    f"The image token {matches[i]} is in the query, but the sample provides no corresponding image.")
    messages = [
        {
            "role": "user",
            "content": all_contents
        }
    ]
    return messages, images


class Llava_Model:
    def __init__(
            self,
            model_path,
            temperature=0,
            max_tokens=1024
    ):
        self.temperature = temperature
        self.max_tokens = max_tokens
        # Load in fp16 and shard across available devices.
        self.model = LlavaOnevisionForConditionalGeneration.from_pretrained(
            model_path,
            torch_dtype=torch.float16,
            device_map="auto",
            attn_implementation="flash_attention_2"  # replaces the deprecated use_flash_attention_2 flag
        )
        self.processor = AutoProcessor.from_pretrained(model_path)


    def get_response(self, sample):
        """Run one sample through the model and return the generated answer,
        or None if anything fails."""
        model = self.model
        processor = self.processor

        try:
            messages, images = create_message(sample)

            input_text = processor.apply_chat_template(messages, add_generation_prompt=True)
            inputs = processor(
                images=images,
                text=input_text,
                add_special_tokens=False,
                return_tensors="pt"
            ).to(model.device, torch.float16)

            # Sample only when temperature > 0: transformers rejects
            # do_sample=True combined with temperature=0.
            if self.temperature > 0:
                output = model.generate(**inputs, do_sample=True, temperature=self.temperature,
                                        max_new_tokens=self.max_tokens)
            else:
                output = model.generate(**inputs, do_sample=False, max_new_tokens=self.max_tokens)
            response = processor.decode(output[0], skip_special_tokens=True)

            # The decoded string includes the prompt; keep only the text after
            # the "assistant" turn marker emitted by the chat template.
            assistant_index = response.find("assistant")
            if assistant_index != -1:
                final_answer = response[assistant_index + len("assistant"):].strip()
            else:
                final_answer = response.strip()
            return final_answer

        except Exception as e:
            logging.error(e)
            return None
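

# Minimal usage sketch (illustrative, not part of the original file): the
# checkpoint id, image filenames, and sample layout below are assumptions.
# A sample carries the query string plus one image (e.g. a PIL.Image) per
# <image_N> placeholder the query references.
if __name__ == "__main__":
    from PIL import Image

    llava = Llava_Model("llava-hf/llava-onevision-qwen2-7b-ov-hf")  # hypothetical checkpoint
    sample = {
        "query": "Compare <image_1> and <image_2>. Which scene is brighter?",
        "image_1": Image.open("scene_a.jpg"),  # hypothetical local files
        "image_2": Image.open("scene_b.jpg"),
    }
    print(llava.get_response(sample))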