JPShi committed
Commit 30a6585 · 1 Parent(s): d8f8311

Delete merge_lora_weights.py

Files changed (1)
  1. merge_lora_weights.py +0 -150
merge_lora_weights.py DELETED
@@ -1,150 +0,0 @@
import argparse
import sys

import torch
import transformers
from peft import LoraConfig, get_peft_model

from model.VISA_multiseg import VrshqForCausalLM
# from utils.utils import DEFAULT_IM_END_TOKEN, DEFAULT_IM_START_TOKEN

"""
python merge_lora_weights.py \
    --version /mnt/nlp-ali/usr/yancilin/clyan-data-2/video-llm/Chat-UniVi/Chat-UniVi \
    --weight /mnt/public03/dataset/ovis/rgvos/visa7b/ckpt_model/pytorch_model15000.bin \
    --save_path /mnt/public03/dataset/ovis/rgvos/visa7b/ckpt_model/hf_model
"""

DEFAULT_IM_START_TOKEN = "<im_start>"
DEFAULT_IM_END_TOKEN = "<im_end>"

def parse_args(args):
    parser = argparse.ArgumentParser(description="merge LoRA weights and save the model in HF format")
    parser.add_argument("--version", default="chat_univi", type=str)  # path to the Chat-UniVi base model
    parser.add_argument("--weight", default="/18515601223/segment-anything-2/runs/VISA-SAM2-MULTISEG-0.1/pytorch_model.bin", type=str)  # path to your checkpoint
    parser.add_argument("--save_path", default="/18515601223/segment-anything-2/save_weights_multiseg_0.1_bf16", type=str)
    parser.add_argument("--precision", default="bf16", type=str, choices=["fp32", "bf16", "fp16"], help="precision for inference")
    parser.add_argument("--out_dim", default=256, type=int)
    parser.add_argument("--image_size", default=1024, type=int, help="image size")
    parser.add_argument("--model_max_length", default=2048, type=int)
    parser.add_argument("--vision_tower", default="openai/clip-vit-large-patch14", type=str)
    parser.add_argument("--lora_r", default=8, type=int)
    parser.add_argument("--lora_alpha", default=16, type=int)
    parser.add_argument("--lora_dropout", default=0.05, type=float)
    parser.add_argument("--lora_target_modules", default="q_proj,v_proj", type=str)
    parser.add_argument("--local_rank", default=0, type=int, help="node rank")
    parser.add_argument("--train_mask_decoder", action="store_true", default=True)
    parser.add_argument("--use_mm_start_end", action="store_true", default=False)
    parser.add_argument("--conv_type", default="llava_v1", type=str, choices=["llava_v1", "llava_llama_2"])
    parser.add_argument("--alpha", default=0.1, type=float)
    return parser.parse_args(args)


def main(args):
    args = parse_args(args)

    # Create the tokenizer and register the [SEG] / [TAK] tokens.
    tokenizer = transformers.AutoTokenizer.from_pretrained(
        pretrained_model_name_or_path=args.version,
        cache_dir=None,
        model_max_length=args.model_max_length,
        padding_side="right",
        use_fast=False,
    )
    tokenizer.pad_token = tokenizer.unk_token
    tokenizer.add_tokens("[SEG]")
    args.seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[-1]

    tokenizer.add_tokens("[TAK]")
    args.track_token_idx = tokenizer("[TAK]", add_special_tokens=False).input_ids[-1]

    if args.use_mm_start_end:
        tokenizer.add_tokens([DEFAULT_IM_START_TOKEN, DEFAULT_IM_END_TOKEN], special_tokens=True)

    model_args = {
        "train_mask_decoder": args.train_mask_decoder,
        "out_dim": args.out_dim,
        "seg_token_idx": args.seg_token_idx,
        "vision_tower": args.vision_tower,
        "track_token_idx": args.track_token_idx,
        "seg_token_num": 1,
        "alpha": args.alpha,
    }

    torch_dtype = torch.float32
    if args.precision == "bf16":
        torch_dtype = torch.bfloat16
    elif args.precision == "fp16":
        torch_dtype = torch.half
    model = VrshqForCausalLM.from_pretrained(
        pretrained_model_name_or_path=args.version, torch_dtype=torch_dtype, **model_args
    )
    model.config.eos_token_id = tokenizer.eos_token_id
    model.config.bos_token_id = tokenizer.bos_token_id
    model.config.pad_token_id = tokenizer.pad_token_id

    # Load the pretrained CLIP vision tower.
    model.get_model().initialize_vision_modules(model.get_model().config)
    vision_tower = model.get_model().get_vision_tower()
    vision_tower.to(dtype=torch_dtype)
    model.get_model().initialize_lisa_modules(model.get_model().config)

    lora_r = args.lora_r
    if lora_r > 0:

        def find_linear_layers(model, lora_target_modules):
            cls = torch.nn.Linear
            lora_module_names = set()
            for name, module in model.named_modules():
                if (
                    isinstance(module, cls)
                    and all(x not in name for x in ["visual_model", "vision_tower", "mm_projector", "text_hidden_fcs"])
                    and any(x in name for x in lora_target_modules)
                ):
                    lora_module_names.add(name)
            return sorted(list(lora_module_names))

        lora_alpha = args.lora_alpha
        lora_dropout = args.lora_dropout
        lora_target_modules = find_linear_layers(model, args.lora_target_modules.split(","))
        lora_config = LoraConfig(
            r=lora_r,
            lora_alpha=lora_alpha,
            target_modules=lora_target_modules,
            lora_dropout=lora_dropout,
            bias="none",
            task_type="CAUSAL_LM",
        )
        model = get_peft_model(model, lora_config)
        model.print_trainable_parameters()

    model.resize_token_embeddings(len(tokenizer))

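    # Note: PEFT's merge_and_unload() below folds each LoRA update back into
    # its base weight (conceptually W' = W + (lora_alpha / lora_r) * B @ A)
    # and strips the adapter wrappers, leaving a plain HF model to save.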
    # for key in model.state_dict().keys():
    #     print(key)

    state_dict = torch.load(args.weight, map_location="cpu")
    model.load_state_dict(state_dict, strict=True)

    model = model.merge_and_unload()
    # Filter the CLIP vision encoder parameters out of the saved state dict.
    # (Both substrings must be absent; the original pair of independent `if`s
    # re-added every "vision_tower" key that lacked "clip_model".)
    state_dict = {}
    for k, v in model.state_dict().items():
        if "vision_tower" not in k and "clip_model" not in k:
            state_dict[k] = v
    model.save_pretrained(args.save_path, state_dict=state_dict)
    tokenizer.save_pretrained(args.save_path)


if __name__ == "__main__":
    main(sys.argv[1:])
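
For context, a minimal sketch of loading the merged checkpoint this script writes to --save_path. This is not part of the deleted file: the load-time kwargs are assumptions that mirror the merge-time model_args, and since the CLIP vision tower is filtered out of the saved state dict, it is assumed to be re-initialized at load time.

import torch
import transformers

from model.VISA_multiseg import VrshqForCausalLM

# Hypothetical path for illustration; use the --save_path from the merge run.
save_path = "/mnt/public03/dataset/ovis/rgvos/visa7b/ckpt_model/hf_model"

tokenizer = transformers.AutoTokenizer.from_pretrained(save_path, use_fast=False)
seg_token_idx = tokenizer("[SEG]", add_special_tokens=False).input_ids[-1]
track_token_idx = tokenizer("[TAK]", add_special_tokens=False).input_ids[-1]

model = VrshqForCausalLM.from_pretrained(
    save_path,
    torch_dtype=torch.bfloat16,  # match the --precision used when merging
    # Assumed to be required at load time, mirroring merge-time model_args:
    seg_token_idx=seg_token_idx,
    track_token_idx=track_token_idx,
    vision_tower="openai/clip-vit-large-patch14",
    train_mask_decoder=True,
    out_dim=256,
    seg_token_num=1,
    alpha=0.1,
)
# Re-create the CLIP vision tower, whose weights were filtered out on save.
model.get_model().initialize_vision_modules(model.get_model().config)
model.get_model().get_vision_tower().to(dtype=torch.bfloat16)
model.eval()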