# VLMEvalKit/do_eval.py
import argparse
import json
import os
import subprocess
from datetime import datetime

# Empirical choice of decoding preset (defined below) for each dataset.
full_datasets = {
"MathVista_MINI": "train_prompt_sampling",
"MathVision": "train_prompt_greedy",
"MathVerse_MINI": "train_prompt_greedy",
"MMMU_DEV_VAL": "origin_prompt_greedy",
"MMStar": "train_prompt_greedy",
"DynaMath": "train_prompt_greedy",
"WeMath": "train_prompt_greedy",
"TextVQA_VAL": "origin_prompt_greedy",
"DocVQA_TEST": "origin_prompt_greedy",
"MMVet": "origin_prompt_greedy",
}
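
# Decoding presets. The "train_prompt" presets enable the reasoning prompt
# (use_reasoning_prompt=2), while "origin_prompt" keeps the dataset's default
# prompt. The "greedy" presets approximate greedy decoding through sampling
# with top_k=1 and a near-zero temperature.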
settings = {
"train_prompt_sampling": {
"use_reasoning_prompt": 2,
"do_sample": True,
"top_p": 1,
"top_k": -1,
"temperature": 1,
},
"train_prompt_greedy": {
"use_reasoning_prompt": 2,
"do_sample": True,
"top_p": 0.001,
"top_k": 1,
"temperature": 0.01,
},
"origin_prompt_greedy": {
"use_reasoning_prompt": 0,
"do_sample": True,
"top_p": 0.001,
"top_k": 1,
"temperature": 0.01,
},
}


def main():
parser = argparse.ArgumentParser()
parser.add_argument("--run_name", type=str, required=True, help="Name of the run")
parser.add_argument("--gpus", type=int, default=8, help="Number of GPUs to use")
parser.add_argument("--path", type=str, required=True, help="Path to the model")
parser.add_argument(
"--dataset", type=str, nargs="+", required=True, help="List of datasets to use"
)
parser.add_argument(
"--min_pixels", type=int, default=3136, help="Minimum number of pixels"
)
parser.add_argument(
"--max_pixels", type=int, default=12845056, help="Maximum number of pixels"
)
parser.add_argument(
"--max_new_tokens", type=int, default=2048, help="Maximum number of new tokens"
)
args = parser.parse_args()
assert len(args.dataset), "--dataset should be a list of datasets"
datasets = args.dataset
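    # A single "full" argument expands to every supported dataset.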
if len(args.dataset) == 1 and args.dataset[0] == "full":
datasets = list(full_datasets.keys())
for dataset in datasets:
assert (
dataset in full_datasets
), f"Dataset {dataset} is not in the list of available datasets: {list(full_datasets.keys())}"
print("Datasets to be used:", datasets)
print("Run name:", args.run_name)
print("Number of GPUs:", args.gpus)
print("Model path:", args.path)
for dataset in datasets:
config = {
"model": {
args.run_name: {
"class": "Qwen2VLChat",
"model_path": args.path,
"min_pixels": args.min_pixels,
"max_pixels": args.max_pixels,
"use_vllm": True,
"max_new_tokens": args.max_new_tokens,
**settings[full_datasets[dataset]],
},
},
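            # The config lists every requested dataset; the --data flag passed
            # to run_for_bash.py below names the one evaluated this iteration.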
"datasets": datasets,
}
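        # Date-stamped output directory: public_eval/<run_name>/<dataset>/<YYYYMMDD>.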
current_datetime = datetime.now().strftime("%Y%m%d")
save_dir = f"public_eval/{args.run_name}/{dataset}/{current_datetime}"
os.makedirs(save_dir, exist_ok=True)
config_name = f"config.json"
config_path = os.path.join(save_dir, config_name)
with open(config_path, "w") as json_file:
json.dump(config, json_file, indent=4)
print(f"Start evaluating on {dataset}.")
print(f"Eval config {full_datasets[dataset]}")
env_vars = os.environ.copy()
env_vars["VLLM_USE_V1"] = "0"
command = [
"torchrun",
f"--nproc_per_node={args.gpus}",
"run_for_bash.py",
"--config",
f"{config_path}",
"--data",
f"{dataset}",
"--verbose",
"--work-dir",
f"{save_dir}",
]
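        # Capture the subprocess output in per-dataset log files.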
stdout_file = os.path.join(save_dir, f"{dataset}_stdout.log")
stderr_file = os.path.join(save_dir, f"{dataset}_stderr.log")
with open(stdout_file, "w") as stdout, open(stderr_file, "w") as stderr:
try:
print(f"Output redirected to {stdout_file}")
print(f"Errors redirected to {stderr_file}")
subprocess.run(
command, env=env_vars, check=True, stdout=stdout, stderr=stderr
)
            except subprocess.CalledProcessError as e:
                print(
                    f"torchrun exited with code {e.returncode}. "
                    f"Check {stderr_file} for error details."
                )


if __name__ == "__main__":
main()
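
# Example invocation (run name and model path are illustrative):
#   python do_eval.py --run_name my_run --path /path/to/checkpoint --dataset full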