import os  # used below to remove stale result files

from vlmeval.dataset import build_dataset
from vlmeval.smp import *  # provides load_env and listinstr
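
# Load environment variables (API keys for the judge, etc.) from the local .env file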
load_env()
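
# Dataset whose prediction files will be re-scored; the name must match the one used to generate them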
dataset_name = "DynaMath"
dataset = build_dataset(dataset_name)
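
# Keyword arguments for the judge model used to extract and grade answers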
judge_kwargs = {
    'nproc': 16,
    'verbose': True,
    'retry': 10,
}
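
# Pick the judge model from the dataset type / name; DynaMath falls into the gpt-4o-mini branch below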
if dataset.TYPE in ['MCQ', 'Y/N', 'MCQ_MMMU_Pro'] or listinstr(['moviechat1k'], dataset_name.lower()):
    if listinstr(['WeMath'], dataset_name):
        judge_kwargs['model'] = 'gpt-4o-mini'
    else:
        judge_kwargs['model'] = 'chatgpt-0125'
elif listinstr(['MMVet', 'LLaVABench', 'MMBench_Video'], dataset_name):
    judge_kwargs['model'] = 'gpt-4-turbo'
elif listinstr(['MathVista', 'MathVerse', 'MathVision', 'DynaMath', 'VL-RewardBench', 'LogicVista', 'MOAT'], dataset_name):
    judge_kwargs['model'] = 'gpt-4o-mini'
elif listinstr(['MMLongBench', 'MMDU', 'DUDE', 'SLIDEVQA', 'MIA-Bench', 'WildVision', 'MMAlignBench'], dataset_name):
    judge_kwargs['model'] = 'gpt-4o'
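
# Prediction files (.xlsx) to evaluate; add more paths here to score several runs in one pass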
fs = [
    "/user/konglingyu/VLMEvalKit/public_eval/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200/DynaMath_train_prompt_greedy/20250524/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200/T20250524_G/grpo_v7_exp0_qwen25vl_scalable_rl_opensource_math_grpo_bs96_wofilter_scoreB_std_filter_0523_global_step_200_DynaMath.xlsx",
]
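
# For each prediction file: clear stale judge outputs, rerun evaluation, and print the score table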
for file in fs:
    # Remove stale judge outputs (named after the judge model, here gpt-4o-mini)
    # so the evaluation starts fresh instead of reusing cached results
    for suffix in ("_gpt-4o-mini_score.csv", "_gpt-4o-mini.pkl", "_gpt-4o-mini.xlsx"):
        try:
            os.remove(file.replace(".xlsx", suffix))
            print(f"Removed old file: {file.replace('.xlsx', suffix)}")
        except FileNotFoundError:
            pass
    # Run the judge-based evaluation; result files are written next to the prediction file
    dataset.evaluate(file, **judge_kwargs)

    # Print the score table produced by the judge
    with open(file.replace(".xlsx", "_gpt-4o-mini_score.csv")) as f:
        lines = f.readlines()
    print(f"File: {file.split('/')[-1]}")
    for line in lines:
        print(line.strip())