
Commit d19cdd8

Author: Your Name
Message: add scripts
Parent: 5cb8ddf


74 files changed (+3,523 −0 lines)
@@ -0,0 +1,28 @@
{
    "fp16": {
        "enabled": "auto",
        "loss_scale": 0,
        "loss_scale_window": 1000,
        "initial_scale_power": 16,
        "hysteresis": 2,
        "min_loss_scale": 1
    },
    "bf16": {
        "enabled": "auto"
    },
    "train_micro_batch_size_per_gpu": "auto",
    "train_batch_size": "auto",
    "gradient_accumulation_steps": "auto",
    "zero_optimization": {
        "stage": 3,
        "overlap_comm": true,
        "contiguous_gradients": true,
        "sub_group_size": 1e9,
        "reduce_bucket_size": "auto",
        "stage3_prefetch_bucket_size": "auto",
        "stage3_param_persistence_threshold": "auto",
        "stage3_max_live_parameters": 1e9,
        "stage3_max_reuse_distance": 1e9,
        "stage3_gather_16bit_weights_on_model_save": true
    }
}
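
This is a standard DeepSpeed ZeRO stage-3 configuration in which the "auto" fields are placeholders. A minimal usage sketch, assuming training runs through the Hugging Face transformers Trainer (whose DeepSpeed integration resolves the "auto" values from its own arguments); the output path, batch sizes, and config path below are illustrative, not taken from this commit:

from transformers import TrainingArguments

training_args = TrainingArguments(
    output_dir="checkpoints",           # hypothetical output directory
    per_device_train_batch_size=4,      # resolves train_micro_batch_size_per_gpu="auto"
    gradient_accumulation_steps=2,      # resolves gradient_accumulation_steps="auto"
    bf16=True,                          # resolves bf16.enabled="auto"
    deepspeed="scripts/zero3.json",     # assumed path to the config above
)
# Trainer(model=..., args=training_args, train_dataset=...).train()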

scripts/convert_gqa_for_eval.py

+18
@@ -0,0 +1,18 @@
import os
import json
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--src", type=str)
parser.add_argument("--dst", type=str)
args = parser.parse_args()

# Collect one {questionId, prediction} record per line of the source JSONL,
# normalizing each prediction by stripping a trailing period and lowercasing.
all_answers = []
for line_idx, line in enumerate(open(args.src)):
    res = json.loads(line)
    question_id = res['question_id']
    text = res['text'].rstrip('.').lower()
    all_answers.append({"questionId": question_id, "prediction": text})

with open(args.dst, 'w') as f:
    json.dump(all_answers, f)
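
For reference, a minimal sketch of the per-record transformation the loop above applies, using a made-up GQA answer line (the real values come from the --src file):

import json

line = json.dumps({"question_id": "201640614", "text": "The sky."})  # made-up sample
res = json.loads(line)
print({"questionId": res["question_id"], "prediction": res["text"].rstrip('.').lower()})
# {'questionId': '201640614', 'prediction': 'the sky'}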
+27
@@ -0,0 +1,27 @@
import os
import json
import argparse
import pandas as pd


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation-file", type=str, required=True)
    parser.add_argument("--result-dir", type=str, required=True)
    parser.add_argument("--upload-dir", type=str, required=True)
    parser.add_argument("--experiment", type=str, required=True)

    return parser.parse_args()


if __name__ == "__main__":
    args = get_args()

    df = pd.read_table(args.annotation_file)

    # Drop columns not needed for submission and add an empty 'prediction' column.
    cur_df = df.copy()
    cur_df = cur_df.drop(columns=['hint', 'category', 'source', 'image', 'comment', 'l2-category'])
    cur_df.insert(6, 'prediction', None)

    # Fill predictions by matching each result's question_id against the
    # annotation table's 'index' column.
    for pred in open(os.path.join(args.result_dir, f"{args.experiment}.jsonl")):
        pred = json.loads(pred)
        cur_df.loc[df['index'] == pred['question_id'], 'prediction'] = pred['text']

    cur_df.to_excel(os.path.join(args.upload_dir, f"{args.experiment}.xlsx"), index=False, engine='openpyxl')
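
The fill step can be checked in isolation; a self-contained sketch with a toy two-row annotation table (column names follow the code above, values are made up):

import pandas as pd

df = pd.DataFrame({"index": [0, 1], "question": ["Q0", "Q1"]})
cur_df = df.copy()
cur_df["prediction"] = None
for pred in [{"question_id": 1, "text": "B"}]:  # made-up result record
    cur_df.loc[df["index"] == pred["question_id"], "prediction"] = pred["text"]
print(cur_df)  # the row with index 1 now carries prediction "B"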

scripts/convert_mmvet_for_eval.py

+18
@@ -0,0 +1,18 @@
import os
import json
import argparse

parser = argparse.ArgumentParser()
parser.add_argument("--src", type=str)
parser.add_argument("--dst", type=str)
args = parser.parse_args()

# Re-key each prediction as 'v1_<question_id>' and write a single JSON dict.
cur_result = {}

for line in open(args.src):
    data = json.loads(line)
    qid = data['question_id']
    cur_result[f'v1_{qid}'] = data['text']

with open(args.dst, 'w') as f:
    json.dump(cur_result, f, indent=2)
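
A quick sketch of the re-keying, with a made-up model output line (the 'v1_' prefix matches what the script above writes):

import json

line = json.dumps({"question_id": 0, "text": "A red apple."})  # made-up sample
data = json.loads(line)
print({f"v1_{data['question_id']}": data["text"]})  # {'v1_0': 'A red apple.'}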
+74
@@ -0,0 +1,74 @@
import os
import json
import argparse


def get_args():
    parser = argparse.ArgumentParser()
    parser.add_argument("--annotation-file", type=str)
    parser.add_argument("--result-file", type=str)
    parser.add_argument("--result-upload-file", type=str)
    return parser.parse_args()


def eval_single(result_file, eval_only_type=None):
    # Relies on the module-level globals `data` and `ques_type_id_to_name`
    # defined in the __main__ block below.
    results = {}
    for line in open(result_file):
        row = json.loads(line)
        results[row['question_id']] = row

    # Tally questions and correct answers per question type.
    type_counts = {}
    correct_counts = {}
    for question_data in data['questions']:
        if eval_only_type is not None and question_data['data_type'] != eval_only_type:
            continue
        data_type = question_data['question_type_id']
        type_counts[data_type] = type_counts.get(data_type, 0) + 1
        try:
            question_id = int(question_data['question_id'])
        except (ValueError, TypeError):
            question_id = question_data['question_id']
        if question_id not in results:
            correct_counts[data_type] = correct_counts.get(data_type, 0)
            continue
        row = results[question_id]
        if row['text'] == question_data['answer']:
            correct_counts[data_type] = correct_counts.get(data_type, 0) + 1

    total_count = 0
    total_correct = 0
    for data_type in sorted(type_counts.keys()):
        # .get() guards against a type whose every question was answered but
        # answered wrong, in which case correct_counts has no entry for it.
        accuracy = correct_counts.get(data_type, 0) / type_counts[data_type] * 100
        if eval_only_type is None:
            print(f"{ques_type_id_to_name[data_type]}: {accuracy:.2f}%")

        total_count += type_counts[data_type]
        total_correct += correct_counts.get(data_type, 0)

    total_accuracy = total_correct / total_count * 100
    if eval_only_type is None:
        print(f"Total accuracy: {total_accuracy:.2f}%")
    else:
        print(f"{eval_only_type} accuracy: {total_accuracy:.2f}%")

    return results


if __name__ == "__main__":
    args = get_args()
    data = json.load(open(args.annotation_file))
    ques_type_id_to_name = {id: n for n, id in data['question_type'].items()}

    results = eval_single(args.result_file)
    eval_single(args.result_file, eval_only_type='image')
    eval_single(args.result_file, eval_only_type='video')

    with open(args.result_upload_file, 'w') as fp:
        for question in data['questions']:
            qid = question['question_id']
            if qid in results:
                result = results[qid]
            else:
                result = results[int(qid)]
            fp.write(json.dumps({
                'question_id': qid,
                'prediction': result['text']
            }) + '\n')
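
The per-type bookkeeping in eval_single reduces to two dicts keyed by question_type_id; a self-contained sketch with made-up questions and predictions:

questions = [  # made-up annotation entries
    {"question_id": "1", "question_type_id": 3, "answer": "A"},
    {"question_id": "2", "question_type_id": 3, "answer": "B"},
]
results = {1: {"text": "A"}, 2: {"text": "C"}}  # made-up model predictions

type_counts, correct_counts = {}, {}
for q in questions:
    t = q["question_type_id"]
    type_counts[t] = type_counts.get(t, 0) + 1
    if results.get(int(q["question_id"]), {}).get("text") == q["answer"]:
        correct_counts[t] = correct_counts.get(t, 0) + 1
print(correct_counts.get(3, 0) / type_counts[3] * 100)  # 50.0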

scripts/convert_sqa_to_llava.py

+88
@@ -0,0 +1,88 @@
import json
import os
import fire
import re
from convert_sqa_to_llava_base_prompt import build_prompt_chatbot


def convert_to_llava(base_dir, split, prompt_format="QCM-LEA"):
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))

    split_problems = build_prompt_chatbot(
        problems, split_indices, prompt_format,
        use_caption=False, is_test=False)

    # Convert each (input, output) pair into LLaVA's conversation format,
    # attaching the image path when the problem has one.
    target_format = []
    for prob_id, (input, output) in split_problems.items():
        if input.startswith('Question: '):
            input = input.replace('Question: ', '')
        if output.startswith('Answer: '):
            output = output.replace('Answer: ', '')

        raw_prob_data = problems[prob_id]
        if raw_prob_data['image'] is None:
            target_format.append({
                "id": prob_id,
                "conversations": [
                    {'from': 'human', 'value': f"{input}"},
                    {'from': 'gpt', 'value': f"{output}"},
                ],
            })
        else:
            target_format.append({
                "id": prob_id,
                "image": os.path.join(prob_id, raw_prob_data['image']),
                "conversations": [
                    {'from': 'human', 'value': f"{input}\n<image>"},
                    {'from': 'gpt', 'value': f"{output}"},
                ],
            })

    print(f'Number of samples: {len(target_format)}')

    with open(os.path.join(base_dir, f"llava_{split}_{prompt_format}.json"), "w") as f:
        json.dump(target_format, f, indent=2)


def convert_to_jsonl(base_dir, split, prompt_format="QCM-LEPA"):
    split_indices = json.load(open(os.path.join(base_dir, "pid_splits.json")))[split]
    problems = json.load(open(os.path.join(base_dir, "problems.json")))

    split_problems = build_prompt_chatbot(
        problems, split_indices, prompt_format,
        use_caption=False, is_test=False)

    # Same conversion as above, but emits flat instruction/output records,
    # one JSON object per line.
    writer = open(os.path.join(base_dir, f"scienceqa_{split}_{prompt_format}.jsonl"), "w")
    for prob_id, (input, output) in split_problems.items():
        if input.startswith('Question: '):
            input = input.replace('Question: ', '')
        if output.startswith('Answer: '):
            output = output.replace('Answer: ', '')

        raw_prob_data = problems[prob_id]
        if raw_prob_data['image'] is None:
            data = {
                "id": prob_id,
                "instruction": f"{input}",
                "output": f"{output}",
            }
        else:
            data = {
                "id": prob_id,
                "image": os.path.join(prob_id, raw_prob_data['image']),
                "instruction": f"{input}\n<image>",
                "output": f"{output}",
            }
        writer.write(json.dumps(data) + '\n')
    writer.close()


def main(task, **kwargs):
    # Dispatch the first CLI argument to the function of the same name.
    globals()[task](**kwargs)


if __name__ == "__main__":
    fire.Fire(main)
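
The main/globals() pattern above lets the first CLI argument choose which converter to run. A stripped-down sketch of the same dispatch, with an illustrative function name (not from this commit):

import fire

def greet(name="world"):
    print(f"hello {name}")

def main(task, **kwargs):
    globals()[task](**kwargs)  # look up the function named by `task` and call it

if __name__ == "__main__":
    fire.Fire(main)  # e.g. `python demo.py greet --name=llava`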
