llava/eval/run_eval_batch_pvqa.py


import argparse
import os
import json
import collections
from nltk.translate.bleu_score import sentence_bleu
from eval_metrics.evaluate_metrics import calculate_exactmatch, calculate_f1score, calculate_appearance_with_normalization
from tabulate import tabulate
from eval_metrics.glossary import *  # provides normalize_word
import warnings

warnings.simplefilter('ignore')


def parse_option():
    parser = argparse.ArgumentParser('Evaluation for LLaVA Generated Outputs', add_help=False)
    parser.add_argument('--gt', type=str, default="test.json", help='path to ground-truth file')
    parser.add_argument('--pred', type=str, default="answer-file-llava-zeorshot.jsonl", help='path to prediction file')
    parser.add_argument('--pred_file_parent_path', type=str, default="answer-file-llava-zeorshot.jsonl", help='parent directory to scan for prediction files')
    args, unparsed = parser.parse_known_args()
    return args


def load_jsonl(path):
    """Read a .jsonl file into a list of dicts, one dict per line."""
    data = []
    with open(path, 'r', encoding='utf-8') as reader:
        for line in reader:
            data.append(json.loads(line))
    return data
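

# Each line of a prediction .jsonl is assumed to be a JSON object carrying at
# least 'question_id' and 'text' (the only fields evaluate() reads from it), e.g.:
#   {"question_id": 0, "text": "yes, there is pleural effusion"}
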
def evaluate(gt, pred, criterion=None):
    closed_scores = collections.defaultdict(list)
    bleu_scores = collections.defaultdict(list)
    exact_scores = collections.defaultdict(list)
    f1_scores = collections.defaultdict(list)
    open_hit_scores = collections.defaultdict(list)

    for gt_item, pred_item in zip(gt, pred):
        # note: 'conversatons' (sic) matches the key spelling used in the dataset files
        gt_results = gt_item['conversatons']
        gt_value = normalize_word(gt_results[1]['value'].lower())
        pred_value = normalize_word(pred_item['text'].lower())

        if gt_item['answer_type'] == 'OPEN' or gt_item['answer_type'] == 'other':
            # open-ended question: recall-style hit, exact match, token-level F1, and BLEU
            open_hit_scores['hit'].append(calculate_appearance_with_normalization(pred_value, gt_value))
            open_hit_scores['q_id'].append(pred_item['question_id'])

            exact_scores['hit'].append(calculate_exactmatch(pred_value, gt_value))
            exact_scores['q_id'].append(pred_item['question_id'])

            f1_score, precision, recall = calculate_f1score(pred_value, gt_value)
            f1_scores['f1'].append(f1_score)
            f1_scores['precision'].append(precision)
            f1_scores['recall'].append(recall)
            f1_scores['q_id'].append(pred_item['question_id'])

            reference = [str(gt_value).lower().split()]
            hypothesis = str(pred_value).lower().split()
            b_score = sentence_bleu(references=reference, hypothesis=hypothesis)
            b_score_1 = sentence_bleu(references=reference, hypothesis=hypothesis, weights=(1, 0, 0, 0))
            b_score_2 = sentence_bleu(references=reference, hypothesis=hypothesis, weights=(0, 1, 0, 0))
            b_score_3 = sentence_bleu(references=reference, hypothesis=hypothesis, weights=(0, 0, 1, 0))
            bleu_scores['q_id'].append(pred_item['question_id'])
            bleu_scores['bleu_score'].append(b_score)
            bleu_scores['bleu_score_1'].append(b_score_1)
            bleu_scores['bleu_score_2'].append(b_score_2)
            bleu_scores['bleu_score_3'].append(b_score_3)
        elif gt_item['answer_type'] == 'CLOSED':
            # closed-ended (yes/no) question; predictions containing neither
            # 'yes' nor 'no' are left out of the accuracy denominator
            closed_scores['q_id'].append(pred_item['question_id'])
            if 'yes' in pred_value or 'no' in pred_value:
                closed_scores['hit'].append(1 if gt_value in pred_value else 0)

    exact_score = sum(exact_scores['hit']) / len(exact_scores['hit'])
    f1_score = sum(f1_scores['f1']) / len(f1_scores['f1'])
    precision = sum(f1_scores['precision']) / len(f1_scores['precision'])
    recall = sum(f1_scores['recall']) / len(f1_scores['recall'])
    open_hit_score = sum(open_hit_scores['hit']) / len(open_hit_scores['hit'])
    closed_score = sum(closed_scores['hit']) / len(closed_scores['hit']) if len(closed_scores['hit']) != 0 else 0.0

    return tabulate(
        [
            ['exact match score', exact_score * 100],
            ['f1 score', f1_score * 100],
            ['precision', precision * 100],
            ['recall', recall * 100],
            ['open accuracy', open_hit_score * 100],
            ['yes/no accuracy', closed_score * 100],
        ],
        headers=['Metric', 'Performance'],
    )
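

# A minimal sketch of calling evaluate() directly on toy data (hypothetical
# records; the keys mirror what the loop above reads, including the dataset's
# misspelled 'conversatons' key; at least one OPEN item is needed so the
# open-ended averages are not divided by zero):
#
#   gt = [{"id": 0, "answer_type": "OPEN",
#          "conversatons": [{"value": "What organ is shown?"},
#                           {"value": "liver"}]},
#         {"id": 1, "answer_type": "CLOSED",
#          "conversatons": [{"value": "Is there edema?"},
#                           {"value": "yes"}]}]
#   pred = [{"question_id": 0, "text": "the liver"},
#           {"question_id": 1, "text": "yes"}]
#   print(evaluate(gt, pred))
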
if __name__ == '__main__':
    args = parse_option()

    dataset_list = ["vqa_rad", "pvqa", "Slake1.0"]

    # collect every per-dataset answer file under the given parent directory
    jsonl_files = []
    for root, dirs, files in os.walk(args.pred_file_parent_path):
        for file in files:
            if file.endswith("test-answer-file.jsonl"):
                jsonl_files.append(os.path.join(root, file))
    print(jsonl_files)

    answers_file = "eval_results_med_datasets.jsonl"
    ans_file = open(answers_file, "w")
    for f in jsonl_files:
        for ds in dataset_list:
            if ds in f:
                # note: the ground-truth location is hard-coded to this mount point
                args.gt = f"/home/chunyl/azure_mount/hanoverdev/clwon/llava/eval/{ds}/test.json"
                args.pred = f
                try:
                    gt = json.load(open(args.gt, 'r'))
                    pred = load_jsonl(args.pred)

                    gt_ids = [item['id'] for item in gt]
                    pred_ids = [item['question_id'] for item in pred]
                    assert gt_ids == pred_ids, "please make sure pred and gt are exactly matched"

                    # perform evaluation
                    results = evaluate(gt, pred)
                    ans_file.write(json.dumps({"dataset": ds,
                                               "pred_file": f,
                                               "results": results}) + "\n")
                    ans_file.flush()
                    print(results)
                except Exception as e:
                    print(f">>>Skip {f}: {e}")
    ans_file.close()
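

# Example invocation (a sketch: --pred_file_parent_path is assumed to point at
# a directory tree whose leaves are per-dataset *test-answer-file.jsonl files,
# with the dataset name (vqa_rad, pvqa, or Slake1.0) somewhere in each path):
#
#   python llava/eval/run_eval_batch_pvqa.py \
#       --pred_file_parent_path /path/to/eval/outputs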