import gradio as gr
from openai import OpenAI
import json
import os
import uuid
import tempfile
from tqdm import tqdm
import pandas as pd
import numpy as np
from collections import Counter
import time
from zipfile import ZipFile
import threading
import shutil
# For Azure OpenAI (legacy openai<1.0 module-level config; with openai>=1.0 use openai.AzureOpenAI)
# openai.api_key = os.environ.get("AZURE_OPENAI_KEY")
# openai.api_base = os.environ.get("AZURE_OPENAI_ENDPOINT")
# openai.api_type = 'azure'
# openai.api_version = os.environ.get("AZURE_OPENAI_API_VERSION")
# deployment_id = os.environ.get("AZURE_OPENAI_DEP_ID")
# gpt_model = deployment_id
prompt = """Compare the ground truth and prediction from AI models, to give a correctness score for the prediction. <AND> in the ground truth means it is totally right only when all elements in the ground truth are present in the prediction, and <OR> means it is totally right when any one element in the ground truth is present in the prediction. The correctness score is 0.0 (totally wrong), 0.1, 0.2, 0.3, 0.4, 0.5, 0.6, 0.7, 0.8, 0.9, or 1.0 (totally right). Just complete the last space of the correctness score.
Question | Ground truth | Prediction | Correctness
--- | --- | --- | ---
What is x in the equation? | -1 <AND> -5 | x = 3 | 0.0
What is x in the equation? | -1 <AND> -5 | x = -1 | 0.5
What is x in the equation? | -1 <AND> -5 | x = -5 | 0.5
What is x in the equation? | -1 <AND> -5 | x = -5 or 5 | 0.5
What is x in the equation? | -1 <AND> -5 | x = -1 or x = -5 | 1.0
Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme talks about Iceland and Greenland. It's pointing out that despite their names, Iceland is not very icy and Greenland isn't very green. | 0.4
Can you explain this meme? | This meme is poking fun at the fact that the names of the countries Iceland and Greenland are misleading. Despite its name, Iceland is known for its beautiful green landscapes, while Greenland is mostly covered in ice and snow. The meme is saying that the person has trust issues because the names of these countries do not accurately represent their landscapes. | The meme is using humor to point out the misleading nature of Iceland's and Greenland's names. Iceland, despite its name, has lush green landscapes while Greenland is mostly covered in ice and snow. The text 'This is why I have trust issues' is a playful way to suggest that these contradictions can lead to distrust or confusion. The humor in this meme is derived from the unexpected contrast between the names of the countries and their actual physical characteristics. | 1.0
"""
def schedule_cleanup(paths, delay=600):
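    """Best-effort deletion of temp files/dirs after `delay` seconds, on a daemon thread."""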
def _clean():
time.sleep(delay)
for p in (paths if isinstance(paths, (list, tuple)) else [paths]):
try:
if os.path.isdir(p):
shutil.rmtree(p, ignore_errors=True)
elif os.path.isfile(p):
os.remove(p)
            except OSError:
                # best-effort cleanup; ignore files that are already gone
                pass
threading.Thread(target=_clean, daemon=True).start()
def grade(file_obj, key, model, api_base, progress=gr.Progress()):
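    """Grade an uploaded MM-Vet result JSON with an LLM judge and return a zip of score files."""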
if "mmvet" in model:
# use our api key for users
key = os.environ.get("AZURE_OPENAI_KEY")
api_base = os.environ.get("AZURE_OPENAI_ENDPOINT")
client = OpenAI(
base_url=api_base.strip() if api_base and api_base.strip() else "https://api.openai.com/v1",
api_key=key.strip()
)
gpt_model = model
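    # per-request scratch dir and unique id keep concurrent grading jobs from colliding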
workdir = tempfile.mkdtemp(prefix="mmvet_grade_")
uid = uuid.uuid4().hex
# load metadata
# Download mm-vet.zip and `unzip mm-vet.zip` and change the path below
mmvet_path = "mm-vet"
use_sub_set = False
decimal_places = 1 # number of decimal places to round to
if use_sub_set:
bard_set_file = os.path.join(mmvet_path, "bard_set.json")
with open(bard_set_file, 'r') as f:
sub_set = json.load(f)
sub_set_name = 'bardset'
sub_set_name = sub_set_name + '_'
else:
sub_set = None
sub_set_name = ''
mmvet_metadata = os.path.join(mmvet_path, "mm-vet.json")
with open(mmvet_metadata, 'r') as f:
data = json.load(f)
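    # tally how often each capability and each capability combination appears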
counter = Counter()
cap_set_list = []
cap_set_counter = []
len_data = 0
for id, value in data.items():
if sub_set is not None and id not in sub_set:
continue
question = value["question"]
answer = value["answer"]
cap = value["capability"]
cap = set(cap)
counter.update(cap)
if cap not in cap_set_list:
cap_set_list.append(cap)
cap_set_counter.append(1)
else:
cap_set_counter[cap_set_list.index(cap)] += 1
len_data += 1
sorted_list = counter.most_common()
columns = [k for k, v in sorted_list]
columns.append("total")
columns.append("std")
columns.append('runs')
df = pd.DataFrame(columns=columns)
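    # order capability combinations by frequency (most common first)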
cap_set_sorted_indices = np.argsort(-np.array(cap_set_counter))
new_cap_set_list = []
new_cap_set_counter = []
for index in cap_set_sorted_indices:
new_cap_set_list.append(cap_set_list[index])
new_cap_set_counter.append(cap_set_counter[index])
cap_set_list = new_cap_set_list
cap_set_counter = new_cap_set_counter
    cap_set_names = ["_".join(list(cap_set)) for cap_set in cap_set_list]
    columns2 = cap_set_names.copy()  # copy so the appends below don't mutate cap_set_names
columns2.append("total")
columns2.append("std")
columns2.append('runs')
df2 = pd.DataFrame(columns=columns2)
###### change your model name ######
    model_name = os.path.basename(file_obj.name)[:-5]  # strip the ".json" extension
# result_path = "results"
num_run = 1 # we set 5 in the paper
# model_results_file = os.path.join(result_path, f"{model}.json")
model_results_file = file_obj.name
grade_file = os.path.join(workdir, f'{model_name}_{gpt_model.replace("-mmvet", "")}-grade-{num_run}runs_{uid}.json')
cap_score_file = os.path.join(workdir, f'{model_name}_{sub_set_name}{gpt_model.replace("-mmvet", "")}-cap-score-{num_run}runs_{uid}.csv')
cap_int_score_file = os.path.join(workdir, f'{model_name}_{sub_set_name}{gpt_model.replace("-mmvet", "")}-cap-int-score-{num_run}runs_{uid}.csv')
zip_file = os.path.join(workdir, f"results_{uid}.zip")
with open(model_results_file) as f:
results = json.load(f)
if os.path.exists(grade_file):
with open(grade_file, 'r') as f:
grade_results = json.load(f)
else:
grade_results = {}
def need_more_runs():
need_more_runs = False
if len(grade_results) > 0:
for k, v in grade_results.items():
if len(v['score']) < num_run:
need_more_runs = True
break
return need_more_runs or len(grade_results) < len_data
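    # keep querying the judge until every sample has num_run scores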
while need_more_runs():
for j in range(num_run):
print(f'eval run {j}')
for id, line in progress.tqdm(data.items(), desc="Grading"):
if sub_set is not None and id not in sub_set:
continue
if id in grade_results and len(grade_results[id]['score']) >= (j + 1):
continue
model_pred = results[id]
question = prompt + '\n' + ' | '.join([line['question'], line['answer'].replace("<AND>", " <AND> ").replace("<OR>", " <OR> "), model_pred, ""])
messages = [
{"role": "user", "content": question},
]
if id not in grade_results:
sample_grade = {'model': [], 'content': [], 'score': []}
else:
sample_grade = grade_results[id]
                grade_sample_run_complete = False
                temperature = 0.0
                num_sleep = 0
                # initialize so the bookkeeping below never reads unbound names
                # if every API call fails and we give up with score = 0.0
                response = None
                content = ""
while not grade_sample_run_complete:
try:
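                        # the judge replies with only a numeric score, so max_tokens=3 suffices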
response = client.chat.completions.create(
model=gpt_model,
# engine=gpt_model, # For Azure OpenAI
max_tokens=3,
temperature=temperature,
messages=messages)
content = response.choices[0].message.content
flag = True
try_time = 1
while flag:
                            try:
                                content = content.split(' ')[0].strip()
                                score = float(content)
                                if score > 1.0 or score < 0.0:
                                    # treat out-of-range scores like unparsable ones and retry
                                    raise ValueError(f"score {score} out of range [0.0, 1.0]")
                                flag = False
                            except ValueError:
question = prompt + '\n' + ' | '.join([line['question'], line['answer'].replace("<AND>", " <AND> ").replace("<OR>", " <OR> "), model_pred, ""]) + "\nPredict the correctness of the answer (digit): "
messages = [
{"role": "user", "content": question},
]
response = client.chat.completions.create(
model=gpt_model,
# engine=gpt_model, # For Azure OpenAI
max_tokens=3,
temperature=temperature,
messages=messages)
content = response.choices[0].message.content
try_time += 1
temperature += 0.5
print(f"{id} try {try_time} times")
print(content)
if try_time > 5:
score = 0.0
flag = False
grade_sample_run_complete = True
except Exception as e:
print(e)
# gpt4 may have token rate limit
num_sleep += 1
if num_sleep > 12:
score = 0.0
grade_sample_run_complete = True
num_sleep = 0
continue
print("sleep 5s")
time.sleep(5)
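                # record this run's judge output; overwrite slot j when re-grading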
resp_model = (getattr(response, "model", None) or gpt_model)
content_str = str(content)
if len(sample_grade['model']) >= j + 1:
sample_grade['model'][j] = resp_model
sample_grade['content'][j] = content_str
sample_grade['score'][j] = score
else:
sample_grade['model'].append(resp_model)
sample_grade['content'].append(content_str)
sample_grade['score'].append(score)
grade_results[id] = sample_grade
with open(grade_file, 'w') as f:
json.dump(grade_results, f, indent=4)
assert not need_more_runs()
    # aggregate per-run scores: per capability (df) and per capability combination (df2)
    cap_scores = {k: [0.0] * num_run for k in columns[:-2]}
    counter['total'] = len_data
    cap_scores2 = {k: [0.0] * num_run for k in columns2[:-2]}
    counter2 = {columns2[i]: cap_set_counter[i] for i in range(len(cap_set_counter))}
    counter2['total'] = len_data
    for k, v in grade_results.items():
        if sub_set is not None and k not in sub_set:
            continue
        for i in range(num_run):
            score = v['score'][i]
            caps = set(data[k]['capability'])
            for c in caps:
                cap_scores[c][i] += score
            cap_scores['total'][i] += score
            index = cap_set_list.index(caps)
            cap_scores2[cap_set_names[index]][i] += score
            cap_scores2['total'][i] += score
    # convert score sums to percentages, then report mean/std over runs
    for k, v in cap_scores.items():
        cap_scores[k] = np.array(v) / counter[k] * 100
    std = round(cap_scores['total'].std(), decimal_places)
    total_copy = cap_scores['total'].copy()
    runs = str(list(np.round(total_copy, decimal_places)))
    for k, v in cap_scores.items():
        cap_scores[k] = round(v.mean(), decimal_places)
    cap_scores['std'] = std
    cap_scores['runs'] = runs
    df.loc[gpt_model.replace("-mmvet", "")] = cap_scores
    for k, v in cap_scores2.items():
        cap_scores2[k] = round(np.mean(np.array(v) / counter2[k] * 100), decimal_places)
    cap_scores2['std'] = std
    cap_scores2['runs'] = runs
    df2.loc[gpt_model.replace("-mmvet", "")] = cap_scores2
df.to_csv(cap_score_file)
df2.to_csv(cap_int_score_file)
files = [cap_score_file, cap_int_score_file, grade_file]
with ZipFile(zip_file, "w") as zipObj:
for fpath in files:
arcname = os.path.basename(fpath)
zipObj.write(fpath, arcname)
for fpath in files:
os.remove(fpath)
schedule_cleanup([zip_file, workdir], delay=3600)
return zip_file
# demo = gr.Interface(
# fn=grade,
# inputs=gr.File(file_types=[".json"]),
# outputs="file")
# --- Validate key and model before running grading ---
def validate_key_and_model(key: str, model: str, api_base: str = None):
    if not key or not key.strip():
        return False, "Please provide your OpenAI API key."
    try:
        client = OpenAI(
            base_url=api_base.strip() if api_base and api_base.strip() else "https://api.openai.com/v1",
            api_key=key.strip()
        )
client.models.retrieve(model)
return True, "OK"
except Exception as e:
return False, str(e)
# --- Wrapper for the grading function ---
def run_grade(file_obj, key, model, api_base, progress=gr.Progress(track_tqdm=True)):
if model == "gpt-4.1":
model = "gpt-4.1-mmvet" # in our Azure OpenAI base, the model name is gpt-4.1-mmvet
if "mmvet" not in model:
ok, msg = validate_key_and_model(key, model, api_base)
if not ok:
raise gr.Error(msg)
return grade(file_obj, key, model, api_base, progress=progress)
markdown = """
<p align="center">
<img src="https://github-production-user-asset-6210df.s3.amazonaws.com/49296856/258254299-29c00dae-8201-4128-b341-dad4663b544a.jpg" width="400"> <br>
</p>
# [MM-Vet: Evaluating Large Multimodal Models for Integrated Capabilities](https://arxiv.org/abs/2308.02490)
This demo uses an LLM-based (GPT-4) evaluator to grade open-ended outputs from your models.
Please upload a JSON file of your model's results containing `{v1_0: ..., v1_1: ..., }` like [this json file](https://raw.githubusercontent.com/yuweihao/MM-Vet/main/results/llava_llama2_13b_chat.json).
Grading takes about 5 minutes. Since we only support a single queue, it may take longer while other users' grading finishes.
The grading results will be downloaded as a zip file.
"""
with gr.Blocks() as demo:
gr.Markdown(markdown)
# Model selection
model = gr.Dropdown(
choices=["gpt-4.1", "gpt-4-0613", "gpt-4-turbo"],
value="gpt-4.1",
label="Select model (gpt-4.1 is free with our api key)"
)
# User OpenAI fields (only for non-Azure models)
with gr.Row():
user_key = gr.Textbox(
label="Your OpenAI API Key (required for gpt-4-0613 (default in the paper) / gpt-4-turbo). The evaluation may cost several dollars, please notice your OpenAI API Key balance. 1M input tokens: gpt-4-turbo $10.00, gpt-4-0613 $30.00",
type="password",
visible=False
)
user_api_base = gr.Textbox(
label="Your OpenAI Base URL (optional, leave empty for official)",
value="",
visible=False
)
# File I/O
with gr.Row():
inp = gr.File(file_types=[".json"], label="Upload your model result JSON")
out = gr.File(file_types=[".zip"], label="Download grading results")
btn = gr.Button("Start grading", variant="primary")
# Toggle fields based on selection
def _toggle_fields(selected):
if selected == "gpt-4.1":
return gr.update(visible=False), gr.update(visible=False)
else:
return gr.update(visible=True), gr.update(visible=True)
model.change(_toggle_fields, inputs=[model], outputs=[user_key, user_api_base])
# Click handler
btn.click(
fn=run_grade,
inputs=[inp, user_key, model, user_api_base],
outputs=out
)
if __name__ == "__main__":
    demo.queue(max_size=8).launch()