Jietson committed on
Commit
f6058bd
·
verified ·
1 Parent(s): ce8127b

Update README.md

Browse files
Files changed (1) hide show
  1. README.md +25 -19
README.md CHANGED
@@ -65,8 +65,9 @@ Each question entry is arranged as:
65
  ```
66
  {
67
  "question_id": id of the question,
68
- "qtype": type of the question, for example: "rank" questions
69
- "figure_path": local path of the image if you download the image,
 
70
  "question": question,
71
  "answer": answer,
72
  "instructions": instructions,
@@ -87,49 +88,54 @@ text_input: item["question"] + item["instructions"] (if any)
87
 
88
  where ``item`` is an entry of the dataset.
89
 
 
90
  # Evaluate
91
 
92
 You should store and evaluate the model's response as:
93
 
94
  ```python
95
  # Example code for evaluate
96
- def build_question(query):#to build the question
97
  question = ""
98
  if "prompt" in query:
99
- question = question + f"{query["prompt"]}\n"
100
- question = question + f"{query["question"]}\n"
101
- if "options" in query:
102
- for _ in query["options"]:
103
- question = question + f"{_} {query['options'][_]}\n"
104
- if "instructions" in query:
105
- question = question + query["instructions"]
106
  return question
107
 
108
- with open("visual_basic.json","r",encode="utf-8") as f:
109
- queries = json.load(f)
110
 
111
- for query in tqdm(queries):
 
 
 
 
112
  query_idx = query["question_id"]
113
  question_text = build_question(query)
114
- chart_figure = query["url"] # This should be a list of url
 
115
  """
116
  Note that for models that do not support url input, you may need to download images first.
117
 For example, for a model like Qwen2.5-VL, you may need to download the image first and pass the local image path to the model,
118
- like: figure_path = YOUR_LOCAL_IMAGE_PATH OF query['figure_path']
119
  """
120
- # Replace with your model
121
- response = model.generate(question_text, chart_figure)
122
 
 
 
123
 
124
  Responses[query_idx] = {
125
- "qtype": int(query["qtype"]),
126
  "answer": query["answer"],
127
  "question_id": query_idx,
128
  "response": response,
129
  }
130
 
131
  with open("./model_response.json", "w", encoding="utf-8") as f:
132
- json.dump(Responses, f, indent = 2, ensure_ascii=False)
 
133
  ```
134
 
135
 
 
65
  ```
66
  {
67
  "question_id": id of the question,
68
+ "question_type_name": question type name, for example: "extreme" questions
69
+ "question_type_id": question type id, for example: 72 means "extreme" questions
70
+ "figure_id": id of the figure,
71
  "question": question,
72
  "answer": answer,
73
  "instructions": instructions,
 
88
 
89
  where ``item`` is an entry of the dataset.
90
 
91
+
92
  # Evaluate
93
 
94
 You should store and evaluate the model's response as:
95
 
96
  ```python
97
  # Example code for evaluate
98
+ def build_question(query):
99
  question = ""
100
  if "prompt" in query:
101
+ question += f"{query['prompt']}\n"
102
+ question += f"{query['question']}\n"
103
+ if "options" in query and len(query["options"]) > 0:
104
+ for option in query["options"]:
105
+ question += f"{option}\n"
106
+ if "instructions" in query:
107
+ question += query["instructions"]
108
  return question
109
 
 
 
110
 
111
+ #### Run your model and save your answer
112
+
113
+ Responses = {}
114
+
115
+ for query in tqdm(ds):
116
  query_idx = query["question_id"]
117
  question_text = build_question(query)
118
+ figure_path = query["url"] # This should be a list of url for models that support url input
119
+
120
  """
121
  Note that for models that do not support url input, you may need to download images first.
122
 For example, for a model like Qwen2.5-VL, you may need to download the image first and pass the local image path to the model,
123
+ like: figure_path = YOUR_LOCAL_IMAGE_PATH OF query['figure_id']
124
  """
 
 
125
 
126
+ # Replace with your model
127
+ response = model.generate(question_text, figure_path)
128
 
129
  Responses[query_idx] = {
130
+ "qtype": int(query["question_type_id"]),
131
  "answer": query["answer"],
132
  "question_id": query_idx,
133
  "response": response,
134
  }
135
 
136
  with open("./model_response.json", "w", encoding="utf-8") as f:
137
+ json.dump(Responses, f, indent=2, ensure_ascii=False)
138
+
139
  ```
140
 
141