import argparse
import json
import logging
import os

from tqdm import tqdm
from datasets import load_dataset, concatenate_datasets

from models.qwen import Qwen_vllm_Model
from data_utils import load_yaml, verify_response, build_query


def do_generate(dataset_name, model_path, output_path,
                subject=('Math', 'Physics', 'Chemistry', 'Coding'),
                split='test',
                config_path='/user/konglingyu/VLMEvalKit/EMMA/configs/gpt.yaml',
                strategy='TrainCoT', save_every=20, rerun=False, greedy=0,
                max_tokens=4096, ngpu=1,
                logger=logging.getLogger(__name__), seed=42):
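    """Generate model responses for the EMMA benchmark.

    Loads the requested subject splits, builds a query per sample according
    to `strategy`, queries a vLLM-served Qwen model, and periodically
    checkpoints the accumulated responses to `output_path` as JSON. When
    launched via torchrun, samples are sharded across ranks and each rank
    writes its own output file.
    """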
    # Load and concatenate the requested subject splits of the dataset.
    logger.info(f"Loading dataset {dataset_name}, subject: {subject}")
    sub_dataset_list = []
    for subj in subject:
        sub_dataset = load_dataset(dataset_name, subj, split=split)
        sub_dataset_list.append(sub_dataset)
    dataset = concatenate_datasets(sub_dataset_list)

    # Load the prompt config.
    logger.info("Loading config")
    config = load_yaml(config_path)
    # Load the model. The path may be a local checkpoint or a Hugging Face
    # model name; either way this script always serves it locally via vLLM.
    logger.info(f"Loading local model {model_path}")
    device = 0
    world_size = 1
    try:
        # Running under torchrun: pick up this process's rank and world size.
        device = int(os.environ["LOCAL_RANK"])
        world_size = int(os.environ["WORLD_SIZE"])
        # Scrub the torchrun/torchelastic environment variables so that vLLM,
        # which manages its own worker processes, does not try to join the
        # launcher's process group.
        dist_keys = [
            "RANK",
            "LOCAL_RANK",
            "WORLD_SIZE",
            "LOCAL_WORLD_SIZE",
            "GROUP_RANK",
            "ROLE_RANK",
            "ROLE_NAME",
            "OMP_NUM_THREADS",
            "MASTER_ADDR",
            "MASTER_PORT",
            "TORCHELASTIC_USE_AGENT_STORE",
            "TORCHELASTIC_MAX_RESTARTS",
            "TORCHELASTIC_RUN_ID",
            "TORCH_NCCL_ASYNC_ERROR_HANDLING",
            "TORCHELASTIC_ERROR_FILE",
        ]
        for dist_key in dist_keys:
            os.environ.pop(dist_key, None)
    except (KeyError, ValueError):
        # Not launched via torchrun: keep single-process defaults.
        pass
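    # Under torchrun data parallelism each rank must drive exactly one GPU;
    # ngpu > 1 (presumably vLLM tensor parallelism, given parallel=ngpu below)
    # is only supported in single-process mode.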
    if world_size > 1:
        assert ngpu == 1, "ngpu must be 1 when running data-parallel via torchrun"
    model = Qwen_vllm_Model(model_path, greedy=greedy, max_tokens=max_tokens,
                            parallel=ngpu, seed=seed, device=device)
    logger.info("Model loaded!")
    if world_size > 1:
        logger.info(f"Using distributed mode with {world_size} GPUs, device {device}")
        # Give each rank its own results file to avoid write collisions.
        output_path = output_path.replace('.json', f'_{device}.json')
    else:
        logger.info("Using single-GPU mode")
    logger.info(f"Output path: {output_path}")
    if os.path.exists(output_path):
        logger.info("Results file already exists.")
        logger.info(f"Reading {output_path}")
        with open(output_path, 'r') as f:
            results = json.load(f)
    else:
        results = {}
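    # Collect the ids of problems whose saved responses pass verification;
    # these are skipped unless --rerun is given.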
    skip_pids = []
    if not rerun and results:
        for pid, data in results.items():
            if 'response' in data and verify_response(data['response']):
                skip_pids.append(pid)
        if len(skip_pids) > 0:
            logger.info(
                f"Found existing results file with {len(skip_pids)} problems with valid responses. Skipping these problems...")

    logger.info("Starting to generate...")
    for idx, sample in enumerate(tqdm(dataset)):
        pid = sample['pid']
        if skip_pids and pid in skip_pids:
            continue
        # Shard across ranks: each rank handles every world_size-th sample.
        if idx % world_size != device:
            continue
        sample = build_query(sample, config, strategy)
        problem: dict = sample.copy()
        # Drop the raw image fields before saving; they are not JSON-serializable.
        for i in range(1, 6):
            problem.pop('image_' + str(i), None)
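        # Query the model; failures are recorded under 'error' (with no
        # 'response' key), so the problem is regenerated on the next run.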
        try:
            response = model.get_response(sample)
            results[pid] = problem
            results[pid]['response'] = response
        except Exception as e:
            logger.error(f"Error in generating answer for {pid}")
            logger.error(e)
            results[pid] = problem
            results[pid]['error'] = str(e)
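        # Checkpoint: an early sanity save at idx == 2, then every save_every
        # samples, and once more on the final sample.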
        if idx == 2 or (idx % save_every == 0 and idx > 0) or idx == len(dataset) - 1:
            try:
                with open(output_path, 'w') as f:
                    f.write(json.dumps(results, indent=2))
                logger.info(f"Saved results to {output_path}")
            except Exception as e:
                logger.info(f"Error in saving {output_path}")
                logger.info(e)

    # Final save after the loop completes.
    with open(output_path, 'w') as f:
        f.write(json.dumps(results, indent=2))
    logger.info(f"Saved results to {output_path}")
    logger.info("End of generation...")
def main():
    parser = argparse.ArgumentParser()
    parser.add_argument('--dataset_name', type=str, default='/root/LMUData/EMMA-mini')
    parser.add_argument('--subject', nargs='+', type=str, default=['Math', 'Physics', 'Chemistry', 'Coding'])
    parser.add_argument('--split', type=str, default='test')
    parser.add_argument('--strategy', type=str, default='CoT', choices=['CoT', 'Direct', 'TrainCoT'])
    parser.add_argument('--config_path', type=str, default="configs/gpt.yaml")
    parser.add_argument('--output_path', type=str, default='results/test-full.json')
    parser.add_argument('--save_every', type=int, default=20, help='save every n problems')
    parser.add_argument('--rerun', action='store_true', help='rerun the answer generation')
    # Local model
    parser.add_argument('--model_path', type=str, default='/user/konglingyu/ckpts/Qwen2-VL-7B',
                        help="local model path or Hugging Face model name")
    parser.add_argument('--max_tokens', type=int, default=4096)
    parser.add_argument('--greedy', type=int, default=0)
    parser.add_argument('--ngpu', type=int, default=1)
    args = parser.parse_args()
    do_generate(
        dataset_name=args.dataset_name,
        model_path=args.model_path,
        output_path=args.output_path,
        subject=args.subject,
        split=args.split,
        config_path=args.config_path,
        strategy=args.strategy,
        save_every=args.save_every,
        rerun=args.rerun,
        greedy=args.greedy,
        max_tokens=args.max_tokens,
        ngpu=args.ngpu
    )
if __name__ == "__main__":
    logging.basicConfig(
        level=os.environ.get("LOGLEVEL", "INFO").upper(),
        format="[%(name)s] %(message)s",
        datefmt="[%X]"
    )
    # Silence chatty third-party loggers so our progress messages stand out.
    logger_blocklist = [
        "asyncio",
        "azure",
        "azureml",
        "datasets",
        "httpx",
        "httpcore",
        "filelock",
        "fsspec",
        "msal",
        "msrest",
        "openai",
        "PIL",
        "urllib3",
    ]
    for module in logger_blocklist:
        logging.getLogger(module).setLevel(logging.WARNING)
    # The default dataset path lives under /root/LMUData; link it to the
    # shared storage location if it is missing.
    if not os.path.exists("/root/LMUData"):
        os.symlink("/user/konglingyu/LMUData", "/root/LMUData")
    main()