def set_next_API_ID():
    # Rotate to the next Azure OpenAI endpoint/key pair (round-robin, thread-safe).
    global API_ID
    lock.acquire()
    API_ID = (API_ID + 1) % len(API_name_key_list)
    openai.api_base = 'https://{0}.openai.azure.com/'.format(API_name_key_list[API_ID][0])
    openai.api_key = API_name_key_list[API_ID][1]
    lock.release()

def multi_threading_running(func, queries, n=20, multiple_API=True):
    def wrapped_function(query, max_try=20):
        if multiple_API:
            set_next_API_ID()
        try:
            return func(query)
        except (openai.error.RateLimitError, openai.error.APIError) as e:
            if not isinstance(e, openai.error.RateLimitError):
                if isinstance(e, openai.error.APIError):
                    print('API Error')
                else:
                    print('found an error:', e)
            if max_try > 0:
                return wrapped_function(query, max_try - 1)
            # Note: returns None implicitly once retries are exhausted.

    pool = ThreadPool(n)
    replies = pool.map(wrapped_function, queries)
    return replies

def query_azure_openai_chat(query, engine='gpt-35-turbo'):
    global default_engine, cache
    query_string = json.dumps(query)
    if query_string in cache:
        return cache[query_string]
    if default_engine is not None:
        engine = default_engine
    if engine == 'chatgpt':
        engine = 'gpt-35-turbo'
    try:
        messages = [{'role': 'system', 'content': 'You are a helpful AI assistant.'}]
        if isinstance(query, str):
            messages.append({'role': 'user', 'content': query})
        elif isinstance(query, list):
            messages += query
        else:
            raise ValueError('Unsupported query: {0}'.format(query))
        response = openai.ChatCompletion.create(
            engine=engine, messages=messages, temperature=0, stop=['<|im_end|>'])
    except TypeError as e:
        print('type error:', e)
        return {'choices': [{'message': {'content': ''}}]}
    try:
        # Only cache non-empty replies.
        if response['choices'][0]['message']['content'] != '':
            cache[query_string] = response
    except Exception:
        pass
    return response

def query_azure_openai_complete(query, engine='gpt-35-turbo'):
    if engine == 'chatgpt':
        engine = 'gpt-35-turbo'
    try:
        response = openai.Completion.create(
            engine=engine, prompt=query, max_tokens=2000, temperature=0, stop=['<END>'])
    except TypeError as e:
        print(e)
        return {'choices': [{'text': ''}]}
    return response

def test_speed_1():
    import json
    path = 'khan/topic_19.jsonal'
    questions = []
    timer = Timer()
    with open(path) as reader:
        for i, line in enumerate(reader):
            js = json.loads(line.strip())
            questions.append(js['Question'])
    questions = questions[:100]
    reply = multi_threading_running(query_azure_openai_complete, questions, n=50, multiple_API=True)
    print('Average time after {0} samples: {1}'.format(
        len(questions), timer.get_time(restart=False) / len(questions)))

def test_speed_2():
    import json
    path = 'D:\\Datasets\\AGIEval\\outputs\\model_output\\english_choice\\sat_math\\turbo_few\\test_sat_math_gpt-35-turbo_cot_False_few.jsonl'
    with open(path, encoding='utf8') as reader:
        questions = []
        for line in reader:
            js = json.loads(line.strip())
            questions.append(js['Question'])
    timer = Timer()
    results = multi_threading_running(query_azure_openai_complete, questions, n=50, multiple_API=True)
    print('Average time after {0} samples: {1}'.format(
        len(questions), timer.get_time(restart=False) / len(questions)))
    with open('output.txt', 'w') as writer:
        for result in results:
            writer.write(json.dumps(result) + '\n')

def query_openai(context_list, setting_name, n_multiply=3):
    # Thread count scales with the number of available API endpoints.
    n = len(openai_api.API_name_key_list) * n_multiply
    try:
        print('multi-thread n =', n)
        if setting_name == 'complete':
            results = openai_api.multi_threading_running(
                openai_api.query_azure_openai_complete, context_list, n=n)
        else:
            results = openai_api.multi_threading_running(
                openai_api.query_azure_openai_chat, context_list, n=n)
    except openai.error.APIConnectionError as e:
        print('found error:', e)
        # Back off and retry with fewer threads per endpoint.
        return query_openai(context_list, setting_name,
                            max(max(n_multiply - 5, n_multiply // 2), 1))
    return results

def query_openai_with_retry(context_list, setting_name, retry_time=4, results=None):
    if results is None:
        results = query_openai(context_list, setting_name)
    while retry_time > 0:
        # Re-query only the samples whose answers could not be extracted.
        filtered_context_list = []
        for i in range(len(results)):
            if utils.extract_answer(results[i]) == '':
                filtered_context_list.append(context_list[i])
        if len(filtered_context_list) == 0:
            break
        filtered_results = query_openai(filtered_context_list, setting_name)
        p = 0
        for i in range(len(results)):
            if utils.extract_answer(results[i]) == '':
                results[i] = filtered_results[p]
                p += 1
        assert p == len(filtered_results)
        retry_succeeded = 0
        for item in filtered_results:
            if utils.extract_answer(item) != '':
                retry_succeeded += 1
        print('In the retry, {0} samples succeeded, {1} samples failed'.format(
            retry_succeeded, len(filtered_results) - retry_succeeded))
        if retry_succeeded <= 3:
            retry_time -= 1
    assert len(results) == len(context_list)
    return results

def run_multiple_dataset_batch(work_items):
    if len(work_items) == 0:
        return
    print('work items:', work_items)
    dataset_list = []
    item_list = []
    for input_path, output_path, mode, _ in work_items:
        # All items in a batch must share the same setting (mode).
        assert mode == work_items[0][2]
        js_list = utils.read_jsonl(input_path)
        content_list = [item['context'] for item in js_list]
        dataset_list.append(len(content_list))
        item_list += content_list
    results = query_openai_with_retry(context_list=item_list, setting_name=work_items[0][2])
    s = 0
    for i in range(len(dataset_list)):
        utils.save_jsonl(results[s:s + dataset_list[i]], work_items[i][1])
        s += dataset_list[i]
    assert s == len(results)

def run_multiple_dataset(work_items):
    batch = []
    count = 0
    batch_size = 1000
    if openai_api.default_engine == 'gpt-4':
        batch_size = 500
    for item in work_items:
        # Skip datasets whose outputs already exist and are complete.
        if os.path.exists(item[1]):
            if len(utils.read_jsonl(item[1])) == item[3]:
                continue
        if count + item[3] > batch_size:
            run_multiple_dataset_batch(batch)
            batch = []
            count = 0
        batch.append(item)
        count += item[3]
    if len(batch) > 0:
        run_multiple_dataset_batch(batch)

class TaskSchema(object):
    def __init__(self, passage=None, question=None, options=None, label=None, answer=None, other=None):
        self.passage = passage
        self.question = question
        self.options = options
        self.label = label
        self.answer = answer
        self.other = other

    def to_dict(self):
        return {
            'passage': self.passage,
            'question': self.question,
            'options': self.options,
            'label': self.label,
            'answer': self.answer,
            'other': self.other,
        }

class AgiInstance(object):
    def __init__(self, task_description, data_source, task_schema, output, evaluation_metric, task_example):
        self.task_description = task_description
        self.data_source = data_source
        self.task_schema = task_schema
        self.output = output
        self.evaluation_metric = evaluation_metric
        self.task_example = task_example

    def to_dict(self):
        return {
            'task description': self.task_description,
            'data source': self.data_source,
            'task schema': self.task_schema.to_dict(),
            'output': self.output,
            'evaluation metric': self.evaluation_metric,
            'task example': self.task_example,
        }

class ChatGPTSchema(object):
    def __init__(self, context=None, metadata=''):
        self.context = context
        self.metadata = metadata

    def to_dict(self):
        return {'context': self.context, 'metadata': self.metadata}

class ResultsForHumanSchema(object):
    def __init__(self, index, problem_input, label, model_input='', model_output='',
                 parse_result='', first_stage_output='', second_stage_input='', is_correct=False):
        self.index = index
        self.problem_input = problem_input
        self.model_input = model_input
        self.model_output = model_output
        self.parse_result = parse_result
        self.label = label
        self.first_stage_output = first_stage_output
        self.second_stage_input = second_stage_input
        self.is_correct = is_correct

    def to_dict(self):
        return {
            'index': self.index,
            'problem_input': self.problem_input,
            'model_input': self.model_input,
            'model_output': self.model_output,
            'parse_result': self.parse_result,
            'label': self.label,
            'is_correct': self.is_correct,
            'first_stage_output': self.first_stage_output,
            'second_stage_input': self.second_stage_input,
        }

    @staticmethod
    def to_tsv(result_list, path):
        # Despite the name, this writes an Excel file via pandas.
        result_json = [item.to_dict() for item in result_list]
        table = pd.json_normalize(result_json)
        table.to_excel(path, index=False)

def convert_zero_shot(line, dataset_name):
    try:
        passage = line['passage'] if line['passage'] is not None else ''
        if dataset_name in english_qa_datasets:
            option_string = 'ABCDEFG'
            count = len(line['options'])
            if count == 1:
                count = 5
            return (passage + 'Q: ' + line['question'] + ' '
                    + 'Answer Choices: ' + ' '.join(line['options']) + '\n'
                    + 'A: Among A through {}, the answer is'.format(option_string[count - 1]))
        elif dataset_name in chinese_qa_datasets:
            option_string = 'ABCDEFG'
            count = len(line['options'])
            if count == 1:
                count = 4
            return (passage + '问题:' + line['question'] + ' '
                    + '选项:' + ' '.join(line['options']) + '\n'
                    + '答案:从A到{}, 我们应选择'.format(option_string[count - 1]))
        elif dataset_name in english_cloze_datasets:
            return passage + 'Q: ' + line['question'] + '\nA: The answer is'
        elif dataset_name in chinese_cloze_datasets:
            return passage + '问题:' + line['question'] + '\n答案:'
    except NameError:
        print('Dataset not defined.')

def convert_zero_shot_CoT_stage1(line, dataset_name):
    try:
        passage = line['passage'] if line['passage'] is not None else ''
        if dataset_name in english_qa_datasets:
            return (passage + 'Q: ' + line['question'] + ' '
                    + 'Answer Choices: ' + ' '.join(line['options']) + '\n'
                    + "Let's think step by step.")
        elif dataset_name in chinese_qa_datasets:
            option_string = 'ABCDEFG'
            count = len(line['options'])
            if count == 1:
                count = 4
            return (passage + '问题:' + line['question'] + ' '
                    + '选项:' + ' '.join(line['options']) + '\n'
                    + '从A到{}, 我们应选择什么?让我们逐步思考:'.format(option_string[count - 1]))
        elif dataset_name in english_cloze_datasets:
            return passage + 'Q: ' + line['question'] + "\nA: Let's think step by step."
        elif dataset_name in chinese_cloze_datasets:
            return passage + '问题:' + line['question'] + '\n答案:让我们逐步思考:'
    except NameError:
        print('Dataset not defined.')

def combine_prompt(prompt_path, dataset_name, load_explanation=True, chat_mode=False):
    skip_passage = False
    if dataset_name == 'sat-en-without-passage':
        skip_passage = True
        dataset_name = 'sat-en'
    demonstrations = []
    # Few-shot contexts and their explanations live on alternating rows of the prompt CSV.
    context_row = [0, 1, 3, 5, 7, 9]
    explanation_row = [0, 2, 4, 6, 8, 10]
    raw_prompts_context = pd.read_csv(
        prompt_path, header=0, skiprows=lambda x: x not in context_row, keep_default_na=False)
    raw_prompts_explanation = pd.read_csv(
        prompt_path, header=0, skiprows=lambda x: x not in explanation_row,
        keep_default_na=False).replace('\\n\\n', '\n', regex=True)
    contexts = []
    for line in list(raw_prompts_context[dataset_name]):
        if line:
            contexts.append(ast.literal_eval(line))
    explanations = [exp for exp in raw_prompts_explanation[dataset_name] if exp]
    for idx, (con, exp) in enumerate(zip(contexts, explanations)):
        passage = con['passage'] if (con['passage'] is not None and not skip_passage) else ''
        question = con['question']
        options = con['options'] if con['options'] is not None else ''
        label = con['label'] if con['label'] is not None else ''
        answer = con['answer'] if ('answer' in con and con['answer'] is not None) else ''
        if dataset_name in english_qa_datasets:
            question_input = ('Problem {}. '.format(idx + 1) + passage + ' ' + question + '\n'
                              + 'Choose from the following options: ' + ' '.join(options) + '\n')
            question_output = (('Explanation for Problem {}: '.format(idx + 1) + exp + '\n'
                                if load_explanation else '')
                               + 'The answer is therefore {}'.format(label))
        elif dataset_name in chinese_qa_datasets:
            question_input = ('问题 {}. '.format(idx + 1) + passage + ' ' + question + '\n'
                              + '从以下选项中选择: ' + ' '.join(options) + '\n')
            question_output = (('问题 {}的解析: '.format(idx + 1) + exp + '\n'
                                if load_explanation else '')
                               + '答案是 {}'.format(label))
        elif dataset_name in english_cloze_datasets:
            question_input = 'Problem {}. '.format(idx + 1) + question + '\n'
            question_output = (('Explanation for Problem {}: '.format(idx + 1) + exp + '\n'
                                if load_explanation else '')
                               + 'The answer is therefore {}'.format(answer))
        elif dataset_name in chinese_cloze_datasets:
            question_input = '问题 {}. '.format(idx + 1) + question + '\n'
            question_output = (('问题 {}的解析: '.format(idx + 1) + exp + '\n'
                                if load_explanation else '')
                               + '答案是 {}'.format(answer))
        else:
            raise ValueError(f'During loading few-shot examples, found unknown dataset: {dataset_name}')
        if chat_mode:
            demonstrations.append((question_input, question_output))
        else:
            demonstrations.append(question_input + question_output + '\n')
    return demonstrations

def concat_prompt(demos, dataset_name, max_tokens, end_of_example='\n', verbose=False):
    demonstration_en = 'Here are the answers for the problems in the exam.\n'
    demonstration_zh = '以下是考试中各个问题的答案。\n'
    for i in range(len(demos)):
        if dataset_name in english_qa_datasets:
            demonstration_en = demonstration_en + demos[i] + end_of_example
        elif dataset_name in chinese_qa_datasets:
            demonstration_zh = demonstration_zh + demos[i] + end_of_example
        elif dataset_name in english_cloze_datasets:
            demonstration_en = demonstration_en + demos[i] + end_of_example
        elif dataset_name in chinese_cloze_datasets:
            demonstration_zh = demonstration_zh + demos[i] + end_of_example
        # Keep adding shots greedily until either prompt would exceed the token budget.
        if (len(enc.encode(demonstration_en)) < max_tokens
                and len(enc.encode(demonstration_zh)) < max_tokens):
            output = (demonstration_en if len(demonstration_en) > len(demonstration_zh)
                      else demonstration_zh)
            prompt_num = i + 1
        else:
            break
    if verbose:
        print('max_tokens set as ', max_tokens, 'actual_tokens is',
              len(enc.encode(output)), 'num_shot is', prompt_num)
    return output, prompt_num

def concat_prompt_chat_mode(demos, dataset_name, max_tokens, end_of_example='\n', verbose=False):
    answers = []
    sentences = ''
    for i in range(len(demos)):
        answers += [
            {'role': 'user', 'content': demos[i][0]},
            {'role': 'assistant', 'content': demos[i][1]},
        ]
        sentences += json.dumps(answers[-1])
        if len(enc.encode(sentences)) > max_tokens:
            # Drop the user/assistant pair that pushed us over budget.
            answers.pop()
            answers.pop()
            break
    if verbose:
        print('max_tokens set as ', max_tokens, 'actual_tokens is',
              len(enc.encode(sentences)), 'num_shot is', len(answers) // 2)
    return answers, len(answers) // 2

def convert_few_shot(line, dataset_name, demo, n_shot, chat_mode=False):
    passage = line['passage'] if line['passage'] is not None else ''
    question = line['question']
    options = line['options'] if line['options'] is not None else ''
    if dataset_name in english_qa_datasets:
        question_input = ('Problem {}. '.format(n_shot + 1) + passage + ' ' + question + '\n'
                          + 'Choose from the following options: ' + ' '.join(options) + '\n')
    if dataset_name in chinese_qa_datasets:
        question_input = ('问题 {}. '.format(n_shot + 1) + passage + ' ' + question + '\n'
                          + '从以下选项中选择: ' + ' '.join(options) + '\n')
    if dataset_name in english_cloze_datasets:
        question_input = 'Problem {}. '.format(n_shot + 1) + question + '\n'
    if dataset_name in chinese_cloze_datasets:
        question_input = '问题 {}. '.format(n_shot + 1) + question + '\n'
    if chat_mode:
        return demo + [{'role': 'user', 'content': question_input}]
    else:
        return demo + question_input

def load_dataset(dataset_name, setting_name, parent_path, prompt_path=None, max_tokens=None,
                 end_of_example='\n', chat_mode=False, verbose=False):
    test_path = os.path.join(parent_path, dataset_name + '.jsonl')
    loaded_jsonl = read_jsonl(test_path)
    processed = []
    if setting_name == 'few-shot-CoT' or setting_name == 'few-shot':
        # Process the demonstrations only once for the whole dataset.
        processed_demos = combine_prompt(
            prompt_path, dataset_name,
            load_explanation=(setting_name == 'few-shot-CoT'), chat_mode=chat_mode)
        if chat_mode:
            chosen_prompt, n_shot = concat_prompt_chat_mode(
                processed_demos, dataset_name, max_tokens, end_of_example, verbose=verbose)
        else:
            chosen_prompt, n_shot = concat_prompt(
                processed_demos, dataset_name, max_tokens, end_of_example, verbose=verbose)
    if verbose:
        loaded_jsonl = tqdm(loaded_jsonl)
    for meta_idx, line in enumerate(loaded_jsonl):
        if setting_name == 'zero-shot':
            ctxt = convert_zero_shot(line, dataset_name)
        elif setting_name == 'zero-shot-CoT':
            ctxt = convert_zero_shot_CoT_stage1(line, dataset_name)
        elif setting_name == 'few-shot-CoT' or setting_name == 'few-shot':
            ctxt = convert_few_shot(line, dataset_name, chosen_prompt, n_shot, chat_mode)
        try:
            new_instance = ChatGPTSchema(context=ctxt, metadata=meta_idx)
            processed.append(new_instance.to_dict())
        except NameError:
            print('Dataset not defined.')
    return processed

def generate_second_stage_input(dataset_name, input_list, output_list, with_format_prompt=False):
    try:
        english_format_prompt = ('Based on the previous results, your task is to extract the final '
                                 'answer and provide the output enclosed in brackets【】, such as 【0】 or 【A】.')
        chinese_format_prompt = '根据以上内容,你的任务是把最终的答案提取出来并填在【】中,例如【0】或者【A】。'
        if dataset_name in english_qa_datasets:
            prompt_suffix = 'Therefore, among A through E, the answer is'
            if with_format_prompt:
                prompt_suffix = english_format_prompt + prompt_suffix
        elif dataset_name in chinese_qa_datasets:
            prompt_suffix = '因此,从A到D, 我们应选择'
            if with_format_prompt:
                prompt_suffix = chinese_format_prompt + prompt_suffix
        elif dataset_name in english_cloze_datasets:
            prompt_suffix = 'Therefore, the answer is'
            if with_format_prompt:
                prompt_suffix = english_format_prompt + prompt_suffix
        elif dataset_name in chinese_cloze_datasets:
            prompt_suffix = '因此,答案是'
            if with_format_prompt:
                prompt_suffix = chinese_format_prompt + prompt_suffix
    except NameError:
        print('Dataset not defined.')
    processed = []
    for i in range(len(input_list)):
        ctxt = '{0}\n{1}\n{2}'.format(
            input_list[i]['context'], extract_answer(output_list[i]), prompt_suffix)
        new_instance = ChatGPTSchema(context=ctxt, metadata=input_list[i]['metadata'])
        processed.append(new_instance.to_dict())
    return processed

def load_dataset_as_result_schema(dataset_name, parent_path):
    test_path = os.path.join(parent_path, dataset_name + '.jsonl')
    loaded_jsonl = read_jsonl(test_path)
    processed = []
    for i, line in enumerate(loaded_jsonl):
        problem_input = convert_zero_shot(line, dataset_name)
        processed.append(ResultsForHumanSchema(
            index=i,
            problem_input=problem_input,
            label=line['label'] if line['label'] else line['answer'],
        ))
    return processed

def convert_to_set(item):
    if isinstance(item, list):
        return set(item)
    if isinstance(item, str):
        return {item}
    if item is None:
        return set()  # fixed: the original `{}` is an empty dict, not an empty set
    raise ValueError("Input can't parse:", item)

def evaluate_single_sample(dataset_name, prediction, label):
    if dataset_name in dataset_loader.multi_choice_datasets:
        p = convert_to_set(prediction)
        l = convert_to_set(label)
        return p == l
    elif dataset_name in dataset_loader.math_output_datasets:
        return is_equiv(prediction, label)
    else:
        return prediction == label

def _fix_fracs(string):
    # Normalize \frac shorthand: "\frac12" -> "\frac{1}{2}", "\frac1{2}" -> "\frac{1}{2}".
    substrs = string.split('\\frac')
    new_str = substrs[0]
    if len(substrs) > 1:
        substrs = substrs[1:]
        for substr in substrs:
            new_str += '\\frac'
            if substr[0] == '{':
                new_str += substr
            else:
                try:
                    assert len(substr) >= 2
                except AssertionError:
                    return string
                a = substr[0]
                b = substr[1]
                if b != '{':
                    if len(substr) > 2:
                        post_substr = substr[2:]
                        new_str += '{' + a + '}{' + b + '}' + post_substr
                    else:
                        new_str += '{' + a + '}{' + b + '}'
                elif len(substr) > 2:
                    post_substr = substr[2:]
                    new_str += '{' + a + '}' + b + post_substr
                else:
                    new_str += '{' + a + '}' + b
    string = new_str
    return string

def _fix_a_slash_b(string):
    # "a/b" with integer a, b -> "\frac{a}{b}".
    if len(string.split('/')) != 2:
        return string
    a = string.split('/')[0]
    b = string.split('/')[1]
    try:
        a = int(a)
        b = int(b)
        assert string == '{}/{}'.format(a, b)
        return '\\frac{' + str(a) + '}{' + str(b) + '}'
    except (ValueError, AssertionError):
        return string

def _remove_right_units(string):
    # Units are assumed to appear on the right, e.g. "5 \text{ cm}".
    if '\\text{ ' in string:
        splits = string.split('\\text{ ')
        assert len(splits) == 2
        return splits[0]
    else:
        return string

def _fix_sqrt(string):
    # "\sqrt3" -> "\sqrt{3}".
    if '\\sqrt' not in string:
        return string
    splits = string.split('\\sqrt')
    new_string = splits[0]
    for split in splits[1:]:
        if split[0] != '{':
            a = split[0]
            new_substr = '\\sqrt{' + a + '}' + split[1:]
        else:
            new_substr = '\\sqrt' + split
        new_string += new_substr
    return new_string

def _strip_string(string):
    # Normalize a LaTeX answer string so that superficially different but
    # mathematically identical answers compare equal.
    string = string.replace('\n', '')
    string = string.replace('\\!', '')
    string = string.replace('\\\\', '\\')
    string = string.replace('tfrac', 'frac')
    string = string.replace('dfrac', 'frac')
    string = string.replace('\\left', '')
    string = string.replace('\\right', '')
    string = string.replace('^{\\circ}', '')
    string = string.replace('^\\circ', '')
    string = string.replace('\\$', '')
    string = _remove_right_units(string)
    string = string.replace('\\%', '')
    string = string.replace(' .', ' 0.')
    string = string.replace('{.', '{0.')
    if len(string) == 0:
        return string
    if string[0] == '.':
        string = '0' + string
    # For "X = Y" with a short left-hand side, keep only the right-hand side.
    if len(string.split('=')) == 2:
        if len(string.split('=')[0]) <= 2:
            string = string.split('=')[1]
    string = _fix_sqrt(string)
    string = string.replace(' ', '')
    string = _fix_fracs(string)
    if string == '0.5':
        string = '\\frac{1}{2}'
    string = _fix_a_slash_b(string)
    return string

def is_equiv(str1, str2, verbose=False):
    if str1 is None and str2 is None:
        print('WARNING: Both None')
        return True
    if str1 is None or str2 is None:
        return False
    try:
        ss1 = _strip_string(str1)
        ss2 = _strip_string(str2)
        if verbose:
            print(ss1, ss2)
        return ss1 == ss2
    except Exception:
        return str1 == str2

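# Hedged usage sketch (not part of the original file): is_equiv compares answers
# after normalization, so "1/2", "0.5", and "\frac{1}{2}" all reduce to the same
# canonical string. The helper name _example_is_equiv is hypothetical.
def _example_is_equiv():
    assert is_equiv('1/2', '0.5')
    assert is_equiv('\\frac{1}{2}', '0.5')
    assert not is_equiv('0.5', '0.25')
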
def extract_last_line(string):
    # Return the last non-empty line of a (possibly multi-line) completion.
    lines = string.split('\n')
    for item in lines[::-1]:
        if item.strip() != '':
            string = item
            break
    return string

def remove_few_shot_prefix(string: str):
    prefix_list = ['The answer is therefore', '答案是']
    for prefix in prefix_list:
        if string.startswith(prefix):
            string = string[len(prefix):].strip()
        elif prefix in string:
            index = string.rfind(prefix)
            if index >= 0:
                string = string[index + len(prefix):].strip()
    return string

def try_parse_few_shot_qa_single_answer(string, setting_name, language='en'):
    if setting_name == 'few-shot-CoT':
        string = extract_last_line(string)
    if language == 'en':
        pattern = 'answer is .*?([A-G])'
        match = re.search(pattern, string)
    elif language == 'zh':
        pattern = '答案是.*?([A-G])'
        match = re.search(pattern, string)
    else:
        raise ValueError('Unknown language {0}'.format(language))
    if match:
        return match.group(1)
    else:
        return None

def try_parse_few_shot_pattern(string: str, dataset_name, setting_name):
    if setting_name == 'few-shot-CoT':
        string = extract_last_line(string)
    if dataset_name in dataset_loader.chinese_cloze_datasets:
        return string.startswith('答案是')
    elif dataset_name in dataset_loader.english_cloze_datasets:
        return string.startswith('The answer is therefore')
    elif dataset_name in dataset_loader.chinese_qa_datasets:
        pattern = '答案是.*?([A-G])'
        match = re.search(pattern, string)
        return match is not None
    elif dataset_name in dataset_loader.english_qa_datasets:
        pattern = 'answer is .*?([A-G])'
        match = re.search(pattern, string)
        return match is not None
    return False

def parse_few_shot_qa_single_answer(string, setting_name, language='en'):
    answer = try_parse_few_shot_qa_single_answer(string, setting_name, language)
    if answer is None:
        return find_first_capital_letter(string)
    else:
        return answer

def find_first_capital_letter(answer):
    letter_set = {'A', 'B', 'C', 'D', 'E', 'F'}
    for c in answer:
        if c in letter_set:
            return c
    return ''

def extract_answer_in_bracket(answer, prefix='【', suffix='】'):
    # Note: this only bails out when *both* markers are missing; a string that
    # contains the prefix but not the suffix will raise ValueError from .index().
    if prefix not in answer and suffix not in answer:
        return ''
    s = answer.index(prefix) + len(prefix)
    t = answer.index(suffix)
    ret = answer[s:t]
    return ret

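# Hedged usage sketch (not part of the original file): pull the bracketed final
# answer out of a second-stage completion. _example_extract_answer_in_bracket
# is a hypothetical helper name.
def _example_extract_answer_in_bracket():
    assert extract_answer_in_bracket('所以最终答案是【A】') == 'A'
    assert extract_answer_in_bracket('no brackets here') == ''
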
def parse_math_answer(setting_name, raw_string):
    if setting_name == 'few-shot-CoT':
        raw_string = extract_last_line(raw_string)
    if setting_name == 'few-shot-CoT' or setting_name == 'few-shot':
        raw_string = remove_few_shot_prefix(raw_string)
        return raw_string

    def remove_boxed(s):
        left = '\\boxed{'
        try:
            assert s[:len(left)] == left
            assert s[-1] == '}'
            answer = s[len(left):-1]
            if '=' in answer:
                answer = answer.split('=')[-1].lstrip(' ')
            return answer
        except Exception:
            return None

    def last_boxed_only_string(string):
        idx = string.rfind('\\boxed')
        if idx < 0:
            idx = string.rfind('\\fbox')
            if idx < 0:
                return None
        i = idx
        right_brace_idx = None
        num_left_braces_open = 0
        # Scan forward to the brace that closes the \boxed{...} group.
        while i < len(string):
            if string[i] == '{':
                num_left_braces_open += 1
            if string[i] == '}':
                num_left_braces_open -= 1
                if num_left_braces_open == 0:
                    right_brace_idx = i
                    break
            i += 1
        if right_brace_idx is None:
            retval = None
        else:
            retval = string[idx:right_brace_idx + 1]
        return retval

    def get_answer_with_dollar_sign(s):
        first_pattern = '\\$(.*)\\$'
        last_match = None
        matches = re.findall(first_pattern, s)
        if matches:
            last_match = matches[-1]
            if '=' in last_match:
                last_match = last_match.split('=')[-1].lstrip(' ')
        return last_match

    def get_answer_without_dollar_sign(s):
        last_match = None
        if '=' in s:
            last_match = s.split('=')[-1].lstrip(' ').rstrip('.')
            if '\\n' in last_match:
                last_match = last_match.split('\\n')[0]
        else:
            pattern = '(?:\\$)?\\d+(?:\\.\\d+)?(?![\\w\\d])'
            matches = re.findall(pattern, s)
            if matches:
                last_match = matches[-1]
        return last_match

    raw_string = remove_few_shot_prefix(raw_string)
    if '\\boxed' in raw_string:
        answer = remove_boxed(last_boxed_only_string(raw_string))
    else:
        answer = get_answer_with_dollar_sign(raw_string)
        if not answer:
            answer = get_answer_without_dollar_sign(raw_string)
    return answer

def parse_qa_multiple_answer(string, setting_name):
    if setting_name == 'few-shot-CoT':
        string = extract_last_line(string)
    pattern = '\\(*([A-F])\\)*'
    match = re.findall(pattern, string)
    if match:
        return match
    return []

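# Hedged usage sketch (not part of the original file): the pattern collects every
# capital letter A-F, with or without surrounding parentheses. The helper name
# _example_parse_qa_multiple_answer is hypothetical.
def _example_parse_qa_multiple_answer():
    assert parse_qa_multiple_answer('答案是(A)(C)', 'zero-shot') == ['A', 'C']
    assert parse_qa_multiple_answer('no choice letters', 'zero-shot') == []
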
def post_process(dataset_name, setting_name, prediction):
    if (dataset_name in dataset_loader.english_cloze_datasets
            or dataset_name in dataset_loader.chinese_cloze_datasets):
        return parse_math_answer(setting_name, prediction)
    if dataset_name in ['jec-qa-kd', 'jec-qa-ca', 'gaokao-physics']:
        return parse_qa_multiple_answer(prediction, setting_name)
    if 'zero-shot' in setting_name:
        answer = find_first_capital_letter(prediction)
        return answer
    language = 'en' if dataset_name in dataset_loader.english_qa_datasets else 'zh'
    if (dataset_name in dataset_loader.english_qa_datasets
            or dataset_name in dataset_loader.chinese_qa_datasets):
        return parse_few_shot_qa_single_answer(prediction, setting_name, language)
    else:
        raise ValueError(f'Unsupported dataset name {dataset_name}')

def read_jsonl(path):
    with open(path, encoding='utf8') as fh:
        results = []
        for line in fh:
            if line is None:
                continue
            try:
                results.append(json.loads(line) if line != 'null' else line)
            except Exception as e:
                print(e)
                print(path)
                print(line)
                raise e
    return results

def save_jsonl(lines, directory):
    with open(directory, 'w', encoding='utf8') as f:
        for line in lines:
            f.write(json.dumps(line, ensure_ascii=False) + '\n')

def extract_answer(js):
    try:
        if js is None or js == 'null':
            return ''
        answer = ''
        if isinstance(js, str):
            answer = js
        elif 'text' in js['choices'][0]:
            answer = js['choices'][0]['text']
        else:
            answer = js['choices'][0]['message']['content']
        return answer
    except Exception:
        return ''

def read_json_dirs(path):
    fnames_list = []
    for subdir, dirs, files in os.walk(path):
        for file in files:
            fnames_list.append(os.path.join(subdir, file))
    return fnames_list

def read_jsonl(path):
    with open(path) as fh:
        return [json.loads(line) for line in fh.readlines() if line]

def save_jsonl(lines, directory):
    with open(directory, 'w') as f:
        for line in lines:
            f.write('{}\n'.format(json.dumps(line)))

def lsat_preprosess(args):
    def _preprocess_file(input_dir, output_dir):
        def parse_unicode(s):
            return s.encode('utf-8').decode('utf-8')

        def format_lsat(raw):
            cleaned = []
            for r in raw[0]:
                option_string = 'ABCDEFGH'
                option_list = []
                for idx, option in enumerate(r['answers']):
                    option_list.append('(' + option_string[idx] + ')' + parse_unicode(option))
                new_instance = TaskSchema(
                    passage=parse_unicode(r['context']),
                    question=parse_unicode(r['question']),
                    options=option_list,
                    label=option_string[r['label']])
                cleaned.append(new_instance.to_dict())
            return cleaned

        f_list = read_json_dirs(input_dir)
        for path in f_list:
            formatted_data = format_lsat(read_jsonl(path))
            # '.json' input becomes '.jsonl' output by appending an 'l'.
            full_dir = output_dir + '/' + path.split('/')[-1] + 'l'
            save_jsonl(formatted_data, full_dir)

    _preprocess_file(args.data_dir, args.output_dir)

def boolean_string(s):
    if s not in {'False', 'True'}:
        raise ValueError('Not a valid boolean string')
    return s == 'True'

def main(_):
    if FLAGS.gpu:
        os.environ['CUDA_DEVICE_ORDER'] = 'PCI_BUS_ID'
        os.environ['CUDA_VISIBLE_DEVICES'] = FLAGS.gpu
    tf.set_random_seed(1234)
    np.random.seed(0)
    model = GridCell(FLAGS)
    output_dir = os.path.join(
        'output', os.path.splitext(os.path.basename(__file__))[0],
        datetime.datetime.now().strftime('%Y-%m-%d-%H-%M-%S'))
    if len(sys.argv) > 1:
        output_dir = ''.join([output_dir] + sys.argv[1:])
    if not os.path.exists(output_dir):
        os.makedirs(output_dir)
    with open(os.path.join(output_dir, 'config.txt'), 'a') as f:
        print(FLAGS, file=f)

    def copy_source(file, output_dir):
        import shutil
        shutil.copyfile(file, os.path.join(output_dir, os.path.basename(file)))

    copy_source(__file__, output_dir)

    with tf.Session() as sess:
        if FLAGS.mode == 'train':
            train(FLAGS, model, sess, output_dir)
        elif FLAGS.mode == 'visualize':
            model.build_model()
            assert FLAGS.ckpt is not None, 'no checkpoint provided.'
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, FLAGS.ckpt)
            visualize(model, sess, output_dir)
            visualize_u(model, sess, output_dir)
            visualize_B(model, sess, output_dir)
        elif FLAGS.mode == 'path_integration':
            FLAGS.integral_step = max(30, FLAGS.integral_step)
            model.path_integral(FLAGS.integral_step, noise_type=FLAGS.noise_type,
                                noise_level=FLAGS.noise_level)
            assert FLAGS.ckpt is not None, 'no checkpoint provided.'
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, FLAGS.ckpt)
            print('Loading checkpoint {}.'.format(FLAGS.ckpt))
            test_dir = output_dir
            if not tf.gfile.Exists(test_dir):
                tf.gfile.MakeDirs(test_dir)
            data_generator_test = DataGenerator(FLAGS, max=FLAGS.place_size, num_grid=model.num_grid)
            # A small visualized batch first, then a larger batch for the error statistics.
            place_seq_test = data_generator_test.generate(
                10, max_dx=FLAGS.max_dx, num_step=FLAGS.integral_step,
                dtype=2, test=True, visualize=True)
            test_path_integral(model, sess, place_seq_test, visualize=True, test_dir=test_dir)
            place_seq_test2 = data_generator_test.generate(
                1000, max_dx=FLAGS.max_dx, num_step=FLAGS.integral_step,
                dtype=2, test=True, visualize=False)
            err = test_path_integral(model, sess, place_seq_test2, visualize=False, test_dir=test_dir)
            np.save(os.path.join(test_dir, 'path_err.npy'), err)
            err = np.mean(err, axis=(0, 2))
            np.set_printoptions(threshold=int(100000000.0))
            print('%s err: %f %f' % (output_dir, err[0], err[1]))
            if FLAGS.log_file is not None:
                with open(FLAGS.log_file, 'a') as f:
                    print('%s err: %f %f' % (output_dir, err[0], err[1]), file=f)
        elif FLAGS.mode == 'error_correction':
            x, x_hat = model.test_error_correction(noise_level=FLAGS.noise_level, quantile=99.5)
            assert FLAGS.ckpt is not None, 'no checkpoint provided.'
            saver = tf.train.Saver()
            sess.run(tf.global_variables_initializer())
            saver.restore(sess, FLAGS.ckpt)
            print('Loading checkpoint {}.'.format(FLAGS.ckpt))
            x_value = np.zeros([1000, 2])
            x_hat_value = np.zeros([1000, 2])
            for i in range(1000):
                x_value_i, x_hat_value_i = sess.run([x, x_hat])
                x_value[i] = x_value_i
                x_hat_value[i] = x_hat_value_i
            err = np.sqrt(np.sum((x_value - x_hat_value) ** 2, axis=1)) / float(model.num_grid - 1)
            print('%s err: mean %f std %f' % (output_dir, np.mean(err), np.std(err)))
            if FLAGS.log_file is not None:
                with open(FLAGS.log_file, 'a') as f:
                    print('%s err: mean %f std %f' % (output_dir, np.mean(err), np.std(err)), file=f)
        else:
            raise NotImplementedError

def cg(f_Ax, b, cg_iters=10, callback=None, verbose=False, residual_tol=1e-10):
    """
    Conjugate gradient solver for Ax = b, with A given implicitly via f_Ax.
    Demmel p 312
    """
    p = b.copy()
    r = b.copy()
    x = np.zeros_like(b)
    rdotr = r.dot(r)
    fmtstr = '%10i %10.3g %10.3g'
    titlestr = '%10s %10s %10s'
    if verbose:
        print(titlestr % ('iter', 'residual norm', 'soln norm'))
    for i in range(cg_iters):
        if callback is not None:
            callback(x)
        if verbose:
            print(fmtstr % (i, rdotr, np.linalg.norm(x)))
        z = f_Ax(p)
        v = rdotr / p.dot(z)
        x += v * p
        r -= v * z
        newrdotr = r.dot(r)
        mu = newrdotr / rdotr
        p = r + mu * p
        rdotr = newrdotr
        if rdotr < residual_tol:
            break
    if callback is not None:
        callback(x)
    if verbose:
        print(fmtstr % (i + 1, rdotr, np.linalg.norm(x)))
    return x

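# Hedged usage sketch (not part of the original file): solve a small symmetric
# positive-definite system and compare against the direct solve. Assumes numpy
# is imported as np, as elsewhere in this file; _example_cg is a hypothetical name.
def _example_cg():
    A = np.array([[4.0, 1.0], [1.0, 3.0]])
    b = np.array([1.0, 2.0])
    x = cg(lambda p: A.dot(p), b, cg_iters=25)
    assert np.allclose(x, np.linalg.solve(A, b), atol=1e-6)
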
def make_atari_env(env_id, num_env, seed, wrapper_kwargs=None, start_index=0):
    """
    Create a wrapped, monitored SubprocVecEnv for Atari.
    """
    if wrapper_kwargs is None:
        wrapper_kwargs = {}

    def make_env(rank):
        def _thunk():
            env = make_atari(env_id)
            env.seed(seed + rank)
            env = Monitor(env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)))
            return wrap_deepmind(env, **wrapper_kwargs)
        return _thunk

    set_global_seeds(seed)
    return SubprocVecEnv([make_env(i + start_index) for i in range(num_env)])

def make_mujoco_env(env_id, seed):
    """
    Create a wrapped, monitored gym.Env for MuJoCo.
    """
    set_global_seeds(seed)
    env = gym.make(env_id)
    env = Monitor(env, logger.get_dir())
    env.seed(seed)
    return env

def make_robotics_env(env_id, seed, rank=0):
    """
    Create a wrapped, monitored gym.Env for MuJoCo robotics tasks.
    """
    set_global_seeds(seed)
    env = gym.make(env_id)
    env = FlattenDictWrapper(env, ['observation', 'desired_goal'])
    env = Monitor(
        env, logger.get_dir() and os.path.join(logger.get_dir(), str(rank)),
        info_keywords=('is_success',))
    env.seed(seed)
    return env

def arg_parser():
    """
    Create an empty argparse.ArgumentParser.
    """
    import argparse
    return argparse.ArgumentParser(formatter_class=argparse.ArgumentDefaultsHelpFormatter)

def atari_arg_parser():
    """
    Create an argparse.ArgumentParser for run_atari.py.
    """
    parser = arg_parser()
    parser.add_argument('--env', help='environment ID', default='BreakoutNoFrameskip-v4')
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--num-timesteps', type=int, default=int(10000000.0))
    return parser

def mujoco_arg_parser():
    """
    Create an argparse.ArgumentParser for run_mujoco.py.
    """
    parser = arg_parser()
    parser.add_argument('--env', help='environment ID', type=str, default='Reacher-v2')
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
    return parser

def robotics_arg_parser():
    """
    Create an argparse.ArgumentParser for the robotics run script.
    """
    parser = arg_parser()
    parser.add_argument('--env', help='environment ID', type=str, default='FetchReach-v0')
    parser.add_argument('--seed', help='RNG seed', type=int, default=0)
    parser.add_argument('--num-timesteps', type=int, default=int(1000000.0))
    return parser

def fmt_row(width, row, header=False):
    out = ' | '.join(fmt_item(x, width) for x in row)
    if header:
        out = out + '\n' + ('-' * len(out))
    return out

def fmt_item(x, l):
    if isinstance(x, np.ndarray):
        assert x.ndim == 0
        x = x.item()
    if isinstance(x, (float, np.float32, np.float64)):
        v = abs(x)
        # Scientific notation for very small or very large magnitudes.
        if (v < 0.0001 or v > 10000.0) and v > 0:
            rep = '%7.2e' % x
        else:
            rep = '%7.5f' % x
    else:
        rep = str(x)
    return ' ' * (l - len(rep)) + rep

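# Hedged usage sketch (not part of the original file): right-aligned table rows
# with a dashed underline for the header. _example_fmt_row is a hypothetical name.
def _example_fmt_row():
    print(fmt_row(10, ['iter', 'loss'], header=True))
    print(fmt_row(10, [1, 0.034217]))
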
def colorize(string, color, bold=False, highlight=False):
    attr = []
    num = color2num[color]
    if highlight:
        num += 10
    attr.append(str(num))
    if bold:
        attr.append('1')
    return '\x1b[%sm%s\x1b[0m' % (';'.join(attr), string)

@contextmanager
def timed(msg):
    global MESSAGE_DEPTH
    print(colorize('\t' * MESSAGE_DEPTH + '=: ' + msg, color='magenta'))
    tstart = time.time()
    MESSAGE_DEPTH += 1
    yield
    MESSAGE_DEPTH -= 1
    print(colorize('\t' * MESSAGE_DEPTH + 'done in %.3f seconds' % (time.time() - tstart),
                   color='magenta'))

class Dataset(object):
    def __init__(self, data_map, deterministic=False, shuffle=True):
        self.data_map = data_map
        self.deterministic = deterministic
        self.enable_shuffle = shuffle
        self.n = next(iter(data_map.values())).shape[0]
        self._next_id = 0
        self.shuffle()

    def shuffle(self):
        if self.deterministic:
            return
        perm = np.arange(self.n)
        np.random.shuffle(perm)
        for key in self.data_map:
            self.data_map[key] = self.data_map[key][perm]
        self._next_id = 0

    def next_batch(self, batch_size):
        if self._next_id >= self.n and self.enable_shuffle:
            self.shuffle()
        cur_id = self._next_id
        cur_batch_size = min(batch_size, self.n - self._next_id)
        self._next_id += cur_batch_size
        data_map = dict()
        for key in self.data_map:
            data_map[key] = self.data_map[key][cur_id:cur_id + cur_batch_size]
        return data_map

    def iterate_once(self, batch_size):
        if self.enable_shuffle:
            self.shuffle()
        while self._next_id <= self.n - batch_size:
            yield self.next_batch(batch_size)
        self._next_id = 0

    def subset(self, num_elements, deterministic=True):
        data_map = dict()
        for key in self.data_map:
            data_map[key] = self.data_map[key][:num_elements]
        return Dataset(data_map, deterministic)

def iterbatches(arrays, *, num_batches=None, batch_size=None, shuffle=True,
                include_final_partial_batch=True):
    assert (num_batches is None) != (batch_size is None), \
        'Provide num_batches or batch_size, but not both'
    arrays = tuple(map(np.asarray, arrays))
    n = arrays[0].shape[0]
    assert all(a.shape[0] == n for a in arrays[1:])
    inds = np.arange(n)
    if shuffle:
        np.random.shuffle(inds)
    sections = np.arange(0, n, batch_size)[1:] if num_batches is None else num_batches
    for batch_inds in np.array_split(inds, sections):
        if include_final_partial_batch or len(batch_inds) == batch_size:
            yield tuple(a[batch_inds] for a in arrays)

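# Hedged usage sketch (not part of the original file): iterate two aligned arrays
# in batches of 4; the final batch may be smaller. _example_iterbatches is a
# hypothetical name.
def _example_iterbatches():
    xs = np.arange(10)
    ys = xs * 2
    for bx, by in iterbatches((xs, ys), batch_size=4, shuffle=False):
        assert np.array_equal(by, bx * 2)
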
class Filter(object):
    def __call__(self, x, update=True):
        raise NotImplementedError

    def reset(self):
        pass

class IdentityFilter(Filter):
    def __call__(self, x, update=True):
        return x

class CompositionFilter(Filter):
    def __init__(self, fs):
        self.fs = fs

    def __call__(self, x, update=True):
        for f in self.fs:
            x = f(x)
        return x

    def output_shape(self, input_space):
        out = input_space.shape
        for f in self.fs:
            out = f.output_shape(out)
        return out

class ZFilter(Filter):
    """
    y = (x-mean)/std
    using running estimates of mean,std
    """

    def __init__(self, shape, demean=True, destd=True, clip=10.0):
        self.demean = demean
        self.destd = destd
        self.clip = clip
        self.rs = RunningStat(shape)

    def __call__(self, x, update=True):
        if update:
            self.rs.push(x)
        if self.demean:
            x = x - self.rs.mean
        if self.destd:
            x = x / (self.rs.std + 1e-08)
        if self.clip:
            x = np.clip(x, -self.clip, self.clip)
        return x

    def output_shape(self, input_space):
        return input_space.shape

class AddClock(Filter):
    def __init__(self):
        self.count = 0

    def reset(self):
        self.count = 0

    def __call__(self, x, update=True):
        return np.append(x, self.count / 100.0)

    def output_shape(self, input_space):
        return (input_space.shape[0] + 1,)

class FlattenFilter(Filter):
    def __call__(self, x, update=True):
        return x.ravel()

    def output_shape(self, input_space):
        return (int(np.prod(input_space.shape)),)

class Ind2OneHotFilter(Filter):
    def __init__(self, n):
        self.n = n

    def __call__(self, x, update=True):
        out = np.zeros(self.n)
        out[x] = 1
        return out

    def output_shape(self, input_space):
        return (input_space.n,)

class DivFilter(Filter):
    def __init__(self, divisor):
        self.divisor = divisor

    def __call__(self, x, update=True):
        return x / self.divisor

    def output_shape(self, input_space):
        return input_space.shape

class StackFilter(Filter):
    def __init__(self, length):
        self.stack = deque(maxlen=length)

    def reset(self):
        self.stack.clear()

    def __call__(self, x, update=True):
        self.stack.append(x)
        # Pad with the first observation until the stack is full.
        while len(self.stack) < self.stack.maxlen:
            self.stack.append(x)
        return np.concatenate(self.stack, axis=-1)

    def output_shape(self, input_space):
        return input_space.shape[:-1] + (input_space.shape[-1] * self.stack.maxlen,)

def discount(x, gamma):
    """
    computes discounted sums along 0th dimension of x.

    inputs
    ------
    x: ndarray
    gamma: float

    outputs
    -------
    y: ndarray with same shape as x, satisfying

        y[t] = x[t] + gamma*x[t+1] + gamma^2*x[t+2] + ... + gamma^k x[t+k],
            where k = len(x) - t - 1
    """
    assert x.ndim >= 1
    return scipy.signal.lfilter([1], [1, -gamma], x[::-1], axis=0)[::-1]

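# Hedged usage sketch (not part of the original file): with gamma = 0.5,
# discount([1, 1, 1]) = [1 + 0.5 + 0.25, 1 + 0.5, 1]. Assumes numpy and scipy
# are available as in this file; _example_discount is a hypothetical name.
def _example_discount():
    y = discount(np.array([1.0, 1.0, 1.0]), 0.5)
    assert np.allclose(y, [1.75, 1.5, 1.0])
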
def explained_variance(ypred, y):
    """
    Computes fraction of variance that ypred explains about y.
    Returns 1 - Var[y-ypred] / Var[y]

    interpretation:
        ev=0  =>  might as well have predicted zero
        ev=1  =>  perfect prediction
        ev<0  =>  worse than just predicting zero
    """
    assert y.ndim == 1 and ypred.ndim == 1
    vary = np.var(y)
    return np.nan if vary == 0 else 1 - np.var(y - ypred) / vary

def explained_variance_2d(ypred, y):
    assert y.ndim == 2 and ypred.ndim == 2
    vary = np.var(y, axis=0)
    out = 1 - np.var(y - ypred) / vary
    out[vary < 1e-10] = 0
    return out

def ncc(ypred, y):
    return np.corrcoef(ypred, y)[1, 0]

def flatten_arrays(arrs):
    return np.concatenate([arr.flat for arr in arrs])

def unflatten_vector(vec, shapes):
    i = 0
    arrs = []
    for shape in shapes:
        size = np.prod(shape)
        arr = vec[i:i + size].reshape(shape)
        arrs.append(arr)
        i += size
    return arrs

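# Hedged usage sketch (not part of the original file): flatten_arrays and
# unflatten_vector are inverses given the original shapes.
# _example_flatten_roundtrip is a hypothetical name.
def _example_flatten_roundtrip():
    a = np.zeros((2, 3))
    b = np.ones(4)
    flat = flatten_arrays([a, b])
    a2, b2 = unflatten_vector(flat, [(2, 3), (4,)])
    assert np.array_equal(a, a2) and np.array_equal(b, b2)
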
def discount_with_boundaries(X, New, gamma):
    """
    X: 2d array of floats, time x features
    New: 2d array of bools, indicating when a new episode has started
    """
    Y = np.zeros_like(X)
    T = X.shape[0]
    Y[T - 1] = X[T - 1]
    for t in range(T - 2, -1, -1):
        # Zero out the discounted future return across episode boundaries.
        Y[t] = X[t] + gamma * Y[t + 1] * (1 - New[t + 1])
    return Y

def test_discount_with_boundaries():
    gamma = 0.9
    x = np.array([1.0, 2.0, 3.0, 4.0], 'float32')
    starts = [1.0, 0.0, 0.0, 1.0]
    y = discount_with_boundaries(x, starts, gamma)
    assert np.allclose(y, [1 + gamma * 2 + gamma ** 2 * 3, 2 + gamma * 3, 3, 4])

def zipsame(*seqs):
    L = len(seqs[0])
    assert all(len(seq) == L for seq in seqs[1:])
    return zip(*seqs)

def unpack(seq, sizes):
    """
    Unpack 'seq' into a sequence of lists, with lengths specified by 'sizes'.
    None = just one bare element, not a list

    Example:
        unpack([1,2,3,4,5,6], [3,None,2]) -> ([1,2,3], 4, [5,6])
    """
    seq = list(seq)
    it = iter(seq)
    assert sum(1 if s is None else s for s in sizes) == len(seq), \
        'Trying to unpack %s into %s' % (seq, sizes)
    for size in sizes:
        if size is None:
            yield it.__next__()
        else:
            li = []
            for _ in range(size):
                li.append(it.__next__())
            yield li

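# Hedged usage sketch (not part of the original file): mirrors the docstring
# example. _example_unpack is a hypothetical name.
def _example_unpack():
    a, b, c = unpack([1, 2, 3, 4, 5, 6], [3, None, 2])
    assert a == [1, 2, 3] and b == 4 and c == [5, 6]
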
class EzPickle(object):
    """Objects that are pickled and unpickled via their constructor arguments.

    Example usage:

        class Dog(Animal, EzPickle):
            def __init__(self, furcolor, tailkind="bushy"):
                Animal.__init__()
                EzPickle.__init__(furcolor, tailkind)
                ...

    When this object is unpickled, a new Dog will be constructed by passing the provided
    furcolor and tailkind into the constructor. However, philosophers are still not sure
    whether it is still the same dog.

    This is generally needed only for environments which wrap C/C++ code, such as MuJoCo
    and Atari.
    """

    def __init__(self, *args, **kwargs):
        self._ezpickle_args = args
        self._ezpickle_kwargs = kwargs

    def __getstate__(self):
        return {'_ezpickle_args': self._ezpickle_args, '_ezpickle_kwargs': self._ezpickle_kwargs}

    def __setstate__(self, d):
        out = type(self)(*d['_ezpickle_args'], **d['_ezpickle_kwargs'])
        self.__dict__.update(out.__dict__)

def set_global_seeds(i):
    try:
        import tensorflow as tf
    except ImportError:
        pass
    else:
        tf.set_random_seed(i)
    np.random.seed(i)
    random.seed(i)

def pretty_eta(seconds_left):
    """Print the number of seconds in human readable format.

    Examples:
        2 days
        2 hours and 37 minutes
        less than a minute

    Parameters
    ----------
    seconds_left: int
        Number of seconds to be converted to the ETA
    Returns
    -------
    eta: str
        String representing the pretty ETA.
    """
    minutes_left = seconds_left // 60
    seconds_left %= 60
    hours_left = minutes_left // 60
    minutes_left %= 60
    days_left = hours_left // 24
    hours_left %= 24

    def helper(cnt, name):
        return '{} {}{}'.format(str(cnt), name, 's' if cnt > 1 else '')

    if days_left > 0:
        msg = helper(days_left, 'day')
        if hours_left > 0:
            msg += ' and ' + helper(hours_left, 'hour')
        return msg
    if hours_left > 0:
        msg = helper(hours_left, 'hour')
        if minutes_left > 0:
            msg += ' and ' + helper(minutes_left, 'minute')
        return msg
    if minutes_left > 0:
        return helper(minutes_left, 'minute')
    return 'less than a minute'

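# Hedged usage sketch (not part of the original file): matches the docstring
# examples. _example_pretty_eta is a hypothetical name.
def _example_pretty_eta():
    assert pretty_eta(2 * 3600 + 37 * 60) == '2 hours and 37 minutes'
    assert pretty_eta(45) == 'less than a minute'
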
class RunningAvg(object):
    def __init__(self, gamma, init_value=None):
        """Keep a running estimate of a quantity. This is a bit like mean
        but more sensitive to recent changes.

        Parameters
        ----------
        gamma: float
            Must be between 0 and 1, where 0 is the most sensitive to recent
            changes.
        init_value: float or None
            Initial value of the estimate. If None, it will be set on the first update.
        """
        self._value = init_value
        self._gamma = gamma

    def update(self, new_val):
        """Update the estimate.

        Parameters
        ----------
        new_val: float
            new observed value of estimated quantity.
        """
        if self._value is None:
            self._value = new_val
        else:
            self._value = self._gamma * self._value + (1.0 - self._gamma) * new_val

    def __float__(self):
        """Get the current estimate"""
        return self._value

def boolean_flag(parser, name, default=False, help=None):
    """Add a boolean flag to argparse parser.

    Parameters
    ----------
    parser: argparse.Parser
        parser to add the flag to
    name: str
        --<name> will enable the flag, while --no-<name> will disable it
    default: bool or None
        default value of the flag
    help: str
        help string for the flag
    """
    dest = name.replace('-', '_')
    parser.add_argument('--' + name, action='store_true', default=default, dest=dest, help=help)
    parser.add_argument('--no-' + name, action='store_false', dest=dest)

def get_wrapper_by_name(env, classname):
    """Given a gym environment possibly wrapped multiple times, returns a wrapper
    of class named classname or raises ValueError if no such wrapper was applied

    Parameters
    ----------
    env: gym.Env or gym.Wrapper
        gym environment
    classname: str
        name of the wrapper

    Returns
    -------
    wrapper: gym.Wrapper
        wrapper named classname
    """
    currentenv = env
    while True:
        if classname == currentenv.class_name():
            return currentenv
        elif isinstance(currentenv, gym.Wrapper):
            currentenv = currentenv.env
        else:
            raise ValueError("Couldn't find wrapper named %s" % classname)

def relatively_safe_pickle_dump(obj, path, compression=False):
    """This is just like regular pickle dump, except from the fact that failure cases are
    different:

    - It's never possible that we end up with a pickle in corrupted state.
    - If there was a different file at the path, that file will remain unchanged in the
      event of failure (provided that filesystem rename is atomic).
    - It is sometimes possible that we end up with a useless temp file which needs to be
      deleted manually (it will be removed automatically on the next function call).

    The intended use case is periodic checkpoints of experiment state, such that we never
    corrupt previous checkpoints if the current one fails.

    Parameters
    ----------
    obj: object
        object to pickle
    path: str
        path to the output file
    compression: bool
        if true pickle will be compressed
    """
    temp_storage = path + '.relatively_safe'
    if compression:
        with tempfile.NamedTemporaryFile() as uncompressed_file:
            pickle.dump(obj, uncompressed_file)
            uncompressed_file.file.flush()
            with zipfile.ZipFile(temp_storage, 'w', compression=zipfile.ZIP_DEFLATED) as myzip:
                myzip.write(uncompressed_file.name, 'data')
    else:
        with open(temp_storage, 'wb') as f:
            pickle.dump(obj, f)
    # Atomic rename guarantees the destination is never left half-written.
    os.rename(temp_storage, path)

def pickle_load(path, compression=False):
    """Unpickle a possibly compressed pickle.

    Parameters
    ----------
    path: str
        path to the output file
    compression: bool
        if true assumes that pickle was compressed when created and attempts decompression.

    Returns
    -------
    obj: object
        the unpickled object
    """
    if compression:
        with zipfile.ZipFile(path, 'r', compression=zipfile.ZIP_DEFLATED) as myzip:
            with myzip.open('data') as f:
                return pickle.load(f)
    else:
        with open(path, 'rb') as f:
            return pickle.load(f)

class MpiAdam(object):
    def __init__(self, var_list, *, beta1=0.9, beta2=0.999, epsilon=1e-08,
                 scale_grad_by_procs=True, comm=None):
        self.var_list = var_list
        self.beta1 = beta1
        self.beta2 = beta2
        self.epsilon = epsilon
        self.scale_grad_by_procs = scale_grad_by_procs
        size = sum(U.numel(v) for v in var_list)
        self.m = np.zeros(size, 'float32')
        self.v = np.zeros(size, 'float32')
        self.t = 0
        self.setfromflat = U.SetFromFlat(var_list)
        self.getflat = U.GetFlat(var_list)
        self.comm = MPI.COMM_WORLD if comm is None else comm

    def update(self, localg, stepsize):
        if self.t % 100 == 0:
            self.check_synced()
        localg = localg.astype('float32')
        globalg = np.zeros_like(localg)
        self.comm.Allreduce(localg, globalg, op=MPI.SUM)
        if self.scale_grad_by_procs:
            globalg /= self.comm.Get_size()
        self.t += 1
        # Bias-corrected Adam step on the globally averaged gradient.
        a = stepsize * np.sqrt(1 - self.beta2 ** self.t) / (1 - self.beta1 ** self.t)
        self.m = self.beta1 * self.m + (1 - self.beta1) * globalg
        self.v = self.beta2 * self.v + (1 - self.beta2) * (globalg * globalg)
        step = (-a) * self.m / (np.sqrt(self.v) + self.epsilon)
        self.setfromflat(self.getflat() + step)

    def sync(self):
        theta = self.getflat()
        self.comm.Bcast(theta, root=0)
        self.setfromflat(theta)

    def check_synced(self):
        if self.comm.Get_rank() == 0:
            # Root broadcasts its parameters; workers verify they match.
            theta = self.getflat()
            self.comm.Bcast(theta, root=0)
        else:
            thetalocal = self.getflat()
            thetaroot = np.empty_like(thetalocal)
            self.comm.Bcast(thetaroot, root=0)
            assert (thetaroot == thetalocal).all(), (thetaroot, thetalocal)

@U.in_session
def test_MpiAdam():
    np.random.seed(0)
    tf.set_random_seed(0)
    a = tf.Variable(np.random.randn(3).astype('float32'))
    b = tf.Variable(np.random.randn(2, 5).astype('float32'))
    loss = tf.reduce_sum(tf.square(a)) + tf.reduce_sum(tf.sin(b))
    stepsize = 0.01
    update_op = tf.train.AdamOptimizer(stepsize).minimize(loss)
    do_update = U.function([], loss, updates=[update_op])
    tf.get_default_session().run(tf.global_variables_initializer())
    for i in range(10):
        print(i, do_update())
    tf.set_random_seed(0)
    tf.get_default_session().run(tf.global_variables_initializer())
    var_list = [a, b]
    lossandgrad = U.function([], [loss, U.flatgrad(loss, var_list)], updates=[update_op])
    adam = MpiAdam(var_list)
    for i in range(10):
        l, g = lossandgrad()
        adam.update(g, stepsize)
        print(i, l)

def mpi_fork(n, bind_to_core=False):
    """Re-launches the current script with workers.
    Returns "parent" for original parent, "child" for MPI children
    """
    if n <= 1:
        return 'child'
    if os.getenv('IN_MPI') is None:
        env = os.environ.copy()
        env.update(MKL_NUM_THREADS='1', OMP_NUM_THREADS='1', IN_MPI='1')
        args = ['mpirun', '-np', str(n)]
        if bind_to_core:
            args += ['-bind-to', 'core']
        args += [sys.executable] + sys.argv
        subprocess.check_call(args, env=env)
        return 'parent'
    else:
        return 'child'

def mpi_mean(x, axis=0, comm=None, keepdims=False):
    x = np.asarray(x)
    assert x.ndim > 0
    if comm is None:
        comm = MPI.COMM_WORLD
    xsum = x.sum(axis=axis, keepdims=keepdims)
    n = xsum.size
    # Pack the local sum and the local count into one buffer so a single
    # Allreduce suffices.
    localsum = np.zeros(n + 1, x.dtype)
    localsum[:n] = xsum.ravel()
    localsum[n] = x.shape[axis]
    globalsum = np.zeros_like(localsum)
    comm.Allreduce(localsum, globalsum, op=MPI.SUM)
    return globalsum[:n].reshape(xsum.shape) / globalsum[n], globalsum[n]

def mpi_moments(x, axis=0, comm=None, keepdims=False):
    x = np.asarray(x)
    assert x.ndim > 0
    mean, count = mpi_mean(x, axis=axis, comm=comm, keepdims=True)
    sqdiffs = np.square(x - mean)
    meansqdiff, count1 = mpi_mean(sqdiffs, axis=axis, comm=comm, keepdims=True)
    assert count1 == count
    std = np.sqrt(meansqdiff)
    if not keepdims:
        newshape = mean.shape[:axis] + mean.shape[axis + 1:]
        mean = mean.reshape(newshape)
        std = std.reshape(newshape)
    return mean, std, count

def test_runningmeanstd():
    import subprocess
    subprocess.check_call([
        'mpirun', '-np', '3', 'python', '-c',
        'from baselines.common.mpi_moments import _helper_runningmeanstd; _helper_runningmeanstd()'])

def _helper_runningmeanstd():
    comm = MPI.COMM_WORLD
    np.random.seed(0)
    for (triple, axis) in [
            ((np.random.randn(3), np.random.randn(4), np.random.randn(5)), 0),
            ((np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2)), 0),
            ((np.random.randn(2, 3), np.random.randn(2, 4), np.random.randn(2, 4)), 1)]:
        x = np.concatenate(triple, axis=axis)
        ms1 = [x.mean(axis=axis), x.std(axis=axis), x.shape[axis]]
        ms2 = mpi_moments(triple[comm.Get_rank()], axis=axis)
        for (a1, a2) in zipsame(ms1, ms2):
            print(a1, a2)
            assert np.allclose(a1, a2)
        print('ok!')

class RunningMeanStd(object):
    def __init__(self, epsilon=0.01, shape=()):
        self._sum = tf.get_variable(
            dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(0.0),
            name='runningsum', trainable=False)
        self._sumsq = tf.get_variable(
            dtype=tf.float64, shape=shape, initializer=tf.constant_initializer(epsilon),
            name='runningsumsq', trainable=False)
        self._count = tf.get_variable(
            dtype=tf.float64, shape=(), initializer=tf.constant_initializer(epsilon),
            name='count', trainable=False)
        self.shape = shape
        self.mean = tf.to_float(self._sum / self._count)
        self.std = tf.sqrt(tf.maximum(
            tf.to_float(self._sumsq / self._count) - tf.square(self.mean), 0.01))
        newsum = tf.placeholder(shape=self.shape, dtype=tf.float64, name='sum')
        newsumsq = tf.placeholder(shape=self.shape, dtype=tf.float64, name='var')
        newcount = tf.placeholder(shape=[], dtype=tf.float64, name='count')
        self.incfiltparams = U.function(
            [newsum, newsumsq, newcount], [],
            updates=[tf.assign_add(self._sum, newsum),
                     tf.assign_add(self._sumsq, newsumsq),
                     tf.assign_add(self._count, newcount)])

    def update(self, x):
        x = x.astype('float64')
        n = int(np.prod(self.shape))
        # Sum, sum of squares, and count are reduced across MPI workers in a
        # single Allreduce, then folded into the TF variables.
        totalvec = np.zeros(n * 2 + 1, 'float64')
        addvec = np.concatenate([
            x.sum(axis=0).ravel(),
            np.square(x).sum(axis=0).ravel(),
            np.array([len(x)], dtype='float64')])
        MPI.COMM_WORLD.Allreduce(addvec, totalvec, op=MPI.SUM)
        self.incfiltparams(totalvec[0:n].reshape(self.shape),
                           totalvec[n:2 * n].reshape(self.shape),
                           totalvec[2 * n])

@U.in_session
def test_runningmeanstd():
    for (x1, x2, x3) in [
            (np.random.randn(3), np.random.randn(4), np.random.randn(5)),
            (np.random.randn(3, 2), np.random.randn(4, 2), np.random.randn(5, 2))]:
        rms = RunningMeanStd(epsilon=0.0, shape=x1.shape[1:])
        U.initialize()
        x = np.concatenate([x1, x2, x3], axis=0)
        ms1 = [x.mean(axis=0), x.std(axis=0)]
        rms.update(x1)
        rms.update(x2)
        rms.update(x3)
        ms2 = [rms.mean.eval(), rms.std.eval()]
        assert np.allclose(ms1, ms2)